/*
 * Copyright (c) 1998 Matthew Dillon,
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Radix Bitmap 'blists'.
 *
 *    - The new swapper uses the new radix bitmap code.  This should scale
 *      to arbitrarily small or arbitrarily large swap spaces and an almost
 *      arbitrary degree of fragmentation.
 *
 * Features:
 *
 *    - on the fly reallocation of swap during putpages.  The new system
 *      does not try to keep previously allocated swap blocks for dirty
 *      pages.
 *
 *    - on the fly deallocation of swap
 *
 *    - No more garbage collection required.  Unnecessarily allocated swap
 *      blocks only exist for dirty vm_page_t's now and these are already
 *      cycled (in a high-load system) by the pager.  We also do on-the-fly
 *      removal of invalidated swap blocks when a page is destroyed
 *      or renamed.
 *
 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
 * @(#)swap_pager.c	8.9 (Berkeley) 3/21/94
 *
 * $FreeBSD: src/sys/vm/swap_pager.c,v 1.130.2.12 2002/08/31 21:15:55 dillon Exp $
 * $DragonFly: src/sys/vm/swap_pager.c,v 1.2 2003/06/17 04:29:00 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/blist.h>

#ifndef MAX_PAGEOUT_CLUSTER
#define MAX_PAGEOUT_CLUSTER 16
#endif

#define SWB_NPAGES	MAX_PAGEOUT_CLUSTER

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#define SWM_FREE	0x02	/* free, period			*/
#define SWM_POP		0x04	/* pop out			*/
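
/*
 * Illustrative sketch (not part of the original source; hypothetical
 * caller): how the SWM_FREE and SWM_POP control flags drive
 * swp_pager_meta_ctl(), which is declared further down.  See
 * swap_pager_unswapped() and swap_pager_copy() for the real uses.
 */
#if 0
static void
swm_flags_example(vm_object_t object, vm_pindex_t index)
{
    daddr_t blk;

    /* plain lookup: metadata is left untouched */
    blk = swp_pager_meta_ctl(object, index, 0);

    /* pop: remove the assignment but keep the swap block allocated */
    blk = swp_pager_meta_ctl(object, index, SWM_POP);

    /* free: remove the assignment and return the block to the bitmap */
    (void)swp_pager_meta_ctl(object, index, SWM_FREE);
}
#endif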

/*
 * vm_swap_size is in page-sized chunks now.  It was DEV_BSIZE'd chunks
 * in the old swap pager.
 */
extern int vm_swap_size;	/* number of free swap blocks, in pages */

int swap_pager_full;		/* swap space exhaustion (task killing)	*/
static int swap_pager_almost_full; /* swap space exhaustion (w/ hysteresis) */
static int nsw_rcount;		/* free read buffers			*/
static int nsw_wcount_sync;	/* limit write buffers / synchronous	*/
static int nsw_wcount_async;	/* limit write buffers / asynchronous	*/
static int nsw_wcount_async_max;/* assigned maximum			*/
static int nsw_cluster_max;	/* maximum VOP I/O allowed		*/
static int sw_alloc_interlock;	/* swap pager allocation interlock	*/

struct blist *swapblist;
static struct swblock **swhash;
static int swhash_mask;
static int swap_async_max = 4;	/* maximum in-progress async I/O's	*/

extern struct vnode *swapdev_vp;	/* from vm_swap.c */

SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
    CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");

/*
 * "named" and "unnamed" anon region objects.  Try to reduce the overhead
 * of searching a named list by hashing it just a little.
 */

#define NOBJLISTS	8

#define NOBJLIST(handle)	\
    (&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])
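
/*
 * Illustrative sketch (hypothetical helper, assuming NOBJLISTS is the
 * power-of-2 list count defined above): a handle is hashed by dropping
 * its low four alignment bits and masking the result into the list
 * array, so handles that differ only in bits 0-3 share a bucket.
 */
#if 0
static struct pagerlst *
nobjlist_bucket_example(void *handle)
{
    /* same arithmetic as the NOBJLIST() macro above */
    return (&swap_pager_object_list[((int)(intptr_t)handle >> 4) &
        (NOBJLISTS - 1)]);
}
#endif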

static struct pagerlst	swap_pager_object_list[NOBJLISTS];
struct pagerlst		swap_pager_un_object_list;

/*
 * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
 * calls hooked from other parts of the VM system and do not appear here.
 * (see vm/swap_pager.h).
 */

static vm_object_t
		swap_pager_alloc __P((void *handle, vm_ooffset_t size,
				      vm_prot_t prot, vm_ooffset_t offset));
static void	swap_pager_dealloc __P((vm_object_t object));
static int	swap_pager_getpages __P((vm_object_t, vm_page_t *, int, int));
static void	swap_pager_init __P((void));
static void	swap_pager_unswapped __P((vm_page_t));
static void	swap_pager_strategy __P((vm_object_t, struct buf *));

struct pagerops swappagerops = {
	swap_pager_init,	/* early system initialization of pager	*/
	swap_pager_alloc,	/* allocate an OBJT_SWAP object		*/
	swap_pager_dealloc,	/* deallocate an OBJT_SWAP object	*/
	swap_pager_getpages,	/* pagein				*/
	swap_pager_putpages,	/* pageout				*/
	swap_pager_haspage,	/* get backing store status for page	*/
	swap_pager_unswapped,	/* remove swap related to page		*/
	swap_pager_strategy	/* pager strategy call			*/
};

/*
 * dmmax is in page-sized chunks with the new swap system.  It was
 * dev-bsized chunks in the old.  dmmax is always a power of 2.
 *
 * swap_*() routines are externally accessible.  swp_*() routines are
 * internal.
 */

static int dmmax;
static int dmmax_mask;
int nswap_lowat = 128;		/* in pages, swap_pager_almost_full warn */
int nswap_hiwat = 512;		/* in pages, swap_pager_almost_full warn */

static __inline void	swp_sizecheck __P((void));
static void	swp_pager_sync_iodone __P((struct buf *bp));
static void	swp_pager_async_iodone __P((struct buf *bp));

/*
 * Swap bitmap functions
 */

static __inline void	swp_pager_freeswapspace __P((daddr_t blk, int npages));
static __inline daddr_t	swp_pager_getswapspace __P((int npages));

/*
 * Metadata functions
 */

static void swp_pager_meta_build __P((vm_object_t, vm_pindex_t, daddr_t));
static void swp_pager_meta_free __P((vm_object_t, vm_pindex_t, daddr_t));
static void swp_pager_meta_free_all __P((vm_object_t));
static daddr_t swp_pager_meta_ctl __P((vm_object_t, vm_pindex_t, int));

/*
 * SWP_SIZECHECK() - update swap_pager_full indication
 *
 *    update the swap_pager_almost_full indication and warn when we are
 *    about to run out of swap space, using lowat/hiwat hysteresis.
 *
 *    Clear swap_pager_full ( task killing ) indication when lowat is met.
 *
 *    No restrictions on call
 *    This routine may not block.
 *    This routine must be called at splvm()
 */

static __inline void
swp_sizecheck()
{
    if (vm_swap_size < nswap_lowat) {
        if (swap_pager_almost_full == 0) {
            printf("swap_pager: out of swap space\n");
            swap_pager_almost_full = 1;
        }
    } else {
        swap_pager_full = 0;
        if (vm_swap_size > nswap_hiwat)
            swap_pager_almost_full = 0;
    }
}

/*
 * SWAP_PAGER_INIT() - initialize the swap pager!
 *
 *    Expected to be started from system init.  NOTE:  This code is run
 *    before much else so be careful what you depend on.  Most of the VM
 *    system has yet to be initialized at this point.
 */

static void
swap_pager_init()
{
    int i;

    /*
     * Initialize object lists
     */
    for (i = 0; i < NOBJLISTS; ++i)
        TAILQ_INIT(&swap_pager_object_list[i]);
    TAILQ_INIT(&swap_pager_un_object_list);

    /*
     * Device Stripe, in PAGE_SIZE'd blocks
     */
    dmmax = SWB_NPAGES * 2;
    dmmax_mask = ~(dmmax - 1);
}
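
/*
 * Illustrative sketch (hypothetical helper): because dmmax is a power
 * of 2, dmmax_mask selects the "stripe number" bits of a page-sized
 * block address.  Two blocks sit on the same device stripe exactly
 * when those bits agree, which is the ((a ^ b) & dmmax_mask) test used
 * by swap_pager_strategy(), getpages and putpages below.
 */
#if 0
static __inline int
same_stripe_example(daddr_t a, daddr_t b)
{
    return (((a ^ b) & dmmax_mask) == 0);
}
#endif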

/*
 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
 *
 *    Expected to be started from pageout process once, prior to entering
 *    its main loop.
 */

void
swap_pager_swap_init()
{
    int n, n2;

    /*
     * Number of in-transit swap bp operations.  Don't
     * exhaust the pbufs completely.  Make sure we
     * initialize workable values (0 will work for hysteresis
     * but it isn't very efficient).
     *
     * The nsw_cluster_max is constrained by the bp->b_pages[]
     * array (MAXPHYS/PAGE_SIZE) and our locally defined
     * MAX_PAGEOUT_CLUSTER.  Also be aware that swap ops are
     * constrained by the swap device interleave stripe size.
     *
     * Currently we hardwire nsw_wcount_async to 4.  This limit is
     * designed to prevent other I/O from having high latencies due to
     * our pageout I/O.  The value 4 works well for one or two active swap
     * devices but is probably a little low if you have more.  Even so,
     * a higher value would probably generate only a limited improvement
     * with three or four active swap devices since the system does not
     * typically have to pageout at extreme bandwidths.  We will want
     * at least 2 per swap device, and 4 is a pretty good value if you
     * have one NFS swap device due to the command/ack latency over NFS.
     * So it all works out pretty well.
     */

    nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);

    nsw_rcount = (nswbuf + 1) / 2;
    nsw_wcount_sync = (nswbuf + 3) / 4;
    nsw_wcount_async = 4;
    nsw_wcount_async_max = nsw_wcount_async;

    /*
     * Initialize our zone.  Right now I'm just guessing on the number
     * we need based on the number of pages in the system.  Each swblock
     * can hold 16 pages, so this is probably overkill.  This reservation
     * is typically limited to around 32MB by default.
     */
    n = cnt.v_page_count / 2;
    if (maxswzone && n > maxswzone / sizeof(struct swblock))
        n = maxswzone / sizeof(struct swblock);
    n2 = n;

    do {
        swap_zone = zinit(
            "SWAPMETA",
            sizeof(struct swblock),
            n,
            ZONE_INTERRUPT,
            1
        );
        if (swap_zone != NULL)
            break;
        /*
         * if the allocation failed, try a zone two thirds the
         * size of the previous attempt.
         */
        n -= ((n + 2) / 3);
    } while (n > 0);

    if (swap_zone == NULL)
        panic("swap_pager_swap_init: swap_zone == NULL");
    if (n2 != n)
        printf("Swap zone entries reduced from %d to %d.\n", n2, n);
    n2 = n;

    /*
     * Initialize our meta-data hash table.  The swapper does not need to
     * be quite as efficient as the VM system, so we do not use an
     * oversized hash table.
     *
     *    n:           size of hash table, must be power of 2
     *    swhash_mask: hash table index mask
     */
    for (n = 1; n < n2 / 8; n *= 2)
        ;

    swhash = malloc(sizeof(struct swblock *) * n, M_VMPGDATA, M_WAITOK);
    bzero(swhash, sizeof(struct swblock *) * n);

    swhash_mask = n - 1;
}

/*
 * SWAP_PAGER_ALLOC() - allocate a new OBJT_SWAP VM object and instantiate
 *                      its metadata structures.
 *
 *    This routine is called from the mmap and fork code to create a new
 *    OBJT_SWAP object.  We do this by creating an OBJT_DEFAULT object
 *    and then converting it with swp_pager_meta_build().
 *
 *    This routine may block in vm_object_allocate() and create a named
 *    object lookup race, so we must interlock.  We must also run at
 *    splvm() for the object lookup to handle races with interrupts, but
 *    we do not have to maintain splvm() in between the lookup and the
 *    add because (I believe) it is not possible to attempt to create
 *    a new swap object w/handle when a default object with that handle
 *    already exists.
 */

static vm_object_t
swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t offset)
{
    vm_object_t object;

    if (handle) {
        /*
         * Reference existing named region or allocate new one.  There
         * should not be a race here against swp_pager_meta_build()
         * as called from vm_page_remove() in regards to the lookup
         * of the handle.
         */

        while (sw_alloc_interlock) {
            sw_alloc_interlock = -1;
            tsleep(&sw_alloc_interlock, PVM, "swpalc", 0);
        }
        sw_alloc_interlock = 1;

        object = vm_pager_object_lookup(NOBJLIST(handle), handle);

        if (object != NULL) {
            vm_object_reference(object);
        } else {
            object = vm_object_allocate(OBJT_DEFAULT,
                OFF_TO_IDX(offset + PAGE_MASK + size));
            object->handle = handle;

            swp_pager_meta_build(object, 0, SWAPBLK_NONE);
        }

        if (sw_alloc_interlock < 0)
            wakeup(&sw_alloc_interlock);

        sw_alloc_interlock = 0;
    } else {
        object = vm_object_allocate(OBJT_DEFAULT,
            OFF_TO_IDX(offset + PAGE_MASK + size));

        swp_pager_meta_build(object, 0, SWAPBLK_NONE);
    }

    return (object);
}

/*
 * SWAP_PAGER_DEALLOC() - remove swap metadata from object
 *
 *    The swap backing for the object is destroyed.  The code is
 *    designed such that we can reinstantiate it later, but this
 *    routine is typically called only when the entire object is
 *    about to be destroyed.
 *
 *    This routine may block, but no longer does.
 *
 *    The object must be locked or unreferenceable.
 */

static void
swap_pager_dealloc(object)
    vm_object_t object;
{
    int s;

    /*
     * Remove from list right away so lookups will fail if we block for
     * pageout completion.
     */
    if (object->handle == NULL) {
        TAILQ_REMOVE(&swap_pager_un_object_list, object, pager_object_list);
    } else {
        TAILQ_REMOVE(NOBJLIST(object->handle), object, pager_object_list);
    }

    vm_object_pip_wait(object, "swpdea");

    /*
     * Free all remaining metadata.  We only bother to free it from
     * the swap meta data.  We do not attempt to free swapblk's still
     * associated with vm_page_t's for this object.  We do not care
     * if paging is still in progress on some objects.
     */
    s = splvm();
    swp_pager_meta_free_all(object);
    splx(s);
}

/************************************************************************
 *			SWAP PAGER BITMAP ROUTINES			*
 ************************************************************************/
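
/*
 * Illustrative sketch (hypothetical, for non-interrupt context): the
 * radix bitmap allocator these routines wrap.  blist_alloc() and
 * blist_free() are the <sys/blist.h> calls actually used by this file;
 * the standalone create/destroy setup here is an assumption for
 * illustration only, since the real swapblist is created by the swap
 * configuration code.
 */
#if 0
static void
blist_usage_example(void)
{
    struct blist *bl;
    daddr_t blk;

    bl = blist_create(1024);          /* manage 1024 page-sized blocks */
    blk = blist_alloc(bl, 16);        /* contiguous run of 16, or SWAPBLK_NONE */
    if (blk != SWAPBLK_NONE)
        blist_free(bl, blk, 16);      /* return the run to the bitmap */
    blist_destroy(bl);
}
#endif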

/*
 * SWP_PAGER_GETSWAPSPACE() - allocate raw swap space
 *
 *    Allocate swap for the requested number of pages.  The starting
 *    swap block number (a page index) is returned or SWAPBLK_NONE
 *    if the allocation failed.
 *
 *    Also has the side effect of advising that somebody made a mistake
 *    when they configured swap and didn't configure enough.
 *
 *    Must be called at splvm() to avoid races with bitmap frees from
 *    vm_page_remove() aka swap_pager_page_removed().
 *
 *    This routine may not block
 *    This routine must be called at splvm().
 */

static __inline daddr_t
swp_pager_getswapspace(npages)
    int npages;
{
    daddr_t blk;

    if ((blk = blist_alloc(swapblist, npages)) == SWAPBLK_NONE) {
        if (swap_pager_full != 2) {
            printf("swap_pager_getswapspace: failed\n");
            swap_pager_full = 2;
            swap_pager_almost_full = 1;
        }
    } else {
        vm_swap_size -= npages;
        swp_sizecheck();
    }
    return(blk);
}

/*
 * SWP_PAGER_FREESWAPSPACE() - free raw swap space
 *
 *    This routine returns the specified swap blocks back to the bitmap.
 *
 *    Note:  This routine may not block (it could in the old swap code),
 *    and through the use of the new blist routines it does not block.
 *
 *    We must be called at splvm() to avoid races with bitmap frees from
 *    vm_page_remove() aka swap_pager_page_removed().
 *
 *    This routine may not block
 *    This routine must be called at splvm().
 */

static __inline void
swp_pager_freeswapspace(blk, npages)
    daddr_t blk;
    int npages;
{
    blist_free(swapblist, blk, npages);
    vm_swap_size += npages;
    swp_sizecheck();
}

/*
 * SWAP_PAGER_FREESPACE() - frees swap blocks associated with a page
 *                          range within an object.
 *
 *    This is a globally accessible routine.
 *
 *    This routine removes swapblk assignments from swap metadata.
 *
 *    The external callers of this routine typically have already destroyed
 *    or renamed vm_page_t's associated with this range in the object so
 *    we should be ok.
 *
 *    This routine may be called at any spl.  We up our spl to splvm
 *    temporarily in order to perform the metadata removal.
 */

void
swap_pager_freespace(object, start, size)
    vm_object_t object;
    vm_pindex_t start;
    vm_size_t size;
{
    int s = splvm();

    swp_pager_meta_free(object, start, size);
    splx(s);
}

/*
 * SWAP_PAGER_RESERVE() - reserve swap blocks in object
 *
 *    Assigns swap blocks to the specified range within the object.  The
 *    swap blocks are not zeroed.  Any previous swap assignment is destroyed.
 *
 *    Returns 0 on success, -1 on failure.
 */

int
swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
    int s;
    int n = 0;
    daddr_t blk = SWAPBLK_NONE;
    vm_pindex_t beg = start;	/* save start index */

    s = splvm();
    while (size) {
        if (n == 0) {
            n = BLIST_MAX_ALLOC;
            while ((blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE) {
                n >>= 1;
                if (n == 0) {
                    swp_pager_meta_free(object, beg, start - beg);
                    splx(s);
                    return(-1);
                }
            }
        }
        swp_pager_meta_build(object, start, blk);
        --size;
        ++start;
        ++blk;
        --n;
    }
    swp_pager_meta_free(object, start, n);
    splx(s);
    return(0);
}

/*
 * SWAP_PAGER_COPY() - copy blocks from source pager to destination pager
 *                     and destroy the source.
 *
 *    Copy any valid swapblks from the source to the destination.  In
 *    cases where both the source and destination have a valid swapblk,
 *    we keep the destination's.
 *
 *    This routine is allowed to block.  It may block allocating metadata
 *    indirectly through swp_pager_meta_build() or if paging is still in
 *    progress on the source.
 *
 *    This routine can be called at any spl
 *
 *    XXX vm_page_collapse() kinda expects us not to block because we
 *    supposedly do not need to allocate memory, but for the moment we
 *    *may* have to get a little memory from the zone allocator, but
 *    it is taken from the interrupt memory.  We should be ok.
 *
 *    The source object contains no vm_page_t's (which is just as well)
 *
 *    The source object is of type OBJT_SWAP.
 *
 *    The source and destination objects must be locked or
 *    inaccessible (XXX are they ?)
 */

void
swap_pager_copy(srcobject, dstobject, offset, destroysource)
    vm_object_t srcobject;
    vm_object_t dstobject;
    vm_pindex_t offset;
    int destroysource;
{
    vm_pindex_t i;
    int s;

    s = splvm();

    /*
     * If destroysource is set, we remove the source object from the
     * swap_pager internal queue now.
     */

    if (destroysource) {
        if (srcobject->handle == NULL) {
            TAILQ_REMOVE(
                &swap_pager_un_object_list,
                srcobject,
                pager_object_list
            );
        } else {
            TAILQ_REMOVE(
                NOBJLIST(srcobject->handle),
                srcobject,
                pager_object_list
            );
        }
    }

    /*
     * transfer source to destination.
     */

    for (i = 0; i < dstobject->size; ++i) {
        daddr_t dstaddr;

        /*
         * Locate (without changing) the swapblk on the destination,
         * unless it is invalid in which case free it silently, or
         * if the destination is a resident page, in which case the
         * source is thrown away.
         */

        dstaddr = swp_pager_meta_ctl(dstobject, i, 0);

        if (dstaddr == SWAPBLK_NONE) {
            /*
             * Destination has no swapblk and is not resident,
             * copy source.
             */
            daddr_t srcaddr;

            srcaddr = swp_pager_meta_ctl(
                srcobject,
                i + offset,
                SWM_POP
            );

            if (srcaddr != SWAPBLK_NONE)
                swp_pager_meta_build(dstobject, i, srcaddr);
        } else {
            /*
             * Destination has valid swapblk or it is represented
             * by a resident page.  We destroy the sourceblock.
             */

            swp_pager_meta_ctl(srcobject, i + offset, SWM_FREE);
        }
    }

    /*
     * Free left over swap blocks in source.
     *
     * We have to revert the type to OBJT_DEFAULT so we do not accidentally
     * double-remove the object from the swap queues.
     */

    if (destroysource) {
        swp_pager_meta_free_all(srcobject);
        /*
         * Reverting the type is not necessary, the caller is going
         * to destroy srcobject directly, but I'm doing it here
         * for consistency since we've removed the object from its
         * queues.
         */
        srcobject->type = OBJT_DEFAULT;
    }
    splx(s);
}

/*
 * SWAP_PAGER_HASPAGE() - determine if we have good backing store for
 *                        the requested page.
 *
 *    We determine whether good backing store exists for the requested
 *    page and return TRUE if it does, FALSE if it doesn't.
 *
 *    If TRUE, we also try to determine how much valid, contiguous backing
 *    store exists before and after the requested page within a reasonable
 *    distance.  We do not try to restrict it to the swap device stripe
 *    (that is handled in getpages/putpages).  It probably isn't worth
 *    doing here.
 */

boolean_t
swap_pager_haspage(object, pindex, before, after)
    vm_object_t object;
    vm_pindex_t pindex;
    int *before;
    int *after;
{
    daddr_t blk0;
    int s;

    /*
     * do we have good backing store at the requested index ?
     */

    s = splvm();
    blk0 = swp_pager_meta_ctl(object, pindex, 0);

    if (blk0 == SWAPBLK_NONE) {
        splx(s);
        if (before)
            *before = 0;
        if (after)
            *after = 0;
        return (FALSE);
    }

    /*
     * find backwards-looking contiguous good backing store
     */

    if (before != NULL) {
        int i;

        for (i = 1; i < (SWB_NPAGES/2); ++i) {
            daddr_t blk;

            if (i > pindex)
                break;
            blk = swp_pager_meta_ctl(object, pindex - i, 0);
            if (blk != blk0 - i)
                break;
        }
        *before = (i - 1);
    }

    /*
     * find forward-looking contiguous good backing store
     */

    if (after != NULL) {
        int i;

        for (i = 1; i < (SWB_NPAGES/2); ++i) {
            daddr_t blk;

            blk = swp_pager_meta_ctl(object, pindex + i, 0);
            if (blk != blk0 + i)
                break;
        }
        *after = (i - 1);
    }
    splx(s);
    return (TRUE);
}

/*
 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
 *
 *    This removes any associated swap backing store, whether valid or
 *    not, from the page.
 *
 *    This routine is typically called when a page is made dirty, at
 *    which point any associated swap can be freed.  MADV_FREE also
 *    calls us in a special-case situation
 *
 *    NOTE!!!  If the page is clean and the swap was valid, the caller
 *    should make the page dirty before calling this routine.  This routine
 *    does NOT change the m->dirty status of the page.  Also: MADV_FREE
 *    depends on it.
 *
 *    This routine may not block
 *    This routine must be called at splvm()
 */

static void
swap_pager_unswapped(m)
    vm_page_t m;
{
    swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
}

/*
 * SWAP_PAGER_STRATEGY() - read, write, free blocks
 *
 *    This implements the vm_pager_strategy() interface to swap and allows
 *    other parts of the system to directly access swap as backing store
 *    through vm_objects of type OBJT_SWAP.  This is intended to be a
 *    cacheless interface ( i.e. caching occurs at higher levels ).
 *    Therefore we do not maintain any resident pages.  All I/O goes
 *    directly to and from the swap device.
 *
 *    Note that b_blkno is scaled for PAGE_SIZE
 *
 *    We currently attempt to run I/O synchronously or asynchronously as
 *    the caller requests.  This isn't perfect because we lose error
 *    sequencing when we run multiple ops in parallel to satisfy a request.
 *    But this is swap, so we let it all hang out.
 */

static void
swap_pager_strategy(vm_object_t object, struct buf *bp)
{
    vm_pindex_t start;
    int count;
    int s;
    char *data;
    struct buf *nbp = NULL;

    if (bp->b_bcount & PAGE_MASK) {
        bp->b_error = EINVAL;
        bp->b_flags |= B_ERROR | B_INVAL;
        biodone(bp);
        printf("swap_pager_strategy: bp %p b_vp %p blk %d size %d, not page bounded\n", bp, bp->b_vp, (int)bp->b_pblkno, (int)bp->b_bcount);
        return;
    }

    /*
     * Clear error indication, initialize page index, count, data pointer.
     */

    bp->b_error = 0;
    bp->b_flags &= ~B_ERROR;
    bp->b_resid = bp->b_bcount;

    start = bp->b_pblkno;
    count = howmany(bp->b_bcount, PAGE_SIZE);
    data = bp->b_data;

    s = splvm();

    /*
     * Deal with B_FREEBUF
     */

    if (bp->b_flags & B_FREEBUF) {
        /*
         * FREE PAGE(s) - destroy underlying swap that is no longer
         *                needed.
         */
        swp_pager_meta_free(object, start, count);
        splx(s);
        bp->b_resid = 0;
        biodone(bp);
        return;
    }

    /*
     * Execute read or write
     */

    while (count > 0) {
        daddr_t blk;

        /*
         * Obtain block.  If block not found and writing, allocate a
         * new block and build it into the object.
         */

        blk = swp_pager_meta_ctl(object, start, 0);
        if ((blk == SWAPBLK_NONE) && (bp->b_flags & B_READ) == 0) {
            blk = swp_pager_getswapspace(1);
            if (blk == SWAPBLK_NONE) {
                bp->b_error = ENOMEM;
                bp->b_flags |= B_ERROR;
                break;
            }
            swp_pager_meta_build(object, start, blk);
        }

        /*
         * Do we have to flush our current collection?  Yes if:
         *
         *    - no swap block at this index
         *    - swap block is not contiguous
         *    - we cross a physical disk boundary in the
         *      stripe.
         */

        if (
            nbp && (nbp->b_blkno + btoc(nbp->b_bcount) != blk ||
             ((nbp->b_blkno ^ blk) & dmmax_mask)
            )
        ) {
            splx(s);
            if (bp->b_flags & B_READ) {
                ++cnt.v_swapin;
                cnt.v_swappgsin += btoc(nbp->b_bcount);
            } else {
                ++cnt.v_swapout;
                cnt.v_swappgsout += btoc(nbp->b_bcount);
                nbp->b_dirtyend = nbp->b_bcount;
            }
            flushchainbuf(nbp);
            s = splvm();
            nbp = NULL;
        }

        /*
         * Add new swapblk to nbp, instantiating nbp if necessary.
         * Zero-fill reads are able to take a shortcut.
         */

        if (blk == SWAPBLK_NONE) {
            /*
             * We can only get here if we are reading.  Since
             * we are at splvm() we can safely modify b_resid,
             * even if chain ops are in progress.
             */
            bzero(data, PAGE_SIZE);
            bp->b_resid -= PAGE_SIZE;
        } else {
            if (nbp == NULL) {
                nbp = getchainbuf(bp, swapdev_vp, (bp->b_flags & B_READ) | B_ASYNC);
                nbp->b_blkno = blk;
                nbp->b_bcount = 0;
                nbp->b_data = data;
            }
            nbp->b_bcount += PAGE_SIZE;
        }
        --count;
        ++start;
        data += PAGE_SIZE;
    }

    /*
     * Flush out last buffer
     */

    splx(s);

    if (nbp) {
        if ((bp->b_flags & B_ASYNC) == 0)
            nbp->b_flags &= ~B_ASYNC;
        if (nbp->b_flags & B_READ) {
            ++cnt.v_swapin;
            cnt.v_swappgsin += btoc(nbp->b_bcount);
        } else {
            ++cnt.v_swapout;
            cnt.v_swappgsout += btoc(nbp->b_bcount);
            nbp->b_dirtyend = nbp->b_bcount;
        }
        flushchainbuf(nbp);
    }

    /*
     * Wait for completion.
     */

    if (bp->b_flags & B_ASYNC) {
        autochaindone(bp);
    } else {
        waitchainbuf(bp, 0, 1);
    }
}

/*
 * SWAP_PAGER_GETPAGES() - bring pages in from swap
 *
 *    Attempt to retrieve (m, count) pages from backing store, but make
 *    sure we retrieve at least m[reqpage].  We try to load in as large
 *    a chunk surrounding m[reqpage] as is contiguous in swap and which
 *    belongs to the same object.
 *
 *    The code is designed for asynchronous operation and
 *    immediate-notification of 'reqpage' but tends not to be
 *    used that way.  Please do not optimize-out this algorithmic
 *    feature, I intend to improve on it in the future.
 *
 *    The parent has a single vm_object_pip_add() reference prior to
 *    calling us and we should return with the same.
 *
 *    The parent has BUSY'd the pages.  We should return with 'm'
 *    left busy, but the others adjusted.
 */

static int
swap_pager_getpages(object, m, count, reqpage)
    vm_object_t object;
    vm_page_t *m;
    int count;
    int reqpage;
{
    struct buf *bp;
    vm_offset_t kva;
    daddr_t blk;
    int s;
    int i;
    int j;
    vm_pindex_t lastpindex;
    vm_page_t mreq;

    mreq = m[reqpage];

    if (mreq->object != object) {
        panic("swap_pager_getpages: object mismatch %p/%p",
            object,
            mreq->object
        );
    }

    /*
     * Calculate range to retrieve.  The pages have already been assigned
     * their swapblks.  We require a *contiguous* range that falls entirely
     * within a single device stripe.  If we do not supply it, bad things
     * happen.  Note that blk, iblk & jblk can be SWAPBLK_NONE, but the
     * loops are set up such that the case(s) are handled implicitly.
     *
     * The swp_*() calls must be made at splvm().  vm_page_free() does
     * not need to be, but it will go a little faster if it is.
     */

    s = splvm();
    blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0);

    for (i = reqpage - 1; i >= 0; --i) {
        daddr_t iblk;

        iblk = swp_pager_meta_ctl(m[i]->object, m[i]->pindex, 0);
        if (blk != iblk + (reqpage - i))
            break;
        if ((blk ^ iblk) & dmmax_mask)
            break;
    }
    ++i;

    for (j = reqpage + 1; j < count; ++j) {
        daddr_t jblk;

        jblk = swp_pager_meta_ctl(m[j]->object, m[j]->pindex, 0);
        if (blk != jblk - (j - reqpage))
            break;
        if ((blk ^ jblk) & dmmax_mask)
            break;
    }

    /*
     * free pages outside our collection range.  Note: we never free
     * mreq, it must remain busy throughout.
     */

    {
        int k;

        for (k = 0; k < i; ++k)
            vm_page_free(m[k]);
        for (k = j; k < count; ++k)
            vm_page_free(m[k]);
    }
    splx(s);

    /*
     * Return VM_PAGER_FAIL if we have nothing to do.  Return mreq
     * still busy, but the others unbusied.
     */

    if (blk == SWAPBLK_NONE)
        return(VM_PAGER_FAIL);

    /*
     * Get a swap buffer header to perform the IO
     */

    bp = getpbuf(&nsw_rcount);
    kva = (vm_offset_t) bp->b_data;

    /*
     * map our page(s) into kva for input
     *
     * NOTE: B_PAGING is set by pbgetvp()
     */

    pmap_qenter(kva, m + i, j - i);

    bp->b_flags = B_READ | B_CALL;
    bp->b_iodone = swp_pager_async_iodone;
    bp->b_rcred = bp->b_wcred = proc0.p_ucred;
    bp->b_data = (caddr_t) kva;
    crhold(bp->b_rcred);
    crhold(bp->b_wcred);
    bp->b_blkno = blk - (reqpage - i);
    bp->b_bcount = PAGE_SIZE * (j - i);
    bp->b_bufsize = PAGE_SIZE * (j - i);
    bp->b_pager.pg_reqpage = reqpage - i;

    {
        int k;

        for (k = i; k < j; ++k) {
            bp->b_pages[k - i] = m[k];
            vm_page_flag_set(m[k], PG_SWAPINPROG);
        }
    }
    bp->b_npages = j - i;

    pbgetvp(swapdev_vp, bp);

    cnt.v_swapin++;
    cnt.v_swappgsin += bp->b_npages;

    /*
     * We still hold the lock on mreq, and our automatic completion routine
     * does not remove it.
     */

    vm_object_pip_add(mreq->object, bp->b_npages);
    lastpindex = m[j-1]->pindex;

    /*
     * perform the I/O.  NOTE!!!  bp cannot be considered valid after
     * this point because we automatically release it on completion.
     * Instead, we look at the one page we are interested in which we
     * still hold a lock on even through the I/O completion.
     *
     * The other pages in our m[] array are also released on completion,
     * so we cannot assume they are valid anymore either.
     *
     * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
     */

    BUF_KERNPROC(bp);
    VOP_STRATEGY(bp->b_vp, bp);

    /*
     * wait for the page we want to complete.  PG_SWAPINPROG is always
     * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
     * is set in the meta-data.
     */

    s = splvm();

    while ((mreq->flags & PG_SWAPINPROG) != 0) {
        vm_page_flag_set(mreq, PG_WANTED | PG_REFERENCED);
        cnt.v_intrans++;
        if (tsleep(mreq, PSWP, "swread", hz*20)) {
            printf(
                "swap_pager: indefinite wait buffer: device:"
                " %s, blkno: %ld, size: %ld\n",
                devtoname(bp->b_dev), (long)bp->b_blkno,
                (long)bp->b_bcount
            );
        }
    }

    splx(s);

    /*
     * mreq is left busied after completion, but all the other pages
     * are freed.  If we had an unrecoverable read error the page will
     * not be valid.
     */

    if (mreq->valid != VM_PAGE_BITS_ALL) {
        return(VM_PAGER_ERROR);
    } else {
        return(VM_PAGER_OK);
    }

    /*
     * A final note: in a low swap situation, we cannot deallocate swap
     * and mark a page dirty here because the caller is likely to mark
     * the page clean when we return, causing the page to possibly revert
     * to all-zero's later.
     */
}

/*
 *    swap_pager_putpages:
 *
 *    Assign swap (if necessary) and initiate I/O on the specified pages.
 *
 *    We support both OBJT_DEFAULT and OBJT_SWAP objects.  DEFAULT objects
 *    are automatically converted to SWAP objects.
 *
 *    In a low memory situation we may block in VOP_STRATEGY(), but the new
 *    vm_page reservation system coupled with properly written VFS devices
 *    should ensure that no low-memory deadlock occurs.  This is an area
 *    which needs work.
 *
 *    The parent has N vm_object_pip_add() references prior to
 *    calling us and will remove references for rtvals[] that are
 *    not set to VM_PAGER_PEND.  We need to remove the rest on I/O
 *    completion.
 *
 *    The parent has soft-busy'd the pages it passes us and will unbusy
 *    those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
 *    We need to unbusy the rest on I/O completion.
 */

void
swap_pager_putpages(object, m, count, sync, rtvals)
    vm_object_t object;
    vm_page_t *m;
    int count;
    boolean_t sync;
    int *rtvals;
{
    int i;
    int n = 0;

    if (count && m[0]->object != object) {
        panic("swap_pager_putpages: object mismatch %p/%p",
            object,
            m[0]->object
        );
    }

    /*
     * Step 1
     *
     * Turn object into OBJT_SWAP
     * check for bogus sysops
     * force sync if not pageout process
     */

    if (object->type != OBJT_SWAP)
        swp_pager_meta_build(object, 0, SWAPBLK_NONE);

    if (curproc != pageproc)
        sync = TRUE;

    /*
     * Step 2
     *
     * Update nsw parameters from swap_async_max sysctl values.
     * Do not let the sysop crash the machine with bogus numbers.
     */

    if (swap_async_max != nsw_wcount_async_max) {
        int n;
        int s;

        /*
         * limit range
         */
        if ((n = swap_async_max) > nswbuf / 2)
            n = nswbuf / 2;
        if (n < 1)
            n = 1;
        swap_async_max = n;

        /*
         * Adjust difference ( if possible ).  If the current async
         * count is too low, we may not be able to make the adjustment
         * at this time.
         */
        s = splvm();
        n -= nsw_wcount_async_max;
        if (nsw_wcount_async + n >= 0) {
            nsw_wcount_async += n;
            nsw_wcount_async_max += n;
            wakeup(&nsw_wcount_async);
        }
        splx(s);
    }

    /*
     * Step 3
     *
     * Assign swap blocks and issue I/O.  We reallocate swap on the fly.
     * The page is left dirty until the pageout operation completes
     * successfully.
     */

    for (i = 0; i < count; i += n) {
        int s;
        int j;
        struct buf *bp;
        daddr_t blk;

        /*
         * Maximum I/O size is limited by a number of factors.
         */

        n = min(BLIST_MAX_ALLOC, count - i);
        n = min(n, nsw_cluster_max);

        s = splvm();

        /*
         * Get biggest block of swap we can.  If we fail, fall
         * back and try to allocate a smaller block.  Don't go
         * overboard trying to allocate space if it would overly
         * fragment swap.
         */
        while (
            (blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE &&
            n > 4
        ) {
            n >>= 1;
        }
        if (blk == SWAPBLK_NONE) {
            for (j = 0; j < n; ++j)
                rtvals[i+j] = VM_PAGER_FAIL;
            splx(s);
            continue;
        }

        /*
         * The I/O we are constructing cannot cross a physical
         * disk boundary in the swap stripe.  Note: we are still
         * at splvm().
         */
        if ((blk ^ (blk + n)) & dmmax_mask) {
            j = ((blk + dmmax) & dmmax_mask) - blk;
            swp_pager_freeswapspace(blk + j, n - j);
            n = j;
        }

        /*
         * All I/O parameters have been satisfied, build the I/O
         * request and assign the swap space.
         *
         * NOTE: B_PAGING is set by pbgetvp()
         */

        if (sync == TRUE) {
            bp = getpbuf(&nsw_wcount_sync);
            bp->b_flags = B_CALL;
        } else {
            bp = getpbuf(&nsw_wcount_async);
            bp->b_flags = B_CALL | B_ASYNC;
        }
        bp->b_spc = NULL;	/* not used, but NULL-out anyway */

        pmap_qenter((vm_offset_t)bp->b_data, &m[i], n);

        bp->b_rcred = bp->b_wcred = proc0.p_ucred;
        bp->b_bcount = PAGE_SIZE * n;
        bp->b_bufsize = PAGE_SIZE * n;
        bp->b_blkno = blk;

        crhold(bp->b_rcred);
        crhold(bp->b_wcred);

        pbgetvp(swapdev_vp, bp);

        for (j = 0; j < n; ++j) {
            vm_page_t mreq = m[i+j];

            swp_pager_meta_build(
                mreq->object,
                mreq->pindex,
                blk + j
            );
            vm_page_dirty(mreq);
            rtvals[i+j] = VM_PAGER_OK;

            vm_page_flag_set(mreq, PG_SWAPINPROG);
            bp->b_pages[j] = mreq;
        }
        bp->b_npages = n;

        /*
         * Must set dirty range for NFS to work.
         */
        bp->b_dirtyoff = 0;
        bp->b_dirtyend = bp->b_bcount;

        cnt.v_swapout++;
        cnt.v_swappgsout += bp->b_npages;
        swapdev_vp->v_numoutput++;

        splx(s);

        /*
         * asynchronous
         *
         * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
         */

        if (sync == FALSE) {
            bp->b_iodone = swp_pager_async_iodone;
            BUF_KERNPROC(bp);
            VOP_STRATEGY(bp->b_vp, bp);

            for (j = 0; j < n; ++j)
                rtvals[i+j] = VM_PAGER_PEND;
            continue;
        }

        /*
         * synchronous
         *
         * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
         */

        bp->b_iodone = swp_pager_sync_iodone;
        VOP_STRATEGY(bp->b_vp, bp);

        /*
         * Wait for the sync I/O to complete, then update rtvals.
         * We just set the rtvals[] to VM_PAGER_PEND so we can call
         * our async completion routine at the end, thus avoiding a
         * double-free.
         */
        s = splbio();

        while ((bp->b_flags & B_DONE) == 0) {
            tsleep(bp, PVM, "swwrt", 0);
        }

        for (j = 0; j < n; ++j)
            rtvals[i+j] = VM_PAGER_PEND;

        /*
         * Now that we are through with the bp, we can call the
         * normal async completion, which frees everything up.
         */

        swp_pager_async_iodone(bp);

        splx(s);
    }
}

/*
 * swap_pager_sync_iodone:
 *
 *    Completion routine for synchronous reads and writes from/to swap.
 *    We just mark the bp as complete and wake up anyone waiting on it.
 *
 *    This routine may not block.  This routine is called at splbio()
 *    or better.
 */

static void
swp_pager_sync_iodone(bp)
    struct buf *bp;
{
    bp->b_flags |= B_DONE;
    bp->b_flags &= ~B_ASYNC;
    wakeup(bp);
}

/*
 * swp_pager_async_iodone:
 *
 *    Completion routine for asynchronous reads and writes from/to swap.
 *    Also called manually by synchronous code to finish up a bp.
 *
 *    For READ operations, the pages are PG_BUSY'd.  For WRITE operations,
 *    the pages are vm_page_t->busy'd.  For READ operations, we PG_BUSY
 *    unbusy all pages except the 'main' request page.  For WRITE
 *    operations, we vm_page_t->busy'd unbusy all pages ( we can do this
 *    because we marked them all VM_PAGER_PEND on return from putpages ).
 *
 *    This routine may not block.
 *    This routine is called at splbio() or better
 *
 *    We up ourselves to splvm() as required for various vm_page related
 *    calls.
 */

static void
swp_pager_async_iodone(bp)
    register struct buf *bp;
{
    int s;
    int i;
    vm_object_t object = NULL;

    bp->b_flags |= B_DONE;

    /*
     * report error
     */

    if (bp->b_flags & B_ERROR) {
        printf(
            "swap_pager: I/O error - %s failed; blkno %ld,"
            " size %ld, error %d\n",
            ((bp->b_flags & B_READ) ? "pagein" : "pageout"),
            (long)bp->b_blkno,
            (long)bp->b_bcount,
            bp->b_error
        );
    }

    /*
     * set object, raise to splvm().
     */

    if (bp->b_npages)
        object = bp->b_pages[0]->object;
    s = splvm();

    /*
     * remove the mapping for kernel virtual
     */

    pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages);

    /*
     * cleanup pages.  If an error occurs writing to swap, we are in
     * very serious trouble.  If it happens to be a disk error, though,
     * we may be able to recover by reassigning the swap later on.  So
     * in this case we remove the m->swapblk assignment for the page
     * but do not free it in the rlist.  The erroneous block(s) are thus
     * never reallocated as swap.  Redirty the page and continue.
     */

    for (i = 0; i < bp->b_npages; ++i) {
        vm_page_t m = bp->b_pages[i];

        vm_page_flag_clear(m, PG_SWAPINPROG);

        if (bp->b_flags & B_ERROR) {
            /*
             * If an error occurs I'd love to throw the swapblk
             * away without freeing it back to swapspace, so it
             * can never be used again.  But I can't from an
             * interrupt.
             */

            if (bp->b_flags & B_READ) {
                /*
                 * When reading, reqpage needs to stay
                 * locked for the parent, but all other
                 * pages can be freed.  We still want to
                 * wakeup the parent waiting on the page,
                 * though.  ( also: pg_reqpage can be -1 and
                 * not match anything ).
                 *
                 * We have to wake specifically requested pages
                 * up too because we cleared PG_SWAPINPROG and
                 * someone may be waiting for that.
                 *
                 * NOTE: for reads, m->dirty will probably
                 * be overridden by the original caller of
                 * getpages so don't play cute tricks here.
                 *
                 * XXX IT IS NOT LEGAL TO FREE THE PAGE HERE
                 * AS THIS MESSES WITH object->memq, and it is
                 * not legal to mess with object->memq from an
                 * interrupt.
                 */

                m->valid = 0;
                vm_page_flag_clear(m, PG_ZERO);

                if (i != bp->b_pager.pg_reqpage)
                    vm_page_free(m);
                else
                    vm_page_flash(m);

                /*
                 * If i == bp->b_pager.pg_reqpage, do not wake
                 * the page up.  The caller needs to.
                 */
            } else {
                /*
                 * If a write error occurs, reactivate page
                 * so it doesn't clog the inactive list,
                 * then finish the I/O.
                 */
                vm_page_dirty(m);
                vm_page_activate(m);
                vm_page_io_finish(m);
            }
        } else if (bp->b_flags & B_READ) {
            /*
             * For read success, clear dirty bits.  Nobody should
             * have this page mapped but don't take any chances,
             * make sure the pmap modify bits are also cleared.
             *
             * NOTE: for reads, m->dirty will probably be
             * overridden by the original caller of getpages so
             * we cannot set them in order to free the underlying
             * swap in a low-swap situation.  I don't think we'd
             * want to do that anyway, but it was an optimization
             * that existed in the old swapper for a time before
             * it got ripped out due to precisely this problem.
             *
             * clear PG_ZERO in page.
             *
             * If not the requested page then deactivate it.
             *
             * Note that the requested page, reqpage, is left
             * busied, but we still have to wake it up.  The
             * other pages are released (unbusied) by
             * vm_page_wakeup().  We do not set reqpage's
             * valid bits here, it is up to the caller.
             */

            pmap_clear_modify(m);
            m->valid = VM_PAGE_BITS_ALL;
            vm_page_undirty(m);
            vm_page_flag_clear(m, PG_ZERO);

            /*
             * We have to wake specifically requested pages
             * up too because we cleared PG_SWAPINPROG and
             * could be waiting for it in getpages.  However,
             * be sure to not unbusy getpages specifically
             * requested page - getpages expects it to be
             * busy.
             */
            if (i != bp->b_pager.pg_reqpage) {
                vm_page_deactivate(m);
                vm_page_wakeup(m);
            } else {
                vm_page_flash(m);
            }
        } else {
            /*
             * For write success, clear the modify and dirty
             * status, then finish the I/O ( which decrements the
             * busy count and possibly wakes waiters up ).
             */
            pmap_clear_modify(m);
            vm_page_undirty(m);
            vm_page_io_finish(m);
            if (!vm_page_count_severe() || !vm_page_try_to_cache(m))
                vm_page_protect(m, VM_PROT_READ);
        }
    }

    /*
     * adjust pip.  NOTE: the original parent may still have its own
     * pip refs on the object.
     */

    if (object)
        vm_object_pip_wakeupn(object, bp->b_npages);

    /*
     * release the physical I/O buffer
     */

    relpbuf(
        bp,
        ((bp->b_flags & B_READ) ? &nsw_rcount :
            ((bp->b_flags & B_ASYNC) ?
                &nsw_wcount_async :
                &nsw_wcount_sync
            )
        )
    );
    splx(s);
}

/************************************************************************
 *				SWAP META DATA				*
 ************************************************************************
 *
 *    These routines manipulate the swap metadata stored in the
 *    OBJT_SWAP object.  All swp_*() routines must be called at
 *    splvm() because swap can be freed up by the low level vm_page
 *    code which might be called from interrupts beyond what splbio() covers.
 *
 *    Swap metadata is implemented with a global hash and not directly
 *    linked into the object.  Instead the object simply contains
 *    appropriate tracking counters.
 */

/*
 * SWP_PAGER_HASH() - hash swap meta data
 *
 *    This is an inline helper function which hashes the swapblk given
 *    the object and page index.  It returns a pointer to a pointer
 *    to the object, or a pointer to a NULL pointer if it could not
 *    find a swapblk.
 *
 *    This routine must be called at splvm().
 */

static __inline struct swblock **
swp_pager_hash(vm_object_t object, vm_pindex_t index)
{
    struct swblock **pswap;
    struct swblock *swap;

    index &= ~SWAP_META_MASK;
    pswap = &swhash[(index ^ (int)(intptr_t)object) & swhash_mask];

    while ((swap = *pswap) != NULL) {
        if (swap->swb_object == object &&
            swap->swb_index == index
        ) {
            break;
        }
        pswap = &swap->swb_hnext;
    }
    return(pswap);
}
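
/*
 * Illustrative sketch (hypothetical caller): pages of an object are
 * grouped SWAP_META_PAGES at a time into one struct swblock, so a
 * lookup first hashes to the swblock and then indexes the per-page
 * slot with (pindex & SWAP_META_MASK).  This is the lookup path that
 * swp_pager_meta_ctl() implements below.
 */
#if 0
static __inline daddr_t
swblock_lookup_example(vm_object_t object, vm_pindex_t pindex)
{
    struct swblock **pswap = swp_pager_hash(object, pindex);

    if (*pswap == NULL)
        return (SWAPBLK_NONE);
    return ((*pswap)->swb_pages[pindex & SWAP_META_MASK]);
}
#endif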

/*
 * SWP_PAGER_META_BUILD() - add swap block to swap meta data for object
 *
 *    We first convert the object to a swap object if it is a default
 *    object.
 *
 *    The specified swapblk is added to the object's swap metadata.  If
 *    the swapblk is not valid, it is freed instead.  Any previously
 *    assigned swapblk is freed.
 *
 *    This routine must be called at splvm(), except when used to convert
 *    an OBJT_DEFAULT object into an OBJT_SWAP object.
 */

static void
swp_pager_meta_build(
    vm_object_t object,
    vm_pindex_t index,
    daddr_t swapblk
) {
    struct swblock *swap;
    struct swblock **pswap;

    /*
     * Convert default object to swap object if necessary
     */

    if (object->type != OBJT_SWAP) {
        object->type = OBJT_SWAP;
        object->un_pager.swp.swp_bcount = 0;

        if (object->handle != NULL) {
            TAILQ_INSERT_TAIL(
                NOBJLIST(object->handle),
                object,
                pager_object_list
            );
        } else {
            TAILQ_INSERT_TAIL(
                &swap_pager_un_object_list,
                object,
                pager_object_list
            );
        }
    }

    /*
     * Locate hash entry.  If not found create, but if we aren't adding
     * anything just return.  If we run out of space in the map we wait
     * and, since the hash table may have changed, retry.
     */

retry:
    pswap = swp_pager_hash(object, index);

    if ((swap = *pswap) == NULL) {
        int i;

        if (swapblk == SWAPBLK_NONE)
            return;

        swap = *pswap = zalloc(swap_zone);
        if (swap == NULL) {
            VM_WAIT;
            goto retry;
        }
        swap->swb_hnext = NULL;
        swap->swb_object = object;
        swap->swb_index = index & ~SWAP_META_MASK;
        swap->swb_count = 0;

        ++object->un_pager.swp.swp_bcount;

        for (i = 0; i < SWAP_META_PAGES; ++i)
            swap->swb_pages[i] = SWAPBLK_NONE;
    }

    /*
     * Delete prior contents of metadata
     */

    index &= SWAP_META_MASK;

    if (swap->swb_pages[index] != SWAPBLK_NONE) {
        swp_pager_freeswapspace(swap->swb_pages[index], 1);
        --swap->swb_count;
    }

    /*
     * Enter block into metadata
     */

    swap->swb_pages[index] = swapblk;
    if (swapblk != SWAPBLK_NONE)
        ++swap->swb_count;
}
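
/*
 * Illustrative sketch (hypothetical caller): building metadata with
 * SWAPBLK_NONE is the idiom this file uses to force the OBJT_DEFAULT
 * to OBJT_SWAP conversion without assigning any backing store, as in
 * swap_pager_alloc() and swap_pager_putpages() above.
 */
#if 0
static void
convert_to_swap_example(vm_object_t object)
{
    if (object->type != OBJT_SWAP)
        swp_pager_meta_build(object, 0, SWAPBLK_NONE);
}
#endif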

/*
 * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
 *
 *    The requested range of blocks is freed, with any associated swap
 *    returned to the swap bitmap.
 *
 *    This routine will free swap metadata structures as they are cleaned
 *    out.  This routine does *NOT* operate on swap metadata associated
 *    with resident pages.
 *
 *    This routine must be called at splvm()
 */

static void
swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count)
{
    if (object->type != OBJT_SWAP)
        return;

    while (count > 0) {
        struct swblock **pswap;
        struct swblock *swap;

        pswap = swp_pager_hash(object, index);

        if ((swap = *pswap) != NULL) {
            daddr_t v = swap->swb_pages[index & SWAP_META_MASK];

            if (v != SWAPBLK_NONE) {
                swp_pager_freeswapspace(v, 1);
                swap->swb_pages[index & SWAP_META_MASK] =
                    SWAPBLK_NONE;
                if (--swap->swb_count == 0) {
                    *pswap = swap->swb_hnext;
                    zfree(swap_zone, swap);
                    --object->un_pager.swp.swp_bcount;
                }
            }
            --count;
            ++index;
        } else {
            int n = SWAP_META_PAGES - (index & SWAP_META_MASK);

            count -= n;
            index += n;
        }
    }
}

/*
 * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
 *
 *    This routine locates and destroys all swap metadata associated with
 *    an object.
 *
 *    This routine must be called at splvm()
 */

static void
swp_pager_meta_free_all(vm_object_t object)
{
    daddr_t index = 0;

    if (object->type != OBJT_SWAP)
        return;

    while (object->un_pager.swp.swp_bcount) {
        struct swblock **pswap;
        struct swblock *swap;

        pswap = swp_pager_hash(object, index);
        if ((swap = *pswap) != NULL) {
            int i;

            for (i = 0; i < SWAP_META_PAGES; ++i) {
                daddr_t v = swap->swb_pages[i];
                if (v != SWAPBLK_NONE) {
                    --swap->swb_count;
                    swp_pager_freeswapspace(v, 1);
                }
            }
            if (swap->swb_count != 0)
                panic("swap_pager_meta_free_all: swb_count != 0");
            *pswap = swap->swb_hnext;
            zfree(swap_zone, swap);
            --object->un_pager.swp.swp_bcount;
        }
        index += SWAP_META_PAGES;
        if (index > 0x20000000)
            panic("swp_pager_meta_free_all: failed to locate all swap meta blocks");
    }
}

/*
 * SWP_PAGER_METACTL() - misc control of swap and vm_page_t meta data.
 *
 *    This routine is capable of looking up, popping, or freeing
 *    swapblk assignments in the swap meta data or in the vm_page_t.
 *    The routine typically returns the swapblk being looked-up, or popped,
 *    or SWAPBLK_NONE if the block was freed, or SWAPBLK_NONE if the block
 *    was invalid.  This routine will automatically free any invalid
 *    meta-data swapblks.
 *
 *    It is not possible to store invalid swapblks in the swap meta data
 *    (other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
 *
 *    When acting on a busy resident page and paging is in progress, we
 *    have to wait until paging is complete but otherwise can act on the
 *    busy page.
 *
 *    This routine must be called at splvm().
 *
 *    SWM_FREE	remove and free swap block from metadata
 *    SWM_POP	remove from meta data but do not free.. pop it out
 */

static daddr_t
swp_pager_meta_ctl(
    vm_object_t object,
    vm_pindex_t index,
    int flags
) {
    struct swblock **pswap;
    struct swblock *swap;
    daddr_t r1 = SWAPBLK_NONE;

    /*
     * The meta data only exists if the object is OBJT_SWAP
     * and even then might not be allocated yet.
     */

    if (object->type != OBJT_SWAP)
        return(SWAPBLK_NONE);

    pswap = swp_pager_hash(object, index);

    if ((swap = *pswap) != NULL) {
        index &= SWAP_META_MASK;
        r1 = swap->swb_pages[index];

        if (r1 != SWAPBLK_NONE) {
            if (flags & SWM_FREE) {
                swp_pager_freeswapspace(r1, 1);
                r1 = SWAPBLK_NONE;
            }
            if (flags & (SWM_FREE|SWM_POP)) {
                swap->swb_pages[index] = SWAPBLK_NONE;
                if (--swap->swb_count == 0) {
                    *pswap = swap->swb_hnext;
                    zfree(swap_zone, swap);
                    --object->un_pager.swp.swp_bcount;
                }
            }
        }
    }
    return(r1);
}