1/*
2 * Copyright (c) 1998 Matthew Dillon,
3 * Copyright (c) 1994 John S. Dyson
4 * Copyright (c) 1990 University of Utah.
5 * Copyright (c) 1991, 1993
6 * The Regents of the University of California. All rights reserved.
7 *
8 * This code is derived from software contributed to Berkeley by
9 * the Systems Programming Group of the University of Utah Computer
10 * Science Department.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by the University of
23 * California, Berkeley and its contributors.
24 * 4. Neither the name of the University nor the names of its contributors
25 * may be used to endorse or promote products derived from this software
26 * without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * SUCH DAMAGE.
39 *
40 * New Swap System
41 * Matthew Dillon
42 *
43 * Radix Bitmap 'blists'.
44 *
45 * - The new swapper uses the new radix bitmap code. This should scale
46 * to arbitrarily small or arbitrarily large swap spaces and an almost
47 * arbitrary degree of fragmentation.
48 *
49 * Features:
50 *
51 * - on the fly reallocation of swap during putpages. The new system
52 * does not try to keep previously allocated swap blocks for dirty
53 * pages.
54 *
55 * - on the fly deallocation of swap
56 *
57 * - No more garbage collection required. Unnecessarily allocated swap
58 * blocks only exist for dirty vm_page_t's now and these are already
59 * cycled (in a high-load system) by the pager. We also do on-the-fly
60 * removal of invalidated swap blocks when a page is destroyed
61 * or renamed.
62 *
63 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
64 *
65 * @(#)swap_pager.c 8.9 (Berkeley) 3/21/94
66 *
67 * $FreeBSD: src/sys/vm/swap_pager.c,v 1.130.2.12 2002/08/31 21:15:55 dillon Exp $
 * $DragonFly: src/sys/vm/swap_pager.c,v 1.13 2004/07/14 03:10:17 hmp Exp $
69 */
70
71#include <sys/param.h>
72#include <sys/systm.h>
73#include <sys/conf.h>
74#include <sys/kernel.h>
75#include <sys/proc.h>
76#include <sys/buf.h>
77#include <sys/vnode.h>
78#include <sys/malloc.h>
79#include <sys/vmmeter.h>
80#include <sys/sysctl.h>
81#include <sys/blist.h>
82#include <sys/lock.h>
83#include <sys/vmmeter.h>
84
85#ifndef MAX_PAGEOUT_CLUSTER
86#define MAX_PAGEOUT_CLUSTER 16
87#endif
88
89#define SWB_NPAGES MAX_PAGEOUT_CLUSTER
90
91#include "opt_swap.h"
92#include <vm/vm.h>
93#include <vm/vm_object.h>
94#include <vm/vm_page.h>
95#include <vm/vm_pager.h>
96#include <vm/vm_pageout.h>
97#include <vm/swap_pager.h>
98#include <vm/vm_extern.h>
99#include <vm/vm_zone.h>
100
#include <sys/buf2.h>
#include <vm/vm_page2.h>

104#define SWM_FREE 0x02 /* free, period */
105#define SWM_POP 0x04 /* pop out */
106
107/*
108 * vm_swap_size is in page-sized chunks now. It was DEV_BSIZE'd chunks
109 * in the old system.
110 */
111
112extern int vm_swap_size; /* number of free swap blocks, in pages */
113
114int swap_pager_full; /* swap space exhaustion (task killing) */
115static int swap_pager_almost_full; /* swap space exhaustion (w/ hysteresis)*/
116static int nsw_rcount; /* free read buffers */
117static int nsw_wcount_sync; /* limit write buffers / synchronous */
118static int nsw_wcount_async; /* limit write buffers / asynchronous */
119static int nsw_wcount_async_max;/* assigned maximum */
120static int nsw_cluster_max; /* maximum VOP I/O allowed */
121static int sw_alloc_interlock; /* swap pager allocation interlock */
122
123struct blist *swapblist;
124static struct swblock **swhash;
125static int swhash_mask;
126static int swap_async_max = 4; /* maximum in-progress async I/O's */
127
128extern struct vnode *swapdev_vp; /* from vm_swap.c */
129
130SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
131 CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");
132
133/*
134 * "named" and "unnamed" anon region objects. Try to reduce the overhead
135 * of searching a named list by hashing it just a little.
136 */
137
138#define NOBJLISTS 8
139
140#define NOBJLIST(handle) \
141 (&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])
142
143static struct pagerlst swap_pager_object_list[NOBJLISTS];
144struct pagerlst swap_pager_un_object_list;
145vm_zone_t swap_zone;
146
147/*
148 * pagerops for OBJT_SWAP - "swap pager". Some ops are also global procedure
149 * calls hooked from other parts of the VM system and do not appear here.
150 * (see vm/swap_pager.h).
151 */
152
153static vm_object_t
154 swap_pager_alloc (void *handle, vm_ooffset_t size,
155 vm_prot_t prot, vm_ooffset_t offset);
156static void swap_pager_dealloc (vm_object_t object);
157static int swap_pager_getpages (vm_object_t, vm_page_t *, int, int);
158static void swap_pager_init (void);
159static void swap_pager_unswapped (vm_page_t);
160static void swap_pager_strategy (vm_object_t, struct buf *);
161
162struct pagerops swappagerops = {
163 swap_pager_init, /* early system initialization of pager */
164 swap_pager_alloc, /* allocate an OBJT_SWAP object */
165 swap_pager_dealloc, /* deallocate an OBJT_SWAP object */
166 swap_pager_getpages, /* pagein */
167 swap_pager_putpages, /* pageout */
168 swap_pager_haspage, /* get backing store status for page */
169 swap_pager_unswapped, /* remove swap related to page */
170 swap_pager_strategy /* pager strategy call */
171};
172
173/*
174 * dmmax is in page-sized chunks with the new swap system. It was
175 * dev-bsized chunks in the old. dmmax is always a power of 2.
176 *
177 * swap_*() routines are externally accessible. swp_*() routines are
178 * internal.
179 */
180
181int dmmax;
182static int dmmax_mask;
183int nswap_lowat = 128; /* in pages, swap_pager_almost_full warn */
184int nswap_hiwat = 512; /* in pages, swap_pager_almost_full warn */
185
186static __inline void swp_sizecheck (void);
187static void swp_pager_sync_iodone (struct buf *bp);
188static void swp_pager_async_iodone (struct buf *bp);
189
190/*
191 * Swap bitmap functions
192 */
193
194static __inline void swp_pager_freeswapspace (daddr_t blk, int npages);
195static __inline daddr_t swp_pager_getswapspace (int npages);
196
197/*
198 * Metadata functions
199 */
200
201static void swp_pager_meta_build (vm_object_t, vm_pindex_t, daddr_t);
202static void swp_pager_meta_free (vm_object_t, vm_pindex_t, daddr_t);
203static void swp_pager_meta_free_all (vm_object_t);
204static daddr_t swp_pager_meta_ctl (vm_object_t, vm_pindex_t, int);
205
206/*
207 * SWP_SIZECHECK() - update swap_pager_full indication
208 *
209 * update the swap_pager_almost_full indication and warn when we are
210 * about to run out of swap space, using lowat/hiwat hysteresis.
211 *
212 * Clear swap_pager_full ( task killing ) indication when lowat is met.
213 *
214 * No restrictions on call
215 * This routine may not block.
216 * This routine must be called at splvm()
217 */
218
219static __inline void
swp_sizecheck(void)
221{
222 if (vm_swap_size < nswap_lowat) {
223 if (swap_pager_almost_full == 0) {
224 printf("swap_pager: out of swap space\n");
225 swap_pager_almost_full = 1;
226 }
227 } else {
228 swap_pager_full = 0;
229 if (vm_swap_size > nswap_hiwat)
230 swap_pager_almost_full = 0;
231 }
232}
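
/*
 * Worked example of the hysteresis above (illustrative, not from the
 * original source): with the defaults nswap_lowat = 128 and
 * nswap_hiwat = 512 pages, the "out of swap space" warning is printed
 * once free swap drops below 128 pages and is not re-armed until free
 * swap climbs back above 512 pages, so the console is not flooded while
 * free swap oscillates around the low-water mark.
 */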
233
234/*
235 * SWAP_PAGER_INIT() - initialize the swap pager!
236 *
237 * Expected to be started from system init. NOTE: This code is run
238 * before much else so be careful what you depend on. Most of the VM
239 * system has yet to be initialized at this point.
240 */
241
242static void
swap_pager_init(void)
244{
245 /*
246 * Initialize object lists
247 */
248 int i;
249
250 for (i = 0; i < NOBJLISTS; ++i)
251 TAILQ_INIT(&swap_pager_object_list[i]);
252 TAILQ_INIT(&swap_pager_un_object_list);
253
254 /*
255 * Device Stripe, in PAGE_SIZE'd blocks
256 */
257
258 dmmax = SWB_NPAGES * 2;
259 dmmax_mask = ~(dmmax - 1);
260}
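
/*
 * Illustrative sketch (not part of the original source): how a run of
 * swap blocks is tested against the device stripe set up above.  With
 * SWB_NPAGES of 16, dmmax is 32 pages and dmmax_mask is ~31, so two
 * block numbers lie in the same stripe exactly when their masked
 * values agree.  The putpages code below applies this test to blk and
 * blk + n before issuing an I/O.
 */
#if 0
static __inline int
swp_example_in_different_stripes(daddr_t blk, int n)
{
	/* non-zero when blk and blk + n fall in different stripes */
	return (((blk ^ (blk + n)) & dmmax_mask) != 0);
}
#endif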
261
262/*
263 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
264 *
265 * Expected to be started from pageout process once, prior to entering
266 * its main loop.
267 */
268
269void
swap_pager_swap_init(void)
271{
272 int n, n2;
273
274 /*
275 * Number of in-transit swap bp operations. Don't
276 * exhaust the pbufs completely. Make sure we
277 * initialize workable values (0 will work for hysteresis
278 * but it isn't very efficient).
279 *
280 * The nsw_cluster_max is constrained by the number of pages an XIO
281 * holds, i.e., (MAXPHYS/PAGE_SIZE) and our locally defined
282 * MAX_PAGEOUT_CLUSTER. Also be aware that swap ops are
283 * constrained by the swap device interleave stripe size.
284 *
285 * Currently we hardwire nsw_wcount_async to 4. This limit is
286 * designed to prevent other I/O from having high latencies due to
287 * our pageout I/O. The value 4 works well for one or two active swap
288 * devices but is probably a little low if you have more. Even so,
289 * a higher value would probably generate only a limited improvement
290 * with three or four active swap devices since the system does not
291 * typically have to pageout at extreme bandwidths. We will want
292 * at least 2 per swap devices, and 4 is a pretty good value if you
293 * have one NFS swap device due to the command/ack latency over NFS.
294 * So it all works out pretty well.
295 */
296
297 nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);
298
299 nsw_rcount = (nswbuf + 1) / 2;
300 nsw_wcount_sync = (nswbuf + 3) / 4;
301 nsw_wcount_async = 4;
302 nsw_wcount_async_max = nsw_wcount_async;
303
304 /*
305 * Initialize our zone. Right now I'm just guessing on the number
306 * we need based on the number of pages in the system. Each swblock
307 * can hold 16 pages, so this is probably overkill. This reservation
308 * is typically limited to around 32MB by default.
309 */
	n = vmstats.v_page_count / 2;
311 if (maxswzone && n > maxswzone / sizeof(struct swblock))
312 n = maxswzone / sizeof(struct swblock);
313 n2 = n;
314
315 do {
316 swap_zone = zinit(
317 "SWAPMETA",
318 sizeof(struct swblock),
319 n,
320 ZONE_INTERRUPT,
321 1);
322 if (swap_zone != NULL)
323 break;
324 /*
325 * if the allocation failed, try a zone two thirds the
326 * size of the previous attempt.
327 */
328 n -= ((n + 2) / 3);
329 } while (n > 0);
330
331 if (swap_zone == NULL)
332 panic("swap_pager_swap_init: swap_zone == NULL");
333 if (n2 != n)
334 printf("Swap zone entries reduced from %d to %d.\n", n2, n);
335 n2 = n;
336
337 /*
338 * Initialize our meta-data hash table. The swapper does not need to
339 * be quite as efficient as the VM system, so we do not use an
340 * oversized hash table.
341 *
342 * n: size of hash table, must be power of 2
343 * swhash_mask: hash table index mask
344 */
345
346 for (n = 1; n < n2 / 8; n *= 2)
347 ;
348
349 swhash = malloc(sizeof(struct swblock *) * n, M_VMPGDATA, M_WAITOK);
350 bzero(swhash, sizeof(struct swblock *) * n);
351
352 swhash_mask = n - 1;
353}
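
/*
 * Sizing example (illustrative only): if the zone ends up with
 * n2 = 8192 swblock entries, the loop above stops at n = 1024, the
 * smallest power of two that is >= n2 / 8, giving 1024 hash buckets
 * and a swhash_mask of 1023.
 */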
354
355/*
356 * SWAP_PAGER_ALLOC() - allocate a new OBJT_SWAP VM object and instantiate
357 * its metadata structures.
358 *
359 * This routine is called from the mmap and fork code to create a new
360 * OBJT_SWAP object. We do this by creating an OBJT_DEFAULT object
361 * and then converting it with swp_pager_meta_build().
362 *
363 * This routine may block in vm_object_allocate() and create a named
364 * object lookup race, so we must interlock. We must also run at
365 * splvm() for the object lookup to handle races with interrupts, but
366 * we do not have to maintain splvm() in between the lookup and the
367 * add because (I believe) it is not possible to attempt to create
368 * a new swap object w/handle when a default object with that handle
369 * already exists.
370 */
371
372static vm_object_t
373swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
374 vm_ooffset_t offset)
375{
376 vm_object_t object;
377
378 if (handle) {
379 /*
380 * Reference existing named region or allocate new one. There
381 * should not be a race here against swp_pager_meta_build()
382 * as called from vm_page_remove() in regards to the lookup
383 * of the handle.
384 */
385
386 while (sw_alloc_interlock) {
387 sw_alloc_interlock = -1;
			tsleep(&sw_alloc_interlock, 0, "swpalc", 0);
389 }
390 sw_alloc_interlock = 1;
391
392 object = vm_pager_object_lookup(NOBJLIST(handle), handle);
393
394 if (object != NULL) {
395 vm_object_reference(object);
396 } else {
397 object = vm_object_allocate(OBJT_DEFAULT,
398 OFF_TO_IDX(offset + PAGE_MASK + size));
399 object->handle = handle;
400
401 swp_pager_meta_build(object, 0, SWAPBLK_NONE);
402 }
403
404 if (sw_alloc_interlock < 0)
405 wakeup(&sw_alloc_interlock);
406
407 sw_alloc_interlock = 0;
408 } else {
409 object = vm_object_allocate(OBJT_DEFAULT,
410 OFF_TO_IDX(offset + PAGE_MASK + size));
411
412 swp_pager_meta_build(object, 0, SWAPBLK_NONE);
413 }
414
415 return (object);
416}
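
/*
 * Example of the object sizing above (illustrative only): with offset 0
 * and a size of 5 * PAGE_SIZE + 1 bytes, OFF_TO_IDX(offset + PAGE_MASK +
 * size) yields 6, i.e. the byte length is rounded up to whole pages.
 */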
417
418/*
419 * SWAP_PAGER_DEALLOC() - remove swap metadata from object
420 *
421 * The swap backing for the object is destroyed. The code is
422 * designed such that we can reinstantiate it later, but this
423 * routine is typically called only when the entire object is
424 * about to be destroyed.
425 *
426 * This routine may block, but no longer does.
427 *
428 * The object must be locked or unreferenceable.
429 */
430
431static void
swap_pager_dealloc(vm_object_t object)
433{
434 int s;
435
436 /*
437 * Remove from list right away so lookups will fail if we block for
438 * pageout completion.
439 */
440
441 if (object->handle == NULL) {
442 TAILQ_REMOVE(&swap_pager_un_object_list, object, pager_object_list);
443 } else {
444 TAILQ_REMOVE(NOBJLIST(object->handle), object, pager_object_list);
445 }
446
447 vm_object_pip_wait(object, "swpdea");
448
449 /*
450 * Free all remaining metadata. We only bother to free it from
451 * the swap meta data. We do not attempt to free swapblk's still
452 * associated with vm_page_t's for this object. We do not care
453 * if paging is still in progress on some objects.
454 */
455 s = splvm();
456 swp_pager_meta_free_all(object);
457 splx(s);
458}
459
460/************************************************************************
461 * SWAP PAGER BITMAP ROUTINES *
462 ************************************************************************/
463
464/*
465 * SWP_PAGER_GETSWAPSPACE() - allocate raw swap space
466 *
467 * Allocate swap for the requested number of pages. The starting
468 * swap block number (a page index) is returned or SWAPBLK_NONE
469 * if the allocation failed.
470 *
471 * Also has the side effect of advising that somebody made a mistake
472 * when they configured swap and didn't configure enough.
473 *
474 * Must be called at splvm() to avoid races with bitmap frees from
475 * vm_page_remove() aka swap_pager_page_removed().
476 *
477 * This routine may not block
478 * This routine must be called at splvm().
479 */
480
481static __inline daddr_t
swp_pager_getswapspace(int npages)
483{
484 daddr_t blk;
485
486 if ((blk = blist_alloc(swapblist, npages)) == SWAPBLK_NONE) {
487 if (swap_pager_full != 2) {
488 printf("swap_pager_getswapspace: failed\n");
489 swap_pager_full = 2;
490 swap_pager_almost_full = 1;
491 }
492 } else {
493 vm_swap_size -= npages;
494 swp_sizecheck();
495 }
496 return(blk);
497}
498
499/*
500 * SWP_PAGER_FREESWAPSPACE() - free raw swap space
501 *
502 * This routine returns the specified swap blocks back to the bitmap.
503 *
504 * Note: This routine may not block (it could in the old swap code),
505 * and through the use of the new blist routines it does not block.
506 *
507 * We must be called at splvm() to avoid races with bitmap frees from
508 * vm_page_remove() aka swap_pager_page_removed().
509 *
510 * This routine may not block
511 * This routine must be called at splvm().
512 */
513
514static __inline void
swp_pager_freeswapspace(daddr_t blk, int npages)
516{
517 blist_free(swapblist, blk, npages);
518 vm_swap_size += npages;
519 swp_sizecheck();
520}
521
522/*
523 * SWAP_PAGER_FREESPACE() - frees swap blocks associated with a page
524 * range within an object.
525 *
526 * This is a globally accessible routine.
527 *
528 * This routine removes swapblk assignments from swap metadata.
529 *
530 * The external callers of this routine typically have already destroyed
531 * or renamed vm_page_t's associated with this range in the object so
532 * we should be ok.
533 *
534 * This routine may be called at any spl. We up our spl to splvm temporarily
535 * in order to perform the metadata removal.
536 */
537
538void
swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_size_t size)
540{
541 int s = splvm();
542 swp_pager_meta_free(object, start, size);
543 splx(s);
544}
545
546/*
547 * SWAP_PAGER_RESERVE() - reserve swap blocks in object
548 *
549 * Assigns swap blocks to the specified range within the object. The
 * swap blocks are not zeroed. Any previous swap assignment is destroyed.
551 *
552 * Returns 0 on success, -1 on failure.
553 */
554
555int
556swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
557{
558 int s;
559 int n = 0;
560 daddr_t blk = SWAPBLK_NONE;
561 vm_pindex_t beg = start; /* save start index */
562
563 s = splvm();
564 while (size) {
565 if (n == 0) {
566 n = BLIST_MAX_ALLOC;
567 while ((blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE) {
568 n >>= 1;
569 if (n == 0) {
570 swp_pager_meta_free(object, beg, start - beg);
571 splx(s);
572 return(-1);
573 }
574 }
575 }
576 swp_pager_meta_build(object, start, blk);
577 --size;
578 ++start;
579 ++blk;
580 --n;
581 }
582 swp_pager_meta_free(object, start, n);
583 splx(s);
584 return(0);
585}
586
587/*
588 * SWAP_PAGER_COPY() - copy blocks from source pager to destination pager
589 * and destroy the source.
590 *
591 * Copy any valid swapblks from the source to the destination. In
592 * cases where both the source and destination have a valid swapblk,
593 * we keep the destination's.
594 *
595 * This routine is allowed to block. It may block allocating metadata
596 * indirectly through swp_pager_meta_build() or if paging is still in
597 * progress on the source.
598 *
599 * This routine can be called at any spl
600 *
601 * XXX vm_page_collapse() kinda expects us not to block because we
602 * supposedly do not need to allocate memory, but for the moment we
603 * *may* have to get a little memory from the zone allocator, but
604 * it is taken from the interrupt memory. We should be ok.
605 *
606 * The source object contains no vm_page_t's (which is just as well)
607 *
608 * The source object is of type OBJT_SWAP.
609 *
610 * The source and destination objects must be locked or
611 * inaccessible (XXX are they ?)
612 */
613
614void
swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
	vm_pindex_t offset, int destroysource)
617{
618 vm_pindex_t i;
619 int s;
620
621 s = splvm();
622
623 /*
624 * If destroysource is set, we remove the source object from the
625 * swap_pager internal queue now.
626 */
627
628 if (destroysource) {
629 if (srcobject->handle == NULL) {
630 TAILQ_REMOVE(
631 &swap_pager_un_object_list,
632 srcobject,
633 pager_object_list
634 );
635 } else {
636 TAILQ_REMOVE(
637 NOBJLIST(srcobject->handle),
638 srcobject,
639 pager_object_list
640 );
641 }
642 }
643
644 /*
645 * transfer source to destination.
646 */
647
648 for (i = 0; i < dstobject->size; ++i) {
649 daddr_t dstaddr;
650
651 /*
652 * Locate (without changing) the swapblk on the destination,
653 * unless it is invalid in which case free it silently, or
654 * if the destination is a resident page, in which case the
655 * source is thrown away.
656 */
657
658 dstaddr = swp_pager_meta_ctl(dstobject, i, 0);
659
660 if (dstaddr == SWAPBLK_NONE) {
661 /*
662 * Destination has no swapblk and is not resident,
663 * copy source.
664 */
665 daddr_t srcaddr;
666
667 srcaddr = swp_pager_meta_ctl(
668 srcobject,
669 i + offset,
670 SWM_POP
671 );
672
673 if (srcaddr != SWAPBLK_NONE)
674 swp_pager_meta_build(dstobject, i, srcaddr);
675 } else {
676 /*
677 * Destination has valid swapblk or it is represented
678 * by a resident page. We destroy the sourceblock.
679 */
680
681 swp_pager_meta_ctl(srcobject, i + offset, SWM_FREE);
682 }
683 }
684
685 /*
686 * Free left over swap blocks in source.
687 *
 * We have to revert the type to OBJT_DEFAULT so we do not accidentally
689 * double-remove the object from the swap queues.
690 */
691
692 if (destroysource) {
693 swp_pager_meta_free_all(srcobject);
694 /*
695 * Reverting the type is not necessary, the caller is going
696 * to destroy srcobject directly, but I'm doing it here
697 * for consistency since we've removed the object from its
698 * queues.
699 */
700 srcobject->type = OBJT_DEFAULT;
701 }
702 splx(s);
703}
704
705/*
706 * SWAP_PAGER_HASPAGE() - determine if we have good backing store for
707 * the requested page.
708 *
709 * We determine whether good backing store exists for the requested
710 * page and return TRUE if it does, FALSE if it doesn't.
711 *
712 * If TRUE, we also try to determine how much valid, contiguous backing
713 * store exists before and after the requested page within a reasonable
714 * distance. We do not try to restrict it to the swap device stripe
715 * (that is handled in getpages/putpages). It probably isn't worth
716 * doing here.
717 */
718
719boolean_t
swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
	int *after)
722{
723 daddr_t blk0;
724 int s;
725
726 /*
727 * do we have good backing store at the requested index ?
728 */
729
730 s = splvm();
731 blk0 = swp_pager_meta_ctl(object, pindex, 0);
732
733 if (blk0 == SWAPBLK_NONE) {
734 splx(s);
735 if (before)
736 *before = 0;
737 if (after)
738 *after = 0;
739 return (FALSE);
740 }
741
742 /*
743 * find backwards-looking contiguous good backing store
744 */
745
746 if (before != NULL) {
747 int i;
748
749 for (i = 1; i < (SWB_NPAGES/2); ++i) {
750 daddr_t blk;
751
752 if (i > pindex)
753 break;
754 blk = swp_pager_meta_ctl(object, pindex - i, 0);
755 if (blk != blk0 - i)
756 break;
757 }
758 *before = (i - 1);
759 }
760
761 /*
762 * find forward-looking contiguous good backing store
763 */
764
765 if (after != NULL) {
766 int i;
767
768 for (i = 1; i < (SWB_NPAGES/2); ++i) {
769 daddr_t blk;
770
771 blk = swp_pager_meta_ctl(object, pindex + i, 0);
772 if (blk != blk0 + i)
773 break;
774 }
775 *after = (i - 1);
776 }
777 splx(s);
778 return (TRUE);
779}
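
/*
 * Illustrative example (not from the original source): if the swap
 * backing is contiguous for exactly pindex - 2 through pindex + 3, the
 * routine above returns TRUE with *before = 2 and *after = 3.  The scan
 * is bounded to SWB_NPAGES/2 - 1 pages in either direction.
 */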
780
781/*
782 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
783 *
784 * This removes any associated swap backing store, whether valid or
785 * not, from the page.
786 *
787 * This routine is typically called when a page is made dirty, at
788 * which point any associated swap can be freed. MADV_FREE also
789 * calls us in a special-case situation
790 *
791 * NOTE!!! If the page is clean and the swap was valid, the caller
792 * should make the page dirty before calling this routine. This routine
793 * does NOT change the m->dirty status of the page. Also: MADV_FREE
794 * depends on it.
795 *
796 * This routine may not block
797 * This routine must be called at splvm()
798 */
799
800static void
swap_pager_unswapped(vm_page_t m)
802{
803 swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
804}
805
806/*
807 * SWAP_PAGER_STRATEGY() - read, write, free blocks
808 *
809 * This implements the vm_pager_strategy() interface to swap and allows
810 * other parts of the system to directly access swap as backing store
811 * through vm_objects of type OBJT_SWAP. This is intended to be a
812 * cacheless interface ( i.e. caching occurs at higher levels ).
813 * Therefore we do not maintain any resident pages. All I/O goes
814 * directly to and from the swap device.
815 *
816 * Note that b_blkno is scaled for PAGE_SIZE
817 *
818 * We currently attempt to run I/O synchronously or asynchronously as
 * the caller requests. This isn't perfect because we lose error
820 * sequencing when we run multiple ops in parallel to satisfy a request.
821 * But this is swap, so we let it all hang out.
822 */
823
824static void
825swap_pager_strategy(vm_object_t object, struct buf *bp)
826{
827 vm_pindex_t start;
828 int count;
829 int s;
830 char *data;
831 struct buf *nbp = NULL;
832
833 if (bp->b_bcount & PAGE_MASK) {
834 bp->b_error = EINVAL;
835 bp->b_flags |= B_ERROR | B_INVAL;
836 biodone(bp);
837 printf("swap_pager_strategy: bp %p b_vp %p blk %d size %d, not page bounded\n", bp, bp->b_vp, (int)bp->b_pblkno, (int)bp->b_bcount);
838 return;
839 }
840
841 /*
842 * Clear error indication, initialize page index, count, data pointer.
843 */
844
845 bp->b_error = 0;
846 bp->b_flags &= ~B_ERROR;
847 bp->b_resid = bp->b_bcount;
848
849 start = bp->b_pblkno;
850 count = howmany(bp->b_bcount, PAGE_SIZE);
851 data = bp->b_data;
852
853 s = splvm();
854
855 /*
856 * Deal with B_FREEBUF
857 */
858
859 if (bp->b_flags & B_FREEBUF) {
860 /*
861 * FREE PAGE(s) - destroy underlying swap that is no longer
862 * needed.
863 */
864 swp_pager_meta_free(object, start, count);
865 splx(s);
866 bp->b_resid = 0;
867 biodone(bp);
868 return;
869 }
870
871 /*
872 * Execute read or write
873 */
874
875 while (count > 0) {
876 daddr_t blk;
877
878 /*
879 * Obtain block. If block not found and writing, allocate a
880 * new block and build it into the object.
881 */
882
883 blk = swp_pager_meta_ctl(object, start, 0);
884 if ((blk == SWAPBLK_NONE) && (bp->b_flags & B_READ) == 0) {
885 blk = swp_pager_getswapspace(1);
886 if (blk == SWAPBLK_NONE) {
887 bp->b_error = ENOMEM;
888 bp->b_flags |= B_ERROR;
889 break;
890 }
891 swp_pager_meta_build(object, start, blk);
892 }
893
894 /*
895 * Do we have to flush our current collection? Yes if:
896 *
897 * - no swap block at this index
898 * - swap block is not contiguous
 * - we cross a physical disk boundary in the
900 * stripe.
901 */
902
903 if (
904 nbp && (nbp->b_blkno + btoc(nbp->b_bcount) != blk ||
905 ((nbp->b_blkno ^ blk) & dmmax_mask)
906 )
907 ) {
908 splx(s);
909 if (bp->b_flags & B_READ) {
				++mycpu->gd_cnt.v_swapin;
				mycpu->gd_cnt.v_swappgsin += btoc(nbp->b_bcount);
			} else {
				++mycpu->gd_cnt.v_swapout;
				mycpu->gd_cnt.v_swappgsout += btoc(nbp->b_bcount);
915 nbp->b_dirtyend = nbp->b_bcount;
916 }
917 flushchainbuf(nbp);
918 s = splvm();
919 nbp = NULL;
920 }
921
922 /*
923 * Add new swapblk to nbp, instantiating nbp if necessary.
924 * Zero-fill reads are able to take a shortcut.
925 */
926
927 if (blk == SWAPBLK_NONE) {
928 /*
929 * We can only get here if we are reading. Since
930 * we are at splvm() we can safely modify b_resid,
931 * even if chain ops are in progress.
932 */
933 bzero(data, PAGE_SIZE);
934 bp->b_resid -= PAGE_SIZE;
935 } else {
936 if (nbp == NULL) {
937 nbp = getchainbuf(bp, swapdev_vp, (bp->b_flags & B_READ) | B_ASYNC);
938 nbp->b_blkno = blk;
939 nbp->b_bcount = 0;
940 nbp->b_data = data;
941 }
942 nbp->b_bcount += PAGE_SIZE;
943 }
944 --count;
945 ++start;
946 data += PAGE_SIZE;
947 }
948
949 /*
950 * Flush out last buffer
951 */
952
953 splx(s);
954
955 if (nbp) {
956 if ((bp->b_flags & B_ASYNC) == 0)
957 nbp->b_flags &= ~B_ASYNC;
958 if (nbp->b_flags & B_READ) {
			++mycpu->gd_cnt.v_swapin;
			mycpu->gd_cnt.v_swappgsin += btoc(nbp->b_bcount);
		} else {
			++mycpu->gd_cnt.v_swapout;
			mycpu->gd_cnt.v_swappgsout += btoc(nbp->b_bcount);
964 nbp->b_dirtyend = nbp->b_bcount;
965 }
966 flushchainbuf(nbp);
967 /* nbp = NULL; */
968 }
969
970 /*
971 * Wait for completion.
972 */
973
974 if (bp->b_flags & B_ASYNC) {
975 autochaindone(bp);
976 } else {
977 waitchainbuf(bp, 0, 1);
978 }
979}
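
/*
 * Caller sketch (illustrative assumptions, not taken from this file):
 * to release the swap backing a page range through the strategy
 * interface, a caller would mark a buf B_FREEBUF, point b_pblkno at
 * the starting page index, set a page-bounded b_bcount, and hand it
 * to vm_pager_strategy(), which dispatches to the routine above.
 */
#if 0
	/* bp: a buf obtained and initialized by the caller */
	bp->b_flags |= B_FREEBUF;
	bp->b_pblkno = start;			/* starting page index */
	bp->b_bcount = npages * PAGE_SIZE;	/* must be page bounded */
	vm_pager_strategy(object, bp);
#endif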
980
981/*
982 * SWAP_PAGER_GETPAGES() - bring pages in from swap
983 *
984 * Attempt to retrieve (m, count) pages from backing store, but make
985 * sure we retrieve at least m[reqpage]. We try to load in as large
986 * a chunk surrounding m[reqpage] as is contiguous in swap and which
987 * belongs to the same object.
988 *
989 * The code is designed for asynchronous operation and
990 * immediate-notification of 'reqpage' but tends not to be
991 * used that way. Please do not optimize-out this algorithmic
992 * feature, I intend to improve on it in the future.
993 *
994 * The parent has a single vm_object_pip_add() reference prior to
995 * calling us and we should return with the same.
996 *
997 * The parent has BUSY'd the pages. We should return with 'm'
998 * left busy, but the others adjusted.
999 */
1000
1001static int
swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
1003{
1004 struct buf *bp;
1005 vm_page_t mreq;
1006 int s;
1007 int i;
1008 int j;
1009 daddr_t blk;
1010 vm_offset_t kva;
1011 vm_pindex_t lastpindex;
1012
1013 mreq = m[reqpage];
1014
1015 if (mreq->object != object) {
1016 panic("swap_pager_getpages: object mismatch %p/%p",
1017 object,
1018 mreq->object
1019 );
1020 }
1021 /*
1022 * Calculate range to retrieve. The pages have already been assigned
1023 * their swapblks. We require a *contiguous* range that falls entirely
1024 * within a single device stripe. If we do not supply it, bad things
1025 * happen. Note that blk, iblk & jblk can be SWAPBLK_NONE, but the
1026 * loops are set up such that the case(s) are handled implicitly.
1027 *
1028 * The swp_*() calls must be made at splvm(). vm_page_free() does
1029 * not need to be, but it will go a little faster if it is.
1030 */
1031
1032 s = splvm();
1033 blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0);
1034
1035 for (i = reqpage - 1; i >= 0; --i) {
1036 daddr_t iblk;
1037
1038 iblk = swp_pager_meta_ctl(m[i]->object, m[i]->pindex, 0);
1039 if (blk != iblk + (reqpage - i))
1040 break;
1041 if ((blk ^ iblk) & dmmax_mask)
1042 break;
1043 }
1044 ++i;
1045
1046 for (j = reqpage + 1; j < count; ++j) {
1047 daddr_t jblk;
1048
1049 jblk = swp_pager_meta_ctl(m[j]->object, m[j]->pindex, 0);
1050 if (blk != jblk - (j - reqpage))
1051 break;
1052 if ((blk ^ jblk) & dmmax_mask)
1053 break;
1054 }
1055
1056 /*
1057 * free pages outside our collection range. Note: we never free
1058 * mreq, it must remain busy throughout.
1059 */
1060
1061 {
1062 int k;
1063
1064 for (k = 0; k < i; ++k)
1065 vm_page_free(m[k]);
1066 for (k = j; k < count; ++k)
1067 vm_page_free(m[k]);
1068 }
1069 splx(s);
1070
1071
1072 /*
1073 * Return VM_PAGER_FAIL if we have nothing to do. Return mreq
1074 * still busy, but the others unbusied.
1075 */
1076
1077 if (blk == SWAPBLK_NONE)
1078 return(VM_PAGER_FAIL);
1079
1080 /*
1081 * Get a swap buffer header to perform the IO
1082 */
1083
1084 bp = getpbuf(&nsw_rcount);
1085 kva = (vm_offset_t) bp->b_data;
1086
1087 /*
1088 * map our page(s) into kva for input
1089 *
1090 * NOTE: B_PAGING is set by pbgetvp()
1091 */
1092
1093 pmap_qenter(kva, m + i, j - i);
1094
1095 bp->b_flags = B_READ | B_CALL;
1096 bp->b_iodone = swp_pager_async_iodone;
	bp->b_data = (caddr_t) kva;
1098 bp->b_blkno = blk - (reqpage - i);
1099 bp->b_bcount = PAGE_SIZE * (j - i);
1100 bp->b_bufsize = PAGE_SIZE * (j - i);
1101 bp->b_pager.pg_reqpage = reqpage - i;
1102
1103 {
1104 int k;
1105
1106 for (k = i; k < j; ++k) {
			bp->b_xio.xio_pages[k - i] = m[k];
			vm_page_flag_set(m[k], PG_SWAPINPROG);
		}
	}
	bp->b_xio.xio_npages = j - i;

	pbgetvp(swapdev_vp, bp);

	mycpu->gd_cnt.v_swapin++;
	mycpu->gd_cnt.v_swappgsin += bp->b_xio.xio_npages;
1117
1118 /*
1119 * We still hold the lock on mreq, and our automatic completion routine
1120 * does not remove it.
1121 */
1122
	vm_object_pip_add(mreq->object, bp->b_xio.xio_npages);
1124 lastpindex = m[j-1]->pindex;
1125
1126 /*
1127 * perform the I/O. NOTE!!! bp cannot be considered valid after
1128 * this point because we automatically release it on completion.
1129 * Instead, we look at the one page we are interested in which we
1130 * still hold a lock on even through the I/O completion.
1131 *
1132 * The other pages in our m[] array are also released on completion,
1133 * so we cannot assume they are valid anymore either.
1134 *
1135 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
1136 */
1137
1138 BUF_KERNPROC(bp);
1139 VOP_STRATEGY(bp->b_vp, bp);
1140
1141 /*
1142 * wait for the page we want to complete. PG_SWAPINPROG is always
1143 * cleared on completion. If an I/O error occurs, SWAPBLK_NONE
1144 * is set in the meta-data.
1145 */
1146
1147 s = splvm();
1148
1149 while ((mreq->flags & PG_SWAPINPROG) != 0) {
1150 vm_page_flag_set(mreq, PG_WANTED | PG_REFERENCED);
		mycpu->gd_cnt.v_intrans++;
		if (tsleep(mreq, 0, "swread", hz*20)) {
1153 printf(
1154 "swap_pager: indefinite wait buffer: device:"
1155 " %s, blkno: %ld, size: %ld\n",
1156 devtoname(bp->b_dev), (long)bp->b_blkno,
1157 bp->b_bcount
1158 );
1159 }
1160 }
1161
1162 splx(s);
1163
1164 /*
 * mreq is left busied after completion, but all the other pages
1166 * are freed. If we had an unrecoverable read error the page will
1167 * not be valid.
1168 */
1169
1170 if (mreq->valid != VM_PAGE_BITS_ALL) {
1171 return(VM_PAGER_ERROR);
1172 } else {
1173 return(VM_PAGER_OK);
1174 }
1175
1176 /*
1177 * A final note: in a low swap situation, we cannot deallocate swap
1178 * and mark a page dirty here because the caller is likely to mark
1179 * the page clean when we return, causing the page to possibly revert
1180 * to all-zero's later.
1181 */
1182}
1183
1184/*
1185 * swap_pager_putpages:
1186 *
1187 * Assign swap (if necessary) and initiate I/O on the specified pages.
1188 *
1189 * We support both OBJT_DEFAULT and OBJT_SWAP objects. DEFAULT objects
1190 * are automatically converted to SWAP objects.
1191 *
1192 * In a low memory situation we may block in VOP_STRATEGY(), but the new
1193 * vm_page reservation system coupled with properly written VFS devices
1194 * should ensure that no low-memory deadlock occurs. This is an area
1195 * which needs work.
1196 *
1197 * The parent has N vm_object_pip_add() references prior to
1198 * calling us and will remove references for rtvals[] that are
1199 * not set to VM_PAGER_PEND. We need to remove the rest on I/O
1200 * completion.
1201 *
1202 * The parent has soft-busy'd the pages it passes us and will unbusy
 * those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
1204 * We need to unbusy the rest on I/O completion.
1205 */
1206
1207void
swap_pager_putpages(vm_object_t object, vm_page_t *m, int count, boolean_t sync,
	int *rtvals)
1210{
1211 int i;
1212 int n = 0;
1213
1214 if (count && m[0]->object != object) {
1215 panic("swap_pager_getpages: object mismatch %p/%p",
1216 object,
1217 m[0]->object
1218 );
1219 }
1220 /*
1221 * Step 1
1222 *
1223 * Turn object into OBJT_SWAP
1224 * check for bogus sysops
1225 * force sync if not pageout process
1226 */
1227
1228 if (object->type != OBJT_SWAP)
1229 swp_pager_meta_build(object, 0, SWAPBLK_NONE);
1230
	if (curthread != pagethread)
1232 sync = TRUE;
1233
1234 /*
1235 * Step 2
1236 *
1237 * Update nsw parameters from swap_async_max sysctl values.
1238 * Do not let the sysop crash the machine with bogus numbers.
1239 */
1240
1241 if (swap_async_max != nsw_wcount_async_max) {
1242 int n;
1243 int s;
1244
1245 /*
1246 * limit range
1247 */
1248 if ((n = swap_async_max) > nswbuf / 2)
1249 n = nswbuf / 2;
1250 if (n < 1)
1251 n = 1;
1252 swap_async_max = n;
1253
1254 /*
1255 * Adjust difference ( if possible ). If the current async
1256 * count is too low, we may not be able to make the adjustment
1257 * at this time.
1258 */
1259 s = splvm();
1260 n -= nsw_wcount_async_max;
1261 if (nsw_wcount_async + n >= 0) {
1262 nsw_wcount_async += n;
1263 nsw_wcount_async_max += n;
1264 wakeup(&nsw_wcount_async);
1265 }
1266 splx(s);
1267 }
1268
1269 /*
1270 * Step 3
1271 *
1272 * Assign swap blocks and issue I/O. We reallocate swap on the fly.
1273 * The page is left dirty until the pageout operation completes
1274 * successfully.
1275 */
1276
1277 for (i = 0; i < count; i += n) {
1278 int s;
1279 int j;
1280 struct buf *bp;
1281 daddr_t blk;
1282
1283 /*
1284 * Maximum I/O size is limited by a number of factors.
1285 */
1286
1287 n = min(BLIST_MAX_ALLOC, count - i);
1288 n = min(n, nsw_cluster_max);
1289
1290 s = splvm();
1291
1292 /*
1293 * Get biggest block of swap we can. If we fail, fall
1294 * back and try to allocate a smaller block. Don't go
1295 * overboard trying to allocate space if it would overly
1296 * fragment swap.
1297 */
1298 while (
1299 (blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE &&
1300 n > 4
1301 ) {
1302 n >>= 1;
1303 }
1304 if (blk == SWAPBLK_NONE) {
1305 for (j = 0; j < n; ++j)
1306 rtvals[i+j] = VM_PAGER_FAIL;
1307 splx(s);
1308 continue;
1309 }
1310
1311 /*
1312 * The I/O we are constructing cannot cross a physical
 * disk boundary in the swap stripe. Note: we are still
1314 * at splvm().
1315 */
1316 if ((blk ^ (blk + n)) & dmmax_mask) {
1317 j = ((blk + dmmax) & dmmax_mask) - blk;
1318 swp_pager_freeswapspace(blk + j, n - j);
1319 n = j;
1320 }
1321
1322 /*
1323 * All I/O parameters have been satisfied, build the I/O
1324 * request and assign the swap space.
1325 *
1326 * NOTE: B_PAGING is set by pbgetvp()
1327 */
1328
1329 if (sync == TRUE) {
1330 bp = getpbuf(&nsw_wcount_sync);
1331 bp->b_flags = B_CALL;
1332 } else {
1333 bp = getpbuf(&nsw_wcount_async);
1334 bp->b_flags = B_CALL | B_ASYNC;
1335 }
1336 bp->b_spc = NULL; /* not used, but NULL-out anyway */
1337
1338 pmap_qenter((vm_offset_t)bp->b_data, &m[i], n);
1339
1340 bp->b_bcount = PAGE_SIZE * n;
1341 bp->b_bufsize = PAGE_SIZE * n;
1342 bp->b_blkno = blk;
1343
1344 pbgetvp(swapdev_vp, bp);
1345
1346 for (j = 0; j < n; ++j) {
1347 vm_page_t mreq = m[i+j];
1348
1349 swp_pager_meta_build(
1350 mreq->object,
1351 mreq->pindex,
1352 blk + j
1353 );
1354 vm_page_dirty(mreq);
1355 rtvals[i+j] = VM_PAGER_OK;
1356
1357 vm_page_flag_set(mreq, PG_SWAPINPROG);
			bp->b_xio.xio_pages[j] = mreq;
		}
		bp->b_xio.xio_npages = n;
1361 /*
1362 * Must set dirty range for NFS to work.
1363 */
1364 bp->b_dirtyoff = 0;
1365 bp->b_dirtyend = bp->b_bcount;
1366
		mycpu->gd_cnt.v_swapout++;
		mycpu->gd_cnt.v_swappgsout += bp->b_xio.xio_npages;
1369 swapdev_vp->v_numoutput++;
1370
1371 splx(s);
1372
1373 /*
1374 * asynchronous
1375 *
1376 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
1377 */
1378
1379 if (sync == FALSE) {
1380 bp->b_iodone = swp_pager_async_iodone;
1381 BUF_KERNPROC(bp);
1382 VOP_STRATEGY(bp->b_vp, bp);
1383
1384 for (j = 0; j < n; ++j)
1385 rtvals[i+j] = VM_PAGER_PEND;
1386 continue;
1387 }
1388
1389 /*
1390 * synchronous
1391 *
1392 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
1393 */
1394
1395 bp->b_iodone = swp_pager_sync_iodone;
1396 VOP_STRATEGY(bp->b_vp, bp);
1397
1398 /*
1399 * Wait for the sync I/O to complete, then update rtvals.
1400 * We just set the rtvals[] to VM_PAGER_PEND so we can call
1401 * our async completion routine at the end, thus avoiding a
1402 * double-free.
1403 */
1404 s = splbio();
1405
1406 while ((bp->b_flags & B_DONE) == 0) {
			tsleep(bp, 0, "swwrt", 0);
1408 }
1409
1410 for (j = 0; j < n; ++j)
1411 rtvals[i+j] = VM_PAGER_PEND;
1412
1413 /*
1414 * Now that we are through with the bp, we can call the
1415 * normal async completion, which frees everything up.
1416 */
1417
1418 swp_pager_async_iodone(bp);
1419
1420 splx(s);
1421 }
1422}
1423
1424/*
1425 * swap_pager_sync_iodone:
1426 *
1427 * Completion routine for synchronous reads and writes from/to swap.
1428 * We just mark the bp is complete and wake up anyone waiting on it.
1429 *
1430 * This routine may not block. This routine is called at splbio() or better.
1431 */
1432
1433static void
swp_pager_sync_iodone(struct buf *bp)
1435{
1436 bp->b_flags |= B_DONE;
1437 bp->b_flags &= ~B_ASYNC;
1438 wakeup(bp);
1439}
1440
1441/*
1442 * swp_pager_async_iodone:
1443 *
1444 * Completion routine for asynchronous reads and writes from/to swap.
1445 * Also called manually by synchronous code to finish up a bp.
1446 *
1447 * For READ operations, the pages are PG_BUSY'd. For WRITE operations,
1448 * the pages are vm_page_t->busy'd. For READ operations, we PG_BUSY
1449 * unbusy all pages except the 'main' request page. For WRITE
1450 * operations, we vm_page_t->busy'd unbusy all pages ( we can do this
1451 * because we marked them all VM_PAGER_PEND on return from putpages ).
1452 *
1453 * This routine may not block.
1454 * This routine is called at splbio() or better
1455 *
1456 * We up ourselves to splvm() as required for various vm_page related
1457 * calls.
1458 */
1459
1460static void
swp_pager_async_iodone(struct buf *bp)
1462{
1463 int s;
1464 int i;
1465 vm_object_t object = NULL;
1466
1467 bp->b_flags |= B_DONE;
1468
1469 /*
1470 * report error
1471 */
1472
1473 if (bp->b_flags & B_ERROR) {
1474 printf(
1475 "swap_pager: I/O error - %s failed; blkno %ld,"
1476 "size %ld, error %d\n",
1477 ((bp->b_flags & B_READ) ? "pagein" : "pageout"),
1478 (long)bp->b_blkno,
1479 (long)bp->b_bcount,
1480 bp->b_error
1481 );
1482 }
1483
1484 /*
1485 * set object, raise to splvm().
1486 */
1487
	if (bp->b_xio.xio_npages)
		object = bp->b_xio.xio_pages[0]->object;
1490 s = splvm();
1491
1492 /*
1493 * remove the mapping for kernel virtual
1494 */
1495
	pmap_qremove((vm_offset_t)bp->b_data, bp->b_xio.xio_npages);
1497
1498 /*
1499 * cleanup pages. If an error occurs writing to swap, we are in
1500 * very serious trouble. If it happens to be a disk error, though,
1501 * we may be able to recover by reassigning the swap later on. So
1502 * in this case we remove the m->swapblk assignment for the page
 * but do not free it in the rlist. The erroneous block(s) are thus
1504 * never reallocated as swap. Redirty the page and continue.
1505 */
1506
	for (i = 0; i < bp->b_xio.xio_npages; ++i) {
		vm_page_t m = bp->b_xio.xio_pages[i];
1509
1510 vm_page_flag_clear(m, PG_SWAPINPROG);
1511
1512 if (bp->b_flags & B_ERROR) {
1513 /*
1514 * If an error occurs I'd love to throw the swapblk
1515 * away without freeing it back to swapspace, so it
1516 * can never be used again. But I can't from an
1517 * interrupt.
1518 */
1519
1520 if (bp->b_flags & B_READ) {
1521 /*
1522 * When reading, reqpage needs to stay
1523 * locked for the parent, but all other
1524 * pages can be freed. We still want to
1525 * wakeup the parent waiting on the page,
1526 * though. ( also: pg_reqpage can be -1 and
1527 * not match anything ).
1528 *
1529 * We have to wake specifically requested pages
1530 * up too because we cleared PG_SWAPINPROG and
1531 * someone may be waiting for that.
1532 *
1533 * NOTE: for reads, m->dirty will probably
1534 * be overridden by the original caller of
1535 * getpages so don't play cute tricks here.
1536 *
1537 * XXX IT IS NOT LEGAL TO FREE THE PAGE HERE
1538 * AS THIS MESSES WITH object->memq, and it is
1539 * not legal to mess with object->memq from an
1540 * interrupt.
1541 */
1542
1543 m->valid = 0;
1544 vm_page_flag_clear(m, PG_ZERO);
1545
1546 if (i != bp->b_pager.pg_reqpage)
1547 vm_page_free(m);
1548 else
1549 vm_page_flash(m);
1550 /*
1551 * If i == bp->b_pager.pg_reqpage, do not wake
1552 * the page up. The caller needs to.
1553 */
1554 } else {
1555 /*
1556 * If a write error occurs, reactivate page
1557 * so it doesn't clog the inactive list,
1558 * then finish the I/O.
1559 */
1560 vm_page_dirty(m);
1561 vm_page_activate(m);
1562 vm_page_io_finish(m);
1563 }
1564 } else if (bp->b_flags & B_READ) {
1565 /*
1566 * For read success, clear dirty bits. Nobody should
1567 * have this page mapped but don't take any chances,
1568 * make sure the pmap modify bits are also cleared.
1569 *
1570 * NOTE: for reads, m->dirty will probably be
1571 * overridden by the original caller of getpages so
1572 * we cannot set them in order to free the underlying
1573 * swap in a low-swap situation. I don't think we'd
1574 * want to do that anyway, but it was an optimization
1575 * that existed in the old swapper for a time before
1576 * it got ripped out due to precisely this problem.
1577 *
1578 * clear PG_ZERO in page.
1579 *
1580 * If not the requested page then deactivate it.
1581 *
1582 * Note that the requested page, reqpage, is left
1583 * busied, but we still have to wake it up. The
1584 * other pages are released (unbusied) by
1585 * vm_page_wakeup(). We do not set reqpage's
1586 * valid bits here, it is up to the caller.
1587 */
1588
1589 pmap_clear_modify(m);
1590 m->valid = VM_PAGE_BITS_ALL;
1591 vm_page_undirty(m);
1592 vm_page_flag_clear(m, PG_ZERO);
1593
1594 /*
1595 * We have to wake specifically requested pages
1596 * up too because we cleared PG_SWAPINPROG and
1597 * could be waiting for it in getpages. However,
1598 * be sure to not unbusy getpages specifically
1599 * requested page - getpages expects it to be
1600 * left busy.
1601 */
1602 if (i != bp->b_pager.pg_reqpage) {
1603 vm_page_deactivate(m);
1604 vm_page_wakeup(m);
1605 } else {
1606 vm_page_flash(m);
1607 }
1608 } else {
1609 /*
1610 * For write success, clear the modify and dirty
1611 * status, then finish the I/O ( which decrements the
1612 * busy count and possibly wakes waiter's up ).
1613 */
1614 pmap_clear_modify(m);
1615 vm_page_undirty(m);
1616 vm_page_io_finish(m);
1617 if (!vm_page_count_severe() || !vm_page_try_to_cache(m))
1618 vm_page_protect(m, VM_PROT_READ);
1619 }
1620 }
1621
1622 /*
1623 * adjust pip. NOTE: the original parent may still have its own
1624 * pip refs on the object.
1625 */
1626
1627 if (object)
		vm_object_pip_wakeupn(object, bp->b_xio.xio_npages);
1629
1630 /*
1631 * release the physical I/O buffer
1632 */
1633
1634 relpbuf(
1635 bp,
1636 ((bp->b_flags & B_READ) ? &nsw_rcount :
1637 ((bp->b_flags & B_ASYNC) ?
1638 &nsw_wcount_async :
1639 &nsw_wcount_sync
1640 )
1641 )
1642 );
1643 splx(s);
1644}
1645
1646/************************************************************************
1647 * SWAP META DATA *
1648 ************************************************************************
1649 *
1650 * These routines manipulate the swap metadata stored in the
1651 * OBJT_SWAP object. All swp_*() routines must be called at
1652 * splvm() because swap can be freed up by the low level vm_page
1653 * code which might be called from interrupts beyond what splbio() covers.
1654 *
1655 * Swap metadata is implemented with a global hash and not directly
1656 * linked into the object. Instead the object simply contains
1657 * appropriate tracking counters.
1658 */
1659
1660/*
1661 * SWP_PAGER_HASH() - hash swap meta data
1662 *
1663 * This is an inline helper function which hashes the swapblk given
1664 * the object and page index. It returns a pointer to a pointer
1665 * to the object, or a pointer to a NULL pointer if it could not
1666 * find a swapblk.
1667 *
1668 * This routine must be called at splvm().
1669 */
1670
1671static __inline struct swblock **
1672swp_pager_hash(vm_object_t object, vm_pindex_t index)
1673{
1674 struct swblock **pswap;
1675 struct swblock *swap;
1676
1677 index &= ~SWAP_META_MASK;
1678 pswap = &swhash[(index ^ (int)(intptr_t)object) & swhash_mask];
1679
1680 while ((swap = *pswap) != NULL) {
1681 if (swap->swb_object == object &&
1682 swap->swb_index == index
1683 ) {
1684 break;
1685 }
1686 pswap = &swap->swb_hnext;
1687 }
1688 return(pswap);
1689}
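
/*
 * Usage sketch (illustrative, not part of the original source): a plain
 * lookup of the swap block backing page 'pindex' of an OBJT_SWAP object
 * amounts to the following, which is what swp_pager_meta_ctl(object,
 * pindex, 0) does below.  Like all swp_*() routines it must run at
 * splvm().
 */
#if 0
static daddr_t
swp_example_lookup(vm_object_t object, vm_pindex_t pindex)
{
	struct swblock **pswap = swp_pager_hash(object, pindex);

	if (*pswap == NULL)
		return (SWAPBLK_NONE);
	return ((*pswap)->swb_pages[pindex & SWAP_META_MASK]);
}
#endif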
1690
1691/*
1692 * SWP_PAGER_META_BUILD() - add swap block to swap meta data for object
1693 *
1694 * We first convert the object to a swap object if it is a default
1695 * object.
1696 *
1697 * The specified swapblk is added to the object's swap metadata. If
1698 * the swapblk is not valid, it is freed instead. Any previously
1699 * assigned swapblk is freed.
1700 *
1701 * This routine must be called at splvm(), except when used to convert
1702 * an OBJT_DEFAULT object into an OBJT_SWAP object.
1703
1704 */
1705
1706static void
1707swp_pager_meta_build(
1708 vm_object_t object,
1709 vm_pindex_t index,
1710 daddr_t swapblk
1711) {
1712 struct swblock *swap;
1713 struct swblock **pswap;
1714
1715 /*
1716 * Convert default object to swap object if necessary
1717 */
1718
1719 if (object->type != OBJT_SWAP) {
1720 object->type = OBJT_SWAP;
1721 object->un_pager.swp.swp_bcount = 0;
1722
1723 if (object->handle != NULL) {
1724 TAILQ_INSERT_TAIL(
1725 NOBJLIST(object->handle),
1726 object,
1727 pager_object_list
1728 );
1729 } else {
1730 TAILQ_INSERT_TAIL(
1731 &swap_pager_un_object_list,
1732 object,
1733 pager_object_list
1734 );
1735 }
1736 }
1737
1738 /*
1739 * Locate hash entry. If not found create, but if we aren't adding
1740 * anything just return. If we run out of space in the map we wait
1741 * and, since the hash table may have changed, retry.
1742 */
1743
1744retry:
1745 pswap = swp_pager_hash(object, index);
1746
1747 if ((swap = *pswap) == NULL) {
1748 int i;
1749
1750 if (swapblk == SWAPBLK_NONE)
1751 return;
1752
1753 swap = *pswap = zalloc(swap_zone);
1754 if (swap == NULL) {
			vm_wait();
1756 goto retry;
1757 }
1758 swap->swb_hnext = NULL;
1759 swap->swb_object = object;
1760 swap->swb_index = index & ~SWAP_META_MASK;
1761 swap->swb_count = 0;
1762
1763 ++object->un_pager.swp.swp_bcount;
1764
1765 for (i = 0; i < SWAP_META_PAGES; ++i)
1766 swap->swb_pages[i] = SWAPBLK_NONE;
1767 }
1768
1769 /*
1770 * Delete prior contents of metadata
1771 */
1772
1773 index &= SWAP_META_MASK;
1774
1775 if (swap->swb_pages[index] != SWAPBLK_NONE) {
1776 swp_pager_freeswapspace(swap->swb_pages[index], 1);
1777 --swap->swb_count;
1778 }
1779
1780 /*
1781 * Enter block into metadata
1782 */
1783
1784 swap->swb_pages[index] = swapblk;
1785 if (swapblk != SWAPBLK_NONE)
1786 ++swap->swb_count;
1787}
1788
1789/*
1790 * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
1791 *
1792 * The requested range of blocks is freed, with any associated swap
1793 * returned to the swap bitmap.
1794 *
1795 * This routine will free swap metadata structures as they are cleaned
1796 * out. This routine does *NOT* operate on swap metadata associated
1797 * with resident pages.
1798 *
1799 * This routine must be called at splvm()
1800 */
1801
1802static void
1803swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count)
1804{
1805 if (object->type != OBJT_SWAP)
1806 return;
1807
1808 while (count > 0) {
1809 struct swblock **pswap;
1810 struct swblock *swap;
1811
1812 pswap = swp_pager_hash(object, index);
1813
1814 if ((swap = *pswap) != NULL) {
1815 daddr_t v = swap->swb_pages[index & SWAP_META_MASK];
1816
1817 if (v != SWAPBLK_NONE) {
1818 swp_pager_freeswapspace(v, 1);
1819 swap->swb_pages[index & SWAP_META_MASK] =
1820 SWAPBLK_NONE;
1821 if (--swap->swb_count == 0) {
1822 *pswap = swap->swb_hnext;
1823 zfree(swap_zone, swap);
1824 --object->un_pager.swp.swp_bcount;
1825 }
1826 }
1827 --count;
1828 ++index;
1829 } else {
1830 int n = SWAP_META_PAGES - (index & SWAP_META_MASK);
1831 count -= n;
1832 index += n;
1833 }
1834 }
1835}
1836
1837/*
1838 * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
1839 *
1840 * This routine locates and destroys all swap metadata associated with
1841 * an object.
1842 *
1843 * This routine must be called at splvm()
1844 */
1845
1846static void
1847swp_pager_meta_free_all(vm_object_t object)
1848{
1849 daddr_t index = 0;
1850
1851 if (object->type != OBJT_SWAP)
1852 return;
1853
1854 while (object->un_pager.swp.swp_bcount) {
1855 struct swblock **pswap;
1856 struct swblock *swap;
1857
1858 pswap = swp_pager_hash(object, index);
1859 if ((swap = *pswap) != NULL) {
1860 int i;
1861
1862 for (i = 0; i < SWAP_META_PAGES; ++i) {
1863 daddr_t v = swap->swb_pages[i];
1864 if (v != SWAPBLK_NONE) {
1865 --swap->swb_count;
1866 swp_pager_freeswapspace(v, 1);
1867 }
1868 }
1869 if (swap->swb_count != 0)
1870 panic("swap_pager_meta_free_all: swb_count != 0");
1871 *pswap = swap->swb_hnext;
1872 zfree(swap_zone, swap);
1873 --object->un_pager.swp.swp_bcount;
1874 }
1875 index += SWAP_META_PAGES;
1876 if (index > 0x20000000)
1877 panic("swp_pager_meta_free_all: failed to locate all swap meta blocks");
1878 }
1879}
1880
1881/*
1882 * SWP_PAGER_METACTL() - misc control of swap and vm_page_t meta data.
1883 *
1884 * This routine is capable of looking up, popping, or freeing
1885 * swapblk assignments in the swap meta data or in the vm_page_t.
1886 * The routine typically returns the swapblk being looked-up, or popped,
1887 * or SWAPBLK_NONE if the block was freed, or SWAPBLK_NONE if the block
1888 * was invalid. This routine will automatically free any invalid
1889 * meta-data swapblks.
1890 *
1891 * It is not possible to store invalid swapblks in the swap meta data
 * (other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
1893 *
1894 * When acting on a busy resident page and paging is in progress, we
1895 * have to wait until paging is complete but otherwise can act on the
1896 * busy page.
1897 *
1898 * This routine must be called at splvm().
1899 *
1900 * SWM_FREE remove and free swap block from metadata
1901 * SWM_POP remove from meta data but do not free.. pop it out
1902 */
1903
1904static daddr_t
1905swp_pager_meta_ctl(
1906 vm_object_t object,
1907 vm_pindex_t index,
1908 int flags
1909) {
1910 struct swblock **pswap;
1911 struct swblock *swap;
1912 daddr_t r1;
1913
1914 /*
 * The meta data only exists if the object is OBJT_SWAP
1916 * and even then might not be allocated yet.
1917 */
1918
1919 if (object->type != OBJT_SWAP)
1920 return(SWAPBLK_NONE);
1921
1922 r1 = SWAPBLK_NONE;
1923 pswap = swp_pager_hash(object, index);
1924
1925 if ((swap = *pswap) != NULL) {
1926 index &= SWAP_META_MASK;
1927 r1 = swap->swb_pages[index];
1928
1929 if (r1 != SWAPBLK_NONE) {
1930 if (flags & SWM_FREE) {
1931 swp_pager_freeswapspace(r1, 1);
1932 r1 = SWAPBLK_NONE;
1933 }
1934 if (flags & (SWM_FREE|SWM_POP)) {
1935 swap->swb_pages[index] = SWAPBLK_NONE;
1936 if (--swap->swb_count == 0) {
1937 *pswap = swap->swb_hnext;
1938 zfree(swap_zone, swap);
1939 --object->un_pager.swp.swp_bcount;
1940 }
1941 }
1942 }
1943 }
1944 return(r1);
1945}
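
/*
 * Flag usage summary (illustrative, not part of the original source):
 * the SWM_* flags select lookup, pop, or free behaviour.  The return
 * value is SWAPBLK_NONE when no block is assigned or when the block
 * was freed.
 */
#if 0
	daddr_t blk;

	blk = swp_pager_meta_ctl(object, pindex, 0);		/* look up only */
	blk = swp_pager_meta_ctl(object, pindex, SWM_POP);	/* remove, keep block */
	(void) swp_pager_meta_ctl(object, pindex, SWM_FREE);	/* remove and free */
#endif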