2 * Copyright (c) 1991, 1993, 2013
3 * The Regents of the University of California. All rights reserved.
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * from: @(#)vm_object.c 8.5 (Berkeley) 3/22/94
35 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
36 * All rights reserved.
38 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
40 * Permission to use, copy, modify and distribute this software and
41 * its documentation is hereby granted, provided that both the copyright
42 * notice and this permission notice appear in all copies of the
43 * software, derivative works or modified versions, and any portions
44 * thereof, and that both notices appear in supporting documentation.
46 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
47 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
48 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
50 * Carnegie Mellon requests users of this software to return to
52 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
53 * School of Computer Science
54 * Carnegie Mellon University
55 * Pittsburgh PA 15213-3890
57 * any improvements or extensions that they make and grant Carnegie the
58 * rights to redistribute these changes.
60 * $FreeBSD: src/sys/vm/vm_object.c,v 1.171.2.8 2003/05/26 19:17:56 alc Exp $
64 * Virtual memory object module.
67 #include <sys/param.h>
68 #include <sys/systm.h>
69 #include <sys/proc.h> /* for curproc, pageproc */
70 #include <sys/thread.h>
71 #include <sys/vnode.h>
72 #include <sys/vmmeter.h>
74 #include <sys/mount.h>
75 #include <sys/kernel.h>
76 #include <sys/malloc.h>
77 #include <sys/sysctl.h>
78 #include <sys/refcount.h>
81 #include <vm/vm_param.h>
83 #include <vm/vm_map.h>
84 #include <vm/vm_object.h>
85 #include <vm/vm_page.h>
86 #include <vm/vm_pageout.h>
87 #include <vm/vm_pager.h>
88 #include <vm/swap_pager.h>
89 #include <vm/vm_kern.h>
90 #include <vm/vm_extern.h>
91 #include <vm/vm_zone.h>
93 #include <vm/vm_page2.h>
95 #include <machine/specialreg.h>
97 #define EASY_SCAN_FACTOR 8
99 static void vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
101 static void vm_object_lock_init(vm_object_t);
104 * Virtual memory objects maintain the actual data
105 * associated with allocated virtual memory. A given
106 * page of memory exists within exactly one object.
108 * An object is only deallocated when all "references"
109 * are given up. Only one "reference" to a given
110 * region of an object should be writeable.
112 * Associated with each object is a list of all resident
113 * memory pages belonging to that object; this list is
114 * maintained by the "vm_page" module, and locked by the object's
117 * Each object also records a "pager" routine which is
118 * used to retrieve (and store) pages to the proper backing
119 * storage. In addition, objects may be backed by other
120 * objects from which they were virtual-copied.
122 * The only items within the object structure which are
123 * modified after time of creation are:
124 * reference count locked by object's lock
125 * pager routine locked by object's lock
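/*
 * Illustrative sketch (not part of the original source): the lifecycle
 * implied by the comment above.  A consumer allocates an object (born
 * with one reference), adds references while holding it, and releases
 * them with vm_object_deallocate(); the final 1->0 release terminates
 * the object.  The size used here is hypothetical.
 *
 *	vm_object_t obj;
 *
 *	obj = vm_object_allocate(OBJT_DEFAULT, 16);
 *	vm_object_hold(obj);
 *	vm_object_reference_locked(obj);	(now two references)
 *	vm_object_drop(obj);
 *	...
 *	vm_object_deallocate(obj);		(drops the extra reference)
 *	vm_object_deallocate(obj);		(1->0, object terminates)
 */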
129 struct vm_object kernel_object;
131 struct vm_object_hash vm_object_hash[VMOBJ_HSIZE];
133 MALLOC_DEFINE(M_VM_OBJECT, "vm_object", "vm_object structures");
135 #define VMOBJ_HASH_PRIME1 66555444443333333ULL
136 #define VMOBJ_HASH_PRIME2 989042931893ULL
139 SYSCTL_INT(_vm, OID_AUTO, object_debug, CTLFLAG_RW, &vm_object_debug, 0, "");
142 struct vm_object_hash *
143 vmobj_hash(vm_object_t obj)
148 hash1 = (uintptr_t)obj + ((uintptr_t)obj >> 18);
149 hash1 %= VMOBJ_HASH_PRIME1;
150 hash2 = ((uintptr_t)obj >> 8) + ((uintptr_t)obj >> 24);
151 hash2 %= VMOBJ_HASH_PRIME2;
152 return (&vm_object_hash[(hash1 ^ hash2) & VMOBJ_HMASK]);
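/*
 * Illustrative sketch (not part of the original source): callers use the
 * returned bucket to interlock insertion and removal on that hash chain,
 * exactly as _vm_object_allocate() and vm_object_terminate() do below.
 *
 *	struct vm_object_hash *hash;
 *
 *	hash = vmobj_hash(object);
 *	lwkt_gettoken(&hash->token);
 *	TAILQ_INSERT_TAIL(&hash->list, object, object_entry);
 *	lwkt_reltoken(&hash->token);
 */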
155 #if defined(DEBUG_LOCKS)
157 #define vm_object_vndeallocate(obj, vpp) \
158 debugvm_object_vndeallocate(obj, vpp, __FILE__, __LINE__)
161 * Debug helper to track hold/drop/ref/deallocate calls.
164 debugvm_object_add(vm_object_t obj, char *file, int line, int addrem)
168 i = atomic_fetchadd_int(&obj->debug_index, 1);
169 i = i & (VMOBJ_DEBUG_ARRAY_SIZE - 1);
170 ksnprintf(obj->debug_hold_thrs[i],
171 sizeof(obj->debug_hold_thrs[i]),
173 (addrem == -1 ? '-' : (addrem == 1 ? '+' : '=')),
174 (curthread->td_proc ? curthread->td_proc->p_pid : -1),
177 obj->debug_hold_file[i] = file;
178 obj->debug_hold_line[i] = line;
180 /* Uncomment for debugging obj refs/derefs in reproducible cases */
181 if (strcmp(curthread->td_comm, "sshd") == 0) {
182 kprintf("%d %p refs=%d ar=%d file: %s/%d\n",
183 (curthread->td_proc ? curthread->td_proc->p_pid : -1),
184 obj, obj->ref_count, addrem, file, line);
192 * Misc low level routines
195 vm_object_lock_init(vm_object_t obj)
197 #if defined(DEBUG_LOCKS)
200 obj->debug_index = 0;
201 for (i = 0; i < VMOBJ_DEBUG_ARRAY_SIZE; i++) {
202 obj->debug_hold_thrs[i][0] = 0;
203 obj->debug_hold_file[i] = NULL;
204 obj->debug_hold_line[i] = 0;
210 vm_object_lock_swap(void)
216 vm_object_lock(vm_object_t obj)
218 lwkt_gettoken(&obj->token);
222 * Returns TRUE on success
225 vm_object_lock_try(vm_object_t obj)
227 return(lwkt_trytoken(&obj->token));
231 vm_object_lock_shared(vm_object_t obj)
233 lwkt_gettoken_shared(&obj->token);
237 vm_object_unlock(vm_object_t obj)
239 lwkt_reltoken(&obj->token);
243 vm_object_upgrade(vm_object_t obj)
245 lwkt_reltoken(&obj->token);
246 lwkt_gettoken(&obj->token);
250 vm_object_downgrade(vm_object_t obj)
252 lwkt_reltoken(&obj->token);
253 lwkt_gettoken_shared(&obj->token);
257 vm_object_assert_held(vm_object_t obj)
259 ASSERT_LWKT_TOKEN_HELD(&obj->token);
265 globaldata_t gd = mycpu;
268 pg_color = (int)(intptr_t)gd->gd_curthread >> 10;
269 pg_color += gd->gd_quick_color;
270 gd->gd_quick_color += PQ_PRIME2;
276 VMOBJDEBUG(vm_object_hold)(vm_object_t obj VMOBJDBARGS)
278 KKASSERT(obj != NULL);
281 * Object must be held (object allocation is stable due to caller's
282 * context, typically already holding the token on a parent object)
283 * prior to potentially blocking on the lock, otherwise the object
284 * can get ripped away from us.
286 refcount_acquire(&obj->hold_count);
289 #if defined(DEBUG_LOCKS)
290 debugvm_object_add(obj, file, line, 1);
295 VMOBJDEBUG(vm_object_hold_try)(vm_object_t obj VMOBJDBARGS)
297 KKASSERT(obj != NULL);
300 * Object must be held (object allocation is stable due to caller's
301 * context, typically already holding the token on a parent object)
302 * prior to potentially blocking on the lock, otherwise the object
303 * can get ripped away from us.
305 refcount_acquire(&obj->hold_count);
306 if (vm_object_lock_try(obj) == 0) {
307 if (refcount_release(&obj->hold_count)) {
308 if (obj->ref_count == 0 && (obj->flags & OBJ_DEAD))
309 kfree(obj, M_VM_OBJECT);
314 #if defined(DEBUG_LOCKS)
315 debugvm_object_add(obj, file, line, 1);
321 VMOBJDEBUG(vm_object_hold_shared)(vm_object_t obj VMOBJDBARGS)
323 KKASSERT(obj != NULL);
326 * Object must be held (object allocation is stable due to caller's
327 * context, typically already holding the token on a parent object)
328 * prior to potentially blocking on the lock, otherwise the object
329 * can get ripped away from us.
331 refcount_acquire(&obj->hold_count);
332 vm_object_lock_shared(obj);
334 #if defined(DEBUG_LOCKS)
335 debugvm_object_add(obj, file, line, 1);
340 * Drop the token and hold_count on the object.
342 * WARNING! Token might be shared.
345 VMOBJDEBUG(vm_object_drop)(vm_object_t obj VMOBJDBARGS)
351 * No new holders should be possible once we drop hold_count 1->0 as
352 * there is no longer any way to reference the object.
354 KKASSERT(obj->hold_count > 0);
355 if (refcount_release(&obj->hold_count)) {
356 #if defined(DEBUG_LOCKS)
357 debugvm_object_add(obj, file, line, -1);
360 if (obj->ref_count == 0 && (obj->flags & OBJ_DEAD)) {
361 vm_object_unlock(obj);
362 kfree(obj, M_VM_OBJECT);
364 vm_object_unlock(obj);
367 #if defined(DEBUG_LOCKS)
368 debugvm_object_add(obj, file, line, -1);
370 vm_object_unlock(obj);
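/*
 * Illustrative sketch (not part of the original source): hold/drop
 * bracket any work on an object whose pointer is only stable through
 * the caller's context, the pattern used by vm_object_madvise() and
 * vm_object_page_remove() later in this file.
 *
 *	vm_object_hold(object);
 *	... look up, busy, and manipulate pages ...
 *	vm_object_drop(object);
 */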
375 * Initialize a freshly allocated object, returning a held object.
377 * Used only by vm_object_allocate(), zinitna() and vm_object_init().
382 _vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object,
385 struct vm_object_hash *hash;
387 RB_INIT(&object->rb_memq);
388 lwkt_token_init(&object->token, ident);
390 TAILQ_INIT(&object->backing_list);
391 lockinit(&object->backing_lk, "baclk", 0, 0);
395 object->ref_count = 1;
396 object->memattr = VM_MEMATTR_DEFAULT;
397 object->hold_count = 0;
399 if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP))
400 vm_object_set_flag(object, OBJ_ONEMAPPING);
401 object->paging_in_progress = 0;
402 object->resident_page_count = 0;
403 /* cpu localization twist */
404 object->pg_color = vm_quickcolor();
405 object->handle = NULL;
407 atomic_add_int(&object->generation, 1);
408 object->swblock_count = 0;
409 RB_INIT(&object->swblock_root);
410 vm_object_lock_init(object);
411 pmap_object_init(object);
413 vm_object_hold(object);
415 hash = vmobj_hash(object);
416 lwkt_gettoken(&hash->token);
417 TAILQ_INSERT_TAIL(&hash->list, object, object_entry);
418 lwkt_reltoken(&hash->token);
422 * Initialize a VM object.
425 vm_object_init(vm_object_t object, vm_pindex_t size)
427 _vm_object_allocate(OBJT_DEFAULT, size, object, "vmobj");
428 vm_object_drop(object);
432 * Initialize the VM objects module.
434 * Called from the low level boot code only. Note that this occurs before
435 * kmalloc is initialized so we cannot allocate any VM objects.
438 vm_object_init1(void)
442 for (i = 0; i < VMOBJ_HSIZE; ++i) {
443 TAILQ_INIT(&vm_object_hash[i].list);
444 lwkt_token_init(&vm_object_hash[i].token, "vmobjlst");
447 _vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(KvaEnd),
448 &kernel_object, "kobj");
449 vm_object_drop(&kernel_object);
453 vm_object_init2(void)
455 kmalloc_set_unlimited(M_VM_OBJECT);
459 * Allocate and return a new object of the specified type and size.
464 vm_object_allocate(objtype_t type, vm_pindex_t size)
468 obj = kmalloc(sizeof(*obj), M_VM_OBJECT, M_INTWAIT|M_ZERO);
469 _vm_object_allocate(type, size, obj, "vmobj");
476 * This version returns a held object, allowing further atomic initialization
480 vm_object_allocate_hold(objtype_t type, vm_pindex_t size)
484 obj = kmalloc(sizeof(*obj), M_VM_OBJECT, M_INTWAIT|M_ZERO);
485 _vm_object_allocate(type, size, obj, "vmobj");
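/*
 * Illustrative sketch (not part of the original source): a pager can use
 * the held variant to finish setting up the object before any other
 * thread can observe it.  The handle assignment is a hypothetical
 * example of such initialization.
 *
 *	obj = vm_object_allocate_hold(OBJT_SWAP, size);
 *	obj->handle = handle;			(hypothetical)
 *	vm_object_drop(obj);
 */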
491 * Add an additional reference to a vm_object. The object must already be
492 * held. The original non-lock version is no longer supported. The object
493 * must NOT be chain locked by anyone at the time the reference is added.
495 * The object must be held, but may be held shared if desired (hence why
496 * we use an atomic op).
499 VMOBJDEBUG(vm_object_reference_locked)(vm_object_t object VMOBJDBARGS)
501 KKASSERT(object != NULL);
502 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
503 atomic_add_int(&object->ref_count, 1);
504 if (object->type == OBJT_VNODE) {
505 vref(object->handle);
506 /* XXX what if the vnode is being destroyed? */
508 #if defined(DEBUG_LOCKS)
509 debugvm_object_add(object, file, line, 1);
514 * This version is only allowed in situations where the caller
515 * already knows that the object is deterministically referenced
516 * (usually because it's taken from a ref'd vnode, or during a map_entry
520 VMOBJDEBUG(vm_object_reference_quick)(vm_object_t object VMOBJDBARGS)
522 KKASSERT(object->type == OBJT_VNODE || object->ref_count > 0);
523 atomic_add_int(&object->ref_count, 1);
524 if (object->type == OBJT_VNODE)
525 vref(object->handle);
526 #if defined(DEBUG_LOCKS)
527 debugvm_object_add(object, file, line, 1);
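/*
 * Illustrative sketch (not part of the original source): when the caller
 * already owns a deterministic reference (e.g. the object was obtained
 * from a referenced vnode), no hold is needed to add another one:
 *
 *	vm_object_reference_quick(object);
 *	...
 *	vm_object_deallocate(object);
 */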
532 * Dereference an object and its underlying vnode. The object may be
533 * held shared. On return the object will remain held.
535 * This function may return a vnode in *vpp which the caller must release
536 * after the caller drops its own lock. If vpp is NULL, we assume that
537 * the caller was holding an exclusive lock on the object and we vrele()
541 VMOBJDEBUG(vm_object_vndeallocate)(vm_object_t object, struct vnode **vpp
544 struct vnode *vp = (struct vnode *) object->handle;
547 KASSERT(object->type == OBJT_VNODE,
548 ("vm_object_vndeallocate: not a vnode object"));
549 KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
550 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
552 if (object->ref_count == 0) {
553 vprint("vm_object_vndeallocate", vp);
554 panic("vm_object_vndeallocate: bad object reference count");
557 count = object->ref_count;
561 vm_object_upgrade(object);
562 if (atomic_fcmpset_int(&object->ref_count, &count, 0)) {
563 vclrflags(vp, VTEXT);
567 if (atomic_fcmpset_int(&object->ref_count,
568 &count, count - 1)) {
575 #if defined(DEBUG_LOCKS)
576 debugvm_object_add(object, file, line, -1);
580 * vrele or return the vp to vrele. We can only safely vrele(vp)
581 * if the object was locked exclusively. But there are two races
584 * We had to upgrade the object above to safely clear VTEXT
585 * but the alternative path where the shared lock is retained
586 * can STILL race to 0 in other paths and cause our own vrele()
587 * to terminate the vnode. We can't allow that if the VM object
588 * is still locked shared.
597 * Release a reference to the specified object, gained either through a
598 * vm_object_allocate or a vm_object_reference call. When all references
599 * are gone, storage associated with this object may be relinquished.
601 * The caller does not have to hold the object locked but must have control
602 * over the reference in question in order to guarantee that the object
603 * does not get ripped out from under us.
605 * XXX Currently all deallocations require an exclusive lock.
608 VMOBJDEBUG(vm_object_deallocate)(vm_object_t object VMOBJDBARGS)
616 count = object->ref_count;
620 * If decrementing the count enters into special handling
621 * territory (0, 1, or 2) we have to do it the hard way.
622 * Fortunately though, objects with only a few refs like this
623 * are not likely to be heavily contended anyway.
625 * For vnode objects we only care about 1->0 transitions.
627 if (count <= 3 || (object->type == OBJT_VNODE && count <= 1)) {
628 #if defined(DEBUG_LOCKS)
629 debugvm_object_add(object, file, line, 0);
631 vm_object_hold(object);
632 vm_object_deallocate_locked(object);
633 vm_object_drop(object);
638 * Try to decrement ref_count without acquiring a hold on
639 * the object. This is particularly important for the exec*()
640 * and exit*() code paths because the program binary may
641 * have a great deal of sharing and an exclusive lock will
642 * crowbar performance in those circumstances.
644 if (object->type == OBJT_VNODE) {
645 vp = (struct vnode *)object->handle;
646 if (atomic_fcmpset_int(&object->ref_count,
647 &count, count - 1)) {
648 #if defined(DEBUG_LOCKS)
649 debugvm_object_add(object, file, line, -1);
657 if (atomic_fcmpset_int(&object->ref_count,
658 &count, count - 1)) {
659 #if defined(DEBUG_LOCKS)
660 debugvm_object_add(object, file, line, -1);
672 VMOBJDEBUG(vm_object_deallocate_locked)(vm_object_t object VMOBJDBARGS)
681 * vnode case, caller either locked the object exclusively
682 * or this is a recursion with must_drop != 0 and the vnode
683 * object will be locked shared.
685 * If locked shared we have to drop the object before we can
686 * call vrele() or risk a shared/exclusive livelock.
688 if (object->type == OBJT_VNODE) {
689 ASSERT_LWKT_TOKEN_HELD(&object->token);
690 vm_object_vndeallocate(object, NULL);
693 ASSERT_LWKT_TOKEN_HELD_EXCL(&object->token);
696 * Normal case (object is locked exclusively)
698 if (object->ref_count == 0) {
699 panic("vm_object_deallocate: object deallocated "
700 "too many times: %d", object->type);
702 if (object->ref_count > 2) {
703 atomic_add_int(&object->ref_count, -1);
704 #if defined(DEBUG_LOCKS)
705 debugvm_object_add(object, file, line, -1);
711 * Drop the ref and handle termination on the 1->0 transition.
712 * We may have blocked above so we have to recheck.
714 KKASSERT(object->ref_count != 0);
715 if (object->ref_count >= 2) {
716 atomic_add_int(&object->ref_count, -1);
717 #if defined(DEBUG_LOCKS)
718 debugvm_object_add(object, file, line, -1);
723 atomic_add_int(&object->ref_count, -1);
724 if ((object->flags & OBJ_DEAD) == 0)
725 vm_object_terminate(object);
729 * Destroy the specified object, freeing up related resources.
731 * The object must have zero references.
733 * The object must held. The caller is responsible for dropping the object
734 * after terminate returns. Terminate does NOT drop the object.
736 static int vm_object_terminate_callback(vm_page_t p, void *data);
739 vm_object_terminate(vm_object_t object)
741 struct rb_vm_page_scan_info info;
742 struct vm_object_hash *hash;
745 * Make sure no one uses us. Once we set OBJ_DEAD we should be
746 * able to safely block.
748 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
749 KKASSERT((object->flags & OBJ_DEAD) == 0);
750 vm_object_set_flag(object, OBJ_DEAD);
753 * Wait for the pageout daemon to be done with the object
755 vm_object_pip_wait(object, "objtrm1");
757 KASSERT(!object->paging_in_progress,
758 ("vm_object_terminate: pageout in progress"));
761 * Clean and free the pages, as appropriate. All references to the
762 * object are gone, so we don't need to lock it.
764 if (object->type == OBJT_VNODE) {
768 * Clean pages and flush buffers.
770 * NOTE! TMPFS buffer flushes do not typically flush the
771 * actual page to swap as this would be highly
772 * inefficient, and normal filesystems usually wrap
773 * page flushes with buffer cache buffers.
775 * To deal with this we have to call vinvalbuf() both
776 * before and after the vm_object_page_clean().
778 vp = (struct vnode *) object->handle;
779 vinvalbuf(vp, V_SAVE, 0, 0);
780 vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
781 vinvalbuf(vp, V_SAVE, 0, 0);
785 * Wait for any I/O to complete, after which there had better not
786 * be any references left on the object.
788 vm_object_pip_wait(object, "objtrm2");
790 if (object->ref_count != 0) {
791 panic("vm_object_terminate: object with references, "
792 "ref_count=%d", object->ref_count);
796 * Cleanup any shared pmaps associated with this object.
798 pmap_object_free(object);
801 * Now free any remaining pages. For internal objects, this also
802 * removes them from paging queues. Don't free wired pages, just
803 * remove them from the object.
806 info.object = object;
809 vm_page_rb_tree_RB_SCAN(&object->rb_memq, NULL,
810 vm_object_terminate_callback, &info);
811 } while (info.error);
814 * Let the pager know object is dead.
816 vm_pager_deallocate(object);
819 * Wait for the object hold count to hit 1, clean out pages as
820 * we go. vmobj_token interlocks any race conditions that might
821 * pick the object up from the vm_object_list after we have cleared
825 if (RB_ROOT(&object->rb_memq) == NULL)
827 kprintf("vm_object_terminate: Warning, object %p "
828 "still has %ld pages\n",
829 object, object->resident_page_count);
830 vm_page_rb_tree_RB_SCAN(&object->rb_memq, NULL,
831 vm_object_terminate_callback, &info);
835 * There had better not be any pages left
837 KKASSERT(object->resident_page_count == 0);
840 * Remove the object from the global object list.
842 hash = vmobj_hash(object);
843 lwkt_gettoken(&hash->token);
844 TAILQ_REMOVE(&hash->list, object, object_entry);
845 lwkt_reltoken(&hash->token);
847 if (object->ref_count != 0) {
848 panic("vm_object_terminate2: object with references, "
849 "ref_count=%d", object->ref_count);
853 * NOTE: The object hold_count is at least 1, so we cannot kfree()
854 * the object here. See vm_object_drop().
859 * The caller must hold the object.
861 * NOTE: In PMAP_ADVANCED mode it is possible for vm_page's to remain flagged
862 * PG_MAPPED or PG_MAPPED|PG_WRITEABLE, even after pmap_mapped_sync()
863 * is called, due to normal pmap operations. This is because only
864 * global pmap operations on the vm_page can clear the bits and not
865 * just local operations on individual pmaps.
867 * Most interactions that necessitate the clearing of these bits
868 * proactively call vm_page_protect(), and we must do so here as well.
871 vm_object_terminate_callback(vm_page_t p, void *data)
873 struct rb_vm_page_scan_info *info = data;
877 KKASSERT(object == info->object);
878 if (vm_page_busy_try(p, TRUE)) {
879 vm_page_sleep_busy(p, TRUE, "vmotrm");
883 if (object != p->object) {
884 /* XXX remove once we determine it can't happen */
885 kprintf("vm_object_terminate: Warning: Encountered "
886 "busied page %p on queue %d\n", p, p->queue);
889 } else if (p->wire_count == 0) {
891 * NOTE: p->dirty and PG_NEED_COMMIT are ignored.
893 if (pmap_mapped_sync(p) & (PG_MAPPED | PG_WRITEABLE))
894 vm_page_protect(p, VM_PROT_NONE);
896 mycpu->gd_cnt.v_pfree++;
898 if (p->queue != PQ_NONE) {
899 kprintf("vm_object_terminate: Warning: Encountered "
900 "wired page %p on queue %d\n", p, p->queue);
901 if (vm_object_debug > 0) {
906 if (pmap_mapped_sync(p) & (PG_MAPPED | PG_WRITEABLE))
907 vm_page_protect(p, VM_PROT_NONE);
913 * Must be at end to avoid SMP races, caller holds object token
915 if ((++info->count & 63) == 0)
921 * Clean all dirty pages in the specified range of object. Leaves page
922 * on whatever queue it is currently on. If NOSYNC is set then do not
923 * write out pages with PG_NOSYNC set (originally comes from MAP_NOSYNC),
924 * leaving the object dirty.
926 * When stuffing pages asynchronously, allow clustering. XXX we need a
927 * synchronous clustering mode implementation.
929 * Odd semantics: if start == end, we clean everything.
931 * The object must be locked? XXX
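/*
 * Illustrative sketch (not part of the original source): passing
 * start == end selects the "clean everything" case; vm_object_terminate()
 * above does exactly this for vnode objects:
 *
 *	vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
 */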
933 static int vm_object_page_clean_pass1(struct vm_page *p, void *data);
934 static int vm_object_page_clean_pass2(struct vm_page *p, void *data);
937 vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
940 struct rb_vm_page_scan_info info;
946 vm_object_hold(object);
947 if (object->type != OBJT_VNODE ||
948 (object->flags & OBJ_MIGHTBEDIRTY) == 0) {
949 vm_object_drop(object);
953 pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) ?
954 VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
955 pagerflags |= (flags & OBJPC_INVAL) ? VM_PAGER_PUT_INVAL : 0;
960 * Interlock other major object operations. This allows us to
961 * temporarily clear OBJ_WRITEABLE and OBJ_MIGHTBEDIRTY.
963 vm_object_set_flag(object, OBJ_CLEANING);
966 * Handle 'entire object' case
968 info.start_pindex = start;
970 info.end_pindex = object->size - 1;
972 info.end_pindex = end - 1;
974 wholescan = (start == 0 && info.end_pindex == object->size - 1);
976 info.pagerflags = pagerflags;
977 info.object = object;
980 * If cleaning the entire object, do a pass to mark the pages read-only.
981 * If everything worked out ok, clear OBJ_WRITEABLE and
987 vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
988 vm_object_page_clean_pass1, &info);
989 if (info.error == 0) {
990 vm_object_clear_flag(object,
991 OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
992 if (object->type == OBJT_VNODE &&
993 (vp = (struct vnode *)object->handle) != NULL) {
995 * Use new-style interface to clear VISDIRTY
996 * because the vnode is not necessarily removed
997 * from the syncer list(s) as often as it was
998 * under the old interface, which can leave
999 * the vnode on the syncer list after reclaim.
1007 * Do a pass to clean all the dirty pages we find.
1012 generation = object->generation;
1013 vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
1014 vm_object_page_clean_pass2, &info);
1015 } while (info.error || generation != object->generation);
1017 vm_object_clear_flag(object, OBJ_CLEANING);
1018 vm_object_drop(object);
1022 * The caller must hold the object.
1026 vm_object_page_clean_pass1(struct vm_page *p, void *data)
1028 struct rb_vm_page_scan_info *info = data;
1030 KKASSERT(p->object == info->object);
1032 vm_page_flag_set(p, PG_CLEANCHK);
1033 if ((info->limit & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) {
1035 } else if (vm_page_busy_try(p, FALSE)) {
1038 KKASSERT(p->object == info->object);
1039 vm_page_protect(p, VM_PROT_READ);
1044 * Must be at end to avoid SMP races, caller holds object token
1046 if ((++info->count & 63) == 0)
1052 * The caller must hold the object
1056 vm_object_page_clean_pass2(struct vm_page *p, void *data)
1058 struct rb_vm_page_scan_info *info = data;
1061 KKASSERT(p->object == info->object);
1064 * Do not mess with pages that were inserted after we started
1065 * the cleaning pass.
1067 if ((p->flags & PG_CLEANCHK) == 0)
1070 generation = info->object->generation;
1072 if (vm_page_busy_try(p, TRUE)) {
1073 vm_page_sleep_busy(p, TRUE, "vpcwai");
1078 KKASSERT(p->object == info->object &&
1079 info->object->generation == generation);
1082 * Before wasting time traversing the pmaps, check for trivial
1083 * cases where the page cannot be dirty.
1085 if (p->valid == 0 || (p->queue - p->pc) == PQ_CACHE) {
1086 KKASSERT((p->dirty & p->valid) == 0 &&
1087 (p->flags & PG_NEED_COMMIT) == 0);
1093 * Check whether the page is dirty or not. The page has been set
1094 * to be read-only so the check will not race a user dirtying the
1097 vm_page_test_dirty(p);
1098 if ((p->dirty & p->valid) == 0 && (p->flags & PG_NEED_COMMIT) == 0) {
1099 vm_page_flag_clear(p, PG_CLEANCHK);
1105 * If we have been asked to skip nosync pages and this is a
1106 * nosync page, skip it. Note that the object flags were
1107 * not cleared in this case (because pass1 will have returned an
1108 * error), so we do not have to set them.
1110 if ((info->limit & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) {
1111 vm_page_flag_clear(p, PG_CLEANCHK);
1117 * Flush as many pages as we can. PG_CLEANCHK will be cleared on
1118 * the pages that get successfully flushed. Set info->error if
1119 * we raced an object modification.
1121 vm_object_page_collect_flush(info->object, p, info->pagerflags);
1122 /* vm_wait_nominal(); this can deadlock the system in syncer/pageout */
1125 * Must be at end to avoid SMP races, caller holds object token
1128 if ((++info->count & 63) == 0)
1134 * Collect the specified page and nearby pages and flush them out.
1135 * The number of pages flushed is returned. The passed page is busied
1136 * by the caller and we are responsible for its disposition.
1138 * The caller must hold the object.
1141 vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags)
1149 vm_page_t ma[BLIST_MAX_ALLOC];
1151 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
1154 page_base = pi % BLIST_MAX_ALLOC;
1162 tp = vm_page_lookup_busy_try(object, pi - page_base + ib,
1168 if ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
1169 (tp->flags & PG_CLEANCHK) == 0) {
1173 if ((tp->queue - tp->pc) == PQ_CACHE) {
1174 vm_page_flag_clear(tp, PG_CLEANCHK);
1178 vm_page_test_dirty(tp);
1179 if ((tp->dirty & tp->valid) == 0 &&
1180 (tp->flags & PG_NEED_COMMIT) == 0) {
1181 vm_page_flag_clear(tp, PG_CLEANCHK);
1190 while (is < BLIST_MAX_ALLOC &&
1191 pi - page_base + is < object->size) {
1194 tp = vm_page_lookup_busy_try(object, pi - page_base + is,
1200 if ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
1201 (tp->flags & PG_CLEANCHK) == 0) {
1205 if ((tp->queue - tp->pc) == PQ_CACHE) {
1206 vm_page_flag_clear(tp, PG_CLEANCHK);
1210 vm_page_test_dirty(tp);
1211 if ((tp->dirty & tp->valid) == 0 &&
1212 (tp->flags & PG_NEED_COMMIT) == 0) {
1213 vm_page_flag_clear(tp, PG_CLEANCHK);
1222 * All pages in the ma[] array are busied now
1224 for (i = ib; i < is; ++i) {
1225 vm_page_flag_clear(ma[i], PG_CLEANCHK);
1226 vm_page_hold(ma[i]); /* XXX need this any more? */
1228 vm_pageout_flush(&ma[ib], is - ib, pagerflags);
1229 for (i = ib; i < is; ++i) /* XXX need this any more? */
1230 vm_page_unhold(ma[i]);
1234 * Implements the madvise function at the object/page level.
1236 * MADV_WILLNEED (any object)
1238 * Activate the specified pages if they are resident.
1240 * MADV_DONTNEED (any object)
1242 * Deactivate the specified pages if they are resident.
1244 * MADV_FREE (OBJT_DEFAULT/OBJT_SWAP objects, OBJ_ONEMAPPING only)
1246 * Deactivate and clean the specified pages if they are
1247 * resident. This permits the process to reuse the pages
1248 * without faulting or the kernel to reclaim the pages
1254 vm_object_madvise(vm_object_t object, vm_pindex_t pindex,
1255 vm_pindex_t count, int advise)
1264 end = pindex + count;
1266 vm_object_hold(object);
1269 * Locate and adjust resident pages. This only applies to the
1270 * primary object in the mapping.
1272 for (; pindex < end; pindex += 1) {
1275 * MADV_FREE only operates on OBJT_DEFAULT or OBJT_SWAP pages
1276 * and those pages must be OBJ_ONEMAPPING.
1278 if (advise == MADV_FREE) {
1279 if ((object->type != OBJT_DEFAULT &&
1280 object->type != OBJT_SWAP) ||
1281 (object->flags & OBJ_ONEMAPPING) == 0) {
1286 m = vm_page_lookup_busy_try(object, pindex, TRUE, &error);
1289 vm_page_sleep_busy(m, TRUE, "madvpo");
1294 * There may be swap even if there is no backing page
1296 if (advise == MADV_FREE && object->type == OBJT_SWAP)
1297 swap_pager_freespace(object, pindex, 1);
1302 * If the page is not in a normal active state, we skip it.
1303 * If the page is not managed there are no page queues to
1304 * mess with. Things can break if we mess with pages in
1305 * any of the below states.
1307 if (m->wire_count ||
1308 (m->flags & (PG_FICTITIOUS | PG_UNQUEUED |
1310 m->valid != VM_PAGE_BITS_ALL
1317 * Theoretically once a page is known not to be busy, an
1318 * interrupt cannot come along and rip it out from under us.
1320 if (advise == MADV_WILLNEED) {
1321 vm_page_activate(m);
1322 } else if (advise == MADV_DONTNEED) {
1323 vm_page_dontneed(m);
1324 } else if (advise == MADV_FREE) {
1326 * Mark the page clean. This will allow the page
1327 * to be freed up by the system. However, such pages
1328 * are often reused quickly by malloc()/free()
1329 * so we do not do anything that would cause
1330 * a page fault if we can help it.
1332 * Specifically, we do not try to actually free
1333 * the page now nor do we try to put it in the
1334 * cache (which would cause a page fault on reuse).
1336 * But we do make the page as freeable as we
1337 * can without actually taking the step of unmapping
1340 pmap_clear_modify(m);
1343 vm_page_dontneed(m);
1344 if (object->type == OBJT_SWAP)
1345 swap_pager_freespace(object, pindex, 1);
1349 vm_object_drop(object);
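/*
 * Illustrative sketch (not part of the original source): the syscall
 * layer resolves a user address range to (object, pindex, count) and
 * then invokes vm_object_madvise() above, for example:
 *
 *	vm_object_madvise(object, pindex, count, MADV_DONTNEED);
 */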
1353 * Removes all physical pages in the specified object range from the
1354 * object's list of pages.
1358 static int vm_object_page_remove_callback(vm_page_t p, void *data);
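/*
 * Illustrative sketch (not part of the original source): a typical caller
 * removes a page range by its pindex bounds, as vm_object_coalesce()
 * does below when recycling a previously used range; start and npages
 * are illustrative names.
 *
 *	vm_object_page_remove(object, start, start + npages, FALSE);
 */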
1361 vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
1362 boolean_t clean_only)
1364 struct rb_vm_page_scan_info info;
1368 * Degenerate cases and assertions
1370 vm_object_hold(object);
1371 if (object == NULL ||
1372 (object->resident_page_count == 0 && object->swblock_count == 0)) {
1373 vm_object_drop(object);
1376 KASSERT(object->type != OBJT_PHYS,
1377 ("attempt to remove pages from a physical object"));
1380 * Indicate that paging is occurring on the object
1382 vm_object_pip_add(object, 1);
1385 * Figure out the actual removal range and whether we are removing
1386 * the entire contents of the object or not. If removing the entire
1387 * contents, be sure to get all pages, even those that might be
1388 * beyond the end of the object.
1390 info.object = object;
1391 info.start_pindex = start;
1393 info.end_pindex = (vm_pindex_t)-1;
1395 info.end_pindex = end - 1;
1396 info.limit = clean_only;
1398 all = (start == 0 && info.end_pindex >= object->size - 1);
1401 * Loop until we are sure we have gotten them all.
1405 vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
1406 vm_object_page_remove_callback, &info);
1407 } while (info.error);
1410 * Remove any related swap if throwing away pages, or for
1411 * non-swap objects (the swap is a clean copy in that case).
1413 if (object->type != OBJT_SWAP || clean_only == FALSE) {
1415 swap_pager_freespace_all(object);
1417 swap_pager_freespace(object, info.start_pindex,
1418 info.end_pindex - info.start_pindex + 1);
1424 vm_object_pip_wakeup(object);
1425 vm_object_drop(object);
1429 * The caller must hold the object.
1431 * NOTE: User yields are allowed when removing more than one page, but not
1432 * allowed if only removing one page (the path for single page removals
1433 * might hold a spinlock).
1436 vm_object_page_remove_callback(vm_page_t p, void *data)
1438 struct rb_vm_page_scan_info *info = data;
1440 if (info->object != p->object ||
1441 p->pindex < info->start_pindex ||
1442 p->pindex > info->end_pindex) {
1443 kprintf("vm_object_page_remove_callbackA: obj/pg race %p/%p\n",
1447 if (vm_page_busy_try(p, TRUE)) {
1448 vm_page_sleep_busy(p, TRUE, "vmopar");
1452 if (info->object != p->object) {
1453 /* this should never happen */
1454 kprintf("vm_object_page_remove_callbackB: obj/pg race %p/%p\n",
1461 * Wired pages cannot be destroyed, but they can be invalidated
1462 * and we do so if clean_only (limit) is not set.
1464 * WARNING! The page may be wired due to being part of a buffer
1465 * cache buffer, and the buffer might be marked B_CACHE.
1466 * This is fine as part of a truncation but VFSs must be
1467 * sure to fix the buffer up when re-extending the file.
1469 * NOTE! PG_NEED_COMMIT is ignored.
1471 if (p->wire_count != 0) {
1472 vm_page_protect(p, VM_PROT_NONE);
1473 if (info->limit == 0)
1480 * limit is our clean_only flag. If set and the page is dirty or
1481 * requires a commit, do not free it. If set and the page is being
1482 * held by someone, do not free it.
1484 if (info->limit && p->valid) {
1485 vm_page_test_dirty(p);
1486 if ((p->valid & p->dirty) || (p->flags & PG_NEED_COMMIT)) {
1493 * Destroy the page. But we have to re-test whether it's dirty after
1494 * removing it from its pmaps.
1496 vm_page_protect(p, VM_PROT_NONE);
1497 if (info->limit && p->valid) {
1498 vm_page_test_dirty(p);
1499 if ((p->valid & p->dirty) || (p->flags & PG_NEED_COMMIT)) {
1507 * Must be at end to avoid SMP races, caller holds object token
1510 if ((++info->count & 63) == 0)
1517 * Try to extend prev_object into an adjoining region of virtual
1518 * memory, return TRUE on success.
1520 * The caller does not need to hold (prev_object) but must have a stable
1521 * pointer to it (typically by holding the vm_map locked).
1523 * This function only works for anonymous memory objects which either
1524 * have (a) one reference or (b) we are extending the object's size.
1525 * Otherwise the related VM pages we want to use for the object might
1526 * be in use by another mapping.
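/*
 * Illustrative sketch (not part of the original source): when the map
 * code extends an existing anonymous entry it can try
 *
 *	if (vm_object_coalesce(prev_object, prev_pindex,
 *			       prev_size, next_size)) {
 *		... reuse prev_object for the new range ...
 *	}
 *
 * instead of allocating and mapping a fresh object.
 */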
1529 vm_object_coalesce(vm_object_t prev_object, vm_pindex_t prev_pindex,
1530 vm_size_t prev_size, vm_size_t next_size)
1532 vm_pindex_t next_pindex;
1534 if (prev_object == NULL)
1537 vm_object_hold(prev_object);
1539 if (prev_object->type != OBJT_DEFAULT &&
1540 prev_object->type != OBJT_SWAP) {
1541 vm_object_drop(prev_object);
1546 /* caller now checks this */
1548 * Try to collapse the object first
1550 vm_object_collapse(prev_object, NULL);
1554 /* caller now checks this */
1556 * We can't coalesce if we shadow another object (figuring out the
1557 * relationships becomes too complex).
1559 if (prev_object->backing_object != NULL) {
1560 vm_object_chain_release(prev_object);
1561 vm_object_drop(prev_object);
1566 prev_size >>= PAGE_SHIFT;
1567 next_size >>= PAGE_SHIFT;
1568 next_pindex = prev_pindex + prev_size;
1571 * We can't if the object has more than one ref count unless we
1572 * are extending it into newly minted space.
1574 if (prev_object->ref_count > 1 &&
1575 prev_object->size != next_pindex) {
1576 vm_object_drop(prev_object);
1581 * Remove any pages that may still be in the object from a previous
1584 if (next_pindex < prev_object->size) {
1585 vm_object_page_remove(prev_object,
1587 next_pindex + next_size, FALSE);
1588 if (prev_object->type == OBJT_SWAP)
1589 swap_pager_freespace(prev_object,
1590 next_pindex, next_size);
1594 * Extend the object if necessary.
1596 if (next_pindex + next_size > prev_object->size)
1597 prev_object->size = next_pindex + next_size;
1598 vm_object_drop(prev_object);
1604 * Make the object writable and flag it as possibly dirty.
1606 * The object might not be held (or might be held but held shared),
1607 * the related vnode is probably not held either. Object and vnode are
1608 * stable by virtue of the vm_page busied by the caller preventing
1611 * If the related mount is flagged MNTK_THR_SYNC we need to call
1612 * vsetobjdirty(). Filesystems using this option usually shortcut
1613 * synchronization by only scanning the syncer list.
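/*
 * Illustrative sketch (not part of the original source): a caller such
 * as the fault path would invoke this with the page busied before
 * entering a writable mapping for it:
 *
 *	vm_object_set_writeable_dirty(m->object);
 */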
1616 vm_object_set_writeable_dirty(vm_object_t object)
1620 /*vm_object_assert_held(object);*/
1622 * Avoid contention in vm fault path by checking the state before
1623 * issuing an atomic op on it.
1625 if ((object->flags & (OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY)) !=
1626 (OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY)) {
1627 vm_object_set_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
1629 if (object->type == OBJT_VNODE &&
1630 (vp = (struct vnode *)object->handle) != NULL) {
1631 if ((vp->v_flag & VOBJDIRTY) == 0) {
1633 (vp->v_mount->mnt_kern_flag & MNTK_THR_SYNC)) {
1635 * New style THR_SYNC places vnodes on the
1636 * syncer list more deterministically.
1641 * Old style scan would not necessarily place
1642 * a vnode on the syncer list when possibly
1643 * modified via mmap.
1645 vsetflags(vp, VOBJDIRTY);
1651 #include "opt_ddb.h"
1653 #include <sys/cons.h>
1655 #include <ddb/ddb.h>
1657 static int _vm_object_in_map (vm_map_t map, vm_object_t object,
1658 vm_map_entry_t entry);
1659 static int vm_object_in_map (vm_object_t object);
1662 * The caller must hold the object.
1665 _vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
1667 vm_map_backing_t ba;
1669 vm_map_entry_t tmpe;
1674 if (entry == NULL) {
1675 tmpe = RB_MIN(vm_map_rb_tree, &map->rb_root);
1676 entcount = map->nentries;
1677 while (entcount-- && tmpe) {
1678 if( _vm_object_in_map(map, object, tmpe)) {
1681 tmpe = vm_map_rb_tree_RB_NEXT(tmpe);
1685 switch(entry->maptype) {
1686 case VM_MAPTYPE_SUBMAP:
1687 tmpm = entry->ba.sub_map;
1688 tmpe = RB_MIN(vm_map_rb_tree, &tmpm->rb_root);
1689 entcount = tmpm->nentries;
1690 while (entcount-- && tmpe) {
1691 if( _vm_object_in_map(tmpm, object, tmpe)) {
1694 tmpe = vm_map_rb_tree_RB_NEXT(tmpe);
1697 case VM_MAPTYPE_NORMAL:
1698 case VM_MAPTYPE_VPAGETABLE:
1701 if (ba->object == object)
1703 ba = ba->backing_ba;
1712 static int vm_object_in_map_callback(struct proc *p, void *data);
1714 struct vm_object_in_map_info {
1723 vm_object_in_map(vm_object_t object)
1725 struct vm_object_in_map_info info;
1728 info.object = object;
1730 allproc_scan(vm_object_in_map_callback, &info, 0);
1733 if( _vm_object_in_map(&kernel_map, object, 0))
1735 if( _vm_object_in_map(&pager_map, object, 0))
1737 if( _vm_object_in_map(&buffer_map, object, 0))
1746 vm_object_in_map_callback(struct proc *p, void *data)
1748 struct vm_object_in_map_info *info = data;
1751 if (_vm_object_in_map(&p->p_vmspace->vm_map, info->object, 0)) {
1759 DB_SHOW_COMMAND(vmochk, vm_object_check)
1761 struct vm_object_hash *hash;
1766 * make sure that internal objs are in a map somewhere
1767 * and none have zero ref counts.
1769 for (n = 0; n < VMOBJ_HSIZE; ++n) {
1770 hash = &vm_object_hash[n];
1771 for (object = TAILQ_FIRST(&hash->list);
1773 object = TAILQ_NEXT(object, object_entry)) {
1774 if (object->type == OBJT_MARKER)
1776 if (object->handle != NULL ||
1777 (object->type != OBJT_DEFAULT &&
1778 object->type != OBJT_SWAP)) {
1781 if (object->ref_count == 0) {
1782 db_printf("vmochk: internal obj has "
1783 "zero ref count: %ld\n",
1784 (long)object->size);
1786 if (vm_object_in_map(object))
1788 db_printf("vmochk: internal obj is not in a map: "
1789 "ref: %d, size: %lu: 0x%lx\n",
1790 object->ref_count, (u_long)object->size,
1791 (u_long)object->size);
1799 DB_SHOW_COMMAND(object, vm_object_print_static)
1801 /* XXX convert args. */
1802 vm_object_t object = (vm_object_t)addr;
1803 boolean_t full = have_addr;
1807 /* XXX count is an (unused) arg. Avoid shadowing it. */
1808 #define count was_count
1816 "Object %p: type=%d, size=0x%lx, res=%ld, ref=%d, flags=0x%x\n",
1817 object, (int)object->type, (u_long)object->size,
1818 object->resident_page_count, object->ref_count, object->flags);
1820 * XXX no %qd in kernel. Truncate object->backing_object_offset.
1829 RB_FOREACH(p, vm_page_rb_tree, &object->rb_memq) {
1831 db_iprintf("memory:=");
1832 else if (count == 6) {
1840 db_printf("(off=0x%lx,page=0x%lx)",
1841 (u_long) p->pindex, (u_long) VM_PAGE_TO_PHYS(p));
1852 * XXX need this non-static entry for calling from vm_map_print.
1857 vm_object_print(/* db_expr_t */ long addr,
1858 boolean_t have_addr,
1859 /* db_expr_t */ long count,
1862 vm_object_print_static(addr, have_addr, count, modif);
1868 DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
1870 struct vm_object_hash *hash;
1876 for (n = 0; n < VMOBJ_HSIZE; ++n) {
1877 hash = &vm_object_hash[n];
1878 for (object = TAILQ_FIRST(&hash->list);
1880 object = TAILQ_NEXT(object, object_entry)) {
1881 vm_pindex_t idx, fidx;
1883 vm_paddr_t pa = -1, padiff;
1887 if (object->type == OBJT_MARKER)
1889 db_printf("new object: %p\n", (void *)object);
1899 osize = object->size;
1902 for (idx = 0; idx < osize; idx++) {
1903 m = vm_page_lookup(object, idx);
1906 db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
1907 (long)fidx, rcount, (long)pa);
1921 (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
1926 padiff = pa + rcount * PAGE_SIZE - VM_PAGE_TO_PHYS(m);
1927 padiff >>= PAGE_SHIFT;
1928 padiff &= PQ_L2_MASK;
1930 pa = VM_PAGE_TO_PHYS(m) - rcount * PAGE_SIZE;
1934 db_printf(" index(%ld)run(%d)pa(0x%lx)",
1935 (long)fidx, rcount, (long)pa);
1936 db_printf("pd(%ld)\n", (long)padiff);
1946 pa = VM_PAGE_TO_PHYS(m);
1950 db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
1951 (long)fidx, rcount, (long)pa);