2 * Copyright (c) 1991, 1993, 2013
3 * The Regents of the University of California. All rights reserved.
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * from: @(#)vm_object.c 8.5 (Berkeley) 3/22/94
35 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
36 * All rights reserved.
38 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
40 * Permission to use, copy, modify and distribute this software and
41 * its documentation is hereby granted, provided that both the copyright
42 * notice and this permission notice appear in all copies of the
43 * software, derivative works or modified versions, and any portions
44 * thereof, and that both notices appear in supporting documentation.
46 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
47 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
48 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
50 * Carnegie Mellon requests users of this software to return to
52 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
53 * School of Computer Science
54 * Carnegie Mellon University
55 * Pittsburgh PA 15213-3890
57 * any improvements or extensions that they make and grant Carnegie the
58 * rights to redistribute these changes.
60 * $FreeBSD: src/sys/vm/vm_object.c,v 1.171.2.8 2003/05/26 19:17:56 alc Exp $
64 * Virtual memory object module.
67 #include <sys/param.h>
68 #include <sys/systm.h>
69 #include <sys/proc.h> /* for curproc, pageproc */
70 #include <sys/thread.h>
71 #include <sys/vnode.h>
72 #include <sys/vmmeter.h>
74 #include <sys/mount.h>
75 #include <sys/kernel.h>
76 #include <sys/sysctl.h>
77 #include <sys/refcount.h>
80 #include <vm/vm_param.h>
82 #include <vm/vm_map.h>
83 #include <vm/vm_object.h>
84 #include <vm/vm_page.h>
85 #include <vm/vm_pageout.h>
86 #include <vm/vm_pager.h>
87 #include <vm/swap_pager.h>
88 #include <vm/vm_kern.h>
89 #include <vm/vm_extern.h>
90 #include <vm/vm_zone.h>
92 #include <vm/vm_page2.h>
94 #include <machine/specialreg.h>
96 #define EASY_SCAN_FACTOR 8
98 static void vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
100 static void vm_object_lock_init(vm_object_t);
103 * Virtual memory objects maintain the actual data
104 * associated with allocated virtual memory. A given
105 * page of memory exists within exactly one object.
107 * An object is only deallocated when all "references"
108 * are given up. Only one "reference" to a given
109 * region of an object should be writeable.
111 * Associated with each object is a list of all resident
112 * memory pages belonging to that object; this list is
113 * maintained by the "vm_page" module, and locked by the object's
116 * Each object also records a "pager" routine which is
117 * used to retrieve (and store) pages to the proper backing
118 * storage. In addition, objects may be backed by other
119 * objects from which they were virtual-copied.
121 * The only items within the object structure which are
122 * modified after time of creation are:
123 * reference count locked by object's lock
124 * pager routine locked by object's lock
128 struct vm_object kernel_object;
130 struct vm_object_hash vm_object_hash[VMOBJ_HSIZE];
132 MALLOC_DEFINE(M_VM_OBJECT, "vm_object", "vm_object structures");
134 #define VMOBJ_HASH_PRIME1 66555444443333333ULL
135 #define VMOBJ_HASH_PRIME2 989042931893ULL
138 SYSCTL_INT(_vm, OID_AUTO, object_debug, CTLFLAG_RW, &vm_object_debug, 0, "");
141 struct vm_object_hash *
142 vmobj_hash(vm_object_t obj)
147 hash1 = (uintptr_t)obj + ((uintptr_t)obj >> 18);
148 hash1 %= VMOBJ_HASH_PRIME1;
149 hash2 = ((uintptr_t)obj >> 8) + ((uintptr_t)obj >> 24);
150 hash2 %= VMOBJ_HASH_PRIME2;
151 return (&vm_object_hash[(hash1 ^ hash2) & VMOBJ_HMASK]);
154 #if defined(DEBUG_LOCKS)
156 #define vm_object_vndeallocate(obj, vpp) \
157 debugvm_object_vndeallocate(obj, vpp, __FILE__, __LINE__)
160 * Debug helper to track hold/drop/ref/deallocate calls.
163 debugvm_object_add(vm_object_t obj, char *file, int line, int addrem)
167 i = atomic_fetchadd_int(&obj->debug_index, 1);
168 i = i & (VMOBJ_DEBUG_ARRAY_SIZE - 1);
169 ksnprintf(obj->debug_hold_thrs[i],
170 sizeof(obj->debug_hold_thrs[i]),
172 (addrem == -1 ? '-' : (addrem == 1 ? '+' : '=')),
173 (curthread->td_proc ? curthread->td_proc->p_pid : -1),
176 obj->debug_hold_file[i] = file;
177 obj->debug_hold_line[i] = line;
179 	/* Uncomment for debugging obj refs/derefs in reproducible cases */
180 if (strcmp(curthread->td_comm, "sshd") == 0) {
181 kprintf("%d %p refs=%d ar=%d file: %s/%d\n",
182 (curthread->td_proc ? curthread->td_proc->p_pid : -1),
183 obj, obj->ref_count, addrem, file, line);
191 * Misc low level routines
194 vm_object_lock_init(vm_object_t obj)
196 #if defined(DEBUG_LOCKS)
199 obj->debug_index = 0;
200 for (i = 0; i < VMOBJ_DEBUG_ARRAY_SIZE; i++) {
201 obj->debug_hold_thrs[i][0] = 0;
202 obj->debug_hold_file[i] = NULL;
203 obj->debug_hold_line[i] = 0;
/*
 * Swap the two most-recently acquired LWKT tokens held by the current
 * thread.  Used to reverse lock order without releasing either token.
 * NOTE(review): reconstructed body — confirm against repository history.
 */
void
vm_object_lock_swap(void)
{
	lwkt_token_swap();
}
215 vm_object_lock(vm_object_t obj)
217 lwkt_gettoken(&obj->token);
221 * Returns TRUE on sucesss
224 vm_object_lock_try(vm_object_t obj)
226 return(lwkt_trytoken(&obj->token));
230 vm_object_lock_shared(vm_object_t obj)
232 lwkt_gettoken_shared(&obj->token);
236 vm_object_unlock(vm_object_t obj)
238 lwkt_reltoken(&obj->token);
242 vm_object_upgrade(vm_object_t obj)
244 lwkt_reltoken(&obj->token);
245 lwkt_gettoken(&obj->token);
249 vm_object_downgrade(vm_object_t obj)
251 lwkt_reltoken(&obj->token);
252 lwkt_gettoken_shared(&obj->token);
256 vm_object_assert_held(vm_object_t obj)
258 ASSERT_LWKT_TOKEN_HELD(&obj->token);
264 globaldata_t gd = mycpu;
267 pg_color = (int)(intptr_t)gd->gd_curthread >> 10;
268 pg_color += gd->gd_quick_color;
269 gd->gd_quick_color += PQ_PRIME2;
275 VMOBJDEBUG(vm_object_hold)(vm_object_t obj VMOBJDBARGS)
277 KKASSERT(obj != NULL);
280 * Object must be held (object allocation is stable due to callers
281 * context, typically already holding the token on a parent object)
282 * prior to potentially blocking on the lock, otherwise the object
283 * can get ripped away from us.
285 refcount_acquire(&obj->hold_count);
288 #if defined(DEBUG_LOCKS)
289 debugvm_object_add(obj, file, line, 1);
294 VMOBJDEBUG(vm_object_hold_try)(vm_object_t obj VMOBJDBARGS)
296 KKASSERT(obj != NULL);
299 * Object must be held (object allocation is stable due to callers
300 * context, typically already holding the token on a parent object)
301 * prior to potentially blocking on the lock, otherwise the object
302 * can get ripped away from us.
304 refcount_acquire(&obj->hold_count);
305 if (vm_object_lock_try(obj) == 0) {
306 if (refcount_release(&obj->hold_count)) {
307 if (obj->ref_count == 0 && (obj->flags & OBJ_DEAD))
308 kfree(obj, M_VM_OBJECT);
313 #if defined(DEBUG_LOCKS)
314 debugvm_object_add(obj, file, line, 1);
320 VMOBJDEBUG(vm_object_hold_shared)(vm_object_t obj VMOBJDBARGS)
322 KKASSERT(obj != NULL);
325 * Object must be held (object allocation is stable due to callers
326 * context, typically already holding the token on a parent object)
327 * prior to potentially blocking on the lock, otherwise the object
328 * can get ripped away from us.
330 refcount_acquire(&obj->hold_count);
331 vm_object_lock_shared(obj);
333 #if defined(DEBUG_LOCKS)
334 debugvm_object_add(obj, file, line, 1);
339 * Drop the token and hold_count on the object.
341 * WARNING! Token might be shared.
344 VMOBJDEBUG(vm_object_drop)(vm_object_t obj VMOBJDBARGS)
350 * No new holders should be possible once we drop hold_count 1->0 as
351 * there is no longer any way to reference the object.
353 KKASSERT(obj->hold_count > 0);
354 if (refcount_release(&obj->hold_count)) {
355 #if defined(DEBUG_LOCKS)
356 debugvm_object_add(obj, file, line, -1);
359 if (obj->ref_count == 0 && (obj->flags & OBJ_DEAD)) {
360 vm_object_unlock(obj);
361 kfree(obj, M_VM_OBJECT);
363 vm_object_unlock(obj);
366 #if defined(DEBUG_LOCKS)
367 debugvm_object_add(obj, file, line, -1);
369 vm_object_unlock(obj);
374 * Initialize a freshly allocated object, returning a held object.
376 * Used only by vm_object_allocate(), zinitna() and vm_object_init().
381 _vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
383 struct vm_object_hash *hash;
385 RB_INIT(&object->rb_memq);
386 lwkt_token_init(&object->token, "vmobj");
390 object->ref_count = 1;
391 object->memattr = VM_MEMATTR_DEFAULT;
392 object->hold_count = 0;
394 if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP))
395 vm_object_set_flag(object, OBJ_ONEMAPPING);
396 object->paging_in_progress = 0;
397 object->resident_page_count = 0;
398 /* cpu localization twist */
399 object->pg_color = vm_quickcolor();
400 object->handle = NULL;
402 atomic_add_int(&object->generation, 1);
403 object->swblock_count = 0;
404 RB_INIT(&object->swblock_root);
405 vm_object_lock_init(object);
406 pmap_object_init(object);
408 vm_object_hold(object);
410 hash = vmobj_hash(object);
411 lwkt_gettoken(&hash->token);
412 TAILQ_INSERT_TAIL(&hash->list, object, object_list);
413 lwkt_reltoken(&hash->token);
417 * Initialize a VM object.
420 vm_object_init(vm_object_t object, vm_pindex_t size)
422 _vm_object_allocate(OBJT_DEFAULT, size, object);
423 vm_object_drop(object);
427 * Initialize the VM objects module.
429 * Called from the low level boot code only. Note that this occurs before
430 * kmalloc is initialized so we cannot allocate any VM objects.
433 vm_object_init1(void)
437 for (i = 0; i < VMOBJ_HSIZE; ++i) {
438 TAILQ_INIT(&vm_object_hash[i].list);
439 lwkt_token_init(&vm_object_hash[i].token, "vmobjlst");
442 _vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(KvaEnd),
444 vm_object_drop(&kernel_object);
448 vm_object_init2(void)
450 kmalloc_set_unlimited(M_VM_OBJECT);
454 * Allocate and return a new object of the specified type and size.
459 vm_object_allocate(objtype_t type, vm_pindex_t size)
463 obj = kmalloc(sizeof(*obj), M_VM_OBJECT, M_INTWAIT|M_ZERO);
464 _vm_object_allocate(type, size, obj);
471 * This version returns a held object, allowing further atomic initialization
475 vm_object_allocate_hold(objtype_t type, vm_pindex_t size)
479 obj = kmalloc(sizeof(*obj), M_VM_OBJECT, M_INTWAIT|M_ZERO);
480 _vm_object_allocate(type, size, obj);
486 * Add an additional reference to a vm_object. The object must already be
487 * held. The original non-lock version is no longer supported. The object
488 * must NOT be chain locked by anyone at the time the reference is added.
490 * The object must be held, but may be held shared if desired (hence why
491 * we use an atomic op).
494 VMOBJDEBUG(vm_object_reference_locked)(vm_object_t object VMOBJDBARGS)
496 KKASSERT(object != NULL);
497 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
498 atomic_add_int(&object->ref_count, 1);
499 if (object->type == OBJT_VNODE) {
500 vref(object->handle);
501 /* XXX what if the vnode is being destroyed? */
503 #if defined(DEBUG_LOCKS)
504 debugvm_object_add(object, file, line, 1);
509 * This version is only allowed for vnode objects.
512 VMOBJDEBUG(vm_object_reference_quick)(vm_object_t object VMOBJDBARGS)
514 KKASSERT(object->type == OBJT_VNODE);
515 atomic_add_int(&object->ref_count, 1);
516 vref(object->handle);
517 #if defined(DEBUG_LOCKS)
518 debugvm_object_add(object, file, line, 1);
523 * Dereference an object and its underlying vnode. The object may be
524 * held shared. On return the object will remain held.
526 * This function may return a vnode in *vpp which the caller must release
527 * after the caller drops its own lock. If vpp is NULL, we assume that
528 * the caller was holding an exclusive lock on the object and we vrele()
532 VMOBJDEBUG(vm_object_vndeallocate)(vm_object_t object, struct vnode **vpp
535 struct vnode *vp = (struct vnode *) object->handle;
537 KASSERT(object->type == OBJT_VNODE,
538 ("vm_object_vndeallocate: not a vnode object"));
539 KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
540 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
542 if (object->ref_count == 0) {
543 vprint("vm_object_vndeallocate", vp);
544 panic("vm_object_vndeallocate: bad object reference count");
548 int count = object->ref_count;
551 vm_object_upgrade(object);
552 if (atomic_cmpset_int(&object->ref_count, count, 0)) {
553 vclrflags(vp, VTEXT);
557 if (atomic_cmpset_int(&object->ref_count,
564 #if defined(DEBUG_LOCKS)
565 debugvm_object_add(object, file, line, -1);
569 * vrele or return the vp to vrele. We can only safely vrele(vp)
570 * if the object was locked exclusively. But there are two races
573 * We had to upgrade the object above to safely clear VTEXT
574 * but the alternative path where the shared lock is retained
575 * can STILL race to 0 in other paths and cause our own vrele()
576 * to terminate the vnode. We can't allow that if the VM object
577 * is still locked shared.
586 * Release a reference to the specified object, gained either through a
587 * vm_object_allocate or a vm_object_reference call. When all references
588 * are gone, storage associated with this object may be relinquished.
590 * The caller does not have to hold the object locked but must have control
591 * over the reference in question in order to guarantee that the object
592 * does not get ripped out from under us.
594 * XXX Currently all deallocations require an exclusive lock.
597 VMOBJDEBUG(vm_object_deallocate)(vm_object_t object VMOBJDBARGS)
606 count = object->ref_count;
610 * If decrementing the count enters into special handling
611 * territory (0, 1, or 2) we have to do it the hard way.
612 * Fortunate though, objects with only a few refs like this
613 * are not likely to be heavily contended anyway.
615 * For vnode objects we only care about 1->0 transitions.
617 if (count <= 3 || (object->type == OBJT_VNODE && count <= 1)) {
618 #if defined(DEBUG_LOCKS)
619 debugvm_object_add(object, file, line, 0);
621 vm_object_hold(object);
622 vm_object_deallocate_locked(object);
623 vm_object_drop(object);
628 * Try to decrement ref_count without acquiring a hold on
629 * the object. This is particularly important for the exec*()
630 * and exit*() code paths because the program binary may
631 * have a great deal of sharing and an exclusive lock will
632 * crowbar performance in those circumstances.
634 if (object->type == OBJT_VNODE) {
635 vp = (struct vnode *)object->handle;
636 if (atomic_cmpset_int(&object->ref_count,
638 #if defined(DEBUG_LOCKS)
639 debugvm_object_add(object, file, line, -1);
647 if (atomic_cmpset_int(&object->ref_count,
649 #if defined(DEBUG_LOCKS)
650 debugvm_object_add(object, file, line, -1);
661 VMOBJDEBUG(vm_object_deallocate_locked)(vm_object_t object VMOBJDBARGS)
670 * vnode case, caller either locked the object exclusively
671 * or this is a recursion with must_drop != 0 and the vnode
672 * object will be locked shared.
674 * If locked shared we have to drop the object before we can
675 * call vrele() or risk a shared/exclusive livelock.
677 if (object->type == OBJT_VNODE) {
678 ASSERT_LWKT_TOKEN_HELD(&object->token);
679 vm_object_vndeallocate(object, NULL);
682 ASSERT_LWKT_TOKEN_HELD_EXCL(&object->token);
685 * Normal case (object is locked exclusively)
687 if (object->ref_count == 0) {
688 panic("vm_object_deallocate: object deallocated "
689 "too many times: %d", object->type);
691 if (object->ref_count > 2) {
692 atomic_add_int(&object->ref_count, -1);
693 #if defined(DEBUG_LOCKS)
694 debugvm_object_add(object, file, line, -1);
700 * Drop the ref and handle termination on the 1->0 transition.
701 * We may have blocked above so we have to recheck.
703 KKASSERT(object->ref_count != 0);
704 if (object->ref_count >= 2) {
705 atomic_add_int(&object->ref_count, -1);
706 #if defined(DEBUG_LOCKS)
707 debugvm_object_add(object, file, line, -1);
712 atomic_add_int(&object->ref_count, -1);
713 if ((object->flags & OBJ_DEAD) == 0)
714 vm_object_terminate(object);
718 * Destroy the specified object, freeing up related resources.
720 * The object must have zero references.
722 * The object must held. The caller is responsible for dropping the object
723 * after terminate returns. Terminate does NOT drop the object.
725 static int vm_object_terminate_callback(vm_page_t p, void *data);
728 vm_object_terminate(vm_object_t object)
730 struct rb_vm_page_scan_info info;
731 struct vm_object_hash *hash;
734 * Make sure no one uses us. Once we set OBJ_DEAD we should be
735 * able to safely block.
737 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
738 KKASSERT((object->flags & OBJ_DEAD) == 0);
739 vm_object_set_flag(object, OBJ_DEAD);
742 * Wait for the pageout daemon to be done with the object
744 vm_object_pip_wait(object, "objtrm1");
746 KASSERT(!object->paging_in_progress,
747 ("vm_object_terminate: pageout in progress"));
750 * Clean and free the pages, as appropriate. All references to the
751 * object are gone, so we don't need to lock it.
753 if (object->type == OBJT_VNODE) {
757 * Clean pages and flush buffers.
759 * NOTE! TMPFS buffer flushes do not typically flush the
760 * actual page to swap as this would be highly
761 * inefficient, and normal filesystems usually wrap
762 * page flushes with buffer cache buffers.
764 * To deal with this we have to call vinvalbuf() both
765 * before and after the vm_object_page_clean().
767 vp = (struct vnode *) object->handle;
768 vinvalbuf(vp, V_SAVE, 0, 0);
769 vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
770 vinvalbuf(vp, V_SAVE, 0, 0);
774 * Wait for any I/O to complete, after which there had better not
775 * be any references left on the object.
777 vm_object_pip_wait(object, "objtrm2");
779 if (object->ref_count != 0) {
780 panic("vm_object_terminate: object with references, "
781 "ref_count=%d", object->ref_count);
785 * Cleanup any shared pmaps associated with this object.
787 pmap_object_free(object);
790 * Now free any remaining pages. For internal objects, this also
791 * removes them from paging queues. Don't free wired pages, just
792 * remove them from the object.
795 info.object = object;
798 vm_page_rb_tree_RB_SCAN(&object->rb_memq, NULL,
799 vm_object_terminate_callback, &info);
800 } while (info.error);
803 * Let the pager know object is dead.
805 vm_pager_deallocate(object);
808 * Wait for the object hold count to hit 1, clean out pages as
809 * we go. vmobj_token interlocks any race conditions that might
810 * pick the object up from the vm_object_list after we have cleared
814 if (RB_ROOT(&object->rb_memq) == NULL)
816 kprintf("vm_object_terminate: Warning, object %p "
817 "still has %ld pages\n",
818 object, object->resident_page_count);
819 vm_page_rb_tree_RB_SCAN(&object->rb_memq, NULL,
820 vm_object_terminate_callback, &info);
824 * There had better not be any pages left
826 KKASSERT(object->resident_page_count == 0);
829 * Remove the object from the global object list.
831 hash = vmobj_hash(object);
832 lwkt_gettoken(&hash->token);
833 TAILQ_REMOVE(&hash->list, object, object_list);
834 lwkt_reltoken(&hash->token);
836 if (object->ref_count != 0) {
837 panic("vm_object_terminate2: object with references, "
838 "ref_count=%d", object->ref_count);
842 * NOTE: The object hold_count is at least 1, so we cannot kfree()
843 * the object here. See vm_object_drop().
848 * The caller must hold the object.
851 vm_object_terminate_callback(vm_page_t p, void *data)
853 struct rb_vm_page_scan_info *info = data;
857 KKASSERT(object == info->object);
858 if (vm_page_busy_try(p, TRUE)) {
859 vm_page_sleep_busy(p, TRUE, "vmotrm");
863 if (object != p->object) {
864 /* XXX remove once we determine it can't happen */
865 kprintf("vm_object_terminate: Warning: Encountered "
866 "busied page %p on queue %d\n", p, p->queue);
869 } else if (p->wire_count == 0) {
871 * NOTE: p->dirty and PG_NEED_COMMIT are ignored.
874 mycpu->gd_cnt.v_pfree++;
876 if (p->queue != PQ_NONE) {
877 kprintf("vm_object_terminate: Warning: Encountered "
878 "wired page %p on queue %d\n", p, p->queue);
879 if (vm_object_debug > 0) {
889 * Must be at end to avoid SMP races, caller holds object token
891 if ((++info->count & 63) == 0)
897 * Clean all dirty pages in the specified range of object. Leaves page
898 * on whatever queue it is currently on. If NOSYNC is set then do not
899 * write out pages with PG_NOSYNC set (originally comes from MAP_NOSYNC),
900 * leaving the object dirty.
902 * When stuffing pages asynchronously, allow clustering. XXX we need a
903 * synchronous clustering mode implementation.
905 * Odd semantics: if start == end, we clean everything.
907 * The object must be locked? XXX
909 static int vm_object_page_clean_pass1(struct vm_page *p, void *data);
910 static int vm_object_page_clean_pass2(struct vm_page *p, void *data);
913 vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
916 struct rb_vm_page_scan_info info;
922 vm_object_hold(object);
923 if (object->type != OBJT_VNODE ||
924 (object->flags & OBJ_MIGHTBEDIRTY) == 0) {
925 vm_object_drop(object);
929 pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) ?
930 VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
931 pagerflags |= (flags & OBJPC_INVAL) ? VM_PAGER_PUT_INVAL : 0;
936 * Interlock other major object operations. This allows us to
937 * temporarily clear OBJ_WRITEABLE and OBJ_MIGHTBEDIRTY.
939 vm_object_set_flag(object, OBJ_CLEANING);
942 * Handle 'entire object' case
944 info.start_pindex = start;
946 info.end_pindex = object->size - 1;
948 info.end_pindex = end - 1;
950 wholescan = (start == 0 && info.end_pindex == object->size - 1);
952 info.pagerflags = pagerflags;
953 info.object = object;
956 * If cleaning the entire object do a pass to mark the pages read-only.
957 * If everything worked out ok, clear OBJ_WRITEABLE and
963 vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
964 vm_object_page_clean_pass1, &info);
965 if (info.error == 0) {
966 vm_object_clear_flag(object,
967 OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
968 if (object->type == OBJT_VNODE &&
969 (vp = (struct vnode *)object->handle) != NULL) {
971 * Use new-style interface to clear VISDIRTY
972 * because the vnode is not necessarily removed
973 * from the syncer list(s) as often as it was
974 * under the old interface, which can leave
975 * the vnode on the syncer list after reclaim.
983 * Do a pass to clean all the dirty pages we find.
988 generation = object->generation;
989 vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
990 vm_object_page_clean_pass2, &info);
991 } while (info.error || generation != object->generation);
993 vm_object_clear_flag(object, OBJ_CLEANING);
994 vm_object_drop(object);
998 * The caller must hold the object.
1002 vm_object_page_clean_pass1(struct vm_page *p, void *data)
1004 struct rb_vm_page_scan_info *info = data;
1006 KKASSERT(p->object == info->object);
1008 vm_page_flag_set(p, PG_CLEANCHK);
1009 if ((info->limit & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) {
1011 } else if (vm_page_busy_try(p, FALSE)) {
1014 KKASSERT(p->object == info->object);
1015 vm_page_protect(p, VM_PROT_READ);
1020 * Must be at end to avoid SMP races, caller holds object token
1022 if ((++info->count & 63) == 0)
1028 * The caller must hold the object
1032 vm_object_page_clean_pass2(struct vm_page *p, void *data)
1034 struct rb_vm_page_scan_info *info = data;
1037 KKASSERT(p->object == info->object);
1040 * Do not mess with pages that were inserted after we started
1041 * the cleaning pass.
1043 if ((p->flags & PG_CLEANCHK) == 0)
1046 generation = info->object->generation;
1048 if (vm_page_busy_try(p, TRUE)) {
1049 vm_page_sleep_busy(p, TRUE, "vpcwai");
1054 KKASSERT(p->object == info->object &&
1055 info->object->generation == generation);
1058 * Before wasting time traversing the pmaps, check for trivial
1059 * cases where the page cannot be dirty.
1061 if (p->valid == 0 || (p->queue - p->pc) == PQ_CACHE) {
1062 KKASSERT((p->dirty & p->valid) == 0 &&
1063 (p->flags & PG_NEED_COMMIT) == 0);
1069 * Check whether the page is dirty or not. The page has been set
1070 * to be read-only so the check will not race a user dirtying the
1073 vm_page_test_dirty(p);
1074 if ((p->dirty & p->valid) == 0 && (p->flags & PG_NEED_COMMIT) == 0) {
1075 vm_page_flag_clear(p, PG_CLEANCHK);
1081 * If we have been asked to skip nosync pages and this is a
1082 * nosync page, skip it. Note that the object flags were
1083 * not cleared in this case (because pass1 will have returned an
1084 * error), so we do not have to set them.
1086 if ((info->limit & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) {
1087 vm_page_flag_clear(p, PG_CLEANCHK);
1093 * Flush as many pages as we can. PG_CLEANCHK will be cleared on
1094 * the pages that get successfully flushed. Set info->error if
1095 * we raced an object modification.
1097 vm_object_page_collect_flush(info->object, p, info->pagerflags);
1098 /* vm_wait_nominal(); this can deadlock the system in syncer/pageout */
1101 * Must be at end to avoid SMP races, caller holds object token
1104 if ((++info->count & 63) == 0)
1110 * Collect the specified page and nearby pages and flush them out.
1111 * The number of pages flushed is returned. The passed page is busied
1112 * by the caller and we are responsible for its disposition.
1114 * The caller must hold the object.
1117 vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags)
1125 vm_page_t ma[BLIST_MAX_ALLOC];
1127 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
1130 page_base = pi % BLIST_MAX_ALLOC;
1138 tp = vm_page_lookup_busy_try(object, pi - page_base + ib,
1144 if ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
1145 (tp->flags & PG_CLEANCHK) == 0) {
1149 if ((tp->queue - tp->pc) == PQ_CACHE) {
1150 vm_page_flag_clear(tp, PG_CLEANCHK);
1154 vm_page_test_dirty(tp);
1155 if ((tp->dirty & tp->valid) == 0 &&
1156 (tp->flags & PG_NEED_COMMIT) == 0) {
1157 vm_page_flag_clear(tp, PG_CLEANCHK);
1166 while (is < BLIST_MAX_ALLOC &&
1167 pi - page_base + is < object->size) {
1170 tp = vm_page_lookup_busy_try(object, pi - page_base + is,
1176 if ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
1177 (tp->flags & PG_CLEANCHK) == 0) {
1181 if ((tp->queue - tp->pc) == PQ_CACHE) {
1182 vm_page_flag_clear(tp, PG_CLEANCHK);
1186 vm_page_test_dirty(tp);
1187 if ((tp->dirty & tp->valid) == 0 &&
1188 (tp->flags & PG_NEED_COMMIT) == 0) {
1189 vm_page_flag_clear(tp, PG_CLEANCHK);
1198 * All pages in the ma[] array are busied now
1200 for (i = ib; i < is; ++i) {
1201 vm_page_flag_clear(ma[i], PG_CLEANCHK);
1202 vm_page_hold(ma[i]); /* XXX need this any more? */
1204 vm_pageout_flush(&ma[ib], is - ib, pagerflags);
1205 for (i = ib; i < is; ++i) /* XXX need this any more? */
1206 vm_page_unhold(ma[i]);
1210 * Same as vm_object_pmap_copy, except range checking really
1211 * works, and is meant for small sections of an object.
1213 * This code protects resident pages by making them read-only
1214 * and is typically called on a fork or split when a page
1215 * is converted to copy-on-write.
1217 * NOTE: If the page is already at VM_PROT_NONE, calling
1218 * vm_page_protect will have no effect.
1221 vm_object_pmap_copy_1(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
1226 if (object == NULL || (object->flags & OBJ_WRITEABLE) == 0)
1229 vm_object_hold(object);
1230 for (idx = start; idx < end; idx++) {
1231 p = vm_page_lookup(object, idx);
1234 vm_page_protect(p, VM_PROT_READ);
1236 vm_object_drop(object);
1240 * Removes all physical pages in the specified object range from all
1243 * The object must *not* be locked.
1246 static int vm_object_pmap_remove_callback(vm_page_t p, void *data);
1249 vm_object_pmap_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
1251 struct rb_vm_page_scan_info info;
1257 info.start_pindex = start;
1258 info.end_pindex = end - 1;
1260 info.object = object;
1262 vm_object_hold(object);
1265 vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
1266 vm_object_pmap_remove_callback, &info);
1267 } while (info.error);
1268 if (start == 0 && end == object->size)
1269 vm_object_clear_flag(object, OBJ_WRITEABLE);
1270 vm_object_drop(object);
1274 * The caller must hold the object
1277 vm_object_pmap_remove_callback(vm_page_t p, void *data)
1279 struct rb_vm_page_scan_info *info = data;
1281 if (info->object != p->object ||
1282 p->pindex < info->start_pindex ||
1283 p->pindex > info->end_pindex) {
1284 kprintf("vm_object_pmap_remove_callback: obj/pg race %p/%p\n",
1290 vm_page_protect(p, VM_PROT_NONE);
1293 * Must be at end to avoid SMP races, caller holds object token
1295 if ((++info->count & 63) == 0)
1301 * Implements the madvise function at the object/page level.
1303 * MADV_WILLNEED (any object)
1305 * Activate the specified pages if they are resident.
1307 * MADV_DONTNEED (any object)
1309 * Deactivate the specified pages if they are resident.
1311 * MADV_FREE (OBJT_DEFAULT/OBJT_SWAP objects, OBJ_ONEMAPPING only)
1313 * Deactivate and clean the specified pages if they are
1314 * resident. This permits the process to reuse the pages
1315 * without faulting or the kernel to reclaim the pages
1321 vm_object_madvise(vm_object_t object, vm_pindex_t pindex,
1322 vm_pindex_t count, int advise)
1331 end = pindex + count;
1333 vm_object_hold(object);
1336 * Locate and adjust resident pages. This only applies to the
1337 * primary object in the mapping.
1339 for (; pindex < end; pindex += 1) {
1342 * MADV_FREE only operates on OBJT_DEFAULT or OBJT_SWAP pages
1343 * and those pages must be OBJ_ONEMAPPING.
1345 if (advise == MADV_FREE) {
1346 if ((object->type != OBJT_DEFAULT &&
1347 object->type != OBJT_SWAP) ||
1348 (object->flags & OBJ_ONEMAPPING) == 0) {
/*
 * Busy the page for this index; if it is already busy, sleep
 * until it is released (the retry path is not visible here --
 * presumably the index is rescanned).
 */
1353 m = vm_page_lookup_busy_try(object, pindex, TRUE, &error);
1356 vm_page_sleep_busy(m, TRUE, "madvpo");
1361 * There may be swap even if there is no backing page
1363 if (advise == MADV_FREE &&
1364 object->type == OBJT_SWAP &&
1365 m->object == object) {
1366 swap_pager_freespace(object, pindex, 1);
1372 * If the page is not in a normal active state, we skip it.
1373 * If the page is not managed there are no page queues to
1374 * mess with. Things can break if we mess with pages in
1375 * any of the below states.
1377 if (m->wire_count ||
1378 (m->flags & (PG_UNMANAGED | PG_NEED_COMMIT)) ||
1379 m->valid != VM_PAGE_BITS_ALL
1386 * Theoretically once a page is known not to be busy, an
1387 * interrupt cannot come along and rip it out from under us.
1389 if (advise == MADV_WILLNEED) {
1390 vm_page_activate(m);
1391 } else if (advise == MADV_DONTNEED) {
1392 vm_page_dontneed(m);
1393 } else if (advise == MADV_FREE) {
1395 * Mark the page clean. This will allow the page
1396 * to be freed up by the system. However, such pages
1397 * are often reused quickly by malloc()/free()
1398 * so we do not do anything that would cause
1399 * a page fault if we can help it.
1401 * Specifically, we do not try to actually free
1402 * the page now nor do we try to put it in the
1403 * cache (which would cause a page fault on reuse).
1405 * But we do make the page as freeable as we
1406 * can without actually taking the step of unmapping
1409 pmap_clear_modify(m);
1412 vm_page_dontneed(m);
1413 if (object->type == OBJT_SWAP)
1414 swap_pager_freespace(object, pindex, 1);
1418 vm_object_drop(object);
1422 * Removes all physical pages in the specified object range from the
1423 * object's list of pages.
1427 static int vm_object_page_remove_callback(vm_page_t p, void *data);
1430 vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
1431 boolean_t clean_only)
1433 struct rb_vm_page_scan_info info;
1437 * Degenerate cases and assertions
/*
 * NOTE(review): the object is held before the NULL test below; this
 * presumes vm_object_hold() tolerates a NULL object -- verify.
 */
1439 vm_object_hold(object);
1440 if (object == NULL ||
1441 (object->resident_page_count == 0 && object->swblock_count == 0)) {
1442 vm_object_drop(object);
1445 KASSERT(object->type != OBJT_PHYS,
1446 ("attempt to remove pages from a physical object"));
1449 * Indicate that paging is occurring on the object
1451 vm_object_pip_add(object, 1);
1454 * Figure out the actual removal range and whether we are removing
1455 * the entire contents of the object or not. If removing the entire
1456 * contents, be sure to get all pages, even those that might be
1457 * beyond the end of the object.
1459 info.object = object;
1460 info.start_pindex = start;
1462 info.end_pindex = (vm_pindex_t)-1;
1464 info.end_pindex = end - 1;
1465 info.limit = clean_only;
1467 all = (start == 0 && info.end_pindex >= object->size - 1);
1470 * Loop until we are sure we have gotten them all.
/* The callback sets info.error when it had to sleep; rescan until clean. */
1474 vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
1475 vm_object_page_remove_callback, &info);
1476 } while (info.error);
1479 * Remove any related swap if throwing away pages, or for
1480 * non-swap objects (the swap is a clean copy in that case).
1482 if (object->type != OBJT_SWAP || clean_only == FALSE) {
1484 swap_pager_freespace_all(object);
1486 swap_pager_freespace(object, info.start_pindex,
1487 info.end_pindex - info.start_pindex + 1);
1493 vm_object_pip_wakeup(object);
1494 vm_object_drop(object);
1498 * The caller must hold the object.
1500 * NOTE: User yields are allowed when removing more than one page, but not
1501 * allowed if only removing one page (the path for single page removals
1502 * might hold a spinlock).
1505 vm_object_page_remove_callback(vm_page_t p, void *data)
1507 struct rb_vm_page_scan_info *info = data;
/* Sanity: the page must still belong to our object and be in range. */
1509 if (info->object != p->object ||
1510 p->pindex < info->start_pindex ||
1511 p->pindex > info->end_pindex) {
1512 kprintf("vm_object_page_remove_callbackA: obj/pg race %p/%p\n",
/*
 * Busy the page; on contention sleep until it is released and let the
 * caller rescan (retry plumbing via info->error -- presumed, the
 * intervening lines are not visible here).
 */
1516 if (vm_page_busy_try(p, TRUE)) {
1517 vm_page_sleep_busy(p, TRUE, "vmopar");
/* Re-check after the busy; the page may have changed objects. */
1521 if (info->object != p->object) {
1522 /* this should never happen */
1523 kprintf("vm_object_page_remove_callbackB: obj/pg race %p/%p\n",
1530 * Wired pages cannot be destroyed, but they can be invalidated
1531 * and we do so if clean_only (limit) is not set.
1533 * WARNING! The page may be wired due to being part of a buffer
1534 * cache buffer, and the buffer might be marked B_CACHE.
1535 * This is fine as part of a truncation but VFSs must be
1536 * sure to fix the buffer up when re-extending the file.
1538 * NOTE! PG_NEED_COMMIT is ignored.
1540 if (p->wire_count != 0) {
1541 vm_page_protect(p, VM_PROT_NONE);
1542 if (info->limit == 0)
1549 * limit is our clean_only flag. If set and the page is dirty or
1550 * requires a commit, do not free it. If set and the page is being
1551 * held by someone, do not free it.
1553 if (info->limit && p->valid) {
1554 vm_page_test_dirty(p);
1555 if ((p->valid & p->dirty) || (p->flags & PG_NEED_COMMIT)) {
1564 vm_page_protect(p, VM_PROT_NONE);
1568 * Must be at end to avoid SMP races, caller holds object token
1571 if ((++info->count & 63) == 0)
1578 * Try to extend prev_object into an adjoining region of virtual
1579 * memory, return TRUE on success.
1581 * The caller does not need to hold (prev_object) but must have a stable
1582 * pointer to it (typically by holding the vm_map locked).
1584 * This function only works for anonymous memory objects which either
1585 * have (a) one reference or (b) we are extending the object's size.
1586 * Otherwise the related VM pages we want to use for the object might
1587 * be in use by another mapping.
1590 vm_object_coalesce(vm_object_t prev_object, vm_pindex_t prev_pindex,
1591 vm_size_t prev_size, vm_size_t next_size)
1593 vm_pindex_t next_pindex;
1595 if (prev_object == NULL)
1598 vm_object_hold(prev_object);
/* Only anonymous (default/swap) objects can be coalesced. */
1600 if (prev_object->type != OBJT_DEFAULT &&
1601 prev_object->type != OBJT_SWAP) {
1602 vm_object_drop(prev_object);
1607 /* caller now checks this */
1609 * Try to collapse the object first
1611 vm_object_collapse(prev_object, NULL);
1615 /* caller now checks this */
1617 * We can't coalesce if we shadow another object (figuring out the
1618 * relationships become too complex).
/*
 * NOTE(review): the chain release appears only on this exit path;
 * presumably a chain lock was acquired on a line not visible here --
 * verify against the full source.
 */
1620 if (prev_object->backing_object != NULL) {
1621 vm_object_chain_release(prev_object);
1622 vm_object_drop(prev_object);
/* Convert byte sizes to page indices. */
1627 prev_size >>= PAGE_SHIFT;
1628 next_size >>= PAGE_SHIFT;
1629 next_pindex = prev_pindex + prev_size;
1632 * We can't if the object has more than one ref count unless we
1633 * are extending it into newly minted space.
1635 if (prev_object->ref_count > 1 &&
1636 prev_object->size != next_pindex) {
1637 vm_object_drop(prev_object);
1642 * Remove any pages that may still be in the object from a previous
1645 if (next_pindex < prev_object->size) {
1646 vm_object_page_remove(prev_object,
1648 next_pindex + next_size, FALSE);
1649 if (prev_object->type == OBJT_SWAP)
1650 swap_pager_freespace(prev_object,
1651 next_pindex, next_size);
1655 * Extend the object if necessary.
1657 if (next_pindex + next_size > prev_object->size)
1658 prev_object->size = next_pindex + next_size;
1659 vm_object_drop(prev_object);
1665 * Make the object writable and flag it as being possibly dirty.
1667 * The object might not be held (or might be held but held shared),
1668 * the related vnode is probably not held either. Object and vnode are
1669 * stable by virtue of the vm_page busied by the caller preventing
1672 * If the related mount is flagged MNTK_THR_SYNC we need to call
1673 * vsetobjdirty(). Filesystems using this option usually shortcut
1674 * synchronization by only scanning the syncer list.
1677 vm_object_set_writeable_dirty(vm_object_t object)
1681 /*vm_object_assert_held(object);*/
1683 * Avoid contention in vm fault path by checking the state before
1684 * issuing an atomic op on it.
1686 if ((object->flags & (OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY)) !=
1687 (OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY)) {
1688 vm_object_set_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
/* Propagate the dirty state to the backing vnode, if any. */
1690 if (object->type == OBJT_VNODE &&
1691 (vp = (struct vnode *)object->handle) != NULL) {
1692 if ((vp->v_flag & VOBJDIRTY) == 0) {
1694 (vp->v_mount->mnt_kern_flag & MNTK_THR_SYNC)) {
1696 * New style THR_SYNC places vnodes on the
1697 * syncer list more deterministically.
1702 * Old style scan would not necessarily place
1703 * a vnode on the syncer list when possibly
1704 * modified via mmap.
1706 vsetflags(vp, VOBJDIRTY);
1712 #include "opt_ddb.h"
1714 #include <sys/cons.h>
1716 #include <ddb/ddb.h>
1718 static int _vm_object_in_map (vm_map_t map, vm_object_t object,
1719 vm_map_entry_t entry);
1720 static int vm_object_in_map (vm_object_t object);
1723 * The caller must hold the object.
1726 _vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
1728 vm_map_backing_t ba;
1730 vm_map_entry_t tmpe;
/* With no specific entry, recurse over every entry in the map. */
1735 if (entry == NULL) {
1736 tmpe = RB_MIN(vm_map_rb_tree, &map->rb_root);
1737 entcount = map->nentries;
1738 while (entcount-- && tmpe) {
1739 if( _vm_object_in_map(map, object, tmpe)) {
1742 tmpe = vm_map_rb_tree_RB_NEXT(tmpe);
1746 switch(entry->maptype) {
1747 case VM_MAPTYPE_SUBMAP:
/* Recurse into each entry of the submap. */
1748 tmpm = entry->ba.sub_map;
1749 tmpe = RB_MIN(vm_map_rb_tree, &tmpm->rb_root);
1750 entcount = tmpm->nentries;
1751 while (entcount-- && tmpe) {
1752 if( _vm_object_in_map(tmpm, object, tmpe)) {
1755 tmpe = vm_map_rb_tree_RB_NEXT(tmpe);
1758 case VM_MAPTYPE_NORMAL:
1759 case VM_MAPTYPE_VPAGETABLE:
/* Walk the entry's backing chain looking for the object. */
1762 if (ba->object == object)
1764 ba = ba->backing_ba;
1773 static int vm_object_in_map_callback(struct proc *p, void *data);
1775 struct vm_object_in_map_info {
1784 vm_object_in_map(vm_object_t object)
1786 struct vm_object_in_map_info info;
1789 info.object = object;
/* Scan every process vmspace, then the kernel-related maps. */
1791 allproc_scan(vm_object_in_map_callback, &info, 0);
1794 if( _vm_object_in_map(&kernel_map, object, 0))
1796 if( _vm_object_in_map(&pager_map, object, 0))
1798 if( _vm_object_in_map(&buffer_map, object, 0))
/* allproc_scan callback: check one process's vm_map for the object. */
1807 vm_object_in_map_callback(struct proc *p, void *data)
1809 struct vm_object_in_map_info *info = data;
1812 if (_vm_object_in_map(&p->p_vmspace->vm_map, info->object, 0)) {
1820 DB_SHOW_COMMAND(vmochk, vm_object_check)
1822 struct vm_object_hash *hash;
1827 * make sure that internal objs are in a map somewhere
1828 * and none have zero ref counts.
1830 for (n = 0; n < VMOBJ_HSIZE; ++n) {
1831 hash = &vm_object_hash[n];
1832 for (object = TAILQ_FIRST(&hash->list);
1834 object = TAILQ_NEXT(object, object_list)) {
1835 if (object->type == OBJT_MARKER)
/* Only anonymous (internal) objects are of interest here. */
1837 if (object->handle != NULL ||
1838 (object->type != OBJT_DEFAULT &&
1839 object->type != OBJT_SWAP)) {
1842 if (object->ref_count == 0) {
1843 db_printf("vmochk: internal obj has "
1844 "zero ref count: %ld\n",
1845 (long)object->size);
1847 if (vm_object_in_map(object))
1849 db_printf("vmochk: internal obj is not in a map: "
1850 "ref: %d, size: %lu: 0x%lx\n",
/*
 * NOTE(review): object->size is passed for both %lu and 0x%lx;
 * the second argument may have been intended as a different
 * field -- verify against the full source before changing.
 */
1851 object->ref_count, (u_long)object->size,
1852 (u_long)object->size);
1860 DB_SHOW_COMMAND(object, vm_object_print_static)
1862 /* XXX convert args. */
1863 vm_object_t object = (vm_object_t)addr;
1864 boolean_t full = have_addr;
1868 /* XXX count is an (unused) arg. Avoid shadowing it. */
1869 #define count was_count
1877 "Object %p: type=%d, size=0x%lx, res=%ld, ref=%d, flags=0x%x\n",
1878 object, (int)object->type, (u_long)object->size,
1879 object->resident_page_count, object->ref_count, object->flags);
1881 * XXX no %qd in kernel. Truncate object->backing_object_offset.
/* Dump each resident page (off/phys pairs), wrapping periodically. */
1890 RB_FOREACH(p, vm_page_rb_tree, &object->rb_memq) {
1892 db_iprintf("memory:=");
1893 else if (count == 6) {
1901 db_printf("(off=0x%lx,page=0x%lx)",
1902 (u_long) p->pindex, (u_long) VM_PAGE_TO_PHYS(p));
1913 * XXX need this non-static entry for calling from vm_map_print.
/* Non-static wrapper: forwards directly to vm_object_print_static(). */
1918 vm_object_print(/* db_expr_t */ long addr,
1919 boolean_t have_addr,
1920 /* db_expr_t */ long count,
1923 vm_object_print_static(addr, have_addr, count, modif);
1929 DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
1931 struct vm_object_hash *hash;
1937 for (n = 0; n < VMOBJ_HSIZE; ++n) {
1938 hash = &vm_object_hash[n];
1939 for (object = TAILQ_FIRST(&hash->list);
1941 object = TAILQ_NEXT(object, object_list)) {
1942 vm_pindex_t idx, fidx;
1944 vm_paddr_t pa = -1, padiff;
1948 if (object->type == OBJT_MARKER)
1950 db_printf("new object: %p\n", (void *)object);
1960 osize = object->size;
1963 for (idx = 0; idx < osize; idx++) {
1964 m = vm_page_lookup(object, idx);
1967 db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
1968 (long)fidx, rcount, (long)pa);
1982 (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
1987 padiff = pa + rcount * PAGE_SIZE - VM_PAGE_TO_PHYS(m);
1988 padiff >>= PAGE_SHIFT;
1989 padiff &= PQ_L2_MASK;
1991 pa = VM_PAGE_TO_PHYS(m) - rcount * PAGE_SIZE;
1995 db_printf(" index(%ld)run(%d)pa(0x%lx)",
1996 (long)fidx, rcount, (long)pa);
1997 db_printf("pd(%ld)\n", (long)padiff);
2007 pa = VM_PAGE_TO_PHYS(m);
2011 db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
2012 (long)fidx, rcount, (long)pa);