[dragonfly.git] / sys / vm / vm_object.c
1 /*
2  * Copyright (c) 1991, 1993, 2013
3  *      The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * The Mach Operating System project at Carnegie-Mellon University.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of the University nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  *      from: @(#)vm_object.c   8.5 (Berkeley) 3/22/94
33  *
34  *
35  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
36  * All rights reserved.
37  *
38  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
39  *
40  * Permission to use, copy, modify and distribute this software and
41  * its documentation is hereby granted, provided that both the copyright
42  * notice and this permission notice appear in all copies of the
43  * software, derivative works or modified versions, and any portions
44  * thereof, and that both notices appear in supporting documentation.
45  *
46  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
47  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
48  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
49  *
50  * Carnegie Mellon requests users of this software to return to
51  *
52  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
53  *  School of Computer Science
54  *  Carnegie Mellon University
55  *  Pittsburgh PA 15213-3890
56  *
57  * any improvements or extensions that they make and grant Carnegie the
58  * rights to redistribute these changes.
59  *
60  * $FreeBSD: src/sys/vm/vm_object.c,v 1.171.2.8 2003/05/26 19:17:56 alc Exp $
61  */
62
63 /*
64  *      Virtual memory object module.
65  */
66
67 #include <sys/param.h>
68 #include <sys/systm.h>
69 #include <sys/proc.h>           /* for curproc, pageproc */
70 #include <sys/thread.h>
71 #include <sys/vnode.h>
72 #include <sys/vmmeter.h>
73 #include <sys/mman.h>
74 #include <sys/mount.h>
75 #include <sys/kernel.h>
76 #include <sys/sysctl.h>
77 #include <sys/refcount.h>
78
79 #include <vm/vm.h>
80 #include <vm/vm_param.h>
81 #include <vm/pmap.h>
82 #include <vm/vm_map.h>
83 #include <vm/vm_object.h>
84 #include <vm/vm_page.h>
85 #include <vm/vm_pageout.h>
86 #include <vm/vm_pager.h>
87 #include <vm/swap_pager.h>
88 #include <vm/vm_kern.h>
89 #include <vm/vm_extern.h>
90 #include <vm/vm_zone.h>
91
92 #include <vm/vm_page2.h>
93
94 #include <machine/specialreg.h>
95
96 #define EASY_SCAN_FACTOR        8
97
98 static void     vm_object_qcollapse(vm_object_t object,
99                                     vm_object_t backing_object);
100 static void     vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
101                                              int pagerflags);
102 static void     vm_object_lock_init(vm_object_t);
103
104
105 /*
106  *      Virtual memory objects maintain the actual data
107  *      associated with allocated virtual memory.  A given
108  *      page of memory exists within exactly one object.
109  *
110  *      An object is only deallocated when all "references"
111  *      are given up.  Only one "reference" to a given
112  *      region of an object should be writeable.
113  *
114  *      Associated with each object is a list of all resident
115  *      memory pages belonging to that object; this list is
116  *      maintained by the "vm_page" module, and locked by the object's
117  *      lock.
118  *
119  *      Each object also records a "pager" routine which is
120  *      used to retrieve (and store) pages to the proper backing
121  *      storage.  In addition, objects may be backed by other
122  *      objects from which they were virtual-copied.
123  *
124  *      The only items within the object structure which are
125  *      modified after time of creation are:
126  *              reference count         locked by object's lock
127  *              pager routine           locked by object's lock
128  *
129  */
130
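/*
 * Usage sketch (hypothetical caller; "size" is a byte count supplied by
 * the caller): the nominal lifecycle of an anonymous object using the
 * routines defined below.
 *
 *	vm_object_t obj;
 *
 *	obj = vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(size));
 *	vm_object_hold(obj);			- acquire token + hold_count
 *	vm_object_reference_locked(obj);	- add a ref while held
 *	vm_object_drop(obj);			- release token and hold
 *	...
 *	vm_object_deallocate(obj);		- drop the extra ref
 *	vm_object_deallocate(obj);		- last ref, terminates object
 */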
131 struct vm_object kernel_object;
132
133 static long vm_object_count;
134
135 static long object_collapses;
136 static long object_bypasses;
137 static int next_index;
138 static vm_zone_t obj_zone;
139 static struct vm_zone obj_zone_store;
140 #define VM_OBJECTS_INIT 256
141 static struct vm_object vm_objects_init[VM_OBJECTS_INIT];
142
143 struct object_q vm_object_lists[VMOBJ_HSIZE];
144 struct lwkt_token vmobj_tokens[VMOBJ_HSIZE];
145
146 #if defined(DEBUG_LOCKS)
147
148 #define vm_object_vndeallocate(obj, vpp)        \
149                 debugvm_object_vndeallocate(obj, vpp, __FILE__, __LINE__)
150
151 /*
152  * Debug helper to track hold/drop/ref/deallocate calls.
153  */
154 static void
155 debugvm_object_add(vm_object_t obj, char *file, int line, int addrem)
156 {
157         int i;
158
159         i = atomic_fetchadd_int(&obj->debug_index, 1);
160         i = i & (VMOBJ_DEBUG_ARRAY_SIZE - 1);
161         ksnprintf(obj->debug_hold_thrs[i],
162                   sizeof(obj->debug_hold_thrs[i]),
163                   "%c%d:(%d):%s",
164                   (addrem == -1 ? '-' : (addrem == 1 ? '+' : '=')),
165                   (curthread->td_proc ? curthread->td_proc->p_pid : -1),
166                   obj->ref_count,
167                   curthread->td_comm);
168         obj->debug_hold_file[i] = file;
169         obj->debug_hold_line[i] = line;
170 #if 0
 171         /* Uncomment for debugging obj refs/derefs in reproducible cases */
172         if (strcmp(curthread->td_comm, "sshd") == 0) {
173                 kprintf("%d %p refs=%d ar=%d file: %s/%d\n",
174                         (curthread->td_proc ? curthread->td_proc->p_pid : -1),
175                         obj, obj->ref_count, addrem, file, line);
176         }
177 #endif
178 }
179
180 #endif
181
182 /*
183  * Misc low level routines
184  */
185 static void
186 vm_object_lock_init(vm_object_t obj)
187 {
188 #if defined(DEBUG_LOCKS)
189         int i;
190
191         obj->debug_index = 0;
192         for (i = 0; i < VMOBJ_DEBUG_ARRAY_SIZE; i++) {
193                 obj->debug_hold_thrs[i][0] = 0;
194                 obj->debug_hold_file[i] = NULL;
195                 obj->debug_hold_line[i] = 0;
196         }
197 #endif
198 }
199
200 void
201 vm_object_lock_swap(void)
202 {
203         lwkt_token_swap();
204 }
205
206 void
207 vm_object_lock(vm_object_t obj)
208 {
209         lwkt_gettoken(&obj->token);
210 }
211
212 /*
 213  * Returns TRUE on success
214  */
215 static int
216 vm_object_lock_try(vm_object_t obj)
217 {
218         return(lwkt_trytoken(&obj->token));
219 }
220
221 void
222 vm_object_lock_shared(vm_object_t obj)
223 {
224         lwkt_gettoken_shared(&obj->token);
225 }
226
227 void
228 vm_object_unlock(vm_object_t obj)
229 {
230         lwkt_reltoken(&obj->token);
231 }
232
233 void
234 vm_object_upgrade(vm_object_t obj)
235 {
236         lwkt_reltoken(&obj->token);
237         lwkt_gettoken(&obj->token);
238 }
239
240 void
241 vm_object_downgrade(vm_object_t obj)
242 {
243         lwkt_reltoken(&obj->token);
244         lwkt_gettoken_shared(&obj->token);
245 }
246
247 static __inline void
248 vm_object_assert_held(vm_object_t obj)
249 {
250         ASSERT_LWKT_TOKEN_HELD(&obj->token);
251 }
252
253 void
254 VMOBJDEBUG(vm_object_hold)(vm_object_t obj VMOBJDBARGS)
255 {
256         KKASSERT(obj != NULL);
257
258         /*
 259          * Object must be held (object allocation is stable due to the caller's
260          * context, typically already holding the token on a parent object)
261          * prior to potentially blocking on the lock, otherwise the object
262          * can get ripped away from us.
263          */
264         refcount_acquire(&obj->hold_count);
265         vm_object_lock(obj);
266
267 #if defined(DEBUG_LOCKS)
268         debugvm_object_add(obj, file, line, 1);
269 #endif
270 }
271
272 int
273 VMOBJDEBUG(vm_object_hold_try)(vm_object_t obj VMOBJDBARGS)
274 {
275         KKASSERT(obj != NULL);
276
277         /*
 278          * Object must be held (object allocation is stable due to the caller's
279          * context, typically already holding the token on a parent object)
280          * prior to potentially blocking on the lock, otherwise the object
281          * can get ripped away from us.
282          */
283         refcount_acquire(&obj->hold_count);
284         if (vm_object_lock_try(obj) == 0) {
285                 if (refcount_release(&obj->hold_count)) {
286                         if (obj->ref_count == 0 && (obj->flags & OBJ_DEAD))
287                                 zfree(obj_zone, obj);
288                 }
289                 return(0);
290         }
291
292 #if defined(DEBUG_LOCKS)
293         debugvm_object_add(obj, file, line, 1);
294 #endif
295         return(1);
296 }
297
298 void
299 VMOBJDEBUG(vm_object_hold_shared)(vm_object_t obj VMOBJDBARGS)
300 {
301         KKASSERT(obj != NULL);
302
303         /*
 304          * Object must be held (object allocation is stable due to the caller's
305          * context, typically already holding the token on a parent object)
306          * prior to potentially blocking on the lock, otherwise the object
307          * can get ripped away from us.
308          */
309         refcount_acquire(&obj->hold_count);
310         vm_object_lock_shared(obj);
311
312 #if defined(DEBUG_LOCKS)
313         debugvm_object_add(obj, file, line, 1);
314 #endif
315 }
316
317 /*
318  * Drop the token and hold_count on the object.
319  *
320  * WARNING! Token might be shared.
321  */
322 void
323 VMOBJDEBUG(vm_object_drop)(vm_object_t obj VMOBJDBARGS)
324 {
325         if (obj == NULL)
326                 return;
327
328         /*
329          * No new holders should be possible once we drop hold_count 1->0 as
330          * there is no longer any way to reference the object.
331          */
332         KKASSERT(obj->hold_count > 0);
333         if (refcount_release(&obj->hold_count)) {
334 #if defined(DEBUG_LOCKS)
335                 debugvm_object_add(obj, file, line, -1);
336 #endif
337
338                 if (obj->ref_count == 0 && (obj->flags & OBJ_DEAD)) {
339                         vm_object_unlock(obj);
340                         zfree(obj_zone, obj);
341                 } else {
342                         vm_object_unlock(obj);
343                 }
344         } else {
345 #if defined(DEBUG_LOCKS)
346                 debugvm_object_add(obj, file, line, -1);
347 #endif
348                 vm_object_unlock(obj);
349         }
350 }
351
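/*
 * Pairing sketch (hypothetical caller): hold_count is what keeps the
 * object from being zfree()d while the caller blocks on the token, so
 * every hold must be balanced by a drop once the caller is done:
 *
 *	vm_object_hold(obj);
 *	...operate on obj, possibly blocking...
 *	vm_object_drop(obj);		- may zfree() a dead object
 */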
352 /*
353  * Initialize a freshly allocated object, returning a held object.
354  *
355  * Used only by vm_object_allocate() and zinitna().
356  *
357  * No requirements.
358  */
359 void
360 _vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
361 {
362         int incr;
363         int n;
364
365         RB_INIT(&object->rb_memq);
366         LIST_INIT(&object->shadow_head);
367         lwkt_token_init(&object->token, "vmobj");
368
369         object->type = type;
370         object->size = size;
371         object->ref_count = 1;
372         object->memattr = VM_MEMATTR_DEFAULT;
373         object->hold_count = 0;
374         object->flags = 0;
375         if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP))
376                 vm_object_set_flag(object, OBJ_ONEMAPPING);
377         object->paging_in_progress = 0;
378         object->resident_page_count = 0;
379         object->agg_pv_list_count = 0;
380         object->shadow_count = 0;
381         /* cpu localization twist */
382         object->pg_color = (int)(intptr_t)curthread;
 383         if (size > (PQ_L2_SIZE / 3 + PQ_PRIME1))
384                 incr = PQ_L2_SIZE / 3 + PQ_PRIME1;
385         else
386                 incr = size;
387         next_index = (next_index + incr) & PQ_L2_MASK;
388         object->handle = NULL;
389         object->backing_object = NULL;
390         object->backing_object_offset = (vm_ooffset_t)0;
391
392         object->generation++;
393         object->swblock_count = 0;
394         RB_INIT(&object->swblock_root);
395         vm_object_lock_init(object);
396         pmap_object_init(object);
397
398         vm_object_hold(object);
399
400         n = VMOBJ_HASH(object);
401         atomic_add_long(&vm_object_count, 1);
402         lwkt_gettoken(&vmobj_tokens[n]);
403         TAILQ_INSERT_TAIL(&vm_object_lists[n], object, object_list);
404         lwkt_reltoken(&vmobj_tokens[n]);
405 }
406
407 /*
408  * Initialize the VM objects module.
409  *
410  * Called from the low level boot code only.
411  */
412 void
413 vm_object_init(void)
414 {
415         int i;
416
417         for (i = 0; i < VMOBJ_HSIZE; ++i) {
418                 TAILQ_INIT(&vm_object_lists[i]);
419                 lwkt_token_init(&vmobj_tokens[i], "vmobjlst");
420         }
 421
422         _vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(KvaEnd),
423                             &kernel_object);
424         vm_object_drop(&kernel_object);
425
426         obj_zone = &obj_zone_store;
427         zbootinit(obj_zone, "VM OBJECT", sizeof (struct vm_object),
428                 vm_objects_init, VM_OBJECTS_INIT);
429 }
430
431 void
432 vm_object_init2(void)
433 {
434         zinitna(obj_zone, NULL, NULL, 0, 0, ZONE_PANICFAIL, 1);
435 }
436
437 /*
438  * Allocate and return a new object of the specified type and size.
439  *
440  * No requirements.
441  */
442 vm_object_t
443 vm_object_allocate(objtype_t type, vm_pindex_t size)
444 {
445         vm_object_t result;
446
447         result = (vm_object_t) zalloc(obj_zone);
448
449         _vm_object_allocate(type, size, result);
450         vm_object_drop(result);
451
452         return (result);
453 }
454
455 /*
456  * This version returns a held object, allowing further atomic initialization
457  * of the object.
458  */
459 vm_object_t
460 vm_object_allocate_hold(objtype_t type, vm_pindex_t size)
461 {
462         vm_object_t result;
463
464         result = (vm_object_t) zalloc(obj_zone);
465
466         _vm_object_allocate(type, size, result);
467
468         return (result);
469 }
470
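/*
 * Sketch of the held-allocation pattern (hypothetical caller and
 * handle): the hold returned by vm_object_allocate_hold() lets the
 * caller finish initializing the object before any other thread can
 * lock it:
 *
 *	obj = vm_object_allocate_hold(OBJT_SWAP, OFF_TO_IDX(size));
 *	obj->handle = handle;		- hypothetical post-initialization
 *	vm_object_drop(obj);
 */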
471 /*
472  * Add an additional reference to a vm_object.  The object must already be
473  * held.  The original non-lock version is no longer supported.  The object
474  * must NOT be chain locked by anyone at the time the reference is added.
475  *
476  * Referencing a chain-locked object can blow up the fairly sensitive
477  * ref_count and shadow_count tests in the deallocator.  Most callers
478  * will call vm_object_chain_wait() prior to calling
479  * vm_object_reference_locked() to avoid the case.
480  *
481  * The object must be held, but may be held shared if desired (hence why
482  * we use an atomic op).
483  */
484 void
485 VMOBJDEBUG(vm_object_reference_locked)(vm_object_t object VMOBJDBARGS)
486 {
487         KKASSERT(object != NULL);
488         ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
489         KKASSERT((object->chainlk & (CHAINLK_EXCL | CHAINLK_MASK)) == 0);
490         atomic_add_int(&object->ref_count, 1);
491         if (object->type == OBJT_VNODE) {
492                 vref(object->handle);
493                 /* XXX what if the vnode is being destroyed? */
494         }
495 #if defined(DEBUG_LOCKS)
496         debugvm_object_add(object, file, line, 1);
497 #endif
498 }
499
500 /*
501  * This version is only allowed for vnode objects.
502  */
503 void
504 VMOBJDEBUG(vm_object_reference_quick)(vm_object_t object VMOBJDBARGS)
505 {
506         KKASSERT(object->type == OBJT_VNODE);
507         atomic_add_int(&object->ref_count, 1);
508         vref(object->handle);
509 #if defined(DEBUG_LOCKS)
510         debugvm_object_add(object, file, line, 1);
511 #endif
512 }
513
514 /*
515  * Object OBJ_CHAINLOCK lock handling.
516  *
517  * The caller can chain-lock backing objects recursively and then
518  * use vm_object_chain_release_all() to undo the whole chain.
519  *
520  * Chain locks are used to prevent collapses and are only applicable
521  * to OBJT_DEFAULT and OBJT_SWAP objects.  Chain locking operations
522  * on other object types are ignored.  This is also important because
523  * it allows e.g. the vnode underlying a memory mapping to take concurrent
524  * faults.
525  *
526  * The object must usually be held on entry, though intermediate
527  * objects need not be held on release.  The object must be held exclusively,
528  * NOT shared.  Note that the prefault path checks the shared state and
529  * avoids using the chain functions.
530  */
531 void
532 vm_object_chain_wait(vm_object_t object, int shared)
533 {
534         ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
535         for (;;) {
536                 uint32_t chainlk = object->chainlk;
537
538                 cpu_ccfence();
539                 if (shared) {
540                         if (chainlk & (CHAINLK_EXCL | CHAINLK_EXCLREQ)) {
541                                 tsleep_interlock(object, 0);
542                                 if (atomic_cmpset_int(&object->chainlk,
543                                                       chainlk,
544                                                       chainlk | CHAINLK_WAIT)) {
545                                         tsleep(object, PINTERLOCKED,
546                                                "objchns", 0);
547                                 }
548                                 /* retry */
549                         } else {
550                                 break;
551                         }
552                         /* retry */
553                 } else {
554                         if (chainlk & (CHAINLK_MASK | CHAINLK_EXCL)) {
555                                 tsleep_interlock(object, 0);
556                                 if (atomic_cmpset_int(&object->chainlk,
557                                                       chainlk,
558                                                       chainlk | CHAINLK_WAIT))
559                                 {
560                                         tsleep(object, PINTERLOCKED,
561                                                "objchnx", 0);
562                                 }
563                                 /* retry */
564                         } else {
565                                 if (atomic_cmpset_int(&object->chainlk,
566                                                       chainlk,
567                                                       chainlk & ~CHAINLK_WAIT))
568                                 {
569                                         if (chainlk & CHAINLK_WAIT)
570                                                 wakeup(object);
571                                         break;
572                                 }
573                                 /* retry */
574                         }
575                 }
576                 /* retry */
577         }
578 }
579
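/*
 * The loop above is the cmpset/interlock wait pattern used throughout
 * this file.  Simplified sketch ("state", WANT and BUSY are
 * illustrative, not real flags):
 *
 *	for (;;) {
 *		uint32_t v = *state;
 *
 *		cpu_ccfence();
 *		if (v & BUSY) {
 *			tsleep_interlock(obj, 0);
 *			if (atomic_cmpset_int(state, v, v | WANT))
 *				tsleep(obj, PINTERLOCKED, "wmesg", 0);
 *			- retry
 *		} else if (atomic_cmpset_int(state, v, v | BUSY)) {
 *			break;
 *		}
 *	}
 */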
580 void
581 vm_object_chain_acquire(vm_object_t object, int shared)
582 {
583         if (object->type != OBJT_DEFAULT && object->type != OBJT_SWAP)
584                 return;
585         if (vm_shared_fault == 0)
586                 shared = 0;
587
588         for (;;) {
589                 uint32_t chainlk = object->chainlk;
590
591                 cpu_ccfence();
592                 if (shared) {
593                         if (chainlk & (CHAINLK_EXCL | CHAINLK_EXCLREQ)) {
594                                 tsleep_interlock(object, 0);
595                                 if (atomic_cmpset_int(&object->chainlk,
596                                                       chainlk,
597                                                       chainlk | CHAINLK_WAIT)) {
598                                         tsleep(object, PINTERLOCKED,
599                                                "objchns", 0);
600                                 }
601                                 /* retry */
602                         } else if (atomic_cmpset_int(&object->chainlk,
603                                               chainlk, chainlk + 1)) {
604                                 break;
605                         }
606                         /* retry */
607                 } else {
608                         if (chainlk & (CHAINLK_MASK | CHAINLK_EXCL)) {
609                                 tsleep_interlock(object, 0);
610                                 if (atomic_cmpset_int(&object->chainlk,
611                                                       chainlk,
612                                                       chainlk |
613                                                        CHAINLK_WAIT |
614                                                        CHAINLK_EXCLREQ)) {
615                                         tsleep(object, PINTERLOCKED,
616                                                "objchnx", 0);
617                                 }
618                                 /* retry */
619                         } else {
620                                 if (atomic_cmpset_int(&object->chainlk,
621                                                       chainlk,
622                                                       (chainlk | CHAINLK_EXCL) &
623                                                       ~(CHAINLK_EXCLREQ |
624                                                         CHAINLK_WAIT))) {
625                                         if (chainlk & CHAINLK_WAIT)
626                                                 wakeup(object);
627                                         break;
628                                 }
629                                 /* retry */
630                         }
631                 }
632                 /* retry */
633         }
634 }
635
636 void
637 vm_object_chain_release(vm_object_t object)
638 {
639         /*ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));*/
640         if (object->type != OBJT_DEFAULT && object->type != OBJT_SWAP)
641                 return;
642         KKASSERT(object->chainlk & (CHAINLK_MASK | CHAINLK_EXCL));
643         for (;;) {
644                 uint32_t chainlk = object->chainlk;
645
646                 cpu_ccfence();
647                 if (chainlk & CHAINLK_MASK) {
648                         if ((chainlk & CHAINLK_MASK) == 1 &&
649                             atomic_cmpset_int(&object->chainlk,
650                                               chainlk,
651                                               (chainlk - 1) & ~CHAINLK_WAIT)) {
652                                 if (chainlk & CHAINLK_WAIT)
653                                         wakeup(object);
654                                 break;
655                         }
656                         if ((chainlk & CHAINLK_MASK) > 1 &&
657                             atomic_cmpset_int(&object->chainlk,
658                                               chainlk, chainlk - 1)) {
659                                 break;
660                         }
661                         /* retry */
662                 } else {
663                         KKASSERT(chainlk & CHAINLK_EXCL);
664                         if (atomic_cmpset_int(&object->chainlk,
665                                               chainlk,
666                                               chainlk & ~(CHAINLK_EXCL |
667                                                           CHAINLK_WAIT))) {
668                                 if (chainlk & CHAINLK_WAIT)
669                                         wakeup(object);
670                                 break;
671                         }
672                 }
673         }
674 }
675
676 /*
677  * Release the chain from first_object through and including stopobj.
678  * The caller is typically holding the first and last object locked
679  * (shared or exclusive) to prevent destruction races.
680  *
 681  * We release stopobj first as an optimization because this object is most
682  * likely to be shared across multiple processes.
683  */
684 void
685 vm_object_chain_release_all(vm_object_t first_object, vm_object_t stopobj)
686 {
687         vm_object_t backing_object;
688         vm_object_t object;
689
690         vm_object_chain_release(stopobj);
691         object = first_object;
692
693         while (object != stopobj) {
694                 KKASSERT(object);
695                 backing_object = object->backing_object;
696                 vm_object_chain_release(object);
697                 object = backing_object;
698         }
699 }
700
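/*
 * Chain-lock usage sketch (hypothetical, modeled on the deallocate and
 * collapse paths below):
 *
 *	vm_object_hold(object);			- exclusive token
 *	vm_object_chain_acquire(object, 0);	- blocks collapses
 *	...collapse or scan the shadow chain...
 *	vm_object_chain_release(object);
 *	vm_object_drop(object);
 */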
701 /*
702  * Dereference an object and its underlying vnode.  The object may be
703  * held shared.  On return the object will remain held.
704  *
705  * This function may return a vnode in *vpp which the caller must release
706  * after the caller drops its own lock.  If vpp is NULL, we assume that
707  * the caller was holding an exclusive lock on the object and we vrele()
708  * the vp ourselves.
709  */
710 static void
711 VMOBJDEBUG(vm_object_vndeallocate)(vm_object_t object, struct vnode **vpp
712                                    VMOBJDBARGS)
713 {
714         struct vnode *vp = (struct vnode *) object->handle;
715
716         KASSERT(object->type == OBJT_VNODE,
717             ("vm_object_vndeallocate: not a vnode object"));
718         KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
719         ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
720 #ifdef INVARIANTS
721         if (object->ref_count == 0) {
722                 vprint("vm_object_vndeallocate", vp);
723                 panic("vm_object_vndeallocate: bad object reference count");
724         }
725 #endif
726         for (;;) {
727                 int count = object->ref_count;
728                 cpu_ccfence();
729                 if (count == 1) {
730                         vm_object_upgrade(object);
731                         if (atomic_cmpset_int(&object->ref_count, count, 0)) {
732                                 vclrflags(vp, VTEXT);
733                                 break;
734                         }
735                 } else {
736                         if (atomic_cmpset_int(&object->ref_count,
737                                               count, count - 1)) {
738                                 break;
739                         }
740                 }
741                 /* retry */
742         }
743 #if defined(DEBUG_LOCKS)
744         debugvm_object_add(object, file, line, -1);
745 #endif
746
747         /*
 748          * vrele the vp here, or return it for the caller to vrele.  We can
 749          * only safely vrele(vp) if the object was locked exclusively.  But
 750          * there are two races here.
751          *
752          * We had to upgrade the object above to safely clear VTEXT
753          * but the alternative path where the shared lock is retained
754          * can STILL race to 0 in other paths and cause our own vrele()
755          * to terminate the vnode.  We can't allow that if the VM object
756          * is still locked shared.
757          */
758         if (vpp)
759                 *vpp = vp;
760         else
761                 vrele(vp);
762 }
763
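/*
 * Sketch of the *vpp contract, mirroring the shared-lock path in
 * vm_object_deallocate_locked() below: a caller holding the object
 * shared must drop its own lock before issuing the vrele():
 *
 *	struct vnode *tmp_vp;
 *
 *	vm_object_vndeallocate(object, &tmp_vp);
 *	vm_object_drop(object);
 *	vrele(tmp_vp);
 */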
764 /*
765  * Release a reference to the specified object, gained either through a
766  * vm_object_allocate or a vm_object_reference call.  When all references
767  * are gone, storage associated with this object may be relinquished.
768  *
769  * The caller does not have to hold the object locked but must have control
770  * over the reference in question in order to guarantee that the object
771  * does not get ripped out from under us.
772  *
773  * XXX Currently all deallocations require an exclusive lock.
774  */
775 void
776 VMOBJDEBUG(vm_object_deallocate)(vm_object_t object VMOBJDBARGS)
777 {
778         struct vnode *vp;
779         int count;
780
781         if (object == NULL)
782                 return;
783
784         for (;;) {
785                 count = object->ref_count;
786                 cpu_ccfence();
787
788                 /*
789                  * If decrementing the count enters into special handling
790                  * territory (0, 1, or 2) we have to do it the hard way.
791                  * Fortunate though, objects with only a few refs like this
 792                  * Fortunately, though, objects with only a few refs like this
793                  *
794                  * For vnode objects we only care about 1->0 transitions.
795                  */
796                 if (count <= 3 || (object->type == OBJT_VNODE && count <= 1)) {
797 #if defined(DEBUG_LOCKS)
798                         debugvm_object_add(object, file, line, 0);
799 #endif
800                         vm_object_hold(object);
801                         vm_object_deallocate_locked(object);
802                         vm_object_drop(object);
803                         break;
804                 }
805
806                 /*
807                  * Try to decrement ref_count without acquiring a hold on
808                  * the object.  This is particularly important for the exec*()
809                  * and exit*() code paths because the program binary may
810                  * have a great deal of sharing and an exclusive lock will
811                  * crowbar performance in those circumstances.
812                  */
813                 if (object->type == OBJT_VNODE) {
814                         vp = (struct vnode *)object->handle;
815                         if (atomic_cmpset_int(&object->ref_count,
816                                               count, count - 1)) {
817 #if defined(DEBUG_LOCKS)
818                                 debugvm_object_add(object, file, line, -1);
819 #endif
820
821                                 vrele(vp);
822                                 break;
823                         }
824                         /* retry */
825                 } else {
826                         if (atomic_cmpset_int(&object->ref_count,
827                                               count, count - 1)) {
828 #if defined(DEBUG_LOCKS)
829                                 debugvm_object_add(object, file, line, -1);
830 #endif
831                                 break;
832                         }
833                         /* retry */
834                 }
835                 /* retry */
836         }
837 }
838
839 void
840 VMOBJDEBUG(vm_object_deallocate_locked)(vm_object_t object VMOBJDBARGS)
841 {
842         struct vm_object_dealloc_list *dlist = NULL;
843         struct vm_object_dealloc_list *dtmp;
844         vm_object_t temp;
845         int must_drop = 0;
846
847         /*
 848          * We may chain-deallocate the object, but additional objects may
 849          * collect on the dlist, which also have to be deallocated.  We
 850          * must avoid recursion; vm_object chains can get deep.
851          */
852
853 again:
854         while (object != NULL) {
855                 /*
856                  * vnode case, caller either locked the object exclusively
857                  * or this is a recursion with must_drop != 0 and the vnode
858                  * object will be locked shared.
859                  *
860                  * If locked shared we have to drop the object before we can
861                  * call vrele() or risk a shared/exclusive livelock.
862                  */
863                 if (object->type == OBJT_VNODE) {
864                         ASSERT_LWKT_TOKEN_HELD(&object->token);
865                         if (must_drop) {
866                                 struct vnode *tmp_vp;
867
868                                 vm_object_vndeallocate(object, &tmp_vp);
869                                 vm_object_drop(object);
870                                 must_drop = 0;
871                                 object = NULL;
872                                 vrele(tmp_vp);
873                         } else {
874                                 vm_object_vndeallocate(object, NULL);
875                         }
876                         break;
877                 }
878                 ASSERT_LWKT_TOKEN_HELD_EXCL(&object->token);
879
880                 /*
881                  * Normal case (object is locked exclusively)
882                  */
883                 if (object->ref_count == 0) {
884                         panic("vm_object_deallocate: object deallocated "
885                               "too many times: %d", object->type);
886                 }
887                 if (object->ref_count > 2) {
888                         atomic_add_int(&object->ref_count, -1);
889 #if defined(DEBUG_LOCKS)
890                         debugvm_object_add(object, file, line, -1);
891 #endif
892                         break;
893                 }
894
895                 /*
 896                  * We get here with a ref_count of one or two, which are
 897                  * special cases for objects.
 898                  *
 899                  * This is the nominal ref_count > 1 case, where the second
 900                  * ref does not come from a shadow.
 901                  *
 902                  * (ONEMAPPING only applies to DEFAULT and SWAP objects)
903                  */
904                 if (object->ref_count == 2 && object->shadow_count == 0) {
905                         if (object->type == OBJT_DEFAULT ||
906                             object->type == OBJT_SWAP) {
907                                 vm_object_set_flag(object, OBJ_ONEMAPPING);
908                         }
909                         atomic_add_int(&object->ref_count, -1);
910 #if defined(DEBUG_LOCKS)
911                         debugvm_object_add(object, file, line, -1);
912 #endif
913                         break;
914                 }
915
916                 /*
917                  * If the second ref is from a shadow we chain along it
 918                  * upwards if the object's handle is exhausted.
919                  *
920                  * We have to decrement object->ref_count before potentially
921                  * collapsing the first shadow object or the collapse code
 922                  * will not be able to handle the degenerate case of removing
 923                  * the object.  However, if we do it too early the object can
924                  * get ripped out from under us.
925                  */
926                 if (object->ref_count == 2 && object->shadow_count == 1 &&
927                     object->handle == NULL && (object->type == OBJT_DEFAULT ||
928                                                object->type == OBJT_SWAP)) {
929                         temp = LIST_FIRST(&object->shadow_head);
930                         KKASSERT(temp != NULL);
931                         vm_object_hold(temp);
932
933                         /*
934                          * Wait for any paging to complete so the collapse
935                          * doesn't (or isn't likely to) qcollapse.  pip
936                          * waiting must occur before we acquire the
937                          * chainlock.
938                          */
939                         while (
940                                 temp->paging_in_progress ||
941                                 object->paging_in_progress
942                         ) {
943                                 vm_object_pip_wait(temp, "objde1");
944                                 vm_object_pip_wait(object, "objde2");
945                         }
946
947                         /*
948                          * If the parent is locked we have to give up, as
949                          * otherwise we would be acquiring locks in the
950                          * wrong order and potentially deadlock.
951                          */
952                         if (temp->chainlk & (CHAINLK_EXCL | CHAINLK_MASK)) {
953                                 vm_object_drop(temp);
954                                 goto skip;
955                         }
956                         vm_object_chain_acquire(temp, 0);
957
958                         /*
959                          * Recheck/retry after the hold and the paging
960                          * wait, both of which can block us.
961                          */
962                         if (object->ref_count != 2 ||
963                             object->shadow_count != 1 ||
964                             object->handle ||
965                             LIST_FIRST(&object->shadow_head) != temp ||
966                             (object->type != OBJT_DEFAULT &&
967                              object->type != OBJT_SWAP)) {
968                                 vm_object_chain_release(temp);
969                                 vm_object_drop(temp);
970                                 continue;
971                         }
972
973                         /*
974                          * We can safely drop object's ref_count now.
975                          */
976                         KKASSERT(object->ref_count == 2);
977                         atomic_add_int(&object->ref_count, -1);
978 #if defined(DEBUG_LOCKS)
979                         debugvm_object_add(object, file, line, -1);
980 #endif
981
982                         /*
 983                          * If our single parent is not collapsible just
984                          * decrement ref_count (2->1) and stop.
985                          */
986                         if (temp->handle || (temp->type != OBJT_DEFAULT &&
987                                              temp->type != OBJT_SWAP)) {
988                                 vm_object_chain_release(temp);
989                                 vm_object_drop(temp);
990                                 break;
991                         }
992
993                         /*
994                          * At this point we have already dropped object's
995                          * ref_count so it is possible for a race to
996                          * deallocate obj out from under us.  Any collapse
997                          * will re-check the situation.  We must not block
998                          * until we are able to collapse.
999                          *
1000                          * Bump temp's ref_count to avoid an unwanted
1001                          * degenerate recursion (can't call
1002                          * vm_object_reference_locked() because it asserts
1003                          * that CHAINLOCK is not set).
1004                          */
1005                         atomic_add_int(&temp->ref_count, 1);
1006                         KKASSERT(temp->ref_count > 1);
1007
1008                         /*
1009                          * Collapse temp, then deallocate the extra ref
1010                          * formally.
1011                          */
1012                         vm_object_collapse(temp, &dlist);
1013                         vm_object_chain_release(temp);
1014                         if (must_drop) {
1015                                 vm_object_lock_swap();
1016                                 vm_object_drop(object);
1017                         }
1018                         object = temp;
1019                         must_drop = 1;
1020                         continue;
1021                 }
1022
1023                 /*
1024                  * Drop the ref and handle termination on the 1->0 transition.
1025                  * We may have blocked above so we have to recheck.
1026                  */
1027 skip:
1028                 KKASSERT(object->ref_count != 0);
1029                 if (object->ref_count >= 2) {
1030                         atomic_add_int(&object->ref_count, -1);
1031 #if defined(DEBUG_LOCKS)
1032                         debugvm_object_add(object, file, line, -1);
1033 #endif
1034                         break;
1035                 }
1036                 KKASSERT(object->ref_count == 1);
1037
1038                 /*
1039                  * 1->0 transition.  Chain through the backing_object.
1040                  * Maintain the ref until we've located the backing object,
1041                  * then re-check.
1042                  */
1043                 while ((temp = object->backing_object) != NULL) {
1044                         if (temp->type == OBJT_VNODE)
1045                                 vm_object_hold_shared(temp);
1046                         else
1047                                 vm_object_hold(temp);
1048                         if (temp == object->backing_object)
1049                                 break;
1050                         vm_object_drop(temp);
1051                 }
1052
1053                 /*
1054                  * 1->0 transition verified, retry if ref_count is no longer
1055                  * 1.  Otherwise disconnect the backing_object (temp) and
1056                  * clean up.
1057                  */
1058                 if (object->ref_count != 1) {
1059                         vm_object_drop(temp);
1060                         continue;
1061                 }
1062
1063                 /*
1064                  * It shouldn't be possible for the object to be chain locked
1065                  * if we're removing the last ref on it.
1066                  *
1067                  * Removing object from temp's shadow list requires dropping
1068                  * temp, which we will do on loop.
1069                  *
1070                  * NOTE! vnodes do not use the shadow list, but still have
1071                  *       the backing_object reference.
1072                  */
1073                 KKASSERT((object->chainlk & (CHAINLK_EXCL|CHAINLK_MASK)) == 0);
1074
1075                 if (temp) {
1076                         if (object->flags & OBJ_ONSHADOW) {
1077                                 LIST_REMOVE(object, shadow_list);
1078                                 temp->shadow_count--;
1079                                 temp->generation++;
1080                                 vm_object_clear_flag(object, OBJ_ONSHADOW);
1081                         }
1082                         object->backing_object = NULL;
1083                 }
1084
1085                 atomic_add_int(&object->ref_count, -1);
1086                 if ((object->flags & OBJ_DEAD) == 0)
1087                         vm_object_terminate(object);
1088                 if (must_drop && temp)
1089                         vm_object_lock_swap();
1090                 if (must_drop)
1091                         vm_object_drop(object);
1092                 object = temp;
1093                 must_drop = 1;
1094         }
1095
1096         if (must_drop && object)
1097                 vm_object_drop(object);
1098
1099         /*
 1100          * Tail-recurse on the dlist to avoid deep recursion.  Objects
1101          * on the dlist have a hold count but are not locked.
1102          */
1103         if ((dtmp = dlist) != NULL) {
1104                 dlist = dtmp->next;
1105                 object = dtmp->object;
1106                 kfree(dtmp, M_TEMP);
1107
1108                 vm_object_lock(object); /* already held, add lock */
1109                 must_drop = 1;          /* and we're responsible for it */
1110                 goto again;
1111         }
1112 }
1113
1114 /*
1115  * Destroy the specified object, freeing up related resources.
1116  *
1117  * The object must have zero references.
1118  *
 1119  * The object must be held.  The caller is responsible for dropping the object
1120  * after terminate returns.  Terminate does NOT drop the object.
1121  */
1122 static int vm_object_terminate_callback(vm_page_t p, void *data);
1123
1124 void
1125 vm_object_terminate(vm_object_t object)
1126 {
1127         int n;
1128
1129         /*
1130          * Make sure no one uses us.  Once we set OBJ_DEAD we should be
1131          * able to safely block.
1132          */
1133         ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
1134         KKASSERT((object->flags & OBJ_DEAD) == 0);
1135         vm_object_set_flag(object, OBJ_DEAD);
1136
1137         /*
1138          * Wait for the pageout daemon to be done with the object
1139          */
1140         vm_object_pip_wait(object, "objtrm1");
1141
1142         KASSERT(!object->paging_in_progress,
1143                 ("vm_object_terminate: pageout in progress"));
1144
1145         /*
1146          * Clean and free the pages, as appropriate. All references to the
1147          * object are gone, so we don't need to lock it.
1148          */
1149         if (object->type == OBJT_VNODE) {
1150                 struct vnode *vp;
1151
1152                 /*
1153                  * Clean pages and flush buffers.
1154                  *
1155                  * NOTE!  TMPFS buffer flushes do not typically flush the
1156                  *        actual page to swap as this would be highly
1157                  *        inefficient, and normal filesystems usually wrap
1158                  *        page flushes with buffer cache buffers.
1159                  *
1160                  *        To deal with this we have to call vinvalbuf() both
1161                  *        before and after the vm_object_page_clean().
1162                  */
1163                 vp = (struct vnode *) object->handle;
1164                 vinvalbuf(vp, V_SAVE, 0, 0);
1165                 vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
1166                 vinvalbuf(vp, V_SAVE, 0, 0);
1167         }
1168
1169         /*
1170          * Wait for any I/O to complete, after which there had better not
1171          * be any references left on the object.
1172          */
1173         vm_object_pip_wait(object, "objtrm2");
1174
1175         if (object->ref_count != 0) {
1176                 panic("vm_object_terminate: object with references, "
1177                       "ref_count=%d", object->ref_count);
1178         }
1179
1180         /*
1181          * Cleanup any shared pmaps associated with this object.
1182          */
1183         pmap_object_free(object);
1184
1185         /*
1186          * Now free any remaining pages. For internal objects, this also
1187          * removes them from paging queues. Don't free wired pages, just
1188          * remove them from the object. 
1189          */
1190         vm_page_rb_tree_RB_SCAN(&object->rb_memq, NULL,
1191                                 vm_object_terminate_callback, NULL);
1192
1193         /*
1194          * Let the pager know object is dead.
1195          */
1196         vm_pager_deallocate(object);
1197
1198         /*
1199          * Wait for the object hold count to hit 1, clean out pages as
1200          * we go.  vmobj_token interlocks any race conditions that might
1201          * pick the object up from the vm_object_list after we have cleared
1202          * rb_memq.
1203          */
1204         for (;;) {
1205                 if (RB_ROOT(&object->rb_memq) == NULL)
1206                         break;
1207                 kprintf("vm_object_terminate: Warning, object %p "
1208                         "still has %d pages\n",
1209                         object, object->resident_page_count);
1210                 vm_page_rb_tree_RB_SCAN(&object->rb_memq, NULL,
1211                                         vm_object_terminate_callback, NULL);
1212         }
1213
1214         /*
1215          * There had better not be any pages left
1216          */
1217         KKASSERT(object->resident_page_count == 0);
1218
1219         /*
1220          * Remove the object from the global object list.
1221          */
1222         n = VMOBJ_HASH(object);
1223         lwkt_gettoken(&vmobj_tokens[n]);
1224         TAILQ_REMOVE(&vm_object_lists[n], object, object_list);
1225         lwkt_reltoken(&vmobj_tokens[n]);
1226         atomic_add_long(&vm_object_count, -1);
1227
1228         if (object->ref_count != 0) {
1229                 panic("vm_object_terminate2: object with references, "
1230                       "ref_count=%d", object->ref_count);
1231         }
1232
1233         /*
1234          * NOTE: The object hold_count is at least 1, so we cannot zfree()
1235          *       the object here.  See vm_object_drop().
1236          */
1237 }
1238
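/*
 * Caller-side sketch (hypothetical): terminate does NOT drop the
 * object, so the final teardown sequence is:
 *
 *	vm_object_hold(object);
 *	...last ref goes away...
 *	vm_object_terminate(object);
 *	vm_object_drop(object);		- object may be zfree()d here
 */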
1239 /*
1240  * The caller must hold the object.
1241  */
1242 static int
1243 vm_object_terminate_callback(vm_page_t p, void *data __unused)
1244 {
1245         vm_object_t object;
1246
1247         object = p->object;
1248         vm_page_busy_wait(p, TRUE, "vmpgtrm");
1249         if (object != p->object) {
1250                 kprintf("vm_object_terminate: Warning: Encountered "
1251                         "busied page %p on queue %d\n", p, p->queue);
1252                 vm_page_wakeup(p);
1253         } else if (p->wire_count == 0) {
1254                 /*
1255                  * NOTE: p->dirty and PG_NEED_COMMIT are ignored.
1256                  */
1257                 vm_page_free(p);
1258                 mycpu->gd_cnt.v_pfree++;
1259         } else {
1260                 if (p->queue != PQ_NONE)
1261                         kprintf("vm_object_terminate: Warning: Encountered "
1262                                 "wired page %p on queue %d\n", p, p->queue);
1263                 vm_page_remove(p);
1264                 vm_page_wakeup(p);
1265         }
1266         lwkt_yield();
1267         return(0);
1268 }
1269
1270 /*
1271  * Clean all dirty pages in the specified range of object.  Leaves page
1272  * on whatever queue it is currently on.   If NOSYNC is set then do not
1273  * write out pages with PG_NOSYNC set (originally comes from MAP_NOSYNC),
1274  * leaving the object dirty.
1275  *
1276  * When stuffing pages asynchronously, allow clustering.  XXX we need a
1277  * synchronous clustering mode implementation.
1278  *
1279  * Odd semantics: if start == end, we clean everything.
1280  *
1281  * The object must be locked? XXX
1282  */
1283 static int vm_object_page_clean_pass1(struct vm_page *p, void *data);
1284 static int vm_object_page_clean_pass2(struct vm_page *p, void *data);
1285
1286 void
1287 vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
1288                      int flags)
1289 {
1290         struct rb_vm_page_scan_info info;
1291         struct vnode *vp;
1292         int wholescan;
1293         int pagerflags;
1294         int generation;
1295
1296         vm_object_hold(object);
1297         if (object->type != OBJT_VNODE ||
1298             (object->flags & OBJ_MIGHTBEDIRTY) == 0) {
1299                 vm_object_drop(object);
1300                 return;
1301         }
1302
1303         pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) ? 
1304                         VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
1305         pagerflags |= (flags & OBJPC_INVAL) ? VM_PAGER_PUT_INVAL : 0;
1306
1307         vp = object->handle;
1308
1309         /*
1310          * Interlock other major object operations.  This allows us to 
1311          * temporarily clear OBJ_WRITEABLE and OBJ_MIGHTBEDIRTY.
1312          */
1313         vm_object_set_flag(object, OBJ_CLEANING);
1314
1315         /*
1316          * Handle 'entire object' case
1317          */
1318         info.start_pindex = start;
1319         if (end == 0) {
1320                 info.end_pindex = object->size - 1;
1321         } else {
1322                 info.end_pindex = end - 1;
1323         }
1324         wholescan = (start == 0 && info.end_pindex == object->size - 1);
1325         info.limit = flags;
1326         info.pagerflags = pagerflags;
1327         info.object = object;
1328
1329         /*
 1330          * If cleaning the entire object, do a pass to mark the pages read-only.
1331          * If everything worked out ok, clear OBJ_WRITEABLE and
1332          * OBJ_MIGHTBEDIRTY.
1333          */
1334         if (wholescan) {
1335                 info.error = 0;
1336                 vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
1337                                         vm_object_page_clean_pass1, &info);
1338                 if (info.error == 0) {
1339                         vm_object_clear_flag(object,
1340                                              OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
1341                         if (object->type == OBJT_VNODE &&
1342                             (vp = (struct vnode *)object->handle) != NULL) {
1343                                 /*
1344                                  * Use new-style interface to clear VISDIRTY
1345                                  * because the vnode is not necessarily removed
1346                                  * from the syncer list(s) as often as it was
1347                                  * under the old interface, which can leave
1348                                  * the vnode on the syncer list after reclaim.
1349                                  */
1350                                 vclrobjdirty(vp);
1351                         }
1352                 }
1353         }
1354
1355         /*
1356          * Do a pass to clean all the dirty pages we find.
1357          */
1358         do {
1359                 info.error = 0;
1360                 generation = object->generation;
1361                 vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
1362                                         vm_object_page_clean_pass2, &info);
1363         } while (info.error || generation != object->generation);
1364
1365         vm_object_clear_flag(object, OBJ_CLEANING);
1366         vm_object_drop(object);
1367 }
1368
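/*
 * Usage sketch: a synchronous whole-object flush, as issued by
 * vm_object_terminate() above (start == end == 0 selects the entire
 * object):
 *
 *	vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
 */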
1369 /*
1370  * The caller must hold the object.
1371  */
1372 static 
1373 int
1374 vm_object_page_clean_pass1(struct vm_page *p, void *data)
1375 {
1376         struct rb_vm_page_scan_info *info = data;
1377
1378         vm_page_flag_set(p, PG_CLEANCHK);
1379         if ((info->limit & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) {
1380                 info->error = 1;
1381         } else if (vm_page_busy_try(p, FALSE) == 0) {
1382                 vm_page_protect(p, VM_PROT_READ);       /* must not block */
1383                 vm_page_wakeup(p);
1384         } else {
1385                 info->error = 1;
1386         }
1387         lwkt_yield();
1388         return(0);
1389 }
1390
1391 /*
 1392  * The caller must hold the object.
1393  */
1394 static 
1395 int
1396 vm_object_page_clean_pass2(struct vm_page *p, void *data)
1397 {
1398         struct rb_vm_page_scan_info *info = data;
1399         int generation;
1400
1401         /*
1402          * Do not mess with pages that were inserted after we started
1403          * the cleaning pass.
1404          */
1405         if ((p->flags & PG_CLEANCHK) == 0)
1406                 goto done;
1407
1408         generation = info->object->generation;
1409         vm_page_busy_wait(p, TRUE, "vpcwai");
1410         if (p->object != info->object ||
1411             info->object->generation != generation) {
1412                 info->error = 1;
1413                 vm_page_wakeup(p);
1414                 goto done;
1415         }
1416
1417         /*
1418          * Before wasting time traversing the pmaps, check for trivial
1419          * cases where the page cannot be dirty.
1420          */
1421         if (p->valid == 0 || (p->queue - p->pc) == PQ_CACHE) {
1422                 KKASSERT((p->dirty & p->valid) == 0 &&
1423                          (p->flags & PG_NEED_COMMIT) == 0);
1424                 vm_page_wakeup(p);
1425                 goto done;
1426         }
1427
1428         /*
1429          * Check whether the page is dirty or not.  The page has been set
1430          * to be read-only so the check will not race a user dirtying the
1431          * page.
1432          */
1433         vm_page_test_dirty(p);
1434         if ((p->dirty & p->valid) == 0 && (p->flags & PG_NEED_COMMIT) == 0) {
1435                 vm_page_flag_clear(p, PG_CLEANCHK);
1436                 vm_page_wakeup(p);
1437                 goto done;
1438         }
1439
1440         /*
1441          * If we have been asked to skip nosync pages and this is a
1442          * nosync page, skip it.  Note that the object flags were
1443          * not cleared in this case (because pass1 will have returned an
1444          * error), so we do not have to set them.
1445          */
1446         if ((info->limit & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) {
1447                 vm_page_flag_clear(p, PG_CLEANCHK);
1448                 vm_page_wakeup(p);
1449                 goto done;
1450         }
1451
1452         /*
1453          * Flush as many pages as we can.  PG_CLEANCHK will be cleared on
1454          * the pages that get successfully flushed.  Set info->error if
1455          * we raced an object modification.
1456          */
1457         vm_object_page_collect_flush(info->object, p, info->pagerflags);
1458         /* vm_wait_nominal(); this can deadlock the system in syncer/pageout */
1459 done:
1460         lwkt_yield();
1461         return(0);
1462 }
1463
1464 /*
1465  * Collect the specified page and nearby pages and flush them out.
1466  * The number of pages flushed is returned.  The passed page is busied
1467  * by the caller and we are responsible for its disposition.
1468  *
1469  * The caller must hold the object.
1470  */
1471 static void
1472 vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags)
1473 {
1474         int error;
1475         int is;
1476         int ib;
1477         int i;
1478         int page_base;
1479         vm_pindex_t pi;
1480         vm_page_t ma[BLIST_MAX_ALLOC];
1481
1482         ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
1483
1484         pi = p->pindex;
1485         page_base = pi % BLIST_MAX_ALLOC;
1486         ma[page_base] = p;
1487         ib = page_base - 1;
1488         is = page_base + 1;
1489
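             /*
              * ma[] represents a BLIST_MAX_ALLOC-aligned window into the
              * object with the passed page at slot page_base.  The two
              * loops below scan backwards (ib) and forwards (is) from
              * that slot, busying and collecting contiguous dirty pages;
              * each scan stops at the first page that is missing, busy,
              * clean, cached, or lacking PG_CLEANCHK.
              */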
1490         while (ib >= 0) {
1491                 vm_page_t tp;
1492
1493                 tp = vm_page_lookup_busy_try(object, pi - page_base + ib,
1494                                              TRUE, &error);
1495                 if (error)
1496                         break;
1497                 if (tp == NULL)
1498                         break;
1499                 if ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
1500                     (tp->flags & PG_CLEANCHK) == 0) {
1501                         vm_page_wakeup(tp);
1502                         break;
1503                 }
1504                 if ((tp->queue - tp->pc) == PQ_CACHE) {
1505                         vm_page_flag_clear(tp, PG_CLEANCHK);
1506                         vm_page_wakeup(tp);
1507                         break;
1508                 }
1509                 vm_page_test_dirty(tp);
1510                 if ((tp->dirty & tp->valid) == 0 &&
1511                     (tp->flags & PG_NEED_COMMIT) == 0) {
1512                         vm_page_flag_clear(tp, PG_CLEANCHK);
1513                         vm_page_wakeup(tp);
1514                         break;
1515                 }
1516                 ma[ib] = tp;
1517                 --ib;
1518         }
1519         ++ib;   /* fixup: make ib the lowest collected index */
1520
1521         while (is < BLIST_MAX_ALLOC &&
1522                pi - page_base + is < object->size) {
1523                 vm_page_t tp;
1524
1525                 tp = vm_page_lookup_busy_try(object, pi - page_base + is,
1526                                              TRUE, &error);
1527                 if (error)
1528                         break;
1529                 if (tp == NULL)
1530                         break;
1531                 if ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
1532                     (tp->flags & PG_CLEANCHK) == 0) {
1533                         vm_page_wakeup(tp);
1534                         break;
1535                 }
1536                 if ((tp->queue - tp->pc) == PQ_CACHE) {
1537                         vm_page_flag_clear(tp, PG_CLEANCHK);
1538                         vm_page_wakeup(tp);
1539                         break;
1540                 }
1541                 vm_page_test_dirty(tp);
1542                 if ((tp->dirty & tp->valid) == 0 &&
1543                     (tp->flags & PG_NEED_COMMIT) == 0) {
1544                         vm_page_flag_clear(tp, PG_CLEANCHK);
1545                         vm_page_wakeup(tp);
1546                         break;
1547                 }
1548                 ma[is] = tp;
1549                 ++is;
1550         }
1551
1552         /*
1553          * All pages in the ma[] array are busied now
1554          */
1555         for (i = ib; i < is; ++i) {
1556                 vm_page_flag_clear(ma[i], PG_CLEANCHK);
1557                 vm_page_hold(ma[i]);    /* XXX need this any more? */
1558         }
1559         vm_pageout_flush(&ma[ib], is - ib, pagerflags);
1560         for (i = ib; i < is; ++i)       /* XXX need this any more? */
1561                 vm_page_unhold(ma[i]);
1562 }
1563
1564 /*
1565  * Same as vm_object_pmap_copy, except range checking really
1566  * works, and is meant for small sections of an object.
1567  *
1568  * This code protects resident pages by making them read-only
1569  * and is typically called on a fork or split when a page
1570  * is converted to copy-on-write.  
1571  *
1572  * NOTE: If the page is already at VM_PROT_NONE, calling
1573  * vm_page_protect will have no effect.
1574  */
1575 void
1576 vm_object_pmap_copy_1(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
1577 {
1578         vm_pindex_t idx;
1579         vm_page_t p;
1580
1581         if (object == NULL || (object->flags & OBJ_WRITEABLE) == 0)
1582                 return;
1583
1584         vm_object_hold(object);
1585         for (idx = start; idx < end; idx++) {
1586                 p = vm_page_lookup(object, idx);
1587                 if (p == NULL)
1588                         continue;
1589                 vm_page_protect(p, VM_PROT_READ);
1590         }
1591         vm_object_drop(object);
1592 }
1593
1594 /*
1595  * Removes all physical pages in the specified object range from all
1596  * physical maps.
1597  *
1598  * The object must *not* be locked.
1599  */
1600
1601 static int vm_object_pmap_remove_callback(vm_page_t p, void *data);
1602
1603 void
1604 vm_object_pmap_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
1605 {
1606         struct rb_vm_page_scan_info info;
1607
1608         if (object == NULL)
1609                 return;
1610         info.start_pindex = start;
1611         info.end_pindex = end - 1;
1612
1613         vm_object_hold(object);
1614         vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
1615                                 vm_object_pmap_remove_callback, &info);
1616         if (start == 0 && end == object->size)
1617                 vm_object_clear_flag(object, OBJ_WRITEABLE);
1618         vm_object_drop(object);
1619 }
1620
1621 /*
1622  * The caller must hold the object
1623  */
1624 static int
1625 vm_object_pmap_remove_callback(vm_page_t p, void *data __unused)
1626 {
1627         vm_page_protect(p, VM_PROT_NONE);
1628         return(0);
1629 }
1630
1631 /*
1632  * Implements the madvise function at the object/page level.
1633  *
1634  * MADV_WILLNEED        (any object)
1635  *
1636  *      Activate the specified pages if they are resident.
1637  *
1638  * MADV_DONTNEED        (any object)
1639  *
1640  *      Deactivate the specified pages if they are resident.
1641  *
1642  * MADV_FREE    (OBJT_DEFAULT/OBJT_SWAP objects, OBJ_ONEMAPPING only)
1643  *
1644  *      Deactivate and clean the specified pages if they are
1645  *      resident.  This permits the process to reuse the pages
1646  *      without faulting or the kernel to reclaim the pages
1647  *      without I/O.
1648  *
1649  * No requirements.
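      *
      * For example, a userland madvise(addr, len, MADV_DONTNEED) over a
      * mapped region is ultimately translated by the vm_map layer into a
      * call to this function with the backing object and the page index
      * range corresponding to [addr, addr + len).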
1650  */
1651 void
1652 vm_object_madvise(vm_object_t object, vm_pindex_t pindex, int count, int advise)
1653 {
1654         vm_pindex_t end, tpindex;
1655         vm_object_t tobject;
1656         vm_object_t xobj;
1657         vm_page_t m;
1658         int error;
1659
1660         if (object == NULL)
1661                 return;
1662
1663         end = pindex + count;
1664
1665         vm_object_hold(object);
1666         tobject = object;
1667
1668         /*
1669          * Locate and adjust resident pages
1670          */
1671         for (; pindex < end; pindex += 1) {
1672 relookup:
1673                 if (tobject != object)
1674                         vm_object_drop(tobject);
1675                 tobject = object;
1676                 tpindex = pindex;
1677 shadowlookup:
1678                 /*
1679                  * MADV_FREE only operates on OBJT_DEFAULT or OBJT_SWAP pages
1680                  * and those pages must be OBJ_ONEMAPPING.
1681                  */
1682                 if (advise == MADV_FREE) {
1683                         if ((tobject->type != OBJT_DEFAULT &&
1684                              tobject->type != OBJT_SWAP) ||
1685                             (tobject->flags & OBJ_ONEMAPPING) == 0) {
1686                                 continue;
1687                         }
1688                 }
1689
1690                 m = vm_page_lookup_busy_try(tobject, tpindex, TRUE, &error);
1691
1692                 if (error) {
1693                         vm_page_sleep_busy(m, TRUE, "madvpo");
1694                         goto relookup;
1695                 }
1696                 if (m == NULL) {
1697                         /*
1698                          * There may be swap even if there is no backing page
1699                          */
1700                         if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
1701                                 swap_pager_freespace(tobject, tpindex, 1);
1702
1703                         /*
1704                          * next object
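                              *
                              * The hold-then-recheck loop below guards
                              * against tobject->backing_object changing
                              * while vm_object_hold() blocks: if the
                              * pointer changed we drop the hold and retry
                              * until it is stable.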
1705                          */
1706                         while ((xobj = tobject->backing_object) != NULL) {
1707                                 KKASSERT(xobj != object);
1708                                 vm_object_hold(xobj);
1709                                 if (xobj == tobject->backing_object)
1710                                         break;
1711                                 vm_object_drop(xobj);
1712                         }
1713                         if (xobj == NULL)
1714                                 continue;
1715                         tpindex += OFF_TO_IDX(tobject->backing_object_offset);
1716                         if (tobject != object) {
1717                                 vm_object_lock_swap();
1718                                 vm_object_drop(tobject);
1719                         }
1720                         tobject = xobj;
1721                         goto shadowlookup;
1722                 }
1723
1724                 /*
1725                  * If the page is not in a normal active state, we skip it.
1726                  * If the page is not managed there are no page queues to
1727                  * mess with.  Things can break if we mess with pages in
1728                  * any of the below states.
1729                  */
1730                 if (m->wire_count ||
1731                     (m->flags & (PG_UNMANAGED | PG_NEED_COMMIT)) ||
1732                     m->valid != VM_PAGE_BITS_ALL
1733                 ) {
1734                         vm_page_wakeup(m);
1735                         continue;
1736                 }
1737
1738                 /*
1739                  * Theoretically once a page is known not to be busy, an
1740                  * interrupt cannot come along and rip it out from under us.
1741                  */
1742
1743                 if (advise == MADV_WILLNEED) {
1744                         vm_page_activate(m);
1745                 } else if (advise == MADV_DONTNEED) {
1746                         vm_page_dontneed(m);
1747                 } else if (advise == MADV_FREE) {
1748                         /*
1749                          * Mark the page clean.  This will allow the page
1750                          * to be freed up by the system.  However, such pages
1751                          * are often reused quickly by malloc()/free()
1752                          * so we do not do anything that would cause
1753                          * a page fault if we can help it.
1754                          *
1755                          * Specifically, we do not try to actually free
1756                          * the page now nor do we try to put it in the
1757                          * cache (which would cause a page fault on reuse).
1758                          *
1759                          * But we do make the page as freeable as we
1760                          * can without actually taking the step of unmapping
1761                          * it.
1762                          */
1763                         pmap_clear_modify(m);
1764                         m->dirty = 0;
1765                         m->act_count = 0;
1766                         vm_page_dontneed(m);
1767                         if (tobject->type == OBJT_SWAP)
1768                                 swap_pager_freespace(tobject, tpindex, 1);
1769                 }
1770                 vm_page_wakeup(m);
1771         }       
1772         if (tobject != object)
1773                 vm_object_drop(tobject);
1774         vm_object_drop(object);
1775 }
1776
1777 /*
1778  * Create a new object which is backed by the specified existing object
1779  * range.  Replace the pointer and offset that was pointing at the existing
1780  * object with the pointer/offset for the new object.
1781  *
1782  * If addref is non-zero the returned object is given an additional reference.
1783  * This mechanic exists to avoid the situation where a ref_count of 1
1784  * races against a collapse when the caller intends to bump it, which
1785  * is why the caller cannot safely add the ref after the fact.  Used
1786  * when the caller is duplicating a vm_map_entry.
1787  *
1788  * No other requirements.
1789  */
1790 void
1791 vm_object_shadow(vm_object_t *objectp, vm_ooffset_t *offset, vm_size_t length,
1792                  int addref)
1793 {
1794         vm_object_t source;
1795         vm_object_t result;
1796         int useshadowlist;
1797
1798         source = *objectp;
1799
1800         /*
1801          * Don't create the new object if the old object isn't shared.
1802          * We have to chain wait before adding the reference to avoid
1803          * racing a collapse or deallocation.
1804          *
1805          * Clear OBJ_ONEMAPPING flag when shadowing.
1806          *
1807          * The caller owns a ref on source via *objectp which we are going
1808  * to replace.  This ref is inherited by the backing_object assignment
1809  * and does not need to be incremented here.
1810          *
1811          * However, we add a temporary extra reference to the original source
1812          * prior to holding nobject in case we block, to avoid races where
1813          * someone else might believe that the source can be collapsed.
1814          */
1815         useshadowlist = 0;
1816         if (source) {
1817                 if (source->type != OBJT_VNODE) {
1818                         useshadowlist = 1;
1819                         vm_object_hold(source);
1820                         vm_object_chain_wait(source, 0);
1821                         if (source->ref_count == 1 &&
1822                             source->handle == NULL &&
1823                             (source->type == OBJT_DEFAULT ||
1824                              source->type == OBJT_SWAP)) {
1825                                 if (addref) {
1826                                         vm_object_reference_locked(source);
1827                                         vm_object_clear_flag(source,
1828                                                              OBJ_ONEMAPPING);
1829                                 }
1830                                 vm_object_drop(source);
1831                                 return;
1832                         }
1833                         vm_object_reference_locked(source);
1834                         vm_object_clear_flag(source, OBJ_ONEMAPPING);
1835                 } else {
1836                         vm_object_reference_quick(source);
1837                         vm_object_clear_flag(source, OBJ_ONEMAPPING);
1838                 }
1839         }
1840
1841         /*
1842          * Allocate a new object with the given length.  The new object
1843          * is returned referenced but we may have to add another one.
1844  * If we are adding a second reference we must clear OBJ_ONEMAPPING
1845  * (typically because the caller is about to clone a vm_map_entry).
1846          *
1847          * The source object currently has an extra reference to prevent
1848          * collapses into it while we mess with its shadow list, which
1849          * we will remove later in this routine.
1850          *
1851          * The target object may require a second reference if asked for one
1852          * by the caller.
1853          */
1854         result = vm_object_allocate(OBJT_DEFAULT, length);
1855         if (result == NULL)
1856                 panic("vm_object_shadow: no object for shadowing");
1857         vm_object_hold(result);
1858         if (addref) {
1859                 vm_object_reference_locked(result);
1860                 vm_object_clear_flag(result, OBJ_ONEMAPPING);
1861         }
1862
1863         /*
1864          * The new object shadows the source object.  Chain wait before
1865          * adjusting shadow_count or the shadow list to avoid races.
1866          *
1867          * Try to optimize the result object's page color when shadowing
1868          * in order to maintain page coloring consistency in the combined 
1869          * shadowed object.
1870          *
1871          * The backing_object reference to source requires adding a ref to
1872          * source.  We simply inherit the ref from the original *objectp
1873          * (which we are replacing) so no additional refs need to be added.
1874          * (we must still clean up the extra ref we had to prevent collapse
1875          * races).
1876          *
1877          * SHADOWING IS NOT APPLICABLE TO OBJT_VNODE OBJECTS
1878          */
1879         KKASSERT(result->backing_object == NULL);
1880         result->backing_object = source;
1881         if (source) {
1882                 if (useshadowlist) {
1883                         vm_object_chain_wait(source, 0);
1884                         LIST_INSERT_HEAD(&source->shadow_head,
1885                                          result, shadow_list);
1886                         source->shadow_count++;
1887                         source->generation++;
1888                         vm_object_set_flag(result, OBJ_ONSHADOW);
1889                 }
1890                 /* cpu localization twist */
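                     /*
                      * Seeding pg_color from the current thread pointer is
                      * presumably intended to give objects created on
                      * different cpus different page-color starting points,
                      * helping page allocations localize per cpu.
                      */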
1891                 result->pg_color = (int)(intptr_t)curthread;
1892         }
1893
1894         /*
1895          * Adjust the return storage.  Drop the ref on source before
1896          * returning.
1897          */
1898         result->backing_object_offset = *offset;
1899         vm_object_drop(result);
1900         *offset = 0;
1901         if (source) {
1902                 if (useshadowlist) {
1903                         vm_object_deallocate_locked(source);
1904                         vm_object_drop(source);
1905                 } else {
1906                         vm_object_deallocate(source);
1907                 }
1908         }
1909
1910         /*
1911          * Return the new object via *objectp.
1912          */
1913         *objectp = result;
1914 }
1915
1916 #define OBSC_TEST_ALL_SHADOWED  0x0001
1917 #define OBSC_COLLAPSE_NOWAIT    0x0002
1918 #define OBSC_COLLAPSE_WAIT      0x0004
1919
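     /*
      * OBSC_TEST_ALL_SHADOWED checks whether (object) completely shadows
      * the resident pages of its backing object.  OBSC_COLLAPSE_WAIT and
      * OBSC_COLLAPSE_NOWAIT move the backing object's pages into (object),
      * blocking on busy pages or skipping them, respectively.
      */
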
1920 static int vm_object_backing_scan_callback(vm_page_t p, void *data);
1921
1922 /*
1923  * The caller must hold both the object and the backing object.
1924  */
1925 static __inline int
1926 vm_object_backing_scan(vm_object_t object, vm_object_t backing_object, int op)
1927 {
1928         struct rb_vm_page_scan_info info;
1929         int n;
1930
1931         vm_object_assert_held(object);
1932         vm_object_assert_held(backing_object);
1933
1934         KKASSERT(backing_object == object->backing_object);
1935         info.backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
1936
1937         /*
1938          * Initial conditions
1939          */
1940         if (op & OBSC_TEST_ALL_SHADOWED) {
1941                 /*
1942                  * We do not want to have to test for the existence of
1943                  * swap pages in the backing object.  XXX but with the
1944                  * new swapper this would be pretty easy to do.
1945                  *
1946                  * XXX what about anonymous MAP_SHARED memory that hasn't
1947                  * been ZFOD faulted yet?  If we do not test for this, the
1948                  * shadow test may succeed! XXX
1949                  */
1950                 if (backing_object->type != OBJT_DEFAULT)
1951                         return(0);
1952         }
1953         if (op & OBSC_COLLAPSE_WAIT) {
1954                 KKASSERT((backing_object->flags & OBJ_DEAD) == 0);
1955                 vm_object_set_flag(backing_object, OBJ_DEAD);
1956
1957                 n = VMOBJ_HASH(backing_object);
1958                 lwkt_gettoken(&vmobj_tokens[n]);
1959                 TAILQ_REMOVE(&vm_object_lists[n], backing_object, object_list);
1960                 lwkt_reltoken(&vmobj_tokens[n]);
1961                 atomic_add_long(&vm_object_count, -1);
1962         }
1963
1964         /*
1965          * Our scan.  We have to retry if a negative error code is returned,
1966          * otherwise 0 or 1 will be returned in info.error.  A 0 indicates that
1967          * the scan had to be stopped because the parent does not completely
1968          * shadow the child.
1969          */
1970         info.object = object;
1971         info.backing_object = backing_object;
1972         info.limit = op;
1973         do {
1974                 info.error = 1;
1975                 vm_page_rb_tree_RB_SCAN(&backing_object->rb_memq, NULL,
1976                                         vm_object_backing_scan_callback,
1977                                         &info);
1978         } while (info.error < 0);
1979
1980         return(info.error);
1981 }
1982
1983 /*
1984  * The caller must hold the object.
1985  */
1986 static int
1987 vm_object_backing_scan_callback(vm_page_t p, void *data)
1988 {
1989         struct rb_vm_page_scan_info *info = data;
1990         vm_object_t backing_object;
1991         vm_object_t object;
1992         vm_pindex_t pindex;
1993         vm_pindex_t new_pindex;
1994         vm_pindex_t backing_offset_index;
1995         int op;
1996
1997         pindex = p->pindex;
1998         new_pindex = pindex - info->backing_offset_index;
1999         op = info->limit;
2000         object = info->object;
2001         backing_object = info->backing_object;
2002         backing_offset_index = info->backing_offset_index;
2003
2004         if (op & OBSC_TEST_ALL_SHADOWED) {
2005                 vm_page_t pp;
2006
2007                 /*
2008                  * Ignore pages outside the parent object's range
2009                  * and outside the parent object's mapping of the 
2010                  * backing object.
2011                  *
2012                  * note that we do not busy the backing object's
2013                  * page.
2014                  */
2015                 if (pindex < backing_offset_index ||
2016                     new_pindex >= object->size
2017                 ) {
2018                         return(0);
2019                 }
2020
2021                 /*
2022                  * See if the parent has the page or if the parent's
2023                  * object pager has the page.  If the parent has the
2024                  * page but the page is not valid, the parent's
2025                  * object pager must have the page.
2026                  *
2027                  * If this fails, the parent does not completely shadow
2028                  * the object and we might as well give up now.
2029                  */
2030                 pp = vm_page_lookup(object, new_pindex);
2031                 if ((pp == NULL || pp->valid == 0) &&
2032                     !vm_pager_has_page(object, new_pindex)
2033                 ) {
2034                         info->error = 0;        /* problemo */
2035                         return(-1);             /* stop the scan */
2036                 }
2037         }
2038
2039         /*
2040          * Check for busy page.  Note that we may have lost (p) when we
2041          * possibly blocked above.
2042          */
2043         if (op & (OBSC_COLLAPSE_WAIT | OBSC_COLLAPSE_NOWAIT)) {
2044                 vm_page_t pp;
2045
2046                 if (vm_page_busy_try(p, TRUE)) {
2047                         if (op & OBSC_COLLAPSE_NOWAIT) {
2048                                 return(0);
2049                         } else {
2050                                 /*
2051                                  * If we slept, anything could have
2052                                  * happened.   Ask that the scan be restarted.
2053                                  *
2054                                  * Since the object is marked dead, the
2055                                  * backing offset should not have changed.  
2056                                  */
2057                                 vm_page_sleep_busy(p, TRUE, "vmocol");
2058                                 info->error = -1;
2059                                 return(-1);
2060                         }
2061                 }
2062
2063                 /*
2064                  * If (p) is no longer valid restart the scan.
2065                  */
2066                 if (p->object != backing_object || p->pindex != pindex) {
2067                         kprintf("vm_object_backing_scan: Warning: page "
2068                                 "%p ripped out from under us\n", p);
2069                         vm_page_wakeup(p);
2070                         info->error = -1;
2071                         return(-1);
2072                 }
2073
2074                 if (op & OBSC_COLLAPSE_NOWAIT) {
2075                         if (p->valid == 0 ||
2076                             p->wire_count ||
2077                             (p->flags & PG_NEED_COMMIT)) {
2078                                 vm_page_wakeup(p);
2079                                 return(0);
2080                         }
2081                 } else {
2082                         /* XXX what if p->valid == 0, hold_count, etc? */
2083                 }
2084
2085                 KASSERT(
2086                     p->object == backing_object,
2087                     ("vm_object_qcollapse(): object mismatch")
2088                 );
2089
2090                 /*
2091                  * Destroy any associated swap
2092                  */
2093                 if (backing_object->type == OBJT_SWAP)
2094                         swap_pager_freespace(backing_object, p->pindex, 1);
2095
2096                 if (
2097                     p->pindex < backing_offset_index ||
2098                     new_pindex >= object->size
2099                 ) {
2100                         /*
2101                          * Page is out of the parent object's range, we 
2102                          * can simply destroy it. 
2103                          */
2104                         vm_page_protect(p, VM_PROT_NONE);
2105                         vm_page_free(p);
2106                         return(0);
2107                 }
2108
2109                 pp = vm_page_lookup(object, new_pindex);
2110                 if (pp != NULL || vm_pager_has_page(object, new_pindex)) {
2111                         /*
2112                          * page already exists in parent OR swap exists
2113                          * for this location in the parent.  Destroy 
2114                          * the original page from the backing object.
2115                          *
2116                          * Leave the parent's page alone
2117                          */
2118                         vm_page_protect(p, VM_PROT_NONE);
2119                         vm_page_free(p);
2120                         return(0);
2121                 }
2122
2123                 /*
2124                  * Page does not exist in parent, rename the
2125                  * page from the backing object to the main object. 
2126                  *
2127                  * If the page was mapped to a process, it can remain 
2128                  * mapped through the rename.
2129                  */
2130                 if ((p->queue - p->pc) == PQ_CACHE)
2131                         vm_page_deactivate(p);
2132
2133                 vm_page_rename(p, object, new_pindex);
2134                 vm_page_wakeup(p);
2135                 /* page automatically made dirty by rename */
2136         }
2137         return(0);
2138 }
2139
2140 /*
2141  * This version of collapse allows the operation to occur earlier and
2142  * when paging_in_progress is true for an object...  This is not a complete
2143  * operation, but should plug 99.9% of the rest of the leaks.
2144  *
2145  * The caller must hold the object and backing_object and both must be
2146  * chainlocked.
2147  *
2148  * (only called from vm_object_collapse)
2149  */
2150 static void
2151 vm_object_qcollapse(vm_object_t object, vm_object_t backing_object)
2152 {
2153         if (backing_object->ref_count == 1) {
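                     /*
                      * Artificially bump ref_count while the scan runs so
                      * that the backing object cannot appear to have the
                      * collapsible single-reference count mid-scan; the
                      * bump is reverted below once the scan completes.
                      */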
2154                 atomic_add_int(&backing_object->ref_count, 2);
2155 #if defined(DEBUG_LOCKS)
2156                 debugvm_object_add(backing_object, "qcollapse", 1, 2);
2157 #endif
2158                 vm_object_backing_scan(object, backing_object,
2159                                        OBSC_COLLAPSE_NOWAIT);
2160                 atomic_add_int(&backing_object->ref_count, -2);
2161 #if defined(DEBUG_LOCKS)
2162                 debugvm_object_add(backing_object, "qcollapse", 2, -2);
2163 #endif
2164         }
2165 }
2166
2167 /*
2168  * Collapse an object with the object backing it.  Pages in the backing
2169  * object are moved into the parent, and the backing object is deallocated.
2170  * Any conflict is resolved in favor of the parent's existing pages.
2171  *
2172  * object must be held and chain-locked on call.
2173  *
2174  * The caller must have an extra ref on object to prevent a race from
2175  * destroying it during the collapse.
2176  */
2177 void
2178 vm_object_collapse(vm_object_t object, struct vm_object_dealloc_list **dlistp)
2179 {
2180         struct vm_object_dealloc_list *dlist = NULL;
2181         vm_object_t backing_object;
2182
2183         /*
2184          * Only one thread is attempting a collapse at any given moment.
2185          * Callers of this function check few restrictions on (object),
2186          * so reentrancy is likely.
2187          */
2188         KKASSERT(object != NULL);
2189         vm_object_assert_held(object);
2190         KKASSERT(object->chainlk & (CHAINLK_MASK | CHAINLK_EXCL));
2191
2192         for (;;) {
2193                 vm_object_t bbobj;
2194                 int dodealloc;
2195
2196                 /*
2197                  * We can only collapse a DEFAULT/SWAP object with a
2198                  * DEFAULT/SWAP object.
2199                  */
2200                 if (object->type != OBJT_DEFAULT && object->type != OBJT_SWAP) {
2201                         backing_object = NULL;
2202                         break;
2203                 }
2204
2205                 backing_object = object->backing_object;
2206                 if (backing_object == NULL)
2207                         break;
2208                 if (backing_object->type != OBJT_DEFAULT &&
2209                     backing_object->type != OBJT_SWAP) {
2210                         backing_object = NULL;
2211                         break;
2212                 }
2213
2214                 /*
2215                  * Hold the backing_object and check for races
2216                  */
2217                 vm_object_hold(backing_object);
2218                 if (backing_object != object->backing_object ||
2219                     (backing_object->type != OBJT_DEFAULT &&
2220                      backing_object->type != OBJT_SWAP)) {
2221                         vm_object_drop(backing_object);
2222                         continue;
2223                 }
2224
2225                 /*
2226                  * Chain-lock the backing object too because if we
2227                  * successfully merge its pages into the top object we
2228                  * will collapse backing_object->backing_object as the
2229                  * new backing_object.  Re-check that it is still our
2230                  * backing object.
2231                  */
2232                 vm_object_chain_acquire(backing_object, 0);
2233                 if (backing_object != object->backing_object) {
2234                         vm_object_chain_release(backing_object);
2235                         vm_object_drop(backing_object);
2236                         continue;
2237                 }
2238
2239                 /*
2240                  * We check the backing object first because it is most
2241                  * likely not collapsible.
2242                  */
2243                 if (backing_object->handle != NULL ||
2244                     (backing_object->type != OBJT_DEFAULT &&
2245                      backing_object->type != OBJT_SWAP) ||
2246                     (backing_object->flags & OBJ_DEAD) ||
2247                     object->handle != NULL ||
2248                     (object->type != OBJT_DEFAULT &&
2249                      object->type != OBJT_SWAP) ||
2250                     (object->flags & OBJ_DEAD)) {
2251                         break;
2252                 }
2253
2254                 /*
2255                  * If paging is in progress we can't do a normal collapse.
2256                  */
2257                 if (
2258                     object->paging_in_progress != 0 ||
2259                     backing_object->paging_in_progress != 0
2260                 ) {
2261                         vm_object_qcollapse(object, backing_object);
2262                         break;
2263                 }
2264
2265                 /*
2266                  * We know that we can either collapse the backing object (if
2267                  * the parent is the only reference to it) or (perhaps) have
2268                  * the parent bypass the object if the parent happens to shadow
2269                  * all the resident pages in the entire backing object.
2270                  *
2271                  * This is ignoring pager-backed pages such as swap pages.
2272                  * vm_object_backing_scan fails the shadowing test in this
2273                  * case.
2274                  */
2275                 if (backing_object->ref_count == 1) {
2276                         /*
2277                          * If there is exactly one reference to the backing
2278                          * object, we can collapse it into the parent.  
2279                          */
2280                         KKASSERT(object->backing_object == backing_object);
2281                         vm_object_backing_scan(object, backing_object,
2282                                                OBSC_COLLAPSE_WAIT);
2283
2284                         /*
2285                          * Move the pager from backing_object to object.
2286                          */
2287                         if (backing_object->type == OBJT_SWAP) {
2288                                 vm_object_pip_add(backing_object, 1);
2289
2290                                 /*
2291                                  * scrap the paging_offset junk and do a 
2292                                  * discrete copy.  This also removes major 
2293                                  * assumptions about how the swap-pager 
2294                                  * works from where it doesn't belong.  The
2295                                  * new swapper is able to optimize the
2296                                  * destroy-source case.
2297                                  */
2298                                 vm_object_pip_add(object, 1);
2299                                 swap_pager_copy(backing_object, object,
2300                                     OFF_TO_IDX(object->backing_object_offset),
2301                                     TRUE);
2302                                 vm_object_pip_wakeup(object);
2303                                 vm_object_pip_wakeup(backing_object);
2304                         }
2305
2306                         /*
2307                          * Object now shadows whatever backing_object did.
2308                          * Remove object from backing_object's shadow_list.
2309                          *
2310                          * Removing object from backing_object's shadow list
2311                          * requires releasing object, which we will do below.
2312                          */
2313                         KKASSERT(object->backing_object == backing_object);
2314                         if (object->flags & OBJ_ONSHADOW) {
2315                                 LIST_REMOVE(object, shadow_list);
2316                                 backing_object->shadow_count--;
2317                                 backing_object->generation++;
2318                                 vm_object_clear_flag(object, OBJ_ONSHADOW);
2319                         }
2320
2321                         /*
2322                          * backing_object->backing_object moves from within
2323                          * backing_object to within object.
2324                          *
2325                          * OBJT_VNODE bbobj's should have empty shadow lists.
2326                          */
2327                         while ((bbobj = backing_object->backing_object) != NULL) {
2328                                 if (bbobj->type == OBJT_VNODE)
2329                                         vm_object_hold_shared(bbobj);
2330                                 else
2331                                         vm_object_hold(bbobj);
2332                                 if (bbobj == backing_object->backing_object)
2333                                         break;
2334                                 vm_object_drop(bbobj);
2335                         }
2336
2337                         /*
2338                          * We are removing backing_object from bbobj's
2339                          * shadow list and adding object to bbobj's shadow
2340                          * list, so the ref_count on bbobj is unchanged.
2341                          */
2342                         if (bbobj) {
2343                                 if (backing_object->flags & OBJ_ONSHADOW) {
2344                                         /* not locked exclusively if vnode */
2345                                         KKASSERT(bbobj->type != OBJT_VNODE);
2346                                         LIST_REMOVE(backing_object,
2347                                                     shadow_list);
2348                                         bbobj->shadow_count--;
2349                                         bbobj->generation++;
2350                                         vm_object_clear_flag(backing_object,
2351                                                              OBJ_ONSHADOW);
2352                                 }
2353                                 backing_object->backing_object = NULL;
2354                         }
2355                         object->backing_object = bbobj;
2356                         if (bbobj) {
2357                                 if (bbobj->type != OBJT_VNODE) {
2358                                         LIST_INSERT_HEAD(&bbobj->shadow_head,
2359                                                          object, shadow_list);
2360                                         bbobj->shadow_count++;
2361                                         bbobj->generation++;
2362                                         vm_object_set_flag(object,
2363                                                            OBJ_ONSHADOW);
2364                                 }
2365                         }
2366
2367                         object->backing_object_offset +=
2368                                 backing_object->backing_object_offset;
2369
2370                         vm_object_drop(bbobj);
2371
2372                         /*
2373                          * Discard the old backing_object.  Nothing should be
2374                          * able to ref it, other than a vm_map_split(),
2375                          * and vm_map_split() will stall on our chain lock.
2376                          * And we control the parent so it shouldn't be
2377                          * possible for it to go away either.
2378                          *
2379                          * Since the backing object has no pages, no pager
2380                          * left, and no object references within it, all
2381                          * that is necessary is to dispose of it.
2382                          */
2383                         KASSERT(backing_object->ref_count == 1,
2384                                 ("backing_object %p was somehow "
2385                                  "re-referenced during collapse!",
2386                                  backing_object));
2387                         KASSERT(RB_EMPTY(&backing_object->rb_memq),
2388                                 ("backing_object %p somehow has left "
2389                                  "over pages during collapse!",
2390                                  backing_object));
2391
2392                         /*
2393                          * The object can be destroyed.
2394                          *
2395                          * XXX just fall through and dodealloc instead
2396                          *     of forcing destruction?
2397                          */
2398                         atomic_add_int(&backing_object->ref_count, -1);
2399 #if defined(DEBUG_LOCKS)
2400                         debugvm_object_add(backing_object, "collapse", 1, -1);
2401 #endif
2402                         if ((backing_object->flags & OBJ_DEAD) == 0)
2403                                 vm_object_terminate(backing_object);
2404                         object_collapses++;
2405                         dodealloc = 0;
2406                 } else {
2407                         /*
2408                          * If we do not entirely shadow the backing object,
2409                          * there is nothing we can do so we give up.
2410                          */
2411                         if (vm_object_backing_scan(object, backing_object,
2412                                                 OBSC_TEST_ALL_SHADOWED) == 0) {
2413                                 break;
2414                         }
2415
2416                         /*
2417                          * bbobj is backing_object->backing_object.  Since
2418                          * object completely shadows backing_object we can
2419                          * bypass it and become backed by bbobj instead.
2420                          *
2421                          * The shadow list for vnode backing objects is not
2422                          * used and a shared hold is allowed.
2423                          */
2424                         while ((bbobj = backing_object->backing_object) != NULL) {
2425                                 if (bbobj->type == OBJT_VNODE)
2426                                         vm_object_hold_shared(bbobj);
2427                                 else
2428                                         vm_object_hold(bbobj);
2429                                 if (bbobj == backing_object->backing_object)
2430                                         break;
2431                                 vm_object_drop(bbobj);
2432                         }
2433
2434                         /*
2435                          * Make object shadow bbobj instead of backing_object.
2436                          * Remove object from backing_object's shadow list.
2437                          *
2438                          * Deallocating backing_object will not remove
2439                          * it, since its reference count is at least 2.
2440                          *
2441                          * Removing object from backing_object's shadow
2442                          * list requires releasing a ref, which we do
2443                          * below by setting dodealloc to 1.
2444                          */
2445                         KKASSERT(object->backing_object == backing_object);
2446                         if (object->flags & OBJ_ONSHADOW) {
2447                                 LIST_REMOVE(object, shadow_list);
2448                                 backing_object->shadow_count--;
2449                                 backing_object->generation++;
2450                                 vm_object_clear_flag(object, OBJ_ONSHADOW);
2451                         }
2452
2453                         /*
2454                          * Add a ref to bbobj, bbobj now shadows object.
2455                          *
2456                          * NOTE: backing_object->backing_object still points
2457                          *       to bbobj.  That relationship remains intact
2458                          *       because backing_object has > 1 ref, so
2459                          *       someone else is pointing to it (hence why
2460                          *       we can't collapse it into object and can
2461                          *       only handle the all-shadowed bypass case).
2462                          */
2463                         if (bbobj) {
2464                                 if (bbobj->type != OBJT_VNODE) {
2465                                         vm_object_chain_wait(bbobj, 0);
2466                                         vm_object_reference_locked(bbobj);
2467                                         LIST_INSERT_HEAD(&bbobj->shadow_head,
2468                                                          object, shadow_list);
2469                                         bbobj->shadow_count++;
2470                                         bbobj->generation++;
2471                                         vm_object_set_flag(object,
2472                                                            OBJ_ONSHADOW);
2473                                 } else {
2474                                         vm_object_reference_quick(bbobj);
2475                                 }
2476                                 object->backing_object_offset +=
2477                                         backing_object->backing_object_offset;
2478                                 object->backing_object = bbobj;
2479                                 vm_object_drop(bbobj);
2480                         } else {
2481                                 object->backing_object = NULL;
2482                         }
2483
2484                         /*
2485                          * Drop the reference count on backing_object.  To
2486                          * handle ref_count races properly we can't assume
2487                          * that the ref_count is still at least 2 so we
2488                          * have to actually call vm_object_deallocate()
2489                          * (after clearing the chainlock).
2490                          */
2491                         object_bypasses++;
2492                         dodealloc = 1;
2493                 }
2494
2495                 /*
2496                  * Ok, we want to loop on the new object->bbobj association,
2497                  * possibly collapsing it further.  However if dodealloc is
2498                  * non-zero we have to deallocate the backing_object which
2499                  * itself can potentially undergo a collapse, creating a
2500                  * recursion depth issue with the LWKT token subsystem.
2501                  *
2502                  * In the case where we must deallocate the backing_object
2503                  * it is possible now that the backing_object has a single
2504                  * shadow count on some other object (not represented here
2505                  * as yet), since it no longer shadows us.  Thus when we
2506                  * call vm_object_deallocate() it may attempt to collapse
2507                  * itself into its remaining parent.
2508                  */
2509                 if (dodealloc) {
2510                         struct vm_object_dealloc_list *dtmp;
2511
2512                         vm_object_chain_release(backing_object);
2513                         vm_object_unlock(backing_object);
2514                         /* backing_object remains held */
2515
2516                         /*
2517                          * Auto-deallocation list for caller convenience.
2518                          */
2519                         if (dlistp == NULL)
2520                                 dlistp = &dlist;
2521
2522                         dtmp = kmalloc(sizeof(*dtmp), M_TEMP, M_WAITOK);
2523                         dtmp->object = backing_object;
2524                         dtmp->next = *dlistp;
2525                         *dlistp = dtmp;
2526                 } else {
2527                         vm_object_chain_release(backing_object);
2528                         vm_object_drop(backing_object);
2529                 }
2530                 /* backing_object = NULL; not needed */
2531                 /* loop */
2532         }
2533
2534         /*
2535          * Clean up any left over backing_object
2536          */
2537         if (backing_object) {
2538                 vm_object_chain_release(backing_object);
2539                 vm_object_drop(backing_object);
2540         }
2541
2542         /*
2543          * Clean up any auto-deallocation list.  This is a convenience
2544          * for top-level callers so they don't have to pass &dlist.
2545          * Do not clean up any caller-passed dlistp, the caller will
2546          * do that.
2547          */
2548         if (dlist)
2549                 vm_object_deallocate_list(&dlist);
2550
2551 }
2552
2553 /*
2554  * vm_object_collapse() may collect additional objects in need of
2555  * deallocation.  This routine deallocates these objects.  The
2556  * deallocation itself can trigger additional collapses (which the
2557  * deallocate function takes care of).  This procedure is used to
2558  * reduce procedural recursion since these vm_object shadow chains
2559  * can become quite long.
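      *
      * A minimal usage sketch (assuming the caller already satisfies
      * vm_object_collapse()'s hold and chain-lock requirements on the
      * object being collapsed):
      *
      *      struct vm_object_dealloc_list *dlist = NULL;
      *
      *      vm_object_collapse(object, &dlist);
      *      ...
      *      vm_object_deallocate_list(&dlist);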
2560  */
2561 void
2562 vm_object_deallocate_list(struct vm_object_dealloc_list **dlistp)
2563 {
2564         struct vm_object_dealloc_list *dlist;
2565
2566         while ((dlist = *dlistp) != NULL) {
2567                 *dlistp = dlist->next;
2568                 vm_object_lock(dlist->object);
2569                 vm_object_deallocate_locked(dlist->object);
2570                 vm_object_drop(dlist->object);
2571                 kfree(dlist, M_TEMP);
2572         }
2573 }
2574
2575 /*
2576  * Removes all physical pages in the specified object range from the
2577  * object's list of pages.
2578  *
2579  * No requirements.
2580  */
2581 static int vm_object_page_remove_callback(vm_page_t p, void *data);
2582
2583 void
2584 vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
2585                       boolean_t clean_only)
2586 {
2587         struct rb_vm_page_scan_info info;
2588         int all;
2589
2590         /*
2591          * Degenerate cases and assertions
2592          */
2593         vm_object_hold(object);
2594         if (object == NULL ||
2595             (object->resident_page_count == 0 && object->swblock_count == 0)) {
2596                 vm_object_drop(object);
2597                 return;
2598         }
2599         KASSERT(object->type != OBJT_PHYS, 
2600                 ("attempt to remove pages from a physical object"));
2601
2602         /*
2603          * Indicate that paging is occurring on the object
2604          */
2605         vm_object_pip_add(object, 1);
2606
2607         /*
2608          * Figure out the actual removal range and whether we are removing
2609          * the entire contents of the object or not.  If removing the entire
2610          * contents, be sure to get all pages, even those that might be 
2611          * beyond the end of the object.
2612          */
2613         info.start_pindex = start;
2614         if (end == 0)
2615                 info.end_pindex = (vm_pindex_t)-1;
2616         else
2617                 info.end_pindex = end - 1;
2618         info.limit = clean_only;
2619         all = (start == 0 && info.end_pindex >= object->size - 1);
2620
2621         /*
2622          * Loop until we are sure we have gotten them all.
2623          */
2624         do {
2625                 info.error = 0;
2626                 vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
2627                                         vm_object_page_remove_callback, &info);
2628         } while (info.error);
2629
2630         /*
2631          * Remove any related swap if throwing away pages, or for
2632          * non-swap objects (the swap is a clean copy in that case).
2633          */
2634         if (object->type != OBJT_SWAP || clean_only == FALSE) {
2635                 if (all)
2636                         swap_pager_freespace_all(object);
2637                 else
2638                         swap_pager_freespace(object, info.start_pindex,
2639                              info.end_pindex - info.start_pindex + 1);
2640         }
2641
2642         /*
2643          * Cleanup
2644          */
2645         vm_object_pip_wakeup(object);
2646         vm_object_drop(object);
2647 }
2648
2649 /*
2650  * The caller must hold the object
2651  */
2652 static int
2653 vm_object_page_remove_callback(vm_page_t p, void *data)
2654 {
2655         struct rb_vm_page_scan_info *info = data;
2656
2657         if (vm_page_busy_try(p, TRUE)) {
2658                 vm_page_sleep_busy(p, TRUE, "vmopar");
2659                 info->error = 1;
2660                 return(0);
2661         }
2662
2663         /*
2664          * Wired pages cannot be destroyed, but they can be invalidated
2665          * and we do so if clean_only (limit) is not set.
2666          *
2667          * WARNING!  The page may be wired due to being part of a buffer
2668          *           cache buffer, and the buffer might be marked B_CACHE.
2669          *           This is fine as part of a truncation but VFSs must be
2670          *           sure to fix the buffer up when re-extending the file.
2671          *
2672          * NOTE!     PG_NEED_COMMIT is ignored.
2673          */
2674         if (p->wire_count != 0) {
2675                 vm_page_protect(p, VM_PROT_NONE);
2676                 if (info->limit == 0)
2677                         p->valid = 0;
2678                 vm_page_wakeup(p);
2679                 return(0);
2680         }
2681
2682         /*
2683          * limit is our clean_only flag.  If set and the page is dirty or
2684          * requires a commit, do not free it.  If set and the page is being
2685          * held by someone, do not free it.
2686          */
2687         if (info->limit && p->valid) {
2688                 vm_page_test_dirty(p);
2689                 if ((p->valid & p->dirty) || (p->flags & PG_NEED_COMMIT)) {
2690                         vm_page_wakeup(p);
2691                         return(0);
2692                 }
2693         }
2694
2695         /*
2696          * Destroy the page
2697          */
2698         vm_page_protect(p, VM_PROT_NONE);
2699         vm_page_free(p);
2700         return(0);
2701 }
2702
2703 /*
2704  * Coalesces two objects backing up adjoining regions of memory into a
2705  * single object.
2706  *
2707  * returns TRUE if objects were combined.
2708  *
2709  * NOTE: Only works at the moment if the second object is NULL -
2710  *       if it's not, which object do we lock first?
2711  *
2712  * Parameters:
2713  *      prev_object     First object to coalesce
2714  *      prev_pindex     Page index within prev_object at which the
2715  *                      coalesced region begins
2716  *
2717  *      prev_size       Size of reference to prev_object
2718  *      next_size       Size of the adjoining region to coalesce
2719  *                      (the second object is currently always NULL)
2720  *
2721  * The caller does not need to hold (prev_object) but must have a stable
2722  * pointer to it (typically by holding the vm_map locked).
2723  */
boolean_t
vm_object_coalesce(vm_object_t prev_object, vm_pindex_t prev_pindex,
                   vm_size_t prev_size, vm_size_t next_size)
{
        vm_pindex_t next_pindex;

        if (prev_object == NULL)
                return (TRUE);

        vm_object_hold(prev_object);

        if (prev_object->type != OBJT_DEFAULT &&
            prev_object->type != OBJT_SWAP) {
                vm_object_drop(prev_object);
                return (FALSE);
        }

        /*
         * Try to collapse the object first
         */
        vm_object_chain_acquire(prev_object, 0);
        vm_object_collapse(prev_object, NULL);

        /*
         * Can't coalesce if:
         * . more than one reference
         * . paged out
         * . shadows another object
         * . has a copy elsewhere
         * (any of which mean that the pages not mapped to prev_entry
         * may be in use anyway)
         */

        if (prev_object->backing_object != NULL) {
                vm_object_chain_release(prev_object);
                vm_object_drop(prev_object);
                return (FALSE);
        }

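        /*
         * Convert the byte sizes to page counts; next_pindex is the page
         * index at which the adjoining region begins.
         */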
        prev_size >>= PAGE_SHIFT;
        next_size >>= PAGE_SHIFT;
        next_pindex = prev_pindex + prev_size;

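        /*
         * If prev_object is shared we can only coalesce when the region
         * abuts the very end of the object; otherwise pages beyond the
         * mapping may be in use by the other references.
         */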
        if ((prev_object->ref_count > 1) &&
            (prev_object->size != next_pindex)) {
                vm_object_chain_release(prev_object);
                vm_object_drop(prev_object);
                return (FALSE);
        }

        /*
         * Remove any pages that may still be in the object from a previous
         * deallocation.
         */
        if (next_pindex < prev_object->size) {
                vm_object_page_remove(prev_object,
                                      next_pindex,
                                      next_pindex + next_size, FALSE);
                if (prev_object->type == OBJT_SWAP)
                        swap_pager_freespace(prev_object,
                                             next_pindex, next_size);
        }

        /*
         * Extend the object if necessary.
         */
        if (next_pindex + next_size > prev_object->size)
                prev_object->size = next_pindex + next_size;

        vm_object_chain_release(prev_object);
        vm_object_drop(prev_object);
        return (TRUE);
}

/*
 * Make the object writable and flag it as possibly being dirty.
 *
 * The object might not be held (or might be held but held shared),
 * and the related vnode is probably not held either.  Object and vnode
 * are stable by virtue of the vm_page busied by the caller preventing
 * destruction.
 *
 * If the related mount is flagged MNTK_THR_SYNC we need to call
 * vsetobjdirty().  Filesystems using this option usually shortcut
 * synchronization by only scanning the syncer list.
 */
void
vm_object_set_writeable_dirty(vm_object_t object)
{
        struct vnode *vp;

        /*vm_object_assert_held(object);*/
        /*
         * Avoid contention in vm fault path by checking the state before
         * issuing an atomic op on it.
         */
        if ((object->flags & (OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY)) !=
            (OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY)) {
                vm_object_set_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
        }
        if (object->type == OBJT_VNODE &&
            (vp = (struct vnode *)object->handle) != NULL) {
                if ((vp->v_flag & VOBJDIRTY) == 0) {
                        if (vp->v_mount &&
                            (vp->v_mount->mnt_kern_flag & MNTK_THR_SYNC)) {
                                /*
                                 * New style THR_SYNC places vnodes on the
                                 * syncer list more deterministically.
                                 */
                                vsetobjdirty(vp);
                        } else {
                                /*
                                 * Old style scan would not necessarily place
                                 * a vnode on the syncer list when possibly
                                 * modified via mmap.
                                 */
                                vsetflags(vp, VOBJDIRTY);
                        }
                }
        }
}

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <sys/cons.h>

#include <ddb/ddb.h>

static int      _vm_object_in_map (vm_map_t map, vm_object_t object,
                                       vm_map_entry_t entry);
static int      vm_object_in_map (vm_object_t object);

/*
 * The caller must hold the object.
 */
static int
_vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
{
        vm_map_t tmpm;
        vm_map_entry_t tmpe;
        vm_object_t obj, nobj;
        int entcount;

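        /*
         * A NULL entry means scan every entry in the map; otherwise only
         * the given entry is examined.
         */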
        if (map == 0)
                return 0;
        if (entry == 0) {
                tmpe = map->header.next;
                entcount = map->nentries;
                while (entcount-- && (tmpe != &map->header)) {
                        if (_vm_object_in_map(map, object, tmpe)) {
                                return 1;
                        }
                        tmpe = tmpe->next;
                }
                return (0);
        }
        switch (entry->maptype) {
        case VM_MAPTYPE_SUBMAP:
                tmpm = entry->object.sub_map;
                tmpe = tmpm->header.next;
                entcount = tmpm->nentries;
                while (entcount-- && tmpe != &tmpm->header) {
                        if (_vm_object_in_map(tmpm, object, tmpe)) {
                                return 1;
                        }
                        tmpe = tmpe->next;
                }
                break;
        case VM_MAPTYPE_NORMAL:
        case VM_MAPTYPE_VPAGETABLE:
                obj = entry->object.vm_object;
                while (obj) {
                        if (obj == object) {
                                if (obj != entry->object.vm_object)
                                        vm_object_drop(obj);
                                return 1;
                        }
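                        /*
                         * Hold the backing object and re-check that the
                         * chain did not change while we potentially blocked;
                         * drop and retry if it did.
                         */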
                        while ((nobj = obj->backing_object) != NULL) {
                                vm_object_hold(nobj);
                                if (nobj == obj->backing_object)
                                        break;
                                vm_object_drop(nobj);
                        }
                        if (obj != entry->object.vm_object) {
                                if (nobj)
                                        vm_object_lock_swap();
                                vm_object_drop(obj);
                        }
                        obj = nobj;
                }
                break;
        default:
                break;
        }
        return 0;
}

static int vm_object_in_map_callback(struct proc *p, void *data);

struct vm_object_in_map_info {
        vm_object_t object;
        int rv;
};

/*
 * Debugging only
 */
static int
vm_object_in_map(vm_object_t object)
{
        struct vm_object_in_map_info info;

        info.rv = 0;
        info.object = object;

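        /*
         * Check every process vmspace, then the kernel-related maps.
         */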
        allproc_scan(vm_object_in_map_callback, &info);
        if (info.rv)
                return 1;
        if (_vm_object_in_map(&kernel_map, object, 0))
                return 1;
        if (_vm_object_in_map(&pager_map, object, 0))
                return 1;
        if (_vm_object_in_map(&buffer_map, object, 0))
                return 1;
        return 0;
}

/*
 * Debugging only
 */
static int
vm_object_in_map_callback(struct proc *p, void *data)
{
        struct vm_object_in_map_info *info = data;

        if (p->p_vmspace) {
                if (_vm_object_in_map(&p->p_vmspace->vm_map, info->object, 0)) {
                        info->rv = 1;
                        return -1;
                }
        }
        return (0);
}

DB_SHOW_COMMAND(vmochk, vm_object_check)
{
        vm_object_t object;
        int n;

        /*
         * make sure that internal objs are in a map somewhere
         * and none have zero ref counts.
         */
        for (n = 0; n < VMOBJ_HSIZE; ++n) {
                for (object = TAILQ_FIRST(&vm_object_lists[n]);
                                object != NULL;
                                object = TAILQ_NEXT(object, object_list)) {
                        if (object->type == OBJT_MARKER)
                                continue;
                        if (object->handle != NULL ||
                            (object->type != OBJT_DEFAULT &&
                             object->type != OBJT_SWAP)) {
                                continue;
                        }
                        if (object->ref_count == 0) {
                                db_printf("vmochk: internal obj has "
                                          "zero ref count: %ld\n",
                                          (long)object->size);
                        }
                        if (vm_object_in_map(object))
                                continue;
                        db_printf("vmochk: internal obj is not in a map: "
                                  "ref: %d, size: %lu: 0x%lx, "
                                  "backing_object: %p\n",
                                  object->ref_count, (u_long)object->size,
                                  (u_long)object->size,
                                  (void *)object->backing_object);
                }
        }
}

/*
 * Debugging only
 */
DB_SHOW_COMMAND(object, vm_object_print_static)
{
        /* XXX convert args. */
        vm_object_t object = (vm_object_t)addr;
        boolean_t full = have_addr;

        vm_page_t p;

        /* XXX count is an (unused) arg.  Avoid shadowing it. */
#define count   was_count

        int count;

        if (object == NULL)
                return;

        db_iprintf(
            "Object %p: type=%d, size=0x%lx, res=%d, ref=%d, flags=0x%x\n",
            object, (int)object->type, (u_long)object->size,
            object->resident_page_count, object->ref_count, object->flags);
        /*
         * XXX no %qd in kernel.  Truncate object->backing_object_offset.
         */
        db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%lx\n",
            object->shadow_count,
            object->backing_object ? object->backing_object->ref_count : 0,
            object->backing_object, (long)object->backing_object_offset);

        if (!full)
                return;

        db_indent += 2;
        count = 0;
        RB_FOREACH(p, vm_page_rb_tree, &object->rb_memq) {
                if (count == 0)
                        db_iprintf("memory:=");
                else if (count == 6) {
                        db_printf("\n");
                        db_iprintf(" ...");
                        count = 0;
                } else
                        db_printf(",");
                count++;

                db_printf("(off=0x%lx,page=0x%lx)",
                    (u_long) p->pindex, (u_long) VM_PAGE_TO_PHYS(p));
        }
        if (count != 0)
                db_printf("\n");
        db_indent -= 2;
}

/* XXX. */
#undef count

/*
 * XXX need this non-static entry for calling from vm_map_print.
 *
 * Debugging only
 */
void
vm_object_print(/* db_expr_t */ long addr,
                boolean_t have_addr,
                /* db_expr_t */ long count,
                char *modif)
{
        vm_object_print_static(addr, have_addr, count, modif);
}

/*
 * Debugging only
 */
DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
{
        vm_object_t object;
        int nl = 0;
        int c;
        int n;

        for (n = 0; n < VMOBJ_HSIZE; ++n) {
                for (object = TAILQ_FIRST(&vm_object_lists[n]);
                                object != NULL;
                                object = TAILQ_NEXT(object, object_list)) {
                        vm_pindex_t idx, fidx;
                        vm_pindex_t osize;
                        vm_paddr_t pa = -1, padiff;
                        int rcount;
                        vm_page_t m;

                        if (object->type == OBJT_MARKER)
                                continue;
                        db_printf("new object: %p\n", (void *)object);
                        if (nl > 18) {
                                c = cngetc();
                                if (c != ' ')
                                        return;
                                nl = 0;
                        }
                        nl++;
                        rcount = 0;
                        fidx = 0;
                        osize = object->size;
                        if (osize > 128)
                                osize = 128;
                        for (idx = 0; idx < osize; idx++) {
                                m = vm_page_lookup(object, idx);
                                if (m == NULL) {
                                        if (rcount) {
                                                db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
                                                        (long)fidx, rcount, (long)pa);
                                                if (nl > 18) {
                                                        c = cngetc();
                                                        if (c != ' ')
                                                                return;
                                                        nl = 0;
                                                }
                                                nl++;
                                                rcount = 0;
                                        }
                                        continue;
                                }

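                                /*
                                 * Extend the current run while the page is
                                 * physically contiguous with it.
                                 */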
                                if (rcount &&
                                        (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
                                        ++rcount;
                                        continue;
                                }
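                                /*
                                 * The run broke.  If the page is off only by
                                 * a multiple of the page coloring modulus
                                 * (PQ_L2_MASK), rebase and continue the run;
                                 * otherwise report the run and the delta.
                                 */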
                                if (rcount) {
                                        padiff = pa + rcount * PAGE_SIZE - VM_PAGE_TO_PHYS(m);
                                        padiff >>= PAGE_SHIFT;
                                        padiff &= PQ_L2_MASK;
                                        if (padiff == 0) {
                                                pa = VM_PAGE_TO_PHYS(m) - rcount * PAGE_SIZE;
                                                ++rcount;
                                                continue;
                                        }
                                        db_printf(" index(%ld)run(%d)pa(0x%lx)",
                                                (long)fidx, rcount, (long)pa);
                                        db_printf("pd(%ld)\n", (long)padiff);
                                        if (nl > 18) {
                                                c = cngetc();
                                                if (c != ' ')
                                                        return;
                                                nl = 0;
                                        }
                                        nl++;
                                }
                                fidx = idx;
                                pa = VM_PAGE_TO_PHYS(m);
                                rcount = 1;
                        }
                        if (rcount) {
                                db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
                                        (long)fidx, rcount, (long)pa);
                                if (nl > 18) {
                                        c = cngetc();
                                        if (c != ' ')
                                                return;
                                        nl = 0;
                                }
                                nl++;
                        }
                }
        }
}
#endif /* DDB */