kernel - Move VM objects from pool tokens to per-vm-object tokens
[dragonfly.git] / sys / vm / vm_object.c
/*
 * (MPSAFE)
 *
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)vm_object.c   8.5 (Berkeley) 3/22/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_object.c,v 1.171.2.8 2003/05/26 19:17:56 alc Exp $
 */

/*
 *      Virtual memory object module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>           /* for curproc, pageproc */
#include <sys/thread.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/refcount.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#define EASY_SCAN_FACTOR        8

static void     vm_object_qcollapse(vm_object_t object,
                                    vm_object_t backing_object);
static int      vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
                                             int pagerflags);
static void     vm_object_lock_init(vm_object_t);


/*
 *      Virtual memory objects maintain the actual data
 *      associated with allocated virtual memory.  A given
 *      page of memory exists within exactly one object.
 *
 *      An object is only deallocated when all "references"
 *      are given up.  Only one "reference" to a given
 *      region of an object should be writeable.
 *
 *      Associated with each object is a list of all resident
 *      memory pages belonging to that object; this list is
 *      maintained by the "vm_page" module, and locked by the object's
 *      lock.
 *
 *      Each object also records a "pager" routine which is
 *      used to retrieve (and store) pages to the proper backing
 *      storage.  In addition, objects may be backed by other
 *      objects from which they were virtual-copied.
 *
 *      The only items within the object structure which are
 *      modified after time of creation are:
 *              reference count         locked by object's lock
 *              pager routine           locked by object's lock
 *
 */
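
/*
 * Editor's illustrative sketch (not part of the original code and never
 * compiled): the typical lifecycle of an anonymous object using the
 * primitives defined in this file.  The 16-page size and the function
 * name are arbitrary.
 */
#if 0
static void
vm_object_lifecycle_sketch(void)
{
        vm_object_t obj;

        obj = vm_object_allocate(OBJT_DEFAULT, 16);     /* ref_count 1 */
        vm_object_hold(obj);            /* token + hold_count, may block */
        /* ... manipulate pages, flags, pager association ... */
        vm_object_drop(obj);            /* release token and hold */
        vm_object_deallocate(obj);      /* 1->0 transition terminates */
}
#endif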

struct object_q vm_object_list;         /* locked by vmobj_token */
struct vm_object kernel_object;

static long vm_object_count;            /* locked by vmobj_token */
extern int vm_pageout_page_count;

static long object_collapses;
static long object_bypasses;
static int next_index;
static vm_zone_t obj_zone;
static struct vm_zone obj_zone_store;
#define VM_OBJECTS_INIT 256
static struct vm_object vm_objects_init[VM_OBJECTS_INIT];

/*
 * Misc low level routines
 */
static void
vm_object_lock_init(vm_object_t obj)
{
#if defined(DEBUG_LOCKS)
        int i;

        obj->debug_hold_bitmap = 0;
        obj->debug_hold_ovfl = 0;
        for (i = 0; i < VMOBJ_DEBUG_ARRAY_SIZE; i++) {
                obj->debug_hold_thrs[i] = NULL;
                obj->debug_hold_file[i] = NULL;
                obj->debug_hold_line[i] = 0;
        }
#endif
}

void
vm_object_lock_swap(void)
{
        lwkt_token_swap();
}

void
vm_object_lock(vm_object_t obj)
{
        lwkt_gettoken(&obj->token);
}

void
vm_object_lock_shared(vm_object_t obj)
{
        lwkt_gettoken_shared(&obj->token);
}

void
vm_object_unlock(vm_object_t obj)
{
        lwkt_reltoken(&obj->token);
}

static __inline void
vm_object_assert_held(vm_object_t obj)
{
        ASSERT_LWKT_TOKEN_HELD(&obj->token);
}

void
#ifndef DEBUG_LOCKS
vm_object_hold(vm_object_t obj)
#else
debugvm_object_hold(vm_object_t obj, char *file, int line)
#endif
{
        KKASSERT(obj != NULL);

        /*
         * Object must be held (object allocation is stable due to the
         * caller's context, typically already holding the token on a
         * parent object) prior to potentially blocking on the lock,
         * otherwise the object can get ripped away from us.
         */
        refcount_acquire(&obj->hold_count);
        vm_object_lock(obj);

#if defined(DEBUG_LOCKS)
        int i;
        u_int mask;

        for (;;) {
                mask = ~obj->debug_hold_bitmap;
                cpu_ccfence();
                if (mask == 0xFFFFFFFFU) {
                        if (obj->debug_hold_ovfl == 0)
                                obj->debug_hold_ovfl = 1;
                        break;
                }
                i = ffs(mask) - 1;
                if (atomic_cmpset_int(&obj->debug_hold_bitmap, ~mask,
                                      ~mask | (1 << i))) {
                        obj->debug_hold_bitmap |= (1 << i);
                        obj->debug_hold_thrs[i] = curthread;
                        obj->debug_hold_file[i] = file;
                        obj->debug_hold_line[i] = line;
                        break;
                }
        }
#endif
}

void
#ifndef DEBUG_LOCKS
vm_object_hold_shared(vm_object_t obj)
#else
debugvm_object_hold_shared(vm_object_t obj, char *file, int line)
#endif
{
        KKASSERT(obj != NULL);

        /*
         * Object must be held (object allocation is stable due to the
         * caller's context, typically already holding the token on a
         * parent object) prior to potentially blocking on the lock,
         * otherwise the object can get ripped away from us.
         */
        refcount_acquire(&obj->hold_count);
        vm_object_lock_shared(obj);

#if defined(DEBUG_LOCKS)
        int i;
        u_int mask;

        for (;;) {
                mask = ~obj->debug_hold_bitmap;
                cpu_ccfence();
                if (mask == 0xFFFFFFFFU) {
                        if (obj->debug_hold_ovfl == 0)
                                obj->debug_hold_ovfl = 1;
                        break;
                }
                i = ffs(mask) - 1;
                if (atomic_cmpset_int(&obj->debug_hold_bitmap, ~mask,
                                      ~mask | (1 << i))) {
                        obj->debug_hold_bitmap |= (1 << i);
                        obj->debug_hold_thrs[i] = curthread;
                        obj->debug_hold_file[i] = file;
                        obj->debug_hold_line[i] = line;
                        break;
                }
        }
#endif
}

/*
 * Drop the token and hold_count on the object.
 */
void
vm_object_drop(vm_object_t obj)
{
        if (obj == NULL)
                return;

#if defined(DEBUG_LOCKS)
        int found = 0;
        int i;

        for (i = 0; i < VMOBJ_DEBUG_ARRAY_SIZE; i++) {
                if ((obj->debug_hold_bitmap & (1 << i)) &&
                    (obj->debug_hold_thrs[i] == curthread)) {
                        obj->debug_hold_bitmap &= ~(1 << i);
                        obj->debug_hold_thrs[i] = NULL;
                        obj->debug_hold_file[i] = NULL;
                        obj->debug_hold_line[i] = 0;
                        found = 1;
                        break;
                }
        }

        if (found == 0 && obj->debug_hold_ovfl == 0)
                panic("vm_object: attempt to drop hold on non-self-held obj");
#endif

        /*
         * No new holders should be possible once we drop hold_count 1->0 as
         * there is no longer any way to reference the object.
         */
        KKASSERT(obj->hold_count > 0);
        if (refcount_release(&obj->hold_count)) {
                if (obj->ref_count == 0 && (obj->flags & OBJ_DEAD)) {
                        vm_object_unlock(obj);
                        zfree(obj_zone, obj);
                } else {
                        vm_object_unlock(obj);
                }
        } else {
                vm_object_unlock(obj);
        }
}
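
/*
 * Editor's note (illustrative sketch, not compiled): vm_object_hold() and
 * vm_object_drop() bracket any code that may block while using an object.
 * The hold_count keeps the final zfree() in vm_object_drop() from ripping
 * the object out from under a concurrent holder, but flags such as
 * OBJ_DEAD must still be rechecked after any blocking call:
 */
#if 0
        vm_object_hold(obj);            /* obj cannot be freed from here */
        if (obj->flags & OBJ_DEAD) {    /* recheck after the blocking hold */
                vm_object_drop(obj);
                return;
        }
        /* ... potentially blocking work on obj ... */
        vm_object_drop(obj);
#endif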

/*
 * Initialize a freshly allocated object
 *
 * Used only by vm_object_allocate() and zinitna().
 *
 * No requirements.
 */
void
_vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
{
        int incr;

        RB_INIT(&object->rb_memq);
        LIST_INIT(&object->shadow_head);
        lwkt_token_init(&object->token, "vmobj");

        object->type = type;
        object->size = size;
        object->ref_count = 1;
        object->hold_count = 0;
        object->flags = 0;
        if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP))
                vm_object_set_flag(object, OBJ_ONEMAPPING);
        object->paging_in_progress = 0;
        object->resident_page_count = 0;
        object->agg_pv_list_count = 0;
        object->shadow_count = 0;
#ifdef SMP
        /* cpu localization twist */
        object->pg_color = (int)(intptr_t)curthread;
#else
        object->pg_color = next_index;
#endif
        if (size > (PQ_L2_SIZE / 3 + PQ_PRIME1))
                incr = PQ_L2_SIZE / 3 + PQ_PRIME1;
        else
                incr = size;
        next_index = (next_index + incr) & PQ_L2_MASK;
        object->handle = NULL;
        object->backing_object = NULL;
        object->backing_object_offset = (vm_ooffset_t)0;

        object->generation++;
        object->swblock_count = 0;
        RB_INIT(&object->swblock_root);
        vm_object_lock_init(object);

        lwkt_gettoken(&vmobj_token);
        TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
        vm_object_count++;
        lwkt_reltoken(&vmobj_token);
}

/*
 * Initialize the VM objects module.
 *
 * Called from the low level boot code only.
 */
void
vm_object_init(void)
{
        TAILQ_INIT(&vm_object_list);

        _vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(KvaEnd),
                            &kernel_object);

        obj_zone = &obj_zone_store;
        zbootinit(obj_zone, "VM OBJECT", sizeof (struct vm_object),
                vm_objects_init, VM_OBJECTS_INIT);
}

void
vm_object_init2(void)
{
        zinitna(obj_zone, NULL, NULL, 0, 0, ZONE_PANICFAIL, 1);
}

/*
 * Allocate and return a new object of the specified type and size.
 *
 * No requirements.
 */
vm_object_t
vm_object_allocate(objtype_t type, vm_pindex_t size)
{
        vm_object_t result;

        result = (vm_object_t) zalloc(obj_zone);

        _vm_object_allocate(type, size, result);

        return (result);
}

/*
 * Add an additional reference to a vm_object.  The object must already be
 * held.  The original non-lock version is no longer supported.  The object
 * must NOT be chain locked by anyone at the time the reference is added.
 *
 * Referencing a chain-locked object can blow up the fairly sensitive
 * ref_count and shadow_count tests in the deallocator.  Most callers
 * will call vm_object_chain_wait() prior to calling
 * vm_object_reference_locked() to avoid the case.
 *
 * The object must be held.
 */
void
vm_object_reference_locked(vm_object_t object)
{
        KKASSERT(object != NULL);
        ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
        KKASSERT((object->flags & OBJ_CHAINLOCK) == 0);
        object->ref_count++;
        if (object->type == OBJT_VNODE) {
                vref(object->handle);
                /* XXX what if the vnode is being destroyed? */
        }
}

/*
 * Object OBJ_CHAINLOCK lock handling.
 *
 * The caller can chain-lock backing objects recursively and then
 * use vm_object_chain_release_all() to undo the whole chain.
 *
 * Chain locks are used to prevent collapses and are only applicable
 * to OBJT_DEFAULT and OBJT_SWAP objects.  Chain locking operations
 * on other object types are ignored.  This is also important because
 * it allows e.g. the vnode underlying a memory mapping to take concurrent
 * faults.
 *
 * The object must usually be held on entry, though intermediate
 * objects need not be held on release.
 */
void
vm_object_chain_wait(vm_object_t object)
{
        ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
        while (object->flags & OBJ_CHAINLOCK) {
                vm_object_set_flag(object, OBJ_CHAINWANT);
                tsleep(object, 0, "objchain", 0);
        }
}

void
vm_object_chain_acquire(vm_object_t object)
{
        if (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP) {
                vm_object_chain_wait(object);
                vm_object_set_flag(object, OBJ_CHAINLOCK);
        }
}

void
vm_object_chain_release(vm_object_t object)
{
        ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
        if (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP) {
                KKASSERT(object->flags & OBJ_CHAINLOCK);
                if (object->flags & OBJ_CHAINWANT) {
                        vm_object_clear_flag(object,
                                             OBJ_CHAINLOCK | OBJ_CHAINWANT);
                        wakeup(object);
                } else {
                        vm_object_clear_flag(object, OBJ_CHAINLOCK);
                }
        }
}

/*
 * This releases the entire chain of objects from first_object to and
 * including stopobj, flowing through object->backing_object.
 *
 * We release stopobj first as an optimization as this object is most
 * likely to be shared across multiple processes.
 */
void
vm_object_chain_release_all(vm_object_t first_object, vm_object_t stopobj)
{
        vm_object_t backing_object;
        vm_object_t object;

        vm_object_chain_release(stopobj);
        object = first_object;

        while (object != stopobj) {
                KKASSERT(object);
                if (object != first_object)
                        vm_object_hold(object);
                backing_object = object->backing_object;
                vm_object_chain_release(object);
                if (object != first_object)
                        vm_object_drop(object);
                object = backing_object;
        }
}
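
/*
 * Editor's illustrative sketch (not compiled): a typical chain-lock
 * bracket.  A caller chain-locks each object while walking down the
 * backing chain to fence off collapses, then unwinds everything with a
 * single call.  All variable names here are illustrative only:
 */
#if 0
        vm_object_t object = first_object;
        vm_object_t backing;

        vm_object_hold(object);
        vm_object_chain_acquire(object);
        while ((backing = object->backing_object) != NULL) {
                vm_object_hold(backing);
                vm_object_chain_acquire(backing);
                vm_object_drop(backing);        /* chain lock is retained */
                object = backing;
        }
        /* ... collapse-sensitive work on the whole chain ... */
        vm_object_chain_release_all(first_object, object);
#endif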

/*
 * Dereference an object and its underlying vnode.
 *
 * The object must be held and will be held on return.
 */
static void
vm_object_vndeallocate(vm_object_t object)
{
        struct vnode *vp = (struct vnode *) object->handle;

        KASSERT(object->type == OBJT_VNODE,
            ("vm_object_vndeallocate: not a vnode object"));
        KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
        ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
#ifdef INVARIANTS
        if (object->ref_count == 0) {
                vprint("vm_object_vndeallocate", vp);
                panic("vm_object_vndeallocate: bad object reference count");
        }
#endif
        object->ref_count--;
        if (object->ref_count == 0)
                vclrflags(vp, VTEXT);
        vrele(vp);
}

/*
 * Release a reference to the specified object, gained either through a
 * vm_object_allocate or a vm_object_reference call.  When all references
 * are gone, storage associated with this object may be relinquished.
 *
 * The caller does not have to hold the object locked but must have control
 * over the reference in question in order to guarantee that the object
 * does not get ripped out from under us.
 */
void
vm_object_deallocate(vm_object_t object)
{
        if (object) {
                vm_object_hold(object);
                vm_object_deallocate_locked(object);
                vm_object_drop(object);
        }
}

void
vm_object_deallocate_locked(vm_object_t object)
{
        struct vm_object_dealloc_list *dlist = NULL;
        struct vm_object_dealloc_list *dtmp;
        vm_object_t temp;
        int must_drop = 0;

        /*
         * We may chain-deallocate the object, but additional objects may
         * collect on the dlist which also have to be deallocated.  We
         * must avoid recursion; vm_object chains can get deep.
         */
again:
        while (object != NULL) {
#if 0
                /*
                 * Don't rip a ref_count out from under an object undergoing
                 * collapse, it will confuse the collapse code.
                 */
                vm_object_chain_wait(object);
#endif
                if (object->type == OBJT_VNODE) {
                        vm_object_vndeallocate(object);
                        break;
                }

                if (object->ref_count == 0) {
                        panic("vm_object_deallocate: object deallocated "
                              "too many times: %d", object->type);
                }
                if (object->ref_count > 2) {
                        object->ref_count--;
                        break;
                }

                /*
                 * Here on ref_count of one or two, which are special cases
                 * for objects.
                 *
                 * Nominal ref_count > 1 case if the second ref is not from
                 * a shadow.
                 */
                if (object->ref_count == 2 && object->shadow_count == 0) {
                        vm_object_set_flag(object, OBJ_ONEMAPPING);
                        object->ref_count--;
                        break;
                }

                /*
                 * If the second ref is from a shadow we chain along it
                 * upwards if the object's handle is exhausted.
                 *
                 * We have to decrement object->ref_count before potentially
                 * collapsing the first shadow object or the collapse code
                 * will not be able to handle the degenerate case to remove
                 * object.  However, if we do it too early the object can
                 * get ripped out from under us.
                 */
                if (object->ref_count == 2 && object->shadow_count == 1 &&
                    object->handle == NULL && (object->type == OBJT_DEFAULT ||
                                               object->type == OBJT_SWAP)) {
                        temp = LIST_FIRST(&object->shadow_head);
                        KKASSERT(temp != NULL);
                        vm_object_hold(temp);

                        /*
                         * Wait for any paging to complete so the collapse
                         * doesn't (or isn't likely to) qcollapse.  pip
                         * waiting must occur before we acquire the
                         * chainlock.
                         */
                        while (
                                temp->paging_in_progress ||
                                object->paging_in_progress
                        ) {
                                vm_object_pip_wait(temp, "objde1");
                                vm_object_pip_wait(object, "objde2");
                        }

                        /*
                         * If the parent is locked we have to give up, as
                         * otherwise we would be acquiring locks in the
                         * wrong order and potentially deadlock.
                         */
                        if (temp->flags & OBJ_CHAINLOCK) {
                                vm_object_drop(temp);
                                goto skip;
                        }
                        vm_object_chain_acquire(temp);

                        /*
                         * Recheck/retry after the hold and the paging
                         * wait, both of which can block us.
                         */
                        if (object->ref_count != 2 ||
                            object->shadow_count != 1 ||
                            object->handle ||
                            LIST_FIRST(&object->shadow_head) != temp ||
                            (object->type != OBJT_DEFAULT &&
                             object->type != OBJT_SWAP)) {
                                vm_object_chain_release(temp);
                                vm_object_drop(temp);
                                continue;
                        }

                        /*
                         * We can safely drop object's ref_count now.
                         */
                        KKASSERT(object->ref_count == 2);
                        object->ref_count--;

                        /*
                         * If our single parent is not collapseable just
                         * decrement ref_count (2->1) and stop.
                         */
                        if (temp->handle || (temp->type != OBJT_DEFAULT &&
                                             temp->type != OBJT_SWAP)) {
                                vm_object_chain_release(temp);
                                vm_object_drop(temp);
                                break;
                        }

                        /*
                         * At this point we have already dropped object's
                         * ref_count so it is possible for a race to
                         * deallocate obj out from under us.  Any collapse
                         * will re-check the situation.  We must not block
                         * until we are able to collapse.
                         *
                         * Bump temp's ref_count to avoid an unwanted
                         * degenerate recursion (can't call
                         * vm_object_reference_locked() because it asserts
                         * that CHAINLOCK is not set).
                         */
                        temp->ref_count++;
                        KKASSERT(temp->ref_count > 1);

                        /*
                         * Collapse temp, then deallocate the extra ref
                         * formally.
                         */
                        vm_object_collapse(temp, &dlist);
                        vm_object_chain_release(temp);
                        if (must_drop) {
                                vm_object_lock_swap();
                                vm_object_drop(object);
                        }
                        object = temp;
                        must_drop = 1;
                        continue;
                }

                /*
                 * Drop the ref and handle termination on the 1->0 transition.
                 * We may have blocked above so we have to recheck.
                 */
skip:
                KKASSERT(object->ref_count != 0);
                if (object->ref_count >= 2) {
                        object->ref_count--;
                        break;
                }
                KKASSERT(object->ref_count == 1);

                /*
                 * 1->0 transition.  Chain through the backing_object.
                 * Maintain the ref until we've located the backing object,
                 * then re-check.
                 */
                while ((temp = object->backing_object) != NULL) {
                        vm_object_hold(temp);
                        if (temp == object->backing_object)
                                break;
                        vm_object_drop(temp);
                }

                /*
                 * 1->0 transition verified, retry if ref_count is no longer
                 * 1.  Otherwise disconnect the backing_object (temp) and
                 * clean up.
                 */
                if (object->ref_count != 1) {
                        vm_object_drop(temp);
                        continue;
                }

                /*
                 * It shouldn't be possible for the object to be chain locked
                 * if we're removing the last ref on it.
                 */
                KKASSERT((object->flags & OBJ_CHAINLOCK) == 0);

                if (temp) {
                        LIST_REMOVE(object, shadow_list);
                        temp->shadow_count--;
                        temp->generation++;
                        object->backing_object = NULL;
                }

                --object->ref_count;
                if ((object->flags & OBJ_DEAD) == 0)
                        vm_object_terminate(object);
                if (must_drop && temp)
                        vm_object_lock_swap();
                if (must_drop)
                        vm_object_drop(object);
                object = temp;
                must_drop = 1;
        }
        if (must_drop && object)
                vm_object_drop(object);

        /*
         * Additional tail recursion on dlist.  Avoid recursion.  Objects
         * on the dlist have a hold count but are not locked.
         */
        if ((dtmp = dlist) != NULL) {
                dlist = dtmp->next;
                object = dtmp->object;
                kfree(dtmp, M_TEMP);

                vm_object_lock(object); /* already held, add lock */
                must_drop = 1;          /* and we're responsible for it */
                goto again;
        }
}

/*
 * Destroy the specified object, freeing up related resources.
 *
 * The object must have zero references.
 *
 * The object must be held.  The caller is responsible for dropping the
 * object after terminate returns.  Terminate does NOT drop the object.
 */
static int vm_object_terminate_callback(vm_page_t p, void *data);

void
vm_object_terminate(vm_object_t object)
{
        /*
         * Make sure no one uses us.  Once we set OBJ_DEAD we should be
         * able to safely block.
         */
        ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
        KKASSERT((object->flags & OBJ_DEAD) == 0);
        vm_object_set_flag(object, OBJ_DEAD);

        /*
         * Wait for the pageout daemon to be done with the object
         */
        vm_object_pip_wait(object, "objtrm1");

        KASSERT(!object->paging_in_progress,
                ("vm_object_terminate: pageout in progress"));

        /*
         * Clean and free the pages, as appropriate. All references to the
         * object are gone, so we don't need to lock it.
         */
        if (object->type == OBJT_VNODE) {
                struct vnode *vp;

                /*
                 * Clean pages and flush buffers.
                 */
                vm_object_page_clean(object, 0, 0, OBJPC_SYNC);

                vp = (struct vnode *) object->handle;
                vinvalbuf(vp, V_SAVE, 0, 0);
        }

        /*
         * Wait for any I/O to complete, after which there had better not
         * be any references left on the object.
         */
        vm_object_pip_wait(object, "objtrm2");

        if (object->ref_count != 0) {
                panic("vm_object_terminate: object with references, "
                      "ref_count=%d", object->ref_count);
        }

        /*
         * Now free any remaining pages. For internal objects, this also
         * removes them from paging queues. Don't free wired pages, just
         * remove them from the object.
         */
        vm_page_rb_tree_RB_SCAN(&object->rb_memq, NULL,
                                vm_object_terminate_callback, NULL);

        /*
         * Let the pager know object is dead.
         */
        vm_pager_deallocate(object);

        /*
         * Wait for the object hold count to hit 1, clean out pages as
         * we go.  vmobj_token interlocks any race conditions that might
         * pick the object up from the vm_object_list after we have cleared
         * rb_memq.
         */
        for (;;) {
                if (RB_ROOT(&object->rb_memq) == NULL)
                        break;
                kprintf("vm_object_terminate: Warning, object %p "
                        "still has %d pages\n",
                        object, object->resident_page_count);
                vm_page_rb_tree_RB_SCAN(&object->rb_memq, NULL,
                                        vm_object_terminate_callback, NULL);
        }

        /*
         * There had better not be any pages left
         */
        KKASSERT(object->resident_page_count == 0);

        /*
         * Remove the object from the global object list.
         */
        lwkt_gettoken(&vmobj_token);
        TAILQ_REMOVE(&vm_object_list, object, object_list);
        vm_object_count--;
        lwkt_reltoken(&vmobj_token);
        vm_object_dead_wakeup(object);

        if (object->ref_count != 0) {
                panic("vm_object_terminate2: object with references, "
                      "ref_count=%d", object->ref_count);
        }

        /*
         * NOTE: The object hold_count is at least 1, so we cannot zfree()
         *       the object here.  See vm_object_drop().
         */
}

/*
 * The caller must hold the object.
 */
static int
vm_object_terminate_callback(vm_page_t p, void *data __unused)
{
        vm_object_t object;

        object = p->object;
        vm_page_busy_wait(p, TRUE, "vmpgtrm");
        if (object != p->object) {
                kprintf("vm_object_terminate: Warning: Encountered "
                        "busied page %p on queue %d\n", p, p->queue);
                vm_page_wakeup(p);
        } else if (p->wire_count == 0) {
                vm_page_free(p);
                mycpu->gd_cnt.v_pfree++;
        } else {
                if (p->queue != PQ_NONE)
                        kprintf("vm_object_terminate: Warning: Encountered "
                                "wired page %p on queue %d\n", p, p->queue);
                vm_page_remove(p);
                vm_page_wakeup(p);
        }
        lwkt_yield();
        return(0);
}

/*
 * The object is dead but still has an object<->pager association.  Sleep
 * and return.  The caller typically retests the association in a loop.
 *
 * The caller must hold the object.
 */
void
vm_object_dead_sleep(vm_object_t object, const char *wmesg)
{
        ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
        if (object->handle) {
                vm_object_set_flag(object, OBJ_DEADWNT);
                tsleep(object, 0, wmesg, 0);
                /* object may be invalid after this point */
        }
}

/*
 * Wakeup anyone waiting for the object<->pager disassociation on
 * a dead object.
 *
 * The caller must hold the object.
 */
void
vm_object_dead_wakeup(vm_object_t object)
{
        ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
        if (object->flags & OBJ_DEADWNT) {
                vm_object_clear_flag(object, OBJ_DEADWNT);
                wakeup(object);
        }
}
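
/*
 * Editor's illustrative sketch (not compiled): the retest loop referred
 * to above.  A caller holding the object (so it cannot be freed) sleeps
 * until the object<->pager association is torn down; the wmesg string is
 * arbitrary:
 */
#if 0
        while ((object->flags & OBJ_DEAD) && object->handle) {
                vm_object_dead_sleep(object, "objdead");
                /* re-evaluate object->handle after waking up */
        }
#endif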

/*
 * Clean all dirty pages in the specified range of object.  Leaves the page
 * on whatever queue it is currently on.  If NOSYNC is set then do not
 * write out pages with PG_NOSYNC set (originally comes from MAP_NOSYNC),
 * leaving the object dirty.
 *
 * When stuffing pages asynchronously, allow clustering.  XXX we need a
 * synchronous clustering mode implementation.
 *
 * Odd semantics: if start == end, we clean everything.
 *
 * The object must be locked? XXX
 */
static int vm_object_page_clean_pass1(struct vm_page *p, void *data);
static int vm_object_page_clean_pass2(struct vm_page *p, void *data);

void
vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
                     int flags)
{
        struct rb_vm_page_scan_info info;
        struct vnode *vp;
        int wholescan;
        int pagerflags;
        int generation;

        vm_object_hold(object);
        if (object->type != OBJT_VNODE ||
            (object->flags & OBJ_MIGHTBEDIRTY) == 0) {
                vm_object_drop(object);
                return;
        }

        pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) ?
                        VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
        pagerflags |= (flags & OBJPC_INVAL) ? VM_PAGER_PUT_INVAL : 0;

        vp = object->handle;

        /*
         * Interlock other major object operations.  This allows us to
         * temporarily clear OBJ_WRITEABLE and OBJ_MIGHTBEDIRTY.
         */
        vm_object_set_flag(object, OBJ_CLEANING);

        /*
         * Handle 'entire object' case
         */
        info.start_pindex = start;
        if (end == 0) {
                info.end_pindex = object->size - 1;
        } else {
                info.end_pindex = end - 1;
        }
        wholescan = (start == 0 && info.end_pindex == object->size - 1);
        info.limit = flags;
        info.pagerflags = pagerflags;
        info.object = object;

        /*
         * If cleaning the entire object do a pass to mark the pages read-only.
         * If everything worked out ok, clear OBJ_WRITEABLE and
         * OBJ_MIGHTBEDIRTY.
         */
        if (wholescan) {
                info.error = 0;
                vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
                                        vm_object_page_clean_pass1, &info);
                if (info.error == 0) {
                        vm_object_clear_flag(object,
                                             OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
                        if (object->type == OBJT_VNODE &&
                            (vp = (struct vnode *)object->handle) != NULL) {
                                if (vp->v_flag & VOBJDIRTY)
                                        vclrflags(vp, VOBJDIRTY);
                        }
                }
        }

        /*
         * Do a pass to clean all the dirty pages we find.
         */
        do {
                info.error = 0;
                generation = object->generation;
                vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
                                        vm_object_page_clean_pass2, &info);
        } while (info.error || generation != object->generation);

        vm_object_clear_flag(object, OBJ_CLEANING);
        vm_object_drop(object);
}
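
/*
 * Editor's illustrative sketch (not compiled): msync()-style callers
 * flush a vnode object with calls like the following.  Note the odd
 * semantics documented above: start == end == 0 cleans the whole object.
 * foff and size are illustrative byte quantities:
 */
#if 0
        vm_object_page_clean(object, 0, 0, OBJPC_SYNC);         /* all */
        vm_object_page_clean(object, OFF_TO_IDX(foff),
                             OFF_TO_IDX(foff + size + PAGE_MASK),
                             OBJPC_SYNC | OBJPC_INVAL);         /* range */
#endif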

/*
 * The caller must hold the object.
 */
static
int
vm_object_page_clean_pass1(struct vm_page *p, void *data)
{
        struct rb_vm_page_scan_info *info = data;

        vm_page_flag_set(p, PG_CLEANCHK);
        if ((info->limit & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) {
                info->error = 1;
        } else if (vm_page_busy_try(p, FALSE) == 0) {
                vm_page_protect(p, VM_PROT_READ);       /* must not block */
                vm_page_wakeup(p);
        } else {
                info->error = 1;
        }
        lwkt_yield();
        return(0);
}

/*
 * The caller must hold the object
 */
static
int
vm_object_page_clean_pass2(struct vm_page *p, void *data)
{
        struct rb_vm_page_scan_info *info = data;
        int generation;

        /*
         * Do not mess with pages that were inserted after we started
         * the cleaning pass.
         */
        if ((p->flags & PG_CLEANCHK) == 0)
                goto done;

        generation = info->object->generation;
        vm_page_busy_wait(p, TRUE, "vpcwai");
        if (p->object != info->object ||
            info->object->generation != generation) {
                info->error = 1;
                vm_page_wakeup(p);
                goto done;
        }

        /*
         * Before wasting time traversing the pmaps, check for trivial
         * cases where the page cannot be dirty.
         */
        if (p->valid == 0 || (p->queue - p->pc) == PQ_CACHE) {
                KKASSERT((p->dirty & p->valid) == 0);
                vm_page_wakeup(p);
                goto done;
        }

        /*
         * Check whether the page is dirty or not.  The page has been set
         * to be read-only so the check will not race a user dirtying the
         * page.
         */
        vm_page_test_dirty(p);
        if ((p->dirty & p->valid) == 0) {
                vm_page_flag_clear(p, PG_CLEANCHK);
                vm_page_wakeup(p);
                goto done;
        }

        /*
         * If we have been asked to skip nosync pages and this is a
         * nosync page, skip it.  Note that the object flags were
         * not cleared in this case (because pass1 will have returned an
         * error), so we do not have to set them.
         */
        if ((info->limit & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) {
                vm_page_flag_clear(p, PG_CLEANCHK);
                vm_page_wakeup(p);
                goto done;
        }

        /*
         * Flush as many pages as we can.  PG_CLEANCHK will be cleared on
         * the pages that get successfully flushed.  Set info->error if
         * we raced an object modification.
         */
        vm_object_page_collect_flush(info->object, p, info->pagerflags);
done:
        lwkt_yield();
        return(0);
}

/*
 * Collect the specified page and nearby pages and flush them out.
 * The number of pages flushed is returned.  The passed page is busied
 * by the caller and we are responsible for its disposition.
 *
 * The caller must hold the object.
 */
static int
vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags)
{
        int runlen;
        int error;
        int maxf;
        int chkb;
        int maxb;
        int i;
        vm_pindex_t pi;
        vm_page_t maf[vm_pageout_page_count];
        vm_page_t mab[vm_pageout_page_count];
        vm_page_t ma[vm_pageout_page_count];

        ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));

        pi = p->pindex;

        maxf = 0;
        for (i = 1; i < vm_pageout_page_count; i++) {
                vm_page_t tp;

                tp = vm_page_lookup_busy_try(object, pi + i, TRUE, &error);
                if (error)
                        break;
                if (tp == NULL)
                        break;
                if ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
                    (tp->flags & PG_CLEANCHK) == 0) {
                        vm_page_wakeup(tp);
                        break;
                }
                if ((tp->queue - tp->pc) == PQ_CACHE) {
                        vm_page_flag_clear(tp, PG_CLEANCHK);
                        vm_page_wakeup(tp);
                        break;
                }
                vm_page_test_dirty(tp);
                if ((tp->dirty & tp->valid) == 0) {
                        vm_page_flag_clear(tp, PG_CLEANCHK);
                        vm_page_wakeup(tp);
                        break;
                }
                maf[i - 1] = tp;
                maxf++;
        }

        maxb = 0;
        chkb = vm_pageout_page_count - maxf;
        /*
         * NOTE: chkb can be 0
         */
        for (i = 1; chkb && i < chkb; i++) {
                vm_page_t tp;

                tp = vm_page_lookup_busy_try(object, pi - i, TRUE, &error);
                if (error)
                        break;
                if (tp == NULL)
                        break;
                if ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
                    (tp->flags & PG_CLEANCHK) == 0) {
                        vm_page_wakeup(tp);
                        break;
                }
                if ((tp->queue - tp->pc) == PQ_CACHE) {
                        vm_page_flag_clear(tp, PG_CLEANCHK);
                        vm_page_wakeup(tp);
                        break;
                }
                vm_page_test_dirty(tp);
                if ((tp->dirty & tp->valid) == 0) {
                        vm_page_flag_clear(tp, PG_CLEANCHK);
                        vm_page_wakeup(tp);
                        break;
                }
                mab[i - 1] = tp;
                maxb++;
        }

        /*
         * All pages in the maf[] and mab[] array are busied.
         */
        for (i = 0; i < maxb; i++) {
                int index = (maxb - i) - 1;
                ma[index] = mab[i];
                vm_page_flag_clear(ma[index], PG_CLEANCHK);
        }
        vm_page_flag_clear(p, PG_CLEANCHK);
        ma[maxb] = p;
        for (i = 0; i < maxf; i++) {
                int index = (maxb + i) + 1;
                ma[index] = maf[i];
                vm_page_flag_clear(ma[index], PG_CLEANCHK);
        }
        runlen = maxb + maxf + 1;

        for (i = 0; i < runlen; i++)
                vm_page_hold(ma[i]);

        vm_pageout_flush(ma, runlen, pagerflags);

        for (i = 0; i < runlen; i++) {
                if (ma[i]->valid & ma[i]->dirty) {
                        vm_page_protect(ma[i], VM_PROT_READ);
                        vm_page_flag_set(ma[i], PG_CLEANCHK);

                        /*
                         * maxf will end up being the actual number of pages
                         * we wrote out contiguously, non-inclusive of the
                         * first page.  We do not count look-behind pages.
                         */
                        if (i >= maxb + 1 && (maxf > i - maxb - 1))
                                maxf = i - maxb - 1;
                }
                vm_page_unhold(ma[i]);
        }
        return(maxf + 1);
}
1281
1282 /*
1283  * Same as vm_object_pmap_copy, except range checking really
1284  * works, and is meant for small sections of an object.
1285  *
1286  * This code protects resident pages by making them read-only
1287  * and is typically called on a fork or split when a page
1288  * is converted to copy-on-write.  
1289  *
1290  * NOTE: If the page is already at VM_PROT_NONE, calling
1291  * vm_page_protect will have no effect.
1292  */
1293 void
1294 vm_object_pmap_copy_1(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
1295 {
1296         vm_pindex_t idx;
1297         vm_page_t p;
1298
1299         if (object == NULL || (object->flags & OBJ_WRITEABLE) == 0)
1300                 return;
1301
1302         vm_object_hold(object);
1303         for (idx = start; idx < end; idx++) {
1304                 p = vm_page_lookup(object, idx);
1305                 if (p == NULL)
1306                         continue;
1307                 vm_page_protect(p, VM_PROT_READ);
1308         }
1309         vm_object_drop(object);
1310 }
1311
1312 /*
1313  * Removes all physical pages in the specified object range from all
1314  * physical maps.
1315  *
1316  * The object must *not* be locked.
1317  */
1318
1319 static int vm_object_pmap_remove_callback(vm_page_t p, void *data);
1320
1321 void
1322 vm_object_pmap_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
1323 {
1324         struct rb_vm_page_scan_info info;
1325
1326         if (object == NULL)
1327                 return;
1328         info.start_pindex = start;
1329         info.end_pindex = end - 1;
1330
1331         vm_object_hold(object);
1332         vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
1333                                 vm_object_pmap_remove_callback, &info);
1334         if (start == 0 && end == object->size)
1335                 vm_object_clear_flag(object, OBJ_WRITEABLE);
1336         vm_object_drop(object);
1337 }
1338
1339 /*
1340  * The caller must hold the object
1341  */
1342 static int
1343 vm_object_pmap_remove_callback(vm_page_t p, void *data __unused)
1344 {
1345         vm_page_protect(p, VM_PROT_NONE);
1346         return(0);
1347 }
1348
1349 /*
1350  * Implements the madvise function at the object/page level.
1351  *
1352  * MADV_WILLNEED        (any object)
1353  *
1354  *      Activate the specified pages if they are resident.
1355  *
1356  * MADV_DONTNEED        (any object)
1357  *
1358  *      Deactivate the specified pages if they are resident.
1359  *
1360  * MADV_FREE    (OBJT_DEFAULT/OBJT_SWAP objects, OBJ_ONEMAPPING only)
1361  *
1362  *      Deactivate and clean the specified pages if they are
1363  *      resident.  This permits the process to reuse the pages
1364  *      without faulting or the kernel to reclaim the pages
1365  *      without I/O.
1366  *
1367  * No requirements.
1368  */
1369 void
1370 vm_object_madvise(vm_object_t object, vm_pindex_t pindex, int count, int advise)
1371 {
1372         vm_pindex_t end, tpindex;
1373         vm_object_t tobject;
1374         vm_object_t xobj;
1375         vm_page_t m;
1376         int error;
1377
1378         if (object == NULL)
1379                 return;
1380
1381         end = pindex + count;
1382
1383         vm_object_hold(object);
1384         tobject = object;
1385
1386         /*
1387          * Locate and adjust resident pages
1388          */
1389         for (; pindex < end; pindex += 1) {
1390 relookup:
1391                 if (tobject != object)
1392                         vm_object_drop(tobject);
1393                 tobject = object;
1394                 tpindex = pindex;
1395 shadowlookup:
1396                 /*
1397                  * MADV_FREE only operates on OBJT_DEFAULT or OBJT_SWAP pages
1398                  * and those pages must be OBJ_ONEMAPPING.
1399                  */
1400                 if (advise == MADV_FREE) {
1401                         if ((tobject->type != OBJT_DEFAULT &&
1402                              tobject->type != OBJT_SWAP) ||
1403                             (tobject->flags & OBJ_ONEMAPPING) == 0) {
1404                                 continue;
1405                         }
1406                 }
1407
1408                 m = vm_page_lookup_busy_try(tobject, tpindex, TRUE, &error);
1409
1410                 if (error) {
1411                         vm_page_sleep_busy(m, TRUE, "madvpo");
1412                         goto relookup;
1413                 }
1414                 if (m == NULL) {
1415                         /*
1416                          * There may be swap even if there is no backing page
1417                          */
1418                         if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
1419                                 swap_pager_freespace(tobject, tpindex, 1);
1420
1421                         /*
1422                          * next object
1423                          */
1424                         while ((xobj = tobject->backing_object) != NULL) {
1425                                 KKASSERT(xobj != object);
1426                                 vm_object_hold(xobj);
1427                                 if (xobj == tobject->backing_object)
1428                                         break;
1429                                 vm_object_drop(xobj);
1430                         }
1431                         if (xobj == NULL)
1432                                 continue;
1433                         tpindex += OFF_TO_IDX(tobject->backing_object_offset);
1434                         if (tobject != object) {
1435                                 vm_object_lock_swap();
1436                                 vm_object_drop(tobject);
1437                         }
1438                         tobject = xobj;
1439                         goto shadowlookup;
1440                 }
1441
1442                 /*
1443                  * If the page is not in a normal active state, we skip it.
1444                  * If the page is not managed there are no page queues to
1445                  * mess with.  Things can break if we mess with pages in
1446                  * any of the below states.
1447                  */
1448                 if (
1449                     /*m->hold_count ||*/
1450                     m->wire_count ||
1451                     (m->flags & PG_UNMANAGED) ||
1452                     m->valid != VM_PAGE_BITS_ALL
1453                 ) {
1454                         vm_page_wakeup(m);
1455                         continue;
1456                 }
1457
1458                 /*
1459                  * Theoretically once a page is known not to be busy, an
1460                  * interrupt cannot come along and rip it out from under us.
1461                  */
1462
1463                 if (advise == MADV_WILLNEED) {
1464                         vm_page_activate(m);
1465                 } else if (advise == MADV_DONTNEED) {
1466                         vm_page_dontneed(m);
1467                 } else if (advise == MADV_FREE) {
1468                         /*
1469                          * Mark the page clean.  This will allow the page
1470                          * to be freed up by the system.  However, such pages
1471                          * are often reused quickly by malloc()/free()
1472                          * so we do not do anything that would cause
1473                          * a page fault if we can help it.
1474                          *
1475                          * Specifically, we do not try to actually free
1476                          * the page now nor do we try to put it in the
1477                          * cache (which would cause a page fault on reuse).
1478                          *
1479                          * But we do make the page is freeable as we
1480                          * can without actually taking the step of unmapping
1481                          * it.
1482                          */
1483                         pmap_clear_modify(m);
1484                         m->dirty = 0;
1485                         m->act_count = 0;
1486                         vm_page_dontneed(m);
1487                         if (tobject->type == OBJT_SWAP)
1488                                 swap_pager_freespace(tobject, tpindex, 1);
1489                 }
1490                 vm_page_wakeup(m);
1491         }       
1492         if (tobject != object)
1493                 vm_object_drop(tobject);
1494         vm_object_drop(object);
1495 }
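
/*
 * NOTE: The backing-object walks above use a hold-and-reverify idiom.
 *	 A minimal sketch of the pattern, using only vm_object_hold()
 *	 and vm_object_drop():
 *
 *		while ((xobj = tobject->backing_object) != NULL) {
 *			vm_object_hold(xobj);
 *			if (xobj == tobject->backing_object)
 *				break;		(still ours, keep the hold)
 *			vm_object_drop(xobj);	(raced a collapse, retry)
 *		}
 *
 *	 Because vm_object_hold() can block, the backing_object pointer
 *	 cannot be trusted until it has been re-checked with the hold
 *	 in place.
 */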
1496
1497 /*
1498  * Create a new object which is backed by the specified existing object
1499  * range.  Replace the pointer and offset that were pointing at the existing
1500  * object with the pointer/offset for the new object.
1501  *
1502  * No other requirements.
1503  */
1504 void
1505 vm_object_shadow(vm_object_t *objectp, vm_ooffset_t *offset, vm_size_t length,
1506                  int addref)
1507 {
1508         vm_object_t source;
1509         vm_object_t result;
1510
1511         source = *objectp;
1512
1513         /*
1514          * Don't create the new object if the old object isn't shared.
1515          * We have to chain wait before adding the reference to avoid
1516          * racing a collapse or deallocation.
1517          *
1518          * Add the additional ref to source here to avoid racing a later
1519          * collapse or deallocation. Clear the ONEMAPPING flag whether
1520          * addref is TRUE or not in this case because the original object
1521          * will be shadowed.
1522          */
1523         if (source) {
1524                 vm_object_hold(source);
1525                 vm_object_chain_wait(source);
1526                 if (source->ref_count == 1 &&
1527                     source->handle == NULL &&
1528                     (source->type == OBJT_DEFAULT ||
1529                      source->type == OBJT_SWAP)) {
1530                         vm_object_drop(source);
1531                         if (addref) {
1532                                 vm_object_reference_locked(source);
1533                                 vm_object_clear_flag(source, OBJ_ONEMAPPING);
1534                         }
1535                         return;
1536                 }
1537                 vm_object_reference_locked(source);
1538                 vm_object_clear_flag(source, OBJ_ONEMAPPING);
1539         }
1540
1541         /*
1542          * Allocate a new object with the given length.  The new object
1543          * is returned referenced but we may have to add another one
1544          * (typically because the caller is about to clone a vm_map_entry).
1545          * If we are adding a second reference we must clear OBJ_ONEMAPPING.
1546          *
1547          * The source object currently has an extra reference to prevent
1548          * collapses into it while we mess with its shadow list, which
1549          * we will remove later in this routine.
1550          */
1551         if ((result = vm_object_allocate(OBJT_DEFAULT, length)) == NULL)
1552                 panic("vm_object_shadow: no object for shadowing");
1553         vm_object_hold(result);
1554         if (addref) {
1555                 vm_object_reference_locked(result);
1556                 vm_object_clear_flag(result, OBJ_ONEMAPPING);
1557         }
1558
1559         /*
1560          * The new object shadows the source object.  Chain wait before
1561          * adjusting shadow_count or the shadow list to avoid races.
1562          *
1563          * Try to optimize the result object's page color when shadowing
1564          * in order to maintain page coloring consistency in the combined 
1565          * shadowed object.
1566          */
1567         KKASSERT(result->backing_object == NULL);
1568         result->backing_object = source;
1569         if (source) {
1570                 vm_object_chain_wait(source);
1571                 LIST_INSERT_HEAD(&source->shadow_head, result, shadow_list);
1572                 source->shadow_count++;
1573                 source->generation++;
1574 #ifdef SMP
1575                 /* cpu localization twist */
1576                 result->pg_color = (int)(intptr_t)curthread;
1577 #else
1578                 result->pg_color = (source->pg_color + OFF_TO_IDX(*offset)) &
1579                                    PQ_L2_MASK;
1580 #endif
1581         }
1582
1583         /*
1584          * Adjust the return storage.  Drop the ref on source before
1585          * returning.
1586          */
1587         result->backing_object_offset = *offset;
1588         vm_object_drop(result);
1589         *offset = 0;
1590         if (source) {
1591                 vm_object_deallocate_locked(source);
1592                 vm_object_drop(source);
1593         }
1594
1595         /*
1596          * Return the new object to the caller
1597          */
1598         *objectp = result;
1599 }
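
/*
 * Example: a sketch of a typical caller setting up copy-on-write for a
 * map entry.  The entry/field names below are illustrative and not
 * lifted from vm_map.c:
 *
 *	vm_object_t obj = entry->object.vm_object;
 *	vm_ooffset_t off = entry->offset;
 *
 *	vm_object_shadow(&obj, &off,
 *			 atop(entry->end - entry->start), 0);
 *	entry->object.vm_object = obj;
 *	entry->offset = off;
 *
 * When a shadow is created, *objectp is replaced by it and *offset is
 * zeroed, the old value having moved into result->backing_object_offset.
 * In the unshared fast path both are left untouched.
 */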
1600
1601 #define OBSC_TEST_ALL_SHADOWED  0x0001
1602 #define OBSC_COLLAPSE_NOWAIT    0x0002
1603 #define OBSC_COLLAPSE_WAIT      0x0004
1604
1605 static int vm_object_backing_scan_callback(vm_page_t p, void *data);
1606
1607 /*
1608  * The caller must hold the object.
1609  */
1610 static __inline int
1611 vm_object_backing_scan(vm_object_t object, vm_object_t backing_object, int op)
1612 {
1613         struct rb_vm_page_scan_info info;
1614
1615         vm_object_assert_held(object);
1616         vm_object_assert_held(backing_object);
1617
1618         KKASSERT(backing_object == object->backing_object);
1619         info.backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
1620
1621         /*
1622          * Initial conditions
1623          */
1624         if (op & OBSC_TEST_ALL_SHADOWED) {
1625                 /*
1626                  * We do not want to have to test for the existence of
1627                  * swap pages in the backing object.  XXX but with the
1628                  * new swapper this would be pretty easy to do.
1629                  *
1630                  * XXX what about anonymous MAP_SHARED memory that hasn't
1631                  * been ZFOD faulted yet?  If we do not test for this, the
1632                  * shadow test may succeed! XXX
1633                  */
1634                 if (backing_object->type != OBJT_DEFAULT)
1635                         return(0);
1636         }
1637         if (op & OBSC_COLLAPSE_WAIT) {
1638                 KKASSERT((backing_object->flags & OBJ_DEAD) == 0);
1639                 vm_object_set_flag(backing_object, OBJ_DEAD);
1640                 lwkt_gettoken(&vmobj_token);
1641                 TAILQ_REMOVE(&vm_object_list, backing_object, object_list);
1642                 vm_object_count--;
1643                 lwkt_reltoken(&vmobj_token);
1644                 vm_object_dead_wakeup(backing_object);
1645         }
1646
1647         /*
1648          * Our scan.  We have to retry if a negative error code is returned;
1649          * otherwise 0 or 1 will be returned in info.error.  0 indicates that
1650          * the scan had to be stopped because the parent does not completely
1651          * shadow the child.
1652          */
1653         info.object = object;
1654         info.backing_object = backing_object;
1655         info.limit = op;
1656         do {
1657                 info.error = 1;
1658                 vm_page_rb_tree_RB_SCAN(&backing_object->rb_memq, NULL,
1659                                         vm_object_backing_scan_callback,
1660                                         &info);
1661         } while (info.error < 0);
1662
1663         return(info.error);
1664 }
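
/*
 * In sketch form, the driver loop above and the callback below
 * cooperate through info.error:
 *
 *	do {
 *		info.error = 1;		(assume success)
 *		vm_page_rb_tree_RB_SCAN(...);
 *	} while (info.error < 0);	(-1: callback blocked, restart)
 *
 * leaving info.error == 0 when the shadow test failed and 1 when the
 * scan ran to completion.
 */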
1665
1666 /*
1667  * The caller must hold the object.
1668  */
1669 static int
1670 vm_object_backing_scan_callback(vm_page_t p, void *data)
1671 {
1672         struct rb_vm_page_scan_info *info = data;
1673         vm_object_t backing_object;
1674         vm_object_t object;
1675         vm_pindex_t pindex;
1676         vm_pindex_t new_pindex;
1677         vm_pindex_t backing_offset_index;
1678         int op;
1679
1680         pindex = p->pindex;
1681         new_pindex = pindex - info->backing_offset_index;
1682         op = info->limit;
1683         object = info->object;
1684         backing_object = info->backing_object;
1685         backing_offset_index = info->backing_offset_index;
1686
1687         if (op & OBSC_TEST_ALL_SHADOWED) {
1688                 vm_page_t pp;
1689
1690                 /*
1691                  * Ignore pages outside the parent object's range
1692                  * and outside the parent object's mapping of the 
1693                  * backing object.
1694                  *
1695                  * note that we do not busy the backing object's
1696                  * page.
1697                  */
1698                 if (pindex < backing_offset_index ||
1699                     new_pindex >= object->size
1700                 ) {
1701                         return(0);
1702                 }
1703
1704                 /*
1705                  * See if the parent has the page or if the parent's
1706                  * object pager has the page.  If the parent has the
1707                  * page but the page is not valid, the parent's
1708                  * object pager must have the page.
1709                  *
1710                  * If this fails, the parent does not completely shadow
1711                  * the object and we might as well give up now.
1712                  */
1713                 pp = vm_page_lookup(object, new_pindex);
1714                 if ((pp == NULL || pp->valid == 0) &&
1715                     !vm_pager_has_page(object, new_pindex)
1716                 ) {
1717                         info->error = 0;        /* problemo */
1718                         return(-1);             /* stop the scan */
1719                 }
1720         }
1721
1722         /*
1723          * Check for busy page.  Note that we may have lost (p) if we
1724          * blocked above.
1725          */
1726         if (op & (OBSC_COLLAPSE_WAIT | OBSC_COLLAPSE_NOWAIT)) {
1727                 vm_page_t pp;
1728
1729                 if (vm_page_busy_try(p, TRUE)) {
1730                         if (op & OBSC_COLLAPSE_NOWAIT) {
1731                                 return(0);
1732                         } else {
1733                                 /*
1734                                  * If we slept, anything could have
1735                                  * happened.   Ask that the scan be restarted.
1736                                  *
1737                                  * Since the object is marked dead, the
1738                                  * backing offset should not have changed.  
1739                                  */
1740                                 vm_page_sleep_busy(p, TRUE, "vmocol");
1741                                 info->error = -1;
1742                                 return(-1);
1743                         }
1744                 }
1745
1746                 /*
1747                  * If (p) is no longer valid restart the scan.
1748                  */
1749                 if (p->object != backing_object || p->pindex != pindex) {
1750                         kprintf("vm_object_backing_scan: Warning: page "
1751                                 "%p ripped out from under us\n", p);
1752                         vm_page_wakeup(p);
1753                         info->error = -1;
1754                         return(-1);
1755                 }
1756
1757                 if (op & OBSC_COLLAPSE_NOWAIT) {
1758                         if (p->valid == 0 /*|| p->hold_count*/ ||
1759                             p->wire_count) {
1760                                 vm_page_wakeup(p);
1761                                 return(0);
1762                         }
1763                 } else {
1764                         /* XXX what if p->valid == 0 , hold_count, etc? */
1765                 }
1766
1767                 KASSERT(
1768                     p->object == backing_object,
1769                     ("vm_object_qcollapse(): object mismatch")
1770                 );
1771
1772                 /*
1773                  * Destroy any associated swap
1774                  */
1775                 if (backing_object->type == OBJT_SWAP)
1776                         swap_pager_freespace(backing_object, p->pindex, 1);
1777
1778                 if (
1779                     p->pindex < backing_offset_index ||
1780                     new_pindex >= object->size
1781                 ) {
1782                         /*
1783                          * Page is out of the parent object's range, we 
1784                          * can simply destroy it. 
1785                          */
1786                         vm_page_protect(p, VM_PROT_NONE);
1787                         vm_page_free(p);
1788                         return(0);
1789                 }
1790
1791                 pp = vm_page_lookup(object, new_pindex);
1792                 if (pp != NULL || vm_pager_has_page(object, new_pindex)) {
1793                         /*
1794                          * page already exists in parent OR swap exists
1795                          * for this location in the parent.  Destroy 
1796                          * the original page from the backing object.
1797                          *
1798                          * Leave the parent's page alone
1799                          */
1800                         vm_page_protect(p, VM_PROT_NONE);
1801                         vm_page_free(p);
1802                         return(0);
1803                 }
1804
1805                 /*
1806                  * Page does not exist in parent, rename the
1807                  * page from the backing object to the main object. 
1808                  *
1809                  * If the page was mapped to a process, it can remain 
1810                  * mapped through the rename.
1811                  */
1812                 if ((p->queue - p->pc) == PQ_CACHE)
1813                         vm_page_deactivate(p);
1814
1815                 vm_page_rename(p, object, new_pindex);
1816                 vm_page_wakeup(p);
1817                 /* page automatically made dirty by rename */
1818         }
1819         return(0);
1820 }
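
/*
 * The index translation used by the scan above, spelled out: a page at
 * pindex P in the backing object appears in the parent at
 *
 *	new_pindex = P - OFF_TO_IDX(object->backing_object_offset)
 *
 * and is only reachable through the parent when
 *
 *	P >= backing_offset_index && new_pindex < object->size
 *
 * Pages outside that window cannot be seen via the parent, which is why
 * the collapse case can simply free them.
 */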
1821
1822 /*
1823  * This version of collapse allows the operation to occur earlier and
1824  * when paging_in_progress is true for an object...  This is not a complete
1825  * operation, but should plug 99.9% of the rest of the leaks.
1826  *
1827  * The caller must hold the object and backing_object and both must be
1828  * chainlocked.
1829  *
1830  * (only called from vm_object_collapse)
1831  */
1832 static void
1833 vm_object_qcollapse(vm_object_t object, vm_object_t backing_object)
1834 {
1835         if (backing_object->ref_count == 1) {
1836                 backing_object->ref_count += 2;
1837                 vm_object_backing_scan(object, backing_object,
1838                                        OBSC_COLLAPSE_NOWAIT);
1839                 backing_object->ref_count -= 2;
1840         }
1841 }
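
/*
 * NOTE: The transient ref_count += 2 above keeps backing_object's
 *	 ref_count above 1 for the duration of the NOWAIT scan,
 *	 presumably so that no code path mistakes it for a
 *	 last-reference object while its pages are being moved or
 *	 freed.  Both objects are chain-locked by the caller, so the
 *	 adjustment cannot race another collapse.
 */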
1842
1843 /*
1844  * Collapse an object with the object backing it.  Pages in the backing
1845  * object are moved into the parent, and the backing object is deallocated.
1846  * Any conflict is resolved in favor of the parent's existing pages.
1847  *
1848  * object must be held and chain-locked on call.
1849  *
1850  * The caller must have an extra ref on object to prevent a race from
1851  * destroying it during the collapse.
1852  */
1853 void
1854 vm_object_collapse(vm_object_t object, struct vm_object_dealloc_list **dlistp)
1855 {
1856         struct vm_object_dealloc_list *dlist = NULL;
1857         vm_object_t backing_object;
1858
1859         /*
1860          * Only one thread is attempting a collapse at any given moment.
1861          * There are few restrictions on (object) that callers of this
1862          * function check, so reentrancy is likely.
1863          */
1864         KKASSERT(object != NULL);
1865         vm_object_assert_held(object);
1866         KKASSERT(object->flags & OBJ_CHAINLOCK);
1867
1868         for (;;) {
1869                 vm_object_t bbobj;
1870                 int dodealloc;
1871
1872                 /*
1873                  * We have to hold the backing object, check races.
1874                  */
1875                 while ((backing_object = object->backing_object) != NULL) {
1876                         vm_object_hold(backing_object);
1877                         if (backing_object == object->backing_object)
1878                                 break;
1879                         vm_object_drop(backing_object);
1880                 }
1881
1882                 /*
1883                  * No backing object?  Nothing to collapse then.
1884                  */
1885                 if (backing_object == NULL)
1886                         break;
1887
1888                 /*
1889                  * You can't collapse with a non-default/non-swap object.
1890                  */
1891                 if (backing_object->type != OBJT_DEFAULT &&
1892                     backing_object->type != OBJT_SWAP) {
1893                         vm_object_drop(backing_object);
1894                         backing_object = NULL;
1895                         break;
1896                 }
1897
1898                 /*
1899                  * Chain-lock the backing object too because if we
1900                  * successfully merge its pages into the top object we
1901                  * will collapse backing_object->backing_object as the
1902                  * new backing_object.  Re-check that it is still our
1903                  * backing object.
1904                  */
1905                 vm_object_chain_acquire(backing_object);
1906                 if (backing_object != object->backing_object) {
1907                         vm_object_chain_release(backing_object);
1908                         vm_object_drop(backing_object);
1909                         continue;
1910                 }
1911
1912                 /*
1913                  * We check the backing object first because it is most likely
1914                  * not collapsible.
1915                  */
1916                 if (backing_object->handle != NULL ||
1917                     (backing_object->type != OBJT_DEFAULT &&
1918                      backing_object->type != OBJT_SWAP) ||
1919                     (backing_object->flags & OBJ_DEAD) ||
1920                     object->handle != NULL ||
1921                     (object->type != OBJT_DEFAULT &&
1922                      object->type != OBJT_SWAP) ||
1923                     (object->flags & OBJ_DEAD)) {
1924                         break;
1925                 }
1926
1927                 /*
1928                  * If paging is in progress we can't do a normal collapse.
1929                  */
1930                 if (
1931                     object->paging_in_progress != 0 ||
1932                     backing_object->paging_in_progress != 0
1933                 ) {
1934                         vm_object_qcollapse(object, backing_object);
1935                         break;
1936                 }
1937
1938                 /*
1939                  * We know that we can either collapse the backing object (if
1940                  * the parent is the only reference to it) or (perhaps) have
1941                  * the parent bypass the object if the parent happens to shadow
1942                  * all the resident pages in the entire backing object.
1943                  *
1944                  * This is ignoring pager-backed pages such as swap pages.
1945                  * vm_object_backing_scan fails the shadowing test in this
1946                  * case.
1947                  */
1948                 if (backing_object->ref_count == 1) {
1949                         /*
1950                          * If there is exactly one reference to the backing
1951                          * object, we can collapse it into the parent.  
1952                          */
1953                         KKASSERT(object->backing_object == backing_object);
1954                         vm_object_backing_scan(object, backing_object,
1955                                                OBSC_COLLAPSE_WAIT);
1956
1957                         /*
1958                          * Move the pager from backing_object to object.
1959                          */
1960                         if (backing_object->type == OBJT_SWAP) {
1961                                 vm_object_pip_add(backing_object, 1);
1962
1963                                 /*
1964                                  * scrap the paging_offset junk and do a 
1965                                  * discrete copy.  This also removes major 
1966                                  * assumptions about how the swap-pager 
1967                                  * works from where it doesn't belong.  The
1968                                  * new swapper is able to optimize the
1969                                  * destroy-source case.
1970                                  */
1971                                 vm_object_pip_add(object, 1);
1972                                 swap_pager_copy(backing_object, object,
1973                                     OFF_TO_IDX(object->backing_object_offset),
1974                                     TRUE);
1975                                 vm_object_pip_wakeup(object);
1976                                 vm_object_pip_wakeup(backing_object);
1977                         }
1978
1979                         /*
1980                          * Object now shadows whatever backing_object did.
1981                          * Remove object from backing_object's shadow_list.
1982                          */
1983                         LIST_REMOVE(object, shadow_list);
1984                         KKASSERT(object->backing_object == backing_object);
1985                         backing_object->shadow_count--;
1986                         backing_object->generation++;
1987
1988                         /*
1989                          * backing_object->backing_object moves from within
1990                          * backing_object to within object.
1991                          */
1992                         while ((bbobj = backing_object->backing_object) != NULL) {
1993                                 vm_object_hold(bbobj);
1994                                 if (bbobj == backing_object->backing_object)
1995                                         break;
1996                                 vm_object_drop(bbobj);
1997                         }
1998                         if (bbobj) {
1999                                 LIST_REMOVE(backing_object, shadow_list);
2000                                 bbobj->shadow_count--;
2001                                 bbobj->generation++;
2002                                 backing_object->backing_object = NULL;
2003                         }
2004                         object->backing_object = bbobj;
2005                         if (bbobj) {
2006                                 LIST_INSERT_HEAD(&bbobj->shadow_head,
2007                                                  object, shadow_list);
2008                                 bbobj->shadow_count++;
2009                                 bbobj->generation++;
2010                         }
2011
2012                         object->backing_object_offset +=
2013                                 backing_object->backing_object_offset;
2014
2015                         vm_object_drop(bbobj);
2016
2017                         /*
2018                          * Discard the old backing_object.  Nothing should be
2019                          * able to ref it, other than a vm_map_split(),
2020                          * and vm_map_split() will stall on our chain lock.
2021                          * And we control the parent so it shouldn't be
2022                          * possible for it to go away either.
2023                          *
2024                          * Since the backing object has no pages, no pager
2025                          * left, and no object references within it, all
2026                          * that is necessary is to dispose of it.
2027                          */
2028                         KASSERT(backing_object->ref_count == 1,
2029                                 ("backing_object %p was somehow "
2030                                  "re-referenced during collapse!",
2031                                  backing_object));
2032                         KASSERT(RB_EMPTY(&backing_object->rb_memq),
2033                                 ("backing_object %p somehow has left "
2034                                  "over pages during collapse!",
2035                                  backing_object));
2036
2037                         /*
2038                          * The object can be destroyed.
2039                          *
2040                          * XXX just fall through and dodealloc instead
2041                          *     of forcing destruction?
2042                          */
2043                         --backing_object->ref_count;
2044                         if ((backing_object->flags & OBJ_DEAD) == 0)
2045                                 vm_object_terminate(backing_object);
2046                         object_collapses++;
2047                         dodealloc = 0;
2048                 } else {
2049                         /*
2050                          * If we do not entirely shadow the backing object,
2051                          * there is nothing we can do so we give up.
2052                          */
2053                         if (vm_object_backing_scan(object, backing_object,
2054                                                 OBSC_TEST_ALL_SHADOWED) == 0) {
2055                                 break;
2056                         }
2057
2058                         /*
2059                          * bbobj is backing_object->backing_object.  Since
2060                          * object completely shadows backing_object we can
2061                          * bypass it and become backed by bbobj instead.
2062                          */
2063                         while ((bbobj = backing_object->backing_object) != NULL) {
2064                                 vm_object_hold(bbobj);
2065                                 if (bbobj == backing_object->backing_object)
2066                                         break;
2067                                 vm_object_drop(bbobj);
2068                         }
2069
2070                         /*
2071                          * Make object shadow bbobj instead of backing_object.
2072                          * Remove object from backing_object's shadow list.
2073                          *
2074                          * Deallocating backing_object will not remove
2075                          * it, since its reference count is at least 2.
2076                          */
2077                         KKASSERT(object->backing_object == backing_object);
2078                         LIST_REMOVE(object, shadow_list);
2079                         backing_object->shadow_count--;
2080                         backing_object->generation++;
2081
2082                         /*
2083                          * Add a ref to bbobj; object now shadows bbobj.
2084                          *
2085                          * NOTE: backing_object->backing_object still points
2086                          *       to bbobj.  That relationship remains intact
2087                          *       because backing_object has > 1 ref, so
2088                          *       someone else is pointing to it (hence why
2089                          *       we can't collapse it into object and can
2090                          *       only handle the all-shadowed bypass case).
2091                          */
2092                         if (bbobj) {
2093                                 vm_object_chain_wait(bbobj);
2094                                 vm_object_reference_locked(bbobj);
2095                                 LIST_INSERT_HEAD(&bbobj->shadow_head,
2096                                                  object, shadow_list);
2097                                 bbobj->shadow_count++;
2098                                 bbobj->generation++;
2099                                 object->backing_object_offset +=
2100                                         backing_object->backing_object_offset;
2101                                 object->backing_object = bbobj;
2102                                 vm_object_drop(bbobj);
2103                         } else {
2104                                 object->backing_object = NULL;
2105                         }
2106
2107                         /*
2108                          * Drop the reference count on backing_object.  To
2109                          * handle ref_count races properly we can't assume
2110                          * that the ref_count is still at least 2 so we
2111                          * have to actually call vm_object_deallocate()
2112                          * (after clearing the chainlock).
2113                          */
2114                         object_bypasses++;
2115                         dodealloc = 1;
2116                 }
2117
2118                 /*
2119                  * Ok, we want to loop on the new object->bbobj association,
2120                  * possibly collapsing it further.  However if dodealloc is
2121                  * non-zero we have to deallocate the backing_object which
2122                  * itself can potentially undergo a collapse, creating a
2123                  * recursion depth issue with the LWKT token subsystem.
2124                  *
2125                  * In the case where we must deallocate the backing_object
2126                  * it is possible now that the backing_object has a single
2127                  * shadow count on some other object (not represented here
2128                  * as yet), since it no longer shadows us.  Thus when we
2129                  * call vm_object_deallocate() it may attempt to collapse
2130                  * itself into its remaining parent.
2131                  */
2132                 if (dodealloc) {
2133                         struct vm_object_dealloc_list *dtmp;
2134
2135                         vm_object_chain_release(backing_object);
2136                         vm_object_unlock(backing_object);
2137                         /* backing_object remains held */
2138
2139                         /*
2140                          * Auto-deallocation list for caller convenience.
2141                          */
2142                         if (dlistp == NULL)
2143                                 dlistp = &dlist;
2144
2145                         dtmp = kmalloc(sizeof(*dtmp), M_TEMP, M_WAITOK);
2146                         dtmp->object = backing_object;
2147                         dtmp->next = *dlistp;
2148                         *dlistp = dtmp;
2149                 } else {
2150                         vm_object_chain_release(backing_object);
2151                         vm_object_drop(backing_object);
2152                 }
2153                 /* backing_object = NULL; not needed */
2154                 /* loop */
2155         }
2156
2157         /*
2158          * Clean up any left over backing_object
2159          */
2160         if (backing_object) {
2161                 vm_object_chain_release(backing_object);
2162                 vm_object_drop(backing_object);
2163         }
2164
2165         /*
2166          * Clean up any auto-deallocation list.  This is a convenience
2167          * for top-level callers so they don't have to pass &dlist.
2168          * Do not clean up any caller-passed dlistp, the caller will
2169          * do that.
2170          */
2171         if (dlist)
2172                 vm_object_deallocate_list(&dlist);
2173
2174 }
2175
2176 /*
2177  * vm_object_collapse() may collect additional objects in need of
2178  * deallocation.  This routine deallocates these objects.  The
2179  * deallocation itself can trigger additional collapses (which the
2180  * deallocate function takes care of).  This procedure is used to
2181  * reduce procedural recursion since these vm_object shadow chains
2182  * can become quite long.
2183  */
2184 void
2185 vm_object_deallocate_list(struct vm_object_dealloc_list **dlistp)
2186 {
2187         struct vm_object_dealloc_list *dlist;
2188
2189         while ((dlist = *dlistp) != NULL) {
2190                 *dlistp = dlist->next;
2191                 vm_object_lock(dlist->object);
2192                 vm_object_deallocate_locked(dlist->object);
2193                 vm_object_drop(dlist->object);
2194                 kfree(dlist, M_TEMP);
2195         }
2196 }
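
/*
 * Example caller pattern, as a sketch (callers must also own a ref on
 * the object, per vm_object_collapse()'s requirements, not shown here):
 *
 *	struct vm_object_dealloc_list *dlist = NULL;
 *
 *	vm_object_hold(object);
 *	vm_object_chain_acquire(object);
 *	vm_object_collapse(object, &dlist);
 *	vm_object_chain_release(object);
 *	vm_object_drop(object);
 *	if (dlist)
 *		vm_object_deallocate_list(&dlist);
 *
 * Deferring the deallocations this way bounds the recursion depth when
 * long shadow chains collapse.
 */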
2197
2198 /*
2199  * Removes all physical pages in the specified object range from the
2200  * object's list of pages.
2201  *
2202  * No requirements.
2203  */
2204 static int vm_object_page_remove_callback(vm_page_t p, void *data);
2205
2206 void
2207 vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
2208                       boolean_t clean_only)
2209 {
2210         struct rb_vm_page_scan_info info;
2211         int all;
2212
2213         /*
2214          * Degenerate cases and assertions
2215          */
2216         vm_object_hold(object);
2217         if (object == NULL ||
2218             (object->resident_page_count == 0 && object->swblock_count == 0)) {
2219                 vm_object_drop(object);
2220                 return;
2221         }
2222         KASSERT(object->type != OBJT_PHYS, 
2223                 ("attempt to remove pages from a physical object"));
2224
2225         /*
2226          * Indicate that paging is occurring on the object
2227          */
2228         vm_object_pip_add(object, 1);
2229
2230         /*
2231          * Figure out the actual removal range and whether we are removing
2232          * the entire contents of the object or not.  If removing the entire
2233          * contents, be sure to get all pages, even those that might be 
2234          * beyond the end of the object.
2235          */
2236         info.start_pindex = start;
2237         if (end == 0)
2238                 info.end_pindex = (vm_pindex_t)-1;
2239         else
2240                 info.end_pindex = end - 1;
2241         info.limit = clean_only;
2242         all = (start == 0 && info.end_pindex >= object->size - 1);
2243
2244         /*
2245          * Loop until we are sure we have gotten them all.
2246          */
2247         do {
2248                 info.error = 0;
2249                 vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
2250                                         vm_object_page_remove_callback, &info);
2251         } while (info.error);
2252
2253         /*
2254          * Remove any related swap if throwing away pages, or for
2255          * non-swap objects (the swap is a clean copy in that case).
2256          */
2257         if (object->type != OBJT_SWAP || clean_only == FALSE) {
2258                 if (all)
2259                         swap_pager_freespace_all(object);
2260                 else
2261                         swap_pager_freespace(object, info.start_pindex,
2262                              info.end_pindex - info.start_pindex + 1);
2263         }
2264
2265         /*
2266          * Cleanup
2267          */
2268         vm_object_pip_wakeup(object);
2269         vm_object_drop(object);
2270 }
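
/*
 * Example: a hypothetical truncation-style call that removes every page
 * from index trunc_pindex through the end of the object, dirty or not:
 *
 *	vm_object_page_remove(object, trunc_pindex, 0, FALSE);
 *
 * Passing end == 0 selects everything through (and beyond) the end of
 * the object; passing clean_only == TRUE would instead leave valid
 * dirty pages in place so they can be flushed rather than destroyed.
 */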
2271
2272 /*
2273  * The caller must hold the object
2274  */
2275 static int
2276 vm_object_page_remove_callback(vm_page_t p, void *data)
2277 {
2278         struct rb_vm_page_scan_info *info = data;
2279
2280         if (vm_page_busy_try(p, TRUE)) {
2281                 vm_page_sleep_busy(p, TRUE, "vmopar");
2282                 info->error = 1;
2283                 return(0);
2284         }
2285
2286         /*
2287          * Wired pages cannot be destroyed, but they can be invalidated
2288          * and we do so if clean_only (limit) is not set.
2289          *
2290          * WARNING!  The page may be wired due to being part of a buffer
2291          *           cache buffer, and the buffer might be marked B_CACHE.
2292          *           This is fine as part of a truncation but VFSs must be
2293          *           sure to fix the buffer up when re-extending the file.
2294          */
2295         if (p->wire_count != 0) {
2296                 vm_page_protect(p, VM_PROT_NONE);
2297                 if (info->limit == 0)
2298                         p->valid = 0;
2299                 vm_page_wakeup(p);
2300                 return(0);
2301         }
2302
2303         /*
2304          * limit is our clean_only flag.  If set and the page is dirty, do
2305          * not free it.  If set and the page is being held by someone, do
2306          * not free it.
2307          */
2308         if (info->limit && p->valid) {
2309                 vm_page_test_dirty(p);
2310                 if (p->valid & p->dirty) {
2311                         vm_page_wakeup(p);
2312                         return(0);
2313                 }
2314 #if 0
2315                 if (p->hold_count) {
2316                         vm_page_wakeup(p);
2317                         return(0);
2318                 }
2319 #endif
2320         }
2321
2322         /*
2323          * Destroy the page
2324          */
2325         vm_page_protect(p, VM_PROT_NONE);
2326         vm_page_free(p);
2327         return(0);
2328 }
2329
2330 /*
2331  * Coalesces two objects backing up adjoining regions of memory into a
2332  * single object.
2333  *
2334  * returns TRUE if objects were combined.
2335  *
2336  * NOTE: Only works at the moment if the second object is NULL -
2337  *       if it's not, which object do we lock first?
2338  *
2339  * Parameters:
2340  *      prev_object     First object to coalesce
2341  *      prev_offset     Offset into prev_object
2342  *      next_object     Second object to coalesce
2343  *      next_offset     Offset into next_object
2344  *
2345  *      prev_size       Size of reference to prev_object
2346  *      next_size       Size of reference to next_object
2347  *
2348  * The caller does not need to hold (prev_object) but must have a stable
2349  * pointer to it (typically by holding the vm_map locked).
2350  */
2351 boolean_t
2352 vm_object_coalesce(vm_object_t prev_object, vm_pindex_t prev_pindex,
2353                    vm_size_t prev_size, vm_size_t next_size)
2354 {
2355         vm_pindex_t next_pindex;
2356
2357         if (prev_object == NULL)
2358                 return (TRUE);
2359
2360         vm_object_hold(prev_object);
2361
2362         if (prev_object->type != OBJT_DEFAULT &&
2363             prev_object->type != OBJT_SWAP) {
2364                 vm_object_drop(prev_object);
2365                 return (FALSE);
2366         }
2367
2368         /*
2369          * Try to collapse the object first
2370          */
2371         vm_object_chain_acquire(prev_object);
2372         vm_object_collapse(prev_object, NULL);
2373
2374         /*
2375          * Can't coalesce if: more than one reference, paged out, shadows
2376          * another object, or has a copy elsewhere (any of which mean that
2377          * the pages not mapped to prev_entry may be in use anyway).
2378          */
2379
2380         if (prev_object->backing_object != NULL) {
2381                 vm_object_chain_release(prev_object);
2382                 vm_object_drop(prev_object);
2383                 return (FALSE);
2384         }
2385
2386         prev_size >>= PAGE_SHIFT;
2387         next_size >>= PAGE_SHIFT;
2388         next_pindex = prev_pindex + prev_size;
2389
2390         if ((prev_object->ref_count > 1) &&
2391             (prev_object->size != next_pindex)) {
2392                 vm_object_chain_release(prev_object);
2393                 vm_object_drop(prev_object);
2394                 return (FALSE);
2395         }
2396
2397         /*
2398          * Remove any pages that may still be in the object from a previous
2399          * deallocation.
2400          */
2401         if (next_pindex < prev_object->size) {
2402                 vm_object_page_remove(prev_object,
2403                                       next_pindex,
2404                                       next_pindex + next_size, FALSE);
2405                 if (prev_object->type == OBJT_SWAP)
2406                         swap_pager_freespace(prev_object,
2407                                              next_pindex, next_size);
2408         }
2409
2410         /*
2411          * Extend the object if necessary.
2412          */
2413         if (next_pindex + next_size > prev_object->size)
2414                 prev_object->size = next_pindex + next_size;
2415
2416         vm_object_chain_release(prev_object);
2417         vm_object_drop(prev_object);
2418         return (TRUE);
2419 }
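
/*
 * Worked example with hypothetical numbers: with 4K pages, a reference
 * at prev_pindex 10 covering prev_size 16384 bytes (4 pages after the
 * shift) gives next_pindex 14.  A following 2-page region coalesces by
 * growing prev_object->size to at least
 *
 *	next_pindex + next_size = 14 + 2 = 16
 */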
2420
2421 /*
2422  * Make the object writable and flag it as possibly being dirty.
2423  *
2424  * The caller must hold the object.  XXX also called from vm_page_dirty()
2425  * where there is currently no requirement to hold the object.
2426  */
2427 void
2428 vm_object_set_writeable_dirty(vm_object_t object)
2429 {
2430         struct vnode *vp;
2431
2432         /*vm_object_assert_held(object);*/
2433         /*
2434          * Avoid contention in vm fault path by checking the state before
2435          * issuing an atomic op on it.
2436          */
2437         if ((object->flags & (OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY)) !=
2438             (OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY)) {
2439                 vm_object_set_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
2440         }
2441         if (object->type == OBJT_VNODE &&
2442             (vp = (struct vnode *)object->handle) != NULL) {
2443                 if ((vp->v_flag & VOBJDIRTY) == 0) {
2444                         vsetflags(vp, VOBJDIRTY);
2445                 }
2446         }
2447 }
2448
2449 #include "opt_ddb.h"
2450 #ifdef DDB
2451 #include <sys/kernel.h>
2452
2453 #include <sys/cons.h>
2454
2455 #include <ddb/ddb.h>
2456
2457 static int      _vm_object_in_map (vm_map_t map, vm_object_t object,
2458                                        vm_map_entry_t entry);
2459 static int      vm_object_in_map (vm_object_t object);
2460
2461 /*
2462  * The caller must hold the object.
2463  */
2464 static int
2465 _vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
2466 {
2467         vm_map_t tmpm;
2468         vm_map_entry_t tmpe;
2469         vm_object_t obj, nobj;
2470         int entcount;
2471
2472         if (map == 0)
2473                 return 0;
2474         if (entry == 0) {
2475                 tmpe = map->header.next;
2476                 entcount = map->nentries;
2477                 while (entcount-- && (tmpe != &map->header)) {
2478                         if( _vm_object_in_map(map, object, tmpe)) {
2479                                 return 1;
2480                         }
2481                         tmpe = tmpe->next;
2482                 }
2483                 return (0);
2484         }
2485         switch(entry->maptype) {
2486         case VM_MAPTYPE_SUBMAP:
2487                 tmpm = entry->object.sub_map;
2488                 tmpe = tmpm->header.next;
2489                 entcount = tmpm->nentries;
2490                 while (entcount-- && tmpe != &tmpm->header) {
2491                         if( _vm_object_in_map(tmpm, object, tmpe)) {
2492                                 return 1;
2493                         }
2494                         tmpe = tmpe->next;
2495                 }
2496                 break;
2497         case VM_MAPTYPE_NORMAL:
2498         case VM_MAPTYPE_VPAGETABLE:
2499                 obj = entry->object.vm_object;
2500                 while (obj) {
2501                         if (obj == object) {
2502                                 if (obj != entry->object.vm_object)
2503                                         vm_object_drop(obj);
2504                                 return 1;
2505                         }
2506                         while ((nobj = obj->backing_object) != NULL) {
2507                                 vm_object_hold(nobj);
2508                                 if (nobj == obj->backing_object)
2509                                         break;
2510                                 vm_object_drop(nobj);
2511                         }
2512                         if (obj != entry->object.vm_object) {
2513                                 if (nobj)
2514                                         vm_object_lock_swap();
2515                                 vm_object_drop(obj);
2516                         }
2517                         obj = nobj;
2518                 }
2519                 break;
2520         default:
2521                 break;
2522         }
2523         return 0;
2524 }
2525
2526 static int vm_object_in_map_callback(struct proc *p, void *data);
2527
2528 struct vm_object_in_map_info {
2529         vm_object_t object;
2530         int rv;
2531 };
2532
2533 /*
2534  * Debugging only
2535  */
2536 static int
2537 vm_object_in_map(vm_object_t object)
2538 {
2539         struct vm_object_in_map_info info;
2540
2541         info.rv = 0;
2542         info.object = object;
2543
2544         allproc_scan(vm_object_in_map_callback, &info);
2545         if (info.rv)
2546                 return 1;
2547         if( _vm_object_in_map(&kernel_map, object, 0))
2548                 return 1;
2549         if( _vm_object_in_map(&pager_map, object, 0))
2550                 return 1;
2551         if( _vm_object_in_map(&buffer_map, object, 0))
2552                 return 1;
2553         return 0;
2554 }
2555
2556 /*
2557  * Debugging only
2558  */
2559 static int
2560 vm_object_in_map_callback(struct proc *p, void *data)
2561 {
2562         struct vm_object_in_map_info *info = data;
2563
2564         if (p->p_vmspace) {
2565                 if (_vm_object_in_map(&p->p_vmspace->vm_map, info->object, 0)) {
2566                         info->rv = 1;
2567                         return -1;
2568                 }
2569         }
2570         return (0);
2571 }
2572
2573 DB_SHOW_COMMAND(vmochk, vm_object_check)
2574 {
2575         vm_object_t object;
2576
2577         /*
2578          * make sure that internal objs are in a map somewhere
2579          * and none have zero ref counts.
2580          */
2581         for (object = TAILQ_FIRST(&vm_object_list);
2582                         object != NULL;
2583                         object = TAILQ_NEXT(object, object_list)) {
2584                 if (object->type == OBJT_MARKER)
2585                         continue;
2586                 if (object->handle == NULL &&
2587                     (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
2588                         if (object->ref_count == 0) {
2589                                 db_printf("vmochk: internal obj has zero ref count: %ld\n",
2590                                         (long)object->size);
2591                         }
2592                         if (!vm_object_in_map(object)) {
2593                                 db_printf(
2594                         "vmochk: internal obj is not in a map: "
2595                         "ref: %d, size: %lu: 0x%lx, backing_object: %p\n",
2596                                     object->ref_count, (u_long)object->size, 
2597                                     (u_long)object->size,
2598                                     (void *)object->backing_object);
2599                         }
2600                 }
2601         }
2602 }
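
/*
 * Usage sketch: DB_SHOW_COMMAND(vmochk, ...) registers the check as a
 * "show" command, so from the kernel debugger prompt:
 *
 *	db> show vmochk
 *
 * Any output flags internal objects that are unreferenced or not
 * reachable from any map.
 */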
2603
2604 /*
2605  * Debugging only
2606  */
2607 DB_SHOW_COMMAND(object, vm_object_print_static)
2608 {
2609         /* XXX convert args. */
2610         vm_object_t object = (vm_object_t)addr;
2611         boolean_t full = have_addr;
2612
2613         vm_page_t p;
2614
2615         /* XXX count is an (unused) arg.  Avoid shadowing it. */
2616 #define count   was_count
2617
2618         int count;
2619
2620         if (object == NULL)
2621                 return;
2622
2623         db_iprintf(
2624             "Object %p: type=%d, size=0x%lx, res=%d, ref=%d, flags=0x%x\n",
2625             object, (int)object->type, (u_long)object->size,
2626             object->resident_page_count, object->ref_count, object->flags);
2627         /*
2628          * XXX no %qd in kernel.  Truncate object->backing_object_offset.
2629          */
2630         db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%lx\n",
2631             object->shadow_count, 
2632             object->backing_object ? object->backing_object->ref_count : 0,
2633             object->backing_object, (long)object->backing_object_offset);
2634
2635         if (!full)
2636                 return;
2637
2638         db_indent += 2;
2639         count = 0;
2640         RB_FOREACH(p, vm_page_rb_tree, &object->rb_memq) {
2641                 if (count == 0)
2642                         db_iprintf("memory:=");
2643                 else if (count == 6) {
2644                         db_printf("\n");
2645                         db_iprintf(" ...");
2646                         count = 0;
2647                 } else
2648                         db_printf(",");
2649                 count++;
2650
2651                 db_printf("(off=0x%lx,page=0x%lx)",
2652                     (u_long) p->pindex, (u_long) VM_PAGE_TO_PHYS(p));
2653         }
2654         if (count != 0)
2655                 db_printf("\n");
2656         db_indent -= 2;
2657 }
2658
2659 /* XXX. */
2660 #undef count
2661
2662 /*
2663  * XXX need this non-static entry for calling from vm_map_print.
2664  *
2665  * Debugging only
2666  */
2667 void
2668 vm_object_print(/* db_expr_t */ long addr,
2669                 boolean_t have_addr,
2670                 /* db_expr_t */ long count,
2671                 char *modif)
2672 {
2673         vm_object_print_static(addr, have_addr, count, modif);
2674 }
2675
2676 /*
2677  * Debugging only
2678  */
2679 DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
2680 {
2681         vm_object_t object;
2682         int nl = 0;
2683         int c;
2684         for (object = TAILQ_FIRST(&vm_object_list);
2685                         object != NULL;
2686                         object = TAILQ_NEXT(object, object_list)) {
2687                 vm_pindex_t idx, fidx;
2688                 vm_pindex_t osize;
2689                 vm_paddr_t pa = -1, padiff;
2690                 int rcount;
2691                 vm_page_t m;
2692
2693                 if (object->type == OBJT_MARKER)
2694                         continue;
2695                 db_printf("new object: %p\n", (void *)object);
2696                 if ( nl > 18) {
2697                         c = cngetc();
2698                         if (c != ' ')
2699                                 return;
2700                         nl = 0;
2701                 }
2702                 nl++;
2703                 rcount = 0;
2704                 fidx = 0;
2705                 osize = object->size;
2706                 if (osize > 128)
2707                         osize = 128;
2708                 for (idx = 0; idx < osize; idx++) {
2709                         m = vm_page_lookup(object, idx);
2710                         if (m == NULL) {
2711                                 if (rcount) {
2712                                         db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
2713                                                 (long)fidx, rcount, (long)pa);
2714                                         if ( nl > 18) {
2715                                                 c = cngetc();
2716                                                 if (c != ' ')
2717                                                         return;
2718                                                 nl = 0;
2719                                         }
2720                                         nl++;
2721                                         rcount = 0;
2722                                 }
2723                                 continue;
2724                         }
2725
2727                         if (rcount &&
2728                                 (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
2729                                 ++rcount;
2730                                 continue;
2731                         }
2732                         if (rcount) {
2733                                 padiff = pa + rcount * PAGE_SIZE - VM_PAGE_TO_PHYS(m);
2734                                 padiff >>= PAGE_SHIFT;
2735                                 padiff &= PQ_L2_MASK;
2736                                 if (padiff == 0) {
2737                                         pa = VM_PAGE_TO_PHYS(m) - rcount * PAGE_SIZE;
2738                                         ++rcount;
2739                                         continue;
2740                                 }
2741                                 db_printf(" index(%ld)run(%d)pa(0x%lx)",
2742                                         (long)fidx, rcount, (long)pa);
2743                                 db_printf("pd(%ld)\n", (long)padiff);
2744                                 if ( nl > 18) {
2745                                         c = cngetc();
2746                                         if (c != ' ')
2747                                                 return;
2748                                         nl = 0;
2749                                 }
2750                                 nl++;
2751                         }
2752                         fidx = idx;
2753                         pa = VM_PAGE_TO_PHYS(m);
2754                         rcount = 1;
2755                 }
2756                 if (rcount) {
2757                         db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
2758                                 (long)fidx, rcount, (long)pa);
2759                         if ( nl > 18) {
2760                                 c = cngetc();
2761                                 if (c != ' ')
2762                                         return;
2763                                 nl = 0;
2764                         }
2765                         nl++;
2766                 }
2767         }
2768 }
2769 #endif /* DDB */