kernel -- vm_object locking: DEBUG_LOCKS check for hold_wait vs hold deadlock
sys/vm/vm_object.c (dragonfly.git)
1 /*
2  * (MPSAFE)
3  *
4  * Copyright (c) 1991, 1993
5  *      The Regents of the University of California.  All rights reserved.
6  *
7  * This code is derived from software contributed to Berkeley by
8  * The Mach Operating System project at Carnegie-Mellon University.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *      This product includes software developed by the University of
21  *      California, Berkeley and its contributors.
22  * 4. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *      from: @(#)vm_object.c   8.5 (Berkeley) 3/22/94
39  *
40  *
41  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
42  * All rights reserved.
43  *
44  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
45  *
46  * Permission to use, copy, modify and distribute this software and
47  * its documentation is hereby granted, provided that both the copyright
48  * notice and this permission notice appear in all copies of the
49  * software, derivative works or modified versions, and any portions
50  * thereof, and that both notices appear in supporting documentation.
51  *
52  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
53  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
54  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
55  *
56  * Carnegie Mellon requests users of this software to return to
57  *
58  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
59  *  School of Computer Science
60  *  Carnegie Mellon University
61  *  Pittsburgh PA 15213-3890
62  *
63  * any improvements or extensions that they make and grant Carnegie the
64  * rights to redistribute these changes.
65  *
66  * $FreeBSD: src/sys/vm/vm_object.c,v 1.171.2.8 2003/05/26 19:17:56 alc Exp $
67  */
68
69 /*
70  *      Virtual memory object module.
71  */
72
73 #include <sys/param.h>
74 #include <sys/systm.h>
75 #include <sys/proc.h>           /* for curproc, pageproc */
76 #include <sys/thread.h>
77 #include <sys/vnode.h>
78 #include <sys/vmmeter.h>
79 #include <sys/mman.h>
80 #include <sys/mount.h>
81 #include <sys/kernel.h>
82 #include <sys/sysctl.h>
83 #include <sys/refcount.h>
84
85 #include <vm/vm.h>
86 #include <vm/vm_param.h>
87 #include <vm/pmap.h>
88 #include <vm/vm_map.h>
89 #include <vm/vm_object.h>
90 #include <vm/vm_page.h>
91 #include <vm/vm_pageout.h>
92 #include <vm/vm_pager.h>
93 #include <vm/swap_pager.h>
94 #include <vm/vm_kern.h>
95 #include <vm/vm_extern.h>
96 #include <vm/vm_zone.h>
97
98 #define EASY_SCAN_FACTOR        8
99
100 static void     vm_object_qcollapse(vm_object_t object);
101 static int      vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
102                                              int pagerflags);
103 static void     vm_object_lock_init(vm_object_t);
104 static void     vm_object_hold_wake(vm_object_t);
105 static void     vm_object_hold_wait(vm_object_t);
106
107
108 /*
109  *      Virtual memory objects maintain the actual data
110  *      associated with allocated virtual memory.  A given
111  *      page of memory exists within exactly one object.
112  *
113  *      An object is only deallocated when all "references"
114  *      are given up.  Only one "reference" to a given
115  *      region of an object should be writeable.
116  *
117  *      Associated with each object is a list of all resident
118  *      memory pages belonging to that object; this list is
119  *      maintained by the "vm_page" module, and locked by the object's
120  *      lock.
121  *
122  *      Each object also records a "pager" routine which is
123  *      used to retrieve (and store) pages to the proper backing
124  *      storage.  In addition, objects may be backed by other
125  *      objects from which they were virtual-copied.
126  *
127  *      The only items within the object structure which are
128  *      modified after time of creation are:
129  *              reference count         locked by object's lock
130  *              pager routine           locked by object's lock
131  *
132  */
133
134 struct object_q vm_object_list;         /* locked by vmobj_token */
135 struct vm_object kernel_object;
136
137 static long vm_object_count;            /* locked by vmobj_token */
138 extern int vm_pageout_page_count;
139
140 static long object_collapses;
141 static long object_bypasses;
142 static int next_index;
143 static vm_zone_t obj_zone;
144 static struct vm_zone obj_zone_store;
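/*
 * Bootstrap storage for obj_zone: zbootinit() seeds the zone with these
 * statically allocated objects so vm_object_allocate() can be used before
 * the zone allocator is fully initialized (see vm_object_init() and
 * vm_object_init2() below).
 */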
145 #define VM_OBJECTS_INIT 256
146 static struct vm_object vm_objects_init[VM_OBJECTS_INIT];
147
148 /*
149  * Initialize a freshly allocated object
150  *
151  * Used only by vm_object_allocate() and zinitna().
152  *
153  * No requirements.
154  */
155 void
156 _vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
157 {
158         int incr;
159
160         RB_INIT(&object->rb_memq);
161         LIST_INIT(&object->shadow_head);
162
163         object->type = type;
164         object->size = size;
165         object->ref_count = 1;
166         object->hold_count = 0;
167         object->flags = 0;
168         if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP))
169                 vm_object_set_flag(object, OBJ_ONEMAPPING);
170         object->paging_in_progress = 0;
171         object->resident_page_count = 0;
172         object->agg_pv_list_count = 0;
173         object->shadow_count = 0;
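            /*
             * Stride pg_color across successive objects so that pages from
             * different objects tend to be spread across the PQ_L2 page
             * queue buckets.  The stride is capped for very large objects.
             */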
174         object->pg_color = next_index;
175         if (size > (PQ_L2_SIZE / 3 + PQ_PRIME1))
176                 incr = PQ_L2_SIZE / 3 + PQ_PRIME1;
177         else
178                 incr = size;
179         next_index = (next_index + incr) & PQ_L2_MASK;
180         object->handle = NULL;
181         object->backing_object = NULL;
182         object->backing_object_offset = (vm_ooffset_t) 0;
183
184         object->generation++;
185         object->swblock_count = 0;
186         RB_INIT(&object->swblock_root);
187         vm_object_lock_init(object);
188
189         lwkt_gettoken(&vmobj_token);
190         TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
191         vm_object_count++;
192         lwkt_reltoken(&vmobj_token);
193 }
194
195 /*
196  * Initialize the VM objects module.
197  *
198  * Called from the low level boot code only.
199  */
200 void
201 vm_object_init(void)
202 {
203         TAILQ_INIT(&vm_object_list);
204         
205         _vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(KvaEnd),
206                             &kernel_object);
207
208         obj_zone = &obj_zone_store;
209         zbootinit(obj_zone, "VM OBJECT", sizeof (struct vm_object),
210                 vm_objects_init, VM_OBJECTS_INIT);
211 }
212
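/*
 * Finish initializing the object zone once the kernel memory system is up.
 * ZONE_PANICFAIL presumably causes a failed allocation to panic rather
 * than return NULL.
 */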
213 void
214 vm_object_init2(void)
215 {
216         zinitna(obj_zone, NULL, NULL, 0, 0, ZONE_PANICFAIL, 1);
217 }
218
219 /*
220  * Allocate and return a new object of the specified type and size.
221  *
222  * No requirements.
223  */
224 vm_object_t
225 vm_object_allocate(objtype_t type, vm_pindex_t size)
226 {
227         vm_object_t result;
228
229         result = (vm_object_t) zalloc(obj_zone);
230
231         _vm_object_allocate(type, size, result);
232
233         return (result);
234 }
235
236 /*
237  * Add an additional reference to a vm_object.
238  *
239  * Object passed by caller must be stable or caller must already
240  * hold vmobj_token to avoid races.
241  */
242 void
243 vm_object_reference(vm_object_t object)
244 {
245         lwkt_gettoken(&vmobj_token);
246         vm_object_reference_locked(object);
247         lwkt_reltoken(&vmobj_token);
248 }
249
250 void
251 vm_object_reference_locked(vm_object_t object)
252 {
253         if (object) {
254                 ASSERT_LWKT_TOKEN_HELD(&vmobj_token);
255                 object->ref_count++;
256                 if (object->type == OBJT_VNODE) {
257                         vref(object->handle);
258                         /* XXX what if the vnode is being destroyed? */
259                 }
260         }
261 }
262
263 /*
264  * Dereference an object and its underlying vnode.
265  *
266  * The caller must hold vmobj_token.
267  */
268 static void
269 vm_object_vndeallocate(vm_object_t object)
270 {
271         struct vnode *vp = (struct vnode *) object->handle;
272
273         KASSERT(object->type == OBJT_VNODE,
274             ("vm_object_vndeallocate: not a vnode object"));
275         KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
276         ASSERT_LWKT_TOKEN_HELD(&vmobj_token);
277 #ifdef INVARIANTS
278         if (object->ref_count == 0) {
279                 vprint("vm_object_vndeallocate", vp);
280                 panic("vm_object_vndeallocate: bad object reference count");
281         }
282 #endif
283
284         object->ref_count--;
285         if (object->ref_count == 0)
286                 vclrflags(vp, VTEXT);
287         vrele(vp);
288 }
289
290 /*
291  * Release a reference to the specified object, gained either through a
292  * vm_object_allocate or a vm_object_reference call.  When all references
293  * are gone, storage associated with this object may be relinquished.
294  */
295 void
296 vm_object_deallocate(vm_object_t object)
297 {
298         lwkt_gettoken(&vmobj_token);
299         vm_object_deallocate_locked(object);
300         lwkt_reltoken(&vmobj_token);
301 }
302
303 void
304 vm_object_deallocate_locked(vm_object_t object)
305 {
306         vm_object_t temp;
307
308         ASSERT_LWKT_TOKEN_HELD(&vmobj_token);
309
310         while (object != NULL) {
311                 if (object->type == OBJT_VNODE) {
312                         vm_object_vndeallocate(object);
313                         break;
314                 }
315
316                 if (object->ref_count == 0) {
317                         panic("vm_object_deallocate: object deallocated "
318                               "too many times: %d", object->type);
319                 }
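                    /*
                     * Fast path: with more than two references we can simply
                     * drop one without acquiring vm_token (note the recheck
                     * below once vm_token is held).
                     */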
320                 if (object->ref_count > 2) {
321                         object->ref_count--;
322                         break;
323                 }
324
325                 /*
326                  * We currently need the vm_token from this point on, and
327                  * we must recheck ref_count after acquiring it.
328                  */
329                 lwkt_gettoken(&vm_token);
330
331                 if (object->ref_count > 2) {
332                         object->ref_count--;
333                         lwkt_reltoken(&vm_token);
334                         break;
335                 }
336
337                 /*
338                  * We get here with a ref_count of one or two, which are the
339                  * special cases for objects.
340                  */
341                 if ((object->ref_count == 2) && (object->shadow_count == 0)) {
342                         vm_object_set_flag(object, OBJ_ONEMAPPING);
343                         object->ref_count--;
344                         lwkt_reltoken(&vm_token);
345                         break;
346                 }
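                    /*
                     * ref_count == 2 with exactly one shadow: if both objects
                     * are anonymous (no handle, OBJT_DEFAULT or OBJT_SWAP),
                     * try to collapse the shadow into this object and loop on
                     * the result rather than just dropping the reference.
                     */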
347                 if ((object->ref_count == 2) && (object->shadow_count == 1)) {
348                         object->ref_count--;
349                         if ((object->handle == NULL) &&
350                             (object->type == OBJT_DEFAULT ||
351                              object->type == OBJT_SWAP)) {
352                                 vm_object_t robject;
353
354                                 robject = LIST_FIRST(&object->shadow_head);
355                                 KASSERT(robject != NULL,
356                                         ("vm_object_deallocate: ref_count: "
357                                         "%d, shadow_count: %d",
358                                         object->ref_count,
359                                         object->shadow_count));
360
361                                 if ((robject->handle == NULL) &&
362                                     (robject->type == OBJT_DEFAULT ||
363                                      robject->type == OBJT_SWAP)) {
364
365                                         robject->ref_count++;
366
367                                         while (
368                                                 robject->paging_in_progress ||
369                                                 object->paging_in_progress
370                                         ) {
371                                                 vm_object_pip_sleep(robject, "objde1");
372                                                 vm_object_pip_sleep(object, "objde2");
373                                         }
374
375                                         if (robject->ref_count == 1) {
376                                                 robject->ref_count--;
377                                                 object = robject;
378                                                 goto doterm;
379                                         }
380
381                                         object = robject;
382                                         vm_object_collapse(object);
383                                         lwkt_reltoken(&vm_token);
384                                         continue;
385                                 }
386                         }
387                         lwkt_reltoken(&vm_token);
388                         break;
389                 }
390
391                 /*
392                  * Normal dereferencing path
393                  */
394                 object->ref_count--;
395                 if (object->ref_count != 0) {
396                         lwkt_reltoken(&vm_token);
397                         break;
398                 }
399
400                 /*
401                  * Termination path
402                  */
403 doterm:
404                 temp = object->backing_object;
405                 if (temp) {
406                         LIST_REMOVE(object, shadow_list);
407                         temp->shadow_count--;
408                         temp->generation++;
409                         object->backing_object = NULL;
410                 }
411                 lwkt_reltoken(&vm_token);
412
413                 /*
414                  * Don't double-terminate, we could be in a termination
415                  * recursion due to the terminate having to sync data
416                  * to disk.
417                  */
418                 if ((object->flags & OBJ_DEAD) == 0)
419                         vm_object_terminate(object);
420                 object = temp;
421         }
422 }
423
424 /*
425  * Destroy the specified object, freeing up related resources.
426  *
427  * The object must have zero references.
428  *
429  * The caller must be holding vmobj_token and properly interlock with
430  * OBJ_DEAD.
431  */
432 static int vm_object_terminate_callback(vm_page_t p, void *data);
433
434 void
435 vm_object_terminate(vm_object_t object)
436 {
437         /*
438          * Make sure no one uses us.  Once we set OBJ_DEAD we should be
439          * able to safely block.
440          */
441         KKASSERT((object->flags & OBJ_DEAD) == 0);
442         ASSERT_LWKT_TOKEN_HELD(&vmobj_token);
443         vm_object_set_flag(object, OBJ_DEAD);
444
445         /*
446          * Wait for the pageout daemon to be done with the object
447          */
448         vm_object_pip_wait(object, "objtrm1");
449
450         KASSERT(!object->paging_in_progress,
451                 ("vm_object_terminate: pageout in progress"));
452
453         /*
454          * Clean and free the pages, as appropriate. All references to the
455          * object are gone, so we don't need to lock it.
456          */
457         if (object->type == OBJT_VNODE) {
458                 struct vnode *vp;
459
460                 /*
461                  * Clean pages and flush buffers.
462                  */
463                 vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
464
465                 vp = (struct vnode *) object->handle;
466                 vinvalbuf(vp, V_SAVE, 0, 0);
467         }
468
469         /*
470          * Wait for any I/O to complete, after which there had better not
471          * be any references left on the object.
472          */
473         vm_object_pip_wait(object, "objtrm2");
474
475         if (object->ref_count != 0) {
476                 panic("vm_object_terminate: object with references, "
477                       "ref_count=%d", object->ref_count);
478         }
479
480         /*
481          * Now free any remaining pages. For internal objects, this also
482          * removes them from paging queues. Don't free wired pages, just
483          * remove them from the object. 
484          */
485         lwkt_gettoken(&vm_token);
486         vm_page_rb_tree_RB_SCAN(&object->rb_memq, NULL,
487                                 vm_object_terminate_callback, NULL);
488         lwkt_reltoken(&vm_token);
489
490         /*
491          * Let the pager know object is dead.
492          */
493         vm_pager_deallocate(object);
494
495         /*
496          * Wait for the object hold count to hit zero
497          */
498         vm_object_hold_wait(object);
499
500         /*
501          * Remove the object from the global object list.
502          *
503          * (we are holding vmobj_token)
504          */
505         TAILQ_REMOVE(&vm_object_list, object, object_list);
506         vm_object_count--;
507         vm_object_dead_wakeup(object);
508
509         if (object->ref_count != 0) {
510                 panic("vm_object_terminate2: object with references, "
511                       "ref_count=%d", object->ref_count);
512         }
513
514         /*
515          * Free the space for the object.
516          */
517         zfree(obj_zone, object);
518 }
519
520 /*
521  * The caller must hold vm_token.
522  */
523 static int
524 vm_object_terminate_callback(vm_page_t p, void *data __unused)
525 {
526         if (p->busy || (p->flags & PG_BUSY))
527                 panic("vm_object_terminate: freeing busy page %p", p);
528         if (p->wire_count == 0) {
529                 vm_page_busy(p);
530                 vm_page_free(p);
531                 mycpu->gd_cnt.v_pfree++;
532         } else {
533                 if (p->queue != PQ_NONE)
534                         kprintf("vm_object_terminate: Warning: Encountered wired page %p on queue %d\n", p, p->queue);
535                 vm_page_busy(p);
536                 vm_page_remove(p);
537                 vm_page_wakeup(p);
538         }
539         return(0);
540 }
541
542 /*
543  * The object is dead but still has an object<->pager association.  Sleep
544  * and return.  The caller typically retests the association in a loop.
545  *
546  * Must be called with the vmobj_token held.
547  */
548 void
549 vm_object_dead_sleep(vm_object_t object, const char *wmesg)
550 {
551         ASSERT_LWKT_TOKEN_HELD(&vmobj_token);
552         if (object->handle) {
553                 vm_object_set_flag(object, OBJ_DEADWNT);
554                 tsleep(object, 0, wmesg, 0);
555                 /* object may be invalid after this point */
556         }
557 }
558
559 /*
560  * Wakeup anyone waiting for the object<->pager disassociation on
561  * a dead object.
562  *
563  * Must be called with the vmobj_token held.
564  */
565 void
566 vm_object_dead_wakeup(vm_object_t object)
567 {
568         ASSERT_LWKT_TOKEN_HELD(&vmobj_token);
569         if (object->flags & OBJ_DEADWNT) {
570                 vm_object_clear_flag(object, OBJ_DEADWNT);
571                 wakeup(object);
572         }
573 }
574
575 /*
576  * Clean all dirty pages in the specified range of object.  Leaves page
577  * on whatever queue it is currently on.   If NOSYNC is set then do not
578  * write out pages with PG_NOSYNC set (originally comes from MAP_NOSYNC),
579  * leaving the object dirty.
580  *
581  * When stuffing pages asynchronously, allow clustering.  XXX we need a
582  * synchronous clustering mode implementation.
583  *
584  * Odd semantics: an end of 0 means clean through the end of the object.
585  *
586  * The object must be locked? XXX
587  */
588 static int vm_object_page_clean_pass1(struct vm_page *p, void *data);
589 static int vm_object_page_clean_pass2(struct vm_page *p, void *data);
590
591 void
592 vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
593                      int flags)
594 {
595         struct rb_vm_page_scan_info info;
596         struct vnode *vp;
597         int wholescan;
598         int pagerflags;
599         int curgeneration;
600
601         lwkt_gettoken(&vm_token);
602         if (object->type != OBJT_VNODE ||
603             (object->flags & OBJ_MIGHTBEDIRTY) == 0) {
604                 lwkt_reltoken(&vm_token);
605                 return;
606         }
607
608         pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) ? 
609                         VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
610         pagerflags |= (flags & OBJPC_INVAL) ? VM_PAGER_PUT_INVAL : 0;
611
612         vp = object->handle;
613
614         /*
615          * Interlock other major object operations.  This allows us to 
616          * temporarily clear OBJ_WRITEABLE and OBJ_MIGHTBEDIRTY.
617          */
618         crit_enter();
619         vm_object_set_flag(object, OBJ_CLEANING);
620
621         /*
622          * Handle 'entire object' case
623          */
624         info.start_pindex = start;
625         if (end == 0) {
626                 info.end_pindex = object->size - 1;
627         } else {
628                 info.end_pindex = end - 1;
629         }
630         wholescan = (start == 0 && info.end_pindex == object->size - 1);
631         info.limit = flags;
632         info.pagerflags = pagerflags;
633         info.object = object;
634
635         /*
636          * If cleaning the entire object do a pass to mark the pages read-only.
637          * If everything worked out ok, clear OBJ_WRITEABLE and
638          * OBJ_MIGHTBEDIRTY.
639          */
640         if (wholescan) {
641                 info.error = 0;
642                 vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
643                                         vm_object_page_clean_pass1, &info);
644                 if (info.error == 0) {
645                         vm_object_clear_flag(object,
646                                              OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
647                         if (object->type == OBJT_VNODE &&
648                             (vp = (struct vnode *)object->handle) != NULL) {
649                                 if (vp->v_flag & VOBJDIRTY) 
650                                         vclrflags(vp, VOBJDIRTY);
651                         }
652                 }
653         }
654
655         /*
656          * Do a pass to clean all the dirty pages we find.
657          */
658         do {
659                 info.error = 0;
660                 curgeneration = object->generation;
661                 vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
662                                         vm_object_page_clean_pass2, &info);
663         } while (info.error || curgeneration != object->generation);
664
665         vm_object_clear_flag(object, OBJ_CLEANING);
666         crit_exit();
667         lwkt_reltoken(&vm_token);
668 }
669
670 /*
671  * The caller must hold vm_token.
672  */
673 static 
674 int
675 vm_object_page_clean_pass1(struct vm_page *p, void *data)
676 {
677         struct rb_vm_page_scan_info *info = data;
678
679         vm_page_flag_set(p, PG_CLEANCHK);
680         if ((info->limit & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC))
681                 info->error = 1;
682         else
683                 vm_page_protect(p, VM_PROT_READ);       /* must not block */
684         return(0);
685 }
686
687 /*
688  * The caller must hold vm_token.
689  */
690 static 
691 int
692 vm_object_page_clean_pass2(struct vm_page *p, void *data)
693 {
694         struct rb_vm_page_scan_info *info = data;
695         int n;
696
697         /*
698          * Do not mess with pages that were inserted after we started
699          * the cleaning pass.
700          */
701         if ((p->flags & PG_CLEANCHK) == 0)
702                 return(0);
703
704         /*
705          * Before wasting time traversing the pmaps, check for trivial
706          * cases where the page cannot be dirty.
707          */
708         if (p->valid == 0 || (p->queue - p->pc) == PQ_CACHE) {
709                 KKASSERT((p->dirty & p->valid) == 0);
710                 return(0);
711         }
712
713         /*
714          * Check whether the page is dirty or not.  The page has been set
715          * to be read-only so the check will not race a user dirtying the
716          * page.
717          */
718         vm_page_test_dirty(p);
719         if ((p->dirty & p->valid) == 0) {
720                 vm_page_flag_clear(p, PG_CLEANCHK);
721                 return(0);
722         }
723
724         /*
725          * If we have been asked to skip nosync pages and this is a
726          * nosync page, skip it.  Note that the object flags were
727          * not cleared in this case (because pass1 will have returned an
728          * error), so we do not have to set them.
729          */
730         if ((info->limit & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) {
731                 vm_page_flag_clear(p, PG_CLEANCHK);
732                 return(0);
733         }
734
735         /*
736          * Flush as many pages as we can.  PG_CLEANCHK will be cleared on
737          * the pages that get successfully flushed.  Set info->error if
738          * we raced an object modification.
739          */
740         n = vm_object_page_collect_flush(info->object, p, info->pagerflags);
741         if (n == 0)
742                 info->error = 1;
743         return(0);
744 }
745
746 /*
747  * Collect the specified page and nearby pages and flush them out.
748  * The number of pages flushed is returned.
749  *
750  * The caller must hold vm_token.
751  */
752 static int
753 vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags)
754 {
755         int runlen;
756         int maxf;
757         int chkb;
758         int maxb;
759         int i;
760         int curgeneration;
761         vm_pindex_t pi;
762         vm_page_t maf[vm_pageout_page_count];
763         vm_page_t mab[vm_pageout_page_count];
764         vm_page_t ma[vm_pageout_page_count];
765
766         curgeneration = object->generation;
767
768         pi = p->pindex;
769         while (vm_page_sleep_busy(p, TRUE, "vpcwai")) {
770                 if (object->generation != curgeneration) {
771                         return(0);
772                 }
773         }
774         KKASSERT(p->object == object && p->pindex == pi);
775
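            /*
             * Scan forward from pi + 1 for up to vm_pageout_page_count - 1
             * additional dirty, unbusy pages that can be clustered into this
             * flush, stopping at the first page that fails the checks.
             */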
776         maxf = 0;
777         for(i = 1; i < vm_pageout_page_count; i++) {
778                 vm_page_t tp;
779
780                 if ((tp = vm_page_lookup(object, pi + i)) != NULL) {
781                         if ((tp->flags & PG_BUSY) ||
782                                 ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 && 
783                                  (tp->flags & PG_CLEANCHK) == 0) ||
784                                 (tp->busy != 0))
785                                 break;
786                         if((tp->queue - tp->pc) == PQ_CACHE) {
787                                 vm_page_flag_clear(tp, PG_CLEANCHK);
788                                 break;
789                         }
790                         vm_page_test_dirty(tp);
791                         if ((tp->dirty & tp->valid) == 0) {
792                                 vm_page_flag_clear(tp, PG_CLEANCHK);
793                                 break;
794                         }
795                         maf[ i - 1 ] = tp;
796                         maxf++;
797                         continue;
798                 }
799                 break;
800         }
801
802         maxb = 0;
803         chkb = vm_pageout_page_count - maxf;
804         if (chkb) {
805                 for(i = 1; i < chkb;i++) {
806                         vm_page_t tp;
807
808                         if ((tp = vm_page_lookup(object, pi - i)) != NULL) {
809                                 if ((tp->flags & PG_BUSY) ||
810                                         ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 && 
811                                          (tp->flags & PG_CLEANCHK) == 0) ||
812                                         (tp->busy != 0))
813                                         break;
814                                 if((tp->queue - tp->pc) == PQ_CACHE) {
815                                         vm_page_flag_clear(tp, PG_CLEANCHK);
816                                         break;
817                                 }
818                                 vm_page_test_dirty(tp);
819                                 if ((tp->dirty & tp->valid) == 0) {
820                                         vm_page_flag_clear(tp, PG_CLEANCHK);
821                                         break;
822                                 }
823                                 mab[ i - 1 ] = tp;
824                                 maxb++;
825                                 continue;
826                         }
827                         break;
828                 }
829         }
830
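            /*
             * Assemble ma[]: the look-behind pages in ascending pindex order,
             * then p itself, then the look-ahead pages, and flush the whole
             * run with a single vm_pageout_flush() call.
             */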
831         for(i = 0; i < maxb; i++) {
832                 int index = (maxb - i) - 1;
833                 ma[index] = mab[i];
834                 vm_page_flag_clear(ma[index], PG_CLEANCHK);
835         }
836         vm_page_flag_clear(p, PG_CLEANCHK);
837         ma[maxb] = p;
838         for(i = 0; i < maxf; i++) {
839                 int index = (maxb + i) + 1;
840                 ma[index] = maf[i];
841                 vm_page_flag_clear(ma[index], PG_CLEANCHK);
842         }
843         runlen = maxb + maxf + 1;
844
845         vm_pageout_flush(ma, runlen, pagerflags);
846         for (i = 0; i < runlen; i++) {
847                 if (ma[i]->valid & ma[i]->dirty) {
848                         vm_page_protect(ma[i], VM_PROT_READ);
849                         vm_page_flag_set(ma[i], PG_CLEANCHK);
850
851                         /*
852                          * maxf will end up being the actual number of pages
853                          * we wrote out contiguously, non-inclusive of the
854                          * first page.  We do not count look-behind pages.
855                          */
856                         if (i >= maxb + 1 && (maxf > i - maxb - 1))
857                                 maxf = i - maxb - 1;
858                 }
859         }
860         return(maxf + 1);
861 }
862
863 /*
864  * Same as vm_object_pmap_copy, except range checking really
865  * works, and is meant for small sections of an object.
866  *
867  * This code protects resident pages by making them read-only
868  * and is typically called on a fork or split when a page
869  * is converted to copy-on-write.  
870  *
871  * NOTE: If the page is already at VM_PROT_NONE, calling
872  * vm_page_protect will have no effect.
873  */
874 void
875 vm_object_pmap_copy_1(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
876 {
877         vm_pindex_t idx;
878         vm_page_t p;
879
880         if (object == NULL || (object->flags & OBJ_WRITEABLE) == 0)
881                 return;
882
883         /*
884          * spl protection needed to prevent races between the lookup,
885          * an interrupt unbusy/free, and our protect call.
886          */
887         crit_enter();
888         lwkt_gettoken(&vm_token);
889         for (idx = start; idx < end; idx++) {
890                 p = vm_page_lookup(object, idx);
891                 if (p == NULL)
892                         continue;
893                 vm_page_protect(p, VM_PROT_READ);
894         }
895         lwkt_reltoken(&vm_token);
896         crit_exit();
897 }
898
899 /*
900  * Removes all physical pages in the specified object range from all
901  * physical maps.
902  *
903  * The object must *not* be locked.
904  */
905
906 static int vm_object_pmap_remove_callback(vm_page_t p, void *data);
907
908 void
909 vm_object_pmap_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
910 {
911         struct rb_vm_page_scan_info info;
912
913         if (object == NULL)
914                 return;
915         info.start_pindex = start;
916         info.end_pindex = end - 1;
917
918         crit_enter();
919         lwkt_gettoken(&vm_token);
920         vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
921                                 vm_object_pmap_remove_callback, &info);
922         if (start == 0 && end == object->size)
923                 vm_object_clear_flag(object, OBJ_WRITEABLE);
924         lwkt_reltoken(&vm_token);
925         crit_exit();
926 }
927
928 /*
929  * The caller must hold vm_token.
930  */
931 static int
932 vm_object_pmap_remove_callback(vm_page_t p, void *data __unused)
933 {
934         vm_page_protect(p, VM_PROT_NONE);
935         return(0);
936 }
937
938 /*
939  * Implements the madvise function at the object/page level.
940  *
941  * MADV_WILLNEED        (any object)
942  *
943  *      Activate the specified pages if they are resident.
944  *
945  * MADV_DONTNEED        (any object)
946  *
947  *      Deactivate the specified pages if they are resident.
948  *
949  * MADV_FREE    (OBJT_DEFAULT/OBJT_SWAP objects, OBJ_ONEMAPPING only)
950  *
951  *      Deactivate and clean the specified pages if they are
952  *      resident.  This permits the process to reuse the pages
953  *      without faulting or the kernel to reclaim the pages
954  *      without I/O.
955  *
956  * No requirements.
957  */
958 void
959 vm_object_madvise(vm_object_t object, vm_pindex_t pindex, int count, int advise)
960 {
961         vm_pindex_t end, tpindex;
962         vm_object_t tobject;
963         vm_page_t m;
964
965         if (object == NULL)
966                 return;
967
968         end = pindex + count;
969
970         lwkt_gettoken(&vm_token);
971
972         /*
973          * Locate and adjust resident pages
974          */
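            /*
             * A page may be resident in the object itself or in any object
             * in its backing chain.  If the lookup misses we translate the
             * index and descend the chain via the shadowlookup loop below.
             */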
975         for (; pindex < end; pindex += 1) {
976 relookup:
977                 tobject = object;
978                 tpindex = pindex;
979 shadowlookup:
980                 /*
981                  * MADV_FREE only operates on OBJT_DEFAULT or OBJT_SWAP pages
982                  * and those pages must be OBJ_ONEMAPPING.
983                  */
984                 if (advise == MADV_FREE) {
985                         if ((tobject->type != OBJT_DEFAULT &&
986                              tobject->type != OBJT_SWAP) ||
987                             (tobject->flags & OBJ_ONEMAPPING) == 0) {
988                                 continue;
989                         }
990                 }
991
992                 /*
993                  * spl protection is required to avoid a race between the
994                  * lookup, an interrupt unbusy/free, and our busy check.
995                  */
996
997                 crit_enter();
998                 m = vm_page_lookup(tobject, tpindex);
999
1000                 if (m == NULL) {
1001                         /*
1002                          * There may be swap even if there is no backing page
1003                          */
1004                         if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
1005                                 swap_pager_freespace(tobject, tpindex, 1);
1006
1007                         /*
1008                          * next object
1009                          */
1010                         crit_exit();
1011                         if (tobject->backing_object == NULL)
1012                                 continue;
1013                         tpindex += OFF_TO_IDX(tobject->backing_object_offset);
1014                         tobject = tobject->backing_object;
1015                         goto shadowlookup;
1016                 }
1017
1018                 /*
1019                  * If the page is busy or not in a normal active state,
1020                  * we skip it.  If the page is not managed there are no
1021                  * page queues to mess with.  Things can break if we mess
1022                  * with pages in any of the below states.
1023                  */
1024                 if (
1025                     m->hold_count ||
1026                     m->wire_count ||
1027                     (m->flags & PG_UNMANAGED) ||
1028                     m->valid != VM_PAGE_BITS_ALL
1029                 ) {
1030                         crit_exit();
1031                         continue;
1032                 }
1033
1034                 if (vm_page_sleep_busy(m, TRUE, "madvpo")) {
1035                         crit_exit();
1036                         goto relookup;
1037                 }
1038                 vm_page_busy(m);
1039                 crit_exit();
1040
1041                 /*
1042                  * Theoretically once a page is known not to be busy, an
1043                  * interrupt cannot come along and rip it out from under us.
1044                  */
1045
1046                 if (advise == MADV_WILLNEED) {
1047                         vm_page_activate(m);
1048                 } else if (advise == MADV_DONTNEED) {
1049                         vm_page_dontneed(m);
1050                 } else if (advise == MADV_FREE) {
1051                         /*
1052                          * Mark the page clean.  This will allow the page
1053                          * to be freed up by the system.  However, such pages
1054                          * are often reused quickly by malloc()/free()
1055                          * so we do not do anything that would cause
1056                          * a page fault if we can help it.
1057                          *
1058                          * Specifically, we do not try to actually free
1059                          * the page now nor do we try to put it in the
1060                          * cache (which would cause a page fault on reuse).
1061                          *
1062                          * But we do make the page as freeable as we
1063                          * can without actually taking the step of
1064                          * unmapping it.
1065                          */
1066                         pmap_clear_modify(m);
1067                         m->dirty = 0;
1068                         m->act_count = 0;
1069                         vm_page_dontneed(m);
1070                         if (tobject->type == OBJT_SWAP)
1071                                 swap_pager_freespace(tobject, tpindex, 1);
1072                 }
1073                 vm_page_wakeup(m);
1074         }       
1075         lwkt_reltoken(&vm_token);
1076 }
1077
1078 /*
1079  * Create a new object which is backed by the specified existing object
1080  * range.  The source object reference is deallocated.
1081  *
1082  * The new object and offset into that object are returned in the source
1083  * parameters.
1084  *
1085  * No other requirements.
1086  */
1087 void
1088 vm_object_shadow(vm_object_t *object, vm_ooffset_t *offset, vm_size_t length)
1089 {
1090         vm_object_t source;
1091         vm_object_t result;
1092
1093         source = *object;
1094
1095         /*
1096          * Don't create the new object if the old object isn't shared.
1097          */
1098         lwkt_gettoken(&vm_token);
1099
1100         if (source != NULL &&
1101             source->ref_count == 1 &&
1102             source->handle == NULL &&
1103             (source->type == OBJT_DEFAULT ||
1104              source->type == OBJT_SWAP)) {
1105                 lwkt_reltoken(&vm_token);
1106                 return;
1107         }
1108
1109         /*
1110          * Allocate a new object with the given length
1111          */
1112
1113         if ((result = vm_object_allocate(OBJT_DEFAULT, length)) == NULL)
1114                 panic("vm_object_shadow: no object for shadowing");
1115
1116         /*
1117          * The new object shadows the source object, adding a reference to it.
1118          * Our caller changes his reference to point to the new object,
1119          * removing a reference to the source object.  Net result: no change
1120          * of reference count.
1121          *
1122          * Try to optimize the result object's page color when shadowing
1123          * in order to maintain page coloring consistency in the combined 
1124          * shadowed object.
1125          */
1126         result->backing_object = source;
1127         if (source) {
1128                 LIST_INSERT_HEAD(&source->shadow_head, result, shadow_list);
1129                 source->shadow_count++;
1130                 source->generation++;
1131                 result->pg_color = (source->pg_color + OFF_TO_IDX(*offset)) & PQ_L2_MASK;
1132         }
1133
1134         /*
1135          * Store the offset into the source object, and fix up the offset into
1136          * the new object.
1137          */
1138         result->backing_object_offset = *offset;
1139         lwkt_reltoken(&vm_token);
1140
1141         /*
1142          * Return the new object and the offset into it.
1143          */
1144         *offset = 0;
1145         *object = result;
1146 }
1147
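/*
 * Operations for vm_object_backing_scan():
 *
 *      OBSC_TEST_ALL_SHADOWED  test whether the parent shadows every
 *                              resident page in the backing object.
 *      OBSC_COLLAPSE_NOWAIT    opportunistic collapse, skipping busy pages.
 *      OBSC_COLLAPSE_WAIT      full collapse, sleeping on busy pages and
 *                              restarting the scan after any sleep.
 */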
1148 #define OBSC_TEST_ALL_SHADOWED  0x0001
1149 #define OBSC_COLLAPSE_NOWAIT    0x0002
1150 #define OBSC_COLLAPSE_WAIT      0x0004
1151
1152 static int vm_object_backing_scan_callback(vm_page_t p, void *data);
1153
1154 /*
1155  * The caller must hold vm_token.
1156  */
1157 static __inline int
1158 vm_object_backing_scan(vm_object_t object, int op)
1159 {
1160         struct rb_vm_page_scan_info info;
1161         vm_object_t backing_object;
1162
1163         crit_enter();
1164
1165         backing_object = object->backing_object;
1166         info.backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
1167
1168         /*
1169          * Initial conditions
1170          */
1171
1172         if (op & OBSC_TEST_ALL_SHADOWED) {
1173                 /*
1174                  * We do not want to have to test for the existence of
1175                  * swap pages in the backing object.  XXX but with the
1176                  * new swapper this would be pretty easy to do.
1177                  *
1178                  * XXX what about anonymous MAP_SHARED memory that hasn't
1179                  * been ZFOD faulted yet?  If we do not test for this, the
1180                  * shadow test may succeed! XXX
1181                  */
1182                 if (backing_object->type != OBJT_DEFAULT) {
1183                         crit_exit();
1184                         return(0);
1185                 }
1186         }
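            /*
             * A blocking collapse marks the backing object dead up front;
             * the scan may sleep, and OBJ_DEAD keeps the backing object
             * stable across those sleeps (see the restart handling in the
             * callback).
             */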
1187         if (op & OBSC_COLLAPSE_WAIT) {
1188                 KKASSERT((backing_object->flags & OBJ_DEAD) == 0);
1189                 vm_object_set_flag(backing_object, OBJ_DEAD);
1190         }
1191
1192         /*
1193          * Our scan.  We have to retry if a negative error code is returned,
1194          * otherwise 0 or 1 will be returned in info.error.  A 0 indicates that
1195          * the scan had to be stopped because the parent does not completely
1196          * shadow the child.
1197          */
1198         info.object = object;
1199         info.backing_object = backing_object;
1200         info.limit = op;
1201         do {
1202                 info.error = 1;
1203                 vm_page_rb_tree_RB_SCAN(&backing_object->rb_memq, NULL,
1204                                         vm_object_backing_scan_callback,
1205                                         &info);
1206         } while (info.error < 0);
1207         crit_exit();
1208         return(info.error);
1209 }
1210
1211 /*
1212  * The caller must hold vm_token.
1213  */
1214 static int
1215 vm_object_backing_scan_callback(vm_page_t p, void *data)
1216 {
1217         struct rb_vm_page_scan_info *info = data;
1218         vm_object_t backing_object;
1219         vm_object_t object;
1220         vm_pindex_t new_pindex;
1221         vm_pindex_t backing_offset_index;
1222         int op;
1223
1224         new_pindex = p->pindex - info->backing_offset_index;
1225         op = info->limit;
1226         object = info->object;
1227         backing_object = info->backing_object;
1228         backing_offset_index = info->backing_offset_index;
1229
1230         if (op & OBSC_TEST_ALL_SHADOWED) {
1231                 vm_page_t pp;
1232
1233                 /*
1234                  * Ignore pages outside the parent object's range
1235                  * and outside the parent object's mapping of the 
1236                  * backing object.
1237                  *
1238                  * note that we do not busy the backing object's
1239                  * page.
1240                  */
1241                 if (
1242                     p->pindex < backing_offset_index ||
1243                     new_pindex >= object->size
1244                 ) {
1245                         return(0);
1246                 }
1247
1248                 /*
1249                  * See if the parent has the page or if the parent's
1250                  * object pager has the page.  If the parent has the
1251                  * page but the page is not valid, the parent's
1252                  * object pager must have the page.
1253                  *
1254                  * If this fails, the parent does not completely shadow
1255                  * the object and we might as well give up now.
1256                  */
1257
1258                 pp = vm_page_lookup(object, new_pindex);
1259                 if ((pp == NULL || pp->valid == 0) &&
1260                     !vm_pager_has_page(object, new_pindex)
1261                 ) {
1262                         info->error = 0;        /* problemo */
1263                         return(-1);             /* stop the scan */
1264                 }
1265         }
1266
1267         /*
1268          * Check for busy page
1269          */
1270
1271         if (op & (OBSC_COLLAPSE_WAIT | OBSC_COLLAPSE_NOWAIT)) {
1272                 vm_page_t pp;
1273
1274                 if (op & OBSC_COLLAPSE_NOWAIT) {
1275                         if (
1276                             (p->flags & PG_BUSY) ||
1277                             !p->valid || 
1278                             p->hold_count || 
1279                             p->wire_count ||
1280                             p->busy
1281                         ) {
1282                                 return(0);
1283                         }
1284                 } else if (op & OBSC_COLLAPSE_WAIT) {
1285                         if (vm_page_sleep_busy(p, TRUE, "vmocol")) {
1286                                 /*
1287                                  * If we slept, anything could have
1288                                  * happened.   Ask that the scan be restarted.
1289                                  *
1290                                  * Since the object is marked dead, the
1291                                  * backing offset should not have changed.  
1292                                  */
1293                                 info->error = -1;
1294                                 return(-1);
1295                         }
1296                 }
1297
1298                 /* 
1299                  * Busy the page
1300                  */
1301                 vm_page_busy(p);
1302
1303                 KASSERT(
1304                     p->object == backing_object,
1305                     ("vm_object_qcollapse(): object mismatch")
1306                 );
1307
1308                 /*
1309                  * Destroy any associated swap
1310                  */
1311                 if (backing_object->type == OBJT_SWAP)
1312                         swap_pager_freespace(backing_object, p->pindex, 1);
1313
1314                 if (
1315                     p->pindex < backing_offset_index ||
1316                     new_pindex >= object->size
1317                 ) {
1318                         /*
1319                          * Page is out of the parent object's range, we 
1320                          * can simply destroy it. 
1321                          */
1322                         vm_page_protect(p, VM_PROT_NONE);
1323                         vm_page_free(p);
1324                         return(0);
1325                 }
1326
1327                 pp = vm_page_lookup(object, new_pindex);
1328                 if (pp != NULL || vm_pager_has_page(object, new_pindex)) {
1329                         /*
1330                          * page already exists in parent OR swap exists
1331                          * for this location in the parent.  Destroy 
1332                          * the original page from the backing object.
1333                          *
1334                          * Leave the parent's page alone
1335                          */
1336                         vm_page_protect(p, VM_PROT_NONE);
1337                         vm_page_free(p);
1338                         return(0);
1339                 }
1340
1341                 /*
1342                  * Page does not exist in parent, rename the
1343                  * page from the backing object to the main object. 
1344                  *
1345                  * If the page was mapped to a process, it can remain 
1346                  * mapped through the rename.
1347                  */
1348                 if ((p->queue - p->pc) == PQ_CACHE)
1349                         vm_page_deactivate(p);
1350
1351                 vm_page_rename(p, object, new_pindex);
1352                 /* page automatically made dirty by rename */
1353         }
1354         return(0);
1355 }
1356
1357 /*
1358  * This version of collapse allows the operation to occur earlier and
1359  * when paging_in_progress is true for an object...  This is not a complete
1360  * operation, but should plug 99.9% of the rest of the leaks.
1361  *
1362  * The caller must hold vm_token and vmobj_token.
1363  * (only called from vm_object_collapse)
1364  */
1365 static void
1366 vm_object_qcollapse(vm_object_t object)
1367 {
1368         vm_object_t backing_object = object->backing_object;
1369
1370         if (backing_object->ref_count != 1)
1371                 return;
1372
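            /*
             * Bump ref_count by two so the backing object cannot be
             * terminated out from under the scan; the extra references
             * are dropped when the scan completes.
             */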
1373         backing_object->ref_count += 2;
1374
1375         vm_object_backing_scan(object, OBSC_COLLAPSE_NOWAIT);
1376
1377         backing_object->ref_count -= 2;
1378 }
1379
1380 /*
1381  * Collapse an object with the object backing it.  Pages in the backing
1382  * object are moved into the parent, and the backing object is deallocated.
1383  */
1384 void
1385 vm_object_collapse(vm_object_t object)
1386 {
1387         ASSERT_LWKT_TOKEN_HELD(&vm_token);
1388         ASSERT_LWKT_TOKEN_HELD(&vmobj_token);
1389
1390         while (TRUE) {
1391                 vm_object_t backing_object;
1392
1393                 /*
1394                  * Verify that the conditions are right for collapse:
1395                  *
1396                  * The object exists and the backing object exists.
1397                  */
1398                 if (object == NULL)
1399                         break;
1400
1401                 if ((backing_object = object->backing_object) == NULL)
1402                         break;
1403
1404                 /*
1405                  * we check the backing object first, because it is most likely
1406                  * We check the backing object first because it is most likely
1407                  * not collapsible.
1408                 if (backing_object->handle != NULL ||
1409                     (backing_object->type != OBJT_DEFAULT &&
1410                      backing_object->type != OBJT_SWAP) ||
1411                     (backing_object->flags & OBJ_DEAD) ||
1412                     object->handle != NULL ||
1413                     (object->type != OBJT_DEFAULT &&
1414                      object->type != OBJT_SWAP) ||
1415                     (object->flags & OBJ_DEAD)) {
1416                         break;
1417                 }
1418
1419                 if (
1420                     object->paging_in_progress != 0 ||
1421                     backing_object->paging_in_progress != 0
1422                 ) {
1423                         vm_object_qcollapse(object);
1424                         break;
1425                 }
1426
1427                 /*
1428                  * We know that we can either collapse the backing object (if
1429                  * the parent is the only reference to it) or (perhaps) have
1430                  * the parent bypass the object if the parent happens to shadow
1431                  * all the resident pages in the entire backing object.
1432                  *
1433                  * This is ignoring pager-backed pages such as swap pages.
1434                  * vm_object_backing_scan fails the shadowing test in this
1435                  * case.
1436                  */
1437
1438                 if (backing_object->ref_count == 1) {
1439                         /*
1440                          * If there is exactly one reference to the backing
1441                          * object, we can collapse it into the parent.  
1442                          */
1443                         vm_object_backing_scan(object, OBSC_COLLAPSE_WAIT);
1444
1445                         /*
1446                          * Move the pager from backing_object to object.
1447                          */
1448
1449                         if (backing_object->type == OBJT_SWAP) {
1450                                 vm_object_pip_add(backing_object, 1);
1451
1452                                 /*
1453                                  * scrap the paging_offset junk and do a 
1454                                  * discrete copy.  This also removes major 
1455                                  * assumptions about how the swap-pager 
1456                                  * works from where it doesn't belong.  The
1457                                  * new swapper is able to optimize the
1458                                  * destroy-source case.
1459                                  */
1460
1461                                 vm_object_pip_add(object, 1);
1462                                 swap_pager_copy(
1463                                     backing_object,
1464                                     object,
1465                                     OFF_TO_IDX(object->backing_object_offset), TRUE);
1466                                 vm_object_pip_wakeup(object);
1467
1468                                 vm_object_pip_wakeup(backing_object);
1469                         }
1470                         /*
1471                          * Object now shadows whatever backing_object did.
1472                          * Note that the reference to 
1473                          * backing_object->backing_object moves from within 
1474                          * backing_object to within object.
1475                          */
1476
1477                         LIST_REMOVE(object, shadow_list);
1478                         object->backing_object->shadow_count--;
1479                         object->backing_object->generation++;
1480                         if (backing_object->backing_object) {
1481                                 LIST_REMOVE(backing_object, shadow_list);
1482                                 backing_object->backing_object->shadow_count--;
1483                                 backing_object->backing_object->generation++;
1484                         }
1485                         object->backing_object = backing_object->backing_object;
1486                         if (object->backing_object) {
1487                                 LIST_INSERT_HEAD(
1488                                     &object->backing_object->shadow_head,
1489                                     object, 
1490                                     shadow_list
1491                                 );
1492                                 object->backing_object->shadow_count++;
1493                                 object->backing_object->generation++;
1494                         }
1495
1496                         object->backing_object_offset +=
1497                             backing_object->backing_object_offset;
1498
1499                         /*
1500                          * Discard backing_object.
1501                          *
1502                          * Since the backing object has no pages, no pager left,
1503                          * and no object references within it, all that is
1504                          * necessary is to dispose of it.
1505                          */
1506
1507                         KASSERT(backing_object->ref_count == 1,
1508                                 ("backing_object %p was somehow "
1509                                  "re-referenced during collapse!",
1510                                  backing_object));
1511                         KASSERT(RB_EMPTY(&backing_object->rb_memq),
1512                                 ("backing_object %p somehow has left "
1513                                  "over pages during collapse!",
1514                                  backing_object));
1515
1516                         /*
1517                          * Wait for hold count to hit zero
1518                          */
1519                         vm_object_hold_wait(backing_object);
1520
1521                         /* (we are holding vmobj_token) */
1522                         TAILQ_REMOVE(&vm_object_list, backing_object,
1523                                      object_list);
1524                         vm_object_count--;
1525
1526                         zfree(obj_zone, backing_object);
1527
1528                         object_collapses++;
1529                 } else {
1530                         vm_object_t new_backing_object;
1531
1532                         /*
1533                          * If we do not entirely shadow the backing object,
1534                          * there is nothing we can do so we give up.
1535                          */
1536
1537                         if (vm_object_backing_scan(object, OBSC_TEST_ALL_SHADOWED) == 0) {
1538                                 break;
1539                         }
1540
1541                         /*
1542                          * Make the parent shadow the next object in the
1543                          * chain.  Deallocating backing_object will not remove
1544                          * it, since its reference count is at least 2.
1545                          */
1546
1547                         LIST_REMOVE(object, shadow_list);
1548                         backing_object->shadow_count--;
1549                         backing_object->generation++;
1550
1551                         new_backing_object = backing_object->backing_object;
1552                         if ((object->backing_object = new_backing_object) != NULL) {
1553                                 vm_object_reference(new_backing_object);
1554                                 LIST_INSERT_HEAD(
1555                                     &new_backing_object->shadow_head,
1556                                     object,
1557                                     shadow_list
1558                                 );
1559                                 new_backing_object->shadow_count++;
1560                                 new_backing_object->generation++;
1561                                 object->backing_object_offset +=
1562                                         backing_object->backing_object_offset;
1563                         }
1564
1565                         /*
1566                          * Drop the reference count on backing_object.  Since
1567                          * its ref_count was at least 2, it will not vanish;
1568                          * a bare decrement would suffice, but we call
1569                          * vm_object_deallocate_locked() anyway for symmetry.
1570                          */
1571                         vm_object_deallocate_locked(backing_object);
1572                         object_bypasses++;
1573                 }
1574
1575                 /*
1576                  * Try again with this object's new backing object.
1577                  */
1578         }
1579 }
1580
1581 /*
1582  * Removes all physical pages in the specified object range from the
1583  * object's list of pages.
1584  *
1585  * No requirements.
1586  */
1587 static int vm_object_page_remove_callback(vm_page_t p, void *data);
1588
1589 void
1590 vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
1591                       boolean_t clean_only)
1592 {
1593         struct rb_vm_page_scan_info info;
1594         int all;
1595
1596         /*
1597          * Degenerate cases and assertions
1598          */
1599         lwkt_gettoken(&vm_token);
1600         if (object == NULL ||
1601             (object->resident_page_count == 0 && object->swblock_count == 0)) {
1602                 lwkt_reltoken(&vm_token);
1603                 return;
1604         }
1605         KASSERT(object->type != OBJT_PHYS, 
1606                 ("attempt to remove pages from a physical object"));
1607
1608         /*
1609          * Indicate that paging is occurring on the object
1610          */
1611         crit_enter();
1612         vm_object_pip_add(object, 1);
1613
1614         /*
1615          * Figure out the actual removal range and whether we are removing
1616          * the entire contents of the object or not.  If removing the entire
1617          * contents, be sure to get all pages, even those that might be 
1618          * beyond the end of the object.
1619          */
1620         info.start_pindex = start;
1621         if (end == 0)
1622                 info.end_pindex = (vm_pindex_t)-1;
1623         else
1624                 info.end_pindex = end - 1;
1625         info.limit = clean_only;
1626         all = (start == 0 && info.end_pindex >= object->size - 1);
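        /*
         * Example (sketch): for a 10-page object, (start=0, end=0)
         * yields end_pindex = (vm_pindex_t)-1 and all = 1, while
         * (start=2, end=5) yields end_pindex = 4 and all = 0.
         */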
1627
1628         /*
1629          * Loop until we are sure we have gotten them all.
1630          */
1631         do {
1632                 info.error = 0;
1633                 vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
1634                                         vm_object_page_remove_callback, &info);
1635         } while (info.error);
1636
1637         /*
1638          * Remove any related swap if throwing away pages, or for
1639          * non-swap objects (the swap is a clean copy in that case).
1640          */
1641         if (object->type != OBJT_SWAP || clean_only == FALSE) {
1642                 if (all)
1643                         swap_pager_freespace_all(object);
1644                 else
1645                         swap_pager_freespace(object, info.start_pindex,
1646                              info.end_pindex - info.start_pindex + 1);
1647         }
1648
1649         /*
1650          * Cleanup
1651          */
1652         vm_object_pip_wakeup(object);
1653         crit_exit();
1654         lwkt_reltoken(&vm_token);
1655 }
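
/*
 * Usage sketch (hypothetical caller, not from this file): a vnode
 * pager truncating a file to newsize would remove every page from the
 * first index past newsize through the end of the object, using
 * end == 0 to mean "to the end":
 *
 *      vm_object_page_remove(object,
 *                            OFF_TO_IDX(newsize + PAGE_MASK), 0, FALSE);
 */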
1656
1657 /*
1658  * The caller must hold vm_token.
1659  */
1660 static int
1661 vm_object_page_remove_callback(vm_page_t p, void *data)
1662 {
1663         struct rb_vm_page_scan_info *info = data;
1664
1665         /*
1666          * Wired pages cannot be destroyed, but they can be invalidated
1667          * and we do so if clean_only (limit) is not set.
1668          *
1669          * WARNING!  The page may be wired due to being part of a buffer
1670          *           cache buffer, and the buffer might be marked B_CACHE.
1671          *           This is fine as part of a truncation but VFSs must be
1672          *           sure to fix the buffer up when re-extending the file.
1673          */
1674         if (p->wire_count != 0) {
1675                 vm_page_protect(p, VM_PROT_NONE);
1676                 if (info->limit == 0)
1677                         p->valid = 0;
1678                 return(0);
1679         }
1680
1681         /*
1682          * The busy flags are only cleared at interrupt time --
1683          * minimize the spl transitions.
1684          */
1685
1686         if (vm_page_sleep_busy(p, TRUE, "vmopar")) {
1687                 info->error = 1;
1688                 return(0);
1689         }
1690
1691         /*
1692          * limit is our clean_only flag.  If set and the page is dirty, do
1693          * not free it.  If set and the page is being held by someone, do
1694          * not free it.
1695          */
1696         if (info->limit && p->valid) {
1697                 vm_page_test_dirty(p);
1698                 if (p->valid & p->dirty)
1699                         return(0);
1700                 if (p->hold_count)
1701                         return(0);
1702         }
1703
1704         /*
1705          * Destroy the page
1706          */
1707         vm_page_busy(p);
1708         vm_page_protect(p, VM_PROT_NONE);
1709         vm_page_free(p);
1710         return(0);
1711 }
1712
1713 /*
1714  * Coalesces two objects backing up adjoining regions of memory into a
1715  * single object.
1716  *
1717  * Returns TRUE if the objects were combined.
1718  *
1719  * NOTE: Only works at the moment if the second object is NULL -
1720  *       if it's not, which object do we lock first?
1721  *
1722  * Parameters:
1723  *      prev_object     First object to coalesce
1724  *      prev_pindex     Page index within prev_object at which the
1725  *                      coalesced region begins
1726  *
1727  *      prev_size       Size of reference to prev_object
1728  *      next_size       Size of reference to the second object, which
1729  *                      is currently always NULL (see the NOTE above)
1730  *
1731  * The object must not be locked.
1732  * The caller must hold vm_token and vmobj_token.
1733  */
1734 boolean_t
1735 vm_object_coalesce(vm_object_t prev_object, vm_pindex_t prev_pindex,
1736                    vm_size_t prev_size, vm_size_t next_size)
1737 {
1738         vm_pindex_t next_pindex;
1739
1740         ASSERT_LWKT_TOKEN_HELD(&vm_token);
1741         ASSERT_LWKT_TOKEN_HELD(&vmobj_token);
1742
1743         if (prev_object == NULL) {
1744                 return (TRUE);
1745         }
1746
1747         if (prev_object->type != OBJT_DEFAULT &&
1748             prev_object->type != OBJT_SWAP) {
1749                 return (FALSE);
1750         }
1751
1752         /*
1753          * Try to collapse the object first
1754          */
1755         vm_object_collapse(prev_object);
1756
1757         /*
1758          * Can't coalesce if: more than one reference, paged out, shadows
1759          * another object, or has a copy elsewhere (any of which mean that
1760          * the pages not mapped to prev_entry may be in use anyway).
1761          */
1762
1763         if (prev_object->backing_object != NULL) {
1764                 return (FALSE);
1765         }
1766
1767         prev_size >>= PAGE_SHIFT;
1768         next_size >>= PAGE_SHIFT;
1769         next_pindex = prev_pindex + prev_size;
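        /*
         * Worked example (sketch): extending a 4-page region of
         * prev_object starting at prev_pindex 0 by 2 pages gives
         * prev_size = 4 and next_pindex = 4; on success the object is
         * extended to 6 pages below.
         */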
1770
1771         if ((prev_object->ref_count > 1) &&
1772             (prev_object->size != next_pindex)) {
1773                 return (FALSE);
1774         }
1775
1776         /*
1777          * Remove any pages that may still be in the object from a previous
1778          * deallocation.
1779          */
1780         if (next_pindex < prev_object->size) {
1781                 vm_object_page_remove(prev_object,
1782                                       next_pindex,
1783                                       next_pindex + next_size, FALSE);
1784                 if (prev_object->type == OBJT_SWAP)
1785                         swap_pager_freespace(prev_object,
1786                                              next_pindex, next_size);
1787         }
1788
1789         /*
1790          * Extend the object if necessary.
1791          */
1792         if (next_pindex + next_size > prev_object->size)
1793                 prev_object->size = next_pindex + next_size;
1794
1795         return (TRUE);
1796 }
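
/*
 * Hypothetical caller sketch (names assumed for illustration, not
 * taken from this file): a vm_map-style caller growing a previous
 * entry in place, with vm_token and vmobj_token already held:
 *
 *      if (vm_object_coalesce(prev_entry->object.vm_object,
 *                             OFF_TO_IDX(prev_entry->offset),
 *                             prev_entry->end - prev_entry->start,
 *                             grow_amount)) {
 *              prev_entry->end += grow_amount;
 *      }
 */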
1797
1798 /*
1799  * Make the object writable and flag it as possibly being dirty.
1800  *
1801  * No requirements.
1802  */
1803 void
1804 vm_object_set_writeable_dirty(vm_object_t object)
1805 {
1806         struct vnode *vp;
1807
1808         lwkt_gettoken(&vm_token);
1809         vm_object_set_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
1810         if (object->type == OBJT_VNODE &&
1811             (vp = (struct vnode *)object->handle) != NULL) {
1812                 if ((vp->v_flag & VOBJDIRTY) == 0) {
1813                         vsetflags(vp, VOBJDIRTY);
1814                 }
1815         }
1816         lwkt_reltoken(&vm_token);
1817 }
1818
1819 static void
1820 vm_object_lock_init(vm_object_t obj)
1821 {
1822 #if defined(DEBUG_LOCKS)
1823         int i;
1824
1825         obj->debug_hold_bitmap = 0;
1826         for (i = 0; i < VMOBJ_DEBUG_ARRAY_SIZE; i++) {
1827                 obj->debug_hold_thrs[i] = NULL;
1828         }
1829 #endif
1830 }
1831
1832 void
1833 vm_object_lock(vm_object_t obj)
1834 {
1835         lwkt_getpooltoken(obj);
1836 }
1837
1838 void
1839 vm_object_unlock(vm_object_t obj)
1840 {
1841         lwkt_relpooltoken(obj);
1842 }
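
/*
 * Note: lwkt_getpooltoken()/lwkt_relpooltoken() hash the object
 * pointer into a fixed pool of tokens, so two unrelated objects may
 * contend on the same token; correctness only requires that a given
 * object always hashes to the same token.
 */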
1843
1844 void
1845 vm_object_hold(vm_object_t obj)
1846 {
1847         vm_object_lock(obj);
1848
1849         refcount_acquire(&obj->hold_count);
1850
1851 #if defined(DEBUG_LOCKS)
1852         int i;
1853
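        /*
         * Find the lowest free debug slot: ffs() returns the 1-based
         * index of the first set bit, so applying it to the complement
         * locates the first clear bit.  E.g. bitmap 0x7 gives
         * ffs(~0x7) == 4, i.e. i == 3; a full bitmap gives
         * ffs(0) == 0, i.e. i == -1, triggering the panic below.
         */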
1854         i = ffs(~obj->debug_hold_bitmap) - 1;
1855         if (i == -1) {
1856                 panic("vm_object hold count > VMOBJ_DEBUG_ARRAY_SIZE");
1857         }
1858
1859         obj->debug_hold_bitmap |= (1 << i);
1860         obj->debug_hold_thrs[i] = curthread;
1861 #endif
1862 }
1863
1864 void
1865 vm_object_drop(vm_object_t obj)
1866 {
1867         int rc;
1868
1869 #if defined(DEBUG_LOCKS)
1870         int found = 0;
1871         int i;
1872
1873         for (i = 0; i < VMOBJ_DEBUG_ARRAY_SIZE; i++) {
1874                 if ((obj->debug_hold_bitmap & (1 << i)) &&
1875                     (obj->debug_hold_thrs[i] == curthread)) {
1876                         obj->debug_hold_bitmap &= ~(1 << i);
1877                         obj->debug_hold_thrs[i] = NULL;
1878                         found = 1;
1879                         break;
1880                 }
1881         }
1882
1883         if (found == 0)
1884                 panic("vm_object: attempt to drop hold on non-self-held obj");
1885 #endif
1886
1887         rc = refcount_release(&obj->hold_count);
1888         vm_object_unlock(obj);
1889
1890         if (rc) 
1891                 vm_object_hold_wake(obj);
1892 }
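
/*
 * Usage sketch (illustrative): holds must be paired with drops by the
 * same thread.  While held, the object cannot pass
 * vm_object_hold_wait() and therefore cannot be terminated or
 * collapsed out from under the holder:
 *
 *      vm_object_hold(obj);
 *      ... obj is safe to examine here ...
 *      vm_object_drop(obj);
 */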
1893
1894 static void
1895 vm_object_hold_wake(vm_object_t obj)
1896 {
1897         wakeup(obj);
1898 }
1899
1900 static void
1901 vm_object_hold_wait(vm_object_t obj)
1902 {
1903         vm_object_lock(obj);
1904
1905 #if defined(DEBUG_LOCKS)
1906         int i;
1907
1908         for (i = 0; i < VMOBJ_DEBUG_ARRAY_SIZE; i++) {
1909                 if ((obj->debug_hold_bitmap & (1 << i)) &&
1910                     (obj->debug_hold_thrs[i] == curthread))
1911                         panic("vm_object: self-hold in terminate or collapse");
1912         }
1913 #endif
1914
1915         while (obj->hold_count)
1916                 tsleep(obj, 0, "vmobjhld", 0);
1917
1918         vm_object_unlock(obj);
1919 }
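
/*
 * The DEBUG_LOCKS check above exists to catch the hold_wait-vs-hold
 * deadlock: a thread that still holds obj and then enters a terminate
 * or collapse path that reaches vm_object_hold_wait(obj) would
 * tsleep() on its own hold and never be woken.  Sketch of the bug it
 * turns into a panic:
 *
 *      vm_object_hold(obj);
 *      ... a path that terminates or collapses obj ...
 *      vm_object_hold_wait(obj);       panics under DEBUG_LOCKS
 *                                      instead of sleeping forever
 */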
1920
1921 #include "opt_ddb.h"
1922 #ifdef DDB
1923 #include <sys/kernel.h>
1924
1925 #include <sys/cons.h>
1926
1927 #include <ddb/ddb.h>
1928
1929 static int      _vm_object_in_map (vm_map_t map, vm_object_t object,
1930                                        vm_map_entry_t entry);
1931 static int      vm_object_in_map (vm_object_t object);
1932
1933 /*
1934  * The caller must hold vm_token.
1935  */
1936 static int
1937 _vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
1938 {
1939         vm_map_t tmpm;
1940         vm_map_entry_t tmpe;
1941         vm_object_t obj;
1942         int entcount;
1943
1944         if (map == 0)
1945                 return 0;
1946         if (entry == 0) {
1947                 tmpe = map->header.next;
1948                 entcount = map->nentries;
1949                 while (entcount-- && (tmpe != &map->header)) {
1950                         if (_vm_object_in_map(map, object, tmpe)) {
1951                                 return 1;
1952                         }
1953                         tmpe = tmpe->next;
1954                 }
1955                 return (0);
1956         }
1957         switch(entry->maptype) {
1958         case VM_MAPTYPE_SUBMAP:
1959                 tmpm = entry->object.sub_map;
1960                 tmpe = tmpm->header.next;
1961                 entcount = tmpm->nentries;
1962                 while (entcount-- && tmpe != &tmpm->header) {
1963                         if (_vm_object_in_map(tmpm, object, tmpe)) {
1964                                 return 1;
1965                         }
1966                         tmpe = tmpe->next;
1967                 }
1968                 break;
1969         case VM_MAPTYPE_NORMAL:
1970         case VM_MAPTYPE_VPAGETABLE:
1971                 obj = entry->object.vm_object;
1972                 while (obj) {
1973                         if (obj == object)
1974                                 return 1;
1975                         obj = obj->backing_object;
1976                 }
1977                 break;
1978         default:
1979                 break;
1980         }
1981         return 0;
1982 }
1983
1984 static int vm_object_in_map_callback(struct proc *p, void *data);
1985
1986 struct vm_object_in_map_info {
1987         vm_object_t object;
1988         int rv;
1989 };
1990
1991 /*
1992  * Debugging only
1993  */
1994 static int
1995 vm_object_in_map(vm_object_t object)
1996 {
1997         struct vm_object_in_map_info info;
1998
1999         info.rv = 0;
2000         info.object = object;
2001
2002         allproc_scan(vm_object_in_map_callback, &info);
2003         if (info.rv)
2004                 return 1;
2005         if (_vm_object_in_map(&kernel_map, object, 0))
2006                 return 1;
2007         if (_vm_object_in_map(&pager_map, object, 0))
2008                 return 1;
2009         if (_vm_object_in_map(&buffer_map, object, 0))
2010                 return 1;
2011         return 0;
2012 }
2013
2014 /*
2015  * Debugging only
2016  */
2017 static int
2018 vm_object_in_map_callback(struct proc *p, void *data)
2019 {
2020         struct vm_object_in_map_info *info = data;
2021
2022         if (p->p_vmspace) {
2023                 if (_vm_object_in_map(&p->p_vmspace->vm_map, info->object, 0)) {
2024                         info->rv = 1;
2025                         return -1;
2026                 }
2027         }
2028         return (0);
2029 }
2030
2031 DB_SHOW_COMMAND(vmochk, vm_object_check)
2032 {
2033         vm_object_t object;
2034
2035         /*
2036          * make sure that internal objs are in a map somewhere
2037          * and none have zero ref counts.
2038          */
2039         for (object = TAILQ_FIRST(&vm_object_list);
2040                         object != NULL;
2041                         object = TAILQ_NEXT(object, object_list)) {
2042                 if (object->type == OBJT_MARKER)
2043                         continue;
2044                 if (object->handle == NULL &&
2045                     (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
2046                         if (object->ref_count == 0) {
2047                                 db_printf("vmochk: internal obj has zero ref count, size: %ld\n",
2048                                         (long)object->size);
2049                         }
2050                         if (!vm_object_in_map(object)) {
2051                                 db_printf(
2052                         "vmochk: internal obj is not in a map: "
2053                         "ref: %d, size: %lu: 0x%lx, backing_object: %p\n",
2054                                     object->ref_count, (u_long)object->size, 
2055                                     (u_long)object->size,
2056                                     (void *)object->backing_object);
2057                         }
2058                 }
2059         }
2060 }
2061
2062 /*
2063  * Debugging only
2064  */
2065 DB_SHOW_COMMAND(object, vm_object_print_static)
2066 {
2067         /* XXX convert args. */
2068         vm_object_t object = (vm_object_t)addr;
2069         boolean_t full = have_addr;
2070
2071         vm_page_t p;
2072
2073         /* XXX count is an (unused) arg.  Avoid shadowing it. */
2074 #define count   was_count
2075
2076         int count;
2077
2078         if (object == NULL)
2079                 return;
2080
2081         db_iprintf(
2082             "Object %p: type=%d, size=0x%lx, res=%d, ref=%d, flags=0x%x\n",
2083             object, (int)object->type, (u_long)object->size,
2084             object->resident_page_count, object->ref_count, object->flags);
2085         /*
2086          * XXX no %qd in kernel.  Truncate object->backing_object_offset.
2087          */
2088         db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%lx\n",
2089             object->shadow_count, 
2090             object->backing_object ? object->backing_object->ref_count : 0,
2091             object->backing_object, (long)object->backing_object_offset);
2092
2093         if (!full)
2094                 return;
2095
2096         db_indent += 2;
2097         count = 0;
2098         RB_FOREACH(p, vm_page_rb_tree, &object->rb_memq) {
2099                 if (count == 0)
2100                         db_iprintf("memory:=");
2101                 else if (count == 6) {
2102                         db_printf("\n");
2103                         db_iprintf(" ...");
2104                         count = 0;
2105                 } else
2106                         db_printf(",");
2107                 count++;
2108
2109                 db_printf("(off=0x%lx,page=0x%lx)",
2110                     (u_long) p->pindex, (u_long) VM_PAGE_TO_PHYS(p));
2111         }
2112         if (count != 0)
2113                 db_printf("\n");
2114         db_indent -= 2;
2115 }
2116
2117 /* XXX. */
2118 #undef count
2119
2120 /*
2121  * XXX need this non-static entry for calling from vm_map_print.
2122  *
2123  * Debugging only
2124  */
2125 void
2126 vm_object_print(/* db_expr_t */ long addr,
2127                 boolean_t have_addr,
2128                 /* db_expr_t */ long count,
2129                 char *modif)
2130 {
2131         vm_object_print_static(addr, have_addr, count, modif);
2132 }
2133
2134 /*
2135  * Debugging only
2136  */
2137 DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
2138 {
2139         vm_object_t object;
2140         int nl = 0;
2141         int c;
2142         for (object = TAILQ_FIRST(&vm_object_list);
2143                         object != NULL;
2144                         object = TAILQ_NEXT(object, object_list)) {
2145                 vm_pindex_t idx, fidx;
2146                 vm_pindex_t osize;
2147                 vm_paddr_t pa = -1, padiff;
2148                 int rcount;
2149                 vm_page_t m;
2150
2151                 if (object->type == OBJT_MARKER)
2152                         continue;
2153                 db_printf("new object: %p\n", (void *)object);
2154                 if (nl > 18) {
2155                         c = cngetc();
2156                         if (c != ' ')
2157                                 return;
2158                         nl = 0;
2159                 }
2160                 nl++;
2161                 rcount = 0;
2162                 fidx = 0;
2163                 osize = object->size;
2164                 if (osize > 128)
2165                         osize = 128;
2166                 for (idx = 0; idx < osize; idx++) {
2167                         m = vm_page_lookup(object, idx);
2168                         if (m == NULL) {
2169                                 if (rcount) {
2170                                         db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
2171                                                 (long)fidx, rcount, (long)pa);
2172                                         if (nl > 18) {
2173                                                 c = cngetc();
2174                                                 if (c != ' ')
2175                                                         return;
2176                                                 nl = 0;
2177                                         }
2178                                         nl++;
2179                                         rcount = 0;
2180                                 }
2181                                 continue;
2182                         }
2183
2184
2185                         if (rcount &&
2186                             (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
2187                                 ++rcount;
2188                                 continue;
2189                         }
2190                         if (rcount) {
2191                                 padiff = pa + rcount * PAGE_SIZE - VM_PAGE_TO_PHYS(m);
2192                                 padiff >>= PAGE_SHIFT;
2193                                 padiff &= PQ_L2_MASK;
2194                                 if (padiff == 0) {
2195                                         pa = VM_PAGE_TO_PHYS(m) - rcount * PAGE_SIZE;
2196                                         ++rcount;
2197                                         continue;
2198                                 }
2199                                 db_printf(" index(%ld)run(%d)pa(0x%lx)",
2200                                         (long)fidx, rcount, (long)pa);
2201                                 db_printf("pd(%ld)\n", (long)padiff);
2202                                 if (nl > 18) {
2203                                         c = cngetc();
2204                                         if (c != ' ')
2205                                                 return;
2206                                         nl = 0;
2207                                 }
2208                                 nl++;
2209                         }
2210                         fidx = idx;
2211                         pa = VM_PAGE_TO_PHYS(m);
2212                         rcount = 1;
2213                 }
2214                 if (rcount) {
2215                         db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
2216                                 (long)fidx, rcount, (long)pa);
2217                         if (nl > 18) {
2218                                 c = cngetc();
2219                                 if (c != ' ')
2220                                         return;
2221                                 nl = 0;
2222                         }
2223                         nl++;
2224                 }
2225         }
2226 }
2227 #endif /* DDB */