kernel - Add Proportional RSS (PRES)
sys/vm/vm_object.c
/*
 * (MPSAFE)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_object.c,v 1.171.2.8 2003/05/26 19:17:56 alc Exp $
 * $DragonFly: src/sys/vm/vm_object.c,v 1.33 2008/05/09 07:24:48 dillon Exp $
 */

/*
 *	Virtual memory object module.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>		/* for curproc, pageproc */
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#define EASY_SCAN_FACTOR	8

static void	vm_object_qcollapse(vm_object_t object);
static int	vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
					     int pagerflags);

/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.  Only one "reference" to a given
 *	region of an object should be writeable.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, and locked by the object's
 *	lock.
 *
 *	Each object also records a "pager" routine which is
 *	used to retrieve (and store) pages to the proper backing
 *	storage.  In addition, objects may be backed by other
 *	objects from which they were virtual-copied.
 *
 *	The only items within the object structure which are
 *	modified after time of creation are:
 *		reference count		locked by object's lock
 *		pager routine		locked by object's lock
 *
 */

struct object_q vm_object_list;		/* locked by vmobj_token */
struct vm_object kernel_object;

static long vm_object_count;		/* locked by vmobj_token */
extern int vm_pageout_page_count;

static long object_collapses;
static long object_bypasses;
static int next_index;
static vm_zone_t obj_zone;
static struct vm_zone obj_zone_store;
static int object_hash_rand;
#define VM_OBJECTS_INIT 256
static struct vm_object vm_objects_init[VM_OBJECTS_INIT];

/*
 * Initialize a freshly allocated object
 *
 * Used only by vm_object_allocate() and zinitna().
 *
 * No requirements.
 */
void
_vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
{
	int incr;

	RB_INIT(&object->rb_memq);
	LIST_INIT(&object->shadow_head);

	object->type = type;
	object->size = size;
	object->ref_count = 1;
	object->flags = 0;
	if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP))
		vm_object_set_flag(object, OBJ_ONEMAPPING);
	object->paging_in_progress = 0;
	object->resident_page_count = 0;
	object->agg_pv_list_count = 0;
	object->shadow_count = 0;
	object->pg_color = next_index;
	if ( size > (PQ_L2_SIZE / 3 + PQ_PRIME1))
		incr = PQ_L2_SIZE / 3 + PQ_PRIME1;
	else
		incr = size;
	next_index = (next_index + incr) & PQ_L2_MASK;
	object->handle = NULL;
	object->backing_object = NULL;
	object->backing_object_offset = (vm_ooffset_t) 0;
	/*
	 * Try to generate a number that will spread objects out in the
	 * hash table.  We 'wipe' new objects across the hash in 128 page
	 * increments plus 1 more to offset it a little more by the time
	 * it wraps around.
	 */
	object->hash_rand = object_hash_rand - 129;

	object->generation++;
	object->swblock_count = 0;
	RB_INIT(&object->swblock_root);

	lwkt_gettoken(&vmobj_token);
	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
	vm_object_count++;
	object_hash_rand = object->hash_rand;
	lwkt_reltoken(&vmobj_token);
}
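/*
 * Illustrative arithmetic (added, not from the original source): the
 * stride of 129 pages (128 + 1) is odd and therefore coprime with any
 * power-of-2 hash size, so successive allocations walk through every
 * hash bucket before the sequence repeats.  E.g. with a 128-bucket
 * hash, consecutive hash_rand values land on buckets 0, 127, 126, ...
 * (mod 128), one step backwards per object.
 */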
/*
 * Initialize the VM objects module.
 *
 * Called from the low level boot code only.
 */
void
vm_object_init(void)
{
	TAILQ_INIT(&vm_object_list);
	
	_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(KvaEnd),
			    &kernel_object);

	obj_zone = &obj_zone_store;
	zbootinit(obj_zone, "VM OBJECT", sizeof (struct vm_object),
		vm_objects_init, VM_OBJECTS_INIT);
}
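/*
 * Added note (not in the original source): second-stage initialization,
 * run once the kernel zone allocator is fully operational so obj_zone
 * can allocate dynamically beyond the static vm_objects_init[] bootstrap
 * array.
 */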
void
vm_object_init2(void)
{
	zinitna(obj_zone, NULL, NULL, 0, 0, ZONE_PANICFAIL, 1);
}

/*
 * Allocate and return a new object of the specified type and size.
 *
 * No requirements.
 */
vm_object_t
vm_object_allocate(objtype_t type, vm_pindex_t size)
{
	vm_object_t result;

	result = (vm_object_t) zalloc(obj_zone);

	_vm_object_allocate(type, size, result);

	return (result);
}
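/*
 * Illustrative usage sketch (added; the page count is hypothetical):
 * a consumer allocates an anonymous object sized in pages and drops
 * its reference with vm_object_deallocate() when done, at which point
 * the storage may be reclaimed.
 */
#if 0
static void
example_object_lifecycle(void)
{
	vm_object_t obj;

	obj = vm_object_allocate(OBJT_DEFAULT, 16);	/* 16 pages */
	/* ... map it, fault pages in, etc ... */
	vm_object_deallocate(obj);
}
#endif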
/*
 * Add an additional reference to a vm_object.
 *
 * Object passed by caller must be stable or caller must already
 * hold vmobj_token to avoid races.
 */
void
vm_object_reference(vm_object_t object)
{
	if (object) {
		lwkt_gettoken(&vmobj_token);
		object->ref_count++;
		if (object->type == OBJT_VNODE) {
			vref(object->handle);
			/* XXX what if the vnode is being destroyed? */
		}
		lwkt_reltoken(&vmobj_token);
	}
}

void
vm_object_reference_locked(vm_object_t object)
{
	if (object) {
		ASSERT_LWKT_TOKEN_HELD(&vmobj_token);
		object->ref_count++;
		if (object->type == OBJT_VNODE) {
			vref(object->handle);
			/* XXX what if the vnode is being destroyed? */
		}
	}
}
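/*
 * Illustrative sketch (an assumption, not original code): callers that
 * already hold vmobj_token for a larger compound operation use the
 * _locked form to avoid acquiring the token recursively.
 */
#if 0
static void
example_ref_under_token(vm_object_t object)
{
	lwkt_gettoken(&vmobj_token);
	vm_object_reference_locked(object);	/* token already held */
	/* ... other work that must stay atomic with the ref ... */
	lwkt_reltoken(&vmobj_token);
}
#endif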
/*
 * Dereference an object and its underlying vnode.
 *
 * The caller must hold vmobj_token.
 */
static void
vm_object_vndeallocate(vm_object_t object)
{
	struct vnode *vp = (struct vnode *) object->handle;

	KASSERT(object->type == OBJT_VNODE,
	    ("vm_object_vndeallocate: not a vnode object"));
	KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
	ASSERT_LWKT_TOKEN_HELD(&vmobj_token);
#ifdef INVARIANTS
	if (object->ref_count == 0) {
		vprint("vm_object_vndeallocate", vp);
		panic("vm_object_vndeallocate: bad object reference count");
	}
#endif

	object->ref_count--;
	if (object->ref_count == 0)
		vclrflags(vp, VTEXT);
	vrele(vp);
}

/*
 * Release a reference to the specified object, gained either through a
 * vm_object_allocate or a vm_object_reference call.  When all references
 * are gone, storage associated with this object may be relinquished.
 */
void
vm_object_deallocate(vm_object_t object)
{
	lwkt_gettoken(&vmobj_token);
	vm_object_deallocate_locked(object);
	lwkt_reltoken(&vmobj_token);
}
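/*
 * Added note: same as vm_object_deallocate() above, for callers that
 * already hold vmobj_token.
 */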
void
vm_object_deallocate_locked(vm_object_t object)
{
	vm_object_t temp;

	ASSERT_LWKT_TOKEN_HELD(&vmobj_token);

	while (object != NULL) {
		if (object->type == OBJT_VNODE) {
			vm_object_vndeallocate(object);
			break;
		}

		if (object->ref_count == 0) {
			panic("vm_object_deallocate: object deallocated "
			      "too many times: %d", object->type);
		}
		if (object->ref_count > 2) {
			object->ref_count--;
			break;
		}

		/*
		 * We currently need the vm_token from this point on, and
		 * we must recheck ref_count after acquiring it.
		 */
		lwkt_gettoken(&vm_token);

		if (object->ref_count > 2) {
			object->ref_count--;
			lwkt_reltoken(&vm_token);
			break;
		}

		/*
		 * Here on ref_count of one or two, which are special cases for
		 * objects.
		 */
		if ((object->ref_count == 2) && (object->shadow_count == 0)) {
			vm_object_set_flag(object, OBJ_ONEMAPPING);
			object->ref_count--;
			lwkt_reltoken(&vm_token);
			break;
		}
		if ((object->ref_count == 2) && (object->shadow_count == 1)) {
			object->ref_count--;
			if ((object->handle == NULL) &&
			    (object->type == OBJT_DEFAULT ||
			     object->type == OBJT_SWAP)) {
				vm_object_t robject;

				robject = LIST_FIRST(&object->shadow_head);
				KASSERT(robject != NULL,
					("vm_object_deallocate: ref_count: "
					"%d, shadow_count: %d",
					object->ref_count,
					object->shadow_count));

				if ((robject->handle == NULL) &&
				    (robject->type == OBJT_DEFAULT ||
				     robject->type == OBJT_SWAP)) {

					robject->ref_count++;

					while (
						robject->paging_in_progress ||
						object->paging_in_progress
					) {
						vm_object_pip_sleep(robject, "objde1");
						vm_object_pip_sleep(object, "objde2");
					}

					if (robject->ref_count == 1) {
						robject->ref_count--;
						object = robject;
						goto doterm;
					}

					object = robject;
					vm_object_collapse(object);
					lwkt_reltoken(&vm_token);
					continue;
				}
			}
			lwkt_reltoken(&vm_token);
			break;
		}

		/*
		 * Normal dereferencing path
		 */
		object->ref_count--;
		if (object->ref_count != 0) {
			lwkt_reltoken(&vm_token);
			break;
		}

		/*
		 * Termination path
		 */
doterm:
		temp = object->backing_object;
		if (temp) {
			LIST_REMOVE(object, shadow_list);
			temp->shadow_count--;
			temp->generation++;
			object->backing_object = NULL;
		}
		lwkt_reltoken(&vm_token);

		/*
		 * Don't double-terminate, we could be in a termination
		 * recursion due to the terminate having to sync data
		 * to disk.
		 */
		if ((object->flags & OBJ_DEAD) == 0)
			vm_object_terminate(object);
		object = temp;
	}
}

/*
 * Destroy the specified object, freeing up related resources.
 *
 * The object must have zero references.
 *
 * The caller must be holding vmobj_token and properly interlock with
 * OBJ_DEAD.
 */
static int vm_object_terminate_callback(vm_page_t p, void *data);

void
vm_object_terminate(vm_object_t object)
{
	/*
	 * Make sure no one uses us.  Once we set OBJ_DEAD we should be
	 * able to safely block.
	 */
	KKASSERT((object->flags & OBJ_DEAD) == 0);
	ASSERT_LWKT_TOKEN_HELD(&vmobj_token);
	vm_object_set_flag(object, OBJ_DEAD);

	/*
	 * Wait for the pageout daemon to be done with the object
	 */
	vm_object_pip_wait(object, "objtrm");

	KASSERT(!object->paging_in_progress,
		("vm_object_terminate: pageout in progress"));

	/*
	 * Clean and free the pages, as appropriate. All references to the
	 * object are gone, so we don't need to lock it.
	 */
	if (object->type == OBJT_VNODE) {
		struct vnode *vp;

		/*
		 * Clean pages and flush buffers.
		 */
		vm_object_page_clean(object, 0, 0, OBJPC_SYNC);

		vp = (struct vnode *) object->handle;
		vinvalbuf(vp, V_SAVE, 0, 0);
	}

	/*
	 * Wait for any I/O to complete, after which there had better not
	 * be any references left on the object.
	 */
	vm_object_pip_wait(object, "objtrm");

	if (object->ref_count != 0) {
		panic("vm_object_terminate: object with references, "
		      "ref_count=%d", object->ref_count);
	}

	/*
	 * Now free any remaining pages. For internal objects, this also
	 * removes them from paging queues. Don't free wired pages, just
	 * remove them from the object. 
	 */
	lwkt_gettoken(&vm_token);
	vm_page_rb_tree_RB_SCAN(&object->rb_memq, NULL,
				vm_object_terminate_callback, NULL);
	lwkt_reltoken(&vm_token);

	/*
	 * Let the pager know object is dead.
	 */
	vm_pager_deallocate(object);

	/*
	 * Remove the object from the global object list.
	 *
	 * (we are holding vmobj_token)
	 */
	TAILQ_REMOVE(&vm_object_list, object, object_list);
	vm_object_count--;
	vm_object_dead_wakeup(object);

	if (object->ref_count != 0) {
		panic("vm_object_terminate2: object with references, "
		      "ref_count=%d", object->ref_count);
	}

	/*
	 * Free the space for the object.
	 */
	zfree(obj_zone, object);
}

/*
 * The caller must hold vm_token.
 */
static int
vm_object_terminate_callback(vm_page_t p, void *data __unused)
{
	if (p->busy || (p->flags & PG_BUSY))
		panic("vm_object_terminate: freeing busy page %p", p);
	if (p->wire_count == 0) {
		vm_page_busy(p);
		vm_page_free(p);
		mycpu->gd_cnt.v_pfree++;
	} else {
		if (p->queue != PQ_NONE)
			kprintf("vm_object_terminate: Warning: Encountered wired page %p on queue %d\n", p, p->queue);
		vm_page_busy(p);
		vm_page_remove(p);
		vm_page_wakeup(p);
	}
	return(0);
}

/*
 * The object is dead but still has an object<->pager association.  Sleep
 * and return.  The caller typically retests the association in a loop.
 *
 * Must be called with the vmobj_token held.
 */
void
vm_object_dead_sleep(vm_object_t object, const char *wmesg)
{
	ASSERT_LWKT_TOKEN_HELD(&vmobj_token);
	if (object->handle) {
		vm_object_set_flag(object, OBJ_DEADWNT);
		tsleep(object, 0, wmesg, 0);
		/* object may be invalid after this point */
	}
}

/*
 * Wakeup anyone waiting for the object<->pager disassociation on
 * a dead object.
 *
 * Must be called with the vmobj_token held.
 */
void
vm_object_dead_wakeup(vm_object_t object)
{
	ASSERT_LWKT_TOKEN_HELD(&vmobj_token);
	if (object->flags & OBJ_DEADWNT) {
		vm_object_clear_flag(object, OBJ_DEADWNT);
		wakeup(object);
	}
}
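/*
 * Illustrative note (added): a typical consumer loops on the test,
 * e.g. re-evaluating the object<->pager association each time it
 * wakes, because vm_object_dead_sleep() returns with no guarantee
 * that the object still exists:
 *
 *	while (object has a handle and is marked OBJ_DEAD)
 *		vm_object_dead_sleep(object, "wmesg");
 *
 * with the object re-looked-up (not just re-tested) after each sleep.
 */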
/*
 * Clean all dirty pages in the specified range of object.  Leaves page
 * on whatever queue it is currently on.   If NOSYNC is set then do not
 * write out pages with PG_NOSYNC set (originally comes from MAP_NOSYNC),
 * leaving the object dirty.
 *
 * When stuffing pages asynchronously, allow clustering.  XXX we need a
 * synchronous clustering mode implementation.
 *
 * Odd semantics: if start == end, we clean everything.
 *
 * The object must be locked? XXX
 */
static int vm_object_page_clean_pass1(struct vm_page *p, void *data);
static int vm_object_page_clean_pass2(struct vm_page *p, void *data);

void
vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
		     int flags)
{
	struct rb_vm_page_scan_info info;
	struct vnode *vp;
	int wholescan;
	int pagerflags;
	int curgeneration;

	lwkt_gettoken(&vm_token);
	if (object->type != OBJT_VNODE ||
	    (object->flags & OBJ_MIGHTBEDIRTY) == 0) {
		lwkt_reltoken(&vm_token);
		return;
	}

	pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) ? 
			VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
	pagerflags |= (flags & OBJPC_INVAL) ? VM_PAGER_PUT_INVAL : 0;

	vp = object->handle;

	/*
	 * Interlock other major object operations.  This allows us to 
	 * temporarily clear OBJ_WRITEABLE and OBJ_MIGHTBEDIRTY.
	 */
	crit_enter();
	vm_object_set_flag(object, OBJ_CLEANING);

	/*
	 * Handle 'entire object' case
	 */
	info.start_pindex = start;
	if (end == 0) {
		info.end_pindex = object->size - 1;
	} else {
		info.end_pindex = end - 1;
	}
	wholescan = (start == 0 && info.end_pindex == object->size - 1);
	info.limit = flags;
	info.pagerflags = pagerflags;
	info.object = object;

	/*
	 * If cleaning the entire object do a pass to mark the pages read-only.
	 * If everything worked out ok, clear OBJ_WRITEABLE and
	 * OBJ_MIGHTBEDIRTY.
	 */
	if (wholescan) {
		info.error = 0;
		vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
					vm_object_page_clean_pass1, &info);
		if (info.error == 0) {
			vm_object_clear_flag(object,
					     OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
			if (object->type == OBJT_VNODE &&
			    (vp = (struct vnode *)object->handle) != NULL) {
				if (vp->v_flag & VOBJDIRTY) 
					vclrflags(vp, VOBJDIRTY);
			}
		}
	}

	/*
	 * Do a pass to clean all the dirty pages we find.
	 */
	do {
		info.error = 0;
		curgeneration = object->generation;
		vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
					vm_object_page_clean_pass2, &info);
	} while (info.error || curgeneration != object->generation);

	vm_object_clear_flag(object, OBJ_CLEANING);
	crit_exit();
	lwkt_reltoken(&vm_token);
}
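/*
 * Illustrative usage (grounded in this file): vm_object_terminate()
 * above invokes the 'entire object' form, passing start == end == 0
 * with OBJPC_SYNC to synchronously flush every dirty page of a vnode
 * object before the buffers are invalidated:
 *
 *	vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
 */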
/*
 * The caller must hold vm_token.
 */
static 
int
vm_object_page_clean_pass1(struct vm_page *p, void *data)
{
	struct rb_vm_page_scan_info *info = data;

	vm_page_flag_set(p, PG_CLEANCHK);
	if ((info->limit & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC))
		info->error = 1;
	else
		vm_page_protect(p, VM_PROT_READ);	/* must not block */
	return(0);
}

/*
 * The caller must hold vm_token.
 */
static 
int
vm_object_page_clean_pass2(struct vm_page *p, void *data)
{
	struct rb_vm_page_scan_info *info = data;
	int n;

	/*
	 * Do not mess with pages that were inserted after we started
	 * the cleaning pass.
	 */
	if ((p->flags & PG_CLEANCHK) == 0)
		return(0);

	/*
	 * Before wasting time traversing the pmaps, check for trivial
	 * cases where the page cannot be dirty.
	 */
	if (p->valid == 0 || (p->queue - p->pc) == PQ_CACHE) {
		KKASSERT((p->dirty & p->valid) == 0);
		return(0);
	}

	/*
	 * Check whether the page is dirty or not.  The page has been set
	 * to be read-only so the check will not race a user dirtying the
	 * page.
	 */
	vm_page_test_dirty(p);
	if ((p->dirty & p->valid) == 0) {
		vm_page_flag_clear(p, PG_CLEANCHK);
		return(0);
	}

	/*
	 * If we have been asked to skip nosync pages and this is a
	 * nosync page, skip it.  Note that the object flags were
	 * not cleared in this case (because pass1 will have returned an
	 * error), so we do not have to set them.
	 */
	if ((info->limit & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) {
		vm_page_flag_clear(p, PG_CLEANCHK);
		return(0);
	}

	/*
	 * Flush as many pages as we can.  PG_CLEANCHK will be cleared on
	 * the pages that get successfully flushed.  Set info->error if
	 * we raced an object modification.
	 */
	n = vm_object_page_collect_flush(info->object, p, info->pagerflags);
	if (n == 0)
		info->error = 1;
	return(0);
}

/*
 * Collect the specified page and nearby pages and flush them out.
 * The number of pages flushed is returned.
 *
 * The caller must hold vm_token.
 */
static int
vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags)
{
	int runlen;
	int maxf;
	int chkb;
	int maxb;
	int i;
	int curgeneration;
	vm_pindex_t pi;
	vm_page_t maf[vm_pageout_page_count];
	vm_page_t mab[vm_pageout_page_count];
	vm_page_t ma[vm_pageout_page_count];

	curgeneration = object->generation;

	pi = p->pindex;
	while (vm_page_sleep_busy(p, TRUE, "vpcwai")) {
		if (object->generation != curgeneration) {
			return(0);
		}
	}
	KKASSERT(p->object == object && p->pindex == pi);

	maxf = 0;
	for(i = 1; i < vm_pageout_page_count; i++) {
		vm_page_t tp;

		if ((tp = vm_page_lookup(object, pi + i)) != NULL) {
			if ((tp->flags & PG_BUSY) ||
				((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 && 
				 (tp->flags & PG_CLEANCHK) == 0) ||
				(tp->busy != 0))
				break;
			if((tp->queue - tp->pc) == PQ_CACHE) {
				vm_page_flag_clear(tp, PG_CLEANCHK);
				break;
			}
			vm_page_test_dirty(tp);
			if ((tp->dirty & tp->valid) == 0) {
				vm_page_flag_clear(tp, PG_CLEANCHK);
				break;
			}
			maf[ i - 1 ] = tp;
			maxf++;
			continue;
		}
		break;
	}

	maxb = 0;
	chkb = vm_pageout_page_count -  maxf;
	if (chkb) {
		for(i = 1; i < chkb;i++) {
			vm_page_t tp;

			if ((tp = vm_page_lookup(object, pi - i)) != NULL) {
				if ((tp->flags & PG_BUSY) ||
					((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 && 
					 (tp->flags & PG_CLEANCHK) == 0) ||
					(tp->busy != 0))
					break;
				if((tp->queue - tp->pc) == PQ_CACHE) {
					vm_page_flag_clear(tp, PG_CLEANCHK);
					break;
				}
				vm_page_test_dirty(tp);
				if ((tp->dirty & tp->valid) == 0) {
					vm_page_flag_clear(tp, PG_CLEANCHK);
					break;
				}
				mab[ i - 1 ] = tp;
				maxb++;
				continue;
			}
			break;
		}
	}

	for(i = 0; i < maxb; i++) {
		int index = (maxb - i) - 1;
		ma[index] = mab[i];
		vm_page_flag_clear(ma[index], PG_CLEANCHK);
	}
	vm_page_flag_clear(p, PG_CLEANCHK);
	ma[maxb] = p;
	for(i = 0; i < maxf; i++) {
		int index = (maxb + i) + 1;
		ma[index] = maf[i];
		vm_page_flag_clear(ma[index], PG_CLEANCHK);
	}
	runlen = maxb + maxf + 1;

	vm_pageout_flush(ma, runlen, pagerflags);
	for (i = 0; i < runlen; i++) {
		if (ma[i]->valid & ma[i]->dirty) {
			vm_page_protect(ma[i], VM_PROT_READ);
			vm_page_flag_set(ma[i], PG_CLEANCHK);

			/*
			 * maxf will end up being the actual number of pages
			 * we wrote out contiguously, non-inclusive of the
			 * first page.  We do not count look-behind pages.
			 */
			if (i >= maxb + 1 && (maxf > i - maxb - 1))
				maxf = i - maxb - 1;
		}
	}
	return(maxf + 1);
}
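/*
 * Illustrative layout note (added): the ma[] run is assembled with the
 * look-behind pages reversed into ascending pindex order.  For example,
 * with maxb == 2 and maxf == 3 around a page at pindex P:
 *
 *	ma[0] = P-2, ma[1] = P-1, ma[2] = P, ma[3] = P+1,
 *	ma[4] = P+2, ma[5] = P+3
 *
 * giving runlen == maxb + maxf + 1 == 6 contiguous pages for the pager.
 */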
/*
 * Same as vm_object_pmap_copy, except range checking really
 * works, and is meant for small sections of an object.
 *
 * This code protects resident pages by making them read-only
 * and is typically called on a fork or split when a page
 * is converted to copy-on-write.  
 *
 * NOTE: If the page is already at VM_PROT_NONE, calling
 * vm_page_protect will have no effect.
 */
void
vm_object_pmap_copy_1(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
{
	vm_pindex_t idx;
	vm_page_t p;

	if (object == NULL || (object->flags & OBJ_WRITEABLE) == 0)
		return;

	/*
	 * spl protection needed to prevent races between the lookup,
	 * an interrupt unbusy/free, and our protect call.
	 */
	crit_enter();
	lwkt_gettoken(&vm_token);
	for (idx = start; idx < end; idx++) {
		p = vm_page_lookup(object, idx);
		if (p == NULL)
			continue;
		vm_page_protect(p, VM_PROT_READ);
	}
	lwkt_reltoken(&vm_token);
	crit_exit();
}

/*
 * Removes all physical pages in the specified object range from all
 * physical maps.
 *
 * The object must *not* be locked.
 */

static int vm_object_pmap_remove_callback(vm_page_t p, void *data);

void
vm_object_pmap_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
{
	struct rb_vm_page_scan_info info;

	if (object == NULL)
		return;
	info.start_pindex = start;
	info.end_pindex = end - 1;

	crit_enter();
	lwkt_gettoken(&vm_token);
	vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
				vm_object_pmap_remove_callback, &info);
	if (start == 0 && end == object->size)
		vm_object_clear_flag(object, OBJ_WRITEABLE);
	lwkt_reltoken(&vm_token);
	crit_exit();
}

/*
 * The caller must hold vm_token.
 */
static int
vm_object_pmap_remove_callback(vm_page_t p, void *data __unused)
{
	vm_page_protect(p, VM_PROT_NONE);
	return(0);
}

/*
 * Implements the madvise function at the object/page level.
 *
 * MADV_WILLNEED	(any object)
 *
 *	Activate the specified pages if they are resident.
 *
 * MADV_DONTNEED	(any object)
 *
 *	Deactivate the specified pages if they are resident.
 *
 * MADV_FREE	(OBJT_DEFAULT/OBJT_SWAP objects, OBJ_ONEMAPPING only)
 *
 *	Deactivate and clean the specified pages if they are
 *	resident.  This permits the process to reuse the pages
 *	without faulting or the kernel to reclaim the pages
 *	without I/O.
 *
 * No requirements.
 */
void
vm_object_madvise(vm_object_t object, vm_pindex_t pindex, int count, int advise)
{
	vm_pindex_t end, tpindex;
	vm_object_t tobject;
	vm_page_t m;

	if (object == NULL)
		return;

	end = pindex + count;

	lwkt_gettoken(&vm_token);

	/*
	 * Locate and adjust resident pages
	 */
	for (; pindex < end; pindex += 1) {
relookup:
		tobject = object;
		tpindex = pindex;
shadowlookup:
		/*
		 * MADV_FREE only operates on OBJT_DEFAULT or OBJT_SWAP pages
		 * and those pages must be OBJ_ONEMAPPING.
		 */
		if (advise == MADV_FREE) {
			if ((tobject->type != OBJT_DEFAULT &&
			     tobject->type != OBJT_SWAP) ||
			    (tobject->flags & OBJ_ONEMAPPING) == 0) {
				continue;
			}
		}

		/*
		 * spl protection is required to avoid a race between the
		 * lookup, an interrupt unbusy/free, and our busy check.
		 */

		crit_enter();
		m = vm_page_lookup(tobject, tpindex);

		if (m == NULL) {
			/*
			 * There may be swap even if there is no backing page
			 */
			if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
				swap_pager_freespace(tobject, tpindex, 1);

			/*
			 * next object
			 */
			crit_exit();
			if (tobject->backing_object == NULL)
				continue;
			tpindex += OFF_TO_IDX(tobject->backing_object_offset);
			tobject = tobject->backing_object;
			goto shadowlookup;
		}

		/*
		 * If the page is busy or not in a normal active state,
		 * we skip it.  If the page is not managed there are no
		 * page queues to mess with.  Things can break if we mess
		 * with pages in any of the below states.
		 */
		if (
		    m->hold_count ||
		    m->wire_count ||
		    (m->flags & PG_UNMANAGED) ||
		    m->valid != VM_PAGE_BITS_ALL
		) {
			crit_exit();
			continue;
		}

		if (vm_page_sleep_busy(m, TRUE, "madvpo")) {
			crit_exit();
			goto relookup;
		}
		crit_exit();

		/*
		 * Theoretically once a page is known not to be busy, an
		 * interrupt cannot come along and rip it out from under us.
		 */

		if (advise == MADV_WILLNEED) {
			vm_page_activate(m);
		} else if (advise == MADV_DONTNEED) {
			vm_page_dontneed(m);
		} else if (advise == MADV_FREE) {
			/*
			 * Mark the page clean.  This will allow the page
			 * to be freed up by the system.  However, such pages
			 * are often reused quickly by malloc()/free()
			 * so we do not do anything that would cause
			 * a page fault if we can help it.
			 *
			 * Specifically, we do not try to actually free
			 * the page now nor do we try to put it in the
			 * cache (which would cause a page fault on reuse).
			 * But we do make the page as freeable as we
			 * can without actually taking the step of unmapping
			 * it.
			 */
			pmap_clear_modify(m);
			m->dirty = 0;
			m->act_count = 0;
			vm_page_dontneed(m);
			if (tobject->type == OBJT_SWAP)
				swap_pager_freespace(tobject, tpindex, 1);
		}
	}
	lwkt_reltoken(&vm_token);
}
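/*
 * Illustrative usage sketch (hypothetical values, not original code):
 * the madvise() system call path ultimately reduces to a call such as
 * the one below, here marking 4 pages starting at page index 10 of the
 * object as reusable without further I/O.
 */
#if 0
static void
example_madv_free(vm_object_t object)
{
	vm_object_madvise(object, 10, 4, MADV_FREE);
}
#endif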
/*
 * Create a new object which is backed by the specified existing object
 * range.  The source object reference is deallocated.
 *
 * The new object and offset into that object are returned in the source
 * parameters.
 *
 * No other requirements.
 */
void
vm_object_shadow(vm_object_t *object, vm_ooffset_t *offset, vm_size_t length)
{
	vm_object_t source;
	vm_object_t result;

	source = *object;

	/*
	 * Don't create the new object if the old object isn't shared.
	 */
	lwkt_gettoken(&vm_token);

	if (source != NULL &&
	    source->ref_count == 1 &&
	    source->handle == NULL &&
	    (source->type == OBJT_DEFAULT ||
	     source->type == OBJT_SWAP)) {
		lwkt_reltoken(&vm_token);
		return;
	}

	/*
	 * Allocate a new object with the given length
	 */

	if ((result = vm_object_allocate(OBJT_DEFAULT, length)) == NULL)
		panic("vm_object_shadow: no object for shadowing");

	/*
	 * The new object shadows the source object, adding a reference to it.
	 * Our caller changes his reference to point to the new object,
	 * removing a reference to the source object.  Net result: no change
	 * of reference count.
	 *
	 * Try to optimize the result object's page color when shadowing
	 * in order to maintain page coloring consistency in the combined 
	 * shadowed object.
	 */
	result->backing_object = source;
	if (source) {
		LIST_INSERT_HEAD(&source->shadow_head, result, shadow_list);
		source->shadow_count++;
		source->generation++;
		result->pg_color = (source->pg_color + OFF_TO_IDX(*offset)) & PQ_L2_MASK;
	}

	/*
	 * Store the offset into the source object, and fix up the offset into
	 * the new object.
	 */
	result->backing_object_offset = *offset;
	lwkt_reltoken(&vm_token);

	/*
	 * Return the new things
	 */
	*offset = 0;
	*object = result;
}
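/*
 * Illustrative usage sketch (hypothetical variables, not original code):
 * the caller's object/offset pair is passed by reference and replaced,
 * e.g. when setting up copy-on-write.  On return 'obj' names the new
 * shadow object with 'off' reset to 0, and the original object hangs
 * off obj->backing_object.
 */
#if 0
static void
example_shadow(vm_object_t entry_object, vm_ooffset_t entry_offset,
	       vm_size_t length)
{
	vm_object_t obj = entry_object;		/* hypothetical inputs */
	vm_ooffset_t off = entry_offset;

	vm_object_shadow(&obj, &off, length);
	/* obj is now the shadow object; off == 0 */
}
#endif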
#define OBSC_TEST_ALL_SHADOWED	0x0001
#define OBSC_COLLAPSE_NOWAIT	0x0002
#define OBSC_COLLAPSE_WAIT	0x0004

static int vm_object_backing_scan_callback(vm_page_t p, void *data);

/*
 * The caller must hold vm_token.
 */
static __inline int
vm_object_backing_scan(vm_object_t object, int op)
{
	struct rb_vm_page_scan_info info;
	vm_object_t backing_object;

	crit_enter();

	backing_object = object->backing_object;
	info.backing_offset_index = OFF_TO_IDX(object->backing_object_offset);

	/*
	 * Initial conditions
	 */

	if (op & OBSC_TEST_ALL_SHADOWED) {
		/*
		 * We do not want to have to test for the existence of
		 * swap pages in the backing object.  XXX but with the
		 * new swapper this would be pretty easy to do.
		 *
		 * XXX what about anonymous MAP_SHARED memory that hasn't
		 * been ZFOD faulted yet?  If we do not test for this, the
		 * shadow test may succeed! XXX
		 */
		if (backing_object->type != OBJT_DEFAULT) {
			crit_exit();
			return(0);
		}
	}
	if (op & OBSC_COLLAPSE_WAIT) {
		KKASSERT((backing_object->flags & OBJ_DEAD) == 0);
		vm_object_set_flag(backing_object, OBJ_DEAD);
	}
	/*
	 * Our scan.  We have to retry if a negative error code is returned,
	 * otherwise 0 or 1 will be returned in info.error.  0 indicates that
	 * the scan had to be stopped because the parent does not completely
	 * shadow the child.
	 */
	info.object = object;
	info.backing_object = backing_object;
	info.limit = op;
	do {
		info.error = 1;
		vm_page_rb_tree_RB_SCAN(&backing_object->rb_memq, NULL,
					vm_object_backing_scan_callback,
					&info);
	} while (info.error < 0);
	crit_exit();
	return(info.error);
}

/*
 * The caller must hold vm_token.
 */
static int
vm_object_backing_scan_callback(vm_page_t p, void *data)
{
	struct rb_vm_page_scan_info *info = data;
	vm_object_t backing_object;
	vm_object_t object;
	vm_pindex_t new_pindex;
	vm_pindex_t backing_offset_index;
	int op;

	new_pindex = p->pindex - info->backing_offset_index;
	op = info->limit;
	object = info->object;
	backing_object = info->backing_object;
	backing_offset_index = info->backing_offset_index;

	if (op & OBSC_TEST_ALL_SHADOWED) {
		vm_page_t pp;

		/*
		 * Ignore pages outside the parent object's range
		 * and outside the parent object's mapping of the 
		 * backing object.
		 *
		 * note that we do not busy the backing object's
		 * page.
		 */
		if (
		    p->pindex < backing_offset_index ||
		    new_pindex >= object->size
		) {
			return(0);
		}

		/*
		 * See if the parent has the page or if the parent's
		 * object pager has the page.  If the parent has the
		 * page but the page is not valid, the parent's
		 * object pager must have the page.
		 *
		 * If this fails, the parent does not completely shadow
		 * the object and we might as well give up now.
		 */

		pp = vm_page_lookup(object, new_pindex);
		if ((pp == NULL || pp->valid == 0) &&
		    !vm_pager_has_page(object, new_pindex)
		) {
			info->error = 0;	/* problemo */
			return(-1);		/* stop the scan */
		}
	}

	/*
	 * Check for busy page
	 */

	if (op & (OBSC_COLLAPSE_WAIT | OBSC_COLLAPSE_NOWAIT)) {
		vm_page_t pp;

		if (op & OBSC_COLLAPSE_NOWAIT) {
			if (
			    (p->flags & PG_BUSY) ||
			    !p->valid || 
			    p->hold_count || 
			    p->wire_count ||
			    p->busy
			) {
				return(0);
			}
		} else if (op & OBSC_COLLAPSE_WAIT) {
			if (vm_page_sleep_busy(p, TRUE, "vmocol")) {
				/*
				 * If we slept, anything could have
				 * happened.   Ask that the scan be restarted.
				 *
				 * Since the object is marked dead, the
				 * backing offset should not have changed.  
				 */
				info->error = -1;
				return(-1);
			}
		}

		/* 
		 * Busy the page
		 */
		vm_page_busy(p);

		KASSERT(
		    p->object == backing_object,
		    ("vm_object_qcollapse(): object mismatch")
		);

		/*
		 * Destroy any associated swap
		 */
		if (backing_object->type == OBJT_SWAP)
			swap_pager_freespace(backing_object, p->pindex, 1);

		if (
		    p->pindex < backing_offset_index ||
		    new_pindex >= object->size
		) {
			/*
			 * Page is out of the parent object's range, we 
			 * can simply destroy it. 
			 */
			vm_page_protect(p, VM_PROT_NONE);
			vm_page_free(p);
			return(0);
		}

		pp = vm_page_lookup(object, new_pindex);
		if (pp != NULL || vm_pager_has_page(object, new_pindex)) {
			/*
			 * page already exists in parent OR swap exists
			 * for this location in the parent.  Destroy 
			 * the original page from the backing object.
			 *
			 * Leave the parent's page alone
			 */
			vm_page_protect(p, VM_PROT_NONE);
			vm_page_free(p);
			return(0);
		}

		/*
		 * Page does not exist in parent, rename the
		 * page from the backing object to the main object. 
		 *
		 * If the page was mapped to a process, it can remain 
		 * mapped through the rename.
		 */
		if ((p->queue - p->pc) == PQ_CACHE)
			vm_page_deactivate(p);

		vm_page_rename(p, object, new_pindex);
		/* page automatically made dirty by rename */
	}
	return(0);
}

/*
 * This version of collapse allows the operation to occur earlier and
 * when paging_in_progress is true for an object...  This is not a complete
 * operation, but should plug 99.9% of the rest of the leaks.
 *
 * The caller must hold vm_token and vmobj_token.
 * (only called from vm_object_collapse)
 */
static void
vm_object_qcollapse(vm_object_t object)
{
	vm_object_t backing_object = object->backing_object;

	if (backing_object->ref_count != 1)
		return;

	backing_object->ref_count += 2;

	vm_object_backing_scan(object, OBSC_COLLAPSE_NOWAIT);

	backing_object->ref_count -= 2;
}
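/*
 * Added interpretation (not from the original source): the temporary
 * ref_count += 2 above keeps backing_object alive for the duration of
 * the scan and guarantees its ref_count is never 1, so page renames
 * performed by vm_object_backing_scan() cannot trigger a recursive
 * collapse or premature deallocation of the backing object.
 */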
1381 /*
1382  * Collapse an object with the object backing it.  Pages in the backing
1383  * object are moved into the parent, and the backing object is deallocated.
1384  */
1385 void
1386 vm_object_collapse(vm_object_t object)
1387 {
1388         ASSERT_LWKT_TOKEN_HELD(&vm_token);
1389         ASSERT_LWKT_TOKEN_HELD(&vmobj_token);
1390
1391         while (TRUE) {
1392                 vm_object_t backing_object;
1393
1394                 /*
1395                  * Verify that the conditions are right for collapse:
1396                  *
1397                  * The object exists and the backing object exists.
1398                  */
1399                 if (object == NULL)
1400                         break;
1401
1402                 if ((backing_object = object->backing_object) == NULL)
1403                         break;
1404
1405                 /*
1406                  * we check the backing object first, because it is most likely
1407                  * not collapsable.
1408                  */
1409                 if (backing_object->handle != NULL ||
1410                     (backing_object->type != OBJT_DEFAULT &&
1411                      backing_object->type != OBJT_SWAP) ||
1412                     (backing_object->flags & OBJ_DEAD) ||
1413                     object->handle != NULL ||
1414                     (object->type != OBJT_DEFAULT &&
1415                      object->type != OBJT_SWAP) ||
1416                     (object->flags & OBJ_DEAD)) {
1417                         break;
1418                 }
1419
1420                 if (
1421                     object->paging_in_progress != 0 ||
1422                     backing_object->paging_in_progress != 0
1423                 ) {
1424                         vm_object_qcollapse(object);
1425                         break;
1426                 }
1427
1428                 /*
1429                  * We know that we can either collapse the backing object (if
1430                  * the parent is the only reference to it) or (perhaps) have
1431                  * the parent bypass the object if the parent happens to shadow
1432                  * all the resident pages in the entire backing object.
1433                  *
1434                  * This is ignoring pager-backed pages such as swap pages.
1435                  * vm_object_backing_scan fails the shadowing test in this
1436                  * case.
1437                  */
1438
1439                 if (backing_object->ref_count == 1) {
1440                         /*
1441                          * If there is exactly one reference to the backing
1442                          * object, we can collapse it into the parent.  
1443                          */
1444                         vm_object_backing_scan(object, OBSC_COLLAPSE_WAIT);
1445
1446                         /*
1447                          * Move the pager from backing_object to object.
1448                          */
1449
1450                         if (backing_object->type == OBJT_SWAP) {
1451                                 vm_object_pip_add(backing_object, 1);
1452
1453                                 /*
1454                                  * scrap the paging_offset junk and do a 
1455                                  * discrete copy.  This also removes major 
1456                                  * assumptions about how the swap-pager 
1457                                  * works from where it doesn't belong.  The
1458                                  * new swapper is able to optimize the
1459                                  * destroy-source case.
1460                                  */
1461
1462                                 vm_object_pip_add(object, 1);
1463                                 swap_pager_copy(
1464                                     backing_object,
1465                                     object,
1466                                     OFF_TO_IDX(object->backing_object_offset), TRUE);
1467                                 vm_object_pip_wakeup(object);
1468
1469                                 vm_object_pip_wakeup(backing_object);
1470                         }
1471                         /*
1472                          * Object now shadows whatever backing_object did.
1473                          * Note that the reference to 
1474                          * backing_object->backing_object moves from within 
1475                          * backing_object to within object.
1476                          */
1477
1478                         LIST_REMOVE(object, shadow_list);
1479                         object->backing_object->shadow_count--;
1480                         object->backing_object->generation++;
1481                         if (backing_object->backing_object) {
1482                                 LIST_REMOVE(backing_object, shadow_list);
1483                                 backing_object->backing_object->shadow_count--;
1484                                 backing_object->backing_object->generation++;
1485                         }
1486                         object->backing_object = backing_object->backing_object;
1487                         if (object->backing_object) {
1488                                 LIST_INSERT_HEAD(
1489                                     &object->backing_object->shadow_head,
1490                                     object, 
1491                                     shadow_list
1492                                 );
1493                                 object->backing_object->shadow_count++;
1494                                 object->backing_object->generation++;
1495                         }
1496
1497                         object->backing_object_offset +=
1498                             backing_object->backing_object_offset;
1499
1500                         /*
1501                          * Discard backing_object.
1502                          *
1503                          * Since the backing object has no pages, no pager left,
1504                          * and no object references within it, all that is
1505                          * necessary is to dispose of it.
1506                          */
1507
1508                         KASSERT(backing_object->ref_count == 1,
1509                                 ("backing_object %p was somehow "
1510                                  "re-referenced during collapse!",
1511                                  backing_object));
1512                         KASSERT(RB_EMPTY(&backing_object->rb_memq),
1513                                 ("backing_object %p somehow has left "
1514                                  "over pages during collapse!",
1515                                  backing_object));
1516
1517                         /* (we are holding vmobj_token) */
1518                         TAILQ_REMOVE(&vm_object_list, backing_object,
1519                                      object_list);
1520                         vm_object_count--;
1521
1522                         zfree(obj_zone, backing_object);
1523
1524                         object_collapses++;
1525                 } else {
1526                         vm_object_t new_backing_object;
1527
1528                         /*
1529                          * If we do not entirely shadow the backing object,
1530                          * there is nothing we can do, so we give up.
1531                          */
1532
1533                         if (vm_object_backing_scan(object, OBSC_TEST_ALL_SHADOWED) == 0) {
1534                                 break;
1535                         }
1536
1537                         /*
1538                          * Make the parent shadow the next object in the
1539                          * chain.  Deallocating backing_object will not remove
1540                          * it, since its reference count is at least 2.
1541                          */
1542
1543                         LIST_REMOVE(object, shadow_list);
1544                         backing_object->shadow_count--;
1545                         backing_object->generation++;
1546
1547                         new_backing_object = backing_object->backing_object;
1548                         if ((object->backing_object = new_backing_object) != NULL) {
1549                                 vm_object_reference(new_backing_object);
1550                                 LIST_INSERT_HEAD(
1551                                     &new_backing_object->shadow_head,
1552                                     object,
1553                                     shadow_list
1554                                 );
1555                                 new_backing_object->shadow_count++;
1556                                 new_backing_object->generation++;
1557                                 object->backing_object_offset +=
1558                                         backing_object->backing_object_offset;
1559                         }
1560
1561                         /*
1562                          * Drop the reference count on backing_object.  Since
1563                          * its ref_count was at least 2, it will not vanish,
1564                          * but we still go through vm_object_deallocate_locked()
1565                          * to keep the accounting consistent.
1566                          */
1567                         vm_object_deallocate_locked(backing_object);
1568                         object_bypasses++;
1569                 }
1570
1571                 /*
1572                  * Try again with this object's new backing object.
1573                  */
1574         }
1575 }
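
/*
 * Illustrative sketch (not compiled): a typical collapse trigger.  When
 * a mapping drops its reference to an object, the deallocation path,
 * with the requisite tokens held, attempts a collapse so intermediate
 * shadow objects do not accumulate.  The variable names here are
 * hypothetical.
 */
#if 0
        vm_object_t obj = entry->object.vm_object;

        /* Fold any redundant shadow chain below obj. */
        vm_object_collapse(obj);
#endif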
1576
1577 /*
1578  * Removes all physical pages in the specified object range from the
1579  * object's list of pages.
1580  *
1581  * No requirements.
1582  */
1583 static int vm_object_page_remove_callback(vm_page_t p, void *data);
1584
1585 void
1586 vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
1587                       boolean_t clean_only)
1588 {
1589         struct rb_vm_page_scan_info info;
1590         int all;
1591
1592         /*
1593          * Degenerate cases and assertions
1594          */
1595         lwkt_gettoken(&vm_token);
1596         if (object == NULL ||
1597             (object->resident_page_count == 0 && object->swblock_count == 0)) {
1598                 lwkt_reltoken(&vm_token);
1599                 return;
1600         }
1601         KASSERT(object->type != OBJT_PHYS, 
1602                 ("attempt to remove pages from a physical object"));
1603
1604         /*
1605          * Indicate that paging is occurring on the object
1606          */
1607         crit_enter();
1608         vm_object_pip_add(object, 1);
1609
1610         /*
1611          * Figure out the actual removal range and whether we are removing
1612          * the entire contents of the object or not.  If removing the entire
1613          * contents, be sure to get all pages, even those that might be 
1614          * beyond the end of the object.
1615          */
1616         info.start_pindex = start;
1617         if (end == 0)
1618                 info.end_pindex = (vm_pindex_t)-1;
1619         else
1620                 info.end_pindex = end - 1;
1621         info.limit = clean_only;
1622         all = (start == 0 && info.end_pindex >= object->size - 1);
1623
1624         /*
1625          * Loop until we are sure we have gotten them all.
1626          */
1627         do {
1628                 info.error = 0;
1629                 vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
1630                                         vm_object_page_remove_callback, &info);
1631         } while (info.error);
1632
1633         /*
1634          * Remove any related swap unless the object is swap-backed and
1635          * we are removing only clean pages (the swap is a clean copy).
1636          */
1637         if (object->type != OBJT_SWAP || clean_only == FALSE) {
1638                 if (all)
1639                         swap_pager_freespace_all(object);
1640                 else
1641                         swap_pager_freespace(object, info.start_pindex,
1642                              info.end_pindex - info.start_pindex + 1);
1643         }
1644
1645         /*
1646          * Cleanup
1647          */
1648         vm_object_pip_wakeup(object);
1649         crit_exit();
1650         lwkt_reltoken(&vm_token);
1651 }
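
/*
 * Illustrative sketch (not compiled): a truncation-style use of
 * vm_object_page_remove().  end == 0 means "through the end of the
 * object" and clean_only == FALSE discards dirty pages as well.  The
 * "object" and "newsize" names are hypothetical.
 */
#if 0
        vm_pindex_t newpages = OFF_TO_IDX(newsize + PAGE_MASK);

        /* Throw away every page from the new EOF onward. */
        vm_object_page_remove(object, newpages, 0, FALSE);
#endif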
1652
1653 /*
1654  * The caller must hold vm_token.
1655  */
1656 static int
1657 vm_object_page_remove_callback(vm_page_t p, void *data)
1658 {
1659         struct rb_vm_page_scan_info *info = data;
1660
1661         /*
1662          * Wired pages cannot be destroyed, but they can be invalidated
1663          * and we do so if clean_only (limit) is not set.
1664          *
1665          * WARNING!  The page may be wired due to being part of a buffer
1666          *           cache buffer, and the buffer might be marked B_CACHE.
1667          *           This is fine as part of a truncation but VFSs must be
1668          *           sure to fix the buffer up when re-extending the file.
1669          */
1670         if (p->wire_count != 0) {
1671                 vm_page_protect(p, VM_PROT_NONE);
1672                 if (info->limit == 0)
1673                         p->valid = 0;
1674                 return(0);
1675         }
1676
1677         /*
1678          * The busy flags are only cleared at interrupt time; if the page
1679          * is busy we flag an error so the caller rescans the object.
1680          */
1681
1682         if (vm_page_sleep_busy(p, TRUE, "vmopar")) {
1683                 info->error = 1;
1684                 return(0);
1685         }
1686
1687         /*
1688          * limit is our clean_only flag.  If set and the page is dirty, do
1689          * not free it.  If set and the page is being held by someone, do
1690          * not free it.
1691          */
1692         if (info->limit && p->valid) {
1693                 vm_page_test_dirty(p);
1694                 if (p->valid & p->dirty)
1695                         return(0);
1696                 if (p->hold_count)
1697                         return(0);
1698         }
1699
1700         /*
1701          * Destroy the page
1702          */
1703         vm_page_busy(p);
1704         vm_page_protect(p, VM_PROT_NONE);
1705         vm_page_free(p);
1706         return(0);
1707 }
1708
1709 /*
1710  * Coalesces two objects backing up adjoining regions of memory into a
1711  * single object.
1712  *
1713  * returns TRUE if objects were combined.
1714  * Returns TRUE if the objects were combined.
1715  * NOTE: Only works at the moment if the second object is NULL -
1716  *       if it's not, which object do we lock first?
1717  *
1718  * Parameters:
1719  *      prev_object     First object to coalesce
1720  *      prev_pindex     Page index within prev_object at which the
1721  *                      coalesced region begins
1722  *
1723  *      prev_size       Size of reference to prev_object
1724  *      next_size       Size of the adjoining region to be absorbed
1725  *                      into prev_object
1726  *
1727  * The object must not be locked.
1728  * The caller must hold vm_token and vmobj_token.
1729  */
1730 boolean_t
1731 vm_object_coalesce(vm_object_t prev_object, vm_pindex_t prev_pindex,
1732                    vm_size_t prev_size, vm_size_t next_size)
1733 {
1734         vm_pindex_t next_pindex;
1735
1736         ASSERT_LWKT_TOKEN_HELD(&vm_token);
1737         ASSERT_LWKT_TOKEN_HELD(&vmobj_token);
1738
1739         if (prev_object == NULL) {
1740                 return (TRUE);
1741         }
1742
1743         if (prev_object->type != OBJT_DEFAULT &&
1744             prev_object->type != OBJT_SWAP) {
1745                 return (FALSE);
1746         }
1747
1748         /*
1749          * Try to collapse the object first
1750          */
1751         vm_object_collapse(prev_object);
1752
1753         /*
1754          * Can't coalesce if: more than one reference, paged out, shadows
1755          * another object, or has a copy elsewhere (any of which mean that
1756          * the pages not mapped to prev_entry may be in use anyway).
1757          */
1758
1759         if (prev_object->backing_object != NULL)
1760                 return (FALSE);
1761
1762         prev_size >>= PAGE_SHIFT;
1763         next_size >>= PAGE_SHIFT;
1764         next_pindex = prev_pindex + prev_size;
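        /*
         * For example, with 4K pages (PAGE_SHIFT == 12), prev_pindex = 10
         * and prev_size = 0x4000 yield prev_size >> PAGE_SHIFT == 4 and
         * next_pindex == 14.  The coalesce may only proceed if the next
         * region begins exactly at the current end of prev_object, or if
         * prev_object is unshared.
         */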
1765
1766         if ((prev_object->ref_count > 1) &&
1767             (prev_object->size != next_pindex)) {
1768                 return (FALSE);
1769         }
1770
1771         /*
1772          * Remove any pages that may still be in the object from a previous
1773          * deallocation.
1774          */
1775         if (next_pindex < prev_object->size) {
1776                 vm_object_page_remove(prev_object,
1777                                       next_pindex,
1778                                       next_pindex + next_size, FALSE);
1779                 if (prev_object->type == OBJT_SWAP)
1780                         swap_pager_freespace(prev_object,
1781                                              next_pindex, next_size);
1782         }
1783
1784         /*
1785          * Extend the object if necessary.
1786          */
1787         if (next_pindex + next_size > prev_object->size)
1788                 prev_object->size = next_pindex + next_size;
1789         return (TRUE);
1790 }
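
/*
 * Illustrative sketch (not compiled): the sort of call a map insertion
 * path makes, with vm_token and vmobj_token held, to grow the previous
 * entry's object instead of allocating a fresh one.  The variable names
 * are hypothetical.
 */
#if 0
        if (vm_object_coalesce(prev_object, OFF_TO_IDX(prev_offset),
                               prev_size, new_size)) {
                /* The adjoining range is now backed by prev_object. */
        }
#endif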
1791
1792 /*
1793  * Make the object writable and flag it as possibly being dirty.
1794  *
1795  * No requirements.
1796  */
1797 void
1798 vm_object_set_writeable_dirty(vm_object_t object)
1799 {
1800         struct vnode *vp;
1801
1802         lwkt_gettoken(&vm_token);
1803         vm_object_set_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
1804         if (object->type == OBJT_VNODE &&
1805             (vp = (struct vnode *)object->handle) != NULL) {
1806                 if ((vp->v_flag & VOBJDIRTY) == 0) {
1807                         vsetflags(vp, VOBJDIRTY);
1808                 }
1809         }
1810         lwkt_reltoken(&vm_token);
1811 }
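
/*
 * Illustrative sketch (not compiled): a caller typically flags the
 * object before granting write access to its pages so the syncer knows
 * the backing vnode may need flushing.  The "object" name is
 * hypothetical.
 */
#if 0
        vm_object_set_writeable_dirty(object);
#endif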
1812
1813 #include "opt_ddb.h"
1814 #ifdef DDB
1815 #include <sys/kernel.h>
1816
1817 #include <sys/cons.h>
1818
1819 #include <ddb/ddb.h>
1820
1821 static int      _vm_object_in_map (vm_map_t map, vm_object_t object,
1822                                        vm_map_entry_t entry);
1823 static int      vm_object_in_map (vm_object_t object);
1824
1825 /*
1826  * The caller must hold vm_token.
1827  */
1828 static int
1829 _vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
1830 {
1831         vm_map_t tmpm;
1832         vm_map_entry_t tmpe;
1833         vm_object_t obj;
1834         int entcount;
1835
1836         if (map == 0)
1837                 return 0;
1838         if (entry == 0) {
1839                 tmpe = map->header.next;
1840                 entcount = map->nentries;
1841                 while (entcount-- && (tmpe != &map->header)) {
1842                         if (_vm_object_in_map(map, object, tmpe)) {
1843                                 return 1;
1844                         }
1845                         tmpe = tmpe->next;
1846                 }
1847                 return (0);
1848         }
1849         switch(entry->maptype) {
1850         case VM_MAPTYPE_SUBMAP:
1851                 tmpm = entry->object.sub_map;
1852                 tmpe = tmpm->header.next;
1853                 entcount = tmpm->nentries;
1854                 while (entcount-- && tmpe != &tmpm->header) {
1855                         if (_vm_object_in_map(tmpm, object, tmpe)) {
1856                                 return 1;
1857                         }
1858                         tmpe = tmpe->next;
1859                 }
1860                 break;
1861         case VM_MAPTYPE_NORMAL:
1862         case VM_MAPTYPE_VPAGETABLE:
1863                 obj = entry->object.vm_object;
1864                 while (obj) {
1865                         if (obj == object)
1866                                 return 1;
1867                         obj = obj->backing_object;
1868                 }
1869                 break;
1870         default:
1871                 break;
1872         }
1873         return 0;
1874 }
1875
1876 static int vm_object_in_map_callback(struct proc *p, void *data);
1877
1878 struct vm_object_in_map_info {
1879         vm_object_t object;
1880         int rv;
1881 };
1882
1883 /*
1884  * Debugging only
1885  */
1886 static int
1887 vm_object_in_map(vm_object_t object)
1888 {
1889         struct vm_object_in_map_info info;
1890
1891         info.rv = 0;
1892         info.object = object;
1893
1894         allproc_scan(vm_object_in_map_callback, &info);
1895         if (info.rv)
1896                 return 1;
1897         if (_vm_object_in_map(&kernel_map, object, 0))
1898                 return 1;
1899         if (_vm_object_in_map(&pager_map, object, 0))
1900                 return 1;
1901         if (_vm_object_in_map(&buffer_map, object, 0))
1902                 return 1;
1903         return 0;
1904 }
1905
1906 /*
1907  * Debugging only
1908  */
1909 static int
1910 vm_object_in_map_callback(struct proc *p, void *data)
1911 {
1912         struct vm_object_in_map_info *info = data;
1913
1914         if (p->p_vmspace) {
1915                 if (_vm_object_in_map(&p->p_vmspace->vm_map, info->object, 0)) {
1916                         info->rv = 1;
1917                         return -1;
1918                 }
1919         }
1920         return (0);
1921 }
1922
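/*
 * Usage note: from the in-kernel debugger this command is invoked as
 * "show vmochk".
 */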
1923 DB_SHOW_COMMAND(vmochk, vm_object_check)
1924 {
1925         vm_object_t object;
1926
1927         /*
1928          * Make sure that internal objs are in a map somewhere
1929          * and none have zero ref counts.
1930          */
1931         for (object = TAILQ_FIRST(&vm_object_list);
1932                         object != NULL;
1933                         object = TAILQ_NEXT(object, object_list)) {
1934                 if (object->type == OBJT_MARKER)
1935                         continue;
1936                 if (object->handle == NULL &&
1937                     (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
1938                         if (object->ref_count == 0) {
1939                                 db_printf("vmochk: internal obj has zero ref count: %ld\n",
1940                                         (long)object->size);
1941                         }
1942                         if (!vm_object_in_map(object)) {
1943                                 db_printf(
1944                         "vmochk: internal obj is not in a map: "
1945                         "ref: %d, size: %lu (0x%lx), backing_object: %p\n",
1946                                     object->ref_count, (u_long)object->size, 
1947                                     (u_long)object->size,
1948                                     (void *)object->backing_object);
1949                         }
1950                 }
1951         }
1952 }
1953
1954 /*
1955  * Debugging only
1956  */
1957 DB_SHOW_COMMAND(object, vm_object_print_static)
1958 {
1959         /* XXX convert args. */
1960         vm_object_t object = (vm_object_t)addr;
1961         boolean_t full = have_addr;
1962
1963         vm_page_t p;
1964
1965         /* XXX count is an (unused) arg.  Avoid shadowing it. */
1966 #define count   was_count
1967
1968         int count;
1969
1970         if (object == NULL)
1971                 return;
1972
1973         db_iprintf(
1974             "Object %p: type=%d, size=0x%lx, res=%d, ref=%d, flags=0x%x\n",
1975             object, (int)object->type, (u_long)object->size,
1976             object->resident_page_count, object->ref_count, object->flags);
1977         /*
1978          * XXX no %qd in kernel.  Truncate object->backing_object_offset.
1979          */
1980         db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%lx\n",
1981             object->shadow_count, 
1982             object->backing_object ? object->backing_object->ref_count : 0,
1983             object->backing_object, (long)object->backing_object_offset);
1984
1985         if (!full)
1986                 return;
1987
1988         db_indent += 2;
1989         count = 0;
1990         RB_FOREACH(p, vm_page_rb_tree, &object->rb_memq) {
1991                 if (count == 0)
1992                         db_iprintf("memory:=");
1993                 else if (count == 6) {
1994                         db_printf("\n");
1995                         db_iprintf(" ...");
1996                         count = 0;
1997                 } else
1998                         db_printf(",");
1999                 count++;
2000
2001                 db_printf("(off=0x%lx,page=0x%lx)",
2002                     (u_long) p->pindex, (u_long) VM_PAGE_TO_PHYS(p));
2003         }
2004         if (count != 0)
2005                 db_printf("\n");
2006         db_indent -= 2;
2007 }
2008
2009 /* XXX. */
2010 #undef count
2011
2012 /*
2013  * XXX need this non-static entry for calling from vm_map_print.
2014  *
2015  * Debugging only
2016  */
2017 void
2018 vm_object_print(/* db_expr_t */ long addr,
2019                 boolean_t have_addr,
2020                 /* db_expr_t */ long count,
2021                 char *modif)
2022 {
2023         vm_object_print_static(addr, have_addr, count, modif);
2024 }
2025
2026 /*
2027  * Debugging only
2028  */
2029 DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
2030 {
2031         vm_object_t object;
2032         int nl = 0;
2033         int c;
2034         for (object = TAILQ_FIRST(&vm_object_list);
2035                         object != NULL;
2036                         object = TAILQ_NEXT(object, object_list)) {
2037                 vm_pindex_t idx, fidx;
2038                 vm_pindex_t osize;
2039                 vm_paddr_t pa = -1, padiff;
2040                 int rcount;
2041                 vm_page_t m;
2042
2043                 if (object->type == OBJT_MARKER)
2044                         continue;
2045                 db_printf("new object: %p\n", (void *)object);
2046                 if (nl > 18) {
2047                         c = cngetc();
2048                         if (c != ' ')
2049                                 return;
2050                         nl = 0;
2051                 }
2052                 nl++;
2053                 rcount = 0;
2054                 fidx = 0;
2055                 osize = object->size;
2056                 if (osize > 128)
2057                         osize = 128;
2058                 for (idx = 0; idx < osize; idx++) {
2059                         m = vm_page_lookup(object, idx);
2060                         if (m == NULL) {
2061                                 if (rcount) {
2062                                         db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
2063                                                 (long)fidx, rcount, (long)pa);
2064                                         if (nl > 18) {
2065                                                 c = cngetc();
2066                                                 if (c != ' ')
2067                                                         return;
2068                                                 nl = 0;
2069                                         }
2070                                         nl++;
2071                                         rcount = 0;
2072                                 }
2073                                 continue;
2074                         }
2075
2076
2077                         if (rcount &&
2078                                 (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
2079                                 ++rcount;
2080                                 continue;
2081                         }
2082                         if (rcount) {
2083                                 padiff = pa + rcount * PAGE_SIZE - VM_PAGE_TO_PHYS(m);
2084                                 padiff >>= PAGE_SHIFT;
2085                                 padiff &= PQ_L2_MASK;
2086                                 if (padiff == 0) {
2087                                         pa = VM_PAGE_TO_PHYS(m) - rcount * PAGE_SIZE;
2088                                         ++rcount;
2089                                         continue;
2090                                 }
2091                                 db_printf(" index(%ld)run(%d)pa(0x%lx)",
2092                                         (long)fidx, rcount, (long)pa);
2093                                 db_printf("pd(%ld)\n", (long)padiff);
2094                                 if (nl > 18) {
2095                                         c = cngetc();
2096                                         if (c != ' ')
2097                                                 return;
2098                                         nl = 0;
2099                                 }
2100                                 nl++;
2101                         }
2102                         fidx = idx;
2103                         pa = VM_PAGE_TO_PHYS(m);
2104                         rcount = 1;
2105                 }
2106                 if (rcount) {
2107                         db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
2108                                 (long)fidx, rcount, (long)pa);
2109                         if (nl > 18) {
2110                                 c = cngetc();
2111                                 if (c != ' ')
2112                                         return;
2113                                 nl = 0;
2114                         }
2115                         nl++;
2116                 }
2117         }
2118 }
2119 #endif /* DDB */