/*
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)vm_object.c   8.5 (Berkeley) 3/22/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_object.c,v 1.171.2.8 2003/05/26 19:17:56 alc Exp $
 * $DragonFly: src/sys/vm/vm_object.c,v 1.30 2007/03/20 00:55:10 dillon Exp $
 */

/*
 *      Virtual memory object module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>           /* for curproc, pageproc */
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#define EASY_SCAN_FACTOR        8

static void     vm_object_qcollapse(vm_object_t object);
static int      vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
                                             int pagerflags);

/*
 *      Virtual memory objects maintain the actual data
 *      associated with allocated virtual memory.  A given
 *      page of memory exists within exactly one object.
 *
 *      An object is only deallocated when all "references"
 *      are given up.  Only one "reference" to a given
 *      region of an object should be writeable.
 *
 *      Associated with each object is a list of all resident
 *      memory pages belonging to that object; this list is
 *      maintained by the "vm_page" module, and locked by the object's
 *      lock.
 *
 *      Each object also records a "pager" routine which is
 *      used to retrieve (and store) pages to the proper backing
 *      storage.  In addition, objects may be backed by other
 *      objects from which they were virtual-copied.
 *
 *      The only items within the object structure which are
 *      modified after time of creation are:
 *              reference count         locked by object's lock
 *              pager routine           locked by object's lock
 *
 */

struct object_q vm_object_list;
struct vm_object kernel_object;

static long vm_object_count;            /* count of all objects */
extern int vm_pageout_page_count;

static long object_collapses;
static long object_bypasses;
static int next_index;
static vm_zone_t obj_zone;
static struct vm_zone obj_zone_store;
static int object_hash_rand;
#define VM_OBJECTS_INIT 256
static struct vm_object vm_objects_init[VM_OBJECTS_INIT];

void
_vm_object_allocate(objtype_t type, vm_size_t size, vm_object_t object)
{
        int incr;
        RB_INIT(&object->rb_memq);
        LIST_INIT(&object->shadow_head);

        object->type = type;
        object->size = size;
        object->ref_count = 1;
        object->flags = 0;
        if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP))
                vm_object_set_flag(object, OBJ_ONEMAPPING);
        object->paging_in_progress = 0;
        object->resident_page_count = 0;
        object->shadow_count = 0;
        object->pg_color = next_index;
        if (size > (PQ_L2_SIZE / 3 + PQ_PRIME1))
                incr = PQ_L2_SIZE / 3 + PQ_PRIME1;
        else
                incr = size;
        next_index = (next_index + incr) & PQ_L2_MASK;
        object->handle = NULL;
        object->backing_object = NULL;
        object->backing_object_offset = (vm_ooffset_t) 0;
        /*
         * Try to generate a number that will spread objects out in the
         * hash table.  We 'wipe' new objects across the hash in 128 page
         * increments plus 1 more to offset it a little more by the time
         * it wraps around.
         */
        object->hash_rand = object_hash_rand - 129;

        object->generation++;

        crit_enter();
        TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
        vm_object_count++;
        object_hash_rand = object->hash_rand;
        crit_exit();
}
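
/*
 * Worked example (added for clarity, not in the original source): the
 * pg_color striping above.  Assuming, hypothetically, PQ_L2_SIZE is 256
 * and PQ_PRIME1 is 31, any object larger than 256 / 3 + 31 = 116 pages
 * advances next_index by 116, so successive large objects start at
 * pg_color 0, 116, 232, 92 (348 & 255), ... spreading their pages
 * across the page coloring buckets, while a 10 page object advances
 * next_index by only 10.
 */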

/*
 *      vm_object_init:
 *
 *      Initialize the VM objects module.
 */
void
vm_object_init(void)
{
        TAILQ_INIT(&vm_object_list);

        _vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(KvaEnd),
                            &kernel_object);

        obj_zone = &obj_zone_store;
        zbootinit(obj_zone, "VM OBJECT", sizeof (struct vm_object),
                vm_objects_init, VM_OBJECTS_INIT);
}

void
vm_object_init2(void)
{
        zinitna(obj_zone, NULL, NULL, 0, 0, ZONE_PANICFAIL, 1);
}

/*
 *      vm_object_allocate:
 *
 *      Returns a new object with the given size.
 */

vm_object_t
vm_object_allocate(objtype_t type, vm_size_t size)
{
        vm_object_t result;

        result = (vm_object_t) zalloc(obj_zone);

        _vm_object_allocate(type, size, result);

        return (result);
}
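
#if 0
/*
 * Illustrative sketch (not part of the original file): the basic life
 * cycle of an anonymous object.  Sizes are in pages, so a caller
 * converts a byte length with OFF_TO_IDX().  "example_size" is a
 * hypothetical value and error handling is omitted.
 */
static void
vm_object_lifecycle_example(vm_ooffset_t example_size)
{
        vm_object_t obj;

        obj = vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(example_size));
        vm_object_reference(obj);       /* ref_count: 1 -> 2 */
        vm_object_deallocate(obj);      /* ref_count: 2 -> 1 */
        vm_object_deallocate(obj);      /* last ref, object terminates */
}
#endif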


/*
 *      vm_object_reference:
 *
 *      Gets another reference to the given object.
 */
void
vm_object_reference(vm_object_t object)
{
        if (object == NULL)
                return;

        object->ref_count++;
        if (object->type == OBJT_VNODE) {
                vref(object->handle);
                /* XXX what if the vnode is being destroyed? */
        }
}

static void
vm_object_vndeallocate(vm_object_t object)
{
        struct vnode *vp = (struct vnode *) object->handle;

        KASSERT(object->type == OBJT_VNODE,
            ("vm_object_vndeallocate: not a vnode object"));
        KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
#ifdef INVARIANTS
        if (object->ref_count == 0) {
                vprint("vm_object_vndeallocate", vp);
                panic("vm_object_vndeallocate: bad object reference count");
        }
#endif

        object->ref_count--;
        if (object->ref_count == 0)
                vp->v_flag &= ~VTEXT;
        vrele(vp);
}

/*
 *      vm_object_deallocate:
 *
 *      Release a reference to the specified object,
 *      gained either through a vm_object_allocate
 *      or a vm_object_reference call.  When all references
 *      are gone, storage associated with this object
 *      may be relinquished.
 *
 *      No object may be locked.
 */
void
vm_object_deallocate(vm_object_t object)
{
        vm_object_t temp;

        while (object != NULL) {
                if (object->type == OBJT_VNODE) {
                        vm_object_vndeallocate(object);
                        return;
                }

                if (object->ref_count == 0) {
                        panic("vm_object_deallocate: object deallocated too many times: %d", object->type);
                } else if (object->ref_count > 2) {
                        object->ref_count--;
                        return;
                }

                /*
                 * Here on ref_count of one or two, which are special cases for
                 * objects.
                 */
                if ((object->ref_count == 2) && (object->shadow_count == 0)) {
                        vm_object_set_flag(object, OBJ_ONEMAPPING);
                        object->ref_count--;
                        return;
                } else if ((object->ref_count == 2) && (object->shadow_count == 1)) {
                        object->ref_count--;
                        if ((object->handle == NULL) &&
                            (object->type == OBJT_DEFAULT ||
                             object->type == OBJT_SWAP)) {
                                vm_object_t robject;

                                robject = LIST_FIRST(&object->shadow_head);
                                KASSERT(robject != NULL,
                                    ("vm_object_deallocate: ref_count: %d, shadow_count: %d",
                                         object->ref_count,
                                         object->shadow_count));
                                if ((robject->handle == NULL) &&
                                    (robject->type == OBJT_DEFAULT ||
                                     robject->type == OBJT_SWAP)) {

                                        robject->ref_count++;

                                        while (
                                                robject->paging_in_progress ||
                                                object->paging_in_progress
                                        ) {
                                                vm_object_pip_sleep(robject, "objde1");
                                                vm_object_pip_sleep(object, "objde2");
                                        }

                                        if (robject->ref_count == 1) {
                                                robject->ref_count--;
                                                object = robject;
                                                goto doterm;
                                        }

                                        object = robject;
                                        vm_object_collapse(object);
                                        continue;
                                }
                        }

                        return;

                } else {
                        object->ref_count--;
                        if (object->ref_count != 0)
                                return;
                }

doterm:

                temp = object->backing_object;
                if (temp) {
                        LIST_REMOVE(object, shadow_list);
                        temp->shadow_count--;
                        temp->generation++;
                        object->backing_object = NULL;
                }

                /*
                 * Don't double-terminate, we could be in a termination
                 * recursion due to the terminate having to sync data
                 * to disk.
                 */
                if ((object->flags & OBJ_DEAD) == 0)
                        vm_object_terminate(object);
                object = temp;
        }
}

/*
 *      vm_object_terminate actually destroys the specified object, freeing
 *      up all previously used resources.
 *
 *      The object must be locked.
 *      This routine may block.
 */
static int vm_object_terminate_callback(vm_page_t p, void *data);

void
vm_object_terminate(vm_object_t object)
{
        /*
         * Make sure no one uses us.
         */
        vm_object_set_flag(object, OBJ_DEAD);

        /*
         * wait for the pageout daemon to be done with the object
         */
        vm_object_pip_wait(object, "objtrm");

        KASSERT(!object->paging_in_progress,
                ("vm_object_terminate: pageout in progress"));

        /*
         * Clean and free the pages, as appropriate. All references to the
         * object are gone, so we don't need to lock it.
         */
        if (object->type == OBJT_VNODE) {
                struct vnode *vp;

                /*
                 * Clean pages and flush buffers.
                 */
                vm_object_page_clean(object, 0, 0, OBJPC_SYNC);

                vp = (struct vnode *) object->handle;
                vinvalbuf(vp, V_SAVE, 0, 0);
        }

        /*
         * Wait for any I/O to complete, after which there had better not
         * be any references left on the object.
         */
        vm_object_pip_wait(object, "objtrm");

        if (object->ref_count != 0)
                panic("vm_object_terminate: object with references, ref_count=%d", object->ref_count);

        /*
         * Now free any remaining pages. For internal objects, this also
         * removes them from paging queues. Don't free wired pages, just
         * remove them from the object.
         */
        crit_enter();
        vm_page_rb_tree_RB_SCAN(&object->rb_memq, NULL,
                                vm_object_terminate_callback, NULL);
        crit_exit();

        /*
         * Let the pager know object is dead.
         */
        vm_pager_deallocate(object);

        /*
         * Remove the object from the global object list.
         */
        crit_enter();
        TAILQ_REMOVE(&vm_object_list, object, object_list);
        vm_object_count--;
        crit_exit();

        wakeup(object);
        if (object->ref_count != 0)
                panic("vm_object_terminate2: object with references, ref_count=%d", object->ref_count);

        /*
         * Free the space for the object.
         */
        zfree(obj_zone, object);
}

static int
vm_object_terminate_callback(vm_page_t p, void *data __unused)
{
        if (p->busy || (p->flags & PG_BUSY))
                panic("vm_object_terminate: freeing busy page %p", p);
        if (p->wire_count == 0) {
                vm_page_busy(p);
                vm_page_free(p);
                mycpu->gd_cnt.v_pfree++;
        } else {
                if (p->queue != PQ_NONE)
                        kprintf("vm_object_terminate: Warning: Encountered wired page %p on queue %d\n", p, p->queue);
                vm_page_busy(p);
                vm_page_remove(p);
                vm_page_wakeup(p);
        }
        return(0);
}

/*
 *      vm_object_page_clean
 *
 *      Clean all dirty pages in the specified range of object.  Leaves page
 *      on whatever queue it is currently on.  If NOSYNC is set then do not
 *      write out pages with PG_NOSYNC set (originally comes from MAP_NOSYNC),
 *      leaving the object dirty.
 *
 *      When stuffing pages asynchronously, allow clustering.  XXX we need a
 *      synchronous clustering mode implementation.
 *
 *      Odd semantics: if end == 0 the range extends to the end of the
 *      object, so start == end == 0 cleans everything.
 */
static int vm_object_page_clean_pass1(struct vm_page *p, void *data);
static int vm_object_page_clean_pass2(struct vm_page *p, void *data);

void
vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
                     int flags)
{
        struct rb_vm_page_scan_info info;
        struct vnode *vp;
        int wholescan;
        int pagerflags;
        int curgeneration;

        if (object->type != OBJT_VNODE ||
                (object->flags & OBJ_MIGHTBEDIRTY) == 0)
                return;

        pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) ?
                        VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
        pagerflags |= (flags & OBJPC_INVAL) ? VM_PAGER_PUT_INVAL : 0;

        vp = object->handle;

        /*
         * Interlock other major object operations.  This allows us to
         * temporarily clear OBJ_WRITEABLE and OBJ_MIGHTBEDIRTY.
         */
        crit_enter();
        vm_object_set_flag(object, OBJ_CLEANING);

        /*
         * Handle 'entire object' case
         */
        info.start_pindex = start;
        if (end == 0) {
                info.end_pindex = object->size - 1;
        } else {
                info.end_pindex = end - 1;
        }
        wholescan = (start == 0 && info.end_pindex == object->size - 1);
        info.limit = flags;
        info.pagerflags = pagerflags;
        info.object = object;

        /*
         * If cleaning the entire object do a pass to mark the pages read-only.
         * If everything worked out ok, clear OBJ_WRITEABLE and
         * OBJ_MIGHTBEDIRTY.
         */
        if (wholescan) {
                info.error = 0;
                vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
                                        vm_object_page_clean_pass1, &info);
                if (info.error == 0) {
                        vm_object_clear_flag(object,
                                             OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
                        if (object->type == OBJT_VNODE &&
                            (vp = (struct vnode *)object->handle) != NULL) {
                                if (vp->v_flag & VOBJDIRTY)
                                        vclrflags(vp, VOBJDIRTY);
                        }
                }
        }

        /*
         * Do a pass to clean all the dirty pages we find.
         */
        do {
                info.error = 0;
                curgeneration = object->generation;
                vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
                                        vm_object_page_clean_pass2, &info);
        } while (info.error || curgeneration != object->generation);

        vm_object_clear_flag(object, OBJ_CLEANING);
        crit_exit();
}
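
#if 0
/*
 * Illustrative sketch (not part of the original file): cleaning a byte
 * range of a vnode object the way an msync()-style path might.  Passing
 * start == end == 0 would instead clean the entire object (see the odd
 * semantics noted above).  "obj", "offset" and "length" are hypothetical;
 * the end index is rounded up to cover a partial last page.
 */
static void
vm_object_page_clean_example(vm_object_t obj, vm_ooffset_t offset,
                             vm_size_t length)
{
        vm_object_page_clean(obj, OFF_TO_IDX(offset),
                             OFF_TO_IDX(offset + length + PAGE_MASK),
                             OBJPC_SYNC);
}
#endif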

static int
vm_object_page_clean_pass1(struct vm_page *p, void *data)
{
        struct rb_vm_page_scan_info *info = data;

        vm_page_flag_set(p, PG_CLEANCHK);
        if ((info->limit & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC))
                info->error = 1;
        else
                vm_page_protect(p, VM_PROT_READ);
        return(0);
}

static int
vm_object_page_clean_pass2(struct vm_page *p, void *data)
{
        struct rb_vm_page_scan_info *info = data;
        int n;

        /*
         * Do not mess with pages that were inserted after we started
         * the cleaning pass.
         */
        if ((p->flags & PG_CLEANCHK) == 0)
                return(0);

        /*
         * Before wasting time traversing the pmaps, check for trivial
         * cases where the page cannot be dirty.
         */
        if (p->valid == 0 || (p->queue - p->pc) == PQ_CACHE) {
                KKASSERT((p->dirty & p->valid) == 0);
                return(0);
        }

        /*
         * Check whether the page is dirty or not.  The page has been set
         * to be read-only so the check will not race a user dirtying the
         * page.
         */
        vm_page_test_dirty(p);
        if ((p->dirty & p->valid) == 0) {
                vm_page_flag_clear(p, PG_CLEANCHK);
                return(0);
        }

        /*
         * If we have been asked to skip nosync pages and this is a
         * nosync page, skip it.  Note that the object flags were
         * not cleared in this case (because pass1 will have returned an
         * error), so we do not have to set them.
         */
        if ((info->limit & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) {
                vm_page_flag_clear(p, PG_CLEANCHK);
                return(0);
        }

        /*
         * Flush as many pages as we can.  PG_CLEANCHK will be cleared on
         * the pages that get successfully flushed.  Set info->error if
         * we raced an object modification.
         */
        n = vm_object_page_collect_flush(info->object, p, info->pagerflags);
        if (n == 0)
                info->error = 1;
        return(0);
}

/*
 * This routine must be called within a critical section to properly avoid
 * an interrupt unbusy/free race that can occur prior to the busy check.
 *
 * Using the object generation number here to detect page ripout is not
 * the best idea in the world. XXX
 *
 * NOTE: we operate under the assumption that a page found to not be busy
 * will not be ripped out from under us by an interrupt.  XXX we should
 * recode this to explicitly busy the pages.
 */
static int
vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags)
{
        int runlen;
        int maxf;
        int chkb;
        int maxb;
        int i;
        int curgeneration;
        vm_pindex_t pi;
        vm_page_t maf[vm_pageout_page_count];
        vm_page_t mab[vm_pageout_page_count];
        vm_page_t ma[vm_pageout_page_count];

        curgeneration = object->generation;

        pi = p->pindex;
        while (vm_page_sleep_busy(p, TRUE, "vpcwai")) {
                if (object->generation != curgeneration) {
                        return(0);
                }
        }
        KKASSERT(p->object == object && p->pindex == pi);

        maxf = 0;
        for (i = 1; i < vm_pageout_page_count; i++) {
                vm_page_t tp;

                if ((tp = vm_page_lookup(object, pi + i)) != NULL) {
                        if ((tp->flags & PG_BUSY) ||
                                ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
                                 (tp->flags & PG_CLEANCHK) == 0) ||
                                (tp->busy != 0))
                                break;
                        if ((tp->queue - tp->pc) == PQ_CACHE) {
                                vm_page_flag_clear(tp, PG_CLEANCHK);
                                break;
                        }
                        vm_page_test_dirty(tp);
                        if ((tp->dirty & tp->valid) == 0) {
                                vm_page_flag_clear(tp, PG_CLEANCHK);
                                break;
                        }
                        maf[i - 1] = tp;
                        maxf++;
                        continue;
                }
                break;
        }

        maxb = 0;
        chkb = vm_pageout_page_count - maxf;
        if (chkb) {
                for (i = 1; i < chkb; i++) {
                        vm_page_t tp;

                        if ((tp = vm_page_lookup(object, pi - i)) != NULL) {
                                if ((tp->flags & PG_BUSY) ||
                                        ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
                                         (tp->flags & PG_CLEANCHK) == 0) ||
                                        (tp->busy != 0))
                                        break;
                                if ((tp->queue - tp->pc) == PQ_CACHE) {
                                        vm_page_flag_clear(tp, PG_CLEANCHK);
                                        break;
                                }
                                vm_page_test_dirty(tp);
                                if ((tp->dirty & tp->valid) == 0) {
                                        vm_page_flag_clear(tp, PG_CLEANCHK);
                                        break;
                                }
                                mab[i - 1] = tp;
                                maxb++;
                                continue;
                        }
                        break;
                }
        }

        for (i = 0; i < maxb; i++) {
                int index = (maxb - i) - 1;
                ma[index] = mab[i];
                vm_page_flag_clear(ma[index], PG_CLEANCHK);
        }
        vm_page_flag_clear(p, PG_CLEANCHK);
        ma[maxb] = p;
        for (i = 0; i < maxf; i++) {
                int index = (maxb + i) + 1;
                ma[index] = maf[i];
                vm_page_flag_clear(ma[index], PG_CLEANCHK);
        }
        runlen = maxb + maxf + 1;

        vm_pageout_flush(ma, runlen, pagerflags);
        for (i = 0; i < runlen; i++) {
                if (ma[i]->valid & ma[i]->dirty) {
                        vm_page_protect(ma[i], VM_PROT_READ);
                        vm_page_flag_set(ma[i], PG_CLEANCHK);

                        /*
                         * maxf will end up being the actual number of pages
                         * we wrote out contiguously, non-inclusive of the
                         * first page.  We do not count look-behind pages.
                         */
                        if (i >= maxb + 1 && (maxf > i - maxb - 1))
                                maxf = i - maxb - 1;
                }
        }
        return(maxf + 1);
}
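
/*
 * Worked example (added for clarity, not in the original source):
 * suppose vm_pageout_page_count is 8 and the scans above collected
 * maxb = 2 look-behind pages and maxf = 3 look-ahead pages around p
 * at pindex pi.  The reordering loops build ma[] in ascending pindex
 * order before the flush:
 *
 *      ma[0] = mab[1]  (pi - 2)
 *      ma[1] = mab[0]  (pi - 1)
 *      ma[2] = p       (pi)
 *      ma[3] = maf[0]  (pi + 1)
 *      ma[4] = maf[1]  (pi + 2)
 *      ma[5] = maf[2]  (pi + 3)
 *
 * runlen = maxb + maxf + 1 = 6 pages then go to vm_pageout_flush() in
 * one clustered write.
 */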

#ifdef not_used
/* XXX I cannot tell if this should be an exported symbol */
/*
 *      vm_object_deactivate_pages
 *
 *      Deactivate all pages in the specified object.  (Keep its pages
 *      in memory even though it is no longer referenced.)
 *
 *      The object must be locked.
 */
static int vm_object_deactivate_pages_callback(vm_page_t p, void *data);

static void
vm_object_deactivate_pages(vm_object_t object)
{
        crit_enter();
        vm_page_rb_tree_RB_SCAN(&object->rb_memq, NULL,
                                vm_object_deactivate_pages_callback, NULL);
        crit_exit();
}

static int
vm_object_deactivate_pages_callback(vm_page_t p, void *data __unused)
{
        vm_page_deactivate(p);
        return(0);
}

#endif

/*
 * Same as vm_object_pmap_copy, except range checking really
 * works, and is meant for small sections of an object.
 *
 * This code protects resident pages by making them read-only
 * and is typically called on a fork or split when a page
 * is converted to copy-on-write.
 *
 * NOTE: If the page is already at VM_PROT_NONE, calling
 * vm_page_protect will have no effect.
 */
void
vm_object_pmap_copy_1(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
{
        vm_pindex_t idx;
        vm_page_t p;

        if (object == NULL || (object->flags & OBJ_WRITEABLE) == 0)
                return;

        /*
         * spl protection needed to prevent races between the lookup,
         * an interrupt unbusy/free, and our protect call.
         */
        crit_enter();
        for (idx = start; idx < end; idx++) {
                p = vm_page_lookup(object, idx);
                if (p == NULL)
                        continue;
                vm_page_protect(p, VM_PROT_READ);
        }
        crit_exit();
}

/*
 *      vm_object_pmap_remove:
 *
 *      Removes all physical pages in the specified
 *      object range from all physical maps.
 *
 *      The object must *not* be locked.
 */

static int vm_object_pmap_remove_callback(vm_page_t p, void *data);

void
vm_object_pmap_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
{
        struct rb_vm_page_scan_info info;

        if (object == NULL)
                return;
        info.start_pindex = start;
        info.end_pindex = end - 1;
        crit_enter();
        vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
                                vm_object_pmap_remove_callback, &info);
        if (start == 0 && end == object->size)
                vm_object_clear_flag(object, OBJ_WRITEABLE);
        crit_exit();
}
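
#if 0
/*
 * Illustrative sketch (not part of the original file): a caller such
 * as a truncation path might strip every pmap mapping for the pages
 * backing a byte range like this.  "obj", "offset" and "length" are
 * hypothetical; the end index is rounded up to cover a partial last
 * page.
 */
static void
vm_object_pmap_remove_example(vm_object_t obj, vm_ooffset_t offset,
                              vm_size_t length)
{
        vm_object_pmap_remove(obj, OFF_TO_IDX(offset),
                              OFF_TO_IDX(offset + length + PAGE_MASK));
}
#endif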

static int
vm_object_pmap_remove_callback(vm_page_t p, void *data __unused)
{
        vm_page_protect(p, VM_PROT_NONE);
        return(0);
}

/*
 *      vm_object_madvise:
 *
 *      Implements the madvise function at the object/page level.
 *
 *      MADV_WILLNEED   (any object)
 *
 *          Activate the specified pages if they are resident.
 *
 *      MADV_DONTNEED   (any object)
 *
 *          Deactivate the specified pages if they are resident.
 *
 *      MADV_FREE       (OBJT_DEFAULT/OBJT_SWAP objects,
 *                       OBJ_ONEMAPPING only)
 *
 *          Deactivate and clean the specified pages if they are
 *          resident.  This permits the process to reuse the pages
 *          without faulting or the kernel to reclaim the pages
 *          without I/O.
 */
void
vm_object_madvise(vm_object_t object, vm_pindex_t pindex, int count, int advise)
{
        vm_pindex_t end, tpindex;
        vm_object_t tobject;
        vm_page_t m;

        if (object == NULL)
                return;

        end = pindex + count;

        /*
         * Locate and adjust resident pages
         */

        for (; pindex < end; pindex += 1) {
relookup:
                tobject = object;
                tpindex = pindex;
shadowlookup:
                /*
                 * MADV_FREE only operates on OBJT_DEFAULT or OBJT_SWAP pages
                 * and those pages must be OBJ_ONEMAPPING.
                 */
                if (advise == MADV_FREE) {
                        if ((tobject->type != OBJT_DEFAULT &&
                             tobject->type != OBJT_SWAP) ||
                            (tobject->flags & OBJ_ONEMAPPING) == 0) {
                                continue;
                        }
                }

                /*
                 * spl protection is required to avoid a race between the
                 * lookup, an interrupt unbusy/free, and our busy check.
                 */

                crit_enter();
                m = vm_page_lookup(tobject, tpindex);

                if (m == NULL) {
                        /*
                         * There may be swap even if there is no backing page
                         */
                        if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
                                swap_pager_freespace(tobject, tpindex, 1);

                        /*
                         * next object
                         */
                        crit_exit();
                        if (tobject->backing_object == NULL)
                                continue;
                        tpindex += OFF_TO_IDX(tobject->backing_object_offset);
                        tobject = tobject->backing_object;
                        goto shadowlookup;
                }

                /*
                 * If the page is busy or not in a normal active state,
                 * we skip it.  If the page is not managed there are no
                 * page queues to mess with.  Things can break if we mess
                 * with pages in any of the below states.
                 */
                if (
                    m->hold_count ||
                    m->wire_count ||
                    (m->flags & PG_UNMANAGED) ||
                    m->valid != VM_PAGE_BITS_ALL
                ) {
                        crit_exit();
                        continue;
                }

                if (vm_page_sleep_busy(m, TRUE, "madvpo")) {
                        crit_exit();
                        goto relookup;
                }
                crit_exit();

                /*
                 * Theoretically once a page is known not to be busy, an
                 * interrupt cannot come along and rip it out from under us.
                 */

                if (advise == MADV_WILLNEED) {
                        vm_page_activate(m);
                } else if (advise == MADV_DONTNEED) {
                        vm_page_dontneed(m);
                } else if (advise == MADV_FREE) {
                        /*
                         * Mark the page clean.  This will allow the page
                         * to be freed up by the system.  However, such pages
                         * are often reused quickly by malloc()/free()
                         * so we do not do anything that would cause
                         * a page fault if we can help it.
                         *
                         * Specifically, we do not try to actually free
                         * the page now nor do we try to put it in the
                         * cache (which would cause a page fault on reuse).
                         *
                         * But we do make the page as freeable as we
                         * can without actually taking the step of unmapping
                         * it.
                         */
                        pmap_clear_modify(m);
                        m->dirty = 0;
                        m->act_count = 0;
                        vm_page_dontneed(m);
                        if (tobject->type == OBJT_SWAP)
                                swap_pager_freespace(tobject, tpindex, 1);
                }
        }
}
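
#if 0
/*
 * Illustrative sketch (not part of the original file): how the three
 * advice values drive vm_object_madvise() for a hypothetical object
 * and page range.  A real caller (the madvise() system call path)
 * derives the object, pindex and count from the process's vm_map.
 */
static void
vm_object_madvise_example(vm_object_t obj, vm_pindex_t pindex, int count)
{
        vm_object_madvise(obj, pindex, count, MADV_WILLNEED); /* activate */
        vm_object_madvise(obj, pindex, count, MADV_DONTNEED); /* deactivate */
        vm_object_madvise(obj, pindex, count, MADV_FREE);     /* mark clean */
}
#endif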

/*
 *      vm_object_shadow:
 *
 *      Create a new object which is backed by the
 *      specified existing object range.  The source
 *      object reference is deallocated.
 *
 *      The new object and offset into that object
 *      are returned in the source parameters.
 */

void
vm_object_shadow(vm_object_t *object,   /* IN/OUT */
                 vm_ooffset_t *offset,  /* IN/OUT */
                 vm_size_t length)
{
        vm_object_t source;
        vm_object_t result;

        source = *object;

        /*
         * Don't create the new object if the old object isn't shared.
         */

        if (source != NULL &&
            source->ref_count == 1 &&
            source->handle == NULL &&
            (source->type == OBJT_DEFAULT ||
             source->type == OBJT_SWAP))
                return;

        /*
         * Allocate a new object with the given length
         */

        if ((result = vm_object_allocate(OBJT_DEFAULT, length)) == NULL)
                panic("vm_object_shadow: no object for shadowing");

        /*
         * The new object shadows the source object, adding a reference to it.
         * Our caller changes his reference to point to the new object,
         * removing a reference to the source object.  Net result: no change
         * of reference count.
         *
         * Try to optimize the result object's page color when shadowing
         * in order to maintain page coloring consistency in the combined
         * shadowed object.
         */
        result->backing_object = source;
        if (source) {
                LIST_INSERT_HEAD(&source->shadow_head, result, shadow_list);
                source->shadow_count++;
                source->generation++;
                result->pg_color = (source->pg_color + OFF_TO_IDX(*offset)) & PQ_L2_MASK;
        }

        /*
         * Store the offset into the source object, and fix up the offset into
         * the new object.
         */

        result->backing_object_offset = *offset;

        /*
         * Return the new things
         */

        *offset = 0;
        *object = result;
}
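
#if 0
/*
 * Illustrative sketch (not part of the original file): how a
 * copy-on-write path might push a shadow object in front of a map
 * entry's object, in the style of the vm_map code that calls this
 * routine.  "entry_object", "entry_offset" and "entry_size" are
 * hypothetical stand-ins for vm_map_entry fields.
 */
static void
vm_object_shadow_example(vm_object_t *entry_object,
                         vm_ooffset_t *entry_offset, vm_size_t entry_size)
{
        /*
         * On return *entry_object points at the new (initially empty)
         * shadow object and *entry_offset is 0; the old object becomes
         * the new object's backing_object.
         */
        vm_object_shadow(entry_object, entry_offset,
                         OFF_TO_IDX(entry_size + PAGE_MASK));
}
#endif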

#define OBSC_TEST_ALL_SHADOWED  0x0001
#define OBSC_COLLAPSE_NOWAIT    0x0002
#define OBSC_COLLAPSE_WAIT      0x0004

static int vm_object_backing_scan_callback(vm_page_t p, void *data);

static __inline int
vm_object_backing_scan(vm_object_t object, int op)
{
        struct rb_vm_page_scan_info info;
        vm_object_t backing_object;

        /*
         * spl protection is required to avoid races between the memq/lookup,
         * an interrupt doing an unbusy/free, and our busy check, among
         * other things.
         */
        crit_enter();

        backing_object = object->backing_object;
        info.backing_offset_index = OFF_TO_IDX(object->backing_object_offset);

        /*
         * Initial conditions
         */

        if (op & OBSC_TEST_ALL_SHADOWED) {
                /*
                 * We do not want to have to test for the existence of
                 * swap pages in the backing object.  XXX but with the
                 * new swapper this would be pretty easy to do.
                 *
                 * XXX what about anonymous MAP_SHARED memory that hasn't
                 * been ZFOD faulted yet?  If we do not test for this, the
                 * shadow test may succeed! XXX
                 */
                if (backing_object->type != OBJT_DEFAULT) {
                        crit_exit();
                        return(0);
                }
        }
        if (op & OBSC_COLLAPSE_WAIT) {
                KKASSERT((backing_object->flags & OBJ_DEAD) == 0);
                vm_object_set_flag(backing_object, OBJ_DEAD);
        }

        /*
         * Our scan.  We have to retry if a negative error code is
         * returned, otherwise 0 or 1 will be returned in info.error.
         * 0 indicates that the scan had to be stopped because the
         * parent does not completely shadow the child.
         */
        info.object = object;
        info.backing_object = backing_object;
        info.limit = op;
        do {
                info.error = 1;
                vm_page_rb_tree_RB_SCAN(&backing_object->rb_memq, NULL,
                                        vm_object_backing_scan_callback,
                                        &info);
        } while (info.error < 0);
        crit_exit();
        return(info.error);
}

static int
vm_object_backing_scan_callback(vm_page_t p, void *data)
{
        struct rb_vm_page_scan_info *info = data;
        vm_object_t backing_object;
        vm_object_t object;
        vm_pindex_t new_pindex;
        vm_pindex_t backing_offset_index;
        int op;

        new_pindex = p->pindex - info->backing_offset_index;
        op = info->limit;
        object = info->object;
        backing_object = info->backing_object;
        backing_offset_index = info->backing_offset_index;

        if (op & OBSC_TEST_ALL_SHADOWED) {
                vm_page_t pp;

                /*
                 * Ignore pages outside the parent object's range
                 * and outside the parent object's mapping of the
                 * backing object.
                 *
                 * note that we do not busy the backing object's
                 * page.
                 */
                if (
                    p->pindex < backing_offset_index ||
                    new_pindex >= object->size
                ) {
                        return(0);
                }

                /*
                 * See if the parent has the page or if the parent's
                 * object pager has the page.  If the parent has the
                 * page but the page is not valid, the parent's
                 * object pager must have the page.
                 *
                 * If this fails, the parent does not completely shadow
                 * the object and we might as well give up now.
                 */

                pp = vm_page_lookup(object, new_pindex);
                if (
                    (pp == NULL || pp->valid == 0) &&
                    !vm_pager_has_page(object, new_pindex, NULL, NULL)
                ) {
                        info->error = 0;        /* problemo */
                        return(-1);             /* stop the scan */
                }
        }

        /*
         * Check for busy page
         */

        if (op & (OBSC_COLLAPSE_WAIT | OBSC_COLLAPSE_NOWAIT)) {
                vm_page_t pp;

                if (op & OBSC_COLLAPSE_NOWAIT) {
                        if (
                            (p->flags & PG_BUSY) ||
                            !p->valid ||
                            p->hold_count ||
                            p->wire_count ||
                            p->busy
                        ) {
                                return(0);
                        }
                } else if (op & OBSC_COLLAPSE_WAIT) {
                        if (vm_page_sleep_busy(p, TRUE, "vmocol")) {
                                /*
                                 * If we slept, anything could have
                                 * happened.   Ask that the scan be restarted.
                                 *
                                 * Since the object is marked dead, the
                                 * backing offset should not have changed.
                                 */
                                info->error = -1;
                                return(-1);
                        }
                }

                /*
                 * Busy the page
                 */
                vm_page_busy(p);

                KASSERT(
                    p->object == backing_object,
                    ("vm_object_qcollapse(): object mismatch")
                );

                /*
                 * Destroy any associated swap
                 */
                if (backing_object->type == OBJT_SWAP) {
                        swap_pager_freespace(
                            backing_object,
                            p->pindex,
                            1
                        );
                }

                if (
                    p->pindex < backing_offset_index ||
                    new_pindex >= object->size
                ) {
                        /*
                         * Page is out of the parent object's range, we
                         * can simply destroy it.
                         */
                        vm_page_protect(p, VM_PROT_NONE);
                        vm_page_free(p);
                        return(0);
                }

                pp = vm_page_lookup(object, new_pindex);
                if (
                    pp != NULL ||
                    vm_pager_has_page(object, new_pindex, NULL, NULL)
                ) {
                        /*
                         * page already exists in parent OR swap exists
                         * for this location in the parent.  Destroy
                         * the original page from the backing object.
                         *
                         * Leave the parent's page alone
                         */
                        vm_page_protect(p, VM_PROT_NONE);
                        vm_page_free(p);
                        return(0);
                }

                /*
                 * Page does not exist in parent, rename the
                 * page from the backing object to the main object.
                 *
                 * If the page was mapped to a process, it can remain
                 * mapped through the rename.
                 */
                if ((p->queue - p->pc) == PQ_CACHE)
                        vm_page_deactivate(p);

                vm_page_rename(p, object, new_pindex);
                /* page automatically made dirty by rename */
        }
        return(0);
}

/*
 * this version of collapse allows the operation to occur earlier and
 * when paging_in_progress is true for an object...  This is not a complete
 * operation, but should plug 99.9% of the rest of the leaks.
 */
static void
vm_object_qcollapse(vm_object_t object)
{
        vm_object_t backing_object = object->backing_object;

        if (backing_object->ref_count != 1)
                return;

        backing_object->ref_count += 2;

        vm_object_backing_scan(object, OBSC_COLLAPSE_NOWAIT);

        backing_object->ref_count -= 2;
}

/*
 *      vm_object_collapse:
 *
 *      Collapse an object with the object backing it.
 *      Pages in the backing object are moved into the
 *      parent, and the backing object is deallocated.
 */
void
vm_object_collapse(vm_object_t object)
{
        while (TRUE) {
                vm_object_t backing_object;

                /*
                 * Verify that the conditions are right for collapse:
                 *
                 * The object exists and the backing object exists.
                 */
                if (object == NULL)
                        break;

                if ((backing_object = object->backing_object) == NULL)
                        break;

                /*
                 * We check the backing object first, because it is most
                 * likely not collapsible.
1327                  */
1328                 if (backing_object->handle != NULL ||
1329                     (backing_object->type != OBJT_DEFAULT &&
1330                      backing_object->type != OBJT_SWAP) ||
1331                     (backing_object->flags & OBJ_DEAD) ||
1332                     object->handle != NULL ||
1333                     (object->type != OBJT_DEFAULT &&
1334                      object->type != OBJT_SWAP) ||
1335                     (object->flags & OBJ_DEAD)) {
1336                         break;
1337                 }
1338
1339                 if (
1340                     object->paging_in_progress != 0 ||
1341                     backing_object->paging_in_progress != 0
1342                 ) {
1343                         vm_object_qcollapse(object);
1344                         break;
1345                 }
1346
1347                 /*
1348                  * We know that we can either collapse the backing object (if
1349                  * the parent is the only reference to it) or (perhaps) have
1350                  * the parent bypass the object if the parent happens to shadow
1351                  * all the resident pages in the entire backing object.
1352                  *
1353                  * This is ignoring pager-backed pages such as swap pages.
1354                  * vm_object_backing_scan fails the shadowing test in this
1355                  * case.
1356                  */
1357
1358                 if (backing_object->ref_count == 1) {
1359                         /*
1360                          * If there is exactly one reference to the backing
1361                          * object, we can collapse it into the parent.  
1362                          */
1363                         vm_object_backing_scan(object, OBSC_COLLAPSE_WAIT);
1364
1365                         /*
1366                          * Move the pager from backing_object to object.
1367                          */
1368
1369                         if (backing_object->type == OBJT_SWAP) {
1370                                 vm_object_pip_add(backing_object, 1);
1371
1372                                 /*
1373                                  * scrap the paging_offset junk and do a 
1374                                  * discrete copy.  This also removes major 
1375                                  * assumptions about how the swap-pager 
1376                                  * works from where it doesn't belong.  The
1377                                  * new swapper is able to optimize the
1378                                  * destroy-source case.
1379                                  */
1380
1381                                 vm_object_pip_add(object, 1);
1382                                 swap_pager_copy(
1383                                     backing_object,
1384                                     object,
1385                                     OFF_TO_IDX(object->backing_object_offset), TRUE);
1386                                 vm_object_pip_wakeup(object);
1387
1388                                 vm_object_pip_wakeup(backing_object);
1389                         }
1390                         /*
1391                          * Object now shadows whatever backing_object did.
1392                          * Note that the reference to 
1393                          * backing_object->backing_object moves from within 
1394                          * backing_object to within object.
1395                          */
1396
1397                         LIST_REMOVE(object, shadow_list);
1398                         object->backing_object->shadow_count--;
1399                         object->backing_object->generation++;
1400                         if (backing_object->backing_object) {
1401                                 LIST_REMOVE(backing_object, shadow_list);
1402                                 backing_object->backing_object->shadow_count--;
1403                                 backing_object->backing_object->generation++;
1404                         }
1405                         object->backing_object = backing_object->backing_object;
1406                         if (object->backing_object) {
1407                                 LIST_INSERT_HEAD(
1408                                     &object->backing_object->shadow_head,
1409                                     object, 
1410                                     shadow_list
1411                                 );
1412                                 object->backing_object->shadow_count++;
1413                                 object->backing_object->generation++;
1414                         }
1415
1416                         object->backing_object_offset +=
1417                             backing_object->backing_object_offset;
1418
1419                         /*
1420                          * Discard backing_object.
1421                          *
1422                          * Since the backing object has no pages, no pager left,
1423                          * and no object references within it, all that is
1424                          * necessary is to dispose of it.
1425                          */
1426
1427                         KASSERT(backing_object->ref_count == 1, ("backing_object %p was somehow re-referenced during collapse!", backing_object));
1428                         KASSERT(RB_EMPTY(&backing_object->rb_memq), ("backing_object %p somehow has left over pages during collapse!", backing_object));
1429                         crit_enter();
1430                         TAILQ_REMOVE(
1431                             &vm_object_list, 
1432                             backing_object,
1433                             object_list
1434                         );
1435                         vm_object_count--;
1436                         crit_exit();
1437
1438                         zfree(obj_zone, backing_object);
1439
1440                         object_collapses++;
		} else {
			vm_object_t new_backing_object;

			/*
			 * If we do not entirely shadow the backing object,
			 * there is nothing we can do so we give up.
			 */

			if (vm_object_backing_scan(object, OBSC_TEST_ALL_SHADOWED) == 0) {
				break;
			}

			/*
			 * Make the parent shadow the next object in the
			 * chain.  Deallocating backing_object will not remove
			 * it, since its reference count is at least 2.
			 */

			LIST_REMOVE(object, shadow_list);
			backing_object->shadow_count--;
			backing_object->generation++;

			new_backing_object = backing_object->backing_object;
			if ((object->backing_object = new_backing_object) != NULL) {
				vm_object_reference(new_backing_object);
				LIST_INSERT_HEAD(
				    &new_backing_object->shadow_head,
				    object,
				    shadow_list
				);
				new_backing_object->shadow_count++;
				new_backing_object->generation++;
				object->backing_object_offset +=
					backing_object->backing_object_offset;
			}

			/*
			 * Drop the reference count on backing_object.  Since
			 * its ref_count was at least 2, the object itself
			 * will not vanish; vm_object_deallocate() simply
			 * performs the reference accounting for us.
			 */
			vm_object_deallocate(backing_object);
			object_bypasses++;
		}

		/*
		 * Try again with this object's new backing object.
		 */
	}
}

/*
 *	vm_object_page_remove: [internal]
 *
 *	Removes all physical pages in the specified object range from the
 *	object's list of pages.  A range ending at 0 extends to the end of
 *	the object.  If clean_only is TRUE, dirty pages are left intact and
 *	wired pages are not invalidated.
 */
static int vm_object_page_remove_callback(vm_page_t p, void *data);

void
vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
		      boolean_t clean_only)
{
	struct rb_vm_page_scan_info info;
	int all;

	/*
	 * Degenerate cases and assertions
	 */
	if (object == NULL || object->resident_page_count == 0)
		return;
	KASSERT(object->type != OBJT_PHYS,
		("attempt to remove pages from a physical object"));

	/*
	 * Indicate that paging is occurring on the object
	 */
	crit_enter();
	vm_object_pip_add(object, 1);

	/*
	 * Figure out the actual removal range and whether we are removing
	 * the entire contents of the object or not.  If removing the entire
	 * contents, be sure to get all pages, even those that might be
	 * beyond the end of the object.
	 */
	info.start_pindex = start;
	if (end == 0)
		info.end_pindex = (vm_pindex_t)-1;
	else
		info.end_pindex = end - 1;
	info.limit = clean_only;
	all = (start == 0 && info.end_pindex >= object->size - 1);

	/*
	 * Loop until we are sure we have gotten them all.
	 */
	do {
		info.error = 0;
		vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
					vm_object_page_remove_callback, &info);
	} while (info.error);

	/*
	 * Cleanup
	 */
	vm_object_pip_wakeup(object);
	crit_exit();
}

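/*
 * Example (illustrative sketch only, not a call site in this file): a
 * pager truncating an object to a hypothetical byte offset "newsize"
 * could remove every page at or beyond the new end, dirty or not, with:
 *
 *	vm_object_page_remove(object, OFF_TO_IDX(newsize), 0, FALSE);
 *
 * Passing end == 0 extends the removal to the end of the object, and
 * clean_only == FALSE frees dirty pages as well.
 */

/*
 * Callback for the RB-tree scan in vm_object_page_remove().  Frees or
 * invalidates a single page according to the wired/dirty rules below,
 * setting info->error if it had to sleep on a busy page so that the
 * caller restarts the scan.
 */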
static int
vm_object_page_remove_callback(vm_page_t p, void *data)
{
	struct rb_vm_page_scan_info *info = data;

	/*
	 * Wired pages cannot be destroyed, but they can be invalidated
	 * and we do so if clean_only (limit) is not set.
	 */
	if (p->wire_count != 0) {
		vm_page_protect(p, VM_PROT_NONE);
		if (info->limit == 0)
			p->valid = 0;
		return(0);
	}

	/*
	 * The busy flags are only cleared at interrupt time -- minimize
	 * the spl transitions by sleeping on busy pages here and flagging
	 * a rescan.
	 */
	if (vm_page_sleep_busy(p, TRUE, "vmopar")) {
		info->error = 1;
		return(0);
	}

	/*
	 * limit is our clean_only flag.  If set and the page is dirty, do
	 * not free it.
	 */
	if (info->limit && p->valid) {
		vm_page_test_dirty(p);
		if (p->valid & p->dirty)
			return(0);
	}

	/*
	 * Destroy the page
	 */
	vm_page_busy(p);
	vm_page_protect(p, VM_PROT_NONE);
	vm_page_free(p);
	return(0);
}

/*
 *	Routine:	vm_object_coalesce
 *	Function:	Coalesces two objects backing up adjoining
 *			regions of memory into a single object.
 *
 *	returns TRUE if objects were combined.
 *
 *	NOTE:	Only works at the moment if the second object is NULL -
 *		if it's not, which object do we lock first?
 *
 *	Parameters:
 *		prev_object	First object to coalesce
 *		prev_pindex	Page index within prev_object at which the
 *				coalesced region starts
 *		prev_size	Size of reference to prev_object, in bytes
 *		next_size	Size of the adjoining region, in bytes
 *
 *	Conditions:
 *	The object must *not* be locked.
 */
boolean_t
vm_object_coalesce(vm_object_t prev_object, vm_pindex_t prev_pindex,
    vm_size_t prev_size, vm_size_t next_size)
{
	vm_pindex_t next_pindex;

	if (prev_object == NULL) {
		return (TRUE);
	}

	if (prev_object->type != OBJT_DEFAULT &&
	    prev_object->type != OBJT_SWAP) {
		return (FALSE);
	}

	/*
	 * Try to collapse the object first
	 */
	vm_object_collapse(prev_object);

	/*
	 * Can't coalesce if:
	 *  - more than one reference
	 *  - paged out
	 *  - shadows another object
	 *  - has a copy elsewhere
	 * (any of which mean that the pages not mapped to prev_entry
	 * may be in use anyway)
	 */

	if (prev_object->backing_object != NULL) {
		return (FALSE);
	}

	prev_size >>= PAGE_SHIFT;
	next_size >>= PAGE_SHIFT;
	next_pindex = prev_pindex + prev_size;

	if ((prev_object->ref_count > 1) &&
	    (prev_object->size != next_pindex)) {
		return (FALSE);
	}

	/*
	 * Remove any pages that may still be in the object from a previous
	 * deallocation.
	 */
	if (next_pindex < prev_object->size) {
		vm_object_page_remove(prev_object,
				      next_pindex,
				      next_pindex + next_size, FALSE);
		if (prev_object->type == OBJT_SWAP)
			swap_pager_freespace(prev_object,
					     next_pindex, next_size);
	}

	/*
	 * Extend the object if necessary.
	 */
	if (next_pindex + next_size > prev_object->size)
		prev_object->size = next_pindex + next_size;

	return (TRUE);
}

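/*
 * Example (illustrative sketch only, not a call site in this file): a
 * caller extending an anonymous mapping by next_size bytes immediately
 * following an existing prev_size mapping backed by prev_object could
 * avoid allocating a new object with:
 *
 *	if (vm_object_coalesce(prev_object, prev_pindex,
 *			       prev_size, next_size)) {
 *		... prev_object now covers the extended range ...
 *	}
 *
 * The vm_map insertion code performs a similar optimization when
 * adjoining regions are mapped.
 */

/*
 * Mark the object as writeable and potentially dirty.  For vnode objects
 * the vnode is flagged VOBJDIRTY as well, so the filesystem sync code
 * knows to scan the underlying VM object for dirty pages.
 */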
void
vm_object_set_writeable_dirty(vm_object_t object)
{
	struct vnode *vp;

	vm_object_set_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
	if (object->type == OBJT_VNODE &&
	    (vp = (struct vnode *)object->handle) != NULL) {
		if ((vp->v_flag & VOBJDIRTY) == 0) {
			vsetflags(vp, VOBJDIRTY);
		}
	}
}

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <sys/cons.h>

#include <ddb/ddb.h>

static int	_vm_object_in_map (vm_map_t map, vm_object_t object,
				   vm_map_entry_t entry);
static int	vm_object_in_map (vm_object_t object);

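/*
 * Debugger helper: returns non-zero if the object is reachable from the
 * given map entry, either directly or via the entry's backing object
 * chain.  When entry is NULL, every entry in the map (and any submaps)
 * is searched.
 */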
static int
_vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
{
	vm_map_t tmpm;
	vm_map_entry_t tmpe;
	vm_object_t obj;
	int entcount;

	if (map == NULL)
		return 0;
	if (entry == NULL) {
		tmpe = map->header.next;
		entcount = map->nentries;
		while (entcount-- && (tmpe != &map->header)) {
			if (_vm_object_in_map(map, object, tmpe)) {
				return 1;
			}
			tmpe = tmpe->next;
		}
		return (0);
	}
	switch (entry->maptype) {
	case VM_MAPTYPE_SUBMAP:
		tmpm = entry->object.sub_map;
		tmpe = tmpm->header.next;
		entcount = tmpm->nentries;
		while (entcount-- && tmpe != &tmpm->header) {
			if (_vm_object_in_map(tmpm, object, tmpe)) {
				return 1;
			}
			tmpe = tmpe->next;
		}
		break;
	case VM_MAPTYPE_NORMAL:
	case VM_MAPTYPE_VPAGETABLE:
		obj = entry->object.vm_object;
		while (obj) {
			if (obj == object)
				return 1;
			obj = obj->backing_object;
		}
		break;
	default:
		break;
	}
	return 0;
}

static int vm_object_in_map_callback(struct proc *p, void *data);

struct vm_object_in_map_info {
	vm_object_t object;
	int rv;
};

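/*
 * Debugger helper: returns non-zero if the object appears in any process
 * vmspace or in the kernel, pager, or buffer maps.
 */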
static int
vm_object_in_map(vm_object_t object)
{
	struct vm_object_in_map_info info;

	info.rv = 0;
	info.object = object;

	allproc_scan(vm_object_in_map_callback, &info);
	if (info.rv)
		return 1;
	if (_vm_object_in_map(&kernel_map, object, NULL))
		return 1;
	if (_vm_object_in_map(&pager_map, object, NULL))
		return 1;
	if (_vm_object_in_map(&buffer_map, object, NULL))
		return 1;
	return 0;
}

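/*
 * allproc_scan() callback for vm_object_in_map().  Aborts the process
 * scan as soon as a matching vmspace is found.
 */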
static int
vm_object_in_map_callback(struct proc *p, void *data)
{
	struct vm_object_in_map_info *info = data;

	if (p->p_vmspace) {
		if (_vm_object_in_map(&p->p_vmspace->vm_map, info->object,
				      NULL)) {
			info->rv = 1;
			return -1;
		}
	}
	return (0);
}

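/*
 * Debugger command "show vmochk": verify that every internal (anonymous)
 * object is reachable from some map and that none have a zero reference
 * count.
 */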
DB_SHOW_COMMAND(vmochk, vm_object_check)
{
	vm_object_t object;

	/*
	 * make sure that internal objs are in a map somewhere
	 * and none have zero ref counts.
	 */
	for (object = TAILQ_FIRST(&vm_object_list);
			object != NULL;
			object = TAILQ_NEXT(object, object_list)) {
		if (object->handle == NULL &&
		    (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
			if (object->ref_count == 0) {
				db_printf("vmochk: internal obj has "
					  "zero ref count, size: %ld\n",
					  (long)object->size);
			}
			if (!vm_object_in_map(object)) {
				db_printf(
			"vmochk: internal obj is not in a map: "
			"ref: %d, size: %lu (0x%lx), backing_object: %p\n",
				    object->ref_count, (u_long)object->size,
				    (u_long)object->size,
				    (void *)object->backing_object);
			}
		}
	}
}

/*
 *	vm_object_print:	[ debug ]
 */
DB_SHOW_COMMAND(object, vm_object_print_static)
{
	/* XXX convert args. */
	vm_object_t object = (vm_object_t)addr;
	boolean_t full = have_addr;

	vm_page_t p;

	/* XXX count is an (unused) arg.  Avoid shadowing it. */
#define	count	was_count

	int count;

	if (object == NULL)
		return;

	db_iprintf(
	    "Object %p: type=%d, size=0x%lx, res=%d, ref=%d, flags=0x%x\n",
	    object, (int)object->type, (u_long)object->size,
	    object->resident_page_count, object->ref_count, object->flags);
	/*
	 * XXX no %qd in kernel.  Truncate object->backing_object_offset.
	 */
	db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%lx\n",
	    object->shadow_count,
	    object->backing_object ? object->backing_object->ref_count : 0,
	    object->backing_object, (long)object->backing_object_offset);

	if (!full)
		return;

	db_indent += 2;
	count = 0;
	RB_FOREACH(p, vm_page_rb_tree, &object->rb_memq) {
		if (count == 0)
			db_iprintf("memory:=");
		else if (count == 6) {
			db_printf("\n");
			db_iprintf(" ...");
			count = 0;
		} else
			db_printf(",");
		count++;

		db_printf("(off=0x%lx,page=0x%lx)",
		    (u_long) p->pindex, (u_long) VM_PAGE_TO_PHYS(p));
	}
	if (count != 0)
		db_printf("\n");
	db_indent -= 2;
}

/* XXX. */
#undef count

/* XXX need this non-static entry for calling from vm_map_print. */
void
vm_object_print(/* db_expr_t */ long addr,
		boolean_t have_addr,
		/* db_expr_t */ long count,
		char *modif)
{
	vm_object_print_static(addr, have_addr, count, modif);
}

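/*
 * Debugger command "show vmopag": for every VM object, print its resident
 * pages as runs of physically contiguous page frames.  Only the first 128
 * pages of each object are examined.  Output pauses every screenful; press
 * space to continue, any other key to abort.
 */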
DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
{
	vm_object_t object;
	int nl = 0;
	int c;

	for (object = TAILQ_FIRST(&vm_object_list);
			object != NULL;
			object = TAILQ_NEXT(object, object_list)) {
		vm_pindex_t idx, fidx;
		vm_pindex_t osize;
		vm_paddr_t pa = -1, padiff;
		int rcount;
		vm_page_t m;

		db_printf("new object: %p\n", (void *)object);
		if (nl > 18) {
			c = cngetc();
			if (c != ' ')
				return;
			nl = 0;
		}
		nl++;
		rcount = 0;
		fidx = 0;
		osize = object->size;
		if (osize > 128)
			osize = 128;
		for (idx = 0; idx < osize; idx++) {
			m = vm_page_lookup(object, idx);
			if (m == NULL) {
				if (rcount) {
					db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
						(long)fidx, rcount, (long)pa);
					if (nl > 18) {
						c = cngetc();
						if (c != ' ')
							return;
						nl = 0;
					}
					nl++;
					rcount = 0;
				}
				continue;
			}

			if (rcount &&
			    (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
				++rcount;
				continue;
			}
			if (rcount) {
				padiff = pa + rcount * PAGE_SIZE -
					VM_PAGE_TO_PHYS(m);
				padiff >>= PAGE_SHIFT;
				padiff &= PQ_L2_MASK;
				if (padiff == 0) {
					pa = VM_PAGE_TO_PHYS(m) -
						rcount * PAGE_SIZE;
					++rcount;
					continue;
				}
				db_printf(" index(%ld)run(%d)pa(0x%lx)",
					(long)fidx, rcount, (long)pa);
				db_printf("pd(%ld)\n", (long)padiff);
				if (nl > 18) {
					c = cngetc();
					if (c != ' ')
						return;
					nl = 0;
				}
				nl++;
			}
			fidx = idx;
			pa = VM_PAGE_TO_PHYS(m);
			rcount = 1;
		}
		if (rcount) {
			db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
				(long)fidx, rcount, (long)pa);
			if (nl > 18) {
				c = cngetc();
				if (c != ' ')
					return;
				nl = 0;
			}
			nl++;
		}
	}
}
#endif /* DDB */