author    Matthew Dillon <dillon@dragonflybsd.org>
          Thu, 27 May 2004 00:38:58 +0000 (00:38 +0000)
committer Matthew Dillon <dillon@dragonflybsd.org>
          Thu, 27 May 2004 00:38:58 +0000 (00:38 +0000)

Bring in the fictitious page wiring bug fixes from FreeBSD-5. Make additional
major changes to the APIs to clean them up (so this commit is substantially
different than what was committed to FreeBSD-5).

Obtained-from: Alan Cox <alc@cs.rice.edu> (FreeBSD-5)

sys/vm/vm_extern.h
sys/vm/vm_fault.c
sys/vm/vm_map.c
sys/vm/vm_page.c
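
The net effect on the wiring API, as declared in vm_extern.h below:
vm_fault_wire() and vm_fault_unwire() now take the map entry being wired
rather than a raw address range, and vm_fault_user_wire() is folded into
vm_fault_wire() as a boolean_t user-wire flag.  For reference, a summary
of the declarations changed in the first hunk (not additional code):

        /* Old API: separate kernel and user wiring paths, keyed on an
         * address range. */
        void vm_fault_unwire (vm_map_t, vm_offset_t, vm_offset_t);
        int vm_fault_wire (vm_map_t, vm_offset_t, vm_offset_t);
        int vm_fault_user_wire (vm_map_t, vm_offset_t, vm_offset_t);

        /* New API: a single wiring path keyed on the in-transition map
         * entry, with user wiring selected by the boolean_t argument. */
        void vm_fault_unwire (vm_map_t, vm_map_entry_t);
        int vm_fault_wire (vm_map_t, vm_map_entry_t, boolean_t);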

diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h
index 73463f3..97fc38b 100644
--- a/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h
@@ -32,7 +32,7 @@
  *
  *     @(#)vm_extern.h 8.2 (Berkeley) 1/12/94
  * $FreeBSD: src/sys/vm/vm_extern.h,v 1.46.2.3 2003/01/13 22:51:17 dillon Exp $
- * $DragonFly: src/sys/vm/vm_extern.h,v 1.12 2004/05/19 22:53:06 dillon Exp $
+ * $DragonFly: src/sys/vm/vm_extern.h,v 1.13 2004/05/27 00:38:58 dillon Exp $
  */
 
 #ifndef _VM_EXTERN_H_
@@ -78,9 +78,8 @@ void swapout_procs (int);
 int useracc (caddr_t, int, int);
 int vm_fault (vm_map_t, vm_offset_t, vm_prot_t, int);
 void vm_fault_copy_entry (vm_map_t, vm_map_t, vm_map_entry_t, vm_map_entry_t);
-void vm_fault_unwire (vm_map_t, vm_offset_t, vm_offset_t);
-int vm_fault_wire (vm_map_t, vm_offset_t, vm_offset_t);
-int vm_fault_user_wire (vm_map_t, vm_offset_t, vm_offset_t);
+void vm_fault_unwire (vm_map_t, vm_map_entry_t);
+int vm_fault_wire (vm_map_t, vm_map_entry_t, boolean_t);
 void vm_fork (struct proc *, struct proc *, int);
 void vm_waitproc (struct proc *);
 int vm_mmap (vm_map_t, vm_offset_t *, vm_size_t, vm_prot_t, vm_prot_t, int, void *, vm_ooffset_t);
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index 7afe56c..fe4f5ab 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -67,7 +67,7 @@
  * rights to redistribute these changes.
  *
  * $FreeBSD: src/sys/vm/vm_fault.c,v 1.108.2.8 2002/02/26 05:49:27 silby Exp $
- * $DragonFly: src/sys/vm/vm_fault.c,v 1.15 2004/05/20 22:42:25 dillon Exp $
+ * $DragonFly: src/sys/vm/vm_fault.c,v 1.16 2004/05/27 00:38:58 dillon Exp $
  */
 
 /*
@@ -927,66 +927,36 @@ vm_fault_quick(caddr_t v, int prot)
 }
 
 /*
- *     vm_fault_wire:
- *
- *     Wire down a range of virtual addresses in a map.
+ * Wire down a range of virtual addresses in a map.  The entry in question
+ * should be marked in-transition and the map must be locked.  We must
+ * release the map temporarily while faulting-in the page to avoid a
+ * deadlock.  Note that the entry may be clipped while we are blocked but
+ * will never be freed.
  */
 int
-vm_fault_wire(vm_map_t map, vm_offset_t start, vm_offset_t end)
+vm_fault_wire(vm_map_t map, vm_map_entry_t entry, boolean_t user_wire)
 {
-
+       boolean_t fictitious;
+       vm_offset_t start;
+       vm_offset_t end;
        vm_offset_t va;
+       vm_paddr_t pa;
        pmap_t pmap;
        int rv;
 
        pmap = vm_map_pmap(map);
+       start = entry->start;
+       end = entry->end;
+       fictitious = entry->object.vm_object &&
+                       (entry->object.vm_object->type == OBJT_DEVICE);
 
-       /*
-        * Inform the physical mapping system that the range of addresses may
-        * not fault, so that page tables and such can be locked down as well.
-        */
-
-       pmap_pageable(pmap, start, end, FALSE);
-
-       /*
-        * We simulate a fault to get the page and enter it in the physical
-        * map.
-        */
-
-       for (va = start; va < end; va += PAGE_SIZE) {
-               rv = vm_fault(map, va, VM_PROT_READ|VM_PROT_WRITE,
-                       VM_FAULT_CHANGE_WIRING);
-               if (rv) {
-                       if (va != start)
-                               vm_fault_unwire(map, start, va);
-                       return (rv);
-               }
-       }
-       return (KERN_SUCCESS);
-}
-
-/*
- *     vm_fault_user_wire:
- *
- *     Wire down a range of virtual addresses in a map.  This
- *     is for user mode though, so we only ask for read access
- *     on currently read only sections.
- */
-int
-vm_fault_user_wire(vm_map_t map, vm_offset_t start, vm_offset_t end)
-{
-
-       vm_offset_t va;
-       pmap_t pmap;
-       int rv;
-
-       pmap = vm_map_pmap(map);
+       vm_map_unlock(map);
+       map->timestamp++;
 
        /*
         * Inform the physical mapping system that the range of addresses may
         * not fault, so that page tables and such can be locked down as well.
         */
-
        pmap_pageable(pmap, start, end, FALSE);
 
        /*
@@ -994,42 +964,61 @@ vm_fault_user_wire(vm_map_t map, vm_offset_t start, vm_offset_t end)
         * map.
         */
        for (va = start; va < end; va += PAGE_SIZE) {
-               rv = vm_fault(map, va, VM_PROT_READ, VM_FAULT_USER_WIRE);
+               if (user_wire) {
+                       rv = vm_fault(map, va, VM_PROT_READ, 
+                                       VM_FAULT_USER_WIRE);
+               } else {
+                       rv = vm_fault(map, va, VM_PROT_READ|VM_PROT_WRITE,
+                                       VM_FAULT_CHANGE_WIRING);
+               }
                if (rv) {
-                       if (va != start)
-                               vm_fault_unwire(map, start, va);
+                       while (va > start) {
+                               va -= PAGE_SIZE;
+                               if ((pa = pmap_extract(pmap, va)) == 0)
+                                       continue;
+                               pmap_change_wiring(pmap, va, FALSE);
+                               if (!fictitious)
+                                       vm_page_unwire(PHYS_TO_VM_PAGE(pa), 1);
+                       }
+                       pmap_pageable(pmap, start, end, TRUE);
+                       vm_map_lock(map);
                        return (rv);
                }
        }
+       vm_map_lock(map);
        return (KERN_SUCCESS);
 }
 
-
 /*
- *     vm_fault_unwire:
- *
- *     Unwire a range of virtual addresses in a map.
+ * Unwire a range of virtual addresses in a map.  The map should be
+ * locked.
  */
 void
-vm_fault_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end)
+vm_fault_unwire(vm_map_t map, vm_map_entry_t entry)
 {
-
+       boolean_t fictitious;
+       vm_offset_t start;
+       vm_offset_t end;
        vm_offset_t va;
        vm_paddr_t pa;
        pmap_t pmap;
 
        pmap = vm_map_pmap(map);
+       start = entry->start;
+       end = entry->end;
+       fictitious = entry->object.vm_object &&
+                       (entry->object.vm_object->type == OBJT_DEVICE);
 
        /*
         * Since the pages are wired down, we must be able to get their
         * mappings from the physical map system.
         */
-
        for (va = start; va < end; va += PAGE_SIZE) {
                pa = pmap_extract(pmap, va);
                if (pa != 0) {
                        pmap_change_wiring(pmap, va, FALSE);
-                       vm_page_unwire(PHYS_TO_VM_PAGE(pa), 1);
+                       if (!fictitious)
+                               vm_page_unwire(PHYS_TO_VM_PAGE(pa), 1);
                }
        }
 
@@ -1037,9 +1026,7 @@ vm_fault_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end)
         * Inform the physical mapping system that the range of addresses may
         * fault, so that page tables and such may be unwired themselves.
         */
-
        pmap_pageable(pmap, start, end, TRUE);
-
 }
 
 /*
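
A note on the locking contract established above, before the vm_map.c
changes that rely on it: vm_fault_wire() is now entered with the map
locked and the entry marked in-transition, unlocks the map itself around
the faults, and relocks it before returning, so the entry may be clipped
while blocked but can never be freed.  A condensed caller-side sketch,
paraphrasing (not quoting) the vm_map.c hunks below:

        /*
         * Caller holds the map lock and has marked the entry
         * in-transition.  Save the bounds: the entry may be clipped
         * while vm_fault_wire() has the map unlocked.
         */
        save_start = entry->start;
        save_end = entry->end;
        rv = vm_fault_wire(map, entry, TRUE);   /* unlocks/relocks map */
        if (rv) {
                /* recover our fragment; the entry may have been clipped */
                CLIP_CHECK_BACK(entry, save_start);
        }
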
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 6628b8b..8f8a761 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -62,7 +62,7 @@
  * rights to redistribute these changes.
  *
  * $FreeBSD: src/sys/vm/vm_map.c,v 1.187.2.19 2003/05/27 00:47:02 alc Exp $
- * $DragonFly: src/sys/vm/vm_map.c,v 1.27 2004/05/13 17:40:19 dillon Exp $
+ * $DragonFly: src/sys/vm/vm_map.c,v 1.28 2004/05/27 00:38:58 dillon Exp $
  */
 
 /*
@@ -1748,16 +1748,14 @@ vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t real_end,
                        entry->eflags |= MAP_ENTRY_USER_WIRED;
 
                        /*
-                        * Now fault in the area.  The map lock needs to be
-                        * manipulated to avoid deadlocks.  The in-transition
+                        * Now fault in the area.  Note that vm_fault_wire()
+                        * may release the map lock temporarily, it will be
+                        * relocked on return.  The in-transition
                         * flag protects the entries. 
                         */
                        save_start = entry->start;
                        save_end = entry->end;
-                       vm_map_unlock(map);
-                       map->timestamp++;
-                       rv = vm_fault_user_wire(map, save_start, save_end);
-                       vm_map_lock(map);
+                       rv = vm_fault_wire(map, entry, TRUE);
                        if (rv) {
                                CLIP_CHECK_BACK(entry, save_start);
                                for (;;) {
@@ -1833,11 +1831,12 @@ vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t real_end,
                 */
                entry = start_entry;
                while ((entry != &map->header) && (entry->start < end)) {
-                       KASSERT(entry->eflags & MAP_ENTRY_USER_WIRED, ("expected USER_WIRED on entry %p", entry));
+                       KASSERT(entry->eflags & MAP_ENTRY_USER_WIRED,
+                               ("expected USER_WIRED on entry %p", entry));
                        entry->eflags &= ~MAP_ENTRY_USER_WIRED;
                        entry->wired_count--;
                        if (entry->wired_count == 0)
-                               vm_fault_unwire(map, entry->start, entry->end);
+                               vm_fault_unwire(map, entry);
                        entry = entry->next;
                }
        }
@@ -1976,7 +1975,6 @@ vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t real_end, int kmflags)
                 */
 
                s = splvm();
-               vm_map_unlock(map);
 
                entry = start_entry;
                while (entry != &map->header && entry->start < end) {
@@ -1990,7 +1988,7 @@ vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t real_end, int kmflags)
                        vm_offset_t save_end = entry->end;
 
                        if (entry->wired_count == 1)
-                               rv = vm_fault_wire(map, entry->start, entry->end);
+                               rv = vm_fault_wire(map, entry, FALSE);
                        if (rv) {
                                CLIP_CHECK_BACK(entry, save_start);
                                for (;;) {
@@ -2009,13 +2007,6 @@ vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t real_end, int kmflags)
                }
                splx(s);
 
-               /*
-                * relock.  start_entry is still IN_TRANSITION and must
-                * still exist, but may have been clipped (handled just
-                * below).
-                */
-               vm_map_lock(map);
-
                /*
                 * If a failure occurred, undo everything by falling through
                 * to the unwiring code.  'end' has already been adjusted
@@ -2025,9 +2016,10 @@ vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t real_end, int kmflags)
                        kmflags |= KM_PAGEABLE;
 
                /*
-                * start_entry might have been clipped if we unlocked the
-                * map and blocked.  No matter how clipped it has gotten
-                * there should be a fragment that is on our start boundary.
+                * start_entry is still IN_TRANSITION but may have been 
+                * clipped since vm_fault_wire() unlocks and relocks the
+                * map.  No matter how clipped it has gotten there should
+                * be a fragment that is on our start boundary.
                 */
                CLIP_CHECK_BACK(start_entry, start);
        }
@@ -2056,7 +2048,7 @@ vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t real_end, int kmflags)
                while ((entry != &map->header) && (entry->start < end)) {
                        entry->wired_count--;
                        if (entry->wired_count == 0)
-                               vm_fault_unwire(map, entry->start, entry->end);
+                               vm_fault_unwire(map, entry);
                        entry = entry->next;
                }
        }
@@ -2234,8 +2226,9 @@ vm_map_clean(vm_map_t map, vm_offset_t start, vm_offset_t end, boolean_t syncio,
 static void 
 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
 {
-       vm_fault_unwire(map, entry->start, entry->end);
+       entry->eflags &= ~MAP_ENTRY_USER_WIRED;
        entry->wired_count = 0;
+       vm_fault_unwire(map, entry);
 }
 
 /*
@@ -2274,9 +2267,9 @@ vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end, int *countp)
         */
 
 again:
-       if (!vm_map_lookup_entry(map, start, &first_entry))
+       if (!vm_map_lookup_entry(map, start, &first_entry)) {
                entry = first_entry->next;
-       else {
+       } else {
                entry = first_entry;
                vm_map_clip_start(map, entry, start, countp);
                /*
@@ -2335,9 +2328,8 @@ again:
                 * Unwire before removing addresses from the pmap; otherwise,
                 * unwiring will put the entries back in the pmap.
                 */
-               if (entry->wired_count != 0) {
+               if (entry->wired_count != 0)
                        vm_map_entry_unwire(map, entry);
-               }
 
                offidxend = offidxstart + count;
 
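One consequence visible in vm_map.c above: vm_map_entry_unwire() now
clears MAP_ENTRY_USER_WIRED as well as the wire count, and does so before
releasing the pages rather than after.  The resulting helper, restated
from the hunk above:

        static void
        vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
        {
                /* clear the wired bookkeeping, then release the pages */
                entry->eflags &= ~MAP_ENTRY_USER_WIRED;
                entry->wired_count = 0;
                vm_fault_unwire(map, entry);
        }
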
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index ca9e48d..1dd1411 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -35,7 +35,7 @@
  *
  *     from: @(#)vm_page.c     7.4 (Berkeley) 5/7/91
  * $FreeBSD: src/sys/vm/vm_page.c,v 1.147.2.18 2002/03/10 05:03:19 alc Exp $
- * $DragonFly: src/sys/vm/vm_page.c,v 1.24 2004/05/20 22:42:25 dillon Exp $
+ * $DragonFly: src/sys/vm/vm_page.c,v 1.25 2004/05/27 00:38:58 dillon Exp $
  */
 
 /*
@@ -988,7 +988,6 @@ vm_page_free_wakeup(void)
  *     Object and page must be locked prior to entry.
  *     This routine may not block.
  */
-
 void
 vm_page_free_toq(vm_page_t m)
 {
@@ -1019,8 +1018,8 @@ vm_page_free_toq(vm_page_t m)
        vm_page_remove(m);
 
        /*
-        * If fictitious remove object association and
-        * return, otherwise delay object association removal.
+        * No further management of fictitious pages occurs beyond object
+        * and queue removal.
         */
        if ((m->flags & PG_FICTITIOUS) != 0) {
                splx(s);
@@ -1122,18 +1121,20 @@ vm_page_wire(vm_page_t m)
        /*
         * Only bump the wire statistics if the page is not already wired,
         * and only unqueue the page if it is on some queue (if it is unmanaged
-        * it is already off the queues).
+        * it is already off the queues).  Don't do anything with fictitious
+        * pages because they are always wired.
         */
        s = splvm();
-       if (m->wire_count == 0) {
-               if ((m->flags & PG_UNMANAGED) == 0)
-                       vm_page_unqueue(m);
-               vmstats.v_wire_count++;
+       if ((m->flags & PG_FICTITIOUS) == 0) {
+               if (m->wire_count == 0) {
+                       if ((m->flags & PG_UNMANAGED) == 0)
+                               vm_page_unqueue(m);
+                       vmstats.v_wire_count++;
+               }
+               m->wire_count++;
+               KASSERT(m->wire_count != 0,
+                   ("vm_page_wire: wire_count overflow m=%p", m));
        }
-       m->wire_count++;
-       KASSERT(m->wire_count != 0,
-           ("vm_page_wire: wire_count overflow m=%p", m));
-
        splx(s);
        vm_page_flag_set(m, PG_MAPPED);
 }
@@ -1169,28 +1170,30 @@ vm_page_unwire(vm_page_t m, int activate)
        int s;
 
        s = splvm();
-
-       if (m->wire_count > 0) {
-               m->wire_count--;
-               if (m->wire_count == 0) {
-                       vmstats.v_wire_count--;
+       if (m->flags & PG_FICTITIOUS) {
+               /* do nothing */
+       } else if (m->wire_count <= 0) {
+               panic("vm_page_unwire: invalid wire count: %d", m->wire_count);
+       } else {
+               if (--m->wire_count == 0) {
+                       --vmstats.v_wire_count;
                        if (m->flags & PG_UNMANAGED) {
                                ;
                        } else if (activate) {
-                               TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
+                               TAILQ_INSERT_TAIL(
+                                   &vm_page_queues[PQ_ACTIVE].pl, m, pageq);
                                m->queue = PQ_ACTIVE;
                                vm_page_queues[PQ_ACTIVE].lcnt++;
                                vmstats.v_active_count++;
                        } else {
                                vm_page_flag_clear(m, PG_WINATCFLS);
-                               TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
+                               TAILQ_INSERT_TAIL(
+                                   &vm_page_queues[PQ_INACTIVE].pl, m, pageq);
                                m->queue = PQ_INACTIVE;
                                vm_page_queues[PQ_INACTIVE].lcnt++;
                                vmstats.v_inactive_count++;
                        }
                }
-       } else {
-               panic("vm_page_unwire: invalid wire count: %d", m->wire_count);
        }
        splx(s);
 }
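
Taken together, the vm_page.c changes give fictitious pages a simple
invariant: their wire_count is never adjusted, so they behave as
permanently wired and never touch the page queues or the global wire
statistics.  A condensed view of the new behavior, abridged from the two
hunks above (the elided bodies are unchanged bookkeeping):

        void
        vm_page_wire(vm_page_t m)
        {
                /* fictitious pages are always wired; leave them alone */
                if ((m->flags & PG_FICTITIOUS) == 0) {
                        /* ... unqueue, bump wire_count and stats ... */
                }
        }

        void
        vm_page_unwire(vm_page_t m, int activate)
        {
                if (m->flags & PG_FICTITIOUS) {
                        /* do nothing */
                } else if (m->wire_count <= 0) {
                        panic("vm_page_unwire: invalid wire count: %d",
                            m->wire_count);
                } else {
                        /* ... drop wire_count, requeue at zero ... */
                }
        }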