kernel - fix procfs vm_map scan.
author    Matthew Dillon <dillon@apollo.backplane.com>
          Wed, 10 Mar 2010 23:56:09 +0000 (15:56 -0800)
committer Matthew Dillon <dillon@apollo.backplane.com>
          Wed, 10 Mar 2010 23:56:09 +0000 (15:56 -0800)
* procfs was holding a vm_map lock during a uiomove, which can result
  in a recursive lock panic on the vm_map.

* Since we must now unlock the map across the uiomove, use a trick with
  vm_map->hint to detect whether the current entry has been ripped out
  from under us and, if so, issue a lookup to reacquire our position in
  the scan (a condensed sketch of the pattern follows below).

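  For reference, a condensed sketch of the locking pattern follows.  It is
  only an illustration, assuming DragonFly's vm_map API as used in the patch
  (vm_map_lock_read(), vm_map_unlock_read(), vm_map_lookup_entry(), map->hint
  and the map->header entry list); the per-entry formatting is reduced to a
  single ksnprintf() so the unlock/relock dance stands out, and is not the
  real procfs output code.

  #include <sys/param.h>
  #include <sys/systm.h>
  #include <sys/uio.h>
  #include <vm/vm.h>
  #include <vm/vm_map.h>

  static int
  map_scan_sketch(vm_map_t map, struct uio *uio)
  {
          vm_map_entry_t entry;
          vm_offset_t ostart;
          char buf[64];
          size_t len;
          int error = 0;

          vm_map_lock_read(map);
          for (entry = map->header.next;
               uio->uio_resid > 0 && entry != &map->header;
               entry = entry->next) {
                  /*
                   * Record our position while the map is still locked.
                   * The hint doubles as a cheap ripout detector: any
                   * operation that deletes this entry also adjusts the
                   * hint, so a mismatch after relocking means the entry
                   * may be stale.
                   */
                  map->hint = entry;
                  ostart = entry->start;

                  /* Format the entry into a private buffer while locked. */
                  len = ksnprintf(buf, sizeof(buf), "0x%lx 0x%lx\n",
                                  (u_long)entry->start, (u_long)entry->end);

                  /*
                   * uiomove() can fault on the user buffer, and the fault
                   * path may try to lock this same map, so the read lock
                   * must be dropped across the copy.
                   */
                  vm_map_unlock_read(map);
                  error = uiomove(buf, len, uio);
                  vm_map_lock_read(map);
                  if (error)
                          break;

                  /*
                   * If the hint moved while we were unlocked, re-lookup
                   * the saved start address to regain a valid position
                   * in the list before advancing.
                   */
                  if (map->hint != entry) {
                          vm_map_entry_t reentry;

                          vm_map_lookup_entry(map, ostart, &reentry);
                          entry = reentry;
                  }
          }
          vm_map_unlock_read(map);
          return (error);
  }
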
Reported-by: "Alex Hornung" <ahornung@gmail.com>
sys/vfs/procfs/procfs_map.c

index 62d942b..59d4577 100644
@@ -85,14 +85,14 @@ procfs_domap(struct proc *curp, struct lwp *lp, struct pfsnode *pfs,
                return (0);
        
        error = 0;
-       if (map != &curproc->p_vmspace->vm_map)
-               vm_map_lock_read(map);
+       vm_map_lock_read(map);
        for (entry = map->header.next;
                ((uio->uio_resid > 0) && (entry != &map->header));
                entry = entry->next) {
                vm_object_t obj, tobj, lobj;
                int ref_count, shadow_count, flags;
                vm_offset_t addr;
+               vm_offset_t ostart;
                int resident, privateresident;
                char *type;
 
@@ -107,6 +107,15 @@ procfs_domap(struct proc *curp, struct lwp *lp, struct pfsnode *pfs,
                else
                        privateresident = 0;
 
+               /*
+                * Use map->hint as a poor man's ripout detector.
+                */
+               map->hint = entry;
+               ostart = entry->start;
+
+               /*
+                * Count resident pages (XXX can be horrible on 64-bit)
+                */
                resident = 0;
                addr = entry->start;
                while (addr < entry->end) {
@@ -168,12 +177,32 @@ case OBJT_DEVICE:
                        error = EFBIG;
                        break;
                }
+
+               /*
+                * We cannot safely hold the map locked while accessing
+                * userspace as a VM fault might recurse the locked map.
+                */
+               vm_map_unlock_read(map);
                error = uiomove(mebuffer, len, uio);
+               vm_map_lock_read(map);
                if (error)
                        break;
+
+               /*
+                * We use map->hint as a poor man's ripout detector.  If
+                * it does not match the entry we set it to prior to
+                * unlocking the map the entry MIGHT now be stale.  In
+                * this case we do an expensive lookup to find our place
+                * in the iteration again.
+                */
+               if (map->hint != entry) {
+                       vm_map_entry_t reentry;
+
+                       vm_map_lookup_entry(map, ostart, &reentry);
+                       entry = reentry;
+               }
        }
-       if (map != &curproc->p_vmspace->vm_map)
-               vm_map_unlock_read(map);
+       vm_map_unlock_read(map);
 
        return error;
 }