proc->thread stage 4: rework the VFS and DEVICE subsystems to take thread pointers instead of process pointers as arguments
diff --git a/sys/vfs/nullfs/null_subr.c b/sys/vfs/nullfs/null_subr.c
index 9b7dfbd..72c1c00 100644
@@ -36,7 +36,7 @@
  *     @(#)null_subr.c 8.7 (Berkeley) 5/14/95
  *
  * $FreeBSD: src/sys/miscfs/nullfs/null_subr.c,v 1.21.2.4 2001/06/26 04:20:09 bp Exp $
- * $DragonFly: src/sys/vfs/nullfs/Attic/null_subr.c,v 1.2 2003/06/17 04:28:42 dillon Exp $
+ * $DragonFly: src/sys/vfs/nullfs/Attic/null_subr.c,v 1.3 2003/06/25 03:55:59 dillon Exp $
  */
 
 #include <sys/param.h>
@@ -108,7 +108,7 @@ null_node_find(mp, lowervp)
        struct mount *mp;
        struct vnode *lowervp;
 {
-       struct proc *p = curproc;       /* XXX */
+       struct thread *td = curthread;  /* XXX */
        struct null_node_hashhead *hd;
        struct null_node *a;
        struct vnode *vp;
@@ -121,25 +121,25 @@ null_node_find(mp, lowervp)
         */
        hd = NULL_NHASH(lowervp);
 loop:
-       lockmgr(&null_hashlock, LK_EXCLUSIVE, NULL, p);
+       lockmgr(&null_hashlock, LK_EXCLUSIVE, NULL, td);
        LIST_FOREACH(a, hd, null_hash) {
                if (a->null_lowervp == lowervp && NULLTOV(a)->v_mount == mp) {
                        vp = NULLTOV(a);
-                       lockmgr(&null_hashlock, LK_RELEASE, NULL, p);
+                       lockmgr(&null_hashlock, LK_RELEASE, NULL, td);
                        /*
                         * We need vget for the VXLOCK
                         * stuff, but we don't want to lock
                         * the lower node.
                         */
-                       if (vget(vp, LK_EXCLUSIVE | LK_CANRECURSE, p)) {
+                       if (vget(vp, LK_EXCLUSIVE | LK_CANRECURSE, td)) {
                                printf ("null_node_find: vget failed.\n");
                                goto loop;
                        }
-                       VOP_UNLOCK(lowervp, 0, p);
+                       VOP_UNLOCK(lowervp, 0, td);
                        return (vp);
                }
        }
-       lockmgr(&null_hashlock, LK_RELEASE, NULL, p);
+       lockmgr(&null_hashlock, LK_RELEASE, NULL, td);
 
        return NULLVP;
 }
@@ -156,7 +156,7 @@ null_node_alloc(mp, lowervp, vpp)
        struct vnode *lowervp;
        struct vnode **vpp;
 {
-       struct proc *p = curproc;       /* XXX */
+       struct thread *td = curthread;  /* XXX */
        struct null_node_hashhead *hd;
        struct null_node *xp;
        struct vnode *othervp, *vp;
@@ -206,16 +206,16 @@ null_node_alloc(mp, lowervp, vpp)
         * NULL, then we copy that up and manually lock the new vnode.
         */
 
-       lockmgr(&null_hashlock, LK_EXCLUSIVE, NULL, p);
+       lockmgr(&null_hashlock, LK_EXCLUSIVE, NULL, td);
        vp->v_vnlock = lowervp->v_vnlock;
-       error = VOP_LOCK(vp, LK_EXCLUSIVE | LK_THISLAYER, p);
+       error = VOP_LOCK(vp, LK_EXCLUSIVE | LK_THISLAYER, td);
        if (error)
                panic("null_node_alloc: can't lock new vnode\n");
 
        VREF(lowervp);
        hd = NULL_NHASH(lowervp);
        LIST_INSERT_HEAD(hd, xp, null_hash);
-       lockmgr(&null_hashlock, LK_RELEASE, NULL, p);
+       lockmgr(&null_hashlock, LK_RELEASE, NULL, td);
        return 0;
 }
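
For readers following the conversion, the sketch below (hypothetical demo_* names and stand-in proc/thread structures, not DragonFly kernel code) illustrates the calling-convention change this diff applies: routines such as lockmgr(), vget(), VOP_LOCK() and VOP_UNLOCK(), which previously identified the caller with a struct proc * obtained from curproc, now take a struct thread * obtained from curthread.

#include <stdio.h>

struct proc   { int p_pid; };
struct thread { int td_tid; struct proc *td_proc; };

/* Stand-ins for the kernel's curproc/curthread globals. */
static struct proc   demo_proc   = { 1 };
static struct thread demo_thread = { 100, &demo_proc };
#define demo_curproc   (demo_thread.td_proc)
#define demo_curthread (&demo_thread)

/* Old-style interface: caller identified by process. */
static void
demo_lockmgr_proc(const char *name, struct proc *p)
{
	printf("%s acquired on behalf of pid %d\n", name, p->p_pid);
}

/* New-style interface: caller identified by thread. */
static void
demo_lockmgr_thread(const char *name, struct thread *td)
{
	printf("%s acquired on behalf of tid %d\n", name, td->td_tid);
}

int
main(void)
{
	/* Before: struct proc *p = curproc;    lockmgr(..., p);  */
	demo_lockmgr_proc("null_hashlock", demo_curproc);

	/* After:  struct thread *td = curthread;  lockmgr(..., td); */
	demo_lockmgr_thread("null_hashlock", demo_curthread);
	return 0;
}

Note that only the type of the token passed down changes; the hashing and locking logic in null_node_find() and null_node_alloc() is otherwise untouched by this hunk.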