hammer2 - Move write thread from hmp to pmp
author: Matthew Dillon <dillon@apollo.backplane.com>
Wed, 2 Oct 2013 04:43:34 +0000 (21:43 -0700)
committer: Matthew Dillon <dillon@apollo.backplane.com>
Wed, 2 Oct 2013 04:43:34 +0000 (21:43 -0700)
* Move the write thread from struct hammer2_mount to struct hammer2_pfsmount.
  Logical buffers for files are associated with the PFS, not the HMP.  A PFS
  can be backed by multiple HMPs.

sys/vfs/hammer2/hammer2.h
sys/vfs/hammer2/hammer2_vfsops.c
sys/vfs/hammer2/hammer2_vnops.c

index fa040cc..45b74b7 100644 (file)
@@ -472,9 +472,6 @@ struct hammer2_mount {
        int             volhdrno;       /* last volhdrno written */
        hammer2_volume_data_t voldata;
        hammer2_volume_data_t volsync;  /* synchronized voldata */
-       struct bio_queue_head wthread_bioq; /* bio queue for write thread */
-       struct mtx wthread_mtx;     /* mutex for write thread */
-       int     wthread_destroy;    /* to control the write thread */
 };
 
 typedef struct hammer2_mount hammer2_mount_t;
@@ -525,6 +522,10 @@ struct hammer2_pfsmount {
        long                    inmem_inodes;
        long                    inmem_chains;
        int                     inmem_waiting;
+       thread_t                wthread_td;     /* write thread td */
+       struct bio_queue_head   wthread_bioq;   /* logical buffer bioq */
+       struct mtx              wthread_mtx;    /* interlock */
+       int                     wthread_destroy;/* termination sequencing */
 };
 
 typedef struct hammer2_pfsmount hammer2_pfsmount_t;
index fbf8ff0..c944cec 100644 (file)
@@ -544,16 +544,6 @@ hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data,
                hammer2_inode_unlock_ex(hmp->sroot, schain);
                schain = NULL;
                /* leave hmp->sroot with one ref */
-               
-               mtx_init(&hmp->wthread_mtx);
-               bioq_init(&hmp->wthread_bioq);
-               hmp->wthread_destroy = 0;
-       
-               /*
-                * Launch threads.
-                */
-               lwkt_create(hammer2_write_thread, hmp,
-                               NULL, NULL, 0, -1, "hammer2-write");
        }
 
        /*
@@ -663,6 +653,16 @@ hammer2_vfs_mount(struct mount *mp, char *path, caddr_t data,
        kprintf("iroot %p\n", pmp->iroot);
 
        /*
+        * The logical file buffer bio write thread handles things
+        * like physical block assignment and compression.
+        */
+       mtx_init(&pmp->wthread_mtx);
+       bioq_init(&pmp->wthread_bioq);
+       pmp->wthread_destroy = 0;
+       lwkt_create(hammer2_write_thread, pmp,
+                   &pmp->wthread_td, NULL, 0, -1, "hwrite-%s", label);
+
+       /*
         * Ref the cluster management messaging descriptor.  The mount
         * program deals with the other end of the communications pipe.
         */
@@ -704,7 +704,7 @@ static
 void
 hammer2_write_thread(void *arg)
 {
-       hammer2_mount_t* hmp;
+       hammer2_pfsmount_t *pmp;
        struct bio *bio;
        struct buf *bp;
        hammer2_trans_t trans;
@@ -719,20 +719,20 @@ hammer2_write_thread(void *arg)
        int pblksize;
        int error;
        
-       hmp = arg;
+       pmp = arg;
        
-       mtx_lock(&hmp->wthread_mtx);
-       while (hmp->wthread_destroy == 0) {
-               if (bioq_first(&hmp->wthread_bioq) == NULL) {
-                       mtxsleep(&hmp->wthread_bioq, &hmp->wthread_mtx,
+       mtx_lock(&pmp->wthread_mtx);
+       while (pmp->wthread_destroy == 0) {
+               if (bioq_first(&pmp->wthread_bioq) == NULL) {
+                       mtxsleep(&pmp->wthread_bioq, &pmp->wthread_mtx,
                                 0, "h2bioqw", 0);
                }
                last_ip = NULL;
                parent = NULL;
                parentp = &parent;
 
-               while ((bio = bioq_takefirst(&hmp->wthread_bioq)) != NULL) {
-                       mtx_unlock(&hmp->wthread_mtx);
+               while ((bio = bioq_takefirst(&pmp->wthread_bioq)) != NULL) {
+                       mtx_unlock(&pmp->wthread_mtx);
                        
                        error = 0;
                        bp = bio->bio_buf;
@@ -778,7 +778,7 @@ hammer2_write_thread(void *arg)
                                bp->b_error = EIO;
                        }
                        biodone(bio);
-                       mtx_lock(&hmp->wthread_mtx);
+                       mtx_lock(&pmp->wthread_mtx);
                }
 
                /*
@@ -787,10 +787,10 @@ hammer2_write_thread(void *arg)
                if (last_ip)
                        hammer2_trans_done(&trans);
        }
-       hmp->wthread_destroy = -1;
-       wakeup(&hmp->wthread_destroy);
+       pmp->wthread_destroy = -1;
+       wakeup(&pmp->wthread_destroy);
        
-       mtx_unlock(&hmp->wthread_mtx);
+       mtx_unlock(&pmp->wthread_mtx);
 }
 
 /* 
@@ -1378,27 +1378,37 @@ hammer2_vfs_unmount(struct mount *mp, int mntflags)
 
        lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);
 
-       for (i = 0; i < pmp->cluster.nchains; ++i) {
-               hmp = pmp->cluster.chains[i]->hmp;
-
+       /*
+        * If mount initialization proceeded far enough we must flush
+        * its vnodes.
+        */
+       if (mntflags & MNT_FORCE)
+               flags = FORCECLOSE;
+       else
                flags = 0;
+       if (pmp->iroot) {
+               error = vflush(mp, 0, flags);
+               if (error)
+                       goto failed;
+       }
 
-               if (mntflags & MNT_FORCE)
-                       flags |= FORCECLOSE;
-
-               hammer2_mount_exlock(hmp);
+       if (pmp->wthread_td) {
+               mtx_lock(&pmp->wthread_mtx);
+               pmp->wthread_destroy = 1;
+               wakeup(&pmp->wthread_bioq);
+               while (pmp->wthread_destroy != -1) {
+                       mtxsleep(&pmp->wthread_destroy,
+                               &pmp->wthread_mtx, 0,
+                               "umount-sleep", 0);
+               }
+               mtx_unlock(&pmp->wthread_mtx);
+               pmp->wthread_td = NULL;
+       }
 
-               /*
-                * If mount initialization proceeded far enough we must flush
-                * its vnodes.
-                */
-               if (pmp->iroot)
-                       error = vflush(mp, 0, flags);
+       for (i = 0; i < pmp->cluster.nchains; ++i) {
+               hmp = pmp->cluster.chains[i]->hmp;
 
-               if (error) {
-                       hammer2_mount_unlock(hmp);
-                       goto failed;
-               }
+               hammer2_mount_exlock(hmp);
 
                --hmp->pmp_count;
                kprintf("hammer2_unmount hmp=%p pmpcnt=%d\n",
@@ -1489,31 +1499,21 @@ hammer2_vfs_unmount(struct mount *mp, int mntflags)
                        hammer2_chain_drop(&hmp->fchain);
 
                        /*
-                        * Final drop of embedded volume root chain to clean up
-                        * vchain.core (vchain structure is not flagged ALLOCATED
-                        * so it is cleaned out and then left to rot).
+                        * Final drop of embedded volume root chain to clean
+                        * up vchain.core (vchain structure is not flagged
+                        * ALLOCATED so it is cleaned out and then left to
+                        * rot).
                         */
                        dumpcnt = 50;
                        hammer2_dump_chain(&hmp->vchain, 0, &dumpcnt);
                        hammer2_mount_unlock(hmp);
                        hammer2_chain_drop(&hmp->vchain);
-               } else {
-                       hammer2_mount_unlock(hmp);
-               }
-               if (hmp->pmp_count == 0) {
-                       mtx_lock(&hmp->wthread_mtx);
-                       hmp->wthread_destroy = 1;
-                       wakeup(&hmp->wthread_bioq);
-                       while (hmp->wthread_destroy != -1) {
-                               mtxsleep(&hmp->wthread_destroy,
-                                       &hmp->wthread_mtx, 0,
-                                       "umount-sleep", 0);
-                       }
-                       mtx_unlock(&hmp->wthread_mtx);
 
                        TAILQ_REMOVE(&hammer2_mntlist, hmp, mntentry);
                        kmalloc_destroy(&hmp->mchain);
                        kfree(hmp, M_HAMMER2);
+               } else {
+                       hammer2_mount_unlock(hmp);
                }
        }
 
index cd51082..95041a9 100644 (file)
@@ -1119,11 +1119,11 @@ hammer2_write_file(hammer2_inode_t *ip,
                 *          with IO_SYNC or IO_ASYNC set.  These writes
                 *          must be handled as the pageout daemon expects.
                 */
-               if (ap->a_ioflag & IO_SYNC) {
+               if (ioflag & IO_SYNC) {
                        bwrite(bp);
-               } else if ((ap->a_ioflag & IO_DIRECT) && endofblk) {
+               } else if ((ioflag & IO_DIRECT) && endofblk) {
                        bawrite(bp);
-               } else if (ap->a_ioflag & IO_ASYNC) {
+               } else if (ioflag & IO_ASYNC) {
                        bawrite(bp);
                } else {
                        bdwrite(bp);
@@ -2039,16 +2039,7 @@ static
 int
 hammer2_strategy_write(struct vop_strategy_args *ap)
 {      
-       /*
-        * XXX temporary because all write handling is currently
-        * in the vop_write path (which is incorrect and won't catch
-        * certain file modifications via mmap()).  What we need
-        * to do is have the strategy_write code queue the bio to
-        * one or more support threads which will do the complex
-        * logical->physical work and have the vop_write path just do
-        * normal operations on the logical buffer.
-        */
-       hammer2_mount_t *hmp;
+       hammer2_pfsmount_t *pmp;
        struct bio *bio;
        struct buf *bp;
        hammer2_inode_t *ip;
@@ -2056,12 +2047,13 @@ hammer2_strategy_write(struct vop_strategy_args *ap)
        bio = ap->a_bio;
        bp = bio->bio_buf;
        ip = VTOI(ap->a_vp);
-       hmp = ip->pmp->cluster.chains[0]->hmp;
+       pmp = ip->pmp;
        
-       mtx_lock(&hmp->wthread_mtx);
-       bioq_insert_tail(&hmp->wthread_bioq, ap->a_bio);
-       wakeup(&hmp->wthread_bioq);
-       mtx_unlock(&hmp->wthread_mtx);
+       mtx_lock(&pmp->wthread_mtx);
+       bioq_insert_tail(&pmp->wthread_bioq, ap->a_bio);
+       wakeup(&pmp->wthread_bioq);
+       mtx_unlock(&pmp->wthread_mtx);
+
        return(0);
 }