/*
 * NOTE(review): this is a diff fragment ("-"/"+" marked lines) over what
 * looks like DragonFly BSD vfs_journal.c; the enclosing function's
 * signature lies outside this view.  Comments below describe the "+"
 * (post-patch) state of the code.
 */
/* Clear any pending stop requests, then mark the writer active. */
jo->flags &= ~(MC_JOURNAL_STOP_REQ | MC_JOURNAL_STOP_IMM);
jo->flags |= MC_JOURNAL_WACTIVE;
/*
 * Create the journal write thread.  The patch replaces TDF_STOPREQ with
 * TDF_NOSTART: the thread is created in a not-yet-started state and is
 * explicitly kicked off by lwkt_schedule() below, after its priority is
 * set.  The thread name embeds the journal id (truncated to JIDMAX).
 */
lwkt_create(journal_wthread, jo, NULL, &jo->wthread,
- TDF_STOPREQ, -1,
+ TDF_NOSTART, -1,
"journal w:%.*s", JIDMAX, jo->id);
lwkt_setpri(&jo->wthread, TDPRI_KERN_DAEMON);
lwkt_schedule(&jo->wthread);
/*
 * Full-duplex journals additionally get a read thread, created the
 * same way (TDF_NOSTART, then setpri + schedule).
 * NOTE(review): the closing brace of this if-block is outside the
 * visible fragment.
 */
if (jo->flags & MC_JOURNAL_WANT_FULLDUPLEX) {
jo->flags |= MC_JOURNAL_RACTIVE;
lwkt_create(journal_rthread, jo, NULL, &jo->rthread,
- TDF_STOPREQ, -1,
+ TDF_NOSTART, -1,
"journal r:%.*s", JIDMAX, jo->id);
lwkt_setpri(&jo->rthread, TDPRI_KERN_DAEMON);
lwkt_schedule(&jo->rthread);
/*
 * Fragment: resolve a vnode back to a namecache entry and emit a
 * JLEAF_PATH_REF record for it.  The patch renames the vnode spinlock
 * field v_spinlock -> v_spin throughout.
 */
struct nchandle nch;
nch.mount = vp->v_mount;
/* Hold v_spin while walking the vnode's namecache list. */
- spin_lock(&vp->v_spinlock);
+ spin_lock(&vp->v_spin);
/* Stop at the first entry that is both resolved and not destroyed. */
TAILQ_FOREACH(nch.ncp, &vp->v_namecache, nc_vnode) {
if ((nch.ncp->nc_flag & (NCF_UNRESOLVED|NCF_DESTROYED)) == 0)
break;
}
if (nch.ncp) {
/*
 * Take a reference before dropping the spinlock so the entry
 * cannot be reclaimed while we write the path record.
 */
cache_hold(&nch);
- spin_unlock(&vp->v_spinlock);
+ spin_unlock(&vp->v_spin);
jrecord_write_path(jrec, JLEAF_PATH_REF, nch.ncp);
cache_drop(&nch);
} else {
/* No usable entry: just release the lock, emit nothing. */
- spin_unlock(&vp->v_spinlock);
+ spin_unlock(&vp->v_spin);
}
}
/*
 * Fragment: same pattern as the previous chunk, but intended to skip a
 * specific namecache entry (notncp) while searching.
 *
 * NOTE(review): the TAILQ_FOREACH below contains ONLY the "skip notncp"
 * continue and no break condition, so nch.ncp is always NULL when the
 * loop completes and the if (nch.ncp) branch can never be taken.  The
 * sibling loop above breaks on the first entry with
 * (NCF_UNRESOLVED|NCF_DESTROYED) clear; that resolved-check/break looks
 * like it was dropped here — confirm against upstream vfs_journal.c
 * before applying this hunk.
 */
struct nchandle nch;
nch.mount = vp->v_mount;
- spin_lock(&vp->v_spinlock);
+ spin_lock(&vp->v_spin);
TAILQ_FOREACH(nch.ncp, &vp->v_namecache, nc_vnode) {
if (nch.ncp == notncp)
continue;
}
if (nch.ncp) {
/* Hold the entry across the unlock so it stays valid for the write. */
cache_hold(&nch);
- spin_unlock(&vp->v_spinlock);
+ spin_unlock(&vp->v_spin);
jrecord_write_path(jrec, JLEAF_PATH_REF, nch.ncp);
cache_drop(&nch);
} else {
/* Nothing found (always the case as written — see NOTE above). */
- spin_unlock(&vp->v_spinlock);
+ spin_unlock(&vp->v_spin);
}
}