From 3b998fa96afe52828957ea4f65d15320eb0fe240 Mon Sep 17 00:00:00 2001
From: Matthew Dillon
Date: Sun, 6 Jun 2010 10:26:42 -0700
Subject: [PATCH] kernel - lwkt_token revamp

* Simplify the token API.  Hide the lwkt_tokref mechanics and simplify
  the lwkt_gettoken()/lwkt_reltoken() API to remove the need to declare
  and pass a lwkt_tokref along with the token.  This makes tokens
  operate more like locks.

  There is a minor restriction that tokens must be unlocked in exactly
  the reverse order they were locked in, and another restriction
  limiting the maximum number of tokens a thread can hold to a defined
  value (32 for now).  The tokrefs are now an array embedded in the
  thread structure.

* Improve performance when blocking and unblocking threads with
  recursively held tokens.

* Improve performance when acquiring the same token recursively.  This
  operation is now O(1) and requires no locks or critical sections of
  any sort.  This will allow us to acquire redundant tokens in deep
  call paths without having to worry about performance issues.

* Add a flags field to the lwkt_token and lwkt_tokref structures and
  add a flag which, when set, acquires the MP lock along with the
  token.  This will be used as a transitory mechanism in upcoming
  MPSAFE work.  The mplock feature in the token structure can be
  directly connected to an mpsafe sysctl without being vulnerable to
  state-change races.
---
 sys/ddb/db_ps.c                             |    4 +-
 .../linux/i386/linprocfs/linprocfs_subr.c   |   19 +-
 sys/emulation/ndis/kern_ndis.c              |   50 ++--
 sys/emulation/ndis/subr_ntoskrnl.c          |   57 ++--
 sys/kern/kern_lockf.c                       |    6 +-
 sys/kern/lwkt_thread.c                      |   11 +-
 sys/kern/lwkt_token.c                       |  244 +++++++++---------
 sys/kern/subr_disk.c                        |   21 +-
 sys/kern/subr_kobj.c                        |   14 +-
 sys/kern/subr_rman.c                        |   52 ++--
 sys/kern/sys_pipe.c                         |  144 +++++------
 sys/kern/vfs_bio.c                          |    5 +-
 sys/kern/vfs_lock.c                         |    2 +-
 sys/kern/vfs_mount.c                        |   62 ++---
 sys/kern/vfs_subr.c                         |   83 +++---
 sys/kern/vfs_sync.c                         |   22 +-
 sys/kern/vfs_vm.c                           |    5 +-
 sys/kern/vfs_vopops.c                       |   19 +-
 sys/netproto/smb/smb_subr.h                 |    1 -
 sys/platform/pc32/i386/busdma_machdep.c     |    9 +-
 sys/platform/pc64/x86_64/busdma_machdep.c   |    9 +-
 .../vkernel/platform/busdma_machdep.c       |    9 +-
 .../vkernel64/platform/busdma_machdep.c     |    9 +-
 sys/sys/mount.h                             |    8 +-
 sys/sys/rman.h                              |    5 +-
 sys/sys/thread.h                            |   48 +++-
 sys/sys/thread2.h                           |   12 -
 sys/vfs/fifofs/fifo_vnops.c                 |   93 +++----
 sys/vfs/gnu/ext2fs/ext2_ihash.c             |   37 ++-
 sys/vfs/gnu/ext2fs/ext2_vnops.c             |   15 +-
 sys/vfs/hammer/hammer_vnops.c               |   10 +-
 sys/vfs/hpfs/hpfs_hash.c                    |   31 +--
 sys/vfs/hpfs/hpfs_vfsops.c                  |    2 +-
 sys/vfs/isofs/cd9660/cd9660_node.c          |   21 +-
 sys/vfs/msdosfs/msdosfs_denode.c            |   26 +-
 sys/vfs/nfs/nfs_subs.c                      |    6 +-
 sys/vfs/nfs/nfs_vnops.c                     |    5 +-
 sys/vfs/ntfs/ntfs_ihash.c                   |   24 +-
 sys/vfs/ntfs/ntfs_vfsops.c                  |    1 -
 sys/vfs/udf/udf_vfsops.c                    |    2 +-
 sys/vfs/udf/udf_vnops.c                     |   19 +-
 sys/vfs/ufs/ffs_rawread.c                   |    5 +-
 sys/vfs/ufs/ffs_softdep.c                   |   16 +-
 sys/vfs/ufs/ufs_ihash.c                     |   37 ++-
 sys/vfs/ufs/ufs_vnops.c                     |   10 +-
 45 files changed, 581 insertions(+), 709 deletions(-)

diff --git a/sys/ddb/db_ps.c b/sys/ddb/db_ps.c
index 80797442f4..ef128a187e 100644
--- a/sys/ddb/db_ps.c
+++ b/sys/ddb/db_ps.c
@@ -201,10 +201,10 @@ db_dump_td_tokens(thread_t td)
 	lwkt_tokref_t ref;
 	lwkt_token_t tok;
 
-	if (td->td_toks == NULL)
+	if (TD_TOKS_NOT_HELD(td))
 		return;
 	db_printf(" TOKENS:");
-	for (ref = td->td_toks; ref; ref = ref->tr_next) {
+	for (ref = &td->td_toks_base; ref < td->td_toks_stop; ++ref) {
 		tok = ref->tr_tok;
 
 		db_printf(" %p[tok=%p", ref, ref->tr_tok);
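
Illustrative sketch, not part of the patch: a typical call site before
and after this change.  The foo_token/foo_op() names are hypothetical;
the pattern follows the conversions in the diffs below.

	/* Before: callers declare a lwkt_tokref and pass it explicitly */
	static struct lwkt_token foo_token;

	void
	foo_op_old(void)
	{
		lwkt_tokref ilock;

		lwkt_gettoken(&ilock, &foo_token);
		/* ... serialized section ... */
		lwkt_reltoken(&ilock);
	}

	/*
	 * After: the tokrefs are an array embedded in the thread
	 * structure (td_toks_base through td_toks_stop), so only the
	 * token itself is passed.  Releases must occur in reverse
	 * (LIFO) order and a thread may hold at most 32 tokens.
	 */
	void
	foo_op_new(void)
	{
		lwkt_gettoken(&foo_token);
		lwkt_gettoken(&foo_token);	/* recursive acquire, O(1) */
		/* ... serialized section ... */
		lwkt_reltoken(&foo_token);	/* LIFO unwind */
		lwkt_reltoken(&foo_token);
	}

diff --git a/sys/emulation/linux/i386/linprocfs/linprocfs_subr.c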
b/sys/emulation/linux/i386/linprocfs/linprocfs_subr.c index ccda25d32a..8108b59ca7 100644 --- a/sys/emulation/linux/i386/linprocfs/linprocfs_subr.c +++ b/sys/emulation/linux/i386/linprocfs/linprocfs_subr.c @@ -92,10 +92,9 @@ linprocfs_allocvp(struct mount *mp, struct vnode **vpp, long pid, struct pfsnode *pfs; struct vnode *vp; struct pfsnode **pp; - lwkt_tokref ilock; int error; - lwkt_gettoken(&ilock, &pfs_token); + lwkt_gettoken(&pfs_token); loop: for (pfs = pfshead[pid & PFSHMASK]; pfs; pfs = pfs->pfs_next) { vp = PFSTOV(pfs); @@ -105,7 +104,7 @@ loop: if (vget(vp, LK_EXCLUSIVE|LK_SLEEPFAIL)) goto loop; *vpp = vp; - lwkt_reltoken(&ilock); + lwkt_reltoken(&pfs_token); return (0); } } @@ -231,7 +230,7 @@ out: pfsvplock &= ~PROCFS_WANT; wakeup((caddr_t) &pfsvplock); } - lwkt_reltoken(&ilock); + lwkt_reltoken(&pfs_token); return (error); } @@ -241,16 +240,15 @@ linprocfs_freevp(struct vnode *vp) { struct pfsnode **pfspp; struct pfsnode *pfs = VTOPFS(vp); - lwkt_tokref ilock; - lwkt_gettoken(&ilock, &pfs_token); + lwkt_gettoken(&pfs_token); pfspp = &pfshead[pfs->pfs_pid & PFSHMASK]; while (*pfspp != pfs) { KKASSERT(*pfspp != NULL); pfspp = &(*pfspp)->pfs_next; } *pfspp = pfs->pfs_next; - lwkt_reltoken(&ilock); + lwkt_reltoken(&pfs_token); FREE(vp->v_data, M_TEMP); vp->v_data = NULL; return (0); @@ -405,13 +403,12 @@ vfs_findname(vfs_namemap_t *nm, char *buf, int buflen) void linprocfs_init(void) { - lwkt_token_init(&pfs_token); + lwkt_token_init(&pfs_token, 1); } void linprocfs_exit(struct thread *td) { - lwkt_tokref ilock; struct pfsnode *pfs; struct vnode *vp; pid_t pid; @@ -422,7 +419,7 @@ linprocfs_exit(struct thread *td) /* * Remove all the procfs vnodes associated with an exiting process. */ - lwkt_gettoken(&ilock, &pfs_token); + lwkt_gettoken(&pfs_token); restart: for (pfs = pfshead[pid & PFSHMASK]; pfs; pfs = pfs->pfs_next) { if (pfs->pfs_pid == pid) { @@ -433,7 +430,7 @@ restart: goto restart; } } - lwkt_reltoken(&ilock); + lwkt_reltoken(&pfs_token); lwkt_token_uninit(&pfs_token); } diff --git a/sys/emulation/ndis/kern_ndis.c b/sys/emulation/ndis/kern_ndis.c index 4c66292d5e..2f9efdd503 100644 --- a/sys/emulation/ndis/kern_ndis.c +++ b/sys/emulation/ndis/kern_ndis.c @@ -197,7 +197,6 @@ ndis_runq(void *arg) { struct ndis_req *r = NULL, *die = NULL; struct ndisproc *p; - struct lwkt_tokref tokref; p = arg; @@ -208,19 +207,19 @@ ndis_runq(void *arg) /* Look for any jobs on the work queue. */ - lwkt_gettoken(&tokref, &ndis_thr_token); + lwkt_gettoken(&ndis_thr_token); p->np_state = NDIS_PSTATE_RUNNING; while(STAILQ_FIRST(p->np_q) != NULL) { r = STAILQ_FIRST(p->np_q); STAILQ_REMOVE_HEAD(p->np_q, link); - lwkt_reltoken(&tokref); + lwkt_reltoken(&ndis_thr_token); /* Do the work. */ if (r->nr_func != NULL) (*r->nr_func)(r->nr_arg); - lwkt_gettoken(&tokref, &ndis_thr_token); + lwkt_gettoken(&ndis_thr_token); STAILQ_INSERT_HEAD(&ndis_free, r, link); /* Check for a shutdown request */ @@ -229,7 +228,7 @@ ndis_runq(void *arg) die = r; } p->np_state = NDIS_PSTATE_SLEEPING; - lwkt_reltoken(&tokref); + lwkt_reltoken(&ndis_thr_token); /* Bail if we were told to shut down. 
*/ @@ -247,7 +246,7 @@ ndis_create_kthreads(void) struct ndis_req *r; int i, error = 0; - lwkt_token_init(&ndis_thr_token); + lwkt_token_init(&ndis_thr_token, 1); STAILQ_INIT(&ndis_ttodo); STAILQ_INIT(&ndis_itodo); @@ -313,7 +312,6 @@ ndis_stop_thread(int t) struct ndis_req *r; struct ndisqhead *q; thread_t td; - struct lwkt_tokref tokref; if (t == NDIS_TASKQUEUE) { q = &ndis_ttodo; @@ -325,14 +323,14 @@ ndis_stop_thread(int t) /* Create and post a special 'exit' job. */ - lwkt_gettoken(&tokref, &ndis_thr_token); + lwkt_gettoken(&ndis_thr_token); r = STAILQ_FIRST(&ndis_free); STAILQ_REMOVE_HEAD(&ndis_free, link); r->nr_func = NULL; r->nr_arg = NULL; r->nr_exit = TRUE; STAILQ_INSERT_TAIL(q, r, link); - lwkt_reltoken(&tokref); + lwkt_reltoken(&ndis_thr_token); ndis_thresume(td); @@ -342,14 +340,12 @@ ndis_stop_thread(int t) /* Now empty the job list. */ - lwkt_gettoken(&tokref, &ndis_thr_token); + lwkt_gettoken(&ndis_thr_token); while ((r = STAILQ_FIRST(q)) != NULL) { STAILQ_REMOVE_HEAD(q, link); STAILQ_INSERT_HEAD(&ndis_free, r, link); } - lwkt_reltoken(&tokref); - - return; + lwkt_reltoken(&ndis_thr_token); } static int @@ -357,14 +353,13 @@ ndis_enlarge_thrqueue(int cnt) { struct ndis_req *r; int i; - struct lwkt_tokref tokref; for (i = 0; i < cnt; i++) { r = kmalloc(sizeof(struct ndis_req), M_DEVBUF, M_WAITOK); - lwkt_gettoken(&tokref, &ndis_thr_token); + lwkt_gettoken(&ndis_thr_token); STAILQ_INSERT_HEAD(&ndis_free, r, link); ndis_jobs++; - lwkt_reltoken(&tokref); + lwkt_reltoken(&ndis_thr_token); } return(0); @@ -375,18 +370,17 @@ ndis_shrink_thrqueue(int cnt) { struct ndis_req *r; int i; - struct lwkt_tokref tokref; for (i = 0; i < cnt; i++) { - lwkt_gettoken(&tokref, &ndis_thr_token); + lwkt_gettoken(&ndis_thr_token); r = STAILQ_FIRST(&ndis_free); if (r == NULL) { - lwkt_reltoken(&tokref); + lwkt_reltoken(&ndis_thr_token); return(ENOMEM); } STAILQ_REMOVE_HEAD(&ndis_free, link); ndis_jobs--; - lwkt_reltoken(&tokref); + lwkt_reltoken(&ndis_thr_token); kfree(r, M_DEVBUF); } @@ -399,7 +393,6 @@ ndis_unsched(void (*func)(void *), void *arg, int t) struct ndis_req *r; struct ndisqhead *q; thread_t td; - struct lwkt_tokref tokref; if (t == NDIS_TASKQUEUE) { q = &ndis_ttodo; @@ -409,17 +402,17 @@ ndis_unsched(void (*func)(void *), void *arg, int t) td = ndis_iproc.np_td; } - lwkt_gettoken(&tokref, &ndis_thr_token); + lwkt_gettoken(&ndis_thr_token); STAILQ_FOREACH(r, q, link) { if (r->nr_func == func && r->nr_arg == arg) { STAILQ_REMOVE(q, r, ndis_req, link); STAILQ_INSERT_HEAD(&ndis_free, r, link); - lwkt_reltoken(&tokref); + lwkt_reltoken(&ndis_thr_token); return(0); } } - lwkt_reltoken(&tokref); + lwkt_reltoken(&ndis_thr_token); return(ENOENT); } @@ -431,7 +424,6 @@ ndis_sched(void (*func)(void *), void *arg, int t) struct ndisqhead *q; thread_t td; int s; - struct lwkt_tokref tokref; if (t == NDIS_TASKQUEUE) { q = &ndis_ttodo; @@ -441,20 +433,20 @@ ndis_sched(void (*func)(void *), void *arg, int t) td = ndis_iproc.np_td; } - lwkt_gettoken(&tokref, &ndis_thr_token); + lwkt_gettoken(&ndis_thr_token); /* * Check to see if an instance of this job is already * pending. If so, don't bother queuing it again. 
*/ STAILQ_FOREACH(r, q, link) { if (r->nr_func == func && r->nr_arg == arg) { - lwkt_reltoken(&tokref); + lwkt_reltoken(&ndis_thr_token); return(0); } } r = STAILQ_FIRST(&ndis_free); if (r == NULL) { - lwkt_reltoken(&tokref); + lwkt_reltoken(&ndis_thr_token); return(EAGAIN); } STAILQ_REMOVE_HEAD(&ndis_free, link); @@ -466,7 +458,7 @@ ndis_sched(void (*func)(void *), void *arg, int t) s = ndis_tproc.np_state; else s = ndis_iproc.np_state; - lwkt_reltoken(&tokref); + lwkt_reltoken(&ndis_thr_token); /* * Post the job, but only if the thread is actually blocked diff --git a/sys/emulation/ndis/subr_ntoskrnl.c b/sys/emulation/ndis/subr_ntoskrnl.c index 707bceccf8..2889a11bf6 100644 --- a/sys/emulation/ndis/subr_ntoskrnl.c +++ b/sys/emulation/ndis/subr_ntoskrnl.c @@ -184,7 +184,7 @@ static MALLOC_DEFINE(M_NDIS, "ndis", "ndis emulation"); int ntoskrnl_libinit(void) { - lwkt_token_init(&ntoskrnl_dispatchtoken); + lwkt_token_init(&ntoskrnl_dispatchtoken, 1); ntoskrnl_init_lock(&ntoskrnl_global); TAILQ_INIT(&ntoskrnl_reflist); return(0); @@ -309,11 +309,10 @@ ntoskrnl_wakeup(void *arg) wait_block *w; list_entry *e; struct thread *td; - struct lwkt_tokref tokref; obj = arg; - lwkt_gettoken(&tokref, &ntoskrnl_dispatchtoken); + lwkt_gettoken(&ntoskrnl_dispatchtoken); obj->dh_sigstate = TRUE; e = obj->dh_waitlisthead.nle_flink; while (e != &obj->dh_waitlisthead) { @@ -328,9 +327,7 @@ ntoskrnl_wakeup(void *arg) break; e = e->nle_flink; } - lwkt_reltoken(&tokref); - - return; + lwkt_reltoken(&ntoskrnl_dispatchtoken); } static void @@ -408,12 +405,11 @@ ntoskrnl_waitforobj(nt_dispatch_header *obj, uint32_t reason, int error = 0; int ticks; uint64_t curtime; - struct lwkt_tokref tokref; if (obj == NULL) return(STATUS_INVALID_PARAMETER); - lwkt_gettoken(&tokref, &ntoskrnl_dispatchtoken); + lwkt_gettoken(&ntoskrnl_dispatchtoken); /* * See if the object is a mutex. If so, and we already own @@ -432,13 +428,13 @@ ntoskrnl_waitforobj(nt_dispatch_header *obj, uint32_t reason, obj->dh_sigstate = FALSE; km->km_acquirecnt++; km->km_ownerthread = curthread->td_proc; - lwkt_reltoken(&tokref); + lwkt_reltoken(&ntoskrnl_dispatchtoken); return (STATUS_SUCCESS); } } else if (obj->dh_sigstate == TRUE) { if (obj->dh_type == EVENT_TYPE_SYNC) obj->dh_sigstate = FALSE; - lwkt_reltoken(&tokref); + lwkt_reltoken(&ntoskrnl_dispatchtoken); return (STATUS_SUCCESS); } @@ -473,7 +469,7 @@ ntoskrnl_waitforobj(nt_dispatch_header *obj, uint32_t reason, } } - lwkt_reltoken(&tokref); + lwkt_reltoken(&ntoskrnl_dispatchtoken); ticks = 1 + tv.tv_sec * hz + tv.tv_usec * hz / 1000000; error = ndis_thsuspend(td, duetime == NULL ? 
0 : ticks); @@ -484,7 +480,7 @@ ntoskrnl_waitforobj(nt_dispatch_header *obj, uint32_t reason, if (error == EWOULDBLOCK) { REMOVE_LIST_ENTRY((&w.wb_waitlist)); - lwkt_reltoken(&tokref); + lwkt_reltoken(&ntoskrnl_dispatchtoken); return(STATUS_TIMEOUT); } @@ -507,7 +503,7 @@ ntoskrnl_waitforobj(nt_dispatch_header *obj, uint32_t reason, obj->dh_sigstate = FALSE; REMOVE_LIST_ENTRY((&w.wb_waitlist)); - lwkt_reltoken(&tokref); + lwkt_reltoken(&ntoskrnl_dispatchtoken); return(STATUS_SUCCESS); } @@ -526,14 +522,13 @@ ntoskrnl_waitforobjs(uint32_t cnt, nt_dispatch_header *obj[], int i, wcnt = 0, widx = 0, error = 0; uint64_t curtime; struct timespec t1, t2; - struct lwkt_tokref tokref; if (cnt > MAX_WAIT_OBJECTS) return(STATUS_INVALID_PARAMETER); if (cnt > THREAD_WAIT_OBJECTS && wb_array == NULL) return(STATUS_INVALID_PARAMETER); - lwkt_gettoken(&tokref, &ntoskrnl_dispatchtoken); + lwkt_gettoken(&ntoskrnl_dispatchtoken); if (wb_array == NULL) w = &_wb_array[0]; @@ -554,7 +549,7 @@ ntoskrnl_waitforobjs(uint32_t cnt, nt_dispatch_header *obj[], km->km_acquirecnt++; km->km_ownerthread = curthread->td_proc; if (wtype == WAITTYPE_ANY) { - lwkt_reltoken(&tokref); + lwkt_reltoken(&ntoskrnl_dispatchtoken); return (STATUS_WAIT_0 + i); } } @@ -562,7 +557,7 @@ ntoskrnl_waitforobjs(uint32_t cnt, nt_dispatch_header *obj[], if (obj[i]->dh_type == EVENT_TYPE_SYNC) obj[i]->dh_sigstate = FALSE; if (wtype == WAITTYPE_ANY) { - lwkt_reltoken(&tokref); + lwkt_reltoken(&ntoskrnl_dispatchtoken); return (STATUS_WAIT_0 + i); } } @@ -602,7 +597,7 @@ ntoskrnl_waitforobjs(uint32_t cnt, nt_dispatch_header *obj[], while (wcnt) { nanotime(&t1); - lwkt_reltoken(&tokref); + lwkt_reltoken(&ntoskrnl_dispatchtoken); if (duetime) { ticks = 1 + tv.tv_sec * hz + tv.tv_usec * hz / 1000000; @@ -611,7 +606,7 @@ ntoskrnl_waitforobjs(uint32_t cnt, nt_dispatch_header *obj[], error = ndis_thsuspend(td, 0); } - lwkt_gettoken(&tokref, &ntoskrnl_dispatchtoken); + lwkt_gettoken(&ntoskrnl_dispatchtoken); nanotime(&t2); for (i = 0; i < cnt; i++) { @@ -647,16 +642,16 @@ ntoskrnl_waitforobjs(uint32_t cnt, nt_dispatch_header *obj[], } if (error == EWOULDBLOCK) { - lwkt_reltoken(&tokref); + lwkt_reltoken(&ntoskrnl_dispatchtoken); return(STATUS_TIMEOUT); } if (wtype == WAITTYPE_ANY && wcnt) { - lwkt_reltoken(&tokref); + lwkt_reltoken(&ntoskrnl_dispatchtoken); return(STATUS_WAIT_0 + widx); } - lwkt_reltoken(&tokref); + lwkt_reltoken(&ntoskrnl_dispatchtoken); return(STATUS_SUCCESS); } @@ -1270,20 +1265,19 @@ ntoskrnl_init_mutex(kmutant *kmutex, uint32_t level) __stdcall static uint32_t ntoskrnl_release_mutex(kmutant *kmutex, uint8_t kwait) { - struct lwkt_tokref tokref; - - lwkt_gettoken(&tokref, &ntoskrnl_dispatchtoken); + lwkt_gettoken(&ntoskrnl_dispatchtoken); if (kmutex->km_ownerthread != curthread->td_proc) { - lwkt_reltoken(&tokref); + lwkt_reltoken(&ntoskrnl_dispatchtoken); return(STATUS_MUTANT_NOT_OWNED); } kmutex->km_acquirecnt--; if (kmutex->km_acquirecnt == 0) { kmutex->km_ownerthread = NULL; - lwkt_reltoken(&tokref); + lwkt_reltoken(&ntoskrnl_dispatchtoken); ntoskrnl_wakeup(&kmutex->km_header); - } else - lwkt_reltoken(&tokref); + } else { + lwkt_reltoken(&ntoskrnl_dispatchtoken); + } return(kmutex->km_acquirecnt); } @@ -1308,12 +1302,11 @@ __stdcall uint32_t ntoskrnl_reset_event(nt_kevent *kevent) { uint32_t prevstate; - struct lwkt_tokref tokref; - lwkt_gettoken(&tokref, &ntoskrnl_dispatchtoken); + lwkt_gettoken(&ntoskrnl_dispatchtoken); prevstate = kevent->k_header.dh_sigstate; kevent->k_header.dh_sigstate = FALSE; - lwkt_reltoken(&tokref); + 
lwkt_reltoken(&ntoskrnl_dispatchtoken); return(prevstate); } diff --git a/sys/kern/kern_lockf.c b/sys/kern/kern_lockf.c index 043c5b7ff2..20470393e0 100644 --- a/sys/kern/kern_lockf.c +++ b/sys/kern/kern_lockf.c @@ -191,7 +191,7 @@ lf_advlock(struct vop_advlock_args *ap, struct lockf *lock, u_quad_t size) struct proc *owner; off_t start, end; int type, flags, error; - lwkt_tokref ilock; + lwkt_token_t token; /* * Convert the flock structure into a start and end. @@ -236,7 +236,7 @@ lf_advlock(struct vop_advlock_args *ap, struct lockf *lock, u_quad_t size) /* * Do the requested operation. */ - lwkt_getpooltoken(&ilock, lock); + token = lwkt_getpooltoken(lock); if (lock->init_done == 0) { TAILQ_INIT(&lock->lf_range); @@ -273,7 +273,7 @@ lf_advlock(struct vop_advlock_args *ap, struct lockf *lock, u_quad_t size) error = EINVAL; break; } - lwkt_reltoken(&ilock); + lwkt_reltoken(token); return(error); } diff --git a/sys/kern/lwkt_thread.c b/sys/kern/lwkt_thread.c index 0c7dd6bbb0..9f6ba58ec9 100644 --- a/sys/kern/lwkt_thread.c +++ b/sys/kern/lwkt_thread.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003,2004 The DragonFly Project. All rights reserved. + * Copyright (c) 2003-2010 The DragonFly Project. All rights reserved. * * This code is derived from software contributed to The DragonFly Project * by Matthew Dillon @@ -356,6 +356,7 @@ lwkt_init_thread(thread_t td, void *stack, int stksize, int flags, td->td_flags = flags; td->td_gd = gd; td->td_pri = TDPRI_KERN_DAEMON + TDPRI_CRIT; + td->td_toks_stop = &td->td_toks_base; #ifdef SMP if ((flags & TDF_MPSAFE) == 0) td->td_mpcount = 1; @@ -520,7 +521,7 @@ lwkt_switch(void) td->td_release(td); crit_enter_gd(gd); - if (td->td_toks) + if (TD_TOKS_HELD(td)) lwkt_relalltokens(td); /* @@ -635,7 +636,7 @@ again: * cause the core MP lock to be released. */ if ((ntd->td_mpcount && mpheld == 0 && !cpu_try_mplock()) || - (ntd->td_toks && lwkt_getalltokens(ntd) == 0) + (TD_TOKS_HELD(ntd) && lwkt_getalltokens(ntd) == 0) ) { u_int32_t rqmask = gd->gd_runqmask; @@ -653,7 +654,7 @@ again: * failure, but the variable is only needed for * the loop. */ - if (ntd->td_toks && !lwkt_getalltokens(ntd)) { + if (TD_TOKS_HELD(ntd) && !lwkt_getalltokens(ntd)) { /* spinning due to token contention */ #ifdef INVARIANTS ++token_contention_count; @@ -872,7 +873,7 @@ lwkt_preempt(thread_t ntd, int critpri) need_lwkt_resched(); return; } - if (ntd->td_toks) { + if (TD_TOKS_HELD(ntd)) { ++preempt_miss; need_lwkt_resched(); return; diff --git a/sys/kern/lwkt_token.c b/sys/kern/lwkt_token.c index 143d0b589b..5956c1a3a1 100644 --- a/sys/kern/lwkt_token.c +++ b/sys/kern/lwkt_token.c @@ -61,6 +61,7 @@ #include #include +#include #include #include @@ -126,6 +127,18 @@ _lwkt_token_pool_lookup(void *ptr) return(&pool_tokens[i & LWKT_MASK_POOL_TOKENS]); } +/* + * Initialize a tokref_t prior to making it visible in the thread's + * token array. + */ +static __inline +void +_lwkt_tokref_init(lwkt_tokref_t ref, lwkt_token_t tok, thread_t td) +{ + ref->tr_tok = tok; + ref->tr_owner = td; + ref->tr_flags = tok->t_flags; +} /* * Obtain all the tokens required by the specified thread on the current @@ -135,18 +148,23 @@ _lwkt_token_pool_lookup(void *ptr) * lwkt_getalltokens is called by the LWKT scheduler to acquire all * tokens that the thread had acquired prior to going to sleep. * + * The scheduler is responsible for maintaining the MP lock count, so + * we don't need to deal with tr_flags here. + * * Called from a critical section. 
*/ int lwkt_getalltokens(thread_t td) { - lwkt_tokref_t scan1; - lwkt_tokref_t scan2; + lwkt_tokref_t scan; lwkt_tokref_t ref; lwkt_token_t tok; - for (scan1 = td->td_toks; scan1; scan1 = scan1->tr_next) { - tok = scan1->tr_tok; + /* + * Acquire tokens in forward order, assign or validate tok->t_ref. + */ + for (scan = &td->td_toks_base; scan < td->td_toks_stop; ++scan) { + tok = scan->tr_tok; for (;;) { /* * Try to acquire the token if we do not already have @@ -157,41 +175,28 @@ lwkt_getalltokens(thread_t td) * lost a cpu race. */ ref = tok->t_ref; - if (ref == scan1) - break; if (ref == NULL) { - if (atomic_cmpset_ptr(&tok->t_ref, NULL, scan1)) + if (atomic_cmpset_ptr(&tok->t_ref, NULL, scan)) break; continue; } /* - * If acquisition fails the token might be held - * recursively by another ref owned by the same - * thread. - * - * NOTE! We cannot just dereference 'ref' to test - * the tr_owner as its storage will be - * unstable if it belongs to another thread. - * - * NOTE! Since tokens are inserted at the head - * of the list we must migrate such tokens - * so the actual lock is not cleared until - * the last release. + * Test if ref is already recursively held by this + * thread. We cannot safely dereference tok->t_ref + * (it might belong to another thread and is thus + * unstable), but we don't have to. We can simply + * range-check it. */ - scan2 = td->td_toks; - for (;;) { - if (scan2 == scan1) { - lwkt_relalltokens(td); - return(FALSE); - } - if (scan2 == ref) { - tok->t_ref = scan1; - break; - } - scan2 = scan2->tr_next; - } - break; + if (ref >= &td->td_toks_base && ref < td->td_toks_stop) + break; + + /* + * Otherwise we failed to acquire all the tokens. + * Undo and return. + */ + lwkt_relalltokens(td); + return(FALSE); } } return (TRUE); @@ -203,49 +208,47 @@ lwkt_getalltokens(thread_t td) * This code is really simple. Even in cases where we own all the tokens * note that t_ref may not match the scan for recursively held tokens, * or for the case where a lwkt_getalltokens() failed. + * + * The scheduler is responsible for maintaining the MP lock count, so + * we don't need to deal with tr_flags here. * * Called from a critical section. */ void lwkt_relalltokens(thread_t td) { - lwkt_tokref_t scan1; + lwkt_tokref_t scan; lwkt_token_t tok; - for (scan1 = td->td_toks; scan1; scan1 = scan1->tr_next) { - tok = scan1->tr_tok; - if (tok->t_ref == scan1) + for (scan = &td->td_toks_base; scan < td->td_toks_stop; ++scan) { + tok = scan->tr_tok; + if (tok->t_ref == scan) tok->t_ref = NULL; } } /* - * Token acquisition helper function. Note that get/trytokenref do not - * reset t_lastowner if the token is already held. Only lwkt_token_is_stale() - * is allowed to do that. + * Token acquisition helper function. The caller must have already + * made nref visible by adjusting td_toks_stop and will be responsible + * for the disposition of nref on either success or failure. * - * NOTE: On failure, this function doesn't remove the token from the - * thread's token list, so that you have to perform that yourself: - * - * td->td_toks = ref->tr_next; + * When acquiring tokens recursively we want tok->t_ref to point to + * the outer (first) acquisition so it gets cleared only on the last + * release. */ static __inline int _lwkt_trytokref2(lwkt_tokref_t nref, thread_t td) { - lwkt_tokref_t ref; - lwkt_tokref_t scan2; lwkt_token_t tok; + lwkt_tokref_t ref; KKASSERT(td->td_gd->gd_intr_nesting_level == 0); /* - * Link the tokref into curthread's list. 
Make sure the - * cpu does not reorder these instructions! + * Make sure the compiler does not reorder prior instructions + * beyond this demark. */ - nref->tr_next = td->td_toks; - cpu_ccfence(); - td->td_toks = nref; cpu_ccfence(); /* @@ -258,8 +261,6 @@ _lwkt_trytokref2(lwkt_tokref_t nref, thread_t td) * it. */ ref = tok->t_ref; - if (ref == nref) - return (TRUE); if (ref == NULL) { /* * NOTE: If atomic_cmpset_ptr() fails we have to @@ -272,23 +273,18 @@ _lwkt_trytokref2(lwkt_tokref_t nref, thread_t td) } /* - * If acquisition fails the token might be held - * recursively by another ref owned by the same - * thread. - * - * NOTE! We cannot just dereference 'ref' to test - * the tr_owner as its storage will be - * unstable if it belongs to another thread. - * - * NOTE! We do not migrate t_ref to nref here as we - * want the recursion unwinding in reverse order - * to NOT release the token until last the - * recursive ref is released. + * Test if ref is already recursively held by this + * thread. We cannot safely dereference tok->t_ref + * (it might belong to another thread and is thus + * unstable), but we don't have to. We can simply + * range-check it. + */ + if (ref >= &td->td_toks_base && ref < td->td_toks_stop) + return(TRUE); + + /* + * Otherwise we failed. */ - for (scan2 = nref->tr_next; scan2; scan2 = scan2->tr_next) { - if (scan2 == ref) - return(TRUE); - } return(FALSE); } } @@ -300,11 +296,17 @@ static __inline int _lwkt_trytokref(lwkt_tokref_t ref, thread_t td) { + if ((ref->tr_flags & LWKT_TOKEN_MPSAFE) == 0) { + if (try_mplock() == 0) + return (FALSE); + } if (_lwkt_trytokref2(ref, td) == FALSE) { /* - * Cleanup. Remove the token from the thread's list. + * Cleanup, deactivate the failed token. */ - td->td_toks = ref->tr_next; + --td->td_toks_stop; + if ((ref->tr_flags & LWKT_TOKEN_MPSAFE) == 0) + rel_mplock(); return (FALSE); } return (TRUE); @@ -317,98 +319,99 @@ static __inline void _lwkt_gettokref(lwkt_tokref_t ref, thread_t td) { + if ((ref->tr_flags & LWKT_TOKEN_MPSAFE) == 0) + get_mplock(); if (_lwkt_trytokref2(ref, td) == FALSE) { /* * Give up running if we can't acquire the token right now. - * But as we have linked in the tokref to the thread's list - * (_lwkt_trytokref2), the scheduler now takes care to acquire - * the token (by calling lwkt_getalltokens) before resuming - * execution. As such, when we return from lwkt_yield(), - * the token is acquired. * - * Since we failed this is not a recursive token so upon + * Since the tokref is already active the scheduler now + * takes care of acquisition, so we need only call + * lwkt_yield(). + * + * Since we failed this was not a recursive token so upon * return tr_tok->t_ref should be assigned to this specific * ref. 
*/ logtoken(fail, ref); lwkt_yield(); logtoken(succ, ref); -#if 0 - if (ref->tr_tok->t_ref != ref) { - lwkt_tokref_t scan; - kprintf("gettokref %p failed, held by tok %p ref %p\n", - ref, ref->tr_tok, ref->tr_tok->t_ref); - for (scan = td->td_toks; scan; scan = scan->tr_next) { - kprintf(" %p\n", scan); - } - } -#endif KKASSERT(ref->tr_tok->t_ref == ref); } } void -lwkt_gettoken(lwkt_tokref_t ref, lwkt_token_t tok) +lwkt_gettoken(lwkt_token_t tok) { thread_t td = curthread; + lwkt_tokref_t ref; - lwkt_tokref_init(ref, tok, td); + ref = td->td_toks_stop; + KKASSERT(ref < &td->td_toks_end); + _lwkt_tokref_init(ref, tok, td); + ++td->td_toks_stop; _lwkt_gettokref(ref, td); } -void -lwkt_getpooltoken(lwkt_tokref_t ref, void *ptr) +lwkt_token_t +lwkt_getpooltoken(void *ptr) { thread_t td = curthread; + lwkt_token_t tok; + lwkt_tokref_t ref; - lwkt_tokref_init(ref, _lwkt_token_pool_lookup(ptr), td); + ref = td->td_toks_stop; + KKASSERT(ref < &td->td_toks_end); + tok = _lwkt_token_pool_lookup(ptr); + _lwkt_tokref_init(ref, tok, td); + ++td->td_toks_stop; _lwkt_gettokref(ref, td); -} - -void -lwkt_gettokref(lwkt_tokref_t ref) -{ - _lwkt_gettokref(ref, ref->tr_owner); + return(tok); } int -lwkt_trytoken(lwkt_tokref_t ref, lwkt_token_t tok) +lwkt_trytoken(lwkt_token_t tok) { thread_t td = curthread; + lwkt_tokref_t ref; - lwkt_tokref_init(ref, tok, td); + ref = td->td_toks_stop; + KKASSERT(ref < &td->td_toks_end); + _lwkt_tokref_init(ref, tok, td); + ++td->td_toks_stop; return(_lwkt_trytokref(ref, td)); } -int -lwkt_trytokref(lwkt_tokref_t ref) -{ - return(_lwkt_trytokref(ref, ref->tr_owner)); -} - /* * Release a serializing token. * - * WARNING! Any recursive tokens must be released in reverse order. + * WARNING! All tokens must be released in reverse order. This will be + * asserted. */ void -lwkt_reltoken(lwkt_tokref_t ref) +lwkt_reltoken(lwkt_token_t tok) { - struct lwkt_tokref **scanp; - lwkt_token_t tok; - thread_t td; + thread_t td = curthread; + lwkt_tokref_t ref; - tok = ref->tr_tok; + /* + * Remove ref from thread token list and assert that it matches + * the token passed in. Tokens must be released in reverse order. + */ + ref = td->td_toks_stop - 1; + KKASSERT(ref >= &td->td_toks_base && ref->tr_tok == tok); + td->td_toks_stop = ref; + + /* + * If the token was not MPSAFE release the MP lock. + */ + if ((ref->tr_flags & LWKT_TOKEN_MPSAFE) == 0) + rel_mplock(); /* - * Remove the ref from the thread's token list. - * - * NOTE: td == curthread + * Make sure the compiler does not reorder the clearing of + * tok->t_ref. */ - td = ref->tr_owner; - for (scanp = &td->td_toks; *scanp != ref; scanp = &((*scanp)->tr_next)) - ; - *scanp = ref->tr_next; cpu_ccfence(); /* @@ -432,7 +435,7 @@ lwkt_token_pool_init(void) int i; for (i = 0; i < LWKT_NUM_POOL_TOKENS; ++i) - lwkt_token_init(&pool_tokens[i]); + lwkt_token_init(&pool_tokens[i], 1); } lwkt_token_t @@ -446,9 +449,10 @@ lwkt_token_pool_lookup(void *ptr) * and reset the generation count. */ void -lwkt_token_init(lwkt_token_t tok) +lwkt_token_init(lwkt_token_t tok, int mpsafe) { tok->t_ref = NULL; + tok->t_flags = mpsafe ? 
LWKT_TOKEN_MPSAFE : 0; } void diff --git a/sys/kern/subr_disk.c b/sys/kern/subr_disk.c index 530262ac8f..3911be0fa5 100644 --- a/sys/kern/subr_disk.c +++ b/sys/kern/subr_disk.c @@ -357,7 +357,6 @@ disk_msg_core(void *arg) { struct disk *dp; struct diskslice *sp; - lwkt_tokref ilock; disk_msg_t msg; int run; @@ -383,9 +382,9 @@ disk_msg_core(void *arg) dp->d_cdev->si_name); devfs_destroy_subnames(dp->d_cdev->si_name); devfs_destroy_dev(dp->d_cdev); - lwkt_gettoken(&ilock, &disklist_token); + lwkt_gettoken(&disklist_token); LIST_REMOVE(dp, d_list); - lwkt_reltoken(&ilock); + lwkt_reltoken(&disklist_token); if (dp->d_info.d_serialno) { kfree(dp->d_info.d_serialno, M_TEMP); dp->d_info.d_serialno = NULL; @@ -500,7 +499,6 @@ disk_msg_send_sync(uint32_t cmd, void *load, void *load2) cdev_t disk_create(int unit, struct disk *dp, struct dev_ops *raw_ops) { - lwkt_tokref ilock; cdev_t rawdev; disk_debug(1, @@ -525,13 +523,11 @@ disk_create(int unit, struct disk *dp, struct dev_ops *raw_ops) dsched_disk_create_callback(dp, raw_ops->head.name, unit); - lwkt_gettoken(&ilock, &disklist_token); + lwkt_gettoken(&disklist_token); LIST_INSERT_HEAD(&disklist, dp, d_list); - lwkt_reltoken(&ilock); + lwkt_reltoken(&disklist_token); - disk_debug(1, - "disk_create (end): %s%d\n", - raw_ops->head.name, unit); + disk_debug(1, "disk_create (end): %s%d\n", raw_ops->head.name, unit); return (dp->d_rawdev); } @@ -705,14 +701,13 @@ struct disk * disk_enumerate(struct disk *disk) { struct disk *dp; - lwkt_tokref ilock; - lwkt_gettoken(&ilock, &disklist_token); + lwkt_gettoken(&disklist_token); if (!disk) dp = (LIST_FIRST(&disklist)); else dp = (LIST_NEXT(disk, d_list)); - lwkt_reltoken(&ilock); + lwkt_reltoken(&disklist_token); return dp; } @@ -1195,7 +1190,7 @@ disk_init(void) objcache_malloc_free, &disk_msg_malloc_args); - lwkt_token_init(&disklist_token); + lwkt_token_init(&disklist_token, 1); /* * Initialize the reply-only port which acts as a message drain diff --git a/sys/kern/subr_kobj.c b/sys/kern/subr_kobj.c index c12d68b9d2..749ac5f465 100644 --- a/sys/kern/subr_kobj.c +++ b/sys/kern/subr_kobj.c @@ -66,7 +66,7 @@ static int kobj_next_id = 1; static void kobj_init_token(void *arg) { - lwkt_token_init(&kobj_token); + lwkt_token_init(&kobj_token, 1); } SYSINIT(kobj, SI_BOOT1_LOCK, SI_ORDER_ANY, kobj_init_token, NULL); @@ -220,9 +220,7 @@ kobj_class_free(kobj_class_t cls) void kobj_class_instantiate(kobj_class_t cls) { - lwkt_tokref ilock; - - lwkt_gettoken(&ilock, &kobj_token); + lwkt_gettoken(&kobj_token); crit_enter(); if (!cls->ops) @@ -230,15 +228,13 @@ kobj_class_instantiate(kobj_class_t cls) cls->refs++; crit_exit(); - lwkt_reltoken(&ilock); + lwkt_reltoken(&kobj_token); } void kobj_class_uninstantiate(kobj_class_t cls) { - lwkt_tokref ilock; - - lwkt_gettoken(&ilock, &kobj_token); + lwkt_gettoken(&kobj_token); crit_enter(); cls->refs--; @@ -246,7 +242,7 @@ kobj_class_uninstantiate(kobj_class_t cls) kobj_class_free(cls); crit_exit(); - lwkt_reltoken(&ilock); + lwkt_reltoken(&kobj_token); } kobj_t diff --git a/sys/kern/subr_rman.c b/sys/kern/subr_rman.c index 2ff224f579..e98e89ef4c 100644 --- a/sys/kern/subr_rman.c +++ b/sys/kern/subr_rman.c @@ -87,12 +87,11 @@ int rman_init(struct rman *rm) { static int once; - lwkt_tokref ilock; if (once == 0) { once = 1; TAILQ_INIT(&rman_head); - lwkt_token_init(&rman_tok); + lwkt_token_init(&rman_tok, 1); } if (rm->rm_type == RMAN_UNINIT) @@ -104,11 +103,11 @@ rman_init(struct rman *rm) rm->rm_slock = kmalloc(sizeof *rm->rm_slock, M_RMAN, M_NOWAIT); if (rm->rm_slock == 
NULL) return ENOMEM; - lwkt_token_init(rm->rm_slock); + lwkt_token_init(rm->rm_slock, 1); - lwkt_gettoken(&ilock, &rman_tok); + lwkt_gettoken(&rman_tok); TAILQ_INSERT_TAIL(&rman_head, rm, rm_link); - lwkt_reltoken(&ilock); + lwkt_reltoken(&rman_tok); return 0; } @@ -120,7 +119,6 @@ int rman_manage_region(struct rman *rm, u_long start, u_long end) { struct resource *r, *s; - lwkt_tokref ilock; DPRINTF(("rman_manage_region: <%s> request: start %#lx, end %#lx\n", rm->rm_descr, start, end)); @@ -134,7 +132,7 @@ rman_manage_region(struct rman *rm, u_long start, u_long end) r->r_dev = 0; r->r_rm = rm; - lwkt_gettoken(&ilock, rm->rm_slock); + lwkt_gettoken(rm->rm_slock); for (s = TAILQ_FIRST(&rm->rm_list); s && s->r_end < r->r_start; s = TAILQ_NEXT(s, r_link)) @@ -145,7 +143,7 @@ rman_manage_region(struct rman *rm, u_long start, u_long end) else TAILQ_INSERT_BEFORE(s, r, r_link); - lwkt_reltoken(&ilock); + lwkt_reltoken(rm->rm_slock); return 0; } @@ -153,12 +151,11 @@ int rman_fini(struct rman *rm) { struct resource *r; - lwkt_tokref ilock; - lwkt_gettoken(&ilock, rm->rm_slock); + lwkt_gettoken(rm->rm_slock); TAILQ_FOREACH(r, &rm->rm_list, r_link) { if (r->r_flags & RF_ALLOCATED) { - lwkt_reltoken(&ilock); + lwkt_reltoken(rm->rm_slock); return EBUSY; } } @@ -172,11 +169,12 @@ rman_fini(struct rman *rm) TAILQ_REMOVE(&rm->rm_list, r, r_link); kfree(r, M_RMAN); } - lwkt_reltoken(&ilock); + lwkt_reltoken(rm->rm_slock); + /* XXX what's the point of this if we are going to free the struct? */ - lwkt_gettoken(&ilock, &rman_tok); + lwkt_gettoken(&rman_tok); TAILQ_REMOVE(&rman_head, rm, rm_link); - lwkt_reltoken(&ilock); + lwkt_reltoken(&rman_tok); kfree(rm->rm_slock, M_RMAN); return 0; @@ -189,7 +187,6 @@ rman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count, u_int want_activate; struct resource *r, *s, *rv; u_long rstart, rend; - lwkt_tokref ilock; rv = 0; @@ -200,7 +197,7 @@ rman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count, want_activate = (flags & RF_ACTIVE); flags &= ~RF_ACTIVE; - lwkt_gettoken(&ilock, rm->rm_slock); + lwkt_gettoken(rm->rm_slock); for (r = TAILQ_FIRST(&rm->rm_list); r && r->r_end < start; @@ -379,7 +376,7 @@ out: rv = 0; } } - lwkt_reltoken(&ilock); + lwkt_reltoken(rm->rm_slock); return (rv); } @@ -423,13 +420,12 @@ rman_activate_resource(struct resource *r) { int rv; struct resource *whohas; - lwkt_tokref ilock; struct rman *rm; rm = r->r_rm; - lwkt_gettoken(&ilock, rm->rm_slock); + lwkt_gettoken(rm->rm_slock); rv = int_rman_activate_resource(rm, r, &whohas); - lwkt_reltoken(&ilock); + lwkt_reltoken(rm->rm_slock); return rv; } @@ -437,7 +433,7 @@ rman_activate_resource(struct resource *r) /* XXX */ int -rman_await_resource(struct resource *r, lwkt_tokref_t ilock, int slpflags, int timo) +rman_await_resource(struct resource *r, int slpflags, int timo) { int rv; struct resource *whohas; @@ -445,7 +441,7 @@ rman_await_resource(struct resource *r, lwkt_tokref_t ilock, int slpflags, int t rm = r->r_rm; for (;;) { - lwkt_gettoken(ilock, rm->rm_slock); + lwkt_gettoken(rm->rm_slock); rv = int_rman_activate_resource(rm, r, &whohas); if (rv != EBUSY) return (rv); /* returns with ilock held */ @@ -462,7 +458,7 @@ rman_await_resource(struct resource *r, lwkt_tokref_t ilock, int slpflags, int t whohas->r_flags |= RF_WANTED; rv = tsleep(r->r_sharehead, slpflags, "rmwait", timo); if (rv) { - lwkt_reltoken(ilock); + lwkt_reltoken(rm->rm_slock); crit_exit(); return rv; } @@ -489,13 +485,12 @@ int_rman_deactivate_resource(struct resource *r) int 
rman_deactivate_resource(struct resource *r) { - lwkt_tokref ilock; struct rman *rm; rm = r->r_rm; - lwkt_gettoken(&ilock, rm->rm_slock); + lwkt_gettoken(rm->rm_slock); int_rman_deactivate_resource(r); - lwkt_reltoken(&ilock); + lwkt_reltoken(rm->rm_slock); return 0; } @@ -589,12 +584,11 @@ int rman_release_resource(struct resource *r) { struct rman *rm = r->r_rm; - lwkt_tokref ilock; int rv; - lwkt_gettoken(&ilock, rm->rm_slock); + lwkt_gettoken(rm->rm_slock); rv = int_rman_release_resource(rm, r); - lwkt_reltoken(&ilock); + lwkt_reltoken(rm->rm_slock); return (rv); } diff --git a/sys/kern/sys_pipe.c b/sys/kern/sys_pipe.c index 13c0c69b71..189634b60c 100644 --- a/sys/kern/sys_pipe.c +++ b/sys/kern/sys_pipe.c @@ -404,8 +404,8 @@ pipe_create(struct pipe **cpipep) vfs_timestamp(&cpipe->pipe_ctime); cpipe->pipe_atime = cpipe->pipe_ctime; cpipe->pipe_mtime = cpipe->pipe_ctime; - lwkt_token_init(&cpipe->pipe_rlock); - lwkt_token_init(&cpipe->pipe_wlock); + lwkt_token_init(&cpipe->pipe_rlock, 1); + lwkt_token_init(&cpipe->pipe_wlock, 1); return (0); } @@ -423,8 +423,6 @@ pipe_read(struct file *fp, struct uio *uio, struct ucred *cred, int fflags) u_int nsize; /* total bytes to read */ u_int rindex; /* contiguous bytes available */ int notify_writer; - lwkt_tokref rlock; - lwkt_tokref wlock; int mpsave; int bigread; int bigcount; @@ -437,7 +435,7 @@ pipe_read(struct file *fp, struct uio *uio, struct ucred *cred, int fflags) */ pipe_get_mplock(&mpsave); rpipe = (struct pipe *)fp->f_data; - lwkt_gettoken(&rlock, &rpipe->pipe_rlock); + lwkt_gettoken(&rpipe->pipe_rlock); if (fflags & O_FBLOCKING) nbio = 0; @@ -457,7 +455,7 @@ pipe_read(struct file *fp, struct uio *uio, struct ucred *cred, int fflags) error = pipe_start_uio(rpipe, &rpipe->pipe_rip); if (error) { pipe_rel_mplock(&mpsave); - lwkt_reltoken(&rlock); + lwkt_reltoken(&rpipe->pipe_rlock); return (error); } notify_writer = 0; @@ -524,14 +522,14 @@ pipe_read(struct file *fp, struct uio *uio, struct ucred *cred, int fflags) * wlock are held. */ if (rpipe->pipe_state & PIPE_WANTW) { - lwkt_gettoken(&wlock, &rpipe->pipe_wlock); + lwkt_gettoken(&rpipe->pipe_wlock); if (rpipe->pipe_state & PIPE_WANTW) { notify_writer = 0; rpipe->pipe_state &= ~PIPE_WANTW; - lwkt_reltoken(&wlock); + lwkt_reltoken(&rpipe->pipe_wlock); wakeup(rpipe); } else { - lwkt_reltoken(&wlock); + lwkt_reltoken(&rpipe->pipe_wlock); } } @@ -593,10 +591,10 @@ pipe_read(struct file *fp, struct uio *uio, struct ucred *cred, int fflags) /* * Last chance, interlock with WANTR. */ - lwkt_gettoken(&wlock, &rpipe->pipe_wlock); + lwkt_gettoken(&rpipe->pipe_wlock); size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex; if (size) { - lwkt_reltoken(&wlock); + lwkt_reltoken(&rpipe->pipe_wlock); continue; } @@ -605,7 +603,7 @@ pipe_read(struct file *fp, struct uio *uio, struct ucred *cred, int fflags) * tokens already held. 
*/ if (rpipe->pipe_state & PIPE_REOF) { - lwkt_reltoken(&wlock); + lwkt_reltoken(&rpipe->pipe_wlock); break; } @@ -637,7 +635,7 @@ pipe_read(struct file *fp, struct uio *uio, struct ucred *cred, int fflags) */ rpipe->pipe_state |= PIPE_WANTR; tsleep_interlock(rpipe, PCATCH); - lwkt_reltoken(&wlock); + lwkt_reltoken(&rpipe->pipe_wlock); error = tsleep(rpipe, PCATCH | PINTERLOCKED, "piperd", 0); ++pipe_rblocked_count; if (error) @@ -661,23 +659,23 @@ pipe_read(struct file *fp, struct uio *uio, struct ucred *cred, int fflags) */ if (notify_writer) { if (rpipe->pipe_state & PIPE_WANTW) { - lwkt_gettoken(&wlock, &rpipe->pipe_wlock); + lwkt_gettoken(&rpipe->pipe_wlock); if (rpipe->pipe_state & PIPE_WANTW) { rpipe->pipe_state &= ~PIPE_WANTW; - lwkt_reltoken(&wlock); + lwkt_reltoken(&rpipe->pipe_wlock); wakeup(rpipe); } else { - lwkt_reltoken(&wlock); + lwkt_reltoken(&rpipe->pipe_wlock); } } if (pipeseltest(rpipe)) { - lwkt_gettoken(&wlock, &rpipe->pipe_wlock); + lwkt_gettoken(&rpipe->pipe_wlock); pipeselwakeup(rpipe); - lwkt_reltoken(&wlock); + lwkt_reltoken(&rpipe->pipe_wlock); } } /*size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;*/ - lwkt_reltoken(&rlock); + lwkt_reltoken(&rpipe->pipe_rlock); pipe_rel_mplock(&mpsave); return (error); @@ -693,8 +691,6 @@ pipe_write(struct file *fp, struct uio *uio, struct ucred *cred, int fflags) int orig_resid; int nbio; struct pipe *wpipe, *rpipe; - lwkt_tokref rlock; - lwkt_tokref wlock; u_int windex; u_int space; u_int wcount; @@ -709,10 +705,10 @@ pipe_write(struct file *fp, struct uio *uio, struct ucred *cred, int fflags) */ rpipe = (struct pipe *) fp->f_data; wpipe = rpipe->pipe_peer; - lwkt_gettoken(&wlock, &wpipe->pipe_wlock); + lwkt_gettoken(&wpipe->pipe_wlock); if (wpipe->pipe_state & PIPE_WEOF) { pipe_rel_mplock(&mpsave); - lwkt_reltoken(&wlock); + lwkt_reltoken(&wpipe->pipe_wlock); return (EPIPE); } @@ -721,7 +717,7 @@ pipe_write(struct file *fp, struct uio *uio, struct ucred *cred, int fflags) */ if (uio->uio_resid == 0) { pipe_rel_mplock(&mpsave); - lwkt_reltoken(&wlock); + lwkt_reltoken(&wpipe->pipe_wlock); return(0); } @@ -731,7 +727,7 @@ pipe_write(struct file *fp, struct uio *uio, struct ucred *cred, int fflags) error = pipe_start_uio(wpipe, &wpipe->pipe_wip); if (error) { pipe_rel_mplock(&mpsave); - lwkt_reltoken(&wlock); + lwkt_reltoken(&wpipe->pipe_wlock); return (error); } @@ -755,7 +751,7 @@ pipe_write(struct file *fp, struct uio *uio, struct ucred *cred, int fflags) /* * Recheck after lock. */ - lwkt_gettoken(&rlock, &wpipe->pipe_rlock); + lwkt_gettoken(&wpipe->pipe_rlock); if ((wpipe->pipe_buffer.size <= PIPE_SIZE) && (pipe_nbig < pipe_maxbig) && (wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex)) { @@ -765,7 +761,7 @@ pipe_write(struct file *fp, struct uio *uio, struct ucred *cred, int fflags) else atomic_subtract_int(&pipe_nbig, 1); } - lwkt_reltoken(&rlock); + lwkt_reltoken(&wpipe->pipe_rlock); } orig_resid = uio->uio_resid; @@ -880,7 +876,7 @@ pipe_write(struct file *fp, struct uio *uio, struct ucred *cred, int fflags) * These are token locks so we do not have to worry about * deadlocks. 
*/ - lwkt_gettoken(&rlock, &wpipe->pipe_rlock); + lwkt_gettoken(&wpipe->pipe_rlock); /* * If the "read-side" has been blocked, wake it up now @@ -896,7 +892,7 @@ pipe_write(struct file *fp, struct uio *uio, struct ucred *cred, int fflags) * don't block on non-blocking I/O */ if (nbio) { - lwkt_reltoken(&rlock); + lwkt_reltoken(&wpipe->pipe_rlock); error = EAGAIN; break; } @@ -917,7 +913,7 @@ pipe_write(struct file *fp, struct uio *uio, struct ucred *cred, int fflags) * tokens already held. */ if (wpipe->pipe_state & PIPE_WEOF) { - lwkt_reltoken(&rlock); + lwkt_reltoken(&wpipe->pipe_rlock); error = EPIPE; break; } @@ -934,7 +930,7 @@ pipe_write(struct file *fp, struct uio *uio, struct ucred *cred, int fflags) error = tsleep(wpipe, PCATCH, "pipewr", 0); ++pipe_wblocked_count; } - lwkt_reltoken(&rlock); + lwkt_reltoken(&wpipe->pipe_rlock); /* * Break out if we errored or the read side wants us to go @@ -957,19 +953,19 @@ pipe_write(struct file *fp, struct uio *uio, struct ucred *cred, int fflags) */ if (wpipe->pipe_buffer.windex != wpipe->pipe_buffer.rindex) { if (wpipe->pipe_state & PIPE_WANTR) { - lwkt_gettoken(&rlock, &wpipe->pipe_rlock); + lwkt_gettoken(&wpipe->pipe_rlock); if (wpipe->pipe_state & PIPE_WANTR) { wpipe->pipe_state &= ~PIPE_WANTR; - lwkt_reltoken(&rlock); + lwkt_reltoken(&wpipe->pipe_rlock); wakeup(wpipe); } else { - lwkt_reltoken(&rlock); + lwkt_reltoken(&wpipe->pipe_rlock); } } if (pipeseltest(wpipe)) { - lwkt_gettoken(&rlock, &wpipe->pipe_rlock); + lwkt_gettoken(&wpipe->pipe_rlock); pipeselwakeup(wpipe); - lwkt_reltoken(&rlock); + lwkt_reltoken(&wpipe->pipe_rlock); } } @@ -990,7 +986,7 @@ pipe_write(struct file *fp, struct uio *uio, struct ucred *cred, int fflags) * wake up select/poll. */ /*space = wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex;*/ - lwkt_reltoken(&wlock); + lwkt_reltoken(&wpipe->pipe_wlock); pipe_rel_mplock(&mpsave); return (error); } @@ -1005,16 +1001,14 @@ pipe_ioctl(struct file *fp, u_long cmd, caddr_t data, struct ucred *cred, struct sysmsg *msg) { struct pipe *mpipe; - lwkt_tokref rlock; - lwkt_tokref wlock; int error; int mpsave; pipe_get_mplock(&mpsave); mpipe = (struct pipe *)fp->f_data; - lwkt_gettoken(&rlock, &mpipe->pipe_rlock); - lwkt_gettoken(&wlock, &mpipe->pipe_wlock); + lwkt_gettoken(&mpipe->pipe_rlock); + lwkt_gettoken(&mpipe->pipe_wlock); switch (cmd) { case FIOASYNC: @@ -1055,8 +1049,8 @@ pipe_ioctl(struct file *fp, u_long cmd, caddr_t data, error = ENOTTY; break; } - lwkt_reltoken(&rlock); - lwkt_reltoken(&wlock); + lwkt_reltoken(&mpipe->pipe_wlock); + lwkt_reltoken(&mpipe->pipe_rlock); pipe_rel_mplock(&mpsave); return (error); @@ -1106,10 +1100,6 @@ pipe_poll_events(struct pipe *rpipe, struct pipe *wpipe, int events) int pipe_poll(struct file *fp, int events, struct ucred *cred) { - lwkt_tokref rpipe_rlock; - lwkt_tokref rpipe_wlock; - lwkt_tokref wpipe_rlock; - lwkt_tokref wpipe_wlock; struct pipe *rpipe; struct pipe *wpipe; int revents = 0; @@ -1122,12 +1112,12 @@ pipe_poll(struct file *fp, int events, struct ucred *cred) revents = pipe_poll_events(rpipe, wpipe, events); if (revents == 0) { if (events & (POLLIN | POLLRDNORM)) { - lwkt_gettoken(&rpipe_rlock, &rpipe->pipe_rlock); - lwkt_gettoken(&rpipe_wlock, &rpipe->pipe_wlock); + lwkt_gettoken(&rpipe->pipe_rlock); + lwkt_gettoken(&rpipe->pipe_wlock); } if (events & (POLLOUT | POLLWRNORM)) { - lwkt_gettoken(&wpipe_rlock, &wpipe->pipe_rlock); - lwkt_gettoken(&wpipe_wlock, &wpipe->pipe_wlock); + lwkt_gettoken(&wpipe->pipe_rlock); + lwkt_gettoken(&wpipe->pipe_wlock); } revents = 
pipe_poll_events(rpipe, wpipe, events); if (revents == 0) { @@ -1141,13 +1131,13 @@ pipe_poll(struct file *fp, int events, struct ucred *cred) wpipe->pipe_state |= PIPE_SEL; } } - if (events & (POLLIN | POLLRDNORM)) { - lwkt_reltoken(&rpipe_rlock); - lwkt_reltoken(&rpipe_wlock); - } if (events & (POLLOUT | POLLWRNORM)) { - lwkt_reltoken(&wpipe_rlock); - lwkt_reltoken(&wpipe_wlock); + lwkt_reltoken(&wpipe->pipe_wlock); + lwkt_reltoken(&wpipe->pipe_rlock); + } + if (events & (POLLIN | POLLRDNORM)) { + lwkt_reltoken(&rpipe->pipe_wlock); + lwkt_reltoken(&rpipe->pipe_rlock); } } pipe_rel_mplock(&mpsave); @@ -1212,10 +1202,6 @@ pipe_shutdown(struct file *fp, int how) struct pipe *rpipe; struct pipe *wpipe; int error = EPIPE; - lwkt_tokref rpipe_rlock; - lwkt_tokref rpipe_wlock; - lwkt_tokref wpipe_rlock; - lwkt_tokref wpipe_wlock; int mpsave; pipe_get_mplock(&mpsave); @@ -1226,10 +1212,10 @@ pipe_shutdown(struct file *fp, int how) * We modify pipe_state on both pipes, which means we need * all four tokens! */ - lwkt_gettoken(&rpipe_rlock, &rpipe->pipe_rlock); - lwkt_gettoken(&rpipe_wlock, &rpipe->pipe_wlock); - lwkt_gettoken(&wpipe_rlock, &wpipe->pipe_rlock); - lwkt_gettoken(&wpipe_wlock, &wpipe->pipe_wlock); + lwkt_gettoken(&rpipe->pipe_rlock); + lwkt_gettoken(&rpipe->pipe_wlock); + lwkt_gettoken(&wpipe->pipe_rlock); + lwkt_gettoken(&wpipe->pipe_wlock); switch(how) { case SHUT_RDWR: @@ -1265,10 +1251,10 @@ pipe_shutdown(struct file *fp, int how) pipeselwakeup(rpipe); pipeselwakeup(wpipe); - lwkt_reltoken(&rpipe_rlock); - lwkt_reltoken(&rpipe_wlock); - lwkt_reltoken(&wpipe_rlock); - lwkt_reltoken(&wpipe_wlock); + lwkt_reltoken(&wpipe->pipe_wlock); + lwkt_reltoken(&wpipe->pipe_rlock); + lwkt_reltoken(&rpipe->pipe_wlock); + lwkt_reltoken(&rpipe->pipe_rlock); pipe_rel_mplock(&mpsave); return (error); @@ -1297,10 +1283,6 @@ pipeclose(struct pipe *cpipe) { globaldata_t gd; struct pipe *ppipe; - lwkt_tokref cpipe_rlock; - lwkt_tokref cpipe_wlock; - lwkt_tokref ppipe_rlock; - lwkt_tokref ppipe_wlock; if (cpipe == NULL) return; @@ -1313,8 +1295,8 @@ pipeclose(struct pipe *cpipe) */ if (cpipe->pipe_slock) lockmgr(cpipe->pipe_slock, LK_EXCLUSIVE); - lwkt_gettoken(&cpipe_rlock, &cpipe->pipe_rlock); - lwkt_gettoken(&cpipe_wlock, &cpipe->pipe_wlock); + lwkt_gettoken(&cpipe->pipe_rlock); + lwkt_gettoken(&cpipe->pipe_wlock); /* * Set our state, wakeup anyone waiting in select, and @@ -1331,8 +1313,8 @@ pipeclose(struct pipe *cpipe) * Disconnect from peer. 
*/ if ((ppipe = cpipe->pipe_peer) != NULL) { - lwkt_gettoken(&ppipe_rlock, &ppipe->pipe_rlock); - lwkt_gettoken(&ppipe_wlock, &ppipe->pipe_wlock); + lwkt_gettoken(&ppipe->pipe_rlock); + lwkt_gettoken(&ppipe->pipe_wlock); ppipe->pipe_state |= PIPE_REOF | PIPE_WEOF; pipeselwakeup(ppipe); if (ppipe->pipe_state & (PIPE_WANTR | PIPE_WANTW)) { @@ -1344,8 +1326,8 @@ pipeclose(struct pipe *cpipe) KNOTE(&ppipe->pipe_sel.si_note, 0); rel_mplock(); } - lwkt_reltoken(&ppipe_rlock); - lwkt_reltoken(&ppipe_wlock); + lwkt_reltoken(&ppipe->pipe_wlock); + lwkt_reltoken(&ppipe->pipe_rlock); } /* @@ -1361,8 +1343,8 @@ pipeclose(struct pipe *cpipe) ppipe = NULL; } - lwkt_reltoken(&cpipe_rlock); - lwkt_reltoken(&cpipe_wlock); + lwkt_reltoken(&cpipe->pipe_wlock); + lwkt_reltoken(&cpipe->pipe_rlock); if (cpipe->pipe_slock) lockmgr(cpipe->pipe_slock, LK_RELEASE); diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c index ea10c0e435..9c459bac82 100644 --- a/sys/kern/vfs_bio.c +++ b/sys/kern/vfs_bio.c @@ -2630,7 +2630,6 @@ inmem(struct vnode *vp, off_t loffset) struct buf * findblk(struct vnode *vp, off_t loffset, int flags) { - lwkt_tokref vlock; struct buf *bp; int lkflags; @@ -2639,9 +2638,9 @@ findblk(struct vnode *vp, off_t loffset, int flags) lkflags |= LK_NOWAIT; for (;;) { - lwkt_gettoken(&vlock, &vp->v_token); + lwkt_gettoken(&vp->v_token); bp = buf_rb_hash_RB_LOOKUP(&vp->v_rbhash_tree, loffset); - lwkt_reltoken(&vlock); + lwkt_reltoken(&vp->v_token); if (bp == NULL || (flags & FINDBLK_TEST)) break; if (BUF_LOCK(bp, lkflags)) { diff --git a/sys/kern/vfs_lock.c b/sys/kern/vfs_lock.c index 1c66ef3bda..7e70cd9e04 100644 --- a/sys/kern/vfs_lock.c +++ b/sys/kern/vfs_lock.c @@ -415,7 +415,7 @@ vnode_ctor(void *obj, void *private, int ocflags) { struct vnode *vp = obj; - lwkt_token_init(&vp->v_token); + lwkt_token_init(&vp->v_token, 1); lockinit(&vp->v_lock, "vnode", 0, 0); ccms_dataspace_init(&vp->v_ccms); TAILQ_INIT(&vp->v_namecache); diff --git a/sys/kern/vfs_mount.c b/sys/kern/vfs_mount.c index ffaf7b5fa4..40c30cd16b 100644 --- a/sys/kern/vfs_mount.c +++ b/sys/kern/vfs_mount.c @@ -135,9 +135,9 @@ static TAILQ_HEAD(,bio_ops) bio_ops_list = TAILQ_HEAD_INITIALIZER(bio_ops_list); void vfs_mount_init(void) { - lwkt_token_init(&mountlist_token); - lwkt_token_init(&mntvnode_token); - lwkt_token_init(&mntid_token); + lwkt_token_init(&mountlist_token, 1); + lwkt_token_init(&mntvnode_token, 1); + lwkt_token_init(&mntid_token, 1); TAILQ_INIT(&mountscan_list); TAILQ_INIT(&mntvnodescan_list); mount_init(&dummymount); @@ -320,7 +320,7 @@ void mount_init(struct mount *mp) { lockinit(&mp->mnt_lock, "vfslock", 0, 0); - lwkt_token_init(&mp->mnt_token); + lwkt_token_init(&mp->mnt_token, 1); TAILQ_INIT(&mp->mnt_nvnodelist); TAILQ_INIT(&mp->mnt_reservedvnlist); @@ -337,16 +337,15 @@ struct mount * vfs_getvfs(fsid_t *fsid) { struct mount *mp; - lwkt_tokref ilock; - lwkt_gettoken(&ilock, &mountlist_token); + lwkt_gettoken(&mountlist_token); TAILQ_FOREACH(mp, &mountlist, mnt_list) { if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] && mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) { break; } } - lwkt_reltoken(&ilock); + lwkt_reltoken(&mountlist_token); return (mp); } @@ -366,11 +365,10 @@ void vfs_getnewfsid(struct mount *mp) { static u_int16_t mntid_base; - lwkt_tokref ilock; fsid_t tfsid; int mtype; - lwkt_gettoken(&ilock, &mntid_token); + lwkt_gettoken(&mntid_token); mtype = mp->mnt_vfc->vfc_typenum; tfsid.val[1] = mtype; mtype = (mtype & 0xFF) << 24; @@ -383,7 +381,7 @@ vfs_getnewfsid(struct mount *mp) } mp->mnt_stat.f_fsid.val[0] = 
tfsid.val[0]; mp->mnt_stat.f_fsid.val[1] = tfsid.val[1]; - lwkt_reltoken(&ilock); + lwkt_reltoken(&mntid_token); } /* @@ -564,7 +562,6 @@ vlrureclaim(struct mount *mp, void *data) { struct vnlru_info *info = data; struct vnode *vp; - lwkt_tokref ilock; int done; int trigger; int usevnodes; @@ -590,7 +587,7 @@ vlrureclaim(struct mount *mp, void *data) trigger = vmstats.v_page_count * (trigger_mult + 2) / usevnodes; done = 0; - lwkt_gettoken(&ilock, &mntvnode_token); + lwkt_gettoken(&mntvnode_token); count = mp->mnt_nvnodelistsize / 10 + 1; while (count && mp->mnt_syncer) { @@ -666,7 +663,7 @@ vlrureclaim(struct mount *mp, void *data) ++done; --count; } - lwkt_reltoken(&ilock); + lwkt_reltoken(&mntvnode_token); return (done); } @@ -775,14 +772,12 @@ vnlru_proc(void) void mountlist_insert(struct mount *mp, int how) { - lwkt_tokref ilock; - - lwkt_gettoken(&ilock, &mountlist_token); + lwkt_gettoken(&mountlist_token); if (how == MNTINS_FIRST) TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list); else TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list); - lwkt_reltoken(&ilock); + lwkt_reltoken(&mountlist_token); } /* @@ -795,12 +790,11 @@ mountlist_insert(struct mount *mp, int how) int mountlist_interlock(int (*callback)(struct mount *), struct mount *mp) { - lwkt_tokref ilock; int error; - lwkt_gettoken(&ilock, &mountlist_token); + lwkt_gettoken(&mountlist_token); error = callback(mp); - lwkt_reltoken(&ilock); + lwkt_reltoken(&mountlist_token); return (error); } @@ -830,9 +824,8 @@ void mountlist_remove(struct mount *mp) { struct mountscan_info *msi; - lwkt_tokref ilock; - lwkt_gettoken(&ilock, &mountlist_token); + lwkt_gettoken(&mountlist_token); TAILQ_FOREACH(msi, &mountscan_list, msi_entry) { if (msi->msi_node == mp) { if (msi->msi_how & MNTSCAN_FORWARD) @@ -842,7 +835,7 @@ mountlist_remove(struct mount *mp) } } TAILQ_REMOVE(&mountlist, mp, mnt_list); - lwkt_reltoken(&ilock); + lwkt_reltoken(&mountlist_token); } /* @@ -868,13 +861,12 @@ int mountlist_scan(int (*callback)(struct mount *, void *), void *data, int how) { struct mountscan_info info; - lwkt_tokref ilock; struct mount *mp; thread_t td; int count; int res; - lwkt_gettoken(&ilock, &mountlist_token); + lwkt_gettoken(&mountlist_token); info.msi_how = how; info.msi_node = NULL; /* paranoia */ @@ -921,7 +913,7 @@ mountlist_scan(int (*callback)(struct mount *, void *), void *data, int how) } } TAILQ_REMOVE(&mountscan_list, &info, msi_entry); - lwkt_reltoken(&ilock); + lwkt_reltoken(&mountlist_token); return(res); } @@ -944,9 +936,7 @@ SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp) void insmntque(struct vnode *vp, struct mount *mp) { - lwkt_tokref ilock; - - lwkt_gettoken(&ilock, &mntvnode_token); + lwkt_gettoken(&mntvnode_token); /* * Delete from old mount point vnode list, if on one. */ @@ -961,7 +951,7 @@ insmntque(struct vnode *vp, struct mount *mp) * The 'end' of the LRU list is the vnode prior to mp->mnt_syncer. 
*/ if ((vp->v_mount = mp) == NULL) { - lwkt_reltoken(&ilock); + lwkt_reltoken(&mntvnode_token); return; } if (mp->mnt_syncer) { @@ -970,7 +960,7 @@ insmntque(struct vnode *vp, struct mount *mp) TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); } mp->mnt_nvnodelistsize++; - lwkt_reltoken(&ilock); + lwkt_reltoken(&mntvnode_token); } @@ -1001,14 +991,13 @@ vmntvnodescan( void *data ) { struct vmntvnodescan_info info; - lwkt_tokref ilock; struct vnode *vp; int r = 0; int maxcount = 1000000; int stopcount = 0; int count = 0; - lwkt_gettoken(&ilock, &mntvnode_token); + lwkt_gettoken(&mntvnode_token); /* * If asked to do one pass stop after iterating available vnodes. @@ -1127,7 +1116,7 @@ next: info.vp = TAILQ_NEXT(vp, v_nmntvnodes); } TAILQ_REMOVE(&mntvnodescan_list, &info, entry); - lwkt_reltoken(&ilock); + lwkt_reltoken(&mntvnode_token); return(r); } @@ -1315,14 +1304,13 @@ struct mount * mount_get_by_nc(struct namecache *ncp) { struct mount *mp = NULL; - lwkt_tokref ilock; - lwkt_gettoken(&ilock, &mountlist_token); + lwkt_gettoken(&mountlist_token); TAILQ_FOREACH(mp, &mountlist, mnt_list) { if (ncp == mp->mnt_ncmountpt.ncp) break; } - lwkt_reltoken(&ilock); + lwkt_reltoken(&mountlist_token); return (mp); } diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c index bb7b9474c3..0e55ff62a1 100644 --- a/sys/kern/vfs_subr.c +++ b/sys/kern/vfs_subr.c @@ -192,7 +192,7 @@ vfs_subr_init(void) KvaSize / factor2); desiredvnodes = imax(desiredvnodes, maxproc * 8); - lwkt_token_init(&spechash_token); + lwkt_token_init(&spechash_token, 1); } /* @@ -287,10 +287,9 @@ vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo) { struct vinvalbuf_bp_info info; vm_object_t object; - lwkt_tokref vlock; int error; - lwkt_gettoken(&vlock, &vp->v_token); + lwkt_gettoken(&vp->v_token); /* * If we are being asked to save, call fsync to ensure that the inode @@ -363,7 +362,7 @@ vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo) panic("vinvalbuf: flush failed, buffers still present"); error = 0; done: - lwkt_reltoken(&vlock); + lwkt_reltoken(&vp->v_token); return (error); } @@ -445,7 +444,6 @@ vtruncbuf(struct vnode *vp, off_t length, int blksize) { off_t truncloffset; const char *filename; - lwkt_tokref vlock; int count; /* @@ -458,7 +456,7 @@ vtruncbuf(struct vnode *vp, off_t length, int blksize) else truncloffset = length; - lwkt_gettoken(&vlock, &vp->v_token); + lwkt_gettoken(&vp->v_token); do { count = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree, vtruncbuf_bp_trunc_cmp, @@ -518,7 +516,7 @@ vtruncbuf(struct vnode *vp, off_t length, int blksize) } } while(count); - lwkt_reltoken(&vlock); + lwkt_reltoken(&vp->v_token); return (0); } @@ -631,7 +629,6 @@ vfsync(struct vnode *vp, int waitfor, int passes, int (*waitoutput)(struct vnode *, struct thread *)) { struct vfsync_info info; - lwkt_tokref vlock; int error; bzero(&info, sizeof(info)); @@ -639,7 +636,7 @@ vfsync(struct vnode *vp, int waitfor, int passes, if ((info.checkdef = checkdef) == NULL) info.syncdeps = 1; - lwkt_gettoken(&vlock, &vp->v_token); + lwkt_gettoken(&vp->v_token); switch(waitfor) { case MNT_LAZY: @@ -705,7 +702,7 @@ vfsync(struct vnode *vp, int waitfor, int passes, } break; } - lwkt_reltoken(&vlock); + lwkt_reltoken(&vp->v_token); return(error); } @@ -829,17 +826,15 @@ vfsync_bp(struct buf *bp, void *data) int bgetvp(struct vnode *vp, struct buf *bp) { - lwkt_tokref vlock; - KASSERT(bp->b_vp == NULL, ("bgetvp: not free")); KKASSERT((bp->b_flags & (B_HASHED|B_DELWRI|B_VNCLEAN|B_VNDIRTY)) == 0); /* * Insert onto list for 
new vnode. */ - lwkt_gettoken(&vlock, &vp->v_token); + lwkt_gettoken(&vp->v_token); if (buf_rb_hash_RB_INSERT(&vp->v_rbhash_tree, bp)) { - lwkt_reltoken(&vlock); + lwkt_reltoken(&vp->v_token); return (EEXIST); } bp->b_vp = vp; @@ -848,7 +843,7 @@ bgetvp(struct vnode *vp, struct buf *bp) if (buf_rb_tree_RB_INSERT(&vp->v_rbclean_tree, bp)) panic("reassignbuf: dup lblk/clean vp %p bp %p", vp, bp); vhold(vp); - lwkt_reltoken(&vlock); + lwkt_reltoken(&vp->v_token); return(0); } @@ -859,7 +854,6 @@ void brelvp(struct buf *bp) { struct vnode *vp; - lwkt_tokref vlock; KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); @@ -867,7 +861,7 @@ brelvp(struct buf *bp) * Delete from old vnode list, if on one. */ vp = bp->b_vp; - lwkt_gettoken(&vlock, &vp->v_token); + lwkt_gettoken(&vp->v_token); if (bp->b_flags & (B_VNDIRTY | B_VNCLEAN)) { if (bp->b_flags & B_VNDIRTY) buf_rb_tree_RB_REMOVE(&vp->v_rbdirty_tree, bp); @@ -884,7 +878,7 @@ brelvp(struct buf *bp) LIST_REMOVE(vp, v_synclist); } bp->b_vp = NULL; - lwkt_reltoken(&vlock); + lwkt_reltoken(&vp->v_token); vdrop(vp); } @@ -899,7 +893,6 @@ void reassignbuf(struct buf *bp) { struct vnode *vp = bp->b_vp; - lwkt_tokref vlock; int delay; KKASSERT(vp != NULL); @@ -912,7 +905,7 @@ reassignbuf(struct buf *bp) if (bp->b_flags & B_PAGING) panic("cannot reassign paging buffer"); - lwkt_gettoken(&vlock, &vp->v_token); + lwkt_gettoken(&vp->v_token); if (bp->b_flags & B_DELWRI) { /* * Move to the dirty list, add the vnode to the worklist @@ -968,7 +961,7 @@ reassignbuf(struct buf *bp) LIST_REMOVE(vp, v_synclist); } } - lwkt_reltoken(&vlock); + lwkt_reltoken(&vp->v_token); } /* @@ -1009,32 +1002,29 @@ bdevvp(cdev_t dev, struct vnode **vpp) int v_associate_rdev(struct vnode *vp, cdev_t dev) { - lwkt_tokref ilock; - if (dev == NULL) return(ENXIO); if (dev_is_good(dev) == 0) return(ENXIO); KKASSERT(vp->v_rdev == NULL); vp->v_rdev = reference_dev(dev); - lwkt_gettoken(&ilock, &spechash_token); + lwkt_gettoken(&spechash_token); SLIST_INSERT_HEAD(&dev->si_hlist, vp, v_cdevnext); - lwkt_reltoken(&ilock); + lwkt_reltoken(&spechash_token); return(0); } void v_release_rdev(struct vnode *vp) { - lwkt_tokref ilock; cdev_t dev; if ((dev = vp->v_rdev) != NULL) { - lwkt_gettoken(&ilock, &spechash_token); + lwkt_gettoken(&spechash_token); SLIST_REMOVE(&dev->si_hlist, vp, vnode, v_cdevnext); vp->v_rdev = NULL; release_dev(dev); - lwkt_reltoken(&ilock); + lwkt_reltoken(&spechash_token); } } @@ -1201,7 +1191,6 @@ vrevoke(struct vnode *vp, struct ucred *cred) { struct vnode *vq; struct vnode *vqn; - lwkt_tokref ilock; cdev_t dev; int error; @@ -1226,7 +1215,7 @@ vrevoke(struct vnode *vp, struct ucred *cred) return(0); } reference_dev(dev); - lwkt_gettoken(&ilock, &spechash_token); + lwkt_gettoken(&spechash_token); vqn = SLIST_FIRST(&dev->si_hlist); if (vqn) @@ -1239,7 +1228,7 @@ vrevoke(struct vnode *vp, struct ucred *cred) /*v_release_rdev(vq);*/ vrele(vq); } - lwkt_reltoken(&ilock); + lwkt_reltoken(&spechash_token); dev_drevoke(dev); release_dev(dev); return (0); @@ -1352,19 +1341,18 @@ vgone_vxlocked(struct vnode *vp) int vfinddev(cdev_t dev, enum vtype type, struct vnode **vpp) { - lwkt_tokref ilock; struct vnode *vp; - lwkt_gettoken(&ilock, &spechash_token); + lwkt_gettoken(&spechash_token); SLIST_FOREACH(vp, &dev->si_hlist, v_cdevnext) { if (type == vp->v_type) { *vpp = vp; vref(vp); - lwkt_reltoken(&ilock); + lwkt_reltoken(&spechash_token); return (1); } } - lwkt_reltoken(&ilock); + lwkt_reltoken(&spechash_token); return (0); } @@ -1377,16 +1365,15 @@ vfinddev(cdev_t dev, enum vtype 
type, struct vnode **vpp) int count_dev(cdev_t dev) { - lwkt_tokref ilock; struct vnode *vp; int count = 0; if (SLIST_FIRST(&dev->si_hlist)) { - lwkt_gettoken(&ilock, &spechash_token); + lwkt_gettoken(&spechash_token); SLIST_FOREACH(vp, &dev->si_hlist, v_cdevnext) { count += vp->v_opencount; } - lwkt_reltoken(&ilock); + lwkt_reltoken(&spechash_token); } return(count); } @@ -2135,11 +2122,9 @@ vfs_msync_scan2(struct mount *mp, struct vnode *vp, void *data) int vn_pollrecord(struct vnode *vp, int events) { - lwkt_tokref vlock; - KKASSERT(curthread->td_proc != NULL); - lwkt_gettoken(&vlock, &vp->v_token); + lwkt_gettoken(&vp->v_token); if (vp->v_pollinfo.vpi_revents & events) { /* * This leaves events we are not interested @@ -2151,12 +2136,12 @@ vn_pollrecord(struct vnode *vp, int events) events &= vp->v_pollinfo.vpi_revents; vp->v_pollinfo.vpi_revents &= ~events; - lwkt_reltoken(&vlock); + lwkt_reltoken(&vp->v_token); return events; } vp->v_pollinfo.vpi_events |= events; selrecord(curthread, &vp->v_pollinfo.vpi_selinfo); - lwkt_reltoken(&vlock); + lwkt_reltoken(&vp->v_token); return 0; } @@ -2169,9 +2154,7 @@ vn_pollrecord(struct vnode *vp, int events) void vn_pollevent(struct vnode *vp, int events) { - lwkt_tokref vlock; - - lwkt_gettoken(&vlock, &vp->v_token); + lwkt_gettoken(&vp->v_token); if (vp->v_pollinfo.vpi_events & events) { /* * We clear vpi_events so that we don't @@ -2188,7 +2171,7 @@ vn_pollevent(struct vnode *vp, int events) vp->v_pollinfo.vpi_revents |= events; selwakeup(&vp->v_pollinfo.vpi_selinfo); } - lwkt_reltoken(&vlock); + lwkt_reltoken(&vp->v_token); } /* @@ -2199,14 +2182,12 @@ vn_pollevent(struct vnode *vp, int events) void vn_pollgone(struct vnode *vp) { - lwkt_tokref vlock; - - lwkt_gettoken(&vlock, &vp->v_token); + lwkt_gettoken(&vp->v_token); if (vp->v_pollinfo.vpi_events) { vp->v_pollinfo.vpi_events = 0; selwakeup(&vp->v_pollinfo.vpi_selinfo); } - lwkt_reltoken(&vlock); + lwkt_reltoken(&vp->v_token); } /* diff --git a/sys/kern/vfs_sync.c b/sys/kern/vfs_sync.c index 0567e0d713..66f3657f27 100644 --- a/sys/kern/vfs_sync.c +++ b/sys/kern/vfs_sync.c @@ -120,7 +120,7 @@ vfs_sync_init(void) syncer_workitem_pending = hashinit(syncer_maxdelay, M_DEVBUF, &syncer_mask); syncer_maxdelay = syncer_mask + 1; - lwkt_token_init(&syncer_token); + lwkt_token_init(&syncer_token, 1); } /* @@ -157,10 +157,9 @@ vfs_sync_init(void) void vn_syncer_add_to_worklist(struct vnode *vp, int delay) { - lwkt_tokref ilock; int slot; - lwkt_gettoken(&ilock, &syncer_token); + lwkt_gettoken(&syncer_token); if (vp->v_flag & VONWORKLST) LIST_REMOVE(vp, v_synclist); @@ -171,7 +170,7 @@ vn_syncer_add_to_worklist(struct vnode *vp, int delay) LIST_INSERT_HEAD(&syncer_workitem_pending[slot], vp, v_synclist); vsetflags(vp, VONWORKLST); - lwkt_reltoken(&ilock); + lwkt_reltoken(&syncer_token); } struct thread *updatethread; @@ -192,8 +191,6 @@ sched_sync(void) struct thread *td = curthread; struct synclist *slp; struct vnode *vp; - lwkt_tokref ilock; - lwkt_tokref vlock; long starttime; EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td, @@ -203,7 +200,7 @@ sched_sync(void) kproc_suspend_loop(); starttime = time_second; - lwkt_gettoken(&ilock, &syncer_token); + lwkt_gettoken(&syncer_token); /* * Push files whose dirty time has expired. Be careful @@ -232,7 +229,7 @@ sched_sync(void) * here. 
*/ if (LIST_FIRST(slp) == vp) { - lwkt_gettoken(&vlock, &vp->v_token); + lwkt_gettoken(&vp->v_token); if (LIST_FIRST(slp) == vp) { if (RB_EMPTY(&vp->v_rbdirty_tree) && !vn_isdisk(vp, NULL)) { @@ -242,10 +239,10 @@ sched_sync(void) } vn_syncer_add_to_worklist(vp, syncdelay); } - lwkt_reltoken(&vlock); + lwkt_reltoken(&vp->v_token); } } - lwkt_reltoken(&ilock); + lwkt_reltoken(&syncer_token); /* * Do sync processing for each mount. @@ -451,15 +448,14 @@ static int sync_reclaim(struct vop_reclaim_args *ap) { struct vnode *vp = ap->a_vp; - lwkt_tokref ilock; - lwkt_gettoken(&ilock, &syncer_token); + lwkt_gettoken(&syncer_token); KKASSERT(vp->v_mount->mnt_syncer != vp); if (vp->v_flag & VONWORKLST) { LIST_REMOVE(vp, v_synclist); vclrflags(vp, VONWORKLST); } - lwkt_reltoken(&ilock); + lwkt_reltoken(&syncer_token); return (0); } diff --git a/sys/kern/vfs_vm.c b/sys/kern/vfs_vm.c index 36c3955ee2..04b50b5b35 100644 --- a/sys/kern/vfs_vm.c +++ b/sys/kern/vfs_vm.c @@ -127,7 +127,6 @@ nvtruncbuf(struct vnode *vp, off_t length, int blksize, int boff) off_t truncloffset; off_t truncboffset; const char *filename; - lwkt_tokref vlock; struct buf *bp; int count; int error; @@ -146,7 +145,7 @@ nvtruncbuf(struct vnode *vp, off_t length, int blksize, int boff) else truncloffset = length; - lwkt_gettoken(&vlock, &vp->v_token); + lwkt_gettoken(&vp->v_token); do { count = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree, nvtruncbuf_bp_trunc_cmp, @@ -235,7 +234,7 @@ nvtruncbuf(struct vnode *vp, off_t length, int blksize, int boff) } } while(count); - lwkt_reltoken(&vlock); + lwkt_reltoken(&vp->v_token); return (error); } diff --git a/sys/kern/vfs_vopops.c b/sys/kern/vfs_vopops.c index 248135220f..05bb93ce5c 100644 --- a/sys/kern/vfs_vopops.c +++ b/sys/kern/vfs_vopops.c @@ -744,6 +744,7 @@ int vop_inactive(struct vop_ops *ops, struct vnode *vp) { struct vop_inactive_args ap; + struct mount *mp; VFS_MPLOCK_DECLARE; int error; @@ -751,9 +752,14 @@ vop_inactive(struct vop_ops *ops, struct vnode *vp) ap.a_head.a_ops = ops; ap.a_vp = vp; - VFS_MPLOCK_FLAG(vp->v_mount, MNTK_IN_MPSAFE); + /* + * WARNING! Deactivation of the vnode can cause it to be recycled, + * clearing vp->v_mount. + */ + mp = vp->v_mount; + VFS_MPLOCK_FLAG(mp, MNTK_IN_MPSAFE); DO_OPS(ops, error, &ap, vop_inactive); - VFS_MPUNLOCK(vp->v_mount); + VFS_MPUNLOCK(mp); return(error); } @@ -764,6 +770,7 @@ int vop_reclaim(struct vop_ops *ops, struct vnode *vp) { struct vop_reclaim_args ap; + struct mount *mp; VFS_MPLOCK_DECLARE; int error; @@ -771,9 +778,13 @@ vop_reclaim(struct vop_ops *ops, struct vnode *vp) ap.a_head.a_ops = ops; ap.a_vp = vp; - VFS_MPLOCK1(vp->v_mount); + /* + * WARNING! Reclamation of the vnode will clear vp->v_mount. 
+ */ + mp = vp->v_mount; + VFS_MPLOCK1(mp); DO_OPS(ops, error, &ap, vop_reclaim); - VFS_MPUNLOCK(vp->v_mount); + VFS_MPUNLOCK(mp); return(error); } diff --git a/sys/netproto/smb/smb_subr.h b/sys/netproto/smb/smb_subr.h index ddfedaa4e4..c79833c84c 100644 --- a/sys/netproto/smb/smb_subr.h +++ b/sys/netproto/smb/smb_subr.h @@ -140,7 +140,6 @@ extern smb_unichar smb_unieol; struct mbchain; struct proc; struct thread; -struct lwkt_tokref; struct smb_vc; struct smb_rq; diff --git a/sys/platform/pc32/i386/busdma_machdep.c b/sys/platform/pc32/i386/busdma_machdep.c index 686311d413..2721b20b97 100644 --- a/sys/platform/pc32/i386/busdma_machdep.c +++ b/sys/platform/pc32/i386/busdma_machdep.c @@ -139,7 +139,7 @@ struct bounce_zone { #endif static struct lwkt_token bounce_zone_tok = - LWKT_TOKEN_INITIALIZER(bounce_zone_tok); + LWKT_TOKEN_MP_INITIALIZER(bounce_zone_tok); static int busdma_zonecount; static STAILQ_HEAD(, bounce_zone) bounce_zone_list = STAILQ_HEAD_INITIALIZER(bounce_zone_list); @@ -1073,20 +1073,19 @@ static int alloc_bounce_zone(bus_dma_tag_t dmat) { struct bounce_zone *bz, *new_bz; - lwkt_tokref ref; KASSERT(dmat->bounce_zone == NULL, ("bounce zone was already assigned\n")); new_bz = kmalloc(sizeof(*new_bz), M_DEVBUF, M_INTWAIT | M_ZERO); - lwkt_gettoken(&ref, &bounce_zone_tok); + lwkt_gettoken(&bounce_zone_tok); /* Check to see if we already have a suitable zone */ STAILQ_FOREACH(bz, &bounce_zone_list, links) { if (dmat->alignment <= bz->alignment && dmat->lowaddr >= bz->lowaddr) { - lwkt_reltoken(&ref); + lwkt_reltoken(&bounce_zone_tok); dmat->bounce_zone = bz; kfree(new_bz, M_DEVBUF); @@ -1110,7 +1109,7 @@ alloc_bounce_zone(bus_dma_tag_t dmat) ksnprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr); STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links); - lwkt_reltoken(&ref); + lwkt_reltoken(&bounce_zone_tok); dmat->bounce_zone = bz; diff --git a/sys/platform/pc64/x86_64/busdma_machdep.c b/sys/platform/pc64/x86_64/busdma_machdep.c index 686311d413..2721b20b97 100644 --- a/sys/platform/pc64/x86_64/busdma_machdep.c +++ b/sys/platform/pc64/x86_64/busdma_machdep.c @@ -139,7 +139,7 @@ struct bounce_zone { #endif static struct lwkt_token bounce_zone_tok = - LWKT_TOKEN_INITIALIZER(bounce_zone_tok); + LWKT_TOKEN_MP_INITIALIZER(bounce_zone_tok); static int busdma_zonecount; static STAILQ_HEAD(, bounce_zone) bounce_zone_list = STAILQ_HEAD_INITIALIZER(bounce_zone_list); @@ -1073,20 +1073,19 @@ static int alloc_bounce_zone(bus_dma_tag_t dmat) { struct bounce_zone *bz, *new_bz; - lwkt_tokref ref; KASSERT(dmat->bounce_zone == NULL, ("bounce zone was already assigned\n")); new_bz = kmalloc(sizeof(*new_bz), M_DEVBUF, M_INTWAIT | M_ZERO); - lwkt_gettoken(&ref, &bounce_zone_tok); + lwkt_gettoken(&bounce_zone_tok); /* Check to see if we already have a suitable zone */ STAILQ_FOREACH(bz, &bounce_zone_list, links) { if (dmat->alignment <= bz->alignment && dmat->lowaddr >= bz->lowaddr) { - lwkt_reltoken(&ref); + lwkt_reltoken(&bounce_zone_tok); dmat->bounce_zone = bz; kfree(new_bz, M_DEVBUF); @@ -1110,7 +1109,7 @@ alloc_bounce_zone(bus_dma_tag_t dmat) ksnprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr); STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links); - lwkt_reltoken(&ref); + lwkt_reltoken(&bounce_zone_tok); dmat->bounce_zone = bz; diff --git a/sys/platform/vkernel/platform/busdma_machdep.c b/sys/platform/vkernel/platform/busdma_machdep.c index 14263d55a1..24a2bdcbcf 100644 --- a/sys/platform/vkernel/platform/busdma_machdep.c +++ b/sys/platform/vkernel/platform/busdma_machdep.c @@ -129,7 
+129,7 @@ struct bounce_zone { #endif static struct lwkt_token bounce_zone_tok = - LWKT_TOKEN_INITIALIZER(bounce_zone_tok); + LWKT_TOKEN_MP_INITIALIZER(bounce_zone_tok); static int busdma_zonecount; static STAILQ_HEAD(, bounce_zone) bounce_zone_list = STAILQ_HEAD_INITIALIZER(bounce_zone_list); @@ -996,20 +996,19 @@ static int alloc_bounce_zone(bus_dma_tag_t dmat) { struct bounce_zone *bz, *new_bz; - lwkt_tokref ref; KASSERT(dmat->bounce_zone == NULL, ("bounce zone was already assigned\n")); new_bz = kmalloc(sizeof(*new_bz), M_DEVBUF, M_INTWAIT | M_ZERO); - lwkt_gettoken(&ref, &bounce_zone_tok); + lwkt_gettoken(&bounce_zone_tok); /* Check to see if we already have a suitable zone */ STAILQ_FOREACH(bz, &bounce_zone_list, links) { if (dmat->alignment <= bz->alignment && dmat->lowaddr >= bz->lowaddr) { - lwkt_reltoken(&ref); + lwkt_reltoken(&bounce_zone_tok); dmat->bounce_zone = bz; kfree(new_bz, M_DEVBUF); @@ -1033,7 +1032,7 @@ alloc_bounce_zone(bus_dma_tag_t dmat) ksnprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr); STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links); - lwkt_reltoken(&ref); + lwkt_reltoken(&bounce_zone_tok); dmat->bounce_zone = bz; diff --git a/sys/platform/vkernel64/platform/busdma_machdep.c b/sys/platform/vkernel64/platform/busdma_machdep.c index 6d1842a347..b0ff3e8beb 100644 --- a/sys/platform/vkernel64/platform/busdma_machdep.c +++ b/sys/platform/vkernel64/platform/busdma_machdep.c @@ -127,7 +127,7 @@ struct bounce_zone { #endif static struct lwkt_token bounce_zone_tok = - LWKT_TOKEN_INITIALIZER(bounce_zone_tok); + LWKT_TOKEN_MP_INITIALIZER(bounce_zone_tok); static int busdma_zonecount; static STAILQ_HEAD(, bounce_zone) bounce_zone_list = STAILQ_HEAD_INITIALIZER(bounce_zone_list); @@ -992,20 +992,19 @@ static int alloc_bounce_zone(bus_dma_tag_t dmat) { struct bounce_zone *bz, *new_bz; - lwkt_tokref ref; KASSERT(dmat->bounce_zone == NULL, ("bounce zone was already assigned\n")); new_bz = kmalloc(sizeof(*new_bz), M_DEVBUF, M_INTWAIT | M_ZERO); - lwkt_gettoken(&ref, &bounce_zone_tok); + lwkt_gettoken(&bounce_zone_tok); /* Check to see if we already have a suitable zone */ STAILQ_FOREACH(bz, &bounce_zone_list, links) { if (dmat->alignment <= bz->alignment && dmat->lowaddr >= bz->lowaddr) { - lwkt_reltoken(&ref); + lwkt_reltoken(&bounce_zone_tok); dmat->bounce_zone = bz; kfree(new_bz, M_DEVBUF); @@ -1029,7 +1028,7 @@ alloc_bounce_zone(bus_dma_tag_t dmat) ksnprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr); STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links); - lwkt_reltoken(&ref); + lwkt_reltoken(&bounce_zone_tok); dmat->bounce_zone = bz; diff --git a/sys/sys/mount.h b/sys/sys/mount.h index 67cc8750cd..af2ca6ea86 100644 --- a/sys/sys/mount.h +++ b/sys/sys/mount.h @@ -330,7 +330,7 @@ struct mount { /* * VFS MPLOCK helper. 
*/ -#define VFS_MPLOCK_DECLARE struct lwkt_tokref xlock; int xlock_mpsafe +#define VFS_MPLOCK_DECLARE int xlock_mpsafe #define VFS_MPLOCK1(mp) VFS_MPLOCK_FLAG(mp, MNTK_MPSAFE) @@ -338,7 +338,7 @@ struct mount { do { \ if (xlock_mpsafe) { \ get_mplock(); /* TEMPORARY */ \ - lwkt_gettoken(&xlock, &mp->mnt_token); \ + lwkt_gettoken(&mp->mnt_token); \ xlock_mpsafe = 0; \ } \ } while(0) @@ -349,7 +349,7 @@ struct mount { xlock_mpsafe = 1; \ } else { \ get_mplock(); /* TEMPORARY */ \ - lwkt_gettoken(&xlock, &mp->mnt_token); \ + lwkt_gettoken(&mp->mnt_token); \ xlock_mpsafe = 0; \ } \ } while(0) @@ -357,7 +357,7 @@ struct mount { #define VFS_MPUNLOCK(mp) \ do { \ if (xlock_mpsafe == 0) { \ - lwkt_reltoken(&xlock); \ + lwkt_reltoken(&mp->mnt_token); \ rel_mplock(); /* TEMPORARY */ \ } \ } while(0) diff --git a/sys/sys/rman.h b/sys/sys/rman.h index 3e2c95cb90..225dd4d017 100644 --- a/sys/sys/rman.h +++ b/sys/sys/rman.h @@ -112,7 +112,6 @@ struct resource { }; struct lwkt_token; -struct lwkt_tokref; struct rman { struct resource_head rm_list; @@ -126,7 +125,9 @@ struct rman { TAILQ_HEAD(rman_head, rman); int rman_activate_resource(struct resource *r); -int rman_await_resource(struct resource *r, struct lwkt_tokref *ilock, int slpflags, int timo); +#if 0 +int rman_await_resource(struct resource *r, int slpflags, int timo); +#endif int rman_deactivate_resource(struct resource *r); int rman_fini(struct rman *rm); int rman_init(struct rman *rm); diff --git a/sys/sys/thread.h b/sys/sys/thread.h index cacc775566..7525f188f0 100644 --- a/sys/sys/thread.h +++ b/sys/sys/thread.h @@ -102,25 +102,40 @@ struct intrframe; typedef struct lwkt_token { struct lwkt_tokref *t_ref; /* Owning ref or NULL */ + intptr_t t_flags; /* MP lock required */ } lwkt_token; -#define LWKT_TOKEN_INITIALIZER(head) \ +#define LWKT_TOKEN_MPSAFE 0x0001 + +/* + * Static initialization for a lwkt_token. + * UP - Not MPSAFE (full MP lock will also be acquired) + * MP - Is MPSAFE (only the token will be acquired) + */ +#define LWKT_TOKEN_UP_INITIALIZER(head) \ { \ - .t_ref = NULL \ + .t_ref = NULL, \ + .t_flags = 0 \ +} + +#define LWKT_TOKEN_MP_INITIALIZER(head) \ +{ \ + .t_ref = NULL, \ + .t_flags = LWKT_TOKEN_MPSAFE \ } #define ASSERT_LWKT_TOKEN_HELD(tok) \ KKASSERT((tok)->t_ref->tr_owner == curthread) -typedef struct lwkt_tokref { +struct lwkt_tokref { lwkt_token_t tr_tok; /* token in question */ struct thread *tr_owner; /* me */ - lwkt_tokref_t tr_next; /* linked list */ -} lwkt_tokref; + intptr_t tr_flags; /* copy of t_flags */ +}; #define MAXCPUFIFO 16 /* power of 2 */ #define MAXCPUFIFO_MASK (MAXCPUFIFO - 1) -#define LWKT_MAXTOKENS 16 /* max tokens beneficially held by thread */ +#define LWKT_MAXTOKENS 32 /* max tokens beneficially held by thread */ /* * Always cast to ipifunc_t when registering an ipi. 
The actual ipi function @@ -230,7 +245,8 @@ struct thread { struct thread *td_preempted; /* we preempted this thread */ struct ucred *td_ucred; /* synchronized from p_ucred */ struct caps_kinfo *td_caps; /* list of client and server registrations */ - lwkt_tokref_t td_toks; /* tokens beneficially held */ + lwkt_tokref_t td_toks_stop; + struct lwkt_tokref td_toks_array[LWKT_MAXTOKENS]; #ifdef DEBUG_CRIT_SECTIONS #define CRIT_DEBUG_ARRAY_SIZE 32 #define CRIT_DEBUG_ARRAY_MASK (CRIT_DEBUG_ARRAY_SIZE - 1) @@ -241,6 +257,12 @@ struct thread { struct md_thread td_mach; }; +#define td_toks_base td_toks_array[0] +#define td_toks_end td_toks_array[LWKT_MAXTOKENS] + +#define TD_TOKS_HELD(td) ((td)->td_toks_stop != &(td)->td_toks_base) +#define TD_TOKS_NOT_HELD(td) ((td)->td_toks_stop == &(td)->td_toks_base) + /* * Thread flags. Note that TDF_RUNNING is cleared on the old thread after * we switch to the new one, which is necessary because LWKTs don't need @@ -346,20 +368,18 @@ extern void lwkt_hold(thread_t); extern void lwkt_rele(thread_t); extern void lwkt_passive_release(thread_t); -extern void lwkt_gettoken(lwkt_tokref_t, lwkt_token_t); -extern int lwkt_trytoken(lwkt_tokref_t, lwkt_token_t); -extern void lwkt_gettokref(lwkt_tokref_t); -extern int lwkt_trytokref(lwkt_tokref_t); -extern void lwkt_reltoken(lwkt_tokref_t); +extern void lwkt_gettoken(lwkt_token_t); +extern int lwkt_trytoken(lwkt_token_t); +extern void lwkt_reltoken(lwkt_token_t); extern int lwkt_getalltokens(thread_t); extern void lwkt_relalltokens(thread_t); extern void lwkt_drain_token_requests(void); -extern void lwkt_token_init(lwkt_token_t); +extern void lwkt_token_init(lwkt_token_t, int); extern void lwkt_token_uninit(lwkt_token_t); extern void lwkt_token_pool_init(void); extern lwkt_token_t lwkt_token_pool_lookup(void *); -extern void lwkt_getpooltoken(lwkt_tokref_t, void *); +extern lwkt_token_t lwkt_getpooltoken(void *); extern void lwkt_setpri(thread_t, int); extern void lwkt_setpri_initial(thread_t, int); diff --git a/sys/sys/thread2.h b/sys/sys/thread2.h index a1683dddde..0087690c8e 100644 --- a/sys/sys/thread2.h +++ b/sys/sys/thread2.h @@ -195,18 +195,6 @@ crit_test(thread_t td) return(td->td_pri >= TDPRI_CRIT); } -/* - * Initialize a tokref_t. We only need to initialize the token pointer - * and the magic number. We do not have to initialize tr_next, tr_gdreqnext, - * or tr_reqgd. - */ -static __inline void -lwkt_tokref_init(lwkt_tokref_t ref, lwkt_token_t tok, thread_t td) -{ - ref->tr_tok = tok; - ref->tr_owner = td; -} - /* * Return whether any threads are runnable, whether they meet mp_lock * requirements or not. 
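To make the sys/sys/thread.h changes above concrete, here is a minimal
sketch of a caller written against the simplified interface. The my_token
and my_count names are hypothetical and not part of this patch; the point
is that the tokref bookkeeping now lives in the per-thread td_toks_array
added above, so the caller no longer declares or passes a lwkt_tokref.

	#include <sys/thread.h>

	/*
	 * Hypothetical example.  Per the initializer comments above, a
	 * UP-style token also acquires the full MP lock when taken; an
	 * MP-safe token (LWKT_TOKEN_MP_INITIALIZER) acquires only the
	 * token itself.
	 */
	static struct lwkt_token my_token =
			LWKT_TOKEN_UP_INITIALIZER(my_token);
	static int my_count;

	void
	my_increment(void)
	{
		lwkt_gettoken(&my_token);  /* was lwkt_gettoken(&ref, &my_token) */
		++my_count;
		lwkt_reltoken(&my_token);  /* was lwkt_reltoken(&ref) */
	}
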
diff --git a/sys/vfs/fifofs/fifo_vnops.c b/sys/vfs/fifofs/fifo_vnops.c index 23f24082d0..d9688ae4bd 100644 --- a/sys/vfs/fifofs/fifo_vnops.c +++ b/sys/vfs/fifofs/fifo_vnops.c @@ -165,10 +165,9 @@ fifo_open(struct vop_open_args *ap) struct vnode *vp = ap->a_vp; struct fifoinfo *fip; struct socket *rso, *wso; - lwkt_tokref vlock; int error; - lwkt_gettoken(&vlock, &vp->v_token); + lwkt_gettoken(&vp->v_token); if ((fip = vp->v_fifoinfo) == NULL) { MALLOC(fip, struct fifoinfo *, sizeof(*fip), M_FIFOINFO, M_WAITOK); vp->v_fifoinfo = fip; @@ -258,13 +257,13 @@ fifo_open(struct vop_open_args *ap) } vsetflags(vp, VNOTSEEKABLE); error = vop_stdopen(ap); - lwkt_reltoken(&vlock); + lwkt_reltoken(&vp->v_token); return (error); bad: vop_stdopen(ap); /* bump opencount/writecount as appropriate */ VOP_CLOSE(vp, ap->a_mode); done: - lwkt_reltoken(&vlock); + lwkt_reltoken(&vp->v_token); return (error); } @@ -279,9 +278,9 @@ static int fifo_read(struct vop_read_args *ap) { struct uio *uio = ap->a_uio; - struct socket *rso = ap->a_vp->v_fifoinfo->fi_readsock; + struct vnode *vp = ap->a_vp; + struct socket *rso = vp->v_fifoinfo->fi_readsock; int error, startresid; - lwkt_tokref vlock; int flags; #ifdef DIAGNOSTIC @@ -295,11 +294,11 @@ fifo_read(struct vop_read_args *ap) else flags = 0; startresid = uio->uio_resid; - vn_unlock(ap->a_vp); - lwkt_gettoken(&vlock, &ap->a_vp->v_token); + vn_unlock(vp); + lwkt_gettoken(&vp->v_token); error = soreceive(rso, NULL, uio, NULL, NULL, &flags); - lwkt_reltoken(&vlock); - vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY); + lwkt_reltoken(&vp->v_token); + vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); return (error); } @@ -313,9 +312,9 @@ fifo_read(struct vop_read_args *ap) static int fifo_write(struct vop_write_args *ap) { - struct socket *wso = ap->a_vp->v_fifoinfo->fi_writesock; struct thread *td = ap->a_uio->uio_td; - lwkt_tokref vlock; + struct vnode *vp = ap->a_vp; + struct socket *wso = vp->v_fifoinfo->fi_writesock; int error; int flags; @@ -327,11 +326,11 @@ fifo_write(struct vop_write_args *ap) flags = MSG_FNONBLOCKING; else flags = 0; - vn_unlock(ap->a_vp); - lwkt_gettoken(&vlock, &ap->a_vp->v_token); + vn_unlock(vp); + lwkt_gettoken(&vp->v_token); error = sosend(wso, NULL, ap->a_uio, 0, NULL, flags, td); - lwkt_reltoken(&vlock); - vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY); + lwkt_reltoken(&vp->v_token); + vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); return (error); } @@ -346,24 +345,24 @@ static int fifo_ioctl(struct vop_ioctl_args *ap) { struct file filetmp; /* Local */ - lwkt_tokref vlock; + struct vnode *vp = ap->a_vp; int error; if (ap->a_fflag & FREAD) { - filetmp.f_data = ap->a_vp->v_fifoinfo->fi_readsock; - lwkt_gettoken(&vlock, &ap->a_vp->v_token); + filetmp.f_data = vp->v_fifoinfo->fi_readsock; + lwkt_gettoken(&vp->v_token); error = soo_ioctl(&filetmp, ap->a_command, ap->a_data, ap->a_cred, ap->a_sysmsg); - lwkt_reltoken(&vlock); + lwkt_reltoken(&vp->v_token); if (error) return (error); } if (ap->a_fflag & FWRITE) { - filetmp.f_data = ap->a_vp->v_fifoinfo->fi_writesock; - lwkt_gettoken(&vlock, &ap->a_vp->v_token); + filetmp.f_data = vp->v_fifoinfo->fi_writesock; + lwkt_gettoken(&vp->v_token); error = soo_ioctl(&filetmp, ap->a_command, ap->a_data, ap->a_cred, ap->a_sysmsg); - lwkt_reltoken(&vlock); + lwkt_reltoken(&vp->v_token); if (error) return (error); } @@ -381,9 +380,8 @@ fifo_kqfilter(struct vop_kqfilter_args *ap) struct fifoinfo *fi = vp->v_fifoinfo; struct socket *so; struct signalsockbuf *ssb; - lwkt_tokref vlock; - lwkt_gettoken(&vlock, &vp->v_token); + 
lwkt_gettoken(&vp->v_token); switch (ap->a_kn->kn_filter) { case EVFILT_READ: @@ -397,14 +395,14 @@ fifo_kqfilter(struct vop_kqfilter_args *ap) ssb = &so->so_snd; break; default: - lwkt_reltoken(&vlock); + lwkt_reltoken(&vp->v_token); return (1); } ap->a_kn->kn_hook = (caddr_t)vp; ssb_insert_knote(ssb, ap->a_kn); - lwkt_reltoken(&vlock); + lwkt_reltoken(&vp->v_token); return (0); } @@ -413,11 +411,10 @@ filt_fifordetach(struct knote *kn) { struct vnode *vp = (void *)kn->kn_hook; struct socket *so = vp->v_fifoinfo->fi_readsock; - lwkt_tokref vlock; - lwkt_gettoken(&vlock, &vp->v_token); + lwkt_gettoken(&vp->v_token); ssb_remove_knote(&so->so_rcv, kn); - lwkt_reltoken(&vlock); + lwkt_reltoken(&vp->v_token); } static int @@ -425,17 +422,16 @@ filt_fiforead(struct knote *kn, long hint) { struct vnode *vp = (void *)kn->kn_hook; struct socket *so = vp->v_fifoinfo->fi_readsock; - lwkt_tokref vlock; - lwkt_gettoken(&vlock, &vp->v_token); + lwkt_gettoken(&vp->v_token); kn->kn_data = so->so_rcv.ssb_cc; if (so->so_state & SS_CANTRCVMORE) { kn->kn_flags |= EV_EOF; - lwkt_reltoken(&vlock); + lwkt_reltoken(&vp->v_token); return (1); } kn->kn_flags &= ~EV_EOF; - lwkt_reltoken(&vlock); + lwkt_reltoken(&vp->v_token); return (kn->kn_data > 0); } @@ -444,11 +440,10 @@ filt_fifowdetach(struct knote *kn) { struct vnode *vp = (void *)kn->kn_hook; struct socket *so = vp->v_fifoinfo->fi_writesock; - lwkt_tokref vlock; - lwkt_gettoken(&vlock, &vp->v_token); + lwkt_gettoken(&vp->v_token); ssb_remove_knote(&so->so_snd, kn); - lwkt_reltoken(&vlock); + lwkt_reltoken(&vp->v_token); } static int @@ -456,17 +451,16 @@ filt_fifowrite(struct knote *kn, long hint) { struct vnode *vp = (void *)kn->kn_hook; struct socket *so = vp->v_fifoinfo->fi_writesock; - lwkt_tokref vlock; - lwkt_gettoken(&vlock, &vp->v_token); + lwkt_gettoken(&vp->v_token); kn->kn_data = ssb_space(&so->so_snd); if (so->so_state & SS_CANTSENDMORE) { kn->kn_flags |= EV_EOF; - lwkt_reltoken(&vlock); + lwkt_reltoken(&vp->v_token); return (1); } kn->kn_flags &= ~EV_EOF; - lwkt_reltoken(&vlock); + lwkt_reltoken(&vp->v_token); return (kn->kn_data >= so->so_snd.ssb_lowat); } @@ -477,11 +471,11 @@ filt_fifowrite(struct knote *kn, long hint) static int fifo_poll(struct vop_poll_args *ap) { + struct vnode *vp = ap->a_vp; struct file filetmp; int events, revents = 0; - lwkt_tokref vlock; - lwkt_gettoken(&vlock, &ap->a_vp->v_token); + lwkt_gettoken(&vp->v_token); events = ap->a_events & (POLLIN | POLLINIGNEOF | POLLPRI | POLLRDNORM | POLLRDBAND); if (events) { @@ -498,7 +492,7 @@ fifo_poll(struct vop_poll_args *ap) events |= POLLINIGNEOF; } - filetmp.f_data = ap->a_vp->v_fifoinfo->fi_readsock; + filetmp.f_data = vp->v_fifoinfo->fi_readsock; if (filetmp.f_data) revents |= soo_poll(&filetmp, events, ap->a_cred); @@ -511,11 +505,11 @@ fifo_poll(struct vop_poll_args *ap) } events = ap->a_events & (POLLOUT | POLLWRNORM | POLLWRBAND); if (events) { - filetmp.f_data = ap->a_vp->v_fifoinfo->fi_writesock; + filetmp.f_data = vp->v_fifoinfo->fi_writesock; if (filetmp.f_data) revents |= soo_poll(&filetmp, events, ap->a_cred); } - lwkt_reltoken(&vlock); + lwkt_reltoken(&vp->v_token); return (revents); } @@ -558,9 +552,8 @@ fifo_close(struct vop_close_args *ap) struct vnode *vp = ap->a_vp; struct fifoinfo *fip; int error1, error2; - lwkt_tokref vlock; - lwkt_gettoken(&vlock, &vp->v_token); + lwkt_gettoken(&vp->v_token); fip = vp->v_fifoinfo; if (ap->a_fflag & FREAD) { fip->fi_readers--; @@ -574,7 +567,7 @@ fifo_close(struct vop_close_args *ap) } if (vp->v_sysref.refcnt > 1) { 
vop_stdclose(ap); - lwkt_reltoken(&vlock); + lwkt_reltoken(&vp->v_token); return (0); } error1 = soclose(fip->fi_readsock, FNONBLOCK); @@ -586,7 +579,7 @@ fifo_close(struct vop_close_args *ap) } else { vop_stdclose(ap); } - lwkt_reltoken(&vlock); + lwkt_reltoken(&vp->v_token); return (error2); } diff --git a/sys/vfs/gnu/ext2fs/ext2_ihash.c b/sys/vfs/gnu/ext2fs/ext2_ihash.c index 5510ecb519..686aba7c2a 100644 --- a/sys/vfs/gnu/ext2fs/ext2_ihash.c +++ b/sys/vfs/gnu/ext2fs/ext2_ihash.c @@ -69,18 +69,16 @@ ext2_ihashinit(void) ext2_ihash <<= 1; ext2_ihashtbl = kmalloc(sizeof(void *) * ext2_ihash, M_EXT2IHASH, M_WAITOK|M_ZERO); --ext2_ihash; - lwkt_token_init(&ext2_ihash_token); + lwkt_token_init(&ext2_ihash_token, 1); } int ext2_uninit(struct vfsconf *vfc) { - lwkt_tokref ilock; - - lwkt_gettoken(&ilock, &ext2_ihash_token); + lwkt_gettoken(&ext2_ihash_token); if (ext2_ihashtbl) kfree(ext2_ihashtbl, M_EXT2IHASH); - lwkt_reltoken(&ilock); + lwkt_reltoken(&ext2_ihash_token); return (0); } @@ -92,14 +90,13 @@ struct vnode * ext2_ihashlookup(cdev_t dev, ino_t inum) { struct inode *ip; - lwkt_tokref ilock; - lwkt_gettoken(&ilock, &ext2_ihash_token); + lwkt_gettoken(&ext2_ihash_token); for (ip = *INOHASH(dev, inum); ip; ip = ip->i_next) { if (inum == ip->i_number && dev == ip->i_dev) break; } - lwkt_reltoken(&ilock); + lwkt_reltoken(&ext2_ihash_token); if (ip) return (ITOV(ip)); return (NULLVP); @@ -117,11 +114,10 @@ ext2_ihashlookup(cdev_t dev, ino_t inum) struct vnode * ext2_ihashget(cdev_t dev, ino_t inum) { - lwkt_tokref ilock; struct inode *ip; struct vnode *vp; - lwkt_gettoken(&ilock, &ext2_ihash_token); + lwkt_gettoken(&ext2_ihash_token); loop: for (ip = *INOHASH(dev, inum); ip; ip = ip->i_next) { if (inum != ip->i_number || dev != ip->i_dev) @@ -141,10 +137,10 @@ loop: vput(vp); goto loop; } - lwkt_reltoken(&ilock); + lwkt_reltoken(&ext2_ihash_token); return (vp); } - lwkt_reltoken(&ilock); + lwkt_reltoken(&ext2_ihash_token); return (NULL); } @@ -156,15 +152,14 @@ loop: int ext2_ihashcheck(cdev_t dev, ino_t inum) { - lwkt_tokref ilock; struct inode *ip; - lwkt_gettoken(&ilock, &ext2_ihash_token); + lwkt_gettoken(&ext2_ihash_token); for (ip = *INOHASH(dev, inum); ip; ip = ip->i_next) { if (inum == ip->i_number && dev == ip->i_dev) break; } - lwkt_reltoken(&ilock); + lwkt_reltoken(&ext2_ihash_token); return(ip ? 
1 : 0); } @@ -176,14 +171,13 @@ ext2_ihashins(struct inode *ip) { struct inode **ipp; struct inode *iq; - lwkt_tokref ilock; KKASSERT((ip->i_flag & IN_HASHED) == 0); - lwkt_gettoken(&ilock, &ext2_ihash_token); + lwkt_gettoken(&ext2_ihash_token); ipp = INOHASH(ip->i_dev, ip->i_number); while ((iq = *ipp) != NULL) { if (ip->i_dev == iq->i_dev && ip->i_number == iq->i_number) { - lwkt_reltoken(&ilock); + lwkt_reltoken(&ext2_ihash_token); return(EBUSY); } ipp = &iq->i_next; @@ -191,7 +185,7 @@ ext2_ihashins(struct inode *ip) ip->i_next = NULL; *ipp = ip; ip->i_flag |= IN_HASHED; - lwkt_reltoken(&ilock); + lwkt_reltoken(&ext2_ihash_token); return(0); } @@ -201,11 +195,10 @@ ext2_ihashins(struct inode *ip) void ext2_ihashrem(struct inode *ip) { - lwkt_tokref ilock; struct inode **ipp; struct inode *iq; - lwkt_gettoken(&ilock, &ext2_ihash_token); + lwkt_gettoken(&ext2_ihash_token); if (ip->i_flag & IN_HASHED) { ipp = INOHASH(ip->i_dev, ip->i_number); while ((iq = *ipp) != NULL) { @@ -218,6 +211,6 @@ ext2_ihashrem(struct inode *ip) ip->i_next = NULL; ip->i_flag &= ~IN_HASHED; } - lwkt_reltoken(&ilock); + lwkt_reltoken(&ext2_ihash_token); } diff --git a/sys/vfs/gnu/ext2fs/ext2_vnops.c b/sys/vfs/gnu/ext2fs/ext2_vnops.c index c91d70a6fd..2f6dafa215 100644 --- a/sys/vfs/gnu/ext2fs/ext2_vnops.c +++ b/sys/vfs/gnu/ext2fs/ext2_vnops.c @@ -206,7 +206,6 @@ ext2_fsync(struct vop_fsync_args *ap) { struct ext2_fsync_bp_info info; struct vnode *vp = ap->a_vp; - lwkt_tokref vlock; int count; /* @@ -218,7 +217,7 @@ ext2_fsync(struct vop_fsync_args *ap) */ ext2_discard_prealloc(VTOI(vp)); - lwkt_gettoken(&vlock, &vp->v_token); + lwkt_gettoken(&vp->v_token); info.vp = vp; loop: info.waitfor = ap->a_waitfor; @@ -236,7 +235,7 @@ loop: } #endif } - lwkt_reltoken(&vlock); + lwkt_reltoken(&vp->v_token); return (EXT2_UPDATE(ap->a_vp, ap->a_waitfor == MNT_WAIT)); } @@ -1942,7 +1941,6 @@ ext2_kqfilter(struct vop_kqfilter_args *ap) { struct vnode *vp = ap->a_vp; struct knote *kn = ap->a_kn; - lwkt_tokref vlock; switch (kn->kn_filter) { case EVFILT_READ: @@ -1960,9 +1958,9 @@ ext2_kqfilter(struct vop_kqfilter_args *ap) kn->kn_hook = (caddr_t)vp; - lwkt_gettoken(&vlock, &vp->v_token); + lwkt_gettoken(&vp->v_token); SLIST_INSERT_HEAD(&vp->v_pollinfo.vpi_selinfo.si_note, kn, kn_selnext); - lwkt_reltoken(&vlock); + lwkt_reltoken(&vp->v_token); return (0); } @@ -1971,12 +1969,11 @@ static void filt_ext2detach(struct knote *kn) { struct vnode *vp = (struct vnode *)kn->kn_hook; - lwkt_tokref vlock; - lwkt_gettoken(&vlock, &vp->v_token); + lwkt_gettoken(&vp->v_token); SLIST_REMOVE(&vp->v_pollinfo.vpi_selinfo.si_note, kn, knote, kn_selnext); - lwkt_reltoken(&vlock); + lwkt_reltoken(&vp->v_token); } /*ARGSUSED*/ diff --git a/sys/vfs/hammer/hammer_vnops.c b/sys/vfs/hammer/hammer_vnops.c index d5fe582362..0289f6a30c 100644 --- a/sys/vfs/hammer/hammer_vnops.c +++ b/sys/vfs/hammer/hammer_vnops.c @@ -3360,7 +3360,6 @@ hammer_vop_kqfilter(struct vop_kqfilter_args *ap) { struct vnode *vp = ap->a_vp; struct knote *kn = ap->a_kn; - lwkt_tokref vlock; switch (kn->kn_filter) { case EVFILT_READ: @@ -3378,9 +3377,9 @@ hammer_vop_kqfilter(struct vop_kqfilter_args *ap) kn->kn_hook = (caddr_t)vp; - lwkt_gettoken(&vlock, &vp->v_token); + lwkt_gettoken(&vp->v_token); SLIST_INSERT_HEAD(&vp->v_pollinfo.vpi_selinfo.si_note, kn, kn_selnext); - lwkt_reltoken(&vlock); + lwkt_reltoken(&vp->v_token); return(0); } @@ -3389,12 +3388,11 @@ static void filt_hammerdetach(struct knote *kn) { struct vnode *vp = (void *)kn->kn_hook; - lwkt_tokref vlock; - 
lwkt_gettoken(&vlock, &vp->v_token); + lwkt_gettoken(&vp->v_token); SLIST_REMOVE(&vp->v_pollinfo.vpi_selinfo.si_note, kn, knote, kn_selnext); - lwkt_reltoken(&vlock); + lwkt_reltoken(&vp->v_token); } static int diff --git a/sys/vfs/hpfs/hpfs_hash.c b/sys/vfs/hpfs/hpfs_hash.c index ee71b5e68d..d329e26ee6 100644 --- a/sys/vfs/hpfs/hpfs_hash.c +++ b/sys/vfs/hpfs/hpfs_hash.c @@ -69,7 +69,7 @@ hpfs_hphashinit(void) lockinit (&hpfs_hphash_lock, "hpfs_hphashlock", 0, 0); hpfs_hphashtbl = HASHINIT(desiredvnodes, M_HPFSHASH, M_WAITOK, &hpfs_hphash); - lwkt_token_init(&hpfs_hphash_token); + lwkt_token_init(&hpfs_hphash_token, 1); } /* @@ -78,12 +78,10 @@ hpfs_hphashinit(void) int hpfs_hphash_uninit(struct vfsconf *vfc) { - lwkt_tokref ilock; - - lwkt_gettoken(&ilock, &hpfs_hphash_token); + lwkt_gettoken(&hpfs_hphash_token); if (hpfs_hphashtbl) kfree(hpfs_hphashtbl, M_HPFSHASH); - lwkt_reltoken(&ilock); + lwkt_reltoken(&hpfs_hphash_token); return 0; } @@ -96,14 +94,13 @@ struct hpfsnode * hpfs_hphashlookup(cdev_t dev, lsn_t ino) { struct hpfsnode *hp; - lwkt_tokref ilock; - lwkt_gettoken(&ilock, &hpfs_hphash_token); + lwkt_gettoken(&hpfs_hphash_token); for (hp = HPNOHASH(dev, ino)->lh_first; hp; hp = hp->h_hash.le_next) { if (ino == hp->h_no && dev == hp->h_dev) break; } - lwkt_reltoken(&ilock); + lwkt_reltoken(&hpfs_hphash_token); return (hp); } @@ -112,10 +109,9 @@ struct vnode * hpfs_hphashvget(cdev_t dev, lsn_t ino) { struct hpfsnode *hp; - lwkt_tokref ilock; struct vnode *vp; - lwkt_gettoken(&ilock, &hpfs_hphash_token); + lwkt_gettoken(&hpfs_hphash_token); loop: for (hp = HPNOHASH(dev, ino)->lh_first; hp; hp = hp->h_hash.le_next) { if (ino != hp->h_no || dev != hp->h_dev) @@ -140,10 +136,10 @@ loop: /* * Or if the vget fails (due to a race) */ - lwkt_reltoken(&ilock); + lwkt_reltoken(&hpfs_hphash_token); return (vp); } - lwkt_reltoken(&ilock); + lwkt_reltoken(&hpfs_hphash_token); return (NULLVP); } @@ -154,13 +150,12 @@ void hpfs_hphashins(struct hpfsnode *hp) { struct hphashhead *hpp; - lwkt_tokref ilock; - lwkt_gettoken(&ilock, &hpfs_hphash_token); + lwkt_gettoken(&hpfs_hphash_token); hpp = HPNOHASH(hp->h_dev, hp->h_no); hp->h_flag |= H_HASHED; LIST_INSERT_HEAD(hpp, hp, h_hash); - lwkt_reltoken(&ilock); + lwkt_reltoken(&hpfs_hphash_token); } /* @@ -169,9 +164,7 @@ hpfs_hphashins(struct hpfsnode *hp) void hpfs_hphashrem(struct hpfsnode *hp) { - lwkt_tokref ilock; - - lwkt_gettoken(&ilock, &hpfs_hphash_token); + lwkt_gettoken(&hpfs_hphash_token); if (hp->h_flag & H_HASHED) { hp->h_flag &= ~H_HASHED; LIST_REMOVE(hp, h_hash); @@ -180,5 +173,5 @@ hpfs_hphashrem(struct hpfsnode *hp) hp->h_hash.le_prev = NULL; #endif } - lwkt_reltoken(&ilock); + lwkt_reltoken(&hpfs_hphash_token); } diff --git a/sys/vfs/hpfs/hpfs_vfsops.c b/sys/vfs/hpfs/hpfs_vfsops.c index 30bd14dec1..5f3df4749b 100644 --- a/sys/vfs/hpfs/hpfs_vfsops.c +++ b/sys/vfs/hpfs/hpfs_vfsops.c @@ -500,7 +500,7 @@ hpfs_vget(struct mount *mp, struct vnode *dvp, ino_t ino, struct vnode **vpp) if (ino == (ino_t)hpmp->hpm_su.su_rootfno) vsetflags(vp, VROOT); - lwkt_token_init(&hp->h_interlock); + lwkt_token_init(&hp->h_interlock, 1); hp->h_flag = H_INVAL; hp->h_vp = vp; diff --git a/sys/vfs/isofs/cd9660/cd9660_node.c b/sys/vfs/isofs/cd9660/cd9660_node.c index 567fca6caa..cf3f5d0e21 100644 --- a/sys/vfs/isofs/cd9660/cd9660_node.c +++ b/sys/vfs/isofs/cd9660/cd9660_node.c @@ -87,7 +87,7 @@ cd9660_init(struct vfsconf *vfsp) isohashtbl = kmalloc(sizeof(void *) * isohash, M_ISOFSMNT, M_WAITOK|M_ZERO); --isohash; - lwkt_token_init(&cd9660_ihash_token); + 
lwkt_token_init(&cd9660_ihash_token, 1); return (0); } @@ -109,10 +109,9 @@ struct vnode * cd9660_ihashget(cdev_t dev, ino_t inum) { struct iso_node *ip; - lwkt_tokref ilock; struct vnode *vp; - lwkt_gettoken(&ilock, &cd9660_ihash_token); + lwkt_gettoken(&cd9660_ihash_token); loop: for (ip = isohashtbl[INOHASH(dev, inum)]; ip; ip = ip->i_next) { if (inum != ip->i_number || dev != ip->i_dev) @@ -131,10 +130,10 @@ loop: if (ip == NULL || ITOV(ip) != vp) { goto loop; } - lwkt_reltoken(&ilock); + lwkt_reltoken(&cd9660_ihash_token); return (vp); } - lwkt_reltoken(&ilock); + lwkt_reltoken(&cd9660_ihash_token); return (NULL); } @@ -146,20 +145,19 @@ int cd9660_ihashins(struct iso_node *ip) { struct iso_node **ipp, *iq; - lwkt_tokref ilock; - lwkt_gettoken(&ilock, &cd9660_ihash_token); + lwkt_gettoken(&cd9660_ihash_token); ipp = &isohashtbl[INOHASH(ip->i_dev, ip->i_number)]; while ((iq = *ipp) != NULL) { if (iq->i_dev == ip->i_dev && iq->i_number == ip->i_number) { - lwkt_reltoken(&ilock); + lwkt_reltoken(&cd9660_ihash_token); return(EBUSY); } ipp = &iq->i_next; } ip->i_next = NULL; *ipp = ip; - lwkt_reltoken(&ilock); + lwkt_reltoken(&cd9660_ihash_token); return(0); } @@ -170,9 +168,8 @@ static void cd9660_ihashrem(struct iso_node *ip) { struct iso_node **ipp, *iq; - lwkt_tokref ilock; - lwkt_gettoken(&ilock, &cd9660_ihash_token); + lwkt_gettoken(&cd9660_ihash_token); ipp = &isohashtbl[INOHASH(ip->i_dev, ip->i_number)]; while ((iq = *ipp) != NULL) { if (ip == iq) @@ -182,7 +179,7 @@ cd9660_ihashrem(struct iso_node *ip) KKASSERT(ip == iq); *ipp = ip->i_next; ip->i_next = NULL; - lwkt_reltoken(&ilock); + lwkt_reltoken(&cd9660_ihash_token); } /* diff --git a/sys/vfs/msdosfs/msdosfs_denode.c b/sys/vfs/msdosfs/msdosfs_denode.c index 74e0cc629c..b2a2799e76 100644 --- a/sys/vfs/msdosfs/msdosfs_denode.c +++ b/sys/vfs/msdosfs/msdosfs_denode.c @@ -122,7 +122,7 @@ msdosfs_init(struct vfsconf *vfsp) dehashtbl = kmalloc(sizeof(void *) * dehash, M_MSDOSFSMNT, M_WAITOK|M_ZERO); --dehash; - lwkt_token_init(&dehash_token); + lwkt_token_init(&dehash_token, 1); return (0); } @@ -139,10 +139,9 @@ static struct denode * msdosfs_hashget(cdev_t dev, u_long dirclust, u_long diroff) { struct denode *dep; - lwkt_tokref ilock; struct vnode *vp; - lwkt_gettoken(&ilock, &dehash_token); + lwkt_gettoken(&dehash_token); loop: for (dep = DEHASH(dev, dirclust, diroff); dep; dep = dep->de_next) { if (dirclust != dep->de_dirclust @@ -171,10 +170,10 @@ loop: vput(vp); goto loop; } - lwkt_reltoken(&ilock); + lwkt_reltoken(&dehash_token); return (dep); } - lwkt_reltoken(&ilock); + lwkt_reltoken(&dehash_token); return (NULL); } @@ -187,23 +186,22 @@ int msdosfs_hashins(struct denode *dep) { struct denode **depp, *deq; - lwkt_tokref ilock; - lwkt_gettoken(&ilock, &dehash_token); + lwkt_gettoken(&dehash_token); depp = &DEHASH(dep->de_dev, dep->de_dirclust, dep->de_diroffset); while ((deq = *depp) != NULL) { if (deq->de_dev == dep->de_dev && deq->de_dirclust == dep->de_dirclust && deq->de_diroffset == dep->de_diroffset && deq->de_refcnt > 0) { - lwkt_reltoken(&ilock); + lwkt_reltoken(&dehash_token); return(EBUSY); } depp = &deq->de_next; } dep->de_next = NULL; *depp = dep; - lwkt_reltoken(&ilock); + lwkt_reltoken(&dehash_token); return(0); } @@ -212,9 +210,8 @@ void msdosfs_hashrem(struct denode *dep) { struct denode **depp, *deq; - lwkt_tokref ilock; - lwkt_gettoken(&ilock, &dehash_token); + lwkt_gettoken(&dehash_token); depp = &DEHASH(dep->de_dev, dep->de_dirclust, dep->de_diroffset); while ((deq = *depp) != NULL) { if (dep == deq) @@ 
-224,22 +221,21 @@ msdosfs_hashrem(struct denode *dep) KKASSERT(dep == deq); *depp = dep->de_next; dep->de_next = NULL; - lwkt_reltoken(&ilock); + lwkt_reltoken(&dehash_token); } void msdosfs_reinsert(struct denode *ip, u_long new_dirclust, u_long new_diroffset) { - lwkt_tokref ilock; int error; - lwkt_gettoken(&ilock, &dehash_token); + lwkt_gettoken(&dehash_token); msdosfs_hashrem(ip); ip->de_dirclust = new_dirclust; ip->de_diroffset = new_diroffset; error = msdosfs_hashins(ip); KASSERT(!error, ("msdosfs_reinsert: insertion failed %d", error)); - lwkt_reltoken(&ilock); + lwkt_reltoken(&dehash_token); } /* diff --git a/sys/vfs/nfs/nfs_subs.c b/sys/vfs/nfs/nfs_subs.c index 638861af05..0cdeebcbb4 100644 --- a/sys/vfs/nfs/nfs_subs.c +++ b/sys/vfs/nfs/nfs_subs.c @@ -1432,13 +1432,11 @@ static int nfs_clearcommit_callback(struct mount *mp, struct vnode *vp, void *data __unused) { - lwkt_tokref vlock; - vhold(vp); - lwkt_gettoken(&vlock, &vp->v_token); + lwkt_gettoken(&vp->v_token); RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL, nfs_clearcommit_bp, NULL); - lwkt_reltoken(&vlock); + lwkt_reltoken(&vp->v_token); vdrop(vp); return(0); } diff --git a/sys/vfs/nfs/nfs_vnops.c b/sys/vfs/nfs/nfs_vnops.c index 0275b62acb..7dc763a790 100644 --- a/sys/vfs/nfs/nfs_vnops.c +++ b/sys/vfs/nfs/nfs_vnops.c @@ -3102,7 +3102,6 @@ nfs_flush(struct vnode *vp, int waitfor, struct thread *td, int commit) struct nfsnode *np = VTONFS(vp); struct nfsmount *nmp = VFSTONFS(vp->v_mount); struct nfs_flush_info info; - lwkt_tokref vlock; int error; bzero(&info, sizeof(info)); @@ -3111,7 +3110,7 @@ nfs_flush(struct vnode *vp, int waitfor, struct thread *td, int commit) info.waitfor = waitfor; info.slpflag = (nmp->nm_flag & NFSMNT_INT) ? PCATCH : 0; info.loops = 0; - lwkt_gettoken(&vlock, &vp->v_token); + lwkt_gettoken(&vp->v_token); do { /* @@ -3187,7 +3186,7 @@ nfs_flush(struct vnode *vp, int waitfor, struct thread *td, int commit) error = np->n_error; np->n_flag &= ~NWRITEERR; } - lwkt_reltoken(&vlock); + lwkt_reltoken(&vp->v_token); return (error); } diff --git a/sys/vfs/ntfs/ntfs_ihash.c b/sys/vfs/ntfs/ntfs_ihash.c index bc26e2e478..7af561e986 100644 --- a/sys/vfs/ntfs/ntfs_ihash.c +++ b/sys/vfs/ntfs/ntfs_ihash.c @@ -70,7 +70,7 @@ ntfs_nthashinit(void) lockinit(&ntfs_hashlock, "ntfs_nthashlock", 0, 0); ntfs_nthashtbl = HASHINIT(desiredvnodes, M_NTFSNTHASH, M_WAITOK, &ntfs_nthash); - lwkt_token_init(&ntfs_nthash_slock); + lwkt_token_init(&ntfs_nthash_slock, 1); } /* @@ -79,12 +79,10 @@ ntfs_nthashinit(void) int ntfs_nthash_uninit(struct vfsconf *vfc) { - lwkt_tokref ilock; - - lwkt_gettoken(&ilock, &ntfs_nthash_slock); + lwkt_gettoken(&ntfs_nthash_slock); if (ntfs_nthashtbl) kfree(ntfs_nthashtbl, M_NTFSNTHASH); - lwkt_reltoken(&ilock); + lwkt_reltoken(&ntfs_nthash_slock); return 0; } @@ -97,14 +95,13 @@ struct ntnode * ntfs_nthashlookup(cdev_t dev, ino_t inum) { struct ntnode *ip; - lwkt_tokref ilock; - lwkt_gettoken(&ilock, &ntfs_nthash_slock); + lwkt_gettoken(&ntfs_nthash_slock); for (ip = NTNOHASH(dev, inum)->lh_first; ip; ip = ip->i_hash.le_next) { if (inum == ip->i_number && dev == ip->i_dev) break; } - lwkt_reltoken(&ilock); + lwkt_reltoken(&ntfs_nthash_slock); return (ip); } @@ -116,13 +113,12 @@ void ntfs_nthashins(struct ntnode *ip) { struct nthashhead *ipp; - lwkt_tokref ilock; - lwkt_gettoken(&ilock, &ntfs_nthash_slock); + lwkt_gettoken(&ntfs_nthash_slock); ipp = NTNOHASH(ip->i_dev, ip->i_number); LIST_INSERT_HEAD(ipp, ip, i_hash); ip->i_flag |= IN_HASHED; - lwkt_reltoken(&ilock); + 
lwkt_reltoken(&ntfs_nthash_slock); } /* @@ -131,9 +127,7 @@ ntfs_nthashins(struct ntnode *ip) void ntfs_nthashrem(struct ntnode *ip) { - lwkt_tokref ilock; - - lwkt_gettoken(&ilock, &ntfs_nthash_slock); + lwkt_gettoken(&ntfs_nthash_slock); if (ip->i_flag & IN_HASHED) { ip->i_flag &= ~IN_HASHED; LIST_REMOVE(ip, i_hash); @@ -142,5 +136,5 @@ ntfs_nthashrem(struct ntnode *ip) ip->i_hash.le_prev = NULL; #endif } - lwkt_reltoken(&ilock); + lwkt_reltoken(&ntfs_nthash_slock); } diff --git a/sys/vfs/ntfs/ntfs_vfsops.c b/sys/vfs/ntfs/ntfs_vfsops.c index 5e4a9c073f..774f6265cc 100644 --- a/sys/vfs/ntfs/ntfs_vfsops.c +++ b/sys/vfs/ntfs/ntfs_vfsops.c @@ -161,7 +161,6 @@ ntfs_mountroot(void) struct vnode *rootvp; struct thread *td = curthread; /* XXX */ struct ntfs_args args; - lwkt_tokref ilock; int error; if (root_device->dv_class != DV_DISK) diff --git a/sys/vfs/udf/udf_vfsops.c b/sys/vfs/udf/udf_vfsops.c index fe9e3b9b12..84b3a466b5 100644 --- a/sys/vfs/udf/udf_vfsops.c +++ b/sys/vfs/udf/udf_vfsops.c @@ -384,7 +384,7 @@ udf_mountfs(struct vnode *devvp, struct mount *mp) brelse(bp); bp = NULL; - lwkt_token_init(&udfmp->hash_token); + lwkt_token_init(&udfmp->hash_token, 1); udfmp->hashtbl = phashinit(UDF_HASHTBLSIZE, M_UDFMOUNT, &udfmp->hashsz); return(0); diff --git a/sys/vfs/udf/udf_vnops.c b/sys/vfs/udf/udf_vnops.c index 92cc6d569c..2d42f658ae 100644 --- a/sys/vfs/udf/udf_vnops.c +++ b/sys/vfs/udf/udf_vnops.c @@ -93,15 +93,14 @@ udf_hashlookup(struct udf_mnt *udfmp, ino_t id, struct vnode **vpp) struct udf_node *node; struct udf_hash_lh *lh; struct vnode *vp; - lwkt_tokref hashlock; *vpp = NULL; - lwkt_gettoken(&hashlock, &udfmp->hash_token); + lwkt_gettoken(&udfmp->hash_token); loop: lh = &udfmp->hashtbl[id % udfmp->hashsz]; if (lh == NULL) { - lwkt_reltoken(&hashlock); + lwkt_reltoken(&udfmp->hash_token); return(ENOENT); } LIST_FOREACH(node, lh, le) { @@ -123,12 +122,12 @@ loop: vput(vp); goto loop; } - lwkt_reltoken(&hashlock); + lwkt_reltoken(&udfmp->hash_token); *vpp = vp; return(0); } - lwkt_reltoken(&hashlock); + lwkt_reltoken(&udfmp->hash_token); return(0); } @@ -137,14 +136,13 @@ udf_hashins(struct udf_node *node) { struct udf_mnt *udfmp; struct udf_hash_lh *lh; - lwkt_tokref hashlock; udfmp = node->udfmp; - lwkt_gettoken(&hashlock, &udfmp->hash_token); + lwkt_gettoken(&udfmp->hash_token); lh = &udfmp->hashtbl[node->hash_id % udfmp->hashsz]; LIST_INSERT_HEAD(lh, node, le); - lwkt_reltoken(&hashlock); + lwkt_reltoken(&udfmp->hash_token); return(0); } @@ -154,16 +152,15 @@ udf_hashrem(struct udf_node *node) { struct udf_mnt *udfmp; struct udf_hash_lh *lh; - lwkt_tokref hashlock; udfmp = node->udfmp; - lwkt_gettoken(&hashlock, &udfmp->hash_token); + lwkt_gettoken(&udfmp->hash_token); lh = &udfmp->hashtbl[node->hash_id % udfmp->hashsz]; if (lh == NULL) panic("hash entry is NULL, node->hash_id= %"PRId64"\n", node->hash_id); LIST_REMOVE(node, le); - lwkt_reltoken(&hashlock); + lwkt_reltoken(&udfmp->hash_token); return(0); } diff --git a/sys/vfs/ufs/ffs_rawread.c b/sys/vfs/ufs/ffs_rawread.c index 18bae99625..0876e37a42 100644 --- a/sys/vfs/ufs/ffs_rawread.c +++ b/sys/vfs/ufs/ffs_rawread.c @@ -89,12 +89,11 @@ ffs_rawread_sync(struct vnode *vp) { int error; int upgraded; - lwkt_tokref vlock; /* * Check for dirty mmap, pending writes and dirty buffers */ - lwkt_gettoken(&vlock, &vp->v_token); + lwkt_gettoken(&vp->v_token); if (bio_track_active(&vp->v_track_write) || !RB_EMPTY(&vp->v_rbdirty_tree) || (vp->v_flag & VOBJDIRTY) != 0) { @@ -136,7 +135,7 @@ ffs_rawread_sync(struct vnode *vp) error = 0; } 
done: - lwkt_reltoken(&vlock); + lwkt_reltoken(&vp->v_token); return error; } diff --git a/sys/vfs/ufs/ffs_softdep.c b/sys/vfs/ufs/ffs_softdep.c index 8e2c63f61f..1dc1b6bca4 100644 --- a/sys/vfs/ufs/ffs_softdep.c +++ b/sys/vfs/ufs/ffs_softdep.c @@ -1816,7 +1816,6 @@ softdep_setup_freeblocks(struct inode *ip, off_t length) struct freeblks *freeblks; struct inodedep *inodedep; struct allocdirect *adp; - lwkt_tokref vlock; struct vnode *vp; struct buf *bp; struct fs *fs; @@ -1901,12 +1900,12 @@ softdep_setup_freeblocks(struct inode *ip, off_t length) info.fs = fs; info.ip = ip; - lwkt_gettoken(&vlock, &vp->v_token); + lwkt_gettoken(&vp->v_token); do { count = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL, softdep_setup_freeblocks_bp, &info); } while (count != 0); - lwkt_reltoken(&vlock); + lwkt_reltoken(&vp->v_token); if (inodedep_lookup(fs, ip->i_number, 0, &inodedep) != 0) (void)free_inodedep(inodedep); @@ -4209,15 +4208,13 @@ static int softdep_fsync_mountdev_bp(struct buf *bp, void *data); void softdep_fsync_mountdev(struct vnode *vp) { - lwkt_tokref vlock; - if (!vn_isdisk(vp, NULL)) panic("softdep_fsync_mountdev: vnode not a disk"); ACQUIRE_LOCK(&lk); - lwkt_gettoken(&vlock, &vp->v_token); + lwkt_gettoken(&vp->v_token); RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL, softdep_fsync_mountdev_bp, vp); - lwkt_reltoken(&vlock); + lwkt_reltoken(&vp->v_token); drain_output(vp, 1); FREE_LOCK(&lk); } @@ -4271,7 +4268,6 @@ int softdep_sync_metadata(struct vnode *vp, struct thread *td) { struct softdep_sync_metadata_info info; - lwkt_tokref vlock; int error, waitfor; /* @@ -4320,10 +4316,10 @@ top: info.vp = vp; info.waitfor = waitfor; - lwkt_gettoken(&vlock, &vp->v_token); + lwkt_gettoken(&vp->v_token); error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL, softdep_sync_metadata_bp, &info); - lwkt_reltoken(&vlock); + lwkt_reltoken(&vp->v_token); if (error < 0) { FREE_LOCK(&lk); return(-error); /* error code */ diff --git a/sys/vfs/ufs/ufs_ihash.c b/sys/vfs/ufs/ufs_ihash.c index 2a94abc920..ae543f6af8 100644 --- a/sys/vfs/ufs/ufs_ihash.c +++ b/sys/vfs/ufs/ufs_ihash.c @@ -68,18 +68,16 @@ ufs_ihashinit(void) ihash <<= 1; ihashtbl = kmalloc(sizeof(void *) * ihash, M_UFSIHASH, M_WAITOK|M_ZERO); --ihash; - lwkt_token_init(&ufs_ihash_token); + lwkt_token_init(&ufs_ihash_token, 1); } int ufs_uninit(struct vfsconf *vfc) { - lwkt_tokref ilock; - - lwkt_gettoken(&ilock, &ufs_ihash_token); + lwkt_gettoken(&ufs_ihash_token); if (ihashtbl) kfree(ihashtbl, M_UFSIHASH); - lwkt_reltoken(&ilock); + lwkt_reltoken(&ufs_ihash_token); return (0); } @@ -91,14 +89,13 @@ struct vnode * ufs_ihashlookup(cdev_t dev, ino_t inum) { struct inode *ip; - lwkt_tokref ilock; - lwkt_gettoken(&ilock, &ufs_ihash_token); + lwkt_gettoken(&ufs_ihash_token); for (ip = *INOHASH(dev, inum); ip; ip = ip->i_next) { if (inum == ip->i_number && dev == ip->i_dev) break; } - lwkt_reltoken(&ilock); + lwkt_reltoken(&ufs_ihash_token); if (ip) return (ITOV(ip)); return (NULLVP); @@ -116,11 +113,10 @@ ufs_ihashlookup(cdev_t dev, ino_t inum) struct vnode * ufs_ihashget(cdev_t dev, ino_t inum) { - lwkt_tokref ilock; struct inode *ip; struct vnode *vp; - lwkt_gettoken(&ilock, &ufs_ihash_token); + lwkt_gettoken(&ufs_ihash_token); loop: for (ip = *INOHASH(dev, inum); ip; ip = ip->i_next) { if (inum != ip->i_number || dev != ip->i_dev) @@ -140,10 +136,10 @@ loop: vput(vp); goto loop; } - lwkt_reltoken(&ilock); + lwkt_reltoken(&ufs_ihash_token); return (vp); } - lwkt_reltoken(&ilock); + lwkt_reltoken(&ufs_ihash_token); return (NULL); } @@ -155,15 +151,14 
@@ loop: int ufs_ihashcheck(cdev_t dev, ino_t inum) { - lwkt_tokref ilock; struct inode *ip; - lwkt_gettoken(&ilock, &ufs_ihash_token); + lwkt_gettoken(&ufs_ihash_token); for (ip = *INOHASH(dev, inum); ip; ip = ip->i_next) { if (inum == ip->i_number && dev == ip->i_dev) break; } - lwkt_reltoken(&ilock); + lwkt_reltoken(&ufs_ihash_token); return(ip ? 1 : 0); } @@ -175,14 +170,13 @@ ufs_ihashins(struct inode *ip) { struct inode **ipp; struct inode *iq; - lwkt_tokref ilock; KKASSERT((ip->i_flag & IN_HASHED) == 0); - lwkt_gettoken(&ilock, &ufs_ihash_token); + lwkt_gettoken(&ufs_ihash_token); ipp = INOHASH(ip->i_dev, ip->i_number); while ((iq = *ipp) != NULL) { if (ip->i_dev == iq->i_dev && ip->i_number == iq->i_number) { - lwkt_reltoken(&ilock); + lwkt_reltoken(&ufs_ihash_token); return(EBUSY); } ipp = &iq->i_next; @@ -190,7 +184,7 @@ ufs_ihashins(struct inode *ip) ip->i_next = NULL; *ipp = ip; ip->i_flag |= IN_HASHED; - lwkt_reltoken(&ilock); + lwkt_reltoken(&ufs_ihash_token); return(0); } @@ -200,11 +194,10 @@ ufs_ihashins(struct inode *ip) void ufs_ihashrem(struct inode *ip) { - lwkt_tokref ilock; struct inode **ipp; struct inode *iq; - lwkt_gettoken(&ilock, &ufs_ihash_token); + lwkt_gettoken(&ufs_ihash_token); if (ip->i_flag & IN_HASHED) { ipp = INOHASH(ip->i_dev, ip->i_number); while ((iq = *ipp) != NULL) { @@ -217,6 +210,6 @@ ufs_ihashrem(struct inode *ip) ip->i_next = NULL; ip->i_flag &= ~IN_HASHED; } - lwkt_reltoken(&ilock); + lwkt_reltoken(&ufs_ihash_token); } diff --git a/sys/vfs/ufs/ufs_vnops.c b/sys/vfs/ufs/ufs_vnops.c index fd489a5ef0..1ab8b367a0 100644 --- a/sys/vfs/ufs/ufs_vnops.c +++ b/sys/vfs/ufs/ufs_vnops.c @@ -2133,7 +2133,6 @@ ufs_kqfilter(struct vop_kqfilter_args *ap) { struct vnode *vp = ap->a_vp; struct knote *kn = ap->a_kn; - lwkt_tokref vlock; switch (kn->kn_filter) { case EVFILT_READ: @@ -2151,9 +2150,9 @@ ufs_kqfilter(struct vop_kqfilter_args *ap) kn->kn_hook = (caddr_t)vp; - lwkt_gettoken(&vlock, &vp->v_token); + lwkt_gettoken(&vp->v_token); SLIST_INSERT_HEAD(&vp->v_pollinfo.vpi_selinfo.si_note, kn, kn_selnext); - lwkt_reltoken(&vlock); + lwkt_reltoken(&vp->v_token); return (0); } @@ -2162,12 +2161,11 @@ static void filt_ufsdetach(struct knote *kn) { struct vnode *vp = (struct vnode *)kn->kn_hook; - lwkt_tokref vlock; - lwkt_gettoken(&vlock, &vp->v_token); + lwkt_gettoken(&vp->v_token); SLIST_REMOVE(&vp->v_pollinfo.vpi_selinfo.si_note, kn, knote, kn_selnext); - lwkt_reltoken(&vlock); + lwkt_reltoken(&vp->v_token); } /*ARGSUSED*/ -- 2.41.0
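
For reference, a sketch of the pool-token side of the same API change,
implied by the prototype change in sys/sys/thread.h above (the old
lwkt_getpooltoken(lwkt_tokref_t, void *) becomes lwkt_token_t
lwkt_getpooltoken(void *)): the function now returns the token it hashed
from the object address, and that pointer is what gets handed back to
lwkt_reltoken(). The frob_object and obj names are hypothetical.

	#include <sys/thread.h>

	/*
	 * Hypothetical example.  Serialize access to an arbitrary object
	 * using the pool token hashed from its address.
	 */
	static void
	frob_object(void *obj)
	{
		lwkt_token_t tok;

		tok = lwkt_getpooltoken(obj); /* was lwkt_getpooltoken(&ref, obj) */
		/* ... modify the object while the token is held ... */
		lwkt_reltoken(tok);
	}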