From: Matthew Dillon
Date: Sun, 20 Dec 2009 06:18:37 +0000 (-0800)
Subject: kernel - misc MPSAFE work
X-Git-Tag: v2.7.1~273
X-Git-Url: https://gitweb.dragonflybsd.org/dragonfly.git/commitdiff_plain/61f96b6ff60acbd1692d0bffe07b5e1f262327dc

kernel - misc MPSAFE work

* prisoncount, prison_hold(), and prison_free() are now MPSAFE.

* Remove the spinlock from ucred.

* Fix a cr_ref race in crfree(); crfree() is now fully MPSAFE.

* cache_hold(), cache_copy(), and cache_changemount() are now MPSAFE.

* cache_drop() is not yet MPSAFE in the 1->0 transition case.

* nlookup_init() is now MPSAFE.
---
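[Editorial note] The common thread in the prison_free(), crfree(), and uidrop() changes below is the lock-free 1->0 refcount drop: atomic_fetchadd_int() returns the counter's value from *before* the decrement, so exactly one racing caller sees 1 and takes ownership of destruction. What follows is a minimal userland sketch of that idea, not the kernel code itself: GCC/Clang __atomic builtins stand in for DragonFly's atomic_fetchadd_int()/atomic_add_int(), and struct obj, obj_hold(), obj_drop(), and obj_destroy() are hypothetical names used only for illustration.

#include <assert.h>
#include <stdlib.h>

struct obj {
	int	ref;			/* reference count, atomic updates only */
};

static void
obj_destroy(struct obj *op)
{
	free(op);
}

static void
obj_hold(struct obj *op)
{
	__atomic_fetch_add(&op->ref, 1, __ATOMIC_RELAXED);
}

static void
obj_drop(struct obj *op)
{
	assert(op->ref > 0);
	/*
	 * The fetch-and-subtract returns the pre-decrement value, so a
	 * result of 1 means this caller performed the 1->0 transition.
	 * No other reference can exist at that point, hence no lock.
	 */
	if (__atomic_fetch_sub(&op->ref, 1, __ATOMIC_ACQ_REL) == 1)
		obj_destroy(op);
}

int
main(void)
{
	struct obj *op = calloc(1, sizeof(*op));

	op->ref = 1;			/* creation reference */
	obj_hold(op);
	obj_drop(op);
	obj_drop(op);			/* final drop frees the object */
	return (0);
}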
diff --git a/sys/kern/kern_jail.c b/sys/kern/kern_jail.c
index 2d4bd3495e..f32a9d1df0 100644
--- a/sys/kern/kern_jail.c
+++ b/sys/kern/kern_jail.c
@@ -177,7 +177,7 @@ kern_jail(struct prison *pr, struct jail *j)
 	}
 	LIST_INSERT_HEAD(&allprison, pr, pr_list);
-	prisoncount++;
+	atomic_add_int(&prisoncount, 1);
 
 	error = kern_jail_attach(pr->pr_id);
 	if (error) {
@@ -642,29 +642,48 @@ end:
 SYSCTL_OID(_jail, OID_AUTO, list, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
 	   sysctl_jail_list, "A", "List of active jails");
 
+/*
+ * MPSAFE
+ */
 void
 prison_hold(struct prison *pr)
 {
-	pr->pr_ref++;
+	atomic_add_int(&pr->pr_ref, 1);
 }
 
+/*
+ * MPALMOSTSAFE
+ */
 void
 prison_free(struct prison *pr)
 {
 	struct jail_ip_storage *jls;
 
-	KKASSERT(pr->pr_ref >= 1);
-	if (--pr->pr_ref > 0)
+	KKASSERT(pr->pr_ref > 0);
+	if (atomic_fetchadd_int(&pr->pr_ref, -1) != 1)
 		return;
 
-	/* Delete all ips */
+	/*
+	 * The MP lock is needed on the last ref to adjust
+	 * the list.
+	 */
+	get_mplock();
+	if (pr->pr_ref) {
+		rel_mplock();
+		return;
+	}
+	LIST_REMOVE(pr, pr_list);
+	atomic_add_int(&prisoncount, -1);
+	rel_mplock();
+
+	/*
+	 * Clean up
+	 */
 	while (!SLIST_EMPTY(&pr->pr_ips)) {
 		jls = SLIST_FIRST(&pr->pr_ips);
 		SLIST_REMOVE_HEAD(&pr->pr_ips, entries);
 		kfree(jls, M_PRISON);
 	}
-	LIST_REMOVE(pr, pr_list);
-	prisoncount--;
 
 	if (pr->pr_linux != NULL)
 		kfree(pr->pr_linux, M_PRISON);
diff --git a/sys/kern/kern_prot.c b/sys/kern/kern_prot.c
index a3d2bd2298..bfb2941b8f 100644
--- a/sys/kern/kern_prot.c
+++ b/sys/kern/kern_prot.c
@@ -1005,7 +1005,6 @@ static __inline void
 _crinit(struct ucred *cr)
 {
 	cr->cr_ref = 1;
-	spin_init(&cr->cr_spin);
 }
 
 /*
@@ -1058,23 +1057,19 @@ crhold(struct ucred *cr)
  * must also use atomic_subtract_int() below. A spinlock is required
  * in crfree() to handle multiple callers racing the refcount to 0.
  *
- * MPALMOSTSAFE - acquires mplock on 1->0 transition of ref count
+ * MPSAFE
  */
 void
 crfree(struct ucred *cr)
 {
 	if (cr->cr_ref <= 0)
 		panic("Freeing already free credential! %p", cr);
-	spin_lock_wr(&cr->cr_spin);
-	atomic_subtract_int(&cr->cr_ref, 1);
-	if (cr->cr_ref == 0) {
-		spin_unlock_wr(&cr->cr_spin);
+	if (atomic_fetchadd_int(&cr->cr_ref, -1) == 1) {
 		/*
 		 * Some callers of crget(), such as nfs_statfs(),
 		 * allocate a temporary credential, but don't
 		 * allocate a uidinfo structure.
 		 */
-		get_mplock();
 		if (cr->cr_uidinfo != NULL) {
 			uidrop(cr->cr_uidinfo);
 			cr->cr_uidinfo = NULL;
@@ -1092,15 +1087,14 @@ crfree(struct ucred *cr)
 		cr->cr_prison = NULL;	/* safety */
 
 		FREE((caddr_t)cr, M_CRED);
-		rel_mplock();
-	} else {
-		spin_unlock_wr(&cr->cr_spin);
 	}
 }
 
 /*
  * Atomize a cred structure so it can be modified without polluting
  * other references to it.
+ *
+ * MPSAFE (however, *pcr must be stable)
  */
 struct ucred *
 cratom(struct ucred **pcr)
@@ -1128,6 +1122,8 @@
 #if 0	/* no longer used but keep around for a little while */
 /*
  * Copy cred structure to a new one and free the old one.
+ *
+ * MPSAFE (*cr must be stable)
  */
 struct ucred *
 crcopy(struct ucred *cr)
diff --git a/sys/kern/kern_resource.c b/sys/kern/kern_resource.c
index 325c3d0d65..77debe7a9a 100644
--- a/sys/kern/kern_resource.c
+++ b/sys/kern/kern_resource.c
@@ -773,7 +773,7 @@ void
 uihold(struct uidinfo *uip)
 {
 	atomic_add_int(&uip->ui_ref, 1);
-	KKASSERT(uip->ui_ref > 0);
+	KKASSERT(uip->ui_ref >= 0);
 }
 
 /*
@@ -782,10 +782,9 @@ uihold(struct uidinfo *uip)
 void
 uidrop(struct uidinfo *uip)
 {
+	KKASSERT(uip->ui_ref > 0);
 	if (atomic_fetchadd_int(&uip->ui_ref, -1) == 1) {
 		uifree(uip);
-	} else {
-		KKASSERT(uip->ui_ref > 0);
 	}
 }
diff --git a/sys/kern/vfs_cache.c b/sys/kern/vfs_cache.c
index 8367997598..b291622b88 100644
--- a/sys/kern/vfs_cache.c
+++ b/sys/kern/vfs_cache.c
@@ -216,6 +216,8 @@ static void cache_zap(struct namecache *ncp);
  *
  * This is a rare case where callers are allowed to hold a spinlock,
  * so we can't ourselves.
+ *
+ * MPSAFE
  */
 static __inline
 struct namecache *
@@ -235,8 +237,8 @@ void
 _cache_drop(struct namecache *ncp)
 {
 	KKASSERT(ncp->nc_refs > 0);
-	if (ncp->nc_refs == 1 && 
-	    (ncp->nc_flag & NCF_UNRESOLVED) && 
+	if (ncp->nc_refs == 1 &&
+	    (ncp->nc_flag & NCF_UNRESOLVED) &&
 	    TAILQ_EMPTY(&ncp->nc_list)
 	) {
 		KKASSERT(ncp->nc_exlocks == 0);
@@ -338,35 +340,46 @@ cache_zero(struct nchandle *nch)
  *
  * Warning: caller may hold an unrelated read spinlock, which means we can't
  * use read spinlocks here.
+ *
+ * MPSAFE if nch is
  */
 struct nchandle *
 cache_hold(struct nchandle *nch)
 {
 	_cache_hold(nch->ncp);
-	++nch->mount->mnt_refs;
+	atomic_add_int(&nch->mount->mnt_refs, 1);
 	return(nch);
 }
 
+/*
+ * Create a copy of a namecache handle for an already-referenced
+ * entry.
+ *
+ * MPSAFE if nch is
+ */
 void
 cache_copy(struct nchandle *nch, struct nchandle *target)
 {
 	*target = *nch;
 	_cache_hold(target->ncp);
-	++nch->mount->mnt_refs;
+	atomic_add_int(&nch->mount->mnt_refs, 1);
 }
 
+/*
+ * MPSAFE if nch is
+ */
 void
 cache_changemount(struct nchandle *nch, struct mount *mp)
 {
-	--nch->mount->mnt_refs;
+	atomic_add_int(&nch->mount->mnt_refs, -1);
 	nch->mount = mp;
-	++nch->mount->mnt_refs;
+	atomic_add_int(&nch->mount->mnt_refs, 1);
 }
 
 void
 cache_drop(struct nchandle *nch)
 {
-	--nch->mount->mnt_refs;
+	atomic_add_int(&nch->mount->mnt_refs, -1);
 	_cache_drop(nch->ncp);
 	nch->ncp = NULL;
 	nch->mount = NULL;
@@ -543,7 +556,7 @@ cache_get(struct nchandle *nch, struct nchandle *target)
 {
 	target->mount = nch->mount;
 	target->ncp = _cache_get(nch->ncp);
-	++target->mount->mnt_refs;
+	atomic_add_int(&target->mount->mnt_refs, 1);
 }
 
 static int
@@ -566,7 +579,7 @@ cache_get_nonblock(struct nchandle *nch)
 	int error;
 
 	if ((error = _cache_get_nonblock(nch->ncp)) == 0)
-		++nch->mount->mnt_refs;
+		atomic_add_int(&nch->mount->mnt_refs, 1);
 	return (error);
 }
 
@@ -581,7 +594,7 @@ _cache_put(struct namecache *ncp)
 void
 cache_put(struct nchandle *nch)
 {
-	--nch->mount->mnt_refs;
+	atomic_add_int(&nch->mount->mnt_refs, -1);
 	_cache_put(nch->ncp);
 	nch->ncp = NULL;
 	nch->mount = NULL;
@@ -1940,7 +1953,7 @@ found:
 	cache_hysteresis();
 	nch.mount = mp;
 	nch.ncp = ncp;
-	++nch.mount->mnt_refs;
+	atomic_add_int(&nch.mount->mnt_refs, 1);
 	return(nch);
 }
 
@@ -2289,7 +2302,7 @@ cache_allocroot(struct nchandle *nch, struct mount *mp, struct vnode *vp)
 {
 	nch->ncp = cache_alloc(0);
 	nch->mount = mp;
-	++mp->mnt_refs;
+	atomic_add_int(&mp->mnt_refs, 1);
 	if (vp)
 		_cache_setvp(nch->mount, nch->ncp, vp);
 }
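[Editorial note] On why the vfs_cache.c hunks above convert plain ++/-- on mnt_refs to atomic_add_int(): a plain increment compiles to a separate load, add, and store, so once callers no longer serialize on a common lock, two CPUs can read the same old value and one update is lost. A small userland demonstration of the safe form; the __atomic builtin stands in for the kernel's atomic_add_int(), and the thread and loop counts are arbitrary.

#include <pthread.h>
#include <stdio.h>

#define NTHREADS	4
#define NLOOPS		1000000

static int refs;			/* plays the role of mnt_refs */

static void *
worker(void *arg)
{
	int i;

	(void)arg;
	for (i = 0; i < NLOOPS; ++i) {
		/* ++refs;  <- unsafe: load/add/store can interleave */
		__atomic_fetch_add(&refs, 1, __ATOMIC_RELAXED);
	}
	return (NULL);
}

int
main(void)
{
	pthread_t td[NTHREADS];
	int i;

	for (i = 0; i < NTHREADS; ++i)
		pthread_create(&td[i], NULL, worker, NULL);
	for (i = 0; i < NTHREADS; ++i)
		pthread_join(td[i], NULL);
	printf("refs = %d (expect %d)\n", refs, NTHREADS * NLOOPS);
	return (0);
}

Swapping the atomic line for the commented-out plain increment typically prints a short count on a multi-core machine, which is exactly the mnt_refs race the conversions above close.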
diff --git a/sys/kern/vfs_nlookup.c b/sys/kern/vfs_nlookup.c
index fd13b9ffc0..d1de9c1eab 100644
--- a/sys/kern/vfs_nlookup.c
+++ b/sys/kern/vfs_nlookup.c
@@ -77,6 +77,8 @@
  *
  * The first process proc0's credentials are used if the calling thread
  * is not associated with a process context.
+ *
+ * MPSAFE
  */
 int
 nlookup_init(struct nlookupdata *nd,
diff --git a/sys/sys/ucred.h b/sys/sys/ucred.h
index b7a4d43dfb..9abbd2c1f8 100644
--- a/sys/sys/ucred.h
+++ b/sys/sys/ucred.h
@@ -68,7 +68,6 @@ struct ucred {
 	uid_t	cr_svuid;		/* Saved effective user id. */
 	gid_t	cr_rgid;		/* Real group id. */
 	gid_t	cr_svgid;		/* Saved effective group id. */
-	struct spinlock cr_spin;
 };
#define cr_gid cr_groups[0]
 #define NOCRED ((struct ucred *)0)	/* no credential available */
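[Editorial note] The prison_free() hunk earlier in this commit shows the MPALMOSTSAFE shape rather than the fully MPSAFE one: the decrement is lock-free, but the final reference must still take the MP lock to unlink the prison from allprison, and must re-check pr_ref after acquiring it, because another CPU may have found the prison on the list and re-held it between the decrement and the lock. Below is a userland sketch of that two-step, under the assumption that a pthread mutex can stand in for get_mplock()/rel_mplock(); the obj names are hypothetical and the list manipulation is elided.

#include <assert.h>
#include <pthread.h>
#include <stdlib.h>

struct obj {
	int	ref;
};

/* stands in for the MP lock that guards the global list */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void
obj_drop_almostsafe(struct obj *op)
{
	assert(op->ref > 0);

	/* common case: drop a non-final reference without any lock */
	if (__atomic_fetch_sub(&op->ref, 1, __ATOMIC_ACQ_REL) != 1)
		return;

	/*
	 * We performed the 1->0 transition, but the object is still
	 * findable on a global list, so take the list lock and re-check:
	 * another thread may have re-held the object in the meantime, in
	 * which case destruction is abandoned (it will be retried by
	 * whoever drops that new reference).
	 */
	pthread_mutex_lock(&list_lock);
	if (__atomic_load_n(&op->ref, __ATOMIC_ACQUIRE) != 0) {
		pthread_mutex_unlock(&list_lock);
		return;
	}
	/* ... LIST_REMOVE() equivalent goes here, under the lock ... */
	pthread_mutex_unlock(&list_lock);
	free(op);
}

int
main(void)
{
	struct obj *op = calloc(1, sizeof(*op));

	op->ref = 1;
	obj_drop_almostsafe(op);	/* 1->0: unlinks and frees */
	return (0);
}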