From: Hiten Pandya
Date: Wed, 2 Mar 2005 18:42:09 +0000 (+0000)
Subject: Rename the flags for sf_buf_alloc(9) to be in line with FreeBSD:
X-Git-Tag: v2.0.1~8441
X-Git-Url: https://gitweb.dragonflybsd.org/dragonfly.git/commitdiff_plain/4f1640d6f72c3f774978ed4628ae2b12e23b4c90

Rename the flags for sf_buf_alloc(9) to be in line with FreeBSD:

	SFBA_PCATCH  ->  SFB_CATCH
	SFBA_QUICK   ->  SFB_CPUPRIVATE

Discussed-with: Matthew Dillon
---

diff --git a/sys/i386/i386/uio_machdep.c b/sys/i386/i386/uio_machdep.c
index bdb4de2ac7..178534f6f0 100644
--- a/sys/i386/i386/uio_machdep.c
+++ b/sys/i386/i386/uio_machdep.c
@@ -34,7 +34,7 @@
  *
  * @(#)kern_subr.c 8.3 (Berkeley) 1/21/94
  * $FreeBSD: src/sys/i386/i386/uio_machdep.c,v 1.1 2004/03/21 20:28:36 alc Exp $
- * $DragonFly: src/sys/i386/i386/Attic/uio_machdep.c,v 1.7 2004/08/18 09:16:16 dillon Exp $
+ * $DragonFly: src/sys/i386/i386/Attic/uio_machdep.c,v 1.8 2005/03/02 18:42:08 hmp Exp $
  */
 #include
@@ -89,7 +89,7 @@ uiomove_fromphys(vm_page_t *ma, vm_offset_t offset, int n, struct uio *uio)
 		page_offset = offset & PAGE_MASK;
 		cnt = min(cnt, PAGE_SIZE - page_offset);
 		m = ma[offset >> PAGE_SHIFT];
-		sf = sf_buf_alloc(m, SFBA_QUICK);
+		sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
 		cp = (char *)sf_buf_kva(sf) + page_offset;
 		switch (uio->uio_segflg) {
 		case UIO_USERSPACE:
diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
index c39f466db5..ee8e072eab 100644
--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -24,7 +24,7 @@
  * SUCH DAMAGE.
  *
  * $FreeBSD: src/sys/kern/kern_exec.c,v 1.107.2.15 2002/07/30 15:40:46 nectar Exp $
- * $DragonFly: src/sys/kern/kern_exec.c,v 1.30 2005/01/29 20:54:20 dillon Exp $
+ * $DragonFly: src/sys/kern/kern_exec.c,v 1.31 2005/03/02 18:42:08 hmp Exp $
  */
 #include
@@ -568,7 +568,7 @@ exec_map_first_page(struct image_params *imgp)
 	vm_page_wakeup(m);	/* unbusy the page */
 	crit_exit();
-	imgp->firstpage = sf_buf_alloc(m, SFBA_QUICK);
+	imgp->firstpage = sf_buf_alloc(m, SFB_CPUPRIVATE);
 	imgp->image_header = (void *)sf_buf_kva(imgp->firstpage);
 	return 0;
diff --git a/sys/kern/kern_msfbuf.c b/sys/kern/kern_msfbuf.c
index 7180e69d7b..eeda18f952 100644
--- a/sys/kern/kern_msfbuf.c
+++ b/sys/kern/kern_msfbuf.c
@@ -36,7 +36,7 @@
  * Copyright (c) 1998 David Greenman. All rights reserved.
  * src/sys/kern/kern_sfbuf.c,v 1.7 2004/05/13 19:46:18 dillon
  *
- * $DragonFly: src/sys/kern/kern_msfbuf.c,v 1.8 2005/03/01 01:20:03 hmp Exp $
+ * $DragonFly: src/sys/kern/kern_msfbuf.c,v 1.9 2005/03/02 18:42:08 hmp Exp $
  */
 /*
  * MSFBUFs cache linear multi-page ephermal mappings and operate similar
@@ -144,7 +144,7 @@
  * Get an msf_buf from the freelist; if none are available
  * than it will block.
  *
- * If SFBA_PCATCH was specified in 'flags' than the sleep is
+ * If SFB_CATCH was specified in 'flags' than the sleep is
  * block is interruptable by signals etc; this flag is normally
  * use for system calls.
  *
@@ -193,7 +193,7 @@ msf_buf_alloc(vm_page_t *pg_ary, int npages, int flags)
 	 * and msf_bufs are supposed to be temporary mappings.
 	 */
 	while ((msf = TAILQ_FIRST(&msf_buf_freelist)) == NULL) {
-		pflags = (flags & SFBA_PCATCH) ? PCATCH : 0;
+		pflags = (flags & SFB_CATCH) ? PCATCH : 0;
 		++msf_buf_alloc_want;
 		error = tsleep(&msf_buf_freelist, pflags, "msfbuf", 0);
 		--msf_buf_alloc_want;
@@ -230,7 +230,7 @@ msf_buf_alloc(vm_page_t *pg_ary, int npages, int flags)
 	 */
 done:
 	++msf->m_refcnt;
-	if ((flags & SFBA_QUICK)) {
+	if ((flags & SFB_CPUPRIVATE)) {
 		pmap_qenter2(msf->m_kva, msf->m_xio.xio_pages,
 			    msf->m_xio.xio_npages, &msf->m_cpumask);
 	} else {
diff --git a/sys/kern/kern_sfbuf.c b/sys/kern/kern_sfbuf.c
index 8438d2d500..76d553095c 100644
--- a/sys/kern/kern_sfbuf.c
+++ b/sys/kern/kern_sfbuf.c
@@ -22,7 +22,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $DragonFly: src/sys/kern/kern_sfbuf.c,v 1.11 2004/08/24 21:53:38 dillon Exp $
+ * $DragonFly: src/sys/kern/kern_sfbuf.c,v 1.12 2005/03/02 18:42:08 hmp Exp $
  */
 #include
@@ -120,7 +120,7 @@ sf_buf_alloc(struct vm_page *m, int flags)
 		 * cache hit
 		 *
 		 * We must invalidate the TLB entry based on whether
-		 * it need only be valid on the local cpu (SFBA_QUICK),
+		 * it need only be valid on the local cpu (SFB_CPUPRIVATE),
 		 * or on all cpus.  This is conditionalized and in
 		 * most cases no system-wide invalidation should be
 		 * needed.
@@ -129,7 +129,7 @@ sf_buf_alloc(struct vm_page *m, int flags)
 		 * on the 0->1 transition.
 		 */
 		++sf->refcnt;
-		if ((flags & SFBA_QUICK) && sfbuf_quick) {
+		if ((flags & SFB_CPUPRIVATE) && sfbuf_quick) {
 			if ((sf->cpumask & gd->gd_cpumask) == 0) {
 				pmap_kenter_sync_quick(sf->kva);
 				sf->cpumask |= gd->gd_cpumask;
@@ -151,7 +151,7 @@ sf_buf_alloc(struct vm_page *m, int flags)
 	 */
 	for (;;) {
 		if ((sf = TAILQ_FIRST(&sf_buf_freelist)) == NULL) {
-			pflags = (flags & SFBA_PCATCH) ? PCATCH : 0;
+			pflags = (flags & SFB_CATCH) ? PCATCH : 0;
 			++sf_buf_alloc_want;
 			error = tsleep(&sf_buf_freelist, pflags, "sfbufa", 0);
 			--sf_buf_alloc_want;
@@ -184,7 +184,7 @@ sf_buf_alloc(struct vm_page *m, int flags)
 	LIST_INSERT_HEAD(hash_chain, sf, list_entry);
 	sf->refcnt = 1;
 	sf->m = m;
-	if ((flags & SFBA_QUICK) && sfbuf_quick) {
+	if ((flags & SFB_CPUPRIVATE) && sfbuf_quick) {
 		pmap_kenter_quick(sf->kva, sf->m->phys_addr);
 		sf->cpumask = gd->gd_cpumask;
 	} else {
diff --git a/sys/kern/kern_xio.c b/sys/kern/kern_xio.c
index f976f2e7fc..572f4ac2b0 100644
--- a/sys/kern/kern_xio.c
+++ b/sys/kern/kern_xio.c
@@ -31,7 +31,7 @@
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $DragonFly: src/sys/kern/kern_xio.c,v 1.8 2005/03/01 23:35:13 dillon Exp $
+ * $DragonFly: src/sys/kern/kern_xio.c,v 1.9 2005/03/02 18:42:08 hmp Exp $
  */
 /*
  * Kernel XIO interface. An initialized XIO is basically a collection of
@@ -280,7 +280,7 @@ xio_copy_xtou(xio_t xio, int uoffset, void *uptr, int bytes)
 	     ++i
 	) {
 		m = xio->xio_pages[i];
-		sf = sf_buf_alloc(m, SFBA_QUICK);
+		sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
 		error = copyout((char *)sf_buf_kva(sf) + offset, uptr, n);
 		sf_buf_free(sf);
 		if (error)
@@ -328,7 +328,7 @@ xio_copy_xtok(xio_t xio, int uoffset, void *kptr, int bytes)
 	     ++i
 	) {
 		m = xio->xio_pages[i];
-		sf = sf_buf_alloc(m, SFBA_QUICK);
+		sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
 		bcopy((char *)sf_buf_kva(sf) + offset, kptr, n);
 		sf_buf_free(sf);
 		bytes -= n;
diff --git a/sys/kern/uipc_syscalls.c b/sys/kern/uipc_syscalls.c
index c2e2ace232..c166a2b82c 100644
--- a/sys/kern/uipc_syscalls.c
+++ b/sys/kern/uipc_syscalls.c
@@ -35,7 +35,7 @@
  *
  * @(#)uipc_syscalls.c	8.4 (Berkeley) 2/21/94
  * $FreeBSD: src/sys/kern/uipc_syscalls.c,v 1.65.2.17 2003/04/04 17:11:16 tegge Exp $
- * $DragonFly: src/sys/kern/uipc_syscalls.c,v 1.49 2005/02/08 23:00:33 hsu Exp $
+ * $DragonFly: src/sys/kern/uipc_syscalls.c,v 1.50 2005/03/02 18:42:08 hmp Exp $
  */
 #include "opt_ktrace.h"
@@ -1582,7 +1582,7 @@ retry_lookup:
 	 * Get a sendfile buf. We usually wait as long as necessary,
 	 * but this wait can be interrupted.
 	 */
-	if ((sf = sf_buf_alloc(pg, SFBA_PCATCH)) == NULL) {
+	if ((sf = sf_buf_alloc(pg, SFB_CATCH)) == NULL) {
 		crit_enter();
 		vm_page_unwire(pg, 0);
 		vm_page_try_to_free(pg);
diff --git a/sys/platform/pc32/i386/uio_machdep.c b/sys/platform/pc32/i386/uio_machdep.c
index 9bfe9e2016..d4971f7505 100644
--- a/sys/platform/pc32/i386/uio_machdep.c
+++ b/sys/platform/pc32/i386/uio_machdep.c
@@ -34,7 +34,7 @@
  *
  * @(#)kern_subr.c 8.3 (Berkeley) 1/21/94
  * $FreeBSD: src/sys/i386/i386/uio_machdep.c,v 1.1 2004/03/21 20:28:36 alc Exp $
- * $DragonFly: src/sys/platform/pc32/i386/Attic/uio_machdep.c,v 1.7 2004/08/18 09:16:16 dillon Exp $
+ * $DragonFly: src/sys/platform/pc32/i386/Attic/uio_machdep.c,v 1.8 2005/03/02 18:42:08 hmp Exp $
  */
 #include
@@ -89,7 +89,7 @@ uiomove_fromphys(vm_page_t *ma, vm_offset_t offset, int n, struct uio *uio)
 		page_offset = offset & PAGE_MASK;
 		cnt = min(cnt, PAGE_SIZE - page_offset);
 		m = ma[offset >> PAGE_SHIFT];
-		sf = sf_buf_alloc(m, SFBA_QUICK);
+		sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
 		cp = (char *)sf_buf_kva(sf) + page_offset;
 		switch (uio->uio_segflg) {
 		case UIO_USERSPACE:
diff --git a/sys/sys/sfbuf.h b/sys/sys/sfbuf.h
index 54f31fbe68..d0ed4a8609 100644
--- a/sys/sys/sfbuf.h
+++ b/sys/sys/sfbuf.h
@@ -23,7 +23,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $DragonFly: src/sys/sys/sfbuf.h,v 1.7 2004/12/30 07:01:52 cpressey Exp $
+ * $DragonFly: src/sys/sys/sfbuf.h,v 1.8 2005/03/02 18:42:09 hmp Exp $
  */
 #ifndef _SFBUF_H_
@@ -46,9 +46,9 @@ struct sf_buf {
 /*
  * sf_buf_alloc() flags (not all are stored in sf->flags)
  */
-#define SFBA_QUICK	0x0001		/* sync mapping to current cpu only */
+#define SFB_CPUPRIVATE	0x0001		/* sync mapping to current cpu only */
 #define SFBA_ONFREEQ	0x0002		/* on the free queue (lazy move) */
-#define SFBA_PCATCH	0x0004		/* allow interruption */
+#define SFB_CATCH	0x0004		/* allow interruption */
 static __inline vm_offset_t
 sf_buf_kva(struct sf_buf *sf)
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index be2a198ca0..8ac0d0e6e2 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -39,7 +39,7 @@
  *
  * from: @(#)vnode_pager.c	7.5 (Berkeley) 4/20/91
  * $FreeBSD: src/sys/vm/vnode_pager.c,v 1.116.2.7 2002/12/31 09:34:51 dillon Exp $
- * $DragonFly: src/sys/vm/vnode_pager.c,v 1.17 2004/12/10 19:50:32 dillon Exp $
+ * $DragonFly: src/sys/vm/vnode_pager.c,v 1.18 2005/03/02 18:42:09 hmp Exp $
  */
 /*
@@ -307,7 +307,7 @@ vnode_pager_setsize(struct vnode *vp, vm_ooffset_t nsize)
 			 * Clear out partial-page garbage in case
			 * the page has been mapped.
			 */
-			sf = sf_buf_alloc(m, SFBA_QUICK);
+			sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
			kva = sf_buf_kva(sf);
			bzero((caddr_t)kva + base, size);
			sf_buf_free(sf);
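Usage note: below is a minimal sketch of what a caller looks like under the
renamed flags, patterned on the xio_copy_xtok() and uiomove_fromphys() hunks
above.  It is illustrative only, not code from the tree; the helper name
copy_page_to_kernel() and the exact include list are assumptions.

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/sfbuf.h>
	#include <vm/vm_page.h>

	/*
	 * Copy 'len' bytes from the start of a wired, valid vm_page into a
	 * kernel buffer using an ephemeral sf_buf mapping.
	 *
	 * SFB_CPUPRIVATE (formerly SFBA_QUICK) only synchronizes the mapping
	 * on the current cpu, which is sufficient here because the mapping
	 * is used and freed without blocking or migrating.  A caller that
	 * wants the allocation sleep to be interruptible by signals (e.g.
	 * the sendfile path) would pass SFB_CATCH, formerly SFBA_PCATCH,
	 * and check sf_buf_alloc() for a NULL return.
	 */
	static void
	copy_page_to_kernel(vm_page_t m, void *kptr, size_t len)
	{
		struct sf_buf *sf;

		sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
		bcopy((char *)sf_buf_kva(sf), kptr, len);
		sf_buf_free(sf);
	}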