4 * Copyright (c) 2010 by The DragonFly Project and Samuel J. Greear.
7 * This code is derived from software contributed to The DragonFly Project
8 * by Samuel J. Greear <sjg@thesjg.com>
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in
18 * the documentation and/or other materials provided with the
20 * 3. Neither the name of The DragonFly Project nor the names of its
21 * contributors may be used to endorse or promote products derived
22 * from this software without specific, prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
28 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
38 #include <sys/types.h>
39 #include <sys/kernel.h>
40 #include <sys/objcache.h>
41 #include <sys/sysctl.h>
42 #include <sys/param.h>
43 #include <sys/serialize.h>
44 #include <sys/systm.h>
46 #include <sys/queue.h>
49 #include <vm/vm_extern.h>
50 #include <vm/vm_kern.h>
51 #include <vm/vm_page.h>
52 #include <cpu/lwbuf.h>
53 #include <machine/globaldata.h>
54 #include <machine/atomic.h>
55 #include <machine/param.h>
56 #include <sys/thread.h>
58 static void lwbuf_init(void *);
59 SYSINIT(sock_lwb, SI_BOOT2_MACHDEP, SI_ORDER_ANY, lwbuf_init, NULL);
61 static struct objcache *lwbuf_cache;
63 MALLOC_DEFINE(M_LWBUF, "lwbuf", "Lightweight buffers");
64 struct objcache_malloc_args lwbuf_malloc_args =
65 { sizeof(struct lwbuf), M_LWBUF };
67 /* Number of pages of KVA to allocate at boot per cpu (1MB) */
68 static int lwbuf_reserve_pages = 256;
69 static int lwbuf_count;
70 static int lwbuf_kva_bytes;
72 SYSCTL_INT(_kern_ipc, OID_AUTO, lwbuf_reserve, CTLFLAG_RD,
73 &lwbuf_reserve_pages, 0,
74 "Number of pre-allocated lightweight buffers");
75 SYSCTL_INT(_kern_ipc, OID_AUTO, lwbuf_count, CTLFLAG_RD,
77 "Currently allocated lightweight buffers");
78 SYSCTL_INT(_kern_ipc, OID_AUTO, lwbuf_kva_bytes, CTLFLAG_RD,
80 "Currently used KVA for lightweight buffers");
83 lwbuf_cache_ctor(void *obj, void *pdata, int ocflags)
85 struct lwbuf *lwb = (struct lwbuf *)obj;
89 lwb->kva = kmem_alloc_nofault(&kernel_map, PAGE_SIZE, PAGE_SIZE);
92 atomic_add_int(&lwbuf_kva_bytes, PAGE_SIZE);
98 * Destructor for lwb. Note that we must remove any pmap entries
99 * created with pmap_kenter() to prevent them from being misinterpreted
100 * as managed pages which would cause kernel_pmap.pm_stats.resident_count
101 * to get out of whack.
104 lwbuf_cache_dtor(void *obj, void *pdata)
106 struct lwbuf *lwb = (struct lwbuf *)obj;
108 KKASSERT(lwb->kva != 0);
109 lwkt_gettoken(&pmap_token);
110 pmap_kremove_quick(lwb->kva);
111 lwkt_reltoken(&pmap_token);
112 kmem_free(&kernel_map, lwb->kva, PAGE_SIZE);
114 atomic_add_int(&lwbuf_kva_bytes, -PAGE_SIZE);
118 lwbuf_init(void *arg)
120 lwbuf_cache = objcache_create("lwbuf", 0, 0,
121 lwbuf_cache_ctor, lwbuf_cache_dtor, NULL,
122 objcache_malloc_alloc, objcache_malloc_free,
127 lwbuf_alloc(vm_page_t m)
129 struct mdglobaldata *gd = mdcpu;
132 lwb = objcache_get(lwbuf_cache, M_WAITOK);
133 KKASSERT(lwb->m == NULL);
135 lwb->cpumask = gd->mi.gd_cpumask;
136 pmap_kenter_quick(lwb->kva, m->phys_addr);
137 atomic_add_int(&lwbuf_count, 1);
143 lwbuf_free(struct lwbuf *lwb)
145 KKASSERT(lwb->m != NULL);
148 objcache_put(lwbuf_cache, lwb);
149 atomic_add_int(&lwbuf_count, -1);
153 lwbuf_set_global(struct lwbuf *lwb)
155 if (lwb->cpumask != (cpumask_t)-1) {
156 pmap_kenter_sync(lwb->kva);
157 lwb->cpumask = (cpumask_t)-1;
162 _lwbuf_kva(struct lwbuf *lwb, struct mdglobaldata *gd)
166 pmap_kenter_sync_quick(lwb->kva);
170 new = old | gd->mi.gd_cpumask;
171 } while (atomic_cmpset_int(&lwb->cpumask, old, new) == 0);
177 lwbuf_kva(struct lwbuf *lwb)
179 struct mdglobaldata *gd = mdcpu;
181 if (lwb->cpumask & gd->mi.gd_cpumask)
184 return (_lwbuf_kva(lwb, gd));