kernel - Optimize the x86-64 lwbuf API
[dragonfly.git] / sys / cpu / i386 / misc / lwbuf.c
/*
 * Copyright (c) 2010 by The DragonFly Project and Samuel J. Greear.
 * All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Samuel J. Greear <sjg@thesjg.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/objcache.h>
#include <sys/sysctl.h>
#include <sys/param.h>
#include <sys/serialize.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <cpu/lwbuf.h>
#include <machine/globaldata.h>
#include <machine/atomic.h>
#include <machine/param.h>
#include <sys/thread.h>

static void lwbuf_init(void *);
SYSINIT(sock_lwb, SI_BOOT2_MACHDEP, SI_ORDER_ANY, lwbuf_init, NULL);

static struct objcache *lwbuf_cache;

MALLOC_DEFINE(M_LWBUF, "lwbuf", "Lightweight buffers");
struct objcache_malloc_args lwbuf_malloc_args =
        { sizeof(struct lwbuf), M_LWBUF };

/* Number of pages of KVA to allocate at boot per cpu (1MB) */
static int lwbuf_reserve_pages = 256;
static int lwbuf_count;
static int lwbuf_kva_bytes;

SYSCTL_INT(_kern_ipc, OID_AUTO, lwbuf_reserve, CTLFLAG_RD,
           &lwbuf_reserve_pages, 0,
           "Number of pre-allocated lightweight buffers");
SYSCTL_INT(_kern_ipc, OID_AUTO, lwbuf_count, CTLFLAG_RD,
           &lwbuf_count, 0,
           "Currently allocated lightweight buffers");
SYSCTL_INT(_kern_ipc, OID_AUTO, lwbuf_kva_bytes, CTLFLAG_RD,
           &lwbuf_kva_bytes, 0,
           "Currently used KVA for lightweight buffers");

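/*
 * Objcache constructor.  Each lwbuf permanently owns one page of KVA,
 * reserved here and released again by the destructor; no pmap entry is
 * made at this point.  The mapping for a specific vm_page_t is entered
 * later by lwbuf_alloc().
 */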
static boolean_t
lwbuf_cache_ctor(void *obj, void *pdata, int ocflags)
{
        struct lwbuf *lwb = (struct lwbuf *)obj;

        lwb->m = NULL;
        lwb->cpumask = 0;
        lwb->kva = kmem_alloc_nofault(&kernel_map, PAGE_SIZE, PAGE_SIZE);
        if (lwb->kva == 0)
                return (FALSE);
        atomic_add_int(&lwbuf_kva_bytes, PAGE_SIZE);

        return (TRUE);
}

/*
 * Destructor for lwb. Note that we must remove any pmap entries
 * created with pmap_kenter() to prevent them from being misinterpreted
 * as managed pages which would cause kernel_pmap.pm_stats.resident_count
 * to get out of whack.
 */
static void
lwbuf_cache_dtor(void *obj, void *pdata)
{
        struct lwbuf *lwb = (struct lwbuf *)obj;

        KKASSERT(lwb->kva != 0);
        pmap_kremove_quick(lwb->kva);
        kmem_free(&kernel_map, lwb->kva, PAGE_SIZE);
        lwb->kva = 0;
        atomic_add_int(&lwbuf_kva_bytes, -PAGE_SIZE);
}

static void
lwbuf_init(void *arg)
{
        lwbuf_cache = objcache_create("lwbuf", 0, 0,
                        lwbuf_cache_ctor, lwbuf_cache_dtor, NULL,
                        objcache_malloc_alloc, objcache_malloc_free,
                        &lwbuf_malloc_args);
}

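/*
 * Allocate an lwbuf and map the supplied page at the lwbuf's
 * preallocated KVA on the current cpu only; lwb->cpumask records which
 * cpus hold a valid mapping.  lwb_dummy is unused in this i386
 * implementation and exists to keep the call signature in line with
 * the x86-64 lwbuf API (see the commit title).
 */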
struct lwbuf *
lwbuf_alloc(vm_page_t m, struct lwbuf *lwb_dummy __unused)
{
        struct mdglobaldata *gd = mdcpu;
        struct lwbuf *lwb;

        lwb = objcache_get(lwbuf_cache, M_WAITOK);
        KKASSERT(lwb->m == NULL);
        lwb->m = m;
        lwb->cpumask = gd->mi.gd_cpumask;
        pmap_kenter_quick(lwb->kva, m->phys_addr);
        atomic_add_int(&lwbuf_count, 1);

        return (lwb);
}

void
lwbuf_free(struct lwbuf *lwb)
{
        KKASSERT(lwb->m != NULL);
        lwb->m = NULL;
        lwb->cpumask = 0;
        objcache_put(lwbuf_cache, lwb);
        atomic_add_int(&lwbuf_count, -1);
}

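/*
 * Make the lwbuf's current mapping valid on all cpus by synchronizing
 * the pmap entry globally and marking every cpu in the cpumask.
 */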
void
lwbuf_set_global(struct lwbuf *lwb)
{
        if (lwb->cpumask != (cpumask_t)-1) {
                pmap_kenter_sync(lwb->kva);
                lwb->cpumask = (cpumask_t)-1;
        }
}

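/*
 * Return the KVA of an lwbuf.  The fast path in lwbuf_kva() returns
 * immediately when the calling cpu already has a valid mapping (its
 * bit is set in lwb->cpumask).  Otherwise _lwbuf_kva() synchronizes
 * the pmap entry on this cpu and atomically ORs the cpu's bit into
 * the cpumask with a cmpset loop.
 */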
static vm_offset_t
_lwbuf_kva(struct lwbuf *lwb, struct mdglobaldata *gd)
{
        cpumask_t old, new;

        pmap_kenter_sync_quick(lwb->kva);

        do {
                old = lwb->cpumask;
                new = old | gd->mi.gd_cpumask;
        } while (atomic_cmpset_int(&lwb->cpumask, old, new) == 0);

        return (lwb->kva);
}

__inline vm_offset_t
lwbuf_kva(struct lwbuf *lwb)
{
        struct mdglobaldata *gd = mdcpu;

        if (lwb->cpumask & gd->mi.gd_cpumask)
                return (lwb->kva);

        return (_lwbuf_kva(lwb, gd));
}
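
/*
 * Illustrative usage sketch (not part of the original file): a typical
 * consumer wraps a vm_page_t in an lwbuf to obtain a temporary kernel
 * mapping, uses the KVA, then frees the lwbuf.  The helper name and the
 * copy operation below are hypothetical, and len is assumed to be no
 * larger than PAGE_SIZE.
 */
#if 0
static void
example_copy_from_page(vm_page_t m, void *dst, size_t len)
{
        struct lwbuf lwb_cache;         /* caller-supplied buffer, unused on i386 */
        struct lwbuf *lwb;

        lwb = lwbuf_alloc(m, &lwb_cache);
        bcopy((void *)lwbuf_kva(lwb), dst, len);
        lwbuf_free(lwb);
}
#endif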