2 * Copyright (c) 1998 David Greenman. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * $DragonFly: src/sys/kern/kern_sfbuf.c,v 1.4 2004/04/29 17:31:02 dillon Exp $
28 #include <sys/param.h>
29 #include <sys/types.h>
30 #include <sys/systm.h>
31 #include <sys/kernel.h>
32 #include <sys/malloc.h>
33 #include <sys/queue.h>
34 #include <sys/sfbuf.h>
35 #include <sys/globaldata.h>
36 #include <sys/thread.h>
37 #include <sys/sysctl.h>
39 #include <vm/vm_extern.h>
40 #include <vm/vm_kern.h>
41 #include <vm/vm_page.h>
43 #include <sys/thread2.h>
/*
 * sf_buf_init() runs at SYSINIT time (after mbuf setup) to build the
 * sf_buf pool used by sendfile(2).
 */
45 static void sf_buf_init(void *arg);
46 SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL)
48 LIST_HEAD(sf_buf_list, sf_buf);
51 * A hash table of active sendfile(2) buffers
/*
 * Hash table of sf_bufs keyed by vm_page pointer (see sf_buf_hash()),
 * plus the mask returned by hashinit().
 */
53 static struct sf_buf_list *sf_buf_hashtable;
54 static u_long sf_buf_hashmask;
/*
 * LRU freelist of reusable sf_bufs and a count of threads sleeping in
 * sf_buf_alloc() waiting for one (woken from sf_buf_free()).
 */
56 static TAILQ_HEAD(, sf_buf) sf_buf_freelist;
57 static u_int sf_buf_alloc_want;
/*
 * Base of the contiguous KVA range backing the pool and the sf_buf
 * array itself; sf_bufs[i] maps kva sf_base + i * PAGE_SIZE.
 */
59 static vm_offset_t sf_base;
60 static struct sf_buf *sf_bufs;
/*
 * debug.sfbuf_quick: when non-zero, SFBA_QUICK allocations only
 * establish/invalidate the mapping on the local cpu instead of all cpus.
 */
62 static int sfbuf_quick = 1;
63 SYSCTL_INT(_debug, OID_AUTO, sfbuf_quick, CTLFLAG_RW, &sfbuf_quick, 0, "");
/*
 * Hash a vm_page pointer to an index into sf_buf_hashtable.  Mixes the
 * pointer value at two scales and masks with sf_buf_hashmask.
 *
 * NOTE(review): the (int) casts truncate the pointer on LP64 targets;
 * a uintptr_t cast would be safer -- confirm this file targets ILP32.
 */
67 sf_buf_hash(vm_page_t m)
71 	hv = ((int)m / sizeof(vm_page_t)) + ((int)m >> 12);
72 	return(hv & sf_buf_hashmask);
76 * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
/*
 * SYSINIT hook.  Builds the lookup hash table (sized by nsfbufs),
 * reserves a contiguous no-fault KVA range of nsfbufs pages, and
 * allocates the sf_buf descriptor array.  Each descriptor is bound to a
 * fixed kva slot (sf_base + i * PAGE_SIZE) and placed on the freelist
 * with SFBA_ONFREEQ set.
 *
 * NOTE(review): the malloc() flags argument is on a line not visible in
 * this chunk -- presumably M_WAITOK and/or M_ZERO; the |= on flags below
 * suggests the array is zeroed at allocation.  Confirm against the
 * full file.
 */
79 sf_buf_init(void *arg)
83 	sf_buf_hashtable = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
84 	TAILQ_INIT(&sf_buf_freelist);
85 	sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE);
86 	sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
	/* Bind each sf_buf to its permanent kva and seed the freelist. */
88 	for (i = 0; i < nsfbufs; i++) {
89 		sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
90 		sf_bufs[i].flags |= SFBA_ONFREEQ;
91 		TAILQ_INSERT_TAIL(&sf_buf_freelist, &sf_bufs[i], free_entry);
96 * Get an sf_buf from the freelist. Will block if none are available.
/*
 * Return an sf_buf mapping the given page.  First probes the hash table
 * for an existing (possibly still-cached) mapping; on a hit only the
 * TLB synchronization that is actually needed is performed.  On a miss
 * a buffer is taken from the LRU freelist (sleeping if empty; PCATCH
 * honored when SFBA_PCATCH is passed), rehashed to the new page, and a
 * fresh kenter mapping is established.
 */
99 sf_buf_alloc(struct vm_page *m, int flags)
101 	struct sf_buf_list *hash_chain;
	/* Probe the hash chain for an existing mapping of this page. */
109 	hash_chain = &sf_buf_hashtable[sf_buf_hash(m)];
110 	LIST_FOREACH(sf, hash_chain, list_entry) {
115 			 * We must invalidate the TLB entry based on whether
116 			 * it need only be valid on the local cpu (SFBA_QUICK),
117 			 * or on all cpus. This is conditionalized and in
118 			 * most cases no system-wide invalidation should be
121 			 * Note: we do not remove the entry from the freelist
122 			 * on the 0->1 transition.
			/*
			 * Cache hit: sync the mapping only where it is stale.
			 * Quick path syncs the local cpu; otherwise sync all
			 * cpus unless the mapping is already globally valid.
			 */
125 			if ((flags & SFBA_QUICK) && sfbuf_quick) {
126 				if ((sf->cpumask & gd->gd_cpumask) == 0) {
127 					pmap_kenter_sync_quick(sf->kva);
128 					sf->cpumask |= gd->gd_cpumask;
131 				if (sf->cpumask != (cpumask_t)-1) {
132 					pmap_kenter_sync(sf->kva);
133 					sf->cpumask = (cpumask_t)-1;
136 			goto done;	/* found existing mapping */
141 	 * Didn't find old mapping. Get a buffer off the freelist. We
142 	 * may have to remove and skip buffers with non-zero ref counts
143 	 * that were lazily allocated.
	/* Miss: block until the freelist is non-empty. */
146 	if ((sf = TAILQ_FIRST(&sf_buf_freelist)) == NULL) {
147 		pflags = (flags & SFBA_PCATCH) ? PCATCH : 0;
149 		error = tsleep(&sf_buf_freelist, pflags, "sfbufa", 0);
155 	 * We may have to do delayed removals for referenced
156 	 * sf_buf's here in addition to locating a sf_buf
157 	 * to reuse. The sf_bufs must be removed.
159 	 * We are finished when we find an sf_buf with a
160 	 * refcnt of 0. We theoretically do not have to
161 	 * remove it from the freelist but it's a good idea
162 	 * to do so to preserve LRU operation for the
163 	 * (1) never before seen before case and (2)
164 	 * accidently recycled due to prior cached uses not
165 	 * removing the buffer case.
167 	KKASSERT(sf->flags & SFBA_ONFREEQ);
168 	TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
169 	sf->flags &= ~SFBA_ONFREEQ;
	/* Rehash: drop any previous page's chain entry, insert for the new page. */
174 	if (sf->m != NULL)	/* remove previous mapping from hash table */
175 		LIST_REMOVE(sf, list_entry);
176 	LIST_INSERT_HEAD(hash_chain, sf, list_entry);
	/*
	 * Establish the new mapping: local-cpu only for the quick path,
	 * otherwise valid on all cpus (cpumask all-ones).
	 */
179 	if ((flags & SFBA_QUICK) && sfbuf_quick) {
180 		pmap_kenter_quick(sf->kva, sf->m->phys_addr);
181 		sf->cpumask = gd->gd_cpumask;
183 		pmap_kenter(sf->kva, sf->m->phys_addr);
184 		sf->cpumask = (cpumask_t)-1;
/*
 * Map a kva inside the pool back to its sf_buf descriptor: the pool is
 * one page per sf_buf starting at sf_base, so the index is the page
 * offset from sf_base.
 */
191 #define dtosf(x)	(&sf_bufs[((uintptr_t)(x) - (uintptr_t)sf_base) >> PAGE_SHIFT])
/*
 * Return the sf_buf backing a mapped address (body not visible in this
 * chunk -- presumably a dtosf() wrapper).
 */
194 sf_buf_tosf(caddr_t addr)
/*
 * Add a reference to an active sf_buf.  Panics if the buffer has no
 * existing references (i.e. was never handed out or already released).
 */
200 sf_buf_ref(struct sf_buf *sf)
203 		panic("sf_buf_ref: referencing a free sf_buf");
208 * Lose a reference to an sf_buf. When none left, detach mapped page
209 * and release resources back to the system. Note that the sfbuf's
210 * removal from the freelist is delayed, so it may in fact already be
211 * on the free list. This is the optimal (and most likely) scenario.
213 * Must be called at splimp.
/*
 * Drop a reference.  Panics on releasing an unreferenced buffer.  When
 * the count reaches zero and the buffer is not already queued (delayed
 * removal may have left it there), it is appended to the freelist tail
 * (LRU order) and one waiter in sf_buf_alloc(), if any, is woken.
 */
216 sf_buf_free(struct sf_buf *sf)
219 		panic("sf_buf_free: freeing free sf_buf");
221 	if (sf->refcnt == 0 && (sf->flags & SFBA_ONFREEQ) == 0) {
222 		KKASSERT(sf->aux1 == 0 && sf->aux2 == 0);
223 		TAILQ_INSERT_TAIL(&sf_buf_freelist, sf, free_entry);
224 		sf->flags |= SFBA_ONFREEQ;
		/* Kernel mapping kept cached; only wake a single waiter. */
225 		if (sf_buf_alloc_want > 0)
226 			wakeup_one(&sf_buf_freelist);