netmap: change header includes
[dragonfly.git] / sys / net / netmap / netmap_freebsd.c
/*
 * Copyright (C) 2013 Universita` di Pisa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/module.h>
#include <sys/errno.h>
#include <sys/param.h>	/* defines used in kernel.h */
#include <sys/kernel.h>	/* types used in module initialization */
#include <sys/conf.h>	/* DEV_MODULE */
#include <sys/devfs.h>

#include <vm/vm.h>	/* vtophys */
#include <vm/pmap.h>	/* vtophys */
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_page2.h>
#include <vm/vm_pager.h>


#include <sys/malloc.h>
#include <sys/socket.h>	/* sockaddrs */
#include <sys/event.h>
#include <net/if.h>
#include <net/if_var.h>
#include <sys/bus.h>	/* bus_dmamap_* */

#include <net/netmap.h>
#include <net/netmap/netmap_kern.h>
#include <net/netmap/netmap_mem2.h>


/* ======================== FREEBSD-SPECIFIC ROUTINES ================== */

/*
 * Intercept the rx routine in the standard device driver.
 * Second argument is non-zero to intercept, 0 to restore
 */
int
netmap_catch_rx(struct netmap_adapter *na, int intercept)
{
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct ifnet *ifp = na->ifp;

	if (intercept) {
		if (gna->save_if_input) {
			D("cannot intercept again");
			return EINVAL; /* already set */
		}
		gna->save_if_input = ifp->if_input;
		ifp->if_input = generic_rx_handler;
	} else {
		if (!gna->save_if_input) {
			D("cannot restore");
			return EINVAL; /* not saved */
		}
		ifp->if_input = gna->save_if_input;
		gna->save_if_input = NULL;
	}

	return 0;
}

/*
 * Intercept the packet steering routine in the tx path,
 * so that we can decide which queue is used for an mbuf.
 * Second argument is non-zero to intercept, 0 to restore.
 *
 * XXX see if FreeBSD has such a mechanism
 */
void
netmap_catch_packet_steering(struct netmap_generic_adapter *na, int enable)
{
	if (enable) {
	} else {
	}
}

/* Transmit routine used by generic_netmap_txsync(). Returns 0 on success
 * and non-zero on error (which may be packet drops or other errors).
 * addr and len identify the netmap buffer, m is the (preallocated)
 * mbuf to use for transmissions.
 *
 * We should add a reference to the mbuf so the m_freem() at the end
 * of the transmission does not consume resources.
 *
 * On FreeBSD, and on multiqueue cards, we can force the queue using
 *	if ((m->m_flags & M_FLOWID) != 0)
 *		i = m->m_pkthdr.flowid % adapter->num_queues;
 *	else
 *		i = curcpu % adapter->num_queues;
 */
int
generic_xmit_frame(struct ifnet *ifp, struct mbuf *m,
	void *addr, u_int len, u_int ring_nr)
{
	int ret;

	m->m_len = m->m_pkthdr.len = 0;

	// copy data to the mbuf
	m_copyback(m, 0, len, addr);

#if 0
	// inc refcount. We are alone, so we can skip the atomic
	atomic_fetchadd_int(m->m_ext.ref_cnt, 1);
	m->m_flags |= M_FLOWID;
#endif
	m->m_pkthdr.hash = ring_nr;	/* XXX probably not accurate */
	m->m_pkthdr.rcvif = ifp;	/* used for tx notification */
	ret = ifp->if_transmit(ifp, m);
	return ret;
}

/*
 * The following two functions are empty until we have a generic
 * way to extract the info from the ifp
 */
int
generic_find_num_desc(struct ifnet *ifp, unsigned int *tx, unsigned int *rx)
{
	D("called");
	return 0;
}

void
generic_find_num_queues(struct ifnet *ifp, u_int *txq, u_int *rxq)
{
	D("called");
	*txq = 1;
	*rxq = 1;
}

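/*
 * Interrupt mitigation hooks used by the generic (emulated) adapter.
 * They are stubs in this port: no moderation timer is armed, so every
 * received batch is notified immediately.
 */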
void netmap_mitigation_init(struct netmap_generic_adapter *na)
{
	ND("called");
	na->mit_pending = 0;
}


void netmap_mitigation_start(struct netmap_generic_adapter *na)
{
	ND("called");
}

void netmap_mitigation_restart(struct netmap_generic_adapter *na)
{
	ND("called");
}

int netmap_mitigation_active(struct netmap_generic_adapter *na)
{
	ND("called");
	return 0;
}

void netmap_mitigation_cleanup(struct netmap_generic_adapter *na)
{
	ND("called");
}


/*
 * In order to track whether pages are still mapped, we hook into
 * the standard cdev_pager and intercept the constructor and
 * destructor.
 */

struct netmap_vm_handle_t {
	struct cdev		*dev;
	struct netmap_priv_d	*priv;
};

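/*
 * Pager constructor/destructor pair. The constructor currently only logs
 * the mapping (the dev_ref()/dev_rel() calls are disabled while the port
 * is in progress); the destructor releases the netmap private state with
 * netmap_dtor() and frees the handle.
 */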
static int
netmap_dev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{
	struct netmap_vm_handle_t *vmh = handle;
	(void)vmh;
	D("handle %p size %jd prot %d foff %jd",
		handle, (intmax_t)size, prot, (intmax_t)foff);
#if 0
	dev_ref(vmh->dev);
#endif
	return 0;
}


static void
netmap_dev_pager_dtor(void *handle)
{
	struct netmap_vm_handle_t *vmh = handle;
	struct cdev *dev = vmh->dev;
	struct netmap_priv_d *priv = vmh->priv;
	(void)dev;
	D("handle %p", handle);
	netmap_dtor(priv);
	kfree(vmh, M_DEVBUF);
#if 0
	dev_rel(dev);
#endif
}

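/*
 * Page fault handler: translate the faulting offset within the netmap
 * shared memory area into a physical address and install (or update) a
 * fictitious page that maps it.
 */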
static int
netmap_dev_pager_fault(vm_object_t object, vm_ooffset_t offset,
	int prot, vm_page_t *mres)
{
	struct netmap_vm_handle_t *vmh = object->handle;
	struct netmap_priv_d *priv = vmh->priv;
	vm_paddr_t paddr;
	vm_page_t page;
	vm_memattr_t memattr;
	vm_pindex_t pidx;

	ND("object %p offset %jd prot %d mres %p",
		object, (intmax_t)offset, prot, mres);
	memattr = object->memattr;
	pidx = OFF_TO_IDX(offset);
	paddr = netmap_mem_ofstophys(priv->np_mref, offset);
	if (paddr == 0)
		return VM_PAGER_FAIL;

	if (((*mres)->flags & PG_FICTITIOUS) != 0) {
		/*
		 * If the passed in result page is a fake page, update it with
		 * the new physical address.
		 */
		page = *mres;
		vm_page_updatefake(page, paddr, memattr);
	} else {
		/*
		 * Replace the passed in reqpage page with our own fake page and
		 * free up all of the original pages.
		 */
#ifndef VM_OBJECT_WUNLOCK	/* FreeBSD < 10.x */
#define VM_OBJECT_WUNLOCK	VM_OBJECT_UNLOCK
#define VM_OBJECT_WLOCK		VM_OBJECT_LOCK
#endif /* VM_OBJECT_WUNLOCK */

		VM_OBJECT_WUNLOCK(object);
		page = vm_page_getfake(paddr, memattr);
		VM_OBJECT_WLOCK(object);
		vm_page_free(*mres);
		*mres = page;
		vm_page_insert(page, object, pidx);
	}
	page->valid = VM_PAGE_BITS_ALL;
	return (VM_PAGER_OK);
}


static struct cdev_pager_ops netmap_cdev_pager_ops = {
	.cdev_pg_ctor = netmap_dev_pager_ctor,
	.cdev_pg_dtor = netmap_dev_pager_dtor,
	.cdev_pg_fault = netmap_dev_pager_fault,
};


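/*
 * d_mmap_single handler: allocate a device pager object, backed by the
 * ops above, for the netmap shared memory region.
 *
 * A minimal sketch of the userspace sequence that ends up here, assuming
 * the standard netmap API from <net/netmap_user.h> (illustrative only):
 *
 *	struct nmreq req = { .nr_version = NETMAP_API };
 *	int fd = open("/dev/netmap", O_RDWR);
 *	strlcpy(req.nr_name, "em0", sizeof(req.nr_name));
 *	ioctl(fd, NIOCREGIF, &req);			// netmap_ioctl()
 *	void *mem = mmap(NULL, req.nr_memsize,
 *	    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);	// netmap_mmap_single()
 */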
static int
netmap_mmap_single(struct dev_mmap_single_args *ap)
{
	int error;
	struct cdev *cdev = ap->a_head.a_dev;
	vm_ooffset_t *foff = ap->a_offset;
	vm_object_t *objp = ap->a_object;
	vm_size_t objsize = ap->a_size;
	struct netmap_vm_handle_t *vmh;
	struct netmap_priv_d *priv;
	int prot = ap->a_nprot;
	vm_object_t obj;

	D("cdev %p foff %jd size %jd objp %p prot %d", cdev,
	    (intmax_t)*foff, (intmax_t)objsize, objp, prot);

	vmh = kmalloc(sizeof(struct netmap_vm_handle_t), M_DEVBUF,
		      M_NOWAIT | M_ZERO);
	if (vmh == NULL)
		return ENOMEM;
	vmh->dev = cdev;

	NMG_LOCK();
#if 0
	error = devfs_get_cdevpriv((void**)&priv);
	if (error)
		goto err_unlock;
#endif
	/* XXX while the devfs_get_cdevpriv() call above is disabled,
	 * priv is used uninitialized here. */
	vmh->priv = priv;
	priv->np_refcount++;
	NMG_UNLOCK();

	error = netmap_get_memory(priv);
	if (error)
		goto err_deref;

	obj = cdev_pager_allocate(vmh, OBJT_DEVICE,
		&netmap_cdev_pager_ops, objsize, prot,
		*foff, NULL);
	if (obj == NULL) {
		D("cdev_pager_allocate failed");
		error = EINVAL;
		goto err_deref;
	}

	*objp = obj;
	return 0;

err_deref:
	NMG_LOCK();
	priv->np_refcount--;
#if 0
err_unlock:
#endif
	NMG_UNLOCK();
// err:
	kfree(vmh, M_DEVBUF);
	return error;
}


// XXX can we remove this ?
static int
netmap_close(struct dev_close_args *ap)
{
	if (netmap_verbose)
		D("dev %p fflag 0x%x devtype %d",
			ap->a_head.a_dev, ap->a_fflag, ap->a_devtype);
	return 0;
}


static int
netmap_open(struct dev_open_args *ap)
{
	struct netmap_priv_d *priv;
#if 0
	int error;
#endif

	(void)ap;

	// XXX wait or nowait ?
	priv = kmalloc(sizeof(struct netmap_priv_d), M_DEVBUF,
		       M_NOWAIT | M_ZERO);
	if (priv == NULL)
		return ENOMEM;

#if 0
	error = devfs_set_cdevpriv(priv, netmap_dtor);
	if (error)
		return error;
#endif
	/* XXX with the devfs_set_cdevpriv() call above disabled, priv is
	 * not attached to the descriptor and netmap_dtor() is not
	 * registered to run on last close. */

	priv->np_refcount = 1;

	return 0;
}


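/*
 * Character device switch for /dev/netmap: open/close manage the per-open
 * private state, d_ioctl and d_kqfilter provide the control and event
 * interfaces, and d_mmap_single maps the shared netmap memory.
 */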
struct dev_ops netmap_cdevsw = {
	{ "netmap", 0, 0 },
	.d_open = netmap_open,
	.d_mmap_single = netmap_mmap_single,
	.d_ioctl = netmap_ioctl,
	.d_kqfilter = netmap_kqfilter,
	.d_close = netmap_close,
};


/*
 * Kernel entry point.
 *
 * Initialize/finalize the module and return.
 *
 * Return 0 on success, errno on failure.
 */
static int
netmap_loader(__unused struct module *module, int event, __unused void *arg)
{
	int error = 0;

	switch (event) {
	case MOD_LOAD:
		error = netmap_init();
		break;

	case MOD_UNLOAD:
		netmap_fini();
		break;

	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}

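/* Register the module; netmap_loader() handles the load/unload events. */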
DEV_MODULE(netmap, netmap_loader, NULL);