1/*
2 * Copyright (c) 1990, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from the Stanford/CMU enet packet filter,
6 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
7 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
8 * Berkeley Laboratory.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * @(#)bpf.c 8.2 (Berkeley) 3/28/94
39 *
40 * $FreeBSD: src/sys/net/bpf.c,v 1.59.2.12 2002/04/14 21:41:48 luigi Exp $
41 */
42
1f2de5d4 43#include "use_bpf.h"
984263bc 44
45#include <sys/param.h>
46#include <sys/systm.h>
47#include <sys/conf.h>
fef8985e 48#include <sys/device.h>
49#include <sys/malloc.h>
50#include <sys/mbuf.h>
51#include <sys/time.h>
52#include <sys/proc.h>
53#include <sys/signalvar.h>
54#include <sys/filio.h>
55#include <sys/sockio.h>
56#include <sys/ttycom.h>
57#include <sys/filedesc.h>
58
9fb2d31f 59#include <sys/event.h>
60
61#include <sys/socket.h>
62#include <sys/vnode.h>
63
0b31d406 64#include <sys/thread2.h>
684a93c4 65#include <sys/mplock2.h>
0b31d406 66
67#include <net/if.h>
68#include <net/bpf.h>
69#include <net/bpfdesc.h>
e1c548c2 70#include <net/netmsg2.h>
71
72#include <netinet/in.h>
73#include <netinet/if_ether.h>
74#include <sys/kernel.h>
75#include <sys/sysctl.h>
76
2c1e28dd 77#include <sys/devfs.h>
cd29885a 78
e1c548c2 79struct netmsg_bpf_output {
002c1265 80 struct netmsg_base base;
81 struct mbuf *nm_mbuf;
82 struct ifnet *nm_ifp;
83 struct sockaddr *nm_dst;
84};
85
984263bc 86MALLOC_DEFINE(M_BPF, "BPF", "BPF data");
cd29885a 87DEVFS_DECLARE_CLONE_BITMAP(bpf);
88
89#if NBPF <= 1
8be7edad 90#define BPF_PREALLOCATED_UNITS 4
91#else
92#define BPF_PREALLOCATED_UNITS NBPF
93#endif
94
95#if NBPF > 0
96
97/*
98 * The default read buffer size is patchable.
99 */
100static int bpf_bufsize = BPF_DEFAULTBUFSIZE;
101SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW,
0c52fa62 102 &bpf_bufsize, 0, "Current size of bpf buffer");
faff5ee2 103int bpf_maxbufsize = BPF_MAXBUFSIZE;
5534e0c8 104SYSCTL_INT(_debug, OID_AUTO, bpf_maxbufsize, CTLFLAG_RW,
0c52fa62 105 &bpf_maxbufsize, 0, "Maximum size of bpf buffer");
106
107/*
108 * bpf_iflist is the list of interfaces; each corresponds to an ifnet
109 */
110static struct bpf_if *bpf_iflist;
111
112static int bpf_allocbufs(struct bpf_d *);
113static void bpf_attachd(struct bpf_d *d, struct bpf_if *bp);
114static void bpf_detachd(struct bpf_d *d);
cefd3279 115static void bpf_resetd(struct bpf_d *);
116static void bpf_freed(struct bpf_d *);
117static void bpf_mcopy(const void *, void *, size_t);
118static int bpf_movein(struct uio *, int, struct mbuf **,
7cff0268 119 struct sockaddr *, int *, struct bpf_insn *);
120static int bpf_setif(struct bpf_d *, struct ifreq *);
121static void bpf_timed_out(void *);
122static void bpf_wakeup(struct bpf_d *);
123static void catchpacket(struct bpf_d *, u_char *, u_int, u_int,
124 void (*)(const void *, void *, size_t),
125 const struct timeval *);
7cff0268 126static int bpf_setf(struct bpf_d *, struct bpf_program *, u_long cmd);
127static int bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
128static int bpf_setdlt(struct bpf_d *, u_int);
5534e0c8 129static void bpf_drvinit(void *unused);
130static void bpf_filter_detach(struct knote *kn);
131static int bpf_filter_read(struct knote *kn, long hint);
132
133static d_open_t bpfopen;
cd29885a 134static d_clone_t bpfclone;
135static d_close_t bpfclose;
136static d_read_t bpfread;
137static d_write_t bpfwrite;
138static d_ioctl_t bpfioctl;
9fb2d31f 139static d_kqfilter_t bpfkqfilter;
140
141#define CDEV_MAJOR 23
fef8985e 142static struct dev_ops bpf_ops = {
88abd8b5 143 { "bpf", 0, 0 },
144 .d_open = bpfopen,
145 .d_close = bpfclose,
146 .d_read = bpfread,
147 .d_write = bpfwrite,
148 .d_ioctl = bpfioctl,
9fb2d31f 149 .d_kqfilter = bpfkqfilter
150};
151
152
153static int
5534e0c8 154bpf_movein(struct uio *uio, int linktype, struct mbuf **mp,
7cff0268 155 struct sockaddr *sockp, int *datlen, struct bpf_insn *wfilter)
156{
157 struct mbuf *m;
158 int error;
159 int len;
160 int hlen;
7cff0268 161 int slen;
984263bc 162
163 *datlen = 0;
164 *mp = NULL;
165
166 /*
167 * Build a sockaddr based on the data link layer type.
168 * We do this at this level because the ethernet header
169 * is copied directly into the data field of the sockaddr.
170 * In the case of SLIP, there is no header and the packet
171 * is forwarded as is.
172 * Also, we are careful to leave room at the front of the mbuf
173 * for the link level header.
174 */
175 switch (linktype) {
176 case DLT_SLIP:
177 sockp->sa_family = AF_INET;
178 hlen = 0;
179 break;
180
181 case DLT_EN10MB:
182 sockp->sa_family = AF_UNSPEC;
183 /* XXX Would MAXLINKHDR be better? */
184 hlen = sizeof(struct ether_header);
185 break;
186
187 case DLT_RAW:
188 case DLT_NULL:
189 sockp->sa_family = AF_UNSPEC;
190 hlen = 0;
191 break;
192
193 case DLT_ATM_RFC1483:
194 /*
195 * en atm driver requires 4-byte atm pseudo header.
196 * though it isn't standard, vpi:vci needs to be
197 * specified anyway.
198 */
199 sockp->sa_family = AF_UNSPEC;
f23061d4 200 hlen = 12; /* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
984263bc 201 break;
5534e0c8 202
203 case DLT_PPP:
204 sockp->sa_family = AF_UNSPEC;
205 hlen = 4; /* This should match PPP_HDRLEN */
206 break;
207
208 default:
5534e0c8 209 return(EIO);
210 }
211
212 len = uio->uio_resid;
213 *datlen = len - hlen;
214 if ((unsigned)len > MCLBYTES)
5534e0c8 215 return(EIO);
984263bc 216
70978433 217 m = m_getl(len, MB_WAIT, MT_DATA, M_PKTHDR, NULL);
f23061d4 218 if (m == NULL)
5534e0c8 219 return(ENOBUFS);
220 m->m_pkthdr.len = m->m_len = len;
221 m->m_pkthdr.rcvif = NULL;
222 *mp = m;
223
224 if (m->m_len < hlen) {
225 error = EPERM;
226 goto bad;
227 }
228
229 error = uiomove(mtod(m, u_char *), len, uio);
230 if (error)
231 goto bad;
232
233 slen = bpf_filter(wfilter, mtod(m, u_char *), len, len);
234 if (slen == 0) {
235 error = EPERM;
236 goto bad;
237 }
238
984263bc 239 /*
7cff0268 240 * Make room for link header, and copy it to sockaddr.
241 */
242 if (hlen != 0) {
7cff0268 243 bcopy(m->m_data, sockp->sa_data, hlen);
244 m->m_pkthdr.len -= hlen;
245 m->m_len -= hlen;
984263bc 246 m->m_data += hlen; /* XXX */
984263bc 247 }
7cff0268 248 return (0);
5534e0c8 249bad:
984263bc 250 m_freem(m);
5534e0c8 251 return(error);
252}
253
254/*
255 * Attach file to the bpf interface, i.e. make d listen on bp.
256 * Must be called at splimp.
257 */
258static void
5534e0c8 259bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
260{
261 /*
262 * Point d at bp, and add d to the interface's list of listeners.
263 * Finally, point the driver's bpf cookie at the interface so
264 * it will divert packets to bpf.
265 */
266 d->bd_bif = bp;
f23061d4 267 SLIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);
1f8e62c9 268 *bp->bif_driverp = bp;
269
270 EVENTHANDLER_INVOKE(bpf_track, bp->bif_ifp, bp->bif_dlt, 1);
271}
272
273/*
274 * Detach a file from its interface.
275 */
276static void
5534e0c8 277bpf_detachd(struct bpf_d *d)
984263bc 278{
1f8e62c9 279 int error;
984263bc 280 struct bpf_if *bp;
1f8e62c9 281 struct ifnet *ifp;
282
283 bp = d->bd_bif;
284 ifp = bp->bif_ifp;
285
286 /* Remove d from the interface's descriptor list. */
287 SLIST_REMOVE(&bp->bif_dlist, d, bpf_d, bd_next);
288
289 if (SLIST_EMPTY(&bp->bif_dlist)) {
290 /*
291 * Let the driver know that there are no more listeners.
292 */
293 *bp->bif_driverp = NULL;
294 }
295 d->bd_bif = NULL;
296
297 EVENTHANDLER_INVOKE(bpf_track, ifp, bp->bif_dlt, 0);
298
299 /*
300 * Check if this descriptor had requested promiscuous mode.
301 * If so, turn it off.
302 */
303 if (d->bd_promisc) {
304 d->bd_promisc = 0;
305 error = ifpromisc(ifp, 0);
306 if (error != 0 && error != ENXIO) {
984263bc 307 /*
1f8e62c9 308 * ENXIO can happen if a pccard is unplugged,
309 * Something is really wrong if we were able to put
310 * the driver into promiscuous mode, but can't
311 * take it out.
312 */
313 if_printf(ifp, "bpf_detach: ifpromisc failed(%d)\n",
314 error);
5534e0c8 315 }
984263bc 316 }
317}
318
319/*
320 * Open ethernet device. Returns ENXIO for illegal minor device number,
321 * EBUSY if file is open by another process.
322 */
323/* ARGSUSED */
5534e0c8 324static int
fef8985e 325bpfopen(struct dev_open_args *ap)
984263bc 326{
b13267a5 327 cdev_t dev = ap->a_head.a_dev;
41c20dac 328 struct bpf_d *d;
41c20dac 329
fef8985e 330 if (ap->a_cred->cr_prison)
5534e0c8 331 return(EPERM);
332
333 d = dev->si_drv1;
334 /*
f23061d4 335 * Each minor can be opened by only one process. If the requested
336 * minor is in use, return EBUSY.
337 */
338 if (d != NULL)
339 return(EBUSY);
cd29885a 340
0c3c561c 341 MALLOC(d, struct bpf_d *, sizeof *d, M_BPF, M_WAITOK | M_ZERO);
342 dev->si_drv1 = d;
343 d->bd_bufsize = bpf_bufsize;
344 d->bd_sig = SIGIO;
345 d->bd_seesent = 1;
346 callout_init(&d->bd_callout);
5534e0c8 347 return(0);
348}
349
350static int
351bpfclone(struct dev_clone_args *ap)
352{
353 int unit;
354
355 unit = devfs_clone_bitmap_get(&DEVFS_CLONE_BITMAP(bpf), 0);
356 ap->a_dev = make_only_dev(&bpf_ops, unit, 0, 0, 0600, "bpf%d", unit);
357
358 return 0;
359}
360
361/*
362 * Close the descriptor by detaching it from its interface,
363 * deallocating its buffers, and marking it free.
364 */
365/* ARGSUSED */
5534e0c8 366static int
fef8985e 367bpfclose(struct dev_close_args *ap)
984263bc 368{
b13267a5 369 cdev_t dev = ap->a_head.a_dev;
41c20dac 370 struct bpf_d *d = dev->si_drv1;
371
372 funsetown(d->bd_sigio);
0b31d406 373 crit_enter();
374 if (d->bd_state == BPF_WAITING)
375 callout_stop(&d->bd_callout);
376 d->bd_state = BPF_IDLE;
5534e0c8 377 if (d->bd_bif != NULL)
984263bc 378 bpf_detachd(d);
0b31d406 379 crit_exit();
984263bc 380 bpf_freed(d);
f23061d4 381 dev->si_drv1 = NULL;
382 if (dev->si_uminor >= BPF_PREALLOCATED_UNITS) {
383 devfs_clone_bitmap_put(&DEVFS_CLONE_BITMAP(bpf), dev->si_uminor);
384 destroy_dev(dev);
385 }
efda3bd0 386 kfree(d, M_BPF);
5534e0c8 387 return(0);
984263bc 388}
389
390/*
391 * Rotate the packet buffers in descriptor d. Move the store buffer
392 * into the hold slot, and the free buffer into the store slot.
393 * Zero the length of the new store buffer.
394 */
395#define ROTATE_BUFFERS(d) \
396 (d)->bd_hbuf = (d)->bd_sbuf; \
397 (d)->bd_hlen = (d)->bd_slen; \
398 (d)->bd_sbuf = (d)->bd_fbuf; \
399 (d)->bd_slen = 0; \
f23061d4 400 (d)->bd_fbuf = NULL;
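
/*
 * Illustrative note (an editor's sketch, not part of the original source):
 * each descriptor cycles three equal-sized buffers.  catchpacket() fills the
 * store buffer (bd_sbuf); when a packet will not fit, ROTATE_BUFFERS() hands
 * the store buffer to the hold slot (bd_hbuf) for bpfread() to drain and the
 * free buffer (bd_fbuf) becomes the new store buffer.  bpfread() completes
 * the cycle by returning the drained hold buffer to bd_fbuf.
 */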
401/*
402 * bpfread - read next chunk of packets from buffers
403 */
5534e0c8 404static int
fef8985e 405bpfread(struct dev_read_args *ap)
984263bc 406{
b13267a5 407 cdev_t dev = ap->a_head.a_dev;
82ed7fc2 408 struct bpf_d *d = dev->si_drv1;
409 int timed_out;
410 int error;
411
412 /*
413 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
415 */
fef8985e 416 if (ap->a_uio->uio_resid != d->bd_bufsize)
5534e0c8 417 return(EINVAL);
984263bc 418
0b31d406 419 crit_enter();
420 if (d->bd_state == BPF_WAITING)
421 callout_stop(&d->bd_callout);
422 timed_out = (d->bd_state == BPF_TIMED_OUT);
423 d->bd_state = BPF_IDLE;
424 /*
425 * If the hold buffer is empty, then do a timed sleep, which
426 * ends when the timeout expires or when enough packets
427 * have arrived to fill the store buffer.
428 */
f23061d4 429 while (d->bd_hbuf == NULL) {
430 if ((d->bd_immediate || (ap->a_ioflag & IO_NDELAY) || timed_out)
431 && d->bd_slen != 0) {
			/*
			 * We're in immediate mode, or are reading
			 * in non-blocking mode, and a packet(s)
			 * either arrived since the previous
			 * read or arrived while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
440 ROTATE_BUFFERS(d);
441 break;
442 }
443
444 /*
445 * No data is available, check to see if the bpf device
446 * is still pointed at a real interface. If not, return
447 * ENXIO so that the userland process knows to rebind
448 * it before using it again.
449 */
450 if (d->bd_bif == NULL) {
0b31d406 451 crit_exit();
5534e0c8 452 return(ENXIO);
453 }
454
fef8985e 455 if (ap->a_ioflag & IO_NDELAY) {
0b31d406 456 crit_exit();
5534e0c8 457 return(EWOULDBLOCK);
984263bc 458 }
5534e0c8 459 error = tsleep(d, PCATCH, "bpf", d->bd_rtout);
984263bc 460 if (error == EINTR || error == ERESTART) {
0b31d406 461 crit_exit();
5534e0c8 462 return(error);
463 }
464 if (error == EWOULDBLOCK) {
465 /*
466 * On a timeout, return what's in the buffer,
467 * which may be nothing. If there is something
468 * in the store buffer, we can rotate the buffers.
469 */
470 if (d->bd_hbuf)
471 /*
472 * We filled up the buffer in between
473 * getting the timeout and arriving
474 * here, so we don't need to rotate.
475 */
476 break;
477
478 if (d->bd_slen == 0) {
0b31d406 479 crit_exit();
5534e0c8 480 return(0);
481 }
482 ROTATE_BUFFERS(d);
483 break;
484 }
485 }
486 /*
487 * At this point, we know we have something in the hold slot.
488 */
0b31d406 489 crit_exit();
490
491 /*
492 * Move data from hold buffer into user space.
493 * We know the entire buffer is transferred since
494 * we checked above that the read buffer is bpf_bufsize bytes.
495 */
fef8985e 496 error = uiomove(d->bd_hbuf, d->bd_hlen, ap->a_uio);
984263bc 497
0b31d406 498 crit_enter();
984263bc 499 d->bd_fbuf = d->bd_hbuf;
f23061d4 500 d->bd_hbuf = NULL;
984263bc 501 d->bd_hlen = 0;
0b31d406 502 crit_exit();
984263bc 503
5534e0c8 504 return(error);
505}
506
507
508/*
509 * If there are processes sleeping on this descriptor, wake them up.
510 */
511static void
512bpf_wakeup(struct bpf_d *d)
984263bc
MD
513{
514 if (d->bd_state == BPF_WAITING) {
515 callout_stop(&d->bd_callout);
516 d->bd_state = BPF_IDLE;
517 }
f23061d4 518 wakeup(d);
519 if (d->bd_async && d->bd_sig && d->bd_sigio)
520 pgsigio(d->bd_sigio, d->bd_sig, 0);
521
78195a76 522 get_mplock();
5b22f1a7 523 KNOTE(&d->bd_kq.ki_note, 0);
78195a76 524 rel_mplock();
525}
526
527static void
5534e0c8 528bpf_timed_out(void *arg)
529{
530 struct bpf_d *d = (struct bpf_d *)arg;
984263bc 531
0b31d406 532 crit_enter();
533 if (d->bd_state == BPF_WAITING) {
534 d->bd_state = BPF_TIMED_OUT;
535 if (d->bd_slen != 0)
536 bpf_wakeup(d);
537 }
0b31d406 538 crit_exit();
539}
540
e1c548c2 541static void
002c1265 542bpf_output_dispatch(netmsg_t msg)
e1c548c2 543{
002c1265 544 struct netmsg_bpf_output *bmsg = (struct netmsg_bpf_output *)msg;
545 struct ifnet *ifp = bmsg->nm_ifp;
546 int error;
547
548 /*
549 * The driver frees the mbuf.
550 */
551 error = ifp->if_output(ifp, bmsg->nm_mbuf, bmsg->nm_dst, NULL);
002c1265 552 lwkt_replymsg(&msg->lmsg, error);
553}
554
0301afa9 555static int
fef8985e 556bpfwrite(struct dev_write_args *ap)
984263bc 557{
b13267a5 558 cdev_t dev = ap->a_head.a_dev;
82ed7fc2 559 struct bpf_d *d = dev->si_drv1;
560 struct ifnet *ifp;
561 struct mbuf *m;
0b31d406 562 int error;
daaccb0f 563 struct sockaddr dst;
984263bc 564 int datlen;
e1c548c2 565 struct netmsg_bpf_output bmsg;
984263bc 566
f23061d4 567 if (d->bd_bif == NULL)
5534e0c8 568 return(ENXIO);
569
570 ifp = d->bd_bif->bif_ifp;
571
fef8985e 572 if (ap->a_uio->uio_resid == 0)
5534e0c8 573 return(0);
984263bc 574
fef8985e 575 error = bpf_movein(ap->a_uio, (int)d->bd_bif->bif_dlt, &m,
7cff0268 576 &dst, &datlen, d->bd_wfilter);
984263bc 577 if (error)
5534e0c8 578 return(error);
984263bc 579
580 if (datlen > ifp->if_mtu) {
581 m_freem(m);
5534e0c8 582 return(EMSGSIZE);
253ad594 583 }
584
585 if (d->bd_hdrcmplt)
586 dst.sa_family = pseudo_AF_HDRCMPLT;
587
002c1265 588 netmsg_init(&bmsg.base, NULL, &curthread->td_msgport,
c3c96e44 589 0, bpf_output_dispatch);
590 bmsg.nm_mbuf = m;
591 bmsg.nm_ifp = ifp;
592 bmsg.nm_dst = &dst;
593
002c1265 594 return lwkt_domsg(cpu_portfn(0), &bmsg.base.lmsg, 0);
595}
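
/*
 * Userland sketch of the write path above (a hedged example, not part of the
 * original source; the interface name "em0" and the helper are illustrative).
 * A frame written to a bound bpf descriptor goes through bpf_movein() and is
 * handed to the interface's if_output routine; with BIOCSHDRCMPLT set the
 * caller supplies the complete link-level header.  Error checks are omitted.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <net/bpf.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static void
inject_frame(const void *frame, size_t framelen)
{
	struct ifreq ifr;
	u_int hdrcmplt = 1;
	int fd;

	fd = open("/dev/bpf", O_RDWR);			/* cloning device */
	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, "em0", sizeof(ifr.ifr_name));
	ioctl(fd, BIOCSETIF, &ifr);			/* bind to the interface */
	ioctl(fd, BIOCSHDRCMPLT, &hdrcmplt);		/* we build the link header */
	write(fd, frame, framelen);			/* ends up in bpfwrite() */
	close(fd);
}
#endif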
596
597/*
598 * Reset a descriptor by flushing its packet buffer and clearing the
599 * receive and drop counts. Should be called at splimp.
600 */
601static void
cefd3279 602bpf_resetd(struct bpf_d *d)
603{
604 if (d->bd_hbuf) {
605 /* Free the hold buffer. */
606 d->bd_fbuf = d->bd_hbuf;
f23061d4 607 d->bd_hbuf = NULL;
984263bc
MD
608 }
609 d->bd_slen = 0;
610 d->bd_hlen = 0;
611 d->bd_rcount = 0;
612 d->bd_dcount = 0;
613}
614
615/*
616 * FIONREAD Check for read packet available.
617 * SIOCGIFADDR Get interface address - convenient hook to driver.
618 * BIOCGBLEN Get buffer len [for read()].
619 * BIOCSETF Set ethernet read filter.
7cff0268 620 * BIOCSETWF Set ethernet write filter.
621 * BIOCFLUSH Flush read packet buffer.
622 * BIOCPROMISC Put interface into promiscuous mode.
623 * BIOCGDLT Get link layer type.
624 * BIOCGETIF Get interface name.
625 * BIOCSETIF Set interface.
626 * BIOCSRTIMEOUT Set read timeout.
627 * BIOCGRTIMEOUT Get read timeout.
628 * BIOCGSTATS Get packet stats.
629 * BIOCIMMEDIATE Set immediate mode.
630 * BIOCVERSION Get filter language version.
631 * BIOCGHDRCMPLT Get "header already complete" flag
632 * BIOCSHDRCMPLT Set "header already complete" flag
633 * BIOCGSEESENT Get "see packets sent" flag
634 * BIOCSSEESENT Set "see packets sent" flag
7cff0268 635 * BIOCLOCK Set "locked" flag
636 */
637/* ARGSUSED */
5534e0c8 638static int
fef8985e 639bpfioctl(struct dev_ioctl_args *ap)
984263bc 640{
b13267a5 641 cdev_t dev = ap->a_head.a_dev;
41c20dac 642 struct bpf_d *d = dev->si_drv1;
0b31d406 643 int error = 0;
984263bc 644
0b31d406 645 crit_enter();
646 if (d->bd_state == BPF_WAITING)
647 callout_stop(&d->bd_callout);
648 d->bd_state = BPF_IDLE;
0b31d406 649 crit_exit();
984263bc 650
651 if (d->bd_locked == 1) {
652 switch (ap->a_cmd) {
653 case BIOCGBLEN:
654 case BIOCFLUSH:
655 case BIOCGDLT:
656 case BIOCGDLTLIST:
657 case BIOCGETIF:
658 case BIOCGRTIMEOUT:
659 case BIOCGSTATS:
660 case BIOCVERSION:
661 case BIOCGRSIG:
662 case BIOCGHDRCMPLT:
663 case FIONREAD:
664 case BIOCLOCK:
665 case BIOCSRTIMEOUT:
666 case BIOCIMMEDIATE:
667 case TIOCGPGRP:
668 break;
669 default:
670 return (EPERM);
671 }
672 }
fef8985e 673 switch (ap->a_cmd) {
674 default:
675 error = EINVAL;
676 break;
677
678 /*
679 * Check for read packet available.
680 */
681 case FIONREAD:
682 {
683 int n;
684
0b31d406 685 crit_enter();
686 n = d->bd_slen;
687 if (d->bd_hbuf)
688 n += d->bd_hlen;
0b31d406 689 crit_exit();
984263bc 690
fef8985e 691 *(int *)ap->a_data = n;
692 break;
693 }
694
695 case SIOCGIFADDR:
696 {
697 struct ifnet *ifp;
698
67756095 699 if (d->bd_bif == NULL) {
984263bc 700 error = EINVAL;
67756095 701 } else {
984263bc 702 ifp = d->bd_bif->bif_ifp;
a3dd34d2 703 ifnet_serialize_all(ifp);
704 error = ifp->if_ioctl(ifp, ap->a_cmd,
705 ap->a_data, ap->a_cred);
a3dd34d2 706 ifnet_deserialize_all(ifp);
707 }
708 break;
709 }
710
711 /*
712 * Get buffer len [for read()].
713 */
714 case BIOCGBLEN:
fef8985e 715 *(u_int *)ap->a_data = d->bd_bufsize;
716 break;
717
718 /*
719 * Set buffer length.
720 */
721 case BIOCSBLEN:
67756095 722 if (d->bd_bif != NULL) {
984263bc 723 error = EINVAL;
67756095 724 } else {
fef8985e 725 u_int size = *(u_int *)ap->a_data;
726
727 if (size > bpf_maxbufsize)
fef8985e 728 *(u_int *)ap->a_data = size = bpf_maxbufsize;
984263bc 729 else if (size < BPF_MINBUFSIZE)
fef8985e 730 *(u_int *)ap->a_data = size = BPF_MINBUFSIZE;
731 d->bd_bufsize = size;
732 }
733 break;
734
735 /*
736 * Set link layer read filter.
737 */
738 case BIOCSETF:
739 case BIOCSETWF:
740 error = bpf_setf(d, (struct bpf_program *)ap->a_data,
741 ap->a_cmd);
742 break;
743
744 /*
745 * Flush read packet buffer.
746 */
747 case BIOCFLUSH:
0b31d406 748 crit_enter();
cefd3279 749 bpf_resetd(d);
0b31d406 750 crit_exit();
751 break;
752
753 /*
754 * Put interface into promiscuous mode.
755 */
756 case BIOCPROMISC:
f23061d4 757 if (d->bd_bif == NULL) {
984263bc
MD
758 /*
759 * No interface attached yet.
760 */
761 error = EINVAL;
762 break;
763 }
0b31d406 764 crit_enter();
765 if (d->bd_promisc == 0) {
766 error = ifpromisc(d->bd_bif->bif_ifp, 1);
767 if (error == 0)
768 d->bd_promisc = 1;
769 }
0b31d406 770 crit_exit();
771 break;
772
773 /*
774 * Get device parameters.
775 */
776 case BIOCGDLT:
f23061d4 777 if (d->bd_bif == NULL)
778 error = EINVAL;
779 else
fef8985e 780 *(u_int *)ap->a_data = d->bd_bif->bif_dlt;
781 break;
782
783 /*
784 * Get a list of supported data link types.
785 */
786 case BIOCGDLTLIST:
67756095 787 if (d->bd_bif == NULL) {
1f8e62c9 788 error = EINVAL;
789 } else {
790 error = bpf_getdltlist(d,
791 (struct bpf_dltlist *)ap->a_data);
792 }
793 break;
794
795 /*
796 * Set data link type.
797 */
798 case BIOCSDLT:
799 if (d->bd_bif == NULL)
800 error = EINVAL;
801 else
fef8985e 802 error = bpf_setdlt(d, *(u_int *)ap->a_data);
803 break;
804
805 /*
806 * Get interface name.
807 */
808 case BIOCGETIF:
0c3c561c 809 if (d->bd_bif == NULL) {
984263bc 810 error = EINVAL;
0c3c561c 811 } else {
984263bc 812 struct ifnet *const ifp = d->bd_bif->bif_ifp;
fef8985e 813 struct ifreq *const ifr = (struct ifreq *)ap->a_data;
984263bc 814
3e4a09e7 815 strlcpy(ifr->ifr_name, ifp->if_xname,
0c3c561c 816 sizeof ifr->ifr_name);
817 }
818 break;
819
820 /*
821 * Set interface.
822 */
823 case BIOCSETIF:
fef8985e 824 error = bpf_setif(d, (struct ifreq *)ap->a_data);
825 break;
826
827 /*
828 * Set read timeout.
829 */
830 case BIOCSRTIMEOUT:
831 {
fef8985e 832 struct timeval *tv = (struct timeval *)ap->a_data;
833
834 /*
835 * Subtract 1 tick from tvtohz() since this isn't
836 * a one-shot timer.
837 */
838 if ((error = itimerfix(tv)) == 0)
a94976ad 839 d->bd_rtout = tvtohz_low(tv);
840 break;
841 }
842
843 /*
844 * Get read timeout.
845 */
846 case BIOCGRTIMEOUT:
847 {
fef8985e 848 struct timeval *tv = (struct timeval *)ap->a_data;
849
850 tv->tv_sec = d->bd_rtout / hz;
a591f597 851 tv->tv_usec = (d->bd_rtout % hz) * ustick;
852 break;
853 }
854
855 /*
856 * Get packet stats.
857 */
858 case BIOCGSTATS:
859 {
fef8985e 860 struct bpf_stat *bs = (struct bpf_stat *)ap->a_data;
861
862 bs->bs_recv = d->bd_rcount;
863 bs->bs_drop = d->bd_dcount;
864 break;
865 }
866
867 /*
868 * Set immediate mode.
869 */
870 case BIOCIMMEDIATE:
fef8985e 871 d->bd_immediate = *(u_int *)ap->a_data;
872 break;
873
874 case BIOCVERSION:
875 {
fef8985e 876 struct bpf_version *bv = (struct bpf_version *)ap->a_data;
877
878 bv->bv_major = BPF_MAJOR_VERSION;
879 bv->bv_minor = BPF_MINOR_VERSION;
880 break;
881 }
882
883 /*
884 * Get "header already complete" flag
885 */
886 case BIOCGHDRCMPLT:
fef8985e 887 *(u_int *)ap->a_data = d->bd_hdrcmplt;
888 break;
889
890 /*
891 * Set "header already complete" flag
892 */
893 case BIOCSHDRCMPLT:
fef8985e 894 d->bd_hdrcmplt = *(u_int *)ap->a_data ? 1 : 0;
895 break;
896
897 /*
898 * Get "see sent packets" flag
899 */
900 case BIOCGSEESENT:
fef8985e 901 *(u_int *)ap->a_data = d->bd_seesent;
902 break;
903
904 /*
905 * Set "see sent packets" flag
906 */
907 case BIOCSSEESENT:
fef8985e 908 d->bd_seesent = *(u_int *)ap->a_data;
909 break;
910
984263bc 911 case FIOASYNC: /* Send signal on receive packets */
fef8985e 912 d->bd_async = *(int *)ap->a_data;
913 break;
914
915 case FIOSETOWN:
fef8985e 916 error = fsetown(*(int *)ap->a_data, &d->bd_sigio);
917 break;
918
919 case FIOGETOWN:
fef8985e 920 *(int *)ap->a_data = fgetown(d->bd_sigio);
921 break;
922
923 /* This is deprecated, FIOSETOWN should be used instead. */
924 case TIOCSPGRP:
fef8985e 925 error = fsetown(-(*(int *)ap->a_data), &d->bd_sigio);
926 break;
927
928 /* This is deprecated, FIOGETOWN should be used instead. */
929 case TIOCGPGRP:
fef8985e 930 *(int *)ap->a_data = -fgetown(d->bd_sigio);
931 break;
932
933 case BIOCSRSIG: /* Set receive signal */
934 {
f23061d4 935 u_int sig;
984263bc 936
fef8985e 937 sig = *(u_int *)ap->a_data;
938
939 if (sig >= NSIG)
940 error = EINVAL;
941 else
942 d->bd_sig = sig;
943 break;
944 }
945 case BIOCGRSIG:
fef8985e 946 *(u_int *)ap->a_data = d->bd_sig;
984263bc 947 break;
948 case BIOCLOCK:
949 d->bd_locked = 1;
950 break;
984263bc 951 }
5534e0c8 952 return(error);
953}
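
/*
 * Userland capture sketch tying the ioctls above together (a hedged example,
 * not part of the original source; the interface name "em0" is illustrative).
 * read() must be issued with a buffer of exactly the kernel buffer size, and
 * each packet in the returned buffer is prefixed by a struct bpf_hdr.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <net/bpf.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static void
capture_loop(void)
{
	struct ifreq ifr;
	u_int buflen, on = 1;
	char *buf, *p;
	ssize_t n;
	int fd;

	fd = open("/dev/bpf", O_RDONLY);
	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, "em0", sizeof(ifr.ifr_name));
	ioctl(fd, BIOCSETIF, &ifr);		/* bind to the interface */
	ioctl(fd, BIOCIMMEDIATE, &on);		/* wake readers as packets arrive */
	ioctl(fd, BIOCGBLEN, &buflen);		/* read() size must match this */
	buf = malloc(buflen);

	while ((n = read(fd, buf, buflen)) > 0) {
		for (p = buf; p < buf + n; ) {
			struct bpf_hdr *bh = (struct bpf_hdr *)p;

			/* packet data: p + bh->bh_hdrlen, bh->bh_caplen bytes */
			p += BPF_WORDALIGN(bh->bh_hdrlen + bh->bh_caplen);
		}
	}
	free(buf);
	close(fd);
}
#endif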
954
955/*
956 * Set d's packet filter program to fp. If this file already has a filter,
957 * free it and replace it. Returns EINVAL for bogus requests.
958 */
959static int
7cff0268 960bpf_setf(struct bpf_d *d, struct bpf_program *fp, u_long cmd)
961{
962 struct bpf_insn *fcode, *old;
963 u_int wfilter, flen, size;
964
965 if (cmd == BIOCSETWF) {
966 old = d->bd_wfilter;
967 wfilter = 1;
968 } else {
969 wfilter = 0;
970 old = d->bd_rfilter;
971 }
f23061d4 972 if (fp->bf_insns == NULL) {
984263bc 973 if (fp->bf_len != 0)
5534e0c8 974 return(EINVAL);
0b31d406 975 crit_enter();
976 if (wfilter)
977 d->bd_wfilter = NULL;
978 else
979 d->bd_rfilter = NULL;
cefd3279 980 bpf_resetd(d);
0b31d406 981 crit_exit();
67756095 982 if (old != NULL)
efda3bd0 983 kfree(old, M_BPF);
5534e0c8 984 return(0);
985 }
986 flen = fp->bf_len;
987 if (flen > BPF_MAXINSNS)
5534e0c8 988 return(EINVAL);
984263bc 989
0c3c561c 990 size = flen * sizeof *fp->bf_insns;
efda3bd0 991 fcode = (struct bpf_insn *)kmalloc(size, M_BPF, M_WAITOK);
f23061d4 992 if (copyin(fp->bf_insns, fcode, size) == 0 &&
984263bc 993 bpf_validate(fcode, (int)flen)) {
0b31d406 994 crit_enter();
995 if (wfilter)
996 d->bd_wfilter = fcode;
997 else
998 d->bd_rfilter = fcode;
cefd3279 999 bpf_resetd(d);
0b31d406 1000 crit_exit();
67756095 1001 if (old != NULL)
efda3bd0 1002 kfree(old, M_BPF);
984263bc 1003
5534e0c8 1004 return(0);
984263bc 1005 }
efda3bd0 1006 kfree(fcode, M_BPF);
5534e0c8 1007 return(EINVAL);
1008}
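
/*
 * Filter program sketch (a hedged example, not part of the original source):
 * userland hands bpf_setf() its instructions through BIOCSETF (or BIOCSETWF
 * for the write filter).  The classic BPF macros below accept only frames
 * whose Ethernet type is IPv4 and truncate them to 96 bytes.
 */
#if 0
#include <sys/ioctl.h>
#include <net/bpf.h>

static struct bpf_insn ipv4_only[] = {
	/* load the 16-bit ethertype at offset 12 */
	BPF_STMT(BPF_LD + BPF_H + BPF_ABS, 12),
	/* if it equals 0x0800 (IPv4), keep 96 bytes, else drop */
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, 0x0800, 0, 1),
	BPF_STMT(BPF_RET + BPF_K, 96),
	BPF_STMT(BPF_RET + BPF_K, 0),
};

static int
install_filter(int fd)
{
	struct bpf_program prog;

	prog.bf_len = sizeof(ipv4_only) / sizeof(ipv4_only[0]);
	prog.bf_insns = ipv4_only;
	return (ioctl(fd, BIOCSETF, &prog));	/* validated in bpf_setf() */
}
#endif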
1009
1010/*
1011 * Detach a file from its current interface (if attached at all) and attach
1012 * to the interface indicated by the name stored in ifr.
1013 * Return an errno or 0.
1014 */
1015static int
5534e0c8 1016bpf_setif(struct bpf_d *d, struct ifreq *ifr)
1017{
1018 struct bpf_if *bp;
0b31d406 1019 int error;
1020 struct ifnet *theywant;
1021
1022 theywant = ifunit(ifr->ifr_name);
f23061d4 1023 if (theywant == NULL)
5534e0c8 1024 return(ENXIO);
1025
1026 /*
1027 * Look through attached interfaces for the named one.
1028 */
67756095 1029 for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
1030 struct ifnet *ifp = bp->bif_ifp;
1031
f23061d4 1032 if (ifp == NULL || ifp != theywant)
984263bc 1033 continue;
1034 /* skip additional entry */
1035 if (bp->bif_driverp != &ifp->if_bpf)
1036 continue;
1037 /*
1038 * We found the requested interface.
1039 * Allocate the packet buffers if we need to.
1040 * If we're already attached to requested interface,
1041 * just flush the buffer.
1042 */
5534e0c8 1043 if (d->bd_sbuf == NULL) {
1044 error = bpf_allocbufs(d);
1045 if (error != 0)
5534e0c8 1046 return(error);
984263bc 1047 }
0b31d406 1048 crit_enter();
984263bc 1049 if (bp != d->bd_bif) {
5534e0c8 1050 if (d->bd_bif != NULL) {
1051 /*
1052 * Detach if attached to something else.
1053 */
1054 bpf_detachd(d);
5534e0c8 1055 }
1056
1057 bpf_attachd(d, bp);
1058 }
cefd3279 1059 bpf_resetd(d);
0b31d406 1060 crit_exit();
5534e0c8 1061 return(0);
984263bc 1062 }
5534e0c8 1063
984263bc 1064 /* Not found. */
5534e0c8 1065 return(ENXIO);
1066}
1067
9fb2d31f 1068static struct filterops bpf_read_filtops =
4c91dbc9 1069 { FILTEROP_ISFD, NULL, bpf_filter_detach, bpf_filter_read };
1070
1071static int
1072bpfkqfilter(struct dev_kqfilter_args *ap)
1073{
1074 cdev_t dev = ap->a_head.a_dev;
1075 struct knote *kn = ap->a_kn;
1076 struct klist *klist;
1077 struct bpf_d *d;
1078
1079 d = dev->si_drv1;
1080 if (d->bd_bif == NULL) {
1081 ap->a_result = 1;
1082 return (0);
1083 }
1084
1085 ap->a_result = 0;
1086 switch (kn->kn_filter) {
1087 case EVFILT_READ:
1088 kn->kn_fop = &bpf_read_filtops;
1089 kn->kn_hook = (caddr_t)d;
1090 break;
1091 default:
b287d649 1092 ap->a_result = EOPNOTSUPP;
1093 return (0);
1094 }
1095
1096 klist = &d->bd_kq.ki_note;
1097 knote_insert(klist, kn);
1098
1099 return (0);
1100}
1101
1102static void
1103bpf_filter_detach(struct knote *kn)
1104{
1105 struct klist *klist;
1106 struct bpf_d *d;
1107
9fb2d31f 1108 d = (struct bpf_d *)kn->kn_hook;
1109 klist = &d->bd_kq.ki_note;
1110 knote_remove(klist, kn);
1111}
1112
1113static int
1114bpf_filter_read(struct knote *kn, long hint)
1115{
1116 struct bpf_d *d;
1117 int ready = 0;
1118
1119 crit_enter();
1120 d = (struct bpf_d *)kn->kn_hook;
1121 if (d->bd_hlen != 0 ||
1122 ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) &&
1123 d->bd_slen != 0)) {
1124 ready = 1;
1125 } else {
1126 /* Start the read timeout if necessary. */
1127 if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
1128 callout_reset(&d->bd_callout, d->bd_rtout,
1129 bpf_timed_out, d);
1130 d->bd_state = BPF_WAITING;
1131 }
1132 }
1133 crit_exit();
1134
1135 return (ready);
1136}
1137
1138
984263bc 1139/*
1140 * Process the packet pkt of length pktlen. The packet is parsed
1141 * by each listener's filter, and if accepted, stashed into the
1142 * corresponding buffer.
1143 */
1144void
1f8e62c9 1145bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
984263bc 1146{
82ed7fc2 1147 struct bpf_d *d;
1148 struct timeval tv;
1149 int gottime = 0;
82ed7fc2 1150 u_int slen;
f23061d4 1151
1152 get_mplock();
1153
1154 /* Re-check */
1155 if (bp == NULL) {
1156 rel_mplock();
1157 return;
1158 }
1159
1160 /*
1161 * Note that the ipl does not have to be raised at this point.
1162 * The only problem that could arise here is that if two different
1163 * interfaces shared any data. This is not the case.
1164 */
f23061d4 1165 SLIST_FOREACH(d, &bp->bif_dlist, bd_next) {
984263bc 1166 ++d->bd_rcount;
7cff0268 1167 slen = bpf_filter(d->bd_rfilter, pkt, pktlen, pktlen);
1168 if (slen != 0) {
1169 if (!gottime) {
1170 microtime(&tv);
1171 gottime = 1;
1172 }
1173 catchpacket(d, pkt, pktlen, slen, ovbcopy, &tv);
1174 }
984263bc 1175 }
1176
1177 rel_mplock();
1178}
1179
1180/*
1181 * Copy data from an mbuf chain into a buffer. This code is derived
1182 * from m_copydata in sys/uipc_mbuf.c.
1183 */
1184static void
5534e0c8 1185bpf_mcopy(const void *src_arg, void *dst_arg, size_t len)
984263bc 1186{
1187 const struct mbuf *m;
1188 u_int count;
1189 u_char *dst;
1190
1191 m = src_arg;
1192 dst = dst_arg;
1193 while (len > 0) {
5534e0c8 1194 if (m == NULL)
1195 panic("bpf_mcopy");
1196 count = min(m->m_len, len);
1197 bcopy(mtod(m, void *), dst, count);
1198 m = m->m_next;
1199 dst += count;
1200 len -= count;
1201 }
1202}
1203
1204/*
1205 * Process the packet in the mbuf chain m. The packet is parsed by each
1206 * listener's filter, and if accepted, stashed into the corresponding
1207 * buffer.
1208 */
1209void
1f8e62c9 1210bpf_mtap(struct bpf_if *bp, struct mbuf *m)
984263bc 1211{
1212 struct bpf_d *d;
1213 u_int pktlen, slen;
1214 struct timeval tv;
1215 int gottime = 0;
984263bc 1216
1217 get_mplock();
1218
1219 /* Re-check */
1220 if (bp == NULL) {
1221 rel_mplock();
1222 return;
1223 }
1224
1f8e62c9 1225 /* Don't compute pktlen, if no descriptor is attached. */
1226 if (SLIST_EMPTY(&bp->bif_dlist)) {
1227 rel_mplock();
1f8e62c9 1228 return;
64202d9a 1229 }
1f8e62c9 1230
cefd3279 1231 pktlen = m_lengthm(m, NULL);
984263bc 1232
f23061d4 1233 SLIST_FOREACH(d, &bp->bif_dlist, bd_next) {
1234 if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL))
1235 continue;
1236 ++d->bd_rcount;
7cff0268 1237 slen = bpf_filter(d->bd_rfilter, (u_char *)m, pktlen, 0);
1238 if (slen != 0) {
1239 if (!gottime) {
1240 microtime(&tv);
1241 gottime = 1;
1242 }
1243 catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy,
1244 &tv);
1245 }
984263bc 1246 }
1247
1248 rel_mplock();
1249}
1250
1251/*
1252 * Incoming linkage from device drivers, where we have a mbuf chain
1253 * but need to prepend some arbitrary header from a linear buffer.
1254 *
1255 * Con up a minimal dummy header to pacify bpf. Allocate (only) a
1256 * struct m_hdr on the stack. This is safe as bpf only reads from the
1257 * fields in this header that we initialize, and will not try to free
1258 * it or keep a pointer to it.
1259 */
1260void
1261bpf_mtap_hdr(struct bpf_if *arg, caddr_t data, u_int dlen, struct mbuf *m, u_int direction)
1262{
1263 struct m_hdr mh;
1264
1265 mh.mh_flags = 0;
1266 mh.mh_next = m;
1267 mh.mh_len = dlen;
1268 mh.mh_data = data;
1269
1270 return bpf_mtap(arg, (struct mbuf *) &mh);
1271}
1272
1273void
1274bpf_mtap_family(struct bpf_if *bp, struct mbuf *m, sa_family_t family)
1275{
1276 u_int family4;
1277
1278 KKASSERT(family != AF_UNSPEC);
cefd3279 1279
81806f7a
HP
1280 family4 = (u_int)family;
1281 bpf_ptap(bp, m, &family4, sizeof(family4));
1282}
1283
984263bc 1284/*
1285 * Process the packet in the mbuf chain m with the header in m prepended.
1286 * The packet is parsed by each listener's filter, and if accepted,
1287 * stashed into the corresponding buffer.
1288 */
1289void
1290bpf_ptap(struct bpf_if *bp, struct mbuf *m, const void *data, u_int dlen)
1291{
1292 struct mbuf mb;
1293
1294 /*
1295 * Craft on-stack mbuf suitable for passing to bpf_mtap.
1296 * Note that we cut corners here; we only setup what's
1297 * absolutely needed--this mbuf should never go anywhere else.
1298 */
1299 mb.m_next = m;
1300 mb.m_data = __DECONST(void *, data); /* LINTED */
1301 mb.m_len = dlen;
2265ca08 1302 mb.m_pkthdr.rcvif = m->m_pkthdr.rcvif;
1f8e62c9 1303
c972d9bf 1304 bpf_mtap(bp, &mb);
1305}
1306
1307/*
1308 * Move the packet data from interface memory (pkt) into the
1309 * store buffer. Return 1 if it's time to wakeup a listener (buffer full),
1310 * otherwise 0. "copy" is the routine called to do the actual data
1311 * transfer. bcopy is passed in to copy contiguous chunks, while
1312 * bpf_mcopy is passed in to copy mbuf chains. In the latter case,
1313 * pkt is really an mbuf.
1314 */
1315static void
5534e0c8 1316catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
1317 void (*cpfn)(const void *, void *, size_t),
1318 const struct timeval *tv)
984263bc 1319{
1320 struct bpf_hdr *hp;
1321 int totlen, curlen;
1322 int hdrlen = d->bd_bif->bif_hdrlen;
44aa8f02 1323 int wakeup = 0;
1324 /*
1325 * Figure out how many bytes to move. If the packet is
1326 * greater or equal to the snapshot length, transfer that
1327 * much. Otherwise, transfer the whole packet (unless
1328 * we hit the buffer size limit).
1329 */
1330 totlen = hdrlen + min(snaplen, pktlen);
1331 if (totlen > d->bd_bufsize)
1332 totlen = d->bd_bufsize;
1333
1334 /*
1335 * Round up the end of the previous packet to the next longword.
1336 */
1337 curlen = BPF_WORDALIGN(d->bd_slen);
1338 if (curlen + totlen > d->bd_bufsize) {
1339 /*
1340 * This packet will overflow the storage buffer.
1341 * Rotate the buffers if we can, then wakeup any
1342 * pending reads.
1343 */
f23061d4 1344 if (d->bd_fbuf == NULL) {
1345 /*
1346 * We haven't completed the previous read yet,
1347 * so drop the packet.
1348 */
1349 ++d->bd_dcount;
1350 return;
1351 }
1352 ROTATE_BUFFERS(d);
44aa8f02 1353 wakeup = 1;
984263bc 1354 curlen = 0;
67756095 1355 } else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT) {
1356 /*
1357 * Immediate mode is set, or the read timeout has
1358 * already expired during a select call. A packet
1359 * arrived, so the reader should be woken up.
1360 */
44aa8f02 1361 wakeup = 1;
67756095 1362 }
1363
1364 /*
1365 * Append the bpf header.
1366 */
1367 hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
aedcf384 1368 hp->bh_tstamp = *tv;
1369 hp->bh_datalen = pktlen;
1370 hp->bh_hdrlen = hdrlen;
1371 /*
1372 * Copy the packet data into the store buffer and update its length.
1373 */
1374 (*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
1375 d->bd_slen = curlen + totlen;
1376
1377 if (wakeup)
1378 bpf_wakeup(d);
1379}
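
/*
 * Layout note (an editor's illustration, not in the original source): each
 * record that catchpacket() appends to the store buffer looks like
 *
 *	struct bpf_hdr | captured data (bh_caplen bytes) | pad
 *
 * where the next record starts at the following BPF_WORDALIGN() boundary and
 * bif_hdrlen was pre-padded in bpfattach_dlt() so that the network-layer
 * header itself lands word-aligned.  Readers therefore walk records with
 * p += BPF_WORDALIGN(bh_hdrlen + bh_caplen), as in the sketch after
 * bpfioctl().
 */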
1380
1381/*
1382 * Initialize all nonzero fields of a descriptor.
1383 */
1384static int
5534e0c8 1385bpf_allocbufs(struct bpf_d *d)
984263bc 1386{
efda3bd0 1387 d->bd_fbuf = kmalloc(d->bd_bufsize, M_BPF, M_WAITOK);
efda3bd0 1388 d->bd_sbuf = kmalloc(d->bd_bufsize, M_BPF, M_WAITOK);
1389 d->bd_slen = 0;
1390 d->bd_hlen = 0;
5534e0c8 1391 return(0);
1392}
1393
1394/*
67756095 1395 * Free buffers and packet filter program currently in use by a descriptor.
1396 * Called on close.
1397 */
1398static void
5534e0c8 1399bpf_freed(struct bpf_d *d)
1400{
1401 /*
1402 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and it hasn't yet been marked
	 * free.
1405 */
f23061d4 1406 if (d->bd_sbuf != NULL) {
efda3bd0 1407 kfree(d->bd_sbuf, M_BPF);
f23061d4 1408 if (d->bd_hbuf != NULL)
efda3bd0 1409 kfree(d->bd_hbuf, M_BPF);
f23061d4 1410 if (d->bd_fbuf != NULL)
efda3bd0 1411 kfree(d->bd_fbuf, M_BPF);
984263bc 1412 }
1413 if (d->bd_rfilter)
1414 kfree(d->bd_rfilter, M_BPF);
1415 if (d->bd_wfilter)
1416 kfree(d->bd_wfilter, M_BPF);
1417}
1418
1419/*
1420 * Attach an interface to bpf. ifp is a pointer to the structure
1421 * defining the interface to be attached, dlt is the link layer type,
1422 * and hdrlen is the fixed size of the link header (variable length
1f8e62c9 1423 * headers are not yet supported).
1424 */
1425void
5534e0c8 1426bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
984263bc 1427{
1428 bpfattach_dlt(ifp, dlt, hdrlen, &ifp->if_bpf);
1429}
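
/*
 * Driver-side sketch (a hedged example, not part of the original source; the
 * "foo" names are hypothetical).  A driver registers with bpf at attach time
 * and feeds frames to the tap only while a listener is present, i.e. while
 * its if_bpf pointer is non-NULL (see bpf_attachd()/bpf_detachd() above).
 */
#if 0
static void
foo_ifattach(struct ifnet *ifp)
{
	/* ... after the generic interface attach ... */
	bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header));
}

static void
foo_input(struct ifnet *ifp, struct mbuf *m)
{
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, m);	/* hand the frame to listeners */
	/* ... pass m up the stack ... */
}
#endif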
1430
1431void
1432bpfattach_dlt(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
1433{
984263bc 1434 struct bpf_if *bp;
02334e23 1435
efda3bd0 1436 bp = kmalloc(sizeof *bp, M_BPF, M_WAITOK | M_ZERO);
984263bc 1437
1f8e62c9 1438 SLIST_INIT(&bp->bif_dlist);
1439 bp->bif_ifp = ifp;
1440 bp->bif_dlt = dlt;
1441 bp->bif_driverp = driverp;
1442 *bp->bif_driverp = NULL;
1443
1444 bp->bif_next = bpf_iflist;
1445 bpf_iflist = bp;
1446
1447 /*
1448 * Compute the length of the bpf header. This is not necessarily
1449 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
1450 * that the network layer header begins on a longword boundary (for
1451 * performance reasons and to alleviate alignment restrictions).
1452 */
1453 bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;
1454
1455 if (bootverbose)
1f8e62c9 1456 if_printf(ifp, "bpf attached\n");
1457}
1458
1459/*
1460 * Detach bpf from an interface. This involves detaching each descriptor
1461 * associated with the interface, and leaving bd_bif NULL. Notify each
1462 * descriptor as it's detached so that any sleepers wake up and get
1463 * ENXIO.
1464 */
1465void
5534e0c8 1466bpfdetach(struct ifnet *ifp)
984263bc 1467{
1468 struct bpf_if *bp, *bp_prev;
1469 struct bpf_d *d;
984263bc 1470
0b31d406 1471 crit_enter();
1472
1473 /* Locate BPF interface information */
1474 bp_prev = NULL;
1475 for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
1476 if (ifp == bp->bif_ifp)
1477 break;
1478 bp_prev = bp;
1479 }
1480
1481 /* Interface wasn't attached */
1482 if (bp->bif_ifp == NULL) {
0b31d406 1483 crit_exit();
4b1cf444 1484 kprintf("bpfdetach: %s was not attached\n", ifp->if_xname);
1485 return;
1486 }
1487
f23061d4 1488 while ((d = SLIST_FIRST(&bp->bif_dlist)) != NULL) {
1489 bpf_detachd(d);
1490 bpf_wakeup(d);
1491 }
1492
5534e0c8 1493 if (bp_prev != NULL)
984263bc 1494 bp_prev->bif_next = bp->bif_next;
5534e0c8 1495 else
984263bc 1496 bpf_iflist = bp->bif_next;
984263bc 1497
efda3bd0 1498 kfree(bp, M_BPF);
984263bc 1499
0b31d406 1500 crit_exit();
1501}
1502
1503/*
1504 * Get a list of available data link type of the interface.
1505 */
1506static int
1507bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl)
1508{
1509 int n, error;
1510 struct ifnet *ifp;
1511 struct bpf_if *bp;
1512
1513 ifp = d->bd_bif->bif_ifp;
1514 n = 0;
1515 error = 0;
67756095 1516 for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
1517 if (bp->bif_ifp != ifp)
1518 continue;
1519 if (bfl->bfl_list != NULL) {
1520 if (n >= bfl->bfl_len) {
1521 return (ENOMEM);
1522 }
1523 error = copyout(&bp->bif_dlt,
1524 bfl->bfl_list + n, sizeof(u_int));
1525 }
1526 n++;
1527 }
1528 bfl->bfl_len = n;
1529 return(error);
1530}
1531
1532/*
1533 * Set the data link type of a BPF instance.
1534 */
1535static int
1536bpf_setdlt(struct bpf_d *d, u_int dlt)
1537{
1538 int error, opromisc;
1539 struct ifnet *ifp;
1540 struct bpf_if *bp;
1541
1542 if (d->bd_bif->bif_dlt == dlt)
1543 return (0);
1544 ifp = d->bd_bif->bif_ifp;
67756095 1545 for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
1546 if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
1547 break;
1548 }
1549 if (bp != NULL) {
1550 opromisc = d->bd_promisc;
0b31d406 1551 crit_enter();
1552 bpf_detachd(d);
1553 bpf_attachd(d, bp);
cefd3279 1554 bpf_resetd(d);
1555 if (opromisc) {
1556 error = ifpromisc(bp->bif_ifp, 1);
67756095 1557 if (error) {
1558 if_printf(bp->bif_ifp,
1559 "bpf_setdlt: ifpromisc failed (%d)\n",
1560 error);
67756095 1561 } else {
1f8e62c9 1562 d->bd_promisc = 1;
67756095 1563 }
1f8e62c9 1564 }
0b31d406 1565 crit_exit();
1566 }
1567 return(bp == NULL ? EINVAL : 0);
1568}
1569
984263bc 1570static void
e4c9c0c8 1571bpf_drvinit(void *unused)
984263bc 1572{
1573 int i;
1574
1575 make_autoclone_dev(&bpf_ops, &DEVFS_CLONE_BITMAP(bpf),
1576 bpfclone, 0, 0, 0600, "bpf");
1577 for (i = 0; i < BPF_PREALLOCATED_UNITS; i++) {
1578 make_dev(&bpf_ops, i, 0, 0, 0600, "bpf%d", i);
1579 devfs_clone_bitmap_set(&DEVFS_CLONE_BITMAP(bpf), i);
1580 }
1581}
1582
1583static void
1584bpf_drvuninit(void *unused)
1585{
8be7edad 1586 devfs_clone_handler_del("bpf");
1587 dev_ops_remove_all(&bpf_ops);
1588 devfs_clone_bitmap_uninit(&DEVFS_CLONE_BITMAP(bpf));
1589}
1590
1591SYSINIT(bpfdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,bpf_drvinit,NULL)
cd29885a 1592SYSUNINIT(bpfdev, SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,bpf_drvuninit, NULL);
1593
1594#else /* !BPF */
1595/*
1596 * NOP stubs to allow bpf-using drivers to load and function.
1597 *
1598 * A 'better' implementation would allow the core bpf functionality
1599 * to be loaded at runtime.
1600 */
1601
1602void
1603bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
1604{
1605}
1606
1607void
1608bpf_mtap(struct bpf_if *bp, struct mbuf *m)
1609{
1610}
1611
1612void
f1555601 1613bpf_ptap(struct bpf_if *bp, struct mbuf *m, const void *data, u_int dlen)
1614{
1615}
1616
1617void
5534e0c8 1618bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
1619{
1620}
1621
1622void
1623bpfattach_dlt(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
1624{
1625}
1626
1627void
5534e0c8 1628bpfdetach(struct ifnet *ifp)
1629{
1630}
1631
1632u_int
5534e0c8 1633bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen)
1634{
1635 return -1; /* "no filter" behaviour */
1636}
1637
1638#endif /* !BPF */