DEVFS - remove dev_ops_add(), dev_ops_get(), and get_dev()
[dragonfly.git] / sys / net / bpf.c
/*
 * Copyright (c) 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpf.c	8.2 (Berkeley) 3/28/94
 *
 * $FreeBSD: src/sys/net/bpf.c,v 1.59.2.12 2002/04/14 21:41:48 luigi Exp $
 * $DragonFly: src/sys/net/bpf.c,v 1.50 2008/09/23 11:28:49 sephe Exp $
 */

#include "use_bpf.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/time.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/sockio.h>
#include <sys/ttycom.h>
#include <sys/filedesc.h>

#include <sys/poll.h>

#include <sys/socket.h>
#include <sys/vnode.h>

#include <sys/thread2.h>

#include <net/if.h>
#include <net/bpf.h>
#include <net/bpfdesc.h>
#include <net/netmsg2.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <vfs/devfs/devfs.h>

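/*
 * Message used by bpfwrite() to hand an outgoing mbuf, its destination
 * sockaddr, and the target interface to a protocol thread; the message
 * reply carries the if_output() error back to the writer.
 */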
struct netmsg_bpf_output {
	struct netmsg	nm_netmsg;
	struct mbuf	*nm_mbuf;
	struct ifnet	*nm_ifp;
	struct sockaddr	*nm_dst;
};

MALLOC_DEFINE(M_BPF, "BPF", "BPF data");
DEVFS_DECLARE_CLONE_BITMAP(bpf);

#if NBPF > 0

/*
 * The default read buffer size is patchable.
 */
static int bpf_bufsize = BPF_DEFAULTBUFSIZE;
SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW,
	   &bpf_bufsize, 0, "");
int bpf_maxbufsize = BPF_MAXBUFSIZE;
SYSCTL_INT(_debug, OID_AUTO, bpf_maxbufsize, CTLFLAG_RW,
	   &bpf_maxbufsize, 0, "");

/*
 * bpf_iflist is the list of interfaces; each corresponds to an ifnet
 */
static struct bpf_if *bpf_iflist;

static int	bpf_allocbufs(struct bpf_d *);
static void	bpf_attachd(struct bpf_d *d, struct bpf_if *bp);
static void	bpf_detachd(struct bpf_d *d);
static void	bpf_resetd(struct bpf_d *);
static void	bpf_freed(struct bpf_d *);
static void	bpf_mcopy(const void *, void *, size_t);
static int	bpf_movein(struct uio *, int, struct mbuf **,
			   struct sockaddr *, int *, struct bpf_insn *);
static int	bpf_setif(struct bpf_d *, struct ifreq *);
static void	bpf_timed_out(void *);
static void	bpf_wakeup(struct bpf_d *);
static void	catchpacket(struct bpf_d *, u_char *, u_int, u_int,
			    void (*)(const void *, void *, size_t),
			    const struct timeval *);
static int	bpf_setf(struct bpf_d *, struct bpf_program *, u_long cmd);
static int	bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
static int	bpf_setdlt(struct bpf_d *, u_int);
static void	bpf_drvinit(void *unused);

static d_open_t		bpfopen;
static d_clone_t	bpfclone;
static d_close_t	bpfclose;
static d_read_t		bpfread;
static d_write_t	bpfwrite;
static d_ioctl_t	bpfioctl;
static d_poll_t		bpfpoll;

#define CDEV_MAJOR 23
static struct dev_ops bpf_ops = {
	{ "bpf", CDEV_MAJOR, 0 },
	.d_open = bpfopen,
	.d_close = bpfclose,
	.d_read = bpfread,
	.d_write = bpfwrite,
	.d_ioctl = bpfioctl,
	.d_poll = bpfpoll,
};


static int
bpf_movein(struct uio *uio, int linktype, struct mbuf **mp,
	   struct sockaddr *sockp, int *datlen, struct bpf_insn *wfilter)
{
	struct mbuf *m;
	int error;
	int len;
	int hlen;
	int slen;

	*datlen = 0;
	*mp = NULL;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {
	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = sizeof(struct ether_header);
		break;

	case DLT_RAW:
	case DLT_NULL:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

	case DLT_ATM_RFC1483:
		/*
		 * en atm driver requires 4-byte atm pseudo header.
		 * though it isn't standard, vpi:vci needs to be
		 * specified anyway.
		 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 12;	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
		break;

	case DLT_PPP:
		sockp->sa_family = AF_UNSPEC;
		hlen = 4;	/* This should match PPP_HDRLEN */
		break;

	default:
		return(EIO);
	}

	len = uio->uio_resid;
	*datlen = len - hlen;
	if ((unsigned)len > MCLBYTES)
		return(EIO);

	m = m_getl(len, MB_WAIT, MT_DATA, M_PKTHDR, NULL);
	if (m == NULL)
		return(ENOBUFS);
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	*mp = m;

	if (m->m_len < hlen) {
		error = EPERM;
		goto bad;
	}

	error = uiomove(mtod(m, u_char *), len, uio);
	if (error)
		goto bad;

	slen = bpf_filter(wfilter, mtod(m, u_char *), len, len);
	if (slen == 0) {
		error = EPERM;
		goto bad;
	}

	/*
	 * Make room for link header, and copy it to sockaddr.
	 */
	if (hlen != 0) {
		bcopy(m->m_data, sockp->sa_data, hlen);
		m->m_pkthdr.len -= hlen;
		m->m_len -= hlen;
		m->m_data += hlen; /* XXX */
	}
	return (0);
bad:
	m_freem(m);
	return(error);
}

/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 * Must be called at splimp.
 */
static void
bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
{
	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */
	d->bd_bif = bp;
	SLIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);
	*bp->bif_driverp = bp;
}

/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(struct bpf_d *d)
{
	int error;
	struct bpf_if *bp;
	struct ifnet *ifp;

	bp = d->bd_bif;
	ifp = bp->bif_ifp;

	/* Remove d from the interface's descriptor list. */
	SLIST_REMOVE(&bp->bif_dlist, d, bpf_d, bd_next);

	if (SLIST_EMPTY(&bp->bif_dlist)) {
		/*
		 * Let the driver know that there are no more listeners.
		 */
		*bp->bif_driverp = NULL;
	}
	d->bd_bif = NULL;
	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		error = ifpromisc(ifp, 0);
		if (error != 0 && error != ENXIO) {
			/*
			 * ENXIO can happen if a pccard is unplugged.
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			if_printf(ifp, "bpf_detach: ifpromisc failed(%d)\n",
				  error);
		}
	}
}

/*
 * Open ethernet device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 */
/* ARGSUSED */
static int
bpfopen(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bpf_d *d;

	if (ap->a_cred->cr_prison)
		return(EPERM);

	d = dev->si_drv1;
	/*
	 * Each minor can be opened by only one process.  If the requested
	 * minor is in use, return EBUSY.
	 */
	if (d != NULL)
		return(EBUSY);

	MALLOC(d, struct bpf_d *, sizeof *d, M_BPF, M_WAITOK | M_ZERO);
	dev->si_drv1 = d;
	d->bd_bufsize = bpf_bufsize;
	d->bd_sig = SIGIO;
	d->bd_seesent = 1;
	callout_init(&d->bd_callout);
	return(0);
}

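/*
 * devfs clone handler: allocate the next free unit from the clone bitmap
 * and create the /dev/bpfN node that the caller will actually open.
 */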
static int
bpfclone(struct dev_clone_args *ap)
{
	int unit;

	unit = devfs_clone_bitmap_get(&DEVFS_CLONE_BITMAP(bpf), 0);
	ap->a_dev = make_only_dev(&bpf_ops, unit, 0, 0, 0600, "bpf%d", unit);

	return 0;
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
static int
bpfclose(struct dev_close_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bpf_d *d = dev->si_drv1;

	funsetown(d->bd_sigio);
	crit_enter();
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	if (d->bd_bif != NULL)
		bpf_detachd(d);
	crit_exit();
	bpf_freed(d);
	dev->si_drv1 = NULL;
	devfs_clone_bitmap_put(&DEVFS_CLONE_BITMAP(bpf), dev->si_uminor);
	kfree(d, M_BPF);
	return(0);
}

/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 */
#define ROTATE_BUFFERS(d) \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = NULL;
/*
 * bpfread - read next chunk of packets from buffers
 */
static int
bpfread(struct dev_read_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bpf_d *d = dev->si_drv1;
	int timed_out;
	int error;

	/*
	 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (ap->a_uio->uio_resid != d->bd_bufsize)
		return(EINVAL);

	crit_enter();
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	timed_out = (d->bd_state == BPF_TIMED_OUT);
	d->bd_state = BPF_IDLE;
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == NULL) {
		if ((d->bd_immediate || timed_out) && d->bd_slen != 0) {
			/*
			 * One or more packets arrived either since the
			 * previous read or while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}

		/*
		 * No data is available, check to see if the bpf device
		 * is still pointed at a real interface.  If not, return
		 * ENXIO so that the userland process knows to rebind
		 * it before using it again.
		 */
		if (d->bd_bif == NULL) {
			crit_exit();
			return(ENXIO);
		}

		if (ap->a_ioflag & IO_NDELAY) {
			crit_exit();
			return(EWOULDBLOCK);
		}
		error = tsleep(d, PCATCH, "bpf", d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			crit_exit();
			return(error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				crit_exit();
				return(0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	crit_exit();

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 */
	error = uiomove(d->bd_hbuf, d->bd_hlen, ap->a_uio);

	crit_enter();
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = NULL;
	d->bd_hlen = 0;
	crit_exit();

	return(error);
}


/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
static void
bpf_wakeup(struct bpf_d *d)
{
	if (d->bd_state == BPF_WAITING) {
		callout_stop(&d->bd_callout);
		d->bd_state = BPF_IDLE;
	}
	wakeup(d);
	if (d->bd_async && d->bd_sig && d->bd_sigio)
		pgsigio(d->bd_sigio, d->bd_sig, 0);

	get_mplock();
	selwakeup(&d->bd_sel);
	rel_mplock();
	/* XXX */
	d->bd_sel.si_pid = 0;
}

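/*
 * Callout handler for the read timeout armed by bpfpoll().  If the
 * descriptor is still waiting, mark it timed out and, if there is
 * buffered data, wake up the reader.
 */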
static void
bpf_timed_out(void *arg)
{
	struct bpf_d *d = (struct bpf_d *)arg;

	crit_enter();
	if (d->bd_state == BPF_WAITING) {
		d->bd_state = BPF_TIMED_OUT;
		if (d->bd_slen != 0)
			bpf_wakeup(d);
	}
	crit_exit();
}

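/*
 * Dispatch routine for the netmsg sent by bpfwrite(); runs in the target
 * protocol thread and performs the actual if_output() call.
 */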
static void
bpf_output_dispatch(struct netmsg *nmsg)
{
	struct netmsg_bpf_output *bmsg = (struct netmsg_bpf_output *)nmsg;
	struct ifnet *ifp = bmsg->nm_ifp;
	int error;

	/*
	 * The driver frees the mbuf.
	 */
	error = ifp->if_output(ifp, bmsg->nm_mbuf, bmsg->nm_dst, NULL);
	lwkt_replymsg(&nmsg->nm_lmsg, error);
}

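/*
 * bpfwrite - inject a packet.  The user data is converted into an mbuf by
 * bpf_movein(), checked against the write filter and the interface MTU,
 * and then handed to a protocol thread for transmission via if_output().
 */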
static int
bpfwrite(struct dev_write_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bpf_d *d = dev->si_drv1;
	struct ifnet *ifp;
	struct mbuf *m;
	int error;
	struct sockaddr dst;
	int datlen;
	struct netmsg_bpf_output bmsg;

	if (d->bd_bif == NULL)
		return(ENXIO);

	ifp = d->bd_bif->bif_ifp;

	if (ap->a_uio->uio_resid == 0)
		return(0);

	error = bpf_movein(ap->a_uio, (int)d->bd_bif->bif_dlt, &m,
			   &dst, &datlen, d->bd_wfilter);
	if (error)
		return(error);

	if (datlen > ifp->if_mtu) {
		m_freem(m);
		return(EMSGSIZE);
	}

	if (d->bd_hdrcmplt)
		dst.sa_family = pseudo_AF_HDRCMPLT;

	netmsg_init(&bmsg.nm_netmsg, &curthread->td_msgport, MSGF_MPSAFE,
		    bpf_output_dispatch);
	bmsg.nm_mbuf = m;
	bmsg.nm_ifp = ifp;
	bmsg.nm_dst = &dst;

	return lwkt_domsg(cpu_portfn(0), &bmsg.nm_netmsg.nm_lmsg, 0);
}

/*
 * Reset a descriptor by flushing its packet buffer and clearing the
 * receive and drop counts.  Should be called at splimp.
 */
static void
bpf_resetd(struct bpf_d *d)
{
	if (d->bd_hbuf) {
		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = NULL;
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	d->bd_rcount = 0;
	d->bd_dcount = 0;
}

/*
 * FIONREAD		Check for read packet available.
 * SIOCGIFADDR		Get interface address - convenient hook to driver.
 * BIOCGBLEN		Get buffer len [for read()].
 * BIOCSETF		Set ethernet read filter.
 * BIOCSETWF		Set ethernet write filter.
 * BIOCFLUSH		Flush read packet buffer.
 * BIOCPROMISC		Put interface into promiscuous mode.
 * BIOCGDLT		Get link layer type.
 * BIOCGETIF		Get interface name.
 * BIOCSETIF		Set interface.
 * BIOCSRTIMEOUT	Set read timeout.
 * BIOCGRTIMEOUT	Get read timeout.
 * BIOCGSTATS		Get packet stats.
 * BIOCIMMEDIATE	Set immediate mode.
 * BIOCVERSION		Get filter language version.
 * BIOCGHDRCMPLT	Get "header already complete" flag
 * BIOCSHDRCMPLT	Set "header already complete" flag
 * BIOCGSEESENT		Get "see packets sent" flag
 * BIOCSSEESENT		Set "see packets sent" flag
 * BIOCLOCK		Set "locked" flag
 */
/* ARGSUSED */
static int
bpfioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bpf_d *d = dev->si_drv1;
	int error = 0;

	crit_enter();
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	crit_exit();

	if (d->bd_locked == 1) {
		switch (ap->a_cmd) {
		case BIOCGBLEN:
		case BIOCFLUSH:
		case BIOCGDLT:
		case BIOCGDLTLIST:
		case BIOCGETIF:
		case BIOCGRTIMEOUT:
		case BIOCGSTATS:
		case BIOCVERSION:
		case BIOCGRSIG:
		case BIOCGHDRCMPLT:
		case FIONREAD:
		case BIOCLOCK:
		case BIOCSRTIMEOUT:
		case BIOCIMMEDIATE:
		case TIOCGPGRP:
			break;
		default:
			return (EPERM);
		}
	}
	switch (ap->a_cmd) {
	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
	{
		int n;

		crit_enter();
		n = d->bd_slen;
		if (d->bd_hbuf)
			n += d->bd_hlen;
		crit_exit();

		*(int *)ap->a_data = n;
		break;
	}

	case SIOCGIFADDR:
	{
		struct ifnet *ifp;

		if (d->bd_bif == NULL) {
			error = EINVAL;
		} else {
			ifp = d->bd_bif->bif_ifp;
			ifnet_serialize_all(ifp);
			error = ifp->if_ioctl(ifp, ap->a_cmd,
					      ap->a_data, ap->a_cred);
			ifnet_deserialize_all(ifp);
		}
		break;
	}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)ap->a_data = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.
	 */
	case BIOCSBLEN:
		if (d->bd_bif != NULL) {
			error = EINVAL;
		} else {
			u_int size = *(u_int *)ap->a_data;

			if (size > bpf_maxbufsize)
				*(u_int *)ap->a_data = size = bpf_maxbufsize;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)ap->a_data = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
	case BIOCSETWF:
		error = bpf_setf(d, (struct bpf_program *)ap->a_data,
				 ap->a_cmd);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		crit_enter();
		bpf_resetd(d);
		crit_exit();
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == NULL) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		crit_enter();
		if (d->bd_promisc == 0) {
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			if (error == 0)
				d->bd_promisc = 1;
		}
		crit_exit();
		break;

	/*
	 * Get device parameters.
	 */
	case BIOCGDLT:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			*(u_int *)ap->a_data = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get a list of supported data link types.
	 */
	case BIOCGDLTLIST:
		if (d->bd_bif == NULL) {
			error = EINVAL;
		} else {
			error = bpf_getdltlist(d,
				(struct bpf_dltlist *)ap->a_data);
		}
		break;

	/*
	 * Set data link type.
	 */
	case BIOCSDLT:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			error = bpf_setdlt(d, *(u_int *)ap->a_data);
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == NULL) {
			error = EINVAL;
		} else {
			struct ifnet *const ifp = d->bd_bif->bif_ifp;
			struct ifreq *const ifr = (struct ifreq *)ap->a_data;

			strlcpy(ifr->ifr_name, ifp->if_xname,
				sizeof ifr->ifr_name);
		}
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)ap->a_data);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
	{
		struct timeval *tv = (struct timeval *)ap->a_data;

		/*
		 * Subtract 1 tick from tvtohz() since this isn't
		 * a one-shot timer.
		 */
		if ((error = itimerfix(tv)) == 0)
			d->bd_rtout = tvtohz_low(tv);
		break;
	}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
	{
		struct timeval *tv = (struct timeval *)ap->a_data;

		tv->tv_sec = d->bd_rtout / hz;
		tv->tv_usec = (d->bd_rtout % hz) * tick;
		break;
	}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
	{
		struct bpf_stat *bs = (struct bpf_stat *)ap->a_data;

		bs->bs_recv = d->bd_rcount;
		bs->bs_drop = d->bd_dcount;
		break;
	}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)ap->a_data;
		break;

	case BIOCVERSION:
	{
		struct bpf_version *bv = (struct bpf_version *)ap->a_data;

		bv->bv_major = BPF_MAJOR_VERSION;
		bv->bv_minor = BPF_MINOR_VERSION;
		break;
	}

	/*
	 * Get "header already complete" flag
	 */
	case BIOCGHDRCMPLT:
		*(u_int *)ap->a_data = d->bd_hdrcmplt;
		break;

	/*
	 * Set "header already complete" flag
	 */
	case BIOCSHDRCMPLT:
		d->bd_hdrcmplt = *(u_int *)ap->a_data ? 1 : 0;
		break;

	/*
	 * Get "see sent packets" flag
	 */
	case BIOCGSEESENT:
		*(u_int *)ap->a_data = d->bd_seesent;
		break;

	/*
	 * Set "see sent packets" flag
	 */
	case BIOCSSEESENT:
		d->bd_seesent = *(u_int *)ap->a_data;
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)ap->a_data;
		break;

	case FIOSETOWN:
		error = fsetown(*(int *)ap->a_data, &d->bd_sigio);
		break;

	case FIOGETOWN:
		*(int *)ap->a_data = fgetown(d->bd_sigio);
		break;

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		error = fsetown(-(*(int *)ap->a_data), &d->bd_sigio);
		break;

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)ap->a_data = -fgetown(d->bd_sigio);
		break;

	case BIOCSRSIG:		/* Set receive signal */
	{
		u_int sig;

		sig = *(u_int *)ap->a_data;

		if (sig >= NSIG)
			error = EINVAL;
		else
			d->bd_sig = sig;
		break;
	}
	case BIOCGRSIG:
		*(u_int *)ap->a_data = d->bd_sig;
		break;
	case BIOCLOCK:
		d->bd_locked = 1;
		break;
	}
	return(error);
}

/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
static int
bpf_setf(struct bpf_d *d, struct bpf_program *fp, u_long cmd)
{
	struct bpf_insn *fcode, *old;
	u_int wfilter, flen, size;

	if (cmd == BIOCSETWF) {
		old = d->bd_wfilter;
		wfilter = 1;
	} else {
		wfilter = 0;
		old = d->bd_rfilter;
	}
	if (fp->bf_insns == NULL) {
		if (fp->bf_len != 0)
			return(EINVAL);
		crit_enter();
		if (wfilter)
			d->bd_wfilter = NULL;
		else
			d->bd_rfilter = NULL;
		bpf_resetd(d);
		crit_exit();
		if (old != NULL)
			kfree(old, M_BPF);
		return(0);
	}
	flen = fp->bf_len;
	if (flen > BPF_MAXINSNS)
		return(EINVAL);

	size = flen * sizeof *fp->bf_insns;
	fcode = (struct bpf_insn *)kmalloc(size, M_BPF, M_WAITOK);
	if (copyin(fp->bf_insns, fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		crit_enter();
		if (wfilter)
			d->bd_wfilter = fcode;
		else
			d->bd_rfilter = fcode;
		bpf_resetd(d);
		crit_exit();
		if (old != NULL)
			kfree(old, M_BPF);

		return(0);
	}
	kfree(fcode, M_BPF);
	return(EINVAL);
}

/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(struct bpf_d *d, struct ifreq *ifr)
{
	struct bpf_if *bp;
	int error;
	struct ifnet *theywant;

	theywant = ifunit(ifr->ifr_name);
	if (theywant == NULL)
		return(ENXIO);

	/*
	 * Look through attached interfaces for the named one.
	 */
	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == NULL || ifp != theywant)
			continue;
		/* skip additional entry */
		if (bp->bif_driverp != &ifp->if_bpf)
			continue;
		/*
		 * We found the requested interface.
		 * If it's not up, return an error.
		 * Allocate the packet buffers if we need to.
		 * If we're already attached to requested interface,
		 * just flush the buffer.
		 */
		if (!(ifp->if_flags & IFF_UP))
			return(ENETDOWN);

		if (d->bd_sbuf == NULL) {
			error = bpf_allocbufs(d);
			if (error != 0)
				return(error);
		}
		crit_enter();
		if (bp != d->bd_bif) {
			if (d->bd_bif != NULL) {
				/*
				 * Detach if attached to something else.
				 */
				bpf_detachd(d);
			}

			bpf_attachd(d, bp);
		}
		bpf_resetd(d);
		crit_exit();
		return(0);
	}

	/* Not found. */
	return(ENXIO);
}

/*
 * Support for select() and poll() system calls
 *
 * Return true iff the specific operation will not block indefinitely.
 * Otherwise, return false but make a note that a selwakeup() must be done.
 */
static int
bpfpoll(struct dev_poll_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bpf_d *d;
	int revents;

	d = dev->si_drv1;
	if (d->bd_bif == NULL)
		return(ENXIO);

	revents = ap->a_events & (POLLOUT | POLLWRNORM);
	crit_enter();
	if (ap->a_events & (POLLIN | POLLRDNORM)) {
		/*
		 * An imitation of the FIONREAD ioctl code.
		 * XXX not quite.  An exact imitation:
		 *	if (d->b_slen != 0 ||
		 *	    (d->bd_hbuf != NULL && d->bd_hlen != 0)
		 */
		if (d->bd_hlen != 0 ||
		    ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) &&
		    d->bd_slen != 0)) {
			revents |= ap->a_events & (POLLIN | POLLRDNORM);
		} else {
			selrecord(curthread, &d->bd_sel);
			/* Start the read timeout if necessary. */
			if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
				callout_reset(&d->bd_callout, d->bd_rtout,
					      bpf_timed_out, d);
				d->bd_state = BPF_WAITING;
			}
		}
	}
	crit_exit();
	ap->a_events = revents;
	return(0);
}

/*
 * Process the packet pkt of length pktlen.  The packet is parsed
 * by each listener's filter, and if accepted, stashed into the
 * corresponding buffer.
 */
void
bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
{
	struct bpf_d *d;
	struct timeval tv;
	int gottime = 0;
	u_int slen;

	get_mplock();

	/* Re-check */
	if (bp == NULL) {
		rel_mplock();
		return;
	}

	/*
	 * Note that the ipl does not have to be raised at this point.
	 * The only problem that could arise here would be if two different
	 * interfaces shared any data.  This is not the case.
	 */
	SLIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		++d->bd_rcount;
		slen = bpf_filter(d->bd_rfilter, pkt, pktlen, pktlen);
		if (slen != 0) {
			if (!gottime) {
				microtime(&tv);
				gottime = 1;
			}
			catchpacket(d, pkt, pktlen, slen, ovbcopy, &tv);
		}
	}

	rel_mplock();
}

/*
 * Copy data from an mbuf chain into a buffer.  This code is derived
 * from m_copydata in sys/uipc_mbuf.c.
 */
static void
bpf_mcopy(const void *src_arg, void *dst_arg, size_t len)
{
	const struct mbuf *m;
	u_int count;
	u_char *dst;

	m = src_arg;
	dst = dst_arg;
	while (len > 0) {
		if (m == NULL)
			panic("bpf_mcopy");
		count = min(m->m_len, len);
		bcopy(mtod(m, void *), dst, count);
		m = m->m_next;
		dst += count;
		len -= count;
	}
}

/*
 * Process the packet in the mbuf chain m.  The packet is parsed by each
 * listener's filter, and if accepted, stashed into the corresponding
 * buffer.
 */
void
bpf_mtap(struct bpf_if *bp, struct mbuf *m)
{
	struct bpf_d *d;
	u_int pktlen, slen;
	struct timeval tv;
	int gottime = 0;

	get_mplock();

	/* Re-check */
	if (bp == NULL) {
		rel_mplock();
		return;
	}

	/* Don't compute pktlen if no descriptor is attached. */
	if (SLIST_EMPTY(&bp->bif_dlist)) {
		rel_mplock();
		return;
	}

	pktlen = m_lengthm(m, NULL);

	SLIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL))
			continue;
		++d->bd_rcount;
		slen = bpf_filter(d->bd_rfilter, (u_char *)m, pktlen, 0);
		if (slen != 0) {
			if (!gottime) {
				microtime(&tv);
				gottime = 1;
			}
			catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy,
				    &tv);
		}
	}

	rel_mplock();
}

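/*
 * Tap a packet, prepending the address family (as a 4-byte value) as a
 * pseudo link-level header.  The family must not be AF_UNSPEC.
 */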
void
bpf_mtap_family(struct bpf_if *bp, struct mbuf *m, sa_family_t family)
{
	u_int family4;

	KKASSERT(family != AF_UNSPEC);

	family4 = (u_int)family;
	bpf_ptap(bp, m, &family4, sizeof(family4));
}

/*
 * Process the packet in the mbuf chain m with the header in m prepended.
 * The packet is parsed by each listener's filter, and if accepted,
 * stashed into the corresponding buffer.
 */
void
bpf_ptap(struct bpf_if *bp, struct mbuf *m, const void *data, u_int dlen)
{
	struct mbuf mb;

	/*
	 * Craft on-stack mbuf suitable for passing to bpf_mtap.
	 * Note that we cut corners here; we only setup what's
	 * absolutely needed--this mbuf should never go anywhere else.
	 */
	mb.m_next = m;
	mb.m_data = __DECONST(void *, data); /* LINTED */
	mb.m_len = dlen;
	mb.m_pkthdr.rcvif = m->m_pkthdr.rcvif;

	bpf_mtap(bp, &mb);
}

/*
 * Move the packet data from interface memory (pkt) into the store
 * buffer, waking up any pending read when the buffer fills up or
 * immediate mode is set.  "cpfn" is the routine called to do the
 * actual data transfer:  ovbcopy is passed in to copy contiguous
 * chunks, while bpf_mcopy is passed in to copy mbuf chains.  In the
 * latter case, pkt is really an mbuf.
 */
static void
catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
	    void (*cpfn)(const void *, void *, size_t),
	    const struct timeval *tv)
{
	struct bpf_hdr *hp;
	int totlen, curlen;
	int hdrlen = d->bd_bif->bif_hdrlen;
	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == NULL) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		bpf_wakeup(d);
		curlen = 0;
	} else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT) {
		/*
		 * Immediate mode is set, or the read timeout has
		 * already expired during a select call.  A packet
		 * arrived, so the reader should be woken up.
		 */
		bpf_wakeup(d);
	}

	/*
	 * Append the bpf header.
	 */
	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
	hp->bh_tstamp = *tv;
	hp->bh_datalen = pktlen;
	hp->bh_hdrlen = hdrlen;
	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
	d->bd_slen = curlen + totlen;
}

/*
 * Initialize all nonzero fields of a descriptor.
 */
static int
bpf_allocbufs(struct bpf_d *d)
{
	d->bd_fbuf = kmalloc(d->bd_bufsize, M_BPF, M_WAITOK);
	d->bd_sbuf = kmalloc(d->bd_bufsize, M_BPF, M_WAITOK);
	d->bd_slen = 0;
	d->bd_hlen = 0;
	return(0);
}

/*
 * Free buffers and packet filter program currently in use by a descriptor.
 * Called on close.
 */
static void
bpf_freed(struct bpf_d *d)
{
	/*
	 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and it hasn't yet been marked
	 * free.
	 */
	if (d->bd_sbuf != NULL) {
		kfree(d->bd_sbuf, M_BPF);
		if (d->bd_hbuf != NULL)
			kfree(d->bd_hbuf, M_BPF);
		if (d->bd_fbuf != NULL)
			kfree(d->bd_fbuf, M_BPF);
	}
	if (d->bd_rfilter)
		kfree(d->bd_rfilter, M_BPF);
	if (d->bd_wfilter)
		kfree(d->bd_wfilter, M_BPF);
}

/*
 * Attach an interface to bpf.  ifp is a pointer to the structure
 * defining the interface to be attached, dlt is the link layer type,
 * and hdrlen is the fixed size of the link header (variable length
 * headers are not yet supported).
 */
void
bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
{
	bpfattach_dlt(ifp, dlt, hdrlen, &ifp->if_bpf);
}

void
bpfattach_dlt(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
{
	struct bpf_if *bp;

	bp = kmalloc(sizeof *bp, M_BPF, M_WAITOK | M_ZERO);

	SLIST_INIT(&bp->bif_dlist);
	bp->bif_ifp = ifp;
	bp->bif_dlt = dlt;
	bp->bif_driverp = driverp;
	*bp->bif_driverp = NULL;

	bp->bif_next = bpf_iflist;
	bpf_iflist = bp;

	/*
	 * Compute the length of the bpf header.  This is not necessarily
	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
	 * that the network layer header begins on a longword boundary (for
	 * performance reasons and to alleviate alignment restrictions).
	 */
	bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;

	if (bootverbose)
		if_printf(ifp, "bpf attached\n");
}

/*
 * Detach bpf from an interface.  This involves detaching each descriptor
 * associated with the interface, and leaving bd_bif NULL.  Notify each
 * descriptor as it's detached so that any sleepers wake up and get
 * ENXIO.
 */
void
bpfdetach(struct ifnet *ifp)
{
	struct bpf_if *bp, *bp_prev;
	struct bpf_d *d;

	crit_enter();

	/* Locate BPF interface information */
	bp_prev = NULL;
	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
		if (ifp == bp->bif_ifp)
			break;
		bp_prev = bp;
	}

	/* Interface wasn't attached */
	if (bp->bif_ifp == NULL) {
		crit_exit();
		kprintf("bpfdetach: %s was not attached\n", ifp->if_xname);
		return;
	}

	while ((d = SLIST_FIRST(&bp->bif_dlist)) != NULL) {
		bpf_detachd(d);
		bpf_wakeup(d);
	}

	if (bp_prev != NULL)
		bp_prev->bif_next = bp->bif_next;
	else
		bpf_iflist = bp->bif_next;

	kfree(bp, M_BPF);

	crit_exit();
}

/*
 * Get a list of the available data link types for the interface.
 */
static int
bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl)
{
	int n, error;
	struct ifnet *ifp;
	struct bpf_if *bp;

	ifp = d->bd_bif->bif_ifp;
	n = 0;
	error = 0;
	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
		if (bp->bif_ifp != ifp)
			continue;
		if (bfl->bfl_list != NULL) {
			if (n >= bfl->bfl_len) {
				return (ENOMEM);
			}
			error = copyout(&bp->bif_dlt,
					bfl->bfl_list + n, sizeof(u_int));
		}
		n++;
	}
	bfl->bfl_len = n;
	return(error);
}

/*
 * Set the data link type of a BPF instance.
 */
static int
bpf_setdlt(struct bpf_d *d, u_int dlt)
{
	int error, opromisc;
	struct ifnet *ifp;
	struct bpf_if *bp;

	if (d->bd_bif->bif_dlt == dlt)
		return (0);
	ifp = d->bd_bif->bif_ifp;
	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
		if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
			break;
	}
	if (bp != NULL) {
		opromisc = d->bd_promisc;
		crit_enter();
		bpf_detachd(d);
		bpf_attachd(d, bp);
		bpf_resetd(d);
		if (opromisc) {
			error = ifpromisc(bp->bif_ifp, 1);
			if (error) {
				if_printf(bp->bif_ifp,
					  "bpf_setdlt: ifpromisc failed (%d)\n",
					  error);
			} else {
				d->bd_promisc = 1;
			}
		}
		crit_exit();
	}
	return(bp == NULL ? EINVAL : 0);
}

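/*
 * Driver init/uninit: create the master /dev/bpf node, initialize the
 * clone bitmap, and register the clone handler; the uninit path removes
 * the dev_ops entries and tears the bitmap down again.
 */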
static void
bpf_drvinit(void *unused)
{
	make_dev(&bpf_ops, 0, 0, 0, 0600, "bpf");
	devfs_clone_bitmap_init(&DEVFS_CLONE_BITMAP(bpf));
	devfs_clone_handler_add("bpf", bpfclone);
}

static void
bpf_drvuninit(void *unused)
{
	dev_ops_remove_all(&bpf_ops);
	devfs_clone_bitmap_uninit(&DEVFS_CLONE_BITMAP(bpf));
}

SYSINIT(bpfdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,bpf_drvinit,NULL)
SYSUNINIT(bpfdev, SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,bpf_drvuninit, NULL);

#else /* !BPF */
/*
 * NOP stubs to allow bpf-using drivers to load and function.
 *
 * A 'better' implementation would allow the core bpf functionality
 * to be loaded at runtime.
 */

void
bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
{
}

void
bpf_mtap(struct bpf_if *bp, struct mbuf *m)
{
}

void
bpf_ptap(struct bpf_if *bp, struct mbuf *m, const void *data, u_int dlen)
{
}

void
bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
{
}

void
bpfattach_dlt(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
{
}

void
bpfdetach(struct ifnet *ifp)
{
}

u_int
bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen)
{
	return -1;	/* "no filter" behaviour */
}

#endif /* !BPF */