kernel: Make SMP support default (and non-optional).
sys/net/netisr.c (dragonfly.git)
/*
 * Copyright (c) 2003, 2004 Matthew Dillon. All rights reserved.
 * Copyright (c) 2003, 2004 Jeffrey M. Hsu. All rights reserved.
 * Copyright (c) 2003 Jonathan Lemon. All rights reserved.
 * Copyright (c) 2003, 2004 The DragonFly Project. All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jonathan Lemon, Jeffrey M. Hsu, and Matthew Dillon.
 *
 * Jonathan Lemon gave Jeffrey Hsu permission to combine his copyright
 * into this one around July 8 2004.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/msgport.h>
#include <sys/proc.h>
#include <sys/interrupt.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/socketvar.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/netisr.h>
#include <machine/cpufunc.h>
#include <machine/smp.h>

#include <sys/thread2.h>
#include <sys/msgport2.h>
#include <net/netmsg2.h>
#include <sys/mplock2.h>

static void netmsg_service_loop(void *arg);
static void cpu0_cpufn(struct mbuf **mp, int hoff);
static void netisr_nohashck(struct mbuf *, const struct pktinfo *);

struct netmsg_port_registration {
	TAILQ_ENTRY(netmsg_port_registration) npr_entry;
	lwkt_port_t	npr_port;
};

struct netmsg_rollup {
	TAILQ_ENTRY(netmsg_rollup) ru_entry;
	netisr_ru_t	ru_func;
};

struct netmsg_barrier {
	struct netmsg_base	base;
	volatile cpumask_t	*br_cpumask;
	volatile uint32_t	br_done;
};

#define NETISR_BR_NOTDONE	0x1
#define NETISR_BR_WAITDONE	0x80000000

struct netisr_barrier {
	struct netmsg_barrier	*br_msgs[MAXCPU];
	int			br_isset;
};

static struct netisr netisrs[NETISR_MAX];
static TAILQ_HEAD(,netmsg_port_registration) netreglist;
static TAILQ_HEAD(,netmsg_rollup) netrulist;

/* Per-CPU thread to handle any protocol. */
static struct thread netisr_cpu[MAXCPU];
lwkt_port netisr_afree_rport;
lwkt_port netisr_afree_free_so_rport;
lwkt_port netisr_adone_rport;
lwkt_port netisr_apanic_rport;
lwkt_port netisr_sync_port;

static int (*netmsg_fwd_port_fn)(lwkt_port_t, lwkt_msg_t);

SYSCTL_NODE(_net, OID_AUTO, netisr, CTLFLAG_RW, 0, "netisr");

/*
 * netisr_afree_rport replymsg function, only used to handle async
 * messages which the sender has abandoned to their fate.
 */
static void
netisr_autofree_reply(lwkt_port_t port, lwkt_msg_t msg)
{
	kfree(msg, M_LWKTMSG);
}

static void
netisr_autofree_free_so_reply(lwkt_port_t port, lwkt_msg_t msg)
{
	sofree(((netmsg_t)msg)->base.nm_so);
	kfree(msg, M_LWKTMSG);
}

/*
 * We need a custom putport function to handle the case where the
 * message target is the current thread's message port.  This case
 * can occur when the TCP or UDP stack does a direct callback to NFS and NFS
 * then turns around and executes a network operation synchronously.
 *
 * To prevent deadlocking, we must execute these self-referential messages
 * synchronously, effectively turning the message into a glorified direct
 * procedure call back into the protocol stack.  The operation must be
 * complete on return or we will deadlock, so panic if it isn't.
 *
 * However, the target function is under no obligation to immediately
 * reply the message.  It may forward it elsewhere.
 */
static int
netmsg_put_port(lwkt_port_t port, lwkt_msg_t lmsg)
{
	netmsg_base_t nmsg = (void *)lmsg;

	if ((lmsg->ms_flags & MSGF_SYNC) && port == &curthread->td_msgport) {
		nmsg->nm_dispatch((netmsg_t)nmsg);
		return(EASYNC);
	} else {
		return(netmsg_fwd_port_fn(port, lmsg));
	}
}

/*
 * UNIX DOMAIN sockets still have to run their uipc functions synchronously,
 * because they depend on the user proc context for a number of things
 * (like creds) which we have not yet incorporated into the message structure.
 *
 * However, we maintain our message/port abstraction.  Having a special
 * synchronous port which runs the commands synchronously gives us the
 * ability to serialize operations in one place later on when we start
 * removing the BGL.
 */
static int
netmsg_sync_putport(lwkt_port_t port, lwkt_msg_t lmsg)
{
	netmsg_base_t nmsg = (void *)lmsg;

	KKASSERT((lmsg->ms_flags & MSGF_DONE) == 0);

	lmsg->ms_target_port = port;	/* required for abort */
	nmsg->nm_dispatch((netmsg_t)nmsg);
	return(EASYNC);
}

static void
netisr_init(void)
{
	int i;

	TAILQ_INIT(&netreglist);
	TAILQ_INIT(&netrulist);

	/*
	 * Create default per-cpu threads for generic protocol handling.
	 */
	for (i = 0; i < ncpus; ++i) {
		lwkt_create(netmsg_service_loop, NULL, NULL,
			    &netisr_cpu[i], TDF_NOSTART|TDF_FORCE_SPINPORT,
			    i, "netisr_cpu %d", i);
		netmsg_service_port_init(&netisr_cpu[i].td_msgport);
		lwkt_schedule(&netisr_cpu[i]);
	}

	/*
	 * The netisr_afree_rport is a special reply port which automatically
	 * frees the replied message.  The netisr_adone_rport simply marks
	 * the message as being done.  The netisr_apanic_rport panics if
	 * the message is replied to.
	 */
	lwkt_initport_replyonly(&netisr_afree_rport, netisr_autofree_reply);
	lwkt_initport_replyonly(&netisr_afree_free_so_rport,
				netisr_autofree_free_so_reply);
	lwkt_initport_replyonly_null(&netisr_adone_rport);
	lwkt_initport_panic(&netisr_apanic_rport);

	/*
	 * The netisr_sync_port is a special port which executes the message
	 * synchronously and waits for it if EASYNC is returned.
	 */
	lwkt_initport_putonly(&netisr_sync_port, netmsg_sync_putport);
}

SYSINIT(netisr, SI_SUB_PRE_DRIVERS, SI_ORDER_FIRST, netisr_init, NULL);

/*
 * Finish initializing the message port for a netmsg service.  This also
 * registers the port for synchronous cleanup operations such as when an
 * ifnet is being destroyed.  There is no deregistration API yet.
 */
void
netmsg_service_port_init(lwkt_port_t port)
{
	struct netmsg_port_registration *reg;

	/*
	 * Override the putport function.  Our custom function checks for
	 * self-references and executes such commands synchronously.
	 */
	if (netmsg_fwd_port_fn == NULL)
		netmsg_fwd_port_fn = port->mp_putport;
	KKASSERT(netmsg_fwd_port_fn == port->mp_putport);
	port->mp_putport = netmsg_put_port;

	/*
	 * Keep track of ports using the netmsg API so we can synchronize
	 * certain operations (such as freeing an ifnet structure) across all
	 * consumers.
	 */
	reg = kmalloc(sizeof(*reg), M_TEMP, M_WAITOK|M_ZERO);
	reg->npr_port = port;
	TAILQ_INSERT_TAIL(&netreglist, reg, npr_entry);
}

/*
 * This function synchronizes the caller with all netmsg services.  For
 * example, if an interface is being removed we must make sure that all
 * packets related to that interface complete processing before the structure
 * can actually be freed.  This sort of synchronization is an alternative to
 * ref-counting the netif, removing the ref counting overhead in favor of
 * placing additional overhead in the netif freeing sequence (where it is
 * inconsequential).
 */
void
netmsg_service_sync(void)
{
	struct netmsg_port_registration *reg;
	struct netmsg_base smsg;

	netmsg_init(&smsg, NULL, &curthread->td_msgport, 0, netmsg_sync_handler);

	TAILQ_FOREACH(reg, &netreglist, npr_entry) {
		lwkt_domsg(reg->npr_port, &smsg.lmsg, 0);
	}
}
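
/*
 * Illustrative sketch (not part of the original netisr.c): how a teardown
 * path might use netmsg_service_sync().  Everything here other than
 * netmsg_service_sync() and kfree() is hypothetical.
 */
#if 0
static void
example_ifnet_detach(struct example_softc *sc)
{
	/*
	 * Unhook the interface first so no new packets can reference it,
	 * then let every registered netmsg port drain the messages that
	 * were queued before the unhook.  Only then is it safe to free.
	 */
	example_unhook_ifnet(sc);	/* hypothetical helper */
	netmsg_service_sync();
	kfree(sc, M_DEVBUF);
}
#endif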

/*
 * The netmsg function simply replies the message.  API semantics require
 * EASYNC to be returned if the netmsg function disposes of the message.
 */
void
netmsg_sync_handler(netmsg_t msg)
{
	lwkt_replymsg(&msg->lmsg, 0);
}

/*
 * Generic netmsg service loop.  Some protocols may roll their own but all
 * must do the basic command dispatch function call done here.
 */
static void
netmsg_service_loop(void *arg)
{
	struct netmsg_rollup *ru;
	netmsg_base_t msg;
	thread_t td = curthread;
	int limit;

	while ((msg = lwkt_waitport(&td->td_msgport, 0))) {
		/*
		 * Run up to 512 pending netmsgs.
		 */
		limit = 512;
		do {
			KASSERT(msg->nm_dispatch != NULL,
				("netmsg_service isr %d badmsg",
				 msg->lmsg.u.ms_result));
			if (msg->nm_so &&
			    msg->nm_so->so_port != &td->td_msgport) {
				/*
				 * Sockets undergoing connect or disconnect
				 * ops can change ports on us.  Chase the
				 * port.
				 */
				kprintf("netmsg_service_loop: Warning, "
					"port changed so=%p\n", msg->nm_so);
				lwkt_forwardmsg(msg->nm_so->so_port,
						&msg->lmsg);
			} else {
				/*
				 * We are on the correct port, dispatch it.
				 */
				msg->nm_dispatch((netmsg_t)msg);
			}
			if (--limit == 0)
				break;
		} while ((msg = lwkt_getport(&td->td_msgport)) != NULL);

		/*
		 * Run all registered rollup functions for this cpu
		 * (e.g. tcp_willblock()).
		 */
		TAILQ_FOREACH(ru, &netrulist, ru_entry)
			ru->ru_func();
	}
}

/*
 * Forward a packet to a netisr service function.
 *
 * If the packet has not been assigned to a protocol thread we call
 * the port characterization function to assign it.  The caller must
 * clear M_HASH (or not have set it in the first place) if the caller
 * wishes the packet to be recharacterized.
 */
int
netisr_queue(int num, struct mbuf *m)
{
	struct netisr *ni;
	struct netmsg_packet *pmsg;
	lwkt_port_t port;

	KASSERT((num > 0 && num <= NELEM(netisrs)),
		("Bad isr %d", num));

	ni = &netisrs[num];
	if (ni->ni_handler == NULL) {
		kprintf("Unregistered isr %d\n", num);
		m_freem(m);
		return (EIO);
	}

	/*
	 * Figure out which protocol thread to send to.  This does not
	 * have to be perfect but performance will be really good if it
	 * is correct.  Major protocol inputs such as ip_input() will
	 * re-characterize the packet as necessary.
	 */
	if ((m->m_flags & M_HASH) == 0) {
		ni->ni_cpufn(&m, 0);
		if (m == NULL) {
			m_freem(m);
			return (EIO);
		}
		if ((m->m_flags & M_HASH) == 0) {
			kprintf("netisr_queue(%d): packet hash failed\n", num);
			m_freem(m);
			return (EIO);
		}
	}

	/*
	 * Get the protocol port based on the packet hash, initialize
	 * the netmsg, and send it off.
	 */
	port = netisr_portfn(m->m_pkthdr.hash);
	pmsg = &m->m_hdr.mh_netmsg;
	netmsg_init(&pmsg->base, NULL, &netisr_apanic_rport,
		    0, ni->ni_handler);
	pmsg->nm_packet = m;
	pmsg->base.lmsg.u.ms_result = num;
	lwkt_sendmsg(port, &pmsg->base.lmsg);

	return (0);
}
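
/*
 * Illustrative sketch (not part of the original netisr.c): handing a
 * received packet to its protocol thread with netisr_queue().
 * example_input() is hypothetical; NETISR_IP is the isr number used by
 * the IP stack.
 */
#if 0
static void
example_input(struct mbuf *m)
{
	/*
	 * netisr_queue() characterizes the packet if M_HASH is not
	 * already set, selects the per-cpu protocol thread from
	 * m_pkthdr.hash, and sends the mbuf-embedded netmsg there.
	 * The mbuf is consumed whether or not an error is returned.
	 */
	if (netisr_queue(NETISR_IP, m) != 0)
		kprintf("example_input: netisr_queue failed\n");
}
#endif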

/*
 * Run a netisr service function on the packet.
 *
 * The packet must have been correctly characterized!
 */
int
netisr_handle(int num, struct mbuf *m)
{
	struct netisr *ni;
	struct netmsg_packet *pmsg;
	lwkt_port_t port;

	/*
	 * Get the protocol port based on the packet hash
	 */
	KASSERT((m->m_flags & M_HASH), ("packet not characterized"));
	port = netisr_portfn(m->m_pkthdr.hash);
	KASSERT(&curthread->td_msgport == port, ("wrong msgport"));

	KASSERT((num > 0 && num <= NELEM(netisrs)), ("bad isr %d", num));
	ni = &netisrs[num];
	if (ni->ni_handler == NULL) {
		kprintf("unregistered isr %d\n", num);
		m_freem(m);
		return EIO;
	}

	/*
	 * Initialize the netmsg, and run the handler directly.
	 */
	pmsg = &m->m_hdr.mh_netmsg;
	netmsg_init(&pmsg->base, NULL, &netisr_apanic_rport,
		    0, ni->ni_handler);
	pmsg->nm_packet = m;
	pmsg->base.lmsg.u.ms_result = num;
	ni->ni_handler((netmsg_t)&pmsg->base);

	return 0;
}

/*
 * Pre-characterization of a deeper portion of the packet for the
 * requested isr.
 *
 * The base of the ISR type (e.g. IP) that we want to characterize is
 * at (hoff) relative to the beginning of the mbuf.  This allows
 * e.g. ether_input_chain() to not have to adjust the m_data/m_len.
 */
void
netisr_characterize(int num, struct mbuf **mp, int hoff)
{
	struct netisr *ni;
	struct mbuf *m;

	/*
	 * Validation
	 */
	m = *mp;
	KKASSERT(m != NULL);

	if (num < 0 || num >= NETISR_MAX) {
		if (num == NETISR_MAX) {
			m->m_flags |= M_HASH;
			m->m_pkthdr.hash = 0;
			return;
		}
		panic("Bad isr %d", num);
	}

	/*
	 * Valid netisr?
	 */
	ni = &netisrs[num];
	if (ni->ni_handler == NULL) {
		kprintf("Unregistered isr %d\n", num);
		m_freem(m);
		*mp = NULL;
	}

	/*
	 * Characterize the packet
	 */
	if ((m->m_flags & M_HASH) == 0) {
		ni->ni_cpufn(mp, hoff);
		m = *mp;
		if (m && (m->m_flags & M_HASH) == 0) {
			kprintf("netisr_characterize(%d): "
				"packet hash failed\n", num);
		}
	}
}

void
netisr_register(int num, netisr_fn_t handler, netisr_cpufn_t cpufn)
{
	struct netisr *ni;

	KASSERT((num > 0 && num <= NELEM(netisrs)),
		("netisr_register: bad isr %d", num));
	KKASSERT(handler != NULL);

	if (cpufn == NULL)
		cpufn = cpu0_cpufn;

	ni = &netisrs[num];

	ni->ni_handler = handler;
	ni->ni_hashck = netisr_nohashck;
	ni->ni_cpufn = cpufn;
	netmsg_init(&ni->ni_netmsg, NULL, &netisr_adone_rport, 0, NULL);
}
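
/*
 * Illustrative sketch (not part of the original netisr.c): how a protocol
 * might register itself.  NETISR_EXAMPLE and the example_* functions are
 * hypothetical; passing a NULL cpufn means cpu0_cpufn is used and all
 * packets are steered to cpu0 until a real characterization function is
 * supplied.
 */
#if 0
static void
example_proto_input(netmsg_t msg)
{
	struct netmsg_packet *pmsg = (struct netmsg_packet *)msg;
	struct mbuf *m = pmsg->nm_packet;

	/* ... protocol processing ... */
	m_freem(m);
	/*
	 * Packet netmsgs are initialized with netisr_apanic_rport as
	 * their reply port, so the handler must not reply them.
	 */
}

static void
example_proto_init(void)
{
	netisr_register(NETISR_EXAMPLE, example_proto_input, NULL);
}
#endif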

void
netisr_register_hashcheck(int num, netisr_hashck_t hashck)
{
	struct netisr *ni;

	KASSERT((num > 0 && num <= NELEM(netisrs)),
		("netisr_register: bad isr %d", num));

	ni = &netisrs[num];
	ni->ni_hashck = hashck;
}

void
netisr_register_rollup(netisr_ru_t ru_func)
{
	struct netmsg_rollup *ru;

	ru = kmalloc(sizeof(*ru), M_TEMP, M_WAITOK|M_ZERO);
	ru->ru_func = ru_func;
	TAILQ_INSERT_TAIL(&netrulist, ru, ru_entry);
}
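
/*
 * Illustrative sketch (not part of the original netisr.c): registering a
 * per-cpu rollup.  The service loop above runs every registered rollup
 * after draining up to 512 netmsgs, which is how tcp_willblock()-style
 * batching gets flushed.  The example_* names are hypothetical.
 */
#if 0
static void
example_willblock(void)
{
	/* flush per-cpu batched work before the netisr thread sleeps */
}

static void
example_rollup_init(void)
{
	netisr_register_rollup(example_willblock);
}
#endif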

/*
 * Return the message port for the general protocol message servicing
 * thread for a particular cpu.
 */
lwkt_port_t
netisr_portfn(int cpu)
{
	KKASSERT(cpu >= 0 && cpu < ncpus);
	return (&netisr_cpu[cpu].td_msgport);
}

/*
 * Return the current cpu's network protocol thread.
 */
lwkt_port_t
cur_netport(void)
{
	return(netisr_portfn(mycpu->gd_cpuid));
}

/*
 * Return a default protocol control message processing thread port
 */
lwkt_port_t
cpu0_ctlport(int cmd __unused, struct sockaddr *sa __unused,
	     void *extra __unused)
{
	return (&netisr_cpu[0].td_msgport);
}

/*
 * This is a default netisr packet characterization function which
 * sets M_HASH.  If a netisr is registered with a NULL cpufn function
 * this one is assigned.
 *
 * This function makes no attempt to validate the packet.
 */
static void
cpu0_cpufn(struct mbuf **mp, int hoff __unused)
{
	struct mbuf *m = *mp;

	m->m_flags |= M_HASH;
	m->m_pkthdr.hash = 0;
}
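
/*
 * Illustrative sketch (not part of the original netisr.c): a non-trivial
 * characterization function.  The hash computation is purely illustrative;
 * a real cpufn (e.g. the IP one) validates and parses the header at (hoff)
 * and may replace *mp, or free the mbuf and set *mp to NULL on a bad
 * packet.  Assumes <netinet/in.h> and <netinet/ip.h>.
 */
#if 0
static void
example_cpufn(struct mbuf **mp, int hoff)
{
	struct mbuf *m = *mp;
	const struct ip *ip;

	/* make sure the header we want to inspect is contiguous */
	if (m->m_len < hoff + sizeof(*ip)) {
		m = m_pullup(m, hoff + sizeof(*ip));
		if ((*mp = m) == NULL)
			return;		/* packet dropped */
	}
	ip = (const struct ip *)(mtod(m, const char *) + hoff);

	/* spread flows across the per-cpu netisr threads */
	m->m_pkthdr.hash = (ip->ip_src.s_addr ^ ip->ip_dst.s_addr) % ncpus;
	m->m_flags |= M_HASH;
}
#endif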

/*
 * schednetisr() is used to call the netisr handler from the appropriate
 * netisr thread for polling and other purposes.
 *
 * This function may be called from a hard interrupt or IPI and must be
 * MP SAFE and non-blocking.  We use a fixed per-cpu message instead of
 * trying to allocate one.  We must get ourselves onto the target cpu
 * to safely check the MSGF_DONE bit on the message but since the message
 * will be sent to that cpu anyway this does not add any extra work beyond
 * what lwkt_sendmsg() would have already had to do to schedule the target
 * thread.
 */
static void
schednetisr_remote(void *data)
{
	int num = (int)(intptr_t)data;
	struct netisr *ni = &netisrs[num];
	lwkt_port_t port = &netisr_cpu[0].td_msgport;
	netmsg_base_t pmsg;

	pmsg = &netisrs[num].ni_netmsg;
	if (pmsg->lmsg.ms_flags & MSGF_DONE) {
		netmsg_init(pmsg, NULL, &netisr_adone_rport, 0, ni->ni_handler);
		pmsg->lmsg.u.ms_result = num;
		lwkt_sendmsg(port, &pmsg->lmsg);
	}
}

void
schednetisr(int num)
{
	KASSERT((num > 0 && num <= NELEM(netisrs)),
		("schednetisr: bad isr %d", num));
	KKASSERT(netisrs[num].ni_handler != NULL);
	if (mycpu->gd_cpuid != 0) {
		lwkt_send_ipiq(globaldata_find(0),
			       schednetisr_remote, (void *)(intptr_t)num);
	} else {
		crit_enter();
		schednetisr_remote((void *)(intptr_t)num);
		crit_exit();
	}
}
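
/*
 * Illustrative sketch (not part of the original netisr.c): kicking a
 * netisr handler without an mbuf, e.g. from a timer or polling context.
 * NETISR_EXAMPLE is hypothetical; the handler runs in the cpu0 protocol
 * thread on the fixed per-netisr message shown in schednetisr_remote().
 */
#if 0
static void
example_poll_tick(void *arg __unused)
{
	schednetisr(NETISR_EXAMPLE);
}
#endif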

static void
netisr_barrier_dispatch(netmsg_t nmsg)
{
	struct netmsg_barrier *msg = (struct netmsg_barrier *)nmsg;

	atomic_clear_cpumask(msg->br_cpumask, mycpu->gd_cpumask);
	if (*msg->br_cpumask == 0)
		wakeup(msg->br_cpumask);

	for (;;) {
		uint32_t done = msg->br_done;

		cpu_ccfence();
		if ((done & NETISR_BR_NOTDONE) == 0)
			break;

		tsleep_interlock(&msg->br_done, 0);
		if (atomic_cmpset_int(&msg->br_done,
		    done, done | NETISR_BR_WAITDONE))
			tsleep(&msg->br_done, PINTERLOCKED, "nbrdsp", 0);
	}

	lwkt_replymsg(&nmsg->lmsg, 0);
}

struct netisr_barrier *
netisr_barrier_create(void)
{
	struct netisr_barrier *br;

	br = kmalloc(sizeof(*br), M_LWKTMSG, M_WAITOK | M_ZERO);
	return br;
}

void
netisr_barrier_set(struct netisr_barrier *br)
{
	volatile cpumask_t other_cpumask;
	int i, cur_cpuid;

	KKASSERT(&curthread->td_msgport == netisr_portfn(0));
	KKASSERT(!br->br_isset);

	other_cpumask = mycpu->gd_other_cpus & smp_active_mask;
	cur_cpuid = mycpuid;

	for (i = 0; i < ncpus; ++i) {
		struct netmsg_barrier *msg;

		if (i == cur_cpuid)
			continue;

		msg = kmalloc(sizeof(struct netmsg_barrier),
			      M_LWKTMSG, M_WAITOK);
		netmsg_init(&msg->base, NULL, &netisr_afree_rport,
			    MSGF_PRIORITY, netisr_barrier_dispatch);
		msg->br_cpumask = &other_cpumask;
		msg->br_done = NETISR_BR_NOTDONE;

		KKASSERT(br->br_msgs[i] == NULL);
		br->br_msgs[i] = msg;
	}

	for (i = 0; i < ncpus; ++i) {
		if (i == cur_cpuid)
			continue;
		lwkt_sendmsg(netisr_portfn(i), &br->br_msgs[i]->base.lmsg);
	}

	while (other_cpumask != 0) {
		tsleep_interlock(&other_cpumask, 0);
		if (other_cpumask != 0)
			tsleep(&other_cpumask, PINTERLOCKED, "nbrset", 0);
	}
	br->br_isset = 1;
}

void
netisr_barrier_rem(struct netisr_barrier *br)
{
	int i, cur_cpuid;

	KKASSERT(&curthread->td_msgport == netisr_portfn(0));
	KKASSERT(br->br_isset);

	cur_cpuid = mycpuid;
	for (i = 0; i < ncpus; ++i) {
		struct netmsg_barrier *msg = br->br_msgs[i];
		uint32_t done;

		msg = br->br_msgs[i];
		br->br_msgs[i] = NULL;

		if (i == cur_cpuid)
			continue;

		done = atomic_swap_int(&msg->br_done, 0);
		if (done & NETISR_BR_WAITDONE)
			wakeup(&msg->br_done);
	}
	br->br_isset = 0;
}
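
/*
 * Illustrative sketch (not part of the original netisr.c): the intended
 * set/rem pattern.  The caller must be running in the cpu0 netisr thread;
 * while the barrier is set every other netisr thread is parked in
 * netisr_barrier_dispatch(), so state shared by all netisr threads can be
 * modified without further locking.  example_change_global_state() is a
 * hypothetical netmsg handler.
 */
#if 0
static void
example_change_global_state(netmsg_t msg)
{
	static struct netisr_barrier *br;

	if (br == NULL)
		br = netisr_barrier_create();

	netisr_barrier_set(br);
	/* ... modify state consulted by every netisr thread ... */
	netisr_barrier_rem(br);

	lwkt_replymsg(&msg->lmsg, 0);
}
#endif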

static void
netisr_nohashck(struct mbuf *m, const struct pktinfo *pi __unused)
{
	m->m_flags &= ~M_HASH;
}

void
netisr_hashcheck(int num, struct mbuf *m, const struct pktinfo *pi)
{
	struct netisr *ni;

	if (num < 0 || num >= NETISR_MAX)
		panic("Bad isr %d", num);

	/*
	 * Valid netisr?
	 */
	ni = &netisrs[num];
	if (ni->ni_handler == NULL)
		panic("Unregistered isr %d", num);

	ni->ni_hashck(m, pi);
}