2 * Copyright (c) 2004, 2005 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of The DragonFly Project nor the names of its
16 * contributors may be used to endorse or promote products derived
17 * from this software without specific, prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
23 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
25 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
27 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
29 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * Copyright (c) 1988, 1991, 1993
35 * The Regents of the University of California. All rights reserved.
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 * 3. Neither the name of the University nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
61 * @(#)rtsock.c 8.7 (Berkeley) 10/12/95
62 * $FreeBSD: src/sys/net/rtsock.c,v 1.44.2.11 2002/12/04 14:05:41 ru Exp $
65 #include <sys/param.h>
66 #include <sys/systm.h>
67 #include <sys/kernel.h>
68 #include <sys/sysctl.h>
71 #include <sys/malloc.h>
73 #include <sys/protosw.h>
74 #include <sys/socket.h>
75 #include <sys/socketvar.h>
76 #include <sys/domain.h>
79 #include <sys/thread2.h>
80 #include <sys/socketvar2.h>
83 #include <net/if_var.h>
84 #include <net/route.h>
85 #include <net/raw_cb.h>
86 #include <net/netmsg2.h>
87 #include <net/netisr2.h>
/*
 * File-scope state for the routing socket (PF_ROUTE) implementation:
 * malloc type, listener counters, the shared source address, and the
 * sysctl table-walk bookkeeping structures.
 * NOTE(review): interior lines of several declarations are missing from
 * this excerpt; member lists are incomplete as shown.
 */
89 MALLOC_DEFINE(M_RTABLE, "routetbl", "routing tables");
/* Counts of attached routing-socket listeners, per protocol family. */
91 static struct route_cb {
/* Shared sockaddr used as the source/faddr for routing-socket traffic. */
98 static const struct sockaddr route_src = { 2, PF_ROUTE, };
104 struct sysctl_req *w_req;
107 #ifndef RTTABLE_DUMP_MSGCNT_MAX
108 /* Should be large enough for dupkeys */
109 #define RTTABLE_DUMP_MSGCNT_MAX 64
/* Per-walk state for dumping a routing table through sysctl. */
112 struct rttable_walkarg {
123 struct sockaddr_storage w_key0;
124 struct sockaddr_storage w_mask0;
/* Netmsg wrapper carrying a walkarg to the netisr that owns the table. */
127 struct netmsg_rttable_walk {
128 struct netmsg_base base;
130 struct rttable_walkarg *w;
/* Forward declarations for the static helpers defined below. */
134 rt_msg_mbuf (int, struct rt_addrinfo *);
135 static void rt_msg_buffer (int, struct rt_addrinfo *, void *buf, int len);
136 static int rt_msgsize(int type, const struct rt_addrinfo *rtinfo);
137 static int rt_xaddrs (char *, char *, struct rt_addrinfo *);
138 static int sysctl_rttable(int af, struct sysctl_req *req, int op, int arg);
139 static int sysctl_iflist (int af, struct walkarg *w);
140 static int route_output(struct mbuf *, struct socket *, ...);
141 static void rt_setmetrics (u_long, struct rt_metrics *,
142 struct rt_metrics *);
145 * It really doesn't make any sense at all for this code to share much
146 * with raw_usrreq.c, since its functionality is so restricted. XXX
/*
 * rts_abort: abort a routing socket by delegating to the generic
 * raw-socket pru_abort handler; the netmsg is consumed by the callee.
 */
149 rts_abort(netmsg_t msg)
152 raw_usrreqs.pru_abort(msg);
153 /* msg invalid now */
157 /* pru_accept is EOPNOTSUPP */
/*
 * rts_attach: attach a new routing socket.  Allocates a zeroed rawcb,
 * performs the generic raw_attach(), bumps the per-family listener count
 * in route_cb, points the PCB's faddr at the shared route_src, and
 * enables SO_USELOOPBACK by default.  Replies to the netmsg with the
 * resulting error code.
 */
160 rts_attach(netmsg_t msg)
162 struct socket *so = msg->base.nm_so;
163 struct pru_attach_info *ai = msg->attach.nm_ai;
165 int proto = msg->attach.nm_proto;
/* Refuse a second attach on a socket that already has a rawcb. */
169 if (sotorawcb(so) != NULL) {
174 rp = kmalloc(sizeof *rp, M_PCB, M_WAITOK | M_ZERO);
177 * The critical section is necessary to block protocols from sending
178 * error notifications (like RTM_REDIRECT or RTM_LOSING) while
179 * this PCB is extant but incompletely initialized.
180 * Probably we should try to do more of this work beforehand and
181 * eliminate the critical section.
184 soreference(so); /* so_pcb assignment */
185 error = raw_attach(so, proto, ai->sb_rlimit);
/* Track listener counts per requested protocol family. */
191 switch(rp->rcb_proto.sp_protocol) {
196 route_cb.ip6_count++;
199 rp->rcb_faddr = &route_src;
200 route_cb.any_count++;
202 so->so_options |= SO_USELOOPBACK;
206 lwkt_replymsg(&msg->lmsg, error);
/*
 * rts_bind / rts_connect: thin delegations to the generic raw-socket
 * handlers, which (per the inline XXX notes) just return EINVAL.
 */
210 rts_bind(netmsg_t msg)
213 raw_usrreqs.pru_bind(msg); /* xxx just EINVAL */
214 /* msg invalid now */
219 rts_connect(netmsg_t msg)
222 raw_usrreqs.pru_connect(msg); /* XXX just EINVAL */
223 /* msg invalid now */
227 /* pru_connect2 is EOPNOTSUPP */
228 /* pru_control is EOPNOTSUPP */
/*
 * rts_detach: detach a routing socket.  Decrements the per-family
 * listener counters (mirroring the increments in rts_attach) and then
 * delegates teardown to the generic raw-socket pru_detach.
 */
231 rts_detach(netmsg_t msg)
233 struct socket *so = msg->base.nm_so;
234 struct rawcb *rp = sotorawcb(so);
238 switch(rp->rcb_proto.sp_protocol) {
243 route_cb.ip6_count--;
246 route_cb.any_count--;
248 raw_usrreqs.pru_detach(msg);
249 /* msg invalid now */
/*
 * Remaining pru_* entry points: each is a thin wrapper that forwards the
 * netmsg to the corresponding generic raw-socket handler, which consumes
 * (replies to) the message.
 */
254 rts_disconnect(netmsg_t msg)
257 raw_usrreqs.pru_disconnect(msg);
258 /* msg invalid now */
262 /* pru_listen is EOPNOTSUPP */
265 rts_peeraddr(netmsg_t msg)
268 raw_usrreqs.pru_peeraddr(msg);
269 /* msg invalid now */
273 /* pru_rcvd is EOPNOTSUPP */
274 /* pru_rcvoob is EOPNOTSUPP */
277 rts_send(netmsg_t msg)
280 raw_usrreqs.pru_send(msg);
281 /* msg invalid now */
285 /* pru_sense is null */
288 rts_shutdown(netmsg_t msg)
291 raw_usrreqs.pru_shutdown(msg);
292 /* msg invalid now */
297 rts_sockaddr(netmsg_t msg)
300 raw_usrreqs.pru_sockaddr(msg);
301 /* msg invalid now */
/*
 * Protocol user-request dispatch table for the routing socket.
 * Unsupported operations are wired to pr_generic_notsupp; send/receive
 * fall through to the generic socket-layer sosend/soreceive.
 */
305 static struct pr_usrreqs route_usrreqs = {
306 .pru_abort = rts_abort,
307 .pru_accept = pr_generic_notsupp,
308 .pru_attach = rts_attach,
309 .pru_bind = rts_bind,
310 .pru_connect = rts_connect,
311 .pru_connect2 = pr_generic_notsupp,
312 .pru_control = pr_generic_notsupp,
313 .pru_detach = rts_detach,
314 .pru_disconnect = rts_disconnect,
315 .pru_listen = pr_generic_notsupp,
316 .pru_peeraddr = rts_peeraddr,
317 .pru_rcvd = pr_generic_notsupp,
318 .pru_rcvoob = pr_generic_notsupp,
319 .pru_send = rts_send,
320 .pru_sense = pru_sense_null,
321 .pru_shutdown = rts_shutdown,
322 .pru_sockaddr = rts_sockaddr,
323 .pru_sosend = sosend,
324 .pru_soreceive = soreceive
/*
 * familyof: return sa->sa_family, or 0 when sa is NULL (used as a safe
 * "unspecified family" default throughout this file).
 */
327 static __inline sa_family_t
328 familyof(struct sockaddr *sa)
330 return (sa != NULL ? sa->sa_family : 0);
334 * Routing socket input function. The packet must be serialized onto cpu 0.
335 * We use the cpu0_soport() netisr processing loop to handle it.
337 * This looks messy but it means that anyone, including interrupt code,
338 * can send a message to the routing socket.
/*
 * rts_input_handler: runs on the netisr; recovers the address family
 * (stashed in lmsg.u.ms_result by rts_input_skip) and the rawcb to skip
 * (stashed in m_pkthdr.header), then hands the mbuf to raw_input() for
 * delivery to the attached routing sockets.
 */
341 rts_input_handler(netmsg_t msg)
343 static const struct sockaddr route_dst = { 2, PF_ROUTE, };
344 struct sockproto route_proto;
345 struct netmsg_packet *pmsg = &msg->packet;
350 family = pmsg->base.lmsg.u.ms_result;
351 route_proto.sp_family = PF_ROUTE;
352 route_proto.sp_protocol = family;
/* Recover and clear the borrowed pkthdr field (see rts_input_skip). */
357 skip = m->m_pkthdr.header;
358 m->m_pkthdr.header = NULL;
360 raw_input(m, &route_proto, &route_src, &route_dst, skip);
/*
 * rts_input_skip: queue mbuf m for routing-socket delivery on cpu 0's
 * netisr, optionally skipping listener 'skip' (e.g. the sender itself).
 * The family and skip pointer are smuggled through the lmsg result field
 * and the pkthdr header field respectively.
 */
364 rts_input_skip(struct mbuf *m, sa_family_t family, struct rawcb *skip)
366 struct netmsg_packet *pmsg;
371 port = netisr_cpuport(0); /* XXX same as for routing socket */
372 pmsg = &m->m_hdr.mh_netmsg;
373 netmsg_init(&pmsg->base, NULL, &netisr_apanic_rport,
374 0, rts_input_handler);
376 pmsg->base.lmsg.u.ms_result = family;
377 m->m_pkthdr.header = skip; /* XXX steal field in pkthdr */
378 lwkt_sendmsg(port, &pmsg->base.lmsg);
/* rts_input: deliver to all routing-socket listeners (skip nobody). */
382 rts_input(struct mbuf *m, sa_family_t family)
384 rts_input_skip(m, family, NULL);
/*
 * reallocbuf_nofree: grow a buffer to 'len' bytes, copying the first
 * 'olen' bytes from the old buffer.  Unlike realloc, the original buffer
 * is NOT freed; M_NULLOK means allocation may fail and return NULL.
 */
388 reallocbuf_nofree(void *ptr, size_t len, size_t olen)
392 newptr = kmalloc(len, M_RTABLE, M_INTWAIT | M_NULLOK);
395 bcopy(ptr, newptr, olen);
400 * Internal helper routine for route_output().
/*
 * _fillrtmsg: populate *prtm as an RTM reply describing rtentry rt.
 * Fills rtinfo from the rtentry, grows the rtm via reallocbuf_nofree()
 * when the recomputed message size exceeds rtm_msglen (caller frees the
 * old buffer), then serializes with rt_msg_buffer() and copies flags,
 * metrics and the address bitmask back into the header.
 */
403 _fillrtmsg(struct rt_msghdr **prtm, struct rtentry *rt,
404 struct rt_addrinfo *rtinfo)
407 struct rt_msghdr *rtm = *prtm;
409 /* Fill in rt_addrinfo for call to rt_msg_buffer(). */
410 rtinfo->rti_dst = rt_key(rt);
411 rtinfo->rti_gateway = rt->rt_gateway;
412 rtinfo->rti_netmask = rt_mask(rt); /* might be NULL */
413 rtinfo->rti_genmask = rt->rt_genmask; /* might be NULL */
414 if (rtm->rtm_addrs & (RTA_IFP | RTA_IFA)) {
415 if (rt->rt_ifp != NULL) {
416 rtinfo->rti_ifpaddr =
417 TAILQ_FIRST(&rt->rt_ifp->if_addrheads[mycpuid])
419 rtinfo->rti_ifaaddr = rt->rt_ifa->ifa_addr;
420 if (rt->rt_ifp->if_flags & IFF_POINTOPOINT)
421 rtinfo->rti_bcastaddr = rt->rt_ifa->ifa_dstaddr;
422 rtm->rtm_index = rt->rt_ifp->if_index;
424 rtinfo->rti_ifpaddr = NULL;
425 rtinfo->rti_ifaaddr = NULL;
427 } else if (rt->rt_ifp != NULL) {
428 rtm->rtm_index = rt->rt_ifp->if_index;
/* Message may have grown (e.g. extra addrs); reallocate if needed. */
431 msglen = rt_msgsize(rtm->rtm_type, rtinfo);
432 if (rtm->rtm_msglen < msglen) {
433 /* NOTE: Caller will free the old rtm accordingly */
434 rtm = reallocbuf_nofree(rtm, msglen, rtm->rtm_msglen);
439 rt_msg_buffer(rtm->rtm_type, rtinfo, rtm, msglen);
441 rtm->rtm_flags = rt->rt_flags;
442 rtm->rtm_rmx = rt->rt_rmx;
443 rtm->rtm_addrs = rtinfo->rti_addrs;
/*
 * rtm_arg tracks the original ("backing") rtm and the current rtm, which
 * _fillrtmsg() may replace when the message needs to grow.
 */
449 struct rt_msghdr *bak_rtm;
450 struct rt_msghdr *new_rtm;
/*
 * fillrtmsg: wrapper around _fillrtmsg() that frees any intermediate
 * rtm allocation (but never the backing rtm — rtinfo still points into
 * it; the caller frees it after the global request returns).
 */
454 fillrtmsg(struct rtm_arg *arg, struct rtentry *rt,
455 struct rt_addrinfo *rtinfo)
457 struct rt_msghdr *rtm = arg->new_rtm;
460 error = _fillrtmsg(&rtm, rt, rtinfo);
462 if (arg->new_rtm != rtm) {
464 * _fillrtmsg() just allocated a new rtm;
465 * if the previously allocated rtm is not
466 * the backing rtm, it should be freed.
468 if (arg->new_rtm != arg->bak_rtm)
469 kfree(arg->new_rtm, M_RTABLE)
/* Per-command callbacks invoked by the global route request machinery. */
476 static void route_output_add_callback(int, int, struct rt_addrinfo *,
477 struct rtentry *, void *);
478 static void route_output_delete_callback(int, int, struct rt_addrinfo *,
479 struct rtentry *, void *);
480 static int route_output_get_callback(int, struct rt_addrinfo *,
481 struct rtentry *, void *, int);
482 static int route_output_change_callback(int, struct rt_addrinfo *,
483 struct rtentry *, void *, int);
484 static int route_output_lock_callback(int, struct rt_addrinfo *,
485 struct rtentry *, void *, int);
/*
 * route_output: handle an RTM_* request written to a routing socket.
 * Copies the user's rt_msghdr out of the mbuf, validates version/length
 * and parses the trailing sockaddrs with rt_xaddrs(), enforces that only
 * RTM_GET is allowed without PRIV_ROOT, dispatches the command to the
 * global route request machinery via the route_output_*_callback
 * handlers, and finally echoes the (possibly updated) message back to
 * routing-socket listeners — skipping the sender unless SO_USELOOPBACK.
 */
489 route_output(struct mbuf *m, struct socket *so, ...)
492 struct rt_msghdr *rtm = NULL;
493 struct rawcb *rp = NULL;
494 struct pr_output_info *oi;
495 struct rt_addrinfo rtinfo;
503 oi = __va_arg(ap, struct pr_output_info *);
506 family = familyof(NULL);
508 #define gotoerr(e) { error = e; goto flush;}
/* Basic sanity: the request must be contiguous and self-consistent. */
511 (m->m_len < sizeof(long) &&
512 (m = m_pullup(m, sizeof(long))) == NULL))
514 len = m->m_pkthdr.len;
515 if (len < sizeof(struct rt_msghdr) ||
516 len != mtod(m, struct rt_msghdr *)->rtm_msglen)
519 rtm = kmalloc(len, M_RTABLE, M_INTWAIT | M_NULLOK);
523 m_copydata(m, 0, len, (caddr_t)rtm);
524 if (rtm->rtm_version != RTM_VERSION)
525 gotoerr(EPROTONOSUPPORT);
527 rtm->rtm_pid = oi->p_pid;
528 bzero(&rtinfo, sizeof(struct rt_addrinfo));
529 rtinfo.rti_addrs = rtm->rtm_addrs;
/* Parse the sockaddrs that follow the header (untrusted user data). */
530 if (rt_xaddrs((char *)(rtm + 1), (char *)rtm + len, &rtinfo) != 0)
533 rtinfo.rti_flags = rtm->rtm_flags;
534 if (rtinfo.rti_dst == NULL || rtinfo.rti_dst->sa_family >= AF_MAX ||
535 (rtinfo.rti_gateway && rtinfo.rti_gateway->sa_family >= AF_MAX))
538 family = familyof(rtinfo.rti_dst);
541 * Verify that the caller has the appropriate privilege; RTM_GET
542 * is the only operation the non-superuser is allowed.
544 if (rtm->rtm_type != RTM_GET &&
545 priv_check_cred(so->so_cred, PRIV_ROOT, 0) != 0)
/* Install the genmask on all CPUs before the request needs it. */
548 if (rtinfo.rti_genmask != NULL) {
549 error = rtmask_add_global(rtinfo.rti_genmask,
550 rtm->rtm_type != RTM_GET ?
551 RTREQ_PRIO_HIGH : RTREQ_PRIO_NORM);
556 switch (rtm->rtm_type) {
558 if (rtinfo.rti_gateway == NULL) {
561 error = rtrequest1_global(RTM_ADD, &rtinfo,
562 route_output_add_callback, rtm, RTREQ_PRIO_HIGH);
567 * Backing rtm (bak_rtm) could _not_ be freed during
568 * rtrequest1_global or rtsearch_global, even if the
569 * callback reallocates the rtm due to its size changes,
570 * since rtinfo points to the backing rtm's memory area.
571 * After rtrequest1_global or rtsearch_global returns,
572 * it is safe to free the backing rtm, since rtinfo will
573 * not be used anymore.
575 * new_rtm will be used to save the new rtm allocated
576 * by rtrequest1_global or rtsearch_global.
580 error = rtrequest1_global(RTM_DELETE, &rtinfo,
581 route_output_delete_callback, &arg, RTREQ_PRIO_HIGH);
583 if (rtm != arg.bak_rtm)
584 kfree(arg.bak_rtm, M_RTABLE);
587 /* See the comment in RTM_DELETE */
590 error = rtsearch_global(RTM_GET, &rtinfo,
591 route_output_get_callback, &arg, RTS_NOEXACTMATCH,
594 if (rtm != arg.bak_rtm)
595 kfree(arg.bak_rtm, M_RTABLE);
598 error = rtsearch_global(RTM_CHANGE, &rtinfo,
599 route_output_change_callback, rtm, RTS_EXACTMATCH,
603 error = rtsearch_global(RTM_LOCK, &rtinfo,
604 route_output_lock_callback, rtm, RTS_EXACTMATCH,
/* Report the outcome back in the echoed message. */
614 rtm->rtm_errno = error;
616 rtm->rtm_flags |= RTF_DONE;
620 * Check to see if we don't want our own messages.
622 if (!(so->so_options & SO_USELOOPBACK)) {
623 if (route_cb.any_count <= 1) {
625 kfree(rtm, M_RTABLE);
629 /* There is another listener, so construct message */
/* Rebuild the mbuf from the final rtm and trim/match its length. */
633 m_copyback(m, 0, rtm->rtm_msglen, (caddr_t)rtm);
634 if (m->m_pkthdr.len < rtm->rtm_msglen) {
637 } else if (m->m_pkthdr.len > rtm->rtm_msglen)
638 m_adj(m, rtm->rtm_msglen - m->m_pkthdr.len);
639 kfree(rtm, M_RTABLE);
642 rts_input_skip(m, family, rp);
/*
 * route_output_add_callback: post-RTM_ADD hook.  On success, applies the
 * caller-requested metrics and metric locks to the new rtentry, resolves
 * the pre-installed genmask (must exist — see panic), and records the
 * outgoing interface index in the reply header.
 */
647 route_output_add_callback(int cmd, int error, struct rt_addrinfo *rtinfo,
648 struct rtentry *rt, void *arg)
650 struct rt_msghdr *rtm = arg;
652 if (error == 0 && rt != NULL) {
653 rt_setmetrics(rtm->rtm_inits, &rtm->rtm_rmx,
655 rt->rt_rmx.rmx_locks &= ~(rtm->rtm_inits);
656 rt->rt_rmx.rmx_locks |=
657 (rtm->rtm_inits & rtm->rtm_rmx.rmx_locks);
658 if (rtinfo->rti_genmask != NULL) {
659 rt->rt_genmask = rtmask_purelookup(rtinfo->rti_genmask);
660 if (rt->rt_genmask == NULL) {
662 * This should not happen, since we
663 * have already installed genmask
664 * on each CPU before we reach here.
666 panic("genmask is gone!?");
669 rt->rt_genmask = NULL;
671 rtm->rtm_index = rt->rt_ifp->if_index;
/*
 * route_output_delete_callback: post-RTM_DELETE hook.  Snapshots the
 * deleted route into the reply message via fillrtmsg() (errors cannot be
 * propagated from here) before the rtentry is released.
 */
676 route_output_delete_callback(int cmd, int error, struct rt_addrinfo *rtinfo,
677 struct rtentry *rt, void *arg)
679 if (error == 0 && rt) {
681 if (fillrtmsg(arg, rt, rtinfo) != 0) {
683 /* XXX no way to return the error */
687 if (rt && rt->rt_refcnt == 0) {
/*
 * route_output_get_callback: RTM_GET search hook.  Treats a route whose
 * RTF_HOST flag matches the request as an exact match; fills the reply
 * and signals the search to stop once an exact match is found.
 */
694 route_output_get_callback(int cmd, struct rt_addrinfo *rtinfo,
695 struct rtentry *rt, void *arg, int found_cnt)
697 int error, found = 0;
699 if (((rtinfo->rti_flags ^ rt->rt_flags) & RTF_HOST) == 0)
702 error = fillrtmsg(arg, rt, rtinfo);
703 if (!error && found) {
704 /* Got the exact match, we could return now! */
/*
 * route_output_change_callback: RTM_CHANGE hook.  Re-resolves the ifa/ifp
 * when the new gateway or explicit interface addresses require it, swaps
 * the gateway (reporting a routing message only for the first changed
 * route), migrates ifa_rtrequest notifications from the old ifa to the
 * new one, reapplies metrics, and refreshes the genmask.
 */
711 route_output_change_callback(int cmd, struct rt_addrinfo *rtinfo,
712 struct rtentry *rt, void *arg, int found_cnt)
714 struct rt_msghdr *rtm = arg;
719 * new gateway could require new ifaddr, ifp;
720 * flags may also be different; ifp may be specified
721 * by ll sockaddr when protocol address is ambiguous
723 if (((rt->rt_flags & RTF_GATEWAY) && rtinfo->rti_gateway != NULL) ||
724 rtinfo->rti_ifpaddr != NULL ||
725 (rtinfo->rti_ifaaddr != NULL &&
726 !sa_equal(rtinfo->rti_ifaaddr, rt->rt_ifa->ifa_addr))) {
727 error = rt_getifa(rtinfo);
731 if (rtinfo->rti_gateway != NULL) {
733 * We only need to generate rtmsg upon the
734 * first route to be changed.
736 error = rt_setgate(rt, rt_key(rt), rtinfo->rti_gateway,
737 found_cnt == 1 ? RTL_REPORTMSG : RTL_DONTREPORT);
/* Swap in the new ifaddr: notify old ifa of the delete first. */
741 if ((ifa = rtinfo->rti_ifa) != NULL) {
742 struct ifaddr *oifa = rt->rt_ifa;
745 if (oifa && oifa->ifa_rtrequest)
746 oifa->ifa_rtrequest(RTM_DELETE, rt);
750 rt->rt_ifp = rtinfo->rti_ifp;
753 rt_setmetrics(rtm->rtm_inits, &rtm->rtm_rmx, &rt->rt_rmx);
754 if (rt->rt_ifa && rt->rt_ifa->ifa_rtrequest)
755 rt->rt_ifa->ifa_rtrequest(RTM_ADD, rt);
756 if (rtinfo->rti_genmask != NULL) {
757 rt->rt_genmask = rtmask_purelookup(rtinfo->rti_genmask);
758 if (rt->rt_genmask == NULL) {
760 * This should not happen, since we
761 * have already installed genmask
762 * on each CPU before we reach here.
764 panic("genmask is gone!?");
767 rtm->rtm_index = rt->rt_ifp->if_index;
/*
 * route_output_lock_callback: RTM_LOCK hook.  Clears then re-sets the
 * rmx_locks bits named in rtm_inits according to the caller's requested
 * lock mask.
 */
773 route_output_lock_callback(int cmd, struct rt_addrinfo *rtinfo,
774 struct rtentry *rt, void *arg,
775 int found_cnt __unused)
777 struct rt_msghdr *rtm = arg;
779 rt->rt_rmx.rmx_locks &= ~(rtm->rtm_inits);
780 rt->rt_rmx.rmx_locks |=
781 (rtm->rtm_inits & rtm->rtm_rmx.rmx_locks);
/*
 * rt_setmetrics: copy each metric field from 'in' to 'out' whose RTV_*
 * bit is set in 'which' (the caller's rtm_inits mask).
 */
786 rt_setmetrics(u_long which, struct rt_metrics *in, struct rt_metrics *out)
788 #define setmetric(flag, elt) if (which & (flag)) out->elt = in->elt;
789 setmetric(RTV_RPIPE, rmx_recvpipe);
790 setmetric(RTV_SPIPE, rmx_sendpipe);
791 setmetric(RTV_SSTHRESH, rmx_ssthresh);
792 setmetric(RTV_RTT, rmx_rtt);
793 setmetric(RTV_RTTVAR, rmx_rttvar);
794 setmetric(RTV_HOPCOUNT, rmx_hopcount);
795 setmetric(RTV_MTU, rmx_mtu);
796 setmetric(RTV_EXPIRE, rmx_expire);
797 setmetric(RTV_MSL, rmx_msl);
798 setmetric(RTV_IWMAXSEGS, rmx_iwmaxsegs);
799 setmetric(RTV_IWCAPSEGS, rmx_iwcapsegs);
804 * Extract the addresses of the passed sockaddrs.
805 * Do a little sanity checking so as to avoid bad memory references.
806 * This data is derived straight from userland.
/*
 * rt_xaddrs: walk [cp, cplim) and, for each RTA_* bit set in
 * rtinfo->rti_addrs, record a pointer to the corresponding sockaddr in
 * rti_info[].  Bounds-checks every sa_len against cplim; a zero-length
 * sockaddr is tolerated (pointed at a static zero sockaddr) for
 * compatibility with buggy route(1) binaries.
 */
809 rt_xaddrs(char *cp, char *cplim, struct rt_addrinfo *rtinfo)
814 for (i = 0; (i < RTAX_MAX) && (cp < cplim); i++) {
815 if ((rtinfo->rti_addrs & (1 << i)) == 0)
817 sa = (struct sockaddr *)cp;
/* Reject a sockaddr that would run past the end of the buffer. */
821 if ((cp + sa->sa_len) > cplim) {
826 * There are no more... Quit now.
827 * If there are more bits, they are in error.
828 * I've seen this. route(1) can evidently generate these.
829 * This causes kernel to core dump.
830 * For compatibility, if we see this, point to a safe address.
832 if (sa->sa_len == 0) {
833 static struct sockaddr sa_zero = {
834 sizeof sa_zero, AF_INET,
837 rtinfo->rti_info[i] = &sa_zero;
838 kprintf("rtsock: received more addr bits than sockaddrs.\n");
839 return (0); /* should be EINVAL but for compat */
842 /* Accept the sockaddr. */
843 rtinfo->rti_info[i] = sa;
844 cp += RT_ROUNDUP(sa->sa_len);
/*
 * rt_msghdrsize: return the fixed header size for a given RTM_* message
 * type (ifa/ifma/if/if_announce headers for the interface messages,
 * rt_msghdr for everything else).
 */
850 rt_msghdrsize(int type)
855 return sizeof(struct ifa_msghdr);
858 return sizeof(struct ifma_msghdr);
860 return sizeof(struct if_msghdr);
863 return sizeof(struct if_announcemsghdr);
865 return sizeof(struct rt_msghdr);
/*
 * rt_msgsize: total message size = fixed header for 'type' plus the
 * RT_ROUNDUP'd length of every sockaddr present in rtinfo->rti_info[].
 */
870 rt_msgsize(int type, const struct rt_addrinfo *rtinfo)
874 len = rt_msghdrsize(type);
875 for (i = 0; i < RTAX_MAX; i++) {
876 if (rtinfo->rti_info[i] != NULL)
877 len += RT_ROUNDUP(rtinfo->rti_info[i]->sa_len);
884 * Build a routing message in a buffer.
885 * Copy the addresses in the rtinfo->rti_info[] sockaddr array
886 * to the end of the buffer after the message header.
888 * Set the rtinfo->rti_addrs bitmask of addresses present in rtinfo->rti_info[].
889 * This side-effect can be avoided if we reorder the addrs bitmask field in all
890 * the route messages to line up so we can set it here instead of back in the
/*
 * rt_msg_buffer: serialize header fields (version/type/msglen) into
 * 'buf', then append each present sockaddr after the fixed header,
 * rebuilding rtinfo->rti_addrs as it goes.  Caller guarantees 'buf' is
 * at least 'msglen' (= rt_msgsize()) bytes.
 */
894 rt_msg_buffer(int type, struct rt_addrinfo *rtinfo, void *buf, int msglen)
896 struct rt_msghdr *rtm;
900 rtm = (struct rt_msghdr *) buf;
901 rtm->rtm_version = RTM_VERSION;
902 rtm->rtm_type = type;
903 rtm->rtm_msglen = msglen;
905 cp = (char *)buf + rt_msghdrsize(type);
906 rtinfo->rti_addrs = 0;
907 for (i = 0; i < RTAX_MAX; i++) {
910 if ((sa = rtinfo->rti_info[i]) == NULL)
912 rtinfo->rti_addrs |= (1 << i);
913 dlen = RT_ROUNDUP(sa->sa_len);
920 * Build a routing message in a mbuf chain.
921 * Copy the addresses in the rtinfo->rti_info[] sockaddr array
922 * to the end of the mbuf after the message header.
924 * Set the rtinfo->rti_addrs bitmask of addresses present in rtinfo->rti_info[].
925 * This side-effect can be avoided if we reorder the addrs bitmask field in all
926 * the route messages to line up so we can set it here instead of back in the
/*
 * rt_msg_mbuf: same serialization as rt_msg_buffer() but into a fresh
 * mbuf (chain); m_copyback() may grow the chain per sockaddr.  Returns
 * the mbuf, or (per the failure check below) frees it when a copyback
 * failed and the packet length does not match.
 */
930 rt_msg_mbuf(int type, struct rt_addrinfo *rtinfo)
933 struct rt_msghdr *rtm;
937 hlen = rt_msghdrsize(type);
938 KASSERT(hlen <= MCLBYTES, ("rt_msg_mbuf: hlen %d doesn't fit", hlen));
940 m = m_getl(hlen, M_NOWAIT, MT_DATA, M_PKTHDR, NULL);
944 m->m_pkthdr.len = m->m_len = hlen;
945 m->m_pkthdr.rcvif = NULL;
946 rtinfo->rti_addrs = 0;
948 for (i = 0; i < RTAX_MAX; i++) {
952 if ((sa = rtinfo->rti_info[i]) == NULL)
954 rtinfo->rti_addrs |= (1 << i);
955 dlen = RT_ROUNDUP(sa->sa_len);
956 m_copyback(m, len, dlen, (caddr_t)sa); /* can grow mbuf chain */
959 if (m->m_pkthdr.len != len) { /* one of the m_copyback() calls failed */
963 rtm = mtod(m, struct rt_msghdr *);
965 rtm->rtm_msglen = len;
966 rtm->rtm_version = RTM_VERSION;
967 rtm->rtm_type = type;
972 * This routine is called to generate a message from the routing
973 * socket indicating that a redirect has occurred, a routing lookup
974 * has failed, or that a protocol has detected timeouts to a particular
/*
 * rt_missmsg: broadcast an RTM_* miss/redirect message built from
 * rtinfo.  Short-circuits when no routing-socket listener is attached.
 */
978 rt_missmsg(int type, struct rt_addrinfo *rtinfo, int flags, int error)
980 struct sockaddr *dst = rtinfo->rti_info[RTAX_DST];
981 struct rt_msghdr *rtm;
984 if (route_cb.any_count == 0)
986 m = rt_msg_mbuf(type, rtinfo);
989 rtm = mtod(m, struct rt_msghdr *);
990 rtm->rtm_flags = RTF_DONE | flags;
991 rtm->rtm_errno = error;
992 rtm->rtm_addrs = rtinfo->rti_addrs;
993 rts_input(m, familyof(dst));
/*
 * rt_dstmsg: broadcast a routing message carrying only a destination
 * address (RTAX_DST) and an error code; no-op without listeners.
 */
997 rt_dstmsg(int type, struct sockaddr *dst, int error)
999 struct rt_msghdr *rtm;
1000 struct rt_addrinfo addrs;
1003 if (route_cb.any_count == 0)
1005 bzero(&addrs, sizeof(struct rt_addrinfo));
1006 addrs.rti_info[RTAX_DST] = dst;
1007 m = rt_msg_mbuf(type, &addrs);
1010 rtm = mtod(m, struct rt_msghdr *);
1011 rtm->rtm_flags = RTF_DONE;
1012 rtm->rtm_errno = error;
1013 rtm->rtm_addrs = addrs.rti_addrs;
1014 rts_input(m, familyof(dst));
1018 * This routine is called to generate a message from the routing
1019 * socket indicating that the status of a network interface has changed.
/*
 * rt_ifmsg: broadcast an RTM_IFINFO message (index, flags, if_data
 * snapshot) for interface ifp; no-op without listeners.
 */
1022 rt_ifmsg(struct ifnet *ifp)
1024 struct if_msghdr *ifm;
1026 struct rt_addrinfo rtinfo;
1028 if (route_cb.any_count == 0)
1030 bzero(&rtinfo, sizeof(struct rt_addrinfo));
1031 m = rt_msg_mbuf(RTM_IFINFO, &rtinfo);
1034 ifm = mtod(m, struct if_msghdr *);
1035 ifm->ifm_index = ifp->if_index;
1036 ifm->ifm_flags = ifp->if_flags;
1037 ifm->ifm_data = ifp->if_data;
/*
 * rt_ifamsg: broadcast an RTM_NEWADDR/RTM_DELADDR message describing
 * interface address 'ifa' — its address, netmask, broadcast/destination
 * address, and the interface's primary (first) address.
 */
1043 rt_ifamsg(int cmd, struct ifaddr *ifa)
1045 struct ifa_msghdr *ifam;
1046 struct rt_addrinfo rtinfo;
1048 struct ifnet *ifp = ifa->ifa_ifp;
1050 bzero(&rtinfo, sizeof(struct rt_addrinfo));
1051 rtinfo.rti_ifaaddr = ifa->ifa_addr;
/* First ifaddr on this cpu's list serves as the interface address. */
1052 rtinfo.rti_ifpaddr =
1053 TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa->ifa_addr;
1054 rtinfo.rti_netmask = ifa->ifa_netmask;
1055 rtinfo.rti_bcastaddr = ifa->ifa_dstaddr;
1057 m = rt_msg_mbuf(cmd, &rtinfo);
1061 ifam = mtod(m, struct ifa_msghdr *);
1062 ifam->ifam_index = ifp->if_index;
1063 ifam->ifam_metric = ifa->ifa_metric;
1064 ifam->ifam_flags = ifa->ifa_flags;
1065 ifam->ifam_addrs = rtinfo.rti_addrs;
1067 rts_input(m, familyof(ifa->ifa_addr));
/*
 * rt_rtmsg: broadcast a routing message (RTM_ADD/RTM_DELETE/...) for
 * rtentry rt — key, gateway, netmask, and the interface/ifa addresses —
 * tagged with the interface index and the given error code.
 */
1071 rt_rtmsg(int cmd, struct rtentry *rt, struct ifnet *ifp, int error)
1073 struct rt_msghdr *rtm;
1074 struct rt_addrinfo rtinfo;
1076 struct sockaddr *dst;
1081 bzero(&rtinfo, sizeof(struct rt_addrinfo));
1082 rtinfo.rti_dst = dst = rt_key(rt);
1083 rtinfo.rti_gateway = rt->rt_gateway;
1084 rtinfo.rti_netmask = rt_mask(rt);
1086 rtinfo.rti_ifpaddr =
1087 TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa->ifa_addr;
1089 rtinfo.rti_ifaaddr = rt->rt_ifa->ifa_addr;
1091 m = rt_msg_mbuf(cmd, &rtinfo);
1095 rtm = mtod(m, struct rt_msghdr *);
1097 rtm->rtm_index = ifp->if_index;
1098 rtm->rtm_flags |= rt->rt_flags;
1099 rtm->rtm_errno = error;
1100 rtm->rtm_addrs = rtinfo.rti_addrs;
1102 rts_input(m, familyof(dst));
1106 * This is called to generate messages from the routing socket
1107 * indicating a network interface has had addresses associated with it.
1108 * if we ever reverse the logic and replace messages TO the routing
1109 * socket indicate a request to configure interfaces, then it will
1110 * be unnecessary as the routing socket will automatically generate
/*
 * rt_newaddrmsg: emit the address + route message pair for an address
 * add/delete.  Ordering differs by direction: NEWADDR before ADD, but
 * DELETE before DELADDR.  Only RTM_ADD and RTM_DELETE are legal cmds.
 */
1114 rt_newaddrmsg(int cmd, struct ifaddr *ifa, int error, struct rtentry *rt)
1116 if (route_cb.any_count == 0)
1119 if (cmd == RTM_ADD) {
1120 rt_ifamsg(RTM_NEWADDR, ifa);
1121 rt_rtmsg(RTM_ADD, rt, ifa->ifa_ifp, error);
1123 KASSERT((cmd == RTM_DELETE), ("unknown cmd %d", cmd));
1124 rt_rtmsg(RTM_DELETE, rt, ifa->ifa_ifp, error);
1125 rt_ifamsg(RTM_DELADDR, ifa);
1130 * This is the analogue to the rt_newaddrmsg which performs the same
1131 * function but for multicast group memberhips. This is easier since
1132 * there is no route state to worry about.
/*
 * rt_newmaddrmsg: broadcast an ifma (multicast membership) message for
 * 'ifma'; the group address goes in rti_ifaaddr and any link-layer
 * address is reported as the gateway.  No-op without listeners.
 */
1135 rt_newmaddrmsg(int cmd, struct ifmultiaddr *ifma)
1137 struct rt_addrinfo rtinfo;
1138 struct mbuf *m = NULL;
1139 struct ifnet *ifp = ifma->ifma_ifp;
1140 struct ifma_msghdr *ifmam;
1142 if (route_cb.any_count == 0)
1145 bzero(&rtinfo, sizeof(struct rt_addrinfo));
1146 rtinfo.rti_ifaaddr = ifma->ifma_addr;
1147 if (ifp != NULL && !TAILQ_EMPTY(&ifp->if_addrheads[mycpuid])) {
1148 rtinfo.rti_ifpaddr =
1149 TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa->ifa_addr;
1152 * If a link-layer address is present, present it as a ``gateway''
1153 * (similarly to how ARP entries, e.g., are presented).
1155 rtinfo.rti_gateway = ifma->ifma_lladdr;
1157 m = rt_msg_mbuf(cmd, &rtinfo);
1161 ifmam = mtod(m, struct ifma_msghdr *);
1162 ifmam->ifmam_index = ifp->if_index;
1163 ifmam->ifmam_addrs = rtinfo.rti_addrs;
1165 rts_input(m, familyof(ifma->ifma_addr));
/*
 * rt_makeifannouncemsg: build (but do not send) an if_announce message
 * mbuf for interface ifp, filling index, name and the 'what' code.
 * Returns NULL when there are no routing-socket listeners.
 */
1168 static struct mbuf *
1169 rt_makeifannouncemsg(struct ifnet *ifp, int type, int what,
1170 struct rt_addrinfo *info)
1172 struct if_announcemsghdr *ifan;
1175 if (route_cb.any_count == 0)
1178 bzero(info, sizeof(*info));
1179 m = rt_msg_mbuf(type, info);
1183 ifan = mtod(m, struct if_announcemsghdr *);
1184 ifan->ifan_index = ifp->if_index;
1185 strlcpy(ifan->ifan_name, ifp->if_xname, sizeof ifan->ifan_name);
1186 ifan->ifan_what = what;
1191 * This is called to generate routing socket messages indicating
1192 * IEEE80211 wireless events.
1193 * XXX we piggyback on the RTM_IFANNOUNCE msg format in a clumsy way.
/*
 * rt_ieee80211msg: build an RTM_IEEE80211 announce message and append
 * 'data_len' bytes of event payload, either in the trailing space of
 * the announce mbuf or in a freshly chained mbuf, then fix up pkthdr
 * and ifan_msglen to cover the payload.
 */
1196 rt_ieee80211msg(struct ifnet *ifp, int what, void *data, size_t data_len)
1198 struct rt_addrinfo info;
1201 m = rt_makeifannouncemsg(ifp, RTM_IEEE80211, what, &info);
1206 * Append the ieee80211 data. Try to stick it in the
1207 * mbuf containing the ifannounce msg; otherwise allocate
1208 * a new mbuf and append.
1210 * NB: we assume m is a single mbuf.
1212 if (data_len > M_TRAILINGSPACE(m)) {
1213 /* XXX use m_getb(data_len, M_NOWAIT, MT_DATA, 0); */
1214 struct mbuf *n = m_get(M_NOWAIT, MT_DATA);
1219 KKASSERT(data_len <= M_TRAILINGSPACE(n));
1220 bcopy(data, mtod(n, void *), data_len);
1221 n->m_len = data_len;
1223 } else if (data_len > 0) {
1224 bcopy(data, mtod(m, u_int8_t *) + m->m_len, data_len);
1225 m->m_len += data_len;
1228 if (m->m_flags & M_PKTHDR)
1229 m->m_pkthdr.len += data_len;
1230 mtod(m, struct if_announcemsghdr *)->ifan_msglen += data_len;
1235 * This is called to generate routing socket messages indicating
1236 * network interface arrival and departure.
/* rt_ifannouncemsg: build and send a plain RTM_IFANNOUNCE message. */
1239 rt_ifannouncemsg(struct ifnet *ifp, int what)
1241 struct rt_addrinfo addrinfo;
1244 m = rt_makeifannouncemsg(ifp, RTM_IFANNOUNCE, what, &addrinfo);
/*
 * resizewalkarg: (re)allocate the walkarg's temporary message buffer to
 * 'len' bytes, freeing any previous buffer.  M_NULLOK allows failure.
 */
1250 resizewalkarg(struct walkarg *w, int len)
1254 newptr = kmalloc(len, M_RTABLE, M_INTWAIT | M_NULLOK);
1257 if (w->w_tmem != NULL)
1258 kfree(w->w_tmem, M_RTABLE);
1260 w->w_tmemsize = len;
/*
 * ifnet_compute_stats: fold the per-cpu interface statistics into the
 * aggregate if_data fields so they can be exported in one snapshot.
 */
1265 ifnet_compute_stats(struct ifnet *ifp)
1267 IFNET_STAT_GET(ifp, ipackets, ifp->if_ipackets);
1268 IFNET_STAT_GET(ifp, ierrors, ifp->if_ierrors);
1269 IFNET_STAT_GET(ifp, opackets, ifp->if_opackets);
1270 IFNET_STAT_GET(ifp, collisions, ifp->if_collisions);
1271 IFNET_STAT_GET(ifp, ibytes, ifp->if_ibytes);
1272 IFNET_STAT_GET(ifp, obytes, ifp->if_obytes);
1273 IFNET_STAT_GET(ifp, imcasts, ifp->if_imcasts);
1274 IFNET_STAT_GET(ifp, omcasts, ifp->if_omcasts);
1275 IFNET_STAT_GET(ifp, iqdrops, ifp->if_iqdrops);
1276 IFNET_STAT_GET(ifp, noproto, ifp->if_noproto);
1277 IFNET_STAT_GET(ifp, oqdrops, ifp->if_oqdrops);
/*
 * sysctl_iflist: sysctl handler that dumps every interface (as an
 * RTM_IFINFO record) followed by each of its addresses (RTM_NEWADDR),
 * optionally filtered by address family 'af' and by w->w_arg interface
 * index.  A marker node is inserted into the per-cpu ifaddr list so the
 * walk survives list mutation while SYSCTL_OUT() blocks; prison
 * restrictions on visible addresses are honored.
 */
1281 sysctl_iflist(int af, struct walkarg *w)
1284 struct rt_addrinfo rtinfo;
1287 bzero(&rtinfo, sizeof(struct rt_addrinfo));
1290 TAILQ_FOREACH(ifp, &ifnetlist, if_link) {
1291 struct ifaddr_container *ifac, *ifac_mark;
1292 struct ifaddr_marker mark;
1293 struct ifaddrhead *head;
/* Optional single-interface filter. */
1296 if (w->w_arg && w->w_arg != ifp->if_index)
1298 head = &ifp->if_addrheads[mycpuid];
1300 * There is no need to reference the first ifaddr
1301 * even if the following resizewalkarg() blocks,
1302 * since the first ifaddr will not be destroyed
1303 * when the ifnet lock is held.
1305 ifac = TAILQ_FIRST(head);
1307 rtinfo.rti_ifpaddr = ifa->ifa_addr;
1308 msglen = rt_msgsize(RTM_IFINFO, &rtinfo);
1309 if (w->w_tmemsize < msglen && resizewalkarg(w, msglen) != 0) {
1313 rt_msg_buffer(RTM_IFINFO, &rtinfo, w->w_tmem, msglen);
1314 rtinfo.rti_ifpaddr = NULL;
1315 if (w->w_req != NULL && w->w_tmem != NULL) {
1316 struct if_msghdr *ifm = w->w_tmem;
1318 ifm->ifm_index = ifp->if_index;
1319 ifm->ifm_flags = ifp->if_flags;
1320 ifnet_compute_stats(ifp);
1321 ifm->ifm_data = ifp->if_data;
1322 ifm->ifm_addrs = rtinfo.rti_addrs;
1323 error = SYSCTL_OUT(w->w_req, ifm, msglen);
1330 * Add a marker, since SYSCTL_OUT() could block and during
1331 * that period the list could be changed.
1333 ifa_marker_init(&mark, ifp);
1334 ifac_mark = &mark.ifac;
1335 TAILQ_INSERT_AFTER(head, ifac, ifac_mark, ifa_link);
/* Advance the marker past each address as it is emitted. */
1336 while ((ifac = TAILQ_NEXT(ifac_mark, ifa_link)) != NULL) {
1337 TAILQ_REMOVE(head, ifac_mark, ifa_link);
1338 TAILQ_INSERT_AFTER(head, ifac, ifac_mark, ifa_link);
1343 if (ifa->ifa_addr->sa_family == AF_UNSPEC)
1346 if (af && af != ifa->ifa_addr->sa_family)
/* Jailed callers only see addresses visible to their prison. */
1348 if (curproc->p_ucred->cr_prison &&
1349 prison_if(curproc->p_ucred, ifa->ifa_addr))
1351 rtinfo.rti_ifaaddr = ifa->ifa_addr;
1352 rtinfo.rti_netmask = ifa->ifa_netmask;
1353 rtinfo.rti_bcastaddr = ifa->ifa_dstaddr;
1354 msglen = rt_msgsize(RTM_NEWADDR, &rtinfo);
1356 * Keep a reference on this ifaddr, so that it will
1357 * not be destroyed if the following resizewalkarg()
1361 if (w->w_tmemsize < msglen &&
1362 resizewalkarg(w, msglen) != 0) {
1364 TAILQ_REMOVE(head, ifac_mark, ifa_link);
1368 rt_msg_buffer(RTM_NEWADDR, &rtinfo, w->w_tmem, msglen);
1369 if (w->w_req != NULL) {
1370 struct ifa_msghdr *ifam = w->w_tmem;
1372 ifam->ifam_index = ifa->ifa_ifp->if_index;
1373 ifam->ifam_flags = ifa->ifa_flags;
1374 ifam->ifam_metric = ifa->ifa_metric;
1375 ifam->ifam_addrs = rtinfo.rti_addrs;
1376 error = SYSCTL_OUT(w->w_req, w->w_tmem, msglen);
1379 TAILQ_REMOVE(head, ifac_mark, ifa_link);
1386 TAILQ_REMOVE(head, ifac_mark, ifa_link);
/* Reset per-address fields before moving to the next interface. */
1387 rtinfo.rti_netmask = NULL;
1388 rtinfo.rti_ifaaddr = NULL;
1389 rtinfo.rti_bcastaddr = NULL;
/*
 * rttable_walkarg_create: initialize a table-walk argument and size its
 * buffer as RTTABLE_DUMP_MSGCNT_MAX times the worst-case RTM_GET message
 * (every rti_info slot filled with a maximal sockaddr_storage).
 */
1396 rttable_walkarg_create(struct rttable_walkarg *w, int op, int arg)
1398 struct rt_addrinfo rtinfo;
1399 struct sockaddr_storage ss;
1402 memset(w, 0, sizeof(*w));
1406 memset(&ss, 0, sizeof(ss));
1407 ss.ss_len = sizeof(ss);
1409 memset(&rtinfo, 0, sizeof(rtinfo));
1410 for (i = 0; i < RTAX_MAX; ++i)
1411 rtinfo.rti_info[i] = (struct sockaddr *)&ss;
1412 msglen = rt_msgsize(RTM_GET, &rtinfo);
1414 w->w_bufsz = msglen * RTTABLE_DUMP_MSGCNT_MAX;
1415 w->w_buf = kmalloc(w->w_bufsz, M_TEMP, M_WAITOK | M_NULLOK);
1416 if (w->w_buf == NULL)
/*
 * Release the dump buffer allocated by rttable_walkarg_create().
 */
1422 rttable_walkarg_destroy(struct rttable_walkarg *w)
1424 kfree(w->w_buf, M_TEMP);
/*
 * Fill @rtinfo with the addresses of the route entry behind radix node
 * @rn: destination, gateway, netmask and genmask.  If the route has an
 * attached interface, also record the interface address (first ifaddr
 * on this CPU's list), the route's ifa address, and — for
 * point-to-point interfaces — the peer (destination) address as the
 * broadcast-address slot.
 */
1428 rttable_entry_rtinfo(struct rt_addrinfo *rtinfo, struct radix_node *rn)
1430 struct rtentry *rt = (struct rtentry *)rn;
1432 bzero(rtinfo, sizeof(*rtinfo));
1433 rtinfo->rti_dst = rt_key(rt);
1434 rtinfo->rti_gateway = rt->rt_gateway;
1435 rtinfo->rti_netmask = rt_mask(rt);
1436 rtinfo->rti_genmask = rt->rt_genmask;
1437 if (rt->rt_ifp != NULL) {
/*
 * NOTE(review): assumes if_addrheads[mycpuid] is non-empty for an
 * attached interface — TAILQ_FIRST() is dereferenced unchecked.
 */
1438 rtinfo->rti_ifpaddr =
1439 TAILQ_FIRST(&rt->rt_ifp->if_addrheads[mycpuid])->ifa->ifa_addr;
1440 rtinfo->rti_ifaaddr = rt->rt_ifa->ifa_addr;
1441 if (rt->rt_ifp->if_flags & IFF_POINTOPOINT)
1442 rtinfo->rti_bcastaddr = rt->rt_ifa->ifa_dstaddr;
/*
 * Per-entry callback for the radix-tree walk (rnh_walktree_at).
 *
 * Serializes one route entry as an RTM_GET message appended to the
 * walk buffer w->w_buf.  If the buffer cannot hold the next message,
 * the current key/netmask are saved in the walk argument so that the
 * caller can restart the walk from this position on the next round.
 */
1447 rttable_walk_entry(struct radix_node *rn, void *xw)
1449 struct rttable_walkarg *w = xw;
1450 struct rtentry *rt = (struct rtentry *)rn;
1451 struct rt_addrinfo rtinfo;
1452 struct rt_msghdr *rtm;
1453 boolean_t save = FALSE;
1454 int msglen, w_bufleft;
1457 rttable_entry_rtinfo(&rtinfo, rn);
1458 msglen = rt_msgsize(RTM_GET, &rtinfo);
/* Space remaining in the dump buffer. */
1460 w_bufleft = w->w_bufsz - w->w_buflen;
1462 if (rn->rn_dupedkey != NULL) {
1463 struct radix_node *rn1 = rn;
1464 int total_msglen = msglen;
1467 * Make sure that we have enough space left for all
1468 * dupedkeys, since rn_walktree_at always starts
1469 * from the first dupedkey.
/* Sum the message sizes of the entire dupedkey chain. */
1471 while ((rn1 = rn1->rn_dupedkey) != NULL) {
1472 struct rt_addrinfo rtinfo1;
/* Skip the radix tree's internal root nodes. */
1475 if (rn1->rn_flags & RNF_ROOT)
1478 rttable_entry_rtinfo(&rtinfo1, rn1);
1479 msglen1 = rt_msgsize(RTM_GET, &rtinfo1);
1480 total_msglen += msglen1;
1483 if (total_msglen > w_bufleft) {
/*
 * The whole chain can never fit even in an empty buffer;
 * warn once so the admin can raise RTTABLE_DUMP_MSGCNT_MAX.
 */
1484 if (total_msglen > w->w_bufsz) {
1485 static int logged = 0;
1488 kprintf("buffer is too small for "
1489 "all dupedkeys, increase "
1490 "RTTABLE_DUMP_MSGCNT_MAX\n");
1497 } else if (msglen > w_bufleft) {
1503 * Not enough buffer left; remember the position
1504 * to start from upon next round.
/* A single message must always fit in an empty buffer. */
1506 KASSERT(msglen <= w->w_bufsz, ("msg too long %d", msglen));
/* Save the resume key (destination sockaddr) ... */
1508 KASSERT(rtinfo.rti_dst->sa_len <= sizeof(w->w_key0),
1509 ("key too long %d", rtinfo.rti_dst->sa_len));
1510 memset(&w->w_key0, 0, sizeof(w->w_key0));
1511 memcpy(&w->w_key0, rtinfo.rti_dst, rtinfo.rti_dst->sa_len);
1512 w->w_key = (const char *)&w->w_key0;
/* ... and the resume netmask, if the route has one. */
1514 if (rtinfo.rti_netmask != NULL) {
1516 rtinfo.rti_netmask->sa_len <= sizeof(w->w_mask0),
1517 ("mask too long %d", rtinfo.rti_netmask->sa_len));
1518 memset(&w->w_mask0, 0, sizeof(w->w_mask0));
1519 memcpy(&w->w_mask0, rtinfo.rti_netmask,
1520 rtinfo.rti_netmask->sa_len);
1521 w->w_mask = (const char *)&w->w_mask0;
/* NET_RT_FLAGS dump: only emit routes matching the requested flags. */
1528 if (w->w_op == NET_RT_FLAGS && !(rt->rt_flags & w->w_arg))
/* Serialize the message at the current end of the buffer ... */
1531 ptr = ((uint8_t *)w->w_buf) + w->w_buflen;
1532 rt_msg_buffer(RTM_GET, &rtinfo, ptr, msglen);
/* ... then fill in the header fields rt_msg_buffer() does not set. */
1534 rtm = (struct rt_msghdr *)ptr;
1535 rtm->rtm_flags = rt->rt_flags;
1536 rtm->rtm_use = rt->rt_use;
1537 rtm->rtm_rmx = rt->rt_rmx;
1538 rtm->rtm_index = rt->rt_ifp->if_index;
1539 rtm->rtm_errno = rtm->rtm_pid = rtm->rtm_seq = 0;
1540 rtm->rtm_addrs = rtinfo.rti_addrs;
1542 w->w_buflen += msglen;
/*
 * Netmsg handler: walk the radix tree of the requested address family
 * on the current CPU, starting from the saved key/mask position, and
 * reply to the message with the walk's error code.
 */
1548 rttable_walk_dispatch(netmsg_t msg)
1550 struct netmsg_rttable_walk *nmsg = (struct netmsg_rttable_walk *)msg;
1551 struct radix_node_head *rnh = rt_tables[mycpuid][nmsg->af];
1552 struct rttable_walkarg *w = nmsg->w;
/* Resume the walk at w->w_key/w->w_mask (NULL means start at the root). */
1555 error = rnh->rnh_walktree_at(rnh, w->w_key, w->w_mask,
1556 rttable_walk_entry, w);
1557 lwkt_replymsg(&nmsg->base.lmsg, error);
/*
 * Dump the route table(s) to a sysctl request.
 *
 * For each address family (or only @af if non-zero) with a populated
 * radix tree, a walk message is dispatched to the netisr port of the
 * current CPU.  The walk fills w.w_buf; the buffer is copied out via
 * SYSCTL_OUT and the walk is repeated from the saved resume position
 * until it completes (error == 0) rather than returning EJUSTRETURN.
 */
1561 sysctl_rttable(int af, struct sysctl_req *req, int op, int arg)
1563 struct rttable_walkarg w;
1566 error = rttable_walkarg_create(&w, op, arg);
1571 for (i = 1; i <= AF_MAX; i++) {
1572 if (rt_tables[mycpuid][i] != NULL && (af == 0 || af == i)) {
1576 struct netmsg_rttable_walk nmsg;
1578 netmsg_init(&nmsg.base, NULL,
1579 &curthread->td_msgport, 0,
1580 rttable_walk_dispatch);
/* Run the walk in the netisr thread of this CPU. */
1586 error = lwkt_domsg(netisr_cpuport(mycpuid),
1587 &nmsg.base.lmsg, 0);
/* EJUSTRETURN means "buffer full, more to come" — not a failure. */
1588 if (error && error != EJUSTRETURN)
1591 if (req != NULL && w.w_buflen > 0) {
1594 error1 = SYSCTL_OUT(req, w.w_buf,
1601 if (error == 0) /* done */
1607 rttable_walkarg_destroy(&w);
/*
 * Sysctl handler for the net.route node.
 *
 * The name vector carries the address family, the operation and its
 * argument (extraction not visible here — presumably name[0..2];
 * verify against the full source), with an optional fourth element
 * selecting the CPU whose per-CPU route table is dumped.  The thread
 * is migrated to the target CPU for the dump and migrated back before
 * returning.
 */
1612 sysctl_rtsock(SYSCTL_HANDLER_ARGS)
1614 int *name = (int *)arg1;
1615 u_int namelen = arg2;
/* Accept exactly 3 mandatory elements plus the optional cpu element. */
1625 if (namelen != 3 && namelen != 4)
1628 bzero(&w, sizeof w);
1634 * Optional third argument specifies cpu, used primarily for
1635 * debugging the route table.
/* Reject an out-of-range cpu index. */
1638 if (name[3] < 0 || name[3] >= netisr_ncpus)
1643 * Target cpu is not specified, use cpu0 then, so that
1644 * the result set will be relatively stable.
/* Run the dump on the chosen CPU's route table. */
1649 lwkt_migratecpu(cpu);
1654 error = sysctl_rttable(af, w.w_req, w.w_op, w.w_arg);
1658 error = sysctl_iflist(af, &w);
/* Free the scratch message buffer grown by the walk, if any. */
1661 if (w.w_tmem != NULL)
1662 kfree(w.w_tmem, M_RTABLE);
/* Always migrate back to the CPU we started on. */
1664 lwkt_migratecpu(origcpu);
/* Read-only sysctl node net.PF_ROUTE, served by sysctl_rtsock(). */
1668 SYSCTL_NODE(_net, PF_ROUTE, routetable, CTLFLAG_RD, sysctl_rtsock, "");
1671 * Definitions of protocols supported in the ROUTE domain.
/* Forward declaration: routesw[] below references &routedomain. */
1674 static struct domain routedomain; /* or at least forward */
/*
 * Protocol switch for the ROUTE domain: a single SOCK_RAW protocol
 * delivering atomic, address-carrying messages through route_output()
 * with raw-socket control input and cpu0 control-port dispatch.
 */
1676 static struct protosw routesw[] = {
1678 .pr_type = SOCK_RAW,
1679 .pr_domain = &routedomain,
1681 .pr_flags = PR_ATOMIC|PR_ADDR,
1683 .pr_output = route_output,
1684 .pr_ctlinput = raw_ctlinput,
1685 .pr_ctloutput = NULL,
1686 .pr_ctlport = cpu0_ctlport,
1688 .pr_init = raw_init,
1689 .pr_usrreqs = &route_usrreqs
1693 static struct domain routedomain = {
1694 .dom_family = AF_ROUTE,
1695 .dom_name = "route",
1697 .dom_externalize = NULL,
1698 .dom_dispose = NULL,
1699 .dom_protosw = routesw,
1700 .dom_protoswNPROTOSW = &routesw[(sizeof routesw)/(sizeof routesw[0])],
1701 .dom_next = SLIST_ENTRY_INITIALIZER,
1702 .dom_rtattach = NULL,
1705 .dom_ifattach = NULL,
1706 .dom_ifdetach = NULL