/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_IP_TUNNELS_H
#define __NET_IP_TUNNELS_H 1

#include <linux/if_tunnel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/types.h>
#include <linux/u64_stats_sync.h>
#include <linux/bitops.h>

#include <net/dsfield.h>
#include <net/gro_cells.h>
#include <net/inet_ecn.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/lwtunnel.h>
#include <net/dst_cache.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#endif

/* Keep error state on tunnel for 30 sec */
#define IPTUNNEL_ERR_TIMEO      (30*HZ)

/* Used to memset ip_tunnel padding. */
#define IP_TUNNEL_KEY_SIZE      offsetofend(struct ip_tunnel_key, tp_dst)

/* Used to memset ipv4 address padding. */
#define IP_TUNNEL_KEY_IPV4_PAD  offsetofend(struct ip_tunnel_key, u.ipv4.dst)
#define IP_TUNNEL_KEY_IPV4_PAD_LEN \
        (sizeof_field(struct ip_tunnel_key, u) - \
         sizeof_field(struct ip_tunnel_key, u.ipv4))

#define __ipt_flag_op(op, ...) \
        op(__VA_ARGS__, __IP_TUNNEL_FLAG_NUM)

#define IP_TUNNEL_DECLARE_FLAGS(...) \
        __ipt_flag_op(DECLARE_BITMAP, __VA_ARGS__)

#define ip_tunnel_flags_zero(...)       __ipt_flag_op(bitmap_zero, __VA_ARGS__)
#define ip_tunnel_flags_copy(...)       __ipt_flag_op(bitmap_copy, __VA_ARGS__)
#define ip_tunnel_flags_and(...)        __ipt_flag_op(bitmap_and, __VA_ARGS__)
#define ip_tunnel_flags_or(...)         __ipt_flag_op(bitmap_or, __VA_ARGS__)

#define ip_tunnel_flags_empty(...) \
        __ipt_flag_op(bitmap_empty, __VA_ARGS__)
#define ip_tunnel_flags_intersect(...) \
        __ipt_flag_op(bitmap_intersects, __VA_ARGS__)
#define ip_tunnel_flags_subset(...) \
        __ipt_flag_op(bitmap_subset, __VA_ARGS__)

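/*
 * Usage sketch (illustrative, not part of the original header): tunnel flags
 * are ordinary kernel bitmaps sized by __IP_TUNNEL_FLAG_NUM, so the wrappers
 * above combine freely with the generic <linux/bitops.h> helpers, e.g.:
 *
 *      IP_TUNNEL_DECLARE_FLAGS(flags) = { };
 *
 *      __set_bit(IP_TUNNEL_KEY_BIT, flags);
 *      __set_bit(IP_TUNNEL_CSUM_BIT, flags);
 *      if (!ip_tunnel_flags_empty(flags))
 *              ip_tunnel_flags_zero(flags);
 */
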
struct ip_tunnel_key {
        __be64 tun_id;
        union {
                struct {
                        __be32 src;
                        __be32 dst;
                } ipv4;
                struct {
                        struct in6_addr src;
                        struct in6_addr dst;
                } ipv6;
        } u;
        IP_TUNNEL_DECLARE_FLAGS(tun_flags);
        __be32 label;           /* Flow Label for IPv6 */
        u32 nhid;
        u8 tos;                 /* TOS for IPv4, TC for IPv6 */
        u8 ttl;                 /* TTL for IPv4, HL for IPv6 */
        __be16 tp_src;
        __be16 tp_dst;
        __u8 flow_flags;
};

struct ip_tunnel_encap {
        u16 type;
        u16 flags;
        __be16 sport;
        __be16 dport;
};

/* Flags for ip_tunnel_info mode. */
#define IP_TUNNEL_INFO_TX       0x01    /* represents tx tunnel parameters */
#define IP_TUNNEL_INFO_IPV6     0x02    /* key contains IPv6 addresses */
#define IP_TUNNEL_INFO_BRIDGE   0x04    /* represents a bridged tunnel id */

/* Maximum tunnel options length. */
#define IP_TUNNEL_OPTS_MAX \
        GENMASK((sizeof_field(struct ip_tunnel_info, \
                              options_len) * BITS_PER_BYTE) - 1, 0)

#define ip_tunnel_info_opts(info) \
        _Generic(info, \
                 const struct ip_tunnel_info * : ((const void *)((info) + 1)), \
                 struct ip_tunnel_info * : ((void *)((info) + 1)) \
                )

struct ip_tunnel_info {
        struct ip_tunnel_key key;
        struct ip_tunnel_encap encap;
#ifdef CONFIG_DST_CACHE
        struct dst_cache dst_cache;
#endif
        u8 options_len;
        u8 mode;
};

/* 6rd prefix/relay information */
#ifdef CONFIG_IPV6_SIT_6RD
struct ip_tunnel_6rd_parm {
        struct in6_addr prefix;
        __be32 relay_prefix;
        u16 prefixlen;
        u16 relay_prefixlen;
};
#endif

struct ip_tunnel_prl_entry {
        struct ip_tunnel_prl_entry __rcu *next;
        __be32 addr;
        u16 flags;
        struct rcu_head rcu_head;
};

struct metadata_dst;

/* Kernel-side variant of ip_tunnel_parm */
struct ip_tunnel_parm_kern {
        char name[IFNAMSIZ];
        IP_TUNNEL_DECLARE_FLAGS(i_flags);
        IP_TUNNEL_DECLARE_FLAGS(o_flags);
        __be32 i_key;
        __be32 o_key;
        int link;
        struct iphdr iph;
};

struct ip_tunnel {
        struct ip_tunnel __rcu *next;
        struct hlist_node hash_node;

        struct net_device *dev;
        netdevice_tracker dev_tracker;

        struct net *net;        /* netns for packet i/o */

        unsigned long err_time; /* Time when the last ICMP error
                                 * arrived */
        int err_count;          /* Number of arrived ICMP errors */

        /* These fields are used only by GRE */
        u32 i_seqno;            /* The last seen seqno */
        atomic_t o_seqno;       /* The last output seqno */
        int tun_hlen;           /* Precalculated header length */

        /* These four fields are used only by ERSPAN */
        u32 index;              /* ERSPAN type II index */
        u8 erspan_ver;          /* ERSPAN version */
        u8 dir;                 /* ERSPAN direction */
        u16 hwid;               /* ERSPAN hardware ID */

        struct dst_cache dst_cache;

        struct ip_tunnel_parm_kern parms;

        int mlink;
        int encap_hlen;         /* Encap header length (FOU, GUE) */
        int hlen;               /* tun_hlen + encap_hlen */
        struct ip_tunnel_encap encap;

        /* for SIT */
#ifdef CONFIG_IPV6_SIT_6RD
        struct ip_tunnel_6rd_parm ip6rd;
#endif
        struct ip_tunnel_prl_entry __rcu *prl;  /* potential router list */
        unsigned int prl_count;                 /* # of entries in PRL */
        unsigned int ip_tnl_net_id;
        struct gro_cells gro_cells;
        __u32 fwmark;
        bool collect_md;
        bool ignore_df;
};

struct tnl_ptk_info {
        IP_TUNNEL_DECLARE_FLAGS(flags);
        __be16 proto;
        __be32 key;
        __be32 seq;
        int hdr_len;
};

#define PACKET_RCVD     0
#define PACKET_REJECT   1
#define PACKET_NEXT     2

#define IP_TNL_HASH_BITS   7
#define IP_TNL_HASH_SIZE   (1 << IP_TNL_HASH_BITS)

struct ip_tunnel_net {
        struct net_device *fb_tunnel_dev;
        struct rtnl_link_ops *rtnl_link_ops;
        struct hlist_head tunnels[IP_TNL_HASH_SIZE];
        struct ip_tunnel __rcu *collect_md_tun;
        int type;
};

static inline void ip_tunnel_set_options_present(unsigned long *flags)
{
        IP_TUNNEL_DECLARE_FLAGS(present) = { };

        __set_bit(IP_TUNNEL_GENEVE_OPT_BIT, present);
        __set_bit(IP_TUNNEL_VXLAN_OPT_BIT, present);
        __set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, present);
        __set_bit(IP_TUNNEL_GTP_OPT_BIT, present);
        __set_bit(IP_TUNNEL_PFCP_OPT_BIT, present);

        ip_tunnel_flags_or(flags, flags, present);
}

static inline void ip_tunnel_clear_options_present(unsigned long *flags)
{
        IP_TUNNEL_DECLARE_FLAGS(present) = { };

        __set_bit(IP_TUNNEL_GENEVE_OPT_BIT, present);
        __set_bit(IP_TUNNEL_VXLAN_OPT_BIT, present);
        __set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, present);
        __set_bit(IP_TUNNEL_GTP_OPT_BIT, present);
        __set_bit(IP_TUNNEL_PFCP_OPT_BIT, present);

        __ipt_flag_op(bitmap_andnot, flags, flags, present);
}

static inline bool ip_tunnel_is_options_present(const unsigned long *flags)
{
        IP_TUNNEL_DECLARE_FLAGS(present) = { };

        __set_bit(IP_TUNNEL_GENEVE_OPT_BIT, present);
        __set_bit(IP_TUNNEL_VXLAN_OPT_BIT, present);
        __set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, present);
        __set_bit(IP_TUNNEL_GTP_OPT_BIT, present);
        __set_bit(IP_TUNNEL_PFCP_OPT_BIT, present);

        return ip_tunnel_flags_intersect(flags, present);
}

static inline bool ip_tunnel_flags_is_be16_compat(const unsigned long *flags)
{
        IP_TUNNEL_DECLARE_FLAGS(supp) = { };

        bitmap_set(supp, 0, BITS_PER_TYPE(__be16));
        __set_bit(IP_TUNNEL_VTI_BIT, supp);

        return ip_tunnel_flags_subset(flags, supp);
}

static inline void ip_tunnel_flags_from_be16(unsigned long *dst, __be16 flags)
{
        ip_tunnel_flags_zero(dst);

        bitmap_write(dst, be16_to_cpu(flags), 0, BITS_PER_TYPE(__be16));
        __assign_bit(IP_TUNNEL_VTI_BIT, dst, flags & VTI_ISVTI);
}

static inline __be16 ip_tunnel_flags_to_be16(const unsigned long *flags)
{
        __be16 ret;

        ret = cpu_to_be16(bitmap_read(flags, 0, BITS_PER_TYPE(__be16)));
        if (test_bit(IP_TUNNEL_VTI_BIT, flags))
                ret |= VTI_ISVTI;

        return ret;
}

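/*
 * Conversion sketch (illustrative, not part of the original header): legacy
 * interfaces still use the packed __be16 flags (TUNNEL_KEY, TUNNEL_CSUM, ...),
 * so conversions at that boundary look roughly like:
 *
 *      IP_TUNNEL_DECLARE_FLAGS(flags) = { };
 *      __be16 legacy = 0;
 *
 *      ip_tunnel_flags_from_be16(flags, TUNNEL_KEY | TUNNEL_CSUM);
 *      if (ip_tunnel_flags_is_be16_compat(flags))
 *              legacy = ip_tunnel_flags_to_be16(flags);
 */
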
static inline void ip_tunnel_key_init(struct ip_tunnel_key *key,
                                      __be32 saddr, __be32 daddr,
                                      u8 tos, u8 ttl, __be32 label,
                                      __be16 tp_src, __be16 tp_dst,
                                      __be64 tun_id,
                                      const unsigned long *tun_flags)
{
        key->tun_id = tun_id;
        key->u.ipv4.src = saddr;
        key->u.ipv4.dst = daddr;
        memset((unsigned char *)key + IP_TUNNEL_KEY_IPV4_PAD,
               0, IP_TUNNEL_KEY_IPV4_PAD_LEN);
        key->tos = tos;
        key->ttl = ttl;
        key->label = label;
        ip_tunnel_flags_copy(key->tun_flags, tun_flags);

        /* For tunnel types on top of IPsec, the tp_src and tp_dst of the
         * upper tunnel are used.
         * E.g.: for GRE over IPsec, tp_src and tp_dst are zero.
         */
        key->tp_src = tp_src;
        key->tp_dst = tp_dst;

        /* Clear struct padding. */
        if (sizeof(*key) != IP_TUNNEL_KEY_SIZE)
                memset((unsigned char *)key + IP_TUNNEL_KEY_SIZE,
                       0, sizeof(*key) - IP_TUNNEL_KEY_SIZE);
}

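/*
 * Initialisation sketch (illustrative, not part of the original header): a
 * receive path that builds a metadata-mode key from an outer IPv4 header
 * would do something along these lines (tun_id being the caller's tunnel ID):
 *
 *      IP_TUNNEL_DECLARE_FLAGS(flags) = { };
 *      struct ip_tunnel_key key;
 *
 *      __set_bit(IP_TUNNEL_KEY_BIT, flags);
 *      ip_tunnel_key_init(&key, iph->saddr, iph->daddr, iph->tos, iph->ttl,
 *                         0, 0, 0, tun_id, flags);
 */
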
static inline bool
ip_tunnel_dst_cache_usable(const struct sk_buff *skb,
                           const struct ip_tunnel_info *info)
{
        if (skb->mark)
                return false;

        return !info || !test_bit(IP_TUNNEL_NOCACHE_BIT, info->key.tun_flags);
}

static inline unsigned short ip_tunnel_info_af(const struct ip_tunnel_info
                                               *tun_info)
{
        return tun_info->mode & IP_TUNNEL_INFO_IPV6 ? AF_INET6 : AF_INET;
}

static inline __be64 key32_to_tunnel_id(__be32 key)
{
#ifdef __BIG_ENDIAN
        return (__force __be64)key;
#else
        return (__force __be64)((__force u64)key << 32);
#endif
}

/* Returns the least-significant 32 bits of a __be64. */
static inline __be32 tunnel_id_to_key32(__be64 tun_id)
{
#ifdef __BIG_ENDIAN
        return (__force __be32)tun_id;
#else
        return (__force __be32)((__force u64)tun_id >> 32);
#endif
}

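/*
 * Illustrative note (not part of the original header): a 32-bit VNI-style
 * key and the 64-bit tunnel ID round-trip losslessly through this pair:
 *
 *      __be64 tun_id = key32_to_tunnel_id(vni);
 *
 *      WARN_ON(tunnel_id_to_key32(tun_id) != vni);
 */
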
#ifdef CONFIG_INET

static inline void ip_tunnel_init_flow(struct flowi4 *fl4,
                                       int proto,
                                       __be32 daddr, __be32 saddr,
                                       __be32 key, __u8 tos,
                                       struct net *net, int oif,
                                       __u32 mark, __u32 tun_inner_hash,
                                       __u8 flow_flags)
{
        memset(fl4, 0, sizeof(*fl4));

        if (oif) {
                fl4->flowi4_l3mdev = l3mdev_master_upper_ifindex_by_index_rcu(net, oif);
                /* Legacy VRF/l3mdev use case */
                fl4->flowi4_oif = fl4->flowi4_l3mdev ? 0 : oif;
        }

        fl4->daddr = daddr;
        fl4->saddr = saddr;
        fl4->flowi4_tos = tos;
        fl4->flowi4_proto = proto;
        fl4->fl4_gre_key = key;
        fl4->flowi4_mark = mark;
        fl4->flowi4_multipath_hash = tun_inner_hash;
        fl4->flowi4_flags = flow_flags;
}

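/*
 * Lookup sketch (illustrative, not part of the original header): a transmit
 * path typically fills the flow key from an ip_tunnel_key and resolves an
 * IPv4 route, roughly:
 *
 *      struct flowi4 fl4;
 *      struct rtable *rt;
 *
 *      ip_tunnel_init_flow(&fl4, IPPROTO_GRE, key->u.ipv4.dst,
 *                          key->u.ipv4.src, tunnel_id_to_key32(key->tun_id),
 *                          key->tos, dev_net(dev), 0, skb->mark, 0, 0);
 *      rt = ip_route_output_key(dev_net(dev), &fl4);
 *      if (IS_ERR(rt))
 *              goto tx_error;
 */
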
int ip_tunnel_init(struct net_device *dev);
void ip_tunnel_uninit(struct net_device *dev);
void ip_tunnel_dellink(struct net_device *dev, struct list_head *head);
struct net *ip_tunnel_get_link_net(const struct net_device *dev);
int ip_tunnel_get_iflink(const struct net_device *dev);
int ip_tunnel_init_net(struct net *net, unsigned int ip_tnl_net_id,
                       struct rtnl_link_ops *ops, char *devname);

void ip_tunnel_delete_nets(struct list_head *list_net, unsigned int id,
                           struct rtnl_link_ops *ops,
                           struct list_head *dev_to_kill);

void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
                    const struct iphdr *tnl_params, const u8 protocol);
void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
                       const u8 proto, int tunnel_hlen);
int ip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm_kern *p,
                  int cmd);
bool ip_tunnel_parm_from_user(struct ip_tunnel_parm_kern *kp,
                              const void __user *data);
bool ip_tunnel_parm_to_user(void __user *data, struct ip_tunnel_parm_kern *kp);
int ip_tunnel_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
                             void __user *data, int cmd);
int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);
int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);

struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
                                   int link, const unsigned long *flags,
                                   __be32 remote, __be32 local,
                                   __be32 key);

void ip_tunnel_md_udp_encap(struct sk_buff *skb, struct ip_tunnel_info *info);
int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
                  const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
                  bool log_ecn_error);
int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
                         struct ip_tunnel_parm_kern *p, __u32 fwmark);
int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
                      struct ip_tunnel_parm_kern *p, __u32 fwmark);
void ip_tunnel_setup(struct net_device *dev, unsigned int net_id);

bool ip_tunnel_netlink_encap_parms(struct nlattr *data[],
                                   struct ip_tunnel_encap *encap);

void ip_tunnel_netlink_parms(struct nlattr *data[],
                             struct ip_tunnel_parm_kern *parms);

extern const struct header_ops ip_tunnel_header_ops;
__be16 ip_tunnel_parse_protocol(const struct sk_buff *skb);

struct ip_tunnel_encap_ops {
        size_t (*encap_hlen)(struct ip_tunnel_encap *e);
        int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
                            u8 *protocol, struct flowi4 *fl4);
        int (*err_handler)(struct sk_buff *skb, u32 info);
};

#define MAX_IPTUN_ENCAP_OPS 8

extern const struct ip_tunnel_encap_ops __rcu *
                iptun_encaps[MAX_IPTUN_ENCAP_OPS];

int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *op,
                            unsigned int num);
int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op,
                            unsigned int num);

int ip_tunnel_encap_setup(struct ip_tunnel *t,
                          struct ip_tunnel_encap *ipencap);

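/*
 * Registration sketch (illustrative, not part of the original header): a UDP
 * encapsulation such as FOU/GUE fills one ip_tunnel_encap_ops and registers
 * it under its TUNNEL_ENCAP_* type, along the lines of:
 *
 *      static const struct ip_tunnel_encap_ops fou_iptun_ops = {
 *              .encap_hlen     = fou_encap_hlen,
 *              .build_header   = fou_build_header,
 *              .err_handler    = gue_err,
 *      };
 *
 *      err = ip_tunnel_encap_add_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
 */
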
static inline bool pskb_inet_may_pull(struct sk_buff *skb)
{
        int nhlen;

        switch (skb->protocol) {
#if IS_ENABLED(CONFIG_IPV6)
        case htons(ETH_P_IPV6):
                nhlen = sizeof(struct ipv6hdr);
                break;
#endif
        case htons(ETH_P_IP):
                nhlen = sizeof(struct iphdr);
                break;
        default:
                nhlen = 0;
        }

        return pskb_network_may_pull(skb, nhlen);
}

/* Variant of pskb_inet_may_pull() that also handles a possible VLAN header
 * in front of the network header.
 */
static inline bool skb_vlan_inet_prepare(struct sk_buff *skb)
{
        int nhlen = 0, maclen = ETH_HLEN;
        __be16 type = skb->protocol;

        /* Essentially this is skb_protocol(skb, true), and we also
         * obtain the MAC length.
         */
        if (eth_type_vlan(type))
                type = __vlan_get_protocol(skb, type, &maclen);

        switch (type) {
#if IS_ENABLED(CONFIG_IPV6)
        case htons(ETH_P_IPV6):
                nhlen = sizeof(struct ipv6hdr);
                break;
#endif
        case htons(ETH_P_IP):
                nhlen = sizeof(struct iphdr);
                break;
        }
        /* For ETH_P_IPV6/ETH_P_IP we make sure to pull
         * a base network header in skb->head.
         */
        if (!pskb_may_pull(skb, maclen + nhlen))
                return false;

        skb_set_network_header(skb, maclen);
        return true;
}

static inline int ip_encap_hlen(struct ip_tunnel_encap *e)
{
        const struct ip_tunnel_encap_ops *ops;
        int hlen = -EINVAL;

        if (e->type == TUNNEL_ENCAP_NONE)
                return 0;

        if (e->type >= MAX_IPTUN_ENCAP_OPS)
                return -EINVAL;

        rcu_read_lock();
        ops = rcu_dereference(iptun_encaps[e->type]);
        if (likely(ops && ops->encap_hlen))
                hlen = ops->encap_hlen(e);
        rcu_read_unlock();

        return hlen;
}

static inline int ip_tunnel_encap(struct sk_buff *skb,
                                  struct ip_tunnel_encap *e,
                                  u8 *protocol, struct flowi4 *fl4)
{
        const struct ip_tunnel_encap_ops *ops;
        int ret = -EINVAL;

        if (e->type == TUNNEL_ENCAP_NONE)
                return 0;

        if (e->type >= MAX_IPTUN_ENCAP_OPS)
                return -EINVAL;

        rcu_read_lock();
        ops = rcu_dereference(iptun_encaps[e->type]);
        if (likely(ops && ops->build_header))
                ret = ops->build_header(skb, e, protocol, fl4);
        rcu_read_unlock();

        return ret;
}

/* Extract dsfield from inner protocol */
static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph,
                                       const struct sk_buff *skb)
{
        __be16 payload_protocol = skb_protocol(skb, true);

        if (payload_protocol == htons(ETH_P_IP))
                return iph->tos;
        else if (payload_protocol == htons(ETH_P_IPV6))
                return ipv6_get_dsfield((const struct ipv6hdr *)iph);
        else
                return 0;
}

static inline __be32 ip_tunnel_get_flowlabel(const struct iphdr *iph,
                                             const struct sk_buff *skb)
{
        __be16 payload_protocol = skb_protocol(skb, true);

        if (payload_protocol == htons(ETH_P_IPV6))
                return ip6_flowlabel((const struct ipv6hdr *)iph);
        else
                return 0;
}

static inline u8 ip_tunnel_get_ttl(const struct iphdr *iph,
                                   const struct sk_buff *skb)
{
        __be16 payload_protocol = skb_protocol(skb, true);

        if (payload_protocol == htons(ETH_P_IP))
                return iph->ttl;
        else if (payload_protocol == htons(ETH_P_IPV6))
                return ((const struct ipv6hdr *)iph)->hop_limit;
        else
                return 0;
}

/* Propagate ECN bits out */
static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
                                     const struct sk_buff *skb)
{
        u8 inner = ip_tunnel_get_dsfield(iph, skb);

        return INET_ECN_encapsulate(tos, inner);
}

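/*
 * Encapsulation sketch (illustrative, not part of the original header): an
 * IPv4 transmit path derives the outer TOS and TTL from the inner headers
 * and the configured key, roughly:
 *
 *      u8 tos = ip_tunnel_ecn_encap(key->tos, inner_iph, skb);
 *      u8 ttl = key->ttl ?: ip_tunnel_get_ttl(inner_iph, skb);
 */
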
int __iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
                           __be16 inner_proto, bool raw_proto, bool xnet);

static inline int iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
                                       __be16 inner_proto, bool xnet)
{
        return __iptunnel_pull_header(skb, hdr_len, inner_proto, false, xnet);
}

void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
                   __be32 src, __be32 dst, u8 proto,
                   u8 tos, u8 ttl, __be16 df, bool xnet);
struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
                                             gfp_t flags);
int skb_tunnel_check_pmtu(struct sk_buff *skb, struct dst_entry *encap_dst,
                          int headroom, bool reply);

int iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask);

static inline int iptunnel_pull_offloads(struct sk_buff *skb)
{
        if (skb_is_gso(skb)) {
                int err;

                err = skb_unclone(skb, GFP_ATOMIC);
                if (unlikely(err))
                        return err;
                skb_shinfo(skb)->gso_type &= ~(NETIF_F_GSO_ENCAP_ALL >>
                                               NETIF_F_GSO_SHIFT);
        }

        skb->encapsulation = 0;
        return 0;
}

static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len)
{
        if (pkt_len > 0) {
                struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats);

                u64_stats_update_begin(&tstats->syncp);
                u64_stats_add(&tstats->tx_bytes, pkt_len);
                u64_stats_inc(&tstats->tx_packets);
                u64_stats_update_end(&tstats->syncp);
                put_cpu_ptr(tstats);
                return;
        }

        if (pkt_len < 0) {
                DEV_STATS_INC(dev, tx_errors);
                DEV_STATS_INC(dev, tx_aborted_errors);
        } else {
                DEV_STATS_INC(dev, tx_dropped);
        }
}

static inline void ip_tunnel_info_opts_get(void *to,
                                           const struct ip_tunnel_info *info)
{
        memcpy(to, info + 1, info->options_len);
}

static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
                                           const void *from, int len,
                                           const unsigned long *flags)
{
        info->options_len = len;
        if (len > 0) {
                memcpy(ip_tunnel_info_opts(info), from, len);
                ip_tunnel_flags_or(info->key.tun_flags, info->key.tun_flags,
                                   flags);
        }
}

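/*
 * Options sketch (illustrative, not part of the original header): a driver
 * that parsed, say, Geneve options from a received packet stores them behind
 * the ip_tunnel_info and flags their presence in one call:
 *
 *      IP_TUNNEL_DECLARE_FLAGS(flags) = { };
 *
 *      __set_bit(IP_TUNNEL_GENEVE_OPT_BIT, flags);
 *      ip_tunnel_info_opts_set(&tun_dst->u.tun_info, opts, opts_len, flags);
 */
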
static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
{
        return (struct ip_tunnel_info *)lwtstate->data;
}

DECLARE_STATIC_KEY_FALSE(ip_tunnel_metadata_cnt);

/* Returns > 0 if metadata should be collected */
static inline int ip_tunnel_collect_metadata(void)
{
        return static_branch_unlikely(&ip_tunnel_metadata_cnt);
}

void __init ip_tunnel_core_init(void);

void ip_tunnel_need_metadata(void);
void ip_tunnel_unneed_metadata(void);

#else /* CONFIG_INET */

static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
{
        return NULL;
}

static inline void ip_tunnel_need_metadata(void)
{
}

static inline void ip_tunnel_unneed_metadata(void)
{
}

static inline void ip_tunnel_info_opts_get(void *to,
                                           const struct ip_tunnel_info *info)
{
}

static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
                                           const void *from, int len,
                                           const unsigned long *flags)
{
        info->options_len = 0;
}

#endif /* CONFIG_INET */

#endif /* __NET_IP_TUNNELS_H */