tcp/dccp: install syn_recv requests into ehash table
[linux.git] / net / ipv4 / tcp_ipv4.c
1 /*
2  * INET         An implementation of the TCP/IP protocol suite for the LINUX
3  *              operating system.  INET is implemented using the  BSD Socket
4  *              interface as the means of communication with the user level.
5  *
6  *              Implementation of the Transmission Control Protocol(TCP).
7  *
8  *              IPv4 specific functions
9  *
10  *
11  *              code split from:
12  *              linux/ipv4/tcp.c
13  *              linux/ipv4/tcp_input.c
14  *              linux/ipv4/tcp_output.c
15  *
16  *              See tcp.c for author information
17  *
18  *      This program is free software; you can redistribute it and/or
19  *      modify it under the terms of the GNU General Public License
20  *      as published by the Free Software Foundation; either version
21  *      2 of the License, or (at your option) any later version.
22  */
23
24 /*
25  * Changes:
26  *              David S. Miller :       New socket lookup architecture.
27  *                                      This code is dedicated to John Dyson.
28  *              David S. Miller :       Change semantics of established hash,
29  *                                      half is devoted to TIME_WAIT sockets
30  *                                      and the rest go in the other half.
31  *              Andi Kleen :            Add support for syncookies and fixed
32  *                                      some bugs: ip options weren't passed to
33  *                                      the TCP layer, missed a check for an
34  *                                      ACK bit.
35  *              Andi Kleen :            Implemented fast path mtu discovery.
36  *                                      Fixed many serious bugs in the
37  *                                      request_sock handling and moved
38  *                                      most of it into the af independent code.
39  *                                      Added tail drop and some other bugfixes.
40  *                                      Added new listen semantics.
41  *              Mike McLagan    :       Routing by source
42  *      Juan Jose Ciarlante:            ip_dynaddr bits
43  *              Andi Kleen:             various fixes.
44  *      Vitaly E. Lavrov        :       Transparent proxy revived after year
45  *                                      coma.
46  *      Andi Kleen              :       Fix new listen.
47  *      Andi Kleen              :       Fix accept error reporting.
48  *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
49  *      Alexey Kuznetsov                allow both IPv4 and IPv6 sockets to bind
50  *                                      a single port at the same time.
51  */
52
53 #define pr_fmt(fmt) "TCP: " fmt
54
55 #include <linux/bottom_half.h>
56 #include <linux/types.h>
57 #include <linux/fcntl.h>
58 #include <linux/module.h>
59 #include <linux/random.h>
60 #include <linux/cache.h>
61 #include <linux/jhash.h>
62 #include <linux/init.h>
63 #include <linux/times.h>
64 #include <linux/slab.h>
65
66 #include <net/net_namespace.h>
67 #include <net/icmp.h>
68 #include <net/inet_hashtables.h>
69 #include <net/tcp.h>
70 #include <net/transp_v6.h>
71 #include <net/ipv6.h>
72 #include <net/inet_common.h>
73 #include <net/timewait_sock.h>
74 #include <net/xfrm.h>
75 #include <net/secure_seq.h>
76 #include <net/tcp_memcontrol.h>
77 #include <net/busy_poll.h>
78
79 #include <linux/inet.h>
80 #include <linux/ipv6.h>
81 #include <linux/stddef.h>
82 #include <linux/proc_fs.h>
83 #include <linux/seq_file.h>
84
85 #include <linux/crypto.h>
86 #include <linux/scatterlist.h>
87
88 int sysctl_tcp_tw_reuse __read_mostly;
89 int sysctl_tcp_low_latency __read_mostly;
90 EXPORT_SYMBOL(sysctl_tcp_low_latency);
91
92 #ifdef CONFIG_TCP_MD5SIG
93 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
94                                __be32 daddr, __be32 saddr, const struct tcphdr *th);
95 #endif
96
97 struct inet_hashinfo tcp_hashinfo;
98 EXPORT_SYMBOL(tcp_hashinfo);
99
100 static  __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
101 {
102         return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
103                                           ip_hdr(skb)->saddr,
104                                           tcp_hdr(skb)->dest,
105                                           tcp_hdr(skb)->source);
106 }
107
108 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
109 {
110         const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
111         struct tcp_sock *tp = tcp_sk(sk);
112
113         /* With PAWS, it is safe from the viewpoint
114            of data integrity. Even without PAWS it is safe provided sequence
115            spaces do not overlap, i.e. at data rates <= 80 Mbit/sec.
116
117            Actually, the idea is close to VJ's: only the timestamp cache is
118            held not per host but per port pair, and the TW bucket is used as
119            the state holder.
120
121            If the TW bucket has already been destroyed we fall back to VJ's
122            scheme and use the initial timestamp retrieved from the peer table.
123          */
124         if (tcptw->tw_ts_recent_stamp &&
125             (!twp || (sysctl_tcp_tw_reuse &&
126                              get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
127                 tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
128                 if (tp->write_seq == 0)
129                         tp->write_seq = 1;
130                 tp->rx_opt.ts_recent       = tcptw->tw_ts_recent;
131                 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
132                 sock_hold(sktw);
133                 return 1;
134         }
135
136         return 0;
137 }
138 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
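/* [Editor's illustrative sketch, not part of tcp_ipv4.c] The reuse test in
 * tcp_twsk_unique() above reduces to a small predicate: a TIME-WAIT port pair
 * may be reused when a timestamp was cached and either the caller does not
 * require uniqueness (twp == NULL) or tcp_tw_reuse is set and the cached
 * timestamp is more than one second old.  The function name and standalone
 * framing below are hypothetical; the block is kept under #if 0.
 */
#if 0
static inline int tw_reuse_allowed(int tw_reuse_sysctl, long now_secs,
				   long tw_ts_recent_stamp,
				   int caller_needs_unique)
{
	if (!tw_ts_recent_stamp)	/* no cached timestamp: no shortcut */
		return 0;
	if (!caller_needs_unique)	/* the twp == NULL case above */
		return 1;
	return tw_reuse_sysctl && now_secs - tw_ts_recent_stamp > 1;
}
#endif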
139
140 /* This will initiate an outgoing connection. */
141 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
142 {
143         struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
144         struct inet_sock *inet = inet_sk(sk);
145         struct tcp_sock *tp = tcp_sk(sk);
146         __be16 orig_sport, orig_dport;
147         __be32 daddr, nexthop;
148         struct flowi4 *fl4;
149         struct rtable *rt;
150         int err;
151         struct ip_options_rcu *inet_opt;
152
153         if (addr_len < sizeof(struct sockaddr_in))
154                 return -EINVAL;
155
156         if (usin->sin_family != AF_INET)
157                 return -EAFNOSUPPORT;
158
159         nexthop = daddr = usin->sin_addr.s_addr;
160         inet_opt = rcu_dereference_protected(inet->inet_opt,
161                                              sock_owned_by_user(sk));
162         if (inet_opt && inet_opt->opt.srr) {
163                 if (!daddr)
164                         return -EINVAL;
165                 nexthop = inet_opt->opt.faddr;
166         }
167
168         orig_sport = inet->inet_sport;
169         orig_dport = usin->sin_port;
170         fl4 = &inet->cork.fl.u.ip4;
171         rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
172                               RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
173                               IPPROTO_TCP,
174                               orig_sport, orig_dport, sk);
175         if (IS_ERR(rt)) {
176                 err = PTR_ERR(rt);
177                 if (err == -ENETUNREACH)
178                         IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
179                 return err;
180         }
181
182         if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
183                 ip_rt_put(rt);
184                 return -ENETUNREACH;
185         }
186
187         if (!inet_opt || !inet_opt->opt.srr)
188                 daddr = fl4->daddr;
189
190         if (!inet->inet_saddr)
191                 inet->inet_saddr = fl4->saddr;
192         sk_rcv_saddr_set(sk, inet->inet_saddr);
193
194         if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
195                 /* Reset inherited state */
196                 tp->rx_opt.ts_recent       = 0;
197                 tp->rx_opt.ts_recent_stamp = 0;
198                 if (likely(!tp->repair))
199                         tp->write_seq      = 0;
200         }
201
202         if (tcp_death_row.sysctl_tw_recycle &&
203             !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
204                 tcp_fetch_timewait_stamp(sk, &rt->dst);
205
206         inet->inet_dport = usin->sin_port;
207         sk_daddr_set(sk, daddr);
208
209         inet_csk(sk)->icsk_ext_hdr_len = 0;
210         if (inet_opt)
211                 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
212
213         tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
214
215         /* Socket identity is still unknown (sport may be zero).
216          * However we set the state to SYN-SENT and, without releasing the
217          * socket lock, select a source port, enter ourselves into the hash
218          * tables and complete initialization after this.
219          */
220         tcp_set_state(sk, TCP_SYN_SENT);
221         err = inet_hash_connect(&tcp_death_row, sk);
222         if (err)
223                 goto failure;
224
225         sk_set_txhash(sk);
226
227         rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
228                                inet->inet_sport, inet->inet_dport, sk);
229         if (IS_ERR(rt)) {
230                 err = PTR_ERR(rt);
231                 rt = NULL;
232                 goto failure;
233         }
234         /* OK, now commit destination to socket.  */
235         sk->sk_gso_type = SKB_GSO_TCPV4;
236         sk_setup_caps(sk, &rt->dst);
237
238         if (!tp->write_seq && likely(!tp->repair))
239                 tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
240                                                            inet->inet_daddr,
241                                                            inet->inet_sport,
242                                                            usin->sin_port);
243
244         inet->inet_id = tp->write_seq ^ jiffies;
245
246         err = tcp_connect(sk);
247
248         rt = NULL;
249         if (err)
250                 goto failure;
251
252         return 0;
253
254 failure:
255         /*
256          * This unhashes the socket and releases the local port,
257          * if necessary.
258          */
259         tcp_set_state(sk, TCP_CLOSE);
260         ip_rt_put(rt);
261         sk->sk_route_caps = 0;
262         inet->inet_dport = 0;
263         return err;
264 }
265 EXPORT_SYMBOL(tcp_v4_connect);
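/* [Editor's illustrative sketch, not part of tcp_ipv4.c] A plain userspace
 * active open is what drives tcp_v4_connect() above: connect() on an
 * unconnected TCP socket ends up here, picking a route and source port and
 * then sending the SYN via tcp_connect().  The address, port and function
 * name below are placeholders; ordinary libc headers are assumed and the
 * block is kept under #if 0 so it is clearly not kernel code.
 */
#if 0
#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int example_active_open(void)
{
	struct sockaddr_in dst;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;

	memset(&dst, 0, sizeof(dst));
	dst.sin_family = AF_INET;
	dst.sin_port = htons(80);				/* placeholder port */
	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);		/* placeholder address */

	/* Route lookup, inet_hash_connect() and the initial SYN all happen
	 * inside this call (or asynchronously for a non-blocking socket). */
	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}
#endif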
266
267 /*
268  * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
269  * It can be called through tcp_release_cb() if socket was owned by user
270  * at the time tcp_v4_err() was called to handle ICMP message.
271  */
272 void tcp_v4_mtu_reduced(struct sock *sk)
273 {
274         struct dst_entry *dst;
275         struct inet_sock *inet = inet_sk(sk);
276         u32 mtu = tcp_sk(sk)->mtu_info;
277
278         dst = inet_csk_update_pmtu(sk, mtu);
279         if (!dst)
280                 return;
281
282         /* Something is about to go wrong... Remember the soft error
283          * in case this connection is not able to recover.
284          */
285         if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
286                 sk->sk_err_soft = EMSGSIZE;
287
288         mtu = dst_mtu(dst);
289
290         if (inet->pmtudisc != IP_PMTUDISC_DONT &&
291             ip_sk_accept_pmtu(sk) &&
292             inet_csk(sk)->icsk_pmtu_cookie > mtu) {
293                 tcp_sync_mss(sk, mtu);
294
295                 /* Resend the TCP packet because it's
296                  * clear that the old packet has been
297                  * dropped. This is the new "fast" path mtu
298                  * discovery.
299                  */
300                 tcp_simple_retransmit(sk);
301         } /* else let the usual retransmit timer handle it */
302 }
303 EXPORT_SYMBOL(tcp_v4_mtu_reduced);
304
305 static void do_redirect(struct sk_buff *skb, struct sock *sk)
306 {
307         struct dst_entry *dst = __sk_dst_check(sk, 0);
308
309         if (dst)
310                 dst->ops->redirect(dst, sk, skb);
311 }
312
313
314 /* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
315 void tcp_req_err(struct sock *sk, u32 seq)
316 {
317         struct request_sock *req = inet_reqsk(sk);
318         struct net *net = sock_net(sk);
319
320         /* ICMPs are not backlogged, hence we cannot get
321          * an established socket here.
322          */
323         WARN_ON(req->sk);
324
325         if (seq != tcp_rsk(req)->snt_isn) {
326                 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
327                 reqsk_put(req);
328         } else {
329                 /*
330                  * Still in SYN_RECV, just remove it silently.
331                  * There is no good way to pass the error to the newly
332                  * created socket, and POSIX does not want network
333                  * errors returned from accept().
334                  */
335                 NET_INC_STATS_BH(net, LINUX_MIB_LISTENDROPS);
336                 inet_csk_reqsk_queue_drop(req->rsk_listener, req);
337         }
338 }
339 EXPORT_SYMBOL(tcp_req_err);
340
341 /*
342  * This routine is called by the ICMP module when it gets some
343  * sort of error condition.  If err < 0 then the socket should
344  * be closed and the error returned to the user.  If err > 0
345  * it's just the icmp type << 8 | icmp code.  After adjustment,
346  * the header points to the first 8 bytes of the TCP header.  We need
347  * to find the appropriate port.
348  *
349  * The locking strategy used here is very "optimistic". When
350  * someone else accesses the socket the ICMP is just dropped
351  * and for some paths there is no check at all.
352  * A more general error queue to queue errors for later handling
353  * is probably better.
354  *
355  */
356
357 void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
358 {
359         const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
360         struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
361         struct inet_connection_sock *icsk;
362         struct tcp_sock *tp;
363         struct inet_sock *inet;
364         const int type = icmp_hdr(icmp_skb)->type;
365         const int code = icmp_hdr(icmp_skb)->code;
366         struct sock *sk;
367         struct sk_buff *skb;
368         struct request_sock *fastopen;
369         __u32 seq, snd_una;
370         __u32 remaining;
371         int err;
372         struct net *net = dev_net(icmp_skb->dev);
373
374         sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
375                                        th->dest, iph->saddr, ntohs(th->source),
376                                        inet_iif(icmp_skb));
377         if (!sk) {
378                 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
379                 return;
380         }
381         if (sk->sk_state == TCP_TIME_WAIT) {
382                 inet_twsk_put(inet_twsk(sk));
383                 return;
384         }
385         seq = ntohl(th->seq);
386         if (sk->sk_state == TCP_NEW_SYN_RECV)
387                 return tcp_req_err(sk, seq);
388
389         bh_lock_sock(sk);
390         /* If too many ICMPs get dropped on busy
391          * servers this needs to be solved differently.
392          * We do take care of the PMTU discovery (RFC 1191) special case:
393          * we can receive locally generated ICMP messages while the socket is held.
394          */
395         if (sock_owned_by_user(sk)) {
396                 if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
397                         NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
398         }
399         if (sk->sk_state == TCP_CLOSE)
400                 goto out;
401
402         if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
403                 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
404                 goto out;
405         }
406
407         icsk = inet_csk(sk);
408         tp = tcp_sk(sk);
409         /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
410         fastopen = tp->fastopen_rsk;
411         snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
412         if (sk->sk_state != TCP_LISTEN &&
413             !between(seq, snd_una, tp->snd_nxt)) {
414                 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
415                 goto out;
416         }
417
418         switch (type) {
419         case ICMP_REDIRECT:
420                 do_redirect(icmp_skb, sk);
421                 goto out;
422         case ICMP_SOURCE_QUENCH:
423                 /* Just silently ignore these. */
424                 goto out;
425         case ICMP_PARAMETERPROB:
426                 err = EPROTO;
427                 break;
428         case ICMP_DEST_UNREACH:
429                 if (code > NR_ICMP_UNREACH)
430                         goto out;
431
432                 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
433                         /* We are not interested in TCP_LISTEN and open_requests
434                          * (SYN-ACKs sent out by Linux are always < 576 bytes, so
435                          * they should go through unfragmented).
436                          */
437                         if (sk->sk_state == TCP_LISTEN)
438                                 goto out;
439
440                         tp->mtu_info = info;
441                         if (!sock_owned_by_user(sk)) {
442                                 tcp_v4_mtu_reduced(sk);
443                         } else {
444                                 if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
445                                         sock_hold(sk);
446                         }
447                         goto out;
448                 }
449
450                 err = icmp_err_convert[code].errno;
451                 /* check if icmp_skb allows revert of backoff
452                  * (see draft-zimmermann-tcp-lcd) */
453                 if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
454                         break;
455                 if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
456                     !icsk->icsk_backoff || fastopen)
457                         break;
458
459                 if (sock_owned_by_user(sk))
460                         break;
461
462                 icsk->icsk_backoff--;
463                 icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
464                                                TCP_TIMEOUT_INIT;
465                 icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
466
467                 skb = tcp_write_queue_head(sk);
468                 BUG_ON(!skb);
469
470                 remaining = icsk->icsk_rto -
471                             min(icsk->icsk_rto,
472                                 tcp_time_stamp - tcp_skb_timestamp(skb));
473
474                 if (remaining) {
475                         inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
476                                                   remaining, TCP_RTO_MAX);
477                 } else {
478                         /* RTO revert clocked out retransmission.
479                          * Will retransmit now */
480                         tcp_retransmit_timer(sk);
481                 }
482
483                 break;
484         case ICMP_TIME_EXCEEDED:
485                 err = EHOSTUNREACH;
486                 break;
487         default:
488                 goto out;
489         }
490
491         switch (sk->sk_state) {
492         case TCP_SYN_SENT:
493         case TCP_SYN_RECV:
494                 /* Only in fast or simultaneous open. If a fast open socket is
495                  * already accepted it is treated as a connected one below.
496                  */
497                 if (fastopen && !fastopen->sk)
498                         break;
499
500                 if (!sock_owned_by_user(sk)) {
501                         sk->sk_err = err;
502
503                         sk->sk_error_report(sk);
504
505                         tcp_done(sk);
506                 } else {
507                         sk->sk_err_soft = err;
508                 }
509                 goto out;
510         }
511
512         /* If we've already connected we will keep trying
513          * until we time out, or the user gives up.
514          *
515          * RFC 1122 4.2.3.9 allows only PROTO_UNREACH and PORT_UNREACH to be
516          * considered hard errors (well, FRAG_FAILED too,
517          * but it is obsoleted by PMTU discovery).
518          *
519          * Note that in the modern internet, where routing is unreliable
520          * and broken firewalls sit in every dark corner sending random
521          * errors ordered by their masters, even these two messages have finally
522          * lost their original sense (even Linux sends invalid PORT_UNREACHs).
523          *
524          * Now we are in compliance with RFCs.
525          *                                                      --ANK (980905)
526          */
527
528         inet = inet_sk(sk);
529         if (!sock_owned_by_user(sk) && inet->recverr) {
530                 sk->sk_err = err;
531                 sk->sk_error_report(sk);
532         } else  { /* Only an error on timeout */
533                 sk->sk_err_soft = err;
534         }
535
536 out:
537         bh_unlock_sock(sk);
538         sock_put(sk);
539 }
540
541 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
542 {
543         struct tcphdr *th = tcp_hdr(skb);
544
545         if (skb->ip_summed == CHECKSUM_PARTIAL) {
546                 th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
547                 skb->csum_start = skb_transport_header(skb) - skb->head;
548                 skb->csum_offset = offsetof(struct tcphdr, check);
549         } else {
550                 th->check = tcp_v4_check(skb->len, saddr, daddr,
551                                          csum_partial(th,
552                                                       th->doff << 2,
553                                                       skb->csum));
554         }
555 }
556
557 /* This routine computes an IPv4 TCP checksum. */
558 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
559 {
560         const struct inet_sock *inet = inet_sk(sk);
561
562         __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
563 }
564 EXPORT_SYMBOL(tcp_v4_send_check);
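/* [Editor's illustrative sketch, not part of tcp_ipv4.c] The two helpers
 * above either hand the pseudo-header sum to hardware (CHECKSUM_PARTIAL) or
 * finish the RFC 1071 one's-complement checksum in software.  The plain C
 * below shows the same arithmetic without the kernel's csum_* helpers;
 * function names are hypothetical, addresses are expected in network byte
 * order, and the 16-bit result still needs htons() before being stored in
 * th->check.
 */
#if 0
#include <stdint.h>
#include <string.h>

static uint32_t example_csum_add(uint32_t sum, const uint8_t *p, size_t len)
{
	while (len > 1) {			/* sum 16-bit big-endian words */
		sum += ((uint32_t)p[0] << 8) | p[1];
		p += 2;
		len -= 2;
	}
	if (len)				/* odd trailing byte, zero padded */
		sum += (uint32_t)p[0] << 8;
	return sum;
}

/* tcp_seg points at the TCP header (check field zeroed) plus payload. */
static uint16_t example_tcp_v4_csum(const uint8_t saddr[4], const uint8_t daddr[4],
				    const uint8_t *tcp_seg, size_t tcp_len)
{
	uint8_t pseudo[12];
	uint32_t sum;

	memcpy(pseudo, saddr, 4);		/* pseudo-header: saddr, daddr, */
	memcpy(pseudo + 4, daddr, 4);		/* zero, protocol, TCP length   */
	pseudo[8] = 0;
	pseudo[9] = 6;				/* IPPROTO_TCP */
	pseudo[10] = (uint8_t)(tcp_len >> 8);
	pseudo[11] = (uint8_t)tcp_len;

	sum = example_csum_add(0, pseudo, sizeof(pseudo));
	sum = example_csum_add(sum, tcp_seg, tcp_len);
	while (sum >> 16)			/* fold carries into 16 bits */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}
#endif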
565
566 /*
567  *      This routine will send an RST to the other tcp.
568  *
569  *      Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
570  *                    for the reset?
571  *      Answer: if a packet caused the RST, it is not for a socket
572  *              existing in our system; if it is matched to a socket,
573  *              it is just a duplicate segment or a bug in the other side's TCP.
574  *              So we build the reply based only on parameters
575  *              that arrived with the segment.
576  *      Exception: precedence violation. We do not implement it in any case.
577  */
578
579 static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
580 {
581         const struct tcphdr *th = tcp_hdr(skb);
582         struct {
583                 struct tcphdr th;
584 #ifdef CONFIG_TCP_MD5SIG
585                 __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
586 #endif
587         } rep;
588         struct ip_reply_arg arg;
589 #ifdef CONFIG_TCP_MD5SIG
590         struct tcp_md5sig_key *key;
591         const __u8 *hash_location = NULL;
592         unsigned char newhash[16];
593         int genhash;
594         struct sock *sk1 = NULL;
595 #endif
596         struct net *net;
597
598         /* Never send a reset in response to a reset. */
599         if (th->rst)
600                 return;
601
602         /* If sk is not NULL, it means we did a successful lookup and the
603          * incoming route had to be correct. prequeue might have dropped our dst.
604          */
605         if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
606                 return;
607
608         /* Swap the send and the receive. */
609         memset(&rep, 0, sizeof(rep));
610         rep.th.dest   = th->source;
611         rep.th.source = th->dest;
612         rep.th.doff   = sizeof(struct tcphdr) / 4;
613         rep.th.rst    = 1;
614
615         if (th->ack) {
616                 rep.th.seq = th->ack_seq;
617         } else {
618                 rep.th.ack = 1;
619                 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
620                                        skb->len - (th->doff << 2));
621         }
622
623         memset(&arg, 0, sizeof(arg));
624         arg.iov[0].iov_base = (unsigned char *)&rep;
625         arg.iov[0].iov_len  = sizeof(rep.th);
626
627         net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
628 #ifdef CONFIG_TCP_MD5SIG
629         hash_location = tcp_parse_md5sig_option(th);
630         if (!sk && hash_location) {
631                 /*
632                  * The active side is lost. Try to find the listening socket
633                  * through the source port, and then find the md5 key through
634                  * that listening socket. We do not lose security here:
635                  * the incoming packet is checked with the md5 hash of the found
636                  * key, and no RST is generated if the md5 hash doesn't match.
637                  */
638                 sk1 = __inet_lookup_listener(net,
639                                              &tcp_hashinfo, ip_hdr(skb)->saddr,
640                                              th->source, ip_hdr(skb)->daddr,
641                                              ntohs(th->source), inet_iif(skb));
642                 /* don't send rst if it can't find key */
643                 if (!sk1)
644                         return;
645                 rcu_read_lock();
646                 key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
647                                         &ip_hdr(skb)->saddr, AF_INET);
648                 if (!key)
649                         goto release_sk1;
650
651                 genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
652                 if (genhash || memcmp(hash_location, newhash, 16) != 0)
653                         goto release_sk1;
654         } else {
655                 key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
656                                              &ip_hdr(skb)->saddr,
657                                              AF_INET) : NULL;
658         }
659
660         if (key) {
661                 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
662                                    (TCPOPT_NOP << 16) |
663                                    (TCPOPT_MD5SIG << 8) |
664                                    TCPOLEN_MD5SIG);
665                 /* Update length and the length the header thinks exists */
666                 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
667                 rep.th.doff = arg.iov[0].iov_len / 4;
668
669                 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
670                                      key, ip_hdr(skb)->saddr,
671                                      ip_hdr(skb)->daddr, &rep.th);
672         }
673 #endif
674         arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
675                                       ip_hdr(skb)->saddr, /* XXX */
676                                       arg.iov[0].iov_len, IPPROTO_TCP, 0);
677         arg.csumoffset = offsetof(struct tcphdr, check) / 2;
678         arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
679         /* When the socket is gone, all binding information is lost and
680          * routing might fail in this case. No choice here: if we choose to force
681          * the input interface, we will misroute in case of an asymmetric route.
682          */
683         if (sk)
684                 arg.bound_dev_if = sk->sk_bound_dev_if;
685
686         arg.tos = ip_hdr(skb)->tos;
687         ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
688                               skb, &TCP_SKB_CB(skb)->header.h4.opt,
689                               ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
690                               &arg, arg.iov[0].iov_len);
691
692         TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
693         TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
694
695 #ifdef CONFIG_TCP_MD5SIG
696 release_sk1:
697         if (sk1) {
698                 rcu_read_unlock();
699                 sock_put(sk1);
700         }
701 #endif
702 }
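/* [Editor's illustrative sketch, not part of tcp_ipv4.c] The seq/ack choice a
 * few lines above follows the RFC 793 reset rules: if the offending segment
 * carried an ACK, the RST reuses that ack number as its own sequence number;
 * otherwise the RST acknowledges everything the segment occupied (SYN and FIN
 * each count as one sequence number, plus the payload bytes).  The helper
 * below is hypothetical and works in host byte order for clarity.
 */
#if 0
#include <stdint.h>

struct example_rst_numbers {
	uint32_t seq;		/* sequence number of the RST */
	uint32_t ack_seq;	/* ack number, valid only when ack != 0 */
	int ack;		/* does the RST carry an ACK? */
};

static struct example_rst_numbers
example_rst_for_segment(uint32_t seg_seq, uint32_t seg_ack, int has_ack,
			int syn, int fin, uint32_t payload_len)
{
	struct example_rst_numbers r = { 0, 0, 0 };

	if (has_ack) {
		r.seq = seg_ack;
	} else {
		r.ack = 1;
		r.ack_seq = seg_seq + syn + fin + payload_len;
	}
	return r;
}
#endif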
703
704 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
705    outside socket context, is certainly ugly. What can I do?
706  */
707
708 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
709                             u32 win, u32 tsval, u32 tsecr, int oif,
710                             struct tcp_md5sig_key *key,
711                             int reply_flags, u8 tos)
712 {
713         const struct tcphdr *th = tcp_hdr(skb);
714         struct {
715                 struct tcphdr th;
716                 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
717 #ifdef CONFIG_TCP_MD5SIG
718                            + (TCPOLEN_MD5SIG_ALIGNED >> 2)
719 #endif
720                         ];
721         } rep;
722         struct ip_reply_arg arg;
723         struct net *net = dev_net(skb_dst(skb)->dev);
724
725         memset(&rep.th, 0, sizeof(struct tcphdr));
726         memset(&arg, 0, sizeof(arg));
727
728         arg.iov[0].iov_base = (unsigned char *)&rep;
729         arg.iov[0].iov_len  = sizeof(rep.th);
730         if (tsecr) {
731                 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
732                                    (TCPOPT_TIMESTAMP << 8) |
733                                    TCPOLEN_TIMESTAMP);
734                 rep.opt[1] = htonl(tsval);
735                 rep.opt[2] = htonl(tsecr);
736                 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
737         }
738
739         /* Swap the send and the receive. */
740         rep.th.dest    = th->source;
741         rep.th.source  = th->dest;
742         rep.th.doff    = arg.iov[0].iov_len / 4;
743         rep.th.seq     = htonl(seq);
744         rep.th.ack_seq = htonl(ack);
745         rep.th.ack     = 1;
746         rep.th.window  = htons(win);
747
748 #ifdef CONFIG_TCP_MD5SIG
749         if (key) {
750                 int offset = (tsecr) ? 3 : 0;
751
752                 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
753                                           (TCPOPT_NOP << 16) |
754                                           (TCPOPT_MD5SIG << 8) |
755                                           TCPOLEN_MD5SIG);
756                 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
757                 rep.th.doff = arg.iov[0].iov_len/4;
758
759                 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
760                                     key, ip_hdr(skb)->saddr,
761                                     ip_hdr(skb)->daddr, &rep.th);
762         }
763 #endif
764         arg.flags = reply_flags;
765         arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
766                                       ip_hdr(skb)->saddr, /* XXX */
767                                       arg.iov[0].iov_len, IPPROTO_TCP, 0);
768         arg.csumoffset = offsetof(struct tcphdr, check) / 2;
769         if (oif)
770                 arg.bound_dev_if = oif;
771         arg.tos = tos;
772         ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
773                               skb, &TCP_SKB_CB(skb)->header.h4.opt,
774                               ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
775                               &arg, arg.iov[0].iov_len);
776
777         TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
778 }
779
780 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
781 {
782         struct inet_timewait_sock *tw = inet_twsk(sk);
783         struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
784
785         tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
786                         tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
787                         tcp_time_stamp + tcptw->tw_ts_offset,
788                         tcptw->tw_ts_recent,
789                         tw->tw_bound_dev_if,
790                         tcp_twsk_md5_key(tcptw),
791                         tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
792                         tw->tw_tos
793                         );
794
795         inet_twsk_put(tw);
796 }
797
798 static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
799                                   struct request_sock *req)
800 {
801         /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
802          * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
803          */
804         tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
805                         tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
806                         tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
807                         tcp_time_stamp,
808                         req->ts_recent,
809                         0,
810                         tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
811                                           AF_INET),
812                         inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
813                         ip_hdr(skb)->tos);
814 }
815
816 /*
817  *      Send a SYN-ACK after having received a SYN.
818  *      This still operates on a request_sock only, not on a big
819  *      socket.
820  */
821 static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
822                               struct flowi *fl,
823                               struct request_sock *req,
824                               u16 queue_mapping,
825                               struct tcp_fastopen_cookie *foc)
826 {
827         const struct inet_request_sock *ireq = inet_rsk(req);
828         struct flowi4 fl4;
829         int err = -1;
830         struct sk_buff *skb;
831
832         /* First, grab a route. */
833         if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
834                 return -1;
835
836         skb = tcp_make_synack(sk, dst, req, foc);
837
838         if (skb) {
839                 __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
840
841                 skb_set_queue_mapping(skb, queue_mapping);
842                 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
843                                             ireq->ir_rmt_addr,
844                                             ireq->opt);
845                 err = net_xmit_eval(err);
846         }
847
848         return err;
849 }
850
851 /*
852  *      IPv4 request_sock destructor.
853  */
854 static void tcp_v4_reqsk_destructor(struct request_sock *req)
855 {
856         kfree(inet_rsk(req)->opt);
857 }
858
859
860 #ifdef CONFIG_TCP_MD5SIG
861 /*
862  * RFC2385 MD5 checksumming requires a mapping of
863  * IP address->MD5 Key.
864  * We need to maintain these in the sk structure.
865  */
866
867 /* Find the Key structure for an address.  */
868 struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
869                                          const union tcp_md5_addr *addr,
870                                          int family)
871 {
872         const struct tcp_sock *tp = tcp_sk(sk);
873         struct tcp_md5sig_key *key;
874         unsigned int size = sizeof(struct in_addr);
875         const struct tcp_md5sig_info *md5sig;
876
877         /* caller either holds rcu_read_lock() or socket lock */
878         md5sig = rcu_dereference_check(tp->md5sig_info,
879                                        sock_owned_by_user(sk) ||
880                                        lockdep_is_held((spinlock_t *)&sk->sk_lock.slock));
881         if (!md5sig)
882                 return NULL;
883 #if IS_ENABLED(CONFIG_IPV6)
884         if (family == AF_INET6)
885                 size = sizeof(struct in6_addr);
886 #endif
887         hlist_for_each_entry_rcu(key, &md5sig->head, node) {
888                 if (key->family != family)
889                         continue;
890                 if (!memcmp(&key->addr, addr, size))
891                         return key;
892         }
893         return NULL;
894 }
895 EXPORT_SYMBOL(tcp_md5_do_lookup);
896
897 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
898                                          const struct sock *addr_sk)
899 {
900         const union tcp_md5_addr *addr;
901
902         addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
903         return tcp_md5_do_lookup(sk, addr, AF_INET);
904 }
905 EXPORT_SYMBOL(tcp_v4_md5_lookup);
906
907 /* This can be called on a newly created socket, from other files */
908 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
909                    int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
910 {
911         /* Add Key to the list */
912         struct tcp_md5sig_key *key;
913         struct tcp_sock *tp = tcp_sk(sk);
914         struct tcp_md5sig_info *md5sig;
915
916         key = tcp_md5_do_lookup(sk, addr, family);
917         if (key) {
918                 /* Pre-existing entry - just update that one. */
919                 memcpy(key->key, newkey, newkeylen);
920                 key->keylen = newkeylen;
921                 return 0;
922         }
923
924         md5sig = rcu_dereference_protected(tp->md5sig_info,
925                                            sock_owned_by_user(sk));
926         if (!md5sig) {
927                 md5sig = kmalloc(sizeof(*md5sig), gfp);
928                 if (!md5sig)
929                         return -ENOMEM;
930
931                 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
932                 INIT_HLIST_HEAD(&md5sig->head);
933                 rcu_assign_pointer(tp->md5sig_info, md5sig);
934         }
935
936         key = sock_kmalloc(sk, sizeof(*key), gfp);
937         if (!key)
938                 return -ENOMEM;
939         if (!tcp_alloc_md5sig_pool()) {
940                 sock_kfree_s(sk, key, sizeof(*key));
941                 return -ENOMEM;
942         }
943
944         memcpy(key->key, newkey, newkeylen);
945         key->keylen = newkeylen;
946         key->family = family;
947         memcpy(&key->addr, addr,
948                (family == AF_INET6) ? sizeof(struct in6_addr) :
949                                       sizeof(struct in_addr));
950         hlist_add_head_rcu(&key->node, &md5sig->head);
951         return 0;
952 }
953 EXPORT_SYMBOL(tcp_md5_do_add);
954
955 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
956 {
957         struct tcp_md5sig_key *key;
958
959         key = tcp_md5_do_lookup(sk, addr, family);
960         if (!key)
961                 return -ENOENT;
962         hlist_del_rcu(&key->node);
963         atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
964         kfree_rcu(key, rcu);
965         return 0;
966 }
967 EXPORT_SYMBOL(tcp_md5_do_del);
968
969 static void tcp_clear_md5_list(struct sock *sk)
970 {
971         struct tcp_sock *tp = tcp_sk(sk);
972         struct tcp_md5sig_key *key;
973         struct hlist_node *n;
974         struct tcp_md5sig_info *md5sig;
975
976         md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
977
978         hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
979                 hlist_del_rcu(&key->node);
980                 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
981                 kfree_rcu(key, rcu);
982         }
983 }
984
985 static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
986                                  int optlen)
987 {
988         struct tcp_md5sig cmd;
989         struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
990
991         if (optlen < sizeof(cmd))
992                 return -EINVAL;
993
994         if (copy_from_user(&cmd, optval, sizeof(cmd)))
995                 return -EFAULT;
996
997         if (sin->sin_family != AF_INET)
998                 return -EINVAL;
999
1000         if (!cmd.tcpm_keylen)
1001                 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1002                                       AF_INET);
1003
1004         if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1005                 return -EINVAL;
1006
1007         return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1008                               AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
1009                               GFP_KERNEL);
1010 }
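/* [Editor's illustrative sketch, not part of tcp_ipv4.c] The command parsed
 * above arrives through the TCP_MD5SIG socket option (RFC 2385).  A userspace
 * caller fills struct tcp_md5sig with the peer address and key; a zero key
 * length instead deletes the key, as handled by tcp_md5_do_del() above.  The
 * peer address, key and function name are placeholders; the snippet assumes
 * the glibc definitions in <netinet/tcp.h>.
 */
#if 0
#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>
#include <sys/socket.h>

static int example_set_md5_key(int fd, const char *peer_ip,
			       const void *key, unsigned int keylen)
{
	struct tcp_md5sig md5;
	struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;

	if (keylen > TCP_MD5SIG_MAXKEYLEN)
		return -1;

	memset(&md5, 0, sizeof(md5));
	sin->sin_family = AF_INET;
	inet_pton(AF_INET, peer_ip, &sin->sin_addr);	/* placeholder peer */
	md5.tcpm_keylen = keylen;
	memcpy(md5.tcpm_key, key, keylen);

	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}
#endif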
1011
1012 static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1013                                         __be32 daddr, __be32 saddr, int nbytes)
1014 {
1015         struct tcp4_pseudohdr *bp;
1016         struct scatterlist sg;
1017
1018         bp = &hp->md5_blk.ip4;
1019
1020         /*
1021          * 1. the TCP pseudo-header (in the order: source IP address,
1022          * destination IP address, zero-padded protocol number, and
1023          * segment length)
1024          */
1025         bp->saddr = saddr;
1026         bp->daddr = daddr;
1027         bp->pad = 0;
1028         bp->protocol = IPPROTO_TCP;
1029         bp->len = cpu_to_be16(nbytes);
1030
1031         sg_init_one(&sg, bp, sizeof(*bp));
1032         return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
1033 }
1034
1035 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1036                                __be32 daddr, __be32 saddr, const struct tcphdr *th)
1037 {
1038         struct tcp_md5sig_pool *hp;
1039         struct hash_desc *desc;
1040
1041         hp = tcp_get_md5sig_pool();
1042         if (!hp)
1043                 goto clear_hash_noput;
1044         desc = &hp->md5_desc;
1045
1046         if (crypto_hash_init(desc))
1047                 goto clear_hash;
1048         if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
1049                 goto clear_hash;
1050         if (tcp_md5_hash_header(hp, th))
1051                 goto clear_hash;
1052         if (tcp_md5_hash_key(hp, key))
1053                 goto clear_hash;
1054         if (crypto_hash_final(desc, md5_hash))
1055                 goto clear_hash;
1056
1057         tcp_put_md5sig_pool();
1058         return 0;
1059
1060 clear_hash:
1061         tcp_put_md5sig_pool();
1062 clear_hash_noput:
1063         memset(md5_hash, 0, 16);
1064         return 1;
1065 }
1066
1067 int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1068                         const struct sock *sk,
1069                         const struct sk_buff *skb)
1070 {
1071         struct tcp_md5sig_pool *hp;
1072         struct hash_desc *desc;
1073         const struct tcphdr *th = tcp_hdr(skb);
1074         __be32 saddr, daddr;
1075
1076         if (sk) { /* valid for establish/request sockets */
1077                 saddr = sk->sk_rcv_saddr;
1078                 daddr = sk->sk_daddr;
1079         } else {
1080                 const struct iphdr *iph = ip_hdr(skb);
1081                 saddr = iph->saddr;
1082                 daddr = iph->daddr;
1083         }
1084
1085         hp = tcp_get_md5sig_pool();
1086         if (!hp)
1087                 goto clear_hash_noput;
1088         desc = &hp->md5_desc;
1089
1090         if (crypto_hash_init(desc))
1091                 goto clear_hash;
1092
1093         if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
1094                 goto clear_hash;
1095         if (tcp_md5_hash_header(hp, th))
1096                 goto clear_hash;
1097         if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1098                 goto clear_hash;
1099         if (tcp_md5_hash_key(hp, key))
1100                 goto clear_hash;
1101         if (crypto_hash_final(desc, md5_hash))
1102                 goto clear_hash;
1103
1104         tcp_put_md5sig_pool();
1105         return 0;
1106
1107 clear_hash:
1108         tcp_put_md5sig_pool();
1109 clear_hash_noput:
1110         memset(md5_hash, 0, 16);
1111         return 1;
1112 }
1113 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1114
1115 #endif
1116
1117 /* Called with rcu_read_lock() */
1118 static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
1119                                     const struct sk_buff *skb)
1120 {
1121 #ifdef CONFIG_TCP_MD5SIG
1122         /*
1123          * This gets called for each TCP segment that arrives
1124          * so we want to be efficient.
1125          * We have 3 drop cases:
1126          * o No MD5 hash and one expected.
1127          * o MD5 hash and we're not expecting one.
1128          * o MD5 hash and it's wrong.
1129          */
1130         const __u8 *hash_location = NULL;
1131         struct tcp_md5sig_key *hash_expected;
1132         const struct iphdr *iph = ip_hdr(skb);
1133         const struct tcphdr *th = tcp_hdr(skb);
1134         int genhash;
1135         unsigned char newhash[16];
1136
1137         hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1138                                           AF_INET);
1139         hash_location = tcp_parse_md5sig_option(th);
1140
1141         /* We've parsed the options - do we have a hash? */
1142         if (!hash_expected && !hash_location)
1143                 return false;
1144
1145         if (hash_expected && !hash_location) {
1146                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1147                 return true;
1148         }
1149
1150         if (!hash_expected && hash_location) {
1151                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1152                 return true;
1153         }
1154
1155         /* Okay, so this is hash_expected and hash_location -
1156          * so we need to calculate the checksum.
1157          */
1158         genhash = tcp_v4_md5_hash_skb(newhash,
1159                                       hash_expected,
1160                                       NULL, skb);
1161
1162         if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1163                 net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1164                                      &iph->saddr, ntohs(th->source),
1165                                      &iph->daddr, ntohs(th->dest),
1166                                      genhash ? " tcp_v4_calc_md5_hash failed"
1167                                      : "");
1168                 return true;
1169         }
1170         return false;
1171 #endif
1172         return false;
1173 }
1174
1175 static void tcp_v4_init_req(struct request_sock *req,
1176                             const struct sock *sk_listener,
1177                             struct sk_buff *skb)
1178 {
1179         struct inet_request_sock *ireq = inet_rsk(req);
1180
1181         sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1182         sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
1183         ireq->no_srccheck = inet_sk(sk_listener)->transparent;
1184         ireq->opt = tcp_v4_save_options(skb);
1185 }
1186
1187 static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
1188                                           struct flowi *fl,
1189                                           const struct request_sock *req,
1190                                           bool *strict)
1191 {
1192         struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);
1193
1194         if (strict) {
1195                 if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
1196                         *strict = true;
1197                 else
1198                         *strict = false;
1199         }
1200
1201         return dst;
1202 }
1203
1204 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1205         .family         =       PF_INET,
1206         .obj_size       =       sizeof(struct tcp_request_sock),
1207         .rtx_syn_ack    =       tcp_rtx_synack,
1208         .send_ack       =       tcp_v4_reqsk_send_ack,
1209         .destructor     =       tcp_v4_reqsk_destructor,
1210         .send_reset     =       tcp_v4_send_reset,
1211         .syn_ack_timeout =      tcp_syn_ack_timeout,
1212 };
1213
1214 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1215         .mss_clamp      =       TCP_MSS_DEFAULT,
1216 #ifdef CONFIG_TCP_MD5SIG
1217         .req_md5_lookup =       tcp_v4_md5_lookup,
1218         .calc_md5_hash  =       tcp_v4_md5_hash_skb,
1219 #endif
1220         .init_req       =       tcp_v4_init_req,
1221 #ifdef CONFIG_SYN_COOKIES
1222         .cookie_init_seq =      cookie_v4_init_sequence,
1223 #endif
1224         .route_req      =       tcp_v4_route_req,
1225         .init_seq       =       tcp_v4_init_sequence,
1226         .send_synack    =       tcp_v4_send_synack,
1227 };
1228
1229 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1230 {
1231         /* Never answer SYNs sent to broadcast or multicast addresses */
1232         if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1233                 goto drop;
1234
1235         return tcp_conn_request(&tcp_request_sock_ops,
1236                                 &tcp_request_sock_ipv4_ops, sk, skb);
1237
1238 drop:
1239         NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1240         return 0;
1241 }
1242 EXPORT_SYMBOL(tcp_v4_conn_request);
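/* [Editor's illustrative sketch, not part of tcp_ipv4.c] On the passive side,
 * an ordinary listening socket is what feeds tcp_v4_conn_request() above: an
 * incoming SYN creates a request sock, the final ACK of the handshake builds
 * the child via tcp_v4_syn_recv_sock() below, and accept() dequeues it.  The
 * port, backlog and function name are placeholders; standard libc headers
 * are assumed and the block is kept under #if 0.
 */
#if 0
#include <netinet/in.h>
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int example_passive_open(uint16_t port)
{
	struct sockaddr_in addr;
	int lfd, cfd;

	lfd = socket(AF_INET, SOCK_STREAM, 0);
	if (lfd < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = htons(port);

	if (bind(lfd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    listen(lfd, 128) < 0) {		/* placeholder backlog */
		close(lfd);
		return -1;
	}

	cfd = accept(lfd, NULL, NULL);	/* child created by tcp_v4_syn_recv_sock() */
	if (cfd >= 0)
		close(cfd);
	close(lfd);
	return 0;
}
#endif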
1243
1244
1245 /*
1246  * The three way handshake has completed - we got a valid ACK -
1247  * now create the new socket.
1248  */
1249 struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1250                                   struct request_sock *req,
1251                                   struct dst_entry *dst)
1252 {
1253         struct inet_request_sock *ireq;
1254         struct inet_sock *newinet;
1255         struct tcp_sock *newtp;
1256         struct sock *newsk;
1257 #ifdef CONFIG_TCP_MD5SIG
1258         struct tcp_md5sig_key *key;
1259 #endif
1260         struct ip_options_rcu *inet_opt;
1261
1262         if (sk_acceptq_is_full(sk))
1263                 goto exit_overflow;
1264
1265         newsk = tcp_create_openreq_child(sk, req, skb);
1266         if (!newsk)
1267                 goto exit_nonewsk;
1268
1269         newsk->sk_gso_type = SKB_GSO_TCPV4;
1270         inet_sk_rx_dst_set(newsk, skb);
1271
1272         newtp                 = tcp_sk(newsk);
1273         newinet               = inet_sk(newsk);
1274         ireq                  = inet_rsk(req);
1275         sk_daddr_set(newsk, ireq->ir_rmt_addr);
1276         sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
1277         newinet->inet_saddr           = ireq->ir_loc_addr;
1278         inet_opt              = ireq->opt;
1279         rcu_assign_pointer(newinet->inet_opt, inet_opt);
1280         ireq->opt             = NULL;
1281         newinet->mc_index     = inet_iif(skb);
1282         newinet->mc_ttl       = ip_hdr(skb)->ttl;
1283         newinet->rcv_tos      = ip_hdr(skb)->tos;
1284         inet_csk(newsk)->icsk_ext_hdr_len = 0;
1285         if (inet_opt)
1286                 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1287         newinet->inet_id = newtp->write_seq ^ jiffies;
1288
1289         if (!dst) {
1290                 dst = inet_csk_route_child_sock(sk, newsk, req);
1291                 if (!dst)
1292                         goto put_and_exit;
1293         } else {
1294                 /* syncookie case: see end of cookie_v4_check() */
1295         }
1296         sk_setup_caps(newsk, dst);
1297
1298         tcp_ca_openreq_child(newsk, dst);
1299
1300         tcp_sync_mss(newsk, dst_mtu(dst));
1301         newtp->advmss = dst_metric_advmss(dst);
1302         if (tcp_sk(sk)->rx_opt.user_mss &&
1303             tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1304                 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1305
1306         tcp_initialize_rcv_mss(newsk);
1307
1308 #ifdef CONFIG_TCP_MD5SIG
1309         /* Copy over the MD5 key from the original socket */
1310         key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1311                                 AF_INET);
1312         if (key) {
1313                 /*
1314                  * We're using one, so create a matching key
1315                  * on the newsk structure. If we fail to get
1316                  * memory, then we end up not copying the key
1317                  * across. Shucks.
1318                  */
1319                 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1320                                AF_INET, key->key, key->keylen, GFP_ATOMIC);
1321                 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1322         }
1323 #endif
1324
1325         if (__inet_inherit_port(sk, newsk) < 0)
1326                 goto put_and_exit;
1327         __inet_hash_nolisten(newsk, NULL);
1328
1329         return newsk;
1330
1331 exit_overflow:
1332         NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1333 exit_nonewsk:
1334         dst_release(dst);
1335 exit:
1336         NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1337         return NULL;
1338 put_and_exit:
1339         inet_csk_prepare_forced_close(newsk);
1340         tcp_done(newsk);
1341         goto exit;
1342 }
1343 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1344
1345 static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
1346 {
1347 #ifdef CONFIG_SYN_COOKIES
1348         const struct tcphdr *th = tcp_hdr(skb);
1349
1350         if (!th->syn)
1351                 sk = cookie_v4_check(sk, skb);
1352 #endif
1353         return sk;
1354 }
1355
1356 /* The socket must have its spinlock held when we get
1357  * here.
1358  *
1359  * We have a potential double-lock case here, so even when
1360  * doing backlog processing we use the BH locking scheme.
1361  * This is because we cannot sleep with the original spinlock
1362  * held.
1363  */
1364 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1365 {
1366         struct sock *rsk;
1367
1368         if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1369                 struct dst_entry *dst = sk->sk_rx_dst;
1370
1371                 sock_rps_save_rxhash(sk, skb);
1372                 sk_mark_napi_id(sk, skb);
1373                 if (dst) {
1374                         if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1375                             !dst->ops->check(dst, 0)) {
1376                                 dst_release(dst);
1377                                 sk->sk_rx_dst = NULL;
1378                         }
1379                 }
1380                 tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1381                 return 0;
1382         }
1383
1384         if (tcp_checksum_complete(skb))
1385                 goto csum_err;
1386
1387         if (sk->sk_state == TCP_LISTEN) {
1388                 struct sock *nsk = tcp_v4_cookie_check(sk, skb);
1389
1390                 if (!nsk)
1391                         goto discard;
1392                 if (nsk != sk) {
1393                         sock_rps_save_rxhash(nsk, skb);
1394                         sk_mark_napi_id(nsk, skb);
1395                         if (tcp_child_process(sk, nsk, skb)) {
1396                                 rsk = nsk;
1397                                 goto reset;
1398                         }
1399                         return 0;
1400                 }
1401         } else
1402                 sock_rps_save_rxhash(sk, skb);
1403
1404         if (tcp_rcv_state_process(sk, skb)) {
1405                 rsk = sk;
1406                 goto reset;
1407         }
1408         return 0;
1409
1410 reset:
1411         tcp_v4_send_reset(rsk, skb);
1412 discard:
1413         kfree_skb(skb);
1414         /* Be careful here. If this function gets more complicated and
1415          * gcc suffers from register pressure on the x86, sk (in %ebx)
1416          * might be destroyed here. This current version compiles correctly,
1417          * but you have been warned.
1418          */
1419         return 0;
1420
1421 csum_err:
1422         TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
1423         TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1424         goto discard;
1425 }
1426 EXPORT_SYMBOL(tcp_v4_do_rcv);
1427
1428 void tcp_v4_early_demux(struct sk_buff *skb)
1429 {
1430         const struct iphdr *iph;
1431         const struct tcphdr *th;
1432         struct sock *sk;
1433
1434         if (skb->pkt_type != PACKET_HOST)
1435                 return;
1436
1437         if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1438                 return;
1439
1440         iph = ip_hdr(skb);
1441         th = tcp_hdr(skb);
1442
1443         if (th->doff < sizeof(struct tcphdr) / 4)
1444                 return;
1445
1446         sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1447                                        iph->saddr, th->source,
1448                                        iph->daddr, ntohs(th->dest),
1449                                        skb->skb_iif);
1450         if (sk) {
1451                 skb->sk = sk;
1452                 skb->destructor = sock_edemux;
1453                 if (sk_fullsock(sk)) {
1454                         struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1455
1456                         if (dst)
1457                                 dst = dst_check(dst, 0);
1458                         if (dst &&
1459                             inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1460                                 skb_dst_set_noref(skb, dst);
1461                 }
1462         }
1463 }
1464
1465 /* Packet is added to VJ-style prequeue for processing in process
1466  * context, if a reader task is waiting. Apparently, this exciting
1467  * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
1468  * failed somewhere. Latency? Burstiness? Well, at least now we will
1469  * see why it failed. 8)8)                                --ANK
1470  *
1471  */
1472 bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1473 {
1474         struct tcp_sock *tp = tcp_sk(sk);
1475
1476         if (sysctl_tcp_low_latency || !tp->ucopy.task)
1477                 return false;
1478
1479         if (skb->len <= tcp_hdrlen(skb) &&
1480             skb_queue_len(&tp->ucopy.prequeue) == 0)
1481                 return false;
1482
1483         /* Before escaping the RCU protected region, we need to take care of the
1484          * skb dst. Prequeue is only enabled for established sockets.
1485          * For such sockets, we might need the skb dst only to set sk->sk_rx_dst.
1486          * Instead of doing a full sk_rx_dst validity check here, let's perform
1487          * an optimistic check.
1488          */
1489         if (likely(sk->sk_rx_dst))
1490                 skb_dst_drop(skb);
1491         else
1492                 skb_dst_force(skb);
1493
1494         __skb_queue_tail(&tp->ucopy.prequeue, skb);
1495         tp->ucopy.memory += skb->truesize;
1496         if (tp->ucopy.memory > sk->sk_rcvbuf) {
1497                 struct sk_buff *skb1;
1498
1499                 BUG_ON(sock_owned_by_user(sk));
1500
1501                 while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
1502                         sk_backlog_rcv(sk, skb1);
1503                         NET_INC_STATS_BH(sock_net(sk),
1504                                          LINUX_MIB_TCPPREQUEUEDROPPED);
1505                 }
1506
1507                 tp->ucopy.memory = 0;
1508         } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1509                 wake_up_interruptible_sync_poll(sk_sleep(sk),
1510                                            POLLIN | POLLRDNORM | POLLRDBAND);
1511                 if (!inet_csk_ack_scheduled(sk))
1512                         inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
1513                                                   (3 * tcp_rto_min(sk)) / 4,
1514                                                   TCP_RTO_MAX);
1515         }
1516         return true;
1517 }
1518 EXPORT_SYMBOL(tcp_prequeue);
1519
1520 /*
1521  *      From tcp_input.c
1522  */
1523
1524 int tcp_v4_rcv(struct sk_buff *skb)
1525 {
1526         const struct iphdr *iph;
1527         const struct tcphdr *th;
1528         struct sock *sk;
1529         int ret;
1530         struct net *net = dev_net(skb->dev);
1531
1532         if (skb->pkt_type != PACKET_HOST)
1533                 goto discard_it;
1534
1535         /* Count it even if it's bad */
1536         TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1537
1538         if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1539                 goto discard_it;
1540
1541         th = tcp_hdr(skb);
1542
1543         if (th->doff < sizeof(struct tcphdr) / 4)
1544                 goto bad_packet;
1545         if (!pskb_may_pull(skb, th->doff * 4))
1546                 goto discard_it;
1547
1548         /* An explanation is required here, I think.
1549          * Packet length and doff are validated by header prediction,
1550          * provided the case of th->doff==0 is eliminated.
1551          * So, we defer the checks. */
1552
1553         if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
1554                 goto csum_error;
1555
1556         th = tcp_hdr(skb);
1557         iph = ip_hdr(skb);
1558         /* This is tricky: we move IPCB to its correct location inside TCP_SKB_CB();
1559          * barrier() makes sure the compiler won't play fool^Waliasing games.
1560          */
1561         memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1562                 sizeof(struct inet_skb_parm));
1563         barrier();
1564
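        /* Fill the private TCP control block: sequence numbers, flags and the
         * DS field are decoded once here and reused by the rest of input
         * processing. Note that end_seq counts SYN and FIN as one unit each.
         */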
1565         TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1566         TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1567                                     skb->len - th->doff * 4);
1568         TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1569         TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1570         TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1571         TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1572         TCP_SKB_CB(skb)->sacked  = 0;
1573
1574         sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1575         if (!sk)
1576                 goto no_tcp_socket;
1577
1578 process:
1579         if (sk->sk_state == TCP_TIME_WAIT)
1580                 goto do_time_wait;
1581
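        /* SYN_RECV request sockets are installed in the ehash table, so the
         * lookup above may return one directly. Switch to its listener, run
         * the MD5 check against the listener, and let tcp_check_req() either
         * promote the request into a child socket or tell us to drop it.
         */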
1582         if (sk->sk_state == TCP_NEW_SYN_RECV) {
1583                 struct request_sock *req = inet_reqsk(sk);
1584                 struct sock *nsk = NULL;
1585
1586                 sk = req->rsk_listener;
1587                 if (tcp_v4_inbound_md5_hash(sk, skb))
1588                         goto discard_and_relse;
1589                 if (sk->sk_state == TCP_LISTEN)
1590                         nsk = tcp_check_req(sk, skb, req, false);
1591                 if (!nsk) {
1592                         reqsk_put(req);
1593                         goto discard_it;
1594                 }
1595                 if (nsk == sk) {
1596                         sock_hold(sk);
1597                         reqsk_put(req);
1598                 } else if (tcp_child_process(sk, nsk, skb)) {
1599                         tcp_v4_send_reset(nsk, skb);
1600                         goto discard_it;
1601                 } else {
1602                         return 0;
1603                 }
1604         }
1605         if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1606                 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1607                 goto discard_and_relse;
1608         }
1609
1610         if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1611                 goto discard_and_relse;
1612
1613         if (tcp_v4_inbound_md5_hash(sk, skb))
1614                 goto discard_and_relse;
1615
1616         nf_reset(skb);
1617
1618         if (sk_filter(sk, skb))
1619                 goto discard_and_relse;
1620
1621         sk_incoming_cpu_update(sk);
1622         skb->dev = NULL;
1623
1624         bh_lock_sock_nested(sk);
1625         tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
1626         ret = 0;
1627         if (!sock_owned_by_user(sk)) {
1628                 if (!tcp_prequeue(sk, skb))
1629                         ret = tcp_v4_do_rcv(sk, skb);
1630         } else if (unlikely(sk_add_backlog(sk, skb,
1631                                            sk->sk_rcvbuf + sk->sk_sndbuf))) {
1632                 bh_unlock_sock(sk);
1633                 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1634                 goto discard_and_relse;
1635         }
1636         bh_unlock_sock(sk);
1637
1638         sock_put(sk);
1639
1640         return ret;
1641
1642 no_tcp_socket:
1643         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1644                 goto discard_it;
1645
1646         if (tcp_checksum_complete(skb)) {
1647 csum_error:
1648                 TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
1649 bad_packet:
1650                 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1651         } else {
1652                 tcp_v4_send_reset(NULL, skb);
1653         }
1654
1655 discard_it:
1656         /* Discard frame. */
1657         kfree_skb(skb);
1658         return 0;
1659
1660 discard_and_relse:
1661         sock_put(sk);
1662         goto discard_it;
1663
1664 do_time_wait:
1665         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1666                 inet_twsk_put(inet_twsk(sk));
1667                 goto discard_it;
1668         }
1669
1670         if (tcp_checksum_complete(skb)) {
1671                 inet_twsk_put(inet_twsk(sk));
1672                 goto csum_error;
1673         }
1674         switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1675         case TCP_TW_SYN: {
1676                 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1677                                                         &tcp_hashinfo,
1678                                                         iph->saddr, th->source,
1679                                                         iph->daddr, th->dest,
1680                                                         inet_iif(skb));
1681                 if (sk2) {
1682                         inet_twsk_deschedule_put(inet_twsk(sk));
1683                         sk = sk2;
1684                         goto process;
1685                 }
1686                 /* Fall through to ACK */
1687         }
1688         case TCP_TW_ACK:
1689                 tcp_v4_timewait_ack(sk, skb);
1690                 break;
1691         case TCP_TW_RST:
1692                 goto no_tcp_socket;
1693         case TCP_TW_SUCCESS:;
1694         }
1695         goto discard_it;
1696 }
1697
1698 static struct timewait_sock_ops tcp_timewait_sock_ops = {
1699         .twsk_obj_size  = sizeof(struct tcp_timewait_sock),
1700         .twsk_unique    = tcp_twsk_unique,
1701         .twsk_destructor= tcp_twsk_destructor,
1702 };
1703
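/* ->sk_rx_dst_set() hook of ipv4_specific: cache the skb's dst (plus the
 * interface it arrived on) on the socket, holding a reference, so the
 * established fast path and early demux can reuse it for later segments.
 */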
1704 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
1705 {
1706         struct dst_entry *dst = skb_dst(skb);
1707
1708         if (dst) {
1709                 dst_hold(dst);
1710                 sk->sk_rx_dst = dst;
1711                 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1712         }
1713 }
1714 EXPORT_SYMBOL(inet_sk_rx_dst_set);
1715
1716 const struct inet_connection_sock_af_ops ipv4_specific = {
1717         .queue_xmit        = ip_queue_xmit,
1718         .send_check        = tcp_v4_send_check,
1719         .rebuild_header    = inet_sk_rebuild_header,
1720         .sk_rx_dst_set     = inet_sk_rx_dst_set,
1721         .conn_request      = tcp_v4_conn_request,
1722         .syn_recv_sock     = tcp_v4_syn_recv_sock,
1723         .net_header_len    = sizeof(struct iphdr),
1724         .setsockopt        = ip_setsockopt,
1725         .getsockopt        = ip_getsockopt,
1726         .addr2sockaddr     = inet_csk_addr2sockaddr,
1727         .sockaddr_len      = sizeof(struct sockaddr_in),
1728         .bind_conflict     = inet_csk_bind_conflict,
1729 #ifdef CONFIG_COMPAT
1730         .compat_setsockopt = compat_ip_setsockopt,
1731         .compat_getsockopt = compat_ip_getsockopt,
1732 #endif
1733         .mtu_reduced       = tcp_v4_mtu_reduced,
1734 };
1735 EXPORT_SYMBOL(ipv4_specific);
1736
1737 #ifdef CONFIG_TCP_MD5SIG
1738 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1739         .md5_lookup             = tcp_v4_md5_lookup,
1740         .calc_md5_hash          = tcp_v4_md5_hash_skb,
1741         .md5_parse              = tcp_v4_parse_md5_keys,
1742 };
1743 #endif
1744
1745 /* NOTE: A lot of things are set to zero explicitly by the call to
1746  *       sk_alloc(), so they need not be done here.
1747  */
1748 static int tcp_v4_init_sock(struct sock *sk)
1749 {
1750         struct inet_connection_sock *icsk = inet_csk(sk);
1751
1752         tcp_init_sock(sk);
1753
1754         icsk->icsk_af_ops = &ipv4_specific;
1755
1756 #ifdef CONFIG_TCP_MD5SIG
1757         tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
1758 #endif
1759
1760         return 0;
1761 }
1762
1763 void tcp_v4_destroy_sock(struct sock *sk)
1764 {
1765         struct tcp_sock *tp = tcp_sk(sk);
1766
1767         tcp_clear_xmit_timers(sk);
1768
1769         tcp_cleanup_congestion_control(sk);
1770
1771         /* Clean up the write buffer. */
1772         tcp_write_queue_purge(sk);
1773
1774         /* Clean up our (hopefully empty) out_of_order_queue. */
1775         __skb_queue_purge(&tp->out_of_order_queue);
1776
1777 #ifdef CONFIG_TCP_MD5SIG
1778         /* Clean up the MD5 key list, if any */
1779         if (tp->md5sig_info) {
1780                 tcp_clear_md5_list(sk);
1781                 kfree_rcu(tp->md5sig_info, rcu);
1782                 tp->md5sig_info = NULL;
1783         }
1784 #endif
1785
1786         /* Clean the prequeue; it really must be empty by now. */
1787         __skb_queue_purge(&tp->ucopy.prequeue);
1788
1789         /* Clean up a referenced TCP bind bucket. */
1790         if (inet_csk(sk)->icsk_bind_hash)
1791                 inet_put_port(sk);
1792
1793         BUG_ON(tp->fastopen_rsk);
1794
1795         /* If the socket was aborted during the connect operation */
1796         tcp_free_fastopen_req(tp);
1797         tcp_saved_syn_free(tp);
1798
1799         sk_sockets_allocated_dec(sk);
1800         sock_release_memcg(sk);
1801 }
1802 EXPORT_SYMBOL(tcp_v4_destroy_sock);
1803
1804 #ifdef CONFIG_PROC_FS
1805 /* Proc filesystem TCP sock list dumping. */
1806
1807 /*
1808  * Get the next listener socket following cur.  If cur is NULL, get the first
1809  * socket starting from the bucket given in st->bucket; when st->bucket is zero, the
1810  * very first socket in the hash table is returned.
1811  */
1812 static void *listening_get_next(struct seq_file *seq, void *cur)
1813 {
1814         struct inet_connection_sock *icsk;
1815         struct hlist_nulls_node *node;
1816         struct sock *sk = cur;
1817         struct inet_listen_hashbucket *ilb;
1818         struct tcp_iter_state *st = seq->private;
1819         struct net *net = seq_file_net(seq);
1820
1821         if (!sk) {
1822                 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1823                 spin_lock_bh(&ilb->lock);
1824                 sk = sk_nulls_head(&ilb->head);
1825                 st->offset = 0;
1826                 goto get_sk;
1827         }
1828         ilb = &tcp_hashinfo.listening_hash[st->bucket];
1829         ++st->num;
1830         ++st->offset;
1831
1832         sk = sk_nulls_next(sk);
1833 get_sk:
1834         sk_nulls_for_each_from(sk, node) {
1835                 if (!net_eq(sock_net(sk), net))
1836                         continue;
1837                 if (sk->sk_family == st->family) {
1838                         cur = sk;
1839                         goto out;
1840                 }
1841                 icsk = inet_csk(sk);
1842         }
1843         spin_unlock_bh(&ilb->lock);
1844         st->offset = 0;
1845         if (++st->bucket < INET_LHTABLE_SIZE) {
1846                 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1847                 spin_lock_bh(&ilb->lock);
1848                 sk = sk_nulls_head(&ilb->head);
1849                 goto get_sk;
1850         }
1851         cur = NULL;
1852 out:
1853         return cur;
1854 }
1855
1856 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1857 {
1858         struct tcp_iter_state *st = seq->private;
1859         void *rc;
1860
1861         st->bucket = 0;
1862         st->offset = 0;
1863         rc = listening_get_next(seq, NULL);
1864
1865         while (rc && *pos) {
1866                 rc = listening_get_next(seq, rc);
1867                 --*pos;
1868         }
1869         return rc;
1870 }
1871
1872 static inline bool empty_bucket(const struct tcp_iter_state *st)
1873 {
1874         return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
1875 }
1876
1877 /*
1878  * Get first established socket starting from bucket given in st->bucket.
1879  * If st->bucket is zero, the very first socket in the hash is returned.
1880  */
1881 static void *established_get_first(struct seq_file *seq)
1882 {
1883         struct tcp_iter_state *st = seq->private;
1884         struct net *net = seq_file_net(seq);
1885         void *rc = NULL;
1886
1887         st->offset = 0;
1888         for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
1889                 struct sock *sk;
1890                 struct hlist_nulls_node *node;
1891                 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1892
1893                 /* Lockless fast path for the common case of empty buckets */
1894                 if (empty_bucket(st))
1895                         continue;
1896
1897                 spin_lock_bh(lock);
1898                 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
1899                         if (sk->sk_family != st->family ||
1900                             !net_eq(sock_net(sk), net)) {
1901                                 continue;
1902                         }
1903                         rc = sk;
1904                         goto out;
1905                 }
1906                 spin_unlock_bh(lock);
1907         }
1908 out:
1909         return rc;
1910 }
1911
1912 static void *established_get_next(struct seq_file *seq, void *cur)
1913 {
1914         struct sock *sk = cur;
1915         struct hlist_nulls_node *node;
1916         struct tcp_iter_state *st = seq->private;
1917         struct net *net = seq_file_net(seq);
1918
1919         ++st->num;
1920         ++st->offset;
1921
1922         sk = sk_nulls_next(sk);
1923
1924         sk_nulls_for_each_from(sk, node) {
1925                 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
1926                         return sk;
1927         }
1928
1929         spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
1930         ++st->bucket;
1931         return established_get_first(seq);
1932 }
1933
1934 static void *established_get_idx(struct seq_file *seq, loff_t pos)
1935 {
1936         struct tcp_iter_state *st = seq->private;
1937         void *rc;
1938
1939         st->bucket = 0;
1940         rc = established_get_first(seq);
1941
1942         while (rc && pos) {
1943                 rc = established_get_next(seq, rc);
1944                 --pos;
1945         }
1946         return rc;
1947 }
1948
1949 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
1950 {
1951         void *rc;
1952         struct tcp_iter_state *st = seq->private;
1953
1954         st->state = TCP_SEQ_STATE_LISTENING;
1955         rc        = listening_get_idx(seq, &pos);
1956
1957         if (!rc) {
1958                 st->state = TCP_SEQ_STATE_ESTABLISHED;
1959                 rc        = established_get_idx(seq, pos);
1960         }
1961
1962         return rc;
1963 }
1964
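/* Resume a /proc dump at the bucket and in-bucket offset remembered from the
 * previous read instead of rescanning from the start; st->num is restored so
 * the sequence numbering stays continuous across reads.
 */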
1965 static void *tcp_seek_last_pos(struct seq_file *seq)
1966 {
1967         struct tcp_iter_state *st = seq->private;
1968         int offset = st->offset;
1969         int orig_num = st->num;
1970         void *rc = NULL;
1971
1972         switch (st->state) {
1973         case TCP_SEQ_STATE_LISTENING:
1974                 if (st->bucket >= INET_LHTABLE_SIZE)
1975                         break;
1976                 st->state = TCP_SEQ_STATE_LISTENING;
1977                 rc = listening_get_next(seq, NULL);
1978                 while (offset-- && rc)
1979                         rc = listening_get_next(seq, rc);
1980                 if (rc)
1981                         break;
1982                 st->bucket = 0;
1983                 st->state = TCP_SEQ_STATE_ESTABLISHED;
1984                 /* Fallthrough */
1985         case TCP_SEQ_STATE_ESTABLISHED:
1986                 if (st->bucket > tcp_hashinfo.ehash_mask)
1987                         break;
1988                 rc = established_get_first(seq);
1989                 while (offset-- && rc)
1990                         rc = established_get_next(seq, rc);
1991         }
1992
1993         st->num = orig_num;
1994
1995         return rc;
1996 }
1997
1998 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
1999 {
2000         struct tcp_iter_state *st = seq->private;
2001         void *rc;
2002
2003         if (*pos && *pos == st->last_pos) {
2004                 rc = tcp_seek_last_pos(seq);
2005                 if (rc)
2006                         goto out;
2007         }
2008
2009         st->state = TCP_SEQ_STATE_LISTENING;
2010         st->num = 0;
2011         st->bucket = 0;
2012         st->offset = 0;
2013         rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2014
2015 out:
2016         st->last_pos = *pos;
2017         return rc;
2018 }
2019
2020 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2021 {
2022         struct tcp_iter_state *st = seq->private;
2023         void *rc = NULL;
2024
2025         if (v == SEQ_START_TOKEN) {
2026                 rc = tcp_get_idx(seq, 0);
2027                 goto out;
2028         }
2029
2030         switch (st->state) {
2031         case TCP_SEQ_STATE_LISTENING:
2032                 rc = listening_get_next(seq, v);
2033                 if (!rc) {
2034                         st->state = TCP_SEQ_STATE_ESTABLISHED;
2035                         st->bucket = 0;
2036                         st->offset = 0;
2037                         rc        = established_get_first(seq);
2038                 }
2039                 break;
2040         case TCP_SEQ_STATE_ESTABLISHED:
2041                 rc = established_get_next(seq, v);
2042                 break;
2043         }
2044 out:
2045         ++*pos;
2046         st->last_pos = *pos;
2047         return rc;
2048 }
2049
2050 static void tcp_seq_stop(struct seq_file *seq, void *v)
2051 {
2052         struct tcp_iter_state *st = seq->private;
2053
2054         switch (st->state) {
2055         case TCP_SEQ_STATE_LISTENING:
2056                 if (v != SEQ_START_TOKEN)
2057                         spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
2058                 break;
2059         case TCP_SEQ_STATE_ESTABLISHED:
2060                 if (v)
2061                         spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2062                 break;
2063         }
2064 }
2065
2066 int tcp_seq_open(struct inode *inode, struct file *file)
2067 {
2068         struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
2069         struct tcp_iter_state *s;
2070         int err;
2071
2072         err = seq_open_net(inode, file, &afinfo->seq_ops,
2073                           sizeof(struct tcp_iter_state));
2074         if (err < 0)
2075                 return err;
2076
2077         s = ((struct seq_file *)file->private_data)->private;
2078         s->family               = afinfo->family;
2079         s->last_pos             = 0;
2080         return 0;
2081 }
2082 EXPORT_SYMBOL(tcp_seq_open);
2083
2084 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2085 {
2086         int rc = 0;
2087         struct proc_dir_entry *p;
2088
2089         afinfo->seq_ops.start           = tcp_seq_start;
2090         afinfo->seq_ops.next            = tcp_seq_next;
2091         afinfo->seq_ops.stop            = tcp_seq_stop;
2092
2093         p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2094                              afinfo->seq_fops, afinfo);
2095         if (!p)
2096                 rc = -ENOMEM;
2097         return rc;
2098 }
2099 EXPORT_SYMBOL(tcp_proc_register);
2100
2101 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2102 {
2103         remove_proc_entry(afinfo->name, net->proc_net);
2104 }
2105 EXPORT_SYMBOL(tcp_proc_unregister);
2106
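/* Emit one /proc/net/tcp row for a request socket, reported in state
 * TCP_SYN_RECV. Columns that have no meaning for a request (inode, option
 * sizes, ...) are printed as constants; the timeout comes from rsk_timer.
 */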
2107 static void get_openreq4(const struct request_sock *req,
2108                          struct seq_file *f, int i)
2109 {
2110         const struct inet_request_sock *ireq = inet_rsk(req);
2111         long delta = req->rsk_timer.expires - jiffies;
2112
2113         seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2114                 " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2115                 i,
2116                 ireq->ir_loc_addr,
2117                 ireq->ir_num,
2118                 ireq->ir_rmt_addr,
2119                 ntohs(ireq->ir_rmt_port),
2120                 TCP_SYN_RECV,
2121                 0, 0, /* could print option size, but that is af dependent. */
2122                 1,    /* timers active (only the expire timer) */
2123                 jiffies_delta_to_clock_t(delta),
2124                 req->num_timeout,
2125                 from_kuid_munged(seq_user_ns(f),
2126                                  sock_i_uid(req->rsk_listener)),
2127                 0,  /* non standard timer */
2128                 0, /* open_requests have no inode */
2129                 0,
2130                 req);
2131 }
2132
2133 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2134 {
2135         int timer_active;
2136         unsigned long timer_expires;
2137         const struct tcp_sock *tp = tcp_sk(sk);
2138         const struct inet_connection_sock *icsk = inet_csk(sk);
2139         const struct inet_sock *inet = inet_sk(sk);
2140         const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
2141         __be32 dest = inet->inet_daddr;
2142         __be32 src = inet->inet_rcv_saddr;
2143         __u16 destp = ntohs(inet->inet_dport);
2144         __u16 srcp = ntohs(inet->inet_sport);
2145         int rx_queue;
2146
2147         if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2148             icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
2149             icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2150                 timer_active    = 1;
2151                 timer_expires   = icsk->icsk_timeout;
2152         } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2153                 timer_active    = 4;
2154                 timer_expires   = icsk->icsk_timeout;
2155         } else if (timer_pending(&sk->sk_timer)) {
2156                 timer_active    = 2;
2157                 timer_expires   = sk->sk_timer.expires;
2158         } else {
2159                 timer_active    = 0;
2160                 timer_expires = jiffies;
2161         }
2162
2163         if (sk->sk_state == TCP_LISTEN)
2164                 rx_queue = sk->sk_ack_backlog;
2165         else
2166                 /*
2167                  * Because we don't lock the socket, we might find a transient negative value.
2168                  */
2169                 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2170
2171         seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2172                         "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2173                 i, src, srcp, dest, destp, sk->sk_state,
2174                 tp->write_seq - tp->snd_una,
2175                 rx_queue,
2176                 timer_active,
2177                 jiffies_delta_to_clock_t(timer_expires - jiffies),
2178                 icsk->icsk_retransmits,
2179                 from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2180                 icsk->icsk_probes_out,
2181                 sock_i_ino(sk),
2182                 atomic_read(&sk->sk_refcnt), sk,
2183                 jiffies_to_clock_t(icsk->icsk_rto),
2184                 jiffies_to_clock_t(icsk->icsk_ack.ato),
2185                 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2186                 tp->snd_cwnd,
2187                 sk->sk_state == TCP_LISTEN ?
2188                     (fastopenq ? fastopenq->max_qlen : 0) :
2189                     (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2190 }
2191
2192 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2193                                struct seq_file *f, int i)
2194 {
2195         long delta = tw->tw_timer.expires - jiffies;
2196         __be32 dest, src;
2197         __u16 destp, srcp;
2198
2199         dest  = tw->tw_daddr;
2200         src   = tw->tw_rcv_saddr;
2201         destp = ntohs(tw->tw_dport);
2202         srcp  = ntohs(tw->tw_sport);
2203
2204         seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2205                 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2206                 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2207                 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2208                 atomic_read(&tw->tw_refcnt), tw);
2209 }
2210
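/* Every /proc/net/tcp record is padded to a fixed width: TMPSZ - 1 payload
 * characters (see seq_setwidth() below) plus the newline added by seq_pad(),
 * so each record has a constant length.
 */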
2211 #define TMPSZ 150
2212
2213 static int tcp4_seq_show(struct seq_file *seq, void *v)
2214 {
2215         struct tcp_iter_state *st;
2216         struct sock *sk = v;
2217
2218         seq_setwidth(seq, TMPSZ - 1);
2219         if (v == SEQ_START_TOKEN) {
2220                 seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
2221                            "rx_queue tr tm->when retrnsmt   uid  timeout "
2222                            "inode");
2223                 goto out;
2224         }
2225         st = seq->private;
2226
2227         if (sk->sk_state == TCP_TIME_WAIT)
2228                 get_timewait4_sock(v, seq, st->num);
2229         else if (sk->sk_state == TCP_NEW_SYN_RECV)
2230                 get_openreq4(v, seq, st->num);
2231         else
2232                 get_tcp4_sock(v, seq, st->num);
2233 out:
2234         seq_pad(seq, '\n');
2235         return 0;
2236 }
2237
2238 static const struct file_operations tcp_afinfo_seq_fops = {
2239         .owner   = THIS_MODULE,
2240         .open    = tcp_seq_open,
2241         .read    = seq_read,
2242         .llseek  = seq_lseek,
2243         .release = seq_release_net
2244 };
2245
2246 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2247         .name           = "tcp",
2248         .family         = AF_INET,
2249         .seq_fops       = &tcp_afinfo_seq_fops,
2250         .seq_ops        = {
2251                 .show           = tcp4_seq_show,
2252         },
2253 };
2254
2255 static int __net_init tcp4_proc_init_net(struct net *net)
2256 {
2257         return tcp_proc_register(net, &tcp4_seq_afinfo);
2258 }
2259
2260 static void __net_exit tcp4_proc_exit_net(struct net *net)
2261 {
2262         tcp_proc_unregister(net, &tcp4_seq_afinfo);
2263 }
2264
2265 static struct pernet_operations tcp4_net_ops = {
2266         .init = tcp4_proc_init_net,
2267         .exit = tcp4_proc_exit_net,
2268 };
2269
2270 int __init tcp4_proc_init(void)
2271 {
2272         return register_pernet_subsys(&tcp4_net_ops);
2273 }
2274
2275 void tcp4_proc_exit(void)
2276 {
2277         unregister_pernet_subsys(&tcp4_net_ops);
2278 }
2279 #endif /* CONFIG_PROC_FS */
2280
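/* Protocol descriptor for IPv4 TCP sockets (SOCK_STREAM/IPPROTO_TCP); the
 * generic socket layer dispatches through these hooks for connect, I/O,
 * backlog processing, hashing and memory accounting.
 */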
2281 struct proto tcp_prot = {
2282         .name                   = "TCP",
2283         .owner                  = THIS_MODULE,
2284         .close                  = tcp_close,
2285         .connect                = tcp_v4_connect,
2286         .disconnect             = tcp_disconnect,
2287         .accept                 = inet_csk_accept,
2288         .ioctl                  = tcp_ioctl,
2289         .init                   = tcp_v4_init_sock,
2290         .destroy                = tcp_v4_destroy_sock,
2291         .shutdown               = tcp_shutdown,
2292         .setsockopt             = tcp_setsockopt,
2293         .getsockopt             = tcp_getsockopt,
2294         .recvmsg                = tcp_recvmsg,
2295         .sendmsg                = tcp_sendmsg,
2296         .sendpage               = tcp_sendpage,
2297         .backlog_rcv            = tcp_v4_do_rcv,
2298         .release_cb             = tcp_release_cb,
2299         .hash                   = inet_hash,
2300         .unhash                 = inet_unhash,
2301         .get_port               = inet_csk_get_port,
2302         .enter_memory_pressure  = tcp_enter_memory_pressure,
2303         .stream_memory_free     = tcp_stream_memory_free,
2304         .sockets_allocated      = &tcp_sockets_allocated,
2305         .orphan_count           = &tcp_orphan_count,
2306         .memory_allocated       = &tcp_memory_allocated,
2307         .memory_pressure        = &tcp_memory_pressure,
2308         .sysctl_mem             = sysctl_tcp_mem,
2309         .sysctl_wmem            = sysctl_tcp_wmem,
2310         .sysctl_rmem            = sysctl_tcp_rmem,
2311         .max_header             = MAX_TCP_HEADER,
2312         .obj_size               = sizeof(struct tcp_sock),
2313         .slab_flags             = SLAB_DESTROY_BY_RCU,
2314         .twsk_prot              = &tcp_timewait_sock_ops,
2315         .rsk_prot               = &tcp_request_sock_ops,
2316         .h.hashinfo             = &tcp_hashinfo,
2317         .no_autobind            = true,
2318 #ifdef CONFIG_COMPAT
2319         .compat_setsockopt      = compat_tcp_setsockopt,
2320         .compat_getsockopt      = compat_tcp_getsockopt,
2321 #endif
2322 #ifdef CONFIG_MEMCG_KMEM
2323         .init_cgroup            = tcp_init_cgroup,
2324         .destroy_cgroup         = tcp_destroy_cgroup,
2325         .proto_cgroup           = tcp_proto_cgroup,
2326 #endif
2327 };
2328 EXPORT_SYMBOL(tcp_prot);
2329
2330 static void __net_exit tcp_sk_exit(struct net *net)
2331 {
2332         int cpu;
2333
2334         for_each_possible_cpu(cpu)
2335                 inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
2336         free_percpu(net->ipv4.tcp_sk);
2337 }
2338
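/* Per-namespace init: allocate one control socket per possible CPU, used to
 * send replies (RSTs and ACKs) that are not attached to a full socket, and
 * set this namespace's TCP sysctl defaults.
 */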
2339 static int __net_init tcp_sk_init(struct net *net)
2340 {
2341         int res, cpu;
2342
2343         net->ipv4.tcp_sk = alloc_percpu(struct sock *);
2344         if (!net->ipv4.tcp_sk)
2345                 return -ENOMEM;
2346
2347         for_each_possible_cpu(cpu) {
2348                 struct sock *sk;
2349
2350                 res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
2351                                            IPPROTO_TCP, net);
2352                 if (res)
2353                         goto fail;
2354                 *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
2355         }
2356
2357         net->ipv4.sysctl_tcp_ecn = 2;
2358         net->ipv4.sysctl_tcp_ecn_fallback = 1;
2359
2360         net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
2361         net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
2362         net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
2363
2364         return 0;
2365 fail:
2366         tcp_sk_exit(net);
2367
2368         return res;
2369 }
2370
2371 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2372 {
2373         inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
2374 }
2375
2376 static struct pernet_operations __net_initdata tcp_sk_ops = {
2377        .init       = tcp_sk_init,
2378        .exit       = tcp_sk_exit,
2379        .exit_batch = tcp_sk_exit_batch,
2380 };
2381
2382 void __init tcp_v4_init(void)
2383 {
2384         inet_hashinfo_init(&tcp_hashinfo);
2385         if (register_pernet_subsys(&tcp_sk_ops))
2386                 panic("Failed to create the TCP control socket.\n");
2387 }