/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/random.h>

#include <vm/uma.h>

#include <net/route.h>
#include <net/if.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet6/scope6_var.h>
#include <netinet6/nd6.h>
#endif
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#ifdef INET6
#include <netinet6/ip6protosw.h>
#endif

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>

static VNET_DEFINE(uma_zone_t, tcptw_zone);
#define	V_tcptw_zone			VNET(tcptw_zone)
static int	maxtcptw;

/*
 * The timed wait queue contains references to each of the TCP sessions
 * currently in the TIME_WAIT state.  The queue pointers, including the
 * queue pointers in each tcptw structure, are protected using the global
 * tcbinfo lock, which must be held over queue iteration and modification.
 */
static VNET_DEFINE(TAILQ_HEAD(, tcptw), twq_2msl);
#define	V_twq_2msl			VNET(twq_2msl)

static void	tcp_tw_2msl_reset(struct tcptw *, int);
static void	tcp_tw_2msl_stop(struct tcptw *);
static int	tcp_twrespond(struct tcptw *, int);

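/*
 * Compute the automatic cap on compressed TIME_WAIT entries, used whenever
 * the net.inet.tcp.maxtcptw tunable/sysctl has not been set explicitly.
 */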
static int
tcptw_auto_size(void)
{
	int halfrange;

	/*
	 * Max out at half the ephemeral port range so that TIME_WAIT
	 * sockets don't tie up too many ephemeral ports.
	 */
	if (V_ipport_lastauto > V_ipport_firstauto)
		halfrange = (V_ipport_lastauto - V_ipport_firstauto) / 2;
	else
		halfrange = (V_ipport_firstauto - V_ipport_lastauto) / 2;
	/* Protect against goofy port ranges smaller than 32. */
	return (imin(imax(halfrange, 32), maxsockets / 5));
}

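/*
 * Sysctl handler for net.inet.tcp.maxtcptw.  Reports the effective limit
 * (the auto-sized value while maxtcptw is 0) and accepts a new limit of at
 * least 32, resizing the tcptw zone to match.
 */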
static int
sysctl_maxtcptw(SYSCTL_HANDLER_ARGS)
{
	int error, new;

	if (maxtcptw == 0)
		new = tcptw_auto_size();
	else
		new = maxtcptw;
	error = sysctl_handle_int(oidp, &new, 0, req);
	if (error == 0 && req->newptr)
		if (new >= 32) {
			maxtcptw = new;
			uma_zone_set_max(V_tcptw_zone, maxtcptw);
		}
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, maxtcptw, CTLTYPE_INT|CTLFLAG_RW,
    &maxtcptw, 0, sysctl_maxtcptw, "IU",
    "Maximum number of compressed TCP TIME_WAIT entries");

VNET_DEFINE(int, nolocaltimewait) = 0;
#define	V_nolocaltimewait	VNET(nolocaltimewait)
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, nolocaltimewait, CTLFLAG_RW,
    &VNET_NAME(nolocaltimewait), 0,
    "Do not create compressed TCP TIME_WAIT entries for local connections");

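/*
 * Re-derive the automatic tcptw zone limit; a no-op when maxtcptw has been
 * set explicitly.
 */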
void
tcp_tw_zone_change(void)
{

	if (maxtcptw == 0)
		uma_zone_set_max(V_tcptw_zone, tcptw_auto_size());
}

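/*
 * Initialize the per-vnet TIME_WAIT state: create the tcptw zone, apply the
 * configured or automatic limit and set up the 2MSL queue.
 */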
void
tcp_tw_init(void)
{

	V_tcptw_zone = uma_zcreate("tcptw", sizeof(struct tcptw),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	TUNABLE_INT_FETCH("net.inet.tcp.maxtcptw", &maxtcptw);
	if (maxtcptw == 0)
		uma_zone_set_max(V_tcptw_zone, tcptw_auto_size());
	else
		uma_zone_set_max(V_tcptw_zone, maxtcptw);
	TAILQ_INIT(&V_twq_2msl);
}

#ifdef VIMAGE
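/*
 * Tear down the per-vnet TIME_WAIT state: close any remaining compressed
 * TIME_WAIT sessions and destroy the tcptw zone.
 */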
void
tcp_tw_destroy(void)
{
	struct tcptw *tw;

	INP_INFO_WLOCK(&V_tcbinfo);
	while ((tw = TAILQ_FIRST(&V_twq_2msl)) != NULL)
		tcp_twclose(tw, 0);
	INP_INFO_WUNLOCK(&V_tcbinfo);

	uma_zdestroy(V_tcptw_zone);
}
#endif

/*
 * Move a TCP connection into TIME_WAIT state.
 *    tcbinfo is locked.
 *    inp is locked, and is unlocked before returning.
 */
void
tcp_twstart(struct tcpcb *tp)
{
	struct tcptw *tw;
	struct inpcb *inp = tp->t_inpcb;
	int acknow;
	struct socket *so;
#ifdef INET6
	int isipv6 = inp->inp_inc.inc_flags & INC_ISIPV6;
#endif

	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);	/* tcp_tw_2msl_reset(). */
	INP_WLOCK_ASSERT(inp);

	if (V_nolocaltimewait) {
		int error = 0;
#ifdef INET6
		if (isipv6)
			error = in6_localaddr(&inp->in6p_faddr);
#endif
#if defined(INET6) && defined(INET)
		else
#endif
#ifdef INET
			error = in_localip(inp->inp_faddr);
#endif
		if (error) {
			tp = tcp_close(tp);
			if (tp != NULL)
				INP_WUNLOCK(inp);
			return;
		}
	}

	tw = uma_zalloc(V_tcptw_zone, M_NOWAIT);
	if (tw == NULL) {
		tw = tcp_tw_2msl_scan(1);
		if (tw == NULL) {
			tp = tcp_close(tp);
			if (tp != NULL)
				INP_WUNLOCK(inp);
			return;
		}
	}
	tw->tw_inpcb = inp;

	/*
	 * Recover last window size sent.
	 */
	if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt))
		tw->last_win = (tp->rcv_adv - tp->rcv_nxt) >> tp->rcv_scale;
	else
		tw->last_win = 0;

	/*
	 * Set t_recent if timestamps are used on the connection.
	 */
	if ((tp->t_flags & (TF_REQ_TSTMP|TF_RCVD_TSTMP|TF_NOOPT)) ==
	    (TF_REQ_TSTMP|TF_RCVD_TSTMP)) {
		tw->t_recent = tp->ts_recent;
		tw->ts_offset = tp->ts_offset;
	} else {
		tw->t_recent = 0;
		tw->ts_offset = 0;
	}

	tw->snd_nxt = tp->snd_nxt;
	tw->rcv_nxt = tp->rcv_nxt;
	tw->iss     = tp->iss;
	tw->irs     = tp->irs;
	tw->t_starttime = tp->t_starttime;
	tw->tw_time = 0;

	/*
	 * XXX: If this code is ever also used for the FIN_WAIT_2 state,
	 * we may need ts_recent from the last segment.
	 */
	acknow = tp->t_flags & TF_ACKNOW;

	/*
	 * First, discard tcpcb state, which includes stopping its timers and
	 * freeing it.  tcp_discardcb() used to also release the inpcb, but
	 * that work is now done in the caller.
	 *
	 * Note: the soisdisconnected() call used to be made in
	 * tcp_discardcb(), and might not be needed here any longer.
	 */
	tcp_discardcb(tp);
	so = inp->inp_socket;
	soisdisconnected(so);
	tw->tw_cred = crhold(so->so_cred);
	SOCK_LOCK(so);
	tw->tw_so_options = so->so_options;
	SOCK_UNLOCK(so);
	if (acknow)
		tcp_twrespond(tw, TH_ACK);
	inp->inp_ppcb = tw;
	inp->inp_flags |= INP_TIMEWAIT;
	tcp_tw_2msl_reset(tw, 0);

	/*
	 * If the inpcb owns the sole reference to the socket, then we can
	 * detach and free the socket as it is not needed in time wait.
	 */
	if (inp->inp_flags & INP_SOCKREF) {
		KASSERT(so->so_state & SS_PROTOREF,
		    ("tcp_twstart: !SS_PROTOREF"));
		inp->inp_flags &= ~INP_SOCKREF;
		INP_WUNLOCK(inp);
		ACCEPT_LOCK();
		SOCK_LOCK(so);
		so->so_state &= ~SS_PROTOREF;
		sofree(so);
	} else
		INP_WUNLOCK(inp);
}

#if 0
/*
 * The approximate rate of ISN increase of Microsoft TCP stacks;
 * the actual rate is slightly higher due to the addition of
 * random positive increments.
 *
 * Most other new OSes use semi-randomized ISN values, so we
 * do not need to worry about them.
 */
#define MS_ISN_BYTES_PER_SECOND		250000

/*
 * Determine if the ISN we will generate has advanced beyond the last
 * sequence number used by the previous connection.  If so, indicate
 * that it is safe to recycle this tw socket by returning 1.
 */
int
tcp_twrecycleable(struct tcptw *tw)
{
	tcp_seq new_iss = tw->iss;
	tcp_seq new_irs = tw->irs;

	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	new_iss += (ticks - tw->t_starttime) * (ISN_BYTES_PER_SECOND / hz);
	new_irs += (ticks - tw->t_starttime) * (MS_ISN_BYTES_PER_SECOND / hz);

	if (SEQ_GT(new_iss, tw->snd_nxt) && SEQ_GT(new_irs, tw->rcv_nxt))
		return (1);
	else
		return (0);
}
#endif

/*
 * Returns 1 if the TIME_WAIT state was killed and we should start over,
 * looking for a pcb in the listen state.  Returns 0 otherwise.
 */
int
tcp_twcheck(struct inpcb *inp, struct tcpopt *to __unused, struct tcphdr *th,
    struct mbuf *m, int tlen)
{
	struct tcptw *tw;
	int thflags;
	tcp_seq seq;

	/* tcbinfo lock required for tcp_twclose(), tcp_tw_2msl_reset(). */
	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	INP_WLOCK_ASSERT(inp);

	/*
	 * XXXRW: Time wait state for inpcb has been recycled, but inpcb is
	 * still present.  This is undesirable, but temporarily necessary
	 * until we work out how to handle inpcbs whose timewait state has
	 * been removed.
	 */
	tw = intotw(inp);
	if (tw == NULL)
		goto drop;

	thflags = th->th_flags;

	/*
	 * NOTE: for FIN_WAIT_2 (to be added later),
	 * must validate sequence number before accepting RST
	 */

	/*
	 * If the segment contains RST:
	 *	Drop the segment - see Stevens, vol. 2, p. 964 and
	 *      RFC 1337.
	 */
	if (thflags & TH_RST)
		goto drop;

#if 0
/* PAWS not needed at the moment */
	/*
	 * RFC 1323 PAWS: If we have a timestamp reply on this segment
	 * and it's less than ts_recent, drop it.
	 */
	if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
	    TSTMP_LT(to.to_tsval, tp->ts_recent)) {
		if ((thflags & TH_ACK) == 0)
			goto drop;
		goto ack;
	}
	/*
	 * ts_recent is never updated because we never accept new segments.
	 */
#endif

	/*
	 * If a new connection request is received
	 * while in TIME_WAIT, drop the old connection
	 * and start over if the sequence numbers
	 * are above the previous ones.
	 */
	if ((thflags & TH_SYN) && SEQ_GT(th->th_seq, tw->rcv_nxt)) {
		tcp_twclose(tw, 0);
		return (1);
	}

	/*
	 * Drop the segment if it does not contain an ACK.
	 */
	if ((thflags & TH_ACK) == 0)
		goto drop;

	/*
	 * Reset the 2MSL timer if this is a duplicate FIN.
	 */
	if (thflags & TH_FIN) {
		seq = th->th_seq + tlen + (thflags & TH_SYN ? 1 : 0);
		if (seq + 1 == tw->rcv_nxt)
			tcp_tw_2msl_reset(tw, 1);
	}

	/*
	 * Acknowledge the segment if it has data or is not a duplicate ACK.
	 */
	if (thflags != TH_ACK || tlen != 0 ||
	    th->th_seq != tw->rcv_nxt || th->th_ack != tw->snd_nxt)
		tcp_twrespond(tw, TH_ACK);
drop:
	INP_WUNLOCK(inp);
	m_freem(m);
	return (0);
}

void
tcp_twclose(struct tcptw *tw, int reuse)
{
	struct socket *so;
	struct inpcb *inp;

	/*
	 * At this point, we are in one of two situations:
	 *
	 * (1) We have no socket, just an inpcb<->tcptw pair.  We can free
	 *     all state.
	 *
	 * (2) We have a socket -- if we own a reference, release it and
	 *     notify the socket layer.
	 */
	inp = tw->tw_inpcb;
	KASSERT((inp->inp_flags & INP_TIMEWAIT), ("tcp_twclose: !timewait"));
	KASSERT(intotw(inp) == tw, ("tcp_twclose: inp_ppcb != tw"));
	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);	/* tcp_tw_2msl_stop(). */
	INP_WLOCK_ASSERT(inp);

	tw->tw_inpcb = NULL;
	tcp_tw_2msl_stop(tw);
	inp->inp_ppcb = NULL;
	in_pcbdrop(inp);

	so = inp->inp_socket;
	if (so != NULL) {
		/*
		 * If there's a socket, handle two cases: either we own a
		 * strong reference, which we will now release, or we don't,
		 * in which case another reference exists (XXXRW: think
		 * about this more) and we don't need to take action.
		 */
		if (inp->inp_flags & INP_SOCKREF) {
			inp->inp_flags &= ~INP_SOCKREF;
			INP_WUNLOCK(inp);
			ACCEPT_LOCK();
			SOCK_LOCK(so);
			KASSERT(so->so_state & SS_PROTOREF,
			    ("tcp_twclose: INP_SOCKREF && !SS_PROTOREF"));
			so->so_state &= ~SS_PROTOREF;
			sofree(so);
		} else {
			/*
			 * If we don't own the only reference, the socket and
			 * inpcb need to be left around to be handled by
			 * tcp_usr_detach() later.
			 */
			INP_WUNLOCK(inp);
		}
	} else
		in_pcbfree(inp);
	TCPSTAT_INC(tcps_closed);
	crfree(tw->tw_cred);
	tw->tw_cred = NULL;
	if (reuse)
		return;
	uma_zfree(V_tcptw_zone, tw);
}

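/*
 * Send a control segment (normally an ACK) on behalf of a compressed
 * TIME_WAIT endpoint, rebuilding the IP or IPv6 and TCP headers from the
 * state stored in the tcptw.
 */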
static int
tcp_twrespond(struct tcptw *tw, int flags)
{
	struct inpcb *inp = tw->tw_inpcb;
#if defined(INET6) || defined(INET)
	struct tcphdr *th = NULL;
#endif
	struct mbuf *m;
#ifdef INET
	struct ip *ip = NULL;
#endif
	u_int hdrlen, optlen;
	int error = 0;			/* Keep compiler happy */
	struct tcpopt to;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	int isipv6 = inp->inp_inc.inc_flags & INC_ISIPV6;
#endif
	hdrlen = 0;                     /* Keep compiler happy */

	INP_WLOCK_ASSERT(inp);

	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	m->m_data += max_linkhdr;

#ifdef MAC
	mac_inpcb_create_mbuf(inp, m);
#endif

#ifdef INET6
	if (isipv6) {
		hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)(ip6 + 1);
		tcpip_fillheaders(inp, ip6, th);
	}
#endif
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	{
		hdrlen = sizeof(struct tcpiphdr);
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)(ip + 1);
		tcpip_fillheaders(inp, ip, th);
	}
#endif
	to.to_flags = 0;

	/*
	 * Send a timestamp and echo-reply if both our side and our peer
	 * have sent timestamps in our SYNs and this is not an RST.
	 */
	if (tw->t_recent && flags == TH_ACK) {
		to.to_flags |= TOF_TS;
		to.to_tsval = tcp_ts_getticks() + tw->ts_offset;
		to.to_tsecr = tw->t_recent;
	}
	optlen = tcp_addoptions(&to, (u_char *)(th + 1));

	m->m_len = hdrlen + optlen;
	m->m_pkthdr.len = m->m_len;

	KASSERT(max_linkhdr + m->m_len <= MHLEN, ("tcptw: mbuf too small"));

	th->th_seq = htonl(tw->snd_nxt);
	th->th_ack = htonl(tw->rcv_nxt);
	th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
	th->th_flags = flags;
	th->th_win = htons(tw->last_win);

	m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
#ifdef INET6
	if (isipv6) {
		m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
		th->th_sum = in6_cksum_pseudo(ip6,
		    sizeof(struct tcphdr) + optlen, IPPROTO_TCP, 0);
		ip6->ip6_hlim = in6_selecthlim(inp, NULL);
		error = ip6_output(m, inp->in6p_outputopts, NULL,
		    (tw->tw_so_options & SO_DONTROUTE), NULL, NULL, inp);
	}
#endif
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	{
		m->m_pkthdr.csum_flags = CSUM_TCP;
		th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons(sizeof(struct tcphdr) + optlen + IPPROTO_TCP));
		ip->ip_len = htons(m->m_pkthdr.len);
		if (V_path_mtu_discovery)
			ip->ip_off |= htons(IP_DF);
		error = ip_output(m, inp->inp_options, NULL,
		    ((tw->tw_so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0),
		    NULL, inp);
	}
#endif
	if (flags & TH_ACK)
		TCPSTAT_INC(tcps_sndacks);
	else
		TCPSTAT_INC(tcps_sndctrl);
	TCPSTAT_INC(tcps_sndtotal);
	return (error);
}

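/*
 * (Re)arm the 2MSL expiry of a timewait entry and move it to the tail of
 * the queue, which is kept ordered by expiry time.
 */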
static void
tcp_tw_2msl_reset(struct tcptw *tw, int rearm)
{

	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	INP_WLOCK_ASSERT(tw->tw_inpcb);
	if (rearm)
		TAILQ_REMOVE(&V_twq_2msl, tw, tw_2msl);
	tw->tw_time = ticks + 2 * tcp_msl;
	TAILQ_INSERT_TAIL(&V_twq_2msl, tw, tw_2msl);
}

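/*
 * Remove a timewait entry from the 2MSL queue.
 */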
static void
tcp_tw_2msl_stop(struct tcptw *tw)
{

	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	TAILQ_REMOVE(&V_twq_2msl, tw, tw_2msl);
}

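/*
 * Reap expired entries from the head of the 2MSL queue.  With 'reuse' set,
 * unconditionally recycle the oldest entry and return it to the caller;
 * otherwise return NULL once the head entry has not yet expired.
 */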
struct tcptw *
tcp_tw_2msl_scan(int reuse)
{
	struct tcptw *tw;

	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	for (;;) {
		tw = TAILQ_FIRST(&V_twq_2msl);
		if (tw == NULL || (!reuse && (tw->tw_time - ticks) > 0))
			break;
		INP_WLOCK(tw->tw_inpcb);
		tcp_twclose(tw, reuse);
		if (reuse)
			return (tw);
	}
	return (NULL);
}