/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_kern_tls.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/arb.h>
#include <sys/domain.h>
#ifdef TCP_HHOOK
#include <sys/hhook.h>
#endif
#include <sys/kernel.h>
#ifdef KERN_TLS
#include <sys/ktls.h>
#endif
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/protosw.h>
#include <sys/qmath.h>
#include <sys/sdt.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/stats.h>

#include <net/if.h>
#include <net/route.h>
#include <net/route/nhop.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#ifdef INET6
#include <netinet6/in6_pcb.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#endif
#include <netinet/tcp.h>
#define	TCPOUTFLAGS
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_syncache.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcpip.h>
#include <netinet/cc/cc.h>
#include <netinet/tcp_fastopen.h>
#ifdef TCPPCAP
#include <netinet/tcp_pcap.h>
#endif
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif
#include <netinet/tcp_ecn.h>

#include <netipsec/ipsec_support.h>

#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>

VNET_DEFINE(int, path_mtu_discovery) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, path_mtu_discovery, CTLFLAG_VNET | CTLFLAG_RW,
	&VNET_NAME(path_mtu_discovery), 1,
	"Enable Path MTU Discovery");

VNET_DEFINE(int, tcp_do_tso) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tso, CTLFLAG_VNET | CTLFLAG_RW,
	&VNET_NAME(tcp_do_tso), 0,
	"Enable TCP Segmentation Offload");

VNET_DEFINE(int, tcp_sendspace) = 1024*32;
#define	V_tcp_sendspace	VNET(tcp_sendspace)
SYSCTL_INT(_net_inet_tcp, TCPCTL_SENDSPACE, sendspace, CTLFLAG_VNET | CTLFLAG_RW,
	&VNET_NAME(tcp_sendspace), 0, "Initial send socket buffer size");

VNET_DEFINE(int, tcp_do_autosndbuf) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_auto, CTLFLAG_VNET | CTLFLAG_RW,
	&VNET_NAME(tcp_do_autosndbuf), 0,
	"Enable automatic send buffer sizing");

VNET_DEFINE(int, tcp_autosndbuf_inc) = 8*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_inc, CTLFLAG_VNET | CTLFLAG_RW,
	&VNET_NAME(tcp_autosndbuf_inc), 0,
	"Increment step size of the automatic send buffer");

VNET_DEFINE(int, tcp_autosndbuf_max) = 2*1024*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_max, CTLFLAG_VNET | CTLFLAG_RW,
	&VNET_NAME(tcp_autosndbuf_max), 0,
	"Max size of automatic send buffer");

VNET_DEFINE(int, tcp_sendbuf_auto_lowat) = 0;
#define	V_tcp_sendbuf_auto_lowat	VNET(tcp_sendbuf_auto_lowat)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_auto_lowat, CTLFLAG_VNET | CTLFLAG_RW,
	&VNET_NAME(tcp_sendbuf_auto_lowat), 0,
	"Modify threshold for auto send buffer growth to account for SO_SNDLOWAT");

/*
 * Make sure that either the retransmit or the persist timer is set for
 * SYN, FIN and non-ACK segments.
 */
#define TCP_XMIT_TIMER_ASSERT(tp, len, th_flags)			\
	KASSERT(((len) == 0 && ((th_flags) & (TH_SYN | TH_FIN)) == 0) ||\
	    tcp_timer_active((tp), TT_REXMT) ||				\
	    tcp_timer_active((tp), TT_PERSIST),				\
	    ("neither rexmt nor persist timer is set"))

#ifdef TCP_HHOOK
/*
 * Wrapper for the TCP established output helper hook.
 */
void
hhook_run_tcp_est_out(struct tcpcb *tp, struct tcphdr *th,
    struct tcpopt *to, uint32_t len, int tso)
{
	struct tcp_hhook_data hhook_data;

	if (V_tcp_hhh[HHOOK_TCP_EST_OUT]->hhh_nhooks > 0) {
		hhook_data.tp = tp;
		hhook_data.th = th;
		hhook_data.to = to;
		hhook_data.len = len;
		hhook_data.tso = tso;

		hhook_run_hooks(V_tcp_hhh[HHOOK_TCP_EST_OUT], &hhook_data,
		    &tp->t_osd);
	}
}
#endif

/*
 * CC wrapper hook functions
 */
void
cc_after_idle(struct tcpcb *tp)
{
	INP_WLOCK_ASSERT(tptoinpcb(tp));

	if (CC_ALGO(tp)->after_idle != NULL)
		CC_ALGO(tp)->after_idle(&tp->t_ccv);
}

/*
 * TCP output routine: figure out what should be sent and send it.
 */
int
tcp_default_output(struct tcpcb *tp)
{
	struct socket *so = tptosocket(tp);
	struct inpcb *inp = tptoinpcb(tp);
	int32_t len;
	uint32_t recwin, sendwin;
	uint16_t flags;
	int off, error = 0;	/* Keep compiler happy */
	u_int if_hw_tsomaxsegcount = 0;
	u_int if_hw_tsomaxsegsize = 0;
	struct mbuf *m;
	struct ip *ip = NULL;
	struct tcphdr *th;
	u_char opt[TCP_MAXOLEN];
	unsigned ipoptlen, optlen, hdrlen, ulen;
#if defined(IPSEC) || defined(IPSEC_SUPPORT)
	unsigned ipsec_optlen = 0;
#endif
	int idle, sendalot, curticks;
	int sack_rxmit, sack_bytes_rxmt;
	struct sackhole *p;
	int tso, mtu;
	struct tcpopt to;
	struct udphdr *udp = NULL;
	struct tcp_log_buffer *lgb;
	unsigned int wanted_cookie = 0;
	unsigned int dont_sendalot = 0;
#if 0
	int maxburst = TCP_MAXBURST;
#endif
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	const bool isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
#endif
#ifdef KERN_TLS
	const bool hw_tls = tp->t_nic_ktls_xmit != 0;
#else
	const bool hw_tls = false;
#endif

	NET_EPOCH_ASSERT();
	INP_WLOCK_ASSERT(inp);

#ifdef TCP_OFFLOAD
	if (tp->t_flags & TF_TOE)
		return (tcp_offload_output(tp));
#endif

	/*
	 * For TFO connections in SYN_SENT or SYN_RECEIVED,
	 * only allow the initial SYN or SYN|ACK and those sent
	 * by the retransmit timer.
	 */
	if ((tp->t_flags & TF_FASTOPEN) &&
	    ((tp->t_state == TCPS_SYN_SENT) ||
	    (tp->t_state == TCPS_SYN_RECEIVED)) &&
	    SEQ_GT(tp->snd_max, tp->snd_una) && /* SYN or SYN|ACK sent */
	    (tp->snd_nxt != tp->snd_una))       /* not a retransmit */
		return (0);

	/*
	 * Determine length of data that should be transmitted,
	 * and flags that will be used.
	 * If there is some data or critical controls (SYN, RST)
	 * to send, then transmit; otherwise, investigate further.
	 */
	idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una);
	if (idle && (((ticks - tp->t_rcvtime) >= tp->t_rxtcur) ||
	    (tp->t_sndtime && ((ticks - tp->t_sndtime) >= tp->t_rxtcur))))
		cc_after_idle(tp);
	tp->t_flags &= ~TF_LASTIDLE;
	if (idle) {
		if (tp->t_flags & TF_MORETOCOME) {
			tp->t_flags |= TF_LASTIDLE;
			idle = 0;
		}
	}
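	/*
	 * Restart point: we come back here when another segment must be
	 * sent (sendalot) or to retry after the MSS has been updated on
	 * an EMSGSIZE error.
	 */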
again:
	/*
	 * If we've recently taken a timeout, snd_max will be greater than
	 * snd_nxt.  There may be SACK information that allows us to avoid
	 * resending already delivered data.  Adjust snd_nxt accordingly.
	 */
	if ((tp->t_flags & TF_SACK_PERMIT) &&
	    SEQ_LT(tp->snd_nxt, tp->snd_max))
		tcp_sack_adjust(tp);
	sendalot = 0;
	tso = 0;
	mtu = 0;
	off = tp->snd_nxt - tp->snd_una;
	sendwin = min(tp->snd_wnd, tp->snd_cwnd);

	flags = tcp_outflags[tp->t_state];
	/*
	 * Send any SACK-generated retransmissions.  If we're explicitly trying
	 * to send out new data (when sendalot is 1), bypass this function.
	 * If we retransmit in fast recovery mode, decrement snd_cwnd, since
	 * we're replacing a (future) new transmission with a retransmission
	 * now, and we previously incremented snd_cwnd in tcp_input().
	 */
	/*
	 * Reset the SACK retransmit state; it is set again below if we are
	 * still in SACK recovery and there is a hole to retransmit from.
	 */
	sack_rxmit = 0;
	sack_bytes_rxmt = 0;
	len = 0;
	p = NULL;
	if ((tp->t_flags & TF_SACK_PERMIT) && IN_FASTRECOVERY(tp->t_flags) &&
	    (p = tcp_sack_output(tp, &sack_bytes_rxmt))) {
		uint32_t cwin;

		cwin =
		    imax(min(tp->snd_wnd, tp->snd_cwnd) - sack_bytes_rxmt, 0);
		/* Do not retransmit SACK segments beyond snd_recover */
		if (SEQ_GT(p->end, tp->snd_recover)) {
			/*
			 * (At least) part of sack hole extends beyond
			 * snd_recover. Check to see if we can rexmit data
			 * for this hole.
			 */
			if (SEQ_GEQ(p->rxmit, tp->snd_recover)) {
				/*
				 * Can't rexmit any more data for this hole.
				 * That data will be rexmitted in the next
				 * sack recovery episode, when snd_recover
				 * moves past p->rxmit.
				 */
				p = NULL;
				goto after_sack_rexmit;
			} else {
				/* Can rexmit part of the current hole */
				len = ((int32_t)ulmin(cwin,
				    SEQ_SUB(tp->snd_recover, p->rxmit)));
			}
		} else {
			len = ((int32_t)ulmin(cwin,
			    SEQ_SUB(p->end, p->rxmit)));
		}
		if (len > 0) {
			off = SEQ_SUB(p->rxmit, tp->snd_una);
			KASSERT(off >= 0, ("%s: sack block to the left of una: %d",
			    __func__, off));
			sack_rxmit = 1;
			sendalot = 1;
		}
	}
after_sack_rexmit:
	/*
	 * Get standard flags, and add SYN or FIN if requested by 'hidden'
	 * state flags.
	 */
	if (tp->t_flags & TF_NEEDFIN)
		flags |= TH_FIN;
	if (tp->t_flags & TF_NEEDSYN)
		flags |= TH_SYN;

	SOCKBUF_LOCK(&so->so_snd);
	/*
	 * If in persist timeout with window of 0, send 1 byte.
	 * Otherwise, if window is small but nonzero
	 * and timer expired, we will send what we can
	 * and go to transmit state.
	 */
	if (tp->t_flags & TF_FORCEDATA) {
		if (sendwin == 0) {
			/*
			 * If we still have some data to send, then
			 * clear the FIN bit.  Usually this would
			 * happen below when it realizes that we
			 * aren't sending all the data.  However,
			 * if we have exactly 1 byte of unsent data,
			 * then it won't clear the FIN bit below,
			 * and if we are in persist state, we wind
			 * up sending the packet without recording
			 * that we sent the FIN bit.
			 *
			 * We can't just blindly clear the FIN bit,
			 * because if we don't have any more data
			 * to send then the probe will be the FIN
			 * itself.
			 */
			if (off < sbused(&so->so_snd))
				flags &= ~TH_FIN;
			sendwin = 1;
		} else {
			tcp_timer_activate(tp, TT_PERSIST, 0);
			tp->t_rxtshift = 0;
		}
	}

	/*
	 * If snd_nxt == snd_max and we have transmitted a FIN, the
	 * offset will be > 0 even if so_snd.sb_cc is 0, resulting in
	 * a negative length.  This can also occur when TCP opens up
	 * its congestion window while receiving additional duplicate
	 * acks after fast-retransmit because TCP will reset snd_nxt
	 * to snd_max after the fast-retransmit.
	 *
	 * In the normal retransmit-FIN-only case, however, snd_nxt will
	 * be set to snd_una, the offset will be 0, and the length may
	 * wind up 0.
	 *
	 * If sack_rxmit is true we are retransmitting from the scoreboard
	 * in which case len is already set.
	 */
	if (sack_rxmit == 0) {
		if (sack_bytes_rxmt == 0) {
			len = ((int32_t)min(sbavail(&so->so_snd), sendwin) -
			    off);
		} else {
			int32_t cwin;

			/*
			 * We are inside of a SACK recovery episode and are
			 * sending new data, having retransmitted all the
			 * data possible in the scoreboard.
			 */
			len = ((int32_t)min(sbavail(&so->so_snd), tp->snd_wnd) -
			    off);
			/*
			 * Don't remove this (len > 0) check !
			 * We explicitly check for len > 0 here (although it
			 * isn't really necessary), to work around a gcc
			 * optimization issue - to force gcc to compute
			 * len above. Without this check, the computation
			 * of len is bungled by the optimizer.
			 */
			if (len > 0) {
				cwin = tp->snd_cwnd - imax(0, (int32_t)
					(tp->snd_nxt - tp->snd_recover)) -
					sack_bytes_rxmt;
				if (cwin < 0)
					cwin = 0;
				len = imin(len, cwin);
			}
		}
	}

	/*
	 * Lop off SYN bit if it has already been sent.  However, if this
	 * is SYN-SENT state and if segment contains data and if we don't
	 * know that foreign host supports TAO, suppress sending segment.
	 */
	if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una)) {
		if (tp->t_state != TCPS_SYN_RECEIVED)
			flags &= ~TH_SYN;
		/*
		 * When sending additional segments following a TFO SYN|ACK,
		 * do not include the SYN bit.
		 */
		if ((tp->t_flags & TF_FASTOPEN) &&
		    (tp->t_state == TCPS_SYN_RECEIVED))
			flags &= ~TH_SYN;
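		/*
		 * The SYN occupies one sequence number, so the offset
		 * computed from snd_nxt - snd_una counts it; step back
		 * over the SYN and add the byte back to the length.
		 */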
		off--, len++;
	}

	/*
	 * Be careful not to send data and/or FIN on SYN segments.
	 * This measure is needed to prevent interoperability problems
	 * with not fully conformant TCP implementations.
	 */
	if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) {
		len = 0;
		flags &= ~TH_FIN;
	}

	/*
	 * On TFO sockets, ensure no data is sent in the following cases:
	 *
	 *  - When retransmitting SYN|ACK on a passively-created socket
	 *
	 *  - When retransmitting SYN on an actively created socket
	 *
	 *  - When sending a zero-length cookie (cookie request) on an
	 *    actively created socket
	 *
	 *  - When the socket is in the CLOSED state (RST is being sent)
	 */
	if ((tp->t_flags & TF_FASTOPEN) &&
	    (((flags & TH_SYN) && (tp->t_rxtshift > 0)) ||
	     ((tp->t_state == TCPS_SYN_SENT) &&
	      (tp->t_tfo_client_cookie_len == 0)) ||
	     (flags & TH_RST)))
		len = 0;

	/* Without fast-open there should never be data sent on a SYN. */
	if ((flags & TH_SYN) && !(tp->t_flags & TF_FASTOPEN)) {
		len = 0;
	}

	if (len <= 0) {
		/*
		 * If FIN has been sent but not acked,
		 * but we haven't been called to retransmit,
		 * len will be < 0.  Otherwise, window shrank
		 * after we sent into it.  If window shrank to 0,
		 * cancel pending retransmit, pull snd_nxt back
		 * to (closed) window, and set the persist timer
		 * if it isn't already going.  If the window didn't
		 * close completely, just wait for an ACK.
		 *
		 * We also do a general check here to ensure that
		 * we will set the persist timer when we have data
		 * to send, but a 0-byte window. This makes sure
		 * the persist timer is set even if the packet
		 * hits one of the "goto send" lines below.
		 */
		len = 0;
		if ((sendwin == 0) && (TCPS_HAVEESTABLISHED(tp->t_state)) &&
		    (off < (int) sbavail(&so->so_snd)) &&
		    !tcp_timer_active(tp, TT_PERSIST)) {
			tcp_timer_activate(tp, TT_REXMT, 0);
			tp->t_rxtshift = 0;
			tp->snd_nxt = tp->snd_una;
			if (!tcp_timer_active(tp, TT_PERSIST))
				tcp_setpersist(tp);
		}
	}

	/* len will be >= 0 after this point. */
	KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));

	tcp_sndbuf_autoscale(tp, so, sendwin);

	/*
	 * Decide if we can use TCP Segmentation Offloading (if supported by
	 * hardware).
	 *
	 * TSO may only be used if we are in a pure bulk sending state.  The
	 * presence of TCP-MD5, SACK retransmits, SACK advertisements and
	 * IP options prevent using TSO.  With TSO the TCP header is the same
	 * (except for the sequence number) for all generated packets.  This
	 * makes it impossible to transmit any options which vary per generated
	 * segment or packet.
	 *
	 * IPv4 handling has a clear separation of ip options and ip header
	 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does
	 * the right thing below to provide length of just ip options and thus
	 * checking for ipoptlen is enough to decide if ip options are present.
	 */
#if defined(IPSEC) || defined(IPSEC_SUPPORT)
	/*
	 * Pre-calculate here as we save another lookup into the darknesses
	 * of IPsec that way and can actually decide if TSO is ok.
	 */
#ifdef INET6
	if (isipv6 && IPSEC_ENABLED(ipv6))
		ipsec_optlen = IPSEC_HDRSIZE(ipv6, inp);
#ifdef INET
	else
#endif
#endif /* INET6 */
#ifdef INET
	if (IPSEC_ENABLED(ipv4))
		ipsec_optlen = IPSEC_HDRSIZE(ipv4, inp);
#endif /* INET */
#endif /* IPSEC */
#ifdef INET6
	if (isipv6)
		ipoptlen = ip6_optlen(inp);
	else
#endif
	if (inp->inp_options)
		ipoptlen = inp->inp_options->m_len -
				offsetof(struct ipoption, ipopt_list);
	else
		ipoptlen = 0;
#if defined(IPSEC) || defined(IPSEC_SUPPORT)
	ipoptlen += ipsec_optlen;
#endif

	if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > tp->t_maxseg &&
	    (tp->t_port == 0) &&
	    ((tp->t_flags & TF_SIGNATURE) == 0) &&
	    tp->rcv_numsacks == 0 && ((sack_rxmit == 0) || V_tcp_sack_tso) &&
	    ipoptlen == 0 && !(flags & TH_SYN))
		tso = 1;

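	/*
	 * Clear the FIN bit if this segment does not carry data up to the
	 * last byte held in the send buffer.
	 */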
	if (SEQ_LT((sack_rxmit ? p->rxmit : tp->snd_nxt) + len,
	    tp->snd_una + sbused(&so->so_snd))) {
		flags &= ~TH_FIN;
	}

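	/*
	 * The receive window is the space left in the receive buffer,
	 * clamped to the largest window that can be advertised with the
	 * negotiated window scale.
	 */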
	recwin = lmin(lmax(sbspace(&so->so_rcv), 0),
	    (long)TCP_MAXWIN << tp->rcv_scale);

	/*
	 * Sender silly window avoidance.  We transmit under the following
	 * conditions when len is non-zero:
	 *
	 *	- We have a full segment (or more with TSO)
	 *	- This is the last buffer in a write()/send() and we are
	 *	  either idle or running NODELAY
	 *	- we've timed out (e.g. persist timer)
	 *	- we have more than 1/2 the maximum send window's worth of
	 *	  data (the receiver may be limiting the window size)
	 *	- we need to retransmit
	 */
	if (len) {
		if (len >= tp->t_maxseg)
			goto send;
		/*
		 * As the TCP header options are now
		 * considered when setting up the initial
		 * window, we would not send the last segment
		 * if we skip considering the option length here.
		 * Note: this may not work when tcp headers change
		 * very dynamically in the future.
		 */
		if ((((tp->t_flags & TF_SIGNATURE) ?
			PADTCPOLEN(TCPOLEN_SIGNATURE) : 0) +
		    ((tp->t_flags & TF_RCVD_TSTMP) ?
			PADTCPOLEN(TCPOLEN_TIMESTAMP) : 0) +
		    len) >= tp->t_maxseg)
			goto send;
		/*
		 * NOTE! on localhost connections an 'ack' from the remote
		 * end may occur synchronously with the output and cause
		 * us to flush a buffer queued with moretocome.  XXX
		 *
		 * note: the len + off check is almost certainly unnecessary.
		 */
		if (!(tp->t_flags & TF_MORETOCOME) &&	/* normal case */
		    (idle || (tp->t_flags & TF_NODELAY)) &&
		    (uint32_t)len + (uint32_t)off >= sbavail(&so->so_snd) &&
		    (tp->t_flags & TF_NOPUSH) == 0) {
			goto send;
		}
		if (tp->t_flags & TF_FORCEDATA)		/* typ. timeout case */
			goto send;
		if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0)
			goto send;
		if (SEQ_LT(tp->snd_nxt, tp->snd_max))	/* retransmit case */
			goto send;
		if (sack_rxmit)
			goto send;
	}

	/*
	 * Sending of standalone window updates.
	 *
	 * Window updates are important when we close our window due to a
	 * full socket buffer and are opening it again after the application
	 * reads data from it.  Once the window has opened again and the
	 * remote end starts to send again the ACK clock takes over and
	 * provides the most current window information.
	 *
	 * We must avoid the silly window syndrome where every read
	 * from the receive buffer, no matter how small, causes a window
	 * update to be sent.  We also should avoid sending a flurry of
	 * window updates when the socket buffer had queued a lot of data
	 * and the application is doing small reads.
	 *
	 * Prevent a flurry of pointless window updates by only sending
	 * an update when we can increase the advertised window by more
	 * than 1/4th of the socket buffer capacity.  When the buffer is
	 * getting full or is very small be more aggressive and send an
	 * update whenever we can increase by two MSS-sized segments.
	 * In all other situations the ACK's to new incoming data will
	 * carry further window increases.
	 *
	 * Don't send an independent window update if a delayed
	 * ACK is pending (it will get piggy-backed on it) or the
	 * remote side already has done a half-close and won't send
	 * more data.
	 */
	if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) &&
	    !(tp->t_flags & TF_DELACK) &&
	    !TCPS_HAVERCVDFIN(tp->t_state)) {
		/*
		 * "adv" is the amount we could increase the window,
		 * taking into account that we are limited by
		 * TCP_MAXWIN << tp->rcv_scale.
		 */
		int32_t adv;
		int oldwin;

		adv = recwin;
		if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) {
			oldwin = (tp->rcv_adv - tp->rcv_nxt);
			if (adv > oldwin)
				adv -= oldwin;
			else
				adv = 0;
		} else
			oldwin = 0;

		/*
		 * If the new window size ends up being the same as or less
		 * than the old size when it is scaled, then don't force
		 * a window update.
		 */
		if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale)
			goto dontupdate;

		if (adv >= (int32_t)(2 * tp->t_maxseg) &&
		    (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) ||
		     recwin <= (so->so_rcv.sb_hiwat / 8) ||
		     so->so_rcv.sb_hiwat <= 8 * tp->t_maxseg ||
		     adv >= TCP_MAXWIN << tp->rcv_scale))
			goto send;
		if (2 * adv >= (int32_t)so->so_rcv.sb_hiwat)
			goto send;
	}
dontupdate:

	/*
	 * Send if we owe the peer an ACK, RST, SYN, or urgent data.  ACKNOW
	 * is also a catch-all for the retransmit timer timeout case.
	 */
	if (tp->t_flags & TF_ACKNOW)
		goto send;
	if ((flags & TH_RST) ||
	    ((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0))
		goto send;
	if (SEQ_GT(tp->snd_up, tp->snd_una))
		goto send;
	/*
	 * If our state indicates that FIN should be sent
	 * and we have not yet done so, then we need to send.
	 */
	if (flags & TH_FIN &&
	    ((tp->t_flags & TF_SENTFIN) == 0 || tp->snd_nxt == tp->snd_una))
		goto send;
	/*
	 * In SACK, it is possible for tcp_output to fail to send a segment
	 * after the retransmission timer has been turned off.  Make sure
	 * that the retransmission timer is set.
	 */
	if ((tp->t_flags & TF_SACK_PERMIT) &&
	    SEQ_GT(tp->snd_max, tp->snd_una) &&
	    !tcp_timer_active(tp, TT_REXMT) &&
	    !tcp_timer_active(tp, TT_PERSIST)) {
		tcp_timer_activate(tp, TT_REXMT, TP_RXTCUR(tp));
		goto just_return;
	}
	/*
	 * TCP window updates are not reliable, rather a polling protocol
	 * using ``persist'' packets is used to ensure receipt of window
	 * updates.  The three ``states'' for the output side are:
	 *	idle			not doing retransmits or persists
	 *	persisting		to move a small or zero window
	 *	(re)transmitting	and thereby not persisting
	 *
	 * tcp_timer_active(tp, TT_PERSIST)
	 *	is true when we are in persist state.
	 * (tp->t_flags & TF_FORCEDATA)
	 *	is set when we are called to send a persist packet.
	 * tcp_timer_active(tp, TT_REXMT)
	 *	is set when we are retransmitting
	 * The output side is idle when both timers are zero.
	 *
	 * If send window is too small, there is data to transmit, and no
	 * retransmit or persist is pending, then go to persist state.
	 * If nothing happens soon, send when timer expires:
	 * if window is nonzero, transmit what we can,
	 * otherwise force out a byte.
	 */
	if (sbavail(&so->so_snd) && !tcp_timer_active(tp, TT_REXMT) &&
	    !tcp_timer_active(tp, TT_PERSIST)) {
		tp->t_rxtshift = 0;
		tcp_setpersist(tp);
	}

	/*
	 * No reason to send a segment, just return.
	 */
just_return:
	SOCKBUF_UNLOCK(&so->so_snd);
	return (0);

send:
	SOCKBUF_LOCK_ASSERT(&so->so_snd);
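	/*
	 * Record whether the segment being sent is of full size; the
	 * blackhole detection logic for path MTU discovery uses this
	 * when reacting to retransmit timeouts.
	 */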
	if (len > 0) {
		if (len >= tp->t_maxseg)
			tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT;
		else
			tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT;
	}
	/*
	 * Before ESTABLISHED, force sending of initial options
	 * unless TCP set not to do any options.
	 * NOTE: we assume that the IP/TCP header plus TCP options
	 * always fit in a single mbuf, leaving room for a maximum
	 * link header, i.e.
	 *	max_linkhdr + sizeof (struct tcpiphdr) + optlen <= MCLBYTES
	 */
	optlen = 0;
#ifdef INET6
	if (isipv6)
		hdrlen = sizeof (struct ip6_hdr) + sizeof (struct tcphdr);
	else
#endif
		hdrlen = sizeof (struct tcpiphdr);

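	/* A SYN always carries the initial send sequence number. */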
	if (flags & TH_SYN) {
		tp->snd_nxt = tp->iss;
	}

	/*
	 * Compute options for segment.
	 * We only have to care about SYN and established connection
	 * segments.  Options for SYN-ACK segments are handled in TCP
	 * syncache.
	 */
	to.to_flags = 0;
	if ((tp->t_flags & TF_NOOPT) == 0) {
		/* Maximum segment size. */
		if (flags & TH_SYN) {
			to.to_mss = tcp_mssopt(&inp->inp_inc);
			if (tp->t_port)
				to.to_mss -= V_tcp_udp_tunneling_overhead;
			to.to_flags |= TOF_MSS;

			/*
			 * On SYN or SYN|ACK transmits on TFO connections,
			 * only include the TFO option if it is not a
			 * retransmit, as the presence of the TFO option may
			 * have caused the original SYN or SYN|ACK to have
			 * been dropped by a middlebox.
			 */
			if ((tp->t_flags & TF_FASTOPEN) &&
			    (tp->t_rxtshift == 0)) {
				if (tp->t_state == TCPS_SYN_RECEIVED) {
					to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN;
					to.to_tfo_cookie =
					    (u_int8_t *)&tp->t_tfo_cookie.server;
					to.to_flags |= TOF_FASTOPEN;
					wanted_cookie = 1;
				} else if (tp->t_state == TCPS_SYN_SENT) {
					to.to_tfo_len =
					    tp->t_tfo_client_cookie_len;
					to.to_tfo_cookie =
					    tp->t_tfo_cookie.client;
					to.to_flags |= TOF_FASTOPEN;
					wanted_cookie = 1;
					/*
					 * If we wind up having more data to
					 * send with the SYN than can fit in
					 * one segment, don't send any more
					 * until the SYN|ACK comes back from
					 * the other end.
					 */
					dont_sendalot = 1;
				}
			}
		}
		/* Window scaling. */
		if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) {
			to.to_wscale = tp->request_r_scale;
			to.to_flags |= TOF_SCALE;
		}
		/* Timestamps. */
		if ((tp->t_flags & TF_RCVD_TSTMP) ||
		    ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) {
			curticks = tcp_ts_getticks();
			to.to_tsval = curticks + tp->ts_offset;
			to.to_tsecr = tp->ts_recent;
			to.to_flags |= TOF_TS;
			if (tp->t_rxtshift == 1)
				tp->t_badrxtwin = curticks;
		}

		/* Set receive buffer autosizing timestamp. */
		if (tp->rfbuf_ts == 0 &&
		    (so->so_rcv.sb_flags & SB_AUTOSIZE))
			tp->rfbuf_ts = tcp_ts_getticks();

		/* Selective ACK's. */
		if (tp->t_flags & TF_SACK_PERMIT) {
			if (flags & TH_SYN)
				to.to_flags |= TOF_SACKPERM;
			else if (TCPS_HAVEESTABLISHED(tp->t_state) &&
			    tp->rcv_numsacks > 0) {
				to.to_flags |= TOF_SACK;
				to.to_nsacks = tp->rcv_numsacks;
				to.to_sacks = (u_char *)tp->sackblks;
			}
		}
#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
		/* TCP-MD5 (RFC2385). */
		/*
		 * Check that TCP_MD5SIG is enabled in tcpcb to
		 * account the size needed to set this TCP option.
		 */
		if (tp->t_flags & TF_SIGNATURE)
			to.to_flags |= TOF_SIGNATURE;
#endif /* TCP_SIGNATURE */

		/* Processing the options. */
		hdrlen += optlen = tcp_addoptions(&to, opt);
		/*
		 * If we wanted a TFO option to be added, but it was unable
		 * to fit, ensure no data is sent.
		 */
		if ((tp->t_flags & TF_FASTOPEN) && wanted_cookie &&
		    !(to.to_flags & TOF_FASTOPEN))
			len = 0;
	}
	if (tp->t_port) {
		if (V_tcp_udp_tunneling_port == 0) {
			/* The port was removed?? */
			SOCKBUF_UNLOCK(&so->so_snd);
			return (EHOSTUNREACH);
		}
		hdrlen += sizeof(struct udphdr);
	}
	/*
	 * Adjust data length if insertion of options will
	 * bump the packet length beyond the t_maxseg length.
	 * Clear the FIN bit because we cut off the tail of
	 * the segment.
	 */
	if (len + optlen + ipoptlen > tp->t_maxseg) {
		flags &= ~TH_FIN;

		if (tso) {
			u_int if_hw_tsomax;
			u_int moff;
			int max_len;

			/* extract TSO information */
			if_hw_tsomax = tp->t_tsomax;
			if_hw_tsomaxsegcount = tp->t_tsomaxsegcount;
			if_hw_tsomaxsegsize = tp->t_tsomaxsegsize;

			/*
			 * Limit a TSO burst to prevent it from
			 * overflowing or exceeding the maximum length
			 * allowed by the network interface:
			 */
			KASSERT(ipoptlen == 0,
			    ("%s: TSO can't do IP options", __func__));

			/*
			 * Check if we should limit by maximum payload
			 * length:
			 */
			if (if_hw_tsomax != 0) {
				/* compute maximum TSO length */
				max_len = (if_hw_tsomax - hdrlen -
				    max_linkhdr);
				if (max_len <= 0) {
					len = 0;
				} else if (len > max_len) {
					sendalot = 1;
					len = max_len;
				}
			}

			/*
			 * Prevent the last segment from being
			 * fractional unless the send sockbuf can be
			 * emptied:
			 */
			max_len = (tp->t_maxseg - optlen);
			if (((uint32_t)off + (uint32_t)len) <
			    sbavail(&so->so_snd)) {
				moff = len % max_len;
				if (moff != 0) {
					len -= moff;
					sendalot = 1;
				}
			}

			/*
			 * In case there are too many small fragments
			 * don't use TSO:
			 */
			if (len <= max_len) {
				len = max_len;
				sendalot = 1;
				tso = 0;
			}

			/*
			 * Send the FIN in a separate segment
			 * after the bulk sending is done.
			 * We don't trust the TSO implementations
			 * to clear the FIN flag on all but the
			 * last segment.
			 */
			if (tp->t_flags & TF_NEEDFIN)
				sendalot = 1;
		} else {
			if (optlen + ipoptlen >= tp->t_maxseg) {
				/*
				 * Since we don't have enough space to put
				 * the IP header chain and the TCP header in
				 * one packet as required by RFC 7112, don't
				 * send it. Also ensure that at least one
				 * byte of the payload can be put into the
				 * TCP segment.
				 */
				SOCKBUF_UNLOCK(&so->so_snd);
				error = EMSGSIZE;
				sack_rxmit = 0;
				goto out;
			}
			len = tp->t_maxseg - optlen - ipoptlen;
			sendalot = 1;
			if (dont_sendalot)
				sendalot = 0;
		}
	} else
		tso = 0;

	KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET,
	    ("%s: len > IP_MAXPACKET", __func__));

/*#ifdef DIAGNOSTIC*/
#ifdef INET6
	if (max_linkhdr + hdrlen > MCLBYTES)
#else
	if (max_linkhdr + hdrlen > MHLEN)
#endif
		panic("tcphdr too big");
/*#endif*/

	/*
	 * This KASSERT is here to catch edge cases at a well defined place.
	 * Before, those had triggered (random) panic conditions further down.
	 */
	KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));

	/*
	 * Grab a header mbuf, attaching a copy of data to
	 * be transmitted, and initialize the header from
	 * the template for sends on this connection.
	 */
	if (len) {
		struct mbuf *mb;
		struct sockbuf *msb;
		u_int moff;

		if ((tp->t_flags & TF_FORCEDATA) && len == 1) {
			TCPSTAT_INC(tcps_sndprobe);
#ifdef STATS
			if (SEQ_LT(tp->snd_nxt, tp->snd_max))
				stats_voi_update_abs_u32(tp->t_stats,
				VOI_TCP_RETXPB, len);
			else
				stats_voi_update_abs_u64(tp->t_stats,
				    VOI_TCP_TXPB, len);
#endif /* STATS */
		} else if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) {
			tp->t_sndrexmitpack++;
			TCPSTAT_INC(tcps_sndrexmitpack);
			TCPSTAT_ADD(tcps_sndrexmitbyte, len);
			if (sack_rxmit) {
				TCPSTAT_INC(tcps_sack_rexmits);
				if (tso) {
					TCPSTAT_INC(tcps_sack_rexmits_tso);
				}
				TCPSTAT_ADD(tcps_sack_rexmit_bytes, len);
			}
#ifdef STATS
			stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB,
			    len);
#endif /* STATS */
		} else {
			TCPSTAT_INC(tcps_sndpack);
			TCPSTAT_ADD(tcps_sndbyte, len);
#ifdef STATS
			stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB,
			    len);
#endif /* STATS */
		}
#ifdef INET6
		if (MHLEN < hdrlen + max_linkhdr)
			m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		else
#endif
			m = m_gethdr(M_NOWAIT, MT_DATA);

		if (m == NULL) {
			SOCKBUF_UNLOCK(&so->so_snd);
			error = ENOBUFS;
			sack_rxmit = 0;
			goto out;
		}

		m->m_data += max_linkhdr;
		m->m_len = hdrlen;

		/*
		 * Start the m_copy functions from the closest mbuf
		 * to the offset in the socket buffer chain.
		 */
		mb = sbsndptr_noadv(&so->so_snd, off, &moff);
		if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) {
			m_copydata(mb, moff, len,
			    mtod(m, caddr_t) + hdrlen);
			if (SEQ_LT(tp->snd_nxt, tp->snd_max))
				sbsndptr_adv(&so->so_snd, mb, len);
			m->m_len += len;
		} else {
			if (SEQ_LT(tp->snd_nxt, tp->snd_max))
				msb = NULL;
			else
				msb = &so->so_snd;
			m->m_next = tcp_m_copym(mb, moff,
			    &len, if_hw_tsomaxsegcount,
			    if_hw_tsomaxsegsize, msb, hw_tls);
			if (len <= (tp->t_maxseg - optlen)) {
				/*
				 * We must have run out of mbufs for the copy;
				 * the length no longer requires TSO, so turn
				 * it off.  Don't set sendalot, since we are
				 * low on mbufs.
				 */
				tso = 0;
			}
			if (m->m_next == NULL) {
				SOCKBUF_UNLOCK(&so->so_snd);
				(void) m_free(m);
				error = ENOBUFS;
				sack_rxmit = 0;
				goto out;
			}
		}

		/*
		 * If we're sending everything we've got, set PUSH.
		 * (This will keep happy those implementations which only
		 * give data to the user when a buffer fills or
		 * a PUSH comes in.)
		 */
		if (((uint32_t)off + (uint32_t)len == sbused(&so->so_snd)) &&
		    !(flags & TH_SYN))
			flags |= TH_PUSH;
		SOCKBUF_UNLOCK(&so->so_snd);
	} else {
		SOCKBUF_UNLOCK(&so->so_snd);
		if (tp->t_flags & TF_ACKNOW)
			TCPSTAT_INC(tcps_sndacks);
		else if (flags & (TH_SYN|TH_FIN|TH_RST))
			TCPSTAT_INC(tcps_sndctrl);
		else if (SEQ_GT(tp->snd_up, tp->snd_una))
			TCPSTAT_INC(tcps_sndurg);
		else
			TCPSTAT_INC(tcps_sndwinup);

		m = m_gethdr(M_NOWAIT, MT_DATA);
		if (m == NULL) {
			error = ENOBUFS;
			sack_rxmit = 0;
			goto out;
		}
#ifdef INET6
		if (isipv6 && (MHLEN < hdrlen + max_linkhdr) &&
		    MHLEN >= hdrlen) {
			M_ALIGN(m, hdrlen);
		} else
#endif
		m->m_data += max_linkhdr;
		m->m_len = hdrlen;
	}
	SOCKBUF_UNLOCK_ASSERT(&so->so_snd);
	m->m_pkthdr.rcvif = (struct ifnet *)0;
#ifdef MAC
	mac_inpcb_create_mbuf(inp, m);
#endif
#ifdef INET6
	if (isipv6) {
		ip6 = mtod(m, struct ip6_hdr *);
		if (tp->t_port) {
			udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
			udp->uh_sport = htons(V_tcp_udp_tunneling_port);
			udp->uh_dport = tp->t_port;
			ulen = hdrlen + len - sizeof(struct ip6_hdr);
			udp->uh_ulen = htons(ulen);
			th = (struct tcphdr *)(udp + 1);
		} else {
			th = (struct tcphdr *)(ip6 + 1);
		}
		tcpip_fillheaders(inp, tp->t_port, ip6, th);
	} else
#endif /* INET6 */
	{
		ip = mtod(m, struct ip *);
		if (tp->t_port) {
			udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
			udp->uh_sport = htons(V_tcp_udp_tunneling_port);
			udp->uh_dport = tp->t_port;
			ulen = hdrlen + len - sizeof(struct ip);
			udp->uh_ulen = htons(ulen);
			th = (struct tcphdr *)(udp + 1);
		} else
			th = (struct tcphdr *)(ip + 1);
		tcpip_fillheaders(inp, tp->t_port, ip, th);
	}

	/*
	 * Fill in fields, remembering maximum advertised
	 * window for use in delaying messages about window sizes.
	 * If resending a FIN, be sure not to use a new sequence number.
	 */
	if (flags & TH_FIN && tp->t_flags & TF_SENTFIN &&
	    tp->snd_nxt == tp->snd_max)
		tp->snd_nxt--;
	/*
	 * If we are starting a connection, send ECN setup
	 * SYN packet. If we are on a retransmit, we may
	 * resend those bits a number of times as per
	 * RFC 3168.
	 */
	if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn) {
		flags |= tcp_ecn_output_syn_sent(tp);
	}
	/* Also handle parallel SYN for ECN */
	if ((TCPS_HAVERCVDSYN(tp->t_state)) &&
	    (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) {
		int ect = tcp_ecn_output_established(tp, &flags, len, sack_rxmit);
		if ((tp->t_state == TCPS_SYN_RECEIVED) &&
		    (tp->t_flags2 & TF2_ECN_SND_ECE))
			tp->t_flags2 &= ~TF2_ECN_SND_ECE;
#ifdef INET6
		if (isipv6) {
			ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << IPV6_FLOWLABEL_LEN);
			ip6->ip6_flow |= htonl(ect << IPV6_FLOWLABEL_LEN);
		}
		else
#endif
		{
			ip->ip_tos &= ~IPTOS_ECN_MASK;
			ip->ip_tos |= ect;
		}
	}

	/*
	 * If we are doing retransmissions, then snd_nxt will
	 * not reflect the first unsent octet.  For ACK only
	 * packets, we do not want the sequence number of the
	 * retransmitted packet, we want the sequence number
	 * of the next unsent octet.  So, if there is no data
	 * (and no SYN or FIN), use snd_max instead of snd_nxt
	 * when filling in ti_seq.  But if we are in persist
	 * state, snd_max might reflect one byte beyond the
	 * right edge of the window, so use snd_nxt in that
	 * case, since we know we aren't doing a retransmission.
	 * (retransmit and persist are mutually exclusive...)
	 */
	if (sack_rxmit == 0) {
		if (len || (flags & (TH_SYN|TH_FIN)) ||
		    tcp_timer_active(tp, TT_PERSIST))
			th->th_seq = htonl(tp->snd_nxt);
		else
			th->th_seq = htonl(tp->snd_max);
	} else {
		th->th_seq = htonl(p->rxmit);
		p->rxmit += len;
		/*
		 * Lost Retransmission Detection
		 * trigger resending of a (then
		 * still existing) hole, when
		 * fack acks recoverypoint.
		 */
		if ((tp->t_flags & TF_LRD) && SEQ_GEQ(p->rxmit, p->end))
			p->rxmit = tp->snd_recover;
		tp->sackhint.sack_bytes_rexmit += len;
	}
	if (IN_RECOVERY(tp->t_flags)) {
		/*
		 * Account all bytes transmitted while
		 * IN_RECOVERY, simplifying PRR and
		 * Lost Retransmit Detection
		 */
		tp->sackhint.prr_out += len;
	}
	th->th_ack = htonl(tp->rcv_nxt);
	if (optlen) {
		bcopy(opt, th + 1, optlen);
		th->th_off = (sizeof (struct tcphdr) + optlen) >> 2;
	}
	tcp_set_flags(th, flags);
	/*
	 * Calculate receive window.  Don't shrink window,
	 * but avoid silly window syndrome.
	 * If a RST segment is sent, advertise a window of zero.
	 */
	if (flags & TH_RST) {
		recwin = 0;
	} else {
		if (recwin < (so->so_rcv.sb_hiwat / 4) &&
		    recwin < tp->t_maxseg)
			recwin = 0;
		if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) &&
		    recwin < (tp->rcv_adv - tp->rcv_nxt))
			recwin = (tp->rcv_adv - tp->rcv_nxt);
	}
	/*
	 * According to RFC1323 the window field in a SYN (i.e., a <SYN>
	 * or <SYN,ACK>) segment itself is never scaled.  The <SYN,ACK>
	 * case is handled in syncache.
	 */
	if (flags & TH_SYN)
		th->th_win = htons((u_short)
				(min(sbspace(&so->so_rcv), TCP_MAXWIN)));
	else {
		/* Avoid shrinking window with window scaling. */
		recwin = roundup2(recwin, 1 << tp->rcv_scale);
		th->th_win = htons((u_short)(recwin >> tp->rcv_scale));
	}

	/*
	 * Adjust the RXWIN0SENT flag - indicate that we have advertised
	 * a 0 window.  This may cause the remote transmitter to stall.  This
	 * flag tells soreceive() to disable delayed acknowledgements when
	 * draining the buffer.  This can occur if the receiver is attempting
	 * to read more data than can be buffered prior to transmitting on
	 * the connection.
	 */
	if (th->th_win == 0) {
		tp->t_sndzerowin++;
		tp->t_flags |= TF_RXWIN0SENT;
	} else
		tp->t_flags &= ~TF_RXWIN0SENT;
	if (SEQ_GT(tp->snd_up, tp->snd_nxt)) {
		th->th_urp = htons((u_short)(tp->snd_up - tp->snd_nxt));
		th->th_flags |= TH_URG;
	} else
		/*
		 * If no urgent pointer to send, then we pull
		 * the urgent pointer to the left edge of the send window
		 * so that it doesn't drift into the send window on sequence
		 * number wraparound.
		 */
		tp->snd_up = tp->snd_una;		/* drag it along */

	/*
	 * Put TCP length in extended header, and then
	 * checksum extended header and data.
	 */
	m->m_pkthdr.len = hdrlen + len; /* in6_cksum() needs this */

#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
	if (to.to_flags & TOF_SIGNATURE) {
		/*
		 * Calculate MD5 signature and put it into the place
		 * determined before.
		 * NOTE: since TCP options buffer doesn't point into
		 * mbuf's data, calculate offset and use it.
		 */
		if (!TCPMD5_ENABLED() || (error = TCPMD5_OUTPUT(m, th,
		    (u_char *)(th + 1) + (to.to_signature - opt))) != 0) {
			/*
			 * Do not send segment if the calculation of MD5
			 * digest has failed.
			 */
			m_freem(m);
			goto out;
		}
	}
#endif
#ifdef INET6
	if (isipv6) {
		/*
		 * There is no need to fill in ip6_plen right now.
		 * It will be filled later by ip6_output.
		 */
		if (tp->t_port) {
			m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
			m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
			udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
			th->th_sum = htons(0);
			UDPSTAT_INC(udps_opackets);
		} else {
			m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
			m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
			th->th_sum = in6_cksum_pseudo(ip6,
			    sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP,
			    0);
		}
	}
#endif
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	{
		if (tp->t_port) {
			m->m_pkthdr.csum_flags = CSUM_UDP;
			m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
			udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
			   ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP));
			th->th_sum = htons(0);
			UDPSTAT_INC(udps_opackets);
		} else {
			m->m_pkthdr.csum_flags = CSUM_TCP;
			m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
			th->th_sum = in_pseudo(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) +
			    IPPROTO_TCP + len + optlen));
		}

		/* IP version must be set here for ipv4/ipv6 checking later */
		KASSERT(ip->ip_v == IPVERSION,
		    ("%s: IP version incorrect: %d", __func__, ip->ip_v));
	}
#endif

	/*
	 * Enable TSO and specify the size of the segments.
	 * The TCP pseudo header checksum is always provided.
	 */
	if (tso) {
		KASSERT(len > tp->t_maxseg - optlen,
		    ("%s: len <= tso_segsz", __func__));
		m->m_pkthdr.csum_flags |= CSUM_TSO;
		m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen;
	}

	KASSERT(len + hdrlen == m_length(m, NULL),
	    ("%s: mbuf chain shorter than expected: %d + %u != %u",
	    __func__, len, hdrlen, m_length(m, NULL)));

#ifdef TCP_HHOOK
	/* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */
	hhook_run_tcp_est_out(tp, th, &to, len, tso);
#endif

	TCP_PROBE3(debug__output, tp, th, m);

	/* We're getting ready to send; log now. */
	/* XXXMT: We are not honoring verbose logging. */

	if (tcp_bblogging_on(tp))
		lgb = tcp_log_event(tp, th, &so->so_rcv, &so->so_snd,
		    TCP_LOG_OUT, ERRNO_UNK, len, NULL, false, NULL, NULL, 0,
		    NULL);
	else
		lgb = NULL;

	/*
	 * Fill in IP length and desired time to live and
	 * send to IP level.  There should be a better way
	 * to handle ttl and tos; we could keep them in
	 * the template, but need a way to checksum without them.
	 */
	/*
	 * m->m_pkthdr.len should have been set before checksum calculation,
	 * because in6_cksum() needs it.
	 */
#ifdef INET6
	if (isipv6) {
		/*
		 * We set the hop limit separately for every segment, since
		 * the user might want to change the value via setsockopt.
		 * Also, the desired default hop limit might be changed via
		 * Neighbor Discovery.
		 */
		ip6->ip6_hlim = in6_selecthlim(inp, NULL);

		/*
		 * Set the packet size here for the benefit of DTrace probes.
		 * ip6_output() will set it properly; it's supposed to include
		 * the option header lengths as well.
		 */
		ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6));

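		/*
		 * As in the IPv4 case below, only run Packetization Layer
		 * PMTUD when the MSS is large enough to leave a safe
		 * fallback.
		 */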
		if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss)
			tp->t_flags2 |= TF2_PLPMTU_PMTUD;
		else
			tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;

		if (tp->t_state == TCPS_SYN_SENT)
			TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th);

		TCP_PROBE5(send, NULL, tp, ip6, tp, th);

#ifdef TCPPCAP
		/* Save packet, if requested. */
		tcp_pcap_add(th, m, &(tp->t_outpkts));
#endif

		/* TODO: IPv6 IP6TOS_ECT bit on */
		error = ip6_output(m, inp->in6p_outputopts, &inp->inp_route6,
		    ((so->so_options & SO_DONTROUTE) ?  IP_ROUTETOIF : 0),
		    NULL, NULL, inp);

		if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL)
			mtu = inp->inp_route6.ro_nh->nh_mtu;
	}
#endif /* INET6 */
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
    {
	ip->ip_len = htons(m->m_pkthdr.len);
#ifdef INET6
	if (inp->inp_vflag & INP_IPV6PROTO)
		ip->ip_ttl = in6_selecthlim(inp, NULL);
#endif /* INET6 */
	/*
	 * If we do path MTU discovery, then we set DF on every packet.
	 * This might not be the best thing to do according to RFC3390
	 * Section 2.  However, the TCP hostcache mitigates the problem,
	 * so it affects only the first TCP connection with a host.
	 *
	 * NB: Don't set DF on small MTU/MSS to have a safe fallback.
	 */
	if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) {
		tp->t_flags2 |= TF2_PLPMTU_PMTUD;
		if (tp->t_port == 0 || len < V_tcp_minmss) {
			ip->ip_off |= htons(IP_DF);
		}
	} else {
		tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
	}

	if (tp->t_state == TCPS_SYN_SENT)
		TCP_PROBE5(connect__request, NULL, tp, ip, tp, th);

	TCP_PROBE5(send, NULL, tp, ip, tp, th);

#ifdef TCPPCAP
	/* Save packet, if requested. */
	tcp_pcap_add(th, m, &(tp->t_outpkts));
#endif

	error = ip_output(m, inp->inp_options, &inp->inp_route,
	    ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0), 0, inp);

	if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL)
		mtu = inp->inp_route.ro_nh->nh_mtu;
    }
#endif /* INET */

	if (lgb != NULL) {
		lgb->tlb_errno = error;
		lgb = NULL;
	}
out:
	if (error == 0)
		tcp_account_for_send(tp, len, (tp->snd_nxt != tp->snd_max), 0, hw_tls);
	/*
	 * In transmit state, time the transmission and arrange for
	 * the retransmit.  In persist state, just set snd_max.  In a closed
	 * state just return.
	 */
	if (flags & TH_RST) {
		TCPSTAT_INC(tcps_sndtotal);
		return (0);
	} else if ((tp->t_flags & TF_FORCEDATA) == 0 ||
	    !tcp_timer_active(tp, TT_PERSIST)) {
		tcp_seq startseq = tp->snd_nxt;

		/*
		 * Advance snd_nxt over sequence space of this segment.
		 */
		if (flags & (TH_SYN|TH_FIN)) {
			if (flags & TH_SYN)
				tp->snd_nxt++;
			if (flags & TH_FIN) {
				tp->snd_nxt++;
				tp->t_flags |= TF_SENTFIN;
			}
		}
		if (sack_rxmit)
			goto timer;
		tp->snd_nxt += len;
		if (SEQ_GT(tp->snd_nxt, tp->snd_max)) {
			/*
			 * Update "made progress" indication if we just
			 * added new data to an empty socket buffer.
			 */
			if (tp->snd_una == tp->snd_max)
				tp->t_acktime = ticks;
			tp->snd_max = tp->snd_nxt;
			/*
			 * Time this transmission if not a retransmission and
			 * not currently timing anything.
			 */
			tp->t_sndtime = ticks;
			if (tp->t_rtttime == 0) {
				tp->t_rtttime = ticks;
				tp->t_rtseq = startseq;
				TCPSTAT_INC(tcps_segstimed);
			}
#ifdef STATS
			if (!(tp->t_flags & TF_GPUTINPROG) && len) {
				tp->t_flags |= TF_GPUTINPROG;
				tp->gput_seq = startseq;
				tp->gput_ack = startseq +
				    ulmin(sbavail(&so->so_snd) - off, sendwin);
				tp->gput_ts = tcp_ts_getticks();
			}
#endif /* STATS */
		}

		/*
		 * Set retransmit timer if not currently set,
		 * and not doing a pure ack or a keep-alive probe.
		 * Initial value for retransmit timer is smoothed
		 * round-trip time + 2 * round-trip time variance.
		 * Initialize shift counter which is used for backoff
		 * of retransmit time.
		 */
timer:
		if (!tcp_timer_active(tp, TT_REXMT) &&
		    ((sack_rxmit && tp->snd_nxt != tp->snd_max) ||
		     (tp->snd_nxt != tp->snd_una))) {
			if (tcp_timer_active(tp, TT_PERSIST)) {
				tcp_timer_activate(tp, TT_PERSIST, 0);
				tp->t_rxtshift = 0;
			}
			tcp_timer_activate(tp, TT_REXMT, TP_RXTCUR(tp));
		} else if (len == 0 && sbavail(&so->so_snd) &&
		    !tcp_timer_active(tp, TT_REXMT) &&
		    !tcp_timer_active(tp, TT_PERSIST)) {
			/*
			 * Avoid a situation where we do not set persist timer
			 * after a zero window condition. For example:
			 * 1) A -> B: packet with enough data to fill the window
			 * 2) B -> A: ACK for #1 + new data (0 window
			 *    advertisement)
			 * 3) A -> B: ACK for #2, 0 len packet
			 *
			 * In this case, A will not activate the persist timer,
			 * because it chose to send a packet. Unless tcp_output
			 * is called for some other reason (delayed ack timer,
			 * another input packet from B, socket syscall), A will
			 * not send zero window probes.
			 *
			 * So, if you send a 0-length packet, but there is data
			 * in the socket buffer, and neither the rexmt nor the
			 * persist timer is already set, then activate the
			 * persist timer.
			 */
			tp->t_rxtshift = 0;
			tcp_setpersist(tp);
		}
	} else {
		/*
		 * Persist case, update snd_max but since we are in
		 * persist mode (no window) we do not update snd_nxt.
		 */
		int xlen = len;
		if (flags & TH_SYN)
			++xlen;
		if (flags & TH_FIN) {
			++xlen;
			tp->t_flags |= TF_SENTFIN;
		}
		if (SEQ_GT(tp->snd_nxt + xlen, tp->snd_max))
			tp->snd_max = tp->snd_nxt + xlen;
	}
	if ((error == 0) &&
	    (TCPS_HAVEESTABLISHED(tp->t_state) &&
	     (tp->t_flags & TF_SACK_PERMIT) &&
	     tp->rcv_numsacks > 0)) {
		    /* Clean up any DSACK's sent */
		    tcp_clean_dsack_blocks(tp);
	}
	if (error) {
		/*
		 * We know that the packet was lost, so back out the
		 * sequence number advance, if any.
		 *
		 * If the error is EPERM the packet got blocked by the
		 * local firewall.  Normally we should terminate the
		 * connection but the blocking may have been spurious
		 * due to a firewall reconfiguration cycle.  So we treat
		 * it like a packet loss and let the retransmit timer and
		 * timeouts do their work over time.
		 * XXX: It is a POLA question whether calling tcp_drop right
		 * away would be the really correct behavior instead.
		 */
		if (((tp->t_flags & TF_FORCEDATA) == 0 ||
		    !tcp_timer_active(tp, TT_PERSIST)) &&
		    ((flags & TH_SYN) == 0) &&
		    (error != EPERM)) {
			if (sack_rxmit) {
				p->rxmit = SEQ_MIN(p->end, p->rxmit) - len;
				tp->sackhint.sack_bytes_rexmit -= len;
				KASSERT(tp->sackhint.sack_bytes_rexmit >= 0,
				    ("sackhint bytes rtx >= 0"));
				KASSERT((flags & TH_FIN) == 0,
				    ("error while FIN with SACK rxmit"));
			} else {
				tp->snd_nxt -= len;
				if (flags & TH_FIN)
					tp->snd_nxt--;
			}
			if (IN_RECOVERY(tp->t_flags))
				tp->sackhint.prr_out -= len;
		}
		SOCKBUF_UNLOCK_ASSERT(&so->so_snd);	/* Check gotos. */
		switch (error) {
		case EACCES:
		case EPERM:
			tp->t_softerror = error;
			return (error);
		case ENOBUFS:
			TCP_XMIT_TIMER_ASSERT(tp, len, flags);
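			/*
			 * Out of buffers: collapse the congestion window to
			 * one segment; the assertion above guarantees that a
			 * timer is running, so the send will be retried.
			 */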
			tp->snd_cwnd = tp->t_maxseg;
			return (0);
		case EMSGSIZE:
			/*
			 * For some reason the interface we used initially
			 * to send segments changed to another or lowered
			 * its MTU.
			 * If TSO was active we either got an interface
			 * without TSO capabilities or TSO was turned off.
			 * If we obtained mtu from ip_output(), then update
			 * it and try again.
			 */
			if (tso)
				tp->t_flags &= ~TF_TSO;
			if (mtu != 0) {
				tcp_mss_update(tp, -1, mtu, NULL, NULL);
				goto again;
			}
			return (error);
		case EHOSTDOWN:
		case EHOSTUNREACH:
		case ENETDOWN:
		case ENETUNREACH:
			if (TCPS_HAVERCVDSYN(tp->t_state)) {
				tp->t_softerror = error;
				return (0);
			}
			/* FALLTHROUGH */
		default:
			return (error);
		}
	}
	TCPSTAT_INC(tcps_sndtotal);

	/*
	 * Data sent (as far as we can tell).
	 * If this advertises a larger window than any other segment,
	 * then remember the size of the advertised window.
	 * Any pending ACK has now been sent.
	 */
	if (SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv))
		tp->rcv_adv = tp->rcv_nxt + recwin;
	tp->last_ack_sent = tp->rcv_nxt;
	tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
	if (tcp_timer_active(tp, TT_DELACK))
		tcp_timer_activate(tp, TT_DELACK, 0);
	if (sendalot)
		goto again;
	return (0);
}

void
tcp_setpersist(struct tcpcb *tp)
{
1740	int t = ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1;
1741	int tt;
1742	int maxunacktime;
1743
1744	tp->t_flags &= ~TF_PREVVALID;
1745	if (tcp_timer_active(tp, TT_REXMT))
1746		panic("tcp_setpersist: retransmit pending");
1747	/*
1748	 * If the state is already closed, don't bother.
1749	 */
1750	if (tp->t_state == TCPS_CLOSED)
1751		return;
1752
1753	/*
1754	 * Start/restart persistence timer.
1755	 */
1756	TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift],
1757		      tcp_persmin, tcp_persmax);
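	/*
	 * Worked example (illustrative numbers): with t == 100 ticks and
	 * t_rxtshift == 2, the raw interval is 100 * tcp_backoff[2] ==
	 * 400 ticks with the usual doubling backoff table, which
	 * TCPT_RANGESET() then clamps into [tcp_persmin, tcp_persmax].
	 */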
1758	if (TP_MAXUNACKTIME(tp) && tp->t_acktime) {
1759		maxunacktime = tp->t_acktime + TP_MAXUNACKTIME(tp) - ticks;
1760		if (maxunacktime < 1)
1761			maxunacktime = 1;
1762		if (maxunacktime < tt)
1763			tt = maxunacktime;
1764	}
1765	tcp_timer_activate(tp, TT_PERSIST, tt);
1766	if (tp->t_rxtshift < V_tcp_retries)
1767		tp->t_rxtshift++;
1768}
1769
1770/*
 * Insert TCP options according to the supplied parameters at the
 * location pointed to by optp, in a consistent way.  Can handle
 * unaligned destinations.
 *
 * The order of the option processing is crucial for optimal packing and
 * alignment for the scarce option space.
 *
 * The optimal order for a SYN/SYN-ACK segment is:
 *   MSS (4) + NOP (1) + Window scale (3) + SACK permitted (2) +
 *   Timestamp (10) + Signature (18) = 38 bytes out of a maximum of 40.
 *
 * The SACK option should be last.  SACK blocks consume 8*n+2 bytes.
 * So a full size SACK option is 34 bytes (with 4 SACK blocks).
 * At minimum we need 10 bytes (to generate 1 SACK block).  If both
 * TCP Timestamps (12 bytes with padding) and TCP Signatures (18 bytes)
 * are present, we only have 10 bytes left for SACK options
 * (40 - (12 + 18)).
1786 */
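/*
 * Illustrative layout (not normative) of a SYN carrying the common
 * option set in the order above, as produced by the alignment rules
 * in the switch below:
 *
 *	offset  0: MSS            (4 bytes)
 *	offset  4: NOP            (1 byte)
 *	offset  5: Window scale   (3 bytes)
 *	offset  8: SACK permitted (2 bytes)
 *	offset 10: Timestamp      (10 bytes)
 *
 * for 20 bytes total, already 4-byte aligned, so no trailing EOL/PAD
 * bytes are emitted.
 */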
1787int
1788tcp_addoptions(struct tcpopt *to, u_char *optp)
1789{
1790	u_int32_t mask, optlen = 0;
1791
1792	for (mask = 1; mask < TOF_MAXOPT; mask <<= 1) {
1793		if ((to->to_flags & mask) != mask)
1794			continue;
1795		if (optlen == TCP_MAXOLEN)
1796			break;
1797		switch (to->to_flags & mask) {
1798		case TOF_MSS:
1799			while (optlen % 4) {
1800				optlen += TCPOLEN_NOP;
1801				*optp++ = TCPOPT_NOP;
1802			}
1803			if (TCP_MAXOLEN - optlen < TCPOLEN_MAXSEG)
1804				continue;
1805			optlen += TCPOLEN_MAXSEG;
1806			*optp++ = TCPOPT_MAXSEG;
1807			*optp++ = TCPOLEN_MAXSEG;
1808			to->to_mss = htons(to->to_mss);
1809			bcopy((u_char *)&to->to_mss, optp, sizeof(to->to_mss));
1810			optp += sizeof(to->to_mss);
1811			break;
1812		case TOF_SCALE:
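			/*
			 * Pad to an odd offset so the 3-byte option ends
			 * on a 4-byte boundary; e.g., after a 4-byte MSS
			 * option: NOP at offset 4, window scale at 5-7.
			 */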
1813			while (!optlen || optlen % 2 != 1) {
1814				optlen += TCPOLEN_NOP;
1815				*optp++ = TCPOPT_NOP;
1816			}
1817			if (TCP_MAXOLEN - optlen < TCPOLEN_WINDOW)
1818				continue;
1819			optlen += TCPOLEN_WINDOW;
1820			*optp++ = TCPOPT_WINDOW;
1821			*optp++ = TCPOLEN_WINDOW;
1822			*optp++ = to->to_wscale;
1823			break;
1824		case TOF_SACKPERM:
1825			while (optlen % 2) {
1826				optlen += TCPOLEN_NOP;
1827				*optp++ = TCPOPT_NOP;
1828			}
1829			if (TCP_MAXOLEN - optlen < TCPOLEN_SACK_PERMITTED)
1830				continue;
1831			optlen += TCPOLEN_SACK_PERMITTED;
1832			*optp++ = TCPOPT_SACK_PERMITTED;
1833			*optp++ = TCPOLEN_SACK_PERMITTED;
1834			break;
1835		case TOF_TS:
1836			while (!optlen || optlen % 4 != 2) {
1837				optlen += TCPOLEN_NOP;
1838				*optp++ = TCPOPT_NOP;
1839			}
1840			if (TCP_MAXOLEN - optlen < TCPOLEN_TIMESTAMP)
1841				continue;
1842			optlen += TCPOLEN_TIMESTAMP;
1843			*optp++ = TCPOPT_TIMESTAMP;
1844			*optp++ = TCPOLEN_TIMESTAMP;
1845			to->to_tsval = htonl(to->to_tsval);
1846			to->to_tsecr = htonl(to->to_tsecr);
1847			bcopy((u_char *)&to->to_tsval, optp, sizeof(to->to_tsval));
1848			optp += sizeof(to->to_tsval);
1849			bcopy((u_char *)&to->to_tsecr, optp, sizeof(to->to_tsecr));
1850			optp += sizeof(to->to_tsecr);
1851			break;
1852		case TOF_SIGNATURE:
1853			{
1854			int siglen = TCPOLEN_SIGNATURE - 2;
1855
1856			while (!optlen || optlen % 4 != 2) {
1857				optlen += TCPOLEN_NOP;
1858				*optp++ = TCPOPT_NOP;
1859			}
1860			if (TCP_MAXOLEN - optlen < TCPOLEN_SIGNATURE) {
1861				to->to_flags &= ~TOF_SIGNATURE;
1862				continue;
1863			}
1864			optlen += TCPOLEN_SIGNATURE;
1865			*optp++ = TCPOPT_SIGNATURE;
1866			*optp++ = TCPOLEN_SIGNATURE;
1867			to->to_signature = optp;
1868			while (siglen--)
1869				 *optp++ = 0;
1870			break;
1871			}
1872		case TOF_SACK:
1873			{
1874			int sackblks = 0;
1875			struct sackblk *sack = (struct sackblk *)to->to_sacks;
1876			tcp_seq sack_seq;
1877
1878			while (!optlen || optlen % 4 != 2) {
1879				optlen += TCPOLEN_NOP;
1880				*optp++ = TCPOPT_NOP;
1881			}
1882			if (TCP_MAXOLEN - optlen < TCPOLEN_SACKHDR + TCPOLEN_SACK)
1883				continue;
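			/*
			 * The option length is TCPOLEN_SACKHDR + 8 * n;
			 * e.g., three SACK blocks encode as 2 + 24 == 26
			 * bytes.
			 */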
1884			optlen += TCPOLEN_SACKHDR;
1885			*optp++ = TCPOPT_SACK;
1886			sackblks = min(to->to_nsacks,
1887					(TCP_MAXOLEN - optlen) / TCPOLEN_SACK);
1888			*optp++ = TCPOLEN_SACKHDR + sackblks * TCPOLEN_SACK;
1889			while (sackblks--) {
1890				sack_seq = htonl(sack->start);
1891				bcopy((u_char *)&sack_seq, optp, sizeof(sack_seq));
1892				optp += sizeof(sack_seq);
1893				sack_seq = htonl(sack->end);
1894				bcopy((u_char *)&sack_seq, optp, sizeof(sack_seq));
1895				optp += sizeof(sack_seq);
1896				optlen += TCPOLEN_SACK;
1897				sack++;
1898			}
1899			TCPSTAT_INC(tcps_sack_send_blocks);
1900			break;
1901			}
1902		case TOF_FASTOPEN:
1903			{
1904			int total_len;
1905
1906			/* XXX is there any point to aligning this option? */
1907			total_len = TCPOLEN_FAST_OPEN_EMPTY + to->to_tfo_len;
1908			if (TCP_MAXOLEN - optlen < total_len) {
1909				to->to_flags &= ~TOF_FASTOPEN;
1910				continue;
1911			}
1912			*optp++ = TCPOPT_FAST_OPEN;
1913			*optp++ = total_len;
1914			if (to->to_tfo_len > 0) {
1915				bcopy(to->to_tfo_cookie, optp, to->to_tfo_len);
1916				optp += to->to_tfo_len;
1917			}
1918			optlen += total_len;
1919			break;
1920			}
1921		default:
1922			panic("%s: unknown TCP option type", __func__);
1923			break;
1924		}
1925	}
1926
1927	/* Terminate and pad TCP options to a 4 byte boundary. */
1928	if (optlen % 4) {
1929		optlen += TCPOLEN_EOL;
1930		*optp++ = TCPOPT_EOL;
1931	}
1932	/*
1933	 * According to RFC 793 (STD0007):
1934	 *   "The content of the header beyond the End-of-Option option
1935	 *    must be header padding (i.e., zero)."
1936	 *   and later: "The padding is composed of zeros."
1937	 */
1938	while (optlen % 4) {
1939		optlen += TCPOLEN_PAD;
1940		*optp++ = TCPOPT_PAD;
1941	}
1942
1943	KASSERT(optlen <= TCP_MAXOLEN, ("%s: TCP options too long", __func__));
1944	return (optlen);
1945}
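
/*
 * A minimal usage sketch (illustrative values; the in-tree callers are
 * tcp_output() and the syncache):
 *
 *	struct tcpopt to;
 *	u_char opt[TCP_MAXOLEN];
 *	int optlen;
 *
 *	bzero(&to, sizeof(to));
 *	to.to_flags = TOF_MSS | TOF_SCALE;
 *	to.to_mss = 1460;
 *	to.to_wscale = 7;
 *	optlen = tcp_addoptions(&to, opt);
 *
 * On return, optlen is a multiple of 4 and opt[] holds the packed and
 * padded option bytes, ready to be appended to the TCP header.  Note
 * that tcp_addoptions() byte-swaps fields such as to_mss in place.
 */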
1946
1947/*
1948 * This is a copy of m_copym(), taking the TSO segment size/limit
1949 * constraints into account, and advancing the sndptr as it goes.
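 *
 * The copy may stop short of the requested length (TSO segment limit
 * reached, or a KERN_TLS session boundary); *plen is then updated to
 * the number of bytes actually copied.  A NULL return means an mbuf
 * allocation failed.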
1950 */
1951struct mbuf *
1952tcp_m_copym(struct mbuf *m, int32_t off0, int32_t *plen,
1953    int32_t seglimit, int32_t segsize, struct sockbuf *sb, bool hw_tls)
1954{
1955#ifdef KERN_TLS
1956	struct ktls_session *tls, *ntls;
1957	struct mbuf *start __diagused;
1958#endif
1959	struct mbuf *n, **np;
1960	struct mbuf *top;
1961	int32_t off = off0;
1962	int32_t len = *plen;
1963	int32_t fragsize;
1964	int32_t len_cp = 0;
1965	int32_t *pkthdrlen;
1966	uint32_t mlen, frags;
1967	bool copyhdr;
1968
1969	KASSERT(off >= 0, ("tcp_m_copym, negative off %d", off));
1970	KASSERT(len >= 0, ("tcp_m_copym, negative len %d", len));
1971	if (off == 0 && m->m_flags & M_PKTHDR)
1972		copyhdr = true;
1973	else
1974		copyhdr = false;
1975	while (off > 0) {
1976		KASSERT(m != NULL, ("tcp_m_copym, offset > size of mbuf chain"));
1977		if (off < m->m_len)
1978			break;
1979		off -= m->m_len;
1980		if ((sb) && (m == sb->sb_sndptr)) {
1981			sb->sb_sndptroff += m->m_len;
1982			sb->sb_sndptr = m->m_next;
1983		}
1984		m = m->m_next;
1985	}
1986	np = &top;
1987	top = NULL;
1988	pkthdrlen = NULL;
1989#ifdef KERN_TLS
1990	if (hw_tls && (m->m_flags & M_EXTPG))
1991		tls = m->m_epg_tls;
1992	else
1993		tls = NULL;
1994	start = m;
1995#endif
1996	while (len > 0) {
1997		if (m == NULL) {
1998			KASSERT(len == M_COPYALL,
1999			    ("tcp_m_copym, length > size of mbuf chain"));
2000			*plen = len_cp;
2001			if (pkthdrlen != NULL)
2002				*pkthdrlen = len_cp;
2003			break;
2004		}
2005#ifdef KERN_TLS
2006		if (hw_tls) {
2007			if (m->m_flags & M_EXTPG)
2008				ntls = m->m_epg_tls;
2009			else
2010				ntls = NULL;
2011
2012			/*
2013			 * Avoid mixing TLS records with handshake
2014			 * data or TLS records from different
2015			 * sessions.
2016			 */
2017			if (tls != ntls) {
2018				MPASS(m != start);
2019				*plen = len_cp;
2020				if (pkthdrlen != NULL)
2021					*pkthdrlen = len_cp;
2022				break;
2023			}
2024		}
2025#endif
2026		mlen = min(len, m->m_len - off);
2027		if (seglimit) {
2028			/*
2029			 * For M_EXTPG mbufs, add 3 segments
2030			 * + 1 in case we are crossing page boundaries
2031			 * + 2 in case the TLS hdr/trailer are used
2032			 * It is cheaper to just add the segments
2033			 * than it is to take the cache miss to look
2034			 * at the mbuf ext_pgs state in detail.
2035			 */
2036			if (m->m_flags & M_EXTPG) {
2037				fragsize = min(segsize, PAGE_SIZE);
2038				frags = 3;
2039			} else {
2040				fragsize = segsize;
2041				frags = 0;
2042			}
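			/*
			 * Example (illustrative): an M_EXTPG mbuf with
			 * segsize 1448 and PAGE_SIZE 4096 gets fragsize
			 * 1448; the three extra frags pre-charge the
			 * page-crossing and TLS header/trailer cases
			 * noted above.
			 */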
2043
			/* Break if we really can't fit any more. */
			if ((frags + 1) >= seglimit) {
				*plen = len_cp;
2047				if (pkthdrlen != NULL)
2048					*pkthdrlen = len_cp;
2049				break;
2050			}
2051
			/*
			 * Reduce mlen if we can't copy the whole mbuf,
			 * and adjust len so the loop will end after this
			 * mbuf.
			 */
2058			if ((frags + howmany(mlen, fragsize)) >= seglimit) {
2059				mlen = (seglimit - frags - 1) * fragsize;
2060				len = mlen;
2061				*plen = len_cp + len;
2062				if (pkthdrlen != NULL)
2063					*pkthdrlen = *plen;
2064			}
2065			frags += howmany(mlen, fragsize);
2066			if (frags == 0)
2067				frags++;
2068			seglimit -= frags;
2069			KASSERT(seglimit > 0,
2070			    ("%s: seglimit went too low", __func__));
2071		}
2072		if (copyhdr)
2073			n = m_gethdr(M_NOWAIT, m->m_type);
2074		else
2075			n = m_get(M_NOWAIT, m->m_type);
2076		*np = n;
2077		if (n == NULL)
2078			goto nospace;
2079		if (copyhdr) {
2080			if (!m_dup_pkthdr(n, m, M_NOWAIT))
2081				goto nospace;
2082			if (len == M_COPYALL)
2083				n->m_pkthdr.len -= off0;
2084			else
2085				n->m_pkthdr.len = len;
2086			pkthdrlen = &n->m_pkthdr.len;
2087			copyhdr = false;
2088		}
2089		n->m_len = mlen;
2090		len_cp += n->m_len;
2091		if (m->m_flags & (M_EXT | M_EXTPG)) {
2092			n->m_data = m->m_data + off;
2093			mb_dupcl(n, m);
2094		} else
2095			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
2096			    (u_int)n->m_len);
2097
2098		if (sb && (sb->sb_sndptr == m) &&
2099		    ((n->m_len + off) >= m->m_len) && m->m_next) {
2100			sb->sb_sndptroff += m->m_len;
2101			sb->sb_sndptr = m->m_next;
2102		}
2103		off = 0;
2104		if (len != M_COPYALL) {
2105			len -= n->m_len;
2106		}
2107		m = m->m_next;
2108		np = &n->m_next;
2109	}
2110	return (top);
2111nospace:
2112	m_freem(top);
2113	return (NULL);
2114}
2115
2116void
2117tcp_sndbuf_autoscale(struct tcpcb *tp, struct socket *so, uint32_t sendwin)
2118{
2119
2120	/*
	 * Automatic sizing of the send socket buffer.  Often the send
	 * buffer size is not optimally adjusted to the actual network
	 * conditions at hand (the delay-bandwidth product).  Setting the
	 * buffer size too small limits throughput on links with high
	 * bandwidth and high delay (e.g., trans-continental/oceanic
	 * links).  Setting the buffer size too big consumes too much
	 * real kernel memory, especially with many connections on busy
	 * servers.
2128	 *
2129	 * The criteria to step up the send buffer one notch are:
2130	 *  1. receive window of remote host is larger than send buffer
2131	 *     (with a fudge factor of 5/4th);
2132	 *  2. send buffer is filled to 7/8th with data (so we actually
2133	 *     have data to make use of it);
2134	 *  3. send buffer fill has not hit maximal automatic size;
	 *  4. our send window (slow start and congestion controlled) is
2136	 *     larger than sent but unacknowledged data in send buffer.
2137	 *
2138	 * The remote host receive window scaling factor may limit the
2139	 * growing of the send buffer before it reaches its allowed
2140	 * maximum.
2141	 *
2142	 * It scales directly with slow start or congestion window
2143	 * and does at most one step per received ACK.  This fast
2144	 * scaling has the drawback of growing the send buffer beyond
2145	 * what is strictly necessary to make full use of a given
	 * delay*bandwidth product.  However, testing has shown this not
	 * to be much of a problem.  At worst we are trading the waste of
	 * available bandwidth (its non-use) for the waste of some socket
	 * buffer memory.
2150	 *
2151	 * TODO: Shrink send buffer during idle periods together
2152	 * with congestion window.  Requires another timer.  Has to
2153	 * wait for upcoming tcp timer rewrite.
2154	 *
	 * XXXGL: should sbused() or sbavail() be used here?
2156	 */
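	/*
	 * Illustrative check (hypothetical numbers): with sb_hiwat 32768
	 * and lowat 0, criterion 1 requires snd_wnd / 4 * 5 >= 32768,
	 * i.e. a peer window of roughly 26 KB, and criterion 2 requires
	 * at least 28672 bytes (7/8 of 32768) queued before the buffer
	 * is grown by V_tcp_autosndbuf_inc.
	 */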
2157	if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) {
2158		int lowat;
2159
2160		lowat = V_tcp_sendbuf_auto_lowat ? so->so_snd.sb_lowat : 0;
2161		if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat - lowat &&
2162		    sbused(&so->so_snd) >=
2163		    (so->so_snd.sb_hiwat / 8 * 7) - lowat &&
2164		    sbused(&so->so_snd) < V_tcp_autosndbuf_max &&
2165		    sendwin >= (sbused(&so->so_snd) -
2166		    (tp->snd_nxt - tp->snd_una))) {
2167			if (!sbreserve_locked(so, SO_SND,
2168			    min(so->so_snd.sb_hiwat + V_tcp_autosndbuf_inc,
2169			     V_tcp_autosndbuf_max), curthread))
2170				so->so_snd.sb_flags &= ~SB_AUTOSIZE;
2171		}
2172	}
2173}
2174