/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2007, Myricom Inc.
 * Copyright (c) 2008, Intel Corporation.
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2016-2021 Mellanox Technologies.
 * All rights reserved.
 *
 * Portions of this software were developed by Bjoern Zeeb
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sockbuf.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/ethernet.h>
#include <net/bpf.h>
#include <net/vnet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_private.h>
#include <net/if_types.h>
#include <net/infiniband.h>
#include <net/if_lagg.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip6.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/in_pcb.h>
#include <netinet6/in6_pcb.h>
#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_lro.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_hpts.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_fsm.h>
#include <netinet/udp.h>
#include <netinet6/ip6_var.h>

#include <machine/in_cksum.h>

static MALLOC_DEFINE(M_LRO, "LRO", "LRO control structures");

static void	tcp_lro_rx_done(struct lro_ctrl *lc);
static int	tcp_lro_rx_common(struct lro_ctrl *lc, struct mbuf *m,
		    uint32_t csum, bool use_hash);

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, lro, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TCP LRO");

long tcplro_stacks_wanting_mbufq;
int	(*tcp_lro_flush_tcphpts)(struct lro_ctrl *lc, struct lro_entry *le);

counter_u64_t tcp_inp_lro_direct_queue;
counter_u64_t tcp_inp_lro_wokeup_queue;
counter_u64_t tcp_inp_lro_compressed;
counter_u64_t tcp_inp_lro_locks_taken;
counter_u64_t tcp_extra_mbuf;
counter_u64_t tcp_would_have_but;
counter_u64_t tcp_comp_total;
counter_u64_t tcp_uncomp_total;
counter_u64_t tcp_bad_csums;

static unsigned	tcp_lro_entries = TCP_LRO_ENTRIES;
SYSCTL_UINT(_net_inet_tcp_lro, OID_AUTO, entries,
    CTLFLAG_RDTUN | CTLFLAG_MPSAFE, &tcp_lro_entries, 0,
    "default number of LRO entries");

static uint32_t tcp_lro_cpu_set_thresh = TCP_LRO_CPU_DECLARATION_THRESH;
SYSCTL_UINT(_net_inet_tcp_lro, OID_AUTO, lro_cpu_threshold,
    CTLFLAG_RDTUN | CTLFLAG_MPSAFE, &tcp_lro_cpu_set_thresh, 0,
    "Number of interrupts in a row on the same CPU that will make us declare an 'affinity' CPU");

static uint32_t tcp_less_accurate_lro_ts = 0;
SYSCTL_UINT(_net_inet_tcp_lro, OID_AUTO, lro_less_accurate,
    CTLFLAG_MPSAFE, &tcp_less_accurate_lro_ts, 0,
    "Trade off timestamp accuracy for efficiency by doing fewer timestamp operations");

SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, fullqueue, CTLFLAG_RD,
    &tcp_inp_lro_direct_queue, "Number of lro's fully queued to transport");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, wokeup, CTLFLAG_RD,
    &tcp_inp_lro_wokeup_queue, "Number of lro's where we woke up transport via hpts");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, compressed, CTLFLAG_RD,
    &tcp_inp_lro_compressed, "Number of lro's compressed and sent to transport");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, lockcnt, CTLFLAG_RD,
    &tcp_inp_lro_locks_taken, "Number of lro's inp_wlocks taken");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, extra_mbuf, CTLFLAG_RD,
    &tcp_extra_mbuf, "Number of times we had an extra compressed ack dropped into the tp");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, would_have_but, CTLFLAG_RD,
    &tcp_would_have_but, "Number of times we would have had an extra compressed, but mget failed");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, with_m_ackcmp, CTLFLAG_RD,
    &tcp_comp_total, "Number of mbufs queued with M_ACKCMP flags set");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, without_m_ackcmp, CTLFLAG_RD,
    &tcp_uncomp_total, "Number of mbufs queued without M_ACKCMP");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, lro_badcsum, CTLFLAG_RD,
    &tcp_bad_csums, "Number of packets that the common code saw with bad csums");

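/*
 * TCP stacks which support mbuf queueing (e.g. those driven by HPTS)
 * register here, so the LRO code knows whether per-mbuf receive
 * timestamps may be needed on queued packets.
 */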
void
tcp_lro_reg_mbufq(void)
{
	atomic_fetchadd_long(&tcplro_stacks_wanting_mbufq, 1);
}

void
tcp_lro_dereg_mbufq(void)
{
	atomic_fetchadd_long(&tcplro_stacks_wanting_mbufq, -1);
}

static __inline void
tcp_lro_active_insert(struct lro_ctrl *lc, struct lro_head *bucket,
    struct lro_entry *le)
{

	LIST_INSERT_HEAD(&lc->lro_active, le, next);
	LIST_INSERT_HEAD(bucket, le, hash_next);
}

static __inline void
tcp_lro_active_remove(struct lro_entry *le)
{

	LIST_REMOVE(le, next);		/* active list */
	LIST_REMOVE(le, hash_next);	/* hash bucket */
}

int
tcp_lro_init(struct lro_ctrl *lc)
{
	return (tcp_lro_init_args(lc, NULL, tcp_lro_entries, 0));
}

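/*
 * Initialize an LRO context: the mbuf sort array and all LRO entries
 * are carved out of a single malloc(9) allocation, and a hash table
 * sized to the larger of the two counts is used for entry lookup.
 */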
int
tcp_lro_init_args(struct lro_ctrl *lc, struct ifnet *ifp,
    unsigned lro_entries, unsigned lro_mbufs)
{
	struct lro_entry *le;
	size_t size;
	unsigned i, elements;

	lc->lro_bad_csum = 0;
	lc->lro_queued = 0;
	lc->lro_flushed = 0;
	lc->lro_mbuf_count = 0;
	lc->lro_mbuf_max = lro_mbufs;
	lc->lro_cnt = lro_entries;
	lc->lro_ackcnt_lim = TCP_LRO_ACKCNT_MAX;
	lc->lro_length_lim = TCP_LRO_LENGTH_MAX;
	lc->ifp = ifp;
	LIST_INIT(&lc->lro_free);
	LIST_INIT(&lc->lro_active);

	/* create hash table to accelerate entry lookup */
	if (lro_entries > lro_mbufs)
		elements = lro_entries;
	else
		elements = lro_mbufs;
	lc->lro_hash = phashinit_flags(elements, M_LRO, &lc->lro_hashsz,
	    HASH_NOWAIT);
	if (lc->lro_hash == NULL) {
		memset(lc, 0, sizeof(*lc));
		return (ENOMEM);
	}

	/* compute size to allocate */
	size = (lro_mbufs * sizeof(struct lro_mbuf_sort)) +
	    (lro_entries * sizeof(*le));
	lc->lro_mbuf_data = (struct lro_mbuf_sort *)
	    malloc(size, M_LRO, M_NOWAIT | M_ZERO);

	/* check for out of memory */
	if (lc->lro_mbuf_data == NULL) {
		free(lc->lro_hash, M_LRO);
		memset(lc, 0, sizeof(*lc));
		return (ENOMEM);
	}
	/* compute offset for LRO entries */
	le = (struct lro_entry *)
	    (lc->lro_mbuf_data + lro_mbufs);

	/* setup linked list */
	for (i = 0; i != lro_entries; i++)
		LIST_INSERT_HEAD(&lc->lro_free, le + i, next);

	return (0);
}

struct vxlan_header {
	uint32_t	vxlh_flags;
	uint32_t	vxlh_vni;
};

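/*
 * Parse one layer of headers starting at "ptr": optional VXLAN header,
 * Ethernet (with optional VLAN tag), IPv4 or IPv6, and finally TCP or
 * UDP.  On success a pointer just past the parsed headers is returned
 * and, if "update_data" is set, the parser's address/port data is
 * filled in.  NULL is returned for packets which cannot be LRO'ed.
 */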
static inline void *
tcp_lro_low_level_parser(void *ptr, struct lro_parser *parser, bool update_data, bool is_vxlan, int mlen)
{
	const struct ether_vlan_header *eh;
	void *old;
	uint16_t eth_type;

	if (update_data)
		memset(parser, 0, sizeof(*parser));

	old = ptr;

	if (is_vxlan) {
		const struct vxlan_header *vxh;
		vxh = ptr;
		ptr = (uint8_t *)ptr + sizeof(*vxh);
		if (update_data) {
			parser->data.vxlan_vni =
			    vxh->vxlh_vni & htonl(0xffffff00);
		}
	}

	eh = ptr;
	if (__predict_false(eh->evl_encap_proto == htons(ETHERTYPE_VLAN))) {
		eth_type = eh->evl_proto;
		if (update_data) {
			/* strip priority and keep VLAN ID only */
			parser->data.vlan_id = eh->evl_tag & htons(EVL_VLID_MASK);
		}
		/* advance to next header */
		ptr = (uint8_t *)ptr + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		mlen -= (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
	} else {
		eth_type = eh->evl_encap_proto;
		/* advance to next header */
		mlen -= ETHER_HDR_LEN;
		ptr = (uint8_t *)ptr + ETHER_HDR_LEN;
	}
	if (__predict_false(mlen <= 0))
		return (NULL);
	switch (eth_type) {
#ifdef INET
	case htons(ETHERTYPE_IP):
		parser->ip4 = ptr;
		if (__predict_false(mlen < sizeof(struct ip)))
			return (NULL);
		/* Ensure there are no IPv4 options. */
		if ((parser->ip4->ip_hl << 2) != sizeof(*parser->ip4))
			break;
		/* .. and the packet is not fragmented. */
		if (parser->ip4->ip_off & htons(IP_MF|IP_OFFMASK))
			break;
		/* .. and the packet has valid src/dst addrs */
		if (__predict_false(parser->ip4->ip_src.s_addr == INADDR_ANY ||
			parser->ip4->ip_dst.s_addr == INADDR_ANY))
			break;
		ptr = (uint8_t *)ptr + (parser->ip4->ip_hl << 2);
		mlen -= sizeof(struct ip);
		if (update_data) {
			parser->data.s_addr.v4 = parser->ip4->ip_src;
			parser->data.d_addr.v4 = parser->ip4->ip_dst;
		}
		switch (parser->ip4->ip_p) {
		case IPPROTO_UDP:
			if (__predict_false(mlen < sizeof(struct udphdr)))
				return (NULL);
			parser->udp = ptr;
			if (update_data) {
				parser->data.lro_type = LRO_TYPE_IPV4_UDP;
				parser->data.s_port = parser->udp->uh_sport;
				parser->data.d_port = parser->udp->uh_dport;
			} else {
				MPASS(parser->data.lro_type == LRO_TYPE_IPV4_UDP);
			}
			ptr = ((uint8_t *)ptr + sizeof(*parser->udp));
			parser->total_hdr_len = (uint8_t *)ptr - (uint8_t *)old;
			return (ptr);
		case IPPROTO_TCP:
			parser->tcp = ptr;
			if (__predict_false(mlen < sizeof(struct tcphdr)))
				return (NULL);
			if (update_data) {
				parser->data.lro_type = LRO_TYPE_IPV4_TCP;
				parser->data.s_port = parser->tcp->th_sport;
				parser->data.d_port = parser->tcp->th_dport;
			} else {
				MPASS(parser->data.lro_type == LRO_TYPE_IPV4_TCP);
			}
			if (__predict_false(mlen < (parser->tcp->th_off << 2)))
				return (NULL);
			ptr = (uint8_t *)ptr + (parser->tcp->th_off << 2);
			parser->total_hdr_len = (uint8_t *)ptr - (uint8_t *)old;
			return (ptr);
		default:
			break;
		}
		break;
#endif
#ifdef INET6
	case htons(ETHERTYPE_IPV6):
		parser->ip6 = ptr;
		if (__predict_false(mlen < sizeof(struct ip6_hdr)))
			return (NULL);
		/* Ensure the packet has valid src/dst addrs */
		if (__predict_false(IN6_IS_ADDR_UNSPECIFIED(&parser->ip6->ip6_src) ||
			IN6_IS_ADDR_UNSPECIFIED(&parser->ip6->ip6_dst)))
			return (NULL);
		ptr = (uint8_t *)ptr + sizeof(*parser->ip6);
		if (update_data) {
			parser->data.s_addr.v6 = parser->ip6->ip6_src;
			parser->data.d_addr.v6 = parser->ip6->ip6_dst;
		}
		mlen -= sizeof(struct ip6_hdr);
		switch (parser->ip6->ip6_nxt) {
		case IPPROTO_UDP:
			if (__predict_false(mlen < sizeof(struct udphdr)))
				return (NULL);
			parser->udp = ptr;
			if (update_data) {
				parser->data.lro_type = LRO_TYPE_IPV6_UDP;
				parser->data.s_port = parser->udp->uh_sport;
				parser->data.d_port = parser->udp->uh_dport;
			} else {
				MPASS(parser->data.lro_type == LRO_TYPE_IPV6_UDP);
			}
			ptr = (uint8_t *)ptr + sizeof(*parser->udp);
			parser->total_hdr_len = (uint8_t *)ptr - (uint8_t *)old;
			return (ptr);
		case IPPROTO_TCP:
			if (__predict_false(mlen < sizeof(struct tcphdr)))
				return (NULL);
			parser->tcp = ptr;
			if (update_data) {
				parser->data.lro_type = LRO_TYPE_IPV6_TCP;
				parser->data.s_port = parser->tcp->th_sport;
				parser->data.d_port = parser->tcp->th_dport;
			} else {
				MPASS(parser->data.lro_type == LRO_TYPE_IPV6_TCP);
			}
			if (__predict_false(mlen < (parser->tcp->th_off << 2)))
				return (NULL);
			ptr = (uint8_t *)ptr + (parser->tcp->th_off << 2);
			parser->total_hdr_len = (uint8_t *)ptr - (uint8_t *)old;
			return (ptr);
		default:
			break;
		}
		break;
#endif
	default:
		break;
	}
	/* Invalid packet - cannot parse */
	return (NULL);
}

static const int vxlan_csum = CSUM_INNER_L3_CALC | CSUM_INNER_L3_VALID |
    CSUM_INNER_L4_CALC | CSUM_INNER_L4_VALID;

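/*
 * Parse the outer and, for VXLAN encapsulated packets, the inner
 * headers of "m".  Returns the parser describing the innermost TCP
 * stream, or NULL if the packet is not suitable for LRO.
 */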
static inline struct lro_parser *
tcp_lro_parser(struct mbuf *m, struct lro_parser *po, struct lro_parser *pi, bool update_data)
{
	void *data_ptr;

	/* Try to parse outer headers first. */
	data_ptr = tcp_lro_low_level_parser(m->m_data, po, update_data, false, m->m_len);
	if (data_ptr == NULL || po->total_hdr_len > m->m_len)
		return (NULL);

	if (update_data) {
		/* Store VLAN ID, if any. */
		if (__predict_false(m->m_flags & M_VLANTAG)) {
			po->data.vlan_id =
			    htons(m->m_pkthdr.ether_vtag) & htons(EVL_VLID_MASK);
		}
		/* Store decrypted flag, if any. */
		if (__predict_false((m->m_pkthdr.csum_flags &
		    CSUM_TLS_MASK) == CSUM_TLS_DECRYPTED))
			po->data.lro_flags |= LRO_FLAG_DECRYPTED;
	}

	switch (po->data.lro_type) {
	case LRO_TYPE_IPV4_UDP:
	case LRO_TYPE_IPV6_UDP:
		/* Check for VXLAN headers. */
		if ((m->m_pkthdr.csum_flags & vxlan_csum) != vxlan_csum)
			break;

		/* Try to parse inner headers. */
		data_ptr = tcp_lro_low_level_parser(data_ptr, pi, update_data, true,
						    (m->m_len - ((caddr_t)data_ptr - m->m_data)));
		if (data_ptr == NULL || (pi->total_hdr_len + po->total_hdr_len) > m->m_len)
			break;

		/* Verify supported header types. */
		switch (pi->data.lro_type) {
		case LRO_TYPE_IPV4_TCP:
		case LRO_TYPE_IPV6_TCP:
			return (pi);
		default:
			break;
		}
		break;
	case LRO_TYPE_IPV4_TCP:
	case LRO_TYPE_IPV6_TCP:
		if (update_data)
			memset(pi, 0, sizeof(*pi));
		return (po);
	default:
		break;
	}
	return (NULL);
}

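/*
 * Trim any frame padding off the end of the mbuf chain so its total
 * length matches the IP header: returns 0 on success and
 * TCP_LRO_CANNOT if the chain is shorter than the IP length claims.
 */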
static inline int
tcp_lro_trim_mbuf_chain(struct mbuf *m, const struct lro_parser *po)
{
	int len;

	switch (po->data.lro_type) {
#ifdef INET
	case LRO_TYPE_IPV4_TCP:
		len = ((uint8_t *)po->ip4 - (uint8_t *)m->m_data) +
		    ntohs(po->ip4->ip_len);
		break;
#endif
#ifdef INET6
	case LRO_TYPE_IPV6_TCP:
		len = ((uint8_t *)po->ip6 - (uint8_t *)m->m_data) +
		    ntohs(po->ip6->ip6_plen) + sizeof(*po->ip6);
		break;
#endif
	default:
		return (TCP_LRO_CANNOT);
	}

	/*
	 * If the frame is padded beyond the end of the IP packet,
	 * then trim the extra bytes off:
	 */
	if (__predict_true(m->m_pkthdr.len == len)) {
		return (0);
	} else if (m->m_pkthdr.len > len) {
		m_adj(m, len - m->m_pkthdr.len);
		return (0);
	}
	return (TCP_LRO_CANNOT);
}

static void
lro_free_mbuf_chain(struct mbuf *m)
{
	struct mbuf *save;

	while (m) {
		save = m->m_nextpkt;
		m->m_nextpkt = NULL;
		m_freem(m);
		m = save;
	}
}

void
tcp_lro_free(struct lro_ctrl *lc)
{
	struct lro_entry *le;
	unsigned x;

	/* reset LRO free list */
	LIST_INIT(&lc->lro_free);

	/* free active mbufs, if any */
	while ((le = LIST_FIRST(&lc->lro_active)) != NULL) {
		tcp_lro_active_remove(le);
		lro_free_mbuf_chain(le->m_head);
	}

	/* free hash table */
	free(lc->lro_hash, M_LRO);
	lc->lro_hash = NULL;
	lc->lro_hashsz = 0;

	/* free mbuf array, if any */
	for (x = 0; x != lc->lro_mbuf_count; x++)
		m_freem(lc->lro_mbuf_data[x].mb);
	lc->lro_mbuf_count = 0;

	/* free allocated memory, if any */
	free(lc->lro_mbuf_data, M_LRO);
	lc->lro_mbuf_data = NULL;
}

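/*
 * Compute the 16-bit one's complement sum of the TCP header, excluding
 * the checksum field itself.  Starting the sum at -th->th_sum cancels
 * out the checksum field when the loop later adds it back in.
 */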
static uint16_t
tcp_lro_rx_csum_tcphdr(const struct tcphdr *th)
{
	const uint16_t *ptr;
	uint32_t csum;
	uint16_t len;

	csum = -th->th_sum;	/* exclude checksum field */
	len = th->th_off;
	ptr = (const uint16_t *)th;
	while (len--) {
		csum += *ptr;
		ptr++;
		csum += *ptr;
		ptr++;
	}
	while (csum > 0xffff)
		csum = (csum >> 16) + (csum & 0xffff);

	return (csum);
}

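/*
 * Derive the checksum of the TCP payload alone by subtracting the
 * pseudo header and TCP header sums from the full TCP checksum.
 */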
static uint16_t
tcp_lro_rx_csum_data(const struct lro_parser *pa, uint16_t tcp_csum)
{
	uint32_t c;
	uint16_t cs;

	c = tcp_csum;

	switch (pa->data.lro_type) {
#ifdef INET6
	case LRO_TYPE_IPV6_TCP:
		/* Compute full pseudo IPv6 header checksum. */
		cs = in6_cksum_pseudo(pa->ip6, ntohs(pa->ip6->ip6_plen), pa->ip6->ip6_nxt, 0);
		break;
#endif
#ifdef INET
	case LRO_TYPE_IPV4_TCP:
		/* Compute full pseudo IPv4 header checksum. */
		cs = in_addword(ntohs(pa->ip4->ip_len) - sizeof(*pa->ip4), IPPROTO_TCP);
		cs = in_pseudo(pa->ip4->ip_src.s_addr, pa->ip4->ip_dst.s_addr, htons(cs));
		break;
#endif
	default:
		cs = 0;		/* Keep compiler happy. */
		break;
	}

	/* Complement checksum. */
	cs = ~cs;
	c += cs;

	/* Remove TCP header checksum. */
	cs = ~tcp_lro_rx_csum_tcphdr(pa->tcp);
	c += cs;

	/* Compute checksum remainder. */
	while (c > 0xffff)
		c = (c >> 16) + (c & 0xffff);

	return (c);
}

static void
tcp_lro_rx_done(struct lro_ctrl *lc)
{
	struct lro_entry *le;

	while ((le = LIST_FIRST(&lc->lro_active)) != NULL) {
		tcp_lro_active_remove(le);
		tcp_lro_flush(lc, le);
	}
}

static void
tcp_lro_flush_active(struct lro_ctrl *lc)
{
	struct lro_entry *le;

	/*
	 * Walk through the list of le entries and flush any that
	 * hold packets.  This is called when an inbound packet
	 * (e.g. a SYN) must be delivered only after everything
	 * received before it.  Note that we have to do the remove
	 * because tcp_lro_flush() assumes the entry is being freed.
	 * That is fine; it will simply be reallocated like new.
	 */
	LIST_FOREACH(le, &lc->lro_active, next) {
		if (le->m_head != NULL) {
			tcp_lro_active_remove(le);
			tcp_lro_flush(lc, le);
		}
	}
}

void
tcp_lro_flush_inactive(struct lro_ctrl *lc, const struct timeval *timeout)
{
	struct lro_entry *le, *le_tmp;
	uint64_t now, tov;
	struct bintime bt;

	NET_EPOCH_ASSERT();
	if (LIST_EMPTY(&lc->lro_active))
		return;

	/* get timeout time and current time in ns */
	binuptime(&bt);
	now = bintime2ns(&bt);
	tov = ((timeout->tv_sec * 1000000000) + (timeout->tv_usec * 1000));
	LIST_FOREACH_SAFE(le, &lc->lro_active, next, le_tmp) {
		if (now >= (bintime2ns(&le->alloc_time) + tov)) {
			tcp_lro_active_remove(le);
			tcp_lro_flush(lc, le);
		}
	}
}

#ifdef INET
static int
tcp_lro_rx_ipv4(struct lro_ctrl *lc, struct mbuf *m, struct ip *ip4)
{
	uint16_t csum;

	/* Legacy IP has a header checksum that needs to be correct. */
	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
		if (__predict_false((m->m_pkthdr.csum_flags & CSUM_IP_VALID) == 0)) {
			lc->lro_bad_csum++;
			return (TCP_LRO_CANNOT);
		}
	} else {
		csum = in_cksum_hdr(ip4);
		if (__predict_false(csum != 0)) {
			lc->lro_bad_csum++;
			return (TCP_LRO_CANNOT);
		}
	}
	return (0);
}
#endif

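/*
 * Store "value" at "*ptr" and return, via "*psum", the folded one's
 * complement delta between the old and new 16-bit values, so the
 * delta can be added directly into a running checksum (RFC 1624
 * style incremental update).
 */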
static inline void
tcp_lro_assign_and_checksum_16(uint16_t *ptr, uint16_t value, uint16_t *psum)
{
	uint32_t csum;

	csum = 0xffff - *ptr + value;
	while (csum > 0xffff)
		csum = (csum >> 16) + (csum & 0xffff);
	*ptr = value;
	*psum = csum;
}

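/*
 * Rewrite the IP and TCP/UDP headers of a merged chain: update the
 * length fields, latest ACK, window and timestamp, and incrementally
 * patch the affected checksums.  The return value is the accumulated
 * checksum delta to be applied by the next (outer) stage, if any.
 */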
static uint16_t
tcp_lro_update_checksum(const struct lro_parser *pa, const struct lro_entry *le,
    uint16_t payload_len, uint16_t delta_sum)
{
	uint32_t csum;
	uint16_t tlen;
	uint16_t temp[5] = {};

	switch (pa->data.lro_type) {
	case LRO_TYPE_IPV4_TCP:
		/* Compute new IPv4 length. */
		tlen = (pa->ip4->ip_hl << 2) + (pa->tcp->th_off << 2) + payload_len;
		tcp_lro_assign_and_checksum_16(&pa->ip4->ip_len, htons(tlen), &temp[0]);

		/* Subtract delta from current IPv4 checksum. */
		csum = pa->ip4->ip_sum + 0xffff - temp[0];
		while (csum > 0xffff)
			csum = (csum >> 16) + (csum & 0xffff);
		tcp_lro_assign_and_checksum_16(&pa->ip4->ip_sum, csum, &temp[1]);
		goto update_tcp_header;

	case LRO_TYPE_IPV6_TCP:
		/* Compute new IPv6 length. */
		tlen = (pa->tcp->th_off << 2) + payload_len;
		tcp_lro_assign_and_checksum_16(&pa->ip6->ip6_plen, htons(tlen), &temp[0]);
		goto update_tcp_header;

	case LRO_TYPE_IPV4_UDP:
		/* Compute new IPv4 length. */
		tlen = (pa->ip4->ip_hl << 2) + sizeof(*pa->udp) + payload_len;
		tcp_lro_assign_and_checksum_16(&pa->ip4->ip_len, htons(tlen), &temp[0]);

		/* Subtract delta from current IPv4 checksum. */
		csum = pa->ip4->ip_sum + 0xffff - temp[0];
		while (csum > 0xffff)
			csum = (csum >> 16) + (csum & 0xffff);
		tcp_lro_assign_and_checksum_16(&pa->ip4->ip_sum, csum, &temp[1]);
		goto update_udp_header;

	case LRO_TYPE_IPV6_UDP:
		/* Compute new IPv6 length. */
		tlen = sizeof(*pa->udp) + payload_len;
		tcp_lro_assign_and_checksum_16(&pa->ip6->ip6_plen, htons(tlen), &temp[0]);
		goto update_udp_header;

	default:
		return (0);
	}

update_tcp_header:
	/* Compute current TCP header checksum. */
	temp[2] = tcp_lro_rx_csum_tcphdr(pa->tcp);

	/* Incorporate the latest ACK into the TCP header. */
	pa->tcp->th_ack = le->ack_seq;
	pa->tcp->th_win = le->window;

	/* Incorporate latest timestamp into the TCP header. */
	if (le->timestamp != 0) {
		uint32_t *ts_ptr;

		ts_ptr = (uint32_t *)(pa->tcp + 1);
		ts_ptr[1] = htonl(le->tsval);
		ts_ptr[2] = le->tsecr;
	}

	/* Compute new TCP header checksum. */
	temp[3] = tcp_lro_rx_csum_tcphdr(pa->tcp);

	/* Compute new TCP checksum. */
	csum = pa->tcp->th_sum + 0xffff - delta_sum +
	    0xffff - temp[0] + 0xffff - temp[3] + temp[2];
	while (csum > 0xffff)
		csum = (csum >> 16) + (csum & 0xffff);

	/* Assign new TCP checksum. */
	tcp_lro_assign_and_checksum_16(&pa->tcp->th_sum, csum, &temp[4]);

	/* Compute all modifications affecting next checksum. */
	csum = temp[0] + temp[1] + 0xffff - temp[2] +
	    temp[3] + temp[4] + delta_sum;
	while (csum > 0xffff)
		csum = (csum >> 16) + (csum & 0xffff);

	/* Return delta checksum to next stage, if any. */
	return (csum);

update_udp_header:
	tlen = sizeof(*pa->udp) + payload_len;
	/* Assign new UDP length and compute checksum delta. */
	tcp_lro_assign_and_checksum_16(&pa->udp->uh_ulen, htons(tlen), &temp[2]);

	/* Check if there is a UDP checksum. */
	if (__predict_false(pa->udp->uh_sum != 0)) {
		/* Compute new UDP checksum. */
		csum = pa->udp->uh_sum + 0xffff - delta_sum +
		    0xffff - temp[0] + 0xffff - temp[2];
		while (csum > 0xffff)
			csum = (csum >> 16) + (csum & 0xffff);
		/* Assign new UDP checksum. */
		tcp_lro_assign_and_checksum_16(&pa->udp->uh_sum, csum, &temp[3]);
	}

	/* Compute all modifications affecting next checksum. */
	csum = temp[0] + temp[1] + temp[2] + temp[3] + delta_sum;
	while (csum > 0xffff)
		csum = (csum >> 16) + (csum & 0xffff);

	/* Return delta checksum to next stage, if any. */
	return (csum);
}

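/*
 * Hand a finished LRO entry to the network stack: recompute the
 * header checksums if segments were merged, mark the checksums as
 * hardware verified and input the mbuf chain via the ifnet.
 */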
static void
tcp_flush_out_entry(struct lro_ctrl *lc, struct lro_entry *le)
{
	/* Check if we need to recompute any checksums. */
	if (le->needs_merge) {
		uint16_t csum;

		switch (le->inner.data.lro_type) {
		case LRO_TYPE_IPV4_TCP:
			csum = tcp_lro_update_checksum(&le->inner, le,
			    le->m_head->m_pkthdr.lro_tcp_d_len,
			    le->m_head->m_pkthdr.lro_tcp_d_csum);
			csum = tcp_lro_update_checksum(&le->outer, NULL,
			    le->m_head->m_pkthdr.lro_tcp_d_len +
			    le->inner.total_hdr_len, csum);
			le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR | CSUM_IP_CHECKED | CSUM_IP_VALID;
			le->m_head->m_pkthdr.csum_data = 0xffff;
			if (__predict_false(le->outer.data.lro_flags & LRO_FLAG_DECRYPTED))
				le->m_head->m_pkthdr.csum_flags |= CSUM_TLS_DECRYPTED;
			break;
		case LRO_TYPE_IPV6_TCP:
			csum = tcp_lro_update_checksum(&le->inner, le,
			    le->m_head->m_pkthdr.lro_tcp_d_len,
			    le->m_head->m_pkthdr.lro_tcp_d_csum);
			csum = tcp_lro_update_checksum(&le->outer, NULL,
			    le->m_head->m_pkthdr.lro_tcp_d_len +
			    le->inner.total_hdr_len, csum);
			le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR;
			le->m_head->m_pkthdr.csum_data = 0xffff;
			if (__predict_false(le->outer.data.lro_flags & LRO_FLAG_DECRYPTED))
				le->m_head->m_pkthdr.csum_flags |= CSUM_TLS_DECRYPTED;
			break;
		case LRO_TYPE_NONE:
			switch (le->outer.data.lro_type) {
			case LRO_TYPE_IPV4_TCP:
				csum = tcp_lro_update_checksum(&le->outer, le,
				    le->m_head->m_pkthdr.lro_tcp_d_len,
				    le->m_head->m_pkthdr.lro_tcp_d_csum);
				le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
				    CSUM_PSEUDO_HDR | CSUM_IP_CHECKED | CSUM_IP_VALID;
				le->m_head->m_pkthdr.csum_data = 0xffff;
				if (__predict_false(le->outer.data.lro_flags & LRO_FLAG_DECRYPTED))
					le->m_head->m_pkthdr.csum_flags |= CSUM_TLS_DECRYPTED;
				break;
			case LRO_TYPE_IPV6_TCP:
				csum = tcp_lro_update_checksum(&le->outer, le,
				    le->m_head->m_pkthdr.lro_tcp_d_len,
				    le->m_head->m_pkthdr.lro_tcp_d_csum);
				le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
				    CSUM_PSEUDO_HDR;
				le->m_head->m_pkthdr.csum_data = 0xffff;
				if (__predict_false(le->outer.data.lro_flags & LRO_FLAG_DECRYPTED))
					le->m_head->m_pkthdr.csum_flags |= CSUM_TLS_DECRYPTED;
				break;
			default:
				break;
			}
			break;
		default:
			break;
		}
	}

	/*
	 * Break any chain.  In the singleton case m_nextpkt has not
	 * been set to NULL; in the other cases tcp_push_and_replace()
	 * has already cleared it.
	 */
	le->m_head->m_nextpkt = NULL;
	lc->lro_queued += le->m_head->m_pkthdr.lro_nsegs;
	(*lc->ifp->if_input)(lc->ifp, le->m_head);
}

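/*
 * (Re-)initialize an LRO entry from the given mbuf and TCP header:
 * record the timestamp option (if it is the only option present),
 * sequence numbers, window and flags, and reset the data pointers.
 */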
static void
tcp_set_entry_to_mbuf(struct lro_ctrl *lc, struct lro_entry *le,
    struct mbuf *m, struct tcphdr *th)
{
	uint32_t *ts_ptr;
	uint16_t tcp_data_len;
	uint16_t tcp_opt_len;

	ts_ptr = (uint32_t *)(th + 1);
	tcp_opt_len = (th->th_off << 2);
	tcp_opt_len -= sizeof(*th);

	/* Check if there is a timestamp option. */
	if (tcp_opt_len == 0 ||
	    __predict_false(tcp_opt_len != TCPOLEN_TSTAMP_APPA ||
	    *ts_ptr != TCP_LRO_TS_OPTION)) {
		/* We failed to find the timestamp option. */
		le->timestamp = 0;
	} else {
		le->timestamp = 1;
		le->tsval = ntohl(*(ts_ptr + 1));
		le->tsecr = *(ts_ptr + 2);
	}

	tcp_data_len = m->m_pkthdr.lro_tcp_d_len;

	/* Pull out TCP sequence numbers and window size. */
	le->next_seq = ntohl(th->th_seq) + tcp_data_len;
	le->ack_seq = th->th_ack;
	le->window = th->th_win;
	le->flags = tcp_get_flags(th);
	le->needs_merge = 0;

	/* Setup new data pointers. */
	le->m_head = m;
	le->m_tail = m_last(m);
}

static void
tcp_push_and_replace(struct lro_ctrl *lc, struct lro_entry *le, struct mbuf *m)
{
	struct lro_parser *pa;
	struct mbuf *msave;

	/*
	 * Push up the stack of the current entry
	 * and replace it with "m".
	 */

	/* Grab off the next and save it */
	msave = le->m_head->m_nextpkt;
	le->m_head->m_nextpkt = NULL;

	/* Now push out the old entry */
	tcp_flush_out_entry(lc, le);

	/* Re-parse new header, should not fail. */
	pa = tcp_lro_parser(m, &le->outer, &le->inner, false);
	KASSERT(pa != NULL,
	    ("tcp_push_and_replace: LRO parser failed on m=%p\n", m));

	/*
	 * Now to replace the data properly in the entry
	 * we have to reset the TCP header and
	 * other fields.
	 */
	tcp_set_entry_to_mbuf(lc, le, m, pa->tcp);

	/* Restore the next list */
	m->m_nextpkt = msave;
}

static void
tcp_lro_mbuf_append_pkthdr(struct lro_entry *le, const struct mbuf *p)
{
	struct mbuf *m;
	uint32_t csum;

	m = le->m_head;
	if (m->m_pkthdr.lro_nsegs == 1) {
		/* Compute relative checksum. */
		csum = p->m_pkthdr.lro_tcp_d_csum;
	} else {
		/* Merge TCP data checksums. */
		csum = (uint32_t)m->m_pkthdr.lro_tcp_d_csum +
		    (uint32_t)p->m_pkthdr.lro_tcp_d_csum;
		while (csum > 0xffff)
			csum = (csum >> 16) + (csum & 0xffff);
	}

	/* Update various counters. */
	m->m_pkthdr.len += p->m_pkthdr.lro_tcp_d_len;
	m->m_pkthdr.lro_tcp_d_csum = csum;
	m->m_pkthdr.lro_tcp_d_len += p->m_pkthdr.lro_tcp_d_len;
	m->m_pkthdr.lro_nsegs += p->m_pkthdr.lro_nsegs;
	le->needs_merge = 1;
}

static void
tcp_lro_condense(struct lro_ctrl *lc, struct lro_entry *le)
{
	/*
	 * Walk through the mbuf chain we
	 * have on tap and compress/condense
	 * as required.
	 */
	uint32_t *ts_ptr;
	struct mbuf *m;
	struct tcphdr *th;
	uint32_t tcp_data_len_total;
	uint32_t tcp_data_seg_total;
	uint16_t tcp_data_len;
	uint16_t tcp_opt_len;

	/*
	 * First we must check the lead (m_head);
	 * we must make sure that it is *not*
	 * something that should be sent up
	 * right away (sack etc).
	 */
again:
	m = le->m_head->m_nextpkt;
	if (m == NULL) {
		/* Just one left. */
		return;
	}

	th = tcp_lro_get_th(m);
	tcp_opt_len = (th->th_off << 2);
	tcp_opt_len -= sizeof(*th);
	ts_ptr = (uint32_t *)(th + 1);

	if (tcp_opt_len != 0 && __predict_false(tcp_opt_len != TCPOLEN_TSTAMP_APPA ||
	    *ts_ptr != TCP_LRO_TS_OPTION)) {
		/*
		 * It's not the timestamp option;
		 * we can't use this segment as the head.
		 */
		le->m_head->m_nextpkt = m->m_nextpkt;
		tcp_push_and_replace(lc, le, m);
		goto again;
	}
	if ((tcp_get_flags(th) & ~(TH_ACK | TH_PUSH)) != 0) {
		/*
		 * Make sure that previously seen segments/ACKs are delivered
		 * before this segment, e.g. FIN.
		 */
		le->m_head->m_nextpkt = m->m_nextpkt;
		tcp_push_and_replace(lc, le, m);
		goto again;
	}
	while ((m = le->m_head->m_nextpkt) != NULL) {
		/*
		 * Condense m into le; first
		 * pull m out of the list.
		 */
		le->m_head->m_nextpkt = m->m_nextpkt;
		m->m_nextpkt = NULL;
		/* Setup my data */
		tcp_data_len = m->m_pkthdr.lro_tcp_d_len;
		th = tcp_lro_get_th(m);
		ts_ptr = (uint32_t *)(th + 1);
		tcp_opt_len = (th->th_off << 2);
		tcp_opt_len -= sizeof(*th);
		tcp_data_len_total = le->m_head->m_pkthdr.lro_tcp_d_len + tcp_data_len;
		tcp_data_seg_total = le->m_head->m_pkthdr.lro_nsegs + m->m_pkthdr.lro_nsegs;

		if (tcp_data_seg_total >= lc->lro_ackcnt_lim ||
		    tcp_data_len_total >= lc->lro_length_lim) {
			/* Flush now if appending will result in overflow. */
			tcp_push_and_replace(lc, le, m);
			goto again;
		}
		if (tcp_opt_len != 0 &&
		    __predict_false(tcp_opt_len != TCPOLEN_TSTAMP_APPA ||
		    *ts_ptr != TCP_LRO_TS_OPTION)) {
			/*
			 * Maybe a sack in the new one? We need to
			 * start all over after flushing the
			 * current le. We will go up to the beginning
			 * and flush it (calling the replace again possibly
			 * or just returning).
			 */
			tcp_push_and_replace(lc, le, m);
			goto again;
		}
		if ((tcp_get_flags(th) & ~(TH_ACK | TH_PUSH)) != 0) {
			tcp_push_and_replace(lc, le, m);
			goto again;
		}
		if (tcp_opt_len != 0) {
			uint32_t tsval = ntohl(*(ts_ptr + 1));
			/* Make sure timestamp values are increasing. */
			if (TSTMP_GT(le->tsval, tsval)) {
				tcp_push_and_replace(lc, le, m);
				goto again;
			}
			le->tsval = tsval;
			le->tsecr = *(ts_ptr + 2);
		}
		/* Try to append the new segment. */
		if (__predict_false(ntohl(th->th_seq) != le->next_seq ||
				    ((tcp_get_flags(th) & TH_ACK) !=
				      (le->flags & TH_ACK)) ||
				    (tcp_data_len == 0 &&
				     le->ack_seq == th->th_ack &&
				     le->window == th->th_win))) {
			/* Out of order packet, non-ACK + ACK or dup ACK. */
			tcp_push_and_replace(lc, le, m);
			goto again;
		}
		if (tcp_data_len != 0 ||
		    SEQ_GT(ntohl(th->th_ack), ntohl(le->ack_seq))) {
			le->next_seq += tcp_data_len;
			le->ack_seq = th->th_ack;
			le->window = th->th_win;
			le->needs_merge = 1;
		} else if (th->th_ack == le->ack_seq) {
			if (WIN_GT(th->th_win, le->window)) {
				le->window = th->th_win;
				le->needs_merge = 1;
			}
		}

		if (tcp_data_len == 0) {
			m_freem(m);
			continue;
		}

		/* Merge TCP data checksum and length to head mbuf. */
		tcp_lro_mbuf_append_pkthdr(le, m);

		/*
		 * Adjust the mbuf so that m_data points to the first byte of
		 * the ULP payload.  Adjust the mbuf to avoid complications and
		 * append new segment to existing mbuf chain.
		 */
		m_adj(m, m->m_pkthdr.len - tcp_data_len);
		m_demote_pkthdr(m);
		le->m_tail->m_next = m;
		le->m_tail = m_last(m);
	}
}

void
tcp_lro_flush(struct lro_ctrl *lc, struct lro_entry *le)
{

	/* Only optimise if there are multiple packets waiting. */
	NET_EPOCH_ASSERT();
	if (tcp_lro_flush_tcphpts == NULL ||
	    tcp_lro_flush_tcphpts(lc, le) != 0) {
		tcp_lro_condense(lc, le);
		tcp_flush_out_entry(lc, le);
	}
	lc->lro_flushed++;
	bzero(le, sizeof(*le));
	LIST_INSERT_HEAD(&lc->lro_free, le, next);
}

#define	tcp_lro_msb_64(x) (1ULL << (flsll(x) - 1))

/*
 * The tcp_lro_sort() routine is comparable to qsort(), except it has
 * a worst case complexity limit of O(MIN(N,64)*N), where N is the
 * number of elements to sort and 64 is the number of sequence bits
 * available. The algorithm is bit-slicing the 64-bit sequence number,
 * sorting one bit at a time from the most significant bit until the
 * least significant one, skipping the constant bits. This is
 * typically called a radix sort.
 */
static void
tcp_lro_sort(struct lro_mbuf_sort *parray, uint32_t size)
{
	struct lro_mbuf_sort temp;
	uint64_t ones;
	uint64_t zeros;
	uint32_t x;
	uint32_t y;

repeat:
	/* for small arrays insertion sort is faster */
	if (size <= 12) {
		for (x = 1; x < size; x++) {
			temp = parray[x];
			for (y = x; y > 0 && temp.seq < parray[y - 1].seq; y--)
				parray[y] = parray[y - 1];
			parray[y] = temp;
		}
		return;
	}

	/* compute sequence bits which are constant */
	ones = 0;
	zeros = 0;
	for (x = 0; x != size; x++) {
		ones |= parray[x].seq;
		zeros |= ~parray[x].seq;
	}

	/* compute bits which are not constant into "ones" */
	ones &= zeros;
	if (ones == 0)
		return;

	/* pick the most significant bit which is not constant */
	ones = tcp_lro_msb_64(ones);

	/*
	 * Move entries having cleared sequence bits to the beginning
	 * of the array:
	 */
	for (x = y = 0; y != size; y++) {
		/* skip set bits */
		if (parray[y].seq & ones)
			continue;
		/* swap entries */
		temp = parray[x];
		parray[x] = parray[y];
		parray[y] = temp;
		x++;
	}

	KASSERT(x != 0 && x != size, ("Memory is corrupted\n"));

	/* sort zeros */
	tcp_lro_sort(parray, x);

	/* sort ones */
	parray += x;
	size -= x;
	goto repeat;
}

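/*
 * Flush all queued mbufs: sort them so that packets belonging to the
 * same flow become adjacent (see the sequence number encoding in
 * tcp_lro_queue_mbuf()), feed each stream through the LRO engine,
 * and finally flush any remaining active entries.
 */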
void
tcp_lro_flush_all(struct lro_ctrl *lc)
{
	uint64_t seq;
	uint64_t nseq;
	unsigned x;

	NET_EPOCH_ASSERT();
	/* check if no mbufs to flush */
	if (lc->lro_mbuf_count == 0)
		goto done;
	if (lc->lro_cpu_is_set == 0) {
		if (lc->lro_last_cpu == curcpu) {
			lc->lro_cnt_of_same_cpu++;
			/* Have we reached the threshold to declare a cpu? */
			if (lc->lro_cnt_of_same_cpu > tcp_lro_cpu_set_thresh)
				lc->lro_cpu_is_set = 1;
		} else {
			lc->lro_last_cpu = curcpu;
			lc->lro_cnt_of_same_cpu = 0;
		}
	}
	CURVNET_SET(lc->ifp->if_vnet);

	/* get current time */
	binuptime(&lc->lro_last_queue_time);

	/* sort all mbufs according to stream */
	tcp_lro_sort(lc->lro_mbuf_data, lc->lro_mbuf_count);

	/* input data into LRO engine, stream by stream */
	seq = 0;
	for (x = 0; x != lc->lro_mbuf_count; x++) {
		struct mbuf *mb;

		/* get mbuf */
		mb = lc->lro_mbuf_data[x].mb;

		/* get sequence number, masking away the packet index */
		nseq = lc->lro_mbuf_data[x].seq & (-1ULL << 24);

		/* check for new stream */
		if (seq != nseq) {
			seq = nseq;

			/* flush active streams */
			tcp_lro_rx_done(lc);
		}

		/* add packet to LRO engine */
		if (tcp_lro_rx_common(lc, mb, 0, false) != 0) {
			/* Flush anything we have accumulated */
			tcp_lro_flush_active(lc);
			/* input packet to network layer */
			(*lc->ifp->if_input)(lc->ifp, mb);
			lc->lro_queued++;
			lc->lro_flushed++;
		}
	}
	CURVNET_RESTORE();
done:
	/* flush active streams */
	tcp_lro_rx_done(lc);
	tcp_hpts_softclock();
	lc->lro_mbuf_count = 0;
}

static struct lro_head *
tcp_lro_rx_get_bucket(struct lro_ctrl *lc, struct mbuf *m, struct lro_parser *parser)
{
	u_long hash;

	if (M_HASHTYPE_ISHASH(m)) {
		hash = m->m_pkthdr.flowid;
	} else {
		for (unsigned i = hash = 0; i != LRO_RAW_ADDRESS_MAX; i++)
			hash += parser->data.raw[i];
	}
	return (&lc->lro_hash[hash % lc->lro_hashsz]);
}

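/*
 * Common receive path: parse and validate the packet, then either
 * append it to a matching active entry, start a new entry, or return
 * non-zero if the packet cannot be LRO'ed and must be input directly.
 */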
static int
tcp_lro_rx_common(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum, bool use_hash)
{
	struct lro_parser pi;	/* inner address data */
	struct lro_parser po;	/* outer address data */
	struct lro_parser *pa;	/* current parser for TCP stream */
	struct lro_entry *le;
	struct lro_head *bucket;
	struct tcphdr *th;
	int tcp_data_len;
	int tcp_opt_len;
	int error;
	uint16_t tcp_data_sum;

#ifdef INET
	/* Quickly decide if packet cannot be LRO'ed */
	if (__predict_false(V_ipforwarding != 0))
		return (TCP_LRO_CANNOT);
#endif
#ifdef INET6
	/* Quickly decide if packet cannot be LRO'ed */
	if (__predict_false(V_ip6_forwarding != 0))
		return (TCP_LRO_CANNOT);
#endif
	if (((m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) !=
	    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ||
	    (m->m_pkthdr.csum_data != 0xffff)) {
		/*
		 * The checksum either did not have hardware offload
		 * or it was a bad checksum. We can't LRO such
		 * a packet.
		 */
		counter_u64_add(tcp_bad_csums, 1);
		return (TCP_LRO_CANNOT);
	}
	/* We expect a contiguous header [eh, ip, tcp]. */
	pa = tcp_lro_parser(m, &po, &pi, true);
	if (__predict_false(pa == NULL))
		return (TCP_LRO_NOT_SUPPORTED);

	/* We don't expect any padding. */
	error = tcp_lro_trim_mbuf_chain(m, pa);
	if (__predict_false(error != 0))
		return (error);

#ifdef INET
	switch (pa->data.lro_type) {
	case LRO_TYPE_IPV4_TCP:
		error = tcp_lro_rx_ipv4(lc, m, pa->ip4);
		if (__predict_false(error != 0))
			return (error);
		break;
	default:
		break;
	}
#endif
	/* If there is no hardware or arrival timestamp on the packet, add one. */
	if ((m->m_flags & (M_TSTMP_LRO | M_TSTMP)) == 0) {
		m->m_pkthdr.rcv_tstmp = bintime2ns(&lc->lro_last_queue_time);
		m->m_flags |= M_TSTMP_LRO;
	}

	/* Get pointer to TCP header. */
	th = pa->tcp;

	/* Don't process SYN packets. */
	if (__predict_false(tcp_get_flags(th) & TH_SYN))
		return (TCP_LRO_CANNOT);

	/* Get total TCP header length and compute payload length. */
	tcp_opt_len = (th->th_off << 2);
	tcp_data_len = m->m_pkthdr.len - ((uint8_t *)th -
	    (uint8_t *)m->m_data) - tcp_opt_len;
	tcp_opt_len -= sizeof(*th);

	/* Don't process invalid TCP headers. */
	if (__predict_false(tcp_opt_len < 0 || tcp_data_len < 0))
		return (TCP_LRO_CANNOT);

	/* Compute TCP data only checksum. */
	if (tcp_data_len == 0)
		tcp_data_sum = 0;	/* no data, no checksum */
	else if (__predict_false(csum != 0))
		tcp_data_sum = tcp_lro_rx_csum_data(pa, ~csum);
	else
		tcp_data_sum = tcp_lro_rx_csum_data(pa, ~th->th_sum);

	/* Save TCP info in mbuf. */
	m->m_nextpkt = NULL;
	m->m_pkthdr.rcvif = lc->ifp;
	m->m_pkthdr.lro_tcp_d_csum = tcp_data_sum;
	m->m_pkthdr.lro_tcp_d_len = tcp_data_len;
	m->m_pkthdr.lro_tcp_h_off = ((uint8_t *)th - (uint8_t *)m->m_data);
	m->m_pkthdr.lro_nsegs = 1;

	/* Get hash bucket. */
	if (!use_hash) {
		bucket = &lc->lro_hash[0];
	} else {
		bucket = tcp_lro_rx_get_bucket(lc, m, pa);
	}

	/* Try to find a matching previous segment. */
	LIST_FOREACH(le, bucket, hash_next) {
		/* Compare addresses and ports. */
		if (lro_address_compare(&po.data, &le->outer.data) == false ||
		    lro_address_compare(&pi.data, &le->inner.data) == false)
			continue;

		/* Check if no data and old ACK. */
		if (tcp_data_len == 0 &&
		    SEQ_LT(ntohl(th->th_ack), ntohl(le->ack_seq))) {
			m_freem(m);
			return (0);
		}

		/* Mark "m" in the last spot. */
		le->m_last_mbuf->m_nextpkt = m;
		/* Now set the tail to "m". */
		le->m_last_mbuf = m;
		return (0);
	}

	/* Try to find an empty slot. */
	if (LIST_EMPTY(&lc->lro_free))
		return (TCP_LRO_NO_ENTRIES);

	/* Start a new segment chain. */
	le = LIST_FIRST(&lc->lro_free);
	LIST_REMOVE(le, next);
	tcp_lro_active_insert(lc, bucket, le);

	/* Make sure the headers are set. */
	le->inner = pi;
	le->outer = po;

	/* Store time this entry was allocated. */
	le->alloc_time = lc->lro_last_queue_time;

	tcp_set_entry_to_mbuf(lc, le, m, th);

	/* Now set the tail to "m". */
	le->m_last_mbuf = m;

	return (0);
}

int
tcp_lro_rx(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum)
{
	int error;

	if (((m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) !=
	    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ||
	    (m->m_pkthdr.csum_data != 0xffff)) {
		/*
		 * The checksum either did not have hardware offload
		 * or it was a bad checksum. We can't LRO such
		 * a packet.
		 */
		counter_u64_add(tcp_bad_csums, 1);
		return (TCP_LRO_CANNOT);
	}
	/* get current time */
	binuptime(&lc->lro_last_queue_time);
	CURVNET_SET(lc->ifp->if_vnet);
	error = tcp_lro_rx_common(lc, m, csum, true);
	if (__predict_false(error != 0)) {
		/*
		 * Flush anything we have accumulated
		 * ahead of this packet that can't
		 * be LRO'd. This preserves order.
		 */
		tcp_lro_flush_active(lc);
	}
	CURVNET_RESTORE();

	return (error);
}

void
tcp_lro_queue_mbuf(struct lro_ctrl *lc, struct mbuf *mb)
{
	NET_EPOCH_ASSERT();
	/* sanity checks */
	if (__predict_false(lc->ifp == NULL || lc->lro_mbuf_data == NULL ||
	    lc->lro_mbuf_max == 0)) {
		/* packet drop */
		m_freem(mb);
		return;
	}

	/* check if packet is not LRO capable */
	if (__predict_false((lc->ifp->if_capenable & IFCAP_LRO) == 0)) {
		/* input packet to network layer */
		(*lc->ifp->if_input) (lc->ifp, mb);
		return;
	}

	/* If there is no hardware or arrival timestamp on the packet, add one. */
	if ((tcplro_stacks_wanting_mbufq > 0) &&
	    (tcp_less_accurate_lro_ts == 0) &&
	    ((mb->m_flags & M_TSTMP) == 0)) {
		/* Add in an LRO time since no hardware */
		binuptime(&lc->lro_last_queue_time);
		mb->m_pkthdr.rcv_tstmp = bintime2ns(&lc->lro_last_queue_time);
		mb->m_flags |= M_TSTMP_LRO;
	}

	/* create sequence number */
	lc->lro_mbuf_data[lc->lro_mbuf_count].seq =
	    (((uint64_t)M_HASHTYPE_GET(mb)) << 56) |
	    (((uint64_t)mb->m_pkthdr.flowid) << 24) |
	    ((uint64_t)lc->lro_mbuf_count);

	/* enter mbuf */
	lc->lro_mbuf_data[lc->lro_mbuf_count].mb = mb;

	/* flush if array is full */
	if (__predict_false(++lc->lro_mbuf_count == lc->lro_mbuf_max))
		tcp_lro_flush_all(lc);
}

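/*
 * Illustrative driver usage (a sketch, not taken from any specific
 * driver; "sc" and its fields are hypothetical):
 *
 *	// attach: allocate the context, e.g. 8 entries, no mbuf queue
 *	if (tcp_lro_init_args(&sc->lro, sc->ifp, 8, 0) != 0)
 *		goto fail;
 *
 *	// RX interrupt: hand each received packet to LRO, falling
 *	// back to direct input when tcp_lro_rx() cannot accept it
 *	if (tcp_lro_rx(&sc->lro, m, 0) != 0)
 *		(*sc->ifp->if_input)(sc->ifp, m);
 *
 *	// end of interrupt: push out anything still accumulated
 *	tcp_lro_flush_all(&sc->lro);
 *
 *	// detach:
 *	tcp_lro_free(&sc->lro);
 */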
/* end */