/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2002 Andre Oppermann, Internet Business Solutions AG
 * Copyright (c) 2021 Gleb Smirnoff <glebius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The tcp_hostcache moves the TCP-specific cached metrics from the routing
 * table to a dedicated structure indexed by the remote IP address.  It keeps
 * information on the measured TCP parameters of past TCP sessions to allow
 * better initial start values to be used with later connections to/from the
 * same source.  Depending on the network parameters (delay, max MTU,
 * congestion window) between local and remote sites, this can lead to
 * significant speed-ups for new TCP connections after the first one.
 *
 * Due to the tcp_hostcache, all TCP-specific metrics information in the
 * routing table has been removed.  The inpcb no longer keeps a pointer to
 * the routing entry, and protocol-initiated route cloning has been removed
 * as well.  With these changes, the routing table has gone back to being
 * more lightweight and only carries information related to packet forwarding.
 *
 * tcp_hostcache is designed for multiple concurrent access in SMP
 * environments and high contention.  It is a straight hash.  Each bucket row
 * is protected by its own lock for modification.  Readers are protected by
 * SMR.  This puts certain restrictions on writers, e.g. a writer shall only
 * insert a fully populated entry into a row.  A writer can't reuse the
 * least-used entry when a bucket row is full.  Value updates for an entry
 * shall be atomic.
 *
 * The TCP stack(s) communicate with tcp_hostcache via the KBI functions
 * tcp_hc_*() and the hc_metrics_lite structure.
 *
 * Since tcp_hostcache is only caching information, there are no fatal
 * consequences if we can't allocate a new entry, have to drop an existing
 * entry, or return somewhat stale information.
 */
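
/*
 * A rough sketch of the KBI from a consumer's perspective (illustrative
 * only; "inp" stands for any valid struct inpcb pointer, and any
 * struct in_conninfo may serve as the key):
 *
 *	struct hc_metrics_lite metrics;
 *	uint32_t mtu;
 *
 *	tcp_hc_get(&inp->inp_inc, &metrics);	  read all cached metrics
 *	mtu = tcp_hc_getmtu(&inp->inp_inc);	  read cached path MTU only
 *	tcp_hc_updatemtu(&inp->inp_inc, mtu);	  store a discovered path MTU
 *	tcp_hc_update(&inp->inp_inc, &metrics);	  merge new measurements
 */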

/*
 * Many thanks to jlemon for the basic structure of tcp_syncache, which is
 * followed here.
 */

#include <sys/cdefs.h>
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/hash.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/smr.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>

#ifdef INET6
#include <netinet6/ip6_var.h>
#endif

#include <vm/uma.h>

struct hc_head {
	CK_SLIST_HEAD(hc_qhead, hc_metrics) hch_bucket;	/* bucket row */
	u_int		hch_length;	/* entries in bucket row */
	struct mtx	hch_mtx;	/* lock for row modification */
};

struct hc_metrics {
	/* housekeeping */
	CK_SLIST_ENTRY(hc_metrics) rmx_q;
	struct		in_addr ip4;	/* IPv4 address */
	struct		in6_addr ip6;	/* IPv6 address */
	uint32_t	ip6_zoneid;	/* IPv6 scope zone id */
	/* endpoint specific values for tcp */
	uint32_t	rmx_mtu;	/* MTU for this path */
	uint32_t	rmx_ssthresh;	/* outbound gateway buffer limit */
	uint32_t	rmx_rtt;	/* estimated round trip time */
	uint32_t	rmx_rttvar;	/* estimated rtt variance */
	uint32_t	rmx_cwnd;	/* congestion window */
	uint32_t	rmx_sendpipe;	/* outbound delay-bandwidth product */
	uint32_t	rmx_recvpipe;	/* inbound delay-bandwidth product */
	/* TCP hostcache internal data */
	int		rmx_expire;	/* lifetime for object */
#ifdef	TCP_HC_COUNTERS
	u_long		rmx_hits;	/* number of hits */
	u_long		rmx_updates;	/* number of updates */
#endif
};

struct tcp_hostcache {
	struct hc_head	*hashbase;	/* hash table */
	uma_zone_t	zone;		/* zone that entries come from */
	smr_t		smr;		/* SMR domain of the zone */
	u_int		hashsize;	/* size of hash table */
	u_int		hashmask;	/* hashsize - 1 */
	u_int		hashsalt;	/* random hash seed */
	u_int		bucket_limit;	/* max entries per bucket row */
	u_int		cache_count;	/* current number of entries */
	u_int		cache_limit;	/* max number of entries */
	int		expire;		/* entry lifetime, in seconds */
	int		prune;		/* seconds between purge runs */
	int		purgeall;	/* purge all entries on next run */
};

/* Arbitrary values */
#define TCP_HOSTCACHE_HASHSIZE		512
#define TCP_HOSTCACHE_BUCKETLIMIT	30
#define TCP_HOSTCACHE_EXPIRE		60*60	/* one hour */
#define TCP_HOSTCACHE_PRUNE		5*60	/* every 5 minutes */

VNET_DEFINE_STATIC(struct tcp_hostcache, tcp_hostcache);
#define	V_tcp_hostcache		VNET(tcp_hostcache)

VNET_DEFINE_STATIC(struct callout, tcp_hc_callout);
#define	V_tcp_hc_callout	VNET(tcp_hc_callout)

static struct hc_metrics *tcp_hc_lookup(struct in_conninfo *);
static int sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_hc_histo(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_hc_purgenow(SYSCTL_HANDLER_ARGS);
static void tcp_hc_purge_internal(int);
static void tcp_hc_purge(void *);

static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, hostcache,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TCP Host cache");

VNET_DEFINE(int, tcp_use_hostcache) = 1;
#define V_tcp_use_hostcache  VNET(tcp_use_hostcache)
SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, enable,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(tcp_use_hostcache), 0,
    "Enable the TCP hostcache");

SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, cachelimit,
    CTLFLAG_VNET | CTLFLAG_RDTUN, &VNET_NAME(tcp_hostcache.cache_limit), 0,
    "Overall entry limit for hostcache");

SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, hashsize,
    CTLFLAG_VNET | CTLFLAG_RDTUN, &VNET_NAME(tcp_hostcache.hashsize), 0,
    "Size of TCP hostcache hashtable");

SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, bucketlimit,
    CTLFLAG_VNET | CTLFLAG_RDTUN, &VNET_NAME(tcp_hostcache.bucket_limit), 0,
    "Per-bucket hash limit for hostcache");

SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, count, CTLFLAG_VNET | CTLFLAG_RD,
    &VNET_NAME(tcp_hostcache.cache_count), 0,
    "Current number of entries in hostcache");

SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, expire, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_hostcache.expire), 0,
    "Expire time of TCP hostcache entries");

SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, prune, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_hostcache.prune), 0,
    "Time between purge runs");

SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, purge, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_hostcache.purgeall), 0,
    "Expire all entries on next purge run");

SYSCTL_PROC(_net_inet_tcp_hostcache, OID_AUTO, list,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE,
    0, 0, sysctl_tcp_hc_list, "A",
    "List of all hostcache entries");

SYSCTL_PROC(_net_inet_tcp_hostcache, OID_AUTO, histo,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE,
    0, 0, sysctl_tcp_hc_histo, "A",
    "Print a histogram of hostcache hashbucket utilization");

SYSCTL_PROC(_net_inet_tcp_hostcache, OID_AUTO, purgenow,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    NULL, 0, sysctl_tcp_hc_purgenow, "I",
    "Immediately purge all entries");

static MALLOC_DEFINE(M_HOSTCACHE, "hostcache", "TCP hostcache");

/* Use jenkins_hash32(), as in other parts of the tcp stack. */
#define	HOSTCACHE_HASH(inc)						\
	((inc)->inc_flags & INC_ISIPV6) ?				\
		(jenkins_hash32((inc)->inc6_faddr.s6_addr32, 4,		\
		V_tcp_hostcache.hashsalt) & V_tcp_hostcache.hashmask)	\
	:								\
		(jenkins_hash32(&(inc)->inc_faddr.s_addr, 1,		\
		V_tcp_hostcache.hashsalt) & V_tcp_hostcache.hashmask)
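
/*
 * Note that only the foreign address is hashed: all connections to the
 * same remote host map to the same bucket and, via tcp_hc_cmp(), to the
 * same entry, regardless of local address or ports.
 */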

#define THC_LOCK(h)		mtx_lock(&(h)->hch_mtx)
#define THC_UNLOCK(h)		mtx_unlock(&(h)->hch_mtx)

void
tcp_hc_init(void)
{
	u_int cache_limit;
	int i;

	/*
	 * Initialize hostcache structures.
	 */
	atomic_store_int(&V_tcp_hostcache.cache_count, 0);
	V_tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE;
	V_tcp_hostcache.bucket_limit = TCP_HOSTCACHE_BUCKETLIMIT;
	V_tcp_hostcache.expire = TCP_HOSTCACHE_EXPIRE;
	V_tcp_hostcache.prune = TCP_HOSTCACHE_PRUNE;
	V_tcp_hostcache.hashsalt = arc4random();

	TUNABLE_INT_FETCH("net.inet.tcp.hostcache.hashsize",
	    &V_tcp_hostcache.hashsize);
	if (!powerof2(V_tcp_hostcache.hashsize)) {
		printf("WARNING: hostcache hash size is not a power of 2.\n");
		V_tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE; /* default */
	}
	V_tcp_hostcache.hashmask = V_tcp_hostcache.hashsize - 1;

	TUNABLE_INT_FETCH("net.inet.tcp.hostcache.bucketlimit",
	    &V_tcp_hostcache.bucket_limit);

	cache_limit = V_tcp_hostcache.hashsize * V_tcp_hostcache.bucket_limit;
	V_tcp_hostcache.cache_limit = cache_limit;
	TUNABLE_INT_FETCH("net.inet.tcp.hostcache.cachelimit",
	    &V_tcp_hostcache.cache_limit);
	if (V_tcp_hostcache.cache_limit > cache_limit)
		V_tcp_hostcache.cache_limit = cache_limit;

	/*
	 * Allocate the hash table.
	 */
	V_tcp_hostcache.hashbase = (struct hc_head *)
	    malloc(V_tcp_hostcache.hashsize * sizeof(struct hc_head),
		   M_HOSTCACHE, M_WAITOK | M_ZERO);

	/*
	 * Initialize the hash buckets.
	 */
	for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
		CK_SLIST_INIT(&V_tcp_hostcache.hashbase[i].hch_bucket);
		V_tcp_hostcache.hashbase[i].hch_length = 0;
		mtx_init(&V_tcp_hostcache.hashbase[i].hch_mtx, "tcp_hc_entry",
			  NULL, MTX_DEF);
	}

	/*
	 * Allocate the hostcache entries.
	 */
	V_tcp_hostcache.zone =
	    uma_zcreate("hostcache", sizeof(struct hc_metrics),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_SMR);
	uma_zone_set_max(V_tcp_hostcache.zone, V_tcp_hostcache.cache_limit);
	V_tcp_hostcache.smr = uma_zone_get_smr(V_tcp_hostcache.zone);

	/*
	 * Set up periodic cache cleanup.
	 */
	callout_init(&V_tcp_hc_callout, 1);
	callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz,
	    tcp_hc_purge, curvnet);
}
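
/*
 * Illustrative loader.conf(5) tuning of the geometry set up above
 * (example values only, not recommendations):
 *
 *	net.inet.tcp.hostcache.hashsize="1024"
 *	net.inet.tcp.hostcache.bucketlimit="16"
 *	net.inet.tcp.hostcache.cachelimit="16384"
 *
 * hashsize must be a power of two; cachelimit is clamped to
 * hashsize * bucketlimit.
 */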

#ifdef VIMAGE
void
tcp_hc_destroy(void)
{
	int i;

	callout_drain(&V_tcp_hc_callout);

	/* Purge all hc entries. */
	tcp_hc_purge_internal(1);

	/* Free the uma zone and the allocated hash table. */
	uma_zdestroy(V_tcp_hostcache.zone);

	for (i = 0; i < V_tcp_hostcache.hashsize; i++)
		mtx_destroy(&V_tcp_hostcache.hashbase[i].hch_mtx);
	free(V_tcp_hostcache.hashbase, M_HOSTCACHE);
}
#endif

/*
 * Internal function: compare cache entry to a connection.
 */
static bool
tcp_hc_cmp(struct hc_metrics *hc_entry, struct in_conninfo *inc)
{

	if (inc->inc_flags & INC_ISIPV6) {
		/* XXX: check ip6_zoneid */
		if (memcmp(&inc->inc6_faddr, &hc_entry->ip6,
		    sizeof(inc->inc6_faddr)) == 0)
			return (true);
	} else {
		if (memcmp(&inc->inc_faddr, &hc_entry->ip4,
		    sizeof(inc->inc_faddr)) == 0)
			return (true);
	}

	return (false);
}

/*
 * Internal function: look up an entry in the hostcache for read.
 * On success returns with the SMR read section entered, which the caller
 * must leave with smr_exit() once done reading the entry.
 */
static struct hc_metrics *
tcp_hc_lookup(struct in_conninfo *inc)
{
	struct hc_head *hc_head;
	struct hc_metrics *hc_entry;

	KASSERT(inc != NULL, ("%s: NULL in_conninfo", __func__));

	hc_head = &V_tcp_hostcache.hashbase[HOSTCACHE_HASH(inc)];

	/*
	 * Iterate through entries in the bucket row looking for a match.
	 */
	smr_enter(V_tcp_hostcache.smr);
	CK_SLIST_FOREACH(hc_entry, &hc_head->hch_bucket, rmx_q)
		if (tcp_hc_cmp(hc_entry, inc))
			break;

	if (hc_entry != NULL) {
		if (atomic_load_int(&hc_entry->rmx_expire) !=
		    V_tcp_hostcache.expire)
			atomic_store_int(&hc_entry->rmx_expire,
			    V_tcp_hostcache.expire);
#ifdef	TCP_HC_COUNTERS
		hc_entry->rmx_hits++;
#endif
	} else
		smr_exit(V_tcp_hostcache.smr);

	return (hc_entry);
}

/*
 * External function: look up an entry in the hostcache and fill out the
 * supplied TCP metrics structure.  The structure is zeroed when no entry
 * is found; individual fields stay zero when no value has been recorded.
 */
void
tcp_hc_get(struct in_conninfo *inc, struct hc_metrics_lite *hc_metrics_lite)
{
	struct hc_metrics *hc_entry;

	if (!V_tcp_use_hostcache) {
		bzero(hc_metrics_lite, sizeof(*hc_metrics_lite));
		return;
	}

	/*
	 * Find the right bucket.
	 */
	hc_entry = tcp_hc_lookup(inc);

	/*
	 * If we don't have an existing object, give up.
	 */
	if (hc_entry == NULL) {
		bzero(hc_metrics_lite, sizeof(*hc_metrics_lite));
		return;
	}

	hc_metrics_lite->rmx_mtu = atomic_load_32(&hc_entry->rmx_mtu);
	hc_metrics_lite->rmx_ssthresh = atomic_load_32(&hc_entry->rmx_ssthresh);
	hc_metrics_lite->rmx_rtt = atomic_load_32(&hc_entry->rmx_rtt);
	hc_metrics_lite->rmx_rttvar = atomic_load_32(&hc_entry->rmx_rttvar);
	hc_metrics_lite->rmx_cwnd = atomic_load_32(&hc_entry->rmx_cwnd);
	hc_metrics_lite->rmx_sendpipe = atomic_load_32(&hc_entry->rmx_sendpipe);
	hc_metrics_lite->rmx_recvpipe = atomic_load_32(&hc_entry->rmx_recvpipe);

	smr_exit(V_tcp_hostcache.smr);
}
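
/*
 * Note that tcp_hc_get() and tcp_hc_getmtu() exit the SMR section
 * themselves before returning; callers receive plain copies of the
 * values and need no further cleanup.
 */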

/*
 * External function: look up an entry in the hostcache and return the
 * discovered path MTU.  Returns 0 if no entry is found or the value is
 * not set.
 */
uint32_t
tcp_hc_getmtu(struct in_conninfo *inc)
{
	struct hc_metrics *hc_entry;
	uint32_t mtu;

	if (!V_tcp_use_hostcache)
		return (0);

	hc_entry = tcp_hc_lookup(inc);
	if (hc_entry == NULL)
		return (0);

	mtu = atomic_load_32(&hc_entry->rmx_mtu);
	smr_exit(V_tcp_hostcache.smr);

	return (mtu);
}
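
/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * path MTU discovery code can pair the two MTU functions, e.g.:
 *
 *	mtu = tcp_hc_getmtu(&inp->inp_inc);
 *	if (mtu == 0)
 *		mtu = 1200;		  hypothetical conservative fallback
 *	...
 *	tcp_hc_updatemtu(&inp->inp_inc, mtu);	  remember the working MTU
 */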

/*
 * External function: update the MTU value of an entry in the hostcache.
 * Creates a new entry if none was found.
 */
void
tcp_hc_updatemtu(struct in_conninfo *inc, uint32_t mtu)
{
	struct hc_metrics_lite hcml = { .rmx_mtu = mtu };

	return (tcp_hc_update(inc, &hcml));
}

/*
 * External function: update the TCP metrics of an entry in the hostcache.
 * Creates a new entry if none was found.
 */
void
tcp_hc_update(struct in_conninfo *inc, struct hc_metrics_lite *hcml)
{
	struct hc_head *hc_head;
	struct hc_metrics *hc_entry, *hc_prev;
	uint32_t v;
	bool new;

	if (!V_tcp_use_hostcache)
		return;

	hc_head = &V_tcp_hostcache.hashbase[HOSTCACHE_HASH(inc)];
	hc_prev = NULL;

	THC_LOCK(hc_head);
	CK_SLIST_FOREACH(hc_entry, &hc_head->hch_bucket, rmx_q) {
		if (tcp_hc_cmp(hc_entry, inc))
			break;
		if (CK_SLIST_NEXT(hc_entry, rmx_q) != NULL)
			hc_prev = hc_entry;
	}

	if (hc_entry != NULL) {
		if (atomic_load_int(&hc_entry->rmx_expire) !=
		    V_tcp_hostcache.expire)
			atomic_store_int(&hc_entry->rmx_expire,
			    V_tcp_hostcache.expire);
#ifdef	TCP_HC_COUNTERS
		hc_entry->rmx_updates++;
#endif
		new = false;
	} else {
		/*
		 * Try to allocate a new entry.  If the bucket limit is
		 * reached, delete the least-used element, located at the
		 * end of the CK_SLIST.  During the lookup above we saved
		 * a pointer to the second-to-last element, in case the
		 * list has at least two elements, so the last one can be
		 * deleted without an extra traversal.
		 *
		 * Give up if the row is empty.
		 */
		if (hc_head->hch_length >= V_tcp_hostcache.bucket_limit ||
		    atomic_load_int(&V_tcp_hostcache.cache_count) >=
		    V_tcp_hostcache.cache_limit) {
			if (hc_prev != NULL) {
				hc_entry = CK_SLIST_NEXT(hc_prev, rmx_q);
				KASSERT(CK_SLIST_NEXT(hc_entry, rmx_q) == NULL,
				    ("%s: %p is not one to last",
				    __func__, hc_prev));
				CK_SLIST_REMOVE_AFTER(hc_prev, rmx_q);
			} else if ((hc_entry =
			    CK_SLIST_FIRST(&hc_head->hch_bucket)) != NULL) {
				KASSERT(CK_SLIST_NEXT(hc_entry, rmx_q) == NULL,
				    ("%s: %p is not the only element",
				    __func__, hc_entry));
				CK_SLIST_REMOVE_HEAD(&hc_head->hch_bucket,
				    rmx_q);
			} else {
				THC_UNLOCK(hc_head);
				return;
			}
			KASSERT(hc_head->hch_length > 0 &&
			    hc_head->hch_length <= V_tcp_hostcache.bucket_limit,
			    ("tcp_hostcache: bucket length violated at %p",
			    hc_head));
			hc_head->hch_length--;
			atomic_subtract_int(&V_tcp_hostcache.cache_count, 1);
			TCPSTAT_INC(tcps_hc_bucketoverflow);
			uma_zfree_smr(V_tcp_hostcache.zone, hc_entry);
		}

		/*
		 * Allocate a new entry, or balk if not possible.
		 */
		hc_entry = uma_zalloc_smr(V_tcp_hostcache.zone, M_NOWAIT);
		if (hc_entry == NULL) {
			THC_UNLOCK(hc_head);
			return;
		}

		/*
		 * Initialize basic information of hostcache entry.
		 */
		bzero(hc_entry, sizeof(*hc_entry));
		if (inc->inc_flags & INC_ISIPV6) {
			hc_entry->ip6 = inc->inc6_faddr;
			hc_entry->ip6_zoneid = inc->inc6_zoneid;
		} else
			hc_entry->ip4 = inc->inc_faddr;
		hc_entry->rmx_expire = V_tcp_hostcache.expire;
		new = true;
	}

	/*
	 * Fill in data.  Use atomics, since an existing entry is
	 * accessible by readers in SMR section.
	 */
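	/*
	 * New measurements are smoothed into the cache by averaging them
	 * with the previously cached value, e.g. a cached RTT of 100 and
	 * a fresh sample of 50 are stored as 75.  Where the addends may
	 * be large, the sum is computed in 64 bits to avoid overflowing
	 * the 32-bit fields.
	 */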
	if (hcml->rmx_mtu != 0) {
		atomic_store_32(&hc_entry->rmx_mtu, hcml->rmx_mtu);
	}
	if (hcml->rmx_rtt != 0) {
		if (hc_entry->rmx_rtt == 0)
			v = hcml->rmx_rtt;
		else
			v = ((uint64_t)hc_entry->rmx_rtt +
			    (uint64_t)hcml->rmx_rtt) / 2;
		atomic_store_32(&hc_entry->rmx_rtt, v);
		TCPSTAT_INC(tcps_cachedrtt);
	}
	if (hcml->rmx_rttvar != 0) {
		if (hc_entry->rmx_rttvar == 0)
			v = hcml->rmx_rttvar;
		else
			v = ((uint64_t)hc_entry->rmx_rttvar +
			    (uint64_t)hcml->rmx_rttvar) / 2;
		atomic_store_32(&hc_entry->rmx_rttvar, v);
		TCPSTAT_INC(tcps_cachedrttvar);
	}
	if (hcml->rmx_ssthresh != 0) {
		if (hc_entry->rmx_ssthresh == 0)
			v = hcml->rmx_ssthresh;
		else
			v = (hc_entry->rmx_ssthresh + hcml->rmx_ssthresh) / 2;
		atomic_store_32(&hc_entry->rmx_ssthresh, v);
		TCPSTAT_INC(tcps_cachedssthresh);
	}
	if (hcml->rmx_cwnd != 0) {
		if (hc_entry->rmx_cwnd == 0)
			v = hcml->rmx_cwnd;
		else
			v = ((uint64_t)hc_entry->rmx_cwnd +
			    (uint64_t)hcml->rmx_cwnd) / 2;
		atomic_store_32(&hc_entry->rmx_cwnd, v);
		/* TCPSTAT_INC(tcps_cachedcwnd); */
	}
	if (hcml->rmx_sendpipe != 0) {
		if (hc_entry->rmx_sendpipe == 0)
			v = hcml->rmx_sendpipe;
		else
			v = ((uint64_t)hc_entry->rmx_sendpipe +
			    (uint64_t)hcml->rmx_sendpipe) / 2;
		atomic_store_32(&hc_entry->rmx_sendpipe, v);
		/* TCPSTAT_INC(tcps_cachedsendpipe); */
	}
	if (hcml->rmx_recvpipe != 0) {
		if (hc_entry->rmx_recvpipe == 0)
			v = hcml->rmx_recvpipe;
		else
			v = ((uint64_t)hc_entry->rmx_recvpipe +
			    (uint64_t)hcml->rmx_recvpipe) / 2;
		atomic_store_32(&hc_entry->rmx_recvpipe, v);
		/* TCPSTAT_INC(tcps_cachedrecvpipe); */
	}

602
603	/*
604	 * Put it upfront.
605	 */
606	if (new) {
607		CK_SLIST_INSERT_HEAD(&hc_head->hch_bucket, hc_entry, rmx_q);
608		hc_head->hch_length++;
609		KASSERT(hc_head->hch_length <= V_tcp_hostcache.bucket_limit,
610		    ("tcp_hostcache: bucket length too high at %p", hc_head));
611		atomic_add_int(&V_tcp_hostcache.cache_count, 1);
612		TCPSTAT_INC(tcps_hc_added);
613	} else if (hc_entry != CK_SLIST_FIRST(&hc_head->hch_bucket)) {
614		KASSERT(CK_SLIST_NEXT(hc_prev, rmx_q) == hc_entry,
615		    ("%s: %p next is not %p", __func__, hc_prev, hc_entry));
616		CK_SLIST_REMOVE_AFTER(hc_prev, rmx_q);
617		CK_SLIST_INSERT_HEAD(&hc_head->hch_bucket, hc_entry, rmx_q);
618	}
619	THC_UNLOCK(hc_head);
620}
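
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * TCP stack may push its final estimates when a connection closes, e.g.:
 *
 *	struct hc_metrics_lite metrics = {
 *		.rmx_rtt = rtt,			  stack's smoothed RTT
 *		.rmx_ssthresh = ssthresh,	  stack's final ssthresh
 *	};
 *
 *	tcp_hc_update(&inp->inp_inc, &metrics);
 *
 * Zero-valued fields are simply skipped, so a caller only fills in what
 * it has measured.
 */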

/*
 * Sysctl function: prints the list and values of all hostcache entries
 * in unsorted order.
 */
static int
sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS)
{
	const int linesize = 128;
	struct sbuf sb;
	int i, error, len;
	struct hc_metrics *hc_entry;
	char ip4buf[INET_ADDRSTRLEN];
#ifdef INET6
	char ip6buf[INET6_ADDRSTRLEN];
#endif

	if (jailed_without_vnet(curthread->td_ucred) != 0)
		return (EPERM);

	/* Optimize the buffer length query from sbin/sysctl(8). */
	if (req->oldptr == NULL) {
		len = (atomic_load_int(&V_tcp_hostcache.cache_count) + 1) *
			linesize;
		return (SYSCTL_OUT(req, NULL, len));
	}

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);

	/* Use a buffer sized for one full bucket. */
	sbuf_new_for_sysctl(&sb, NULL, V_tcp_hostcache.bucket_limit *
		linesize, req);

	sbuf_printf(&sb,
		"\nIP address        MTU SSTHRESH      RTT   RTTVAR "
		"    CWND SENDPIPE RECVPIPE "
#ifdef	TCP_HC_COUNTERS
		"HITS  UPD  "
#endif
		"EXP\n");
	sbuf_drain(&sb);

#define msec(u) (((u) + 500) / 1000)	/* usec to msec, rounded */
	for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
		THC_LOCK(&V_tcp_hostcache.hashbase[i]);
		CK_SLIST_FOREACH(hc_entry,
		    &V_tcp_hostcache.hashbase[i].hch_bucket, rmx_q) {
			sbuf_printf(&sb,
			    "%-15s %5u %8u %6lums %6lums %8u %8u %8u "
#ifdef	TCP_HC_COUNTERS
			    "%4lu %4lu "
#endif
			    "%4i\n",
			    hc_entry->ip4.s_addr ?
			        inet_ntoa_r(hc_entry->ip4, ip4buf) :
#ifdef INET6
				ip6_sprintf(ip6buf, &hc_entry->ip6),
#else
				"IPv6?",
#endif
			    hc_entry->rmx_mtu,
			    hc_entry->rmx_ssthresh,
			    msec((u_long)hc_entry->rmx_rtt *
				(RTM_RTTUNIT / (hz * TCP_RTT_SCALE))),
			    msec((u_long)hc_entry->rmx_rttvar *
				(RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE))),
			    hc_entry->rmx_cwnd,
			    hc_entry->rmx_sendpipe,
			    hc_entry->rmx_recvpipe,
#ifdef	TCP_HC_COUNTERS
			    hc_entry->rmx_hits,
			    hc_entry->rmx_updates,
#endif
			    hc_entry->rmx_expire);
		}
		THC_UNLOCK(&V_tcp_hostcache.hashbase[i]);
		sbuf_drain(&sb);
	}
#undef msec
	error = sbuf_finish(&sb);
	sbuf_delete(&sb);
	return (error);
}

/*
 * Sysctl function: prints a histogram of the hostcache hashbucket
 * utilization.
 */
static int
sysctl_tcp_hc_histo(SYSCTL_HANDLER_ARGS)
{
	const int linesize = 50;
	struct sbuf sb;
	int i, error;
	int *histo;
	u_int hch_length;

	if (jailed_without_vnet(curthread->td_ucred) != 0)
		return (EPERM);

	histo = (int *)malloc(sizeof(int) * (V_tcp_hostcache.bucket_limit + 1),
			M_TEMP, M_NOWAIT | M_ZERO);
	if (histo == NULL)
		return (ENOMEM);

	for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
		hch_length = V_tcp_hostcache.hashbase[i].hch_length;
		KASSERT(hch_length <= V_tcp_hostcache.bucket_limit,
		    ("tcp_hostcache: bucket limit exceeded at %u: %u",
		    i, hch_length));
		histo[hch_length]++;
	}

	/* Use a buffer sized for 16 lines. */
	sbuf_new_for_sysctl(&sb, NULL, 16 * linesize, req);

	sbuf_printf(&sb, "\nLength\tCount\n");
	for (i = 0; i <= V_tcp_hostcache.bucket_limit; i++) {
		sbuf_printf(&sb, "%u\t%u\n", i, histo[i]);
	}
	error = sbuf_finish(&sb);
	sbuf_delete(&sb);
	free(histo, M_TEMP);
	return (error);
}

/*
 * Caller has to make sure that curvnet is set properly.
 */
static void
tcp_hc_purge_internal(int all)
{
	struct hc_head *head;
	struct hc_metrics *hc_entry, *hc_next, *hc_prev;
	int i;

	for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
		head = &V_tcp_hostcache.hashbase[i];
		hc_prev = NULL;
		THC_LOCK(head);
		CK_SLIST_FOREACH_SAFE(hc_entry, &head->hch_bucket, rmx_q,
		    hc_next) {
			KASSERT(head->hch_length > 0 && head->hch_length <=
			    V_tcp_hostcache.bucket_limit, ("tcp_hostcache: "
			    "bucket length out of range at %u: %u", i,
			    head->hch_length));
			if (all ||
			    atomic_load_int(&hc_entry->rmx_expire) <= 0) {
				if (hc_prev != NULL) {
					KASSERT(hc_entry ==
					    CK_SLIST_NEXT(hc_prev, rmx_q),
					    ("%s: %p is not next to %p",
					    __func__, hc_entry, hc_prev));
					CK_SLIST_REMOVE_AFTER(hc_prev, rmx_q);
				} else {
					KASSERT(hc_entry ==
					    CK_SLIST_FIRST(&head->hch_bucket),
					    ("%s: %p is not first",
					    __func__, hc_entry));
					CK_SLIST_REMOVE_HEAD(&head->hch_bucket,
					    rmx_q);
				}
				uma_zfree_smr(V_tcp_hostcache.zone, hc_entry);
				head->hch_length--;
				atomic_subtract_int(&V_tcp_hostcache.cache_count, 1);
			} else {
				/* Age the entry by one prune interval. */
				atomic_subtract_int(&hc_entry->rmx_expire,
				    V_tcp_hostcache.prune);
				hc_prev = hc_entry;
			}
		}
		THC_UNLOCK(head);
	}
}

/*
 * Expire and purge (old|all) entries in the tcp_hostcache.  Runs
 * periodically from the callout.
 */
static void
tcp_hc_purge(void *arg)
{
	CURVNET_SET((struct vnet *) arg);
	int all = 0;

	if (V_tcp_hostcache.purgeall) {
		if (V_tcp_hostcache.purgeall == 2)
			V_tcp_hostcache.hashsalt = arc4random();
		all = 1;
		V_tcp_hostcache.purgeall = 0;
	}

	tcp_hc_purge_internal(all);

	callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz,
	    tcp_hc_purge, arg);
	CURVNET_RESTORE();
}

/*
 * Expire and purge all entries in the hostcache immediately.  Writing a
 * value of 2 additionally reseeds the hash salt.
 */
static int
sysctl_tcp_hc_purgenow(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = 0;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);

	if (val == 2)
		V_tcp_hostcache.hashsalt = arc4random();
	tcp_hc_purge_internal(1);

	callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz,
	    tcp_hc_purge, curvnet);

	return (0);
}