/*-
 * Copyright (c) 2002 Andre Oppermann, Internet Business Solutions AG
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The tcp_hostcache moves the TCP-specific cached metrics from the routing
 * table to a dedicated structure indexed by the remote IP address.  It keeps
 * information on the measured TCP parameters of past TCP sessions to allow
 * better initial start values to be used with later connections to/from the
 * same source.  Depending on the network parameters (delay, bandwidth, max
 * MTU, congestion window) between local and remote sites, this can lead to
 * significant speed-ups for new TCP connections after the first one.
 *
 * Due to the tcp_hostcache, all TCP-specific metrics information in the
 * routing table has been removed.  The inpcb no longer keeps a pointer to
 * the routing entry, and protocol-initiated route cloning has been removed
 * as well.  With these changes, the routing table has gone back to being
 * more lightweight and only carries information related to packet
 * forwarding.
 *
 * tcp_hostcache is designed for concurrent access in SMP environments and
 * under high contention.  All bucket rows have their own lock and thus
 * multiple lookups and modifications can be done at the same time as long as
 * they are in different bucket rows.  If a request for insertion of a new
 * record can't be satisfied, it simply returns an empty structure.  Nobody
 * and nothing outside of tcp_hostcache.c will ever point directly to any
 * entry in the tcp_hostcache.  All communication is done in an
 * object-oriented way and only functions of tcp_hostcache will manipulate
 * hostcache entries.  Otherwise, we are unable to achieve good behaviour in
 * concurrent access situations.  Since tcp_hostcache is only caching
 * information, there are no fatal consequences if we either can't satisfy
 * any particular request or have to drop/overwrite an existing entry because
 * of bucket limit memory constraints.
 */
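
/*
 * Usage sketch (illustrative only; the caller-side variable names below are
 * hypothetical, only the tcp_hc_*() functions and struct hc_metrics_lite are
 * the interfaces defined here).  A consumer such as the TCP metrics code
 * seeds a new connection from the cache and feeds measurements back, e.g.:
 *
 *	struct hc_metrics_lite metrics;
 *
 *	tcp_hc_get(&inp->inp_inc, &metrics);
 *	if (metrics.rmx_rtt != 0)
 *		seed the initial srtt/rttvar from the cached values;
 *
 *	tcp_hc_update(&inp->inp_inc, &metrics);
 *
 * A zeroed metrics structure means "nothing cached" for that peer.
 */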

/*
 * Many thanks to jlemon for the basic structure of tcp_syncache, which is
 * being followed here.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/netinet/tcp_hostcache.c 314667 2017-03-04 13:03:31Z avg $");

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/sbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_var.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#endif
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_hostcache.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif

#include <vm/uma.h>

/* Arbitrary values */
#define TCP_HOSTCACHE_HASHSIZE		512
#define TCP_HOSTCACHE_BUCKETLIMIT	30
#define TCP_HOSTCACHE_EXPIRE		60*60	/* one hour */
#define TCP_HOSTCACHE_PRUNE		5*60	/* every 5 minutes */
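
/*
 * With these defaults the purge callout runs every TCP_HOSTCACHE_PRUNE (300)
 * seconds and subtracts that amount from each entry's rmx_expire, so an
 * entry that is never looked up again reaches zero after
 * TCP_HOSTCACHE_EXPIRE / TCP_HOSTCACHE_PRUNE = 12 purge runs and is freed on
 * the run after that, i.e. roughly one hour after its last use.
 */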

static VNET_DEFINE(struct tcp_hostcache, tcp_hostcache);
#define	V_tcp_hostcache		VNET(tcp_hostcache)

static VNET_DEFINE(struct callout, tcp_hc_callout);
#define	V_tcp_hc_callout	VNET(tcp_hc_callout)

static struct hc_metrics *tcp_hc_lookup(struct in_conninfo *);
static struct hc_metrics *tcp_hc_insert(struct in_conninfo *);
static int sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS);
static void tcp_hc_purge_internal(int);
static void tcp_hc_purge(void *);

static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, hostcache, CTLFLAG_RW, 0,
    "TCP Host cache");

SYSCTL_VNET_UINT(_net_inet_tcp_hostcache, OID_AUTO, cachelimit, CTLFLAG_RDTUN,
    &VNET_NAME(tcp_hostcache.cache_limit), 0,
    "Overall entry limit for hostcache");

SYSCTL_VNET_UINT(_net_inet_tcp_hostcache, OID_AUTO, hashsize, CTLFLAG_RDTUN,
    &VNET_NAME(tcp_hostcache.hashsize), 0,
    "Size of TCP hostcache hashtable");

SYSCTL_VNET_UINT(_net_inet_tcp_hostcache, OID_AUTO, bucketlimit,
    CTLFLAG_RDTUN, &VNET_NAME(tcp_hostcache.bucket_limit), 0,
    "Per-bucket hash limit for hostcache");

SYSCTL_VNET_UINT(_net_inet_tcp_hostcache, OID_AUTO, count, CTLFLAG_RD,
    &VNET_NAME(tcp_hostcache.cache_count), 0,
    "Current number of entries in hostcache");

SYSCTL_VNET_INT(_net_inet_tcp_hostcache, OID_AUTO, expire, CTLFLAG_RW,
    &VNET_NAME(tcp_hostcache.expire), 0,
    "Expire time of TCP hostcache entries");

SYSCTL_VNET_INT(_net_inet_tcp_hostcache, OID_AUTO, prune, CTLFLAG_RW,
    &VNET_NAME(tcp_hostcache.prune), 0,
    "Time between purge runs");

SYSCTL_VNET_INT(_net_inet_tcp_hostcache, OID_AUTO, purge, CTLFLAG_RW,
    &VNET_NAME(tcp_hostcache.purgeall), 0,
    "Expire all entries on next purge run");

SYSCTL_PROC(_net_inet_tcp_hostcache, OID_AUTO, list,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_SKIP, 0, 0,
    sysctl_tcp_hc_list, "A", "List of all hostcache entries");


static MALLOC_DEFINE(M_HOSTCACHE, "hostcache", "TCP hostcache");

#define HOSTCACHE_HASH(ip) \
	(((ip)->s_addr ^ ((ip)->s_addr >> 7) ^ ((ip)->s_addr >> 17)) &	\
	  V_tcp_hostcache.hashmask)

/* XXX: What is the recommended hash to get good entropy for IPv6 addresses? */
#define HOSTCACHE_HASH6(ip6)				\
	(((ip6)->s6_addr32[0] ^				\
	  (ip6)->s6_addr32[1] ^				\
	  (ip6)->s6_addr32[2] ^				\
	  (ip6)->s6_addr32[3]) &			\
	 V_tcp_hostcache.hashmask)

#define THC_LOCK(lp)		mtx_lock(lp)
#define THC_UNLOCK(lp)		mtx_unlock(lp)

void
tcp_hc_init(void)
{
	u_int cache_limit;
	int i;

	/*
	 * Initialize hostcache structures.
	 */
	V_tcp_hostcache.cache_count = 0;
	V_tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE;
	V_tcp_hostcache.bucket_limit = TCP_HOSTCACHE_BUCKETLIMIT;
	V_tcp_hostcache.expire = TCP_HOSTCACHE_EXPIRE;
	V_tcp_hostcache.prune = TCP_HOSTCACHE_PRUNE;

	TUNABLE_INT_FETCH("net.inet.tcp.hostcache.hashsize",
	    &V_tcp_hostcache.hashsize);
	if (!powerof2(V_tcp_hostcache.hashsize)) {
		printf("WARNING: hostcache hash size is not a power of 2.\n");
		V_tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE; /* default */
	}
	V_tcp_hostcache.hashmask = V_tcp_hostcache.hashsize - 1;

	TUNABLE_INT_FETCH("net.inet.tcp.hostcache.bucketlimit",
	    &V_tcp_hostcache.bucket_limit);

	cache_limit = V_tcp_hostcache.hashsize * V_tcp_hostcache.bucket_limit;
	V_tcp_hostcache.cache_limit = cache_limit;
	TUNABLE_INT_FETCH("net.inet.tcp.hostcache.cachelimit",
	    &V_tcp_hostcache.cache_limit);
	if (V_tcp_hostcache.cache_limit > cache_limit)
		V_tcp_hostcache.cache_limit = cache_limit;

	/*
	 * Allocate the hash table.
	 */
	V_tcp_hostcache.hashbase = (struct hc_head *)
	    malloc(V_tcp_hostcache.hashsize * sizeof(struct hc_head),
		   M_HOSTCACHE, M_WAITOK | M_ZERO);

	/*
	 * Initialize the hash buckets.
	 */
	for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
		TAILQ_INIT(&V_tcp_hostcache.hashbase[i].hch_bucket);
		V_tcp_hostcache.hashbase[i].hch_length = 0;
		mtx_init(&V_tcp_hostcache.hashbase[i].hch_mtx, "tcp_hc_entry",
			  NULL, MTX_DEF);
	}

	/*
	 * Allocate the hostcache entries.
	 */
	V_tcp_hostcache.zone =
	    uma_zcreate("hostcache", sizeof(struct hc_metrics),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	uma_zone_set_max(V_tcp_hostcache.zone, V_tcp_hostcache.cache_limit);

	/*
	 * Set up periodic cache cleanup.
	 */
	callout_init(&V_tcp_hc_callout, 1);
	callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz,
	    tcp_hc_purge, curvnet);
}

#ifdef VIMAGE
void
tcp_hc_destroy(void)
{
	int i;

	callout_drain(&V_tcp_hc_callout);

	/* Purge all hc entries. */
	tcp_hc_purge_internal(1);

	/* Free the uma zone and the allocated hash table. */
	uma_zdestroy(V_tcp_hostcache.zone);

	for (i = 0; i < V_tcp_hostcache.hashsize; i++)
		mtx_destroy(&V_tcp_hostcache.hashbase[i].hch_mtx);
	free(V_tcp_hostcache.hashbase, M_HOSTCACHE);
}
#endif

/*
 * Internal function: look up an entry in the hostcache or return NULL.
 *
 * If an entry has been returned, the caller becomes responsible for
 * unlocking the bucket row after he is done reading/modifying the entry.
 */
static struct hc_metrics *
tcp_hc_lookup(struct in_conninfo *inc)
{
	int hash;
	struct hc_head *hc_head;
	struct hc_metrics *hc_entry;

	KASSERT(inc != NULL, ("tcp_hc_lookup with NULL in_conninfo pointer"));

	/*
	 * Hash the foreign ip address.
	 */
	if (inc->inc_flags & INC_ISIPV6)
		hash = HOSTCACHE_HASH6(&inc->inc6_faddr);
	else
		hash = HOSTCACHE_HASH(&inc->inc_faddr);

	hc_head = &V_tcp_hostcache.hashbase[hash];

	/*
	 * Acquire lock for this bucket row; we release the lock if we don't
	 * find an entry, otherwise the caller has to unlock after he is
	 * done.
	 */
	THC_LOCK(&hc_head->hch_mtx);

	/*
	 * Iterate through entries in bucket row looking for a match.
	 */
	TAILQ_FOREACH(hc_entry, &hc_head->hch_bucket, rmx_q) {
		if (inc->inc_flags & INC_ISIPV6) {
			if (memcmp(&inc->inc6_faddr, &hc_entry->ip6,
			    sizeof(inc->inc6_faddr)) == 0)
				return hc_entry;
		} else {
			if (memcmp(&inc->inc_faddr, &hc_entry->ip4,
			    sizeof(inc->inc_faddr)) == 0)
				return hc_entry;
		}
	}

	/*
	 * We were unsuccessful and didn't find anything.
	 */
	THC_UNLOCK(&hc_head->hch_mtx);
	return NULL;
}
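
/*
 * The locking contract above boils down to the following caller pattern
 * (sketch; the real callers are tcp_hc_get(), tcp_hc_getmtu(),
 * tcp_hc_updatemtu() and tcp_hc_update() below):
 *
 *	hc_entry = tcp_hc_lookup(inc);
 *	if (hc_entry != NULL) {
 *		read or modify hc_entry here;
 *		THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
 *	}
 */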

/*
 * Internal function: insert an entry into the hostcache or return NULL if
 * unable to allocate a new one.
 *
 * If an entry has been returned, the caller becomes responsible for
 * unlocking the bucket row after he is done reading/modifying the entry.
 */
static struct hc_metrics *
tcp_hc_insert(struct in_conninfo *inc)
{
	int hash;
	struct hc_head *hc_head;
	struct hc_metrics *hc_entry;

	KASSERT(inc != NULL, ("tcp_hc_insert with NULL in_conninfo pointer"));

	/*
	 * Hash the foreign ip address.
	 */
	if (inc->inc_flags & INC_ISIPV6)
		hash = HOSTCACHE_HASH6(&inc->inc6_faddr);
	else
		hash = HOSTCACHE_HASH(&inc->inc_faddr);

	hc_head = &V_tcp_hostcache.hashbase[hash];

	/*
	 * Acquire lock for this bucket row; we release the lock if we don't
	 * find an entry, otherwise the caller has to unlock after he is
	 * done.
	 */
	THC_LOCK(&hc_head->hch_mtx);

	/*
	 * If the bucket limit is reached, reuse the least-used element.
	 */
	if (hc_head->hch_length >= V_tcp_hostcache.bucket_limit ||
	    V_tcp_hostcache.cache_count >= V_tcp_hostcache.cache_limit) {
		hc_entry = TAILQ_LAST(&hc_head->hch_bucket, hc_qhead);
		/*
		 * At first we were dropping the last element, just to
		 * reacquire it in the next two lines again, which isn't very
		 * efficient.  Instead just reuse the least used element.
		 * We may drop something that is still "in-use" but we can be
		 * "lossy".
		 * Just give up if this bucket row is empty and we don't have
		 * anything to replace.
		 */
		if (hc_entry == NULL) {
			THC_UNLOCK(&hc_head->hch_mtx);
			return NULL;
		}
		TAILQ_REMOVE(&hc_head->hch_bucket, hc_entry, rmx_q);
		V_tcp_hostcache.hashbase[hash].hch_length--;
		V_tcp_hostcache.cache_count--;
		TCPSTAT_INC(tcps_hc_bucketoverflow);
#if 0
		uma_zfree(V_tcp_hostcache.zone, hc_entry);
#endif
	} else {
		/*
		 * Allocate a new entry, or balk if not possible.
		 */
		hc_entry = uma_zalloc(V_tcp_hostcache.zone, M_NOWAIT);
		if (hc_entry == NULL) {
			THC_UNLOCK(&hc_head->hch_mtx);
			return NULL;
		}
	}

	/*
	 * Initialize basic information of hostcache entry.
	 */
	bzero(hc_entry, sizeof(*hc_entry));
	if (inc->inc_flags & INC_ISIPV6)
		bcopy(&inc->inc6_faddr, &hc_entry->ip6, sizeof(hc_entry->ip6));
	else
		hc_entry->ip4 = inc->inc_faddr;
	hc_entry->rmx_head = hc_head;
	hc_entry->rmx_expire = V_tcp_hostcache.expire;

	/*
	 * Put it upfront.
	 */
	TAILQ_INSERT_HEAD(&hc_head->hch_bucket, hc_entry, rmx_q);
	V_tcp_hostcache.hashbase[hash].hch_length++;
	V_tcp_hostcache.cache_count++;
	TCPSTAT_INC(tcps_hc_added);

	return hc_entry;
}

/*
 * External function: look up an entry in the hostcache and fill out the
 * supplied TCP metrics structure.  Fills in zero when no entry was found or
 * a value is not set.
 */
void
tcp_hc_get(struct in_conninfo *inc, struct hc_metrics_lite *hc_metrics_lite)
{
	struct hc_metrics *hc_entry;

	/*
	 * Find the right bucket.
	 */
	hc_entry = tcp_hc_lookup(inc);

	/*
	 * If we don't have an existing object.
	 */
	if (hc_entry == NULL) {
		bzero(hc_metrics_lite, sizeof(*hc_metrics_lite));
		return;
	}
	hc_entry->rmx_hits++;
	hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */

	hc_metrics_lite->rmx_mtu = hc_entry->rmx_mtu;
	hc_metrics_lite->rmx_ssthresh = hc_entry->rmx_ssthresh;
	hc_metrics_lite->rmx_rtt = hc_entry->rmx_rtt;
	hc_metrics_lite->rmx_rttvar = hc_entry->rmx_rttvar;
	hc_metrics_lite->rmx_bandwidth = hc_entry->rmx_bandwidth;
	hc_metrics_lite->rmx_cwnd = hc_entry->rmx_cwnd;
	hc_metrics_lite->rmx_sendpipe = hc_entry->rmx_sendpipe;
	hc_metrics_lite->rmx_recvpipe = hc_entry->rmx_recvpipe;

	/*
	 * Unlock bucket row.
	 */
	THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
}

/*
 * External function: look up an entry in the hostcache and return the
 * discovered path MTU.  Returns zero if no entry is found or the value is
 * not set.
 */
u_long
tcp_hc_getmtu(struct in_conninfo *inc)
{
	struct hc_metrics *hc_entry;
	u_long mtu;

	hc_entry = tcp_hc_lookup(inc);
	if (hc_entry == NULL) {
		return 0;
	}
	hc_entry->rmx_hits++;
	hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */

	mtu = hc_entry->rmx_mtu;
	THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
	return mtu;
}

/*
 * External function: update the MTU value of an entry in the hostcache.
 * Creates a new entry if none was found.
 */
void
tcp_hc_updatemtu(struct in_conninfo *inc, u_long mtu)
{
	struct hc_metrics *hc_entry;

	/*
	 * Find the right bucket.
	 */
	hc_entry = tcp_hc_lookup(inc);

	/*
	 * If we don't have an existing object, try to insert a new one.
	 */
	if (hc_entry == NULL) {
		hc_entry = tcp_hc_insert(inc);
		if (hc_entry == NULL)
			return;
	}
	hc_entry->rmx_updates++;
	hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */

	hc_entry->rmx_mtu = mtu;

	/*
	 * Put it upfront so we find it faster next time.
	 */
	TAILQ_REMOVE(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
	TAILQ_INSERT_HEAD(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);

	/*
	 * Unlock bucket row.
	 */
	THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
}

/*
 * External function: update the TCP metrics of an entry in the hostcache.
 * Creates a new entry if none was found.
 */
void
tcp_hc_update(struct in_conninfo *inc, struct hc_metrics_lite *hcml)
{
	struct hc_metrics *hc_entry;

	hc_entry = tcp_hc_lookup(inc);
	if (hc_entry == NULL) {
		hc_entry = tcp_hc_insert(inc);
		if (hc_entry == NULL)
			return;
	}
	hc_entry->rmx_updates++;
	hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */

	if (hcml->rmx_rtt != 0) {
		if (hc_entry->rmx_rtt == 0)
			hc_entry->rmx_rtt = hcml->rmx_rtt;
		else
			hc_entry->rmx_rtt =
			    (hc_entry->rmx_rtt + hcml->rmx_rtt) / 2;
		TCPSTAT_INC(tcps_cachedrtt);
	}
	if (hcml->rmx_rttvar != 0) {
		if (hc_entry->rmx_rttvar == 0)
			hc_entry->rmx_rttvar = hcml->rmx_rttvar;
		else
			hc_entry->rmx_rttvar =
			    (hc_entry->rmx_rttvar + hcml->rmx_rttvar) / 2;
		TCPSTAT_INC(tcps_cachedrttvar);
	}
	if (hcml->rmx_ssthresh != 0) {
		if (hc_entry->rmx_ssthresh == 0)
			hc_entry->rmx_ssthresh = hcml->rmx_ssthresh;
		else
			hc_entry->rmx_ssthresh =
			    (hc_entry->rmx_ssthresh + hcml->rmx_ssthresh) / 2;
		TCPSTAT_INC(tcps_cachedssthresh);
	}
	if (hcml->rmx_bandwidth != 0) {
		if (hc_entry->rmx_bandwidth == 0)
			hc_entry->rmx_bandwidth = hcml->rmx_bandwidth;
		else
			hc_entry->rmx_bandwidth =
			    (hc_entry->rmx_bandwidth + hcml->rmx_bandwidth) / 2;
		/* TCPSTAT_INC(tcps_cachedbandwidth); */
	}
	if (hcml->rmx_cwnd != 0) {
		if (hc_entry->rmx_cwnd == 0)
			hc_entry->rmx_cwnd = hcml->rmx_cwnd;
		else
			hc_entry->rmx_cwnd =
			    (hc_entry->rmx_cwnd + hcml->rmx_cwnd) / 2;
		/* TCPSTAT_INC(tcps_cachedcwnd); */
	}
	if (hcml->rmx_sendpipe != 0) {
		if (hc_entry->rmx_sendpipe == 0)
			hc_entry->rmx_sendpipe = hcml->rmx_sendpipe;
		else
			hc_entry->rmx_sendpipe =
			    (hc_entry->rmx_sendpipe + hcml->rmx_sendpipe) / 2;
		/* TCPSTAT_INC(tcps_cachedsendpipe); */
	}
	if (hcml->rmx_recvpipe != 0) {
		if (hc_entry->rmx_recvpipe == 0)
			hc_entry->rmx_recvpipe = hcml->rmx_recvpipe;
		else
			hc_entry->rmx_recvpipe =
			    (hc_entry->rmx_recvpipe + hcml->rmx_recvpipe) / 2;
		/* TCPSTAT_INC(tcps_cachedrecvpipe); */
	}

	TAILQ_REMOVE(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
	TAILQ_INSERT_HEAD(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
	THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
}

/*
 * Sysctl function: prints the list and values of all hostcache entries in
 * unsorted order.
 */
static int
sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS)
{
	int linesize = 128;
	struct sbuf sb;
	int i, error;
	struct hc_metrics *hc_entry;
#ifdef INET6
	char ip6buf[INET6_ADDRSTRLEN];
#endif

	sbuf_new(&sb, NULL, linesize * (V_tcp_hostcache.cache_count + 1),
	    SBUF_FIXEDLEN);

	sbuf_printf(&sb,
		"\nIP address        MTU SSTHRESH      RTT   RTTVAR BANDWIDTH "
		"    CWND SENDPIPE RECVPIPE HITS  UPD  EXP\n");

#define msec(u) (((u) + 500) / 1000)
	for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
		THC_LOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
		TAILQ_FOREACH(hc_entry, &V_tcp_hostcache.hashbase[i].hch_bucket,
			      rmx_q) {
			sbuf_printf(&sb,
			    "%-15s %5lu %8lu %6lums %6lums %9lu %8lu %8lu %8lu "
			    "%4lu %4lu %4i\n",
			    hc_entry->ip4.s_addr ? inet_ntoa(hc_entry->ip4) :
#ifdef INET6
				ip6_sprintf(ip6buf, &hc_entry->ip6),
#else
				"IPv6?",
#endif
			    hc_entry->rmx_mtu,
			    hc_entry->rmx_ssthresh,
			    msec(hc_entry->rmx_rtt *
				(RTM_RTTUNIT / (hz * TCP_RTT_SCALE))),
			    msec(hc_entry->rmx_rttvar *
				(RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE))),
			    hc_entry->rmx_bandwidth * 8,
			    hc_entry->rmx_cwnd,
			    hc_entry->rmx_sendpipe,
			    hc_entry->rmx_recvpipe,
			    hc_entry->rmx_hits,
			    hc_entry->rmx_updates,
			    hc_entry->rmx_expire);
		}
		THC_UNLOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
	}
#undef msec
	sbuf_finish(&sb);
	error = SYSCTL_OUT(req, sbuf_data(&sb), sbuf_len(&sb));
	sbuf_delete(&sb);
	return (error);
}

/*
 * Caller has to make sure the curvnet is set properly.
 */
static void
tcp_hc_purge_internal(int all)
{
	struct hc_metrics *hc_entry, *hc_next;
	int i;

	for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
		THC_LOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
		TAILQ_FOREACH_SAFE(hc_entry,
		    &V_tcp_hostcache.hashbase[i].hch_bucket, rmx_q, hc_next) {
			if (all || hc_entry->rmx_expire <= 0) {
				TAILQ_REMOVE(&V_tcp_hostcache.hashbase[i].hch_bucket,
					      hc_entry, rmx_q);
				uma_zfree(V_tcp_hostcache.zone, hc_entry);
				V_tcp_hostcache.hashbase[i].hch_length--;
				V_tcp_hostcache.cache_count--;
			} else
				hc_entry->rmx_expire -= V_tcp_hostcache.prune;
		}
		THC_UNLOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
	}
}

/*
 * Expire and purge (old|all) entries in the tcp_hostcache.  Runs
 * periodically from the callout.
 */
static void
tcp_hc_purge(void *arg)
{
	CURVNET_SET((struct vnet *) arg);
	int all = 0;

	if (V_tcp_hostcache.purgeall) {
		all = 1;
		V_tcp_hostcache.purgeall = 0;
	}

	tcp_hc_purge_internal(all);

	callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz,
	    tcp_hc_purge, arg);
	CURVNET_RESTORE();
}
