/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2002 Andre Oppermann, Internet Business Solutions AG
 * Copyright (c) 2021 Gleb Smirnoff <glebius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The tcp_hostcache moves the tcp-specific cached metrics from the routing
 * table to a dedicated structure indexed by the remote IP address.  It keeps
 * information on the measured TCP parameters of past TCP sessions to allow
 * better initial start values to be used with later connections to/from the
 * same source.  Depending on the network parameters (delay, max MTU,
 * congestion window) between local and remote sites, this can lead to
 * significant speed-ups for new TCP connections after the first one.
 *
 * Due to the tcp_hostcache, all TCP-specific metrics information in the
 * routing table has been removed.  The inpcb no longer keeps a pointer to
 * the routing entry, and protocol-initiated route cloning has been removed
 * as well.  With these changes, the routing table has gone back to being
 * more lightweight and only carries information related to packet forwarding.
 *
 * tcp_hostcache is designed for concurrent access in SMP environments and
 * under high contention.  It is a straight hash.  Each bucket row is
 * protected by its own lock for modification.  Readers are protected by
 * SMR.  This puts certain restrictions on writers: a writer shall only
 * insert a fully populated entry into a row, a writer can't reuse the
 * least used entry in place while a row is full, and value updates for an
 * entry shall be atomic.
 *
 * TCP stacks communicate with the tcp_hostcache via the KBI functions
 * tcp_hc_*() and the hc_metrics_lite structure.
 *
 * Since tcp_hostcache is only caching information, there are no fatal
 * consequences if we either can't allocate a new entry or have to drop
 * an existing entry, or return somewhat stale information.
 */
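
/*
 * A minimal usage sketch of that KBI (illustrative only; the call site and
 * the surrounding variables are assumptions, while tcp_hc_get() and
 * tcp_hc_update() are the real entry points defined below).  A TCP stack
 * typically reads cached metrics at connection setup and writes measured
 * ones back at teardown:
 *
 *	struct hc_metrics_lite metrics;
 *
 *	tcp_hc_get(&inp->inp_inc, &metrics);	(zeroed if nothing cached)
 *	if (metrics.rmx_mtu != 0)
 *		maxmtu = metrics.rmx_mtu;	(seed the path MTU)
 *	...
 *	metrics = (struct hc_metrics_lite){ .rmx_rtt = measured_rtt };
 *	tcp_hc_update(&inp->inp_inc, &metrics);	(averaged into the cache)
 */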

/*
 * Many thanks to jlemon for the basic structure of tcp_syncache, which is
 * followed here.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/hash.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/smr.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>

#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>

#include <vm/uma.h>

struct hc_head {
	CK_SLIST_HEAD(hc_qhead, hc_metrics) hch_bucket;
	u_int		hch_length;
	struct mtx	hch_mtx;
};

struct hc_metrics {
	/* housekeeping */
	CK_SLIST_ENTRY(hc_metrics) rmx_q;
	struct		in_addr ip4;	/* IP address */
	struct		in6_addr ip6;	/* IP6 address */
	uint32_t	ip6_zoneid;	/* IPv6 scope zone id */
	/* endpoint specific values for tcp */
	uint32_t	rmx_mtu;	/* MTU for this path */
	uint32_t	rmx_ssthresh;	/* outbound gateway buffer limit */
	uint32_t	rmx_rtt;	/* estimated round trip time */
	uint32_t	rmx_rttvar;	/* estimated rtt variance */
	uint32_t	rmx_cwnd;	/* congestion window */
	uint32_t	rmx_sendpipe;	/* outbound delay-bandwidth product */
	uint32_t	rmx_recvpipe;	/* inbound delay-bandwidth product */
	/* TCP hostcache internal data */
	int		rmx_expire;	/* lifetime for object */
#ifdef	TCP_HC_COUNTERS
	u_long		rmx_hits;	/* number of hits */
	u_long		rmx_updates;	/* number of updates */
#endif
};

struct tcp_hostcache {
	struct hc_head	*hashbase;
	uma_zone_t	zone;
	smr_t		smr;
	u_int		hashsize;
	u_int		hashmask;
	u_int		hashsalt;
	u_int		bucket_limit;
	u_int		cache_count;
	u_int		cache_limit;
	int		expire;
	int		prune;
	int		purgeall;
};

/* Arbitrary values */
#define TCP_HOSTCACHE_HASHSIZE		512
#define TCP_HOSTCACHE_BUCKETLIMIT	30
#define TCP_HOSTCACHE_EXPIRE		60*60	/* one hour */
#define TCP_HOSTCACHE_PRUNE		5*60	/* every 5 minutes */
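
/*
 * With the defaults above, the derived overall limit computed in
 * tcp_hc_init() works out to hashsize * bucket_limit = 512 * 30 = 15360
 * entries; the net.inet.tcp.hostcache.cachelimit tunable can only lower
 * that product, never raise it.
 */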

VNET_DEFINE_STATIC(struct tcp_hostcache, tcp_hostcache);
#define	V_tcp_hostcache		VNET(tcp_hostcache)

VNET_DEFINE_STATIC(struct callout, tcp_hc_callout);
#define	V_tcp_hc_callout	VNET(tcp_hc_callout)

static struct hc_metrics *tcp_hc_lookup(struct in_conninfo *);
static int sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_hc_histo(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_hc_purgenow(SYSCTL_HANDLER_ARGS);
static void tcp_hc_purge_internal(int);
static void tcp_hc_purge(void *);

static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, hostcache,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TCP Host cache");

VNET_DEFINE(int, tcp_use_hostcache) = 1;
#define V_tcp_use_hostcache  VNET(tcp_use_hostcache)
SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, enable, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_use_hostcache), 0,
    "Enable the TCP hostcache");

SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, cachelimit, CTLFLAG_VNET | CTLFLAG_RDTUN,
    &VNET_NAME(tcp_hostcache.cache_limit), 0,
    "Overall entry limit for hostcache");

SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, hashsize, CTLFLAG_VNET | CTLFLAG_RDTUN,
    &VNET_NAME(tcp_hostcache.hashsize), 0,
    "Size of TCP hostcache hashtable");

SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, bucketlimit,
    CTLFLAG_VNET | CTLFLAG_RDTUN, &VNET_NAME(tcp_hostcache.bucket_limit), 0,
    "Per-bucket hash limit for hostcache");

SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, count, CTLFLAG_VNET | CTLFLAG_RD,
    &VNET_NAME(tcp_hostcache.cache_count), 0,
    "Current number of entries in hostcache");

SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, expire, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_hostcache.expire), 0,
    "Expire time of TCP hostcache entries");

SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, prune, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_hostcache.prune), 0,
    "Time between purge runs");

SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, purge, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_hostcache.purgeall), 0,
    "Expire all entires on next purge run");

SYSCTL_PROC(_net_inet_tcp_hostcache, OID_AUTO, list,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE,
    0, 0, sysctl_tcp_hc_list, "A",
    "List of all hostcache entries");

SYSCTL_PROC(_net_inet_tcp_hostcache, OID_AUTO, histo,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE,
    0, 0, sysctl_tcp_hc_histo, "A",
    "Print a histogram of hostcache hashbucket utilization");

SYSCTL_PROC(_net_inet_tcp_hostcache, OID_AUTO, purgenow,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    NULL, 0, sysctl_tcp_hc_purgenow, "I",
    "Immediately purge all entries");

static MALLOC_DEFINE(M_HOSTCACHE, "hostcache", "TCP hostcache");

/* Use jenkins_hash32(), as in other parts of the tcp stack */
#define	HOSTCACHE_HASH(inc)						\
	((inc)->inc_flags & INC_ISIPV6) ?				\
		(jenkins_hash32((inc)->inc6_faddr.s6_addr32, 4,		\
		V_tcp_hostcache.hashsalt) & V_tcp_hostcache.hashmask)	\
	:								\
		(jenkins_hash32(&(inc)->inc_faddr.s_addr, 1,		\
		V_tcp_hostcache.hashsalt) & V_tcp_hostcache.hashmask)
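
/*
 * For example, with the default hashsize of 512 the hashmask is 0x1ff, so
 * for an IPv4 peer the row index is jenkins_hash32(&faddr, 1, hashsalt) &
 * 0x1ff.  Masking only works because tcp_hc_init() enforces a power-of-2
 * hashsize.
 */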

#define THC_LOCK(h)		mtx_lock(&(h)->hch_mtx)
#define THC_UNLOCK(h)		mtx_unlock(&(h)->hch_mtx)

void
tcp_hc_init(void)
{
	u_int cache_limit;
	int i;

	/*
	 * Initialize hostcache structures.
	 */
	atomic_store_int(&V_tcp_hostcache.cache_count, 0);
	V_tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE;
	V_tcp_hostcache.bucket_limit = TCP_HOSTCACHE_BUCKETLIMIT;
	V_tcp_hostcache.expire = TCP_HOSTCACHE_EXPIRE;
	V_tcp_hostcache.prune = TCP_HOSTCACHE_PRUNE;
	V_tcp_hostcache.hashsalt = arc4random();

	TUNABLE_INT_FETCH("net.inet.tcp.hostcache.hashsize",
	    &V_tcp_hostcache.hashsize);
	if (!powerof2(V_tcp_hostcache.hashsize)) {
		printf("WARNING: hostcache hash size is not a power of 2.\n");
		V_tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE; /* default */
	}
	V_tcp_hostcache.hashmask = V_tcp_hostcache.hashsize - 1;

	TUNABLE_INT_FETCH("net.inet.tcp.hostcache.bucketlimit",
	    &V_tcp_hostcache.bucket_limit);

	cache_limit = V_tcp_hostcache.hashsize * V_tcp_hostcache.bucket_limit;
	V_tcp_hostcache.cache_limit = cache_limit;
	TUNABLE_INT_FETCH("net.inet.tcp.hostcache.cachelimit",
	    &V_tcp_hostcache.cache_limit);
	if (V_tcp_hostcache.cache_limit > cache_limit)
		V_tcp_hostcache.cache_limit = cache_limit;

	/*
	 * Allocate the hash table.
	 */
	V_tcp_hostcache.hashbase = (struct hc_head *)
	    malloc(V_tcp_hostcache.hashsize * sizeof(struct hc_head),
		   M_HOSTCACHE, M_WAITOK | M_ZERO);

	/*
	 * Initialize the hash buckets.
	 */
	for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
		CK_SLIST_INIT(&V_tcp_hostcache.hashbase[i].hch_bucket);
		V_tcp_hostcache.hashbase[i].hch_length = 0;
		mtx_init(&V_tcp_hostcache.hashbase[i].hch_mtx, "tcp_hc_entry",
			  NULL, MTX_DEF);
	}

	/*
	 * Allocate the hostcache entries.
	 */
	V_tcp_hostcache.zone =
	    uma_zcreate("hostcache", sizeof(struct hc_metrics),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_SMR);
	uma_zone_set_max(V_tcp_hostcache.zone, V_tcp_hostcache.cache_limit);
	V_tcp_hostcache.smr = uma_zone_get_smr(V_tcp_hostcache.zone);

	/*
	 * Set up periodic cache cleanup.
	 */
	callout_init(&V_tcp_hc_callout, 1);
	callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz,
	    tcp_hc_purge, curvnet);
}

#ifdef VIMAGE
void
tcp_hc_destroy(void)
{
	int i;

	callout_drain(&V_tcp_hc_callout);

	/* Purge all hc entries. */
	tcp_hc_purge_internal(1);

	/* Free the uma zone and the allocated hash table. */
	uma_zdestroy(V_tcp_hostcache.zone);

	for (i = 0; i < V_tcp_hostcache.hashsize; i++)
		mtx_destroy(&V_tcp_hostcache.hashbase[i].hch_mtx);
	free(V_tcp_hostcache.hashbase, M_HOSTCACHE);
}
#endif

/*
 * Internal function: compare cache entry to a connection.
 */
static bool
tcp_hc_cmp(struct hc_metrics *hc_entry, struct in_conninfo *inc)
{

	if (inc->inc_flags & INC_ISIPV6) {
		/* XXX: check ip6_zoneid */
		if (memcmp(&inc->inc6_faddr, &hc_entry->ip6,
		    sizeof(inc->inc6_faddr)) == 0)
			return (true);
	} else {
		if (memcmp(&inc->inc_faddr, &hc_entry->ip4,
		    sizeof(inc->inc_faddr)) == 0)
			return (true);
	}

	return (false);
}

/*
 * Internal function: look up an entry in the hostcache for read.
 * On success, returns with the SMR read section entered.
 */
static struct hc_metrics *
tcp_hc_lookup(struct in_conninfo *inc)
{
	struct hc_head *hc_head;
	struct hc_metrics *hc_entry;

	KASSERT(inc != NULL, ("%s: NULL in_conninfo", __func__));

	hc_head = &V_tcp_hostcache.hashbase[HOSTCACHE_HASH(inc)];

	/*
	 * Iterate through entries in bucket row looking for a match.
	 */
	smr_enter(V_tcp_hostcache.smr);
	CK_SLIST_FOREACH(hc_entry, &hc_head->hch_bucket, rmx_q)
		if (tcp_hc_cmp(hc_entry, inc))
			break;

	if (hc_entry != NULL) {
		if (atomic_load_int(&hc_entry->rmx_expire) !=
		    V_tcp_hostcache.expire)
			atomic_store_int(&hc_entry->rmx_expire,
			    V_tcp_hostcache.expire);
#ifdef	TCP_HC_COUNTERS
		hc_entry->rmx_hits++;
#endif
	} else
		smr_exit(V_tcp_hostcache.smr);

	return (hc_entry);
}
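
/*
 * A successful tcp_hc_lookup() returns inside the SMR read section, so a
 * caller must pair it with smr_exit() once it is done with the entry.
 * Sketch of the pattern used by the readers below:
 *
 *	if ((hc_entry = tcp_hc_lookup(inc)) != NULL) {
 *		val = atomic_load_32(&hc_entry->rmx_mtu);
 *		smr_exit(V_tcp_hostcache.smr);
 *	}
 */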

/*
 * External function: look up an entry in the hostcache and fill out the
 * supplied TCP metrics structure.  Fills the structure with zeros when no
 * entry is found, and leaves a field at zero when that value is not set.
 */
void
tcp_hc_get(struct in_conninfo *inc, struct hc_metrics_lite *hc_metrics_lite)
{
	struct hc_metrics *hc_entry;

	if (!V_tcp_use_hostcache) {
		bzero(hc_metrics_lite, sizeof(*hc_metrics_lite));
		return;
	}

	/*
	 * Look up the entry in the hostcache.
	 */
	hc_entry = tcp_hc_lookup(inc);

	/*
	 * Bail with zeroed metrics if we don't have an existing entry.
	 */
	if (hc_entry == NULL) {
		bzero(hc_metrics_lite, sizeof(*hc_metrics_lite));
		return;
	}

	hc_metrics_lite->rmx_mtu = atomic_load_32(&hc_entry->rmx_mtu);
	hc_metrics_lite->rmx_ssthresh = atomic_load_32(&hc_entry->rmx_ssthresh);
	hc_metrics_lite->rmx_rtt = atomic_load_32(&hc_entry->rmx_rtt);
	hc_metrics_lite->rmx_rttvar = atomic_load_32(&hc_entry->rmx_rttvar);
	hc_metrics_lite->rmx_cwnd = atomic_load_32(&hc_entry->rmx_cwnd);
	hc_metrics_lite->rmx_sendpipe = atomic_load_32(&hc_entry->rmx_sendpipe);
	hc_metrics_lite->rmx_recvpipe = atomic_load_32(&hc_entry->rmx_recvpipe);

	smr_exit(V_tcp_hostcache.smr);
}

/*
 * External function: look up an entry in the hostcache and return the
 * discovered path MTU.  Returns 0 if no entry is found or value is not
 * set.
 */
uint32_t
tcp_hc_getmtu(struct in_conninfo *inc)
{
	struct hc_metrics *hc_entry;
	uint32_t mtu;

	if (!V_tcp_use_hostcache)
		return (0);

	hc_entry = tcp_hc_lookup(inc);
	if (hc_entry == NULL) {
		return (0);
	}

	mtu = atomic_load_32(&hc_entry->rmx_mtu);
	smr_exit(V_tcp_hostcache.smr);

	return (mtu);
}

/*
 * External function: update the MTU value of an entry in the hostcache.
 * Creates a new entry if none was found.
 */
void
tcp_hc_updatemtu(struct in_conninfo *inc, uint32_t mtu)
{
	struct hc_metrics_lite hcml = { .rmx_mtu = mtu };

	return (tcp_hc_update(inc, &hcml));
}
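
/*
 * For example (the call site below is an assumption, not part of this
 * file), a path MTU discovery handler that learned a smaller MTU from an
 * ICMP "fragmentation needed" message could persist it for future
 * connections with:
 *
 *	tcp_hc_updatemtu(&inp->inp_inc, (uint32_t)new_mtu);
 */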

/*
 * External function: update the TCP metrics of an entry in the hostcache.
 * Creates a new entry if none was found.
 */
void
tcp_hc_update(struct in_conninfo *inc, struct hc_metrics_lite *hcml)
{
	struct hc_head *hc_head;
	struct hc_metrics *hc_entry, *hc_prev;
	uint32_t v;
	bool new;

	if (!V_tcp_use_hostcache)
		return;

	hc_head = &V_tcp_hostcache.hashbase[HOSTCACHE_HASH(inc)];
	hc_prev = NULL;

	THC_LOCK(hc_head);
	CK_SLIST_FOREACH(hc_entry, &hc_head->hch_bucket, rmx_q) {
		if (tcp_hc_cmp(hc_entry, inc))
			break;
		if (CK_SLIST_NEXT(hc_entry, rmx_q) != NULL)
			hc_prev = hc_entry;
	}

	if (hc_entry != NULL) {
		if (atomic_load_int(&hc_entry->rmx_expire) !=
		    V_tcp_hostcache.expire)
			atomic_store_int(&hc_entry->rmx_expire,
			    V_tcp_hostcache.expire);
#ifdef	TCP_HC_COUNTERS
		hc_entry->rmx_updates++;
#endif
		new = false;
	} else {
		/*
		 * Try to allocate a new entry.  If the bucket limit is
		 * reached, delete the least-used element, located at the end
		 * of the CK_SLIST.  During the lookup above we saved a
		 * pointer to the second-to-last element, in case the list
		 * has at least two elements; this allows the last element
		 * to be deleted without an extra traversal.
		 *
		 * Give up if the row is empty.
		 */
		if (hc_head->hch_length >= V_tcp_hostcache.bucket_limit ||
		    atomic_load_int(&V_tcp_hostcache.cache_count) >=
		    V_tcp_hostcache.cache_limit) {
			if (hc_prev != NULL) {
				hc_entry = CK_SLIST_NEXT(hc_prev, rmx_q);
				KASSERT(CK_SLIST_NEXT(hc_entry, rmx_q) == NULL,
				    ("%s: %p is not one to last",
				    __func__, hc_prev));
				CK_SLIST_REMOVE_AFTER(hc_prev, rmx_q);
			} else if ((hc_entry =
			    CK_SLIST_FIRST(&hc_head->hch_bucket)) != NULL) {
				KASSERT(CK_SLIST_NEXT(hc_entry, rmx_q) == NULL,
				    ("%s: %p is not the only element",
				    __func__, hc_entry));
				CK_SLIST_REMOVE_HEAD(&hc_head->hch_bucket,
				    rmx_q);
			} else {
				THC_UNLOCK(hc_head);
				return;
			}
			KASSERT(hc_head->hch_length > 0 &&
			    hc_head->hch_length <= V_tcp_hostcache.bucket_limit,
			    ("tcp_hostcache: bucket length violated at %p",
			    hc_head));
			hc_head->hch_length--;
			atomic_subtract_int(&V_tcp_hostcache.cache_count, 1);
			TCPSTAT_INC(tcps_hc_bucketoverflow);
			uma_zfree_smr(V_tcp_hostcache.zone, hc_entry);
		}

		/*
		 * Allocate a new entry, or balk if not possible.
		 */
		hc_entry = uma_zalloc_smr(V_tcp_hostcache.zone, M_NOWAIT);
		if (hc_entry == NULL) {
			THC_UNLOCK(hc_head);
			return;
		}

		/*
		 * Initialize basic information of hostcache entry.
		 */
		bzero(hc_entry, sizeof(*hc_entry));
		if (inc->inc_flags & INC_ISIPV6) {
			hc_entry->ip6 = inc->inc6_faddr;
			hc_entry->ip6_zoneid = inc->inc6_zoneid;
		} else
			hc_entry->ip4 = inc->inc_faddr;
		hc_entry->rmx_expire = V_tcp_hostcache.expire;
		new = true;
	}

	/*
	 * Fill in data.  Use atomics, since an existing entry is
	 * accessible to readers in the SMR section.
	 */
	if (hcml->rmx_mtu != 0) {
		atomic_store_32(&hc_entry->rmx_mtu, hcml->rmx_mtu);
	}
	if (hcml->rmx_rtt != 0) {
		if (hc_entry->rmx_rtt == 0)
			v = hcml->rmx_rtt;
		else
			v = ((uint64_t)hc_entry->rmx_rtt +
			    (uint64_t)hcml->rmx_rtt) / 2;
		atomic_store_32(&hc_entry->rmx_rtt, v);
		TCPSTAT_INC(tcps_cachedrtt);
	}
	if (hcml->rmx_rttvar != 0) {
		if (hc_entry->rmx_rttvar == 0)
			v = hcml->rmx_rttvar;
		else
			v = ((uint64_t)hc_entry->rmx_rttvar +
			    (uint64_t)hcml->rmx_rttvar) / 2;
		atomic_store_32(&hc_entry->rmx_rttvar, v);
		TCPSTAT_INC(tcps_cachedrttvar);
	}
	if (hcml->rmx_ssthresh != 0) {
		if (hc_entry->rmx_ssthresh == 0)
			v = hcml->rmx_ssthresh;
		else
			v = (hc_entry->rmx_ssthresh + hcml->rmx_ssthresh) / 2;
		atomic_store_32(&hc_entry->rmx_ssthresh, v);
		TCPSTAT_INC(tcps_cachedssthresh);
	}
	if (hcml->rmx_cwnd != 0) {
		if (hc_entry->rmx_cwnd == 0)
			v = hcml->rmx_cwnd;
		else
			v = ((uint64_t)hc_entry->rmx_cwnd +
			    (uint64_t)hcml->rmx_cwnd) / 2;
		atomic_store_32(&hc_entry->rmx_cwnd, v);
		/* TCPSTAT_INC(tcps_cachedcwnd); */
	}
	if (hcml->rmx_sendpipe != 0) {
		if (hc_entry->rmx_sendpipe == 0)
			v = hcml->rmx_sendpipe;
		else
			v = ((uint64_t)hc_entry->rmx_sendpipe +
			    (uint64_t)hcml->rmx_sendpipe) / 2;
		atomic_store_32(&hc_entry->rmx_sendpipe, v);
		/* TCPSTAT_INC(tcps_cachedsendpipe); */
	}
	if (hcml->rmx_recvpipe != 0) {
		if (hc_entry->rmx_recvpipe == 0)
			v = hcml->rmx_recvpipe;
		else
			v = ((uint64_t)hc_entry->rmx_recvpipe +
			    (uint64_t)hcml->rmx_recvpipe) / 2;
		atomic_store_32(&hc_entry->rmx_recvpipe, v);
		/* TCPSTAT_INC(tcps_cachedrecvpipe); */
	}
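
	/*
	 * Each merge above is a simple running average: the stored value
	 * moves halfway from its cached value toward the new sample.  For
	 * example, a cached rmx_rtt of 100 updated with a sample of 60 is
	 * stored as (100 + 60) / 2 = 80.
	 */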

	/*
	 * Move the entry to the front of the bucket row (most recently used).
	 */
	if (new) {
		CK_SLIST_INSERT_HEAD(&hc_head->hch_bucket, hc_entry, rmx_q);
		hc_head->hch_length++;
		KASSERT(hc_head->hch_length <= V_tcp_hostcache.bucket_limit,
		    ("tcp_hostcache: bucket length too high at %p", hc_head));
		atomic_add_int(&V_tcp_hostcache.cache_count, 1);
		TCPSTAT_INC(tcps_hc_added);
	} else if (hc_entry != CK_SLIST_FIRST(&hc_head->hch_bucket)) {
		KASSERT(CK_SLIST_NEXT(hc_prev, rmx_q) == hc_entry,
		    ("%s: %p next is not %p", __func__, hc_prev, hc_entry));
		CK_SLIST_REMOVE_AFTER(hc_prev, rmx_q);
		CK_SLIST_INSERT_HEAD(&hc_head->hch_bucket, hc_entry, rmx_q);
	}
	THC_UNLOCK(hc_head);
}

/*
 * Sysctl function: prints the list and values of all hostcache entries in
 * unsorted order.
 */
static int
sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS)
{
	const int linesize = 128;
	struct sbuf sb;
	int i, error, len;
	struct hc_metrics *hc_entry;
	char ip4buf[INET_ADDRSTRLEN];
#ifdef INET6
	char ip6buf[INET6_ADDRSTRLEN];
#endif

	if (jailed_without_vnet(curthread->td_ucred) != 0)
		return (EPERM);

	/* Optimize the buffer length query issued by sbin/sysctl */
	if (req->oldptr == NULL) {
		len = (atomic_load_int(&V_tcp_hostcache.cache_count) + 1) *
			linesize;
		return (SYSCTL_OUT(req, NULL, len));
	}

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0) {
		return (error);
	}

	/* Use a buffer sized for one full bucket */
	sbuf_new_for_sysctl(&sb, NULL, V_tcp_hostcache.bucket_limit *
		linesize, req);

	sbuf_printf(&sb,
		"\nIP address        MTU  SSTRESH      RTT   RTTVAR "
		"    CWND SENDPIPE RECVPIPE "
#ifdef	TCP_HC_COUNTERS
		"HITS  UPD  "
#endif
		"EXP\n");
	sbuf_drain(&sb);

#define msec(u) (((u) + 500) / 1000)
	for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
		THC_LOCK(&V_tcp_hostcache.hashbase[i]);
		CK_SLIST_FOREACH(hc_entry,
		    &V_tcp_hostcache.hashbase[i].hch_bucket, rmx_q) {
			sbuf_printf(&sb,
			    "%-15s %5u %8u %6lums %6lums %8u %8u %8u "
#ifdef	TCP_HC_COUNTERS
			    "%4lu %4lu "
#endif
			    "%4i\n",
			    hc_entry->ip4.s_addr ?
			        inet_ntoa_r(hc_entry->ip4, ip4buf) :
#ifdef INET6
				ip6_sprintf(ip6buf, &hc_entry->ip6),
#else
				"IPv6?",
#endif
			    hc_entry->rmx_mtu,
			    hc_entry->rmx_ssthresh,
			    msec((u_long)hc_entry->rmx_rtt *
				(RTM_RTTUNIT / (hz * TCP_RTT_SCALE))),
			    msec((u_long)hc_entry->rmx_rttvar *
				(RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE))),
			    hc_entry->rmx_cwnd,
			    hc_entry->rmx_sendpipe,
			    hc_entry->rmx_recvpipe,
#ifdef	TCP_HC_COUNTERS
			    hc_entry->rmx_hits,
			    hc_entry->rmx_updates,
#endif
			    hc_entry->rmx_expire);
		}
		THC_UNLOCK(&V_tcp_hostcache.hashbase[i]);
		sbuf_drain(&sb);
	}
#undef msec
	error = sbuf_finish(&sb);
	sbuf_delete(&sb);
	return (error);
}

/*
 * Sysctl function: prints a histogram of the hostcache hashbucket
 * utilization.
 */
static int
sysctl_tcp_hc_histo(SYSCTL_HANDLER_ARGS)
{
	const int linesize = 50;
	struct sbuf sb;
	int i, error;
	int *histo;
	u_int hch_length;

	if (jailed_without_vnet(curthread->td_ucred) != 0)
		return (EPERM);

	histo = (int *)malloc(sizeof(int) * (V_tcp_hostcache.bucket_limit + 1),
			M_TEMP, M_NOWAIT|M_ZERO);
	if (histo == NULL)
		return (ENOMEM);

	for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
		hch_length = V_tcp_hostcache.hashbase[i].hch_length;
		KASSERT(hch_length <= V_tcp_hostcache.bucket_limit,
		    ("tcp_hostcache: bucket limit exceeded at %u: %u",
		    i, hch_length));
		histo[hch_length]++;
	}

	/* Use a buffer for 16 lines */
	sbuf_new_for_sysctl(&sb, NULL, 16 * linesize, req);

	sbuf_printf(&sb, "\nLength\tCount\n");
	for (i = 0; i <= V_tcp_hostcache.bucket_limit; i++) {
		sbuf_printf(&sb, "%u\t%u\n", i, histo[i]);
	}
	error = sbuf_finish(&sb);
	sbuf_delete(&sb);
	free(histo, M_TEMP);
	return (error);
}

/*
 * The caller has to make sure that curvnet is set properly.
 */
static void
tcp_hc_purge_internal(int all)
{
	struct hc_head *head;
	struct hc_metrics *hc_entry, *hc_next, *hc_prev;
	int i;

	for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
		head = &V_tcp_hostcache.hashbase[i];
		hc_prev = NULL;
		THC_LOCK(head);
		CK_SLIST_FOREACH_SAFE(hc_entry, &head->hch_bucket, rmx_q,
		    hc_next) {
			KASSERT(head->hch_length > 0 && head->hch_length <=
			    V_tcp_hostcache.bucket_limit, ("tcp_hostcache: "
			    "bucket length out of range at %u: %u", i,
			    head->hch_length));
			if (all ||
			    atomic_load_int(&hc_entry->rmx_expire) <= 0) {
				if (hc_prev != NULL) {
					KASSERT(hc_entry ==
					    CK_SLIST_NEXT(hc_prev, rmx_q),
					    ("%s: %p is not next to %p",
					    __func__, hc_entry, hc_prev));
					CK_SLIST_REMOVE_AFTER(hc_prev, rmx_q);
				} else {
					KASSERT(hc_entry ==
					    CK_SLIST_FIRST(&head->hch_bucket),
					    ("%s: %p is not first",
					    __func__, hc_entry));
					CK_SLIST_REMOVE_HEAD(&head->hch_bucket,
					    rmx_q);
				}
				uma_zfree_smr(V_tcp_hostcache.zone, hc_entry);
				head->hch_length--;
				atomic_subtract_int(&V_tcp_hostcache.cache_count, 1);
			} else {
				atomic_subtract_int(&hc_entry->rmx_expire,
				    V_tcp_hostcache.prune);
				hc_prev = hc_entry;
			}
		}
		THC_UNLOCK(head);
	}
}

/*
 * Expire and purge old or, on request, all entries in the tcp_hostcache.  Runs
 * periodically from the callout.
 */
static void
tcp_hc_purge(void *arg)
{
	CURVNET_SET((struct vnet *) arg);
	int all = 0;

	if (V_tcp_hostcache.purgeall) {
		if (V_tcp_hostcache.purgeall == 2)
			V_tcp_hostcache.hashsalt = arc4random();
		all = 1;
		V_tcp_hostcache.purgeall = 0;
	}

	tcp_hc_purge_internal(all);

	callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz,
	    tcp_hc_purge, arg);
	CURVNET_RESTORE();
}

/*
 * Expire and purge all entries in the hostcache immediately.
 */
static int
sysctl_tcp_hc_purgenow(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = 0;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);

	if (val == 2)
		V_tcp_hostcache.hashsalt = arc4random();
	tcp_hc_purge_internal(1);

	callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz,
	    tcp_hc_purge, curvnet);

	return (0);
}