author      Richard Scheffenegger <rscheff@FreeBSD.org>    2021-04-01 08:00:32 +0000
committer   Richard Scheffenegger <rscheff@FreeBSD.org>    2021-04-16 21:33:41 +0000
commit      254b56ac2a9ee43d10b0a4f8eeb20b722bb7fd74
tree        25f61853f96de3660dfbc6ba1a1c7a899eac2193
parent      d20563819b9292ddcd42caf3449636b84825b1d6
tcp: For hostcache performance, use atomics instead of counters
Since the TCP hostcache is accessed frequently on some classes of
servers, it was recommended to track the entry count with
atomic_add/subtract rather than with (per-CPU, distributed) counter(9)
counters, which have to be summed across all CPUs on every read at a
high cost to cache efficiency.
PR: 254333
MFC after: 2 weeks
Sponsored by: NetApp, Inc.
Reviewed By: #transport, tuexen, jtl
Differential Revision: https://reviews.freebsd.org/D29522
(cherry picked from commit 529a2a0f2765f6c57c50a5af6be242c03bf714e3)
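For context, here is a minimal illustrative sketch (not taken from the commit; the helper names are hypothetical) of the two kernel counting facilities being traded against each other: a counter(9) per-CPU counter, cheap to update but summed across all CPUs on every read, versus a plain u_int maintained with the atomic(9) operations, a single shared word that is cheap to read for a value consulted on most hostcache operations.

/*
 * Illustrative sketch only -- not part of this commit.  The helper
 * functions are hypothetical; counter(9) and atomic(9) are the real
 * in-kernel KPIs involved.
 */
#include <sys/types.h>
#include <sys/counter.h>	/* counter(9): per-CPU distributed counters */
#include <machine/atomic.h>	/* atomic(9): atomic_add_int() and friends */

/* Before: per-CPU counter; allocated once with counter_u64_alloc(M_WAITOK). */
static counter_u64_t	pcpu_count;

static void
pcpu_count_inc(void)
{
	counter_u64_add(pcpu_count, 1);		/* update: per-CPU, no sharing */
}

static uint64_t
pcpu_count_read(void)
{
	return (counter_u64_fetch(pcpu_count));	/* read: sums every CPU's slot */
}

/* After: one shared word, updated and read with atomic operations. */
static u_int	atomic_count;

static void
atomic_count_inc(void)
{
	atomic_add_int(&atomic_count, 1);	/* update: contends on one cache line */
}

static u_int
atomic_count_read(void)
{
	return (atomic_load_int(&atomic_count));	/* read: a single load */
}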
-rw-r--r--    sys/netinet/tcp_hostcache.c    24
-rw-r--r--    sys/netinet/tcp_hostcache.h     2
2 files changed, 12 insertions, 14 deletions
diff --git a/sys/netinet/tcp_hostcache.c b/sys/netinet/tcp_hostcache.c
index 0b5a212c4897..47846040d9db 100644
--- a/sys/netinet/tcp_hostcache.c
+++ b/sys/netinet/tcp_hostcache.c
@@ -144,8 +144,8 @@ SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, bucketlimit,
     CTLFLAG_VNET | CTLFLAG_RDTUN, &VNET_NAME(tcp_hostcache.bucket_limit), 0,
     "Per-bucket hash limit for hostcache");
 
-SYSCTL_COUNTER_U64(_net_inet_tcp_hostcache, OID_AUTO, count, CTLFLAG_VNET | CTLFLAG_RD,
-    &VNET_NAME(tcp_hostcache.cache_count),
+SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, count, CTLFLAG_VNET | CTLFLAG_RD,
+    &VNET_NAME(tcp_hostcache.cache_count), 0,
     "Current number of entries in hostcache");
 
 SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, expire, CTLFLAG_VNET | CTLFLAG_RW,
@@ -194,8 +194,7 @@ tcp_hc_init(void)
 	/*
 	 * Initialize hostcache structures.
 	 */
-	V_tcp_hostcache.cache_count = counter_u64_alloc(M_WAITOK);
-	counter_u64_zero(V_tcp_hostcache.cache_count);
+	atomic_store_int(&V_tcp_hostcache.cache_count, 0);
 	V_tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE;
 	V_tcp_hostcache.bucket_limit = TCP_HOSTCACHE_BUCKETLIMIT;
 	V_tcp_hostcache.expire = TCP_HOSTCACHE_EXPIRE;
@@ -263,9 +262,6 @@ tcp_hc_destroy(void)
 	/* Purge all hc entries. */
 	tcp_hc_purge_internal(1);
 
-	/* Release the counter */
-	counter_u64_free(V_tcp_hostcache.cache_count);
-
 	/* Free the uma zone and the allocated hash table. */
 	uma_zdestroy(V_tcp_hostcache.zone);
 
@@ -373,7 +369,7 @@ tcp_hc_insert(struct in_conninfo *inc)
 	 * If the bucket limit is reached, reuse the least-used element.
 	 */
 	if (hc_head->hch_length >= V_tcp_hostcache.bucket_limit ||
-	    counter_u64_fetch(V_tcp_hostcache.cache_count) >= V_tcp_hostcache.cache_limit) {
+	    atomic_load_int(&V_tcp_hostcache.cache_count) >= V_tcp_hostcache.cache_limit) {
 		hc_entry = TAILQ_LAST(&hc_head->hch_bucket, hc_qhead);
 		/*
 		 * At first we were dropping the last element, just to
@@ -390,7 +386,7 @@ tcp_hc_insert(struct in_conninfo *inc)
 		}
 		TAILQ_REMOVE(&hc_head->hch_bucket, hc_entry, rmx_q);
 		V_tcp_hostcache.hashbase[hash].hch_length--;
-		counter_u64_add(V_tcp_hostcache.cache_count, -1);
+		atomic_subtract_int(&V_tcp_hostcache.cache_count, 1);
 		TCPSTAT_INC(tcps_hc_bucketoverflow);
 #if 0
 		uma_zfree(V_tcp_hostcache.zone, hc_entry);
@@ -423,7 +419,7 @@ tcp_hc_insert(struct in_conninfo *inc)
 	 */
 	TAILQ_INSERT_HEAD(&hc_head->hch_bucket, hc_entry, rmx_q);
 	V_tcp_hostcache.hashbase[hash].hch_length++;
-	counter_u64_add(V_tcp_hostcache.cache_count, 1);
+	atomic_add_int(&V_tcp_hostcache.cache_count, 1);
 	TCPSTAT_INC(tcps_hc_added);
 
 	return hc_entry;
@@ -637,7 +633,8 @@ sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS)
 
 	/* Optimize Buffer length query by sbin/sysctl */
 	if (req->oldptr == NULL) {
-		len = (counter_u64_fetch(V_tcp_hostcache.cache_count) + 1) * linesize;
+		len = (atomic_load_int(&V_tcp_hostcache.cache_count) + 1) *
+			linesize;
 		return (SYSCTL_OUT(req, NULL, len));
 	}
 
@@ -647,7 +644,8 @@ sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS)
 	}
 
 	/* Use a buffer sized for one full bucket */
-	sbuf_new_for_sysctl(&sb, NULL, V_tcp_hostcache.bucket_limit * linesize, req);
+	sbuf_new_for_sysctl(&sb, NULL, V_tcp_hostcache.bucket_limit *
+	    linesize, req);
 
 	sbuf_printf(&sb,
 	    "\nIP address MTU SSTRESH RTT RTTVAR "
@@ -709,7 +707,7 @@ tcp_hc_purge_internal(int all)
 				    hc_entry, rmx_q);
 				uma_zfree(V_tcp_hostcache.zone, hc_entry);
 				V_tcp_hostcache.hashbase[i].hch_length--;
-				counter_u64_add(V_tcp_hostcache.cache_count, -1);
+				atomic_subtract_int(&V_tcp_hostcache.cache_count, 1);
 			} else
 				hc_entry->rmx_expire -= V_tcp_hostcache.prune;
 		}
diff --git a/sys/netinet/tcp_hostcache.h b/sys/netinet/tcp_hostcache.h
index 4eddfaa806c5..db8ab4bf4d08 100644
--- a/sys/netinet/tcp_hostcache.h
+++ b/sys/netinet/tcp_hostcache.h
@@ -72,7 +72,7 @@ struct tcp_hostcache {
 	u_int	hashsize;
 	u_int	hashmask;
 	u_int	bucket_limit;
-	counter_u64_t	cache_count;
+	u_int	cache_count;
 	u_int	cache_limit;
 	int	expire;
 	int	prune;
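With the struct field now a u_int, the sysctl node also changes from SYSCTL_COUNTER_U64 to SYSCTL_UINT, so reading net.inet.tcp.hostcache.count returns the shared word with a single load instead of summing per-CPU slots. The trade-off, per the commit message, is that every insert and removal now updates one shared cache line; since the count is compared against the bucket and cache limits on essentially every insert, cheap reads were judged to matter more here than contention-free updates.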