summary refs log tree commit diff stats
path: root/sys/netinet/tcp_hostcache.c
diff options
context:
space:
mode:
Diffstat (limited to 'sys/netinet/tcp_hostcache.c')
-rw-r--r--  sys/netinet/tcp_hostcache.c  133
1 files changed, 67 insertions, 66 deletions
diff --git a/sys/netinet/tcp_hostcache.c b/sys/netinet/tcp_hostcache.c
index 3a68607..bf8f616 100644
--- a/sys/netinet/tcp_hostcache.c
+++ b/sys/netinet/tcp_hostcache.c
@@ -76,6 +76,7 @@ __FBSDID("$FreeBSD$");
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
+#include <sys/vimage.h>
#include <net/if.h>
@@ -186,7 +187,7 @@ static MALLOC_DEFINE(M_HOSTCACHE, "hostcache", "TCP hostcache");
#define HOSTCACHE_HASH(ip) \
(((ip)->s_addr ^ ((ip)->s_addr >> 7) ^ ((ip)->s_addr >> 17)) & \
- tcp_hostcache.hashmask)
+ V_tcp_hostcache.hashmask)
/* XXX: What is the recommended hash to get good entropy for IPv6 addresses? */
#define HOSTCACHE_HASH6(ip6) \
@@ -194,7 +195,7 @@ static MALLOC_DEFINE(M_HOSTCACHE, "hostcache", "TCP hostcache");
(ip6)->s6_addr32[1] ^ \
(ip6)->s6_addr32[2] ^ \
(ip6)->s6_addr32[3]) & \
- tcp_hostcache.hashmask)
+ V_tcp_hostcache.hashmask)
#define THC_LOCK(lp) mtx_lock(lp)
#define THC_UNLOCK(lp) mtx_unlock(lp)
@@ -207,55 +208,55 @@ tcp_hc_init(void)
/*
* Initialize hostcache structures.
*/
- tcp_hostcache.cache_count = 0;
- tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE;
- tcp_hostcache.bucket_limit = TCP_HOSTCACHE_BUCKETLIMIT;
- tcp_hostcache.cache_limit =
- tcp_hostcache.hashsize * tcp_hostcache.bucket_limit;
- tcp_hostcache.expire = TCP_HOSTCACHE_EXPIRE;
- tcp_hostcache.prune = TCP_HOSTCACHE_PRUNE;
+ V_tcp_hostcache.cache_count = 0;
+ V_tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE;
+ V_tcp_hostcache.bucket_limit = TCP_HOSTCACHE_BUCKETLIMIT;
+ V_tcp_hostcache.cache_limit =
+ V_tcp_hostcache.hashsize * V_tcp_hostcache.bucket_limit;
+ V_tcp_hostcache.expire = TCP_HOSTCACHE_EXPIRE;
+ V_tcp_hostcache.prune = TCP_HOSTCACHE_PRUNE;
TUNABLE_INT_FETCH("net.inet.tcp.hostcache.hashsize",
- &tcp_hostcache.hashsize);
+ &V_tcp_hostcache.hashsize);
TUNABLE_INT_FETCH("net.inet.tcp.hostcache.cachelimit",
- &tcp_hostcache.cache_limit);
+ &V_tcp_hostcache.cache_limit);
TUNABLE_INT_FETCH("net.inet.tcp.hostcache.bucketlimit",
- &tcp_hostcache.bucket_limit);
- if (!powerof2(tcp_hostcache.hashsize)) {
+ &V_tcp_hostcache.bucket_limit);
+ if (!powerof2(V_tcp_hostcache.hashsize)) {
printf("WARNING: hostcache hash size is not a power of 2.\n");
- tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE; /* default */
+ V_tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE; /* default */
}
- tcp_hostcache.hashmask = tcp_hostcache.hashsize - 1;
+ V_tcp_hostcache.hashmask = V_tcp_hostcache.hashsize - 1;
/*
* Allocate the hash table.
*/
- tcp_hostcache.hashbase = (struct hc_head *)
- malloc(tcp_hostcache.hashsize * sizeof(struct hc_head),
+ V_tcp_hostcache.hashbase = (struct hc_head *)
+ malloc(V_tcp_hostcache.hashsize * sizeof(struct hc_head),
M_HOSTCACHE, M_WAITOK | M_ZERO);
/*
* Initialize the hash buckets.
*/
- for (i = 0; i < tcp_hostcache.hashsize; i++) {
- TAILQ_INIT(&tcp_hostcache.hashbase[i].hch_bucket);
- tcp_hostcache.hashbase[i].hch_length = 0;
- mtx_init(&tcp_hostcache.hashbase[i].hch_mtx, "tcp_hc_entry",
+ for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
+ TAILQ_INIT(&V_tcp_hostcache.hashbase[i].hch_bucket);
+ V_tcp_hostcache.hashbase[i].hch_length = 0;
+ mtx_init(&V_tcp_hostcache.hashbase[i].hch_mtx, "tcp_hc_entry",
NULL, MTX_DEF);
}
/*
* Allocate the hostcache entries.
*/
- tcp_hostcache.zone = uma_zcreate("hostcache", sizeof(struct hc_metrics),
+ V_tcp_hostcache.zone = uma_zcreate("hostcache", sizeof(struct hc_metrics),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
- uma_zone_set_max(tcp_hostcache.zone, tcp_hostcache.cache_limit);
+ uma_zone_set_max(V_tcp_hostcache.zone, V_tcp_hostcache.cache_limit);
/*
* Set up periodic cache cleanup.
*/
- callout_init(&tcp_hc_callout, CALLOUT_MPSAFE);
- callout_reset(&tcp_hc_callout, tcp_hostcache.prune * hz, tcp_hc_purge, 0);
+ callout_init(&V_tcp_hc_callout, CALLOUT_MPSAFE);
+ callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz, tcp_hc_purge, 0);
}
/*
@@ -281,7 +282,7 @@ tcp_hc_lookup(struct in_conninfo *inc)
else
hash = HOSTCACHE_HASH(&inc->inc_faddr);
- hc_head = &tcp_hostcache.hashbase[hash];
+ hc_head = &V_tcp_hostcache.hashbase[hash];
/*
* Acquire lock for this bucket row; we release the lock if we don't
@@ -336,7 +337,7 @@ tcp_hc_insert(struct in_conninfo *inc)
else
hash = HOSTCACHE_HASH(&inc->inc_faddr);
- hc_head = &tcp_hostcache.hashbase[hash];
+ hc_head = &V_tcp_hostcache.hashbase[hash];
/*
* Acquire lock for this bucket row; we release the lock if we don't
@@ -348,8 +349,8 @@ tcp_hc_insert(struct in_conninfo *inc)
/*
* If the bucket limit is reached, reuse the least-used element.
*/
- if (hc_head->hch_length >= tcp_hostcache.bucket_limit ||
- tcp_hostcache.cache_count >= tcp_hostcache.cache_limit) {
+ if (hc_head->hch_length >= V_tcp_hostcache.bucket_limit ||
+ V_tcp_hostcache.cache_count >= V_tcp_hostcache.cache_limit) {
hc_entry = TAILQ_LAST(&hc_head->hch_bucket, hc_qhead);
/*
* At first we were dropping the last element, just to
@@ -365,17 +366,17 @@ tcp_hc_insert(struct in_conninfo *inc)
return NULL;
}
TAILQ_REMOVE(&hc_head->hch_bucket, hc_entry, rmx_q);
- tcp_hostcache.hashbase[hash].hch_length--;
- tcp_hostcache.cache_count--;
- tcpstat.tcps_hc_bucketoverflow++;
+ V_tcp_hostcache.hashbase[hash].hch_length--;
+ V_tcp_hostcache.cache_count--;
+ V_tcpstat.tcps_hc_bucketoverflow++;
#if 0
- uma_zfree(tcp_hostcache.zone, hc_entry);
+ uma_zfree(V_tcp_hostcache.zone, hc_entry);
#endif
} else {
/*
* Allocate a new entry, or balk if not possible.
*/
- hc_entry = uma_zalloc(tcp_hostcache.zone, M_NOWAIT);
+ hc_entry = uma_zalloc(V_tcp_hostcache.zone, M_NOWAIT);
if (hc_entry == NULL) {
THC_UNLOCK(&hc_head->hch_mtx);
return NULL;
@@ -391,15 +392,15 @@ tcp_hc_insert(struct in_conninfo *inc)
else
hc_entry->ip4 = inc->inc_faddr;
hc_entry->rmx_head = hc_head;
- hc_entry->rmx_expire = tcp_hostcache.expire;
+ hc_entry->rmx_expire = V_tcp_hostcache.expire;
/*
* Put it upfront.
*/
TAILQ_INSERT_HEAD(&hc_head->hch_bucket, hc_entry, rmx_q);
- tcp_hostcache.hashbase[hash].hch_length++;
- tcp_hostcache.cache_count++;
- tcpstat.tcps_hc_added++;
+ V_tcp_hostcache.hashbase[hash].hch_length++;
+ V_tcp_hostcache.cache_count++;
+ V_tcpstat.tcps_hc_added++;
return hc_entry;
}
@@ -427,7 +428,7 @@ tcp_hc_get(struct in_conninfo *inc, struct hc_metrics_lite *hc_metrics_lite)
return;
}
hc_entry->rmx_hits++;
- hc_entry->rmx_expire = tcp_hostcache.expire; /* start over again */
+ hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */
hc_metrics_lite->rmx_mtu = hc_entry->rmx_mtu;
hc_metrics_lite->rmx_ssthresh = hc_entry->rmx_ssthresh;
@@ -460,7 +461,7 @@ tcp_hc_getmtu(struct in_conninfo *inc)
return 0;
}
hc_entry->rmx_hits++;
- hc_entry->rmx_expire = tcp_hostcache.expire; /* start over again */
+ hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */
mtu = hc_entry->rmx_mtu;
THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
@@ -490,7 +491,7 @@ tcp_hc_updatemtu(struct in_conninfo *inc, u_long mtu)
return;
}
hc_entry->rmx_updates++;
- hc_entry->rmx_expire = tcp_hostcache.expire; /* start over again */
+ hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */
hc_entry->rmx_mtu = mtu;
@@ -522,7 +523,7 @@ tcp_hc_update(struct in_conninfo *inc, struct hc_metrics_lite *hcml)
return;
}
hc_entry->rmx_updates++;
- hc_entry->rmx_expire = tcp_hostcache.expire; /* start over again */
+ hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */
if (hcml->rmx_rtt != 0) {
if (hc_entry->rmx_rtt == 0)
@@ -530,7 +531,7 @@ tcp_hc_update(struct in_conninfo *inc, struct hc_metrics_lite *hcml)
else
hc_entry->rmx_rtt =
(hc_entry->rmx_rtt + hcml->rmx_rtt) / 2;
- tcpstat.tcps_cachedrtt++;
+ V_tcpstat.tcps_cachedrtt++;
}
if (hcml->rmx_rttvar != 0) {
if (hc_entry->rmx_rttvar == 0)
@@ -538,7 +539,7 @@ tcp_hc_update(struct in_conninfo *inc, struct hc_metrics_lite *hcml)
else
hc_entry->rmx_rttvar =
(hc_entry->rmx_rttvar + hcml->rmx_rttvar) / 2;
- tcpstat.tcps_cachedrttvar++;
+ V_tcpstat.tcps_cachedrttvar++;
}
if (hcml->rmx_ssthresh != 0) {
if (hc_entry->rmx_ssthresh == 0)
@@ -546,7 +547,7 @@ tcp_hc_update(struct in_conninfo *inc, struct hc_metrics_lite *hcml)
else
hc_entry->rmx_ssthresh =
(hc_entry->rmx_ssthresh + hcml->rmx_ssthresh) / 2;
- tcpstat.tcps_cachedssthresh++;
+ V_tcpstat.tcps_cachedssthresh++;
}
if (hcml->rmx_bandwidth != 0) {
if (hc_entry->rmx_bandwidth == 0)
@@ -554,7 +555,7 @@ tcp_hc_update(struct in_conninfo *inc, struct hc_metrics_lite *hcml)
else
hc_entry->rmx_bandwidth =
(hc_entry->rmx_bandwidth + hcml->rmx_bandwidth) / 2;
- /* tcpstat.tcps_cachedbandwidth++; */
+ /* V_tcpstat.tcps_cachedbandwidth++; */
}
if (hcml->rmx_cwnd != 0) {
if (hc_entry->rmx_cwnd == 0)
@@ -562,7 +563,7 @@ tcp_hc_update(struct in_conninfo *inc, struct hc_metrics_lite *hcml)
else
hc_entry->rmx_cwnd =
(hc_entry->rmx_cwnd + hcml->rmx_cwnd) / 2;
- /* tcpstat.tcps_cachedcwnd++; */
+ /* V_tcpstat.tcps_cachedcwnd++; */
}
if (hcml->rmx_sendpipe != 0) {
if (hc_entry->rmx_sendpipe == 0)
@@ -570,7 +571,7 @@ tcp_hc_update(struct in_conninfo *inc, struct hc_metrics_lite *hcml)
else
hc_entry->rmx_sendpipe =
(hc_entry->rmx_sendpipe + hcml->rmx_sendpipe) /2;
- /* tcpstat.tcps_cachedsendpipe++; */
+ /* V_tcpstat.tcps_cachedsendpipe++; */
}
if (hcml->rmx_recvpipe != 0) {
if (hc_entry->rmx_recvpipe == 0)
@@ -578,7 +579,7 @@ tcp_hc_update(struct in_conninfo *inc, struct hc_metrics_lite *hcml)
else
hc_entry->rmx_recvpipe =
(hc_entry->rmx_recvpipe + hcml->rmx_recvpipe) /2;
- /* tcpstat.tcps_cachedrecvpipe++; */
+ /* V_tcpstat.tcps_cachedrecvpipe++; */
}
TAILQ_REMOVE(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
@@ -602,7 +603,7 @@ sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS)
char ip6buf[INET6_ADDRSTRLEN];
#endif
- bufsize = linesize * (tcp_hostcache.cache_count + 1);
+ bufsize = linesize * (V_tcp_hostcache.cache_count + 1);
p = buf = (char *)malloc(bufsize, M_TEMP, M_WAITOK|M_ZERO);
@@ -612,9 +613,9 @@ sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS)
p += len;
#define msec(u) (((u) + 500) / 1000)
- for (i = 0; i < tcp_hostcache.hashsize; i++) {
- THC_LOCK(&tcp_hostcache.hashbase[i].hch_mtx);
- TAILQ_FOREACH(hc_entry, &tcp_hostcache.hashbase[i].hch_bucket,
+ for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
+ THC_LOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
+ TAILQ_FOREACH(hc_entry, &V_tcp_hostcache.hashbase[i].hch_bucket,
rmx_q) {
len = snprintf(p, linesize,
"%-15s %5lu %8lu %6lums %6lums %9lu %8lu %8lu %8lu "
@@ -640,7 +641,7 @@ sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS)
hc_entry->rmx_expire);
p += len;
}
- THC_UNLOCK(&tcp_hostcache.hashbase[i].hch_mtx);
+ THC_UNLOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
}
#undef msec
error = SYSCTL_OUT(req, buf, p - buf);
@@ -659,25 +660,25 @@ tcp_hc_purge(void *arg)
int all = (intptr_t)arg;
int i;
- if (tcp_hostcache.purgeall) {
+ if (V_tcp_hostcache.purgeall) {
all = 1;
- tcp_hostcache.purgeall = 0;
+ V_tcp_hostcache.purgeall = 0;
}
- for (i = 0; i < tcp_hostcache.hashsize; i++) {
- THC_LOCK(&tcp_hostcache.hashbase[i].hch_mtx);
- TAILQ_FOREACH_SAFE(hc_entry, &tcp_hostcache.hashbase[i].hch_bucket,
+ for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
+ THC_LOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
+ TAILQ_FOREACH_SAFE(hc_entry, &V_tcp_hostcache.hashbase[i].hch_bucket,
rmx_q, hc_next) {
if (all || hc_entry->rmx_expire <= 0) {
- TAILQ_REMOVE(&tcp_hostcache.hashbase[i].hch_bucket,
+ TAILQ_REMOVE(&V_tcp_hostcache.hashbase[i].hch_bucket,
hc_entry, rmx_q);
- uma_zfree(tcp_hostcache.zone, hc_entry);
- tcp_hostcache.hashbase[i].hch_length--;
- tcp_hostcache.cache_count--;
+ uma_zfree(V_tcp_hostcache.zone, hc_entry);
+ V_tcp_hostcache.hashbase[i].hch_length--;
+ V_tcp_hostcache.cache_count--;
} else
- hc_entry->rmx_expire -= tcp_hostcache.prune;
+ hc_entry->rmx_expire -= V_tcp_hostcache.prune;
}
- THC_UNLOCK(&tcp_hostcache.hashbase[i].hch_mtx);
+ THC_UNLOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
}
- callout_reset(&tcp_hc_callout, tcp_hostcache.prune * hz, tcp_hc_purge, 0);
+ callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz, tcp_hc_purge, 0);
}
OpenPOWER on IntegriCloud