Diffstat (limited to 'sys/netinet/tcp_syncache.c')
-rw-r--r--  sys/netinet/tcp_syncache.c  157
1 file changed, 79 insertions(+), 78 deletions(-)
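
What follows is the VIMAGE virtualization pass over the TCP syncache: every direct reference to the global tcp_syncache, tcpstat, tcbinfo, and a handful of sysctl-backed tunables (tcp_do_rfc1323, tcp_do_ecn, tcp_minmss, path_mtu_discovery) is rewritten through a V_ accessor macro, and <sys/vimage.h> is added to supply those macros. As a rough orientation only — the real definitions live in <sys/vimage.h> and changed over the course of the VIMAGE work — the transitional macros of this era map each V_ name either straight back to the global or through the current virtual network stack, along these lines:

    /* Hypothetical sketch of the V_ indirection; member names are illustrative. */
    #ifdef VIMAGE
    #define V_tcp_syncache  (curvnet->vnet_inet.tcp_syncache)  /* per-vnet instance */
    #define V_tcpstat       (curvnet->vnet_inet.tcpstat)
    #else
    #define V_tcp_syncache  tcp_syncache    /* collapses to the classic global */
    #define V_tcpstat       tcpstat
    #endif

On a non-VIMAGE kernel the macros compile away to the original identifiers, which is why the change below can be applied mechanically, one symbol at a time, without altering behavior.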
diff --git a/sys/netinet/tcp_syncache.c b/sys/netinet/tcp_syncache.c
index 3f7a9bc..fd580d2 100644
--- a/sys/netinet/tcp_syncache.c
+++ b/sys/netinet/tcp_syncache.c
@@ -53,6 +53,7 @@ __FBSDID("$FreeBSD$");
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/syslog.h>
+#include <sys/vimage.h>
#include <vm/uma.h>
@@ -231,13 +232,13 @@ SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, rst_on_sock_fail, CTLFLAG_RW,
static MALLOC_DEFINE(M_SYNCACHE, "syncache", "TCP syncache");
#define SYNCACHE_HASH(inc, mask) \
- ((tcp_syncache.hash_secret ^ \
+ ((V_tcp_syncache.hash_secret ^ \
(inc)->inc_faddr.s_addr ^ \
((inc)->inc_faddr.s_addr >> 16) ^ \
(inc)->inc_fport ^ (inc)->inc_lport) & mask)
#define SYNCACHE_HASH6(inc, mask) \
- ((tcp_syncache.hash_secret ^ \
+ ((V_tcp_syncache.hash_secret ^ \
(inc)->inc6_faddr.s6_addr32[0] ^ \
(inc)->inc6_faddr.s6_addr32[3] ^ \
(inc)->inc_fport ^ (inc)->inc_lport) & mask)
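
The trailing "& mask" in both hash macros relies on an invariant that syncache_init() below establishes: hashmask is hashsize - 1 and hashsize is forced to a power of two, so a single AND reduces the XOR mix of the per-stack hash_secret, foreign address, and ports to a bucket index. A hypothetical illustration, assuming a default 512-bucket table (mask 0x1ff):

    /* Illustrative only: any 32-bit mix is reduced to a bucket index 0..511. */
    u_int32_t idx = (secret ^ faddr ^ (faddr >> 16) ^ fport ^ lport) & 0x1ff;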
@@ -267,7 +268,7 @@ syncache_free(struct syncache *sc)
mac_syncache_destroy(&sc->sc_label);
#endif
- uma_zfree(tcp_syncache.zone, sc);
+ uma_zfree(V_tcp_syncache.zone, sc);
}
void
@@ -275,47 +276,47 @@ syncache_init(void)
{
int i;
- tcp_syncache.cache_count = 0;
- tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
- tcp_syncache.bucket_limit = TCP_SYNCACHE_BUCKETLIMIT;
- tcp_syncache.rexmt_limit = SYNCACHE_MAXREXMTS;
- tcp_syncache.hash_secret = arc4random();
+ V_tcp_syncache.cache_count = 0;
+ V_tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
+ V_tcp_syncache.bucket_limit = TCP_SYNCACHE_BUCKETLIMIT;
+ V_tcp_syncache.rexmt_limit = SYNCACHE_MAXREXMTS;
+ V_tcp_syncache.hash_secret = arc4random();
TUNABLE_INT_FETCH("net.inet.tcp.syncache.hashsize",
- &tcp_syncache.hashsize);
+ &V_tcp_syncache.hashsize);
TUNABLE_INT_FETCH("net.inet.tcp.syncache.bucketlimit",
- &tcp_syncache.bucket_limit);
- if (!powerof2(tcp_syncache.hashsize) || tcp_syncache.hashsize == 0) {
+ &V_tcp_syncache.bucket_limit);
+ if (!powerof2(V_tcp_syncache.hashsize) || V_tcp_syncache.hashsize == 0) {
printf("WARNING: syncache hash size is not a power of 2.\n");
- tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
+ V_tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
}
- tcp_syncache.hashmask = tcp_syncache.hashsize - 1;
+ V_tcp_syncache.hashmask = V_tcp_syncache.hashsize - 1;
/* Set limits. */
- tcp_syncache.cache_limit =
- tcp_syncache.hashsize * tcp_syncache.bucket_limit;
+ V_tcp_syncache.cache_limit =
+ V_tcp_syncache.hashsize * V_tcp_syncache.bucket_limit;
TUNABLE_INT_FETCH("net.inet.tcp.syncache.cachelimit",
- &tcp_syncache.cache_limit);
+ &V_tcp_syncache.cache_limit);
/* Allocate the hash table. */
- MALLOC(tcp_syncache.hashbase, struct syncache_head *,
- tcp_syncache.hashsize * sizeof(struct syncache_head),
+ MALLOC(V_tcp_syncache.hashbase, struct syncache_head *,
+ V_tcp_syncache.hashsize * sizeof(struct syncache_head),
M_SYNCACHE, M_WAITOK | M_ZERO);
/* Initialize the hash buckets. */
- for (i = 0; i < tcp_syncache.hashsize; i++) {
- TAILQ_INIT(&tcp_syncache.hashbase[i].sch_bucket);
- mtx_init(&tcp_syncache.hashbase[i].sch_mtx, "tcp_sc_head",
+ for (i = 0; i < V_tcp_syncache.hashsize; i++) {
+ TAILQ_INIT(&V_tcp_syncache.hashbase[i].sch_bucket);
+ mtx_init(&V_tcp_syncache.hashbase[i].sch_mtx, "tcp_sc_head",
NULL, MTX_DEF);
- callout_init_mtx(&tcp_syncache.hashbase[i].sch_timer,
- &tcp_syncache.hashbase[i].sch_mtx, 0);
- tcp_syncache.hashbase[i].sch_length = 0;
+ callout_init_mtx(&V_tcp_syncache.hashbase[i].sch_timer,
+ &V_tcp_syncache.hashbase[i].sch_mtx, 0);
+ V_tcp_syncache.hashbase[i].sch_length = 0;
}
/* Create the syncache entry zone. */
- tcp_syncache.zone = uma_zcreate("syncache", sizeof(struct syncache),
+ V_tcp_syncache.zone = uma_zcreate("syncache", sizeof(struct syncache),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
- uma_zone_set_max(tcp_syncache.zone, tcp_syncache.cache_limit);
+ uma_zone_set_max(V_tcp_syncache.zone, V_tcp_syncache.cache_limit);
}
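
For scale: with the defaults assumed from FreeBSD sources of this era (TCP_SYNCACHE_HASHSIZE of 512 buckets, TCP_SYNCACHE_BUCKETLIMIT of 30 entries per bucket — both overridable via the tunables fetched above), the derived cache_limit works out to 512 * 30 = 15360 entries, and uma_zone_set_max() pins the entry zone to that figure unless net.inet.tcp.syncache.cachelimit overrides it.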
/*
@@ -333,12 +334,12 @@ syncache_insert(struct syncache *sc, struct syncache_head *sch)
* Make sure that we don't overflow the per-bucket limit.
* If the bucket is full, toss the oldest element.
*/
- if (sch->sch_length >= tcp_syncache.bucket_limit) {
+ if (sch->sch_length >= V_tcp_syncache.bucket_limit) {
KASSERT(!TAILQ_EMPTY(&sch->sch_bucket),
("sch->sch_length incorrect"));
sc2 = TAILQ_LAST(&sch->sch_bucket, sch_head);
syncache_drop(sc2, sch);
- tcpstat.tcps_sc_bucketoverflow++;
+ V_tcpstat.tcps_sc_bucketoverflow++;
}
/* Put it into the bucket. */
@@ -352,8 +353,8 @@ syncache_insert(struct syncache *sc, struct syncache_head *sch)
SCH_UNLOCK(sch);
- tcp_syncache.cache_count++;
- tcpstat.tcps_sc_added++;
+ V_tcp_syncache.cache_count++;
+ V_tcpstat.tcps_sc_added++;
}
/*
@@ -374,7 +375,7 @@ syncache_drop(struct syncache *sc, struct syncache_head *sch)
sc->sc_tu->tu_syncache_event(TOE_SC_DROP, sc->sc_toepcb);
#endif
syncache_free(sc);
- tcp_syncache.cache_count--;
+ V_tcp_syncache.cache_count--;
}
/*
@@ -430,7 +431,7 @@ syncache_timer(void *xsch)
sch->sch_nextc = sc->sc_rxttime;
continue;
}
- if (sc->sc_rxmits > tcp_syncache.rexmt_limit) {
+ if (sc->sc_rxmits > V_tcp_syncache.rexmt_limit) {
if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
log(LOG_DEBUG, "%s; %s: Retransmits exhausted, "
"giving up and removing syncache entry\n",
@@ -438,7 +439,7 @@ syncache_timer(void *xsch)
free(s, M_TCPLOG);
}
syncache_drop(sc, sch);
- tcpstat.tcps_sc_stale++;
+ V_tcpstat.tcps_sc_stale++;
continue;
}
if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
@@ -449,7 +450,7 @@ syncache_timer(void *xsch)
}
(void) syncache_respond(sc);
- tcpstat.tcps_sc_retransmitted++;
+ V_tcpstat.tcps_sc_retransmitted++;
syncache_timeout(sc, sch, 0);
}
if (!TAILQ_EMPTY(&(sch)->sch_bucket))
@@ -469,8 +470,8 @@ syncache_lookup(struct in_conninfo *inc, struct syncache_head **schp)
#ifdef INET6
if (inc->inc_isipv6) {
- sch = &tcp_syncache.hashbase[
- SYNCACHE_HASH6(inc, tcp_syncache.hashmask)];
+ sch = &V_tcp_syncache.hashbase[
+ SYNCACHE_HASH6(inc, V_tcp_syncache.hashmask)];
*schp = sch;
SCH_LOCK(sch);
@@ -483,8 +484,8 @@ syncache_lookup(struct in_conninfo *inc, struct syncache_head **schp)
} else
#endif
{
- sch = &tcp_syncache.hashbase[
- SYNCACHE_HASH(inc, tcp_syncache.hashmask)];
+ sch = &V_tcp_syncache.hashbase[
+ SYNCACHE_HASH(inc, V_tcp_syncache.hashmask)];
*schp = sch;
SCH_LOCK(sch);
@@ -526,7 +527,7 @@ syncache_chkrst(struct in_conninfo *inc, struct tcphdr *th)
if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
log(LOG_DEBUG, "%s; %s: Spurious RST with ACK, SYN or "
"FIN flag set, segment ignored\n", s, __func__);
- tcpstat.tcps_badrst++;
+ V_tcpstat.tcps_badrst++;
goto done;
}
@@ -543,7 +544,7 @@ syncache_chkrst(struct in_conninfo *inc, struct tcphdr *th)
log(LOG_DEBUG, "%s; %s: Spurious RST without matching "
"syncache entry (possibly syncookie only), "
"segment ignored\n", s, __func__);
- tcpstat.tcps_badrst++;
+ V_tcpstat.tcps_badrst++;
goto done;
}
@@ -567,13 +568,13 @@ syncache_chkrst(struct in_conninfo *inc, struct tcphdr *th)
log(LOG_DEBUG, "%s; %s: Our SYN|ACK was rejected, "
"connection attempt aborted by remote endpoint\n",
s, __func__);
- tcpstat.tcps_sc_reset++;
+ V_tcpstat.tcps_sc_reset++;
} else {
if ((s = tcp_log_addrs(inc, th, NULL, NULL)))
log(LOG_DEBUG, "%s; %s: RST with invalid SEQ %u != "
"IRS %u (+WND %u), segment ignored\n",
s, __func__, th->th_seq, sc->sc_irs, sc->sc_wnd);
- tcpstat.tcps_badrst++;
+ V_tcpstat.tcps_badrst++;
}
done:
@@ -592,7 +593,7 @@ syncache_badack(struct in_conninfo *inc)
SCH_LOCK_ASSERT(sch);
if (sc != NULL) {
syncache_drop(sc, sch);
- tcpstat.tcps_sc_badack++;
+ V_tcpstat.tcps_sc_badack++;
}
SCH_UNLOCK(sch);
}
@@ -625,7 +626,7 @@ syncache_unreach(struct in_conninfo *inc, struct tcphdr *th)
goto done;
}
syncache_drop(sc, sch);
- tcpstat.tcps_sc_unreach++;
+ V_tcpstat.tcps_sc_unreach++;
done:
SCH_UNLOCK(sch);
}
@@ -641,7 +642,7 @@ syncache_socket(struct syncache *sc, struct socket *lso, struct mbuf *m)
struct tcpcb *tp;
char *s;
- INP_INFO_WLOCK_ASSERT(&tcbinfo);
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
/*
* Ok, create the full blown connection, and set things up
@@ -656,7 +657,7 @@ syncache_socket(struct syncache *sc, struct socket *lso, struct mbuf *m)
* have the peer retransmit its SYN again after its
* RTO and try again.
*/
- tcpstat.tcps_listendrop++;
+ V_tcpstat.tcps_listendrop++;
if ((s = tcp_log_addrs(&sc->sc_inc, NULL, NULL, NULL))) {
log(LOG_DEBUG, "%s; %s: Socket create failed "
"due to limits or memory shortage\n",
@@ -826,7 +827,7 @@ syncache_socket(struct syncache *sc, struct socket *lso, struct mbuf *m)
INP_WUNLOCK(inp);
- tcpstat.tcps_accepts++;
+ V_tcpstat.tcps_accepts++;
return (so);
abort:
@@ -857,7 +858,7 @@ syncache_expand(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
* Global TCP locks are held because we manipulate the PCB lists
* and create a new socket.
*/
- INP_INFO_WLOCK_ASSERT(&tcbinfo);
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
KASSERT((th->th_flags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK,
("%s: can handle only ACK", __func__));
@@ -895,7 +896,7 @@ syncache_expand(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
/* Pull out the entry to unlock the bucket row. */
TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash);
sch->sch_length--;
- tcp_syncache.cache_count--;
+ V_tcp_syncache.cache_count--;
SCH_UNLOCK(sch);
}
@@ -945,9 +946,9 @@ syncache_expand(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
*lsop = syncache_socket(sc, *lsop, m);
if (*lsop == NULL)
- tcpstat.tcps_sc_aborted++;
+ V_tcpstat.tcps_sc_aborted++;
else
- tcpstat.tcps_sc_completed++;
+ V_tcpstat.tcps_sc_completed++;
/* how do we find the inp for the new socket? */
if (sc != &scs)
@@ -968,9 +969,9 @@ tcp_offload_syncache_expand(struct in_conninfo *inc, struct tcpopt *to,
{
int rc;
- INP_INFO_WLOCK(&tcbinfo);
+ INP_INFO_WLOCK(&V_tcbinfo);
rc = syncache_expand(inc, to, th, lsop, m);
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
return (rc);
}
@@ -1009,7 +1010,7 @@ _syncache_add(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
#endif
struct syncache scs;
- INP_INFO_WLOCK_ASSERT(&tcbinfo);
+ INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
INP_WLOCK_ASSERT(inp); /* listen socket */
KASSERT((th->th_flags & (TH_RST|TH_ACK|TH_SYN)) == TH_SYN,
("%s: unexpected tcp flags", __func__));
@@ -1038,13 +1039,13 @@ _syncache_add(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
#ifdef MAC
if (mac_syncache_init(&maclabel) != 0) {
INP_WUNLOCK(inp);
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
goto done;
} else
mac_syncache_create(maclabel, inp);
#endif
INP_WUNLOCK(inp);
- INP_INFO_WUNLOCK(&tcbinfo);
+ INP_INFO_WUNLOCK(&V_tcbinfo);
/*
* Remember the IP options, if any.
@@ -1074,7 +1075,7 @@ _syncache_add(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
sc->sc_tu->tu_syncache_event(TOE_SC_ENTRY_PRESENT,
sc->sc_toepcb);
#endif
- tcpstat.tcps_sc_dupsyn++;
+ V_tcpstat.tcps_sc_dupsyn++;
if (ipopts) {
/*
* If we were remembering a previous source route,
@@ -1111,24 +1112,24 @@ _syncache_add(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
if (!TOEPCB_ISSET(sc) && syncache_respond(sc) == 0) {
sc->sc_rxmits = 0;
syncache_timeout(sc, sch, 1);
- tcpstat.tcps_sndacks++;
- tcpstat.tcps_sndtotal++;
+ V_tcpstat.tcps_sndacks++;
+ V_tcpstat.tcps_sndtotal++;
}
SCH_UNLOCK(sch);
goto done;
}
- sc = uma_zalloc(tcp_syncache.zone, M_NOWAIT | M_ZERO);
+ sc = uma_zalloc(V_tcp_syncache.zone, M_NOWAIT | M_ZERO);
if (sc == NULL) {
/*
* The zone allocator couldn't provide more entries.
* Treat this as if the cache was full; drop the oldest
* entry and insert the new one.
*/
- tcpstat.tcps_sc_zonefail++;
+ V_tcpstat.tcps_sc_zonefail++;
if ((sc = TAILQ_LAST(&sch->sch_bucket, sch_head)) != NULL)
syncache_drop(sc, sch);
- sc = uma_zalloc(tcp_syncache.zone, M_NOWAIT | M_ZERO);
+ sc = uma_zalloc(V_tcp_syncache.zone, M_NOWAIT | M_ZERO);
if (sc == NULL) {
if (tcp_syncookies) {
bzero(&scs, sizeof(scs));
@@ -1175,7 +1176,7 @@ _syncache_add(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
win = imin(win, TCP_MAXWIN);
sc->sc_wnd = win;
- if (tcp_do_rfc1323) {
+ if (V_tcp_do_rfc1323) {
/*
* A timestamp received in a SYN makes
* it ok to send timestamp requests and replies.
@@ -1234,7 +1235,7 @@ _syncache_add(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
sc->sc_peer_mss = to->to_mss; /* peer mss may be zero */
if (noopt)
sc->sc_flags |= SCF_NOOPT;
- if ((th->th_flags & (TH_ECE|TH_CWR)) && tcp_do_ecn)
+ if ((th->th_flags & (TH_ECE|TH_CWR)) && V_tcp_do_ecn)
sc->sc_flags |= SCF_ECN;
if (tcp_syncookies) {
@@ -1260,12 +1261,12 @@ _syncache_add(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
syncache_free(sc);
else if (sc != &scs)
syncache_insert(sc, sch); /* locks and unlocks sch */
- tcpstat.tcps_sndacks++;
- tcpstat.tcps_sndtotal++;
+ V_tcpstat.tcps_sndacks++;
+ V_tcpstat.tcps_sndtotal++;
} else {
if (sc != &scs)
syncache_free(sc);
- tcpstat.tcps_sc_dropped++;
+ V_tcpstat.tcps_sc_dropped++;
}
done:
@@ -1304,7 +1305,7 @@ syncache_respond(struct syncache *sc)
/* Determine MSS we advertize to other end of connection. */
mssopt = tcp_mssopt(&sc->sc_inc);
if (sc->sc_peer_mss)
- mssopt = max( min(sc->sc_peer_mss, mssopt), tcp_minmss);
+ mssopt = max( min(sc->sc_peer_mss, mssopt), V_tcp_minmss);
/* XXX: Assume that the entire packet will fit in a header mbuf. */
KASSERT(max_linkhdr + tlen + TCP_MAXOLEN <= MHLEN,
@@ -1358,7 +1359,7 @@ syncache_respond(struct syncache *sc)
* 1) path_mtu_discovery is disabled
* 2) the SCF_UNREACH flag has been set
*/
- if (path_mtu_discovery && ((sc->sc_flags & SCF_UNREACH) == 0))
+ if (V_path_mtu_discovery && ((sc->sc_flags & SCF_UNREACH) == 0))
ip->ip_off |= IP_DF;
th = (struct tcphdr *)(ip + 1);
@@ -1376,7 +1377,7 @@ syncache_respond(struct syncache *sc)
if (sc->sc_flags & SCF_ECN) {
th->th_flags |= TH_ECE;
- tcpstat.tcps_ecn_shs++;
+ V_tcpstat.tcps_ecn_shs++;
}
/* Tack on the TCP options. */
@@ -1454,7 +1455,7 @@ tcp_offload_syncache_add(struct in_conninfo *inc, struct tcpopt *to,
struct toe_usrreqs *tu, void *toepcb)
{
- INP_INFO_WLOCK(&tcbinfo);
+ INP_INFO_WLOCK(&V_tcbinfo);
INP_WLOCK(inp);
_syncache_add(inc, to, th, inp, lsop, NULL, tu, toepcb);
}
@@ -1568,7 +1569,7 @@ syncookie_generate(struct syncache_head *sch, struct syncache *sc,
off = sc->sc_iss & 0x7; /* iss was randomized before */
/* Maximum segment size calculation. */
- pmss = max( min(sc->sc_peer_mss, tcp_mssopt(&sc->sc_inc)), tcp_minmss);
+ pmss = max( min(sc->sc_peer_mss, tcp_mssopt(&sc->sc_inc)), V_tcp_minmss);
for (mss = sizeof(tcp_sc_msstab) / sizeof(int) - 1; mss > 0; mss--)
if (tcp_sc_msstab[mss] <= pmss)
break;
@@ -1606,7 +1607,7 @@ syncookie_generate(struct syncache_head *sch, struct syncache *sc,
sc->sc_tsoff = data - ticks; /* after XOR */
}
- tcpstat.tcps_sc_sendcookie++;
+ V_tcpstat.tcps_sc_sendcookie++;
return;
}
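
The descending scan over tcp_sc_msstab above picks the largest table entry that does not exceed the peer's effective MSS, so only a small index (rather than the MSS value itself) has to be encoded in the syncookie. A self-contained sketch of the same idiom, using an illustrative table rather than the real tcp_sc_msstab values:

    /* Hypothetical MSS table; the scan finds the largest entry <= pmss. */
    static const int msstab[] = { 216, 340, 536, 1300, 1440, 1460 };
    int pmss = 1400, mss;

    for (mss = sizeof(msstab) / sizeof(int) - 1; mss > 0; mss--)
        if (msstab[mss] <= pmss)
            break;
    /* Here mss == 3, selecting 1300; index 0 is the floor for a tiny pmss. */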
@@ -1709,7 +1710,7 @@ syncookie_lookup(struct in_conninfo *inc, struct syncache_head *sch,
sc->sc_rxmits = 0;
sc->sc_peer_mss = tcp_sc_msstab[mss];
- tcpstat.tcps_sc_recvcookie++;
+ V_tcpstat.tcps_sc_recvcookie++;
return (sc);
}
@@ -1725,9 +1726,9 @@ syncache_pcbcount(void)
struct syncache_head *sch;
int count, i;
- for (count = 0, i = 0; i < tcp_syncache.hashsize; i++) {
+ for (count = 0, i = 0; i < V_tcp_syncache.hashsize; i++) {
/* No need to lock for a read. */
- sch = &tcp_syncache.hashbase[i];
+ sch = &V_tcp_syncache.hashbase[i];
count += sch->sch_length;
}
return count;
@@ -1750,8 +1751,8 @@ syncache_pcblist(struct sysctl_req *req, int max_pcbs, int *pcbs_exported)
struct syncache_head *sch;
int count, error, i;
- for (count = 0, error = 0, i = 0; i < tcp_syncache.hashsize; i++) {
- sch = &tcp_syncache.hashbase[i];
+ for (count = 0, error = 0, i = 0; i < V_tcp_syncache.hashsize; i++) {
+ sch = &V_tcp_syncache.hashbase[i];
SCH_LOCK(sch);
TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) {
if (count >= max_pcbs) {