author		np <np@FreeBSD.org>	2013-08-28 23:00:34 +0000
committer	np <np@FreeBSD.org>	2013-08-28 23:00:34 +0000
commit		e9b6cb5ecce096a84afcbac4cbf86003db5d12fc (patch)
tree		16eac644bd0d8909a19d1ecfe80efe34fe8d8fa2
parent		6731dd428d26602eb9428e8d6bf309b24bea5ad2 (diff)
Merge r254336 from user/np/cxl_tuning.

Add a last-modified timestamp to each LRO entry and provide an
interface to flush all inactive entries.  Drivers decide when to flush
and what the inactivity threshold should be.

Network drivers that process an rx queue to completion can enter a
livelock type situation when the rate at which packets are received
reaches equilibrium with the rate at which the rx thread is processing
them.  When this happens the final LRO flush (normally when the rx
routine is done) does not occur.  Pure ACKs and segments with total
payload < 64K can get stuck in an LRO entry.  Symptoms are that TCP
tx-mostly connections' performance falls off a cliff during heavy,
unrelated rx on the interface.

Flushing only inactive LRO entries works better than any of these
alternates that I tried:
- don't LRO pure ACKs
- flush _all_ LRO entries periodically (every 'x' microseconds or
  every 'y' descriptors)
- stop rx processing in the driver periodically and schedule remaining
  work for later.

Reviewed by:	andre
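
As an illustration of how a driver is expected to use the new interface,
here is a minimal sketch of an rx path built around it.  Everything
prefixed my_ (the queue structure, the dequeue helper, the tick routine)
and the 1 ms threshold are hypothetical; only tcp_lro_rx(),
tcp_lro_flush(), and tcp_lro_flush_inactive() come from this change, and
lro.ifp is assumed to have been set by the driver at attach time.

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/time.h>
#include <sys/mbuf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <netinet/tcp_lro.h>

/* Hypothetical per-rx-queue driver state. */
struct my_rxq {
	struct lro_ctrl	lro;	/* lro.ifp set by the driver at attach */
};

/* Hypothetical driver-chosen inactivity threshold: 1 ms. */
static const struct timeval my_lro_timeout = { 0, 1000 };

/* Hypothetical helper: dequeue the next received frame, if any. */
struct mbuf *my_rxq_next(struct my_rxq *, uint32_t *);

static void
my_rxq_service(struct my_rxq *rxq)
{
	struct lro_entry *le;
	struct mbuf *m;
	uint32_t csum;

	while ((m = my_rxq_next(rxq, &csum)) != NULL) {
		/* Aggregate if possible, else hand the mbuf to the stack. */
		if (tcp_lro_rx(&rxq->lro, m, csum) != 0)
			(*rxq->lro.ifp->if_input)(rxq->lro.ifp, m);
	}

	/* The usual final flush, reached only when the queue drains. */
	while (!SLIST_EMPTY(&rxq->lro.lro_active)) {
		le = SLIST_FIRST(&rxq->lro.lro_active);
		SLIST_REMOVE_HEAD(&rxq->lro.lro_active, next);
		tcp_lro_flush(&rxq->lro, le);
	}
}

/*
 * If rx never goes idle, the final flush above is never reached (the
 * livelock described in the commit message).  Calling this from a
 * periodic context -- a callout, or every N descriptors -- pushes out
 * only the entries that have been idle longer than the threshold.
 */
static void
my_rxq_tick(struct my_rxq *rxq)
{
	tcp_lro_flush_inactive(&rxq->lro, &my_lro_timeout);
}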
 sys/netinet/tcp_lro.c | 23 ++++++++++++++++++++++++-
 sys/netinet/tcp_lro.h |  4 ++++
 2 files changed, 26 insertions(+), 1 deletion(-)
diff --git a/sys/netinet/tcp_lro.c b/sys/netinet/tcp_lro.c
index 032d47c..63a6bba 100644
--- a/sys/netinet/tcp_lro.c
+++ b/sys/netinet/tcp_lro.c
@@ -194,6 +194,25 @@ tcp_lro_rx_csum_fixup(struct lro_entry *le, void *l3hdr, struct tcphdr *th,
#endif

void
+tcp_lro_flush_inactive(struct lro_ctrl *lc, const struct timeval *timeout)
+{
+	struct lro_entry *le, *le_tmp;
+	struct timeval tv;
+
+	if (SLIST_EMPTY(&lc->lro_active))
+		return;
+
+	getmicrotime(&tv);
+	timevalsub(&tv, timeout);
+	SLIST_FOREACH_SAFE(le, &lc->lro_active, next, le_tmp) {
+		if (timevalcmp(&tv, &le->mtime, >=)) {
+			SLIST_REMOVE(&lc->lro_active, le, lro_entry, next);
+			tcp_lro_flush(lc, le);
+		}
+	}
+}
+
+void
tcp_lro_flush(struct lro_ctrl *lc, struct lro_entry *le)
{
@@ -543,7 +562,8 @@ tcp_lro_rx(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum)
		if (le->p_len > (65535 - lc->ifp->if_mtu)) {
			SLIST_REMOVE(&lc->lro_active, le, lro_entry, next);
			tcp_lro_flush(lc, le);
-		}
+		} else
+			getmicrotime(&le->mtime);
		return (0);
	}
@@ -556,6 +576,7 @@ tcp_lro_rx(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum)
	le = SLIST_FIRST(&lc->lro_free);
	SLIST_REMOVE_HEAD(&lc->lro_free, next);
	SLIST_INSERT_HEAD(&lc->lro_active, le, next);
+	getmicrotime(&le->mtime);

	/* Start filling in details. */
	switch (eh_type) {
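
A note on the cutoff test in tcp_lro_flush_inactive() above: the current
time is read once and the timeout is subtracted from it, so the
comparison flushes any entry whose mtime is at or before now - timeout,
i.e. any entry idle for at least the timeout.  For example, with a
100 us threshold and now = {5, 000200}, the cutoff is {5, 000100}: an
entry last stamped at {5, 000050} is flushed, while one stamped at
{5, 000150} survives until a later pass.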
diff --git a/sys/netinet/tcp_lro.h b/sys/netinet/tcp_lro.h
index b3a5017..ab6d74a 100644
--- a/sys/netinet/tcp_lro.h
+++ b/sys/netinet/tcp_lro.h
@@ -30,6 +30,8 @@
#ifndef _TCP_LRO_H_
#define _TCP_LRO_H_

+#include <sys/time.h>
+
struct lro_entry
{
	SLIST_ENTRY(lro_entry) next;
@@ -59,6 +61,7 @@ struct lro_entry
	uint32_t	tsecr;
	uint16_t	window;
	uint16_t	timestamp;	/* flag, not a TCP hdr field. */
+	struct timeval	mtime;
};

SLIST_HEAD(lro_head, lro_entry);
@@ -83,6 +86,7 @@ struct lro_ctrl {
int tcp_lro_init(struct lro_ctrl *);
void tcp_lro_free(struct lro_ctrl *);
+void tcp_lro_flush_inactive(struct lro_ctrl *, const struct timeval *);
void tcp_lro_flush(struct lro_ctrl *, struct lro_entry *);
int tcp_lro_rx(struct lro_ctrl *, struct mbuf *, uint32_t);
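
The timeout is whatever the driver passes in, so a natural arrangement
is to expose it as a tunable in microseconds and convert once per call,
as in the sketch below (the variable name and default are hypothetical).
Note that getmicrotime() is the cheap, cached timestamp with roughly
clock-tick granularity, so thresholds much smaller than 1/hz are
effectively quantized to a tick.

#include <sys/time.h>
#include <netinet/tcp_lro.h>

/* Hypothetical driver tunable: LRO inactivity threshold in microseconds. */
static int my_lro_timeout_us = 100;

static void
my_lro_flush_inactive(struct lro_ctrl *lc)
{
	struct timeval tv;

	/* Convert the microsecond tunable into the timeval the API wants. */
	tv.tv_sec = my_lro_timeout_us / 1000000;
	tv.tv_usec = my_lro_timeout_us % 1000000;
	tcp_lro_flush_inactive(lc, &tv);
}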