path: root/sys/net/netisr.c
author    rwatson <rwatson@FreeBSD.org>    2003-10-03 18:27:24 +0000
committer rwatson <rwatson@FreeBSD.org>    2003-10-03 18:27:24 +0000
commit    2bd88e51d5ff78538387afd55fef083b80a0056a (patch)
tree      f5fe007e6a11bbf25780876aa8db6449a6981b22 /sys/net/netisr.c
parent    55a6ca8b62ac8960225695ad65471b41deb2b759 (diff)
When direct dispatching a netisr (net.isr.enable=1), if there are already
any queued packets for the isr, process those packets before the newly
submitted packet, maintaining the ordering of all packets being delivered
to the netisr. Remove the bypass counter, since we no longer bypass.

Leave the comment about possible problems and options, since later
performance optimization may change the strategy for addressing ordering
problems here. Specifically, this maintains the strong isr ordering
guarantee; additional parallelism and lower latency may be possible by
moving to weaker guarantees (per-interface, for example). We will
probably at some point also want to remove the one-instance netisr
dispatch limit currently enforced by a mutex, but it's not clear that's
100% safe yet, even in the netperf branch.

Reviewed by:    sam, others
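The ordering problem the commit message describes can be illustrated in
isolation. Below is a minimal, self-contained C sketch; the toy queue and
the names enqueue/dequeue/direct_dispatch are hypothetical stand-ins for
struct ifqueue and netisr_dispatch(), not code from this commit. If a
direct dispatch does not drain the backlog first, packet 3 is handled
before packets 1 and 2.

#include <stdio.h>

/* Toy FIFO standing in for struct ifqueue (hypothetical names). */
#define QSIZE 8
static int q[QSIZE];
static int qhead, qtail, qlen;

static void
enqueue(int seq)
{
	if (qlen == QSIZE)
		return;		/* toy queue full: drop */
	q[qtail] = seq;
	qtail = (qtail + 1) % QSIZE;
	qlen++;
}

static int
dequeue(int *seq)
{
	if (qlen == 0)
		return (0);
	*seq = q[qhead];
	qhead = (qhead + 1) % QSIZE;
	qlen--;
	return (1);
}

/* Stand-in for ni->ni_handler(m). */
static void
handler(int seq)
{
	printf("handled packet %d\n", seq);
}

/*
 * Drain-then-dispatch, mirroring netisr_processqueue() followed by the
 * direct handler call: any backlog is processed first, so delivery
 * order matches arrival order.
 */
static void
direct_dispatch(int seq)
{
	int queued;

	while (dequeue(&queued))
		handler(queued);
	handler(seq);
}

int
main(void)
{
	enqueue(1);		/* deferred earlier, e.g. while busy */
	enqueue(2);
	direct_dispatch(3);	/* prints 1, 2, 3, not 3, 1, 2 */
	return (0);
}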
Diffstat (limited to 'sys/net/netisr.c')
-rw-r--r--  sys/net/netisr.c | 34
1 file changed, 21 insertions(+), 13 deletions(-)
diff --git a/sys/net/netisr.c b/sys/net/netisr.c
index d7fc4ec..347fd9a 100644
--- a/sys/net/netisr.c
+++ b/sys/net/netisr.c
@@ -100,7 +100,6 @@ struct isrstat {
int isrs_count; /* dispatch count */
int isrs_directed; /* ...successfully dispatched */
int isrs_deferred; /* ...queued instead */
- int isrs_bypassed; /* bypassed queued packets */
int isrs_queued; /* intentionally queueued */
int isrs_swi_count; /* swi_net handlers called */
};
@@ -119,14 +118,30 @@ SYSCTL_INT(_net_isr, OID_AUTO, directed, CTLFLAG_RD,
&isrstat.isrs_directed, 0, "");
SYSCTL_INT(_net_isr, OID_AUTO, deferred, CTLFLAG_RD,
&isrstat.isrs_deferred, 0, "");
-SYSCTL_INT(_net_isr, OID_AUTO, bypassed, CTLFLAG_RD,
- &isrstat.isrs_bypassed, 0, "");
SYSCTL_INT(_net_isr, OID_AUTO, queued, CTLFLAG_RD,
&isrstat.isrs_queued, 0, "");
SYSCTL_INT(_net_isr, OID_AUTO, swi_count, CTLFLAG_RD,
&isrstat.isrs_swi_count, 0, "");
/*
+ * Process all packets currently present in a netisr queue. Used to
+ * drain an existing set of packets waiting for processing when we
+ * begin direct dispatch, to avoid processing packets out of order.
+ */
+static void
+netisr_processqueue(struct netisr *ni)
+{
+ struct mbuf *m;
+
+ for (;;) {
+ IF_DEQUEUE(ni->ni_queue, m);
+ if (m == NULL)
+ break;
+ ni->ni_handler(m);
+ }
+}
+
+/*
* Call the netisr directly instead of queueing the packet, if possible.
*
* Ideally, the permissibility of calling the routine would be determined
@@ -163,10 +178,9 @@ netisr_dispatch(int num, struct mbuf *m)
* b. fallback to queueing the packet,
* c. sweep the issue under the rug and ignore it.
*
- * Currently, we do c), and keep a rough event counter.
+ * Currently, we do a). Previously, we did c).
*/
- if (_IF_QLEN(ni->ni_queue) > 0)
- isrstat.isrs_bypassed++;
+ netisr_processqueue(ni);
ni->ni_handler(m);
mtx_unlock(&netisr_mtx);
} else {
@@ -204,7 +218,6 @@ static void
swi_net(void *dummy)
{
struct netisr *ni;
- struct mbuf *m;
u_int bits;
int i;
#ifdef DEVICE_POLLING
@@ -230,12 +243,7 @@ swi_net(void *dummy)
if (ni->ni_queue == NULL)
ni->ni_handler(NULL);
else
- for (;;) {
- IF_DEQUEUE(ni->ni_queue, m);
- if (m == NULL)
- break;
- ni->ni_handler(m);
- }
+ netisr_processqueue(ni);
}
} while (polling);
mtx_unlock(&netisr_mtx);
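For comparison, option b) from the comment above (falling back to queueing
the new packet when others are already pending) would also preserve
ordering, at the cost of deferring the new packet to the software
interrupt instead of processing the backlog inline. A rough sketch of
what that branch of netisr_dispatch() might look like, not part of this
commit, assuming the IF_HANDOFF() and schednetisr() macros of this era
(IF_HANDOFF() frees the mbuf itself when the queue is full):

	if (_IF_QLEN(ni->ni_queue) > 0) {
		/* Backlog exists: queue behind it, let swi_net() drain. */
		isrstat.isrs_deferred++;
		if (IF_HANDOFF(ni->ni_queue, m, NULL))
			schednetisr(num);
		/* On failure, IF_HANDOFF() has already freed the mbuf. */
	} else {
		isrstat.isrs_directed++;
		ni->ni_handler(m);
	}

The commit instead chose option a), draining inline, which keeps the
lower latency of direct dispatch for the new packet's traffic class.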