path: root/sys/net/netisr.c
author    sam <sam@FreeBSD.org>    2003-11-08 22:28:40 +0000
committer sam <sam@FreeBSD.org>    2003-11-08 22:28:40 +0000
commit    7f3b205cb834636e3bbf0bb354cccd5c98ee1db4 (patch)
tree      2ce18b56605a2424ffb613d7f2ee4678a34df3a4 /sys/net/netisr.c
parent    db66506d6cab556d613d20d13ef3403ca934395c (diff)
o add a flags parameter to netisr_register that is used to specify
  whether or not the isr needs to hold Giant when running; Giant-less
  operation is also controlled by the setting of debug_mpsafenet
o mark all netisr's except NETISR_IP as needing Giant
o add a GIANT_REQUIRED assertion to the top of netisr's that need Giant
o pickup Giant (when debug_mpsafenet is 1) inside ip_input before
  calling up with a packet
o change netisr handling so swi_net runs w/o Giant; instead we grab
  Giant before invoking handlers based on whether the handler needs Giant
o change netisr handling so that netisr's that are marked MPSAFE may
  have multiple instances active at a time
o add netisr statistics for packets dropped because the isr is inactive

Supported by: FreeBSD Foundation
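[Editor's note: a minimal sketch of how a protocol would register under the
new interface. Only the four-argument signature and the NETISR_MPSAFE flag
appear in the diff below; the handler and queue names (ip_input, ipintrq,
arpintr, arpintrq) and the NETISR_ARP case are illustrative assumptions,
not part of this commit.]

    /* Hypothetical registrations under the new four-argument interface. */

    /* IP is the only netisr this commit marks MPSAFE.  Note that
     * netisr_register() strips NETISR_MPSAFE when debug_mpsafenet is 0,
     * so the flag is a request, not a guarantee. */
    netisr_register(NETISR_IP, ip_input, &ipintrq, NETISR_MPSAFE);

    /* Every other netisr still needs Giant: register with no flags and
     * let swi_net wrap the handler call in mtx_lock(&Giant) /
     * mtx_unlock(&Giant). */
    netisr_register(NETISR_ARP, arpintr, &arpintrq, 0);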
Diffstat (limited to 'sys/net/netisr.c')
-rw-r--r--    sys/net/netisr.c    84
1 file changed, 45 insertions(+), 39 deletions(-)
diff --git a/sys/net/netisr.c b/sys/net/netisr.c
index cb24e12..ee2c6426 100644
--- a/sys/net/netisr.c
+++ b/sys/net/netisr.c
@@ -68,9 +68,9 @@ volatile unsigned int netisr; /* scheduling bits for network */
struct netisr {
netisr_t *ni_handler;
struct ifqueue *ni_queue;
+ int ni_flags;
} netisrs[32];

-static struct mtx netisr_mtx;
static void *net_ih;

void
@@ -80,37 +80,37 @@ legacy_setsoftnet(void)
}

void
-netisr_register(int num, netisr_t *handler, struct ifqueue *inq)
+netisr_register(int num, netisr_t *handler, struct ifqueue *inq, int flags)
{
KASSERT(!(num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs))),
("bad isr %d", num));
netisrs[num].ni_handler = handler;
netisrs[num].ni_queue = inq;
+ if ((flags & NETISR_MPSAFE) && !debug_mpsafenet)
+ flags &= ~NETISR_MPSAFE;
+ netisrs[num].ni_flags = flags;
}

void
netisr_unregister(int num)
{
struct netisr *ni;
- int s;
KASSERT(!(num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs))),
("bad isr %d", num));
ni = &netisrs[num];
ni->ni_handler = NULL;
- if (ni->ni_queue != NULL) {
- s = splimp();
+ if (ni->ni_queue != NULL)
IF_DRAIN(ni->ni_queue);
- splx(s);
- }
}

struct isrstat {
int isrs_count; /* dispatch count */
- int isrs_directed; /* ...successfully dispatched */
+ int isrs_directed; /* ...directly dispatched */
int isrs_deferred; /* ...queued instead */
int isrs_queued; /* intentionally queueued */
+ int isrs_drop; /* dropped 'cuz no handler */
int isrs_swi_count; /* swi_net handlers called */
};
static struct isrstat isrstat;
@@ -130,6 +130,8 @@ SYSCTL_INT(_net_isr, OID_AUTO, deferred, CTLFLAG_RD,
&isrstat.isrs_deferred, 0, "");
SYSCTL_INT(_net_isr, OID_AUTO, queued, CTLFLAG_RD,
&isrstat.isrs_queued, 0, "");
+SYSCTL_INT(_net_isr, OID_AUTO, drop, CTLFLAG_RD,
+ &isrstat.isrs_drop, 0, "");
SYSCTL_INT(_net_isr, OID_AUTO, swi_count, CTLFLAG_RD,
&isrstat.isrs_swi_count, 0, "");
@@ -153,46 +155,43 @@ netisr_processqueue(struct netisr *ni)
/*
* Call the netisr directly instead of queueing the packet, if possible.
- *
- * Ideally, the permissibility of calling the routine would be determined
- * by checking if splnet() was asserted at the time the device interrupt
- * occurred; if so, this indicates that someone is in the network stack.
- *
- * However, bus_setup_intr uses INTR_TYPE_NET, which sets splnet before
- * calling the interrupt handler, so the previous mask is unavailable.
- * Approximate this by checking intr_nesting_level instead; if any SWI
- * handlers are running, the packet is queued instead.
*/
void
netisr_dispatch(int num, struct mbuf *m)
{
struct netisr *ni;
- isrstat.isrs_count++;
+ isrstat.isrs_count++; /* XXX redundant */
KASSERT(!(num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs))),
("bad isr %d", num));
ni = &netisrs[num];
if (ni->ni_queue == NULL) {
+ isrstat.isrs_drop++;
m_freem(m);
return;
}
- if (netisr_enable && mtx_trylock(&netisr_mtx)) {
+ /*
+ * Do direct dispatch only for MPSAFE netisrs (and
+ * only when enabled). Note that when a netisr is
+ * marked MPSAFE we permit multiple concurrent instances
+ * to run. We guarantee only the order in which
+ * packets are processed for each "dispatch point" in
+ * the system (i.e. call to netisr_dispatch or
+ * netisr_queue). This insures ordering of packets
+ * from an interface but does not guarantee ordering
+ * between multiple places in the system (e.g. IP
+ * dispatched from interfaces vs. IP queued from IPSec).
+ */
+ if (netisr_enable && (ni->ni_flags & NETISR_MPSAFE)) {
isrstat.isrs_directed++;
/*
- * One slight problem here is that packets might bypass
- * each other in the stack, if an earlier one happened
- * to get stuck in the queue.
- *
- * we can either:
- * a. drain the queue before handling this packet,
- * b. fallback to queueing the packet,
- * c. sweep the issue under the rug and ignore it.
- *
- * Currently, we do a). Previously, we did c).
+ * NB: We used to drain the queue before handling
+ * the packet but now do not. Doing so here will
+ * not preserve ordering so instead we fallback to
+ * guaranteeing order only from dispatch points
+ * in the system (see above).
*/
- netisr_processqueue(ni);
ni->ni_handler(m);
- mtx_unlock(&netisr_mtx);
} else {
isrstat.isrs_deferred++;
if (IF_HANDOFF(ni->ni_queue, m, NULL))
@@ -214,6 +213,7 @@ netisr_queue(int num, struct mbuf *m)
("bad isr %d", num));
ni = &netisrs[num];
if (ni->ni_queue == NULL) {
+ isrstat.isrs_drop++;
m_freem(m);
return (1);
}
@@ -236,7 +236,6 @@ swi_net(void *dummy)
const int polling = 0;
#endif

- mtx_lock(&netisr_mtx);
do {
bits = atomic_readandclear_int(&netisr);
if (bits == 0)
@@ -250,21 +249,28 @@ swi_net(void *dummy)
printf("swi_net: unregistered isr %d.\n", i);
continue;
}
- if (ni->ni_queue == NULL)
- ni->ni_handler(NULL);
- else
- netisr_processqueue(ni);
+ if ((ni->ni_flags & NETISR_MPSAFE) == 0) {
+ mtx_lock(&Giant);
+ if (ni->ni_queue == NULL)
+ ni->ni_handler(NULL);
+ else
+ netisr_processqueue(ni);
+ mtx_unlock(&Giant);
+ } else {
+ if (ni->ni_queue == NULL)
+ ni->ni_handler(NULL);
+ else
+ netisr_processqueue(ni);
+ }
}
} while (polling);
- mtx_unlock(&netisr_mtx);
}

static void
start_netisr(void *dummy)
{
- mtx_init(&netisr_mtx, "netisr lock", NULL, MTX_DEF);
- if (swi_add(NULL, "net", swi_net, NULL, SWI_NET, 0, &net_ih))
+ if (swi_add(NULL, "net", swi_net, NULL, SWI_NET, INTR_MPSAFE, &net_ih))
panic("start_netisr");
}

SYSINIT(start_netisr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_netisr, NULL)
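[Editor's note: to close the loop on the log message's GIANT_REQUIRED bullet,
here is a sketch of the handler-side convention after this change; the
handler name and body are hypothetical. A netisr registered without
NETISR_MPSAFE runs with Giant held by swi_net and asserts that on entry.
MPSAFE handlers skip both the lock and the assertion, and may run
concurrently, with ordering guaranteed only per dispatch point as the
comment added in netisr_dispatch() explains.]

    /* Hypothetical Giant-requiring netisr handler. */
    static void
    arpintr(struct mbuf *m)
    {

            GIANT_REQUIRED;    /* swi_net holds Giant for non-MPSAFE isrs */

            /* ... protocol processing under Giant ... */
            m_freem(m);
    }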