author		jmallett <jmallett@FreeBSD.org>	2011-01-09 23:46:24 +0000
committer	jmallett <jmallett@FreeBSD.org>	2011-01-09 23:46:24 +0000
commit		ef1dcae0cf597aead004a2c541cb9e38096c6643 (patch)
tree		dd7c7c451c58aaf269f9377cf351f157f3fc34c7 /sys/mips
parent		db62f5eeb502519f7c229d55593fea1fd9738bd1 (diff)
Now that we correctly enable rx interrupts on all cores, performance has gotten quite awful: e.g. 4 packets come in and get processed on 4 different cores at the same time, fighting painfully with the TCP stack. For now, just run one rx task at a time. In most cases this brings performance back to where it was before the correctness fixes that got interrupts running on all cores (except under high-load TCP transmit, where all the receive path is handling is ACKs), and in some cases it is now better. Ideally we would use a more advanced interrupt mitigation strategy, and perhaps separate workqueue groups per port on multi-port systems, but this is a fine stopgap.
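The fix gates the taskqueue behind an atomic flag: whichever core wins the 0 -> 1 compare-and-set enqueues the single rx task, and every other core's interrupt simply acks and returns. A minimal sketch of the interrupt-side half, assuming FreeBSD's atomic(9) and taskqueue(9) KPIs; the POW CSR acknowledgement and the rest of the driver glue are elided (the real handler is in the diff below):

	#include <sys/param.h>
	#include <sys/bus.h>
	#include <sys/taskqueue.h>
	#include <machine/atomic.h>

	static struct task cvm_oct_task;
	static struct taskqueue *cvm_oct_taskq;
	static int cvm_oct_rx_active;	/* 0: no rx task queued or running */

	static int
	cvm_oct_do_interrupt(void *dev_id)
	{
		/* ... acknowledge the POW work-queue interrupt here ... */

		/*
		 * Only the core that flips cvm_oct_rx_active from 0 to 1
		 * gets to enqueue the task; concurrent interrupts on other
		 * cores lose the compare-and-set and just return.
		 */
		if (atomic_cmpset_int(&cvm_oct_rx_active, 0, 1))
			taskqueue_enqueue(cvm_oct_taskq, &cvm_oct_task);
		return (FILTER_HANDLED);
	}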
Diffstat (limited to 'sys/mips')
-rw-r--r--	sys/mips/cavium/octe/ethernet-rx.c	| 23
1 file changed, 22 insertions(+), 1 deletion(-)
diff --git a/sys/mips/cavium/octe/ethernet-rx.c b/sys/mips/cavium/octe/ethernet-rx.c
index f872b43..b51bb58 100644
--- a/sys/mips/cavium/octe/ethernet-rx.c
+++ b/sys/mips/cavium/octe/ethernet-rx.c
@@ -54,6 +54,8 @@ extern struct ifnet *cvm_oct_device[];
 static struct task cvm_oct_task;
 static struct taskqueue *cvm_oct_taskq;
 
+static int cvm_oct_rx_active;
+
 /**
  * Interrupt handler. The interrupt occurs whenever the POW
  * transitions from 0->1 packets in our group.
@@ -70,7 +72,13 @@ int cvm_oct_do_interrupt(void *dev_id)
 		cvmx_write_csr(CVMX_POW_WQ_INT, 1<<pow_receive_group);
 	else
 		cvmx_write_csr(CVMX_POW_WQ_INT, 0x10001<<pow_receive_group);
-	taskqueue_enqueue(cvm_oct_taskq, &cvm_oct_task);
+
+	/*
+	 * Schedule task if there isn't one running.
+	 */
+	if (atomic_cmpset_int(&cvm_oct_rx_active, 0, 1))
+		taskqueue_enqueue(cvm_oct_taskq, &cvm_oct_task);
+
 	return FILTER_HANDLED;
 }
 
@@ -353,6 +361,19 @@ void cvm_oct_tasklet_rx(void *context, int pending)
 			cvm_oct_free_work(work);
 	}
 
+	/*
+	 * If we hit our limit, schedule another task while we clean up.
+	 */
+	if (INTERRUPT_LIMIT != 0 && rx_count == MAX_RX_PACKETS) {
+		taskqueue_enqueue(cvm_oct_taskq, &cvm_oct_task);
+	} else {
+		/*
+		 * No more packets, all done.
+		 */
+		if (!atomic_cmpset_int(&cvm_oct_rx_active, 1, 0))
+			panic("%s: inconsistent rx active state.", __func__);
+	}
+
 	/* Restore the original POW group mask */
 	cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);
 	if (USE_ASYNC_IOBDMA) {
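Taken together, the two hunks form a claim/release pair around a single rx task. A hedged, self-contained sketch of the task-side half (names follow the diff; the packet-dequeue loop and the POW group-mask handling are elided, the INTERRUPT_LIMIT knob is assumed enabled, and the MAX_RX_PACKETS value here is illustrative):

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/taskqueue.h>
	#include <machine/atomic.h>

	#define MAX_RX_PACKETS	120	/* illustrative batch size; the driver defines its own */

	static struct task cvm_oct_task;
	static struct taskqueue *cvm_oct_taskq;
	static int cvm_oct_rx_active;

	static void
	cvm_oct_tasklet_rx(void *context, int pending)
	{
		int rx_count = 0;

		/*
		 * ... dequeue and process up to MAX_RX_PACKETS packets,
		 * bumping rx_count for each one ...
		 */

		if (rx_count == MAX_RX_PACKETS) {
			/*
			 * Hit the batch limit with work possibly left over:
			 * re-enqueue ourselves and keep the gate held.
			 */
			taskqueue_enqueue(cvm_oct_taskq, &cvm_oct_task);
		} else {
			/*
			 * Drained the queue: release the gate so the next
			 * interrupt can schedule a fresh task.  The flag
			 * must still read 1 here; anything else means a
			 * double release.
			 */
			if (!atomic_cmpset_int(&cvm_oct_rx_active, 1, 0))
				panic("%s: inconsistent rx active state.", __func__);
		}
	}

Re-enqueueing at the batch limit, instead of looping in place, bounds how long one invocation monopolizes the taskqueue thread; the panic() is a cheap assertion that the gate is never released twice.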