summary | refs | log | tree | commit | diff | stats
path: root/sys/net/ieee8023ad_lacp.c
diff options
context:
space:
mode:
author    thompsa <thompsa@FreeBSD.org>  2007-05-15 07:41:46 +0000
committer thompsa <thompsa@FreeBSD.org>  2007-05-15 07:41:46 +0000
commit   1eb8d76bed26546baab610bdf8db03724e5e4b16 (patch)
tree     9b46ae887a0094102a87bddd505c6bca8a08d5cf /sys/net/ieee8023ad_lacp.c
parent   c7b3a722a96766bc84c8f96b03aa1cf5ce003d34 (diff)
download FreeBSD-src-1eb8d76bed26546baab610bdf8db03724e5e4b16.zip
download FreeBSD-src-1eb8d76bed26546baab610bdf8db03724e5e4b16.tar.gz
Change from a mutex to a read/write lock. This allows the tx port to be
selected simultaneously by multiple senders and transmit/receive is not serialised between aggregated interfaces.
Diffstat (limited to 'sys/net/ieee8023ad_lacp.c')
-rw-r--r--  sys/net/ieee8023ad_lacp.c  95
1 file changed, 80 insertions(+), 15 deletions(-)
diff --git a/sys/net/ieee8023ad_lacp.c b/sys/net/ieee8023ad_lacp.c
index b022035..11b2f5e 100644
--- a/sys/net/ieee8023ad_lacp.c
+++ b/sys/net/ieee8023ad_lacp.c
@@ -38,6 +38,9 @@ __FBSDID("$FreeBSD$");
#include <sys/socket.h> /* for net/if.h */
#include <sys/sockio.h>
#include <machine/stdarg.h>
+#include <sys/lock.h>
+#include <sys/rwlock.h>
+#include <sys/taskqueue.h>
#include <net/if.h>
#include <net/if_dl.h>
@@ -111,6 +114,9 @@ static void lacp_aggregator_delref(struct lacp_softc *,
/* receive machine */
+static void lacp_dequeue(void *, int);
+static int lacp_pdu_input(struct lagg_port *, struct mbuf *);
+static int lacp_marker_input(struct lagg_port *, struct mbuf *);
static void lacp_sm_rx(struct lacp_port *, const struct lacpdu *);
static void lacp_sm_rx_timer(struct lacp_port *);
static void lacp_sm_rx_set_expired(struct lacp_port *);
@@ -202,17 +208,66 @@ static const lacp_timer_func_t lacp_timer_funcs[LACP_NTIMER] = {
[LACP_TIMER_WAIT_WHILE] = lacp_sm_mux_timer,
};
+void
+lacp_input(struct lagg_port *lgp, struct mbuf *m)
+{
+ struct lagg_softc *lgs = lgp->lp_lagg;
+ struct lacp_softc *lsc = LACP_SOFTC(lgs);
+ uint8_t subtype;
+
+ if (m->m_pkthdr.len < sizeof(struct ether_header) + sizeof(subtype)) {
+ m_freem(m);
+ return;
+ }
+
+ m_copydata(m, sizeof(struct ether_header), sizeof(subtype), &subtype);
+ switch (subtype) {
+ case SLOWPROTOCOLS_SUBTYPE_LACP:
+ IF_HANDOFF(&lsc->lsc_queue, m, NULL);
+ taskqueue_enqueue(taskqueue_swi, &lsc->lsc_qtask);
+ break;
+
+ case SLOWPROTOCOLS_SUBTYPE_MARKER:
+ lacp_marker_input(lgp, m);
+ break;
+
+ default:
+ /* Unknown LACP packet type */
+ m_freem(m);
+ break;
+ }
+}
+
+static void
+lacp_dequeue(void *arg, int pending)
+{
+ struct lacp_softc *lsc = (struct lacp_softc *)arg;
+ struct lagg_softc *sc = lsc->lsc_lagg;
+ struct lagg_port *lgp;
+ struct mbuf *m;
+
+ LAGG_WLOCK(sc);
+ for (;;) {
+ IF_DEQUEUE(&lsc->lsc_queue, m);
+ if (m == NULL)
+ break;
+ lgp = m->m_pkthdr.rcvif->if_lagg;
+ lacp_pdu_input(lgp, m);
+ }
+ LAGG_WUNLOCK(sc);
+}
+
/*
- * lacp_input: process lacpdu
+ * lacp_pdu_input: process lacpdu
*/
-int
-lacp_input(struct lagg_port *lgp, struct mbuf *m)
+static int
+lacp_pdu_input(struct lagg_port *lgp, struct mbuf *m)
{
struct lacp_port *lp = LACP_PORT(lgp);
struct lacpdu *du;
int error = 0;
- LAGG_LOCK_ASSERT(lgp->lp_lagg);
+ LAGG_WLOCK_ASSERT(lgp->lp_lagg);
if (__predict_false(lp->lp_flags & LACP_PORT_DETACHING)) {
goto bad;
@@ -303,7 +358,7 @@ lacp_xmit_lacpdu(struct lacp_port *lp)
struct lacpdu *du;
int error;
- LAGG_LOCK_ASSERT(lgp->lp_lagg);
+ LAGG_WLOCK_ASSERT(lgp->lp_lagg);
m = m_gethdr(M_DONTWAIT, MT_DATA);
if (m == NULL) {
@@ -360,7 +415,7 @@ lacp_linkstate(struct lagg_port *lgp)
uint8_t old_state;
uint16_t old_key;
- LAGG_LOCK_ASSERT(lgp->lp_lagg);
+ LAGG_WLOCK_ASSERT(lgp->lp_lagg);
bzero((char *)&ifmr, sizeof(ifmr));
error = (*ifp->if_ioctl)(ifp, SIOCGIFMEDIA, (caddr_t)&ifmr);
@@ -397,8 +452,10 @@ static void
lacp_tick(void *arg)
{
struct lacp_softc *lsc = arg;
+ struct lagg_softc *sc = lsc->lsc_lagg;
struct lacp_port *lp;
+ LAGG_WLOCK(sc);
LIST_FOREACH(lp, &lsc->lsc_ports, lp_next) {
if ((lp->lp_state & LACP_STATE_AGGREGATION) == 0)
continue;
@@ -410,6 +467,7 @@ lacp_tick(void *arg)
lacp_sm_tx(lp);
lacp_sm_ptx_tx_schedule(lp);
}
+ LAGG_WUNLOCK(sc);
callout_reset(&lsc->lsc_callout, hz, lacp_tick, lsc);
}
@@ -427,7 +485,7 @@ lacp_port_create(struct lagg_port *lgp)
boolean_t active = TRUE; /* XXX should be configurable */
boolean_t fast = FALSE; /* XXX should be configurable */
- LAGG_LOCK_ASSERT(lgs);
+ LAGG_WLOCK_ASSERT(lgs);
bzero((char *)&sdl, sizeof(sdl));
sdl.sdl_len = sizeof(sdl);
@@ -474,7 +532,7 @@ lacp_port_destroy(struct lagg_port *lgp)
struct lacp_port *lp = LACP_PORT(lgp);
int i;
- LAGG_LOCK_ASSERT(lgp->lp_lagg);
+ LAGG_WLOCK_ASSERT(lgp->lp_lagg);
for (i = 0; i < LACP_NTIMER; i++) {
LACP_TIMER_DISARM(lp, i);
@@ -539,7 +597,7 @@ lacp_disable_distributing(struct lacp_port *lp)
char buf[LACP_LAGIDSTR_MAX+1];
#endif /* defined(LACP_DEBUG) */
- LAGG_LOCK_ASSERT(lgp->lp_lagg);
+ LAGG_WLOCK_ASSERT(lgp->lp_lagg);
if (la == NULL || (lp->lp_state & LACP_STATE_DISTRIBUTING) == 0) {
return;
@@ -577,7 +635,7 @@ lacp_enable_distributing(struct lacp_port *lp)
char buf[LACP_LAGIDSTR_MAX+1];
#endif /* defined(LACP_DEBUG) */
- LAGG_LOCK_ASSERT(lgp->lp_lagg);
+ LAGG_WLOCK_ASSERT(lgp->lp_lagg);
if ((lp->lp_state & LACP_STATE_DISTRIBUTING) != 0) {
return;
@@ -616,7 +674,7 @@ lacp_attach(struct lagg_softc *lgs)
{
struct lacp_softc *lsc;
- LAGG_LOCK_ASSERT(lgs);
+ LAGG_WLOCK_ASSERT(lgs);
lsc = malloc(sizeof(struct lacp_softc),
M_DEVBUF, M_NOWAIT|M_ZERO);
@@ -631,8 +689,12 @@ lacp_attach(struct lagg_softc *lgs)
TAILQ_INIT(&lsc->lsc_aggregators);
LIST_INIT(&lsc->lsc_ports);
- callout_init_mtx(&lsc->lsc_transit_callout, &lgs->sc_mtx, 0);
- callout_init_mtx(&lsc->lsc_callout, &lgs->sc_mtx, 0);
+ TASK_INIT(&lsc->lsc_qtask, 0, lacp_dequeue, lsc);
+ mtx_init(&lsc->lsc_queue.ifq_mtx, "lacp queue", NULL, MTX_DEF);
+ lsc->lsc_queue.ifq_maxlen = ifqmaxlen;
+
+ callout_init(&lsc->lsc_transit_callout, CALLOUT_MPSAFE);
+ callout_init(&lsc->lsc_callout, CALLOUT_MPSAFE);
/* if the lagg is already up then do the same */
if (lgs->sc_ifp->if_drv_flags & IFF_DRV_RUNNING)
@@ -654,6 +716,9 @@ lacp_detach(struct lagg_softc *lgs)
lgs->sc_psc = NULL;
callout_drain(&lsc->lsc_transit_callout);
callout_drain(&lsc->lsc_callout);
+ taskqueue_drain(taskqueue_swi, &lsc->lsc_qtask);
+ IF_DRAIN(&lsc->lsc_queue);
+ mtx_destroy(&lsc->lsc_queue.ifq_mtx);
free(lsc, M_DEVBUF);
return (0);
@@ -685,7 +750,7 @@ lacp_select_tx_port(struct lagg_softc *lgs, struct mbuf *m)
uint32_t hash;
int nports;
- LAGG_LOCK_ASSERT(lgs);
+ LAGG_WLOCK_ASSERT(lgs);
if (__predict_false(lsc->lsc_suppress_distributing)) {
LACP_DPRINTF((NULL, "%s: waiting transit\n", __func__));
@@ -1538,7 +1603,7 @@ lacp_marker_input(struct lagg_port *lgp, struct mbuf *m)
struct markerdu *mdu;
int error = 0;
- LAGG_LOCK_ASSERT(lgp->lp_lagg);
+ LAGG_RLOCK_ASSERT(lgp->lp_lagg);
if (__predict_false(lp->lp_flags & LACP_PORT_DETACHING)) {
goto bad;
OpenPOWER on IntegriCloud