path: root/sys/net/if_lagg.c
author     mav <mav@FreeBSD.org>  2017-05-16 00:30:40 +0000
committer  mav <mav@FreeBSD.org>  2017-05-16 00:30:40 +0000
commit     1943c4762afacb8897b851c84fea47b557e8de9f (patch)
tree       1860d712cd2732cd1063b0d6e70d69abc5305aae /sys/net/if_lagg.c
parent     87d7c82fefe4143a9b80cf82c7eacb61458352d4 (diff)
MFC r317696, r317723, r317836: Introduce sleepable locks into if_lagg.
Before this change if_lagg used non-sleepable rmlocks to protect its internal state. This patch introduces an additional sx lock to protect code paths that require sleeping, while keeping the old rmlock for the hot, non-sleepable data paths. The change makes it possible to remove the taskqueue decoupling previously used to change interface addresses without holding the lock; instead, the sx lock now protects direct if_ioctl() calls.

As another bonus, the new code synchronizes the enabled capabilities of member interfaces and allows them to be controlled with ifconfig laggX, which was impossible before. That part should fix interoperation with if_bridge, which may need to disable some capabilities, such as TXCSUM or LRO, to allow bridging with non-capable interfaces.
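The core of the change is pairing a sleepable sx(9) lock for configuration paths with the existing non-sleepable rmlock(9) for the hot data path. The following is a minimal sketch of that pattern, assuming a FreeBSD kernel context; the foo_* names are hypothetical, and the real LAGG_XLOCK/LAGG_SLOCK and LAGG_WLOCK/LAGG_RLOCK macros used in the diff below are defined in the if_lagg.h header, which is not part of this file.

/*
 * Sketch of the two-lock scheme: an sx lock serializes sleepable
 * configuration work, an rmlock protects the packet-processing path.
 * Names are illustrative, not the actual if_lagg macros.
 */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rmlock.h>
#include <sys/sx.h>

struct foo_softc {
	struct sx	sc_sx;		/* sleepable config paths */
	struct rmlock	sc_mtx;		/* hot, non-sleepable data path */
	/* ... state shared between config and data paths ... */
};

static void
foo_attach(struct foo_softc *sc)
{
	sx_init(&sc->sc_sx, "foo sx");
	rm_init(&sc->sc_mtx, "foo rmlock");
}

/*
 * Configuration path: may sleep (e.g. calling a member interface's
 * if_ioctl() or allocating with M_WAITOK), so it holds only the sx
 * lock; the rmlock is taken write-locked just while shared state is
 * actually updated.
 */
static void
foo_config(struct foo_softc *sc)
{
	sx_xlock(&sc->sc_sx);
	/* ... sleepable work: if_ioctl(), malloc(M_WAITOK), ... */
	rm_wlock(&sc->sc_mtx);
	/* ... publish the updated state to the data path ... */
	rm_wunlock(&sc->sc_mtx);
	sx_xunlock(&sc->sc_sx);
}

/*
 * Data path: must not sleep, so it uses only the rmlock read lock,
 * which is cheap for readers.
 */
static void
foo_input(struct foo_softc *sc)
{
	struct rm_priotracker tracker;

	rm_rlock(&sc->sc_mtx, &tracker);
	/* ... look up the port and forward the mbuf ... */
	rm_runlock(&sc->sc_mtx, &tracker);
}

In the diff below this split shows up as LAGG_RLOCK(sc, &tracker)/LAGG_RUNLOCK pairs being replaced by LAGG_SLOCK(sc)/LAGG_SUNLOCK on the ioctl and event-handler paths, LAGG_XLOCK wrapping port creation, destruction and capability updates, and the remaining LAGG_WLOCK sections shrinking to the points where data-path state is actually modified.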
Diffstat (limited to 'sys/net/if_lagg.c')
-rw-r--r--  sys/net/if_lagg.c  533
1 file changed, 208 insertions(+), 325 deletions(-)
diff --git a/sys/net/if_lagg.c b/sys/net/if_lagg.c
index dc3e0cc..c108f53 100644
--- a/sys/net/if_lagg.c
+++ b/sys/net/if_lagg.c
@@ -38,6 +38,7 @@ __FBSDID("$FreeBSD$");
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/rmlock.h>
+#include <sys/sx.h>
#include <sys/taskqueue.h>
#include <sys/eventhandler.h>
@@ -98,10 +99,7 @@ static VNET_DEFINE(struct if_clone *, lagg_cloner);
#define V_lagg_cloner VNET(lagg_cloner)
static const char laggname[] = "lagg";
-static void lagg_lladdr(struct lagg_softc *, uint8_t *);
static void lagg_capabilities(struct lagg_softc *);
-static void lagg_port_lladdr(struct lagg_port *, uint8_t *, lagg_llqtype);
-static void lagg_port_setlladdr(void *, int);
static int lagg_port_create(struct lagg_softc *, struct ifnet *);
static int lagg_port_destroy(struct lagg_port *, int);
static struct mbuf *lagg_input(struct ifnet *, struct mbuf *);
@@ -118,8 +116,9 @@ static void lagg_port2req(struct lagg_port *, struct lagg_reqport *);
static void lagg_init(void *);
static void lagg_stop(struct lagg_softc *);
static int lagg_ioctl(struct ifnet *, u_long, caddr_t);
-static int lagg_ether_setmulti(struct lagg_softc *);
-static int lagg_ether_cmdmulti(struct lagg_port *, int);
+static int lagg_setmulti(struct lagg_port *);
+static int lagg_clrmulti(struct lagg_port *);
+static int lagg_setcaps(struct lagg_port *, int cap);
static int lagg_setflag(struct lagg_port *, int, int,
int (*func)(struct ifnet *, int));
static int lagg_setflags(struct lagg_port *, int status);
@@ -311,6 +310,7 @@ static void
lagg_proto_attach(struct lagg_softc *sc, lagg_proto pr)
{
+ LAGG_XLOCK_ASSERT(sc);
KASSERT(sc->sc_proto == LAGG_PROTO_NONE, ("%s: sc %p has proto",
__func__, sc));
@@ -327,8 +327,8 @@ lagg_proto_detach(struct lagg_softc *sc)
{
lagg_proto pr;
+ LAGG_XLOCK_ASSERT(sc);
LAGG_WLOCK_ASSERT(sc);
-
pr = sc->sc_proto;
sc->sc_proto = LAGG_PROTO_NONE;
@@ -427,15 +427,14 @@ lagg_register_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
{
struct lagg_softc *sc = ifp->if_softc;
struct lagg_port *lp;
- struct rm_priotracker tracker;
if (ifp->if_softc != arg) /* Not our event */
return;
- LAGG_RLOCK(sc, &tracker);
+ LAGG_SLOCK(sc);
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
EVENTHANDLER_INVOKE(vlan_config, lp->lp_ifp, vtag);
- LAGG_RUNLOCK(sc, &tracker);
+ LAGG_SUNLOCK(sc);
}
/*
@@ -447,15 +446,14 @@ lagg_unregister_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
{
struct lagg_softc *sc = ifp->if_softc;
struct lagg_port *lp;
- struct rm_priotracker tracker;
if (ifp->if_softc != arg) /* Not our event */
return;
- LAGG_RLOCK(sc, &tracker);
+ LAGG_SLOCK(sc);
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
EVENTHANDLER_INVOKE(vlan_unconfig, lp->lp_ifp, vtag);
- LAGG_RUNLOCK(sc, &tracker);
+ LAGG_SUNLOCK(sc);
}
static int
@@ -471,7 +469,10 @@ lagg_clone_create(struct if_clone *ifc, int unit, caddr_t params)
free(sc, M_DEVBUF);
return (ENOSPC);
}
+ LAGG_LOCK_INIT(sc);
+ LAGG_SX_INIT(sc);
+ LAGG_XLOCK(sc);
if (V_def_use_flowid)
sc->sc_opts |= LAGG_OPT_USE_FLOWID;
sc->flowid_shift = V_def_flowid_shift;
@@ -481,9 +482,7 @@ lagg_clone_create(struct if_clone *ifc, int unit, caddr_t params)
lagg_proto_attach(sc, LAGG_PROTO_DEFAULT);
- LAGG_LOCK_INIT(sc);
SLIST_INIT(&sc->sc_ports);
- TASK_INIT(&sc->sc_lladdr_task, 0, lagg_port_setlladdr, sc);
/* Initialise pseudo media types */
ifmedia_init(&sc->sc_media, 0, lagg_media_change,
@@ -516,6 +515,7 @@ lagg_clone_create(struct if_clone *ifc, int unit, caddr_t params)
LAGG_LIST_LOCK();
SLIST_INSERT_HEAD(&V_lagg_list, sc, sc_entries);
LAGG_LIST_UNLOCK();
+ LAGG_XUNLOCK(sc);
return (0);
}
@@ -526,8 +526,8 @@ lagg_clone_destroy(struct ifnet *ifp)
struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
struct lagg_port *lp;
- LAGG_WLOCK(sc);
-
+ LAGG_XLOCK(sc);
+ sc->sc_destroying = 1;
lagg_stop(sc);
ifp->if_flags &= ~IFF_UP;
@@ -535,15 +535,15 @@ lagg_clone_destroy(struct ifnet *ifp)
EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
/* Shutdown and remove lagg ports */
- while ((lp = SLIST_FIRST(&sc->sc_ports)) != NULL) {
- lp->lp_detaching = LAGG_CLONE_DESTROY;
+ while ((lp = SLIST_FIRST(&sc->sc_ports)) != NULL)
lagg_port_destroy(lp, 1);
- }
+
/* Unhook the aggregation protocol */
+ LAGG_WLOCK(sc);
lagg_proto_detach(sc);
LAGG_UNLOCK_ASSERT(sc);
+ LAGG_XUNLOCK(sc);
- taskqueue_drain(taskqueue_swi, &sc->sc_lladdr_task);
ifmedia_removeall(&sc->sc_media);
ether_ifdetach(ifp);
if_free(ifp);
@@ -552,47 +552,11 @@ lagg_clone_destroy(struct ifnet *ifp)
SLIST_REMOVE(&V_lagg_list, sc, lagg_softc, sc_entries);
LAGG_LIST_UNLOCK();
+ LAGG_SX_DESTROY(sc);
LAGG_LOCK_DESTROY(sc);
free(sc, M_DEVBUF);
}
-/*
- * Set link-layer address on the lagg interface itself.
- *
- * Set noinline to be dtrace-friendly
- */
-static __noinline void
-lagg_lladdr(struct lagg_softc *sc, uint8_t *lladdr)
-{
- struct ifnet *ifp = sc->sc_ifp;
- struct lagg_port lp;
-
- if (memcmp(lladdr, IF_LLADDR(ifp), ETHER_ADDR_LEN) == 0)
- return;
-
- LAGG_WLOCK_ASSERT(sc);
- /*
- * Set the link layer address on the lagg interface.
- * lagg_proto_lladdr() notifies the MAC change to
- * the aggregation protocol. iflladdr_event handler which
- * may trigger gratuitous ARPs for INET will be handled in
- * a taskqueue.
- */
- bcopy(lladdr, IF_LLADDR(ifp), ETHER_ADDR_LEN);
- lagg_proto_lladdr(sc);
-
- /*
- * Send notification request for lagg interface
- * itself. Note that new lladdr is already set.
- */
- bzero(&lp, sizeof(lp));
- lp.lp_ifp = sc->sc_ifp;
- lp.lp_softc = sc;
-
- /* Do not request lladdr change */
- lagg_port_lladdr(&lp, lladdr, LAGG_LLQTYPE_VIRT);
-}
-
static void
lagg_capabilities(struct lagg_softc *sc)
{
@@ -601,7 +565,7 @@ lagg_capabilities(struct lagg_softc *sc)
u_long hwa = ~0UL;
struct ifnet_hw_tsomax hw_tsomax;
- LAGG_WLOCK_ASSERT(sc);
+ LAGG_XLOCK_ASSERT(sc);
memset(&hw_tsomax, 0, sizeof(hw_tsomax));
@@ -629,97 +593,10 @@ lagg_capabilities(struct lagg_softc *sc)
if_printf(sc->sc_ifp,
"capabilities 0x%08x enabled 0x%08x\n", cap, ena);
}
-}
-
-/*
- * Enqueue interface lladdr notification.
- * If request is already queued, it is updated.
- * If setting lladdr is also desired, @do_change has to be set to 1.
- *
- * Set noinline to be dtrace-friendly
- */
-static __noinline void
-lagg_port_lladdr(struct lagg_port *lp, uint8_t *lladdr, lagg_llqtype llq_type)
-{
- struct lagg_softc *sc = lp->lp_softc;
- struct ifnet *ifp = lp->lp_ifp;
- struct lagg_llq *llq;
-
- LAGG_WLOCK_ASSERT(sc);
-
- /*
- * Do not enqueue requests where lladdr is the same for
- * "physical" interfaces (e.g. ports in lagg)
- */
- if (llq_type == LAGG_LLQTYPE_PHYS &&
- memcmp(IF_LLADDR(ifp), lladdr, ETHER_ADDR_LEN) == 0)
- return;
-
- /* Check to make sure its not already queued to be changed */
- SLIST_FOREACH(llq, &sc->sc_llq_head, llq_entries) {
- if (llq->llq_ifp == ifp) {
- /* Update lladdr, it may have changed */
- bcopy(lladdr, llq->llq_lladdr, ETHER_ADDR_LEN);
- return;
- }
- }
-
- llq = malloc(sizeof(struct lagg_llq), M_DEVBUF, M_NOWAIT | M_ZERO);
- if (llq == NULL) /* XXX what to do */
- return;
-
- if_ref(ifp);
- llq->llq_ifp = ifp;
- llq->llq_type = llq_type;
- bcopy(lladdr, llq->llq_lladdr, ETHER_ADDR_LEN);
- /* XXX: We should insert to tail */
- SLIST_INSERT_HEAD(&sc->sc_llq_head, llq, llq_entries);
-
- taskqueue_enqueue(taskqueue_swi, &sc->sc_lladdr_task);
-}
-
-/*
- * Set the interface MAC address from a taskqueue to avoid a LOR.
- *
- * Set noinline to be dtrace-friendly
- */
-static __noinline void
-lagg_port_setlladdr(void *arg, int pending)
-{
- struct lagg_softc *sc = (struct lagg_softc *)arg;
- struct lagg_llq *llq, *head;
- struct ifnet *ifp;
-
- /* Grab a local reference of the queue and remove it from the softc */
- LAGG_WLOCK(sc);
- head = SLIST_FIRST(&sc->sc_llq_head);
- SLIST_FIRST(&sc->sc_llq_head) = NULL;
- LAGG_WUNLOCK(sc);
-
- /*
- * Traverse the queue and set the lladdr on each ifp. It is safe to do
- * unlocked as we have the only reference to it.
- */
- for (llq = head; llq != NULL; llq = head) {
- ifp = llq->llq_ifp;
-
- CURVNET_SET(ifp->if_vnet);
- /*
- * Set the link layer address on the laggport interface.
- * Note that if_setlladdr() or iflladdr_event handler
- * may result in arp transmission / lltable updates.
- */
- if (llq->llq_type == LAGG_LLQTYPE_PHYS)
- if_setlladdr(ifp, llq->llq_lladdr,
- ETHER_ADDR_LEN);
- else
- EVENTHANDLER_INVOKE(iflladdr_event, ifp);
- CURVNET_RESTORE();
- head = SLIST_NEXT(llq, llq_entries);
- if_rele(ifp);
- free(llq, M_DEVBUF);
- }
+ /* Apply unified capabilities back to the lagg ports. */
+ SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
+ lagg_setcaps(lp, ena);
}
static int
@@ -730,7 +607,7 @@ lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp)
int error, i;
uint64_t *pval;
- LAGG_WLOCK_ASSERT(sc);
+ LAGG_XLOCK_ASSERT(sc);
/* Limit the maximal number of lagg ports */
if (sc->sc_count >= LAGG_MAX_PORTS)
@@ -758,9 +635,8 @@ lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp)
return (EINVAL);
}
- if ((lp = malloc(sizeof(struct lagg_port),
- M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL)
- return (ENOMEM);
+ lp = malloc(sizeof(struct lagg_port), M_DEVBUF, M_WAITOK|M_ZERO);
+ lp->lp_softc = sc;
/* Check if port is a stacked lagg */
LAGG_LIST_LOCK();
@@ -783,6 +659,26 @@ lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp)
}
LAGG_LIST_UNLOCK();
+ if_ref(ifp);
+ lp->lp_ifp = ifp;
+
+ bcopy(IF_LLADDR(ifp), lp->lp_lladdr, ETHER_ADDR_LEN);
+ lp->lp_ifcapenable = ifp->if_capenable;
+ if (SLIST_EMPTY(&sc->sc_ports)) {
+ LAGG_WLOCK(sc);
+ bcopy(IF_LLADDR(ifp), IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
+ lagg_proto_lladdr(sc);
+ LAGG_WUNLOCK(sc);
+ EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
+ } else {
+ if_setlladdr(ifp, IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
+ }
+ lagg_setflags(lp, 1);
+
+ LAGG_WLOCK(sc);
+ if (SLIST_EMPTY(&sc->sc_ports))
+ sc->sc_primary = lp;
+
/* Change the interface type */
lp->lp_iftype = ifp->if_type;
ifp->if_type = IFT_IEEE8023ADLAG;
@@ -792,25 +688,10 @@ lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp)
lp->lp_output = ifp->if_output;
ifp->if_output = lagg_port_output;
- if_ref(ifp);
- lp->lp_ifp = ifp;
- lp->lp_softc = sc;
-
- /* Save port link layer address */
- bcopy(IF_LLADDR(ifp), lp->lp_lladdr, ETHER_ADDR_LEN);
-
- if (SLIST_EMPTY(&sc->sc_ports)) {
- sc->sc_primary = lp;
- /* First port in lagg. Update/notify lagg lladdress */
- lagg_lladdr(sc, IF_LLADDR(ifp));
- } else {
-
- /*
- * Update link layer address for this port and
- * send notifications to other subsystems.
- */
- lagg_port_lladdr(lp, IF_LLADDR(sc->sc_ifp), LAGG_LLQTYPE_PHYS);
- }
+ /* Read port counters */
+ pval = lp->port_counters.val;
+ for (i = 0; i < IFCOUNTERS; i++, pval++)
+ *pval = ifp->if_get_counter(ifp, i);
/*
* Insert into the list of ports.
@@ -831,24 +712,21 @@ lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp)
SLIST_INSERT_HEAD(&sc->sc_ports, lp, lp_entries);
sc->sc_count++;
- /* Update lagg capabilities */
- lagg_capabilities(sc);
- lagg_linkstate(sc);
-
- /* Read port counters */
- pval = lp->port_counters.val;
- for (i = 0; i < IFCOUNTERS; i++, pval++)
- *pval = ifp->if_get_counter(ifp, i);
- /* Add multicast addresses and interface flags to this port */
- lagg_ether_cmdmulti(lp, 1);
- lagg_setflags(lp, 1);
+ lagg_setmulti(lp);
if ((error = lagg_proto_addport(sc, lp)) != 0) {
/* Remove the port, without calling pr_delport. */
lagg_port_destroy(lp, 0);
+ LAGG_UNLOCK_ASSERT(sc);
return (error);
}
+ LAGG_WUNLOCK(sc);
+
+ /* Update lagg capabilities */
+ lagg_capabilities(sc);
+ lagg_linkstate(sc);
+
return (0);
}
@@ -860,8 +738,7 @@ lagg_port_checkstacking(struct lagg_softc *sc)
struct lagg_port *lp;
int m = 0;
- LAGG_WLOCK_ASSERT(sc);
-
+ LAGG_SXLOCK_ASSERT(sc);
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
if (lp->lp_flags & LAGG_PORT_STACK) {
sc_ptr = (struct lagg_softc *)lp->lp_ifp->if_softc;
@@ -878,25 +755,20 @@ lagg_port_destroy(struct lagg_port *lp, int rundelport)
{
struct lagg_softc *sc = lp->lp_softc;
struct lagg_port *lp_ptr, *lp0;
- struct lagg_llq *llq;
struct ifnet *ifp = lp->lp_ifp;
uint64_t *pval, vdiff;
int i;
- LAGG_WLOCK_ASSERT(sc);
+ LAGG_XLOCK_ASSERT(sc);
- if (rundelport)
+ if (rundelport) {
+ LAGG_WLOCK(sc);
lagg_proto_delport(sc, lp);
+ } else
+ LAGG_WLOCK_ASSERT(sc);
- /*
- * Remove multicast addresses and interface flags from this port and
- * reset the MAC address, skip if the interface is being detached.
- */
- if (lp->lp_detaching == 0) {
- lagg_ether_cmdmulti(lp, 0);
- lagg_setflags(lp, 0);
- lagg_port_lladdr(lp, lp->lp_lladdr, LAGG_LLQTYPE_PHYS);
- }
+ if (lp->lp_detaching == 0)
+ lagg_clrmulti(lp);
/* Restore interface */
ifp->if_type = lp->lp_iftype;
@@ -919,43 +791,37 @@ lagg_port_destroy(struct lagg_port *lp, int rundelport)
if (lp == sc->sc_primary) {
uint8_t lladdr[ETHER_ADDR_LEN];
- if ((lp0 = SLIST_FIRST(&sc->sc_ports)) == NULL) {
+ if ((lp0 = SLIST_FIRST(&sc->sc_ports)) == NULL)
bzero(&lladdr, ETHER_ADDR_LEN);
- } else {
- bcopy(lp0->lp_lladdr,
- lladdr, ETHER_ADDR_LEN);
- }
- if (lp->lp_detaching != LAGG_CLONE_DESTROY)
- lagg_lladdr(sc, lladdr);
-
- /* Mark lp0 as new primary */
+ else
+ bcopy(lp0->lp_lladdr, lladdr, ETHER_ADDR_LEN);
sc->sc_primary = lp0;
+ if (sc->sc_destroying == 0) {
+ bcopy(lladdr, IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
+ lagg_proto_lladdr(sc);
+ LAGG_WUNLOCK(sc);
+ EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
+ } else
+ LAGG_WUNLOCK(sc);
/*
- * Enqueue lladdr update/notification for each port
- * (new primary needs update as well, to switch from
- * old lladdr to its 'real' one).
+ * Update lladdr for each port (new primary needs update
+ * as well, to switch from old lladdr to its 'real' one)
*/
SLIST_FOREACH(lp_ptr, &sc->sc_ports, lp_entries)
- lagg_port_lladdr(lp_ptr, lladdr, LAGG_LLQTYPE_PHYS);
- }
-
- /* Remove any pending lladdr changes from the queue */
- if (lp->lp_detaching != 0) {
- SLIST_FOREACH(llq, &sc->sc_llq_head, llq_entries) {
- if (llq->llq_ifp == ifp) {
- SLIST_REMOVE(&sc->sc_llq_head, llq, lagg_llq,
- llq_entries);
- if_rele(llq->llq_ifp);
- free(llq, M_DEVBUF);
- break; /* Only appears once */
- }
- }
- }
+ if_setlladdr(lp_ptr->lp_ifp, lladdr, ETHER_ADDR_LEN);
+ } else
+ LAGG_WUNLOCK(sc);
if (lp->lp_ifflags)
if_printf(ifp, "%s: lp_ifflags unclean\n", __func__);
+ if (lp->lp_detaching == 0) {
+ lagg_setflags(lp, 0);
+ lagg_setcaps(lp, lp->lp_ifcapenable);
+ if_setlladdr(ifp, lp->lp_lladdr, ETHER_ADDR_LEN);
+ }
+
if_rele(ifp);
free(lp, M_DEVBUF);
@@ -973,7 +839,6 @@ lagg_port_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
struct lagg_softc *sc;
struct lagg_port *lp = NULL;
int error = 0;
- struct rm_priotracker tracker;
/* Should be checked by the caller */
if (ifp->if_type != IFT_IEEE8023ADLAG ||
@@ -988,15 +853,15 @@ lagg_port_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
break;
}
- LAGG_RLOCK(sc, &tracker);
+ LAGG_SLOCK(sc);
if ((lp = ifp->if_lagg) == NULL || lp->lp_softc != sc) {
error = ENOENT;
- LAGG_RUNLOCK(sc, &tracker);
+ LAGG_SUNLOCK(sc);
break;
}
lagg_port2req(lp, rp);
- LAGG_RUNLOCK(sc, &tracker);
+ LAGG_SUNLOCK(sc);
break;
case SIOCSIFCAP:
@@ -1009,9 +874,9 @@ lagg_port_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
break;
/* Update lagg interface capabilities */
- LAGG_WLOCK(sc);
+ LAGG_XLOCK(sc);
lagg_capabilities(sc);
- LAGG_WUNLOCK(sc);
+ LAGG_XUNLOCK(sc);
break;
case SIOCSIFMTU:
@@ -1121,10 +986,10 @@ lagg_port_ifdetach(void *arg __unused, struct ifnet *ifp)
sc = lp->lp_softc;
- LAGG_WLOCK(sc);
- lp->lp_detaching = LAGG_PORT_DETACH;
+ LAGG_XLOCK(sc);
+ lp->lp_detaching = 1;
lagg_port_destroy(lp, 1);
- LAGG_WUNLOCK(sc);
+ LAGG_XUNLOCK(sc);
}
static void
@@ -1174,10 +1039,11 @@ lagg_init(void *xsc)
struct ifnet *ifp = sc->sc_ifp;
struct lagg_port *lp;
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ LAGG_XLOCK(sc);
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ LAGG_XUNLOCK(sc);
return;
-
- LAGG_WLOCK(sc);
+ }
ifp->if_drv_flags |= IFF_DRV_RUNNING;
@@ -1186,12 +1052,15 @@ lagg_init(void *xsc)
* This might be if_setlladdr() notification
* that lladdr has been changed.
*/
- SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
- lagg_port_lladdr(lp, IF_LLADDR(ifp), LAGG_LLQTYPE_PHYS);
+ SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
+ if (memcmp(IF_LLADDR(ifp), IF_LLADDR(lp->lp_ifp),
+ ETHER_ADDR_LEN) != 0)
+ if_setlladdr(lp->lp_ifp, IF_LLADDR(ifp), ETHER_ADDR_LEN);
+ }
lagg_proto_init(sc);
- LAGG_WUNLOCK(sc);
+ LAGG_XUNLOCK(sc);
}
static void
@@ -1199,7 +1068,7 @@ lagg_stop(struct lagg_softc *sc)
{
struct ifnet *ifp = sc->sc_ifp;
- LAGG_WLOCK_ASSERT(sc);
+ LAGG_XLOCK_ASSERT(sc);
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
return;
@@ -1223,22 +1092,14 @@ lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
struct thread *td = curthread;
char *buf, *outbuf;
int count, buflen, len, error = 0;
- struct rm_priotracker tracker;
bzero(&rpbuf, sizeof(rpbuf));
switch (cmd) {
case SIOCGLAGG:
- LAGG_RLOCK(sc, &tracker);
- count = 0;
- SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
- count++;
- buflen = count * sizeof(struct lagg_reqport);
- LAGG_RUNLOCK(sc, &tracker);
-
+ LAGG_SLOCK(sc);
+ buflen = sc->sc_count * sizeof(struct lagg_reqport);
outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
-
- LAGG_RLOCK(sc, &tracker);
ra->ra_proto = sc->sc_proto;
lagg_proto_request(sc, &ra->ra_psc);
count = 0;
@@ -1254,7 +1115,7 @@ lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
buf += sizeof(rpbuf);
len -= sizeof(rpbuf);
}
- LAGG_RUNLOCK(sc, &tracker);
+ LAGG_SUNLOCK(sc);
ra->ra_ports = count;
ra->ra_size = count * sizeof(rpbuf);
error = copyout(outbuf, ra->ra_port, ra->ra_size);
@@ -1269,12 +1130,15 @@ lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
break;
}
+ LAGG_XLOCK(sc);
LAGG_WLOCK(sc);
lagg_proto_detach(sc);
LAGG_UNLOCK_ASSERT(sc);
lagg_proto_attach(sc, ra->ra_proto);
+ LAGG_XUNLOCK(sc);
break;
case SIOCGLAGGOPTS:
+ LAGG_SLOCK(sc);
ro->ro_opts = sc->sc_opts;
if (sc->sc_proto == LAGG_PROTO_LACP) {
struct lacp_softc *lsc;
@@ -1298,6 +1162,7 @@ lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
ro->ro_bkt = sc->sc_bkt;
ro->ro_flapping = sc->sc_flapping;
ro->ro_flowid_shift = sc->flowid_shift;
+ LAGG_SUNLOCK(sc);
break;
case SIOCSLAGGOPTS:
if (sc->sc_proto == LAGG_PROTO_ROUNDROBIN) {
@@ -1339,13 +1204,13 @@ lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
break;
}
- LAGG_WLOCK(sc);
+ LAGG_XLOCK(sc);
if (valid == 0 ||
(lacp == 1 && sc->sc_proto != LAGG_PROTO_LACP)) {
/* Invalid combination of options specified. */
error = EINVAL;
- LAGG_WUNLOCK(sc);
+ LAGG_XUNLOCK(sc);
break; /* Return from SIOCSLAGGOPTS. */
}
/*
@@ -1400,18 +1265,18 @@ lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
break;
}
}
- LAGG_WUNLOCK(sc);
+ LAGG_XUNLOCK(sc);
break;
case SIOCGLAGGFLAGS:
rf->rf_flags = 0;
- LAGG_RLOCK(sc, &tracker);
+ LAGG_SLOCK(sc);
if (sc->sc_flags & MBUF_HASHFLAG_L2)
rf->rf_flags |= LAGG_F_HASHL2;
if (sc->sc_flags & MBUF_HASHFLAG_L3)
rf->rf_flags |= LAGG_F_HASHL3;
if (sc->sc_flags & MBUF_HASHFLAG_L4)
rf->rf_flags |= LAGG_F_HASHL4;
- LAGG_RUNLOCK(sc, &tracker);
+ LAGG_SUNLOCK(sc);
break;
case SIOCSLAGGHASH:
error = priv_check(td, PRIV_NET_LAGG);
@@ -1421,7 +1286,7 @@ lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
error = EINVAL;
break;
}
- LAGG_WLOCK(sc);
+ LAGG_XLOCK(sc);
sc->sc_flags = 0;
if (rf->rf_flags & LAGG_F_HASHL2)
sc->sc_flags |= MBUF_HASHFLAG_L2;
@@ -1429,7 +1294,7 @@ lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
sc->sc_flags |= MBUF_HASHFLAG_L3;
if (rf->rf_flags & LAGG_F_HASHL4)
sc->sc_flags |= MBUF_HASHFLAG_L4;
- LAGG_WUNLOCK(sc);
+ LAGG_XUNLOCK(sc);
break;
case SIOCGLAGGPORT:
if (rp->rp_portname[0] == '\0' ||
@@ -1438,17 +1303,17 @@ lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
break;
}
- LAGG_RLOCK(sc, &tracker);
+ LAGG_SLOCK(sc);
if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL ||
lp->lp_softc != sc) {
error = ENOENT;
- LAGG_RUNLOCK(sc, &tracker);
+ LAGG_SUNLOCK(sc);
if_rele(tpif);
break;
}
lagg_port2req(lp, rp);
- LAGG_RUNLOCK(sc, &tracker);
+ LAGG_SUNLOCK(sc);
if_rele(tpif);
break;
case SIOCSLAGGPORT:
@@ -1480,9 +1345,9 @@ lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
tpif->if_xname);
}
#endif
- LAGG_WLOCK(sc);
+ LAGG_XLOCK(sc);
error = lagg_port_create(sc, tpif);
- LAGG_WUNLOCK(sc);
+ LAGG_XUNLOCK(sc);
if_rele(tpif);
break;
case SIOCSLAGGDELPORT:
@@ -1495,26 +1360,25 @@ lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
break;
}
- LAGG_WLOCK(sc);
+ LAGG_XLOCK(sc);
if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL ||
lp->lp_softc != sc) {
error = ENOENT;
- LAGG_WUNLOCK(sc);
+ LAGG_XUNLOCK(sc);
if_rele(tpif);
break;
}
error = lagg_port_destroy(lp, 1);
- LAGG_WUNLOCK(sc);
+ LAGG_XUNLOCK(sc);
if_rele(tpif);
break;
case SIOCSIFFLAGS:
/* Set flags on ports too */
- LAGG_WLOCK(sc);
+ LAGG_XLOCK(sc);
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
lagg_setflags(lp, 1);
}
- LAGG_WUNLOCK(sc);
if (!(ifp->if_flags & IFF_UP) &&
(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
@@ -1522,23 +1386,28 @@ lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
* If interface is marked down and it is running,
* then stop and disable it.
*/
- LAGG_WLOCK(sc);
lagg_stop(sc);
- LAGG_WUNLOCK(sc);
+ LAGG_XUNLOCK(sc);
} else if ((ifp->if_flags & IFF_UP) &&
!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
/*
* If interface is marked up and it is stopped, then
* start it.
*/
+ LAGG_XUNLOCK(sc);
(*ifp->if_init)(sc);
- }
+ } else
+ LAGG_XUNLOCK(sc);
break;
case SIOCADDMULTI:
case SIOCDELMULTI:
LAGG_WLOCK(sc);
- error = lagg_ether_setmulti(sc);
+ SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
+ lagg_clrmulti(lp);
+ lagg_setmulti(lp);
+ }
LAGG_WUNLOCK(sc);
+ error = 0;
break;
case SIOCSIFMEDIA:
case SIOCGIFMEDIA:
@@ -1546,8 +1415,18 @@ lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
break;
case SIOCSIFCAP:
+ LAGG_XLOCK(sc);
+ SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
+ if (lp->lp_ioctl != NULL)
+ (*lp->lp_ioctl)(lp->lp_ifp, cmd, data);
+ }
+ lagg_capabilities(sc);
+ LAGG_XUNLOCK(sc);
+ error = 0;
+ break;
+
case SIOCSIFMTU:
- /* Do not allow the MTU or caps to be directly changed */
+ /* Do not allow the MTU to be directly changed */
error = EINVAL;
break;
@@ -1559,67 +1438,69 @@ lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
}
static int
-lagg_ether_setmulti(struct lagg_softc *sc)
+lagg_setmulti(struct lagg_port *lp)
{
- struct lagg_port *lp;
+ struct lagg_softc *sc = lp->lp_softc;
+ struct ifnet *ifp = lp->lp_ifp;
+ struct ifnet *scifp = sc->sc_ifp;
+ struct lagg_mc *mc;
+ struct ifmultiaddr *ifma;
+ int error;
LAGG_WLOCK_ASSERT(sc);
-
- SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
- /* First, remove any existing filter entries. */
- lagg_ether_cmdmulti(lp, 0);
- /* copy all addresses from the lagg interface to the port */
- lagg_ether_cmdmulti(lp, 1);
+ IF_ADDR_WLOCK(scifp);
+ TAILQ_FOREACH(ifma, &scifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_LINK)
+ continue;
+ mc = malloc(sizeof(struct lagg_mc), M_DEVBUF, M_NOWAIT);
+ if (mc == NULL) {
+ IF_ADDR_WUNLOCK(scifp);
+ return (ENOMEM);
+ }
+ bcopy(ifma->ifma_addr, &mc->mc_addr,
+ ifma->ifma_addr->sa_len);
+ mc->mc_addr.sdl_index = ifp->if_index;
+ mc->mc_ifma = NULL;
+ SLIST_INSERT_HEAD(&lp->lp_mc_head, mc, mc_entries);
+ }
+ IF_ADDR_WUNLOCK(scifp);
+ SLIST_FOREACH (mc, &lp->lp_mc_head, mc_entries) {
+ error = if_addmulti(ifp,
+ (struct sockaddr *)&mc->mc_addr, &mc->mc_ifma);
+ if (error)
+ return (error);
}
return (0);
}
static int
-lagg_ether_cmdmulti(struct lagg_port *lp, int set)
+lagg_clrmulti(struct lagg_port *lp)
{
- struct lagg_softc *sc = lp->lp_softc;
- struct ifnet *ifp = lp->lp_ifp;
- struct ifnet *scifp = sc->sc_ifp;
struct lagg_mc *mc;
- struct ifmultiaddr *ifma;
- int error;
- LAGG_WLOCK_ASSERT(sc);
-
- if (set) {
- IF_ADDR_WLOCK(scifp);
- TAILQ_FOREACH(ifma, &scifp->if_multiaddrs, ifma_link) {
- if (ifma->ifma_addr->sa_family != AF_LINK)
- continue;
- mc = malloc(sizeof(struct lagg_mc), M_DEVBUF, M_NOWAIT);
- if (mc == NULL) {
- IF_ADDR_WUNLOCK(scifp);
- return (ENOMEM);
- }
- bcopy(ifma->ifma_addr, &mc->mc_addr,
- ifma->ifma_addr->sa_len);
- mc->mc_addr.sdl_index = ifp->if_index;
- mc->mc_ifma = NULL;
- SLIST_INSERT_HEAD(&lp->lp_mc_head, mc, mc_entries);
- }
- IF_ADDR_WUNLOCK(scifp);
- SLIST_FOREACH (mc, &lp->lp_mc_head, mc_entries) {
- error = if_addmulti(ifp,
- (struct sockaddr *)&mc->mc_addr, &mc->mc_ifma);
- if (error)
- return (error);
- }
- } else {
- while ((mc = SLIST_FIRST(&lp->lp_mc_head)) != NULL) {
- SLIST_REMOVE(&lp->lp_mc_head, mc, lagg_mc, mc_entries);
- if (mc->mc_ifma && lp->lp_detaching == 0)
- if_delmulti_ifma(mc->mc_ifma);
- free(mc, M_DEVBUF);
- }
+ LAGG_WLOCK_ASSERT(lp->lp_softc);
+ while ((mc = SLIST_FIRST(&lp->lp_mc_head)) != NULL) {
+ SLIST_REMOVE(&lp->lp_mc_head, mc, lagg_mc, mc_entries);
+ if (mc->mc_ifma && lp->lp_detaching == 0)
+ if_delmulti_ifma(mc->mc_ifma);
+ free(mc, M_DEVBUF);
}
return (0);
}
+static int
+lagg_setcaps(struct lagg_port *lp, int cap)
+{
+ struct ifreq ifr;
+
+ if (lp->lp_ifp->if_capenable == cap)
+ return (0);
+ if (lp->lp_ioctl == NULL)
+ return (ENXIO);
+ ifr.ifr_reqcap = cap;
+ return ((*lp->lp_ioctl)(lp->lp_ifp, SIOCSIFCAP, (caddr_t)&ifr));
+}
+
/* Handle a ref counted flag that should be set on the lagg port as well */
static int
lagg_setflag(struct lagg_port *lp, int flag, int status,
@@ -1630,7 +1511,7 @@ lagg_setflag(struct lagg_port *lp, int flag, int status,
struct ifnet *ifp = lp->lp_ifp;
int error;
- LAGG_WLOCK_ASSERT(sc);
+ LAGG_XLOCK_ASSERT(sc);
status = status ? (scifp->if_flags & flag) : 0;
/* Now "status" contains the flag value or 0 */
@@ -1764,17 +1645,16 @@ lagg_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
struct lagg_port *lp;
- struct rm_priotracker tracker;
imr->ifm_status = IFM_AVALID;
imr->ifm_active = IFM_ETHER | IFM_AUTO;
- LAGG_RLOCK(sc, &tracker);
+ LAGG_SLOCK(sc);
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
if (LAGG_PORTACTIVE(lp))
imr->ifm_status |= IFM_ACTIVE;
}
- LAGG_RUNLOCK(sc, &tracker);
+ LAGG_SUNLOCK(sc);
}
static void
@@ -1784,6 +1664,8 @@ lagg_linkstate(struct lagg_softc *sc)
int new_link = LINK_STATE_DOWN;
uint64_t speed;
+ LAGG_XLOCK_ASSERT(sc);
+
/* Our link is considered up if at least one of our ports is active */
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
if (lp->lp_ifp->if_link_state == LINK_STATE_UP) {
@@ -1824,10 +1706,10 @@ lagg_port_state(struct ifnet *ifp, int state)
if (sc == NULL)
return;
- LAGG_WLOCK(sc);
+ LAGG_XLOCK(sc);
lagg_linkstate(sc);
lagg_proto_linkstate(sc, lp);
- LAGG_WUNLOCK(sc);
+ LAGG_XUNLOCK(sc);
}
struct lagg_port *
@@ -1836,7 +1718,6 @@ lagg_link_active(struct lagg_softc *sc, struct lagg_port *lp)
struct lagg_port *lp_next, *rval = NULL;
// int new_link = LINK_STATE_DOWN;
- LAGG_RLOCK_ASSERT(sc);
/*
* Search a port which reports an active link state.
*/
@@ -2193,6 +2074,8 @@ lagg_lacp_lladdr(struct lagg_softc *sc)
{
struct lagg_port *lp;
+ LAGG_SXLOCK_ASSERT(sc);
+
/* purge all the lacp ports */
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
lacp_port_destroy(lp);