Diffstat (limited to 'sys/dev/netmap/if_re_netmap.h')
 sys/dev/netmap/if_re_netmap.h | 384 ++++++++++++++++++----------------------
 1 file changed, 172 insertions(+), 212 deletions(-)
diff --git a/sys/dev/netmap/if_re_netmap.h b/sys/dev/netmap/if_re_netmap.h
index f0f1f19..10abe4f 100644
--- a/sys/dev/netmap/if_re_netmap.h
+++ b/sys/dev/netmap/if_re_netmap.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Luigi Rizzo. All rights reserved.
+ * Copyright (C) 2011-2014 Luigi Rizzo. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,8 +26,9 @@
/*
* $FreeBSD$
*
- * netmap support for "re"
- * For details on netmap support please see ixgbe_netmap.h
+ * netmap support for: re
+ *
+ * For more details on netmap support please see ixgbe_netmap.h
*/
@@ -39,71 +40,24 @@
/*
- * wrapper to export locks to the generic code
- * We should not use the tx/rx locks
- */
-static void
-re_netmap_lock_wrapper(struct ifnet *ifp, int what, u_int queueid)
-{
- struct rl_softc *adapter = ifp->if_softc;
-
- switch (what) {
- case NETMAP_CORE_LOCK:
- RL_LOCK(adapter);
- break;
- case NETMAP_CORE_UNLOCK:
- RL_UNLOCK(adapter);
- break;
-
- case NETMAP_TX_LOCK:
- case NETMAP_RX_LOCK:
- case NETMAP_TX_UNLOCK:
- case NETMAP_RX_UNLOCK:
- D("invalid lock call %d, no tx/rx locks here", what);
- break;
- }
-}
-
-
-/*
- * support for netmap register/unregisted. We are already under core lock.
- * only called on the first register or the last unregister.
+ * Register/unregister. We are already under netmap lock.
*/
static int
-re_netmap_reg(struct ifnet *ifp, int onoff)
+re_netmap_reg(struct netmap_adapter *na, int onoff)
{
+ struct ifnet *ifp = na->ifp;
struct rl_softc *adapter = ifp->if_softc;
- struct netmap_adapter *na = NA(ifp);
- int error = 0;
-
- if (na == NULL)
- return EINVAL;
- /* Tell the stack that the interface is no longer active */
- ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
-
- re_stop(adapter);
+ RL_LOCK(adapter);
+ re_stop(adapter); /* also clears IFF_DRV_RUNNING */
if (onoff) {
- ifp->if_capenable |= IFCAP_NETMAP;
-
- /* save if_transmit to restore it later */
- na->if_transmit = ifp->if_transmit;
- ifp->if_transmit = netmap_start;
-
- re_init_locked(adapter);
-
- if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == 0) {
- error = ENOMEM;
- goto fail;
- }
+ nm_set_native_flags(na);
} else {
-fail:
- /* restore if_transmit */
- ifp->if_transmit = na->if_transmit;
- ifp->if_capenable &= ~IFCAP_NETMAP;
- re_init_locked(adapter); /* also enables intr */
+ nm_clear_native_flags(na);
}
- return (error);
+ re_init_locked(adapter); /* also enables intr */
+ RL_UNLOCK(adapter);
+ return (ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1);
}
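
The converted re_netmap_reg() above follows the pattern netmap drivers adopted once locking moved into the netmap core: quiesce the NIC under the driver lock, flip the netmap flags, and restart. A minimal sketch of that convention, with the xx_* names as hypothetical driver placeholders (only nm_set_native_flags()/nm_clear_native_flags() are actual netmap calls):

static int
xx_netmap_reg(struct netmap_adapter *na, int onoff)
{
	struct ifnet *ifp = na->ifp;
	struct xx_softc *sc = ifp->if_softc;	/* hypothetical softc */

	XX_LOCK(sc);
	xx_stop(sc);		/* quiesce the NIC, clears IFF_DRV_RUNNING */
	if (onoff)
		nm_set_native_flags(na);	/* enter native netmap mode */
	else
		nm_clear_native_flags(na);	/* restore normal operation */
	xx_init_locked(sc);	/* restart; rings now point at the right buffers */
	XX_UNLOCK(sc);
	return (ifp->if_drv_flags & IFF_DRV_RUNNING) ? 0 : 1;
}
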
@@ -111,97 +65,102 @@ fail:
* Reconcile kernel and user view of the transmit ring.
*/
static int
-re_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
+re_netmap_txsync(struct netmap_adapter *na, u_int ring_nr, int flags)
{
- struct rl_softc *sc = ifp->if_softc;
- struct rl_txdesc *txd = sc->rl_ldata.rl_tx_desc;
- struct netmap_adapter *na = NA(sc->rl_ifp);
+ struct ifnet *ifp = na->ifp;
struct netmap_kring *kring = &na->tx_rings[ring_nr];
struct netmap_ring *ring = kring->ring;
- int j, k, l, n, lim = kring->nkr_num_slots - 1;
-
- k = ring->cur;
- if (k > lim)
- return netmap_ring_reinit(kring);
+ u_int nm_i; /* index into the netmap ring */
+ u_int nic_i; /* index into the NIC ring */
+ u_int n;
+ u_int const lim = kring->nkr_num_slots - 1;
+ u_int const head = kring->rhead;
- if (do_lock)
- RL_LOCK(sc);
+ /* device-specific */
+ struct rl_softc *sc = ifp->if_softc;
+ struct rl_txdesc *txd = sc->rl_ldata.rl_tx_desc;
- /* Sync the TX descriptor list */
bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
- sc->rl_ldata.rl_tx_list_map,
- BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
-
- /* XXX move after the transmissions */
- /* record completed transmissions */
- for (n = 0, l = sc->rl_ldata.rl_tx_considx;
- l != sc->rl_ldata.rl_tx_prodidx;
- n++, l = RL_TX_DESC_NXT(sc, l)) {
- uint32_t cmdstat =
- le32toh(sc->rl_ldata.rl_tx_list[l].rl_cmdstat);
- if (cmdstat & RL_TDESC_STAT_OWN)
- break;
- }
- if (n > 0) {
- sc->rl_ldata.rl_tx_considx = l;
- sc->rl_ldata.rl_tx_free += n;
- kring->nr_hwavail += n;
- }
+ sc->rl_ldata.rl_tx_list_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); // XXX extra postwrite ?
- /* update avail to what the kernel knows */
- ring->avail = kring->nr_hwavail;
+ /*
+ * First part: process new packets to send.
+ */
+ nm_i = kring->nr_hwcur;
+ if (nm_i != head) { /* we have new packets to send */
+ nic_i = sc->rl_ldata.rl_tx_prodidx;
+ // XXX or netmap_idx_k2n(kring, nm_i);
+
+ for (n = 0; nm_i != head; n++) {
+ struct netmap_slot *slot = &ring->slot[nm_i];
+ u_int len = slot->len;
+ uint64_t paddr;
+ void *addr = PNMB(slot, &paddr);
- j = kring->nr_hwcur;
- if (j != k) { /* we have new packets to send */
- l = sc->rl_ldata.rl_tx_prodidx;
- for (n = 0; j != k; n++) {
- struct netmap_slot *slot = &ring->slot[j];
- struct rl_desc *desc = &sc->rl_ldata.rl_tx_list[l];
+ /* device-specific */
+ struct rl_desc *desc = &sc->rl_ldata.rl_tx_list[nic_i];
int cmd = slot->len | RL_TDESC_CMD_EOF |
RL_TDESC_CMD_OWN | RL_TDESC_CMD_SOF ;
- uint64_t paddr;
- void *addr = PNMB(slot, &paddr);
- int len = slot->len;
- if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) {
- if (do_lock)
- RL_UNLOCK(sc);
- // XXX what about prodidx ?
- return netmap_ring_reinit(kring);
- }
+ NM_CHECK_ADDR_LEN(addr, len);
- if (l == lim) /* mark end of ring */
+ if (nic_i == lim) /* mark end of ring */
cmd |= RL_TDESC_CMD_EOR;
if (slot->flags & NS_BUF_CHANGED) {
+ /* buffer has changed, reload map */
desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
- /* buffer has changed, unload and reload map */
netmap_reload_map(sc->rl_ldata.rl_tx_mtag,
- txd[l].tx_dmamap, addr);
- slot->flags &= ~NS_BUF_CHANGED;
+ txd[nic_i].tx_dmamap, addr);
}
- slot->flags &= ~NS_REPORT;
+ slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
+
+ /* Fill the slot in the NIC ring. */
desc->rl_cmdstat = htole32(cmd);
+
+ /* make sure changes to the buffer are synced */
bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag,
- txd[l].tx_dmamap, BUS_DMASYNC_PREWRITE);
- j = (j == lim) ? 0 : j + 1;
- l = (l == lim) ? 0 : l + 1;
+ txd[nic_i].tx_dmamap,
+ BUS_DMASYNC_PREWRITE);
+
+ nm_i = nm_next(nm_i, lim);
+ nic_i = nm_next(nic_i, lim);
}
- sc->rl_ldata.rl_tx_prodidx = l;
- kring->nr_hwcur = k; /* the saved ring->cur */
- ring->avail -= n; // XXX see others
- kring->nr_hwavail = ring->avail;
+ sc->rl_ldata.rl_tx_prodidx = nic_i;
+ kring->nr_hwcur = head;
+ /* synchronize the NIC ring */
bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
- sc->rl_ldata.rl_tx_list_map,
- BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
+ sc->rl_ldata.rl_tx_list_map,
+ BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
/* start ? */
CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
}
- if (do_lock)
- RL_UNLOCK(sc);
+
+ /*
+ * Second part: reclaim buffers for completed transmissions.
+ */
+ if (flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring)) {
+ nic_i = sc->rl_ldata.rl_tx_considx;
+ for (n = 0; nic_i != sc->rl_ldata.rl_tx_prodidx;
+ n++, nic_i = RL_TX_DESC_NXT(sc, nic_i)) {
+ uint32_t cmdstat =
+ le32toh(sc->rl_ldata.rl_tx_list[nic_i].rl_cmdstat);
+ if (cmdstat & RL_TDESC_STAT_OWN)
+ break;
+ }
+ if (n > 0) {
+ sc->rl_ldata.rl_tx_considx = nic_i;
+ sc->rl_ldata.rl_tx_free += n;
+ kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim);
+ }
+ }
+
+ nm_txsync_finalize(kring);
+
return 0;
}
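
Throughout the rewrite, the hand-rolled wrap-arounds of the form "j = (j == lim) ? 0 : j + 1;" are replaced by the netmap ring-index helpers nm_next() and nm_prev(). Their behavior is essentially the following (as defined in netmap_kern.h, modulo branch-prediction annotations):

static inline uint32_t
nm_next(uint32_t i, uint32_t lim)	/* advance i by one, wrapping at lim */
{
	return (i == lim) ? 0 : i + 1;
}

static inline uint32_t
nm_prev(uint32_t i, uint32_t lim)	/* step i back by one, wrapping at 0 */
{
	return (i == 0) ? lim : i - 1;
}
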
@@ -210,44 +169,46 @@ re_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
* Reconcile kernel and user view of the receive ring.
*/
static int
-re_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
+re_netmap_rxsync(struct netmap_adapter *na, u_int ring_nr, int flags)
{
- struct rl_softc *sc = ifp->if_softc;
- struct rl_rxdesc *rxd = sc->rl_ldata.rl_rx_desc;
- struct netmap_adapter *na = NA(sc->rl_ifp);
+ struct ifnet *ifp = na->ifp;
struct netmap_kring *kring = &na->rx_rings[ring_nr];
struct netmap_ring *ring = kring->ring;
- int j, l, n, lim = kring->nkr_num_slots - 1;
- int force_update = do_lock || kring->nr_kflags & NKR_PENDINTR;
- u_int k = ring->cur, resvd = ring->reserved;
+ u_int nm_i; /* index into the netmap ring */
+ u_int nic_i; /* index into the NIC ring */
+ u_int n;
+ u_int const lim = kring->nkr_num_slots - 1;
+ u_int const head = nm_rxsync_prologue(kring);
+ int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
+
+ /* device-specific */
+ struct rl_softc *sc = ifp->if_softc;
+ struct rl_rxdesc *rxd = sc->rl_ldata.rl_rx_desc;
- k = ring->cur;
- if (k > lim)
+ if (head > lim)
return netmap_ring_reinit(kring);
- if (do_lock)
- RL_LOCK(sc);
- /* XXX check sync modes */
bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
- sc->rl_ldata.rl_rx_list_map,
- BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ sc->rl_ldata.rl_rx_list_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
/*
- * Import newly received packets into the netmap ring.
- * j is an index in the netmap ring, l in the NIC ring.
+ * First part: import newly received packets.
*
- * The device uses all the buffers in the ring, so we need
+ * This device uses all the buffers in the ring, so we need
* another termination condition in addition to RL_RDESC_STAT_OWN
- * cleared (all buffers could have it cleared. The easiest one
- * is to limit the amount of data reported up to 'lim'
+ * cleared (all buffers could have it cleared). The easiest one
+ * is to stop right before nm_hwcur.
*/
- l = sc->rl_ldata.rl_rx_prodidx; /* next pkt to check */
- j = netmap_idx_n2k(kring, l); /* the kring index */
if (netmap_no_pendintr || force_update) {
uint16_t slot_flags = kring->nkr_slot_flags;
+ uint32_t stop_i = nm_prev(kring->nr_hwcur, lim);
- for (n = kring->nr_hwavail; n < lim ; n++) {
- struct rl_desc *cur_rx = &sc->rl_ldata.rl_rx_list[l];
+ nic_i = sc->rl_ldata.rl_rx_prodidx; /* next pkt to check */
+ nm_i = netmap_idx_n2k(kring, nic_i);
+
+ while (nm_i != stop_i) {
+ struct rl_desc *cur_rx = &sc->rl_ldata.rl_rx_list[nic_i];
uint32_t rxstat = le32toh(cur_rx->rl_cmdstat);
uint32_t total_len;
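
The stop_i bound above encodes the termination condition described in the comment: since every slot of this ring is handed to the NIC, a cleared OWN bit alone cannot bound the scan, so the import loop also stops one slot before nr_hwcur. A condensed view of the loop structure, with the descriptor bookkeeping elided and the OWN check inferred from the comment:

uint32_t stop_i = nm_prev(kring->nr_hwcur, lim);	/* keep one slot empty */

nic_i = sc->rl_ldata.rl_rx_prodidx;
nm_i = netmap_idx_n2k(kring, nic_i);
while (nm_i != stop_i) {
	uint32_t rxstat = le32toh(sc->rl_ldata.rl_rx_list[nic_i].rl_cmdstat);

	if (rxstat & RL_RDESC_STAT_OWN)	/* still owned by the NIC */
		break;
	/* ... record length and flags in ring->slot[nm_i] ... */
	nm_i = nm_next(nm_i, lim);
	nic_i = nm_next(nic_i, lim);
}
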
@@ -256,78 +217,72 @@ re_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
total_len = rxstat & sc->rl_rxlenmask;
/* XXX subtract crc */
total_len = (total_len < 4) ? 0 : total_len - 4;
- kring->ring->slot[j].len = total_len;
- kring->ring->slot[j].flags = slot_flags;
+ ring->slot[nm_i].len = total_len;
+ ring->slot[nm_i].flags = slot_flags;
/* sync was in re_newbuf() */
bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
- rxd[l].rx_dmamap, BUS_DMASYNC_POSTREAD);
- j = (j == lim) ? 0 : j + 1;
- l = (l == lim) ? 0 : l + 1;
- }
- if (n != kring->nr_hwavail) {
- sc->rl_ldata.rl_rx_prodidx = l;
- sc->rl_ifp->if_ipackets += n - kring->nr_hwavail;
- kring->nr_hwavail = n;
+ rxd[nic_i].rx_dmamap, BUS_DMASYNC_POSTREAD);
+ // sc->rl_ifp->if_ipackets++;
+ nm_i = nm_next(nm_i, lim);
+ nic_i = nm_next(nic_i, lim);
}
+ sc->rl_ldata.rl_rx_prodidx = nic_i;
+ kring->nr_hwtail = nm_i;
kring->nr_kflags &= ~NKR_PENDINTR;
}
- /* skip past packets that userspace has released */
- j = kring->nr_hwcur;
- if (resvd > 0) {
- if (resvd + ring->avail >= lim + 1) {
- D("XXX invalid reserve/avail %d %d", resvd, ring->avail);
- ring->reserved = resvd = 0; // XXX panic...
- }
- k = (k >= resvd) ? k - resvd : k + lim + 1 - resvd;
- }
- if (j != k) { /* userspace has released some packets. */
- l = netmap_idx_k2n(kring, j); /* the NIC index */
- for (n = 0; j != k; n++) {
- struct netmap_slot *slot = ring->slot + j;
- struct rl_desc *desc = &sc->rl_ldata.rl_rx_list[l];
- int cmd = NETMAP_BUF_SIZE | RL_RDESC_CMD_OWN;
+ /*
+ * Second part: skip past packets that userspace has released.
+ */
+ nm_i = kring->nr_hwcur;
+ if (nm_i != head) {
+ nic_i = netmap_idx_k2n(kring, nm_i);
+ for (n = 0; nm_i != head; n++) {
+ struct netmap_slot *slot = &ring->slot[nm_i];
uint64_t paddr;
void *addr = PNMB(slot, &paddr);
- if (addr == netmap_buffer_base) { /* bad buf */
- if (do_lock)
- RL_UNLOCK(sc);
- return netmap_ring_reinit(kring);
- }
+ struct rl_desc *desc = &sc->rl_ldata.rl_rx_list[nic_i];
+ int cmd = NETMAP_BUF_SIZE | RL_RDESC_CMD_OWN;
- if (l == lim) /* mark end of ring */
+ if (addr == netmap_buffer_base) /* bad buf */
+ goto ring_reset;
+
+ if (nic_i == lim) /* mark end of ring */
cmd |= RL_RDESC_CMD_EOR;
- slot->flags &= ~NS_REPORT;
if (slot->flags & NS_BUF_CHANGED) {
- netmap_reload_map(sc->rl_ldata.rl_rx_mtag,
- rxd[l].rx_dmamap, addr);
+ /* buffer has changed, reload map */
desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
+ netmap_reload_map(sc->rl_ldata.rl_rx_mtag,
+ rxd[nic_i].rx_dmamap, addr);
slot->flags &= ~NS_BUF_CHANGED;
}
desc->rl_cmdstat = htole32(cmd);
bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
- rxd[l].rx_dmamap, BUS_DMASYNC_PREREAD);
- j = (j == lim) ? 0 : j + 1;
- l = (l == lim) ? 0 : l + 1;
+ rxd[nic_i].rx_dmamap,
+ BUS_DMASYNC_PREREAD);
+ nm_i = nm_next(nm_i, lim);
+ nic_i = nm_next(nic_i, lim);
}
- kring->nr_hwavail -= n;
- kring->nr_hwcur = k;
- /* Flush the RX DMA ring */
+ kring->nr_hwcur = head;
bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
sc->rl_ldata.rl_rx_list_map,
- BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
- /* tell userspace that there are new packets */
- ring->avail = kring->nr_hwavail - resvd;
- if (do_lock)
- RL_UNLOCK(sc);
+
+ /* tell userspace that there might be new packets */
+ nm_rxsync_finalize(kring);
+
return 0;
+
+ring_reset:
+ return netmap_ring_reinit(kring);
}
+
/*
* Additional routines to init the tx and rx rings.
* In other drivers we do that inline in the main code.
@@ -339,11 +294,16 @@ re_netmap_tx_init(struct rl_softc *sc)
struct rl_desc *desc;
int i, n;
struct netmap_adapter *na = NA(sc->rl_ifp);
- struct netmap_slot *slot = netmap_reset(na, NR_TX, 0, 0);
+ struct netmap_slot *slot;
+ if (!na || !(na->na_flags & NAF_NATIVE_ON)) {
+ return;
+ }
+
+ slot = netmap_reset(na, NR_TX, 0, 0);
/* slot is NULL if we are not in netmap mode */
if (!slot)
- return;
+ return; // XXX cannot happen
/* in netmap mode, overwrite addresses and maps */
txd = sc->rl_ldata.rl_tx_desc;
desc = sc->rl_ldata.rl_tx_list;
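
The loop that follows (outside this hunk) overwrites each TX descriptor with the address of the corresponding netmap buffer. A hypothetical reconstruction of its shape, modeled on the rx counterpart below; the exact indexing and map handling in the real code may differ:

for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
	uint64_t paddr;
	int l = netmap_idx_n2k(&na->tx_rings[0], i);	/* kring slot for NIC slot i */
	void *addr = PNMB(slot + l, &paddr);

	desc[i].rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
	desc[i].rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
	netmap_reload_map(sc->rl_ldata.rl_tx_mtag,
	    txd[i].tx_dmamap, addr);	/* point the DMA map at the netmap buffer */
}
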
@@ -369,36 +329,35 @@ re_netmap_rx_init(struct rl_softc *sc)
struct netmap_slot *slot = netmap_reset(na, NR_RX, 0, 0);
struct rl_desc *desc = sc->rl_ldata.rl_rx_list;
uint32_t cmdstat;
- int i, n, max_avail;
+ uint32_t nic_i, max_avail;
+ uint32_t const n = sc->rl_ldata.rl_rx_desc_cnt;
if (!slot)
return;
- n = sc->rl_ldata.rl_rx_desc_cnt;
/*
- * Userspace owned hwavail packets before the reset,
- * so the NIC that last hwavail descriptors of the ring
- * are still owned by the driver (and keep one empty).
+ * Do not release the slots owned by userspace,
+ * and also keep one empty.
*/
- max_avail = n - 1 - na->rx_rings[0].nr_hwavail;
- for (i = 0; i < n; i++) {
+ max_avail = n - 1 - nm_kr_rxspace(&na->rx_rings[0]);
+ for (nic_i = 0; nic_i < n; nic_i++) {
void *addr;
uint64_t paddr;
- int l = netmap_idx_n2k(&na->rx_rings[0], i);
+ uint32_t nm_i = netmap_idx_n2k(&na->rx_rings[0], nic_i);
- addr = PNMB(slot + l, &paddr);
+ addr = PNMB(slot + nm_i, &paddr);
netmap_reload_map(sc->rl_ldata.rl_rx_mtag,
- sc->rl_ldata.rl_rx_desc[i].rx_dmamap, addr);
+ sc->rl_ldata.rl_rx_desc[nic_i].rx_dmamap, addr);
bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
- sc->rl_ldata.rl_rx_desc[i].rx_dmamap, BUS_DMASYNC_PREREAD);
- desc[i].rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
- desc[i].rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
+ sc->rl_ldata.rl_rx_desc[nic_i].rx_dmamap, BUS_DMASYNC_PREREAD);
+ desc[nic_i].rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
+ desc[nic_i].rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
cmdstat = NETMAP_BUF_SIZE;
- if (i == n - 1) /* mark the end of ring */
+ if (nic_i == n - 1) /* mark the end of ring */
cmdstat |= RL_RDESC_CMD_EOR;
- if (i < max_avail)
+ if (nic_i < max_avail)
cmdstat |= RL_RDESC_CMD_OWN;
- desc[i].rl_cmdstat = htole32(cmdstat);
+ desc[nic_i].rl_cmdstat = htole32(cmdstat);
}
}
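
re_netmap_rx_init() computes max_avail from nm_kr_rxspace(), which measures how many slots currently sit between nr_hwcur and nr_hwtail, i.e. received buffers not yet released by userspace, which therefore must not be returned to the NIC. Its behavior is essentially:

static inline uint32_t
nm_kr_rxspace(struct netmap_kring *k)
{
	int space = k->nr_hwtail - k->nr_hwcur;

	if (space < 0)			/* the interval wraps around the ring */
		space += k->nkr_num_slots;
	return space;			/* slots still holding data for userspace */
}
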
@@ -411,13 +370,14 @@ re_netmap_attach(struct rl_softc *sc)
bzero(&na, sizeof(na));
na.ifp = sc->rl_ifp;
- na.separate_locks = 0;
+ na.na_flags = NAF_BDG_MAYSLEEP;
na.num_tx_desc = sc->rl_ldata.rl_tx_desc_cnt;
na.num_rx_desc = sc->rl_ldata.rl_rx_desc_cnt;
na.nm_txsync = re_netmap_txsync;
na.nm_rxsync = re_netmap_rxsync;
- na.nm_lock = re_netmap_lock_wrapper;
na.nm_register = re_netmap_reg;
- netmap_attach(&na, 1);
+ na.num_tx_rings = na.num_rx_rings = 1;
+ netmap_attach(&na);
}
+
/* end of file */
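
Taken together, the patch converts the driver from the old kring accounting (a cur/avail pair plus nr_hwavail) to the head/cur/tail API: userspace advances head and cur, the kernel advances tail, and the old available-count becomes a derived quantity instead of stored state. Roughly, for a ring of nkr_num_slots slots:

/* old API: an explicit count of ready slots */
avail = kring->nr_hwavail;

/* new API: the same quantity is the distance from hwcur to hwtail */
avail = (kring->nr_hwtail - kring->nr_hwcur + kring->nkr_num_slots)
    % kring->nkr_num_slots;

This is why the rewritten txsync/rxsync never touch ring->avail directly and instead publish their state through nm_txsync_finalize()/nm_rxsync_finalize().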