author     yongari <yongari@FreeBSD.org>	2009-03-12 01:14:47 +0000
committer  yongari <yongari@FreeBSD.org>	2009-03-12 01:14:47 +0000
commit     d19de5111b0e52da5deebff0119e64530d96578b
tree       83dcc55df766d6b623e30e8b019fce44e0f0548d
parent     ec17f9599d00b9f034d9c7a8e892dd6a5ef5c56b
bus_dma(9) conversion and make txp(4) work on all architectures.
o Header file cleanup.
o bus_dma(9) conversion:
  - Removed all consumers of vtophys(9) and converted them to use
    bus_dma(9).
  - The Typhoon2 functional specification says the controller supports
    64bit DMA addressing. However, all Typhoon controllers are known
    to lack DAC support, so 64bit DMA support was disabled.
  - The hardware can't handle more than 16 fragmented Tx DMA segments,
    so teach txp(4) to collapse such chains to fewer than 16 segments
    (see the m_collapse(9) sketch following this message).
  - Added the Rx buffer alignment requirement (4 byte alignment) and
    implemented fixup code to align received frames. Previously txp(4)
    always copied Rx frames to align them on a 2 byte boundary, but
    the copy overhead is much higher than that of unaligned access on
    i386/amd64. The alignment fixup code is now applied only on
    strict-alignment architectures, so i386 and amd64 get an instant
    Rx performance boost. The Typhoon2 datasheet mentions a command
    that pads arbitrary bytes in the Rx buffer, but that command does
    not work.
  - Nuked the pointer trick in the descriptor ring; it does not work
    on sparc64, so it was replaced with bcopy. Alternatively txp(4)
    could embed a 32 bit index value into the descriptor and compute
    the real buffer address, but that would complicate the code.
  - Added endianness support code to the various Tx/Rx/command/
    response descriptor accesses. With this change txp(4) should work
    on all architectures.
o Added comments for known firmware bugs (Tx checksum offloading, TSO,
  VLAN stripping and Rx buffer padding control).
o Prefer the faster memory space register access over I/O space
  access, with a fall-back mechanism to I/O space access; the hardware
  supports both memory and I/O mapped access. Users can still force
  the old I/O space access by setting the hw.txp.prefer_iomap tunable
  to 1 in /boot/loader.conf (see the loader.conf example following
  this message).
o Added experimental suspend/resume methods.
o Nuked the error prone Rx buffer handling code and implemented local
  buffer management with TAILQ. By definition, the controller can't
  pass the last received frame to the host when no free Rx buffers are
  available, as the head and tail pointers of the Rx descriptor ring
  can't hold the same value. In that case the Rx buffer pointer in the
  Rx buffer ring still holds a valid buffer, and txp_rxbuf_reclaim()
  can't refill Rx buffers because that first buffer is still valid.
  Instead of relying on the contents of the Rx buffer ring, introduce
  local buffer management code to handle the empty-buffer situation.
  This should fix a long standing bug which completely hangs the
  controller under high network load; I could easily trigger the issue
  by sending 64 byte UDP frames with netperf. I have no idea how this
  bug went unfixed for so long.
o Converted the ithread interrupt handler to a filter-based one.
o Rearranged the txp_detach routine so that it now serves as the
  general clean-up routine.
o Show the sleep image version at device attach time. This helps to
  know what action should be taken depending on the sleep image
  version. The version information in the datasheet was wrong for
  newer NV images, so I followed Linux, which seems to correctly
  extract the version numbers from the response descriptors.
o The firmware image is no longer downloaded at device attach time; it
  is now reloaded whenever if_init is invoked, to ensure correct
  operation of the hardware when something goes wrong. Previously the
  controller always ran without regard to the running state of the
  firmware. This change adds controller initialization time but gives
  more robust operation, as txp(4) always starts off from a known
  state. The controller is kept in the sleep state until the
  administrator explicitly brings the interface up.
o As the firmware is loaded in the if_init handler, it's now possible
  to implement a real watchdog timeout handler. When the watchdog
  timer expires, full-reset the controller and initialize the hardware
  again, as most other drivers do. While here, use our own timer for
  the watchdog instead of the if_watchdog/if_timer interface (see the
  watchdog sketch following this message).
o Instead of masking specific interrupts with the TXP_IMR register,
  program the TXP_IER register with the interrupts to be raised and
  use TXP_IMR to toggle interrupt generation.
o Implemented txp_wait() to wait for a specific controller state.
o Separated the boot related code out of txp_download_fw() into a new
  txp_boot() that handles the boot process.
o Added bus_barrier(9) to the host-to-ARM communication.
o Added endianness handling to all Typhoon command processing; the
  ARM93C always expects commands/data in little-endian format.
o Removed __STRICT_ALIGNMENT, which is not valid on FreeBSD;
  __NO_STRICT_ALIGNMENT is provided for that purpose. Previously
  __STRICT_ALIGNMENT was unconditionally defined for all
  architectures.
o Rewrote the SIOCSIFCAP ioctl handler so that each capability can be
  controlled with ifconfig(8). Note, disabling VLAN hardware tagging
  has no effect due to the firmware bug.
o Don't send TXP_CMD_CLEAR_STATISTICS to clear MAC statistics in
  txp_tick(); the command is not atomic. Instead, just read the
  statistics and fold the previously saved values into the reported
  statistics. The dev.txp.%d.stats sysctl node provides detailed MAC
  statistics. This also avoids wasting a lot of CPU cycles, as
  processing a command ring takes a very long time on the ARM93C.
  Note, the Rx multicast and broadcast statistics do not seem to be
  right; that might be another firmware bug.
o Implemented link state change handling in txp_tick(). Sending
  packets is now allowed only after a valid link is established. Also
  invoke the link state change notification whenever the state
  changes, so pseudo drivers like lagg(4) that rely on link state can
  do failover or link aggregation without hacks. if_baudrate is
  updated to the resolved speed so SNMP agents can get correct
  bandwidth parameters.
o Overhauled the Tx routine so that it now honors the number of
  allowable DMA segments and checks for 4 free descriptors before
  trying to send a frame. A frame may require at least 4 descriptors
  (1 frame descriptor, 1 or more fragment descriptors, 1 TSO option
  descriptor, and one free descriptor to prevent descriptor
  wrap-around), so it's necessary to check for available free
  descriptors before setting up the DMA operation.
o Added a sysctl variable, dev.txp.%d.process_limit, to control how
  many received frames are served per Rx handler invocation. The valid
  range is 16 to 128 frames (default 64); see the sysctl handler
  sketch following this message.
o Added ALTQ(4) support.
o Added the missing IFCAP_VLAN_HWCSUM, as txp(4) can offload checksum
  calculation as well as VLAN tag insertion/stripping.
o Fixed the media header length for VLAN.
o Don't set if_mtu in device attach; it's already set in
  ether_ifattach().
o Enabled MWI.
o Fixed a module unload panic when bpf listeners are active.
o Rearranged the ethernet address programming logic so that it works
  on strict-alignment architectures.
o Removed unused member variables from the softc.
o Added support for WOL.
o Removed the now unused TXP_PCI_LOMEM/TXP_PCI_LOIO.
o Added the wakeup command definition, TXP_BOOTCMD_WAKEUP.
o Added a new firmware version query command, TXP_CMD_READ_VERSION.
o Removed the volatile keyword from the softc, as bus_dmamap_sync(9)
  should take care of that.
o Removed the embedded union trick in the structure used to access a
  pointer on LP64 systems.
o Added a few TSO related definitions for struct txp_tcpseg_desc.
  However, TSO is not used at all due to the hardware limitation.
o Redefined PKT_MAX_PKTLEN as the theoretical maximum frame size.
o Switched from bus_space_{read|write}_4 to bus_{read|write}_4.
o Added a new macro, TXP_DESC_INC, to compute the next descriptor
  index.

Tested by:	don.nasco <> gmail dot com
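The fragment collapsing above follows the usual EFBIG retry idiom
around bus_dmamap_load_mbuf_sg(9) and m_collapse(9). A minimal sketch
of that idiom, reusing the txp_tx_tag/sd_map names from the diff below;
the helper name txp_load_txbuf and the TXP_MAXTXSEGS constant are
illustrative, not necessarily the driver's own:

    static int
    txp_load_txbuf(struct txp_softc *sc, struct txp_swdesc *sd,
        struct mbuf **m_head)
    {
            bus_dma_segment_t txsegs[TXP_MAXTXSEGS]; /* assumed: 16 */
            struct mbuf *n;
            int error, nsegs;

            error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_tx_tag,
                sd->sd_map, *m_head, txsegs, &nsegs, 0);
            if (error == EFBIG) {
                    /* Too many fragments; compact the chain. */
                    n = m_collapse(*m_head, M_DONTWAIT, TXP_MAXTXSEGS);
                    if (n == NULL) {
                            m_freem(*m_head);
                            *m_head = NULL;
                            return (ENOBUFS);
                    }
                    *m_head = n;
                    error = bus_dmamap_load_mbuf_sg(
                        sc->sc_cdata.txp_tx_tag, sd->sd_map, *m_head,
                        txsegs, &nsegs, 0);
            }
            return (error);
    }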
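To force the old I/O space register access, the tunable above goes in
/boot/loader.conf using the standard loader(8) syntax:

    hw.txp.prefer_iomap="1"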
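The driver-private watchdog mentioned above conventionally rides on the
txp_tick() callout: the Tx path arms sc_watchdog_timer, txp_tick()
decrements it once per second, and expiry triggers a full
reinitialization. A sketch under those assumptions (the committed
txp_watchdog() body is not among the hunks below):

    static void
    txp_watchdog(struct txp_softc *sc)
    {
            struct ifnet *ifp;

            TXP_LOCK_ASSERT(sc);

            /* 0 means no Tx in flight; otherwise count down. */
            if (sc->sc_watchdog_timer == 0 || --sc->sc_watchdog_timer)
                    return;

            ifp = sc->sc_ifp;
            if_printf(ifp, "watchdog timeout -- resetting\n");
            ifp->if_oerrors++;
            ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
            txp_init_locked(sc); /* full reset and firmware reload */
    }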
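sysctl_int_range() and sysctl_hw_txp_proc_limit() are declared in the
diff below but their bodies are not shown. A sketch of the usual
clamped-integer handler pair, using the 16..128 bounds from this
message; the TXP_PROC_MIN/TXP_PROC_MAX names are assumptions:

    static int
    sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
    {
            int error, value;

            if (arg1 == NULL)
                    return (EINVAL);
            value = *(int *)arg1;
            error = sysctl_handle_int(oidp, &value, 0, req);
            if (error != 0 || req->newptr == NULL)
                    return (error);
            /* Reject values outside the allowed range. */
            if (value < low || value > high)
                    return (EINVAL);
            *(int *)arg1 = value;

            return (0);
    }

    static int
    sysctl_hw_txp_proc_limit(SYSCTL_HANDLER_ARGS)
    {

            /* Clamp dev.txp.%d.process_limit to 16..128. */
            return (sysctl_int_range(oidp, arg1, arg2, req,
                TXP_PROC_MIN, TXP_PROC_MAX));
    }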
Diffstat (limited to 'sys/dev/txp')
-rw-r--r--	sys/dev/txp/if_txp.c	| 2685
-rw-r--r--	sys/dev/txp/if_txpreg.h	|  371
2 files changed, 2155 insertions, 901 deletions
diff --git a/sys/dev/txp/if_txp.c b/sys/dev/txp/if_txp.c
index 6509874..b0e989d 100644
--- a/sys/dev/txp/if_txp.c
+++ b/sys/dev/txp/if_txp.c
@@ -42,53 +42,82 @@ __FBSDID("$FreeBSD$");
*/
#include <sys/param.h>
#include <sys/systm.h>
-#include <sys/sockio.h>
-#include <sys/mbuf.h>
-#include <sys/malloc.h>
+#include <sys/bus.h>
+#include <sys/endian.h>
#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/queue.h>
+#include <sys/rman.h>
#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/sysctl.h>
+#include <sys/taskqueue.h>
+#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
+#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
-#include <netinet/in_var.h>
#include <netinet/ip.h>
-#include <netinet/if_ether.h>
-#include <machine/in_cksum.h>
-
-#include <net/if_media.h>
-
-#include <net/bpf.h>
-
-#include <vm/vm.h> /* for vtophys */
-#include <vm/pmap.h> /* for vtophys */
-#include <machine/bus.h>
-#include <machine/resource.h>
-#include <sys/bus.h>
-#include <sys/rman.h>
#include <dev/mii/mii.h>
-#include <dev/mii/miivar.h>
+
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
-#define TXP_USEIOSPACE
-#define __STRICT_ALIGNMENT
+#include <machine/bus.h>
+#include <machine/in_cksum.h>
#include <dev/txp/if_txpreg.h>
#include <dev/txp/3c990img.h>
-#ifndef lint
-static const char rcsid[] =
- "$FreeBSD$";
-#endif
+MODULE_DEPEND(txp, pci, 1, 1, 1);
+MODULE_DEPEND(txp, ether, 1, 1, 1);
+
+/*
+ * XXX Known Typhoon firmware issues.
+ *
+ * 1. It seems the firmware has a Tx TCP/UDP checksum offloading bug;
+ *    it hangs when told to compute a TCP/UDP checksum. I'm not sure
+ *    whether the firmware requires special alignment to do checksum
+ *    offloading, but the datasheet says nothing about that.
+ * 2. The datasheet says nothing about the maximum number of fragment
+ *    descriptors supported. Experimentation shows the firmware
+ *    supports up to 16 fragment descriptors. For the TSO case, the
+ *    upper stack can send a 64KB IP datagram plus the link header
+ *    (ethernet header + VLAN tag), but the controller can handle at
+ *    most a 64KB frame given that PAGE_SIZE is 4KB (16 * PAGE_SIZE).
+ *    Because frames that need the hardware's TSO operation can be
+ *    larger than 64KB, I disabled the TSO capability. TSO operation
+ *    for 16 or fewer fragment descriptors works without problems,
+ *    though.
+ * 3. VLAN hardware tag stripping is always enabled in the firmware,
+ *    even if it's explicitly told not to strip the tag. It would be
+ *    possible to add the tag back in the Rx handler when VLAN
+ *    hardware tagging is not active, but I didn't try that as it
+ *    would be a layering violation.
+ * 4. TXP_CMD_RECV_BUFFER_CONTROL does not work as the datasheet
+ *    describes, so the driver has to handle the alignment restriction
+ *    itself by copying the received frame to align it on a 32bit
+ *    boundary on strict-alignment architectures. This adds a lot of
+ *    CPU burden and effectively reduces Rx performance on
+ *    strict-alignment architectures (e.g. sparc64, arm, mips, ia64).
+ *
+ * Unfortunately it seems that 3Com is no longer interested in
+ * releasing fixed firmware, so we may have to live with these bugs.
+ */
+
+#define TXP_CSUM_FEATURES (CSUM_IP)
/*
* Various supported device vendors/types and their names.
@@ -112,25 +141,36 @@ static struct txp_type txp_devs[] = {
static int txp_probe(device_t);
static int txp_attach(device_t);
static int txp_detach(device_t);
-static void txp_intr(void *);
-static void txp_tick(void *);
static int txp_shutdown(device_t);
+static int txp_suspend(device_t);
+static int txp_resume(device_t);
+static int txp_intr(void *);
+static void txp_int_task(void *, int);
+static void txp_tick(void *);
static int txp_ioctl(struct ifnet *, u_long, caddr_t);
static void txp_start(struct ifnet *);
static void txp_start_locked(struct ifnet *);
+static int txp_encap(struct txp_softc *, struct txp_tx_ring *, struct mbuf **);
static void txp_stop(struct txp_softc *);
static void txp_init(void *);
static void txp_init_locked(struct txp_softc *);
-static void txp_watchdog(struct ifnet *);
+static void txp_watchdog(struct txp_softc *);
-static void txp_release_resources(struct txp_softc *);
-static int txp_chip_init(struct txp_softc *);
-static int txp_reset_adapter(struct txp_softc *);
+static int txp_reset(struct txp_softc *);
+static int txp_boot(struct txp_softc *, uint32_t);
+static int txp_sleep(struct txp_softc *, int);
+static int txp_wait(struct txp_softc *, uint32_t);
static int txp_download_fw(struct txp_softc *);
static int txp_download_fw_wait(struct txp_softc *);
static int txp_download_fw_section(struct txp_softc *,
struct txp_fw_section_header *, int);
static int txp_alloc_rings(struct txp_softc *);
+static void txp_init_rings(struct txp_softc *);
+static int txp_dma_alloc(struct txp_softc *, char *, bus_dma_tag_t *,
+ bus_size_t, bus_size_t, bus_dmamap_t *, void **, bus_size_t, bus_addr_t *);
+static void txp_dma_free(struct txp_softc *, bus_dma_tag_t *, bus_dmamap_t *,
+ void **);
+static void txp_free_rings(struct txp_softc *);
static int txp_rxring_fill(struct txp_softc *);
static void txp_rxring_empty(struct txp_softc *);
static void txp_set_filter(struct txp_softc *);
@@ -138,14 +178,14 @@ static void txp_set_filter(struct txp_softc *);
static int txp_cmd_desc_numfree(struct txp_softc *);
static int txp_command(struct txp_softc *, uint16_t, uint16_t, uint32_t,
uint32_t, uint16_t *, uint32_t *, uint32_t *, int);
-static int txp_command2(struct txp_softc *, uint16_t, uint16_t,
+static int txp_ext_command(struct txp_softc *, uint16_t, uint16_t,
uint32_t, uint32_t, struct txp_ext_desc *, uint8_t,
struct txp_rsp_desc **, int);
-static int txp_response(struct txp_softc *, uint32_t, uint16_t, uint16_t,
+static int txp_response(struct txp_softc *, uint16_t, uint16_t,
struct txp_rsp_desc **);
static void txp_rsp_fixup(struct txp_softc *, struct txp_rsp_desc *,
struct txp_rsp_desc *);
-static void txp_capabilities(struct txp_softc *);
+static int txp_set_capabilities(struct txp_softc *);
static void txp_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int txp_ifmedia_upd(struct ifnet *);
@@ -154,15 +194,18 @@ static void txp_show_descriptor(void *);
#endif
static void txp_tx_reclaim(struct txp_softc *, struct txp_tx_ring *);
static void txp_rxbuf_reclaim(struct txp_softc *);
-static void txp_rx_reclaim(struct txp_softc *, struct txp_rx_ring *);
-
-#ifdef TXP_USEIOSPACE
-#define TXP_RES SYS_RES_IOPORT
-#define TXP_RID TXP_PCI_LOIO
-#else
-#define TXP_RES SYS_RES_MEMORY
-#define TXP_RID TXP_PCI_LOMEM
+#ifndef __NO_STRICT_ALIGNMENT
+static __inline void txp_fixup_rx(struct mbuf *);
#endif
+static int txp_rx_reclaim(struct txp_softc *, struct txp_rx_ring *, int);
+static void txp_stats_save(struct txp_softc *);
+static void txp_stats_update(struct txp_softc *, struct txp_rsp_desc *);
+static void txp_sysctl_node(struct txp_softc *);
+static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
+static int sysctl_hw_txp_proc_limit(SYSCTL_HANDLER_ARGS);
+
+static int prefer_iomap = 0;
+TUNABLE_INT("hw.txp.prefer_iomap", &prefer_iomap);
static device_method_t txp_methods[] = {
/* Device interface */
@@ -170,7 +213,10 @@ static device_method_t txp_methods[] = {
DEVMETHOD(device_attach, txp_attach),
DEVMETHOD(device_detach, txp_detach),
DEVMETHOD(device_shutdown, txp_shutdown),
- { 0, 0 }
+ DEVMETHOD(device_suspend, txp_suspend),
+ DEVMETHOD(device_resume, txp_resume),
+
+ { NULL, NULL }
};
static driver_t txp_driver = {
@@ -182,8 +228,6 @@ static driver_t txp_driver = {
static devclass_t txp_devclass;
DRIVER_MODULE(txp, pci, txp_driver, txp_devclass, 0, 0);
-MODULE_DEPEND(txp, pci, 1, 1, 1);
-MODULE_DEPEND(txp, ether, 1, 1, 1);
static int
txp_probe(device_t dev)
@@ -209,36 +253,65 @@ txp_attach(device_t dev)
{
struct txp_softc *sc;
struct ifnet *ifp;
+ struct txp_rsp_desc *rsp;
uint16_t p1;
- uint32_t p2;
- int error = 0, rid;
- u_char eaddr[6];
+ uint32_t p2, reg;
+ int error = 0, pmc, rid;
+ uint8_t eaddr[ETHER_ADDR_LEN], *ver;
sc = device_get_softc(dev);
sc->sc_dev = dev;
- sc->sc_cold = 1;
mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
MTX_DEF);
callout_init_mtx(&sc->sc_tick, &sc->sc_mtx, 0);
+ TASK_INIT(&sc->sc_int_task, 0, txp_int_task, sc);
+ TAILQ_INIT(&sc->sc_busy_list);
+ TAILQ_INIT(&sc->sc_free_list);
- /*
- * Map control/status registers.
- */
- pci_enable_busmaster(dev);
-
- rid = TXP_RID;
- sc->sc_res = bus_alloc_resource_any(dev, TXP_RES, &rid,
- RF_ACTIVE);
+ ifmedia_init(&sc->sc_ifmedia, 0, txp_ifmedia_upd, txp_ifmedia_sts);
+ ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_10_T, 0, NULL);
+ ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_10_T | IFM_HDX, 0, NULL);
+ ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
+ ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL);
+ ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_100_TX | IFM_HDX, 0, NULL);
+ ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
+ ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
+ pci_enable_busmaster(dev);
+ /* Prefer memory space register mapping over IO space. */
+ if (prefer_iomap == 0) {
+ sc->sc_res_id = PCIR_BAR(1);
+ sc->sc_res_type = SYS_RES_MEMORY;
+ } else {
+ sc->sc_res_id = PCIR_BAR(0);
+ sc->sc_res_type = SYS_RES_IOPORT;
+ }
+ sc->sc_res = bus_alloc_resource_any(dev, sc->sc_res_type,
+ &sc->sc_res_id, RF_ACTIVE);
+ if (sc->sc_res == NULL && prefer_iomap == 0) {
+ sc->sc_res_id = PCIR_BAR(0);
+ sc->sc_res_type = SYS_RES_IOPORT;
+ sc->sc_res = bus_alloc_resource_any(dev, sc->sc_res_type,
+ &sc->sc_res_id, RF_ACTIVE);
+ }
if (sc->sc_res == NULL) {
device_printf(dev, "couldn't map ports/memory\n");
- error = ENXIO;
- goto fail;
+ ifmedia_removeall(&sc->sc_ifmedia);
+ mtx_destroy(&sc->sc_mtx);
+ return (ENXIO);
}
- sc->sc_bt = rman_get_bustag(sc->sc_res);
- sc->sc_bh = rman_get_bushandle(sc->sc_res);
+ /* Enable MWI. */
+ reg = pci_read_config(dev, PCIR_COMMAND, 2);
+ reg |= PCIM_CMD_MWRICEN;
+ pci_write_config(dev, PCIR_COMMAND, reg, 2);
+ /* Check cache line size. */
+ reg = pci_read_config(dev, PCIR_CACHELNSZ, 1);
+ reg <<= 4;
+ if (reg == 0 || (reg % 16) != 0)
+ device_printf(sc->sc_dev,
+ "invalid cache line size : %u\n", reg);
/* Allocate interrupt */
rid = 0;
@@ -251,112 +324,155 @@ txp_attach(device_t dev)
goto fail;
}
- if (txp_chip_init(sc)) {
- error = ENXIO;
- goto fail;
- }
-
- sc->sc_fwbuf = contigmalloc(32768, M_DEVBUF,
- M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
- if (sc->sc_fwbuf == NULL) {
- device_printf(dev, "no memory for firmware\n");
- error = ENXIO;
- goto fail;
- }
- error = txp_download_fw(sc);
- contigfree(sc->sc_fwbuf, 32768, M_DEVBUF);
- sc->sc_fwbuf = NULL;
-
- if (error)
- goto fail;
-
- sc->sc_ldata = contigmalloc(sizeof(struct txp_ldata), M_DEVBUF,
- M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
- if (sc->sc_ldata == NULL) {
- device_printf(dev, "no memory for descriptor ring\n");
- error = ENXIO;
+ if ((error = txp_alloc_rings(sc)) != 0)
goto fail;
- }
- bzero(sc->sc_ldata, sizeof(struct txp_ldata));
-
- if (txp_alloc_rings(sc)) {
+ txp_init_rings(sc);
+ txp_sysctl_node(sc);
+ /* Reset controller and make it reload sleep image. */
+ if (txp_reset(sc) != 0) {
error = ENXIO;
goto fail;
}
- if (txp_command(sc, TXP_CMD_MAX_PKT_SIZE_WRITE, TXP_MAX_PKTLEN, 0, 0,
- NULL, NULL, NULL, 1)) {
+ /* Let controller boot from sleep image. */
+ if (txp_boot(sc, STAT_WAITING_FOR_HOST_REQUEST) != 0) {
+ device_printf(sc->sc_dev, "could not boot sleep image\n");
error = ENXIO;
goto fail;
}
+ /* Get station address. */
if (txp_command(sc, TXP_CMD_STATION_ADDRESS_READ, 0, 0, 0,
- &p1, &p2, NULL, 1)) {
+ &p1, &p2, NULL, TXP_CMD_WAIT)) {
error = ENXIO;
goto fail;
}
+ p1 = le16toh(p1);
eaddr[0] = ((uint8_t *)&p1)[1];
eaddr[1] = ((uint8_t *)&p1)[0];
+ p2 = le32toh(p2);
eaddr[2] = ((uint8_t *)&p2)[3];
eaddr[3] = ((uint8_t *)&p2)[2];
eaddr[4] = ((uint8_t *)&p2)[1];
eaddr[5] = ((uint8_t *)&p2)[0];
- sc->sc_cold = 0;
-
- ifmedia_init(&sc->sc_ifmedia, 0, txp_ifmedia_upd, txp_ifmedia_sts);
- ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
- ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
- ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
- ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL);
- ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX|IFM_HDX, 0, NULL);
- ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
- ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
-
- sc->sc_xcvr = TXP_XCVR_AUTO;
- txp_command(sc, TXP_CMD_XCVR_SELECT, TXP_XCVR_AUTO, 0, 0,
- NULL, NULL, NULL, 0);
- ifmedia_set(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO);
-
ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
if (ifp == NULL) {
- device_printf(dev, "can not if_alloc()\n");
+ device_printf(dev, "can not allocate ifnet structure\n");
error = ENOSPC;
goto fail;
}
+
+ /*
+ * Show sleep image version information which may help to
+ * diagnose sleep image specific issues.
+ */
+ rsp = NULL;
+ if (txp_ext_command(sc, TXP_CMD_READ_VERSION, 0, 0, 0, NULL, 0,
+ &rsp, TXP_CMD_WAIT)) {
+ device_printf(dev, "can not read sleep image version\n");
+ error = ENXIO;
+ goto fail;
+ }
+ if (rsp->rsp_numdesc == 0) {
+ p2 = le32toh(rsp->rsp_par2) & 0xFFFF;
+ device_printf(dev, "Typhoon 1.0 sleep image (2000/%02u/%02u)\n",
+ p2 >> 8, p2 & 0xFF);
+ } else if (rsp->rsp_numdesc == 2) {
+ p2 = le32toh(rsp->rsp_par2);
+ ver = (uint8_t *)(rsp + 1);
+ /*
+ * Even though the datasheet says the command returns a
+ * NUL-terminated version string, explicitly terminate
+ * the string. Given the number of firmware bugs seen,
+ * I can't trust even this simple detail.
+ */
+ ver[25] = '\0';
+ device_printf(dev,
+ "Typhoon 1.1+ sleep image %02u.%03u.%03u %s\n",
+ p2 >> 24, (p2 >> 12) & 0xFFF, p2 & 0xFFF, ver);
+ } else {
+ p2 = le32toh(rsp->rsp_par2);
+ device_printf(dev,
+ "Unknown Typhoon sleep image version: %u:0x%08x\n",
+ rsp->rsp_numdesc, p2);
+ }
+ if (rsp != NULL)
+ free(rsp, M_DEVBUF);
+
+ sc->sc_xcvr = TXP_XCVR_AUTO;
+ txp_command(sc, TXP_CMD_XCVR_SELECT, TXP_XCVR_AUTO, 0, 0,
+ NULL, NULL, NULL, TXP_CMD_NOWAIT);
+ ifmedia_set(&sc->sc_ifmedia, IFM_ETHER | IFM_AUTO);
+
ifp->if_softc = sc;
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
- ifp->if_mtu = ETHERMTU;
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
ifp->if_ioctl = txp_ioctl;
ifp->if_start = txp_start;
- ifp->if_watchdog = txp_watchdog;
ifp->if_init = txp_init;
- ifp->if_baudrate = 100000000;
- ifp->if_snd.ifq_maxlen = TX_ENTRIES;
- ifp->if_hwassist = 0;
- txp_capabilities(sc);
-
+ ifp->if_snd.ifq_drv_maxlen = TX_ENTRIES - 1;
+ IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
+ IFQ_SET_READY(&ifp->if_snd);
/*
- * Attach us everywhere
+ * It's possible to read the firmware's offload capabilities,
+ * but we have not downloaded the firmware yet, so announce
+ * the working capabilities here. We're not interested in the
+ * IPSec capability, and due to the many firmware bugs we
+ * can't advertise the whole capability set anyway.
*/
+ ifp->if_capabilities = IFCAP_RXCSUM | IFCAP_TXCSUM;
+ if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
+ ifp->if_capabilities |= IFCAP_WOL_MAGIC;
+ /* Enable all capabilities. */
+ ifp->if_capenable = ifp->if_capabilities;
+
ether_ifattach(ifp, eaddr);
+ /* VLAN capability setup. */
+ ifp->if_capabilities |= IFCAP_VLAN_MTU;
+ ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
+ ifp->if_capenable = ifp->if_capabilities;
+ /* Tell the upper layer(s) we support long frames. */
+ ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
+
+ WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
+ WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
+
+ /* Create local taskq. */
+ sc->sc_tq = taskqueue_create_fast("txp_taskq", M_WAITOK,
+ taskqueue_thread_enqueue, &sc->sc_tq);
+ if (sc->sc_tq == NULL) {
+ device_printf(dev, "could not create taskqueue.\n");
+ ether_ifdetach(ifp);
+ error = ENXIO;
+ goto fail;
+ }
+ taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq",
+ device_get_nameunit(sc->sc_dev));
+
+ /* Put controller into sleep. */
+ if (txp_sleep(sc, 0) != 0) {
+ ether_ifdetach(ifp);
+ error = ENXIO;
+ goto fail;
+ }
+
error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
- NULL, txp_intr, sc, &sc->sc_intrhand);
+ txp_intr, NULL, sc, &sc->sc_intrhand);
- if (error) {
+ if (error != 0) {
ether_ifdetach(ifp);
- device_printf(dev, "couldn't set up irq\n");
+ device_printf(dev, "couldn't set up interrupt handler.\n");
goto fail;
}
return (0);
fail:
- txp_release_resources(sc);
- mtx_destroy(&sc->sc_mtx);
+ if (error != 0)
+ txp_detach(dev);
return (error);
}
@@ -365,101 +481,57 @@ txp_detach(device_t dev)
{
struct txp_softc *sc;
struct ifnet *ifp;
- int i;
sc = device_get_softc(dev);
- ifp = sc->sc_ifp;
- TXP_LOCK(sc);
- txp_stop(sc);
- TXP_UNLOCK(sc);
- txp_shutdown(dev);
- callout_drain(&sc->sc_tick);
+ ifp = sc->sc_ifp;
+ if (device_is_attached(dev)) {
+ TXP_LOCK(sc);
+ sc->sc_flags |= TXP_FLAG_DETACH;
+ txp_stop(sc);
+ TXP_UNLOCK(sc);
+ callout_drain(&sc->sc_tick);
+ taskqueue_drain(sc->sc_tq, &sc->sc_int_task);
+ ether_ifdetach(ifp);
+ }
+ WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
ifmedia_removeall(&sc->sc_ifmedia);
- ether_ifdetach(ifp);
-
- for (i = 0; i < RXBUF_ENTRIES; i++)
- free(sc->sc_rxbufs[i].rb_sd, M_DEVBUF);
-
- txp_release_resources(sc);
-
- mtx_destroy(&sc->sc_mtx);
- return (0);
-}
-
-static void
-txp_release_resources(struct txp_softc *sc)
-{
- device_t dev;
-
- dev = sc->sc_dev;
-
if (sc->sc_intrhand != NULL)
bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
-
if (sc->sc_irq != NULL)
bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
-
if (sc->sc_res != NULL)
- bus_release_resource(dev, TXP_RES, TXP_RID, sc->sc_res);
-
- if (sc->sc_ldata != NULL)
- contigfree(sc->sc_ldata, sizeof(struct txp_ldata), M_DEVBUF);
-
- if (sc->sc_ifp)
+ bus_release_resource(dev, sc->sc_res_type, sc->sc_res_id,
+ sc->sc_res);
+ if (sc->sc_ifp != NULL) {
if_free(sc->sc_ifp);
-}
-
-static int
-txp_chip_init(struct txp_softc *sc)
-{
- /* disable interrupts */
- WRITE_REG(sc, TXP_IER, 0);
- WRITE_REG(sc, TXP_IMR,
- TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
- TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
- TXP_INT_LATCH);
-
- /* ack all interrupts */
- WRITE_REG(sc, TXP_ISR, TXP_INT_RESERVED | TXP_INT_LATCH |
- TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
- TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
- TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
- TXP_INT_A2H_3 | TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0);
-
- if (txp_reset_adapter(sc))
- return (-1);
-
- /* disable interrupts */
- WRITE_REG(sc, TXP_IER, 0);
- WRITE_REG(sc, TXP_IMR,
- TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
- TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
- TXP_INT_LATCH);
-
- /* ack all interrupts */
- WRITE_REG(sc, TXP_ISR, TXP_INT_RESERVED | TXP_INT_LATCH |
- TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
- TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
- TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
- TXP_INT_A2H_3 | TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0);
+ sc->sc_ifp = NULL;
+ }
+ txp_free_rings(sc);
+ mtx_destroy(&sc->sc_mtx);
return (0);
}
static int
-txp_reset_adapter(struct txp_softc *sc)
+txp_reset(struct txp_softc *sc)
{
uint32_t r;
int i;
+ /* Disable interrupts. */
+ WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
+ WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
+ /* Ack all pending interrupts. */
+ WRITE_REG(sc, TXP_ISR, TXP_INTR_ALL);
+
r = 0;
WRITE_REG(sc, TXP_SRR, TXP_SRR_ALL);
DELAY(1000);
WRITE_REG(sc, TXP_SRR, 0);
- /* Should wait max 6 seconds */
+ /* Should wait max 6 seconds. */
for (i = 0; i < 6000; i++) {
r = READ_REG(sc, TXP_A2H_0);
if (r == STAT_WAITING_FOR_HOST_REQUEST)
@@ -467,11 +539,55 @@ txp_reset_adapter(struct txp_softc *sc)
DELAY(1000);
}
- if (r != STAT_WAITING_FOR_HOST_REQUEST) {
+ if (r != STAT_WAITING_FOR_HOST_REQUEST)
device_printf(sc->sc_dev, "reset hung\n");
- return (-1);
+
+ WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
+ WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
+ WRITE_REG(sc, TXP_ISR, TXP_INTR_ALL);
+
+ /*
+ * Give the sleep image more time to finish loading before
+ * trying to boot from it.
+ */
+ DELAY(5000);
+
+ return (0);
+}
+
+static int
+txp_boot(struct txp_softc *sc, uint32_t state)
+{
+
+ /* See if it's waiting for boot, and try to boot it. */
+ if (txp_wait(sc, state) != 0) {
+ device_printf(sc->sc_dev, "not waiting for boot\n");
+ return (ENXIO);
}
+ WRITE_REG(sc, TXP_H2A_2, TXP_ADDR_HI(sc->sc_ldata.txp_boot_paddr));
+ TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
+ WRITE_REG(sc, TXP_H2A_1, TXP_ADDR_LO(sc->sc_ldata.txp_boot_paddr));
+ TXP_BARRIER(sc, TXP_H2A_1, 4, BUS_SPACE_BARRIER_WRITE);
+ WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_REGISTER_BOOT_RECORD);
+ TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);
+
+ /* See if it booted. */
+ if (txp_wait(sc, STAT_RUNNING) != 0) {
+ device_printf(sc->sc_dev, "firmware not running\n");
+ return (ENXIO);
+ }
+
+ /* Clear TX and CMD ring write registers. */
+ WRITE_REG(sc, TXP_H2A_1, TXP_BOOTCMD_NULL);
+ TXP_BARRIER(sc, TXP_H2A_1, 4, BUS_SPACE_BARRIER_WRITE);
+ WRITE_REG(sc, TXP_H2A_2, TXP_BOOTCMD_NULL);
+ TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
+ WRITE_REG(sc, TXP_H2A_3, TXP_BOOTCMD_NULL);
+ TXP_BARRIER(sc, TXP_H2A_3, 4, BUS_SPACE_BARRIER_WRITE);
+ WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_NULL);
+ TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);
+
return (0);
}
@@ -480,10 +596,11 @@ txp_download_fw(struct txp_softc *sc)
{
struct txp_fw_file_header *fileheader;
struct txp_fw_section_header *secthead;
- int error, sect;
- uint32_t r, i, ier, imr;
+ int sect;
+ uint32_t error, ier, imr;
+
+ TXP_LOCK_ASSERT(sc);
- r = 0;
error = 0;
ier = READ_REG(sc, TXP_IER);
WRITE_REG(sc, TXP_IER, ier | TXP_INT_A2H_0);
@@ -491,68 +608,60 @@ txp_download_fw(struct txp_softc *sc)
imr = READ_REG(sc, TXP_IMR);
WRITE_REG(sc, TXP_IMR, imr | TXP_INT_A2H_0);
- for (i = 0; i < 10000; i++) {
- r = READ_REG(sc, TXP_A2H_0);
- if (r == STAT_WAITING_FOR_HOST_REQUEST)
- break;
- DELAY(50);
- }
- if (r != STAT_WAITING_FOR_HOST_REQUEST) {
+ if (txp_wait(sc, STAT_WAITING_FOR_HOST_REQUEST) != 0) {
device_printf(sc->sc_dev, "not waiting for host request\n");
- error = -1;
+ error = ETIMEDOUT;
goto fail;
}
- /* Ack the status */
+ /* Ack the status. */
WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);
fileheader = (struct txp_fw_file_header *)tc990image;
if (bcmp("TYPHOON", fileheader->magicid, sizeof(fileheader->magicid))) {
- device_printf(sc->sc_dev, "fw invalid magic\n");
- error = -1;
+ device_printf(sc->sc_dev, "firmware invalid magic\n");
goto fail;
}
- /* Tell boot firmware to get ready for image */
- WRITE_REG(sc, TXP_H2A_1, fileheader->addr);
- WRITE_REG(sc, TXP_H2A_2, fileheader->hmac[0]);
- WRITE_REG(sc, TXP_H2A_3, fileheader->hmac[1]);
- WRITE_REG(sc, TXP_H2A_4, fileheader->hmac[2]);
- WRITE_REG(sc, TXP_H2A_5, fileheader->hmac[3]);
- WRITE_REG(sc, TXP_H2A_6, fileheader->hmac[4]);
+ /* Tell boot firmware to get ready for image. */
+ WRITE_REG(sc, TXP_H2A_1, le32toh(fileheader->addr));
+ TXP_BARRIER(sc, TXP_H2A_1, 4, BUS_SPACE_BARRIER_WRITE);
+ WRITE_REG(sc, TXP_H2A_2, le32toh(fileheader->hmac[0]));
+ TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
+ WRITE_REG(sc, TXP_H2A_3, le32toh(fileheader->hmac[1]));
+ TXP_BARRIER(sc, TXP_H2A_3, 4, BUS_SPACE_BARRIER_WRITE);
+ WRITE_REG(sc, TXP_H2A_4, le32toh(fileheader->hmac[2]));
+ TXP_BARRIER(sc, TXP_H2A_4, 4, BUS_SPACE_BARRIER_WRITE);
+ WRITE_REG(sc, TXP_H2A_5, le32toh(fileheader->hmac[3]));
+ TXP_BARRIER(sc, TXP_H2A_5, 4, BUS_SPACE_BARRIER_WRITE);
+ WRITE_REG(sc, TXP_H2A_6, le32toh(fileheader->hmac[4]));
+ TXP_BARRIER(sc, TXP_H2A_6, 4, BUS_SPACE_BARRIER_WRITE);
WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_RUNTIME_IMAGE);
+ TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);
if (txp_download_fw_wait(sc)) {
- device_printf(sc->sc_dev, "fw wait failed, initial\n");
- error = -1;
+ device_printf(sc->sc_dev, "firmware wait failed, initial\n");
+ error = ETIMEDOUT;
goto fail;
}
secthead = (struct txp_fw_section_header *)(((uint8_t *)tc990image) +
sizeof(struct txp_fw_file_header));
- for (sect = 0; sect < fileheader->nsections; sect++) {
-
- if (txp_download_fw_section(sc, secthead, sect)) {
- error = -1;
+ for (sect = 0; sect < le32toh(fileheader->nsections); sect++) {
+ if ((error = txp_download_fw_section(sc, secthead, sect)) != 0)
goto fail;
- }
secthead = (struct txp_fw_section_header *)
- (((uint8_t *)secthead) + secthead->nbytes +
+ (((uint8_t *)secthead) + le32toh(secthead->nbytes) +
sizeof(*secthead));
}
WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_DOWNLOAD_COMPLETE);
+ TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);
- for (i = 0; i < 10000; i++) {
- r = READ_REG(sc, TXP_A2H_0);
- if (r == STAT_WAITING_FOR_BOOT)
- break;
- DELAY(50);
- }
- if (r != STAT_WAITING_FOR_BOOT) {
+ if (txp_wait(sc, STAT_WAITING_FOR_BOOT) != 0) {
device_printf(sc->sc_dev, "not waiting for boot\n");
- error = -1;
+ error = ETIMEDOUT;
goto fail;
}
@@ -566,27 +675,26 @@ fail:
static int
txp_download_fw_wait(struct txp_softc *sc)
{
- uint32_t i, r;
+ uint32_t i;
- r = 0;
- for (i = 0; i < 10000; i++) {
- r = READ_REG(sc, TXP_ISR);
- if (r & TXP_INT_A2H_0)
+ TXP_LOCK_ASSERT(sc);
+
+ for (i = 0; i < TXP_TIMEOUT; i++) {
+ if ((READ_REG(sc, TXP_ISR) & TXP_INT_A2H_0) != 0)
break;
DELAY(50);
}
- if (!(r & TXP_INT_A2H_0)) {
- device_printf(sc->sc_dev, "fw wait failed comm0\n");
- return (-1);
+ if (i == TXP_TIMEOUT) {
+ device_printf(sc->sc_dev, "firmware wait failed comm0\n");
+ return (ETIMEDOUT);
}
WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);
- r = READ_REG(sc, TXP_A2H_0);
- if (r != STAT_WAITING_FOR_SEGMENT) {
- device_printf(sc->sc_dev, "fw not waiting for segment\n");
- return (-1);
+ if (READ_REG(sc, TXP_A2H_0) != STAT_WAITING_FOR_SEGMENT) {
+ device_printf(sc->sc_dev, "firmware not waiting for segment\n");
+ return (ETIMEDOUT);
}
return (0);
}
@@ -595,180 +703,262 @@ static int
txp_download_fw_section(struct txp_softc *sc,
struct txp_fw_section_header *sect, int sectnum)
{
- vm_offset_t dma;
+ bus_dma_tag_t sec_tag;
+ bus_dmamap_t sec_map;
+ bus_addr_t sec_paddr;
+ uint8_t *sec_buf;
int rseg, err = 0;
struct mbuf m;
uint16_t csum;
- /* Skip zero length sections */
- if (sect->nbytes == 0)
+ TXP_LOCK_ASSERT(sc);
+
+ /* Skip zero length sections. */
+ if (le32toh(sect->nbytes) == 0)
return (0);
- /* Make sure we aren't past the end of the image */
+ /* Make sure we aren't past the end of the image. */
rseg = ((uint8_t *)sect) - ((uint8_t *)tc990image);
if (rseg >= sizeof(tc990image)) {
- device_printf(sc->sc_dev, "fw invalid section address, "
- "section %d\n", sectnum);
- return (-1);
+ device_printf(sc->sc_dev,
+ "firmware invalid section address, section %d\n", sectnum);
+ return (EIO);
}
- /* Make sure this section doesn't go past the end */
- rseg += sect->nbytes;
+ /* Make sure this section doesn't go past the end. */
+ rseg += le32toh(sect->nbytes);
if (rseg >= sizeof(tc990image)) {
- device_printf(sc->sc_dev, "fw truncated section %d\n",
+ device_printf(sc->sc_dev, "firmware truncated section %d\n",
sectnum);
- return (-1);
+ return (EIO);
}
- bcopy(((uint8_t *)sect) + sizeof(*sect), sc->sc_fwbuf, sect->nbytes);
- dma = vtophys(sc->sc_fwbuf);
+ sec_tag = NULL;
+ sec_map = NULL;
+ sec_buf = NULL;
+ /* XXX */
+ TXP_UNLOCK(sc);
+ err = txp_dma_alloc(sc, "firmware sections", &sec_tag, sizeof(uint32_t),
+ 0, &sec_map, (void **)&sec_buf, le32toh(sect->nbytes), &sec_paddr);
+ TXP_LOCK(sc);
+ if (err != 0)
+ goto bail;
+ bcopy(((uint8_t *)sect) + sizeof(*sect), sec_buf,
+ le32toh(sect->nbytes));
/*
* dummy up mbuf and verify section checksum
*/
m.m_type = MT_DATA;
m.m_next = m.m_nextpkt = NULL;
- m.m_len = sect->nbytes;
- m.m_data = sc->sc_fwbuf;
+ m.m_len = le32toh(sect->nbytes);
+ m.m_data = sec_buf;
m.m_flags = 0;
- csum = in_cksum(&m, sect->nbytes);
+ csum = in_cksum(&m, le32toh(sect->nbytes));
if (csum != sect->cksum) {
- device_printf(sc->sc_dev, "fw section %d, bad "
- "cksum (expected 0x%x got 0x%x)\n",
- sectnum, sect->cksum, csum);
- err = -1;
+ device_printf(sc->sc_dev,
+ "firmware section %d, bad cksum (expected 0x%x got 0x%x)\n",
+ sectnum, le16toh(sect->cksum), csum);
+ err = EIO;
goto bail;
}
- WRITE_REG(sc, TXP_H2A_1, sect->nbytes);
- WRITE_REG(sc, TXP_H2A_2, sect->cksum);
- WRITE_REG(sc, TXP_H2A_3, sect->addr);
- WRITE_REG(sc, TXP_H2A_4, 0);
- WRITE_REG(sc, TXP_H2A_5, dma & 0xffffffff);
+ bus_dmamap_sync(sec_tag, sec_map, BUS_DMASYNC_PREWRITE);
+
+ WRITE_REG(sc, TXP_H2A_1, le32toh(sect->nbytes));
+ TXP_BARRIER(sc, TXP_H2A_1, 4, BUS_SPACE_BARRIER_WRITE);
+ WRITE_REG(sc, TXP_H2A_2, le16toh(sect->cksum));
+ TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
+ WRITE_REG(sc, TXP_H2A_3, le32toh(sect->addr));
+ TXP_BARRIER(sc, TXP_H2A_3, 4, BUS_SPACE_BARRIER_WRITE);
+ WRITE_REG(sc, TXP_H2A_4, TXP_ADDR_HI(sec_paddr));
+ TXP_BARRIER(sc, TXP_H2A_4, 4, BUS_SPACE_BARRIER_WRITE);
+ WRITE_REG(sc, TXP_H2A_5, TXP_ADDR_LO(sec_paddr));
+ TXP_BARRIER(sc, TXP_H2A_5, 4, BUS_SPACE_BARRIER_WRITE);
WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_SEGMENT_AVAILABLE);
+ TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);
if (txp_download_fw_wait(sc)) {
- device_printf(sc->sc_dev, "fw wait failed, "
- "section %d\n", sectnum);
- err = -1;
+ device_printf(sc->sc_dev,
+ "firmware wait failed, section %d\n", sectnum);
+ err = ETIMEDOUT;
}
+ bus_dmamap_sync(sec_tag, sec_map, BUS_DMASYNC_POSTWRITE);
bail:
+ txp_dma_free(sc, &sec_tag, &sec_map, (void **)&sec_buf);
return (err);
}
-static void
+static int
txp_intr(void *vsc)
{
- struct txp_softc *sc = vsc;
- struct txp_hostvar *hv = sc->sc_hostvar;
+ struct txp_softc *sc;
+ uint32_t status;
+
+ sc = vsc;
+ status = READ_REG(sc, TXP_ISR);
+ if ((status & TXP_INT_LATCH) == 0)
+ return (FILTER_STRAY);
+ WRITE_REG(sc, TXP_ISR, status);
+ WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
+ taskqueue_enqueue(sc->sc_tq, &sc->sc_int_task);
+
+ return (FILTER_HANDLED);
+}
+
+static void
+txp_int_task(void *arg, int pending)
+{
+ struct txp_softc *sc;
+ struct ifnet *ifp;
+ struct txp_hostvar *hv;
uint32_t isr;
+ int more;
- /* mask all interrupts */
- TXP_LOCK(sc);
- WRITE_REG(sc, TXP_IMR, TXP_INT_RESERVED | TXP_INT_SELF |
- TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
- TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0 |
- TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
- TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | TXP_INT_LATCH);
+ sc = (struct txp_softc *)arg;
+ TXP_LOCK(sc);
+ ifp = sc->sc_ifp;
+ hv = sc->sc_hostvar;
isr = READ_REG(sc, TXP_ISR);
- while (isr) {
+ if ((isr & TXP_INT_LATCH) != 0)
WRITE_REG(sc, TXP_ISR, isr);
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
+ bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
+ sc->sc_cdata.txp_hostvar_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ more = 0;
if ((*sc->sc_rxhir.r_roff) != (*sc->sc_rxhir.r_woff))
- txp_rx_reclaim(sc, &sc->sc_rxhir);
+ more += txp_rx_reclaim(sc, &sc->sc_rxhir,
+ sc->sc_process_limit);
if ((*sc->sc_rxlor.r_roff) != (*sc->sc_rxlor.r_woff))
- txp_rx_reclaim(sc, &sc->sc_rxlor);
-
+ more += txp_rx_reclaim(sc, &sc->sc_rxlor,
+ sc->sc_process_limit);
+ /*
+ * XXX
+ * The controller does not seem smart enough to handle
+ * FIFO overflow conditions under heavy network load; no
+ * matter how often new Rx buffers are passed to the
+ * controller, the situation doesn't change. Flow control
+ * might be the only way to mitigate the issue, but the
+ * firmware has no command to control the threshold for
+ * emitting pause frames.
+ */
if (hv->hv_rx_buf_write_idx == hv->hv_rx_buf_read_idx)
txp_rxbuf_reclaim(sc);
-
if (sc->sc_txhir.r_cnt && (sc->sc_txhir.r_cons !=
- TXP_OFFSET2IDX(*(sc->sc_txhir.r_off))))
+ TXP_OFFSET2IDX(le32toh(*(sc->sc_txhir.r_off)))))
txp_tx_reclaim(sc, &sc->sc_txhir);
-
if (sc->sc_txlor.r_cnt && (sc->sc_txlor.r_cons !=
- TXP_OFFSET2IDX(*(sc->sc_txlor.r_off))))
+ TXP_OFFSET2IDX(le32toh(*(sc->sc_txlor.r_off)))))
txp_tx_reclaim(sc, &sc->sc_txlor);
-
- isr = READ_REG(sc, TXP_ISR);
+ bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
+ sc->sc_cdata.txp_hostvar_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+ txp_start_locked(sc->sc_ifp);
+ if (more != 0 || (READ_REG(sc, TXP_ISR) & TXP_INT_LATCH) != 0) {
+ taskqueue_enqueue(sc->sc_tq, &sc->sc_int_task);
+ TXP_UNLOCK(sc);
+ return;
+ }
}
- /* unmask all interrupts */
- WRITE_REG(sc, TXP_IMR, TXP_INT_A2H_3);
-
- txp_start_locked(sc->sc_ifp);
+ /* Re-enable interrupts. */
+ WRITE_REG(sc, TXP_IMR, TXP_INTR_NONE);
TXP_UNLOCK(sc);
}
-static void
-txp_rx_reclaim(struct txp_softc *sc, struct txp_rx_ring *r)
+#ifndef __NO_STRICT_ALIGNMENT
+static __inline void
+txp_fixup_rx(struct mbuf *m)
{
- struct ifnet *ifp = sc->sc_ifp;
+ int i;
+ uint16_t *src, *dst;
+
+ src = mtod(m, uint16_t *);
+ dst = src - (TXP_RXBUF_ALIGN - ETHER_ALIGN) / sizeof *src;
+
+ for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
+ *dst++ = *src++;
+
+ m->m_data -= TXP_RXBUF_ALIGN - ETHER_ALIGN;
+}
+#endif
+
+static int
+txp_rx_reclaim(struct txp_softc *sc, struct txp_rx_ring *r, int count)
+{
+ struct ifnet *ifp;
struct txp_rx_desc *rxd;
struct mbuf *m;
- struct txp_swdesc *sd = NULL;
- uint32_t roff, woff;
+ struct txp_rx_swdesc *sd;
+ uint32_t roff, woff, rx_stat, prog;
TXP_LOCK_ASSERT(sc);
- roff = *r->r_roff;
- woff = *r->r_woff;
- rxd = r->r_desc + (roff / sizeof(struct txp_rx_desc));
-
- while (roff != woff) {
- if (rxd->rx_flags & RX_FLAGS_ERROR) {
- device_printf(sc->sc_dev, "error 0x%x\n",
- rxd->rx_stat);
- ifp->if_ierrors++;
- goto next;
- }
+ ifp = sc->sc_ifp;
- /* retrieve stashed pointer */
- sd = rxd->rx_sd;
+ bus_dmamap_sync(r->r_tag, r->r_map, BUS_DMASYNC_POSTREAD |
+ BUS_DMASYNC_POSTWRITE);
+ roff = le32toh(*r->r_roff);
+ woff = le32toh(*r->r_woff);
+ rxd = r->r_desc + roff / sizeof(struct txp_rx_desc);
+ for (prog = 0; roff != woff; prog++, count--) {
+ if (count <= 0)
+ break;
+ bcopy((u_long *)&rxd->rx_vaddrlo, &sd, sizeof(sd));
+ KASSERT(sd != NULL, ("%s: Rx desc ring corrupted", __func__));
+ bus_dmamap_sync(sc->sc_cdata.txp_rx_tag, sd->sd_map,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(sc->sc_cdata.txp_rx_tag, sd->sd_map);
m = sd->sd_mbuf;
+ KASSERT(m != NULL, ("%s: Rx buffer ring corrupted", __func__));
sd->sd_mbuf = NULL;
-
- m->m_pkthdr.len = m->m_len = rxd->rx_len;
-
-#ifdef __STRICT_ALIGNMENT
- {
- /*
- * XXX Nice chip, except it won't accept "off by 2"
- * buffers, so we're force to copy. Supposedly
- * this will be fixed in a newer firmware rev
- * and this will be temporary.
- */
- struct mbuf *mnew;
-
- mnew = m_devget(mtod(m, caddr_t), rxd->rx_len,
- ETHER_ALIGN, ifp, NULL);
+ TAILQ_REMOVE(&sc->sc_busy_list, sd, sd_next);
+ TAILQ_INSERT_TAIL(&sc->sc_free_list, sd, sd_next);
+ if ((rxd->rx_flags & RX_FLAGS_ERROR) != 0) {
+ if (bootverbose)
+ device_printf(sc->sc_dev, "Rx error %u\n",
+ le32toh(rxd->rx_stat) & RX_ERROR_MASK);
m_freem(m);
- if (mnew == NULL) {
- ifp->if_ierrors++;
- goto next;
- }
- m = mnew;
+ goto next;
}
-#endif
- if (rxd->rx_stat & RX_STAT_IPCKSUMBAD)
- m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
- else if (rxd->rx_stat & RX_STAT_IPCKSUMGOOD)
- m->m_pkthdr.csum_flags |=
- CSUM_IP_CHECKED|CSUM_IP_VALID;
-
- if ((rxd->rx_stat & RX_STAT_TCPCKSUMGOOD) ||
- (rxd->rx_stat & RX_STAT_UDPCKSUMGOOD)) {
- m->m_pkthdr.csum_flags |=
- CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
- m->m_pkthdr.csum_data = 0xffff;
+ m->m_pkthdr.len = m->m_len = le16toh(rxd->rx_len);
+ m->m_pkthdr.rcvif = ifp;
+#ifndef __NO_STRICT_ALIGNMENT
+ txp_fixup_rx(m);
+#endif
+ rx_stat = le32toh(rxd->rx_stat);
+ if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
+ if ((rx_stat & RX_STAT_IPCKSUMBAD) != 0)
+ m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
+ else if ((rx_stat & RX_STAT_IPCKSUMGOOD) != 0)
+ m->m_pkthdr.csum_flags |=
+ CSUM_IP_CHECKED|CSUM_IP_VALID;
+
+ if ((rx_stat & RX_STAT_TCPCKSUMGOOD) != 0 ||
+ (rx_stat & RX_STAT_UDPCKSUMGOOD) != 0) {
+ m->m_pkthdr.csum_flags |=
+ CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+ m->m_pkthdr.csum_data = 0xffff;
+ }
}
- if (rxd->rx_stat & RX_STAT_VLAN) {
- m->m_pkthdr.ether_vtag = htons(rxd->rx_vlan >> 16);
+ /*
+ * XXX
+ * The Typhoon firmware has a bug whereby the VLAN tag is
+ * always stripped even when told not to remove the tag.
+ * Therefore don't check if_capenable here.
+ */
+ if (/* (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 && */
+ (rx_stat & RX_STAT_VLAN) != 0) {
+ m->m_pkthdr.ether_vtag =
+ bswap16((le32toh(rxd->rx_vlan) >> 16));
m->m_flags |= M_VLANTAG;
}
@@ -777,60 +967,86 @@ txp_rx_reclaim(struct txp_softc *sc, struct txp_rx_ring *r)
TXP_LOCK(sc);
next:
-
roff += sizeof(struct txp_rx_desc);
if (roff == (RX_ENTRIES * sizeof(struct txp_rx_desc))) {
roff = 0;
rxd = r->r_desc;
} else
rxd++;
- woff = *r->r_woff;
+ prog++;
}
- *r->r_roff = woff;
+ if (prog == 0)
+ return (0);
+
+ bus_dmamap_sync(r->r_tag, r->r_map, BUS_DMASYNC_PREREAD |
+ BUS_DMASYNC_PREWRITE);
+ *r->r_roff = htole32(roff);
+
+ return (count > 0 ? 0 : EAGAIN);
}
static void
txp_rxbuf_reclaim(struct txp_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct txp_hostvar *hv = sc->sc_hostvar;
+ struct txp_hostvar *hv;
struct txp_rxbuf_desc *rbd;
- struct txp_swdesc *sd;
- uint32_t i;
+ struct txp_rx_swdesc *sd;
+ bus_dma_segment_t segs[1];
+ int nsegs, prod, prog;
+ uint32_t cons;
TXP_LOCK_ASSERT(sc);
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
+
+ hv = sc->sc_hostvar;
+ cons = TXP_OFFSET2IDX(le32toh(hv->hv_rx_buf_read_idx));
+ prod = sc->sc_rxbufprod;
+ TXP_DESC_INC(prod, RXBUF_ENTRIES);
+ if (prod == cons)
return;
- i = sc->sc_rxbufprod;
- rbd = sc->sc_rxbufs + i;
+ bus_dmamap_sync(sc->sc_cdata.txp_rxbufs_tag,
+ sc->sc_cdata.txp_rxbufs_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
- while (1) {
- sd = rbd->rb_sd;
- if (sd->sd_mbuf != NULL)
+ for (prog = 0; prod != cons; prog++) {
+ sd = TAILQ_FIRST(&sc->sc_free_list);
+ if (sd == NULL)
break;
-
+ rbd = sc->sc_rxbufs + prod;
+ bcopy(&sd, (u_long *)&rbd->rb_vaddrlo, sizeof(sd));
sd->sd_mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
if (sd->sd_mbuf == NULL)
- return;
- sd->sd_mbuf->m_pkthdr.rcvif = ifp;
+ break;
sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES;
-
- rbd->rb_paddrlo = vtophys(mtod(sd->sd_mbuf, vm_offset_t))
- & 0xffffffff;
- rbd->rb_paddrhi = 0;
-
- hv->hv_rx_buf_write_idx = TXP_IDX2OFFSET(i);
-
- if (++i == RXBUF_ENTRIES) {
- i = 0;
- rbd = sc->sc_rxbufs;
- } else
- rbd++;
+#ifndef __NO_STRICT_ALIGNMENT
+ m_adj(sd->sd_mbuf, TXP_RXBUF_ALIGN);
+#endif
+ if (bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_rx_tag,
+ sd->sd_map, sd->sd_mbuf, segs, &nsegs, 0) != 0) {
+ m_freem(sd->sd_mbuf);
+ sd->sd_mbuf = NULL;
+ break;
+ }
+ KASSERT(nsegs == 1, ("%s : %d segments returned!", __func__,
+ nsegs));
+ TAILQ_REMOVE(&sc->sc_free_list, sd, sd_next);
+ TAILQ_INSERT_TAIL(&sc->sc_busy_list, sd, sd_next);
+ bus_dmamap_sync(sc->sc_cdata.txp_rx_tag, sd->sd_map,
+ BUS_DMASYNC_PREREAD);
+ rbd->rb_paddrlo = htole32(TXP_ADDR_LO(segs[0].ds_addr));
+ rbd->rb_paddrhi = htole32(TXP_ADDR_HI(segs[0].ds_addr));
+ TXP_DESC_INC(prod, RXBUF_ENTRIES);
}
- sc->sc_rxbufprod = i;
+ if (prog == 0)
+ return;
+ bus_dmamap_sync(sc->sc_cdata.txp_rxbufs_tag,
+ sc->sc_cdata.txp_rxbufs_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ prod = (prod + RXBUF_ENTRIES - 1) % RXBUF_ENTRIES;
+ sc->sc_rxbufprod = prod;
+ hv->hv_rx_buf_write_idx = htole32(TXP_IDX2OFFSET(prod));
}
/*
@@ -839,26 +1055,35 @@ txp_rxbuf_reclaim(struct txp_softc *sc)
static void
txp_tx_reclaim(struct txp_softc *sc, struct txp_tx_ring *r)
{
- struct ifnet *ifp = sc->sc_ifp;
- uint32_t idx = TXP_OFFSET2IDX(*(r->r_off));
- uint32_t cons = r->r_cons, cnt = r->r_cnt;
- struct txp_tx_desc *txd = r->r_desc + cons;
- struct txp_swdesc *sd = sc->sc_txd + cons;
- struct mbuf *m;
+ struct ifnet *ifp;
+ uint32_t idx;
+ uint32_t cons, cnt;
+ struct txp_tx_desc *txd;
+ struct txp_swdesc *sd;
TXP_LOCK_ASSERT(sc);
- while (cons != idx) {
- if (cnt == 0)
- break;
- if ((txd->tx_flags & TX_FLAGS_TYPE_M) ==
- TX_FLAGS_TYPE_DATA) {
- m = sd->sd_mbuf;
- if (m != NULL) {
- m_freem(m);
+ bus_dmamap_sync(r->r_tag, r->r_map, BUS_DMASYNC_POSTREAD |
+ BUS_DMASYNC_POSTWRITE);
+ ifp = sc->sc_ifp;
+ idx = TXP_OFFSET2IDX(le32toh(*(r->r_off)));
+ cons = r->r_cons;
+ cnt = r->r_cnt;
+ txd = r->r_desc + cons;
+ sd = sc->sc_txd + cons;
+
+ for (cnt = r->r_cnt; cons != idx && cnt > 0; cnt--) {
+ if ((txd->tx_flags & TX_FLAGS_TYPE_M) == TX_FLAGS_TYPE_DATA) {
+ if (sd->sd_mbuf != NULL) {
+ bus_dmamap_sync(sc->sc_cdata.txp_tx_tag,
+ sd->sd_map, BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(sc->sc_cdata.txp_tx_tag,
+ sd->sd_map);
+ m_freem(sd->sd_mbuf);
+ sd->sd_mbuf = NULL;
txd->tx_addrlo = 0;
txd->tx_addrhi = 0;
- ifp->if_opackets++;
+ txd->tx_flags = 0;
}
}
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
@@ -871,172 +1096,594 @@ txp_tx_reclaim(struct txp_softc *sc, struct txp_tx_ring *r)
txd++;
sd++;
}
-
- cnt--;
}
+ bus_dmamap_sync(r->r_tag, r->r_map, BUS_DMASYNC_PREREAD |
+ BUS_DMASYNC_PREWRITE);
r->r_cons = cons;
r->r_cnt = cnt;
if (cnt == 0)
- ifp->if_timer = 0;
+ sc->sc_watchdog_timer = 0;
}
static int
txp_shutdown(device_t dev)
{
+
+ return (txp_suspend(dev));
+}
+
+static int
+txp_suspend(device_t dev)
+{
struct txp_softc *sc;
+ struct ifnet *ifp;
+ uint8_t *eaddr;
+ uint16_t p1;
+ uint32_t p2;
+ int pmc;
+ uint16_t pmstat;
sc = device_get_softc(dev);
TXP_LOCK(sc);
+ ifp = sc->sc_ifp;
+ txp_stop(sc);
+ txp_init_rings(sc);
+ /* Reset controller and make it reload sleep image. */
+ txp_reset(sc);
+ /* Let controller boot from sleep image. */
+ if (txp_boot(sc, STAT_WAITING_FOR_HOST_REQUEST) != 0)
+ device_printf(sc->sc_dev, "couldn't boot sleep image\n");
+
+ /* Set station address. */
+ eaddr = IF_LLADDR(sc->sc_ifp);
+ p1 = 0;
+ ((uint8_t *)&p1)[1] = eaddr[0];
+ ((uint8_t *)&p1)[0] = eaddr[1];
+ p1 = le16toh(p1);
+ ((uint8_t *)&p2)[3] = eaddr[2];
+ ((uint8_t *)&p2)[2] = eaddr[3];
+ ((uint8_t *)&p2)[1] = eaddr[4];
+ ((uint8_t *)&p2)[0] = eaddr[5];
+ p2 = le32toh(p2);
+ txp_command(sc, TXP_CMD_STATION_ADDRESS_WRITE, p1, p2, 0, NULL, NULL,
+ NULL, TXP_CMD_WAIT);
+ txp_set_filter(sc);
+ WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
+ WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
+ txp_sleep(sc, sc->sc_ifp->if_capenable);
+ if (pci_find_extcap(sc->sc_dev, PCIY_PMG, &pmc) == 0) {
+ /* Request PME. */
+ pmstat = pci_read_config(sc->sc_dev,
+ pmc + PCIR_POWER_STATUS, 2);
+ pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
+ if ((ifp->if_capenable & IFCAP_WOL) != 0)
+ pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
+ pci_write_config(sc->sc_dev,
+ pmc + PCIR_POWER_STATUS, pmstat, 2);
+ }
+ TXP_UNLOCK(sc);
+
+ return (0);
+}
- /* mask all interrupts */
- WRITE_REG(sc, TXP_IMR,
- TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
- TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
- TXP_INT_LATCH);
+static int
+txp_resume(device_t dev)
+{
+ struct txp_softc *sc;
+ int pmc;
+ uint16_t pmstat;
+
+ sc = device_get_softc(dev);
- txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 0);
- txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 0);
- txp_command(sc, TXP_CMD_HALT, 0, 0, 0, NULL, NULL, NULL, 0);
+ TXP_LOCK(sc);
+ if (pci_find_extcap(sc->sc_dev, PCIY_PMG, &pmc) == 0) {
+ /* Disable PME and clear PME status. */
+ pmstat = pci_read_config(sc->sc_dev,
+ pmc + PCIR_POWER_STATUS, 2);
+ if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
+ pmstat &= ~PCIM_PSTAT_PMEENABLE;
+ pci_write_config(sc->sc_dev,
+ pmc + PCIR_POWER_STATUS, pmstat, 2);
+ }
+ }
+ if ((sc->sc_ifp->if_flags & IFF_UP) != 0)
+ txp_init_locked(sc);
TXP_UNLOCK(sc);
return (0);
}
+struct txp_dmamap_arg {
+ bus_addr_t txp_busaddr;
+};
+
+static void
+txp_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
+{
+ struct txp_dmamap_arg *ctx;
+
+ if (error != 0)
+ return;
+
+ KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
+
+ ctx = (struct txp_dmamap_arg *)arg;
+ ctx->txp_busaddr = segs[0].ds_addr;
+}
+
+static int
+txp_dma_alloc(struct txp_softc *sc, char *type, bus_dma_tag_t *tag,
+ bus_size_t alignment, bus_size_t boundary, bus_dmamap_t *map, void **buf,
+ bus_size_t size, bus_addr_t *paddr)
+{
+ struct txp_dmamap_arg ctx;
+ int error;
+
+ /* Create DMA block tag. */
+ error = bus_dma_tag_create(
+ sc->sc_cdata.txp_parent_tag, /* parent */
+ alignment, boundary, /* algnmnt, boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ size, /* maxsize */
+ 1, /* nsegments */
+ size, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ tag);
+ if (error != 0) {
+ device_printf(sc->sc_dev,
+ "could not create DMA tag for %s.\n", type);
+ return (error);
+ }
+
+ *paddr = 0;
+ /* Allocate DMA'able memory and load the DMA map. */
+ error = bus_dmamem_alloc(*tag, buf, BUS_DMA_WAITOK | BUS_DMA_ZERO |
+ BUS_DMA_COHERENT, map);
+ if (error != 0) {
+ device_printf(sc->sc_dev,
+ "could not allocate DMA'able memory for %s.\n", type);
+ return (error);
+ }
+
+ ctx.txp_busaddr = 0;
+ error = bus_dmamap_load(*tag, *map, *(uint8_t **)buf,
+ size, txp_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
+ if (error != 0 || ctx.txp_busaddr == 0) {
+ device_printf(sc->sc_dev,
+ "could not load DMA'able memory for %s.\n", type);
+ return (error);
+ }
+ *paddr = ctx.txp_busaddr;
+
+ return (0);
+}
+
+static void
+txp_dma_free(struct txp_softc *sc, bus_dma_tag_t *tag, bus_dmamap_t *map,
+ void **buf)
+{
+
+ if (*tag != NULL) {
+ if (*map != NULL)
+ bus_dmamap_unload(*tag, *map);
+ if (*map != NULL && buf != NULL)
+ bus_dmamem_free(*tag, *(uint8_t **)buf, *map);
+ *(uint8_t **)buf = NULL;
+ *map = NULL;
+ bus_dma_tag_destroy(*tag);
+ *tag = NULL;
+ }
+}
+
static int
txp_alloc_rings(struct txp_softc *sc)
{
struct txp_boot_record *boot;
struct txp_ldata *ld;
- uint32_t r;
- int i;
+ struct txp_swdesc *txd;
+ struct txp_rxbuf_desc *rbd;
+ struct txp_rx_swdesc *sd;
+ int error, i;
- r = 0;
- ld = sc->sc_ldata;
- boot = &ld->txp_boot;
+ ld = &sc->sc_ldata;
+ boot = ld->txp_boot;
/* boot record */
sc->sc_boot = boot;
- /* host variables */
- bzero(&ld->txp_hostvar, sizeof(struct txp_hostvar));
- boot->br_hostvar_lo = vtophys(&ld->txp_hostvar);
- boot->br_hostvar_hi = 0;
- sc->sc_hostvar = (struct txp_hostvar *)&ld->txp_hostvar;
+ /*
+ * Create parent ring/DMA block tag.
+ * The datasheet says that all ring addresses and descriptors
+ * support 64bit addressing. However, the controller is
+ * known to have no DAC support, so limit the DMA address
+ * space to 32bits.
+ */
+ error = bus_dma_tag_create(
+ bus_get_dma_tag(sc->sc_dev), /* parent */
+ 1, 0, /* algnmnt, boundary */
+ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
+ 0, /* nsegments */
+ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->sc_cdata.txp_parent_tag);
+ if (error != 0) {
+ device_printf(sc->sc_dev, "could not create parent DMA tag.\n");
+ return (error);
+ }
- /* hi priority tx ring */
- boot->br_txhipri_lo = vtophys(&ld->txp_txhiring);;
- boot->br_txhipri_hi = 0;
- boot->br_txhipri_siz = TX_ENTRIES * sizeof(struct txp_tx_desc);
+ /* Boot record. */
+ error = txp_dma_alloc(sc, "boot record",
+ &sc->sc_cdata.txp_boot_tag, sizeof(uint32_t), 0,
+ &sc->sc_cdata.txp_boot_map, (void **)&sc->sc_ldata.txp_boot,
+ sizeof(struct txp_boot_record),
+ &sc->sc_ldata.txp_boot_paddr);
+ if (error != 0)
+ return (error);
+ boot = sc->sc_ldata.txp_boot;
+ sc->sc_boot = boot;
+
+ /* Host variables. */
+ error = txp_dma_alloc(sc, "host variables",
+ &sc->sc_cdata.txp_hostvar_tag, sizeof(uint32_t), 0,
+ &sc->sc_cdata.txp_hostvar_map, (void **)&sc->sc_ldata.txp_hostvar,
+ sizeof(struct txp_hostvar),
+ &sc->sc_ldata.txp_hostvar_paddr);
+ if (error != 0)
+ return (error);
+ boot->br_hostvar_lo =
+ htole32(TXP_ADDR_LO(sc->sc_ldata.txp_hostvar_paddr));
+ boot->br_hostvar_hi =
+ htole32(TXP_ADDR_HI(sc->sc_ldata.txp_hostvar_paddr));
+ sc->sc_hostvar = sc->sc_ldata.txp_hostvar;
+
+ /* Hi priority tx ring. */
+ error = txp_dma_alloc(sc, "hi priority tx ring",
+ &sc->sc_cdata.txp_txhiring_tag, sizeof(struct txp_tx_desc), 0,
+ &sc->sc_cdata.txp_txhiring_map, (void **)&sc->sc_ldata.txp_txhiring,
+ sizeof(struct txp_tx_desc) * TX_ENTRIES,
+ &sc->sc_ldata.txp_txhiring_paddr);
+ if (error != 0)
+ return (error);
+ boot->br_txhipri_lo =
+ htole32(TXP_ADDR_LO(sc->sc_ldata.txp_txhiring_paddr));
+ boot->br_txhipri_hi =
+ htole32(TXP_ADDR_HI(sc->sc_ldata.txp_txhiring_paddr));
+ boot->br_txhipri_siz =
+ htole32(TX_ENTRIES * sizeof(struct txp_tx_desc));
+ sc->sc_txhir.r_tag = sc->sc_cdata.txp_txhiring_tag;
+ sc->sc_txhir.r_map = sc->sc_cdata.txp_txhiring_map;
sc->sc_txhir.r_reg = TXP_H2A_1;
- sc->sc_txhir.r_desc = (struct txp_tx_desc *)&ld->txp_txhiring;
+ sc->sc_txhir.r_desc = sc->sc_ldata.txp_txhiring;
sc->sc_txhir.r_cons = sc->sc_txhir.r_prod = sc->sc_txhir.r_cnt = 0;
sc->sc_txhir.r_off = &sc->sc_hostvar->hv_tx_hi_desc_read_idx;
- /* lo priority tx ring */
- boot->br_txlopri_lo = vtophys(&ld->txp_txloring);
- boot->br_txlopri_hi = 0;
- boot->br_txlopri_siz = TX_ENTRIES * sizeof(struct txp_tx_desc);
+ /* Low priority tx ring. */
+ error = txp_dma_alloc(sc, "low priority tx ring",
+ &sc->sc_cdata.txp_txloring_tag, sizeof(struct txp_tx_desc), 0,
+ &sc->sc_cdata.txp_txloring_map, (void **)&sc->sc_ldata.txp_txloring,
+ sizeof(struct txp_tx_desc) * TX_ENTRIES,
+ &sc->sc_ldata.txp_txloring_paddr);
+ if (error != 0)
+ return (error);
+ boot->br_txlopri_lo =
+ htole32(TXP_ADDR_LO(sc->sc_ldata.txp_txloring_paddr));
+ boot->br_txlopri_hi =
+ htole32(TXP_ADDR_HI(sc->sc_ldata.txp_txloring_paddr));
+ boot->br_txlopri_siz =
+ htole32(TX_ENTRIES * sizeof(struct txp_tx_desc));
+ sc->sc_txlor.r_tag = sc->sc_cdata.txp_txloring_tag;
+ sc->sc_txlor.r_map = sc->sc_cdata.txp_txloring_map;
sc->sc_txlor.r_reg = TXP_H2A_3;
- sc->sc_txlor.r_desc = (struct txp_tx_desc *)&ld->txp_txloring;
+ sc->sc_txlor.r_desc = sc->sc_ldata.txp_txloring;
sc->sc_txlor.r_cons = sc->sc_txlor.r_prod = sc->sc_txlor.r_cnt = 0;
sc->sc_txlor.r_off = &sc->sc_hostvar->hv_tx_lo_desc_read_idx;
- /* high priority rx ring */
- boot->br_rxhipri_lo = vtophys(&ld->txp_rxhiring);
- boot->br_rxhipri_hi = 0;
- boot->br_rxhipri_siz = RX_ENTRIES * sizeof(struct txp_rx_desc);
- sc->sc_rxhir.r_desc = (struct txp_rx_desc *)&ld->txp_rxhiring;
+ /* High priority rx ring. */
+ error = txp_dma_alloc(sc, "hi priority rx ring",
+ &sc->sc_cdata.txp_rxhiring_tag, sizeof(struct txp_rx_desc), 0,
+ &sc->sc_cdata.txp_rxhiring_map, (void **)&sc->sc_ldata.txp_rxhiring,
+ sizeof(struct txp_rx_desc) * RX_ENTRIES,
+ &sc->sc_ldata.txp_rxhiring_paddr);
+ if (error != 0)
+ return (error);
+ boot->br_rxhipri_lo =
+ htole32(TXP_ADDR_LO(sc->sc_ldata.txp_rxhiring_paddr));
+ boot->br_rxhipri_hi =
+ htole32(TXP_ADDR_HI(sc->sc_ldata.txp_rxhiring_paddr));
+ boot->br_rxhipri_siz =
+ htole32(RX_ENTRIES * sizeof(struct txp_rx_desc));
+ sc->sc_rxhir.r_tag = sc->sc_cdata.txp_rxhiring_tag;
+ sc->sc_rxhir.r_map = sc->sc_cdata.txp_rxhiring_map;
+ sc->sc_rxhir.r_desc = sc->sc_ldata.txp_rxhiring;
sc->sc_rxhir.r_roff = &sc->sc_hostvar->hv_rx_hi_read_idx;
sc->sc_rxhir.r_woff = &sc->sc_hostvar->hv_rx_hi_write_idx;
- /* low priority rx ring */
- boot->br_rxlopri_lo = vtophys(&ld->txp_rxloring);
- boot->br_rxlopri_hi = 0;
- boot->br_rxlopri_siz = RX_ENTRIES * sizeof(struct txp_rx_desc);
- sc->sc_rxlor.r_desc = (struct txp_rx_desc *)&ld->txp_rxloring;
+ /* Low priority rx ring. */
+ error = txp_dma_alloc(sc, "low priority rx ring",
+ &sc->sc_cdata.txp_rxloring_tag, sizeof(struct txp_rx_desc), 0,
+ &sc->sc_cdata.txp_rxloring_map, (void **)&sc->sc_ldata.txp_rxloring,
+ sizeof(struct txp_rx_desc) * RX_ENTRIES,
+ &sc->sc_ldata.txp_rxloring_paddr);
+ if (error != 0)
+ return (error);
+ boot->br_rxlopri_lo =
+ htole32(TXP_ADDR_LO(sc->sc_ldata.txp_rxloring_paddr));
+ boot->br_rxlopri_hi =
+ htole32(TXP_ADDR_HI(sc->sc_ldata.txp_rxloring_paddr));
+ boot->br_rxlopri_siz =
+ htole32(RX_ENTRIES * sizeof(struct txp_rx_desc));
+ sc->sc_rxlor.r_tag = sc->sc_cdata.txp_rxloring_tag;
+ sc->sc_rxlor.r_map = sc->sc_cdata.txp_rxloring_map;
+ sc->sc_rxlor.r_desc = sc->sc_ldata.txp_rxloring;
sc->sc_rxlor.r_roff = &sc->sc_hostvar->hv_rx_lo_read_idx;
sc->sc_rxlor.r_woff = &sc->sc_hostvar->hv_rx_lo_write_idx;
- /* command ring */
- bzero(&ld->txp_cmdring, sizeof(struct txp_cmd_desc) * CMD_ENTRIES);
- boot->br_cmd_lo = vtophys(&ld->txp_cmdring);
- boot->br_cmd_hi = 0;
- boot->br_cmd_siz = CMD_ENTRIES * sizeof(struct txp_cmd_desc);
- sc->sc_cmdring.base = (struct txp_cmd_desc *)&ld->txp_cmdring;
+ /* Command ring. */
+ error = txp_dma_alloc(sc, "command ring",
+ &sc->sc_cdata.txp_cmdring_tag, sizeof(struct txp_cmd_desc), 0,
+ &sc->sc_cdata.txp_cmdring_map, (void **)&sc->sc_ldata.txp_cmdring,
+ sizeof(struct txp_cmd_desc) * CMD_ENTRIES,
+ &sc->sc_ldata.txp_cmdring_paddr);
+ if (error != 0)
+ return (error);
+ boot->br_cmd_lo = htole32(TXP_ADDR_LO(sc->sc_ldata.txp_cmdring_paddr));
+ boot->br_cmd_hi = htole32(TXP_ADDR_HI(sc->sc_ldata.txp_cmdring_paddr));
+ boot->br_cmd_siz = htole32(CMD_ENTRIES * sizeof(struct txp_cmd_desc));
+ sc->sc_cmdring.base = sc->sc_ldata.txp_cmdring;
sc->sc_cmdring.size = CMD_ENTRIES * sizeof(struct txp_cmd_desc);
sc->sc_cmdring.lastwrite = 0;
- /* response ring */
- bzero(&ld->txp_rspring, sizeof(struct txp_rsp_desc) * RSP_ENTRIES);
- boot->br_resp_lo = vtophys(&ld->txp_rspring);
- boot->br_resp_hi = 0;
- boot->br_resp_siz = CMD_ENTRIES * sizeof(struct txp_rsp_desc);
- sc->sc_rspring.base = (struct txp_rsp_desc *)&ld->txp_rspring;
+ /* Response ring. */
+ error = txp_dma_alloc(sc, "response ring",
+ &sc->sc_cdata.txp_rspring_tag, sizeof(struct txp_rsp_desc), 0,
+ &sc->sc_cdata.txp_rspring_map, (void **)&sc->sc_ldata.txp_rspring,
+ sizeof(struct txp_rsp_desc) * RSP_ENTRIES,
+ &sc->sc_ldata.txp_rspring_paddr);
+ if (error != 0)
+ return (error);
+ boot->br_resp_lo = htole32(TXP_ADDR_LO(sc->sc_ldata.txp_rspring_paddr));
+ boot->br_resp_hi = htole32(TXP_ADDR_HI(sc->sc_ldata.txp_rspring_paddr));
+ boot->br_resp_siz = htole32(RSP_ENTRIES * sizeof(struct txp_rsp_desc));
+ sc->sc_rspring.base = sc->sc_ldata.txp_rspring;
sc->sc_rspring.size = RSP_ENTRIES * sizeof(struct txp_rsp_desc);
sc->sc_rspring.lastwrite = 0;
- /* receive buffer ring */
- boot->br_rxbuf_lo = vtophys(&ld->txp_rxbufs);
- boot->br_rxbuf_hi = 0;
- boot->br_rxbuf_siz = RXBUF_ENTRIES * sizeof(struct txp_rxbuf_desc);
- sc->sc_rxbufs = (struct txp_rxbuf_desc *)&ld->txp_rxbufs;
+ /* Receive buffer ring. */
+ error = txp_dma_alloc(sc, "receive buffer ring",
+ &sc->sc_cdata.txp_rxbufs_tag, sizeof(struct txp_rxbuf_desc), 0,
+ &sc->sc_cdata.txp_rxbufs_map, (void **)&sc->sc_ldata.txp_rxbufs,
+ sizeof(struct txp_rxbuf_desc) * RXBUF_ENTRIES,
+ &sc->sc_ldata.txp_rxbufs_paddr);
+ if (error != 0)
+ return (error);
+ boot->br_rxbuf_lo =
+ htole32(TXP_ADDR_LO(sc->sc_ldata.txp_rxbufs_paddr));
+ boot->br_rxbuf_hi =
+ htole32(TXP_ADDR_HI(sc->sc_ldata.txp_rxbufs_paddr));
+ boot->br_rxbuf_siz =
+ htole32(RXBUF_ENTRIES * sizeof(struct txp_rxbuf_desc));
+ sc->sc_rxbufs = sc->sc_ldata.txp_rxbufs;
+
+ /* Zero ring. */
+ error = txp_dma_alloc(sc, "zero buffer",
+ &sc->sc_cdata.txp_zero_tag, sizeof(uint32_t), 0,
+ &sc->sc_cdata.txp_zero_map, (void **)&sc->sc_ldata.txp_zero,
+ sizeof(uint32_t), &sc->sc_ldata.txp_zero_paddr);
+ if (error != 0)
+ return (error);
+ boot->br_zero_lo = htole32(TXP_ADDR_LO(sc->sc_ldata.txp_zero_paddr));
+ boot->br_zero_hi = htole32(TXP_ADDR_HI(sc->sc_ldata.txp_zero_paddr));
+
+ bus_dmamap_sync(sc->sc_cdata.txp_boot_tag, sc->sc_cdata.txp_boot_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+ /* Create Tx buffers. */
+ error = bus_dma_tag_create(
+ sc->sc_cdata.txp_parent_tag, /* parent */
+ 1, 0, /* algnmnt, boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ MCLBYTES * TXP_MAXTXSEGS, /* maxsize */
+ TXP_MAXTXSEGS, /* nsegments */
+ MCLBYTES, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->sc_cdata.txp_tx_tag);
+ if (error != 0) {
+ device_printf(sc->sc_dev, "could not create Tx DMA tag.\n");
+ goto fail;
+ }
+
+ /* Create tag for Rx buffers. */
+ error = bus_dma_tag_create(
+ sc->sc_cdata.txp_parent_tag, /* parent */
+ TXP_RXBUF_ALIGN, 0, /* algnmnt, boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ MCLBYTES, /* maxsize */
+ 1, /* nsegments */
+ MCLBYTES, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->sc_cdata.txp_rx_tag);
+ if (error != 0) {
+ device_printf(sc->sc_dev, "could not create Rx DMA tag.\n");
+ goto fail;
+ }
+ /* Create DMA maps for Tx buffers. */
+ for (i = 0; i < TX_ENTRIES; i++) {
+ txd = &sc->sc_txd[i];
+ txd->sd_mbuf = NULL;
+ txd->sd_map = NULL;
+ error = bus_dmamap_create(sc->sc_cdata.txp_tx_tag, 0,
+ &txd->sd_map);
+ if (error != 0) {
+ device_printf(sc->sc_dev,
+ "could not create Tx dmamap.\n");
+ goto fail;
+ }
+ }
+
+ /* Create DMA maps for Rx buffers. */
for (i = 0; i < RXBUF_ENTRIES; i++) {
- struct txp_swdesc *sd;
- if (sc->sc_rxbufs[i].rb_sd != NULL)
- continue;
- sc->sc_rxbufs[i].rb_sd = malloc(sizeof(struct txp_swdesc),
- M_DEVBUF, M_NOWAIT);
- if (sc->sc_rxbufs[i].rb_sd == NULL)
- return (ENOBUFS);
- sd = sc->sc_rxbufs[i].rb_sd;
+ sd = malloc(sizeof(struct txp_rx_swdesc), M_DEVBUF,
+ M_NOWAIT | M_ZERO);
+ if (sd == NULL) {
+ error = ENOMEM;
+ goto fail;
+ }
+ /*
+ * The virtual address part of the descriptor is not used
+ * by the hardware, so use it to store the software
+ * descriptor pointer and save a separate lookup table.
+ * bcopy is needed here; a plain 32bit store would not
+ * yield a valid pointer on big-endian 64bit architectures.
+ */
+ rbd = sc->sc_rxbufs + i;
+ bcopy(&sd, (u_long *)&rbd->rb_vaddrlo, sizeof(sd));
sd->sd_mbuf = NULL;
+ sd->sd_map = NULL;
+ error = bus_dmamap_create(sc->sc_cdata.txp_rx_tag, 0,
+ &sd->sd_map);
+ if (error != 0) {
+ device_printf(sc->sc_dev,
+ "could not create Rx dmamap.\n");
+ goto fail;
+ }
+ TAILQ_INSERT_TAIL(&sc->sc_free_list, sd, sd_next);
}
+
+fail:
+ return (error);
+}
+
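+/*
+ * Reset host side ring state (host variables, producer/consumer
+ * indices and the zero word) before handing the rings back to
+ * freshly booted firmware.
+ */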
+static void
+txp_init_rings(struct txp_softc *sc)
+{
+
+ bzero(sc->sc_ldata.txp_hostvar, sizeof(struct txp_hostvar));
+ bzero(sc->sc_ldata.txp_zero, sizeof(uint32_t));
+ sc->sc_txhir.r_cons = 0;
+ sc->sc_txhir.r_prod = 0;
+ sc->sc_txhir.r_cnt = 0;
+ sc->sc_txlor.r_cons = 0;
+ sc->sc_txlor.r_prod = 0;
+ sc->sc_txlor.r_cnt = 0;
+ sc->sc_cmdring.lastwrite = 0;
+ sc->sc_rspring.lastwrite = 0;
sc->sc_rxbufprod = 0;
+ bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
+ sc->sc_cdata.txp_hostvar_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+}
- /* zero dma */
- bzero(&ld->txp_zero, sizeof(uint32_t));
- boot->br_zero_lo = vtophys(&ld->txp_zero);
- boot->br_zero_hi = 0;
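+/*
+ * Poll the A2H_0 register until the firmware reports the requested
+ * state or the TXP_TIMEOUT poll limit is exhausted.
+ */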
+static int
+txp_wait(struct txp_softc *sc, uint32_t state)
+{
+ uint32_t reg;
+ int i;
- /* See if it's waiting for boot, and try to boot it */
- for (i = 0; i < 10000; i++) {
- r = READ_REG(sc, TXP_A2H_0);
- if (r == STAT_WAITING_FOR_BOOT)
+ for (i = 0; i < TXP_TIMEOUT; i++) {
+ reg = READ_REG(sc, TXP_A2H_0);
+ if (reg == state)
break;
DELAY(50);
}
- if (r != STAT_WAITING_FOR_BOOT) {
- device_printf(sc->sc_dev, "not waiting for boot\n");
- return (ENXIO);
- }
+ return (i == TXP_TIMEOUT ? ETIMEDOUT : 0);
+}
- WRITE_REG(sc, TXP_H2A_2, 0);
- WRITE_REG(sc, TXP_H2A_1, vtophys(sc->sc_boot));
- WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_REGISTER_BOOT_RECORD);
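+/*
+ * Counterpart of txp_alloc_rings(); destroys Tx/Rx buffer maps and
+ * tags and releases all descriptor ring DMA areas, ending with the
+ * parent tag.
+ */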
+static void
+txp_free_rings(struct txp_softc *sc)
+{
+ struct txp_swdesc *txd;
+ struct txp_rx_swdesc *sd;
+ int i;
- /* See if it booted */
- for (i = 0; i < 10000; i++) {
- r = READ_REG(sc, TXP_A2H_0);
- if (r == STAT_RUNNING)
- break;
- DELAY(50);
+ /* Tx buffers. */
+ if (sc->sc_cdata.txp_tx_tag != NULL) {
+ for (i = 0; i < TX_ENTRIES; i++) {
+ txd = &sc->sc_txd[i];
+ if (txd->sd_map != NULL) {
+ bus_dmamap_destroy(sc->sc_cdata.txp_tx_tag,
+ txd->sd_map);
+ txd->sd_map = NULL;
+ }
+ }
+ bus_dma_tag_destroy(sc->sc_cdata.txp_tx_tag);
+ sc->sc_cdata.txp_tx_tag = NULL;
}
- if (r != STAT_RUNNING) {
- device_printf(sc->sc_dev, "fw not running\n");
- return (ENXIO);
+ /* Rx buffers. */
+ if (sc->sc_cdata.txp_rx_tag != NULL) {
+ if (sc->sc_rxbufs != NULL) {
+ KASSERT(TAILQ_FIRST(&sc->sc_busy_list) == NULL,
+ ("%s : still have busy Rx buffers", __func__));
+ while ((sd = TAILQ_FIRST(&sc->sc_free_list)) != NULL) {
+ TAILQ_REMOVE(&sc->sc_free_list, sd, sd_next);
+ if (sd->sd_map != NULL) {
+ bus_dmamap_destroy(
+ sc->sc_cdata.txp_rx_tag,
+ sd->sd_map);
+ sd->sd_map = NULL;
+ }
+ free(sd, M_DEVBUF);
+ }
+ }
+ bus_dma_tag_destroy(sc->sc_cdata.txp_rx_tag);
+ sc->sc_cdata.txp_rx_tag = NULL;
}
- /* Clear TX and CMD ring write registers */
- WRITE_REG(sc, TXP_H2A_1, TXP_BOOTCMD_NULL);
- WRITE_REG(sc, TXP_H2A_2, TXP_BOOTCMD_NULL);
- WRITE_REG(sc, TXP_H2A_3, TXP_BOOTCMD_NULL);
- WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_NULL);
+ /* Hi priority Tx ring. */
+ txp_dma_free(sc, &sc->sc_cdata.txp_txhiring_tag,
+ &sc->sc_cdata.txp_txhiring_map,
+ (void **)&sc->sc_ldata.txp_txhiring);
+ /* Low priority Tx ring. */
+ txp_dma_free(sc, &sc->sc_cdata.txp_txloring_tag,
+ &sc->sc_cdata.txp_txloring_map,
+ (void **)&sc->sc_ldata.txp_txloring);
+ /* Hi priority Rx ring. */
+ txp_dma_free(sc, &sc->sc_cdata.txp_rxhiring_tag,
+ &sc->sc_cdata.txp_rxhiring_map,
+ (void **)&sc->sc_ldata.txp_rxhiring);
+ /* Low priority Rx ring. */
+ txp_dma_free(sc, &sc->sc_cdata.txp_rxloring_tag,
+ &sc->sc_cdata.txp_rxloring_map,
+ (void **)&sc->sc_ldata.txp_rxloring);
+ /* Receive buffer ring. */
+ txp_dma_free(sc, &sc->sc_cdata.txp_rxbufs_tag,
+ &sc->sc_cdata.txp_rxbufs_map, (void **)&sc->sc_ldata.txp_rxbufs);
+ /* Command ring. */
+ txp_dma_free(sc, &sc->sc_cdata.txp_cmdring_tag,
+ &sc->sc_cdata.txp_cmdring_map, (void **)&sc->sc_ldata.txp_cmdring);
+ /* Response ring. */
+ txp_dma_free(sc, &sc->sc_cdata.txp_rspring_tag,
+ &sc->sc_cdata.txp_rspring_map, (void **)&sc->sc_ldata.txp_rspring);
+ /* Zero ring. */
+ txp_dma_free(sc, &sc->sc_cdata.txp_zero_tag,
+ &sc->sc_cdata.txp_zero_map, (void **)&sc->sc_ldata.txp_zero);
+ /* Host variables. */
+ txp_dma_free(sc, &sc->sc_cdata.txp_hostvar_tag,
+ &sc->sc_cdata.txp_hostvar_map, (void **)&sc->sc_ldata.txp_hostvar);
+ /* Boot record. */
+ txp_dma_free(sc, &sc->sc_cdata.txp_boot_tag,
+ &sc->sc_cdata.txp_boot_map, (void **)&sc->sc_ldata.txp_boot);
+
+ if (sc->sc_cdata.txp_parent_tag != NULL) {
+ bus_dma_tag_destroy(sc->sc_cdata.txp_parent_tag);
+ sc->sc_cdata.txp_parent_tag = NULL;
+ }
- return (0);
}
static int
@@ -1044,17 +1691,25 @@ txp_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
struct txp_softc *sc = ifp->if_softc;
struct ifreq *ifr = (struct ifreq *)data;
- int error = 0;
+ int capenable, error = 0, mask;
switch (command) {
case SIOCSIFFLAGS:
TXP_LOCK(sc);
- if (ifp->if_flags & IFF_UP) {
- txp_init_locked(sc);
+ if ((ifp->if_flags & IFF_UP) != 0) {
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
+ if (((ifp->if_flags ^ sc->sc_if_flags)
+ & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
+ txp_set_filter(sc);
+ } else {
+ if ((sc->sc_flags & TXP_FLAG_DETACH) == 0)
+ txp_init_locked(sc);
+ }
} else {
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
txp_stop(sc);
}
+ sc->sc_if_flags = ifp->if_flags;
TXP_UNLOCK(sc);
break;
case SIOCADDMULTI:
@@ -1064,9 +1719,42 @@ txp_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
* filter accordingly.
*/
TXP_LOCK(sc);
- txp_set_filter(sc);
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
+ txp_set_filter(sc);
+ TXP_UNLOCK(sc);
+ break;
+ case SIOCSIFCAP:
+ TXP_LOCK(sc);
+ capenable = ifp->if_capenable;
+ mask = ifr->ifr_reqcap ^ ifp->if_capenable;
+ if ((mask & IFCAP_TXCSUM) != 0 &&
+ (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
+ ifp->if_capenable ^= IFCAP_TXCSUM;
+ if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
+ ifp->if_hwassist |= TXP_CSUM_FEATURES;
+ else
+ ifp->if_hwassist &= ~TXP_CSUM_FEATURES;
+ }
+ if ((mask & IFCAP_RXCSUM) != 0 &&
+ (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
+ ifp->if_capenable ^= IFCAP_RXCSUM;
+ if ((mask & IFCAP_WOL_MAGIC) != 0 &&
+ (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
+ ifp->if_capenable ^= IFCAP_WOL_MAGIC;
+ if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
+ (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0)
+ ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
+ if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
+ (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
+ ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
+ if ((ifp->if_capenable & IFCAP_TXCSUM) == 0)
+ ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM;
+ if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
+ ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM;
+ if (capenable != ifp->if_capenable)
+ txp_set_capabilities(sc);
TXP_UNLOCK(sc);
- error = 0;
+ VLAN_CAPABILITIES(ifp);
break;
case SIOCGIFMEDIA:
case SIOCSIFMEDIA:
@@ -1083,29 +1771,54 @@ txp_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
static int
txp_rxring_fill(struct txp_softc *sc)
{
- int i;
- struct ifnet *ifp;
- struct txp_swdesc *sd;
+ struct txp_rxbuf_desc *rbd;
+ struct txp_rx_swdesc *sd;
+ bus_dma_segment_t segs[1];
+ int error, i, nsegs;
TXP_LOCK_ASSERT(sc);
- ifp = sc->sc_ifp;
+
+ bus_dmamap_sync(sc->sc_cdata.txp_rxbufs_tag,
+ sc->sc_cdata.txp_rxbufs_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
for (i = 0; i < RXBUF_ENTRIES; i++) {
- sd = sc->sc_rxbufs[i].rb_sd;
+ sd = TAILQ_FIRST(&sc->sc_free_list);
+ if (sd == NULL)
+ return (ENOMEM);
+ rbd = sc->sc_rxbufs + i;
+ bcopy(&sd, (u_long *)&rbd->rb_vaddrlo, sizeof(sd));
+ KASSERT(sd->sd_mbuf == NULL,
+ ("%s : Rx buffer ring corrupted", __func__));
sd->sd_mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
if (sd->sd_mbuf == NULL)
- return (ENOBUFS);
-
+ return (ENOMEM);
sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES;
- sd->sd_mbuf->m_pkthdr.rcvif = ifp;
-
- sc->sc_rxbufs[i].rb_paddrlo =
- vtophys(mtod(sd->sd_mbuf, vm_offset_t));
- sc->sc_rxbufs[i].rb_paddrhi = 0;
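+ /*
+ * Reserve TXP_RXBUF_ALIGN bytes of headroom; the Rx fixup
+ * code uses it to re-align received frames on
+ * strict-alignment architectures.
+ */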
+#ifndef __NO_STRICT_ALIGNMENT
+ m_adj(sd->sd_mbuf, TXP_RXBUF_ALIGN);
+#endif
+ if ((error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_rx_tag,
+ sd->sd_map, sd->sd_mbuf, segs, &nsegs, 0)) != 0) {
+ m_freem(sd->sd_mbuf);
+ sd->sd_mbuf = NULL;
+ return (error);
+ }
+ KASSERT(nsegs == 1, ("%s : %d segments returned!", __func__,
+ nsegs));
+ TAILQ_REMOVE(&sc->sc_free_list, sd, sd_next);
+ TAILQ_INSERT_TAIL(&sc->sc_busy_list, sd, sd_next);
+ bus_dmamap_sync(sc->sc_cdata.txp_rx_tag, sd->sd_map,
+ BUS_DMASYNC_PREREAD);
+ rbd->rb_paddrlo = htole32(TXP_ADDR_LO(segs[0].ds_addr));
+ rbd->rb_paddrhi = htole32(TXP_ADDR_HI(segs[0].ds_addr));
}
- sc->sc_hostvar->hv_rx_buf_write_idx = (RXBUF_ENTRIES - 1) *
- sizeof(struct txp_rxbuf_desc);
+ bus_dmamap_sync(sc->sc_cdata.txp_rxbufs_tag,
+ sc->sc_cdata.txp_rxbufs_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ sc->sc_rxbufprod = RXBUF_ENTRIES - 1;
+ sc->sc_hostvar->hv_rx_buf_write_idx =
+ htole32(TXP_IDX2OFFSET(RXBUF_ENTRIES - 1));
return (0);
}
@@ -1113,23 +1826,30 @@ txp_rxring_fill(struct txp_softc *sc)
static void
txp_rxring_empty(struct txp_softc *sc)
{
- int i;
- struct txp_swdesc *sd;
+ struct txp_rx_swdesc *sd;
+ int cnt;
TXP_LOCK_ASSERT(sc);
+
if (sc->sc_rxbufs == NULL)
return;
-
- for (i = 0; i < RXBUF_ENTRIES; i++) {
- if (&sc->sc_rxbufs[i] == NULL)
- continue;
- sd = sc->sc_rxbufs[i].rb_sd;
- if (sd == NULL)
- continue;
- if (sd->sd_mbuf != NULL) {
- m_freem(sd->sd_mbuf);
- sd->sd_mbuf = NULL;
- }
+ bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
+ sc->sc_cdata.txp_hostvar_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+ /* Release allocated Rx buffers. */
+ cnt = 0;
+ while ((sd = TAILQ_FIRST(&sc->sc_busy_list)) != NULL) {
+ TAILQ_REMOVE(&sc->sc_busy_list, sd, sd_next);
+ KASSERT(sd->sd_mbuf != NULL,
+ ("%s : Rx buffer ring corrupted", __func__));
+ bus_dmamap_sync(sc->sc_cdata.txp_rx_tag, sd->sd_map,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(sc->sc_cdata.txp_rx_tag, sd->sd_map);
+ m_freem(sd->sd_mbuf);
+ sd->sd_mbuf = NULL;
+ TAILQ_INSERT_TAIL(&sc->sc_free_list, sd, sd_next);
+ cnt++;
}
}
@@ -1148,85 +1868,157 @@ static void
txp_init_locked(struct txp_softc *sc)
{
struct ifnet *ifp;
+ uint8_t *eaddr;
uint16_t p1;
uint32_t p2;
+ int error;
TXP_LOCK_ASSERT(sc);
ifp = sc->sc_ifp;
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
return;
- txp_stop(sc);
+ /* Initialize ring structure. */
+ txp_init_rings(sc);
+ /* Wakeup controller. */
+ WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_WAKEUP);
+ TXP_BARRIER(sc, TXP_H2A_0, 4, BUS_SPACE_BARRIER_WRITE);
+ /*
+ * It seems that earlier NV images can come back online from
+ * the wakeup command alone but newer ones require a controller
+ * reset. So just reset the controller again.
+ */
+ if (txp_reset(sc) != 0)
+ goto init_fail;
+ /* Download firmware. */
+ error = txp_download_fw(sc);
+ if (error != 0) {
+ device_printf(sc->sc_dev, "could not download firmware.\n");
+ goto init_fail;
+ }
+ bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
+ sc->sc_cdata.txp_hostvar_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ if ((error = txp_rxring_fill(sc)) != 0) {
+ device_printf(sc->sc_dev, "no memory for Rx buffers.\n");
+ goto init_fail;
+ }
+ bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
+ sc->sc_cdata.txp_hostvar_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ if (txp_boot(sc, STAT_WAITING_FOR_BOOT) != 0) {
+ device_printf(sc->sc_dev, "could not boot firmware.\n");
+ goto init_fail;
+ }
- txp_command(sc, TXP_CMD_MAX_PKT_SIZE_WRITE, TXP_MAX_PKTLEN, 0, 0,
- NULL, NULL, NULL, 1);
+ /*
+ * Contrary to the Typhoon T2 software functional specification,
+ * the TXP_CMD_RECV_BUFFER_CONTROL command does not seem to be
+ * implemented in the firmware. This means the driver has to
+ * handle misaligned frames on strict-alignment architectures
+ * itself. AFAIK this is the only controller manufactured by
+ * 3Com that has this bug. 3Com should fix this.
+ */
+ if (txp_command(sc, TXP_CMD_MAX_PKT_SIZE_WRITE, TXP_MAX_PKTLEN, 0, 0,
+ NULL, NULL, NULL, TXP_CMD_NOWAIT) != 0)
+ goto init_fail;
+ /* Undocumented command (interrupt coalescing disable?) - from Linux. */
+ if (txp_command(sc, TXP_CMD_FILTER_DEFINE, 0, 0, 0, NULL, NULL, NULL,
+ TXP_CMD_NOWAIT) != 0)
+ goto init_fail;
/* Set station address. */
- ((uint8_t *)&p1)[1] = IF_LLADDR(sc->sc_ifp)[0];
- ((uint8_t *)&p1)[0] = IF_LLADDR(sc->sc_ifp)[1];
- ((uint8_t *)&p2)[3] = IF_LLADDR(sc->sc_ifp)[2];
- ((uint8_t *)&p2)[2] = IF_LLADDR(sc->sc_ifp)[3];
- ((uint8_t *)&p2)[1] = IF_LLADDR(sc->sc_ifp)[4];
- ((uint8_t *)&p2)[0] = IF_LLADDR(sc->sc_ifp)[5];
- txp_command(sc, TXP_CMD_STATION_ADDRESS_WRITE, p1, p2, 0,
- NULL, NULL, NULL, 1);
+ eaddr = IF_LLADDR(sc->sc_ifp);
+ p1 = 0;
+ ((uint8_t *)&p1)[1] = eaddr[0];
+ ((uint8_t *)&p1)[0] = eaddr[1];
+ p1 = le16toh(p1);
+ ((uint8_t *)&p2)[3] = eaddr[2];
+ ((uint8_t *)&p2)[2] = eaddr[3];
+ ((uint8_t *)&p2)[1] = eaddr[4];
+ ((uint8_t *)&p2)[0] = eaddr[5];
+ p2 = le32toh(p2);
+ if (txp_command(sc, TXP_CMD_STATION_ADDRESS_WRITE, p1, p2, 0,
+ NULL, NULL, NULL, TXP_CMD_NOWAIT) != 0)
+ goto init_fail;
txp_set_filter(sc);
+ txp_set_capabilities(sc);
- txp_rxring_fill(sc);
-
- txp_command(sc, TXP_CMD_TX_ENABLE, 0, 0, 0, NULL, NULL, NULL, 1);
- txp_command(sc, TXP_CMD_RX_ENABLE, 0, 0, 0, NULL, NULL, NULL, 1);
-
- WRITE_REG(sc, TXP_IER, TXP_INT_RESERVED | TXP_INT_SELF |
- TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
- TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0 |
- TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
- TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | TXP_INT_LATCH);
- WRITE_REG(sc, TXP_IMR, TXP_INT_A2H_3);
+ if (txp_command(sc, TXP_CMD_CLEAR_STATISTICS, 0, 0, 0,
+ NULL, NULL, NULL, TXP_CMD_NOWAIT))
+ goto init_fail;
+ if (txp_command(sc, TXP_CMD_XCVR_SELECT, sc->sc_xcvr, 0, 0,
+ NULL, NULL, NULL, TXP_CMD_NOWAIT) != 0)
+ goto init_fail;
+ if (txp_command(sc, TXP_CMD_TX_ENABLE, 0, 0, 0, NULL, NULL, NULL,
+ TXP_CMD_NOWAIT) != 0)
+ goto init_fail;
+ if (txp_command(sc, TXP_CMD_RX_ENABLE, 0, 0, 0, NULL, NULL, NULL,
+ TXP_CMD_NOWAIT) != 0)
+ goto init_fail;
+
+ /* Ack all pending interrupts and enable interrupts. */
+ WRITE_REG(sc, TXP_ISR, TXP_INTR_ALL);
+ WRITE_REG(sc, TXP_IER, TXP_INTRS);
+ WRITE_REG(sc, TXP_IMR, TXP_INTR_NONE);
ifp->if_drv_flags |= IFF_DRV_RUNNING;
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- ifp->if_timer = 0;
callout_reset(&sc->sc_tick, hz, txp_tick, sc);
+ return;
+
+init_fail:
+ txp_rxring_empty(sc);
+ txp_init_rings(sc);
+ txp_reset(sc);
+ WRITE_REG(sc, TXP_IMR, TXP_INTR_ALL);
}
static void
txp_tick(void *vsc)
{
- struct txp_softc *sc = vsc;
- struct ifnet *ifp = sc->sc_ifp;
- struct txp_rsp_desc *rsp = NULL;
+ struct txp_softc *sc;
+ struct ifnet *ifp;
+ struct txp_rsp_desc *rsp;
struct txp_ext_desc *ext;
+ int link;
+ sc = vsc;
TXP_LOCK_ASSERT(sc);
+ bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
+ sc->sc_cdata.txp_hostvar_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
txp_rxbuf_reclaim(sc);
+ bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
+ sc->sc_cdata.txp_hostvar_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- if (txp_command2(sc, TXP_CMD_READ_STATISTICS, 0, 0, 0, NULL, 0,
- &rsp, 1))
+ ifp = sc->sc_ifp;
+ rsp = NULL;
+
+ link = sc->sc_flags & TXP_FLAG_LINK;
+ if (txp_ext_command(sc, TXP_CMD_READ_STATISTICS, 0, 0, 0, NULL, 0,
+ &rsp, TXP_CMD_WAIT))
goto out;
if (rsp->rsp_numdesc != 6)
goto out;
- if (txp_command(sc, TXP_CMD_CLEAR_STATISTICS, 0, 0, 0,
- NULL, NULL, NULL, 1))
- goto out;
- ext = (struct txp_ext_desc *)(rsp + 1);
-
- ifp->if_ierrors += ext[3].ext_2 + ext[3].ext_3 + ext[3].ext_4 +
- ext[4].ext_1 + ext[4].ext_4;
- ifp->if_oerrors += ext[0].ext_1 + ext[1].ext_1 + ext[1].ext_4 +
- ext[2].ext_1;
- ifp->if_collisions += ext[0].ext_2 + ext[0].ext_3 + ext[1].ext_2 +
- ext[1].ext_3;
- ifp->if_opackets += rsp->rsp_par2;
- ifp->if_ipackets += ext[2].ext_3;
+ txp_stats_update(sc, rsp);
+ if (link == 0 && (sc->sc_flags & TXP_FLAG_LINK) != 0) {
+ ext = (struct txp_ext_desc *)(rsp + 1);
+ /* Update baudrate with resolved speed. */
+ if ((ext[5].ext_2 & 0x02) != 0)
+ ifp->if_baudrate = IF_Mbps(100);
+ else
+ ifp->if_baudrate = IF_Mbps(10);
+ }
out:
if (rsp != NULL)
free(rsp, M_DEVBUF);
-
+ txp_watchdog(sc);
callout_reset(&sc->sc_tick, hz, txp_tick, sc);
}
@@ -1244,105 +2036,151 @@ txp_start(struct ifnet *ifp)
static void
txp_start_locked(struct ifnet *ifp)
{
- struct txp_softc *sc = ifp->if_softc;
- struct txp_tx_ring *r = &sc->sc_txhir;
- struct txp_tx_desc *txd;
- struct txp_frag_desc *fxd;
- struct mbuf *m, *m0;
- struct txp_swdesc *sd;
- uint32_t firstprod, firstcnt, prod, cnt;
+ struct txp_softc *sc;
+ struct mbuf *m_head;
+ int enq;
+ sc = ifp->if_softc;
TXP_LOCK_ASSERT(sc);
+
if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
- IFF_DRV_RUNNING)
+ IFF_DRV_RUNNING || (sc->sc_flags & TXP_FLAG_LINK) == 0)
return;
- prod = r->r_prod;
- cnt = r->r_cnt;
-
- while (1) {
- IF_DEQUEUE(&ifp->if_snd, m);
- if (m == NULL)
+ for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
+ IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
+ if (m_head == NULL)
break;
+ /*
+ * Pack the data into the transmit ring. If we
+ * don't have room, set the OACTIVE flag and wait
+ * for the NIC to drain the ring.
+ * At the moment only the high priority Tx ring is used.
+ */
+ if (txp_encap(sc, &sc->sc_txhir, &m_head)) {
+ if (m_head == NULL)
+ break;
+ IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
+ ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+ break;
+ }
- firstprod = prod;
- firstcnt = cnt;
+ /*
+ * If there's a BPF listener, bounce a copy of this frame
+ * to him.
+ */
+ ETHER_BPF_MTAP(ifp, m_head);
- sd = sc->sc_txd + prod;
- sd->sd_mbuf = m;
+ /* Send queued frame. */
+ WRITE_REG(sc, sc->sc_txhir.r_reg,
+ TXP_IDX2OFFSET(sc->sc_txhir.r_prod));
+ enq++;
+ }
- if ((TX_ENTRIES - cnt) < 4)
- goto oactive;
+ if (enq > 0) {
+ /* Set a timeout in case the chip goes out to lunch. */
+ sc->sc_watchdog_timer = TXP_TX_TIMEOUT;
+ }
+}
- txd = r->r_desc + prod;
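+/*
+ * Load an mbuf chain into the given Tx ring, using m_collapse(9)
+ * to squeeze chains that need more than TXP_MAXTXSEGS DMA segments.
+ */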
+static int
+txp_encap(struct txp_softc *sc, struct txp_tx_ring *r, struct mbuf **m_head)
+{
+ struct txp_tx_desc *first_txd;
+ struct txp_frag_desc *fxd;
+ struct txp_swdesc *sd;
+ struct mbuf *m;
+ bus_dma_segment_t txsegs[TXP_MAXTXSEGS];
+ int error, i, nsegs;
- txd->tx_flags = TX_FLAGS_TYPE_DATA;
- txd->tx_numdesc = 0;
- txd->tx_addrlo = 0;
- txd->tx_addrhi = 0;
- txd->tx_totlen = 0;
- txd->tx_pflags = 0;
+ TXP_LOCK_ASSERT(sc);
- if (++prod == TX_ENTRIES)
- prod = 0;
+ M_ASSERTPKTHDR((*m_head));
- if (++cnt >= (TX_ENTRIES - 4))
- goto oactive;
+ m = *m_head;
+ first_txd = r->r_desc + r->r_prod;
+ sd = sc->sc_txd + r->r_prod;
- if (m->m_flags & M_VLANTAG) {
- txd->tx_pflags = TX_PFLAGS_VLAN |
- (htons(m->m_pkthdr.ether_vtag) << TX_PFLAGS_VLANTAG_S);
+ error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_tx_tag, sd->sd_map,
+ *m_head, txsegs, &nsegs, 0);
+ if (error == EFBIG) {
+ m = m_collapse(*m_head, M_DONTWAIT, TXP_MAXTXSEGS);
+ if (m == NULL) {
+ m_freem(*m_head);
+ *m_head = NULL;
+ return (ENOMEM);
}
-
- if (m->m_pkthdr.csum_flags & CSUM_IP)
- txd->tx_pflags |= TX_PFLAGS_IPCKSUM;
-
-#if 0
- if (m->m_pkthdr.csum_flags & CSUM_TCP)
- txd->tx_pflags |= TX_PFLAGS_TCPCKSUM;
- if (m->m_pkthdr.csum_flags & CSUM_UDP)
- txd->tx_pflags |= TX_PFLAGS_UDPCKSUM;
-#endif
-
- fxd = (struct txp_frag_desc *)(r->r_desc + prod);
- for (m0 = m; m0 != NULL; m0 = m0->m_next) {
- if (m0->m_len == 0)
- continue;
- if (++cnt >= (TX_ENTRIES - 4))
- goto oactive;
-
- txd->tx_numdesc++;
-
- fxd->frag_flags = FRAG_FLAGS_TYPE_FRAG;
- fxd->frag_rsvd1 = 0;
- fxd->frag_len = m0->m_len;
- fxd->frag_addrlo = vtophys(mtod(m0, vm_offset_t));
- fxd->frag_addrhi = 0;
- fxd->frag_rsvd2 = 0;
-
- if (++prod == TX_ENTRIES) {
- fxd = (struct txp_frag_desc *)r->r_desc;
- prod = 0;
- } else
- fxd++;
-
+ *m_head = m;
+ error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.txp_tx_tag,
+ sd->sd_map, *m_head, txsegs, &nsegs, 0);
+ if (error != 0) {
+ m_freem(*m_head);
+ *m_head = NULL;
+ return (error);
}
+ } else if (error != 0)
+ return (error);
+ if (nsegs == 0) {
+ m_freem(*m_head);
+ *m_head = NULL;
+ return (EIO);
+ }
- ifp->if_timer = 5;
+ /* Check descriptor overrun. */
+ if (r->r_cnt + nsegs >= TX_ENTRIES - TXP_TXD_RESERVED) {
+ bus_dmamap_unload(sc->sc_cdata.txp_tx_tag, sd->sd_map);
+ return (ENOBUFS);
+ }
+ bus_dmamap_sync(sc->sc_cdata.txp_tx_tag, sd->sd_map,
+ BUS_DMASYNC_PREWRITE);
+ sd->sd_mbuf = m;
+
+ first_txd->tx_flags = TX_FLAGS_TYPE_DATA;
+ first_txd->tx_numdesc = 0;
+ first_txd->tx_addrlo = 0;
+ first_txd->tx_addrhi = 0;
+ first_txd->tx_totlen = 0;
+ first_txd->tx_pflags = 0;
+ r->r_cnt++;
+ TXP_DESC_INC(r->r_prod, TX_ENTRIES);
+
+ /* Configure Tx IP/TCP/UDP checksum offload. */
+ if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
+ first_txd->tx_pflags |= htole32(TX_PFLAGS_IPCKSUM);
+#ifdef notyet
+ /* XXX firmware bug. */
+ if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
+ first_txd->tx_pflags |= htole32(TX_PFLAGS_TCPCKSUM);
+ if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
+ first_txd->tx_pflags |= htole32(TX_PFLAGS_UDPCKSUM);
+#endif
- ETHER_BPF_MTAP(ifp, m);
- WRITE_REG(sc, r->r_reg, TXP_IDX2OFFSET(prod));
+ /* Configure VLAN hardware tag insertion. */
+ if ((m->m_flags & M_VLANTAG) != 0)
+ first_txd->tx_pflags |=
+ htole32(TX_PFLAGS_VLAN | TX_PFLAGS_PRIO |
+ (bswap16(m->m_pkthdr.ether_vtag) << TX_PFLAGS_VLANTAG_S));
+
+ for (i = 0; i < nsegs; i++) {
+ fxd = (struct txp_frag_desc *)(r->r_desc + r->r_prod);
+ fxd->frag_flags = FRAG_FLAGS_TYPE_FRAG | TX_FLAGS_VALID;
+ fxd->frag_rsvd1 = 0;
+ fxd->frag_len = htole16(txsegs[i].ds_len);
+ fxd->frag_addrhi = htole32(TXP_ADDR_HI(txsegs[i].ds_addr));
+ fxd->frag_addrlo = htole32(TXP_ADDR_LO(txsegs[i].ds_addr));
+ fxd->frag_rsvd2 = 0;
+ first_txd->tx_numdesc++;
+ r->r_cnt++;
+ TXP_DESC_INC(r->r_prod, TX_ENTRIES);
}
- r->r_prod = prod;
- r->r_cnt = cnt;
- return;
+ /* Lastly set valid flag. */
+ first_txd->tx_flags |= TX_FLAGS_VALID;
+
+ /* Sync descriptors. */
+ bus_dmamap_sync(r->r_tag, r->r_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
-oactive:
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
- r->r_prod = firstprod;
- r->r_cnt = firstcnt;
- IF_PREPEND(&ifp->if_snd, m);
+ return (0);
}
/*
@@ -1352,52 +2190,64 @@ static int
txp_command(struct txp_softc *sc, uint16_t id, uint16_t in1, uint32_t in2,
uint32_t in3, uint16_t *out1, uint32_t *out2, uint32_t *out3, int wait)
{
- struct txp_rsp_desc *rsp = NULL;
+ struct txp_rsp_desc *rsp;
- if (txp_command2(sc, id, in1, in2, in3, NULL, 0, &rsp, wait))
+ rsp = NULL;
+ if (txp_ext_command(sc, id, in1, in2, in3, NULL, 0, &rsp, wait) != 0) {
+ device_printf(sc->sc_dev, "command 0x%02x failed\n", id);
return (-1);
+ }
- if (!wait)
+ if (wait == TXP_CMD_NOWAIT)
return (0);
+ KASSERT(rsp != NULL, ("rsp is NULL!\n"));
if (out1 != NULL)
- *out1 = rsp->rsp_par1;
+ *out1 = le16toh(rsp->rsp_par1);
if (out2 != NULL)
- *out2 = rsp->rsp_par2;
+ *out2 = le32toh(rsp->rsp_par2);
if (out3 != NULL)
- *out3 = rsp->rsp_par3;
+ *out3 = le32toh(rsp->rsp_par3);
free(rsp, M_DEVBUF);
return (0);
}
static int
-txp_command2(struct txp_softc *sc, uint16_t id, uint16_t in1, uint32_t in2,
+txp_ext_command(struct txp_softc *sc, uint16_t id, uint16_t in1, uint32_t in2,
uint32_t in3, struct txp_ext_desc *in_extp, uint8_t in_extn,
struct txp_rsp_desc **rspp, int wait)
{
- struct txp_hostvar *hv = sc->sc_hostvar;
+ struct txp_hostvar *hv;
struct txp_cmd_desc *cmd;
struct txp_ext_desc *ext;
uint32_t idx, i;
uint16_t seq;
+ int error;
+ error = 0;
+ hv = sc->sc_hostvar;
if (txp_cmd_desc_numfree(sc) < (in_extn + 1)) {
- device_printf(sc->sc_dev, "no free cmd descriptors\n");
- return (-1);
+ device_printf(sc->sc_dev,
+ "%s : out of free cmd descriptors for command 0x%02x\n",
+ __func__, id);
+ return (ENOBUFS);
}
+ bus_dmamap_sync(sc->sc_cdata.txp_cmdring_tag,
+ sc->sc_cdata.txp_cmdring_map, BUS_DMASYNC_POSTWRITE);
idx = sc->sc_cmdring.lastwrite;
cmd = (struct txp_cmd_desc *)(((uint8_t *)sc->sc_cmdring.base) + idx);
bzero(cmd, sizeof(*cmd));
cmd->cmd_numdesc = in_extn;
- cmd->cmd_seq = seq = sc->sc_seq++;
- cmd->cmd_id = id;
- cmd->cmd_par1 = in1;
- cmd->cmd_par2 = in2;
- cmd->cmd_par3 = in3;
+ seq = sc->sc_seq++;
+ cmd->cmd_seq = htole16(seq);
+ cmd->cmd_id = htole16(id);
+ cmd->cmd_par1 = htole16(in1);
+ cmd->cmd_par2 = htole32(in2);
+ cmd->cmd_par3 = htole32(in3);
cmd->cmd_flags = CMD_FLAGS_TYPE_CMD |
- (wait ? CMD_FLAGS_RESP : 0) | CMD_FLAGS_VALID;
+ (wait == TXP_CMD_WAIT ? CMD_FLAGS_RESP : 0) | CMD_FLAGS_VALID;
idx += sizeof(struct txp_cmd_desc);
if (idx == sc->sc_cmdring.size)
@@ -1413,73 +2263,122 @@ txp_command2(struct txp_softc *sc, uint16_t id, uint16_t in1, uint32_t in2,
}
sc->sc_cmdring.lastwrite = idx;
-
+ bus_dmamap_sync(sc->sc_cdata.txp_cmdring_tag,
+ sc->sc_cdata.txp_cmdring_map, BUS_DMASYNC_PREWRITE);
+ bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
+ sc->sc_cdata.txp_hostvar_map, BUS_DMASYNC_PREREAD |
+ BUS_DMASYNC_PREWRITE);
WRITE_REG(sc, TXP_H2A_2, sc->sc_cmdring.lastwrite);
+ TXP_BARRIER(sc, TXP_H2A_2, 4, BUS_SPACE_BARRIER_WRITE);
- if (!wait)
+ if (wait == TXP_CMD_NOWAIT)
return (0);
- for (i = 0; i < 10000; i++) {
- idx = hv->hv_resp_read_idx;
- if (idx != hv->hv_resp_write_idx) {
- *rspp = NULL;
- if (txp_response(sc, idx, id, seq, rspp))
- return (-1);
- if (*rspp != NULL)
+ for (i = 0; i < TXP_TIMEOUT; i++) {
+ bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
+ sc->sc_cdata.txp_hostvar_map, BUS_DMASYNC_POSTREAD |
+ BUS_DMASYNC_POSTWRITE);
+ if (le32toh(hv->hv_resp_read_idx) !=
+ le32toh(hv->hv_resp_write_idx)) {
+ error = txp_response(sc, id, seq, rspp);
+ bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
+ sc->sc_cdata.txp_hostvar_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ if (error != 0)
+ return (error);
+ if (*rspp != NULL)
break;
}
DELAY(50);
}
- if (i == 1000 || (*rspp) == NULL) {
- device_printf(sc->sc_dev, "0x%x command failed\n", id);
- return (-1);
+ if (i == TXP_TIMEOUT) {
+ device_printf(sc->sc_dev, "command 0x%02x timed out\n", id);
+ error = ETIMEDOUT;
}
- return (0);
+ return (error);
}
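+/*
+ * Scan the response ring for the response matching id/seq; any
+ * unsolicited responses encountered along the way are consumed here.
+ */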
static int
-txp_response(struct txp_softc *sc, uint32_t ridx, uint16_t id, uint16_t seq,
+txp_response(struct txp_softc *sc, uint16_t id, uint16_t seq,
struct txp_rsp_desc **rspp)
{
- struct txp_hostvar *hv = sc->sc_hostvar;
+ struct txp_hostvar *hv;
struct txp_rsp_desc *rsp;
+ uint32_t ridx;
- while (ridx != hv->hv_resp_write_idx) {
+ bus_dmamap_sync(sc->sc_cdata.txp_rspring_tag,
+ sc->sc_cdata.txp_rspring_map, BUS_DMASYNC_POSTREAD);
+ hv = sc->sc_hostvar;
+ ridx = le32toh(hv->hv_resp_read_idx);
+ while (ridx != le32toh(hv->hv_resp_write_idx)) {
rsp = (struct txp_rsp_desc *)(((uint8_t *)sc->sc_rspring.base) + ridx);
- if (id == rsp->rsp_id && rsp->rsp_seq == seq) {
+ if (id == le16toh(rsp->rsp_id) &&
+ le16toh(rsp->rsp_seq) == seq) {
*rspp = (struct txp_rsp_desc *)malloc(
sizeof(struct txp_rsp_desc) * (rsp->rsp_numdesc + 1),
M_DEVBUF, M_NOWAIT);
- if ((*rspp) == NULL)
- return (-1);
+ if (*rspp == NULL) {
+ device_printf(sc->sc_dev, "%s : command 0x%02x "
+ "memory allocation failure\n",
+ __func__, id);
+ return (ENOMEM);
+ }
txp_rsp_fixup(sc, rsp, *rspp);
return (0);
}
- if (rsp->rsp_flags & RSP_FLAGS_ERROR) {
- device_printf(sc->sc_dev, "response error!\n");
+ if ((rsp->rsp_flags & RSP_FLAGS_ERROR) != 0) {
+ device_printf(sc->sc_dev,
+ "%s : command 0x%02x response error!\n", __func__,
+ le16toh(rsp->rsp_id));
txp_rsp_fixup(sc, rsp, NULL);
- ridx = hv->hv_resp_read_idx;
+ ridx = le32toh(hv->hv_resp_read_idx);
continue;
}
- switch (rsp->rsp_id) {
+ /*
+ * The following unsolicited responses are handled during
+ * processing of TXP_CMD_READ_STATISTICS, which requires a
+ * response. The driver abuses that command to detect media
+ * status changes.
+ * TXP_CMD_FILTER_DEFINE is not an unsolicited response, but
+ * the response ring is not processed in the interrupt
+ * handler, so it has to be ignored here; otherwise an
+ * unknown command message would be printed.
+ */
+ switch (le16toh(rsp->rsp_id)) {
case TXP_CMD_CYCLE_STATISTICS:
+ case TXP_CMD_FILTER_DEFINE:
+ break;
case TXP_CMD_MEDIA_STATUS_READ:
+ if ((le16toh(rsp->rsp_par1) & 0x0800) == 0) {
+ sc->sc_flags |= TXP_FLAG_LINK;
+ if_link_state_change(sc->sc_ifp,
+ LINK_STATE_UP);
+ } else {
+ sc->sc_flags &= ~TXP_FLAG_LINK;
+ if_link_state_change(sc->sc_ifp,
+ LINK_STATE_DOWN);
+ }
break;
case TXP_CMD_HELLO_RESPONSE:
- device_printf(sc->sc_dev, "hello\n");
+ /*
+ * The driver should respond to the hello message, but
+ * TXP_CMD_READ_STATISTICS is already issued once a
+ * second, so there is no need to send an explicit
+ * command here.
+ */
+ device_printf(sc->sc_dev, "%s : hello\n", __func__);
break;
default:
- device_printf(sc->sc_dev, "unknown id(0x%x)\n",
- rsp->rsp_id);
+ device_printf(sc->sc_dev,
+ "%s : unknown command 0x%02x\n", __func__,
+ le16toh(rsp->rsp_id));
}
-
txp_rsp_fixup(sc, rsp, NULL);
- ridx = hv->hv_resp_read_idx;
- hv->hv_resp_read_idx = ridx;
+ ridx = le32toh(hv->hv_resp_read_idx);
}
return (0);
@@ -1489,11 +2388,13 @@ static void
txp_rsp_fixup(struct txp_softc *sc, struct txp_rsp_desc *rsp,
struct txp_rsp_desc *dst)
{
- struct txp_rsp_desc *src = rsp;
- struct txp_hostvar *hv = sc->sc_hostvar;
+ struct txp_rsp_desc *src;
+ struct txp_hostvar *hv;
uint32_t i, ridx;
- ridx = hv->hv_resp_read_idx;
+ src = rsp;
+ hv = sc->sc_hostvar;
+ ridx = le32toh(hv->hv_resp_read_idx);
for (i = 0; i < rsp->rsp_numdesc + 1; i++) {
if (dst != NULL)
@@ -1504,28 +2405,33 @@ txp_rsp_fixup(struct txp_softc *sc, struct txp_rsp_desc *rsp,
ridx = 0;
} else
src++;
- sc->sc_rspring.lastwrite = hv->hv_resp_read_idx = ridx;
+ sc->sc_rspring.lastwrite = ridx;
}
- hv->hv_resp_read_idx = ridx;
+ hv->hv_resp_read_idx = htole32(ridx);
}
static int
txp_cmd_desc_numfree(struct txp_softc *sc)
{
- struct txp_hostvar *hv = sc->sc_hostvar;
- struct txp_boot_record *br = sc->sc_boot;
+ struct txp_hostvar *hv;
+ struct txp_boot_record *br;
uint32_t widx, ridx, nfree;
+ bus_dmamap_sync(sc->sc_cdata.txp_hostvar_tag,
+ sc->sc_cdata.txp_hostvar_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ hv = sc->sc_hostvar;
+ br = sc->sc_boot;
widx = sc->sc_cmdring.lastwrite;
- ridx = hv->hv_cmd_read_idx;
+ ridx = le32toh(hv->hv_cmd_read_idx);
if (widx == ridx) {
/* Ring is completely free */
- nfree = br->br_cmd_siz - sizeof(struct txp_cmd_desc);
+ nfree = le32toh(br->br_cmd_siz) - sizeof(struct txp_cmd_desc);
} else {
if (widx > ridx)
- nfree = br->br_cmd_siz -
+ nfree = le32toh(br->br_cmd_siz) -
(widx - ridx + sizeof(struct txp_cmd_desc));
else
nfree = ridx - widx - sizeof(struct txp_cmd_desc);
@@ -1534,6 +2440,32 @@ txp_cmd_desc_numfree(struct txp_softc *sc)
return (nfree / sizeof(struct txp_cmd_desc));
}
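+/*
+ * Program wakeup events and put the controller into sleep state,
+ * waiting until the firmware acknowledges the transition.
+ */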
+static int
+txp_sleep(struct txp_softc *sc, int capenable)
+{
+ uint16_t events;
+ int error;
+
+ events = 0;
+ if ((capenable & IFCAP_WOL_MAGIC) != 0)
+ events |= 0x01;
+ error = txp_command(sc, TXP_CMD_ENABLE_WAKEUP_EVENTS, events, 0, 0,
+ NULL, NULL, NULL, TXP_CMD_NOWAIT);
+ if (error == 0) {
+ /* Goto sleep. */
+ error = txp_command(sc, TXP_CMD_GOTO_SLEEP, 0, 0, 0, NULL,
+ NULL, NULL, TXP_CMD_NOWAIT);
+ if (error == 0) {
+ error = txp_wait(sc, STAT_SLEEPING);
+ if (error != 0)
+ device_printf(sc->sc_dev,
+ "unable to enter into sleep\n");
+ }
+ }
+
+ return (error);
+}
+
static void
txp_stop(struct txp_softc *sc)
{
@@ -1542,20 +2474,62 @@ txp_stop(struct txp_softc *sc)
TXP_LOCK_ASSERT(sc);
ifp = sc->sc_ifp;
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ return;
+
+ WRITE_REG(sc, TXP_IER, TXP_INTR_NONE);
+ WRITE_REG(sc, TXP_ISR, TXP_INTR_ALL);
+
ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+ sc->sc_flags &= ~TXP_FLAG_LINK;
callout_stop(&sc->sc_tick);
- txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 1);
- txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 1);
-
+ txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL,
+ TXP_CMD_NOWAIT);
+ txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL,
+ TXP_CMD_NOWAIT);
+ /* Save statistics for later use. */
+ txp_stats_save(sc);
+ /* Halt controller. */
+ txp_command(sc, TXP_CMD_HALT, 0, 0, 0, NULL, NULL, NULL,
+ TXP_CMD_NOWAIT);
+
+ if (txp_wait(sc, STAT_HALTED) != 0)
+ device_printf(sc->sc_dev, "controller halt timed out!\n");
+ /* Reclaim Tx/Rx buffers. */
+ if (sc->sc_txhir.r_cnt && (sc->sc_txhir.r_cons !=
+ TXP_OFFSET2IDX(le32toh(*(sc->sc_txhir.r_off)))))
+ txp_tx_reclaim(sc, &sc->sc_txhir);
+ if (sc->sc_txlor.r_cnt && (sc->sc_txlor.r_cons !=
+ TXP_OFFSET2IDX(le32toh(*(sc->sc_txlor.r_off)))))
+ txp_tx_reclaim(sc, &sc->sc_txlor);
txp_rxring_empty(sc);
+
+ txp_init_rings(sc);
+ /* Reset controller and make it reload sleep image. */
+ txp_reset(sc);
+ /* Let controller boot from sleep image. */
+ if (txp_boot(sc, STAT_WAITING_FOR_HOST_REQUEST) != 0)
+ device_printf(sc->sc_dev, "could not boot sleep image\n");
+ txp_sleep(sc, 0);
}
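+/*
+ * Driven from txp_tick(); resets the interface if a queued Tx does
+ * not complete within TXP_TX_TIMEOUT ticks.
+ */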
static void
-txp_watchdog(struct ifnet *ifp)
+txp_watchdog(struct txp_softc *sc)
{
+ struct ifnet *ifp;
+
+ TXP_LOCK_ASSERT(sc);
+
+ if (sc->sc_watchdog_timer == 0 || --sc->sc_watchdog_timer)
+ return;
+ ifp = sc->sc_ifp;
+ if_printf(ifp, "watchdog timeout -- resetting\n");
+ ifp->if_oerrors++;
+ txp_stop(sc);
+ txp_init_locked(sc);
}
static int
@@ -1595,7 +2569,7 @@ txp_ifmedia_upd(struct ifnet *ifp)
}
txp_command(sc, TXP_CMD_XCVR_SELECT, new_xcvr, 0, 0,
- NULL, NULL, NULL, 0);
+ NULL, NULL, NULL, TXP_CMD_NOWAIT);
sc->sc_xcvr = new_xcvr;
TXP_UNLOCK(sc);
@@ -1613,23 +2587,26 @@ txp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
ifmr->ifm_active = IFM_ETHER;
TXP_LOCK(sc);
+ /* Check whether firmware is running. */
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ goto bail;
if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
- &bmsr, NULL, NULL, 1))
+ &bmsr, NULL, NULL, TXP_CMD_WAIT))
goto bail;
if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
- &bmsr, NULL, NULL, 1))
+ &bmsr, NULL, NULL, TXP_CMD_WAIT))
goto bail;
if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMCR, 0,
- &bmcr, NULL, NULL, 1))
+ &bmcr, NULL, NULL, TXP_CMD_WAIT))
goto bail;
if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_ANLPAR, 0,
- &anlpar, NULL, NULL, 1))
+ &anlpar, NULL, NULL, TXP_CMD_WAIT))
goto bail;
if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_ANAR, 0,
- &anar, NULL, NULL, 1))
+ &anar, NULL, NULL, TXP_CMD_WAIT))
goto bail;
TXP_UNLOCK(sc);
@@ -1687,32 +2664,37 @@ txp_show_descriptor(void *d)
case CMD_FLAGS_TYPE_CMD:
/* command descriptor */
printf("[cmd flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
- cmd->cmd_flags, cmd->cmd_numdesc, cmd->cmd_id, cmd->cmd_seq,
- cmd->cmd_par1, cmd->cmd_par2, cmd->cmd_par3);
+ cmd->cmd_flags, cmd->cmd_numdesc, le16toh(cmd->cmd_id),
+ le16toh(cmd->cmd_seq), le16toh(cmd->cmd_par1),
+ le32toh(cmd->cmd_par2), le32toh(cmd->cmd_par3));
break;
case CMD_FLAGS_TYPE_RESP:
/* response descriptor */
printf("[rsp flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
- rsp->rsp_flags, rsp->rsp_numdesc, rsp->rsp_id, rsp->rsp_seq,
- rsp->rsp_par1, rsp->rsp_par2, rsp->rsp_par3);
+ rsp->rsp_flags, rsp->rsp_numdesc, le16toh(rsp->rsp_id),
+ le16toh(rsp->rsp_seq), le16toh(rsp->rsp_par1),
+ le32toh(rsp->rsp_par2), le32toh(rsp->rsp_par3));
break;
case CMD_FLAGS_TYPE_DATA:
/* data header (assuming tx for now) */
printf("[data flags 0x%x num %d totlen %d addr 0x%x/0x%x pflags 0x%x]",
- txd->tx_flags, txd->tx_numdesc, txd->tx_totlen,
- txd->tx_addrlo, txd->tx_addrhi, txd->tx_pflags);
+ txd->tx_flags, txd->tx_numdesc, le16toh(txd->tx_totlen),
+ le32toh(txd->tx_addrlo), le32toh(txd->tx_addrhi),
+ le32toh(txd->tx_pflags));
break;
case CMD_FLAGS_TYPE_FRAG:
/* fragment descriptor */
printf("[frag flags 0x%x rsvd1 0x%x len %d addr 0x%x/0x%x rsvd2 0x%x]",
- frgd->frag_flags, frgd->frag_rsvd1, frgd->frag_len,
- frgd->frag_addrlo, frgd->frag_addrhi, frgd->frag_rsvd2);
+ frgd->frag_flags, frgd->frag_rsvd1, le16toh(frgd->frag_len),
+ le32toh(frgd->frag_addrlo), le32toh(frgd->frag_addrhi),
+ le32toh(frgd->frag_rsvd2));
break;
default:
printf("[unknown(%x) flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
cmd->cmd_flags & CMD_FLAGS_TYPE_M,
- cmd->cmd_flags, cmd->cmd_numdesc, cmd->cmd_id, cmd->cmd_seq,
- cmd->cmd_par1, cmd->cmd_par2, cmd->cmd_par3);
+ cmd->cmd_flags, cmd->cmd_numdesc, le16toh(cmd->cmd_id),
+ le16toh(cmd->cmd_seq), le16toh(cmd->cmd_par1),
+ le32toh(cmd->cmd_par2), le32toh(cmd->cmd_par3));
break;
}
}
@@ -1758,77 +2740,244 @@ txp_set_filter(struct txp_softc *sc)
if (mcnt > 0) {
filter |= TXP_RXFILT_HASHMULTI;
txp_command(sc, TXP_CMD_MCAST_HASH_MASK_WRITE, 2, mchash[0],
- mchash[1], NULL, NULL, NULL, 0);
+ mchash[1], NULL, NULL, NULL, TXP_CMD_NOWAIT);
}
setit:
-
txp_command(sc, TXP_CMD_RX_FILTER_WRITE, filter, 0, 0,
- NULL, NULL, NULL, 1);
+ NULL, NULL, NULL, TXP_CMD_NOWAIT);
+}
+
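+/*
+ * Translate the interface's enabled capabilities into firmware
+ * offload bits and program them with TXP_CMD_OFFLOAD_WRITE.
+ */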
+static int
+txp_set_capabilities(struct txp_softc *sc)
+{
+ struct ifnet *ifp;
+ uint32_t rxcap, txcap;
+
+ TXP_LOCK_ASSERT(sc);
+
+ rxcap = txcap = 0;
+ ifp = sc->sc_ifp;
+ if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) {
+ if ((ifp->if_hwassist & CSUM_IP) != 0)
+ txcap |= OFFLOAD_IPCKSUM;
+ if ((ifp->if_hwassist & CSUM_TCP) != 0)
+ txcap |= OFFLOAD_TCPCKSUM;
+ if ((ifp->if_hwassist & CSUM_UDP) != 0)
+ txcap |= OFFLOAD_UDPCKSUM;
+ rxcap = txcap;
+ }
+ if ((ifp->if_capenable & IFCAP_RXCSUM) == 0)
+ rxcap &= ~(OFFLOAD_IPCKSUM | OFFLOAD_TCPCKSUM |
+ OFFLOAD_UDPCKSUM);
+ if ((ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
+ rxcap |= OFFLOAD_VLAN;
+ txcap |= OFFLOAD_VLAN;
+ }
+
+ /* Tell firmware new offload configuration. */
+ return (txp_command(sc, TXP_CMD_OFFLOAD_WRITE, 0, txcap, rxcap, NULL,
+ NULL, NULL, TXP_CMD_NOWAIT));
}
static void
-txp_capabilities(struct txp_softc *sc)
+txp_stats_save(struct txp_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct txp_rsp_desc *rsp = NULL;
- struct txp_ext_desc *ext;
+ struct txp_rsp_desc *rsp;
- if (txp_command2(sc, TXP_CMD_OFFLOAD_READ, 0, 0, 0, NULL, 0, &rsp, 1))
- goto out;
+ TXP_LOCK_ASSERT(sc);
- if (rsp->rsp_numdesc != 1)
+ rsp = NULL;
+ if (txp_ext_command(sc, TXP_CMD_READ_STATISTICS, 0, 0, 0, NULL, 0,
+ &rsp, TXP_CMD_WAIT))
goto out;
- ext = (struct txp_ext_desc *)(rsp + 1);
+ if (rsp->rsp_numdesc != 6)
+ goto out;
+ txp_stats_update(sc, rsp);
+out:
+ if (rsp != NULL)
+ free(rsp, M_DEVBUF);
+ bcopy(&sc->sc_stats, &sc->sc_ostats, sizeof(struct txp_hw_stats));
+}
- sc->sc_tx_capability = ext->ext_1 & OFFLOAD_MASK;
- sc->sc_rx_capability = ext->ext_2 & OFFLOAD_MASK;
- ifp->if_capabilities = 0;
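+/*
+ * Accumulate the MAC counters returned by TXP_CMD_READ_STATISTICS
+ * on top of the statistics saved by txp_stats_save(), since the
+ * hardware counters are cleared whenever the interface is
+ * reinitialized.
+ */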
+static void
+txp_stats_update(struct txp_softc *sc, struct txp_rsp_desc *rsp)
+{
+ struct ifnet *ifp;
+ struct txp_hw_stats *ostats, *stats;
+ struct txp_ext_desc *ext;
- if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_VLAN) {
- sc->sc_tx_capability |= OFFLOAD_VLAN;
- sc->sc_rx_capability |= OFFLOAD_VLAN;
- ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
- }
+ TXP_LOCK_ASSERT(sc);
-#if 0
- /* not ready yet */
- if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_IPSEC) {
- sc->sc_tx_capability |= OFFLOAD_IPSEC;
- sc->sc_rx_capability |= OFFLOAD_IPSEC;
- ifp->if_capabilities |= IFCAP_IPSEC;
- }
-#endif
+ ifp = sc->sc_ifp;
+ ext = (struct txp_ext_desc *)(rsp + 1);
+ ostats = &sc->sc_ostats;
+ stats = &sc->sc_stats;
+ stats->tx_frames = ostats->tx_frames + le32toh(rsp->rsp_par2);
+ stats->tx_bytes = ostats->tx_bytes + (uint64_t)le32toh(rsp->rsp_par3) +
+ ((uint64_t)le32toh(ext[0].ext_1) << 32);
+ stats->tx_deferred = ostats->tx_deferred + le32toh(ext[0].ext_2);
+ stats->tx_late_colls = ostats->tx_late_colls + le32toh(ext[0].ext_3);
+ stats->tx_colls = ostats->tx_colls + le32toh(ext[0].ext_4);
+ stats->tx_carrier_lost = ostats->tx_carrier_lost +
+ le32toh(ext[1].ext_1);
+ stats->tx_multi_colls = ostats->tx_multi_colls +
+ le32toh(ext[1].ext_2);
+ stats->tx_excess_colls = ostats->tx_excess_colls +
+ le32toh(ext[1].ext_3);
+ stats->tx_fifo_underruns = ostats->tx_fifo_underruns +
+ le32toh(ext[1].ext_4);
+ stats->tx_mcast_oflows = ostats->tx_mcast_oflows +
+ le32toh(ext[2].ext_1);
+ stats->tx_filtered = ostats->tx_filtered + le32toh(ext[2].ext_2);
+ stats->rx_frames = ostats->rx_frames + le32toh(ext[2].ext_3);
+ stats->rx_bytes = ostats->rx_bytes + (uint64_t)le32toh(ext[2].ext_4) +
+ ((uint64_t)le32toh(ext[3].ext_1) << 32);
+ stats->rx_fifo_oflows = ostats->rx_fifo_oflows + le32toh(ext[3].ext_2);
+ stats->rx_badssd = ostats->rx_badssd + le32toh(ext[3].ext_3);
+ stats->rx_crcerrs = ostats->rx_crcerrs + le32toh(ext[3].ext_4);
+ stats->rx_lenerrs = ostats->rx_lenerrs + le32toh(ext[4].ext_1);
+ stats->rx_bcast_frames = ostats->rx_bcast_frames +
+ le32toh(ext[4].ext_2);
+ stats->rx_mcast_frames = ostats->rx_mcast_frames +
+ le32toh(ext[4].ext_3);
+ stats->rx_oflows = ostats->rx_oflows + le32toh(ext[4].ext_4);
+ stats->rx_filtered = ostats->rx_filtered + le32toh(ext[5].ext_1);
+
+ ifp->if_ierrors = stats->rx_fifo_oflows + stats->rx_badssd +
+ stats->rx_crcerrs + stats->rx_lenerrs + stats->rx_oflows;
+ ifp->if_oerrors = stats->tx_deferred + stats->tx_carrier_lost +
+ stats->tx_fifo_underruns + stats->tx_mcast_oflows;
+ ifp->if_collisions = stats->tx_late_colls + stats->tx_multi_colls +
+ stats->tx_excess_colls;
+ ifp->if_opackets = stats->tx_frames;
+ ifp->if_ipackets = stats->rx_frames;
+}
- if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_IPCKSUM) {
- sc->sc_tx_capability |= OFFLOAD_IPCKSUM;
- sc->sc_rx_capability |= OFFLOAD_IPCKSUM;
- ifp->if_capabilities |= IFCAP_HWCSUM;
- ifp->if_hwassist |= CSUM_IP;
- }
+#define TXP_SYSCTL_STAT_ADD32(c, h, n, p, d) \
+ SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
- if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_TCPCKSUM) {
-#if 0
- sc->sc_tx_capability |= OFFLOAD_TCPCKSUM;
+#if __FreeBSD_version > 800000
+#define TXP_SYSCTL_STAT_ADD64(c, h, n, p, d) \
+ SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
+#else
+#define TXP_SYSCTL_STAT_ADD64(c, h, n, p, d) \
+ SYSCTL_ADD_ULONG(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
#endif
- sc->sc_rx_capability |= OFFLOAD_TCPCKSUM;
- ifp->if_capabilities |= IFCAP_HWCSUM;
- }
- if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_UDPCKSUM) {
-#if 0
- sc->sc_tx_capability |= OFFLOAD_UDPCKSUM;
-#endif
- sc->sc_rx_capability |= OFFLOAD_UDPCKSUM;
- ifp->if_capabilities |= IFCAP_HWCSUM;
+static void
+txp_sysctl_node(struct txp_softc *sc)
+{
+ struct sysctl_ctx_list *ctx;
+ struct sysctl_oid_list *child, *parent;
+ struct sysctl_oid *tree;
+ struct txp_hw_stats *stats;
+ int error;
+
+ stats = &sc->sc_stats;
+ ctx = device_get_sysctl_ctx(sc->sc_dev);
+ child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->sc_dev));
+ SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit",
+ CTLTYPE_INT | CTLFLAG_RW, &sc->sc_process_limit, 0,
+ sysctl_hw_txp_proc_limit, "I",
+ "max number of Rx events to process");
+ /* Pull in device tunables. */
+ sc->sc_process_limit = TXP_PROC_DEFAULT;
+ error = resource_int_value(device_get_name(sc->sc_dev),
+ device_get_unit(sc->sc_dev), "process_limit",
+ &sc->sc_process_limit);
+ if (error == 0) {
+ if (sc->sc_process_limit < TXP_PROC_MIN ||
+ sc->sc_process_limit > TXP_PROC_MAX) {
+ device_printf(sc->sc_dev,
+ "process_limit value out of range; "
+ "using default: %d\n", TXP_PROC_DEFAULT);
+ sc->sc_process_limit = TXP_PROC_DEFAULT;
+ }
}
- ifp->if_capenable = ifp->if_capabilities;
+ tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
+ NULL, "TXP statistics");
+ parent = SYSCTL_CHILDREN(tree);
+
+ /* Tx statistics. */
+ tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
+ NULL, "Tx MAC statistics");
+ child = SYSCTL_CHILDREN(tree);
+
+ TXP_SYSCTL_STAT_ADD32(ctx, child, "frames",
+ &stats->tx_frames, "Frames");
+ TXP_SYSCTL_STAT_ADD64(ctx, child, "octets",
+ &stats->tx_bytes, "Octets");
+ TXP_SYSCTL_STAT_ADD32(ctx, child, "deferred",
+ &stats->tx_deferred, "Deferred frames");
+ TXP_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
+ &stats->tx_late_colls, "Late collisions");
+ TXP_SYSCTL_STAT_ADD32(ctx, child, "colls",
+ &stats->tx_colls, "Collisions");
+ TXP_SYSCTL_STAT_ADD32(ctx, child, "carrier_lost",
+ &stats->tx_carrier_lost, "Carrier lost");
+ TXP_SYSCTL_STAT_ADD32(ctx, child, "multi_colls",
+ &stats->tx_multi_colls, "Multiple collisions");
+ TXP_SYSCTL_STAT_ADD32(ctx, child, "excess_colls",
+ &stats->tx_excess_colls, "Excessive collisions");
+ TXP_SYSCTL_STAT_ADD32(ctx, child, "fifo_underruns",
+ &stats->tx_fifo_underruns, "FIFO underruns");
+ TXP_SYSCTL_STAT_ADD32(ctx, child, "mcast_oflows",
+ &stats->tx_mcast_oflows, "Multicast overflows");
+ TXP_SYSCTL_STAT_ADD32(ctx, child, "filtered",
+ &stats->tx_filtered, "Filtered frames");
+
+ /* Rx statistics. */
+ tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
+ NULL, "Rx MAC statistics");
+ child = SYSCTL_CHILDREN(tree);
+
+ TXP_SYSCTL_STAT_ADD32(ctx, child, "frames",
+ &stats->rx_frames, "Frames");
+ TXP_SYSCTL_STAT_ADD64(ctx, child, "octets",
+ &stats->rx_bytes, "Octets");
+ TXP_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
+ &stats->rx_fifo_oflows, "FIFO overflows");
+ TXP_SYSCTL_STAT_ADD32(ctx, child, "badssd",
+ &stats->rx_badssd, "Bad SSD");
+ TXP_SYSCTL_STAT_ADD32(ctx, child, "crcerrs",
+ &stats->rx_crcerrs, "CRC errors");
+ TXP_SYSCTL_STAT_ADD32(ctx, child, "lenerrs",
+ &stats->rx_lenerrs, "Length errors");
+ TXP_SYSCTL_STAT_ADD32(ctx, child, "bcast_frames",
+ &stats->rx_bcast_frames, "Broadcast frames");
+ TXP_SYSCTL_STAT_ADD32(ctx, child, "mcast_frames",
+ &stats->rx_mcast_frames, "Multicast frames");
+ TXP_SYSCTL_STAT_ADD32(ctx, child, "oflows",
+ &stats->rx_oflows, "Overflows");
+ TXP_SYSCTL_STAT_ADD32(ctx, child, "filtered",
+ &stats->rx_filtered, "Filtered frames");
+}
- if (txp_command(sc, TXP_CMD_OFFLOAD_WRITE, 0,
- sc->sc_tx_capability, sc->sc_rx_capability, NULL, NULL, NULL, 1))
- goto out;
+#undef TXP_SYSCTL_STAT_ADD32
+#undef TXP_SYSCTL_STAT_ADD64
-out:
- if (rsp != NULL)
- free(rsp, M_DEVBUF);
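+/*
+ * Generic range-checked integer sysctl handler; rejects values
+ * outside [low, high].
+ */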
+static int
+sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
+{
+ int error, value;
+
+ if (arg1 == NULL)
+ return (EINVAL);
+ value = *(int *)arg1;
+ error = sysctl_handle_int(oidp, &value, 0, req);
+ if (error || req->newptr == NULL)
+ return (error);
+ if (value < low || value > high)
+ return (EINVAL);
+ *(int *)arg1 = value;
+
+ return (0);
+}
+
+static int
+sysctl_hw_txp_proc_limit(SYSCTL_HANDLER_ARGS)
+{
+ return (sysctl_int_range(oidp, arg1, arg2, req,
+ TXP_PROC_MIN, TXP_PROC_MAX));
}
diff --git a/sys/dev/txp/if_txpreg.h b/sys/dev/txp/if_txpreg.h
index b049db2..cfd211c 100644
--- a/sys/dev/txp/if_txpreg.h
+++ b/sys/dev/txp/if_txpreg.h
@@ -32,9 +32,6 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#define TXP_PCI_LOMEM PCIR_BAR(1) /* memory map BAR */
-#define TXP_PCI_LOIO PCIR_BAR(0) /* IO map BAR */
-
/*
* Typhoon registers.
*/
@@ -78,6 +75,14 @@
#define TXP_INT_LATCH 0x00000001 /* interrupt latch */
/*
+ * Controller periodically generates TXP_INT_A2H_3 interrupt so
+ * we don't want to see them in interrupt handler.
+ */
+#define TXP_INTRS 0xFFFFFFEF
+#define TXP_INTR_ALL 0xFFFFFFFF
+#define TXP_INTR_NONE 0x00000000
+
+/*
* soft reset register (SRR)
*/
#define TXP_SRR_ALL 0x0000007f /* full reset */
@@ -86,6 +91,7 @@
* Typhoon boot commands.
*/
#define TXP_BOOTCMD_NULL 0x00
+#define TXP_BOOTCMD_WAKEUP 0xfa
#define TXP_BOOTCMD_DOWNLOAD_COMPLETE 0xfb
#define TXP_BOOTCMD_SEGMENT_AVAILABLE 0xfc
#define TXP_BOOTCMD_RUNTIME_IMAGE 0xfd
@@ -158,6 +164,7 @@
#define TXP_CMD_FILTER_TABLE_MODE_WRITE 0x3d
#define TXP_CMD_FILTER_TCL_WRITE 0x3e
#define TXP_CMD_FILTER_TBL_READ 0x3f
+#define TXP_CMD_READ_VERSION 0x43
#define TXP_CMD_FILTER_DEFINE 0x45
#define TXP_CMD_ADD_WAKEUP_PKT 0x46
#define TXP_CMD_ADD_SLEEP_PKT 0x47
@@ -225,12 +232,12 @@
#define TXP_STAT_UDPCKSUMGOOD 0x0200
struct txp_tx_desc {
- volatile uint8_t tx_flags; /* type/descriptor flags */
- volatile uint8_t tx_numdesc; /* number of descriptors */
- volatile uint16_t tx_totlen; /* total packet length */
- volatile uint32_t tx_addrlo; /* virt addr low word */
- volatile uint32_t tx_addrhi; /* virt addr high word */
- volatile uint32_t tx_pflags; /* processing flags */
+ uint8_t tx_flags; /* type/descriptor flags */
+ uint8_t tx_numdesc; /* number of descriptors */
+ uint16_t tx_totlen; /* total packet length */
+ uint32_t tx_addrlo; /* virt addr low word */
+ uint32_t tx_addrhi; /* virt addr high word */
+ uint32_t tx_pflags; /* processing flags */
};
#define TX_FLAGS_TYPE_M 0x07 /* type mask */
#define TX_FLAGS_TYPE_FRAG 0x00 /* type: fragment */
@@ -256,25 +263,17 @@ struct txp_tx_desc {
#define TX_PFLAGS_VLANTAG_S 12 /* amount to shift tag */
struct txp_rx_desc {
- volatile uint8_t rx_flags; /* type/descriptor flags */
- volatile uint8_t rx_numdesc; /* number of descriptors */
- volatile uint16_t rx_len; /* frame length */
-#ifdef notdef
- volatile uint32_t rx_vaddrlo; /* virtual address, lo word */
- volatile uint32_t rx_vaddrhi; /* virtual address, hi word */
-#endif
- union {
- struct txp_swdesc *rx_sd;
- uint64_t rx_dummy;
- } txp_rx_u;
- volatile uint32_t rx_stat; /* status */
- volatile uint16_t rx_filter; /* filter status */
- volatile uint16_t rx_hash; /* hash status */
- volatile uint32_t rx_vlan; /* vlan tag/priority */
+ uint8_t rx_flags; /* type/descriptor flags */
+ uint8_t rx_numdesc; /* number of descriptors */
+ uint16_t rx_len; /* frame length */
+ uint32_t rx_vaddrlo; /* virtual address, lo word */
+ uint32_t rx_vaddrhi; /* virtual address, hi word */
+ uint32_t rx_stat; /* status */
+ uint16_t rx_filter; /* filter status */
+ uint16_t rx_hash; /* hash status */
+ uint32_t rx_vlan; /* vlan tag/priority */
};
-#define rx_sd txp_rx_u.rx_sd
-
/* txp_rx_desc.rx_flags */
#define RX_FLAGS_TYPE_M 0x07 /* type mask */
#define RX_FLAGS_TYPE_FRAG 0x00 /* type: fragment */
@@ -297,6 +296,7 @@ struct txp_rx_desc {
#define RX_ERROR_OVERSIZE 0x00000005 /* oversized packet */
#define RX_ERROR_ALIGN 0x00000006 /* alignment error */
#define RX_ERROR_DRIBBLE 0x00000007 /* dribble bit */
+#define RX_ERROR_MASK 0x07
/* txp_rx_desc.rx_stat (if the RX_FLAGS_ERROR bit in rx_flags is not set) */
#define RX_STAT_PROTO_M 0x00000003 /* protocol mask */
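(With the volatile qualifiers gone, the host is expected to byte-swap descriptor fields explicitly; the descriptors are little-endian on the wire. A hedged sketch of error decoding with the new RX_ERROR_MASK, assuming RX_FLAGS_ERROR is set in rx_flags:

    uint32_t stat = le32toh(rxd->rx_stat);

    if ((rxd->rx_flags & RX_FLAGS_ERROR) != 0) {
            switch (stat & RX_ERROR_MASK) {
            case RX_ERROR_ALIGN:            /* alignment error */
            case RX_ERROR_OVERSIZE:         /* oversized frame */
            default:
                    /* count the error and recycle the Rx buffer */
                    break;
            }
    }

This is illustrative; the real classification is done in the driver's Rx path.)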
@@ -316,36 +316,28 @@ struct txp_rx_desc {
struct txp_rxbuf_desc {
- volatile uint32_t rb_paddrlo;
- volatile uint32_t rb_paddrhi;
-#ifdef notdef
- volatile uint32_t rb_vaddrlo;
- volatile uint32_t rb_vaddrhi;
-#endif
- union {
- struct txp_swdesc *rb_sd;
- uint64_t rb_dummy;
- } txp_rb_u;
+ uint32_t rb_paddrlo;
+ uint32_t rb_paddrhi;
+ uint32_t rb_vaddrlo;
+ uint32_t rb_vaddrhi;
};
-#define rb_sd txp_rb_u.rb_sd
-
/* Extension descriptor */
struct txp_ext_desc {
- volatile uint32_t ext_1;
- volatile uint32_t ext_2;
- volatile uint32_t ext_3;
- volatile uint32_t ext_4;
+ uint32_t ext_1;
+ uint32_t ext_2;
+ uint32_t ext_3;
+ uint32_t ext_4;
};
struct txp_cmd_desc {
- volatile uint8_t cmd_flags;
- volatile uint8_t cmd_numdesc;
- volatile uint16_t cmd_id;
- volatile uint16_t cmd_seq;
- volatile uint16_t cmd_par1;
- volatile uint32_t cmd_par2;
- volatile uint32_t cmd_par3;
+ uint8_t cmd_flags;
+ uint8_t cmd_numdesc;
+ uint16_t cmd_id;
+ uint16_t cmd_seq;
+ uint16_t cmd_par1;
+ uint32_t cmd_par2;
+ uint32_t cmd_par3;
};
#define CMD_FLAGS_TYPE_M 0x07 /* type mask */
#define CMD_FLAGS_TYPE_FRAG 0x00 /* type: fragment */
@@ -358,13 +350,13 @@ struct txp_cmd_desc {
#define CMD_FLAGS_VALID 0x80 /* valid descriptor */
struct txp_rsp_desc {
- volatile uint8_t rsp_flags;
- volatile uint8_t rsp_numdesc;
- volatile uint16_t rsp_id;
- volatile uint16_t rsp_seq;
- volatile uint16_t rsp_par1;
- volatile uint32_t rsp_par2;
- volatile uint32_t rsp_par3;
+ uint8_t rsp_flags;
+ uint8_t rsp_numdesc;
+ uint16_t rsp_id;
+ uint16_t rsp_seq;
+ uint16_t rsp_par1;
+ uint32_t rsp_par2;
+ uint32_t rsp_par3;
};
#define RSP_FLAGS_TYPE_M 0x07 /* type mask */
#define RSP_FLAGS_TYPE_FRAG 0x00 /* type: fragment */
@@ -376,12 +368,12 @@ struct txp_rsp_desc {
#define RSP_FLAGS_ERROR 0x40 /* response error */
struct txp_frag_desc {
- volatile uint8_t frag_flags; /* type/descriptor flags */
- volatile uint8_t frag_rsvd1;
- volatile uint16_t frag_len; /* bytes in this fragment */
- volatile uint32_t frag_addrlo; /* phys addr low word */
- volatile uint32_t frag_addrhi; /* phys addr high word */
- volatile uint32_t frag_rsvd2;
+ uint8_t frag_flags; /* type/descriptor flags */
+ uint8_t frag_rsvd1;
+ uint16_t frag_len; /* bytes in this fragment */
+ uint32_t frag_addrlo; /* phys addr low word */
+ uint32_t frag_addrhi; /* phys addr high word */
+ uint32_t frag_rsvd2;
};
#define FRAG_FLAGS_TYPE_M 0x07 /* type mask */
#define FRAG_FLAGS_TYPE_FRAG 0x00 /* type: fragment */
@@ -418,19 +410,25 @@ struct txp_ipsec_desc {
};
struct txp_tcpseg_desc {
- uint8_t tcpseg_desctype:3,
- tcpseg_rsvd:1,
- tcpseg_type:4;
-
+ uint8_t tcpseg_type;
uint8_t tcpseg_num;
-
- uint16_t tcpseg_mss:12,
- tcpseg_misc:4;
-
+ uint16_t tcpseg_mss;
uint32_t tcpseg_respaddr;
uint32_t tcpseg_txbytes;
uint32_t tcpseg_lss;
};
+#define TCPSEG_DESC_TYPE_M 0x07 /* type mask */
+#define TCPSEG_DESC_TYPE_FRAG 0x00 /* type: fragment */
+#define TCPSEG_DESC_TYPE_DATA 0x01 /* type: data frame */
+#define TCPSEG_DESC_TYPE_CMD 0x02 /* type: command frame */
+#define TCPSEG_DESC_TYPE_OPT 0x03 /* type: options */
+#define TCPSEG_DESC_TYPE_RX 0x04 /* type: receive */
+#define TCPSEG_DESC_TYPE_RESP 0x05 /* type: response */
+#define TCPSEG_OPT_IPSEC 0x00
+#define TCPSEG_OPT_TSO 0x10
+#define TCPSEG_MSS_MASK 0x0FFF
+#define TCPSEG_MSS_FIRST 0x1000
+#define TCPSEG_MSS_LAST 0x2000
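(A hedged sketch of how a TSO option descriptor might be composed from these macros; the field usage is inferred from the bitfields removed above (low 3 bits of tcpseg_type are the descriptor type, the high nibble the option type), not verified against firmware documentation:

    tsd->tcpseg_type = TCPSEG_DESC_TYPE_OPT | TCPSEG_OPT_TSO;
    tsd->tcpseg_mss = htole16((mss & TCPSEG_MSS_MASK) |
        TCPSEG_MSS_FIRST | TCPSEG_MSS_LAST);

Replacing the bitfields with plain integers plus shift/mask macros is what makes the layout endian-safe.)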
/*
* Transceiver types
@@ -460,48 +458,48 @@ struct txp_tcpseg_desc {
* boot record (pointers to rings)
*/
struct txp_boot_record {
- volatile uint32_t br_hostvar_lo; /* host ring pointer */
- volatile uint32_t br_hostvar_hi;
- volatile uint32_t br_txlopri_lo; /* tx low pri ring */
- volatile uint32_t br_txlopri_hi;
- volatile uint32_t br_txlopri_siz;
- volatile uint32_t br_txhipri_lo; /* tx high pri ring */
- volatile uint32_t br_txhipri_hi;
- volatile uint32_t br_txhipri_siz;
- volatile uint32_t br_rxlopri_lo; /* rx low pri ring */
- volatile uint32_t br_rxlopri_hi;
- volatile uint32_t br_rxlopri_siz;
- volatile uint32_t br_rxbuf_lo; /* rx buffer ring */
- volatile uint32_t br_rxbuf_hi;
- volatile uint32_t br_rxbuf_siz;
- volatile uint32_t br_cmd_lo; /* command ring */
- volatile uint32_t br_cmd_hi;
- volatile uint32_t br_cmd_siz;
- volatile uint32_t br_resp_lo; /* response ring */
- volatile uint32_t br_resp_hi;
- volatile uint32_t br_resp_siz;
- volatile uint32_t br_zero_lo; /* zero word */
- volatile uint32_t br_zero_hi;
- volatile uint32_t br_rxhipri_lo; /* rx high pri ring */
- volatile uint32_t br_rxhipri_hi;
- volatile uint32_t br_rxhipri_siz;
+ uint32_t br_hostvar_lo; /* host ring pointer */
+ uint32_t br_hostvar_hi;
+ uint32_t br_txlopri_lo; /* tx low pri ring */
+ uint32_t br_txlopri_hi;
+ uint32_t br_txlopri_siz;
+ uint32_t br_txhipri_lo; /* tx high pri ring */
+ uint32_t br_txhipri_hi;
+ uint32_t br_txhipri_siz;
+ uint32_t br_rxlopri_lo; /* rx low pri ring */
+ uint32_t br_rxlopri_hi;
+ uint32_t br_rxlopri_siz;
+ uint32_t br_rxbuf_lo; /* rx buffer ring */
+ uint32_t br_rxbuf_hi;
+ uint32_t br_rxbuf_siz;
+ uint32_t br_cmd_lo; /* command ring */
+ uint32_t br_cmd_hi;
+ uint32_t br_cmd_siz;
+ uint32_t br_resp_lo; /* response ring */
+ uint32_t br_resp_hi;
+ uint32_t br_resp_siz;
+ uint32_t br_zero_lo; /* zero word */
+ uint32_t br_zero_hi;
+ uint32_t br_rxhipri_lo; /* rx high pri ring */
+ uint32_t br_rxhipri_hi;
+ uint32_t br_rxhipri_siz;
};
/*
* hostvar structure (shared with typhoon)
*/
struct txp_hostvar {
- volatile uint32_t hv_rx_hi_read_idx; /* host->arm */
- volatile uint32_t hv_rx_lo_read_idx; /* host->arm */
- volatile uint32_t hv_rx_buf_write_idx; /* host->arm */
- volatile uint32_t hv_resp_read_idx; /* host->arm */
- volatile uint32_t hv_tx_lo_desc_read_idx; /* arm->host */
- volatile uint32_t hv_tx_hi_desc_read_idx; /* arm->host */
- volatile uint32_t hv_rx_lo_write_idx; /* arm->host */
- volatile uint32_t hv_rx_buf_read_idx; /* arm->host */
- volatile uint32_t hv_cmd_read_idx; /* arm->host */
- volatile uint32_t hv_resp_write_idx; /* arm->host */
- volatile uint32_t hv_rx_hi_write_idx; /* arm->host */
+ uint32_t hv_rx_hi_read_idx; /* host->arm */
+ uint32_t hv_rx_lo_read_idx; /* host->arm */
+ uint32_t hv_rx_buf_write_idx; /* host->arm */
+ uint32_t hv_resp_read_idx; /* host->arm */
+ uint32_t hv_tx_lo_desc_read_idx; /* arm->host */
+ uint32_t hv_tx_hi_desc_read_idx; /* arm->host */
+ uint32_t hv_rx_lo_write_idx; /* arm->host */
+ uint32_t hv_rx_buf_read_idx; /* arm->host */
+ uint32_t hv_cmd_read_idx; /* arm->host */
+ uint32_t hv_resp_write_idx; /* arm->host */
+ uint32_t hv_rx_hi_write_idx; /* arm->host */
};
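(These index words are the only synchronization between driver and firmware: each side writes only the words it owns and polls the other's. A rough sketch of draining the response ring under that convention; RSP_RING_BYTES is a hypothetical size, the real code uses the ring sizes registered in the boot record:

    uint32_t cons = le32toh(hv->hv_resp_read_idx);

    while (cons != le32toh(hv->hv_resp_write_idx)) {
            /* process the response descriptor at byte offset 'cons' */
            cons = (cons + sizeof(struct txp_rsp_desc)) % RSP_RING_BYTES;
    }
    hv->hv_resp_read_idx = htole32(cons);   /* hand consumed slots back */
)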
/*
@@ -555,35 +553,105 @@ struct txp_rsp_ring {
struct txp_tx_ring {
struct txp_tx_desc *r_desc; /* base address of descs */
+ bus_dma_tag_t r_tag;
+ bus_dmamap_t r_map;
uint32_t r_reg; /* register to activate */
uint32_t r_prod; /* producer */
uint32_t r_cons; /* consumer */
uint32_t r_cnt; /* # descs in use */
- volatile uint32_t *r_off; /* hostvar index pointer */
+ uint32_t *r_off; /* hostvar index pointer */
};
struct txp_swdesc {
- struct mbuf * sd_mbuf;
+ struct mbuf *sd_mbuf;
+ bus_dmamap_t sd_map;
+};
+
+struct txp_rx_swdesc {
+ TAILQ_ENTRY(txp_rx_swdesc) sd_next;
+ struct mbuf *sd_mbuf;
bus_dmamap_t sd_map;
};
struct txp_rx_ring {
struct txp_rx_desc *r_desc; /* base address of descs */
- volatile uint32_t *r_roff; /* hv read offset ptr */
- volatile uint32_t *r_woff; /* hv write offset ptr */
+ bus_dma_tag_t r_tag;
+ bus_dmamap_t r_map;
+ uint32_t *r_roff; /* hv read offset ptr */
+ uint32_t *r_woff; /* hv write offset ptr */
};
struct txp_ldata {
- struct txp_boot_record txp_boot;
- struct txp_hostvar txp_hostvar;
- struct txp_tx_desc txp_txhiring[TX_ENTRIES];
- struct txp_tx_desc txp_txloring[TX_ENTRIES];
- struct txp_rxbuf_desc txp_rxbufs[RXBUF_ENTRIES];
- struct txp_rx_desc txp_rxhiring[RX_ENTRIES];
- struct txp_rx_desc txp_rxloring[RX_ENTRIES];
- struct txp_cmd_desc txp_cmdring[CMD_ENTRIES];
- struct txp_rsp_desc txp_rspring[RSP_ENTRIES];
- uint32_t txp_zero;
+ struct txp_boot_record *txp_boot;
+ bus_addr_t txp_boot_paddr;
+ struct txp_hostvar *txp_hostvar;
+ bus_addr_t txp_hostvar_paddr;
+ struct txp_tx_desc *txp_txhiring;
+ bus_addr_t txp_txhiring_paddr;
+ struct txp_tx_desc *txp_txloring;
+ bus_addr_t txp_txloring_paddr;
+ struct txp_rxbuf_desc *txp_rxbufs;
+ bus_addr_t txp_rxbufs_paddr;
+ struct txp_rx_desc *txp_rxhiring;
+ bus_addr_t txp_rxhiring_paddr;
+ struct txp_rx_desc *txp_rxloring;
+ bus_addr_t txp_rxloring_paddr;
+ struct txp_cmd_desc *txp_cmdring;
+ bus_addr_t txp_cmdring_paddr;
+ struct txp_rsp_desc *txp_rspring;
+ bus_addr_t txp_rspring_paddr;
+ uint32_t *txp_zero;
+ bus_addr_t txp_zero_paddr;
+};
+
+struct txp_chain_data {
+ bus_dma_tag_t txp_parent_tag;
+ bus_dma_tag_t txp_boot_tag;
+ bus_dmamap_t txp_boot_map;
+ bus_dma_tag_t txp_hostvar_tag;
+ bus_dmamap_t txp_hostvar_map;
+ bus_dma_tag_t txp_txhiring_tag;
+ bus_dmamap_t txp_txhiring_map;
+ bus_dma_tag_t txp_txloring_tag;
+ bus_dmamap_t txp_txloring_map;
+ bus_dma_tag_t txp_tx_tag;
+ bus_dma_tag_t txp_rx_tag;
+ bus_dma_tag_t txp_rxbufs_tag;
+ bus_dmamap_t txp_rxbufs_map;
+ bus_dma_tag_t txp_rxhiring_tag;
+ bus_dmamap_t txp_rxhiring_map;
+ bus_dma_tag_t txp_rxloring_tag;
+ bus_dmamap_t txp_rxloring_map;
+ bus_dma_tag_t txp_cmdring_tag;
+ bus_dmamap_t txp_cmdring_map;
+ bus_dma_tag_t txp_rspring_tag;
+ bus_dmamap_t txp_rspring_map;
+ bus_dma_tag_t txp_zero_tag;
+ bus_dmamap_t txp_zero_map;
+};
+
+struct txp_hw_stats {
+ uint32_t tx_frames;
+ uint64_t tx_bytes;
+ uint32_t tx_deferred;
+ uint32_t tx_late_colls;
+ uint32_t tx_colls;
+ uint32_t tx_carrier_lost;
+ uint32_t tx_multi_colls;
+ uint32_t tx_excess_colls;
+ uint32_t tx_fifo_underruns;
+ uint32_t tx_mcast_oflows;
+ uint32_t tx_filtered;
+ uint32_t rx_frames;
+ uint64_t rx_bytes;
+ uint32_t rx_fifo_oflows;
+ uint32_t rx_badssd;
+ uint32_t rx_crcerrs;
+ uint32_t rx_lenerrs;
+ uint32_t rx_bcast_frames;
+ uint32_t rx_mcast_frames;
+ uint32_t rx_oflows;
+ uint32_t rx_filtered;
};
struct txp_softc {
@@ -591,27 +659,37 @@ struct txp_softc {
device_t sc_dev;
struct txp_hostvar *sc_hostvar;
struct txp_boot_record *sc_boot;
- bus_space_handle_t sc_bh; /* bus handle (regs) */
- bus_space_tag_t sc_bt; /* bus tag (regs) */
struct resource *sc_res;
+ int sc_res_id;
+ int sc_res_type;
struct resource *sc_irq;
void *sc_intrhand;
- struct mtx sc_mtx;
- struct txp_ldata *sc_ldata;
- void *sc_fwbuf;
+ struct txp_chain_data sc_cdata;
+ struct txp_ldata sc_ldata;
int sc_rxbufprod;
+ int sc_process_limit;
struct txp_cmd_ring sc_cmdring;
struct txp_rsp_ring sc_rspring;
- struct txp_swdesc sc_txd[TX_ENTRIES];
struct callout sc_tick;
struct ifmedia sc_ifmedia;
+ struct txp_hw_stats sc_ostats;
+ struct txp_hw_stats sc_stats;
struct txp_tx_ring sc_txhir, sc_txlor;
+ struct txp_swdesc sc_txd[TX_ENTRIES];
struct txp_rxbuf_desc *sc_rxbufs;
struct txp_rx_ring sc_rxhir, sc_rxlor;
uint16_t sc_xcvr;
uint16_t sc_seq;
- int sc_cold;
- uint32_t sc_rx_capability, sc_tx_capability;
+ int sc_watchdog_timer;
+ int sc_if_flags;
+ int sc_flags;
+#define TXP_FLAG_DETACH 0x4000
+#define TXP_FLAG_LINK 0x8000
+ TAILQ_HEAD(, txp_rx_swdesc) sc_free_list;
+ TAILQ_HEAD(, txp_rx_swdesc) sc_busy_list;
+ struct task sc_int_task;
+ struct taskqueue *sc_tq;
+ struct mtx sc_mtx;
};
struct txp_fw_file_header {
@@ -630,17 +708,26 @@ struct txp_fw_section_header {
};
#define TXP_MAX_SEGLEN 0xffff
-#define TXP_MAX_PKTLEN 0x0800
+#define TXP_MAX_PKTLEN (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN)
-#define WRITE_REG(sc,reg,val) \
- bus_space_write_4((sc)->sc_bt, (sc)->sc_bh, reg, val)
-#define READ_REG(sc,reg) \
- bus_space_read_4((sc)->sc_bt, (sc)->sc_bh, reg)
+#define WRITE_REG(sc, reg, val) bus_write_4((sc)->sc_res, reg, val)
+#define READ_REG(sc, reg) bus_read_4((sc)->sc_res, reg)
+#define TXP_BARRIER(sc, o, l, f) bus_barrier((sc)->sc_res, (o), (l), (f))
#define TXP_LOCK(sc) mtx_lock(&(sc)->sc_mtx)
#define TXP_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx)
#define TXP_LOCK_ASSERT(sc) mtx_assert(&(sc)->sc_mtx, MA_OWNED)
+#define TXP_MAXTXSEGS 16
+#define TXP_RXBUF_ALIGN (sizeof(uint32_t))
+
+#define TXP_PROC_MIN 16
+#define TXP_PROC_MAX RX_ENTRIES
+#define TXP_PROC_DEFAULT (RX_ENTRIES / 2)
+
+#define TXP_ADDR_HI(x) ((uint64_t)(x) >> 32)
+#define TXP_ADDR_LO(x) ((uint64_t)(x) & 0xffffffff)
+
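+/*
(TXP_ADDR_HI/TXP_ADDR_LO split a 64-bit bus address across the paired lo/hi descriptor words. A minimal usage sketch for an Rx buffer descriptor, assuming the little-endian descriptor layout noted above:

    rbd->rb_paddrlo = htole32(TXP_ADDR_LO(paddr));
    rbd->rb_paddrhi = htole32(TXP_ADDR_HI(paddr));
)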
/*
* 3Com PCI vendor ID.
*/
@@ -661,3 +748,21 @@ struct txp_type {
uint16_t txp_did;
char *txp_name;
};
+
+#define TXP_TIMEOUT 10000
+#define TXP_CMD_NOWAIT 0
+#define TXP_CMD_WAIT 1
+#define TXP_TX_TIMEOUT 5
+
+/*
+ * Each frame requires one frame descriptor and one or more
+ * fragment descriptors. If TSO is used, the frame descriptor
+ * block also requires one or two option descriptors, depending
+ * on the number of fragments. So a TSO frame consumes at most
+ * three additional descriptors, and one more descriptor is kept
+ * reserved so that the Tx descriptor ring can never be
+ * completely filled.
+ */
+#define TXP_TXD_RESERVED 4
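(In practice the reservation translates into an availability check before queueing a frame; a hedged sketch, with nsegs the bus_dma segment count for the mbuf chain:

    if (sc->sc_txhir.r_cnt + nsegs + TXP_TXD_RESERVED > TX_ENTRIES) {
            /* ring would (almost) fill up: defer transmission */
    }
)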
+
+#define TXP_DESC_INC(x, y) ((x) = ((x) + 1) % (y))
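(TXP_DESC_INC advances a ring index with wrap-around; typical use, illustrative:

    prod = sc->sc_txhir.r_prod;
    TXP_DESC_INC(prod, TX_ENTRIES);     /* prod = (prod + 1) % TX_ENTRIES */
    sc->sc_txhir.r_prod = prod;
)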