Diffstat (limited to 'sys/dev')
-rw-r--r--  sys/dev/aac/aac.c | 23
-rw-r--r--  sys/dev/aac/aacvar.h | 7
-rw-r--r--  sys/dev/acpica/Osd/OsdDebug.c | 23
-rw-r--r--  sys/dev/acpica/acpi.c | 68
-rw-r--r--  sys/dev/acpica/acpi_pci.c | 26
-rw-r--r--  sys/dev/ahci/ahci.c | 101
-rw-r--r--  sys/dev/amdsbwd/amdsbwd.c | 184
-rw-r--r--  sys/dev/ata/ata-sata.c | 30
-rw-r--r--  sys/dev/ata/chipsets/ata-intel.c | 4
-rw-r--r--  sys/dev/ath/ath_dfs/null/dfs_null.c | 160
-rw-r--r--  sys/dev/ath/ath_hal/ah.c | 8
-rw-r--r--  sys/dev/ath/ath_hal/ah.h | 100
-rw-r--r--  sys/dev/ath/ath_hal/ah_desc.h | 3
-rw-r--r--  sys/dev/ath/ath_hal/ah_devid.h | 2
-rw-r--r--  sys/dev/ath/ath_hal/ah_eeprom.h | 4
-rw-r--r--  sys/dev/ath/ath_hal/ah_eeprom_9287.c | 42
-rw-r--r--  sys/dev/ath/ath_hal/ah_internal.h | 14
-rw-r--r--  sys/dev/ath/ath_hal/ar5212/ar5212.h | 10
-rw-r--r--  sys/dev/ath/ath_hal/ar5212/ar5212_attach.c | 9
-rw-r--r--  sys/dev/ath/ath_hal/ar5212/ar5212_misc.c | 131
-rw-r--r--  sys/dev/ath/ath_hal/ar5212/ar5212reg.h | 1
-rw-r--r--  sys/dev/ath/ath_hal/ar5416/ar5416.h | 7
-rw-r--r--  sys/dev/ath/ath_hal/ar5416/ar5416_ani.c | 17
-rw-r--r--  sys/dev/ath/ath_hal/ar5416/ar5416_attach.c | 14
-rw-r--r--  sys/dev/ath/ath_hal/ar5416/ar5416_cal.c | 4
-rw-r--r--  sys/dev/ath/ath_hal/ar5416/ar5416_misc.c | 148
-rw-r--r--  sys/dev/ath/ath_hal/ar5416/ar5416_reset.c | 54
-rw-r--r--  sys/dev/ath/ath_hal/ar5416/ar5416phy.h | 21
-rw-r--r--  sys/dev/ath/ath_hal/ar5416/ar5416reg.h | 60
-rw-r--r--  sys/dev/ath/ath_hal/ar9001/ar9130_attach.c | 1
-rw-r--r--  sys/dev/ath/ath_hal/ar9001/ar9160_attach.c | 3
-rw-r--r--  sys/dev/ath/ath_hal/ar9002/ar9280_attach.c | 4
-rw-r--r--  sys/dev/ath/ath_hal/ar9002/ar9285_attach.c | 3
-rw-r--r--  sys/dev/ath/ath_hal/ar9002/ar9287.c | 392
-rw-r--r--  sys/dev/ath/ath_hal/ar9002/ar9287.h | 62
-rw-r--r--  sys/dev/ath/ath_hal/ar9002/ar9287.ini | 783
-rw-r--r--  sys/dev/ath/ath_hal/ar9002/ar9287_attach.c | 476
-rw-r--r--  sys/dev/ath/ath_hal/ar9002/ar9287_cal.c | 73
-rw-r--r--  sys/dev/ath/ath_hal/ar9002/ar9287_cal.h | 33
-rw-r--r--  sys/dev/ath/ath_hal/ar9002/ar9287_olc.c | 171
-rw-r--r--  sys/dev/ath/ath_hal/ar9002/ar9287_olc.h | 31
-rw-r--r--  sys/dev/ath/ath_hal/ar9002/ar9287_reset.c | 571
-rw-r--r--  sys/dev/ath/ath_hal/ar9002/ar9287_reset.h | 27
-rw-r--r--  sys/dev/ath/ath_hal/ar9002/ar9287an.h | 49
-rw-r--r--  sys/dev/ath/ath_hal/ar9002/ar9287phy.h | 26
-rw-r--r--  sys/dev/ath/if_ath.c | 99
-rw-r--r--  sys/dev/ath/if_ath_ahb.c | 6
-rw-r--r--  sys/dev/ath/if_ath_tx_ht.c | 20
-rw-r--r--  sys/dev/ath/if_athdfs.h | 47
-rw-r--r--  sys/dev/ath/if_athvar.h | 24
-rw-r--r--  sys/dev/atkbdc/atkbd.c | 15
-rw-r--r--  sys/dev/atkbdc/atkbdreg.h | 1
-rw-r--r--  sys/dev/bxe/bxe_debug.h | 49
-rw-r--r--  sys/dev/bxe/bxe_link.c | 36
-rw-r--r--  sys/dev/bxe/if_bxe.c | 5858
-rw-r--r--  sys/dev/bxe/if_bxe.h | 363
-rw-r--r--  sys/dev/cardbus/cardbus_cis.c | 5
-rw-r--r--  sys/dev/cxgbe/adapter.h | 25
-rw-r--r--  sys/dev/cxgbe/common/common.h | 2
-rw-r--r--  sys/dev/cxgbe/common/jhash.h | 140
-rw-r--r--  sys/dev/cxgbe/common/t4fw_interface.h | 237
-rw-r--r--  sys/dev/cxgbe/offload.h | 18
-rw-r--r--  sys/dev/cxgbe/osdep.h | 1
-rw-r--r--  sys/dev/cxgbe/t4_ioctl.h | 19
-rw-r--r--  sys/dev/cxgbe/t4_l2t.c | 361
-rw-r--r--  sys/dev/cxgbe/t4_l2t.h | 71
-rw-r--r--  sys/dev/cxgbe/t4_main.c | 380
-rw-r--r--  sys/dev/cxgbe/t4_sge.c | 415
-rw-r--r--  sys/dev/e1000/if_igb.c | 256
-rw-r--r--  sys/dev/e1000/if_igb.h | 6
-rw-r--r--  sys/dev/hwpmc/hwpmc_mod.c | 8
-rw-r--r--  sys/dev/iicbus/ad7417.c | 621
-rw-r--r--  sys/dev/iicbus/ds1775.c | 105
-rw-r--r--  sys/dev/iicbus/max6690.c | 113
-rw-r--r--  sys/dev/ipw/if_ipw.c | 2
-rw-r--r--  sys/dev/iwi/if_iwi.c | 2
-rw-r--r--  sys/dev/iwn/if_iwn.c | 39
-rw-r--r--  sys/dev/ixgbe/LICENSE | 2
-rw-r--r--  sys/dev/ixgbe/README | 267
-rw-r--r--  sys/dev/ixgbe/ixgbe.c | 79
-rw-r--r--  sys/dev/ixgbe/ixv.c | 58
-rw-r--r--  sys/dev/mfi/mfi.c | 18
-rw-r--r--  sys/dev/mfi/mfireg.h | 4
-rw-r--r--  sys/dev/mmc/mmc.c | 22
-rw-r--r--  sys/dev/mmc/mmcvar.h | 2
-rw-r--r--  sys/dev/msk/if_msk.c | 35
-rw-r--r--  sys/dev/mvs/mvs.c | 44
-rw-r--r--  sys/dev/nfe/if_nfe.c | 2
-rw-r--r--  sys/dev/pccard/pccard.c | 8
-rw-r--r--  sys/dev/pccbb/pccbb.c | 20
-rw-r--r--  sys/dev/pci/pci.c | 11
-rw-r--r--  sys/dev/pci/pci_pci.c | 7
-rw-r--r--  sys/dev/puc/puc.c | 38
-rw-r--r--  sys/dev/puc/puc_bfe.h | 3
-rw-r--r--  sys/dev/puc/puc_pccard.c | 4
-rw-r--r--  sys/dev/puc/puc_pci.c | 4
-rw-r--r--  sys/dev/puc/pucdata.c | 56
-rw-r--r--  sys/dev/safe/safe.c | 9
-rw-r--r--  sys/dev/sdhci/sdhci.c | 28
-rw-r--r--  sys/dev/siis/siis.c | 25
-rw-r--r--  sys/dev/sound/pci/hda/hdac.c | 70
-rw-r--r--  sys/dev/sound/pcm/sound.c | 54
-rw-r--r--  sys/dev/uart/uart_dev_ns8250.c | 8
-rw-r--r--  sys/dev/usb/net/if_axe.c | 21
-rw-r--r--  sys/dev/usb/net/if_udav.c | 1
-rw-r--r--  sys/dev/usb/serial/umcs.c | 1075
-rw-r--r--  sys/dev/usb/serial/umcs.h | 644
-rw-r--r--  sys/dev/usb/usb_device.h | 2
-rw-r--r--  sys/dev/usb/usb_freebsd.h | 1
-rw-r--r--  sys/dev/usb/usb_generic.c | 6
-rw-r--r--  sys/dev/usb/usb_hub.c | 27
-rw-r--r--  sys/dev/usb/usb_process.c | 7
-rw-r--r--  sys/dev/usb/usb_request.c | 100
-rw-r--r--  sys/dev/usb/usb_request.h | 4
-rw-r--r--  sys/dev/usb/usb_transfer.c | 5
-rw-r--r--  sys/dev/usb/usbdevs | 3
-rw-r--r--  sys/dev/usb/usbdi.h | 1
-rw-r--r--  sys/dev/wpi/if_wpi.c | 2
-rw-r--r--  sys/dev/xen/blkback/blkback.c | 1983
-rw-r--r--  sys/dev/xen/blkfront/blkfront.c | 4
-rw-r--r--  sys/dev/xen/control/control.c | 24
-rw-r--r--  sys/dev/xen/netfront/netfront.c | 3
122 files changed, 13170 insertions(+), 4945 deletions(-)
diff --git a/sys/dev/aac/aac.c b/sys/dev/aac/aac.c
index 53528fd..45cfa02 100644
--- a/sys/dev/aac/aac.c
+++ b/sys/dev/aac/aac.c
@@ -661,6 +661,16 @@ aac_detach(device_t dev)
callout_drain(&sc->aac_daemontime);
+ mtx_lock(&sc->aac_io_lock);
+ while (sc->aifflags & AAC_AIFFLAGS_RUNNING) {
+ sc->aifflags |= AAC_AIFFLAGS_EXIT;
+ wakeup(sc->aifthread);
+ msleep(sc->aac_dev, &sc->aac_io_lock, PUSER, "aacdch", 0);
+ }
+ mtx_unlock(&sc->aac_io_lock);
+ KASSERT((sc->aifflags & AAC_AIFFLAGS_RUNNING) == 0,
+ ("%s: invalid detach state", __func__));
+
/* Remove the child containers */
while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) {
error = device_delete_child(dev, co->co_disk);
@@ -679,15 +689,6 @@ aac_detach(device_t dev)
free(sim, M_AACBUF);
}
- if (sc->aifflags & AAC_AIFFLAGS_RUNNING) {
- sc->aifflags |= AAC_AIFFLAGS_EXIT;
- wakeup(sc->aifthread);
- tsleep(sc->aac_dev, PUSER | PCATCH, "aacdch", 30 * hz);
- }
-
- if (sc->aifflags & AAC_AIFFLAGS_RUNNING)
- panic("Cannot shutdown AIF thread");
-
if ((error = aac_shutdown(dev)))
return(error);
@@ -1020,7 +1021,7 @@ aac_command_thread(struct aac_softc *sc)
/*
* First see if any FIBs need to be allocated. This needs
* to be called without the driver lock because contigmalloc
- * will grab Giant, and would result in an LOR.
+ * can sleep.
*/
if ((sc->aifflags & AAC_AIFFLAGS_ALLOCFIBS) != 0) {
mtx_unlock(&sc->aac_io_lock);
@@ -1372,7 +1373,9 @@ aac_alloc_command(struct aac_softc *sc, struct aac_command **cmp)
if ((cm = aac_dequeue_free(sc)) == NULL) {
if (sc->total_fibs < sc->aac_max_fibs) {
+ mtx_lock(&sc->aac_io_lock);
sc->aifflags |= AAC_AIFFLAGS_ALLOCFIBS;
+ mtx_unlock(&sc->aac_io_lock);
wakeup(sc->aifthread);
}
return (EBUSY);
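
The detach rework above replaces a tsleep()-with-timeout-plus-panic sequence with a msleep() loop under aac_io_lock. For that loop to terminate, the AIF thread must clear AAC_AIFFLAGS_RUNNING and wake the waiter; a minimal sketch of the thread-side exit path, assuming the usual FreeBSD kproc idiom (the thread body itself is not part of this hunk):

    /* Sketch: AIF thread exit path matching the aac_detach() loop above. */
    mtx_lock(&sc->aac_io_lock);
    if (sc->aifflags & AAC_AIFFLAGS_EXIT) {
            sc->aifflags &= ~AAC_AIFFLAGS_RUNNING;
            sc->aifflags |= AAC_AIFFLAGS_EXITED;
            wakeup(sc->aac_dev);            /* releases the msleep() in detach */
            mtx_unlock(&sc->aac_io_lock);
            kproc_exit(0);                  /* never returns */
    }
    mtx_unlock(&sc->aac_io_lock);

Because the flags are now both set and tested under aac_io_lock (see the aac_alloc_command hunk as well), the wakeup can no longer race past a detach that is about to sleep.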
diff --git a/sys/dev/aac/aacvar.h b/sys/dev/aac/aacvar.h
index 61f3c5b..d994acf 100644
--- a/sys/dev/aac/aacvar.h
+++ b/sys/dev/aac/aacvar.h
@@ -386,13 +386,12 @@ struct aac_softc
struct proc *aifthread;
int aifflags;
#define AAC_AIFFLAGS_RUNNING (1 << 0)
-#define AAC_AIFFLAGS_AIF (1 << 1)
+#define AAC_AIFFLAGS_UNUSED0 (1 << 1)
#define AAC_AIFFLAGS_EXIT (1 << 2)
#define AAC_AIFFLAGS_EXITED (1 << 3)
-#define AAC_AIFFLAGS_PRINTF (1 << 4)
+#define AAC_AIFFLAGS_UNUSED1 (1 << 4)
#define AAC_AIFFLAGS_ALLOCFIBS (1 << 5)
-#define AAC_AIFFLAGS_PENDING (AAC_AIFFLAGS_AIF | AAC_AIFFLAGS_PRINTF | \
- AAC_AIFFLAGS_ALLOCFIBS)
+#define AAC_AIFFLAGS_PENDING AAC_AIFFLAGS_ALLOCFIBS
u_int32_t flags;
#define AAC_FLAGS_PERC2QC (1 << 0)
#define AAC_FLAGS_ENABLE_CAM (1 << 1) /* No SCSI passthrough */
diff --git a/sys/dev/acpica/Osd/OsdDebug.c b/sys/dev/acpica/Osd/OsdDebug.c
index 8425357..0547f75 100644
--- a/sys/dev/acpica/Osd/OsdDebug.c
+++ b/sys/dev/acpica/Osd/OsdDebug.c
@@ -47,20 +47,23 @@ __FBSDID("$FreeBSD$");
#include <dev/acpica/acpivar.h>
-UINT32
-AcpiOsGetLine(char *Buffer)
+ACPI_STATUS
+AcpiOsGetLine(char *Buffer, UINT32 BufferLength, UINT32 *BytesRead)
{
#ifdef DDB
- char *cp;
+ char *cp;
- db_readline(Buffer, 80);
- for (cp = Buffer; *cp != 0; cp++)
- if (*cp == '\n')
- *cp = 0;
- return (AE_OK);
+ cp = Buffer;
+ if (db_readline(Buffer, BufferLength) > 0)
+ while (*cp != '\0' && *cp != '\n' && *cp != '\r')
+ cp++;
+ *cp = '\0';
+ if (BytesRead != NULL)
+ *BytesRead = cp - Buffer;
+ return (AE_OK);
#else
- printf("AcpiOsGetLine called but no input support");
- return (AE_NOT_EXIST);
+ printf("AcpiOsGetLine called but no input support");
+ return (AE_NOT_EXIST);
#endif /* DDB */
}
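
The OSL hook now follows the updated ACPICA prototype: it returns a status and reports the number of bytes consumed through *BytesRead instead of encoding everything in the return value. A hypothetical caller, shown only to illustrate the new contract:

    char line[80];
    UINT32 n;

    if (ACPI_SUCCESS(AcpiOsGetLine(line, sizeof(line), &n)))
            printf("debugger read %u byte(s): %s\n", (unsigned)n, line);

Note that BytesRead may be NULL, which the implementation above checks before dereferencing.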
diff --git a/sys/dev/acpica/acpi.c b/sys/dev/acpica/acpi.c
index f6d6094..84f65bd 100644
--- a/sys/dev/acpica/acpi.c
+++ b/sys/dev/acpica/acpi.c
@@ -123,6 +123,8 @@ static int acpi_set_resource(device_t dev, device_t child, int type,
static struct resource *acpi_alloc_resource(device_t bus, device_t child,
int type, int *rid, u_long start, u_long end,
u_long count, u_int flags);
+static int acpi_adjust_resource(device_t bus, device_t child, int type,
+ struct resource *r, u_long start, u_long end);
static int acpi_release_resource(device_t bus, device_t child, int type,
int rid, struct resource *r);
static void acpi_delete_resource(device_t bus, device_t child, int type,
@@ -149,6 +151,7 @@ static ACPI_STATUS acpi_sleep_disable(struct acpi_softc *sc);
static ACPI_STATUS acpi_EnterSleepState(struct acpi_softc *sc, int state);
static void acpi_shutdown_final(void *arg, int howto);
static void acpi_enable_fixed_events(struct acpi_softc *sc);
+static BOOLEAN acpi_has_hid(ACPI_HANDLE handle);
static int acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate);
static int acpi_wake_run_prep(ACPI_HANDLE handle, int sstate);
static int acpi_wake_prep_walk(int sstate);
@@ -193,6 +196,7 @@ static device_method_t acpi_methods[] = {
DEVMETHOD(bus_set_resource, acpi_set_resource),
DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
DEVMETHOD(bus_alloc_resource, acpi_alloc_resource),
+ DEVMETHOD(bus_adjust_resource, acpi_adjust_resource),
DEVMETHOD(bus_release_resource, acpi_release_resource),
DEVMETHOD(bus_delete_resource, acpi_delete_resource),
DEVMETHOD(bus_child_pnpinfo_str, acpi_child_pnpinfo_str_method),
@@ -1325,29 +1329,40 @@ acpi_alloc_resource(device_t bus, device_t child, int type, int *rid,
}
static int
-acpi_release_resource(device_t bus, device_t child, int type, int rid,
- struct resource *r)
+acpi_is_resource_managed(int type, struct resource *r)
{
- struct rman *rm;
- int ret;
/* We only handle memory and IO resources through rman. */
switch (type) {
case SYS_RES_IOPORT:
- rm = &acpi_rman_io;
- break;
+ return (rman_is_region_manager(r, &acpi_rman_io));
case SYS_RES_MEMORY:
- rm = &acpi_rman_mem;
- break;
- default:
- rm = NULL;
+ return (rman_is_region_manager(r, &acpi_rman_mem));
}
+ return (0);
+}
+
+static int
+acpi_adjust_resource(device_t bus, device_t child, int type, struct resource *r,
+ u_long start, u_long end)
+{
+
+ if (acpi_is_resource_managed(type, r))
+ return (rman_adjust_resource(r, start, end));
+ return (bus_generic_adjust_resource(bus, child, type, r, start, end));
+}
+
+static int
+acpi_release_resource(device_t bus, device_t child, int type, int rid,
+ struct resource *r)
+{
+ int ret;
/*
* If this resource belongs to one of our internal managers,
* deactivate it and release it to the local pool.
*/
- if (rm != NULL && rman_is_region_manager(r, rm)) {
+ if (acpi_is_resource_managed(type, r)) {
if (rman_get_flags(r) & RF_ACTIVE) {
ret = bus_deactivate_resource(child, type, rid, r);
if (ret != 0)
@@ -1841,6 +1856,13 @@ acpi_probe_child(ACPI_HANDLE handle, UINT32 level, void *context, void **status)
break;
if (acpi_parse_prw(handle, &prw) == 0)
AcpiSetupGpeForWake(handle, prw.gpe_handle, prw.gpe_bit);
+
+ /*
+ * Ignore devices that do not have a _HID or _CID. They should
+ * be discovered by other buses (e.g. the PCI bus driver).
+ */
+ if (!acpi_has_hid(handle))
+ break;
/* FALLTHROUGH */
case ACPI_TYPE_PROCESSOR:
case ACPI_TYPE_THERMAL:
@@ -2029,6 +2051,30 @@ acpi_BatteryIsPresent(device_t dev)
}
/*
+ * Returns true if a device has at least one valid device ID.
+ */
+static BOOLEAN
+acpi_has_hid(ACPI_HANDLE h)
+{
+ ACPI_DEVICE_INFO *devinfo;
+ BOOLEAN ret;
+
+ if (h == NULL ||
+ ACPI_FAILURE(AcpiGetObjectInfo(h, &devinfo)))
+ return (FALSE);
+
+ ret = FALSE;
+ if ((devinfo->Valid & ACPI_VALID_HID) != 0)
+ ret = TRUE;
+ else if ((devinfo->Valid & ACPI_VALID_CID) != 0)
+ if (devinfo->CompatibleIdList.Count > 0)
+ ret = TRUE;
+
+ AcpiOsFree(devinfo);
+ return (ret);
+}
+
+/*
* Match a HID string against a handle
*/
BOOLEAN
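
With bus_adjust_resource wired up, a child of acpi0 can grow or shrink an rman-managed I/O port or memory range in place instead of releasing and reallocating it; anything acpi does not manage is forwarded up the tree via bus_generic_adjust_resource. A hedged sketch of a consumer (the resource and the size delta are illustrative only):

    /* Sketch: extend a previously allocated memory window by one page. */
    error = bus_adjust_resource(dev, SYS_RES_MEMORY, res,
        rman_get_start(res), rman_get_end(res) + PAGE_SIZE);
    if (error != 0)
            device_printf(dev, "window resize refused: %d\n", error);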
diff --git a/sys/dev/acpica/acpi_pci.c b/sys/dev/acpica/acpi_pci.c
index 76cbacb..44db74a 100644
--- a/sys/dev/acpica/acpi_pci.c
+++ b/sys/dev/acpica/acpi_pci.c
@@ -209,38 +209,24 @@ acpi_pci_update_device(ACPI_HANDLE handle, device_t pci_child)
device_t child;
/*
- * Lookup and remove the unused device that acpi0 creates when it walks
- * the namespace creating devices.
+ * Occasionally a PCI device may show up as an ACPI device
+ * with a _HID. (For example, the TabletPC TC1000 has a
+ * second PCI-ISA bridge that has a _HID for an
+ * acpi_sysresource device.) In that case, leave ACPI-CA's
+ * device data pointing at the ACPI-enumerated device.
*/
child = acpi_get_device(handle);
if (child != NULL) {
- if (device_is_alive(child)) {
- /*
- * The TabletPC TC1000 has a second PCI-ISA bridge
- * that has a _HID for an acpi_sysresource device.
- * In that case, leave ACPI-CA's device data pointing
- * at the ACPI-enumerated device.
- */
- device_printf(child,
- "Conflicts with PCI device %d:%d:%d\n",
- pci_get_bus(pci_child), pci_get_slot(pci_child),
- pci_get_function(pci_child));
- return;
- }
KASSERT(device_get_parent(child) ==
devclass_get_device(devclass_find("acpi"), 0),
("%s: child (%s)'s parent is not acpi0", __func__,
acpi_name(handle)));
- device_delete_child(device_get_parent(child), child);
+ return;
}
/*
* Update ACPI-CA to use the PCI enumerated device_t for this handle.
*/
- status = AcpiDetachData(handle, acpi_fake_objhandler);
- if (ACPI_FAILURE(status))
- printf("WARNING: Unable to detach object data from %s - %s\n",
- acpi_name(handle), AcpiFormatException(status));
status = AcpiAttachData(handle, acpi_fake_objhandler, pci_child);
if (ACPI_FAILURE(status))
printf("WARNING: Unable to attach object data to %s - %s\n",
diff --git a/sys/dev/ahci/ahci.c b/sys/dev/ahci/ahci.c
index 2a06492..136011c 100644
--- a/sys/dev/ahci/ahci.c
+++ b/sys/dev/ahci/ahci.c
@@ -119,6 +119,7 @@ static struct {
#define AHCI_Q_NOBSYRES 256
#define AHCI_Q_NOAA 512
#define AHCI_Q_NOCOUNT 1024
+#define AHCI_Q_ALTSIG 2048
} ahci_ids[] = {
{0x43801002, 0x00, "ATI IXP600", 0},
{0x43901002, 0x00, "ATI IXP700", 0},
@@ -192,8 +193,9 @@ static struct {
{0x614511ab, 0x00, "Marvell 88SX6145", AHCI_Q_NOFORCE | AHCI_Q_4CH |
AHCI_Q_EDGEIS | AHCI_Q_NONCQ | AHCI_Q_NOCOUNT},
{0x91201b4b, 0x00, "Marvell 88SE912x", AHCI_Q_EDGEIS|AHCI_Q_NOBSYRES},
- {0x91231b4b, 0x11, "Marvell 88SE912x", AHCI_Q_NOBSYRES},
+ {0x91231b4b, 0x11, "Marvell 88SE912x", AHCI_Q_NOBSYRES|AHCI_Q_ALTSIG},
{0x91231b4b, 0x00, "Marvell 88SE912x", AHCI_Q_EDGEIS|AHCI_Q_SATA2|AHCI_Q_NOBSYRES},
+ {0x91721b4b, 0x00, "Marvell 88SE9172", AHCI_Q_NOBSYRES},
{0x91821b4b, 0x00, "Marvell 88SE9182", AHCI_Q_NOBSYRES},
{0x06201103, 0x00, "HighPoint RocketRAID 620", AHCI_Q_NOBSYRES},
{0x06201b4b, 0x00, "HighPoint RocketRAID 620", AHCI_Q_NOBSYRES},
@@ -398,6 +400,13 @@ ahci_attach(device_t dev)
if (ctlr->caps & AHCI_CAP_EMS)
ctlr->capsem = ATA_INL(ctlr->r_mem, AHCI_EM_CTL);
ctlr->ichannels = ATA_INL(ctlr->r_mem, AHCI_PI);
+
+ /* Identify and set separate quirks for HBA and RAID f/w Marvells. */
+ if ((ctlr->quirks & AHCI_Q_NOBSYRES) &&
+ (ctlr->quirks & AHCI_Q_ALTSIG) &&
+ (ctlr->caps & AHCI_CAP_SPM) == 0)
+ ctlr->quirks &= ~AHCI_Q_NOBSYRES;
+
if (ctlr->quirks & AHCI_Q_1CH) {
ctlr->caps &= ~AHCI_CAP_NPMASK;
ctlr->ichannels &= 0x01;
@@ -1764,7 +1773,7 @@ ahci_execute_transaction(struct ahci_slot *slot)
struct ahci_cmd_list *clp;
union ccb *ccb = slot->ccb;
int port = ccb->ccb_h.target_id & 0x0f;
- int fis_size, i;
+ int fis_size, i, softreset;
uint8_t *fis = ch->dma.rfis + 0x40;
uint8_t val;
@@ -1791,17 +1800,20 @@ ahci_execute_transaction(struct ahci_slot *slot)
if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
(ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL)) {
if (ccb->ataio.cmd.control & ATA_A_RESET) {
+ softreset = 1;
/* Kick controller into sane state */
ahci_stop(dev);
ahci_clo(dev);
ahci_start(dev, 0);
clp->cmd_flags |= AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY;
} else {
+ softreset = 2;
/* Prepare FIS receive area for check. */
for (i = 0; i < 20; i++)
fis[i] = 0xff;
}
- }
+ } else
+ softreset = 0;
clp->bytecount = 0;
clp->cmd_table_phys = htole64(ch->dma.work_bus + AHCI_CT_OFFSET +
(AHCI_CT_SIZE * slot->slot));
@@ -1825,8 +1837,7 @@ ahci_execute_transaction(struct ahci_slot *slot)
ATA_OUTL(ch->r_mem, AHCI_P_CI, (1 << slot->slot));
/* Device reset commands doesn't interrupt. Poll them. */
if (ccb->ccb_h.func_code == XPT_ATA_IO &&
- (ccb->ataio.cmd.command == ATA_DEVICE_RESET ||
- (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL))) {
+ (ccb->ataio.cmd.command == ATA_DEVICE_RESET || softreset)) {
int count, timeout = ccb->ccb_h.timeout * 100;
enum ahci_err_type et = AHCI_ERR_NONE;
@@ -1834,10 +1845,13 @@ ahci_execute_transaction(struct ahci_slot *slot)
DELAY(10);
if (!(ATA_INL(ch->r_mem, AHCI_P_CI) & (1 << slot->slot)))
break;
- if (ATA_INL(ch->r_mem, AHCI_P_TFD) & ATA_S_ERROR) {
+ if ((ATA_INL(ch->r_mem, AHCI_P_TFD) & ATA_S_ERROR) &&
+ softreset != 1) {
+#if 0
device_printf(ch->dev,
"Poll error on slot %d, TFD: %04x\n",
slot->slot, ATA_INL(ch->r_mem, AHCI_P_TFD));
+#endif
et = AHCI_ERR_TFE;
break;
}
@@ -1849,9 +1863,20 @@ ahci_execute_transaction(struct ahci_slot *slot)
break;
}
}
+
+ /* Marvell controllers do not wait for readiness. */
+ if ((ch->quirks & AHCI_Q_NOBSYRES) && softreset == 2 &&
+ et == AHCI_ERR_NONE) {
+ while ((val = fis[2]) & ATA_S_BUSY) {
+ DELAY(10);
+ if (count++ >= timeout)
+ break;
+ }
+ }
+
if (timeout && (count >= timeout)) {
- device_printf(ch->dev,
- "Poll timeout on slot %d\n", slot->slot);
+ device_printf(dev, "Poll timeout on slot %d port %d\n",
+ slot->slot, port);
device_printf(dev, "is %08x cs %08x ss %08x "
"rs %08x tfd %02x serr %08x\n",
ATA_INL(ch->r_mem, AHCI_P_IS),
@@ -1861,30 +1886,11 @@ ahci_execute_transaction(struct ahci_slot *slot)
ATA_INL(ch->r_mem, AHCI_P_SERR));
et = AHCI_ERR_TIMEOUT;
}
- /* Marvell controllers do not wait for readyness. */
- if ((ch->quirks & AHCI_Q_NOBSYRES) &&
- (ccb->ccb_h.func_code == XPT_ATA_IO) &&
- (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) &&
- (ccb->ataio.cmd.control & ATA_A_RESET) == 0) {
- while ((val = fis[2]) & (ATA_S_BUSY | ATA_S_DRQ)) {
- DELAY(10);
- if (count++ >= timeout) {
- device_printf(dev, "device is not "
- "ready after soft-reset: "
- "tfd = %08x\n", val);
- et = AHCI_ERR_TIMEOUT;
- break;
- }
- }
- }
- ahci_end_transaction(slot, et);
+
/* Kick controller into sane state and enable FBS. */
- if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
- (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) &&
- (ccb->ataio.cmd.control & ATA_A_RESET) == 0) {
- ahci_stop(ch->dev);
- ahci_start(ch->dev, 1);
- }
+ if (softreset == 2)
+ ch->eslots |= (1 << slot->slot);
+ ahci_end_transaction(slot, et);
return;
}
/* Start command execution timeout */
@@ -1962,7 +1968,8 @@ ahci_timeout(struct ahci_slot *slot)
return;
}
- device_printf(dev, "Timeout on slot %d\n", slot->slot);
+ device_printf(dev, "Timeout on slot %d port %d\n",
+ slot->slot, slot->ccb->ccb_h.target_id & 0x0f);
device_printf(dev, "is %08x cs %08x ss %08x rs %08x tfd %02x serr %08x\n",
ATA_INL(ch->r_mem, AHCI_P_IS), ATA_INL(ch->r_mem, AHCI_P_CI),
ATA_INL(ch->r_mem, AHCI_P_SACT), ch->rslots,
@@ -2013,6 +2020,7 @@ ahci_end_transaction(struct ahci_slot *slot, enum ahci_err_type et)
union ccb *ccb = slot->ccb;
struct ahci_cmd_list *clp;
int lastto;
+ uint32_t sig;
bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
@@ -2050,6 +2058,20 @@ ahci_end_transaction(struct ahci_slot *slot, enum ahci_err_type et)
res->lba_high_exp = fis[10];
res->sector_count = fis[12];
res->sector_count_exp = fis[13];
+
+ /*
+ * Some weird controllers do not return the signature in
+ * the FIS receive area. Read it from the PxSIG register.
+ */
+ if ((ch->quirks & AHCI_Q_ALTSIG) &&
+ (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) &&
+ (ccb->ataio.cmd.control & ATA_A_RESET) == 0) {
+ sig = ATA_INL(ch->r_mem, AHCI_P_SIG);
+ res->lba_high = sig >> 24;
+ res->lba_mid = sig >> 16;
+ res->lba_low = sig >> 8;
+ res->sector_count = sig;
+ }
} else
bzero(res, sizeof(*res));
if ((ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) == 0 &&
@@ -2169,13 +2191,6 @@ ahci_end_transaction(struct ahci_slot *slot, enum ahci_err_type et)
ch->numhslots++;
} else
xpt_done(ccb);
- /* Unfreeze frozen command. */
- if (ch->frozen && !ahci_check_collision(dev, ch->frozen)) {
- union ccb *fccb = ch->frozen;
- ch->frozen = NULL;
- ahci_begin_transaction(dev, fccb);
- xpt_release_simq(ch->sim, TRUE);
- }
/* If we have no other active commands, ... */
if (ch->rslots == 0) {
/* if there was fatal error - reset port. */
@@ -2185,6 +2200,7 @@ ahci_end_transaction(struct ahci_slot *slot, enum ahci_err_type et)
/* if we have slots in error, we can reinit port. */
if (ch->eslots != 0) {
ahci_stop(dev);
+ ahci_clo(dev);
ahci_start(dev, 1);
}
/* if there commands on hold, we can do READ LOG. */
@@ -2195,6 +2211,13 @@ ahci_end_transaction(struct ahci_slot *slot, enum ahci_err_type et)
} else if ((ch->rslots & ~ch->toslots) == 0 &&
et != AHCI_ERR_TIMEOUT)
ahci_rearm_timeout(dev);
+ /* Unfreeze frozen command. */
+ if (ch->frozen && !ahci_check_collision(dev, ch->frozen)) {
+ union ccb *fccb = ch->frozen;
+ ch->frozen = NULL;
+ ahci_begin_transaction(dev, fccb);
+ xpt_release_simq(ch->sim, TRUE);
+ }
/* Start PM timer. */
if (ch->numrslots == 0 && ch->pm_level > 3 &&
(ch->curr[ch->pm_present ? 15 : 0].caps & CTS_SATA_CAPS_D_PMREQ)) {
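
The AHCI_Q_ALTSIG handling above exists because those Marvell parts never post the initial D2H register FIS into the receive area, so the device signature has to be reconstructed from PxSIG. The register packs the same four bytes the FIS would carry, which is why the hunk splits it into lba_high/mid/low and sector_count. A sketch of the decoding with the standard SATA signature values (the constants come from the AHCI/SATA specs, not from this diff):

    /*
     * PxSIG layout (MSB..LSB): LBA-high, LBA-mid, LBA-low, sector count.
     * Standard signatures:
     *   0x00000101  ATA disk
     *   0xEB140101  ATAPI device
     *   0x96690101  port multiplier
     *   0xC33C0101  SEMB (enclosure bridge)
     */
    uint32_t sig = ATA_INL(ch->r_mem, AHCI_P_SIG);

    res->lba_high     = sig >> 24;  /* 0xEB for an ATAPI device */
    res->lba_mid      = sig >> 16;  /* 0x14 */
    res->lba_low      = sig >> 8;   /* 0x01 */
    res->sector_count = sig;        /* 0x01 */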
diff --git a/sys/dev/amdsbwd/amdsbwd.c b/sys/dev/amdsbwd/amdsbwd.c
index f5f0f87..4256381 100644
--- a/sys/dev/amdsbwd/amdsbwd.c
+++ b/sys/dev/amdsbwd/amdsbwd.c
@@ -25,8 +25,8 @@
*/
/*
- * This is a driver for watchdog timer present in AMD SB600/SB7xx
- * south bridges and other watchdog timers advertised via WDRT ACPI table.
+ * This is a driver for the watchdog timer present in AMD SB600/SB7xx/SB8xx
+ * southbridges.
* Please see the following specifications for the descriptions of the
* registers and flags:
* - AMD SB600 Register Reference Guide, Public Version, Rev. 3.03 (SB600 RRG)
@@ -35,11 +35,13 @@
* http://developer.amd.com/assets/43009_sb7xx_rrg_pub_1.00.pdf
* - AMD SB700/710/750 Register Programming Requirements (RPR)
* http://developer.amd.com/assets/42413_sb7xx_rpr_pub_1.00.pdf
+ * - AMD SB800-Series Southbridges Register Reference Guide (RRG)
+ * http://support.amd.com/us/Embedded_TechDocs/45482.pdf
* Please see the following for Watchdog Resource Table specification:
* - Watchdog Timer Hardware Requirements for Windows Server 2003 (WDRT)
* http://www.microsoft.com/whdc/system/sysinternals/watchdog.mspx
- * AMD SB600/SB7xx watchdog hardware seems to conform to the above,
- * but my system doesn't provide the table.
+ * AMD SB600/SB7xx/SB8xx watchdog hardware seems to conform to the above
+ * specifications, but the table hasn't been spotted in the wild yet.
*/
#include <sys/cdefs.h>
@@ -59,15 +61,15 @@ __FBSDID("$FreeBSD$");
#include <dev/pci/pcivar.h>
#include <isa/isavar.h>
-/* RRG 2.3.3.1.1, page 161. */
+/* SB7xx RRG 2.3.3.1.1. */
#define AMDSB_PMIO_INDEX 0xcd6
#define AMDSB_PMIO_DATA (AMDSB_PMIO_INDEX + 1)
#define AMDSB_PMIO_WIDTH 2
-/* RRG 2.3.3.2, page 181. */
+/* SB7xx RRG 2.3.3.2. */
#define AMDSB_PM_RESET_STATUS0 0x44
#define AMDSB_PM_RESET_STATUS1 0x45
#define AMDSB_WD_RST_STS 0x02
-/* RRG 2.3.3.2, page 188; RPR 2.36, page 30. */
+/* SB7xx RRG 2.3.3.2, RPR 2.36. */
#define AMDSB_PM_WDT_CTRL 0x69
#define AMDSB_WDT_DISABLE 0x01
#define AMDSB_WDT_RES_MASK (0x02 | 0x04)
@@ -77,7 +79,18 @@ __FBSDID("$FreeBSD$");
#define AMDSB_WDT_RES_1S 0x06
#define AMDSB_PM_WDT_BASE_LSB 0x6c
#define AMDSB_PM_WDT_BASE_MSB 0x6f
-/* RRG 2.3.4, page 223, WDRT. */
+/* SB8xx RRG 2.3.3. */
+#define AMDSB8_PM_WDT_EN 0x48
+#define AMDSB8_WDT_DEC_EN 0x01
+#define AMDSB8_WDT_DISABLE 0x02
+#define AMDSB8_PM_WDT_CTRL 0x4c
+#define AMDSB8_WDT_32KHZ 0x00
+#define AMDSB8_WDT_1HZ 0x03
+#define AMDSB8_WDT_RES_MASK 0x03
+#define AMDSB8_PM_RESET_STATUS0 0xC0
+#define AMDSB8_PM_RESET_STATUS1 0xC1
+#define AMDSB8_WD_RST_STS 0x20
+/* SB7xx RRG 2.3.4, WDRT. */
#define AMDSB_WD_CTRL 0x00
#define AMDSB_WD_RUN 0x01
#define AMDSB_WD_FIRED 0x02
@@ -90,8 +103,9 @@ __FBSDID("$FreeBSD$");
#define AMDSB_WDIO_REG_WIDTH 4
/* WDRT */
#define MAXCOUNT_MIN_VALUE 511
-/* RRG 2.3.1.1, page 122; SB600 RRG 2.3.1.1, page 97. */
-#define AMDSB7xx_SMBUS_DEVID 0x43851002
+/* SB7xx RRG 2.3.1.1, SB600 RRG 2.3.1.1, SB8xx RRG 2.3.1. */
+#define AMDSB_SMBUS_DEVID 0x43851002
+#define AMDSB8_SMBUS_REVID 0x40
#define amdsbwd_verbose_printf(dev, ...) \
do { \
@@ -265,7 +279,7 @@ amdsbwd_identify(driver_t *driver, device_t parent)
smb_dev = pci_find_bsf(0, 20, 0);
if (smb_dev == NULL)
return;
- if (pci_get_devid(smb_dev) != AMDSB7xx_SMBUS_DEVID)
+ if (pci_get_devid(smb_dev) != AMDSB_SMBUS_DEVID)
return;
child = BUS_ADD_CHILD(parent, ISA_ORDER_SPECULATIVE, "amdsbwd", -1);
@@ -273,15 +287,102 @@ amdsbwd_identify(driver_t *driver, device_t parent)
device_printf(parent, "add amdsbwd child failed\n");
}
+
+static void
+amdsbwd_probe_sb7xx(device_t dev, struct resource *pmres, uint32_t *addr)
+{
+ uint32_t val;
+ int i;
+
+ /* Report cause of previous reset for user's convenience. */
+ val = pmio_read(pmres, AMDSB_PM_RESET_STATUS0);
+ if (val != 0)
+ amdsbwd_verbose_printf(dev, "ResetStatus0 = %#04x\n", val);
+ val = pmio_read(pmres, AMDSB_PM_RESET_STATUS1);
+ if (val != 0)
+ amdsbwd_verbose_printf(dev, "ResetStatus1 = %#04x\n", val);
+ if ((val & AMDSB_WD_RST_STS) != 0)
+ device_printf(dev, "Previous Reset was caused by Watchdog\n");
+
+ /* Find base address of memory mapped WDT registers. */
+ for (*addr = 0, i = 0; i < 4; i++) {
+ *addr <<= 8;
+ *addr |= pmio_read(pmres, AMDSB_PM_WDT_BASE_MSB - i);
+ }
+ /* Set watchdog timer tick to 1s. */
+ val = pmio_read(pmres, AMDSB_PM_WDT_CTRL);
+ val &= ~AMDSB_WDT_RES_MASK;
+ val |= AMDSB_WDT_RES_10MS;
+ pmio_write(pmres, AMDSB_PM_WDT_CTRL, val);
+
+ /* Enable watchdog device (in stopped state). */
+ val = pmio_read(pmres, AMDSB_PM_WDT_CTRL);
+ val &= ~AMDSB_WDT_DISABLE;
+ pmio_write(pmres, AMDSB_PM_WDT_CTRL, val);
+
+ /*
+ * XXX TODO: Ensure that watchdog decode is enabled
+ * (register 0x41, bit 3).
+ */
+ device_set_desc(dev, "AMD SB600/SB7xx Watchdog Timer");
+}
+
+static void
+amdsbwd_probe_sb8xx(device_t dev, struct resource *pmres, uint32_t *addr)
+{
+ uint32_t val;
+ int i;
+
+ /* Report cause of previous reset for user's convenience. */
+ val = pmio_read(pmres, AMDSB8_PM_RESET_STATUS0);
+ if (val != 0)
+ amdsbwd_verbose_printf(dev, "ResetStatus0 = %#04x\n", val);
+ val = pmio_read(pmres, AMDSB8_PM_RESET_STATUS1);
+ if (val != 0)
+ amdsbwd_verbose_printf(dev, "ResetStatus1 = %#04x\n", val);
+ if ((val & AMDSB8_WD_RST_STS) != 0)
+ device_printf(dev, "Previous Reset was caused by Watchdog\n");
+
+ /* Find base address of memory mapped WDT registers. */
+ for (*addr = 0, i = 0; i < 4; i++) {
+ *addr <<= 8;
+ *addr |= pmio_read(pmres, AMDSB8_PM_WDT_EN + 3 - i);
+ }
+ *addr &= ~0x07u;
+
+ /* Set watchdog timer tick to 1s. */
+ val = pmio_read(pmres, AMDSB8_PM_WDT_CTRL);
+ val &= ~AMDSB8_WDT_RES_MASK;
+ val |= AMDSB8_WDT_1HZ;
+ pmio_write(pmres, AMDSB8_PM_WDT_CTRL, val);
+#ifdef AMDSBWD_DEBUG
+ val = pmio_read(pmres, AMDSB8_PM_WDT_CTRL);
+ amdsbwd_verbose_printf(dev, "AMDSB8_PM_WDT_CTRL value = %#02x\n", val);
+#endif
+
+ /*
+ * Enable watchdog device (in stopped state)
+ * and decoding of its address.
+ */
+ val = pmio_read(pmres, AMDSB8_PM_WDT_EN);
+ val &= ~AMDSB8_WDT_DISABLE;
+ val |= AMDSB8_WDT_DEC_EN;
+ pmio_write(pmres, AMDSB8_PM_WDT_EN, val);
+#ifdef AMDSBWD_DEBUG
+ val = pmio_read(pmres, AMDSB8_PM_WDT_EN);
+ device_printf(dev, "AMDSB8_PM_WDT_EN value = %#02x\n", val);
+#endif
+ device_set_desc(dev, "AMD SB8xx Watchdog Timer");
+}
+
static int
amdsbwd_probe(device_t dev)
{
struct resource *res;
+ device_t smb_dev;
uint32_t addr;
- uint32_t val;
int rid;
int rc;
- int i;
/* Do not claim some ISA PnP device by accident. */
if (isa_get_logicalid(dev) != 0)
@@ -301,21 +402,16 @@ amdsbwd_probe(device_t dev)
return (ENXIO);
}
- /* Report cause of previous reset for user's convenience. */
- val = pmio_read(res, AMDSB_PM_RESET_STATUS0);
- if (val != 0)
- amdsbwd_verbose_printf(dev, "ResetStatus0 = %#04x\n", val);
- val = pmio_read(res, AMDSB_PM_RESET_STATUS1);
- if (val != 0)
- amdsbwd_verbose_printf(dev, "ResetStatus1 = %#04x\n", val);
- if ((val & AMDSB_WD_RST_STS) != 0)
- device_printf(dev, "Previous Reset was caused by Watchdog\n");
+ smb_dev = pci_find_bsf(0, 20, 0);
+ KASSERT(smb_dev != NULL, ("can't find SMBus PCI device\n"));
+ if (pci_get_revid(smb_dev) < AMDSB8_SMBUS_REVID)
+ amdsbwd_probe_sb7xx(dev, res, &addr);
+ else
+ amdsbwd_probe_sb8xx(dev, res, &addr);
+
+ bus_release_resource(dev, SYS_RES_IOPORT, rid, res);
+ bus_delete_resource(dev, SYS_RES_IOPORT, rid);
- /* Find base address of memory mapped WDT registers. */
- for (addr = 0, i = 0; i < 4; i++) {
- addr <<= 8;
- addr |= pmio_read(res, AMDSB_PM_WDT_BASE_MSB - i);
- }
amdsbwd_verbose_printf(dev, "memory base address = %#010x\n", addr);
rc = bus_set_resource(dev, SYS_RES_MEMORY, 0, addr + AMDSB_WD_CTRL,
AMDSB_WDIO_REG_WIDTH);
@@ -330,36 +426,25 @@ amdsbwd_probe(device_t dev)
return (ENXIO);
}
- /* Set watchdog timer tick to 10ms. */
- val = pmio_read(res, AMDSB_PM_WDT_CTRL);
- val &= ~AMDSB_WDT_RES_MASK;
- val |= AMDSB_WDT_RES_10MS;
- pmio_write(res, AMDSB_PM_WDT_CTRL, val);
-
- /* Enable watchdog device (in stopped state). */
- val = pmio_read(res, AMDSB_PM_WDT_CTRL);
- val &= ~AMDSB_WDT_DISABLE;
- pmio_write(res, AMDSB_PM_WDT_CTRL, val);
-
- /*
- * XXX TODO: Ensure that watchdog decode is enabled
- * (register 0x41, bit 3).
- */
- bus_release_resource(dev, SYS_RES_IOPORT, rid, res);
- bus_delete_resource(dev, SYS_RES_IOPORT, rid);
-
- device_set_desc(dev, "AMD SB600/SB7xx Watchdog Timer");
return (0);
}
static int
amdsbwd_attach_sb(device_t dev, struct amdsbwd_softc *sc)
{
+ device_t smb_dev;
+
sc->max_ticks = UINT16_MAX;
- sc->ms_per_tick = 10;
sc->rid_ctrl = 0;
sc->rid_count = 1;
+ smb_dev = pci_find_bsf(0, 20, 0);
+ KASSERT(smb_dev != NULL, ("can't find SMBus PCI device\n"));
+ if (pci_get_revid(smb_dev) < AMDSB8_SMBUS_REVID)
+ sc->ms_per_tick = 10;
+ else
+ sc->ms_per_tick = 1000;
+
sc->res_ctrl = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
&sc->rid_ctrl, RF_ACTIVE);
if (sc->res_ctrl == NULL) {
@@ -388,6 +473,11 @@ amdsbwd_attach(device_t dev)
if (rc != 0)
goto fail;
+#ifdef AMDSBWD_DEBUG
+ device_printf(dev, "wd ctrl = %#04x\n", wdctrl_read(sc));
+ device_printf(dev, "wd count = %#04x\n", wdcount_read(sc));
+#endif
+
/* Setup initial state of Watchdog Control. */
wdctrl_write(sc, AMDSB_WD_FIRED);
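
The revision split above leaves one counter tick at 10 ms on SB600/SB7xx but a full second on SB8xx (AMDSB8_WDT_1HZ), so a requested timeout has to be converted through ms_per_tick and clamped to the 16-bit counter. A sketch of that conversion, assuming a wdcount_write() helper symmetric to the wdcount_read() used in the debug block:

    /* Sketch: program a timeout given in milliseconds. */
    unsigned int ticks = howmany(timeout_ms, sc->ms_per_tick);

    if (ticks > sc->max_ticks)  /* 65535 ticks: ~11 min (SB7xx), ~18 h (SB8xx) */
            ticks = sc->max_ticks;
    wdcount_write(sc, ticks);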
diff --git a/sys/dev/ata/ata-sata.c b/sys/dev/ata/ata-sata.c
index e95fc8f..1ddf238 100644
--- a/sys/dev/ata/ata-sata.c
+++ b/sys/dev/ata/ata-sata.c
@@ -54,6 +54,11 @@ ata_sata_phy_check_events(device_t dev, int port)
u_int32_t error, status;
ata_sata_scr_read(ch, port, ATA_SERROR, &error);
+
+ /* Check that SError value is sane. */
+ if (error == 0xffffffff)
+ return;
+
/* Clear set error bits/interrupt. */
if (error)
ata_sata_scr_write(ch, port, ATA_SERROR, error);
@@ -163,18 +168,18 @@ ata_sata_phy_reset(device_t dev, int port, int quick)
if (bootverbose) {
if (port < 0) {
- device_printf(dev, "hardware reset ...\n");
+ device_printf(dev, "hard reset ...\n");
} else {
- device_printf(dev, "p%d: hardware reset ...\n", port);
+ device_printf(dev, "p%d: hard reset ...\n", port);
}
}
for (retry = 0; retry < 10; retry++) {
for (loop = 0; loop < 10; loop++) {
if (ata_sata_scr_write(ch, port, ATA_SCONTROL, ATA_SC_DET_RESET))
- return (0);
+ goto fail;
ata_udelay(100);
if (ata_sata_scr_read(ch, port, ATA_SCONTROL, &val))
- return (0);
+ goto fail;
if ((val & ATA_SC_DET_MASK) == ATA_SC_DET_RESET)
break;
}
@@ -183,15 +188,26 @@ ata_sata_phy_reset(device_t dev, int port, int quick)
if (ata_sata_scr_write(ch, port, ATA_SCONTROL,
ATA_SC_DET_IDLE | ((ch->pm_level > 0) ? 0 :
ATA_SC_IPM_DIS_PARTIAL | ATA_SC_IPM_DIS_SLUMBER)))
- return (0);
+ goto fail;
ata_udelay(100);
if (ata_sata_scr_read(ch, port, ATA_SCONTROL, &val))
- return (0);
+ goto fail;
if ((val & ATA_SC_DET_MASK) == 0)
return ata_sata_connect(ch, port, 0);
}
}
- return 0;
+fail:
+ /* Clear SATA error register. */
+ ata_sata_scr_write(ch, port, ATA_SERROR, 0xffffffff);
+
+ if (bootverbose) {
+ if (port < 0) {
+ device_printf(dev, "hard reset failed\n");
+ } else {
+ device_printf(dev, "p%d: hard reset failed\n", port);
+ }
+ }
+ return (0);
}
int
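
Two details of the reworked reset path are worth spelling out. SControl.DET is a command field, not a status bit: writing ATA_SC_DET_RESET (DET=1) holds COMRESET on the wire, and the PHY only renegotiates once DET returns to 0 (ATA_SC_DET_IDLE), which is why the loop writes RESET, polls for it to latch, then writes IDLE. And SError is write-1-to-clear, so the new failure path wipes every latched diagnostic bit at once. A sketch of both idioms:

    ata_sata_scr_write(ch, port, ATA_SCONTROL, ATA_SC_DET_RESET); /* assert COMRESET */
    ata_udelay(100);                                              /* let DET latch */
    ata_sata_scr_write(ch, port, ATA_SCONTROL, ATA_SC_DET_IDLE);  /* release, renegotiate */
    ata_sata_scr_write(ch, port, ATA_SERROR, 0xffffffff);         /* RW1C: clear all errors */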
diff --git a/sys/dev/ata/chipsets/ata-intel.c b/sys/dev/ata/chipsets/ata-intel.c
index 3b514db..e128051 100644
--- a/sys/dev/ata/chipsets/ata-intel.c
+++ b/sys/dev/ata/chipsets/ata-intel.c
@@ -288,7 +288,9 @@ ata_intel_chipinit(device_t dev)
ATA_OUTL(ctlr->r_res2, 0x0C,
ATA_INL(ctlr->r_res2, 0x0C) | 0xf);
}
- } else {
+ /* Skip BAR(5) on ICH8M Apples, system locks up on access. */
+ } else if (ctlr->chip->chipid != ATA_I82801HBM_S1 ||
+ pci_get_subvendor(dev) != 0x106b) {
ctlr->r_type2 = SYS_RES_IOPORT;
ctlr->r_rid2 = PCIR_BAR(5);
ctlr->r_res2 = bus_alloc_resource_any(dev, ctlr->r_type2,
diff --git a/sys/dev/ath/ath_dfs/null/dfs_null.c b/sys/dev/ath/ath_dfs/null/dfs_null.c
new file mode 100644
index 0000000..2f050a4
--- /dev/null
+++ b/sys/dev/ath/ath_dfs/null/dfs_null.c
@@ -0,0 +1,160 @@
+/*-
+ * Copyright (c) 2011 Adrian Chadd, Xenion Pty Ltd
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
+ * redistribution must be conditioned upon including a substantially
+ * similar Disclaimer requirement for further binary redistribution.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
+ * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $FreeBSD$
+ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * This implements an empty DFS module.
+ */
+#include "opt_inet.h"
+#include "opt_wlan.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/sysctl.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/errno.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <sys/bus.h>
+
+#include <sys/socket.h>
+
+#include <net/if.h>
+#include <net/if_media.h>
+#include <net/if_arp.h>
+#include <net/ethernet.h> /* XXX for ether_sprintf */
+
+#include <net80211/ieee80211_var.h>
+
+#include <net/bpf.h>
+
+#ifdef INET
+#include <netinet/in.h>
+#include <netinet/if_ether.h>
+#endif
+
+#include <dev/ath/if_athvar.h>
+#include <dev/ath/if_athdfs.h>
+
+#include <dev/ath/ath_hal/ah_desc.h>
+
+/*
+ * Methods which are required
+ */
+
+/*
+ * Attach DFS to the given interface
+ */
+int
+ath_dfs_attach(struct ath_softc *sc)
+{
+ return 1;
+}
+
+/*
+ * Detach DFS from the given interface
+ */
+int
+ath_dfs_detach(struct ath_softc *sc)
+{
+ return 1;
+}
+
+/*
+ * Enable radar check
+ */
+void
+ath_dfs_radar_enable(struct ath_softc *sc, struct ieee80211_channel *chan)
+{
+ /* Check if the current channel is radar-enabled */
+ if (! IEEE80211_IS_CHAN_DFS(chan))
+ return;
+}
+
+/*
+ * Process DFS related PHY errors
+ */
+void
+ath_dfs_process_phy_err(struct ath_softc *sc, const char *buf,
+ uint64_t tsf, struct ath_rx_status *rxstat)
+{
+
+}
+
+/*
+ * Process the radar events and determine whether a DFS event has occurred.
+ *
+ * This is designed to run outside of the RX processing path.
+ * The RX path will call ath_dfs_tasklet_needed() to see whether
+ * the task/callback running this routine needs to be called.
+ */
+int
+ath_dfs_process_radar_event(struct ath_softc *sc,
+ struct ieee80211_channel *chan)
+{
+ return 0;
+}
+
+/*
+ * Determine whether the DFS check task needs to be queued.
+ *
+ * This is called in the RX task when the current batch of packets
+ * has been received. It will return whether there are any radar
+ * events for ath_dfs_process_radar_event() to handle.
+ */
+int
+ath_dfs_tasklet_needed(struct ath_softc *sc, struct ieee80211_channel *chan)
+{
+ return 0;
+}
+
+/*
+ * Handle ioctl requests from the diagnostic interface
+ */
+int
+ath_ioctl_phyerr(struct ath_softc *sc, struct ath_diag *ad)
+{
+ return 1;
+}
+
+/*
+ * Get the current DFS thresholds from the HAL
+ */
+int
+ath_dfs_get_thresholds(struct ath_softc *sc, HAL_PHYERR_PARAM *param)
+{
+ ath_hal_getdfsthresh(sc->sc_ah, param);
+ return 1;
+}
diff --git a/sys/dev/ath/ath_hal/ah.c b/sys/dev/ath/ath_hal/ah.c
index 13a59f1..647f322 100644
--- a/sys/dev/ath/ath_hal/ah.c
+++ b/sys/dev/ath/ath_hal/ah.c
@@ -117,6 +117,8 @@ ath_hal_mac_name(struct ath_hal *ah)
return "9280";
case AR_XSREV_VERSION_KITE:
return "9285";
+ case AR_XSREV_VERSION_KIWI:
+ return "9287";
}
return "????";
}
@@ -608,6 +610,10 @@ ath_hal_getcapability(struct ath_hal *ah, HAL_CAPABILITY_TYPE type,
return HAL_OK;
case HAL_CAP_4ADDR_AGGR:
return pCap->hal4AddrAggrSupport ? HAL_OK : HAL_ENOTSUPP;
+ case HAL_CAP_EXT_CHAN_DFS:
+ return pCap->halExtChanDfsSupport ? HAL_OK : HAL_ENOTSUPP;
+ case HAL_CAP_COMBINED_RADAR_RSSI:
+ return pCap->halUseCombinedRadarRssi ? HAL_OK : HAL_ENOTSUPP;
case HAL_CAP_AUTO_SLEEP:
return pCap->halAutoSleepSupport ? HAL_OK : HAL_ENOTSUPP;
case HAL_CAP_MBSSID_AGGR_SUPPORT:
@@ -624,6 +630,8 @@ ath_hal_getcapability(struct ath_hal *ah, HAL_CAPABILITY_TYPE type,
case HAL_CAP_RXTSTAMP_PREC: /* rx desc tstamp precision (bits) */
*result = pCap->halTstampPrecision;
return HAL_OK;
+ case HAL_CAP_ENHANCED_DFS_SUPPORT:
+ return pCap->halEnhancedDfsSupport ? HAL_OK : HAL_ENOTSUPP;
/* FreeBSD-specific entries for now */
case HAL_CAP_RXORN_FATAL: /* HAL_INT_RXORN treated as fatal */
diff --git a/sys/dev/ath/ath_hal/ah.h b/sys/dev/ath/ath_hal/ah.h
index 85790e1..7a01be3 100644
--- a/sys/dev/ath/ath_hal/ah.h
+++ b/sys/dev/ath/ath_hal/ah.h
@@ -121,6 +121,9 @@ typedef enum {
HAL_CAP_RTS_AGGR_LIMIT = 42, /* aggregation limit with RTS */
HAL_CAP_4ADDR_AGGR = 43, /* hardware is capable of 4addr aggregation */
+ HAL_CAP_DFS_DMN = 44, /* current DFS domain */
+ HAL_CAP_EXT_CHAN_DFS = 45, /* DFS support for extension channel */
+ HAL_CAP_COMBINED_RADAR_RSSI = 46, /* Is combined RSSI for radar accurate */
HAL_CAP_AUTO_SLEEP = 48, /* hardware can go to network sleep
automatically after waking up to receive TIM */
@@ -133,6 +136,7 @@ typedef enum {
HAL_CAP_HT20_SGI = 96, /* hardware supports HT20 short GI */
HAL_CAP_RXTSTAMP_PREC = 100, /* rx desc tstamp precision (bits) */
+ HAL_CAP_ENHANCED_DFS_SUPPORT = 117, /* hardware supports enhanced DFS */
/* The following are private to the FreeBSD HAL (224 onward) */
@@ -669,6 +673,90 @@ typedef struct {
} HAL_CHANNEL_SURVEY;
/*
+ * ANI commands.
+ *
+ * These are used both internally and externally via the diagnostic
+ * API.
+ *
+ * Note that this is NOT the ANI command set used via the INTMIT
+ * capability - that has a different mapping for some reason.
+ */
+typedef enum {
+ HAL_ANI_PRESENT = 0, /* is ANI support present */
+ HAL_ANI_NOISE_IMMUNITY_LEVEL = 1, /* set level */
+ HAL_ANI_OFDM_WEAK_SIGNAL_DETECTION = 2, /* enable/disable */
+ HAL_ANI_CCK_WEAK_SIGNAL_THR = 3, /* enable/disable */
+ HAL_ANI_FIRSTEP_LEVEL = 4, /* set level */
+ HAL_ANI_SPUR_IMMUNITY_LEVEL = 5, /* set level */
+ HAL_ANI_MODE = 6, /* 0 => manual, 1 => auto (XXX do not change) */
+ HAL_ANI_PHYERR_RESET = 7, /* reset phy error stats */
+} HAL_ANI_CMD;
+
+/*
+ * This is the layout of the ANI INTMIT capability.
+ *
+ * Notice that the command values differ from HAL_ANI_CMD.
+ */
+typedef enum {
+ HAL_CAP_INTMIT_PRESENT = 0,
+ HAL_CAP_INTMIT_ENABLE = 1,
+ HAL_CAP_INTMIT_NOISE_IMMUNITY_LEVEL = 2,
+ HAL_CAP_INTMIT_OFDM_WEAK_SIGNAL_LEVEL = 3,
+ HAL_CAP_INTMIT_CCK_WEAK_SIGNAL_THR = 4,
+ HAL_CAP_INTMIT_FIRSTEP_LEVEL = 5,
+ HAL_CAP_INTMIT_SPUR_IMMUNITY_LEVEL = 6
+} HAL_CAP_INTMIT_CMD;
+
+typedef struct {
+ int32_t pe_firpwr; /* FIR pwr out threshold */
+ int32_t pe_rrssi; /* Radar rssi thresh */
+ int32_t pe_height; /* Pulse height thresh */
+ int32_t pe_prssi; /* Pulse rssi thresh */
+ int32_t pe_inband; /* Inband thresh */
+
+ /* The following params are only for AR5413 and later */
+ u_int32_t pe_relpwr; /* Relative power threshold in 0.5dB steps */
+ u_int32_t pe_relstep; /* Pulse Relative step threshold in 0.5dB steps */
+ u_int32_t pe_maxlen; /* Max length of radar sign in 0.8us units */
+ HAL_BOOL pe_usefir128; /* Use the average in-band power measured over 128 cycles */
+ HAL_BOOL pe_blockradar; /*
+ * Enable to block radar check if pkt detect is done via OFDM
+ * weak signal detect or pkt is detected immediately after tx
+ * to rx transition
+ */
+ HAL_BOOL pe_enmaxrssi; /*
+ * Enable to use the max rssi instead of the last rssi during
+ * fine gain changes for radar detection
+ */
+ HAL_BOOL pe_extchannel; /* Enable DFS on ext channel */
+} HAL_PHYERR_PARAM;
+
+#define HAL_PHYERR_PARAM_NOVAL 65535
+#define HAL_PHYERR_PARAM_ENABLE 0x8000 /* Enable/Disable if applicable */
+
+
+/*
+ * Flag for setting QUIET period
+ */
+typedef enum {
+ HAL_QUIET_DISABLE = 0x0,
+ HAL_QUIET_ENABLE = 0x1,
+ HAL_QUIET_ADD_CURRENT_TSF = 0x2, /* add current TSF to next_start offset */
+ HAL_QUIET_ADD_SWBA_RESP_TIME = 0x4, /* add beacon response time to next_start offset */
+} HAL_QUIET_FLAG;
+
+#define HAL_DFS_EVENT_PRICH 0x0000001
+
+struct dfs_event {
+ uint64_t re_full_ts; /* 64-bit full timestamp from interrupt time */
+ uint32_t re_ts; /* Original 15 bit recv timestamp */
+ uint8_t re_rssi; /* rssi of radar event */
+ uint8_t re_dur; /* duration of radar pulse */
+ uint32_t re_flags; /* Flags (see above) */
+};
+typedef struct dfs_event HAL_DFS_EVENT;
+
+/*
* Hardware Access Layer (HAL) API.
*
* Clients of the HAL call ath_hal_attach to obtain a reference to an
@@ -842,6 +930,18 @@ struct ath_hal {
u_int __ahdecl(*ah_getCTSTimeout)(struct ath_hal*);
HAL_BOOL __ahdecl(*ah_setDecompMask)(struct ath_hal*, uint16_t, int);
void __ahdecl(*ah_setCoverageClass)(struct ath_hal*, uint8_t, int);
+ HAL_STATUS __ahdecl(*ah_setQuiet)(struct ath_hal *ah, uint32_t period,
+ uint32_t duration, uint32_t nextStart,
+ HAL_QUIET_FLAG flag);
+
+ /* DFS functions */
+ void __ahdecl(*ah_enableDfs)(struct ath_hal *ah,
+ HAL_PHYERR_PARAM *pe);
+ void __ahdecl(*ah_getDfsThresh)(struct ath_hal *ah,
+ HAL_PHYERR_PARAM *pe);
+ HAL_BOOL __ahdecl(*ah_procRadarEvent)(struct ath_hal *ah,
+ struct ath_rx_status *rxs, uint64_t fulltsf,
+ const char *buf, HAL_DFS_EVENT *event);
/* Key Cache Functions */
uint32_t __ahdecl(*ah_getKeyCacheSize)(struct ath_hal*);
diff --git a/sys/dev/ath/ath_hal/ah_desc.h b/sys/dev/ath/ath_hal/ah_desc.h
index ff6f40c..bd3e6a8 100644
--- a/sys/dev/ath/ath_hal/ah_desc.h
+++ b/sys/dev/ath/ath_hal/ah_desc.h
@@ -144,7 +144,7 @@ enum {
HAL_PHYERR_RADAR = 5, /* Radar detect */
HAL_PHYERR_SERVICE = 6, /* Illegal service */
HAL_PHYERR_TOR = 7, /* Transmit override receive */
- /* NB: these are specific to the 5212 */
+ /* NB: these are specific to the 5212 and later */
HAL_PHYERR_OFDM_TIMING = 17, /* */
HAL_PHYERR_OFDM_SIGNAL_PARITY = 18, /* */
HAL_PHYERR_OFDM_RATE_ILLEGAL = 19, /* */
@@ -152,6 +152,7 @@ enum {
HAL_PHYERR_OFDM_POWER_DROP = 21, /* */
HAL_PHYERR_OFDM_SERVICE = 22, /* */
HAL_PHYERR_OFDM_RESTART = 23, /* */
+ HAL_PHYERR_FALSE_RADAR_EXT = 24, /* */
HAL_PHYERR_CCK_TIMING = 25, /* */
HAL_PHYERR_CCK_HEADER_CRC = 26, /* */
HAL_PHYERR_CCK_RATE_ILLEGAL = 27, /* */
diff --git a/sys/dev/ath/ath_hal/ah_devid.h b/sys/dev/ath/ath_hal/ah_devid.h
index 64033f3..c7a98dd 100644
--- a/sys/dev/ath/ath_hal/ah_devid.h
+++ b/sys/dev/ath/ath_hal/ah_devid.h
@@ -80,6 +80,8 @@
#define AR9280_DEVID_PCIE 0x002a /* AR9280 PCI-E Merlin */
#define AR9285_DEVID_PCIE 0x002b /* AR9285 PCI-E Kite */
#define AR2427_DEVID_PCIE 0x002c /* AR2427 PCI-E w/ 802.11n bonded out */
+#define AR9287_DEVID_PCI 0x002d /* AR9227 PCI Kiwi */
+#define AR9287_DEVID_PCIE 0x002e /* AR9287 PCI-E Kiwi */
#define AR_SUBVENDOR_ID_NOG 0x0e11 /* No 11G subvendor ID */
#define AR_SUBVENDOR_ID_NEW_A 0x7065 /* Update device to new RD */
diff --git a/sys/dev/ath/ath_hal/ah_eeprom.h b/sys/dev/ath/ath_hal/ah_eeprom.h
index c7fe385..2ca0589 100644
--- a/sys/dev/ath/ath_hal/ah_eeprom.h
+++ b/sys/dev/ath/ath_hal/ah_eeprom.h
@@ -101,7 +101,9 @@ enum {
AR_EEP_ANTGAINMAX_2, /* int8_t* */
AR_EEP_WRITEPROTECT, /* use ath_hal_eepromGetFlag */
AR_EEP_PWR_TABLE_OFFSET,/* int8_t* */
- AR_EEP_PWDCLKIND /* uint8_t* */
+ AR_EEP_PWDCLKIND, /* uint8_t* */
+ AR_EEP_TEMPSENSE_SLOPE, /* int8_t* */
+ AR_EEP_TEMPSENSE_SLOPE_PAL_ON, /* int8_t* */
};
typedef struct {
diff --git a/sys/dev/ath/ath_hal/ah_eeprom_9287.c b/sys/dev/ath/ath_hal/ah_eeprom_9287.c
index e8c5e54..4055093 100644
--- a/sys/dev/ath/ath_hal/ah_eeprom_9287.c
+++ b/sys/dev/ath/ath_hal/ah_eeprom_9287.c
@@ -63,28 +63,10 @@ v9287EepromGet(struct ath_hal *ah, int param, void *val)
return pBase->opCapFlags;
case AR_EEP_RFSILENT:
return pBase->rfSilent;
-#if 0
- case AR_EEP_OB_5:
- return pModal[CHAN_A_IDX].ob;
- case AR_EEP_DB_5:
- return pModal[CHAN_A_IDX].db;
- case AR_EEP_OB_2:
- return pModal[CHAN_B_IDX].ob;
- case AR_EEP_DB_2:
- return pModal[CHAN_B_IDX].db;
-#endif
case AR_EEP_TXMASK:
return pBase->txMask;
case AR_EEP_RXMASK:
return pBase->rxMask;
-#if 0
- case AR_EEP_RXGAIN_TYPE:
- return IS_VERS(>=, AR5416_EEP_MINOR_VER_17) ?
- pBase->rxGainType : AR5416_EEP_RXGAIN_ORIG;
- case AR_EEP_TXGAIN_TYPE:
- return IS_VERS(>=, AR5416_EEP_MINOR_VER_19) ?
- pBase->txGainType : AR5416_EEP_TXGAIN_ORIG;
-#endif
case AR_EEP_OL_PWRCTRL:
HALASSERT(val == AH_NULL);
return pBase->openLoopPwrCntl ? HAL_OK : HAL_EIO;
@@ -117,6 +99,18 @@ v9287EepromGet(struct ath_hal *ah, int param, void *val)
case AR_EEP_PWR_TABLE_OFFSET:
*(int8_t *) val = pBase->pwrTableOffset;
return HAL_OK;
+ case AR_EEP_TEMPSENSE_SLOPE:
+ if (IS_VERS(>=, AR9287_EEP_MINOR_VER_2))
+ *(int8_t *)val = pBase->tempSensSlope;
+ else
+ *(int8_t *)val = 0;
+ return HAL_OK;
+ case AR_EEP_TEMPSENSE_SLOPE_PAL_ON:
+ if (IS_VERS(>=, AR9287_EEP_MINOR_VER_3))
+ *(int8_t *)val = pBase->tempSensSlopePalOn;
+ else
+ *(int8_t *)val = 0;
+ return HAL_OK;
default:
HALASSERT(0);
return HAL_EINVAL;
@@ -132,14 +126,12 @@ v9287EepromSet(struct ath_hal *ah, int param, int v)
HAL_EEPROM_9287 *ee = AH_PRIVATE(ah)->ah_eeprom;
switch (param) {
- case AR_EEP_ANTGAINMAX_2:
- ee->ee_antennaGainMax[1] = (int8_t) v;
- return HAL_OK;
- case AR_EEP_ANTGAINMAX_5:
- ee->ee_antennaGainMax[0] = (int8_t) v;
- return HAL_OK;
+ case AR_EEP_ANTGAINMAX_2:
+ ee->ee_antennaGainMax[1] = (int8_t) v;
+ return HAL_OK;
+ default:
+ return HAL_EINVAL;
}
- return HAL_EINVAL;
}
static HAL_BOOL
diff --git a/sys/dev/ath/ath_hal/ah_internal.h b/sys/dev/ath/ath_hal/ah_internal.h
index b4cc817..d66c9d8 100644
--- a/sys/dev/ath/ath_hal/ah_internal.h
+++ b/sys/dev/ath/ath_hal/ah_internal.h
@@ -200,8 +200,10 @@ typedef struct {
halRifsTxSupport : 1,
hal4AddrAggrSupport : 1,
halExtChanDfsSupport : 1,
+ halUseCombinedRadarRssi : 1,
halForcePpmSupport : 1,
halEnhancedPmSupport : 1,
+ halEnhancedDfsSupport : 1,
halMbssidAggrSupport : 1,
halBssidMatchSupport : 1,
hal4kbSplitTransSupport : 1,
@@ -418,18 +420,6 @@ extern HAL_BOOL ath_hal_setTxQProps(struct ath_hal *ah,
extern HAL_BOOL ath_hal_getTxQProps(struct ath_hal *ah,
HAL_TXQ_INFO *qInfo, const HAL_TX_QUEUE_INFO *qi);
-typedef enum {
- HAL_ANI_PRESENT = 0x1, /* is ANI support present */
- HAL_ANI_NOISE_IMMUNITY_LEVEL = 0x2, /* set level */
- HAL_ANI_OFDM_WEAK_SIGNAL_DETECTION = 0x4, /* enable/disable */
- HAL_ANI_CCK_WEAK_SIGNAL_THR = 0x8, /* enable/disable */
- HAL_ANI_FIRSTEP_LEVEL = 0x10, /* set level */
- HAL_ANI_SPUR_IMMUNITY_LEVEL = 0x20, /* set level */
- HAL_ANI_MODE = 0x40, /* 0 => manual, 1 => auto (XXX do not change) */
- HAL_ANI_PHYERR_RESET =0x80, /* reset phy error stats */
- HAL_ANI_ALL = 0xff
-} HAL_ANI_CMD;
-
#define HAL_SPUR_VAL_MASK 0x3FFF
#define HAL_SPUR_CHAN_WIDTH 87
#define HAL_BIN_WIDTH_BASE_100HZ 3125
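
The removal above is paired with the new definition in ah.h: HAL_ANI_CMD values change from one-hot masks (0x1, 0x2, 0x4, ...) to plain indices (0, 1, 2, ...). Any code that tested a command against the ah_ani_function bitmask must now shift first, which is exactly the adjustment the ar5416_ani.c hunk at the end of this diff makes:

    /* Old encoding: command values were themselves bits. */
    if ((cmd & AH5416(ah)->ah_ani_function) == 0)
            return AH_FALSE;

    /* New encoding: commands are indices, so form the bit explicitly. */
    if (((1 << cmd) & AH5416(ah)->ah_ani_function) == 0)
            return AH_FALSE;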
diff --git a/sys/dev/ath/ath_hal/ar5212/ar5212.h b/sys/dev/ath/ath_hal/ar5212/ar5212.h
index e226816..8503a62 100644
--- a/sys/dev/ath/ath_hal/ar5212/ar5212.h
+++ b/sys/dev/ath/ath_hal/ar5212/ar5212.h
@@ -320,6 +320,9 @@ struct ath_hal_5212 {
struct ar5212AniState *ah_curani; /* cached last reference */
struct ar5212AniState ah_ani[AH_MAXCHAN]; /* per-channel state */
+ /* AR5416 uses some of the AR5212 ANI code; these are the ANI methods */
+ HAL_BOOL (*ah_aniControl) (struct ath_hal *, HAL_ANI_CMD cmd, int param);
+
/*
* Transmit power state. Note these are maintained
* here so they can be retrieved by diagnostic tools.
@@ -503,6 +506,8 @@ extern HAL_BOOL ar5212SetCapability(struct ath_hal *, HAL_CAPABILITY_TYPE,
extern HAL_BOOL ar5212GetDiagState(struct ath_hal *ah, int request,
const void *args, uint32_t argsize,
void **result, uint32_t *resultsize);
+extern HAL_STATUS ar5212SetQuiet(struct ath_hal *ah, uint32_t period,
+ uint32_t duration, uint32_t nextStart, HAL_QUIET_FLAG flag);
extern HAL_BOOL ar5212SetPowerMode(struct ath_hal *ah, HAL_POWER_MODE mode,
int setChip);
@@ -615,5 +620,10 @@ extern void ar5212AniReset(struct ath_hal *, const struct ieee80211_channel *,
extern HAL_BOOL ar5212IsNFCalInProgress(struct ath_hal *ah);
extern HAL_BOOL ar5212WaitNFCalComplete(struct ath_hal *ah, int i);
+extern void ar5212EnableDfs(struct ath_hal *ah, HAL_PHYERR_PARAM *pe);
+extern void ar5212GetDfsThresh(struct ath_hal *ah, HAL_PHYERR_PARAM *pe);
+extern HAL_BOOL ar5212ProcessRadarEvent(struct ath_hal *ah,
+ struct ath_rx_status *rxs, uint64_t fulltsf, const char *buf,
+ HAL_DFS_EVENT *event);
#endif /* _ATH_AR5212_H_ */
diff --git a/sys/dev/ath/ath_hal/ar5212/ar5212_attach.c b/sys/dev/ath/ath_hal/ar5212/ar5212_attach.c
index 4b0fcbe..8e7f3cb 100644
--- a/sys/dev/ath/ath_hal/ar5212/ar5212_attach.c
+++ b/sys/dev/ath/ath_hal/ar5212/ar5212_attach.c
@@ -127,6 +127,12 @@ static const struct ath_hal_private ar5212hal = {{
.ah_getCTSTimeout = ar5212GetCTSTimeout,
.ah_setDecompMask = ar5212SetDecompMask,
.ah_setCoverageClass = ar5212SetCoverageClass,
+ .ah_setQuiet = ar5212SetQuiet,
+
+ /* DFS Functions */
+ .ah_enableDfs = ar5212EnableDfs,
+ .ah_getDfsThresh = ar5212GetDfsThresh,
+ .ah_procRadarEvent = ar5212ProcessRadarEvent,
/* Key Cache Functions */
.ah_getKeyCacheSize = ar5212GetKeyCacheSize,
@@ -203,6 +209,9 @@ ar5212AniSetup(struct ath_hal *ah)
ar5212AniAttach(ah, &tmp, &tmp, AH_TRUE);
} else
ar5212AniAttach(ah, &aniparams, &aniparams, AH_TRUE);
+
+ /* Set overridable ANI methods */
+ AH5212(ah)->ah_aniControl = ar5212AniControl;
}
/*
diff --git a/sys/dev/ath/ath_hal/ar5212/ar5212_misc.c b/sys/dev/ath/ath_hal/ar5212/ar5212_misc.c
index 0d6adc1..3a6019d 100644
--- a/sys/dev/ath/ath_hal/ar5212/ar5212_misc.c
+++ b/sys/dev/ath/ath_hal/ar5212/ar5212_misc.c
@@ -21,9 +21,7 @@
#include "ah.h"
#include "ah_internal.h"
#include "ah_devid.h"
-#ifdef AH_DEBUG
#include "ah_desc.h" /* NB: for HAL_PHYERR* */
-#endif
#include "ar5212/ar5212.h"
#include "ar5212/ar5212reg.h"
@@ -634,6 +632,20 @@ ar5212SetCoverageClass(struct ath_hal *ah, uint8_t coverageclass, int now)
}
}
+HAL_STATUS
+ar5212SetQuiet(struct ath_hal *ah, uint32_t period, uint32_t duration,
+ uint32_t nextStart, HAL_QUIET_FLAG flag)
+{
+ OS_REG_WRITE(ah, AR_QUIET2, period | (duration << AR_QUIET2_QUIET_DUR_S));
+ if (flag & HAL_QUIET_ENABLE) {
+ OS_REG_WRITE(ah, AR_QUIET1, nextStart | (1 << 16));
+ } else {
+ OS_REG_WRITE(ah, AR_QUIET1, nextStart);
+ }
+ return HAL_OK;
+}
+
void
ar5212SetPCUConfig(struct ath_hal *ah)
{
@@ -880,16 +892,16 @@ ar5212GetCapability(struct ath_hal *ah, HAL_CAPABILITY_TYPE type,
return HAL_OK;
case HAL_CAP_INTMIT: /* interference mitigation */
switch (capability) {
- case 0: /* hardware capability */
+ case HAL_CAP_INTMIT_PRESENT: /* hardware capability */
return HAL_OK;
- case 1:
+ case HAL_CAP_INTMIT_ENABLE:
return (ahp->ah_procPhyErr & HAL_ANI_ENA) ?
HAL_OK : HAL_ENXIO;
- case 2: /* HAL_ANI_NOISE_IMMUNITY_LEVEL */
- case 3: /* HAL_ANI_OFDM_WEAK_SIGNAL_DETECTION */
- case 4: /* HAL_ANI_CCK_WEAK_SIGNAL_THR */
- case 5: /* HAL_ANI_FIRSTEP_LEVEL */
- case 6: /* HAL_ANI_SPUR_IMMUNITY_LEVEL */
+ case HAL_CAP_INTMIT_NOISE_IMMUNITY_LEVEL:
+ case HAL_CAP_INTMIT_OFDM_WEAK_SIGNAL_LEVEL:
+ case HAL_CAP_INTMIT_CCK_WEAK_SIGNAL_THR:
+ case HAL_CAP_INTMIT_FIRSTEP_LEVEL:
+ case HAL_CAP_INTMIT_SPUR_IMMUNITY_LEVEL:
ani = ar5212AniGetCurrentState(ah);
if (ani == AH_NULL)
return HAL_ENXIO;
@@ -980,6 +992,8 @@ ar5212SetCapability(struct ath_hal *ah, HAL_CAPABILITY_TYPE type,
OS_REG_WRITE(ah, AR_TPC, ahp->ah_macTPC);
return AH_TRUE;
case HAL_CAP_INTMIT: { /* interference mitigation */
+ /* This maps the public ANI commands to the internal ANI commands */
+ /* Private: HAL_ANI_CMD; Public: HAL_CAP_INTMIT_CMD */
static const HAL_ANI_CMD cmds[] = {
HAL_ANI_PRESENT,
HAL_ANI_MODE,
@@ -990,7 +1004,7 @@ ar5212SetCapability(struct ath_hal *ah, HAL_CAPABILITY_TYPE type,
HAL_ANI_SPUR_IMMUNITY_LEVEL,
};
return capability < N(cmds) ?
- ar5212AniControl(ah, cmds[capability], setting) :
+ AH5212(ah)->ah_aniControl(ah, cmds[capability], setting) :
AH_FALSE;
}
case HAL_CAP_TSF_ADJUST: /* hardware has beacon tsf adjust */
@@ -1053,7 +1067,7 @@ ar5212GetDiagState(struct ath_hal *ah, int request,
case HAL_DIAG_ANI_CMD:
if (argsize != 2*sizeof(uint32_t))
return AH_FALSE;
- ar5212AniControl(ah, ((const uint32_t *)args)[0],
+ AH5212(ah)->ah_aniControl(ah, ((const uint32_t *)args)[0],
((const uint32_t *)args)[1]);
return AH_TRUE;
case HAL_DIAG_ANI_PARAMS:
@@ -1113,3 +1127,98 @@ ar5212WaitNFCalComplete(struct ath_hal *ah, int i)
}
return AH_FALSE;
}
+
+void
+ar5212EnableDfs(struct ath_hal *ah, HAL_PHYERR_PARAM *pe)
+{
+ uint32_t val;
+ val = OS_REG_READ(ah, AR_PHY_RADAR_0);
+
+ if (pe->pe_firpwr != HAL_PHYERR_PARAM_NOVAL) {
+ val &= ~AR_PHY_RADAR_0_FIRPWR;
+ val |= SM(pe->pe_firpwr, AR_PHY_RADAR_0_FIRPWR);
+ }
+ if (pe->pe_rrssi != HAL_PHYERR_PARAM_NOVAL) {
+ val &= ~AR_PHY_RADAR_0_RRSSI;
+ val |= SM(pe->pe_rrssi, AR_PHY_RADAR_0_RRSSI);
+ }
+ if (pe->pe_height != HAL_PHYERR_PARAM_NOVAL) {
+ val &= ~AR_PHY_RADAR_0_HEIGHT;
+ val |= SM(pe->pe_height, AR_PHY_RADAR_0_HEIGHT);
+ }
+ if (pe->pe_prssi != HAL_PHYERR_PARAM_NOVAL) {
+ val &= ~AR_PHY_RADAR_0_PRSSI;
+ val |= SM(pe->pe_prssi, AR_PHY_RADAR_0_PRSSI);
+ }
+ if (pe->pe_inband != HAL_PHYERR_PARAM_NOVAL) {
+ val &= ~AR_PHY_RADAR_0_INBAND;
+ val |= SM(pe->pe_inband, AR_PHY_RADAR_0_INBAND);
+ }
+ OS_REG_WRITE(ah, AR_PHY_RADAR_0, val | AR_PHY_RADAR_0_ENA);
+}
+
+void
+ar5212GetDfsThresh(struct ath_hal *ah, HAL_PHYERR_PARAM *pe)
+{
+ uint32_t val, temp;
+
+ val = OS_REG_READ(ah, AR_PHY_RADAR_0);
+
+ temp = MS(val, AR_PHY_RADAR_0_FIRPWR);
+ temp |= 0xFFFFFF80;
+ pe->pe_firpwr = temp;
+ pe->pe_rrssi = MS(val, AR_PHY_RADAR_0_RRSSI);
+ pe->pe_height = MS(val, AR_PHY_RADAR_0_HEIGHT);
+ pe->pe_prssi = MS(val, AR_PHY_RADAR_0_PRSSI);
+ pe->pe_inband = MS(val, AR_PHY_RADAR_0_INBAND);
+
+ pe->pe_relpwr = 0;
+ pe->pe_relstep = 0;
+ pe->pe_maxlen = 0;
+ pe->pe_extchannel = AH_FALSE;
+}
+
+/*
+ * Process the radar phy error and extract the pulse duration.
+ */
+HAL_BOOL
+ar5212ProcessRadarEvent(struct ath_hal *ah, struct ath_rx_status *rxs,
+ uint64_t fulltsf, const char *buf, HAL_DFS_EVENT *event)
+{
+ uint8_t dur;
+ uint8_t rssi;
+
+ /* Check whether the given phy error is a radar event */
+ if ((rxs->rs_phyerr != HAL_PHYERR_RADAR) &&
+ (rxs->rs_phyerr != HAL_PHYERR_FALSE_RADAR_EXT))
+ return AH_FALSE;
+
+ /*
+ * The first byte is the pulse width - if there's
+ * no data, simply set the duration to 0
+ */
+ if (rxs->rs_datalen >= 1)
+ /* The pulse width is byte 0 of the data */
+ dur = ((uint8_t) buf[0]) & 0xff;
+ else
+ dur = 0;
+
+ /* Pulse RSSI is the normal reported RSSI */
+ rssi = (uint8_t) rxs->rs_rssi;
+
+ /* 0 duration/rssi is not a valid radar event */
+ if (dur == 0 && rssi == 0)
+ return AH_FALSE;
+
+ HALDEBUG(ah, HAL_DEBUG_DFS, "%s: rssi=%d, dur=%d\n",
+ __func__, rssi, dur);
+
+ /* Record the event */
+ event->re_full_ts = fulltsf;
+ event->re_ts = rxs->rs_tstamp;
+ event->re_rssi = rssi;
+ event->re_dur = dur;
+ event->re_flags = HAL_DFS_EVENT_PRICH;
+
+ return AH_TRUE;
+}
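A minimal consumer sketch: only the ah_procRadarEvent method and the HAL_DFS_EVENT fields come from this change; the rx-path framing and dfs_add_pulse() are illustrative assumptions.

/* Illustrative prototype; not a real API in this tree. */
extern void dfs_add_pulse(uint64_t full_ts, uint8_t dur, uint8_t rssi);

static void
example_radar_rx(struct ath_hal *ah, struct ath_rx_status *rxs,
	uint64_t tsf, const char *buf)
{
	HAL_DFS_EVENT ev;

	/* Decode the pulse; a failed decode is simply dropped */
	if (ah->ah_procRadarEvent(ah, rxs, tsf, buf, &ev))
		dfs_add_pulse(ev.re_full_ts, ev.re_dur, ev.re_rssi);
}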
diff --git a/sys/dev/ath/ath_hal/ar5212/ar5212reg.h b/sys/dev/ath/ath_hal/ar5212/ar5212reg.h
index f99b203..15c1a58 100644
--- a/sys/dev/ath/ath_hal/ar5212/ar5212reg.h
+++ b/sys/dev/ath/ath_hal/ar5212/ar5212reg.h
@@ -300,6 +300,7 @@
#define AR_QUIET1_NEXT_QUIET 0xffff
#define AR_QUIET1_QUIET_ENABLE 0x10000 /* Enable Quiet time operation */
#define AR_QUIET1_QUIET_ACK_CTS_ENABLE 0x20000 /* Do we ack/cts during quiet period */
+#define AR_QUIET1_QUIET_ACK_CTS_ENABLE_S 17
#define AR_QUIET2 0x8100 /* More Quiet time programming */
#define AR_QUIET2_QUIET_PER_S 0 /* Periodicity of quiet period (TU) */
diff --git a/sys/dev/ath/ath_hal/ar5416/ar5416.h b/sys/dev/ath/ath_hal/ar5416/ar5416.h
index 5327296..e5294b0 100644
--- a/sys/dev/ath/ath_hal/ar5416/ar5416.h
+++ b/sys/dev/ath/ath_hal/ar5416/ar5416.h
@@ -194,6 +194,8 @@ extern uint32_t ar5416Get11nExtBusy(struct ath_hal *ah);
extern void ar5416Set11nMac2040(struct ath_hal *ah, HAL_HT_MACMODE mode);
extern HAL_HT_RXCLEAR ar5416Get11nRxClear(struct ath_hal *ah);
extern void ar5416Set11nRxClear(struct ath_hal *ah, HAL_HT_RXCLEAR rxclear);
+extern HAL_STATUS ar5416SetQuiet(struct ath_hal *ah, uint32_t period,
+ uint32_t duration, uint32_t nextStart, HAL_QUIET_FLAG flag);
extern HAL_STATUS ar5416GetCapability(struct ath_hal *ah,
HAL_CAPABILITY_TYPE type, uint32_t capability, uint32_t *result);
extern HAL_BOOL ar5416GetDiagState(struct ath_hal *ah, int request,
@@ -201,6 +203,11 @@ extern HAL_BOOL ar5416GetDiagState(struct ath_hal *ah, int request,
void **result, uint32_t *resultsize);
extern HAL_BOOL ar5416SetRifsDelay(struct ath_hal *ah,
const struct ieee80211_channel *chan, HAL_BOOL enable);
+extern void ar5416EnableDfs(struct ath_hal *ah, HAL_PHYERR_PARAM *pe);
+extern void ar5416GetDfsThresh(struct ath_hal *ah, HAL_PHYERR_PARAM *pe);
+extern HAL_BOOL ar5416ProcessRadarEvent(struct ath_hal *ah,
+ struct ath_rx_status *rxs, uint64_t fulltsf, const char *buf,
+ HAL_DFS_EVENT *event);
extern HAL_BOOL ar5416SetPowerMode(struct ath_hal *ah, HAL_POWER_MODE mode,
int setChip);
diff --git a/sys/dev/ath/ath_hal/ar5416/ar5416_ani.c b/sys/dev/ath/ath_hal/ar5416/ar5416_ani.c
index 3a8f785..e2c8592 100644
--- a/sys/dev/ath/ath_hal/ar5416/ar5416_ani.c
+++ b/sys/dev/ath/ath_hal/ar5416/ar5416_ani.c
@@ -175,9 +175,17 @@ ar5416AniControl(struct ath_hal *ah, HAL_ANI_CMD cmd, int param)
struct ar5212AniState *aniState = ahp->ah_curani;
const struct ar5212AniParams *params = aniState->params;
+ /* Check whether the particular function is enabled */
+ if (((1 << cmd) & AH5416(ah)->ah_ani_function) == 0) {
+ HALDEBUG(ah, HAL_DEBUG_ANI,
+     "%s: command %d disabled; mask 0x%x\n",
+     __func__, cmd, AH5416(ah)->ah_ani_function);
+ return AH_FALSE;
+ }
+
OS_MARK(ah, AH_MARK_ANI_CONTROL, cmd);
- switch (cmd & AH5416(ah)->ah_ani_function) {
+ switch (cmd) {
case HAL_ANI_NOISE_IMMUNITY_LEVEL: {
u_int level = param;
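The guard added above is the crux of this hunk: the old code dispatched with switch (cmd & AH5416(ah)->ah_ani_function), which silently mangles the command when a function is masked off, because the HAL_ANI_* commands are small consecutive integers rather than one-hot bits. The mask therefore has to be tested with 1 << cmd. A self-contained illustration with assumed values:

#include <stdio.h>

/* Assumed for illustration: commands are indices, not bit flags. */
enum { CMD_NOISE_IMMUNITY = 2 };

int
main(void)
{
	unsigned mask = ~(1u << CMD_NOISE_IMMUNITY);	/* cmd 2 disabled */

	/* Old test: nonzero, so the disabled command still "matches" */
	printf("old: %u\n", CMD_NOISE_IMMUNITY & mask);
	/* New test: zero, so the disabled command is rejected */
	printf("new: %u\n", (1u << CMD_NOISE_IMMUNITY) & mask);
	return (0);
}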
@@ -356,14 +364,14 @@ ar5416AniOfdmErrTrigger(struct ath_hal *ah)
aniState = ahp->ah_curani;
params = aniState->params;
/* First, raise noise immunity level, up to max */
- if ((AH5416(ah)->ah_ani_function & HAL_ANI_NOISE_IMMUNITY_LEVEL) &&
+ if ((AH5416(ah)->ah_ani_function & (1 << HAL_ANI_NOISE_IMMUNITY_LEVEL)) &&
(aniState->noiseImmunityLevel+1 < params->maxNoiseImmunityLevel)) {
ar5416AniControl(ah, HAL_ANI_NOISE_IMMUNITY_LEVEL,
aniState->noiseImmunityLevel + 1);
return;
}
/* then, raise spur immunity level, up to max */
- if ((AH5416(ah)->ah_ani_function & HAL_ANI_SPUR_IMMUNITY_LEVEL) &&
+ if ((AH5416(ah)->ah_ani_function & (1 << HAL_ANI_SPUR_IMMUNITY_LEVEL)) &&
(aniState->spurImmunityLevel+1 < params->maxSpurImmunityLevel)) {
ar5416AniControl(ah, HAL_ANI_SPUR_IMMUNITY_LEVEL,
aniState->spurImmunityLevel + 1);
@@ -443,7 +451,8 @@ ar5416AniCckErrTrigger(struct ath_hal *ah)
/* first, raise noise immunity level, up to max */
aniState = ahp->ah_curani;
params = aniState->params;
- if (aniState->noiseImmunityLevel+1 < params->maxNoiseImmunityLevel) {
+ if ((AH5416(ah)->ah_ani_function & (1 << HAL_ANI_NOISE_IMMUNITY_LEVEL)) &&
+     aniState->noiseImmunityLevel+1 < params->maxNoiseImmunityLevel) {
ar5416AniControl(ah, HAL_ANI_NOISE_IMMUNITY_LEVEL,
aniState->noiseImmunityLevel + 1);
return;
diff --git a/sys/dev/ath/ath_hal/ar5416/ar5416_attach.c b/sys/dev/ath/ath_hal/ar5416/ar5416_attach.c
index 6779bf9..e636325 100644
--- a/sys/dev/ath/ath_hal/ar5416/ar5416_attach.c
+++ b/sys/dev/ath/ath_hal/ar5416/ar5416_attach.c
@@ -58,7 +58,7 @@ ar5416AniSetup(struct ath_hal *ah)
.period = 100,
};
/* NB: disable ANI noise immmunity for reliable RIFS rx */
- AH5416(ah)->ah_ani_function &= ~ HAL_ANI_NOISE_IMMUNITY_LEVEL;
+ AH5416(ah)->ah_ani_function &= ~(1 << HAL_ANI_NOISE_IMMUNITY_LEVEL);
ar5416AniAttach(ah, &aniparams, &aniparams, AH_TRUE);
}
@@ -139,10 +139,16 @@ ar5416InitState(struct ath_hal_5416 *ahp5416, uint16_t devid, HAL_SOFTC sc,
ah->ah_setAntennaSwitch = ar5416SetAntennaSwitch;
ah->ah_setDecompMask = ar5416SetDecompMask;
ah->ah_setCoverageClass = ar5416SetCoverageClass;
+ ah->ah_setQuiet = ar5416SetQuiet;
ah->ah_resetKeyCacheEntry = ar5416ResetKeyCacheEntry;
ah->ah_setKeyCacheEntry = ar5416SetKeyCacheEntry;
+ /* DFS Functions */
+ ah->ah_enableDfs = ar5416EnableDfs;
+ ah->ah_getDfsThresh = ar5416GetDfsThresh;
+ ah->ah_procRadarEvent = ar5416ProcessRadarEvent;
+
/* Power Management Functions */
ah->ah_setPowerMode = ar5416SetPowerMode;
@@ -199,7 +205,10 @@ ar5416InitState(struct ath_hal_5416 *ahp5416, uint16_t devid, HAL_SOFTC sc,
AH5416(ah)->ah_tx_chainmask = AR5416_DEFAULT_TXCHAINMASK;
/* Enable all ANI functions to begin with */
- AH5416(ah)->ah_ani_function = HAL_ANI_ALL;
+ AH5416(ah)->ah_ani_function = 0xffffffff;
+
+ /* Set overridable ANI methods */
+ AH5212(ah)->ah_aniControl = ar5416AniControl;
}
uint32_t
@@ -875,6 +884,7 @@ ar5416FillCapabilityInfo(struct ath_hal *ah)
pCap->halBssidMatchSupport = AH_TRUE;
pCap->halGTTSupport = AH_TRUE;
pCap->halCSTSupport = AH_TRUE;
+ pCap->halEnhancedDfsSupport = AH_FALSE;
if (ath_hal_eepromGetFlag(ah, AR_EEP_RFKILL) &&
ath_hal_eepromGet(ah, AR_EEP_RFSILENT, &ahpriv->ah_rfsilent) == HAL_OK) {
diff --git a/sys/dev/ath/ath_hal/ar5416/ar5416_cal.c b/sys/dev/ath/ath_hal/ar5416/ar5416_cal.c
index ee61c30..1356c7d 100644
--- a/sys/dev/ath/ath_hal/ar5416/ar5416_cal.c
+++ b/sys/dev/ath/ath_hal/ar5416/ar5416_cal.c
@@ -594,8 +594,8 @@ ar5416LoadNF(struct ath_hal *ah, const struct ieee80211_channel *chan)
if (AR_SREV_KITE(ah)) {
/* Kite has only one chain */
chainmask = 0x9;
- } else if (AR_SREV_MERLIN(ah)) {
- /* Merlin has only two chains */
+ } else if (AR_SREV_MERLIN(ah) || AR_SREV_KIWI(ah)) {
+ /* Merlin/Kiwi has only two chains */
chainmask = 0x1B;
} else {
chainmask = 0x3F;
diff --git a/sys/dev/ath/ath_hal/ar5416/ar5416_misc.c b/sys/dev/ath/ath_hal/ar5416/ar5416_misc.c
index 8f18c46..2332656 100644
--- a/sys/dev/ath/ath_hal/ar5416/ar5416_misc.c
+++ b/sys/dev/ath/ath_hal/ar5416/ar5416_misc.c
@@ -273,6 +273,35 @@ ar5416Set11nRxClear(struct ath_hal *ah, HAL_HT_RXCLEAR rxclear)
}
}
+/* XXX shouldn't be here! */
+#define TU_TO_USEC(_tu) ((_tu) << 10)
+
+HAL_STATUS
+ar5416SetQuiet(struct ath_hal *ah, uint32_t period, uint32_t duration,
+ uint32_t nextStart, HAL_QUIET_FLAG flag)
+{
+ uint32_t period_us = TU_TO_USEC(period); /* convert to microseconds */
+ uint32_t nextStart_us = TU_TO_USEC(nextStart); /* convert to microseconds */
+
+ if (flag & HAL_QUIET_ENABLE) {
+ if ((!nextStart) || (flag & HAL_QUIET_ADD_CURRENT_TSF)) {
+ /* Add the nextStart offset to the current TSF */
+ nextStart_us += OS_REG_READ(ah, AR_TSF_L32);
+ }
+ if (flag & HAL_QUIET_ADD_SWBA_RESP_TIME) {
+ nextStart_us += ath_hal_sw_beacon_response_time;
+ }
+ OS_REG_RMW_FIELD(ah, AR_QUIET1, AR_QUIET1_QUIET_ACK_CTS_ENABLE, 1);
+ OS_REG_WRITE(ah, AR_QUIET2, SM(duration, AR_QUIET2_QUIET_DUR));
+ OS_REG_WRITE(ah, AR_QUIET_PERIOD, period_us);
+ OS_REG_WRITE(ah, AR_NEXT_QUIET, nextStart_us);
+ OS_REG_SET_BIT(ah, AR_TIMER_MODE, AR_TIMER_MODE_QUIET);
+ } else {
+ OS_REG_CLR_BIT(ah, AR_TIMER_MODE, AR_TIMER_MODE_QUIET);
+ }
+ return HAL_OK;
+}
+#undef TU_TO_USEC
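A TU (time unit) is 1024 microseconds, which is what the shift by 10 in TU_TO_USEC encodes; a quick standalone check:

/* 1 TU = 1024 us; e.g. a 100 TU quiet period is 102400 us. */
#define TU_TO_USEC(_tu)	((_tu) << 10)
_Static_assert(TU_TO_USEC(100) == 102400, "100 TU is 102400 us");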
+
HAL_STATUS
ar5416GetCapability(struct ath_hal *ah, HAL_CAPABILITY_TYPE type,
uint32_t capability, uint32_t *result)
@@ -560,3 +589,122 @@ ar5416DetectBBHang(struct ath_hal *ah)
#undef N
}
#undef NUM_STATUS_READS
+
+/*
+ * Get the radar parameter values and return them in the pe
+ * structure
+ */
+void
+ar5416GetDfsThresh(struct ath_hal *ah, HAL_PHYERR_PARAM *pe)
+{
+ uint32_t val, temp;
+
+ val = OS_REG_READ(ah, AR_PHY_RADAR_0);
+
+ temp = MS(val, AR_PHY_RADAR_0_FIRPWR);
+ /* Sign-extend the 7-bit FIRPWR field; the threshold is negative */
+ temp |= 0xFFFFFF80;
+ pe->pe_firpwr = temp;
+ pe->pe_rrssi = MS(val, AR_PHY_RADAR_0_RRSSI);
+ pe->pe_height = MS(val, AR_PHY_RADAR_0_HEIGHT);
+ pe->pe_prssi = MS(val, AR_PHY_RADAR_0_PRSSI);
+ pe->pe_inband = MS(val, AR_PHY_RADAR_0_INBAND);
+
+ val = OS_REG_READ(ah, AR_PHY_RADAR_1);
+ temp = val & AR_PHY_RADAR_1_RELPWR_ENA;
+ pe->pe_relpwr = MS(val, AR_PHY_RADAR_1_RELPWR_THRESH);
+ if (temp)
+ pe->pe_relpwr |= HAL_PHYERR_PARAM_ENABLE;
+ temp = val & AR_PHY_RADAR_1_RELSTEP_CHECK;
+ pe->pe_relstep = MS(val, AR_PHY_RADAR_1_RELSTEP_THRESH);
+ if (temp)
+ pe->pe_relstep |= HAL_PHYERR_PARAM_ENABLE;
+ pe->pe_maxlen = MS(val, AR_PHY_RADAR_1_MAXLEN);
+ pe->pe_extchannel = !! (OS_REG_READ(ah, AR_PHY_RADAR_EXT) &
+ AR_PHY_RADAR_EXT_ENA);
+}
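Note that pe_relpwr and pe_relstep pack an enable bit alongside the threshold via HAL_PHYERR_PARAM_ENABLE, so a consumer has to unpack both, roughly as sketched below (the helper is illustrative, not an API in this diff):

/* Illustrative: split a packed relpwr/relstep value into its parts. */
static void
example_unpack(uint32_t packed, uint32_t *thresh, int *enabled)
{
	*enabled = !!(packed & HAL_PHYERR_PARAM_ENABLE);
	*thresh = packed & ~HAL_PHYERR_PARAM_ENABLE;
}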
+
+/*
+ * Enable radar detection and set the radar parameters per the
+ * values in pe
+ */
+void
+ar5416EnableDfs(struct ath_hal *ah, HAL_PHYERR_PARAM *pe)
+{
+ uint32_t val;
+
+ val = OS_REG_READ(ah, AR_PHY_RADAR_0);
+
+ if (pe->pe_firpwr != HAL_PHYERR_PARAM_NOVAL) {
+ val &= ~AR_PHY_RADAR_0_FIRPWR;
+ val |= SM(pe->pe_firpwr, AR_PHY_RADAR_0_FIRPWR);
+ }
+ if (pe->pe_rrssi != HAL_PHYERR_PARAM_NOVAL) {
+ val &= ~AR_PHY_RADAR_0_RRSSI;
+ val |= SM(pe->pe_rrssi, AR_PHY_RADAR_0_RRSSI);
+ }
+ if (pe->pe_height != HAL_PHYERR_PARAM_NOVAL) {
+ val &= ~AR_PHY_RADAR_0_HEIGHT;
+ val |= SM(pe->pe_height, AR_PHY_RADAR_0_HEIGHT);
+ }
+ if (pe->pe_prssi != HAL_PHYERR_PARAM_NOVAL) {
+ val &= ~AR_PHY_RADAR_0_PRSSI;
+ val |= SM(pe->pe_prssi, AR_PHY_RADAR_0_PRSSI);
+ }
+ if (pe->pe_inband != HAL_PHYERR_PARAM_NOVAL) {
+ val &= ~AR_PHY_RADAR_0_INBAND;
+ val |= SM(pe->pe_inband, AR_PHY_RADAR_0_INBAND);
+ }
+
+ /* Enable FFT data */
+ val |= AR_PHY_RADAR_0_FFT_ENA;
+
+ OS_REG_WRITE(ah, AR_PHY_RADAR_0, val | AR_PHY_RADAR_0_ENA);
+
+ val = OS_REG_READ(ah, AR_PHY_RADAR_1);
+ val |= (AR_PHY_RADAR_1_MAX_RRSSI | AR_PHY_RADAR_1_BLOCK_CHECK);
+
+ if (pe->pe_maxlen != HAL_PHYERR_PARAM_NOVAL) {
+ val &= ~AR_PHY_RADAR_1_MAXLEN;
+ val |= SM(pe->pe_maxlen, AR_PHY_RADAR_1_MAXLEN);
+ }
+ OS_REG_WRITE(ah, AR_PHY_RADAR_1, val);
+
+ /*
+  * Enable radar detection on the extension channel if the upper
+  * layer asks for it; the caller should first check that the
+  * channel is HT/40 and that HAL_CAP_EXT_CHAN_DFS is available.
+  */
+ if (pe->pe_extchannel)
+ OS_REG_SET_BIT(ah, AR_PHY_RADAR_EXT, AR_PHY_RADAR_EXT_ENA);
+ else
+ OS_REG_CLR_BIT(ah, AR_PHY_RADAR_EXT, AR_PHY_RADAR_EXT_ENA);
+
+ if (pe->pe_relstep != HAL_PHYERR_PARAM_NOVAL) {
+ val = OS_REG_READ(ah, AR_PHY_RADAR_1);
+ val &= ~AR_PHY_RADAR_1_RELSTEP_THRESH;
+ val |= SM(pe->pe_relstep, AR_PHY_RADAR_1_RELSTEP_THRESH);
+ OS_REG_WRITE(ah, AR_PHY_RADAR_1, val);
+ }
+ if (pe->pe_relpwr != HAL_PHYERR_PARAM_NOVAL) {
+ val = OS_REG_READ(ah, AR_PHY_RADAR_1);
+ val &= ~AR_PHY_RADAR_1_RELPWR_THRESH;
+ val |= SM(pe->pe_relpwr, AR_PHY_RADAR_1_RELPWR_THRESH);
+ OS_REG_WRITE(ah, AR_PHY_RADAR_1, val);
+ }
+}
+
+/*
+ * Extract the radar event information from the given phy error.
+ *
+ * Returns AH_TRUE if the phy error was a valid radar event,
+ * AH_FALSE otherwise.
+ */
+HAL_BOOL
+ar5416ProcessRadarEvent(struct ath_hal *ah, struct ath_rx_status *rxs,
+ uint64_t fulltsf, const char *buf, HAL_DFS_EVENT *event)
+{
+ /*
+ * For now, this isn't implemented.
+ */
+ return AH_FALSE;
+}
diff --git a/sys/dev/ath/ath_hal/ar5416/ar5416_reset.c b/sys/dev/ath/ath_hal/ar5416/ar5416_reset.c
index d2ae351..1da686a 100644
--- a/sys/dev/ath/ath_hal/ar5416/ar5416_reset.c
+++ b/sys/dev/ath/ath_hal/ar5416/ar5416_reset.c
@@ -167,6 +167,17 @@ ar5416Reset(struct ath_hal *ah, HAL_OPMODE opmode,
AH5416(ah)->ah_writeIni(ah, chan);
+ if (AR_SREV_KIWI_13_OR_LATER(ah)) {
+ /* Enable ASYNC FIFO */
+ OS_REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
+ AR_MAC_PCU_ASYNC_FIFO_REG3_DATAPATH_SEL);
+ OS_REG_SET_BIT(ah, AR_PHY_MODE, AR_PHY_MODE_ASYNCFIFO);
+ OS_REG_CLR_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
+ AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
+ OS_REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
+ AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
+ }
+
/* Override ini values (that can be overriden in this fashion) */
ar5416OverrideIni(ah, chan);
@@ -258,6 +269,12 @@ ar5416Reset(struct ath_hal *ah, HAL_OPMODE opmode,
OS_REG_WRITE(ah, AR_MAC_LED, OS_REG_READ(ah, AR_MAC_LED) |
saveLedState);
+ /* Start TSF2 for generic timer 8-15 */
+#ifdef NOTYET
+ if (AR_SREV_KIWI(ah))
+ ar5416StartTsf2(ah);
+#endif
+
/* Restore previous antenna */
OS_REG_WRITE(ah, AR_DEF_ANTENNA, saveDefAntenna);
@@ -292,6 +309,41 @@ ar5416Reset(struct ath_hal *ah, HAL_OPMODE opmode,
/* This may override the AR_DIAG_SW register */
ar5416InitUserSettings(ah);
+ if (AR_SREV_KIWI_13_OR_LATER(ah)) {
+ /*
+ * Enable ASYNC FIFO
+ *
+ * If the Async FIFO is enabled, the following timing values
+ * change, as the MAC now runs at 117 MHz instead of the
+ * 88/44 MHz used when the Async FIFO is disabled.
+ *
+ * Overwrite the delay/timeouts initialized in ProcessIni()
+ * above.
+ */
+ OS_REG_WRITE(ah, AR_D_GBL_IFS_SIFS,
+ AR_D_GBL_IFS_SIFS_ASYNC_FIFO_DUR);
+ OS_REG_WRITE(ah, AR_D_GBL_IFS_SLOT,
+ AR_D_GBL_IFS_SLOT_ASYNC_FIFO_DUR);
+ OS_REG_WRITE(ah, AR_D_GBL_IFS_EIFS,
+ AR_D_GBL_IFS_EIFS_ASYNC_FIFO_DUR);
+
+ OS_REG_WRITE(ah, AR_TIME_OUT,
+ AR_TIME_OUT_ACK_CTS_ASYNC_FIFO_DUR);
+ OS_REG_WRITE(ah, AR_USEC, AR_USEC_ASYNC_FIFO_DUR);
+
+ OS_REG_SET_BIT(ah, AR_MAC_PCU_LOGIC_ANALYZER,
+ AR_MAC_PCU_LOGIC_ANALYZER_DISBUG20768);
+ OS_REG_RMW_FIELD(ah, AR_AHB_MODE, AR_AHB_CUSTOM_BURST_EN,
+ AR_AHB_CUSTOM_BURST_ASYNC_FIFO_VAL);
+ }
+
+ if (AR_SREV_KIWI_13_OR_LATER(ah)) {
+ /* Enable AGGWEP to accelerate encryption engine */
+ OS_REG_SET_BIT(ah, AR_PCU_MISC_MODE2,
+ AR_PCU_MISC_MODE2_ENABLE_AGGWEP);
+ }
+
/*
* disable seq number generation in hw
*/
@@ -2576,7 +2628,7 @@ ar5416OverrideIni(struct ath_hal *ah, const struct ieee80211_channel *chan)
if (!AR_SREV_9271(ah))
val &= ~AR_PCU_MISC_MODE2_HWWAR1;
- if (AR_SREV_9287_11_OR_LATER(ah))
+ if (AR_SREV_KIWI_11_OR_LATER(ah))
val = val & (~AR_PCU_MISC_MODE2_HWWAR2);
OS_REG_WRITE(ah, AR_PCU_MISC_MODE2, val);
diff --git a/sys/dev/ath/ath_hal/ar5416/ar5416phy.h b/sys/dev/ath/ath_hal/ar5416/ar5416phy.h
index 86643f0..d7a5e0b 100644
--- a/sys/dev/ath/ath_hal/ar5416/ar5416phy.h
+++ b/sys/dev/ath/ath_hal/ar5416/ar5416phy.h
@@ -21,6 +21,25 @@
#include "ar5212/ar5212phy.h"
+/* For AR_PHY_RADAR0 */
+#define AR_PHY_RADAR_0_FFT_ENA 0x80000000
+
+#define AR_PHY_RADAR_EXT 0x9940
+#define AR_PHY_RADAR_EXT_ENA 0x00004000
+
+#define AR_PHY_RADAR_1 0x9958
+#define AR_PHY_RADAR_1_RELPWR_ENA 0x00800000
+#define AR_PHY_RADAR_1_USE_FIR128 0x00400000
+#define AR_PHY_RADAR_1_RELPWR_THRESH 0x003F0000
+#define AR_PHY_RADAR_1_RELPWR_THRESH_S 16
+#define AR_PHY_RADAR_1_BLOCK_CHECK 0x00008000
+#define AR_PHY_RADAR_1_MAX_RRSSI 0x00004000
+#define AR_PHY_RADAR_1_RELSTEP_CHECK 0x00002000
+#define AR_PHY_RADAR_1_RELSTEP_THRESH 0x00001F00
+#define AR_PHY_RADAR_1_RELSTEP_THRESH_S 8
+#define AR_PHY_RADAR_1_MAXLEN 0x000000FF
+#define AR_PHY_RADAR_1_MAXLEN_S 0
+
#define AR_PHY_CHIP_ID_REV_0 0x80 /* 5416 Rev 0 (owl 1.0) BB */
#define AR_PHY_CHIP_ID_REV_1 0x81 /* 5416 Rev 1 (owl 2.0) BB */
@@ -301,4 +320,6 @@
#define AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL 0x80000000
#define AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL_S 31
+#define AR_PHY_MODE_ASYNCFIFO 0x80 /* Enable async fifo */
+
#endif /* _DEV_ATH_AR5416PHY_H_ */
diff --git a/sys/dev/ath/ath_hal/ar5416/ar5416reg.h b/sys/dev/ath/ath_hal/ar5416/ar5416reg.h
index 9921366..561c5b4 100644
--- a/sys/dev/ath/ath_hal/ar5416/ar5416reg.h
+++ b/sys/dev/ath/ath_hal/ar5416/ar5416reg.h
@@ -219,6 +219,10 @@
#define AR_AHB_PAGE_SIZE_1K 0x00000000 /* set page-size as 1k */
#define AR_AHB_PAGE_SIZE_2K 0x00000008 /* set page-size as 2k */
#define AR_AHB_PAGE_SIZE_4K 0x00000010 /* set page-size as 4k */
+/* Kiwi */
+#define AR_AHB_CUSTOM_BURST_EN 0x000000C0 /* set Custom Burst Mode */
+#define AR_AHB_CUSTOM_BURST_EN_S 6 /* Custom Burst Mode shift */
+#define AR_AHB_CUSTOM_BURST_ASYNC_FIFO_VAL 3 /* set both bits in Async FIFO mode */
/* MAC PCU Registers */
#define AR_STA_ID1_PRESERVE_SEQNUM 0x20000000 /* Don't replace seq num */
@@ -451,9 +455,23 @@
* For Merlin and above only.
*/
#define AR_PCU_MISC_MODE2_ADHOC_MCAST_KEYID_ENABLE 0x00000040
+#define AR_PCU_MISC_MODE2_ENABLE_AGGWEP 0x00020000 /* Kiwi or later? */
#define AR_PCU_MISC_MODE2_HWWAR1 0x00100000
#define AR_PCU_MISC_MODE2_HWWAR2 0x02000000
+/* For Kiwi */
+#define AR_MAC_PCU_ASYNC_FIFO_REG3 0x8358
+#define AR_MAC_PCU_ASYNC_FIFO_REG3_DATAPATH_SEL 0x00000400
+#define AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET 0x80000000
+
+/* TSF2. For Kiwi only */
+#define AR_TSF2_L32 0x8390
+#define AR_TSF2_U32 0x8394
+
+/* MAC Direct Connect Control. For Kiwi only */
+#define AR_DIRECT_CONNECT 0x83A0
+#define AR_DC_AP_STA_EN 0x00000001
+
/* GPIO Interrupt */
#define AR_INTR_GPIO 0x3FF00000 /* gpio interrupted */
#define AR_INTR_GPIO_S 20
@@ -488,6 +506,17 @@
#define AR_PCU_TXBUF_CTRL_USABLE_SIZE 0x700
#define AR_9285_PCU_TXBUF_CTRL_USABLE_SIZE 0x380
+/* IFS, SIFS, slot, etc for Async FIFO mode (Kiwi) */
+#define AR_D_GBL_IFS_SIFS_ASYNC_FIFO_DUR 0x000003AB
+#define AR_TIME_OUT_ACK_CTS_ASYNC_FIFO_DUR 0x16001D56
+#define AR_USEC_ASYNC_FIFO_DUR 0x12e00074
+#define AR_D_GBL_IFS_SLOT_ASYNC_FIFO_DUR 0x00000420
+#define AR_D_GBL_IFS_EIFS_ASYNC_FIFO_DUR 0x0000A5EB
+
+/* Used by Kiwi Async FIFO */
+#define AR_MAC_PCU_LOGIC_ANALYZER 0x8264
+#define AR_MAC_PCU_LOGIC_ANALYZER_DISBUG20768 0x20000000
+
/* Eeprom defines */
#define AR_EEPROM_STATUS_DATA_VAL 0x0000ffff
#define AR_EEPROM_STATUS_DATA_VAL_S 0
@@ -566,6 +595,11 @@
#define AR_XSREV_REVISION_KITE_10 0 /* Kite 1.0 */
#define AR_XSREV_REVISION_KITE_11 1 /* Kite 1.1 */
#define AR_XSREV_REVISION_KITE_12 2 /* Kite 1.2 */
+#define AR_XSREV_VERSION_KIWI 0x180 /* Kiwi (AR9287) */
+#define AR_XSREV_REVISION_KIWI_10 0
+#define AR_XSREV_REVISION_KIWI_11 1
+#define AR_XSREV_REVISION_KIWI_12 2
+#define AR_XSREV_REVISION_KIWI_13 3
/* Owl (AR5416) */
#define AR_SREV_OWL(_ah) \
@@ -648,9 +682,31 @@
(AR_SREV_KITE_12_OR_LATER(_ah) && \
((OS_REG_READ(_ah, AR_AN_SYNTH9) & 0x7) == 0x1))
+#define AR_SREV_KIWI(_ah) \
+ (AH_PRIVATE((_ah))->ah_macVersion == AR_XSREV_VERSION_KIWI)
+
+#define AR_SREV_KIWI_11_OR_LATER(_ah) \
+ (AR_SREV_KIWI(_ah) && \
+ AH_PRIVATE((_ah))->ah_macRev >= AR_XSREV_REVISION_KIWI_11)
+
+#define AR_SREV_KIWI_11(_ah) \
+ (AR_SREV_KIWI(_ah) && \
+ AH_PRIVATE((_ah))->ah_macRev == AR_XSREV_REVISION_KIWI_11)
+
+#define AR_SREV_KIWI_12(_ah) \
+ (AR_SREV_KIWI(_ah) && \
+ AH_PRIVATE((_ah))->ah_macRev == AR_XSREV_REVISION_KIWI_12)
+
+#define AR_SREV_KIWI_12_OR_LATER(_ah) \
+ (AR_SREV_KIWI(_ah) && \
+ AH_PRIVATE((_ah))->ah_macRev >= AR_XSREV_REVISION_KIWI_12)
+
+#define AR_SREV_KIWI_13_OR_LATER(_ah) \
+ (AR_SREV_KIWI(_ah) && \
+ AH_PRIVATE((_ah))->ah_macRev >= AR_XSREV_REVISION_KIWI_13)
+
/* Not yet implemented chips */
#define AR_SREV_9271(_ah) 0
-#define AR_SREV_9287_11_OR_LATER(_ah) 0
-#define AR_SREV_KIWI_10_OR_LATER(_ah) 0
#endif /* _DEV_ATH_AR5416REG_H */
diff --git a/sys/dev/ath/ath_hal/ar9001/ar9130_attach.c b/sys/dev/ath/ath_hal/ar9001/ar9130_attach.c
index 49a5f5e..2a3f3f0 100644
--- a/sys/dev/ath/ath_hal/ar9001/ar9130_attach.c
+++ b/sys/dev/ath/ath_hal/ar9001/ar9130_attach.c
@@ -289,6 +289,7 @@ ar9130FillCapabilityInfo(struct ath_hal *ah)
pCap->halRifsTxSupport = AH_TRUE;
pCap->halRtsAggrLimit = 64*1024; /* 802.11n max */
pCap->halExtChanDfsSupport = AH_TRUE;
+ pCap->halUseCombinedRadarRssi = AH_TRUE;
pCap->halAutoSleepSupport = AH_FALSE; /* XXX? */
/*
* MBSSID aggregation is broken in Howl v1.1, v1.2, v1.3
diff --git a/sys/dev/ath/ath_hal/ar9001/ar9160_attach.c b/sys/dev/ath/ath_hal/ar9001/ar9160_attach.c
index 0b6472b..44a549d 100644
--- a/sys/dev/ath/ath_hal/ar9001/ar9160_attach.c
+++ b/sys/dev/ath/ath_hal/ar9001/ar9160_attach.c
@@ -82,7 +82,7 @@ ar9160AniSetup(struct ath_hal *ah)
};
/* NB: disable ANI noise immmunity for reliable RIFS rx */
- AH5416(ah)->ah_ani_function &= ~ HAL_ANI_NOISE_IMMUNITY_LEVEL;
+ AH5416(ah)->ah_ani_function &= ~(1 << HAL_ANI_NOISE_IMMUNITY_LEVEL);
ar5416AniAttach(ah, &aniparams, &aniparams, AH_TRUE);
}
@@ -293,6 +293,7 @@ ar9160FillCapabilityInfo(struct ath_hal *ah)
pCap->halRifsTxSupport = AH_TRUE;
pCap->halRtsAggrLimit = 64*1024; /* 802.11n max */
pCap->halExtChanDfsSupport = AH_TRUE;
+ pCap->halUseCombinedRadarRssi = AH_TRUE;
pCap->halAutoSleepSupport = AH_FALSE; /* XXX? */
pCap->halMbssidAggrSupport = AH_TRUE;
pCap->hal4AddrAggrSupport = AH_TRUE;
diff --git a/sys/dev/ath/ath_hal/ar9002/ar9280_attach.c b/sys/dev/ath/ath_hal/ar9002/ar9280_attach.c
index 3351edb..ebe3be1 100644
--- a/sys/dev/ath/ath_hal/ar9002/ar9280_attach.c
+++ b/sys/dev/ath/ath_hal/ar9002/ar9280_attach.c
@@ -93,7 +93,7 @@ ar9280AniSetup(struct ath_hal *ah)
.period = 100,
};
/* NB: disable ANI noise immmunity for reliable RIFS rx */
- AH5416(ah)->ah_ani_function &= ~ HAL_ANI_NOISE_IMMUNITY_LEVEL;
+ AH5416(ah)->ah_ani_function &= ~(1 << HAL_ANI_NOISE_IMMUNITY_LEVEL);
/* NB: ANI is not enabled yet */
ar5416AniAttach(ah, &aniparams, &aniparams, AH_TRUE);
@@ -783,6 +783,7 @@ ar9280FillCapabilityInfo(struct ath_hal *ah)
pCap->halRifsTxSupport = AH_TRUE;
pCap->halRtsAggrLimit = 64*1024; /* 802.11n max */
pCap->halExtChanDfsSupport = AH_TRUE;
+ pCap->halUseCombinedRadarRssi = AH_TRUE;
#if 0
/* XXX bluetooth */
pCap->halBtCoexSupport = AH_TRUE;
@@ -804,6 +805,7 @@ ar9280FillCapabilityInfo(struct ath_hal *ah)
}
pCap->halRxStbcSupport = 1;
pCap->halTxStbcSupport = 1;
+ pCap->halEnhancedDfsSupport = AH_TRUE;
return AH_TRUE;
}
diff --git a/sys/dev/ath/ath_hal/ar9002/ar9285_attach.c b/sys/dev/ath/ath_hal/ar9002/ar9285_attach.c
index b7ed27d..9120313 100644
--- a/sys/dev/ath/ath_hal/ar9002/ar9285_attach.c
+++ b/sys/dev/ath/ath_hal/ar9002/ar9285_attach.c
@@ -98,7 +98,7 @@ ar9285AniSetup(struct ath_hal *ah)
.period = 100,
};
/* NB: disable ANI noise immmunity for reliable RIFS rx */
- AH5416(ah)->ah_ani_function &= ~ HAL_ANI_NOISE_IMMUNITY_LEVEL;
+ AH5416(ah)->ah_ani_function &= ~(1 << HAL_ANI_NOISE_IMMUNITY_LEVEL);
ar5416AniAttach(ah, &aniparams, &aniparams, AH_TRUE);
}
@@ -414,6 +414,7 @@ ar9285FillCapabilityInfo(struct ath_hal *ah)
pCap->halRifsTxSupport = AH_TRUE;
pCap->halRtsAggrLimit = 64*1024; /* 802.11n max */
pCap->halExtChanDfsSupport = AH_TRUE;
+ pCap->halUseCombinedRadarRssi = AH_TRUE;
#if 0
/* XXX bluetooth */
pCap->halBtCoexSupport = AH_TRUE;
diff --git a/sys/dev/ath/ath_hal/ar9002/ar9287.c b/sys/dev/ath/ath_hal/ar9002/ar9287.c
new file mode 100644
index 0000000..9b874b3
--- /dev/null
+++ b/sys/dev/ath/ath_hal/ar9002/ar9287.c
@@ -0,0 +1,392 @@
+/*
+ * Copyright (c) 2008-2009 Sam Leffler, Errno Consulting
+ * Copyright (c) 2008 Atheros Communications, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ * $FreeBSD$
+ */
+#include "opt_ah.h"
+
+/*
+ * NB: Merlin and later have a simpler RF backend.
+ */
+#include "ah.h"
+#include "ah_internal.h"
+
+#include "ah_eeprom_v14.h"
+
+#include "ar9002/ar9287.h"
+#include "ar5416/ar5416reg.h"
+#include "ar5416/ar5416phy.h"
+
+#define N(a) (sizeof(a)/sizeof(a[0]))
+
+struct ar9287State {
+ RF_HAL_FUNCS base; /* public state, must be first */
+ uint16_t pcdacTable[1]; /* XXX */
+};
+#define AR9287(ah) ((struct ar9287State *) AH5212(ah)->ah_rfHal)
+
+static HAL_BOOL ar9287GetChannelMaxMinPower(struct ath_hal *,
+ const struct ieee80211_channel *, int16_t *maxPow,int16_t *minPow);
+int16_t ar9287GetNfAdjust(struct ath_hal *ah, const HAL_CHANNEL_INTERNAL *c);
+
+static void
+ar9287WriteRegs(struct ath_hal *ah, u_int modesIndex, u_int freqIndex,
+ int writes)
+{
+ (void) ath_hal_ini_write(ah, &AH5416(ah)->ah_ini_bb_rfgain,
+ freqIndex, writes);
+}
+
+/*
+ * Take the MHz channel value and set the Channel value
+ *
+ * ASSUMES: Writes enabled to analog bus
+ *
+ * Actual Expression,
+ *
+ * For 2GHz channel,
+ * Channel Frequency = (3/4) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^17)
+ * (freq_ref = 40MHz)
+ *
+ * For 5GHz channel,
+ * Channel Frequency = (3/2) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^10)
+ * (freq_ref = 40MHz/(24>>amodeRefSel))
+ *
+ * For 5GHz channels which are 5MHz spaced,
+ * Channel Frequency = (3/2) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^17)
+ * (freq_ref = 40MHz)
+ */
+static HAL_BOOL
+ar9287SetChannel(struct ath_hal *ah, const struct ieee80211_channel *chan)
+{
+ uint16_t bMode, fracMode, aModeRefSel = 0;
+ uint32_t freq, ndiv, channelSel = 0, channelFrac = 0, reg32 = 0;
+ CHAN_CENTERS centers;
+ uint32_t refDivA = 24;
+
+ OS_MARK(ah, AH_MARK_SETCHANNEL, chan->ic_freq);
+
+ ar5416GetChannelCenters(ah, chan, &centers);
+ freq = centers.synth_center;
+
+ reg32 = OS_REG_READ(ah, AR_PHY_SYNTH_CONTROL);
+ reg32 &= 0xc0000000;
+
+ if (freq < 4800) { /* 2 GHz, fractional mode */
+ uint32_t txctl;
+ int regWrites = 0;
+
+ bMode = 1;
+ fracMode = 1;
+ aModeRefSel = 0;
+ channelSel = (freq * 0x10000)/15;
+
+ if (AR_SREV_KIWI_11_OR_LATER(ah)) {
+ if (freq == 2484) {
+ ath_hal_ini_write(ah,
+ &AH9287(ah)->ah_ini_cckFirJapan2484, 1,
+ regWrites);
+ } else {
+ ath_hal_ini_write(ah,
+ &AH9287(ah)->ah_ini_cckFirNormal, 1,
+ regWrites);
+ }
+ }
+
+ txctl = OS_REG_READ(ah, AR_PHY_CCK_TX_CTRL);
+ if (freq == 2484) {
+ /* Enable channel spreading for channel 14 */
+ OS_REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
+ txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
+ } else {
+ OS_REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
+ txctl &~ AR_PHY_CCK_TX_CTRL_JAPAN);
+ }
+ } else {
+ bMode = 0;
+ fracMode = 0;
+
+ if ((freq % 20) == 0) {
+ aModeRefSel = 3;
+ } else if ((freq % 10) == 0) {
+ aModeRefSel = 2;
+ } else {
+ aModeRefSel = 0;
+ /*
+ * Enable 2G (fractional) mode for channels which
+ * are 5MHz spaced
+ */
+ fracMode = 1;
+ refDivA = 1;
+ channelSel = (freq * 0x8000)/15;
+
+ /* RefDivA setting */
+ OS_A_REG_RMW_FIELD(ah, AR_AN_SYNTH9,
+ AR_AN_SYNTH9_REFDIVA, refDivA);
+ }
+ if (!fracMode) {
+ ndiv = (freq * (refDivA >> aModeRefSel))/60;
+ channelSel = ndiv & 0x1ff;
+ channelFrac = (ndiv & 0xfffffe00) * 2;
+ channelSel = (channelSel << 17) | channelFrac;
+ }
+ }
+
+ reg32 = reg32 | (bMode << 29) | (fracMode << 28) |
+ (aModeRefSel << 26) | (channelSel);
+
+ OS_REG_WRITE(ah, AR_PHY_SYNTH_CONTROL, reg32);
+
+ AH_PRIVATE(ah)->ah_curchan = chan;
+
+ return AH_TRUE;
+}
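The 2 GHz fractional-mode assignment channelSel = (freq * 0x10000)/15 is the comment block's formula rearranged: with freq_ref = 40 MHz, freq = (3/4) * 40 * chansel / 2^17, so chansel = freq * 2^17 / 30 = freq * 0x10000 / 15. A worked check for channel 1 (2412 MHz), as a standalone sketch:

#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint64_t freq = 2412;	/* MHz; 2 GHz channel 1 */

	/* freq * 2^17 / 30 and freq * 2^16 / 15 are the same value */
	assert((freq * 0x20000) / 30 == (freq * 0x10000) / 15);
	assert((freq * 0x10000) / 15 == 10538188);
	return (0);
}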
+
+/*
+ * Return a reference to the requested RF Bank.
+ */
+static uint32_t *
+ar9287GetRfBank(struct ath_hal *ah, int bank)
+{
+ HALDEBUG(ah, HAL_DEBUG_ANY, "%s: unknown RF Bank %d requested\n",
+ __func__, bank);
+ return AH_NULL;
+}
+
+/*
+ * Reads EEPROM header info from device structure and programs
+ * all rf registers
+ */
+static HAL_BOOL
+ar9287SetRfRegs(struct ath_hal *ah, const struct ieee80211_channel *chan,
+ uint16_t modesIndex, uint16_t *rfXpdGain)
+{
+ return AH_TRUE; /* nothing to do */
+}
+
+/*
+ * Read the transmit power levels from the structures taken from EEPROM
+ * Interpolate read transmit power values for this channel
+ * Organize the transmit power values into a table for writing into the hardware
+ */
+
+static HAL_BOOL
+ar9287SetPowerTable(struct ath_hal *ah, int16_t *pPowerMin, int16_t *pPowerMax,
+ const struct ieee80211_channel *chan, uint16_t *rfXpdGain)
+{
+ return AH_TRUE;
+}
+
+#if 0
+static int16_t
+ar9287GetMinPower(struct ath_hal *ah, EXPN_DATA_PER_CHANNEL_5112 *data)
+{
+ int i, minIndex;
+ int16_t minGain,minPwr,minPcdac,retVal;
+
+ /* Assume NUM_POINTS_XPD0 > 0 */
+ minGain = data->pDataPerXPD[0].xpd_gain;
+ for (minIndex=0,i=1; i<NUM_XPD_PER_CHANNEL; i++) {
+ if (data->pDataPerXPD[i].xpd_gain < minGain) {
+ minIndex = i;
+ minGain = data->pDataPerXPD[i].xpd_gain;
+ }
+ }
+ minPwr = data->pDataPerXPD[minIndex].pwr_t4[0];
+ minPcdac = data->pDataPerXPD[minIndex].pcdac[0];
+ for (i=1; i<NUM_POINTS_XPD0; i++) {
+ if (data->pDataPerXPD[minIndex].pwr_t4[i] < minPwr) {
+ minPwr = data->pDataPerXPD[minIndex].pwr_t4[i];
+ minPcdac = data->pDataPerXPD[minIndex].pcdac[i];
+ }
+ }
+ retVal = minPwr - (minPcdac*2);
+ return(retVal);
+}
+#endif
+
+static HAL_BOOL
+ar9287GetChannelMaxMinPower(struct ath_hal *ah,
+ const struct ieee80211_channel *chan,
+ int16_t *maxPow, int16_t *minPow)
+{
+#if 0
+ struct ath_hal_5212 *ahp = AH5212(ah);
+ int numChannels=0,i,last;
+ int totalD, totalF,totalMin;
+ EXPN_DATA_PER_CHANNEL_5112 *data=AH_NULL;
+ EEPROM_POWER_EXPN_5112 *powerArray=AH_NULL;
+
+ *maxPow = 0;
+ if (IS_CHAN_A(chan)) {
+ powerArray = ahp->ah_modePowerArray5112;
+ data = powerArray[headerInfo11A].pDataPerChannel;
+ numChannels = powerArray[headerInfo11A].numChannels;
+ } else if (IS_CHAN_G(chan) || IS_CHAN_108G(chan)) {
+ /* XXX - is this correct? Should we also use the same power for turbo G? */
+ powerArray = ahp->ah_modePowerArray5112;
+ data = powerArray[headerInfo11G].pDataPerChannel;
+ numChannels = powerArray[headerInfo11G].numChannels;
+ } else if (IS_CHAN_B(chan)) {
+ powerArray = ahp->ah_modePowerArray5112;
+ data = powerArray[headerInfo11B].pDataPerChannel;
+ numChannels = powerArray[headerInfo11B].numChannels;
+ } else {
+ return (AH_TRUE);
+ }
+ /* Make sure the channel is in the range of the TP values
+ * (freq piers)
+ */
+ if ((numChannels < 1) ||
+ (chan->channel < data[0].channelValue) ||
+ (chan->channel > data[numChannels-1].channelValue))
+ return(AH_FALSE);
+
+ /* Linearly interpolate the power value now */
+ for (last=0,i=0;
+ (i<numChannels) && (chan->channel > data[i].channelValue);
+ last=i++);
+ totalD = data[i].channelValue - data[last].channelValue;
+ if (totalD > 0) {
+ totalF = data[i].maxPower_t4 - data[last].maxPower_t4;
+ *maxPow = (int8_t) ((totalF*(chan->channel-data[last].channelValue) + data[last].maxPower_t4*totalD)/totalD);
+
+ totalMin = ar9287GetMinPower(ah,&data[i]) - ar9287GetMinPower(ah, &data[last]);
+ *minPow = (int8_t) ((totalMin*(chan->channel-data[last].channelValue) + ar9287GetMinPower(ah, &data[last])*totalD)/totalD);
+ return (AH_TRUE);
+ } else {
+ if (chan->channel == data[i].channelValue) {
+ *maxPow = data[i].maxPower_t4;
+ *minPow = ar9287GetMinPower(ah, &data[i]);
+ return(AH_TRUE);
+ } else
+ return(AH_FALSE);
+ }
+#else
+ *maxPow = *minPow = 0;
+ return AH_FALSE;
+#endif
+}
+
+/*
+ * The ordering of nfarray is thus:
+ *
+ * nfarray[0]: Chain 0 ctl
+ * nfarray[1]: Chain 1 ctl
+ * nfarray[2]: Chain 2 ctl
+ * nfarray[3]: Chain 0 ext
+ * nfarray[4]: Chain 1 ext
+ * nfarray[5]: Chain 2 ext
+ */
+static void
+ar9287GetNoiseFloor(struct ath_hal *ah, int16_t nfarray[])
+{
+ int16_t nf;
+
+ nf = MS(OS_REG_READ(ah, AR_PHY_CCA), AR9280_PHY_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ HALDEBUG(ah, HAL_DEBUG_NFCAL,
+ "NF calibrated [ctl] [chain 0] is %d\n", nf);
+ nfarray[0] = nf;
+
+ nf = MS(OS_REG_READ(ah, AR_PHY_CH1_CCA), AR9280_PHY_CH1_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ HALDEBUG(ah, HAL_DEBUG_NFCAL,
+ "NF calibrated [ctl] [chain 1] is %d\n", nf);
+ nfarray[1] = nf;
+
+ nf = MS(OS_REG_READ(ah, AR_PHY_EXT_CCA), AR9280_PHY_EXT_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ HALDEBUG(ah, HAL_DEBUG_NFCAL,
+ "NF calibrated [ext] [chain 0] is %d\n", nf);
+ nfarray[3] = nf;
+
+ nf = MS(OS_REG_READ(ah, AR_PHY_CH1_EXT_CCA), AR9280_PHY_CH1_EXT_MINCCA_PWR);
+ if (nf & 0x100)
+ nf = 0 - ((nf ^ 0x1ff) + 1);
+ HALDEBUG(ah, HAL_DEBUG_NFCAL,
+ "NF calibrated [ext] [chain 1] is %d\n", nf);
+ nfarray[4] = nf;
+
+ /* Chain 2 - invalid */
+ nfarray[2] = 0;
+ nfarray[5] = 0;
+}
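The MINCCA fields read above are 9-bit two's complement, and nf = 0 - ((nf ^ 0x1ff) + 1) is the manual negation; a small illustrative helper with a worked value:

/* Illustrative: convert a 9-bit two's complement field to signed. */
static int16_t
nf_from_9bit(uint16_t raw)
{
	raw &= 0x1ff;
	if (raw & 0x100)		/* sign bit set: negative */
		return (-(int16_t)((raw ^ 0x1ff) + 1));
	return ((int16_t)raw);
}
/* e.g. nf_from_9bit(0x1a6) == -90. */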
+
+/*
+ * Adjust NF based on statistical values for 5GHz frequencies.
+ * Stubbed: not used by Kiwi.
+ */
+int16_t
+ar9287GetNfAdjust(struct ath_hal *ah, const HAL_CHANNEL_INTERNAL *c)
+{
+ return 0;
+}
+
+/*
+ * Free memory for analog bank scratch buffers
+ */
+static void
+ar9287RfDetach(struct ath_hal *ah)
+{
+ struct ath_hal_5212 *ahp = AH5212(ah);
+
+ HALASSERT(ahp->ah_rfHal != AH_NULL);
+ ath_hal_free(ahp->ah_rfHal);
+ ahp->ah_rfHal = AH_NULL;
+}
+
+HAL_BOOL
+ar9287RfAttach(struct ath_hal *ah, HAL_STATUS *status)
+{
+ struct ath_hal_5212 *ahp = AH5212(ah);
+ struct ar9287State *priv;
+
+ HALDEBUG(ah, HAL_DEBUG_ATTACH, "%s: attach AR9287 radio\n", __func__);
+
+ HALASSERT(ahp->ah_rfHal == AH_NULL);
+ priv = ath_hal_malloc(sizeof(struct ar9287State));
+ if (priv == AH_NULL) {
+ HALDEBUG(ah, HAL_DEBUG_ANY,
+ "%s: cannot allocate private state\n", __func__);
+ *status = HAL_ENOMEM; /* XXX */
+ return AH_FALSE;
+ }
+ priv->base.rfDetach = ar9287RfDetach;
+ priv->base.writeRegs = ar9287WriteRegs;
+ priv->base.getRfBank = ar9287GetRfBank;
+ priv->base.setChannel = ar9287SetChannel;
+ priv->base.setRfRegs = ar9287SetRfRegs;
+ priv->base.setPowerTable = ar9287SetPowerTable;
+ priv->base.getChannelMaxMinPower = ar9287GetChannelMaxMinPower;
+ priv->base.getNfAdjust = ar9287GetNfAdjust;
+
+ ahp->ah_pcdacTable = priv->pcdacTable;
+ ahp->ah_pcdacTableSize = sizeof(priv->pcdacTable);
+ ahp->ah_rfHal = &priv->base;
+ /*
+ * Set noise floor adjust method; we arrange a
+ * direct call instead of thunking.
+ */
+ AH_PRIVATE(ah)->ah_getNfAdjust = priv->base.getNfAdjust;
+ AH_PRIVATE(ah)->ah_getNoiseFloor = ar9287GetNoiseFloor;
+
+ return AH_TRUE;
+}
diff --git a/sys/dev/ath/ath_hal/ar9002/ar9287.h b/sys/dev/ath/ath_hal/ar9002/ar9287.h
new file mode 100644
index 0000000..90d25ed
--- /dev/null
+++ b/sys/dev/ath/ath_hal/ar9002/ar9287.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2010 Atheros Communications, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _ATH_AR9287_H_
+#define _ATH_AR9287_H_
+
+#include "ar5416/ar5416.h"
+
+/*
+ * This is a chip thing, but it's used here as part of the
+ * ath_hal_9287 struct, so it's convenient to locate the
+ * define here.
+ */
+#define AR9287_TX_GAIN_TABLE_SIZE 22
+
+struct ath_hal_9287 {
+ struct ath_hal_5416 ah_5416;
+
+ HAL_INI_ARRAY ah_ini_xmodes;
+ HAL_INI_ARRAY ah_ini_rxgain;
+ HAL_INI_ARRAY ah_ini_txgain;
+
+ HAL_INI_ARRAY ah_ini_cckFirNormal;
+ HAL_INI_ARRAY ah_ini_cckFirJapan2484;
+
+ int PDADCdelta;
+
+ uint32_t originalGain[AR9287_TX_GAIN_TABLE_SIZE];
+};
+#define AH9287(_ah) ((struct ath_hal_9287 *)(_ah))
+
+#define AR9287_DEFAULT_RXCHAINMASK 3
+#define AR9285_DEFAULT_RXCHAINMASK 1
+#define AR9287_DEFAULT_TXCHAINMASK 3
+#define AR9285_DEFAULT_TXCHAINMASK 1
+
+#define AR_PHY_CCA_NOM_VAL_9287_2GHZ -112
+#define AR_PHY_CCA_NOM_VAL_9287_5GHZ -112
+#define AR_PHY_CCA_MIN_GOOD_VAL_9287_2GHZ -127
+#define AR_PHY_CCA_MIN_GOOD_VAL_9287_5GHZ -122
+#define AR_PHY_CCA_MAX_GOOD_VAL_9287_2GHZ -97
+#define AR_PHY_CCA_MAX_GOOD_VAL_9287_5GHZ -102
+
+extern HAL_BOOL ar9287RfAttach(struct ath_hal *, HAL_STATUS *);
+extern HAL_BOOL ar9287SetAntennaSwitch(struct ath_hal *, HAL_ANT_SETTING);
+
+#endif /* _ATH_AR9287_H_ */
diff --git a/sys/dev/ath/ath_hal/ar9002/ar9287.ini b/sys/dev/ath/ath_hal/ar9002/ar9287.ini
new file mode 100644
index 0000000..7f4ca05
--- /dev/null
+++ b/sys/dev/ath/ath_hal/ar9002/ar9287.ini
@@ -0,0 +1,783 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ * $FreeBSD$
+ */
+
+static const uint32_t ar9287Modes_9287_1_1[][6] = {
+ {0x00001030, 0x00000000, 0x00000000, 0x000002c0, 0x00000160, 0x000001e0},
+ {0x00001070, 0x00000000, 0x00000000, 0x00000318, 0x0000018c, 0x000001e0},
+ {0x000010b0, 0x00000000, 0x00000000, 0x00007c70, 0x00003e38, 0x00001180},
+ {0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008},
+ {0x00008014, 0x00000000, 0x00000000, 0x10801600, 0x08400b00, 0x06e006e0},
+ {0x0000801c, 0x00000000, 0x00000000, 0x12e00057, 0x12e0002b, 0x0988004f},
+ {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810, 0x08f04810},
+ {0x000081d0, 0x00003200, 0x00003200, 0x0000320a, 0x0000320a, 0x0000320a},
+ {0x00008318, 0x00000000, 0x00000000, 0x00006880, 0x00003440, 0x00006880},
+ {0x00009804, 0x00000000, 0x00000000, 0x000003c4, 0x00000300, 0x00000303},
+ {0x00009820, 0x00000000, 0x00000000, 0x02020200, 0x02020200, 0x02020200},
+ {0x00009824, 0x00000000, 0x00000000, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+ {0x00009828, 0x00000000, 0x00000000, 0x3a020001, 0x3a020001, 0x3a020001},
+ {0x00009834, 0x00000000, 0x00000000, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x00009838, 0x00000003, 0x00000003, 0x00000007, 0x00000007, 0x00000007},
+ {0x00009840, 0x206a002e, 0x206a002e, 0x206a012e, 0x206a012e, 0x206a012e},
+ {0x00009844, 0x03720000, 0x03720000, 0x037216a0, 0x037216a0, 0x037216a0},
+ {0x00009850, 0x60000000, 0x60000000, 0x6d4000e2, 0x6c4000e2, 0x6c4000e2},
+ {0x00009858, 0x7c000d00, 0x7c000d00, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e},
+ {0x0000985c, 0x3100005e, 0x3100005e, 0x3139605e, 0x31395d5e, 0x31395d5e},
+ {0x00009860, 0x00058d00, 0x00058d00, 0x00058d20, 0x00058d20, 0x00058d18},
+ {0x00009864, 0x00000e00, 0x00000e00, 0x0001ce00, 0x0001ce00, 0x0001ce00},
+ {0x00009868, 0x000040c0, 0x000040c0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
+ {0x0000986c, 0x00000080, 0x00000080, 0x06903881, 0x06903881, 0x06903881},
+ {0x00009914, 0x00000000, 0x00000000, 0x00001130, 0x00000898, 0x000007d0},
+ {0x00009918, 0x00000000, 0x00000000, 0x00000016, 0x0000000b, 0x00000016},
+ {0x00009924, 0xd00a8a01, 0xd00a8a01, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d},
+ {0x00009944, 0xefbc0000, 0xefbc0000, 0xefbc1010, 0xefbc1010, 0xefbc1010},
+ {0x00009960, 0x00000000, 0x00000000, 0x00000010, 0x00000010, 0x00000010},
+ {0x0000a960, 0x00000000, 0x00000000, 0x00000010, 0x00000010, 0x00000010},
+ {0x00009964, 0x00000000, 0x00000000, 0x00000210, 0x00000210, 0x00000210},
+ {0x0000c968, 0x00000200, 0x00000200, 0x000003ce, 0x000003ce, 0x000003ce},
+ {0x000099b8, 0x00000000, 0x00000000, 0x0000001c, 0x0000001c, 0x0000001c},
+ {0x000099bc, 0x00000000, 0x00000000, 0x00000c00, 0x00000c00, 0x00000c00},
+ {0x000099c0, 0x00000000, 0x00000000, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+ {0x0000a204, 0x00000440, 0x00000440, 0x00000444, 0x00000444, 0x00000444},
+ {0x0000a20c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b20c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a21c, 0x1803800a, 0x1803800a, 0x1883800a, 0x1883800a, 0x1883800a},
+ {0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000},
+ {0x0000a250, 0x00000000, 0x00000000, 0x0004a000, 0x0004a000, 0x0004a000},
+ {0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e},
+ {0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+};
+
+static const uint32_t ar9287Common_9287_1_1[][2] = {
+ /* Addr allmodes */
+ {0x0000000c, 0x00000000},
+ {0x00000030, 0x00020015},
+ {0x00000034, 0x00000005},
+ {0x00000040, 0x00000000},
+ {0x00000044, 0x00000008},
+ {0x00000048, 0x00000008},
+ {0x0000004c, 0x00000010},
+ {0x00000050, 0x00000000},
+ {0x00000054, 0x0000001f},
+ {0x00000800, 0x00000000},
+ {0x00000804, 0x00000000},
+ {0x00000808, 0x00000000},
+ {0x0000080c, 0x00000000},
+ {0x00000810, 0x00000000},
+ {0x00000814, 0x00000000},
+ {0x00000818, 0x00000000},
+ {0x0000081c, 0x00000000},
+ {0x00000820, 0x00000000},
+ {0x00000824, 0x00000000},
+ {0x00001040, 0x002ffc0f},
+ {0x00001044, 0x002ffc0f},
+ {0x00001048, 0x002ffc0f},
+ {0x0000104c, 0x002ffc0f},
+ {0x00001050, 0x002ffc0f},
+ {0x00001054, 0x002ffc0f},
+ {0x00001058, 0x002ffc0f},
+ {0x0000105c, 0x002ffc0f},
+ {0x00001060, 0x002ffc0f},
+ {0x00001064, 0x002ffc0f},
+ {0x00001230, 0x00000000},
+ {0x00001270, 0x00000000},
+ {0x00001038, 0x00000000},
+ {0x00001078, 0x00000000},
+ {0x000010b8, 0x00000000},
+ {0x000010f8, 0x00000000},
+ {0x00001138, 0x00000000},
+ {0x00001178, 0x00000000},
+ {0x000011b8, 0x00000000},
+ {0x000011f8, 0x00000000},
+ {0x00001238, 0x00000000},
+ {0x00001278, 0x00000000},
+ {0x000012b8, 0x00000000},
+ {0x000012f8, 0x00000000},
+ {0x00001338, 0x00000000},
+ {0x00001378, 0x00000000},
+ {0x000013b8, 0x00000000},
+ {0x000013f8, 0x00000000},
+ {0x00001438, 0x00000000},
+ {0x00001478, 0x00000000},
+ {0x000014b8, 0x00000000},
+ {0x000014f8, 0x00000000},
+ {0x00001538, 0x00000000},
+ {0x00001578, 0x00000000},
+ {0x000015b8, 0x00000000},
+ {0x000015f8, 0x00000000},
+ {0x00001638, 0x00000000},
+ {0x00001678, 0x00000000},
+ {0x000016b8, 0x00000000},
+ {0x000016f8, 0x00000000},
+ {0x00001738, 0x00000000},
+ {0x00001778, 0x00000000},
+ {0x000017b8, 0x00000000},
+ {0x000017f8, 0x00000000},
+ {0x0000103c, 0x00000000},
+ {0x0000107c, 0x00000000},
+ {0x000010bc, 0x00000000},
+ {0x000010fc, 0x00000000},
+ {0x0000113c, 0x00000000},
+ {0x0000117c, 0x00000000},
+ {0x000011bc, 0x00000000},
+ {0x000011fc, 0x00000000},
+ {0x0000123c, 0x00000000},
+ {0x0000127c, 0x00000000},
+ {0x000012bc, 0x00000000},
+ {0x000012fc, 0x00000000},
+ {0x0000133c, 0x00000000},
+ {0x0000137c, 0x00000000},
+ {0x000013bc, 0x00000000},
+ {0x000013fc, 0x00000000},
+ {0x0000143c, 0x00000000},
+ {0x0000147c, 0x00000000},
+ {0x00004030, 0x00000002},
+ {0x0000403c, 0x00000002},
+ {0x00004024, 0x0000001f},
+ {0x00004060, 0x00000000},
+ {0x00004064, 0x00000000},
+ {0x00007010, 0x00000033},
+ {0x00007020, 0x00000000},
+ {0x00007034, 0x00000002},
+ {0x00007038, 0x000004c2},
+ {0x00008004, 0x00000000},
+ {0x00008008, 0x00000000},
+ {0x0000800c, 0x00000000},
+ {0x00008018, 0x00000700},
+ {0x00008020, 0x00000000},
+ {0x00008038, 0x00000000},
+ {0x0000803c, 0x00000000},
+ {0x00008048, 0x40000000},
+ {0x00008054, 0x00000000},
+ {0x00008058, 0x00000000},
+ {0x0000805c, 0x000fc78f},
+ {0x00008060, 0x0000000f},
+ {0x00008064, 0x00000000},
+ {0x00008070, 0x00000000},
+ {0x000080c0, 0x2a80001a},
+ {0x000080c4, 0x05dc01e0},
+ {0x000080c8, 0x1f402710},
+ {0x000080cc, 0x01f40000},
+ {0x000080d0, 0x00001e00},
+ {0x000080d4, 0x00000000},
+ {0x000080d8, 0x00400000},
+ {0x000080e0, 0xffffffff},
+ {0x000080e4, 0x0000ffff},
+ {0x000080e8, 0x003f3f3f},
+ {0x000080ec, 0x00000000},
+ {0x000080f0, 0x00000000},
+ {0x000080f4, 0x00000000},
+ {0x000080f8, 0x00000000},
+ {0x000080fc, 0x00020000},
+ {0x00008100, 0x00020000},
+ {0x00008104, 0x00000001},
+ {0x00008108, 0x00000052},
+ {0x0000810c, 0x00000000},
+ {0x00008110, 0x00000168},
+ {0x00008118, 0x000100aa},
+ {0x0000811c, 0x00003210},
+ {0x00008124, 0x00000000},
+ {0x00008128, 0x00000000},
+ {0x0000812c, 0x00000000},
+ {0x00008130, 0x00000000},
+ {0x00008134, 0x00000000},
+ {0x00008138, 0x00000000},
+ {0x0000813c, 0x00000000},
+ {0x00008144, 0xffffffff},
+ {0x00008168, 0x00000000},
+ {0x0000816c, 0x00000000},
+ {0x00008170, 0x18487320},
+ {0x00008174, 0xfaa4fa50},
+ {0x00008178, 0x00000100},
+ {0x0000817c, 0x00000000},
+ {0x000081c0, 0x00000000},
+ {0x000081c4, 0x00000000},
+ {0x000081d4, 0x00000000},
+ {0x000081ec, 0x00000000},
+ {0x000081f0, 0x00000000},
+ {0x000081f4, 0x00000000},
+ {0x000081f8, 0x00000000},
+ {0x000081fc, 0x00000000},
+ {0x00008200, 0x00000000},
+ {0x00008204, 0x00000000},
+ {0x00008208, 0x00000000},
+ {0x0000820c, 0x00000000},
+ {0x00008210, 0x00000000},
+ {0x00008214, 0x00000000},
+ {0x00008218, 0x00000000},
+ {0x0000821c, 0x00000000},
+ {0x00008220, 0x00000000},
+ {0x00008224, 0x00000000},
+ {0x00008228, 0x00000000},
+ {0x0000822c, 0x00000000},
+ {0x00008230, 0x00000000},
+ {0x00008234, 0x00000000},
+ {0x00008238, 0x00000000},
+ {0x0000823c, 0x00000000},
+ {0x00008240, 0x00100000},
+ {0x00008244, 0x0010f400},
+ {0x00008248, 0x00000100},
+ {0x0000824c, 0x0001e800},
+ {0x00008250, 0x00000000},
+ {0x00008254, 0x00000000},
+ {0x00008258, 0x00000000},
+ {0x0000825c, 0x400000ff},
+ {0x00008260, 0x00080922},
+ {0x00008264, 0x88a00010},
+ {0x00008270, 0x00000000},
+ {0x00008274, 0x40000000},
+ {0x00008278, 0x003e4180},
+ {0x0000827c, 0x00000000},
+ {0x00008284, 0x0000002c},
+ {0x00008288, 0x0000002c},
+ {0x0000828c, 0x000000ff},
+ {0x00008294, 0x00000000},
+ {0x00008298, 0x00000000},
+ {0x0000829c, 0x00000000},
+ {0x00008300, 0x00000040},
+ {0x00008314, 0x00000000},
+ {0x00008328, 0x00000000},
+ {0x0000832c, 0x00000007},
+ {0x00008330, 0x00000302},
+ {0x00008334, 0x00000e00},
+ {0x00008338, 0x00ff0000},
+ {0x0000833c, 0x00000000},
+ {0x00008340, 0x000107ff},
+ {0x00008344, 0x01c81043},
+ {0x00008360, 0xffffffff},
+ {0x00008364, 0xffffffff},
+ {0x00008368, 0x00000000},
+ {0x00008370, 0x00000000},
+ {0x00008374, 0x000000ff},
+ {0x00008378, 0x00000000},
+ {0x0000837c, 0x00000000},
+ {0x00008380, 0xffffffff},
+ {0x00008384, 0xffffffff},
+ {0x00008390, 0x0fffffff},
+ {0x00008394, 0x0fffffff},
+ {0x00008398, 0x00000000},
+ {0x0000839c, 0x00000000},
+ {0x000083a0, 0x00000000},
+ {0x00009808, 0x00000000},
+ {0x0000980c, 0xafe68e30},
+ {0x00009810, 0xfd14e000},
+ {0x00009814, 0x9c0a9f6b},
+ {0x0000981c, 0x00000000},
+ {0x0000982c, 0x0000a000},
+ {0x00009830, 0x00000000},
+ {0x0000983c, 0x00200400},
+ {0x0000984c, 0x0040233c},
+ {0x0000a84c, 0x0040233c},
+ {0x00009854, 0x00000044},
+ {0x00009900, 0x00000000},
+ {0x00009904, 0x00000000},
+ {0x00009908, 0x00000000},
+ {0x0000990c, 0x00000000},
+ {0x00009910, 0x10002310},
+ {0x0000991c, 0x10000fff},
+ {0x00009920, 0x04900000},
+ {0x0000a920, 0x04900000},
+ {0x00009928, 0x00000001},
+ {0x0000992c, 0x00000004},
+ {0x00009930, 0x00000000},
+ {0x0000a930, 0x00000000},
+ {0x00009934, 0x1e1f2022},
+ {0x00009938, 0x0a0b0c0d},
+ {0x0000993c, 0x00000000},
+ {0x00009948, 0x9280c00a},
+ {0x0000994c, 0x00020028},
+ {0x00009954, 0x5f3ca3de},
+ {0x00009958, 0x0108ecff},
+ {0x00009940, 0x14750604},
+ {0x0000c95c, 0x004b6a8e},
+ {0x00009970, 0x990bb514},
+ {0x00009974, 0x00000000},
+ {0x00009978, 0x00000001},
+ {0x0000997c, 0x00000000},
+ {0x000099a0, 0x00000000},
+ {0x000099a4, 0x00000001},
+ {0x000099a8, 0x201fff00},
+ {0x000099ac, 0x0c6f0000},
+ {0x000099b0, 0x03051000},
+ {0x000099b4, 0x00000820},
+ {0x000099c4, 0x06336f77},
+ {0x000099c8, 0x6af6532f},
+ {0x000099cc, 0x08f186c8},
+ {0x000099d0, 0x00046384},
+ {0x000099dc, 0x00000000},
+ {0x000099e0, 0x00000000},
+ {0x000099e4, 0xaaaaaaaa},
+ {0x000099e8, 0x3c466478},
+ {0x000099ec, 0x0cc80caa},
+ {0x000099f0, 0x00000000},
+ {0x000099fc, 0x00001042},
+ {0x0000a208, 0x803e4788},
+ {0x0000a210, 0x4080a333},
+ {0x0000a214, 0x40206c10},
+ {0x0000a218, 0x009c4060},
+ {0x0000a220, 0x01834061},
+ {0x0000a224, 0x00000400},
+ {0x0000a228, 0x000003b5},
+ {0x0000a22c, 0x233f7180},
+ {0x0000a234, 0x20202020},
+ {0x0000a238, 0x20202020},
+ {0x0000a23c, 0x13c889af},
+ {0x0000a240, 0x38490a20},
+ {0x0000a244, 0x00000000},
+ {0x0000a248, 0xfffffffc},
+ {0x0000a24c, 0x00000000},
+ {0x0000a254, 0x00000000},
+ {0x0000a258, 0x0cdbd380},
+ {0x0000a25c, 0x0f0f0f01},
+ {0x0000a260, 0xdfa91f01},
+ {0x0000a264, 0x00418a11},
+ {0x0000b264, 0x00418a11},
+ {0x0000a268, 0x00000000},
+ {0x0000a26c, 0x0e79e5c6},
+ {0x0000b26c, 0x0e79e5c6},
+ {0x0000d270, 0x00820820},
+ {0x0000a278, 0x1ce739ce},
+ {0x0000a27c, 0x050701ce},
+ {0x0000d35c, 0x07ffffef},
+ {0x0000d360, 0x0fffffe7},
+ {0x0000d364, 0x17ffffe5},
+ {0x0000d368, 0x1fffffe4},
+ {0x0000d36c, 0x37ffffe3},
+ {0x0000d370, 0x3fffffe3},
+ {0x0000d374, 0x57ffffe3},
+ {0x0000d378, 0x5fffffe2},
+ {0x0000d37c, 0x7fffffe2},
+ {0x0000d380, 0x7f3c7bba},
+ {0x0000d384, 0xf3307ff0},
+ {0x0000a388, 0x0c000000},
+ {0x0000a38c, 0x20202020},
+ {0x0000a390, 0x20202020},
+ {0x0000a394, 0x1ce739ce},
+ {0x0000a398, 0x000001ce},
+ {0x0000b398, 0x000001ce},
+ {0x0000a39c, 0x00000001},
+ {0x0000a3c8, 0x00000246},
+ {0x0000a3cc, 0x20202020},
+ {0x0000a3d0, 0x20202020},
+ {0x0000a3d4, 0x20202020},
+ {0x0000a3dc, 0x1ce739ce},
+ {0x0000a3e0, 0x000001ce},
+ {0x0000a3e4, 0x00000000},
+ {0x0000a3e8, 0x18c43433},
+ {0x0000a3ec, 0x00f70081},
+ {0x0000a3f0, 0x01036a1e},
+ {0x0000a3f4, 0x00000000},
+ {0x0000b3f4, 0x00000000},
+ {0x0000a7d8, 0x000003f1},
+ {0x00007800, 0x00000800},
+ {0x00007804, 0x6c35ffd2},
+ {0x00007808, 0x6db6c000},
+ {0x0000780c, 0x6db6cb30},
+ {0x00007810, 0x6db6cb6c},
+ {0x00007814, 0x0501e200},
+ {0x00007818, 0x0094128d},
+ {0x0000781c, 0x976ee392},
+ {0x00007820, 0xf75ff6fc},
+ {0x00007824, 0x00040000},
+ {0x00007828, 0xdb003012},
+ {0x0000782c, 0x04924914},
+ {0x00007830, 0x21084210},
+ {0x00007834, 0x00140000},
+ {0x00007838, 0x0e4548d8},
+ {0x0000783c, 0x54214514},
+ {0x00007840, 0x02025830},
+ {0x00007844, 0x71c0d388},
+ {0x00007848, 0x934934a8},
+ {0x00007850, 0x00000000},
+ {0x00007854, 0x00000800},
+ {0x00007858, 0x6c35ffd2},
+ {0x0000785c, 0x6db6c000},
+ {0x00007860, 0x6db6cb30},
+ {0x00007864, 0x6db6cb6c},
+ {0x00007868, 0x0501e200},
+ {0x0000786c, 0x0094128d},
+ {0x00007870, 0x976ee392},
+ {0x00007874, 0xf75ff6fc},
+ {0x00007878, 0x00040000},
+ {0x0000787c, 0xdb003012},
+ {0x00007880, 0x04924914},
+ {0x00007884, 0x21084210},
+ {0x00007888, 0x001b6db0},
+ {0x0000788c, 0x00376b63},
+ {0x00007890, 0x06db6db6},
+ {0x00007894, 0x006d8000},
+ {0x00007898, 0x48100000},
+ {0x0000789c, 0x00000000},
+ {0x000078a0, 0x08000000},
+ {0x000078a4, 0x0007ffd8},
+ {0x000078a8, 0x0007ffd8},
+ {0x000078ac, 0x001c0020},
+ {0x000078b0, 0x00060aeb},
+ {0x000078b4, 0x40008080},
+ {0x000078b8, 0x2a850160},
+};
+
+static const uint32_t ar9287Common_normal_cck_fir_coeff_9287_1_1[][2] = {
+ /* Addr allmodes */
+ {0x0000a1f4, 0x00fffeff},
+ {0x0000a1f8, 0x00f5f9ff},
+ {0x0000a1fc, 0xb79f6427},
+};
+
+static const uint32_t ar9287Common_japan_2484_cck_fir_coeff_9287_1_1[][2] = {
+ /* Addr allmodes */
+ {0x0000a1f4, 0x00000000},
+ {0x0000a1f8, 0xefff0301},
+ {0x0000a1fc, 0xca9228ee},
+};
+
+static const uint32_t ar9287Modes_tx_gain_9287_1_1[][6] = {
+ {0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a304, 0x00000000, 0x00000000, 0x00004002, 0x00004002, 0x00004002},
+ {0x0000a308, 0x00000000, 0x00000000, 0x00008004, 0x00008004, 0x00008004},
+ {0x0000a30c, 0x00000000, 0x00000000, 0x0000c00a, 0x0000c00a, 0x0000c00a},
+ {0x0000a310, 0x00000000, 0x00000000, 0x0001000c, 0x0001000c, 0x0001000c},
+ {0x0000a314, 0x00000000, 0x00000000, 0x0001420b, 0x0001420b, 0x0001420b},
+ {0x0000a318, 0x00000000, 0x00000000, 0x0001824a, 0x0001824a, 0x0001824a},
+ {0x0000a31c, 0x00000000, 0x00000000, 0x0001c44a, 0x0001c44a, 0x0001c44a},
+ {0x0000a320, 0x00000000, 0x00000000, 0x0002064a, 0x0002064a, 0x0002064a},
+ {0x0000a324, 0x00000000, 0x00000000, 0x0002484a, 0x0002484a, 0x0002484a},
+ {0x0000a328, 0x00000000, 0x00000000, 0x00028a4a, 0x00028a4a, 0x00028a4a},
+ {0x0000a32c, 0x00000000, 0x00000000, 0x0002cc4a, 0x0002cc4a, 0x0002cc4a},
+ {0x0000a330, 0x00000000, 0x00000000, 0x00030e4a, 0x00030e4a, 0x00030e4a},
+ {0x0000a334, 0x00000000, 0x00000000, 0x00034e8a, 0x00034e8a, 0x00034e8a},
+ {0x0000a338, 0x00000000, 0x00000000, 0x00038e8c, 0x00038e8c, 0x00038e8c},
+ {0x0000a33c, 0x00000000, 0x00000000, 0x0003cecc, 0x0003cecc, 0x0003cecc},
+ {0x0000a340, 0x00000000, 0x00000000, 0x00040ed4, 0x00040ed4, 0x00040ed4},
+ {0x0000a344, 0x00000000, 0x00000000, 0x00044edc, 0x00044edc, 0x00044edc},
+ {0x0000a348, 0x00000000, 0x00000000, 0x00048ede, 0x00048ede, 0x00048ede},
+ {0x0000a34c, 0x00000000, 0x00000000, 0x0004cf1e, 0x0004cf1e, 0x0004cf1e},
+ {0x0000a350, 0x00000000, 0x00000000, 0x00050f5e, 0x00050f5e, 0x00050f5e},
+ {0x0000a354, 0x00000000, 0x00000000, 0x00054f9e, 0x00054f9e, 0x00054f9e},
+ {0x0000a780, 0x00000000, 0x00000000, 0x00000062, 0x00000062, 0x00000062},
+ {0x0000a784, 0x00000000, 0x00000000, 0x00004064, 0x00004064, 0x00004064},
+ {0x0000a788, 0x00000000, 0x00000000, 0x000080a4, 0x000080a4, 0x000080a4},
+ {0x0000a78c, 0x00000000, 0x00000000, 0x0000c0aa, 0x0000c0aa, 0x0000c0aa},
+ {0x0000a790, 0x00000000, 0x00000000, 0x000100ac, 0x000100ac, 0x000100ac},
+ {0x0000a794, 0x00000000, 0x00000000, 0x000140b4, 0x000140b4, 0x000140b4},
+ {0x0000a798, 0x00000000, 0x00000000, 0x000180f4, 0x000180f4, 0x000180f4},
+ {0x0000a79c, 0x00000000, 0x00000000, 0x0001c134, 0x0001c134, 0x0001c134},
+ {0x0000a7a0, 0x00000000, 0x00000000, 0x00020174, 0x00020174, 0x00020174},
+ {0x0000a7a4, 0x00000000, 0x00000000, 0x0002417c, 0x0002417c, 0x0002417c},
+ {0x0000a7a8, 0x00000000, 0x00000000, 0x0002817e, 0x0002817e, 0x0002817e},
+ {0x0000a7ac, 0x00000000, 0x00000000, 0x0002c1be, 0x0002c1be, 0x0002c1be},
+ {0x0000a7b0, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe},
+ {0x0000a7b4, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe},
+ {0x0000a7b8, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe},
+ {0x0000a7bc, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe},
+ {0x0000a7c0, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe},
+ {0x0000a7c4, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe},
+ {0x0000a7c8, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe},
+ {0x0000a7cc, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe},
+ {0x0000a7d0, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe},
+ {0x0000a7d4, 0x00000000, 0x00000000, 0x000301fe, 0x000301fe, 0x000301fe},
+ {0x0000a274, 0x0a180000, 0x0a180000, 0x0a1aa000, 0x0a1aa000, 0x0a1aa000},
+};
+
+static const uint32_t ar9287Modes_rx_gain_9287_1_1[][6] = {
+ {0x00009a00, 0x00000000, 0x00000000, 0x0000a120, 0x0000a120, 0x0000a120},
+ {0x00009a04, 0x00000000, 0x00000000, 0x0000a124, 0x0000a124, 0x0000a124},
+ {0x00009a08, 0x00000000, 0x00000000, 0x0000a128, 0x0000a128, 0x0000a128},
+ {0x00009a0c, 0x00000000, 0x00000000, 0x0000a12c, 0x0000a12c, 0x0000a12c},
+ {0x00009a10, 0x00000000, 0x00000000, 0x0000a130, 0x0000a130, 0x0000a130},
+ {0x00009a14, 0x00000000, 0x00000000, 0x0000a194, 0x0000a194, 0x0000a194},
+ {0x00009a18, 0x00000000, 0x00000000, 0x0000a198, 0x0000a198, 0x0000a198},
+ {0x00009a1c, 0x00000000, 0x00000000, 0x0000a20c, 0x0000a20c, 0x0000a20c},
+ {0x00009a20, 0x00000000, 0x00000000, 0x0000a210, 0x0000a210, 0x0000a210},
+ {0x00009a24, 0x00000000, 0x00000000, 0x0000a284, 0x0000a284, 0x0000a284},
+ {0x00009a28, 0x00000000, 0x00000000, 0x0000a288, 0x0000a288, 0x0000a288},
+ {0x00009a2c, 0x00000000, 0x00000000, 0x0000a28c, 0x0000a28c, 0x0000a28c},
+ {0x00009a30, 0x00000000, 0x00000000, 0x0000a290, 0x0000a290, 0x0000a290},
+ {0x00009a34, 0x00000000, 0x00000000, 0x0000a294, 0x0000a294, 0x0000a294},
+ {0x00009a38, 0x00000000, 0x00000000, 0x0000a2a0, 0x0000a2a0, 0x0000a2a0},
+ {0x00009a3c, 0x00000000, 0x00000000, 0x0000a2a4, 0x0000a2a4, 0x0000a2a4},
+ {0x00009a40, 0x00000000, 0x00000000, 0x0000a2a8, 0x0000a2a8, 0x0000a2a8},
+ {0x00009a44, 0x00000000, 0x00000000, 0x0000a2ac, 0x0000a2ac, 0x0000a2ac},
+ {0x00009a48, 0x00000000, 0x00000000, 0x0000a2b0, 0x0000a2b0, 0x0000a2b0},
+ {0x00009a4c, 0x00000000, 0x00000000, 0x0000a2b4, 0x0000a2b4, 0x0000a2b4},
+ {0x00009a50, 0x00000000, 0x00000000, 0x0000a2b8, 0x0000a2b8, 0x0000a2b8},
+ {0x00009a54, 0x00000000, 0x00000000, 0x0000a2c4, 0x0000a2c4, 0x0000a2c4},
+ {0x00009a58, 0x00000000, 0x00000000, 0x0000a708, 0x0000a708, 0x0000a708},
+ {0x00009a5c, 0x00000000, 0x00000000, 0x0000a70c, 0x0000a70c, 0x0000a70c},
+ {0x00009a60, 0x00000000, 0x00000000, 0x0000a710, 0x0000a710, 0x0000a710},
+ {0x00009a64, 0x00000000, 0x00000000, 0x0000ab04, 0x0000ab04, 0x0000ab04},
+ {0x00009a68, 0x00000000, 0x00000000, 0x0000ab08, 0x0000ab08, 0x0000ab08},
+ {0x00009a6c, 0x00000000, 0x00000000, 0x0000ab0c, 0x0000ab0c, 0x0000ab0c},
+ {0x00009a70, 0x00000000, 0x00000000, 0x0000ab10, 0x0000ab10, 0x0000ab10},
+ {0x00009a74, 0x00000000, 0x00000000, 0x0000ab14, 0x0000ab14, 0x0000ab14},
+ {0x00009a78, 0x00000000, 0x00000000, 0x0000ab18, 0x0000ab18, 0x0000ab18},
+ {0x00009a7c, 0x00000000, 0x00000000, 0x0000ab8c, 0x0000ab8c, 0x0000ab8c},
+ {0x00009a80, 0x00000000, 0x00000000, 0x0000ab90, 0x0000ab90, 0x0000ab90},
+ {0x00009a84, 0x00000000, 0x00000000, 0x0000ab94, 0x0000ab94, 0x0000ab94},
+ {0x00009a88, 0x00000000, 0x00000000, 0x0000ab98, 0x0000ab98, 0x0000ab98},
+ {0x00009a8c, 0x00000000, 0x00000000, 0x0000aba4, 0x0000aba4, 0x0000aba4},
+ {0x00009a90, 0x00000000, 0x00000000, 0x0000aba8, 0x0000aba8, 0x0000aba8},
+ {0x00009a94, 0x00000000, 0x00000000, 0x0000cb04, 0x0000cb04, 0x0000cb04},
+ {0x00009a98, 0x00000000, 0x00000000, 0x0000cb08, 0x0000cb08, 0x0000cb08},
+ {0x00009a9c, 0x00000000, 0x00000000, 0x0000cb0c, 0x0000cb0c, 0x0000cb0c},
+ {0x00009aa0, 0x00000000, 0x00000000, 0x0000cb10, 0x0000cb10, 0x0000cb10},
+ {0x00009aa4, 0x00000000, 0x00000000, 0x0000cb14, 0x0000cb14, 0x0000cb14},
+ {0x00009aa8, 0x00000000, 0x00000000, 0x0000cb18, 0x0000cb18, 0x0000cb18},
+ {0x00009aac, 0x00000000, 0x00000000, 0x0000cb8c, 0x0000cb8c, 0x0000cb8c},
+ {0x00009ab0, 0x00000000, 0x00000000, 0x0000cb90, 0x0000cb90, 0x0000cb90},
+ {0x00009ab4, 0x00000000, 0x00000000, 0x0000cf18, 0x0000cf18, 0x0000cf18},
+ {0x00009ab8, 0x00000000, 0x00000000, 0x0000cf24, 0x0000cf24, 0x0000cf24},
+ {0x00009abc, 0x00000000, 0x00000000, 0x0000cf28, 0x0000cf28, 0x0000cf28},
+ {0x00009ac0, 0x00000000, 0x00000000, 0x0000d314, 0x0000d314, 0x0000d314},
+ {0x00009ac4, 0x00000000, 0x00000000, 0x0000d318, 0x0000d318, 0x0000d318},
+ {0x00009ac8, 0x00000000, 0x00000000, 0x0000d38c, 0x0000d38c, 0x0000d38c},
+ {0x00009acc, 0x00000000, 0x00000000, 0x0000d390, 0x0000d390, 0x0000d390},
+ {0x00009ad0, 0x00000000, 0x00000000, 0x0000d394, 0x0000d394, 0x0000d394},
+ {0x00009ad4, 0x00000000, 0x00000000, 0x0000d398, 0x0000d398, 0x0000d398},
+ {0x00009ad8, 0x00000000, 0x00000000, 0x0000d3a4, 0x0000d3a4, 0x0000d3a4},
+ {0x00009adc, 0x00000000, 0x00000000, 0x0000d3a8, 0x0000d3a8, 0x0000d3a8},
+ {0x00009ae0, 0x00000000, 0x00000000, 0x0000d3ac, 0x0000d3ac, 0x0000d3ac},
+ {0x00009ae4, 0x00000000, 0x00000000, 0x0000d3b0, 0x0000d3b0, 0x0000d3b0},
+ {0x00009ae8, 0x00000000, 0x00000000, 0x0000f380, 0x0000f380, 0x0000f380},
+ {0x00009aec, 0x00000000, 0x00000000, 0x0000f384, 0x0000f384, 0x0000f384},
+ {0x00009af0, 0x00000000, 0x00000000, 0x0000f388, 0x0000f388, 0x0000f388},
+ {0x00009af4, 0x00000000, 0x00000000, 0x0000f710, 0x0000f710, 0x0000f710},
+ {0x00009af8, 0x00000000, 0x00000000, 0x0000f714, 0x0000f714, 0x0000f714},
+ {0x00009afc, 0x00000000, 0x00000000, 0x0000f718, 0x0000f718, 0x0000f718},
+ {0x00009b00, 0x00000000, 0x00000000, 0x0000fb10, 0x0000fb10, 0x0000fb10},
+ {0x00009b04, 0x00000000, 0x00000000, 0x0000fb14, 0x0000fb14, 0x0000fb14},
+ {0x00009b08, 0x00000000, 0x00000000, 0x0000fb18, 0x0000fb18, 0x0000fb18},
+ {0x00009b0c, 0x00000000, 0x00000000, 0x0000fb8c, 0x0000fb8c, 0x0000fb8c},
+ {0x00009b10, 0x00000000, 0x00000000, 0x0000fb90, 0x0000fb90, 0x0000fb90},
+ {0x00009b14, 0x00000000, 0x00000000, 0x0000fb94, 0x0000fb94, 0x0000fb94},
+ {0x00009b18, 0x00000000, 0x00000000, 0x0000ff8c, 0x0000ff8c, 0x0000ff8c},
+ {0x00009b1c, 0x00000000, 0x00000000, 0x0000ff90, 0x0000ff90, 0x0000ff90},
+ {0x00009b20, 0x00000000, 0x00000000, 0x0000ff94, 0x0000ff94, 0x0000ff94},
+ {0x00009b24, 0x00000000, 0x00000000, 0x0000ffa0, 0x0000ffa0, 0x0000ffa0},
+ {0x00009b28, 0x00000000, 0x00000000, 0x0000ffa4, 0x0000ffa4, 0x0000ffa4},
+ {0x00009b2c, 0x00000000, 0x00000000, 0x0000ffa8, 0x0000ffa8, 0x0000ffa8},
+ {0x00009b30, 0x00000000, 0x00000000, 0x0000ffac, 0x0000ffac, 0x0000ffac},
+ {0x00009b34, 0x00000000, 0x00000000, 0x0000ffb0, 0x0000ffb0, 0x0000ffb0},
+ {0x00009b38, 0x00000000, 0x00000000, 0x0000ffb4, 0x0000ffb4, 0x0000ffb4},
+ {0x00009b3c, 0x00000000, 0x00000000, 0x0000ffa1, 0x0000ffa1, 0x0000ffa1},
+ {0x00009b40, 0x00000000, 0x00000000, 0x0000ffa5, 0x0000ffa5, 0x0000ffa5},
+ {0x00009b44, 0x00000000, 0x00000000, 0x0000ffa9, 0x0000ffa9, 0x0000ffa9},
+ {0x00009b48, 0x00000000, 0x00000000, 0x0000ffad, 0x0000ffad, 0x0000ffad},
+ {0x00009b4c, 0x00000000, 0x00000000, 0x0000ffb1, 0x0000ffb1, 0x0000ffb1},
+ {0x00009b50, 0x00000000, 0x00000000, 0x0000ffb5, 0x0000ffb5, 0x0000ffb5},
+ {0x00009b54, 0x00000000, 0x00000000, 0x0000ffb9, 0x0000ffb9, 0x0000ffb9},
+ {0x00009b58, 0x00000000, 0x00000000, 0x0000ffc5, 0x0000ffc5, 0x0000ffc5},
+ {0x00009b5c, 0x00000000, 0x00000000, 0x0000ffc9, 0x0000ffc9, 0x0000ffc9},
+ {0x00009b60, 0x00000000, 0x00000000, 0x0000ffcd, 0x0000ffcd, 0x0000ffcd},
+ {0x00009b64, 0x00000000, 0x00000000, 0x0000ffd1, 0x0000ffd1, 0x0000ffd1},
+ {0x00009b68, 0x00000000, 0x00000000, 0x0000ffd5, 0x0000ffd5, 0x0000ffd5},
+ {0x00009b6c, 0x00000000, 0x00000000, 0x0000ffc2, 0x0000ffc2, 0x0000ffc2},
+ {0x00009b70, 0x00000000, 0x00000000, 0x0000ffc6, 0x0000ffc6, 0x0000ffc6},
+ {0x00009b74, 0x00000000, 0x00000000, 0x0000ffca, 0x0000ffca, 0x0000ffca},
+ {0x00009b78, 0x00000000, 0x00000000, 0x0000ffce, 0x0000ffce, 0x0000ffce},
+ {0x00009b7c, 0x00000000, 0x00000000, 0x0000ffd2, 0x0000ffd2, 0x0000ffd2},
+ {0x00009b80, 0x00000000, 0x00000000, 0x0000ffd6, 0x0000ffd6, 0x0000ffd6},
+ {0x00009b84, 0x00000000, 0x00000000, 0x0000ffda, 0x0000ffda, 0x0000ffda},
+ {0x00009b88, 0x00000000, 0x00000000, 0x0000ffc7, 0x0000ffc7, 0x0000ffc7},
+ {0x00009b8c, 0x00000000, 0x00000000, 0x0000ffcb, 0x0000ffcb, 0x0000ffcb},
+ {0x00009b90, 0x00000000, 0x00000000, 0x0000ffcf, 0x0000ffcf, 0x0000ffcf},
+ {0x00009b94, 0x00000000, 0x00000000, 0x0000ffd3, 0x0000ffd3, 0x0000ffd3},
+ {0x00009b98, 0x00000000, 0x00000000, 0x0000ffd7, 0x0000ffd7, 0x0000ffd7},
+ {0x00009b9c, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x00009ba0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x00009ba4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x00009ba8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bac, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bb0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bb4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bb8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bbc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bc0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bc4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bc8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bcc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bd0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bd4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bd8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bdc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x00009be0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x00009be4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x00009be8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bec, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bf0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bf4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bf8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x00009bfc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x0000aa00, 0x00000000, 0x00000000, 0x0000a120, 0x0000a120, 0x0000a120},
+ {0x0000aa04, 0x00000000, 0x00000000, 0x0000a124, 0x0000a124, 0x0000a124},
+ {0x0000aa08, 0x00000000, 0x00000000, 0x0000a128, 0x0000a128, 0x0000a128},
+ {0x0000aa0c, 0x00000000, 0x00000000, 0x0000a12c, 0x0000a12c, 0x0000a12c},
+ {0x0000aa10, 0x00000000, 0x00000000, 0x0000a130, 0x0000a130, 0x0000a130},
+ {0x0000aa14, 0x00000000, 0x00000000, 0x0000a194, 0x0000a194, 0x0000a194},
+ {0x0000aa18, 0x00000000, 0x00000000, 0x0000a198, 0x0000a198, 0x0000a198},
+ {0x0000aa1c, 0x00000000, 0x00000000, 0x0000a20c, 0x0000a20c, 0x0000a20c},
+ {0x0000aa20, 0x00000000, 0x00000000, 0x0000a210, 0x0000a210, 0x0000a210},
+ {0x0000aa24, 0x00000000, 0x00000000, 0x0000a284, 0x0000a284, 0x0000a284},
+ {0x0000aa28, 0x00000000, 0x00000000, 0x0000a288, 0x0000a288, 0x0000a288},
+ {0x0000aa2c, 0x00000000, 0x00000000, 0x0000a28c, 0x0000a28c, 0x0000a28c},
+ {0x0000aa30, 0x00000000, 0x00000000, 0x0000a290, 0x0000a290, 0x0000a290},
+ {0x0000aa34, 0x00000000, 0x00000000, 0x0000a294, 0x0000a294, 0x0000a294},
+ {0x0000aa38, 0x00000000, 0x00000000, 0x0000a2a0, 0x0000a2a0, 0x0000a2a0},
+ {0x0000aa3c, 0x00000000, 0x00000000, 0x0000a2a4, 0x0000a2a4, 0x0000a2a4},
+ {0x0000aa40, 0x00000000, 0x00000000, 0x0000a2a8, 0x0000a2a8, 0x0000a2a8},
+ {0x0000aa44, 0x00000000, 0x00000000, 0x0000a2ac, 0x0000a2ac, 0x0000a2ac},
+ {0x0000aa48, 0x00000000, 0x00000000, 0x0000a2b0, 0x0000a2b0, 0x0000a2b0},
+ {0x0000aa4c, 0x00000000, 0x00000000, 0x0000a2b4, 0x0000a2b4, 0x0000a2b4},
+ {0x0000aa50, 0x00000000, 0x00000000, 0x0000a2b8, 0x0000a2b8, 0x0000a2b8},
+ {0x0000aa54, 0x00000000, 0x00000000, 0x0000a2c4, 0x0000a2c4, 0x0000a2c4},
+ {0x0000aa58, 0x00000000, 0x00000000, 0x0000a708, 0x0000a708, 0x0000a708},
+ {0x0000aa5c, 0x00000000, 0x00000000, 0x0000a70c, 0x0000a70c, 0x0000a70c},
+ {0x0000aa60, 0x00000000, 0x00000000, 0x0000a710, 0x0000a710, 0x0000a710},
+ {0x0000aa64, 0x00000000, 0x00000000, 0x0000ab04, 0x0000ab04, 0x0000ab04},
+ {0x0000aa68, 0x00000000, 0x00000000, 0x0000ab08, 0x0000ab08, 0x0000ab08},
+ {0x0000aa6c, 0x00000000, 0x00000000, 0x0000ab0c, 0x0000ab0c, 0x0000ab0c},
+ {0x0000aa70, 0x00000000, 0x00000000, 0x0000ab10, 0x0000ab10, 0x0000ab10},
+ {0x0000aa74, 0x00000000, 0x00000000, 0x0000ab14, 0x0000ab14, 0x0000ab14},
+ {0x0000aa78, 0x00000000, 0x00000000, 0x0000ab18, 0x0000ab18, 0x0000ab18},
+ {0x0000aa7c, 0x00000000, 0x00000000, 0x0000ab8c, 0x0000ab8c, 0x0000ab8c},
+ {0x0000aa80, 0x00000000, 0x00000000, 0x0000ab90, 0x0000ab90, 0x0000ab90},
+ {0x0000aa84, 0x00000000, 0x00000000, 0x0000ab94, 0x0000ab94, 0x0000ab94},
+ {0x0000aa88, 0x00000000, 0x00000000, 0x0000ab98, 0x0000ab98, 0x0000ab98},
+ {0x0000aa8c, 0x00000000, 0x00000000, 0x0000aba4, 0x0000aba4, 0x0000aba4},
+ {0x0000aa90, 0x00000000, 0x00000000, 0x0000aba8, 0x0000aba8, 0x0000aba8},
+ {0x0000aa94, 0x00000000, 0x00000000, 0x0000cb04, 0x0000cb04, 0x0000cb04},
+ {0x0000aa98, 0x00000000, 0x00000000, 0x0000cb08, 0x0000cb08, 0x0000cb08},
+ {0x0000aa9c, 0x00000000, 0x00000000, 0x0000cb0c, 0x0000cb0c, 0x0000cb0c},
+ {0x0000aaa0, 0x00000000, 0x00000000, 0x0000cb10, 0x0000cb10, 0x0000cb10},
+ {0x0000aaa4, 0x00000000, 0x00000000, 0x0000cb14, 0x0000cb14, 0x0000cb14},
+ {0x0000aaa8, 0x00000000, 0x00000000, 0x0000cb18, 0x0000cb18, 0x0000cb18},
+ {0x0000aaac, 0x00000000, 0x00000000, 0x0000cb8c, 0x0000cb8c, 0x0000cb8c},
+ {0x0000aab0, 0x00000000, 0x00000000, 0x0000cb90, 0x0000cb90, 0x0000cb90},
+ {0x0000aab4, 0x00000000, 0x00000000, 0x0000cf18, 0x0000cf18, 0x0000cf18},
+ {0x0000aab8, 0x00000000, 0x00000000, 0x0000cf24, 0x0000cf24, 0x0000cf24},
+ {0x0000aabc, 0x00000000, 0x00000000, 0x0000cf28, 0x0000cf28, 0x0000cf28},
+ {0x0000aac0, 0x00000000, 0x00000000, 0x0000d314, 0x0000d314, 0x0000d314},
+ {0x0000aac4, 0x00000000, 0x00000000, 0x0000d318, 0x0000d318, 0x0000d318},
+ {0x0000aac8, 0x00000000, 0x00000000, 0x0000d38c, 0x0000d38c, 0x0000d38c},
+ {0x0000aacc, 0x00000000, 0x00000000, 0x0000d390, 0x0000d390, 0x0000d390},
+ {0x0000aad0, 0x00000000, 0x00000000, 0x0000d394, 0x0000d394, 0x0000d394},
+ {0x0000aad4, 0x00000000, 0x00000000, 0x0000d398, 0x0000d398, 0x0000d398},
+ {0x0000aad8, 0x00000000, 0x00000000, 0x0000d3a4, 0x0000d3a4, 0x0000d3a4},
+ {0x0000aadc, 0x00000000, 0x00000000, 0x0000d3a8, 0x0000d3a8, 0x0000d3a8},
+ {0x0000aae0, 0x00000000, 0x00000000, 0x0000d3ac, 0x0000d3ac, 0x0000d3ac},
+ {0x0000aae4, 0x00000000, 0x00000000, 0x0000d3b0, 0x0000d3b0, 0x0000d3b0},
+ {0x0000aae8, 0x00000000, 0x00000000, 0x0000f380, 0x0000f380, 0x0000f380},
+ {0x0000aaec, 0x00000000, 0x00000000, 0x0000f384, 0x0000f384, 0x0000f384},
+ {0x0000aaf0, 0x00000000, 0x00000000, 0x0000f388, 0x0000f388, 0x0000f388},
+ {0x0000aaf4, 0x00000000, 0x00000000, 0x0000f710, 0x0000f710, 0x0000f710},
+ {0x0000aaf8, 0x00000000, 0x00000000, 0x0000f714, 0x0000f714, 0x0000f714},
+ {0x0000aafc, 0x00000000, 0x00000000, 0x0000f718, 0x0000f718, 0x0000f718},
+ {0x0000ab00, 0x00000000, 0x00000000, 0x0000fb10, 0x0000fb10, 0x0000fb10},
+ {0x0000ab04, 0x00000000, 0x00000000, 0x0000fb14, 0x0000fb14, 0x0000fb14},
+ {0x0000ab08, 0x00000000, 0x00000000, 0x0000fb18, 0x0000fb18, 0x0000fb18},
+ {0x0000ab0c, 0x00000000, 0x00000000, 0x0000fb8c, 0x0000fb8c, 0x0000fb8c},
+ {0x0000ab10, 0x00000000, 0x00000000, 0x0000fb90, 0x0000fb90, 0x0000fb90},
+ {0x0000ab14, 0x00000000, 0x00000000, 0x0000fb94, 0x0000fb94, 0x0000fb94},
+ {0x0000ab18, 0x00000000, 0x00000000, 0x0000ff8c, 0x0000ff8c, 0x0000ff8c},
+ {0x0000ab1c, 0x00000000, 0x00000000, 0x0000ff90, 0x0000ff90, 0x0000ff90},
+ {0x0000ab20, 0x00000000, 0x00000000, 0x0000ff94, 0x0000ff94, 0x0000ff94},
+ {0x0000ab24, 0x00000000, 0x00000000, 0x0000ffa0, 0x0000ffa0, 0x0000ffa0},
+ {0x0000ab28, 0x00000000, 0x00000000, 0x0000ffa4, 0x0000ffa4, 0x0000ffa4},
+ {0x0000ab2c, 0x00000000, 0x00000000, 0x0000ffa8, 0x0000ffa8, 0x0000ffa8},
+ {0x0000ab30, 0x00000000, 0x00000000, 0x0000ffac, 0x0000ffac, 0x0000ffac},
+ {0x0000ab34, 0x00000000, 0x00000000, 0x0000ffb0, 0x0000ffb0, 0x0000ffb0},
+ {0x0000ab38, 0x00000000, 0x00000000, 0x0000ffb4, 0x0000ffb4, 0x0000ffb4},
+ {0x0000ab3c, 0x00000000, 0x00000000, 0x0000ffa1, 0x0000ffa1, 0x0000ffa1},
+ {0x0000ab40, 0x00000000, 0x00000000, 0x0000ffa5, 0x0000ffa5, 0x0000ffa5},
+ {0x0000ab44, 0x00000000, 0x00000000, 0x0000ffa9, 0x0000ffa9, 0x0000ffa9},
+ {0x0000ab48, 0x00000000, 0x00000000, 0x0000ffad, 0x0000ffad, 0x0000ffad},
+ {0x0000ab4c, 0x00000000, 0x00000000, 0x0000ffb1, 0x0000ffb1, 0x0000ffb1},
+ {0x0000ab50, 0x00000000, 0x00000000, 0x0000ffb5, 0x0000ffb5, 0x0000ffb5},
+ {0x0000ab54, 0x00000000, 0x00000000, 0x0000ffb9, 0x0000ffb9, 0x0000ffb9},
+ {0x0000ab58, 0x00000000, 0x00000000, 0x0000ffc5, 0x0000ffc5, 0x0000ffc5},
+ {0x0000ab5c, 0x00000000, 0x00000000, 0x0000ffc9, 0x0000ffc9, 0x0000ffc9},
+ {0x0000ab60, 0x00000000, 0x00000000, 0x0000ffcd, 0x0000ffcd, 0x0000ffcd},
+ {0x0000ab64, 0x00000000, 0x00000000, 0x0000ffd1, 0x0000ffd1, 0x0000ffd1},
+ {0x0000ab68, 0x00000000, 0x00000000, 0x0000ffd5, 0x0000ffd5, 0x0000ffd5},
+ {0x0000ab6c, 0x00000000, 0x00000000, 0x0000ffc2, 0x0000ffc2, 0x0000ffc2},
+ {0x0000ab70, 0x00000000, 0x00000000, 0x0000ffc6, 0x0000ffc6, 0x0000ffc6},
+ {0x0000ab74, 0x00000000, 0x00000000, 0x0000ffca, 0x0000ffca, 0x0000ffca},
+ {0x0000ab78, 0x00000000, 0x00000000, 0x0000ffce, 0x0000ffce, 0x0000ffce},
+ {0x0000ab7c, 0x00000000, 0x00000000, 0x0000ffd2, 0x0000ffd2, 0x0000ffd2},
+ {0x0000ab80, 0x00000000, 0x00000000, 0x0000ffd6, 0x0000ffd6, 0x0000ffd6},
+ {0x0000ab84, 0x00000000, 0x00000000, 0x0000ffda, 0x0000ffda, 0x0000ffda},
+ {0x0000ab88, 0x00000000, 0x00000000, 0x0000ffc7, 0x0000ffc7, 0x0000ffc7},
+ {0x0000ab8c, 0x00000000, 0x00000000, 0x0000ffcb, 0x0000ffcb, 0x0000ffcb},
+ {0x0000ab90, 0x00000000, 0x00000000, 0x0000ffcf, 0x0000ffcf, 0x0000ffcf},
+ {0x0000ab94, 0x00000000, 0x00000000, 0x0000ffd3, 0x0000ffd3, 0x0000ffd3},
+ {0x0000ab98, 0x00000000, 0x00000000, 0x0000ffd7, 0x0000ffd7, 0x0000ffd7},
+ {0x0000ab9c, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x0000aba0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x0000aba4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x0000aba8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abac, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abb0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abb4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abb8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abbc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abc0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abc4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abc8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abcc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abd0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abd4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abd8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abdc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abe0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abe4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abe8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abec, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abf0, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abf4, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abf8, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x0000abfc, 0x00000000, 0x00000000, 0x0000ffdb, 0x0000ffdb, 0x0000ffdb},
+ {0x00009848, 0x00000000, 0x00000000, 0x00001067, 0x00001067, 0x00001067},
+ {0x0000a848, 0x00000000, 0x00000000, 0x00001067, 0x00001067, 0x00001067},
+};
+
+static const uint32_t ar9287PciePhy_clkreq_always_on_L1_9287_1_1[][2] = {
+ /* Addr allmodes */
+ {0x00004040, 0x9248fd00},
+ {0x00004040, 0x24924924},
+ {0x00004040, 0xa8000019},
+ {0x00004040, 0x13160820},
+ {0x00004040, 0xe5980560},
+ {0x00004040, 0xc01dcffd},
+ {0x00004040, 0x1aaabe41},
+ {0x00004040, 0xbe105554},
+ {0x00004040, 0x00043007},
+ {0x00004044, 0x00000000},
+};
+
+static const uint32_t ar9287PciePhy_clkreq_off_L1_9287_1_1[][2] = {
+ /* Addr allmodes */
+ {0x00004040, 0x9248fd00},
+ {0x00004040, 0x24924924},
+ {0x00004040, 0xa8000019},
+ {0x00004040, 0x13160820},
+ {0x00004040, 0xe5980560},
+ {0x00004040, 0xc01dcffc},
+ {0x00004040, 0x1aaabe41},
+ {0x00004040, 0xbe105554},
+ {0x00004040, 0x00043007},
+ {0x00004044, 0x00000000},
+};
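
Note that the two PCIe PHY sequences above are identical except for the low bit of their sixth word (0xc01dcffd with CLKREQ held on in L1, 0xc01dcffc with it off); each table is nine successive writes to 0x4040 followed by a terminating write to 0x4044, and is replayed verbatim as a two-column (address, value) ini array by the attach code below.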
diff --git a/sys/dev/ath/ath_hal/ar9002/ar9287_attach.c b/sys/dev/ath/ath_hal/ar9002/ar9287_attach.c
new file mode 100644
index 0000000..9cbe0a5
--- /dev/null
+++ b/sys/dev/ath/ath_hal/ar9002/ar9287_attach.c
@@ -0,0 +1,476 @@
+/*
+ * Copyright (c) 2008-2009 Sam Leffler, Errno Consulting
+ * Copyright (c) 2008 Atheros Communications, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ * $FreeBSD$
+ */
+#include "opt_ah.h"
+
+#include "ah.h"
+#include "ah_internal.h"
+#include "ah_devid.h"
+
+#include "ah_eeprom_v14.h" /* XXX for tx/rx gain */
+#include "ah_eeprom_9287.h"
+
+#include "ar9002/ar9280.h"
+#include "ar9002/ar9287.h"
+#include "ar5416/ar5416reg.h"
+#include "ar5416/ar5416phy.h"
+
+#include "ar9002/ar9287_cal.h"
+#include "ar9002/ar9287_reset.h"
+#include "ar9002/ar9287_olc.h"
+
+#include "ar9002/ar9287.ini"
+
+static const HAL_PERCAL_DATA ar9287_iq_cal = { /* single sample */
+ .calName = "IQ", .calType = IQ_MISMATCH_CAL,
+ .calNumSamples = MIN_CAL_SAMPLES,
+ .calCountMax = PER_MAX_LOG_COUNT,
+ .calCollect = ar5416IQCalCollect,
+ .calPostProc = ar5416IQCalibration
+};
+static const HAL_PERCAL_DATA ar9287_adc_gain_cal = { /* single sample */
+ .calName = "ADC Gain", .calType = ADC_GAIN_CAL,
+ .calNumSamples = MIN_CAL_SAMPLES,
+ .calCountMax = PER_MIN_LOG_COUNT,
+ .calCollect = ar5416AdcGainCalCollect,
+ .calPostProc = ar5416AdcGainCalibration
+};
+static const HAL_PERCAL_DATA ar9287_adc_dc_cal = { /* single sample */
+ .calName = "ADC DC", .calType = ADC_DC_CAL,
+ .calNumSamples = MIN_CAL_SAMPLES,
+ .calCountMax = PER_MIN_LOG_COUNT,
+ .calCollect = ar5416AdcDcCalCollect,
+ .calPostProc = ar5416AdcDcCalibration
+};
+static const HAL_PERCAL_DATA ar9287_adc_init_dc_cal = {
+ .calName = "ADC Init DC", .calType = ADC_DC_INIT_CAL,
+ .calNumSamples = MIN_CAL_SAMPLES,
+ .calCountMax = INIT_LOG_COUNT,
+ .calCollect = ar5416AdcDcCalCollect,
+ .calPostProc = ar5416AdcDcCalibration
+};
+
+static void ar9287ConfigPCIE(struct ath_hal *ah, HAL_BOOL restore);
+static HAL_BOOL ar9287FillCapabilityInfo(struct ath_hal *ah);
+static void ar9287WriteIni(struct ath_hal *ah,
+ const struct ieee80211_channel *chan);
+
+static void
+ar9287AniSetup(struct ath_hal *ah)
+{
+ /*
+ * These are the parameters from the AR5416 ANI code;
+ * they likely need quite a bit of adjustment for the
+ * AR9287.
+ */
+ static const struct ar5212AniParams aniparams = {
+ .maxNoiseImmunityLevel = 4, /* levels 0..4 */
+ .totalSizeDesired = { -55, -55, -55, -55, -62 },
+ .coarseHigh = { -14, -14, -14, -14, -12 },
+ .coarseLow = { -64, -64, -64, -64, -70 },
+ .firpwr = { -78, -78, -78, -78, -80 },
+ .maxSpurImmunityLevel = 2,
+ .cycPwrThr1 = { 2, 4, 6 },
+ .maxFirstepLevel = 2, /* levels 0..2 */
+ .firstep = { 0, 4, 8 },
+ .ofdmTrigHigh = 500,
+ .ofdmTrigLow = 200,
+ .cckTrigHigh = 200,
+ .cckTrigLow = 100,
+ .rssiThrHigh = 40,
+ .rssiThrLow = 7,
+ .period = 100,
+ };
+ /* NB: disable ANI noise immunity for reliable RIFS rx */
+ AH5416(ah)->ah_ani_function &= ~HAL_ANI_NOISE_IMMUNITY_LEVEL;
+
+ /* NB: ANI is not enabled yet */
+ ar5416AniAttach(ah, &aniparams, &aniparams, AH_TRUE);
+}
+
+/*
+ * Attach for an AR9287 part.
+ */
+static struct ath_hal *
+ar9287Attach(uint16_t devid, HAL_SOFTC sc,
+ HAL_BUS_TAG st, HAL_BUS_HANDLE sh, uint16_t *eepromdata,
+ HAL_STATUS *status)
+{
+ struct ath_hal_9287 *ahp9287;
+ struct ath_hal_5212 *ahp;
+ struct ath_hal *ah;
+ uint32_t val;
+ HAL_STATUS ecode;
+ HAL_BOOL rfStatus;
+ int8_t pwr_table_offset;
+
+ HALDEBUG(AH_NULL, HAL_DEBUG_ATTACH, "%s: sc %p st %p sh %p\n",
+ __func__, sc, (void*) st, (void*) sh);
+
+ /* NB: memory is returned zero'd */
+ ahp9287 = ath_hal_malloc(sizeof (struct ath_hal_9287));
+ if (ahp9287 == AH_NULL) {
+ HALDEBUG(AH_NULL, HAL_DEBUG_ANY,
+ "%s: cannot allocate memory for state block\n", __func__);
+ *status = HAL_ENOMEM;
+ return AH_NULL;
+ }
+ ahp = AH5212(ahp9287);
+ ah = &ahp->ah_priv.h;
+
+ ar5416InitState(AH5416(ah), devid, sc, st, sh, status);
+
+ /* XXX override with 9280 specific state */
+ /* override 5416 methods for our needs */
+ ah->ah_setAntennaSwitch = ar9287SetAntennaSwitch;
+ ah->ah_configPCIE = ar9287ConfigPCIE;
+
+ AH5416(ah)->ah_cal.iqCalData.calData = &ar9287_iq_cal;
+ AH5416(ah)->ah_cal.adcGainCalData.calData = &ar9287_adc_gain_cal;
+ AH5416(ah)->ah_cal.adcDcCalData.calData = &ar9287_adc_dc_cal;
+ AH5416(ah)->ah_cal.adcDcCalInitData.calData = &ar9287_adc_init_dc_cal;
+ /* Better performance without ADC Gain Calibration */
+ AH5416(ah)->ah_cal.suppCals = ADC_DC_CAL | IQ_MISMATCH_CAL;
+
+ AH5416(ah)->ah_spurMitigate = ar9280SpurMitigate;
+ AH5416(ah)->ah_writeIni = ar9287WriteIni;
+
+ ah->ah_setTxPower = ar9287SetTransmitPower;
+ ah->ah_setBoardValues = ar9287SetBoardValues;
+
+ AH5416(ah)->ah_olcInit = ar9287olcInit;
+ AH5416(ah)->ah_olcTempCompensation = ar9287olcTemperatureCompensation;
+ //AH5416(ah)->ah_setPowerCalTable = ar9287SetPowerCalTable;
+ AH5416(ah)->ah_cal_initcal = ar9287InitCalHardware;
+ AH5416(ah)->ah_cal_pacal = ar9287PACal;
+
+ /* XXX NF calibration */
+ /* XXX Ini override? (IFS vars - since the kiwi mac clock is faster?) */
+ /* XXX what else is kiwi-specific in the radio/calibration pathway? */
+
+ AH5416(ah)->ah_rx_chainmask = AR9287_DEFAULT_RXCHAINMASK;
+ AH5416(ah)->ah_tx_chainmask = AR9287_DEFAULT_TXCHAINMASK;
+
+ if (!ar5416SetResetReg(ah, HAL_RESET_POWER_ON)) {
+ /* reset chip */
+ HALDEBUG(ah, HAL_DEBUG_ANY, "%s: couldn't reset chip\n",
+ __func__);
+ ecode = HAL_EIO;
+ goto bad;
+ }
+
+ if (!ar5416SetPowerMode(ah, HAL_PM_AWAKE, AH_TRUE)) {
+ HALDEBUG(ah, HAL_DEBUG_ANY, "%s: couldn't wakeup chip\n",
+ __func__);
+ ecode = HAL_EIO;
+ goto bad;
+ }
+ /* Read Revisions from Chips before taking out of reset */
+ val = OS_REG_READ(ah, AR_SREV);
+ HALDEBUG(ah, HAL_DEBUG_ATTACH,
+ "%s: ID 0x%x VERSION 0x%x TYPE 0x%x REVISION 0x%x\n",
+ __func__, MS(val, AR_XSREV_ID), MS(val, AR_XSREV_VERSION),
+ MS(val, AR_XSREV_TYPE), MS(val, AR_XSREV_REVISION));
+ /* NB: include chip type to differentiate from pre-Sowl versions */
+ AH_PRIVATE(ah)->ah_macVersion =
+ (val & AR_XSREV_VERSION) >> AR_XSREV_TYPE_S;
+ AH_PRIVATE(ah)->ah_macRev = MS(val, AR_XSREV_REVISION);
+ AH_PRIVATE(ah)->ah_ispcie = (val & AR_XSREV_TYPE_HOST_MODE) == 0;
+
+ /* Don't support Kiwi < 1.2; those are pre-release chips */
+ if (! AR_SREV_KIWI_12_OR_LATER(ah)) {
+ ath_hal_printf(ah, "[ath]: Kiwi < 1.2 is not supported\n");
+ ecode = HAL_EIO;
+ goto bad;
+ }
+
+ /* setup common ini data; rf backends handle remainder */
+ HAL_INI_INIT(&ahp->ah_ini_modes, ar9287Modes_9287_1_1, 6);
+ HAL_INI_INIT(&ahp->ah_ini_common, ar9287Common_9287_1_1, 2);
+
+ /* If pcie_clock_req */
+ HAL_INI_INIT(&AH5416(ah)->ah_ini_pcieserdes,
+ ar9287PciePhy_clkreq_always_on_L1_9287_1_1, 2);
+
+ /* XXX WoW ini values */
+
+ /* Else */
+#if 0
+ HAL_INI_INIT(&AH5416(ah)->ah_ini_pcieserdes,
+ ar9287PciePhy_clkreq_off_L1_9287_1_1, 2);
+#endif
+
+ /* Initialise Japan arrays */
+ HAL_INI_INIT(&ahp9287->ah_ini_cckFirNormal,
+ ar9287Common_normal_cck_fir_coeff_9287_1_1, 2);
+ HAL_INI_INIT(&ahp9287->ah_ini_cckFirJapan2484,
+ ar9287Common_japan_2484_cck_fir_coeff_9287_1_1, 2);
+
+ ar5416AttachPCIE(ah);
+
+ ecode = ath_hal_9287EepromAttach(ah);
+ if (ecode != HAL_OK)
+ goto bad;
+
+ if (!ar5416ChipReset(ah, AH_NULL)) { /* reset chip */
+ HALDEBUG(ah, HAL_DEBUG_ANY, "%s: chip reset failed\n", __func__);
+ ecode = HAL_EIO;
+ goto bad;
+ }
+
+ AH_PRIVATE(ah)->ah_phyRev = OS_REG_READ(ah, AR_PHY_CHIP_ID);
+
+ if (!ar5212ChipTest(ah)) {
+ HALDEBUG(ah, HAL_DEBUG_ANY, "%s: hardware self-test failed\n",
+ __func__);
+ ecode = HAL_ESELFTEST;
+ goto bad;
+ }
+
+ /*
+ * Set correct Baseband to analog shift
+ * setting to access analog chips.
+ */
+ OS_REG_WRITE(ah, AR_PHY(0), 0x00000007);
+
+ /* Read Radio Chip Rev Extract */
+ AH_PRIVATE(ah)->ah_analog5GhzRev = ar5416GetRadioRev(ah);
+ switch (AH_PRIVATE(ah)->ah_analog5GhzRev & AR_RADIO_SREV_MAJOR) {
+ case AR_RAD2133_SREV_MAJOR: /* Sowl: 2G/3x3 */
+ case AR_RAD5133_SREV_MAJOR: /* Sowl: 2+5G/3x3 */
+ break;
+ default:
+ if (AH_PRIVATE(ah)->ah_analog5GhzRev == 0) {
+ AH_PRIVATE(ah)->ah_analog5GhzRev =
+ AR_RAD5133_SREV_MAJOR;
+ break;
+ }
+#ifdef AH_DEBUG
+ HALDEBUG(ah, HAL_DEBUG_ANY,
+ "%s: 5G Radio Chip Rev 0x%02X is not supported by "
+ "this driver\n", __func__,
+ AH_PRIVATE(ah)->ah_analog5GhzRev);
+ ecode = HAL_ENOTSUPP;
+ goto bad;
+#endif
+ }
+ rfStatus = ar9287RfAttach(ah, &ecode);
+ if (!rfStatus) {
+ HALDEBUG(ah, HAL_DEBUG_ANY, "%s: RF setup failed, status %u\n",
+ __func__, ecode);
+ goto bad;
+ }
+
+ /*
+ * We only implement open-loop TX power control
+ * for the AR9287 in this codebase.
+ */
+ if (! ath_hal_eepromGetFlag(ah, AR_EEP_OL_PWRCTRL)) {
+ ath_hal_printf(ah, "[ath] AR9287 w/ closed-loop TX power control"
+ " isn't supported.\n");
+ ecode = HAL_ENOTSUPP;
+ goto bad;
+ }
+
+ /*
+ * Check whether the power table offset isn't the default.
+ * This can occur with eeprom minor V21 or greater on Merlin.
+ */
+ (void) ath_hal_eepromGet(ah, AR_EEP_PWR_TABLE_OFFSET, &pwr_table_offset);
+ if (pwr_table_offset != AR5416_PWR_TABLE_OFFSET_DB)
+ ath_hal_printf(ah, "[ath]: default pwr offset: %d dBm != EEPROM pwr offset: %d dBm; curves will be adjusted.\n",
+ AR5416_PWR_TABLE_OFFSET_DB, (int) pwr_table_offset);
+
+ /* setup rxgain table */
+ HAL_INI_INIT(&ahp9287->ah_ini_rxgain, ar9287Modes_rx_gain_9287_1_1, 6);
+
+ /* setup txgain table */
+ HAL_INI_INIT(&ahp9287->ah_ini_txgain, ar9287Modes_tx_gain_9287_1_1, 6);
+
+ /*
+ * Got everything we need now to setup the capabilities.
+ */
+ if (!ar9287FillCapabilityInfo(ah)) {
+ ecode = HAL_EEREAD;
+ goto bad;
+ }
+
+ ecode = ath_hal_eepromGet(ah, AR_EEP_MACADDR, ahp->ah_macaddr);
+ if (ecode != HAL_OK) {
+ HALDEBUG(ah, HAL_DEBUG_ANY,
+ "%s: error getting mac address from EEPROM\n", __func__);
+ goto bad;
+ }
+ /* XXX How about the serial number? */
+ /* Read Reg Domain */
+ AH_PRIVATE(ah)->ah_currentRD =
+ ath_hal_eepromGet(ah, AR_EEP_REGDMN_0, AH_NULL);
+
+ /*
+ * ah_miscMode is populated by ar5416FillCapabilityInfo()
+ * starting from griffin. Set here to make sure that
+ * AR_MISC_MODE_MIC_NEW_LOC_ENABLE is set before a GTK is
+ * placed into hardware.
+ */
+ if (ahp->ah_miscMode != 0)
+ OS_REG_WRITE(ah, AR_MISC_MODE, OS_REG_READ(ah, AR_MISC_MODE) | ahp->ah_miscMode);
+
+ ar9287AniSetup(ah); /* Anti Noise Immunity */
+
+ /* Setup noise floor min/max/nominal values */
+ AH5416(ah)->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9287_2GHZ;
+ AH5416(ah)->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_9287_2GHZ;
+ AH5416(ah)->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9287_2GHZ;
+ AH5416(ah)->nf_5g.max = AR_PHY_CCA_MAX_GOOD_VAL_9287_5GHZ;
+ AH5416(ah)->nf_5g.min = AR_PHY_CCA_MIN_GOOD_VAL_9287_5GHZ;
+ AH5416(ah)->nf_5g.nominal = AR_PHY_CCA_NOM_VAL_9287_5GHZ;
+
+ ar5416InitNfHistBuff(AH5416(ah)->ah_cal.nfCalHist);
+
+ HALDEBUG(ah, HAL_DEBUG_ATTACH, "%s: return\n", __func__);
+
+ return ah;
+bad:
+ if (ah != AH_NULL)
+ ah->ah_detach(ah);
+ if (status)
+ *status = ecode;
+ return AH_NULL;
+}
+
+static void
+ar9287ConfigPCIE(struct ath_hal *ah, HAL_BOOL restore)
+{
+ if (AH_PRIVATE(ah)->ah_ispcie && !restore) {
+ ath_hal_ini_write(ah, &AH5416(ah)->ah_ini_pcieserdes, 1, 0);
+ OS_DELAY(1000);
+ OS_REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
+ OS_REG_WRITE(ah, AR_WA, AR9285_WA_DEFAULT); /* Yes, Kiwi uses the Kite PCIe PHY WA */
+ }
+}
+
+static void
+ar9287WriteIni(struct ath_hal *ah, const struct ieee80211_channel *chan)
+{
+ u_int modesIndex, freqIndex;
+ int regWrites = 0;
+
+ /* Setup the indices for the next set of register array writes */
+ /* XXX Ignore 11n dynamic mode on the AR5416 for the moment */
+ if (IEEE80211_IS_CHAN_2GHZ(chan)) {
+ freqIndex = 2;
+ if (IEEE80211_IS_CHAN_HT40(chan))
+ modesIndex = 3;
+ else if (IEEE80211_IS_CHAN_108G(chan))
+ modesIndex = 5;
+ else
+ modesIndex = 4;
+ } else {
+ freqIndex = 1;
+ if (IEEE80211_IS_CHAN_HT40(chan) ||
+ IEEE80211_IS_CHAN_TURBO(chan))
+ modesIndex = 2;
+ else
+ modesIndex = 1;
+ }
+
+ /* Set correct Baseband to analog shift setting to access analog chips. */
+ OS_REG_WRITE(ah, AR_PHY(0), 0x00000007);
+ OS_REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_INTERNAL_ADDAC);
+
+ regWrites = ath_hal_ini_write(ah, &AH5212(ah)->ah_ini_modes, modesIndex, regWrites);
+ regWrites = ath_hal_ini_write(ah, &AH9287(ah)->ah_ini_rxgain, modesIndex, regWrites);
+ regWrites = ath_hal_ini_write(ah, &AH9287(ah)->ah_ini_txgain, modesIndex, regWrites);
+ regWrites = ath_hal_ini_write(ah, &AH5212(ah)->ah_ini_common, 1, regWrites);
+}
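
The four ath_hal_ini_write() calls above consume the two table shapes defined in ar9287.ini: six-column mode tables, where column 0 is the register address and modesIndex (1..5) selects the value column, and two-column common tables written with a fixed index of 1. A minimal sketch of the assumed six-column walk follows; ini_write_sketch() is a hypothetical stand-in, not a HAL symbol:

static int
ini_write_sketch(struct ath_hal *ah, const uint32_t tbl[][6],
    int nrows, u_int modesIndex, int regWrites)
{
	int i;

	/*
	 * Column 0 is the destination register; columns 1..5 hold
	 * the per-mode values picked by modesIndex.
	 */
	for (i = 0; i < nrows; i++) {
		OS_REG_WRITE(ah, tbl[i][0], tbl[i][modesIndex]);
		regWrites++;	/* the real routine also paces the bus */
	}
	return (regWrites);
}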
+
+#define AR_BASE_FREQ_2GHZ 2300
+#define AR_BASE_FREQ_5GHZ 4900
+#define AR_SPUR_FEEQ_BOUND_HT40 19
+#define AR_SPUR_FEEQ_BOUND_HT20 10
+
+/*
+ * Fill all software cached or static hardware state information.
+ * Return failure if capabilities are to come from EEPROM and
+ * cannot be read.
+ */
+static HAL_BOOL
+ar9287FillCapabilityInfo(struct ath_hal *ah)
+{
+ HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
+
+ if (!ar5416FillCapabilityInfo(ah))
+ return AH_FALSE;
+ pCap->halNumGpioPins = 10;
+ pCap->halWowSupport = AH_TRUE;
+ pCap->halWowMatchPatternExact = AH_TRUE;
+#if 0
+ pCap->halWowMatchPatternDword = AH_TRUE;
+#endif
+
+ pCap->halCSTSupport = AH_TRUE;
+ pCap->halRifsRxSupport = AH_TRUE;
+ pCap->halRifsTxSupport = AH_TRUE;
+ pCap->halRtsAggrLimit = 64*1024; /* 802.11n max */
+ pCap->halExtChanDfsSupport = AH_TRUE;
+ pCap->halUseCombinedRadarRssi = AH_TRUE;
+#if 0
+ /* XXX bluetooth */
+ pCap->halBtCoexSupport = AH_TRUE;
+#endif
+ pCap->halAutoSleepSupport = AH_FALSE; /* XXX? */
+ pCap->hal4kbSplitTransSupport = AH_FALSE;
+ /* Disable this so Block-ACK works correctly */
+ pCap->halHasRxSelfLinkedTail = AH_FALSE;
+ pCap->halPSPollBroken = AH_FALSE;
+
+ /* Hardware supports (at least) single-stream STBC TX/RX */
+ pCap->halRxStbcSupport = 1;
+ pCap->halTxStbcSupport = 1;
+
+ /* Hardware supports short-GI w/ 20MHz */
+ pCap->halHTSGI20Support = 1;
+
+ pCap->halEnhancedDfsSupport = AH_TRUE;
+
+ return AH_TRUE;
+}
+
+/*
+ * This has been disabled - having the HAL flip chainmasks on/off
+ * when attempting to implement 11n disrupts things. For now, just
+ * leave this flipped off and worry about implementing TX diversity
+ * for legacy and MCS0-7 when 11n is fully functioning.
+ */
+HAL_BOOL
+ar9287SetAntennaSwitch(struct ath_hal *ah, HAL_ANT_SETTING settings)
+{
+ return AH_TRUE;
+}
+
+static const char*
+ar9287Probe(uint16_t vendorid, uint16_t devid)
+{
+ if (vendorid == ATHEROS_VENDOR_ID &&
+ (devid == AR9287_DEVID_PCI || devid == AR9287_DEVID_PCIE))
+ return "Atheros 9287";
+ return AH_NULL;
+}
+AH_CHIP(AR9287, ar9287Probe, ar9287Attach);
diff --git a/sys/dev/ath/ath_hal/ar9002/ar9287_cal.c b/sys/dev/ath/ath_hal/ar9002/ar9287_cal.c
new file mode 100644
index 0000000..d5024b0
--- /dev/null
+++ b/sys/dev/ath/ath_hal/ar9002/ar9287_cal.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2008-2010 Atheros Communications Inc.
+ * Copyright (c) 2011 Adrian Chadd, Xenion Pty Ltd.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#include "opt_ah.h"
+#include "ah.h"
+#include "ah_internal.h"
+
+#include "ah_eeprom_v4k.h"
+
+#include "ar9002/ar9285.h"
+#include "ar5416/ar5416reg.h"
+#include "ar5416/ar5416phy.h"
+#include "ar9002/ar9002phy.h"
+//#include "ar9002/ar9287phy.h"
+
+#include "ar9002/ar9287_cal.h"
+
+
+void
+ar9287PACal(struct ath_hal *ah, HAL_BOOL is_reset)
+{
+ /* XXX not required */
+}
+
+/*
+ * This is like Merlin but without ADC disable
+ */
+HAL_BOOL
+ar9287InitCalHardware(struct ath_hal *ah, const struct ieee80211_channel *chan)
+{
+ OS_REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_FLTR_CAL);
+
+ /* Calibrate the AGC */
+ OS_REG_WRITE(ah, AR_PHY_AGC_CONTROL,
+ OS_REG_READ(ah, AR_PHY_AGC_CONTROL) | AR_PHY_AGC_CONTROL_CAL);
+
+ /* Poll for offset calibration complete */
+ if (!ath_hal_wait(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_CAL, 0)) {
+ HALDEBUG(ah, HAL_DEBUG_RESET,
+ "%s: offset calibration failed to complete in 1ms; "
+ "noisy environment?\n", __func__);
+ return AH_FALSE;
+ }
+
+ OS_REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_FLTR_CAL);
+
+ return AH_TRUE;
+}
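
The completion poll above leans on ath_hal_wait() returning true once AR_PHY_AGC_CONTROL_CAL reads back as zero. A sketch of the assumed contract follows; the iteration bound and delay are illustrative assumptions, not the HAL's actual constants:

static HAL_BOOL
hal_wait_sketch(struct ath_hal *ah, u_int reg, uint32_t mask, uint32_t val)
{
	int i;

	/* Poll until the masked bits equal the target value. */
	for (i = 0; i < 1000; i++) {
		if ((OS_REG_READ(ah, reg) & mask) == val)
			return AH_TRUE;
		OS_DELAY(10);		/* assumed 10us polling interval */
	}
	return AH_FALSE;		/* timed out */
}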
diff --git a/sys/dev/ath/ath_hal/ar9002/ar9287_cal.h b/sys/dev/ath/ath_hal/ar9002/ar9287_cal.h
new file mode 100644
index 0000000..1a7cda2
--- /dev/null
+++ b/sys/dev/ath/ath_hal/ar9002/ar9287_cal.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2008-2010 Atheros Communications Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#ifndef __AR9287_CAL_H__
+#define __AR9287_CAL_H__
+
+extern void ar9287PACal(struct ath_hal *ah, HAL_BOOL is_reset);
+extern HAL_BOOL ar9287InitCalHardware(struct ath_hal *ah, const struct ieee80211_channel *chan);
+
+#endif /* __AR9287_CAL_H__ */
diff --git a/sys/dev/ath/ath_hal/ar9002/ar9287_olc.c b/sys/dev/ath/ath_hal/ar9002/ar9287_olc.c
new file mode 100644
index 0000000..cbbe017
--- /dev/null
+++ b/sys/dev/ath/ath_hal/ar9002/ar9287_olc.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2011 Adrian Chadd, Xenion Pty Ltd.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+#include "opt_ah.h"
+
+#include "ah.h"
+#include "ah_internal.h"
+
+#include "ah_eeprom_v14.h"
+#include "ah_eeprom_9287.h"
+
+#include "ar9002/ar9280.h"
+#include "ar5416/ar5416reg.h"
+#include "ar5416/ar5416phy.h"
+#include "ar9002/ar9002phy.h"
+
+#include "ar9002/ar9287phy.h"
+#include "ar9002/ar9287an.h"
+#include "ar9002/ar9287_olc.h"
+
+void
+ar9287olcInit(struct ath_hal *ah)
+{
+ OS_REG_SET_BIT(ah, AR_PHY_TX_PWRCTRL9,
+ AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL);
+ OS_A_REG_RMW_FIELD(ah, AR9287_AN_TXPC0,
+ AR9287_AN_TXPC0_TXPCMODE,
+ AR9287_AN_TXPC0_TXPCMODE_TEMPSENSE);
+ OS_DELAY(100);
+}
+
+/*
+ * Run temperature compensation calibration.
+ *
+ * The TX gain table is adjusted depending upon the difference
+ * between the initial PDADC value and the currently read
+ * average TX power sample value. This value is only valid if
+ * frames have been transmitted, so currPDADC will be 0 if
+ * no frames have yet been transmitted.
+ */
+void
+ar9287olcTemperatureCompensation(struct ath_hal *ah)
+{
+ uint32_t rddata;
+ int32_t delta, currPDADC, slope;
+
+ rddata = OS_REG_READ(ah, AR_PHY_TX_PWRCTRL4);
+ currPDADC = MS(rddata, AR_PHY_TX_PWRCTRL_PD_AVG_OUT);
+
+ HALDEBUG(ah, HAL_DEBUG_PERCAL, "%s: initPDADC=%d, currPDADC=%d\n",
+ __func__, AH5416(ah)->initPDADC, currPDADC);
+
+ if (AH5416(ah)->initPDADC == 0 || currPDADC == 0) {
+ /*
+ * Zero value indicates that no frames have been transmitted
+ * yet, can't do temperature compensation until frames are
+ * transmitted.
+ */
+ return;
+ } else {
+ int8_t val;
+ (void) (ath_hal_eepromGet(ah, AR_EEP_TEMPSENSE_SLOPE, &val));
+ slope = val;
+
+ if (slope == 0) { /* to avoid divide by zero case */
+ delta = 0;
+ } else {
+ delta = ((currPDADC - AH5416(ah)->initPDADC)*4) / slope;
+ }
+ OS_REG_RMW_FIELD(ah, AR_PHY_CH0_TX_PWRCTRL11,
+ AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP, delta);
+ OS_REG_RMW_FIELD(ah, AR_PHY_CH1_TX_PWRCTRL11,
+ AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP, delta);
+
+ HALDEBUG(ah, HAL_DEBUG_PERCAL, "%s: delta=%d\n", __func__, delta);
+ }
+}
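
Concretely, the correction applied above is delta = 4 * (currPDADC - initPDADC) / slope, in TX gain table steps. With an assumed EEPROM temperature-sense slope of 2, an initPDADC of 100 and a current average of 110 give delta = 4 * 10 / 2 = 20, written to the OLPC_TEMP_COMP field of both chains; a part running cooler than at calibration time (currPDADC < initPDADC) yields a negative delta.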
+
+void
+ar9287olcGetTxGainIndex(struct ath_hal *ah,
+ const struct ieee80211_channel *chan,
+ struct cal_data_op_loop_ar9287 *pRawDatasetOpLoop,
+ uint8_t *pCalChans, uint16_t availPiers, int8_t *pPwr)
+{
+ uint16_t idxL = 0, idxR = 0, numPiers;
+ HAL_BOOL match;
+ CHAN_CENTERS centers;
+
+ ar5416GetChannelCenters(ah, chan, &centers);
+
+ for (numPiers = 0; numPiers < availPiers; numPiers++) {
+ if (pCalChans[numPiers] == AR5416_BCHAN_UNUSED)
+ break;
+ }
+
+ match = ath_ee_getLowerUpperIndex(
+ (uint8_t)FREQ2FBIN(centers.synth_center, IEEE80211_IS_CHAN_2GHZ(chan)),
+ pCalChans, numPiers, &idxL, &idxR);
+
+ if (match) {
+ *pPwr = (int8_t) pRawDatasetOpLoop[idxL].pwrPdg[0][0];
+ } else {
+ *pPwr = ((int8_t) pRawDatasetOpLoop[idxL].pwrPdg[0][0] +
+ (int8_t) pRawDatasetOpLoop[idxR].pwrPdg[0][0])/2;
+ }
+}
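
As an illustration with made-up pier data: if pCalChans holds FBIN values {1, 6, 11} with pwrPdg[0][0] of {14, 16, 20}, a channel landing exactly on pier 6 returns 16, while one falling between piers 6 and 11 returns (16 + 20) / 2 = 18. The bracketing case is a plain midpoint of the two neighbouring piers, not a frequency-weighted interpolation.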
+
+void
+ar9287olcSetPDADCs(struct ath_hal *ah, int32_t txPower,
+ uint16_t chain)
+{
+ uint32_t tmpVal;
+ uint32_t a;
+
+ /* Enable OLPC for chain 0 */
+
+ tmpVal = OS_REG_READ(ah, 0xa270);
+ tmpVal = tmpVal & 0xFCFFFFFF;
+ tmpVal = tmpVal | (0x3 << 24);
+ OS_REG_WRITE(ah, 0xa270, tmpVal);
+
+ /* Enable OLPC for chain 1 */
+
+ tmpVal = OS_REG_READ(ah, 0xb270);
+ tmpVal = tmpVal & 0xFCFFFFFF;
+ tmpVal = tmpVal | (0x3 << 24);
+ OS_REG_WRITE(ah, 0xb270, tmpVal);
+
+ /* Write the OLPC ref power for chain 0 */
+
+ if (chain == 0) {
+ tmpVal = OS_REG_READ(ah, 0xa398);
+ tmpVal = tmpVal & 0xff00ffff;
+ a = (txPower)&0xff;
+ tmpVal = tmpVal | (a << 16);
+ OS_REG_WRITE(ah, 0xa398, tmpVal);
+ }
+
+ /* Write the OLPC ref power for chain 1 */
+
+ if (chain == 1) {
+ tmpVal = OS_REG_READ(ah, 0xb398);
+ tmpVal = tmpVal & 0xff00ffff;
+ a = (txPower)&0xff;
+ tmpVal = tmpVal | (a << 16);
+ OS_REG_WRITE(ah, 0xb398, tmpVal);
+ }
+}
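
Each block above is the same read-modify-write idiom: mask a field out of the register, then OR in the new contents (bits 25:24 = 0x3 enables OLPC; bits 23:16 carry the reference power). A hypothetical helper, not a HAL symbol, makes the pattern explicit; the addresses are taken verbatim from the code above:

static void
rmw_field_sketch(struct ath_hal *ah, uint32_t reg, uint32_t mask,
    int shift, uint32_t val)
{
	uint32_t v;

	v = OS_REG_READ(ah, reg);			/* fetch */
	v = (v & ~mask) | ((val << shift) & mask);	/* clear, insert */
	OS_REG_WRITE(ah, reg, v);			/* write back */
}

/*
 * OLPC enable, chain 0: rmw_field_sketch(ah, 0xa270, 0x03000000, 24, 0x3);
 * Ref power, chain 0:   rmw_field_sketch(ah, 0xa398, 0x00ff0000, 16,
 *                           txPower & 0xff);
 */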
diff --git a/sys/dev/ath/ath_hal/ar9002/ar9287_olc.h b/sys/dev/ath/ath_hal/ar9002/ar9287_olc.h
new file mode 100644
index 0000000..ff21ce6
--- /dev/null
+++ b/sys/dev/ath/ath_hal/ar9002/ar9287_olc.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2010 Atheros Communications, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef __AR9287_OLC_H__
+#define __AR9287_OLC_H__
+
+extern void ar9287olcInit(struct ath_hal *ah);
+extern void ar9287olcTemperatureCompensation(struct ath_hal *ah);
+extern void ar9287olcGetTxGainIndex(struct ath_hal *ah,
+ const struct ieee80211_channel *chan,
+ struct cal_data_op_loop_ar9287 *pRawDatasetOpLoop,
+ uint8_t *pCalChans, uint16_t availPiers, int8_t *pPwr);
+extern void ar9287olcSetPDADCs(struct ath_hal *ah,
+ int32_t txPower, uint16_t chain);
+
+#endif /* __AR9287_OLC_H__ */
diff --git a/sys/dev/ath/ath_hal/ar9002/ar9287_reset.c b/sys/dev/ath/ath_hal/ar9002/ar9287_reset.c
new file mode 100644
index 0000000..a799ba0
--- /dev/null
+++ b/sys/dev/ath/ath_hal/ar9002/ar9287_reset.c
@@ -0,0 +1,571 @@
+/*
+ * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
+ * Copyright (c) 2002-2008 Atheros Communications, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ * $FreeBSD$
+ */
+
+#include "opt_ah.h"
+
+#include "ah.h"
+#include "ah_internal.h"
+#include "ah_devid.h"
+
+#include "ah_eeprom_v14.h"
+#include "ah_eeprom_9287.h"
+
+#include "ar5416/ar5416.h"
+#include "ar5416/ar5416reg.h"
+#include "ar5416/ar5416phy.h"
+
+#include "ar9002/ar9287phy.h"
+#include "ar9002/ar9287an.h"
+
+#include "ar9002/ar9287_olc.h"
+#include "ar9002/ar9287_reset.h"
+
+/*
+ * Set the TX power calibration table per-chain.
+ *
+ * This only supports open-loop TX power control for the AR9287.
+ */
+static void
+ar9287SetPowerCalTable(struct ath_hal *ah,
+ const struct ieee80211_channel *chan, int16_t *pTxPowerIndexOffset)
+{
+ struct cal_data_op_loop_ar9287 *pRawDatasetOpenLoop;
+ uint8_t *pCalBChans = NULL;
+ uint16_t pdGainOverlap_t2;
+ uint16_t numPiers = 0, i;
+ uint16_t numXpdGain, xpdMask;
+ uint16_t xpdGainValues[AR5416_NUM_PD_GAINS] = {0, 0, 0, 0};
+ uint32_t regChainOffset;
+ HAL_EEPROM_9287 *ee = AH_PRIVATE(ah)->ah_eeprom;
+ struct ar9287_eeprom *pEepData = &ee->ee_base;
+
+ xpdMask = pEepData->modalHeader.xpdGain;
+
+ if ((pEepData->baseEepHeader.version & AR9287_EEP_VER_MINOR_MASK) >=
+ AR9287_EEP_MINOR_VER_2)
+ pdGainOverlap_t2 = pEepData->modalHeader.pdGainOverlap;
+ else
+ pdGainOverlap_t2 = (uint16_t)(MS(OS_REG_READ(ah, AR_PHY_TPCRG5),
+ AR_PHY_TPCRG5_PD_GAIN_OVERLAP));
+
+ /* Note: Kiwi should only be 2GHz. */
+ if (IEEE80211_IS_CHAN_2GHZ(chan)) {
+ pCalBChans = pEepData->calFreqPier2G;
+ numPiers = AR9287_NUM_2G_CAL_PIERS;
+ pRawDatasetOpenLoop = (struct cal_data_op_loop_ar9287 *)pEepData->calPierData2G[0];
+ AH5416(ah)->initPDADC = pRawDatasetOpenLoop->vpdPdg[0][0];
+ }
+ numXpdGain = 0;
+
+ /* Calculate the value of xpdgains from the xpdGain Mask */
+ for (i = 1; i <= AR5416_PD_GAINS_IN_MASK; i++) {
+ if ((xpdMask >> (AR5416_PD_GAINS_IN_MASK - i)) & 1) {
+ if (numXpdGain >= AR5416_NUM_PD_GAINS)
+ break;
+ xpdGainValues[numXpdGain] =
+ (uint16_t)(AR5416_PD_GAINS_IN_MASK-i);
+ numXpdGain++;
+ }
+ }
+
+ OS_REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_NUM_PD_GAIN,
+ (numXpdGain - 1) & 0x3);
+ OS_REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_1,
+ xpdGainValues[0]);
+ OS_REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_2,
+ xpdGainValues[1]);
+ OS_REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_3,
+ xpdGainValues[2]);
+
+ for (i = 0; i < AR9287_MAX_CHAINS; i++) {
+ regChainOffset = i * 0x1000;
+
+ if (pEepData->baseEepHeader.txMask & (1 << i)) {
+ int8_t txPower;
+ pRawDatasetOpenLoop =
+ (struct cal_data_op_loop_ar9287 *)pEepData->calPierData2G[i];
+ ar9287olcGetTxGainIndex(ah, chan,
+ pRawDatasetOpenLoop,
+ pCalBChans, numPiers,
+ &txPower);
+ ar9287olcSetPDADCs(ah, txPower, i);
+ }
+ }
+
+ *pTxPowerIndexOffset = 0;
+}
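
To make the xpdGain decoding concrete, assuming AR5416_PD_GAINS_IN_MASK is 4 (its AR5416-family value): an xpdMask of 0x9 (binary 1001) yields numXpdGain = 2 and xpdGainValues = {3, 0}, since iteration i = 1 finds bit 3 set and records 4 - 1 = 3, i = 2 and i = 3 find clear bits, and i = 4 finds bit 0 set and records 0. The three TPCRG1 gain fields are then programmed from this list, with unused slots left at zero.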
+
+
+/* XXX hard-coded values? */
+#define REDUCE_SCALED_POWER_BY_TWO_CHAIN 6
+
+/*
+ * ar9287SetPowerPerRateTable
+ *
+ * Sets the transmit power in the baseband for the given
+ * operating channel and mode.
+ *
+ * This is like the v14 EEPROM table except the 5GHz code.
+ */
+static HAL_BOOL
+ar9287SetPowerPerRateTable(struct ath_hal *ah,
+ struct ar9287_eeprom *pEepData,
+ const struct ieee80211_channel *chan,
+ int16_t *ratesArray, uint16_t cfgCtl,
+ uint16_t AntennaReduction,
+ uint16_t twiceMaxRegulatoryPower,
+ uint16_t powerLimit)
+{
+#define N(a) (sizeof(a)/sizeof(a[0]))
+/* Local defines to distinguish between extension and control CTL's */
+#define EXT_ADDITIVE (0x8000)
+#define CTL_11A_EXT (CTL_11A | EXT_ADDITIVE)
+#define CTL_11G_EXT (CTL_11G | EXT_ADDITIVE)
+#define CTL_11B_EXT (CTL_11B | EXT_ADDITIVE)
+
+ uint16_t twiceMaxEdgePower = AR5416_MAX_RATE_POWER;
+ int i;
+ int16_t twiceLargestAntenna;
+ struct cal_ctl_data_ar9287 *rep;
+ CAL_TARGET_POWER_LEG targetPowerOfdm;
+ CAL_TARGET_POWER_LEG targetPowerCck = {0, {0, 0, 0, 0}};
+ CAL_TARGET_POWER_LEG targetPowerOfdmExt = {0, {0, 0, 0, 0}};
+ CAL_TARGET_POWER_LEG targetPowerCckExt = {0, {0, 0, 0, 0}};
+ CAL_TARGET_POWER_HT targetPowerHt20;
+ CAL_TARGET_POWER_HT targetPowerHt40 = {0, {0, 0, 0, 0}};
+ int16_t scaledPower, minCtlPower;
+
+#define SUB_NUM_CTL_MODES_AT_2G_40 3 /* excluding HT40, EXT-OFDM, EXT-CCK */
+ static const uint16_t ctlModesFor11g[] = {
+ CTL_11B, CTL_11G, CTL_2GHT20, CTL_11B_EXT, CTL_11G_EXT, CTL_2GHT40
+ };
+ const uint16_t *pCtlMode;
+ uint16_t numCtlModes, ctlMode, freq;
+ CHAN_CENTERS centers;
+
+ ar5416GetChannelCenters(ah, chan, &centers);
+
+ /* Compute TxPower reduction due to Antenna Gain */
+
+ twiceLargestAntenna = AH_MAX(
+ pEepData->modalHeader.antennaGainCh[0],
+ pEepData->modalHeader.antennaGainCh[1]);
+
+ twiceLargestAntenna = (int16_t)AH_MIN((AntennaReduction) - twiceLargestAntenna, 0);
+
+ /* XXX setup for 5212 use (really used?) */
+ ath_hal_eepromSet(ah, AR_EEP_ANTGAINMAX_2, twiceLargestAntenna);
+
+ /*
+ * scaledPower is the minimum of the user input power level and
+ * the regulatory allowed power level
+ */
+ scaledPower = AH_MIN(powerLimit, twiceMaxRegulatoryPower + twiceLargestAntenna);
+
+ /* Reduce scaled Power by number of chains active to get to per chain tx power level */
+ /* TODO: better value than these? */
+ switch (owl_get_ntxchains(AH5416(ah)->ah_tx_chainmask)) {
+ case 1:
+ break;
+ case 2:
+ scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
+ break;
+ default:
+ return AH_FALSE; /* Unsupported number of chains */
+ }
+
+ scaledPower = AH_MAX(0, scaledPower);
+
+ /* Get target powers from EEPROM - our baseline for TX Power */
+ /* XXX assume channel is 2ghz */
+ if (1) {
+ /* Setup for CTL modes */
+ numCtlModes = N(ctlModesFor11g) - SUB_NUM_CTL_MODES_AT_2G_40; /* CTL_11B, CTL_11G, CTL_2GHT20 */
+ pCtlMode = ctlModesFor11g;
+
+ ar5416GetTargetPowersLeg(ah, chan, pEepData->calTargetPowerCck,
+ AR9287_NUM_2G_CCK_TARGET_POWERS, &targetPowerCck, 4, AH_FALSE);
+ ar5416GetTargetPowersLeg(ah, chan, pEepData->calTargetPower2G,
+ AR9287_NUM_2G_20_TARGET_POWERS, &targetPowerOfdm, 4, AH_FALSE);
+ ar5416GetTargetPowers(ah, chan, pEepData->calTargetPower2GHT20,
+ AR9287_NUM_2G_20_TARGET_POWERS, &targetPowerHt20, 8, AH_FALSE);
+
+ if (IEEE80211_IS_CHAN_HT40(chan)) {
+ numCtlModes = N(ctlModesFor11g); /* All 2G CTL's */
+
+ ar5416GetTargetPowers(ah, chan, pEepData->calTargetPower2GHT40,
+ AR9287_NUM_2G_40_TARGET_POWERS, &targetPowerHt40, 8, AH_TRUE);
+ /* Get target powers for extension channels */
+ ar5416GetTargetPowersLeg(ah, chan, pEepData->calTargetPowerCck,
+ AR9287_NUM_2G_CCK_TARGET_POWERS, &targetPowerCckExt, 4, AH_TRUE);
+ ar5416GetTargetPowersLeg(ah, chan, pEepData->calTargetPower2G,
+ AR9287_NUM_2G_20_TARGET_POWERS, &targetPowerOfdmExt, 4, AH_TRUE);
+ }
+ }
+
+ /*
+ * For MIMO, need to apply regulatory caps individually across dynamically
+ * running modes: CCK, OFDM, HT20, HT40
+ *
+ * The outer loop walks through each possible applicable runtime mode.
+ * The inner loop walks through each ctlIndex entry in EEPROM.
+ * The ctl value is encoded as [7:4] == test group, [3:0] == test mode.
+ *
+ */
+ for (ctlMode = 0; ctlMode < numCtlModes; ctlMode++) {
+ HAL_BOOL isHt40CtlMode = (pCtlMode[ctlMode] == CTL_5GHT40) ||
+ (pCtlMode[ctlMode] == CTL_2GHT40);
+ if (isHt40CtlMode) {
+ freq = centers.ctl_center;
+ } else if (pCtlMode[ctlMode] & EXT_ADDITIVE) {
+ freq = centers.ext_center;
+ } else {
+ freq = centers.ctl_center;
+ }
+
+ /* walk through each CTL index stored in EEPROM */
+ for (i = 0; (i < AR9287_NUM_CTLS) && pEepData->ctlIndex[i]; i++) {
+ uint16_t twiceMinEdgePower;
+
+ /* compare test group from regulatory channel list with test mode from pCtlMode list */
+ if ((((cfgCtl & ~CTL_MODE_M) | (pCtlMode[ctlMode] & CTL_MODE_M)) == pEepData->ctlIndex[i]) ||
+ (((cfgCtl & ~CTL_MODE_M) | (pCtlMode[ctlMode] & CTL_MODE_M)) ==
+ ((pEepData->ctlIndex[i] & CTL_MODE_M) | SD_NO_CTL))) {
+ rep = &(pEepData->ctlData[i]);
+ twiceMinEdgePower = ar5416GetMaxEdgePower(freq,
+ rep->ctlEdges[owl_get_ntxchains(AH5416(ah)->ah_tx_chainmask) - 1],
+ IEEE80211_IS_CHAN_2GHZ(chan));
+ if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL) {
+ /* Find the minimum of all CTL edge powers that apply to this channel */
+ twiceMaxEdgePower = AH_MIN(twiceMaxEdgePower, twiceMinEdgePower);
+ } else {
+ /* specific */
+ twiceMaxEdgePower = twiceMinEdgePower;
+ break;
+ }
+ }
+ }
+ minCtlPower = (uint8_t)AH_MIN(twiceMaxEdgePower, scaledPower);
+ /* Apply ctl mode to correct target power set */
+ switch(pCtlMode[ctlMode]) {
+ case CTL_11B:
+ for (i = 0; i < N(targetPowerCck.tPow2x); i++) {
+ targetPowerCck.tPow2x[i] = (uint8_t)AH_MIN(targetPowerCck.tPow2x[i], minCtlPower);
+ }
+ break;
+ case CTL_11A:
+ case CTL_11G:
+ for (i = 0; i < N(targetPowerOfdm.tPow2x); i++) {
+ targetPowerOfdm.tPow2x[i] = (uint8_t)AH_MIN(targetPowerOfdm.tPow2x[i], minCtlPower);
+ }
+ break;
+ case CTL_5GHT20:
+ case CTL_2GHT20:
+ for (i = 0; i < N(targetPowerHt20.tPow2x); i++) {
+ targetPowerHt20.tPow2x[i] = (uint8_t)AH_MIN(targetPowerHt20.tPow2x[i], minCtlPower);
+ }
+ break;
+ case CTL_11B_EXT:
+ targetPowerCckExt.tPow2x[0] = (uint8_t)AH_MIN(targetPowerCckExt.tPow2x[0], minCtlPower);
+ break;
+ case CTL_11A_EXT:
+ case CTL_11G_EXT:
+ targetPowerOfdmExt.tPow2x[0] = (uint8_t)AH_MIN(targetPowerOfdmExt.tPow2x[0], minCtlPower);
+ break;
+ case CTL_5GHT40:
+ case CTL_2GHT40:
+ for (i = 0; i < N(targetPowerHt40.tPow2x); i++) {
+ targetPowerHt40.tPow2x[i] = (uint8_t)AH_MIN(targetPowerHt40.tPow2x[i], minCtlPower);
+ }
+ break;
+ default:
+ return AH_FALSE;
+ break;
+ }
+ } /* end ctl mode checking */
+
+ /* Set rates Array from collected data */
+ ar5416SetRatesArrayFromTargetPower(ah, chan, ratesArray,
+ &targetPowerCck,
+ &targetPowerCckExt,
+ &targetPowerOfdm,
+ &targetPowerOfdmExt,
+ &targetPowerHt20,
+ &targetPowerHt40);
+ return AH_TRUE;
+#undef EXT_ADDITIVE
+#undef CTL_11A_EXT
+#undef CTL_11G_EXT
+#undef CTL_11B_EXT
+#undef SUB_NUM_CTL_MODES_AT_2G_40
+#undef N
+}
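
Reading the CTL match test above concretely: (cfgCtl & ~CTL_MODE_M) keeps the channel's regulatory test group in the upper nibble while (pCtlMode[ctlMode] & CTL_MODE_M) substitutes the mode currently being walked into the lower nibble. The first clause matches an EEPROM entry whose group and mode both agree; the second handles a channel whose cfgCtl group is SD_NO_CTL (no specific regulatory CTL), accepting any entry with the right mode regardless of its group. In the SD_NO_CTL case every matching edge power is folded in via AH_MIN(); with a domain-specific cfgCtl the matched edge is taken directly and the scan breaks out.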
+
+#undef REDUCE_SCALED_POWER_BY_TWO_CHAIN
+
+/*
+ * This is based off of the AR5416/AR9285 code and likely could
+ * be unified in the future.
+ */
+HAL_BOOL
+ar9287SetTransmitPower(struct ath_hal *ah,
+ const struct ieee80211_channel *chan, uint16_t *rfXpdGain)
+{
+#define POW_SM(_r, _s) (((_r) & 0x3f) << (_s))
+#define N(a) (sizeof (a) / sizeof (a[0]))
+
+ const struct modal_eep_ar9287_header *pModal;
+ struct ath_hal_5212 *ahp = AH5212(ah);
+ int16_t ratesArray[Ar5416RateSize];
+ int16_t txPowerIndexOffset = 0;
+ uint8_t ht40PowerIncForPdadc = 2;
+ int i;
+
+ uint16_t cfgCtl;
+ uint16_t powerLimit;
+ uint16_t twiceAntennaReduction;
+ uint16_t twiceMaxRegulatoryPower;
+ int16_t maxPower;
+ HAL_EEPROM_9287 *ee = AH_PRIVATE(ah)->ah_eeprom;
+ struct ar9287_eeprom *pEepData = &ee->ee_base;
+
+ /* Setup info for the actual eeprom */
+ OS_MEMZERO(ratesArray, sizeof(ratesArray));
+ cfgCtl = ath_hal_getctl(ah, chan);
+ powerLimit = chan->ic_maxregpower * 2;
+ twiceAntennaReduction = chan->ic_maxantgain;
+ twiceMaxRegulatoryPower = AH_MIN(MAX_RATE_POWER, AH_PRIVATE(ah)->ah_powerLimit);
+ pModal = &pEepData->modalHeader;
+ HALDEBUG(ah, HAL_DEBUG_RESET, "%s Channel=%u CfgCtl=%u\n",
+ __func__, chan->ic_freq, cfgCtl);
+
+ /* XXX Assume Minor is v2 or later */
+ ht40PowerIncForPdadc = pModal->ht40PowerIncForPdadc;
+
+ /* Fetch per-rate power table for the given channel */
+ if (! ar9287SetPowerPerRateTable(ah, pEepData, chan,
+ &ratesArray[0], cfgCtl,
+ twiceAntennaReduction,
+ twiceMaxRegulatoryPower, powerLimit)) {
+ HALDEBUG(ah, HAL_DEBUG_ANY,
+ "%s: unable to set tx power per rate table\n", __func__);
+ return AH_FALSE;
+ }
+
+ /* Set TX power control calibration curves for each TX chain */
+ ar9287SetPowerCalTable(ah, chan, &txPowerIndexOffset);
+
+ /* Calculate maximum power level */
+ maxPower = AH_MAX(ratesArray[rate6mb], ratesArray[rateHt20_0]);
+ maxPower = AH_MAX(maxPower, ratesArray[rate1l]);
+
+ if (IEEE80211_IS_CHAN_HT40(chan))
+ maxPower = AH_MAX(maxPower, ratesArray[rateHt40_0]);
+
+ ahp->ah_tx6PowerInHalfDbm = maxPower;
+ AH_PRIVATE(ah)->ah_maxPowerLevel = maxPower;
+ ahp->ah_txPowerIndexOffset = txPowerIndexOffset;
+
+ /*
+ * txPowerIndexOffset is set by the SetPowerTable() call -
+ * adjust the rate table (0 offset if rates EEPROM not loaded)
+ */
+ /* XXX what about the pwrTableOffset? */
+ for (i = 0; i < N(ratesArray); i++) {
+ ratesArray[i] = (int16_t)(txPowerIndexOffset + ratesArray[i]);
+ /* -5 dBm offset for Merlin and later; this includes Kiwi */
+ ratesArray[i] -= AR5416_PWR_TABLE_OFFSET_DB * 2;
+ if (ratesArray[i] > AR5416_MAX_RATE_POWER)
+ ratesArray[i] = AR5416_MAX_RATE_POWER;
+ if (ratesArray[i] < 0)
+ ratesArray[i] = 0;
+ }
+
+#ifdef AH_EEPROM_DUMP
+ ar5416PrintPowerPerRate(ah, ratesArray);
+#endif
+
+ /*
+ * Adjust the HT40 power to meet the correct target TX power
+ * for 40MHz mode, based on TX power curves that are established
+ * for 20MHz mode.
+ *
+ * XXX handle overflow/too high power level?
+ */
+ if (IEEE80211_IS_CHAN_HT40(chan)) {
+ ratesArray[rateHt40_0] += ht40PowerIncForPdadc;
+ ratesArray[rateHt40_1] += ht40PowerIncForPdadc;
+ ratesArray[rateHt40_2] += ht40PowerIncForPdadc;
+ ratesArray[rateHt40_3] += ht40PowerIncForPdadc;
+ ratesArray[rateHt40_4] += ht40PowerIncForPdadc;
+ ratesArray[rateHt40_5] += ht40PowerIncForPdadc;
+ ratesArray[rateHt40_6] += ht40PowerIncForPdadc;
+ ratesArray[rateHt40_7] += ht40PowerIncForPdadc;
+ }
+
+ /* Write the TX power rate registers */
+ ar5416WriteTxPowerRateRegisters(ah, chan, ratesArray);
+
+ return AH_TRUE;
+#undef POW_SM
+#undef N
+}
+
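As a worked example of the per-rate adjustment loop in ar9287SetTransmitPower (all values in half-dB steps; the target number below is made up purely for illustration):

	/* Sketch: one iteration of the offset-and-clamp loop above. */
	static int16_t
	example_rate_adjust(void)
	{
		int16_t pwr = 30;			/* 15 dBm EEPROM target */
		pwr += 0;				/* txPowerIndexOffset */
		pwr -= AR5416_PWR_TABLE_OFFSET_DB * 2;	/* 5 dB == 10 half-dB */
		if (pwr > AR5416_MAX_RATE_POWER)
			pwr = AR5416_MAX_RATE_POWER;
		if (pwr < 0)
			pwr = 0;
		return (pwr);				/* 20 half-dB == 10 dBm */
	}
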
+/*
+ * Read EEPROM header info and program the device for correct operation
+ * given the channel value.
+ */
+HAL_BOOL
+ar9287SetBoardValues(struct ath_hal *ah, const struct ieee80211_channel *chan)
+{
+ const HAL_EEPROM_9287 *ee = AH_PRIVATE(ah)->ah_eeprom;
+ const struct ar9287_eeprom *eep = &ee->ee_base;
+ const struct modal_eep_ar9287_header *pModal = &eep->modalHeader;
+ uint16_t antWrites[AR9287_ANT_16S];
+ uint32_t regChainOffset, regval;
+ uint8_t txRxAttenLocal;
+ int i, j, offset_num;
+
+ antWrites[0] = (uint16_t)((pModal->antCtrlCommon >> 28) & 0xF);
+ antWrites[1] = (uint16_t)((pModal->antCtrlCommon >> 24) & 0xF);
+ antWrites[2] = (uint16_t)((pModal->antCtrlCommon >> 20) & 0xF);
+ antWrites[3] = (uint16_t)((pModal->antCtrlCommon >> 16) & 0xF);
+ antWrites[4] = (uint16_t)((pModal->antCtrlCommon >> 12) & 0xF);
+ antWrites[5] = (uint16_t)((pModal->antCtrlCommon >> 8) & 0xF);
+ antWrites[6] = (uint16_t)((pModal->antCtrlCommon >> 4) & 0xF);
+ antWrites[7] = (uint16_t)(pModal->antCtrlCommon & 0xF);
+
+ offset_num = 8;
+
+ for (i = 0, j = offset_num; i < AR9287_MAX_CHAINS; i++) {
+ antWrites[j++] = (uint16_t)((pModal->antCtrlChain[i] >> 28) & 0xf);
+ antWrites[j++] = (uint16_t)((pModal->antCtrlChain[i] >> 10) & 0x3);
+ antWrites[j++] = (uint16_t)((pModal->antCtrlChain[i] >> 8) & 0x3);
+ antWrites[j++] = 0;
+ antWrites[j++] = (uint16_t)((pModal->antCtrlChain[i] >> 6) & 0x3);
+ antWrites[j++] = (uint16_t)((pModal->antCtrlChain[i] >> 4) & 0x3);
+ antWrites[j++] = (uint16_t)((pModal->antCtrlChain[i] >> 2) & 0x3);
+ antWrites[j++] = (uint16_t)(pModal->antCtrlChain[i] & 0x3);
+ }
+
+ OS_REG_WRITE(ah, AR_PHY_SWITCH_COM, pModal->antCtrlCommon);
+
+ for (i = 0; i < AR9287_MAX_CHAINS; i++) {
+ regChainOffset = i * 0x1000;
+
+ OS_REG_WRITE(ah, AR_PHY_SWITCH_CHAIN_0 + regChainOffset,
+ pModal->antCtrlChain[i]);
+
+ OS_REG_WRITE(ah, AR_PHY_TIMING_CTRL4_CHAIN(0) + regChainOffset,
+ (OS_REG_READ(ah, AR_PHY_TIMING_CTRL4_CHAIN(0) + regChainOffset)
+ & ~(AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF |
+ AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF)) |
+ SM(pModal->iqCalICh[i],
+ AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) |
+ SM(pModal->iqCalQCh[i],
+ AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF));
+
+ txRxAttenLocal = pModal->txRxAttenCh[i];
+
+ OS_REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
+ AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN,
+ pModal->bswMargin[i]);
+ OS_REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
+ AR_PHY_GAIN_2GHZ_XATTEN1_DB,
+ pModal->bswAtten[i]);
+ OS_REG_RMW_FIELD(ah, AR_PHY_RXGAIN + regChainOffset,
+ AR9280_PHY_RXGAIN_TXRX_ATTEN,
+ txRxAttenLocal);
+ OS_REG_RMW_FIELD(ah, AR_PHY_RXGAIN + regChainOffset,
+ AR9280_PHY_RXGAIN_TXRX_MARGIN,
+ pModal->rxTxMarginCh[i]);
+ }
+
+ if (IEEE80211_IS_CHAN_HT40(chan))
+ OS_REG_RMW_FIELD(ah, AR_PHY_SETTLING,
+ AR_PHY_SETTLING_SWITCH, pModal->swSettleHt40);
+ else
+ OS_REG_RMW_FIELD(ah, AR_PHY_SETTLING,
+ AR_PHY_SETTLING_SWITCH, pModal->switchSettling);
+
+ OS_REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
+ AR_PHY_DESIRED_SZ_ADC, pModal->adcDesiredSize);
+
+ OS_REG_WRITE(ah, AR_PHY_RF_CTL4,
+ SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAA_OFF)
+ | SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAB_OFF)
+ | SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAA_ON)
+ | SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAB_ON));
+
+ OS_REG_RMW_FIELD(ah, AR_PHY_RF_CTL3,
+ AR_PHY_TX_END_TO_A2_RX_ON, pModal->txEndToRxOn);
+
+ OS_REG_RMW_FIELD(ah, AR_PHY_CCA,
+ AR9280_PHY_CCA_THRESH62, pModal->thresh62);
+ OS_REG_RMW_FIELD(ah, AR_PHY_EXT_CCA0,
+ AR_PHY_EXT_CCA0_THRESH62, pModal->thresh62);
+
+ regval = OS_REG_READ(ah, AR9287_AN_RF2G3_CH0);
+ regval &= ~(AR9287_AN_RF2G3_DB1 |
+ AR9287_AN_RF2G3_DB2 |
+ AR9287_AN_RF2G3_OB_CCK |
+ AR9287_AN_RF2G3_OB_PSK |
+ AR9287_AN_RF2G3_OB_QAM |
+ AR9287_AN_RF2G3_OB_PAL_OFF);
+ regval |= (SM(pModal->db1, AR9287_AN_RF2G3_DB1) |
+ SM(pModal->db2, AR9287_AN_RF2G3_DB2) |
+ SM(pModal->ob_cck, AR9287_AN_RF2G3_OB_CCK) |
+ SM(pModal->ob_psk, AR9287_AN_RF2G3_OB_PSK) |
+ SM(pModal->ob_qam, AR9287_AN_RF2G3_OB_QAM) |
+ SM(pModal->ob_pal_off, AR9287_AN_RF2G3_OB_PAL_OFF));
+
+ OS_REG_WRITE(ah, AR9287_AN_RF2G3_CH0, regval);
+ OS_DELAY(100); /* analog write */
+
+ regval = OS_REG_READ(ah, AR9287_AN_RF2G3_CH1);
+ regval &= ~(AR9287_AN_RF2G3_DB1 |
+ AR9287_AN_RF2G3_DB2 |
+ AR9287_AN_RF2G3_OB_CCK |
+ AR9287_AN_RF2G3_OB_PSK |
+ AR9287_AN_RF2G3_OB_QAM |
+ AR9287_AN_RF2G3_OB_PAL_OFF);
+ regval |= (SM(pModal->db1, AR9287_AN_RF2G3_DB1) |
+ SM(pModal->db2, AR9287_AN_RF2G3_DB2) |
+ SM(pModal->ob_cck, AR9287_AN_RF2G3_OB_CCK) |
+ SM(pModal->ob_psk, AR9287_AN_RF2G3_OB_PSK) |
+ SM(pModal->ob_qam, AR9287_AN_RF2G3_OB_QAM) |
+ SM(pModal->ob_pal_off, AR9287_AN_RF2G3_OB_PAL_OFF));
+
+ OS_REG_WRITE(ah, AR9287_AN_RF2G3_CH1, regval);
+ OS_DELAY(100); /* analog write */
+
+ OS_REG_RMW_FIELD(ah, AR_PHY_RF_CTL2,
+ AR_PHY_TX_FRAME_TO_DATA_START, pModal->txFrameToDataStart);
+ OS_REG_RMW_FIELD(ah, AR_PHY_RF_CTL2,
+ AR_PHY_TX_FRAME_TO_PA_ON, pModal->txFrameToPaOn);
+
+ OS_A_REG_RMW_FIELD(ah, AR9287_AN_TOP2,
+ AR9287_AN_TOP2_XPABIAS_LVL, pModal->xpaBiasLvl);
+
+ return AH_TRUE;
+}
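The eight antWrites[] assignments at the top of ar9287SetBoardValues unpack antCtrlCommon one nibble at a time, most-significant first; an equivalent loop form (sketch only) would be:

	for (i = 0; i < 8; i++)
		antWrites[i] = (uint16_t)((pModal->antCtrlCommon >> (28 - 4 * i)) & 0xF);
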
diff --git a/sys/dev/ath/ath_hal/ar9002/ar9287_reset.h b/sys/dev/ath/ath_hal/ar9002/ar9287_reset.h
new file mode 100644
index 0000000..679fb8c
--- /dev/null
+++ b/sys/dev/ath/ath_hal/ar9002/ar9287_reset.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2010 Atheros Communications, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef __AR9287_RESET_H__
+#define __AR9287_RESET_H__
+
+extern HAL_BOOL ar9287SetTransmitPower(struct ath_hal *ah,
+ const struct ieee80211_channel *chan, uint16_t *rfXpdGain);
+extern HAL_BOOL ar9287SetBoardValues(struct ath_hal *ah,
+ const struct ieee80211_channel *chan);
+
+#endif /* __AR9287_RESET_H__ */
diff --git a/sys/dev/ath/ath_hal/ar9002/ar9287an.h b/sys/dev/ath/ath_hal/ar9002/ar9287an.h
new file mode 100644
index 0000000..ba7a92c
--- /dev/null
+++ b/sys/dev/ath/ath_hal/ar9002/ar9287an.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2010 Atheros Communications, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef __AR9287AN_H__
+#define __AR9287AN_H__
+
+#define AR9287_AN_RF2G3_CH0 0x7808
+#define AR9287_AN_RF2G3_CH1 0x785c
+#define AR9287_AN_RF2G3_DB1 0xE0000000
+#define AR9287_AN_RF2G3_DB1_S 29
+#define AR9287_AN_RF2G3_DB2 0x1C000000
+#define AR9287_AN_RF2G3_DB2_S 26
+#define AR9287_AN_RF2G3_OB_CCK 0x03800000
+#define AR9287_AN_RF2G3_OB_CCK_S 23
+#define AR9287_AN_RF2G3_OB_PSK 0x00700000
+#define AR9287_AN_RF2G3_OB_PSK_S 20
+#define AR9287_AN_RF2G3_OB_QAM 0x000E0000
+#define AR9287_AN_RF2G3_OB_QAM_S 17
+#define AR9287_AN_RF2G3_OB_PAL_OFF 0x0001C000
+#define AR9287_AN_RF2G3_OB_PAL_OFF_S 14
+
+#define AR9287_AN_TXPC0 0x7898
+#define AR9287_AN_TXPC0_TXPCMODE 0x0000C000
+#define AR9287_AN_TXPC0_TXPCMODE_S 14
+#define AR9287_AN_TXPC0_TXPCMODE_NORMAL 0
+#define AR9287_AN_TXPC0_TXPCMODE_TEST 1
+#define AR9287_AN_TXPC0_TXPCMODE_TEMPSENSE 2
+#define AR9287_AN_TXPC0_TXPCMODE_ATBTEST 3
+
+#define AR9287_AN_TOP2 0x78b4
+#define AR9287_AN_TOP2_XPABIAS_LVL 0xC0000000
+#define AR9287_AN_TOP2_XPABIAS_LVL_S 30
+
+#endif
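Each field here follows the HAL convention of a mask plus a matching _S shift so it can be used with the SM()/MS() pack/extract helpers, as in this sketch:

	/* Pack a field into a register image, then pull it back out. */
	uint32_t regval, lvl;
	regval = OS_REG_READ(ah, AR9287_AN_TOP2);
	regval = (regval & ~AR9287_AN_TOP2_XPABIAS_LVL) |
	    SM(pModal->xpaBiasLvl, AR9287_AN_TOP2_XPABIAS_LVL);
	lvl = MS(regval, AR9287_AN_TOP2_XPABIAS_LVL);
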
diff --git a/sys/dev/ath/ath_hal/ar9002/ar9287phy.h b/sys/dev/ath/ath_hal/ar9002/ar9287phy.h
new file mode 100644
index 0000000..8f28194
--- /dev/null
+++ b/sys/dev/ath/ath_hal/ar9002/ar9287phy.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2010 Atheros Communications, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef __AR9287PHY_H__
+#define __AR9287PHY_H__
+
+/* AR_PHY_CH0_TX_PWRCTRL11, AR_PHY_CH1_TX_PWRCTRL11 */
+#define AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP 0x0000FC00
+#define AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP_S 10
+
+#endif
diff --git a/sys/dev/ath/if_ath.c b/sys/dev/ath/if_ath.c
index 745a0d5..6c7b0e7 100644
--- a/sys/dev/ath/if_ath.c
+++ b/sys/dev/ath/if_ath.c
@@ -95,11 +95,13 @@ __FBSDID("$FreeBSD$");
#include <dev/ath/if_ath_tx.h>
#include <dev/ath/if_ath_sysctl.h>
#include <dev/ath/if_ath_keycache.h>
+#include <dev/ath/if_athdfs.h>
#ifdef ATH_TX99_DIAG
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif
+
/*
* ATH_BCBUF determines the number of vap's that can transmit
* beacons and also (currently) the number of vap's that can
@@ -199,6 +201,8 @@ static void ath_setcurmode(struct ath_softc *, enum ieee80211_phymode);
static void ath_announce(struct ath_softc *);
+static void ath_dfs_tasklet(void *, int);
+
#ifdef IEEE80211_SUPPORT_TDMA
static void ath_tdma_settimers(struct ath_softc *sc, u_int32_t nexttbtt,
u_int32_t bintval);
@@ -471,6 +475,16 @@ ath_attach(u_int16_t devid, struct ath_softc *sc)
goto bad2;
}
+ /* Attach DFS module */
+ if (! ath_dfs_attach(sc)) {
+ device_printf(sc->sc_dev, "%s: unable to attach DFS\n", __func__);
+ error = EIO;
+ goto bad2;
+ }
+
+ /* Start DFS processing tasklet */
+ TASK_INIT(&sc->sc_dfstask, 0, ath_dfs_tasklet, sc);
+
sc->sc_blinking = 0;
sc->sc_ledstate = 1;
sc->sc_ledon = 0; /* low true */
@@ -627,13 +641,22 @@ ath_attach(u_int16_t devid, struct ath_softc *sc)
| IEEE80211_HTC_AMPDU /* A-MPDU tx/rx */
| IEEE80211_HTC_AMSDU /* A-MSDU tx/rx */
| IEEE80211_HTCAP_MAXAMSDU_3839 /* max A-MSDU length */
- /* At the present time, the hardware doesn't support short-GI in 20mhz mode */
-#if 0
- | IEEE80211_HTCAP_SHORTGI20 /* short GI in 20MHz */
-#endif
| IEEE80211_HTCAP_SMPS_OFF; /* SM power save off */
;
+ /*
+ * Enable short-GI for HT20 only if the hardware
+ * advertises support.
+ * Notably, anything earlier than the AR9287 doesn't.
+ */
+ if ((ath_hal_getcapability(ah,
+ HAL_CAP_HT20_SGI, 0, NULL) == HAL_OK) &&
+ (wmodes & HAL_MODE_HT20)) {
+ device_printf(sc->sc_dev,
+ "[HT] enabling short-GI in 20MHz mode\n");
+ ic->ic_htcaps |= IEEE80211_HTCAP_SHORTGI20;
+ }
+
if (wmodes & HAL_MODE_HT40)
ic->ic_htcaps |= IEEE80211_HTCAP_CHWIDTH40
| IEEE80211_HTCAP_SHORTGI40;
@@ -762,6 +785,8 @@ ath_detach(struct ath_softc *sc)
sc->sc_tx99->detach(sc->sc_tx99);
#endif
ath_rate_detach(sc->sc_rc);
+
+ ath_dfs_detach(sc);
ath_desc_free(sc);
ath_tx_cleanup(sc);
ath_hal_detach(sc->sc_ah); /* NB: sets chip in full sleep */
@@ -975,6 +1000,21 @@ ath_vap_create(struct ieee80211com *ic,
avp->av_bmiss = vap->iv_bmiss;
vap->iv_bmiss = ath_bmiss_vap;
+ /* Set default parameters */
+
+ /*
+ * MACs earlier than the AR9300 series don't support
+ * a smaller MPDU density.
+ */
+ vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_8;
+ /*
+ * All NICs can handle the maximum size; however,
+ * AR5416-based MACs can only TX aggregates with RTS
+ * protection when the total aggregate size is <= 8k.
+ * For now that's enforced by the TX path.
+ */
+ vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
+
avp->av_bslot = -1;
if (needbeacon) {
/*
@@ -1221,6 +1261,10 @@ ath_resume(struct ath_softc *sc)
sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan,
AH_FALSE, &status);
ath_reset_keycache(sc);
+
+ /* Let DFS at it in case it's a DFS channel */
+ ath_dfs_radar_enable(sc, ic->ic_curchan);
+
if (sc->sc_resume_up) {
if (ic->ic_opmode == IEEE80211_M_STA) {
ath_init(sc);
@@ -1530,6 +1574,9 @@ ath_init(void *arg)
}
ath_chan_change(sc, ic->ic_curchan);
+ /* Let DFS at it in case it's a DFS channel */
+ ath_dfs_radar_enable(sc, ic->ic_curchan);
+
/*
* Likewise this is set during reset so update
* state cached in the driver.
@@ -1675,6 +1722,10 @@ ath_reset(struct ifnet *ifp)
if_printf(ifp, "%s: unable to reset hardware; hal status %u\n",
__func__, status);
sc->sc_diversity = ath_hal_getdiversity(ah);
+
+ /* Let DFS at it in case it's a DFS channel */
+ ath_dfs_radar_enable(sc, ic->ic_curchan);
+
if (ath_startrecv(sc) != 0) /* restart recv */
if_printf(ifp, "%s: unable to start recv logic\n", __func__);
/*
@@ -1966,6 +2017,10 @@ ath_calcrxfilter(struct ath_softc *sc)
if (ic->ic_opmode == IEEE80211_M_MONITOR)
rfilt |= HAL_RX_FILTER_CONTROL;
+ if (sc->sc_dodfs) {
+ rfilt |= HAL_RX_FILTER_PHYRADAR;
+ }
+
/*
* Enable RX of compressed BAR frames only when doing
* 802.11n. Required for A-MPDU.
@@ -3417,6 +3472,17 @@ ath_rx_proc(void *arg, int npending)
sc->sc_stats.ast_rx_fifoerr++;
if (rs->rs_status & HAL_RXERR_PHY) {
sc->sc_stats.ast_rx_phyerr++;
+ /* Process DFS radar events */
+ if ((rs->rs_phyerr == HAL_PHYERR_RADAR) ||
+ (rs->rs_phyerr == HAL_PHYERR_FALSE_RADAR_EXT)) {
+ /* Since we're touching the frame data, sync it */
+ bus_dmamap_sync(sc->sc_dmat,
+ bf->bf_dmamap,
+ BUS_DMASYNC_POSTREAD);
+ /* Now pass it to the radar processing code */
+ ath_dfs_process_phy_err(sc, mtod(m, char *), tsf, rs);
+ }
+
/* Be suitably paranoid about receiving phy errors out of the stats array bounds */
if (rs->rs_phyerr < 64)
sc->sc_stats.ast_rx_phy[rs->rs_phyerr]++;
@@ -3658,6 +3724,10 @@ rx_next:
if (ngood)
sc->sc_lastrx = tsf;
+ /* Queue DFS tasklet if needed */
+ if (ath_dfs_tasklet_needed(sc, sc->sc_curchan))
+ taskqueue_enqueue(sc->sc_tq, &sc->sc_dfstask);
+
if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
#ifdef IEEE80211_SUPPORT_SUPERG
ieee80211_ff_age_all(ic, 100);
@@ -4375,6 +4445,9 @@ ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan)
}
sc->sc_diversity = ath_hal_getdiversity(ah);
+ /* Let DFS at it in case it's a DFS channel */
+ ath_dfs_radar_enable(sc, ic->ic_curchan);
+
/*
* Re-enable rx framework.
*/
@@ -5641,5 +5714,23 @@ ath_tdma_beacon_send(struct ath_softc *sc, struct ieee80211vap *vap)
}
#endif /* IEEE80211_SUPPORT_TDMA */
+static void
+ath_dfs_tasklet(void *p, int npending)
+{
+ struct ath_softc *sc = (struct ath_softc *) p;
+ struct ifnet *ifp = sc->sc_ifp;
+ struct ieee80211com *ic = ifp->if_l2com;
+
+ /*
+ * If previous processing has found a radar event,
+ * signal this to the net80211 layer to begin DFS
+ * processing.
+ */
+ if (ath_dfs_process_radar_event(sc, sc->sc_curchan)) {
+ /* DFS event found, initiate channel change */
+ ieee80211_dfs_notify_radar(ic, sc->sc_curchan);
+ }
+}
+
MODULE_VERSION(if_ath, 1);
MODULE_DEPEND(if_ath, wlan, 1, 1, 1); /* 802.11 media layer */
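The DFS plumbing added to if_ath.c follows the usual deferred-work split: the RX path only detects and flags radar PHY errors, and the heavier event processing runs later on the driver taskqueue. The two halves, as wired up in the hunks above:

	/* At attach time: */
	TASK_INIT(&sc->sc_dfstask, 0, ath_dfs_tasklet, sc);

	/* From the RX completion path: */
	if (ath_dfs_tasklet_needed(sc, sc->sc_curchan))
		taskqueue_enqueue(sc->sc_tq, &sc->sc_dfstask);
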
diff --git a/sys/dev/ath/if_ath_ahb.c b/sys/dev/ath/if_ath_ahb.c
index 33b8b92..a2bca05 100644
--- a/sys/dev/ath/if_ath_ahb.c
+++ b/sys/dev/ath/if_ath_ahb.c
@@ -123,7 +123,7 @@ ath_ahb_attach(device_t dev)
device_printf(sc->sc_dev, "eeprom @ %p\n", (void *) eepromaddr);
psc->sc_eeprom = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, (uintptr_t) eepromaddr,
(uintptr_t) eepromaddr + (uintptr_t) ((ATH_EEPROM_DATA_SIZE * 2) - 1), 0, RF_ACTIVE);
- if (psc->sc_sr == NULL) {
+ if (psc->sc_eeprom == NULL) {
device_printf(dev, "cannot map eeprom space\n");
goto bad0;
}
@@ -139,6 +139,10 @@ ath_ahb_attach(device_t dev)
/* Copy the EEPROM data out */
sc->sc_eepromdata = malloc(ATH_EEPROM_DATA_SIZE * 2, M_TEMP, M_NOWAIT | M_ZERO);
+ if (sc->sc_eepromdata == NULL) {
+ device_printf(dev, "cannot allocate memory for eeprom data\n");
+ goto bad1;
+ }
device_printf(sc->sc_dev, "eeprom data @ %p\n", (void *) rman_get_bushandle(psc->sc_eeprom));
/* XXX why doesn't this work? -adrian */
#if 0
diff --git a/sys/dev/ath/if_ath_tx_ht.c b/sys/dev/ath/if_ath_tx_ht.c
index f4ade31..348a1499 100644
--- a/sys/dev/ath/if_ath_tx_ht.c
+++ b/sys/dev/ath/if_ath_tx_ht.c
@@ -136,15 +136,23 @@ ath_rateseries_setup(struct ath_softc *sc, struct ieee80211_node *ni,
*/
if (ni->ni_chw == 40)
series[i].RateFlags |= HAL_RATESERIES_2040;
-#if 0
+
/*
- * The hardware only supports short-gi in 40mhz mode -
- * if later hardware supports it in 20mhz mode, be sure
- * to add the relevant check here.
+ * Set short-GI only if the node has advertised it,
+ * the channel width is suitable, and we support it.
+ * We don't currently have a "negotiated" set of bits -
+ * ni_htcap is what the remote end sends, not what this
+ * node is capable of.
*/
- if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40)
+ if (ni->ni_chw == 40 &&
+ ic->ic_htcaps & IEEE80211_HTCAP_SHORTGI40 &&
+ ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40)
+ series[i].RateFlags |= HAL_RATESERIES_HALFGI;
+
+ if (ni->ni_chw == 20 &&
+ ic->ic_htcaps & IEEE80211_HTCAP_SHORTGI20 &&
+ ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20)
series[i].RateFlags |= HAL_RATESERIES_HALFGI;
-#endif
series[i].Rate = rt->info[rix[i]].rateCode;
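The short-GI checks above take the intersection of the local capabilities (ic_htcaps) and what the peer advertised (ni_htcap); a hypothetical helper expressing that test would look like:

	/* Hypothetical: true only if both sides advertise the HT capability. */
	static __inline int
	ath_ht_cap_agreed(uint32_t ic_htcaps, uint32_t ni_htcap, uint32_t cap)
	{
		return ((ic_htcaps & cap) != 0 && (ni_htcap & cap) != 0);
	}
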
diff --git a/sys/dev/ath/if_athdfs.h b/sys/dev/ath/if_athdfs.h
new file mode 100644
index 0000000..88ee7fc
--- /dev/null
+++ b/sys/dev/ath/if_athdfs.h
@@ -0,0 +1,47 @@
+/*-
+ * Copyright (c) 2011 Adrian Chadd, Xenion Pty Ltd
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
+ * redistribution must be conditioned upon including a substantially
+ * similar Disclaimer requirement for further binary redistribution.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
+ * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $FreeBSD$
+ */
+#ifndef __IF_ATHDFS_H__
+#define __IF_ATHDFS_H__
+
+extern int ath_dfs_attach(struct ath_softc *sc);
+extern int ath_dfs_detach(struct ath_softc *sc);
+extern void ath_dfs_radar_enable(struct ath_softc *,
+ struct ieee80211_channel *chan);
+extern void ath_dfs_process_phy_err(struct ath_softc *sc, const char *buf,
+ uint64_t tsf, struct ath_rx_status *rxstat);
+extern int ath_dfs_process_radar_event(struct ath_softc *sc,
+ struct ieee80211_channel *chan);
+extern int ath_dfs_tasklet_needed(struct ath_softc *sc,
+ struct ieee80211_channel *chan);
+extern int ath_ioctl_phyerr(struct ath_softc *sc, struct ath_diag *ad);
+extern int ath_dfs_get_thresholds(struct ath_softc *sc, HAL_PHYERR_PARAM *param);
+
+#endif /* __IF_ATHDFS_H__ */
diff --git a/sys/dev/ath/if_athvar.h b/sys/dev/ath/if_athvar.h
index 26a50bc..3bc8522 100644
--- a/sys/dev/ath/if_athvar.h
+++ b/sys/dev/ath/if_athvar.h
@@ -357,6 +357,11 @@ struct ath_softc {
uint16_t *sc_eepromdata; /* Local eeprom data, if AR9100 */
int sc_txchainmask; /* currently configured TX chainmask */
int sc_rxchainmask; /* currently configured RX chainmask */
+
+ /* DFS related state */
+ void *sc_dfs; /* Used by an optional DFS module */
+ int sc_dodfs; /* Whether to enable DFS rx filter bits */
+ struct task sc_dfstask; /* DFS processing task */
};
#define ATH_LOCK_INIT(_sc) \
@@ -634,11 +639,11 @@ void ath_intr(void *);
#define ath_hal_settpcts(_ah, _tpcts) \
ath_hal_setcapability(_ah, HAL_CAP_TPC_CTS, 0, _tpcts, NULL)
#define ath_hal_hasintmit(_ah) \
- (ath_hal_getcapability(_ah, HAL_CAP_INTMIT, 0, NULL) == HAL_OK)
+ (ath_hal_getcapability(_ah, HAL_CAP_INTMIT, HAL_CAP_INTMIT_PRESENT, NULL) == HAL_OK)
#define ath_hal_getintmit(_ah) \
- (ath_hal_getcapability(_ah, HAL_CAP_INTMIT, 1, NULL) == HAL_OK)
+ (ath_hal_getcapability(_ah, HAL_CAP_INTMIT, HAL_CAP_INTMIT_ENABLE, NULL) == HAL_OK)
#define ath_hal_setintmit(_ah, _v) \
- ath_hal_setcapability(_ah, HAL_CAP_INTMIT, 1, _v, NULL)
+ ath_hal_setcapability(_ah, HAL_CAP_INTMIT, HAL_CAP_INTMIT_ENABLE, _v, NULL)
#define ath_hal_getchannoise(_ah, _c) \
((*(_ah)->ah_getChanNoise)((_ah), (_c)))
#define ath_hal_getrxchainmask(_ah, _prxchainmask) \
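With the INTMIT selectors spelled out above, a typical caller first probes whether the hardware offers interference mitigation and then toggles it; a minimal usage sketch:

	/* Enable interference mitigation if present and not already on. */
	if (ath_hal_hasintmit(ah) && !ath_hal_getintmit(ah))
		ath_hal_setintmit(ah, 1);
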
@@ -694,6 +699,19 @@ void ath_intr(void *);
#define ath_hal_set11nburstduration(_ah, _ds, _dur) \
((*(_ah)->ah_set11nBurstDuration)((_ah), (_ds), (_dur)))
+/*
+ * This is badly named; you need to set the correct parameters
+ * to begin receiving useful radar events, and even then
+ * it doesn't "enable" DFS. See the ath_dfs/null/ module for
+ * more information.
+ */
+#define ath_hal_enabledfs(_ah, _param) \
+ ((*(_ah)->ah_enableDfs)((_ah), (_param)))
+#define ath_hal_getdfsthresh(_ah, _param) \
+ ((*(_ah)->ah_getDfsThresh)((_ah), (_param)))
+#define ath_hal_procradarevent(_ah, _rxs, _fulltsf, _buf, _event) \
+ ((*(_ah)->ah_procRadarEvent)((_ah), (_rxs), (_fulltsf), (_buf), (_event)))
+
#define ath_hal_gpioCfgOutput(_ah, _gpio, _type) \
((*(_ah)->ah_gpioCfgOutput)((_ah), (_gpio), (_type)))
#define ath_hal_gpioset(_ah, _gpio, _b) \
diff --git a/sys/dev/atkbdc/atkbd.c b/sys/dev/atkbdc/atkbd.c
index 643554d..f2f5d74 100644
--- a/sys/dev/atkbdc/atkbd.c
+++ b/sys/dev/atkbdc/atkbd.c
@@ -1097,10 +1097,17 @@ get_typematic(keyboard_t *kbd)
x86regs_t regs;
uint8_t *p;
- if (!(kbd->kb_config & KB_CONF_PROBE_TYPEMATIC))
- return (ENODEV);
-
- if (x86bios_get_intr(0x15) == 0 || x86bios_get_intr(0x16) == 0)
+ /*
+ * The traditional entry points for int 0x15 and int 0x16 are
+ * fixed and later BIOSes follow them. The (U)EFI CSM
+ * specification also mandates these fixed entry points.
+ *
+ * Validate the entry points here before we proceed further.
+ * It's known that some recent laptops do not have the same
+ * entry points and hang on boot if we call them.
+ */
+ if (x86bios_get_intr(0x15) != 0xf000f859 ||
+ x86bios_get_intr(0x16) != 0xf000e82e)
return (ENODEV);
/* Is BIOS system configuration table supported? */
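The two magic constants are real-mode segment:offset vectors packed into 32 bits, so 0xf000f859 is F000:F859 (int 0x15) and 0xf000e82e is F000:E82E (int 0x16); unpacked, purely for illustration:

	uint32_t vec = x86bios_get_intr(0x15);
	uint16_t seg = (uint16_t)(vec >> 16);		/* 0xF000 */
	uint16_t off = (uint16_t)(vec & 0xffff);	/* 0xF859 */
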
diff --git a/sys/dev/atkbdc/atkbdreg.h b/sys/dev/atkbdc/atkbdreg.h
index 3d54b4d..cf7ee6b 100644
--- a/sys/dev/atkbdc/atkbdreg.h
+++ b/sys/dev/atkbdc/atkbdreg.h
@@ -36,7 +36,6 @@
#define KB_CONF_NO_RESET (1 << 1) /* don't reset the keyboard */
#define KB_CONF_ALT_SCANCODESET (1 << 2) /* assume the XT type keyboard */
#define KB_CONF_NO_PROBE_TEST (1 << 3) /* don't test keyboard during probe */
-#define KB_CONF_PROBE_TYPEMATIC (1 << 4) /* probe keyboard typematic */
#ifdef _KERNEL
diff --git a/sys/dev/bxe/bxe_debug.h b/sys/dev/bxe/bxe_debug.h
index 99cbe5d..baf0e32 100644
--- a/sys/dev/bxe/bxe_debug.h
+++ b/sys/dev/bxe/bxe_debug.h
@@ -41,21 +41,22 @@ extern uint32_t bxe_debug;
* Debugging macros and definitions.
*/
-#define BXE_CP_LOAD 0x00000001
-#define BXE_CP_SEND 0x00000002
-#define BXE_CP_RECV 0x00000004
-#define BXE_CP_INTR 0x00000008
-#define BXE_CP_UNLOAD 0x00000010
-#define BXE_CP_RESET 0x00000020
-#define BXE_CP_IOCTL 0x00000040
-#define BXE_CP_STATS 0x00000080
-#define BXE_CP_MISC 0x00000100
-#define BXE_CP_PHY 0x00000200
-#define BXE_CP_RAMROD 0x00000400
-#define BXE_CP_NVRAM 0x00000800
-#define BXE_CP_REGS 0x00001000
-#define BXE_CP_ALL 0x00FFFFFF
-#define BXE_CP_MASK 0x00FFFFFF
+#define BXE_CP_LOAD 0x00000001
+#define BXE_CP_SEND 0x00000002
+#define BXE_CP_RECV 0x00000004
+#define BXE_CP_INTR 0x00000008
+#define BXE_CP_UNLOAD 0x00000010
+#define BXE_CP_RESET 0x00000020
+#define BXE_CP_IOCTL 0x00000040
+#define BXE_CP_STATS 0x00000080
+#define BXE_CP_MISC 0x00000100
+#define BXE_CP_PHY 0x00000200
+#define BXE_CP_RAMROD 0x00000400
+#define BXE_CP_NVRAM 0x00000800
+#define BXE_CP_REGS 0x00001000
+#define BXE_CP_TPA 0x00002000
+#define BXE_CP_ALL 0x00FFFFFF
+#define BXE_CP_MASK 0x00FFFFFF
#define BXE_LEVEL_FATAL 0x00000000
#define BXE_LEVEL_WARN 0x01000000
@@ -144,12 +145,18 @@ extern uint32_t bxe_debug;
#define BXE_EXTREME_REGS (BXE_CP_REGS | BXE_LEVEL_EXTREME)
#define BXE_INSANE_REGS (BXE_CP_REGS | BXE_LEVEL_INSANE)
-#define BXE_FATAL (BXE_CP_ALL | BXE_LEVEL_FATAL)
-#define BXE_WARN (BXE_CP_ALL | BXE_LEVEL_WARN)
-#define BXE_INFO (BXE_CP_ALL | BXE_LEVEL_INFO)
-#define BXE_VERBOSE (BXE_CP_ALL | BXE_LEVEL_VERBOSE)
-#define BXE_EXTREME (BXE_CP_ALL | BXE_LEVEL_EXTREME)
-#define BXE_INSANE (BXE_CP_ALL | BXE_LEVEL_INSANE)
+#define BXE_WARN_TPA (BXE_CP_TPA | BXE_LEVEL_WARN)
+#define BXE_INFO_TPA (BXE_CP_TPA | BXE_LEVEL_INFO)
+#define BXE_VERBOSE_TPA (BXE_CP_TPA | BXE_LEVEL_VERBOSE)
+#define BXE_EXTREME_TPA (BXE_CP_TPA | BXE_LEVEL_EXTREME)
+#define BXE_INSANE_TPA (BXE_CP_TPA | BXE_LEVEL_INSANE)
+
+#define BXE_FATAL (BXE_CP_ALL | BXE_LEVEL_FATAL)
+#define BXE_WARN (BXE_CP_ALL | BXE_LEVEL_WARN)
+#define BXE_INFO (BXE_CP_ALL | BXE_LEVEL_INFO)
+#define BXE_VERBOSE (BXE_CP_ALL | BXE_LEVEL_VERBOSE)
+#define BXE_EXTREME (BXE_CP_ALL | BXE_LEVEL_EXTREME)
+#define BXE_INSANE (BXE_CP_ALL | BXE_LEVEL_INSANE)
#define BXE_CODE_PATH(cp) ((cp & BXE_CP_MASK) & bxe_debug)
#define BXE_MSG_LEVEL(lv) ((lv & BXE_LEVEL_MASK) <= (bxe_debug & BXE_LEVEL_MASK))
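A message class such as the new BXE_VERBOSE_TPA is just BXE_CP_TPA OR'd with a level; the two macros above split it back apart when deciding whether a message is visible, roughly like this sketch:

	/* Printed only when bxe_debug enables the TPA code path at
	 * BXE_LEVEL_VERBOSE or chattier. */
	if (BXE_CODE_PATH(BXE_VERBOSE_TPA) && BXE_MSG_LEVEL(BXE_VERBOSE_TPA))
		printf("TPA debug message\n");
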
diff --git a/sys/dev/bxe/bxe_link.c b/sys/dev/bxe/bxe_link.c
index 6ee29a8..8adc87e 100644
--- a/sys/dev/bxe/bxe_link.c
+++ b/sys/dev/bxe/bxe_link.c
@@ -1168,15 +1168,17 @@ bxe_set_parallel_detection(struct link_params *params, uint8_t phy_flags)
control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
else
control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
- DBPRINT(sc, 1, "params->speed_cap_mask = 0x%x, control2 = 0x%x\n",
- params->speed_cap_mask, control2);
+
+ DBPRINT(sc, BXE_VERBOSE_PHY, "%s(): params->speed_cap_mask = 0x%x, "
+ "control2 = 0x%x\n", __FUNCTION__, params->speed_cap_mask, control2);
+
CL45_WR_OVER_CL22(sc, params->port, params->phy_addr,
MDIO_REG_BANK_SERDES_DIGITAL, MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
control2);
if ((phy_flags & PHY_XGXS_FLAG) && (params->speed_cap_mask &
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
- DBPRINT(sc, BXE_INFO, "XGXS\n");
+ DBPRINT(sc, BXE_VERBOSE_PHY, "%s(): XGXS\n", __FUNCTION__);
CL45_WR_OVER_CL22(sc, params->port, params->phy_addr,
MDIO_REG_BANK_10G_PARALLEL_DETECT,
@@ -1688,7 +1690,9 @@ bxe_flow_ctrl_resolve(struct link_params *params, struct link_vars *vars,
}
bxe_pause_resolve(vars, pause_result);
}
- DBPRINT(sc, BXE_INFO, "flow_ctrl 0x%x\n", vars->flow_ctrl);
+
+ DBPRINT(sc, BXE_VERBOSE_PHY, "%s(): flow_ctrl 0x%x\n",
+ __FUNCTION__, vars->flow_ctrl);
}
static void
@@ -1698,13 +1702,16 @@ bxe_check_fallback_to_cl37(struct link_params *params)
uint16_t rx_status, ustat_val, cl37_fsm_recieved;
sc = params->sc;
- DBPRINT(sc, BXE_INFO, "bxe_check_fallback_to_cl37\n");
+
+ DBPRINT(sc, BXE_VERBOSE_PHY, "%s(): IEEE 802.3 Clause 37 Fallback\n",
+ __FUNCTION__);
+
CL45_RD_OVER_CL22(sc, params->port, params->phy_addr, MDIO_REG_BANK_RX0,
MDIO_RX0_RX_STATUS, &rx_status);
if ((rx_status & MDIO_RX0_RX_STATUS_SIGDET) !=
(MDIO_RX0_RX_STATUS_SIGDET)) {
DBPRINT(sc, BXE_VERBOSE_PHY,
- "Signal is not detected. Restoring CL73."
+ "No signal detected. Restoring CL73. "
"rx_status(0x80b0) = 0x%x\n", rx_status);
CL45_WR_OVER_CL22(sc, params->port, params->phy_addr,
MDIO_REG_BANK_CL73_IEEEB0, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
@@ -1738,7 +1745,9 @@ bxe_check_fallback_to_cl37(struct link_params *params)
CL45_WR_OVER_CL22(sc, params->port, params->phy_addr,
MDIO_REG_BANK_CL73_IEEEB0, MDIO_CL73_IEEEB0_CL73_AN_CONTROL, 0);
bxe_restart_autoneg(params, 0);
- DBPRINT(sc, BXE_INFO, "Disabling CL73, and restarting CL37 autoneg\n");
+
+ DBPRINT(sc, BXE_INFO, "%s(): Disabling CL73 and restarting CL37 "
+ "autoneg\n", __FUNCTION__);
}
static void
@@ -3391,7 +3400,8 @@ bxe_init_internal_phy(struct link_params *params, struct link_vars *vars,
((XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
params->loopback_mode == LOOPBACK_EXT)) {
- DBPRINT(sc, BXE_INFO, "not SGMII, no AN\n");
+ DBPRINT(sc, BXE_VERBOSE_PHY, "%s(): Not SGMII, no AN\n",
+ __FUNCTION__);
/* Disable autoneg. */
bxe_set_autoneg(params, vars, 0);
@@ -5338,9 +5348,6 @@ bxe_set_led(struct link_params *params, uint8_t mode, uint32_t speed)
emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
- DBPRINT(sc, BXE_INFO, "bxe_set_led: port %x, mode %d\n", port, mode);
- DBPRINT(sc, BXE_VERBOSE_PHY, "speed 0x%x, hw_led_mode 0x%x\n", speed,
- hw_led_mode);
switch (mode) {
case LED_MODE_OFF:
REG_WR(sc, NIG_REG_LED_10G_P0 + port * 4, 0);
@@ -5382,7 +5389,7 @@ bxe_set_led(struct link_params *params, uint8_t mode, uint32_t speed)
default:
rc = -EINVAL;
DBPRINT(sc, BXE_VERBOSE_PHY,
- "bxe_set_led: Invalid led mode %d\n", mode);
+ "%s(): Invalid led mode (%d)!\n", __FUNCTION__, mode);
break;
}
return (rc);
@@ -5635,7 +5642,10 @@ bxe_link_reset(struct link_params *params, struct link_vars *vars,
ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
val = REG_RD(sc, params->shmem_base + offsetof(struct shmem_region,
dev_info.port_feature_config[params->port].config));
- DBPRINT(sc, BXE_INFO, "Resetting the link of port %d\n", port);
+
+ DBPRINT(sc, BXE_INFO, "%s(): Resetting port %d link.\n",
+ __FUNCTION__, port);
+
/* Disable attentions. */
vars->link_status = 0;
bxe_update_mng(params, vars->link_status);
diff --git a/sys/dev/bxe/if_bxe.c b/sys/dev/bxe/if_bxe.c
index cf0a40e..e7534f4 100644
--- a/sys/dev/bxe/if_bxe.c
+++ b/sys/dev/bxe/if_bxe.c
@@ -70,7 +70,6 @@ __FBSDID("$FreeBSD$");
#ifdef BXE_DEBUG
uint32_t bxe_debug = BXE_WARN;
-
/* 0 = Never */
/* 1 = 1 in 2,147,483,648 */
/* 256 = 1 in 8,388,608 */
@@ -84,12 +83,9 @@ uint32_t bxe_debug = BXE_WARN;
/* Controls how often to simulate an mbuf allocation failure. */
int bxe_debug_mbuf_allocation_failure = 0;
-/* Controls how often to simulate a DMA mapping failure. */
+/* Controls how often to simulate a DMA mapping failure. */
int bxe_debug_dma_map_addr_failure = 0;
-/* Controls how often to received frame error. */
-int bxe_debug_received_frame_error = 0;
-
/* Controls how often to simulate a bootcode failure. */
int bxe_debug_bootcode_running_failure = 0;
#endif
@@ -103,7 +99,7 @@ int bxe_debug_bootcode_running_failure = 0;
/* BXE Build Time Options */
/* #define BXE_NVRAM_WRITE 1 */
-#define USE_DMAE 1
+#define BXE_USE_DMAE 1
/*
* PCI Device ID Table
@@ -132,14 +128,17 @@ static int bxe_attach(device_t);
static int bxe_detach(device_t);
static int bxe_shutdown(device_t);
-static void bxe_set_tunables(struct bxe_softc *);
+/*
+ * Driver local functions.
+ */
+static void bxe_tunables_set(struct bxe_softc *);
static void bxe_print_adapter_info(struct bxe_softc *);
static void bxe_probe_pci_caps(struct bxe_softc *);
static void bxe_link_settings_supported(struct bxe_softc *, uint32_t);
static void bxe_link_settings_requested(struct bxe_softc *);
-static int bxe_get_function_hwinfo(struct bxe_softc *);
-static void bxe_get_port_hwinfo(struct bxe_softc *);
-static void bxe_get_common_hwinfo(struct bxe_softc *);
+static int bxe_hwinfo_function_get(struct bxe_softc *);
+static int bxe_hwinfo_port_get(struct bxe_softc *);
+static int bxe_hwinfo_common_get(struct bxe_softc *);
static void bxe_undi_unload(struct bxe_softc *);
static int bxe_setup_leading(struct bxe_softc *);
static int bxe_stop_leading(struct bxe_softc *);
@@ -241,8 +240,8 @@ static int bxe_tx_encap(struct bxe_fastpath *, struct mbuf **);
static void bxe_tx_start(struct ifnet *);
static void bxe_tx_start_locked(struct ifnet *, struct bxe_fastpath *);
static int bxe_tx_mq_start(struct ifnet *, struct mbuf *);
-static int bxe_tx_mq_start_locked(struct ifnet *, struct bxe_fastpath *,
- struct mbuf *);
+static int bxe_tx_mq_start_locked(struct ifnet *,
+ struct bxe_fastpath *, struct mbuf *);
static void bxe_mq_flush(struct ifnet *ifp);
static int bxe_ioctl(struct ifnet *, u_long, caddr_t);
static __inline int bxe_has_rx_work(struct bxe_fastpath *);
@@ -254,33 +253,34 @@ static void bxe_intr_sp(void *);
static void bxe_task_fp(void *, int);
static void bxe_intr_fp(void *);
static void bxe_zero_sb(struct bxe_softc *, int);
-static void bxe_init_sb(struct bxe_softc *, struct host_status_block *,
- bus_addr_t, int);
+static void bxe_init_sb(struct bxe_softc *,
+ struct host_status_block *, bus_addr_t, int);
static void bxe_zero_def_sb(struct bxe_softc *);
-static void bxe_init_def_sb(struct bxe_softc *, struct host_def_status_block *,
- bus_addr_t, int);
+static void bxe_init_def_sb(struct bxe_softc *,
+ struct host_def_status_block *, bus_addr_t, int);
static void bxe_update_coalesce(struct bxe_softc *);
static __inline void bxe_update_rx_prod(struct bxe_softc *,
- struct bxe_fastpath *, uint16_t, uint16_t, uint16_t);
+ struct bxe_fastpath *, uint16_t, uint16_t, uint16_t);
static void bxe_clear_sge_mask_next_elems(struct bxe_fastpath *);
static __inline void bxe_init_sge_ring_bit_mask(struct bxe_fastpath *);
-static __inline void bxe_free_tpa_pool(struct bxe_fastpath *, int);
-static __inline void bxe_free_rx_sge(struct bxe_softc *, struct bxe_fastpath *,
- uint16_t);
-static __inline void bxe_free_rx_sge_range(struct bxe_softc *,
- struct bxe_fastpath *, int);
-static struct mbuf *bxe_alloc_mbuf(struct bxe_fastpath *, int);
-static int bxe_map_mbuf(struct bxe_fastpath *, struct mbuf *, bus_dma_tag_t,
- bus_dmamap_t, bus_dma_segment_t *);
-static struct mbuf *bxe_alloc_tpa_mbuf(struct bxe_fastpath *, int, int);
-static void bxe_alloc_mutexes(struct bxe_softc *);
-static void bxe_free_mutexes(struct bxe_softc *);
-static int bxe_alloc_rx_sge(struct bxe_softc *, struct bxe_fastpath *,
- uint16_t);
-static void bxe_init_rx_chains(struct bxe_softc *);
+static int bxe_alloc_tpa_mbuf(struct bxe_fastpath *, int);
+static int bxe_fill_tpa_pool(struct bxe_fastpath *);
+static void bxe_free_tpa_pool(struct bxe_fastpath *);
+
+static int bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *, uint16_t);
+static int bxe_fill_sg_chain(struct bxe_fastpath *);
+static void bxe_free_sg_chain(struct bxe_fastpath *);
+
+static int bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *, uint16_t);
+static int bxe_fill_rx_bd_chain(struct bxe_fastpath *);
+static void bxe_free_rx_bd_chain(struct bxe_fastpath *);
+
+static void bxe_mutexes_alloc(struct bxe_softc *);
+static void bxe_mutexes_free(struct bxe_softc *);
+static void bxe_clear_rx_chains(struct bxe_softc *);
+static int bxe_init_rx_chains(struct bxe_softc *);
+static void bxe_clear_tx_chains(struct bxe_softc *);
static void bxe_init_tx_chains(struct bxe_softc *);
-static void bxe_free_rx_chains(struct bxe_softc *);
-static void bxe_free_tx_chains(struct bxe_softc *);
static void bxe_init_sp_ring(struct bxe_softc *);
static void bxe_init_context(struct bxe_softc *);
static void bxe_init_ind_table(struct bxe_softc *);
@@ -291,8 +291,7 @@ static void bxe_init_internal_port(struct bxe_softc *);
static void bxe_init_internal_func(struct bxe_softc *);
static void bxe_init_internal(struct bxe_softc *, uint32_t);
-static void bxe_init_nic(struct bxe_softc *, uint32_t);
-static int bxe_gunzip_init(struct bxe_softc *);
+static int bxe_init_nic(struct bxe_softc *, uint32_t);
static void bxe_lb_pckt(struct bxe_softc *);
static int bxe_int_mem_test(struct bxe_softc *);
static void bxe_enable_blocks_attention (struct bxe_softc *);
@@ -304,13 +303,9 @@ static void bxe_ilt_wr(struct bxe_softc *, uint32_t, bus_addr_t);
static int bxe_init_func(struct bxe_softc *);
static int bxe_init_hw(struct bxe_softc *, uint32_t);
static int bxe_fw_command(struct bxe_softc *, uint32_t);
-static void bxe_dma_free(struct bxe_softc *);
-static void bxe_dmamem_free(struct bxe_softc *, bus_dma_tag_t, caddr_t,
- bus_dmamap_t);
+static void bxe_host_structures_free(struct bxe_softc *);
static void bxe_dma_map_addr(void *, bus_dma_segment_t *, int, int);
-static int bxe_dma_alloc(device_t);
-static int bxe_dmamem_alloc(struct bxe_softc *, bus_dma_tag_t, bus_dmamap_t,
- void *, uint32_t, bus_addr_t *);
+static int bxe_host_structures_alloc(device_t);
static void bxe_set_mac_addr_e1(struct bxe_softc *, int);
static void bxe_set_mac_addr_e1h(struct bxe_softc *, int);
static void bxe_set_rx_mode(struct bxe_softc *);
@@ -330,15 +325,12 @@ static void bxe_tpa_stop(struct bxe_softc *, struct bxe_fastpath *, uint16_t,
int, int, union eth_rx_cqe *, uint16_t);
static void bxe_rxeof(struct bxe_fastpath *);
static void bxe_txeof(struct bxe_fastpath *);
-static int bxe_get_buf(struct bxe_fastpath *, struct mbuf *, uint16_t);
static int bxe_watchdog(struct bxe_fastpath *fp);
-static int bxe_change_mtu(struct bxe_softc *, int);
static void bxe_tick(void *);
static void bxe_add_sysctls(struct bxe_softc *);
-static void bxe_gunzip_end(struct bxe_softc *);
-static void bxe_write_dmae_phys_len(struct bxe_softc *, bus_addr_t, uint32_t,
- uint32_t);
+static void bxe_write_dmae_phys_len(struct bxe_softc *,
+ bus_addr_t, uint32_t, uint32_t);
void bxe_write_dmae(struct bxe_softc *, bus_addr_t, uint32_t, uint32_t);
void bxe_read_dmae(struct bxe_softc *, uint32_t, uint32_t);
@@ -360,32 +352,33 @@ static int bxe_sysctl_dump_rx_bd_chain(SYSCTL_HANDLER_ARGS);
static int bxe_sysctl_dump_tx_chain(SYSCTL_HANDLER_ARGS);
static int bxe_sysctl_reg_read(SYSCTL_HANDLER_ARGS);
static int bxe_sysctl_breakpoint(SYSCTL_HANDLER_ARGS);
-static void bxe_validate_rx_packet(struct bxe_fastpath *, uint16_t,
- union eth_rx_cqe *, struct mbuf *);
+static __noinline void bxe_validate_rx_packet(struct bxe_fastpath *,
+ uint16_t, union eth_rx_cqe *, struct mbuf *);
static void bxe_grcdump(struct bxe_softc *, int);
-static void bxe_dump_enet(struct bxe_softc *,struct mbuf *);
-static void bxe_dump_mbuf (struct bxe_softc *, struct mbuf *);
-static void bxe_dump_tx_mbuf_chain(struct bxe_softc *, int, int);
-static void bxe_dump_rx_mbuf_chain(struct bxe_softc *, int, int);
-static void bxe_dump_tx_parsing_bd(struct bxe_fastpath *,int,
- struct eth_tx_parse_bd *);
-static void bxe_dump_txbd(struct bxe_fastpath *, int,
- union eth_tx_bd_types *);
-static void bxe_dump_rxbd(struct bxe_fastpath *, int,
- struct eth_rx_bd *);
-static void bxe_dump_cqe(struct bxe_fastpath *, int, union eth_rx_cqe *);
-static void bxe_dump_tx_chain(struct bxe_fastpath *, int, int);
-static void bxe_dump_rx_cq_chain(struct bxe_fastpath *, int, int);
-static void bxe_dump_rx_bd_chain(struct bxe_fastpath *, int, int);
-static void bxe_dump_status_block(struct bxe_softc *);
-static void bxe_dump_stats_block(struct bxe_softc *);
-static void bxe_dump_fp_state(struct bxe_fastpath *);
-static void bxe_dump_port_state_locked(struct bxe_softc *);
-static void bxe_dump_link_vars_state_locked(struct bxe_softc *);
-static void bxe_dump_link_params_state_locked(struct bxe_softc *);
-static void bxe_dump_driver_state(struct bxe_softc *);
-static void bxe_dump_hw_state(struct bxe_softc *);
-static void bxe_dump_fw(struct bxe_softc *);
+static __noinline void bxe_dump_enet(struct bxe_softc *, struct mbuf *);
+static __noinline void bxe_dump_mbuf(struct bxe_softc *, struct mbuf *);
+static __noinline void bxe_dump_tx_mbuf_chain(struct bxe_softc *, int, int);
+static __noinline void bxe_dump_rx_mbuf_chain(struct bxe_softc *, int, int);
+static __noinline void bxe_dump_tx_parsing_bd(struct bxe_fastpath *, int,
+ struct eth_tx_parse_bd *);
+static __noinline void bxe_dump_txbd(struct bxe_fastpath *, int,
+ union eth_tx_bd_types *);
+static __noinline void bxe_dump_rxbd(struct bxe_fastpath *, int,
+ struct eth_rx_bd *);
+static __noinline void bxe_dump_cqe(struct bxe_fastpath *,
+ int, union eth_rx_cqe *);
+static __noinline void bxe_dump_tx_chain(struct bxe_fastpath *, int, int);
+static __noinline void bxe_dump_rx_cq_chain(struct bxe_fastpath *, int, int);
+static __noinline void bxe_dump_rx_bd_chain(struct bxe_fastpath *, int, int);
+static __noinline void bxe_dump_status_block(struct bxe_softc *);
+static __noinline void bxe_dump_stats_block(struct bxe_softc *);
+static __noinline void bxe_dump_fp_state(struct bxe_fastpath *);
+static __noinline void bxe_dump_port_state_locked(struct bxe_softc *);
+static __noinline void bxe_dump_link_vars_state_locked(struct bxe_softc *);
+static __noinline void bxe_dump_link_params_state_locked(struct bxe_softc *);
+static __noinline void bxe_dump_driver_state(struct bxe_softc *);
+static __noinline void bxe_dump_hw_state(struct bxe_softc *);
+static __noinline void bxe_dump_fw(struct bxe_softc *);
static void bxe_decode_mb_msgs(struct bxe_softc *, uint32_t, uint32_t);
static void bxe_decode_ramrod_cmd(struct bxe_softc *, int);
static void bxe_breakpoint(struct bxe_softc *);
@@ -433,11 +426,6 @@ DRIVER_MODULE(bxe, pci, bxe_driver, bxe_devclass, 0, 0);
SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD, 0, "bxe driver parameters");
/* Allowable values are TRUE (1) or FALSE (0). */
-static int bxe_stats_enable = FALSE;
-TUNABLE_INT("hw.bxe.stats_enable", &bxe_stats_enable);
-SYSCTL_UINT(_hw_bxe, OID_AUTO, stats_enable, CTLFLAG_RDTUN, &bxe_stats_enable,
- 0, "stats Enable/Disable");
-
static int bxe_dcc_enable = FALSE;
TUNABLE_INT("hw.bxe.dcc_enable", &bxe_dcc_enable);
SYSCTL_UINT(_hw_bxe, OID_AUTO, dcc_enable, CTLFLAG_RDTUN, &bxe_dcc_enable,
@@ -456,18 +444,6 @@ SYSCTL_UINT(_hw_bxe, OID_AUTO, int_mode, CTLFLAG_RDTUN, &bxe_int_mode,
0, "Interrupt (MSI-X|MSI|INTx) mode");
/*
- * Specifies whether the driver should disable Transparent Packet
- * Aggregation (TPA, also known as LRO). By default TPA is enabled.
- *
- * Allowable values are TRUE (1) or FALSE (0).
- */
-static int bxe_tpa_enable = FALSE;
-TUNABLE_INT("hw.bxe.tpa_enable", &bxe_tpa_enable);
-SYSCTL_UINT(_hw_bxe, OID_AUTO, tpa_enable, CTLFLAG_RDTUN, &bxe_tpa_enable,
- 0, "TPA Enable/Disable");
-
-
-/*
* Specifies the number of queues that will be used when a multi-queue
* RSS mode is selected using bxe_multi_mode below.
*
@@ -480,8 +456,8 @@ SYSCTL_UINT(_hw_bxe, OID_AUTO, queue_count, CTLFLAG_RDTUN, &bxe_queue_count,
/*
* ETH_RSS_MODE_DISABLED (0)
- * Disables all multi-queue/packet sorting algorithms. Each
- * received frame is routed to the same receive queue.
+ * Disables all multi-queue/packet sorting algorithms. All
+ * received frames are routed to a single receive queue.
*
* ETH_RSS_MODE_REGULAR (1)
* The default mode which assigns incoming frames to receive
@@ -579,7 +555,7 @@ bxe_reg_write32(struct bxe_softc *sc, bus_size_t offset, uint32_t val)
(uintmax_t)offset);
}
- DBPRINT(sc, BXE_INSANE, "%s(): offset = 0x%jX, val = 0x%08X\n",
+ DBPRINT(sc, BXE_INSANE_REGS, "%s(): offset = 0x%jX, val = 0x%08X\n",
__FUNCTION__, (uintmax_t)offset, val);
bus_space_write_4(sc->bxe_btag, sc->bxe_bhandle, offset, val);
@@ -602,7 +578,7 @@ bxe_reg_write16(struct bxe_softc *sc, bus_size_t offset, uint16_t val)
(uintmax_t)offset);
}
- DBPRINT(sc, BXE_INSANE, "%s(): offset = 0x%jX, val = 0x%04X\n",
+ DBPRINT(sc, BXE_INSANE_REGS, "%s(): offset = 0x%jX, val = 0x%04X\n",
__FUNCTION__, (uintmax_t)offset, val);
bus_space_write_2(sc->bxe_btag, sc->bxe_bhandle, offset, val);
@@ -619,7 +595,7 @@ static void
bxe_reg_write8(struct bxe_softc *sc, bus_size_t offset, uint8_t val)
{
- DBPRINT(sc, BXE_INSANE, "%s(): offset = 0x%jX, val = 0x%02X\n",
+ DBPRINT(sc, BXE_INSANE_REGS, "%s(): offset = 0x%jX, val = 0x%02X\n",
__FUNCTION__, (uintmax_t)offset, val);
bus_space_write_1(sc->bxe_btag, sc->bxe_bhandle, offset, val);
@@ -645,7 +621,7 @@ bxe_reg_read32(struct bxe_softc *sc, bus_size_t offset)
val = bus_space_read_4(sc->bxe_btag, sc->bxe_bhandle, offset);
- DBPRINT(sc, BXE_INSANE, "%s(): offset = 0x%jX, val = 0x%08X\n",
+ DBPRINT(sc, BXE_INSANE_REGS, "%s(): offset = 0x%jX, val = 0x%08X\n",
__FUNCTION__, (uintmax_t)offset, val);
return (val);
@@ -671,7 +647,7 @@ bxe_reg_read16(struct bxe_softc *sc, bus_size_t offset)
val = bus_space_read_2(sc->bxe_btag, sc->bxe_bhandle, offset);
- DBPRINT(sc, BXE_INSANE, "%s(): offset = 0x%jX, val = 0x%08X\n",
+ DBPRINT(sc, BXE_INSANE_REGS, "%s(): offset = 0x%jX, val = 0x%08X\n",
__FUNCTION__, (uintmax_t)offset, val);
return (val);
@@ -690,10 +666,10 @@ bxe_reg_read8(struct bxe_softc *sc, bus_size_t offset)
{
uint8_t val = bus_space_read_1(sc->bxe_btag, sc->bxe_bhandle, offset);
- DBPRINT(sc, BXE_INSANE, "%s(): offset = 0x%jX, val = 0x%02X\n",
+ DBPRINT(sc, BXE_INSANE_REGS, "%s(): offset = 0x%jX, val = 0x%02X\n",
__FUNCTION__, (uintmax_t)offset, val);
- return(val);
+ return (val);
}
#endif
@@ -996,6 +972,7 @@ bxe_probe(device_t dev)
* Returns:
* None.
*/
+/* ToDo: Create a sysctl for this info. */
static void
bxe_print_adapter_info(struct bxe_softc *sc)
{
@@ -1025,19 +1002,14 @@ bxe_print_adapter_info(struct bxe_softc *sc)
printf("); Flags (");
/* Miscellaneous flags. */
- if (sc->bxe_flags & BXE_USING_MSI_FLAG)
+ if (sc->msi_count > 0)
printf("MSI");
- if (sc->bxe_flags & BXE_USING_MSIX_FLAG) {
+ if (sc->msix_count > 0) {
if (i > 0) printf("|");
printf("MSI-X"); i++;
}
- if (sc->bxe_flags & BXE_SAFC_TX_FLAG) {
- if (i > 0) printf("|");
- printf("SAFC"); i++;
- }
-
if (TPA_ENABLED(sc)) {
if (i > 0) printf("|");
printf("TPA"); i++;
@@ -1056,6 +1028,9 @@ bxe_print_adapter_info(struct bxe_softc *sc)
break;
}
+ printf("); BDs (RX:%d,TX:%d",
+ (int) USABLE_RX_BD, (int) USABLE_TX_BD);
+
/* Firmware versions and device features. */
printf("); Firmware (%d.%d.%d); Bootcode (%d.%d.%d)\n",
BCM_5710_FW_MAJOR_VERSION,
@@ -1069,6 +1044,64 @@ bxe_print_adapter_info(struct bxe_softc *sc)
}
/*
+ * Release any interrupts allocated by the driver.
+ *
+ * Returns:
+ * None
+ */
+static void
+bxe_interrupt_free(struct bxe_softc *sc)
+{
+ device_t dev;
+ int i;
+
+ DBENTER(BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
+
+ dev = sc->dev;
+
+ if (sc->msix_count > 0) {
+ /* Free MSI-X resources. */
+
+ for (i = 0; i < sc->msix_count; i++) {
+ DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET |
+ BXE_VERBOSE_INTR), "%s(): Releasing MSI-X[%d] "
+ "vector.\n", __FUNCTION__, i);
+ if (sc->bxe_msix_res[i] && sc->bxe_msix_rid[i])
+ bus_release_resource(dev, SYS_RES_IRQ,
+ sc->bxe_msix_rid[i], sc->bxe_msix_res[i]);
+ }
+
+ pci_release_msi(dev);
+
+ } else if (sc->msi_count > 0) {
+ /* Free MSI resources. */
+
+ for (i = 0; i < sc->msi_count; i++) {
+ DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET |
+ BXE_VERBOSE_INTR), "%s(): Releasing MSI[%d] "
+ "vector.\n", __FUNCTION__, i);
+ if (sc->bxe_msi_res[i] && sc->bxe_msi_rid[i])
+ bus_release_resource(dev, SYS_RES_IRQ,
+ sc->bxe_msi_rid[i], sc->bxe_msi_res[i]);
+ }
+
+ pci_release_msi(dev);
+
+ } else {
+ /* Free legacy interrupt resources. */
+
+ DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET |
+ BXE_VERBOSE_INTR), "%s(): Releasing legacy interrupt.\n",
+ __FUNCTION__);
+ if (sc->bxe_irq_res != NULL)
+ bus_release_resource(dev, SYS_RES_IRQ,
+ sc->bxe_irq_rid, sc->bxe_irq_res);
+ }
+
+ DBEXIT(BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
+}
+
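The guards in bxe_interrupt_free() matter: only vectors that were actually allocated (non-NULL resource, non-zero rid) are handed back, and pci_release_msi() is called once afterwards. Reduced to its skeleton (res, rid and dev are stand-ins):

	if (res != NULL && rid != 0)
		bus_release_resource(dev, SYS_RES_IRQ, rid, res);
	pci_release_msi(dev);	/* return the vectors to the OS */
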
+/*
* This function determines and allocates the appropriate
* interrupt based on system capabilites and user request.
*
@@ -1086,30 +1119,19 @@ bxe_print_adapter_info(struct bxe_softc *sc)
* 0 = Success, !0 = Failure.
*/
static int
-bxe_interrupt_allocate(struct bxe_softc *sc)
+bxe_interrupt_alloc(struct bxe_softc *sc)
{
device_t dev;
- int i, rid, rc;
+ int error, i, rid, rc;
int msi_count, msi_required, msi_allocated;
int msix_count, msix_required, msix_allocated;
- rc = 0;
- dev = sc->dev;
- msi_count = 0;
- msi_required = 0;
- msi_allocated = 0;
- msix_count = 0;
- msix_required = 0;
- msix_allocated = 0;
-
DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_INTR);
- /* Assume SAFC not enabled for TX. */
- sc->bxe_flags &= ~BXE_SAFC_TX_FLAG;
-
- /* Clear any previous priority queue mappings. */
- for (i = 0; i < BXE_MAX_PRIORITY; i++)
- sc->pri_map[i] = 0;
+ rc = 0;
+ dev = sc->dev;
+ msi_count = msi_required = msi_allocated = 0;
+ msix_count = msix_required = msix_allocated = 0;
/* Get the number of available MSI/MSI-X interrupts from the OS. */
if (sc->int_mode > 0) {
@@ -1140,7 +1162,8 @@ bxe_interrupt_allocate(struct bxe_softc *sc)
/* BSD resource identifier */
rid = 1;
- if (pci_alloc_msix(dev, &msix_allocated) == 0) {
+ error = pci_alloc_msix(dev, &msix_allocated);
+ if (error == 0) {
DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_INTR),
"%s(): Required/Allocated (%d/%d) MSI-X vector(s).\n",
__FUNCTION__, msix_required, msix_allocated);
@@ -1148,7 +1171,6 @@ bxe_interrupt_allocate(struct bxe_softc *sc)
/* Make sure we got all the interrupts we asked for. */
if (msix_allocated >= msix_required) {
sc->msix_count = msix_required;
- sc->bxe_flags |= BXE_USING_MSIX_FLAG;
msi_count = 0;
/* Allocate the MSI-X vectors. */
@@ -1165,7 +1187,7 @@ bxe_interrupt_allocate(struct bxe_softc *sc)
"%s(%d): Failed to map MSI-X[%d] vector!\n",
__FILE__, __LINE__, (3));
rc = ENXIO;
- goto bxe_interrupt_allocate_exit;
+ goto bxe_interrupt_alloc_exit;
}
}
} else {
@@ -1176,7 +1198,6 @@ bxe_interrupt_allocate(struct bxe_softc *sc)
/* Release any resources acquired. */
pci_release_msi(dev);
- sc->bxe_flags &= ~BXE_USING_MSIX_FLAG;
sc->msix_count = msix_count = 0;
/* We'll try MSI next. */
@@ -1200,7 +1221,8 @@ bxe_interrupt_allocate(struct bxe_softc *sc)
msi_required);
rid = 1;
- if (pci_alloc_msi(dev, &msi_allocated) == 0) {
+ error = pci_alloc_msi(dev, &msi_allocated);
+ if (error == 0) {
DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_INTR),
"%s(): Required/Allocated (%d/%d) MSI vector(s).\n",
__FUNCTION__, msi_required, msi_allocated);
@@ -1212,7 +1234,6 @@ bxe_interrupt_allocate(struct bxe_softc *sc)
*/
if (msi_required >= msi_allocated) {
sc->msi_count = msi_required;
- sc->bxe_flags |= BXE_USING_MSI_FLAG;
/* Allocate the MSI vectors. */
for (i = 0; i < msi_required; i++) {
sc->bxe_msi_rid[i] = i + rid;
@@ -1226,7 +1247,7 @@ bxe_interrupt_allocate(struct bxe_softc *sc)
"%s(%d): Failed to map MSI vector (%d)!\n",
__FILE__, __LINE__, (i));
rc = ENXIO;
- goto bxe_interrupt_allocate_exit;
+ goto bxe_interrupt_alloc_exit;
}
}
}
@@ -1237,7 +1258,6 @@ bxe_interrupt_allocate(struct bxe_softc *sc)
/* Release any resources acquired. */
pci_release_msi(dev);
- sc->bxe_flags &= ~BXE_USING_MSI_FLAG;
sc->msi_count = msi_count = 0;
/* We'll try INTx next. */
@@ -1262,7 +1282,7 @@ bxe_interrupt_allocate(struct bxe_softc *sc)
BXE_PRINTF("%s(%d): PCI map interrupt failed!\n",
__FILE__, __LINE__);
rc = ENXIO;
- goto bxe_interrupt_allocate_exit;
+ goto bxe_interrupt_alloc_exit;
}
sc->bxe_irq_rid = rid;
}
@@ -1271,27 +1291,55 @@ bxe_interrupt_allocate(struct bxe_softc *sc)
"%s(): Actual: int_mode = %d, multi_mode = %d, num_queues = %d\n",
__FUNCTION__, sc->int_mode, sc->multi_mode, sc->num_queues);
-bxe_interrupt_allocate_exit:
+bxe_interrupt_alloc_exit:
DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_INTR);
return (rc);
}
+/*
+ * This function releases taskqueues.
+ *
+ * Returns:
+ * None
+ */
static void
bxe_interrupt_detach(struct bxe_softc *sc)
{
+#ifdef BXE_TASK
+ struct bxe_fastpath *fp;
+#endif
device_t dev;
int i;
+ DBENTER(BXE_VERBOSE_UNLOAD);
+
dev = sc->dev;
- DBENTER(BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
+
+#ifdef BXE_TASK
+ /* Free the OS taskqueue resources. */
+ for (i = 0; i < sc->num_queues; i++) {
+ fp = &sc->fp[i];
+
+ if (fp->tq != NULL) {
+ taskqueue_drain(fp->tq, &fp->task);
+ taskqueue_free(fp->tq);
+ }
+ }
+
+ if (sc->tq != NULL) {
+ taskqueue_drain(sc->tq, &sc->task);
+ taskqueue_free(sc->tq);
+ }
+#endif
+
/* Release interrupt resources. */
- if ((sc->bxe_flags & BXE_USING_MSIX_FLAG) && sc->msix_count) {
+ if (sc->msix_count > 0) {
for (i = 0; i < sc->msix_count; i++) {
if (sc->bxe_msix_tag[i] && sc->bxe_msix_res[i])
bus_teardown_intr(dev, sc->bxe_msix_res[i],
sc->bxe_msix_tag[i]);
}
- } else if ((sc->bxe_flags & BXE_USING_MSI_FLAG) && sc->msi_count) {
+ } else if (sc->msi_count > 0) {
for (i = 0; i < sc->msi_count; i++) {
if (sc->bxe_msi_tag[i] && sc->bxe_msi_res[i])
bus_teardown_intr(dev, sc->bxe_msi_res[i],
@@ -1302,6 +1350,8 @@ bxe_interrupt_detach(struct bxe_softc *sc)
bus_teardown_intr(dev, sc->bxe_irq_res,
sc->bxe_irq_tag);
}
+
+ DBEXIT(BXE_VERBOSE_UNLOAD);
}
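Note the ordering enforced by bxe_interrupt_detach(): deferred work is drained and freed before the handlers are torn down, so no task can run against a half-detached driver. Schematically (tq, task, res and tag are stand-ins):

	taskqueue_drain(tq, &task);		/* wait out in-flight work */
	taskqueue_free(tq);			/* then destroy the queue */
	bus_teardown_intr(dev, res, tag);	/* finally unhook the handler */
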
/*
@@ -1336,7 +1386,7 @@ bxe_interrupt_attach(struct bxe_softc *sc)
#endif
/* Setup interrupt handlers. */
- if (sc->bxe_flags & BXE_USING_MSIX_FLAG) {
+ if (sc->msix_count > 0) {
DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_INTR),
"%s(): Enabling slowpath MSI-X[0] vector.\n",__FUNCTION__);
/*
@@ -1344,13 +1394,9 @@ bxe_interrupt_attach(struct bxe_softc *sc)
* driver instance to the interrupt handler for the
* slowpath.
*/
- rc = bus_setup_intr(sc->dev,
- sc->bxe_msix_res[0],
- INTR_TYPE_NET | INTR_MPSAFE,
- NULL,
- bxe_intr_sp,
- sc,
- &sc->bxe_msix_tag[0]);
+ rc = bus_setup_intr(sc->dev, sc->bxe_msix_res[0],
+ INTR_TYPE_NET | INTR_MPSAFE, NULL, bxe_intr_sp,
+ sc, &sc->bxe_msix_tag[0]);
if (rc) {
BXE_PRINTF(
@@ -1360,10 +1406,8 @@ bxe_interrupt_attach(struct bxe_softc *sc)
}
#if __FreeBSD_version >= 800504
- bus_describe_intr(sc->dev,
- sc->bxe_msix_res[0],
- sc->bxe_msix_tag[0],
- "sp");
+ bus_describe_intr(sc->dev, sc->bxe_msix_res[0],
+ sc->bxe_msix_tag[0], "sp");
#endif
/* Now initialize the fastpath vectors. */
@@ -1377,13 +1421,9 @@ bxe_interrupt_attach(struct bxe_softc *sc)
* fastpath context to the interrupt handler in this
* case. Also the first msix_res was used by the sp.
*/
- rc = bus_setup_intr(sc->dev,
- sc->bxe_msix_res[i + 1],
- INTR_TYPE_NET | INTR_MPSAFE,
- NULL,
- bxe_intr_fp,
- fp,
- &sc->bxe_msix_tag[i + 1]);
+ rc = bus_setup_intr(sc->dev, sc->bxe_msix_res[i + 1],
+ INTR_TYPE_NET | INTR_MPSAFE, NULL, bxe_intr_fp,
+ fp, &sc->bxe_msix_tag[i + 1]);
if (rc) {
BXE_PRINTF(
@@ -1393,11 +1433,8 @@ bxe_interrupt_attach(struct bxe_softc *sc)
}
#if __FreeBSD_version >= 800504
- bus_describe_intr(sc->dev,
- sc->bxe_msix_res[i + 1],
- sc->bxe_msix_tag[i + 1],
- "fp[%02d]",
- i);
+ bus_describe_intr(sc->dev, sc->bxe_msix_res[i + 1],
+ sc->bxe_msix_tag[i + 1], "fp[%02d]", i);
#endif
/* Bind the fastpath instance to a CPU. */
@@ -1409,13 +1446,13 @@ bxe_interrupt_attach(struct bxe_softc *sc)
#ifdef BXE_TASK
TASK_INIT(&fp->task, 0, bxe_task_fp, fp);
fp->tq = taskqueue_create_fast("bxe_fpq", M_NOWAIT,
- taskqueue_thread_enqueue, &fp->tq);
+ taskqueue_thread_enqueue, &fp->tq);
taskqueue_start_threads(&fp->tq, 1, PI_NET, "%s fpq",
- device_get_nameunit(sc->dev));
+ device_get_nameunit(sc->dev));
#endif
fp->state = BXE_FP_STATE_IRQ;
}
- } else if (sc->bxe_flags & BXE_USING_MSI_FLAG) {
+ } else if (sc->msi_count > 0) {
DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_INTR),
"%s(): Enabling slowpath MSI[0] vector.\n",
__FUNCTION__);
@@ -1424,12 +1461,8 @@ bxe_interrupt_attach(struct bxe_softc *sc)
* instance to the interrupt handler for the slowpath.
*/
rc = bus_setup_intr(sc->dev,sc->bxe_msi_res[0],
- INTR_TYPE_NET | INTR_MPSAFE,
- NULL,
- bxe_intr_sp,
- sc,
- &sc->bxe_msi_tag[0]
- );
+ INTR_TYPE_NET | INTR_MPSAFE, NULL, bxe_intr_sp,
+ sc, &sc->bxe_msi_tag[0]);
if (rc) {
BXE_PRINTF(
@@ -1439,10 +1472,8 @@ bxe_interrupt_attach(struct bxe_softc *sc)
}
#if __FreeBSD_version >= 800504
- bus_describe_intr(sc->dev,
- sc->bxe_msi_res[0],
- sc->bxe_msi_tag[0],
- "sp");
+ bus_describe_intr(sc->dev, sc->bxe_msi_res[0],
+ sc->bxe_msi_tag[0], "sp");
#endif
/* Now initialize the fastpath vectors. */
@@ -1457,14 +1488,9 @@ bxe_interrupt_attach(struct bxe_softc *sc)
* fastpath context to the interrupt handler in this
* case.
*/
- rc = bus_setup_intr(sc->dev,
- sc->bxe_msi_res[i + 1],
- INTR_TYPE_NET | INTR_MPSAFE,
- NULL,
- bxe_intr_fp,
- fp,
- &sc->bxe_msi_tag[i + 1]
- );
+ rc = bus_setup_intr(sc->dev, sc->bxe_msi_res[i + 1],
+ INTR_TYPE_NET | INTR_MPSAFE, NULL, bxe_intr_fp,
+ fp, &sc->bxe_msi_tag[i + 1]);
if (rc) {
BXE_PRINTF(
@@ -1474,19 +1500,16 @@ bxe_interrupt_attach(struct bxe_softc *sc)
}
#if __FreeBSD_version >= 800504
- bus_describe_intr(sc->dev,
- sc->bxe_msi_res[i + 1],
- sc->bxe_msi_tag[i + 1],
- "fp[%02d]",
- i);
+ bus_describe_intr(sc->dev, sc->bxe_msi_res[i + 1],
+ sc->bxe_msi_tag[i + 1], "fp[%02d]", i);
#endif
#ifdef BXE_TASK
TASK_INIT(&fp->task, 0, bxe_task_fp, fp);
fp->tq = taskqueue_create_fast("bxe_fpq", M_NOWAIT,
- taskqueue_thread_enqueue, &fp->tq);
+ taskqueue_thread_enqueue, &fp->tq);
taskqueue_start_threads(&fp->tq, 1, PI_NET, "%s fpq",
- device_get_nameunit(sc->dev));
+ device_get_nameunit(sc->dev));
#endif
}
@@ -1495,23 +1518,19 @@ bxe_interrupt_attach(struct bxe_softc *sc)
fp = &sc->fp[0];
#endif
DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_INTR),
- "%s(): Enabling INTx interrupts.\n", __FUNCTION__);
+ "%s(): Enabling INTx interrupts.\n", __FUNCTION__);
/*
* Setup the interrupt handler. Note that we pass the
* driver instance to the interrupt handler which
* will handle both the slowpath and fastpath.
*/
- rc = bus_setup_intr(sc->dev,sc->bxe_irq_res,
- INTR_TYPE_NET | INTR_MPSAFE,
- NULL,
- bxe_intr_legacy,
- sc,
- &sc->bxe_irq_tag);
+ rc = bus_setup_intr(sc->dev,sc->bxe_irq_res, INTR_TYPE_NET |
+ INTR_MPSAFE, NULL, bxe_intr_legacy, sc, &sc->bxe_irq_tag);
if (rc) {
BXE_PRINTF("%s(%d): Failed to allocate interrupt!\n",
- __FILE__, __LINE__);
+ __FILE__, __LINE__);
goto bxe_interrupt_attach_exit;
}
#ifdef BXE_TASK
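/*
 * The reflowed calls above all share one bus_setup_intr(9) shape: a NULL
 * filter with an ithread handler, and INTR_MPSAFE so the handler runs
 * without Giant. A sketch with hypothetical xx_ names:
 */
static int
xx_irq_attach(device_t dev, struct resource *irq_res, void *arg, void **tagp)
{
	int error;

	error = bus_setup_intr(dev, irq_res, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL,	/* no filter; handler runs in an ithread */
	    xx_intr,	/* driver_intr_t handler */
	    arg,	/* handler argument */
	    tagp);	/* cookie for a later bus_teardown_intr() */
#if __FreeBSD_version >= 800504
	if (error == 0)
		bus_describe_intr(dev, irq_res, *tagp, "xx0");
#endif
	return (error);
}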
@@ -1616,56 +1635,78 @@ bxe_probe_pci_caps(struct bxe_softc *sc)
DBEXIT(BXE_EXTREME_LOAD);
}
+/*
+ * Setup firmware pointers for BCM57710.
+ *
+ * Returns:
+ * None
+ */
static void
bxe_init_e1_firmware(struct bxe_softc *sc)
{
- INIT_OPS(sc) = (struct raw_op *)init_ops_e1;
- INIT_DATA(sc) = (const uint32_t *)init_data_e1;
- INIT_OPS_OFFSETS(sc) = (const uint16_t *)init_ops_offsets_e1;
- INIT_TSEM_INT_TABLE_DATA(sc) = tsem_int_table_data_e1;
- INIT_TSEM_PRAM_DATA(sc) = tsem_pram_data_e1;
- INIT_USEM_INT_TABLE_DATA(sc) = usem_int_table_data_e1;
- INIT_USEM_PRAM_DATA(sc) = usem_pram_data_e1;
- INIT_XSEM_INT_TABLE_DATA(sc) = xsem_int_table_data_e1;
- INIT_XSEM_PRAM_DATA(sc) = xsem_pram_data_e1;
- INIT_CSEM_INT_TABLE_DATA(sc) = csem_int_table_data_e1;
- INIT_CSEM_PRAM_DATA(sc) = csem_pram_data_e1;
+ INIT_OPS(sc) = (struct raw_op *)init_ops_e1;
+ INIT_DATA(sc) = (const uint32_t *)init_data_e1;
+ INIT_OPS_OFFSETS(sc) = (const uint16_t *)init_ops_offsets_e1;
+ INIT_TSEM_INT_TABLE_DATA(sc) = tsem_int_table_data_e1;
+ INIT_TSEM_PRAM_DATA(sc) = tsem_pram_data_e1;
+ INIT_USEM_INT_TABLE_DATA(sc) = usem_int_table_data_e1;
+ INIT_USEM_PRAM_DATA(sc) = usem_pram_data_e1;
+ INIT_XSEM_INT_TABLE_DATA(sc) = xsem_int_table_data_e1;
+ INIT_XSEM_PRAM_DATA(sc) = xsem_pram_data_e1;
+ INIT_CSEM_INT_TABLE_DATA(sc) = csem_int_table_data_e1;
+ INIT_CSEM_PRAM_DATA(sc) = csem_pram_data_e1;
}
+/*
+ * Setup firmware pointers for BCM57711.
+ *
+ * Returns:
+ * None
+ */
static void
bxe_init_e1h_firmware(struct bxe_softc *sc)
{
- INIT_OPS(sc) = (struct raw_op *)init_ops_e1h;
- INIT_DATA(sc) = (const uint32_t *)init_data_e1h;
- INIT_OPS_OFFSETS(sc) = (const uint16_t *)init_ops_offsets_e1h;
- INIT_TSEM_INT_TABLE_DATA(sc) = tsem_int_table_data_e1h;
- INIT_TSEM_PRAM_DATA(sc) = tsem_pram_data_e1h;
- INIT_USEM_INT_TABLE_DATA(sc) = usem_int_table_data_e1h;
- INIT_USEM_PRAM_DATA(sc) = usem_pram_data_e1h;
- INIT_XSEM_INT_TABLE_DATA(sc) = xsem_int_table_data_e1h;
- INIT_XSEM_PRAM_DATA(sc) = xsem_pram_data_e1h;
- INIT_CSEM_INT_TABLE_DATA(sc) = csem_int_table_data_e1h;
- INIT_CSEM_PRAM_DATA(sc) = csem_pram_data_e1h;
+ INIT_OPS(sc) = (struct raw_op *)init_ops_e1h;
+ INIT_DATA(sc) = (const uint32_t *)init_data_e1h;
+ INIT_OPS_OFFSETS(sc) = (const uint16_t *)init_ops_offsets_e1h;
+ INIT_TSEM_INT_TABLE_DATA(sc) = tsem_int_table_data_e1h;
+ INIT_TSEM_PRAM_DATA(sc) = tsem_pram_data_e1h;
+ INIT_USEM_INT_TABLE_DATA(sc) = usem_int_table_data_e1h;
+ INIT_USEM_PRAM_DATA(sc) = usem_pram_data_e1h;
+ INIT_XSEM_INT_TABLE_DATA(sc) = xsem_int_table_data_e1h;
+ INIT_XSEM_PRAM_DATA(sc) = xsem_pram_data_e1h;
+ INIT_CSEM_INT_TABLE_DATA(sc) = csem_int_table_data_e1h;
+ INIT_CSEM_PRAM_DATA(sc) = csem_pram_data_e1h;
}
+/*
+ * Sets up pointers for loading controller firmware.
+ *
+ * Returns:
+ * 0 = Success, !0 = Failure
+ */
static int
bxe_init_firmware(struct bxe_softc *sc)
{
+ int rc;
+
+ rc = 0;
+
if (CHIP_IS_E1(sc))
bxe_init_e1_firmware(sc);
else if (CHIP_IS_E1H(sc))
bxe_init_e1h_firmware(sc);
else {
- BXE_PRINTF("%s(%d): Unsupported chip revision\n",
+ BXE_PRINTF("%s(%d): No firmware to support chip revision!\n",
__FILE__, __LINE__);
- return (ENXIO);
+ rc = ENXIO;
}
- return (0);
-}
+ return (rc);
+}
static void
-bxe_set_tunables(struct bxe_softc *sc)
+bxe_tunables_set(struct bxe_softc *sc)
{
/*
* Get our starting point for interrupt mode/number of queues.
@@ -1724,15 +1765,7 @@ bxe_set_tunables(struct bxe_softc *sc)
"%s(): Requested: int_mode = %d, multi_mode = %d num_queues = %d\n",
__FUNCTION__, sc->int_mode, sc->multi_mode, sc->num_queues);
- /* Set transparent packet aggregation (TPA), aka LRO, flag. */
- if (bxe_tpa_enable!= FALSE)
- sc->bxe_flags |= BXE_TPA_ENABLE_FLAG;
-
- /* Capture the stats enable/disable setting. */
- if (bxe_stats_enable == FALSE)
- sc->stats_enable = FALSE;
- else
- sc->stats_enable = TRUE;
+ sc->stats_enable = TRUE;
/* Select the host coalescing tick count values (limit values). */
if (bxe_tx_ticks > 100) {
@@ -1766,11 +1799,13 @@ bxe_set_tunables(struct bxe_softc *sc)
/*
+ * Allocates PCI resources from the OS.
+ *
* Returns:
* 0 = Success, !0 = Failure
*/
static int
-bxe_alloc_pci_resources(struct bxe_softc *sc)
+bxe_pci_resources_alloc(struct bxe_softc *sc)
{
int rid, rc = 0;
@@ -1782,32 +1817,32 @@ bxe_alloc_pci_resources(struct bxe_softc *sc)
* processor memory.
*/
rid = PCIR_BAR(0);
- sc->bxe_res = bus_alloc_resource_any(
- sc->dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
+ sc->bxe_res = bus_alloc_resource_any(sc->dev,
+ SYS_RES_MEMORY, &rid, RF_ACTIVE);
if (sc->bxe_res == NULL) {
BXE_PRINTF("%s(%d):PCI BAR0 memory allocation failed\n",
__FILE__, __LINE__);
rc = ENXIO;
- goto bxe_alloc_pci_resources_exit;
+ goto bxe_pci_resources_alloc_exit;
}
/* Get OS resource handles for BAR0 memory. */
- sc->bxe_btag = rman_get_bustag(sc->bxe_res);
- sc->bxe_bhandle = rman_get_bushandle(sc->bxe_res);
- sc->bxe_vhandle = (vm_offset_t) rman_get_virtual(sc->bxe_res);
+ sc->bxe_btag = rman_get_bustag(sc->bxe_res);
+ sc->bxe_bhandle = rman_get_bushandle(sc->bxe_res);
+ sc->bxe_vhandle = (vm_offset_t) rman_get_virtual(sc->bxe_res);
/*
* Allocate PCI memory resources for BAR2.
* Doorbell (DB) memory.
*/
rid = PCIR_BAR(2);
- sc->bxe_db_res = bus_alloc_resource_any(
- sc->dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
+ sc->bxe_db_res = bus_alloc_resource_any(sc->dev,
+ SYS_RES_MEMORY, &rid, RF_ACTIVE);
if (sc->bxe_db_res == NULL) {
BXE_PRINTF("%s(%d): PCI BAR2 memory allocation failed\n",
__FILE__, __LINE__);
rc = ENXIO;
- goto bxe_alloc_pci_resources_exit;
+ goto bxe_pci_resources_alloc_exit;
}
/* Get OS resource handles for BAR2 memory. */
@@ -1815,45 +1850,52 @@ bxe_alloc_pci_resources(struct bxe_softc *sc)
sc->bxe_db_bhandle = rman_get_bushandle(sc->bxe_db_res);
sc->bxe_db_vhandle = (vm_offset_t) rman_get_virtual(sc->bxe_db_res);
-bxe_alloc_pci_resources_exit:
+bxe_pci_resources_alloc_exit:
DBEXIT(BXE_VERBOSE_LOAD);
- return(rc);
+ return (rc);
}
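/*
 * A sketch of the BAR mapping pattern used above, assuming the usual
 * <sys/rman.h>, <machine/bus.h>, <dev/pci/pcireg.h> includes: allocate the
 * memory resource for a BAR, then capture the bus tag/handle so registers
 * can be read with bus_space(9). The register offset is hypothetical.
 */
static int
xx_map_bar0(device_t dev, struct xx_softc *sc)
{
	int rid;

	rid = PCIR_BAR(0);
	sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->res == NULL)
		return (ENXIO);
	sc->btag = rman_get_bustag(sc->res);
	sc->bhandle = rman_get_bushandle(sc->res);
	/* Registers are now reachable, e.g. a hypothetical ID register: */
	sc->chip_id = bus_space_read_4(sc->btag, sc->bhandle, 0x0000);
	return (0);
}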
/*
+ * Frees PCI resources allocated in bxe_pci_resources_alloc().
+ *
* Returns:
* None
*/
static void
-bxe_release_pci_resources(struct bxe_softc *sc)
+bxe_pci_resources_free(struct bxe_softc *sc)
{
+ DBENTER(BXE_VERBOSE_UNLOAD);
+
/* Release the PCIe BAR0 mapped memory. */
if (sc->bxe_res != NULL) {
- DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET),
- "%s(): Releasing PCI BAR0 memory.\n", __FUNCTION__);
- bus_release_resource(sc->dev,
- SYS_RES_MEMORY, PCIR_BAR(0), sc->bxe_res);
+ bus_release_resource(sc->dev, SYS_RES_MEMORY,
+ PCIR_BAR(0), sc->bxe_res);
}
/* Release the PCIe BAR2 (doorbell) mapped memory. */
if (sc->bxe_db_res != NULL) {
- DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET),
- "%s(): Releasing PCI BAR2 memory.\n", __FUNCTION__);
- bus_release_resource(sc->dev,
- SYS_RES_MEMORY, PCIR_BAR(2), sc->bxe_db_res);
+ bus_release_resource(sc->dev, SYS_RES_MEMORY,
+ PCIR_BAR(2), sc->bxe_db_res);
}
+
+	DBEXIT(BXE_VERBOSE_UNLOAD);
}
/*
+ * Determines the media reported to the OS by examining
+ * the installed PHY type.
+ *
* Returns:
* 0 = Success, !0 = Failure
*/
static int
bxe_media_detect(struct bxe_softc *sc)
{
- int rc = 0;
+ int rc;
+
+ rc = 0;
/* Identify supported media based on the PHY type. */
switch (XGXS_EXT_PHY_TYPE(sc->link_params.ext_phy_config)) {
@@ -1887,8 +1929,6 @@ bxe_media_detect(struct bxe_softc *sc)
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN:
default:
- BXE_PRINTF("%s(%d): PHY not supported by driver!\n",
- __FILE__, __LINE__);
sc->media = 0;
rc = ENODEV;
}
@@ -1915,7 +1955,7 @@ bxe_attach(device_t dev)
int rc;
sc = device_get_softc(dev);
- DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
+ DBENTER(BXE_INFO_LOAD | BXE_INFO_RESET);
sc->dev = dev;
sc->bxe_unit = device_get_unit(dev);
@@ -1923,18 +1963,34 @@ bxe_attach(device_t dev)
sc->bxe_flags = 0;
sc->state = BXE_STATE_CLOSED;
rc = 0;
- bxe_set_tunables(sc);
- bxe_alloc_mutexes(sc);
+ DBPRINT(sc, BXE_FATAL, "%s(): ************************\n",
+ __FUNCTION__);
+ DBPRINT(sc, BXE_FATAL, "%s(): ** Debug mode enabled **\n",
+ __FUNCTION__);
+ DBPRINT(sc, BXE_FATAL, "%s(): ************************\n",
+ __FUNCTION__);
+ DBPRINT(sc, BXE_FATAL, "%s(): sc vaddr = 0x%08X:%08X\n",
+ __FUNCTION__, (uint32_t) U64_HI(sc), (uint32_t) U64_LO(sc));
+
+ /* Get the user configurable values for driver load. */
+ bxe_tunables_set(sc);
- /* Prepare the tick routine. */
- callout_init(&sc->bxe_tick_callout, CALLOUT_MPSAFE);
+ bxe_mutexes_alloc(sc);
+
+ /* Prepare tick routine. */
+ callout_init_mtx(&sc->bxe_tick_callout, &sc->bxe_core_mtx, 0);
/* Enable bus master capability */
pci_enable_busmaster(dev);
- if ((rc = bxe_alloc_pci_resources(sc)) != 0)
+ /* Enable PCI BAR mapped memory for register access. */
+ rc = bxe_pci_resources_alloc(sc);
+ if (rc != 0) {
+ BXE_PRINTF("%s(%d): Error allocating PCI resources!\n",
+ __FILE__, __LINE__);
goto bxe_attach_fail;
+ }
/* Put indirect address registers into a sane state. */
pci_write_config(sc->dev, PCICFG_GRC_ADDRESS,
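/*
 * A sketch of the callout(9) change above: callout_init_mtx() associates
 * the callout with a mutex, so the callout system takes that lock before
 * running the tick routine, and callout_stop() under the same lock cannot
 * race a tick already in flight. Names beyond the callout KPIs are
 * hypothetical.
 */
static void
xx_tick_arm(struct xx_softc *sc)
{
	callout_init_mtx(&sc->tick_callout, &sc->core_mtx, 0);
	callout_reset(&sc->tick_callout, hz, xx_tick, sc);	/* fires in ~1s */
}

static void
xx_tick_disarm(struct xx_softc *sc)
{
	mtx_lock(&sc->core_mtx);
	callout_stop(&sc->tick_callout);	/* cannot race a running tick */
	mtx_unlock(&sc->core_mtx);
}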
@@ -1945,19 +2001,26 @@ bxe_attach(device_t dev)
REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(sc) * 16, 0);
/* Get hardware info from shared memory and validate data. */
- if (bxe_get_function_hwinfo(sc)) {
+ rc = bxe_hwinfo_function_get(sc);
+ if (rc != 0) {
DBPRINT(sc, BXE_WARN,
"%s(): Failed to get hardware info!\n", __FUNCTION__);
- rc = ENODEV;
goto bxe_attach_fail;
}
/* Setup supported media options. */
- if ((rc = bxe_media_detect(sc)) != 0)
+ rc = bxe_media_detect(sc);
+ if (rc != 0) {
+ BXE_PRINTF("%s(%d): Unknown media (PHY) type!\n",
+ __FILE__, __LINE__);
goto bxe_attach_fail;
+ }
+ /* Interface entrypoint for media type/status reporting. */
ifmedia_init(&sc->bxe_ifmedia,
IFM_IMASK, bxe_ifmedia_upd, bxe_ifmedia_status);
+
+ /* Default interface values. */
ifmedia_add(&sc->bxe_ifmedia,
IFM_ETHER | sc->media | IFM_FDX, 0, NULL);
ifmedia_add(&sc->bxe_ifmedia,
@@ -1967,38 +2030,37 @@ bxe_attach(device_t dev)
sc->bxe_ifmedia.ifm_media =
sc->bxe_ifmedia.ifm_cur->ifm_media;
- /* Set init arrays */
+ /* Setup firmware arrays (firmware load comes later). */
rc = bxe_init_firmware(sc);
if (rc) {
- BXE_PRINTF("%s(%d): Error loading firmware\n",
+ BXE_PRINTF("%s(%d): Error preparing firmware load!\n",
__FILE__, __LINE__);
goto bxe_attach_fail;
}
-
#ifdef BXE_DEBUG
/* Allocate a memory buffer for grcdump output.*/
sc->grcdump_buffer = malloc(BXE_GRCDUMP_BUF_SIZE, M_TEMP, M_NOWAIT);
if (sc->grcdump_buffer == NULL) {
- /* Failure is OK, just print a message and continue attach. */
BXE_PRINTF("%s(%d): Failed to allocate grcdump memory "
"buffer!\n", __FILE__, __LINE__);
+ rc = ENOBUFS;
}
#endif
/* Check that NVRAM contents are valid.*/
- if (bxe_nvram_test(sc)) {
+ rc = bxe_nvram_test(sc);
+ if (rc != 0) {
BXE_PRINTF("%s(%d): Failed NVRAM test!\n",
__FILE__, __LINE__);
- rc = ENODEV;
goto bxe_attach_fail;
}
/* Allocate the appropriate interrupts.*/
- if (bxe_interrupt_allocate(sc)) {
+ rc = bxe_interrupt_alloc(sc);
+ if (rc != 0) {
BXE_PRINTF("%s(%d): Interrupt allocation failed!\n",
__FILE__, __LINE__);
- rc = ENODEV;
goto bxe_attach_fail;
}
@@ -2016,7 +2078,7 @@ bxe_attach(device_t dev)
}
/* Check if PXE/UNDI is still active and unload it. */
- if (!BP_NOMCP(sc))
+ if (!NOMCP(sc))
bxe_undi_unload(sc);
/*
@@ -2032,6 +2094,7 @@ bxe_attach(device_t dev)
sc->rx_ring_size = USABLE_RX_BD;
/* Assume receive IP/TCP/UDP checksum is enabled. */
+ /* ToDo: Change when IOCTL changes checksum offload? */
sc->rx_csum = 1;
/* Disable WoL. */
@@ -2041,10 +2104,10 @@ bxe_attach(device_t dev)
sc->mbuf_alloc_size = MCLBYTES;
/* Allocate DMA memory resources. */
- if (bxe_dma_alloc(sc->dev)) {
+ rc = bxe_host_structures_alloc(sc->dev);
+ if (rc != 0) {
BXE_PRINTF("%s(%d): DMA memory allocation failed!\n",
__FILE__, __LINE__);
- rc = ENOMEM;
goto bxe_attach_fail;
}
@@ -2060,10 +2123,13 @@ bxe_attach(device_t dev)
/* Initialize the FreeBSD ifnet interface. */
ifp->if_softc = sc;
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
+
+ /* Written by driver before attach, read-only afterwards. */
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+
+ /* Driver entrypoints from the network interface. */
ifp->if_ioctl = bxe_ioctl;
ifp->if_start = bxe_tx_start;
-
#if __FreeBSD_version >= 800000
ifp->if_transmit = bxe_tx_mq_start;
ifp->if_qflush = bxe_mq_flush;
@@ -2077,10 +2143,8 @@ bxe_attach(device_t dev)
ifp->if_mtu = ETHERMTU;
ifp->if_hwassist = BXE_IF_HWASSIST;
ifp->if_capabilities = BXE_IF_CAPABILITIES;
- if (TPA_ENABLED(sc)) {
- ifp->if_capabilities |= IFCAP_LRO;
- }
- ifp->if_capenable = ifp->if_capabilities;
+ /* TPA not enabled by default. */
+ ifp->if_capenable = BXE_IF_CAPABILITIES & ~IFCAP_LRO;
ifp->if_baudrate = IF_Gbps(10UL);
ifp->if_snd.ifq_drv_maxlen = sc->tx_ring_size;
@@ -2092,7 +2156,8 @@ bxe_attach(device_t dev)
ether_ifattach(ifp, sc->link_params.mac_addr);
/* Attach the interrupts to the interrupt handlers. */
- if (bxe_interrupt_attach(sc)) {
+ rc = bxe_interrupt_attach(sc);
+ if (rc != 0) {
BXE_PRINTF("%s(%d): Interrupt allocation failed!\n",
__FILE__, __LINE__);
goto bxe_attach_fail;
@@ -2108,8 +2173,8 @@ bxe_attach_fail:
if (rc != 0)
bxe_detach(dev);
- DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
- return(rc);
+ DBEXIT(BXE_INFO_LOAD | BXE_INFO_RESET);
+ return (rc);
}
@@ -2593,7 +2658,7 @@ bxe_link_settings_requested_exit:
* 0 = Success, !0 = Failure
*/
static int
-bxe_get_function_hwinfo(struct bxe_softc *sc)
+bxe_hwinfo_function_get(struct bxe_softc *sc)
{
uint32_t mac_hi, mac_lo, val;
int func, rc;
@@ -2604,7 +2669,7 @@ bxe_get_function_hwinfo(struct bxe_softc *sc)
func = BP_FUNC(sc);
/* Get the common hardware configuration first. */
- bxe_get_common_hwinfo(sc);
+ bxe_hwinfo_common_get(sc);
/* Assume no outer VLAN/multi-function support. */
sc->e1hov = sc->e1hmf = 0;
@@ -2621,13 +2686,13 @@ bxe_get_function_hwinfo(struct bxe_softc *sc)
} else {
if (BP_E1HVN(sc)) {
rc = EPERM;
- goto bxe_get_function_hwinfo_exit;
+ goto bxe_hwinfo_function_get_exit;
}
}
}
- if (!BP_NOMCP(sc)) {
- bxe_get_port_hwinfo(sc);
+ if (!NOMCP(sc)) {
+ bxe_hwinfo_port_get(sc);
sc->fw_seq = SHMEM_RD(sc, func_mb[func].drv_mb_header) &
DRV_MSG_SEQ_NUMBER_MASK;
}
@@ -2636,7 +2701,7 @@ bxe_get_function_hwinfo(struct bxe_softc *sc)
/*
* Fetch the factory configured MAC address for multi function
* devices. If this is not a multi-function device then the MAC
- * address was already read in the bxe_get_port_hwinfo() routine.
+ * address was already read in the bxe_hwinfo_port_get() routine.
* The MAC addresses used by the port are not the same as the MAC
* addressed used by the function.
*/
@@ -2647,6 +2712,7 @@ bxe_get_function_hwinfo(struct bxe_softc *sc)
if ((mac_lo == 0) && (mac_hi == 0)) {
BXE_PRINTF("%s(%d): Invalid Ethernet address!\n",
__FILE__, __LINE__);
+ rc = ENODEV;
} else {
sc->link_params.mac_addr[0] = (u_char)(mac_hi >> 8);
sc->link_params.mac_addr[1] = (u_char)(mac_hi);
@@ -2658,9 +2724,9 @@ bxe_get_function_hwinfo(struct bxe_softc *sc)
}
-bxe_get_function_hwinfo_exit:
+bxe_hwinfo_function_get_exit:
DBEXIT(BXE_VERBOSE_LOAD);
- return(rc);
+ return (rc);
}
@@ -2674,15 +2740,16 @@ bxe_get_function_hwinfo_exit:
* for future use.
*
* Returns:
- * None
+ * 0 = Success, !0 = Failure
*/
-static void
-bxe_get_port_hwinfo(struct bxe_softc *sc)
+static int
+bxe_hwinfo_port_get(struct bxe_softc *sc)
{
- int i, port;
+ int i, port, rc;
uint32_t val, mac_hi, mac_lo;
DBENTER(BXE_VERBOSE_LOAD);
+ rc = 0;
port = BP_PORT(sc);
sc->link_params.sc = sc;
@@ -2736,6 +2803,7 @@ bxe_get_port_hwinfo(struct bxe_softc *sc)
if (mac_lo == 0 && mac_hi == 0) {
BXE_PRINTF("%s(%d): No Ethernet address programmed on the "
"controller!\n", __FILE__, __LINE__);
+ rc = ENODEV;
} else {
sc->link_params.mac_addr[0] = (u_char)(mac_hi >> 8);
sc->link_params.mac_addr[1] = (u_char)(mac_hi);
@@ -2746,6 +2814,7 @@ bxe_get_port_hwinfo(struct bxe_softc *sc)
}
DBEXIT(BXE_VERBOSE_LOAD);
+ return (rc);
}
@@ -2753,17 +2822,22 @@ bxe_get_port_hwinfo(struct bxe_softc *sc)
* Get common hardware configuration.
*
* Multiple port devices such as the BCM57710 have configuration
- * information that is specific to each Ethernet port of the controller.
+ * information that is shared between all ports of the Ethernet
+ * controller. This function reads that configuration
+ * information from the bootcode's shared memory and saves it
+ * for future use.
*
* Returns:
- * None
+ * 0 = Success, !0 = Failure
*/
-static void
-bxe_get_common_hwinfo(struct bxe_softc *sc)
+static int
+bxe_hwinfo_common_get(struct bxe_softc *sc)
{
uint32_t val;
+ int rc;
DBENTER(BXE_VERBOSE_LOAD);
+ rc = 0;
/* Get the chip revision. */
sc->common.chip_id = sc->link_params.chip_id =
@@ -2806,10 +2880,12 @@ bxe_get_common_hwinfo(struct bxe_softc *sc)
(sc->common.shmem_base < 0xA0000) ||
(sc->common.shmem_base > 0xC0000)) {
- DBPRINT(sc, BXE_FATAL, "%s(): MCP is not active!\n",
- __FUNCTION__);
+ BXE_PRINTF("%s(%d): MCP is not active!\n",
+ __FILE__, __LINE__);
+ /* ToDo: Remove the NOMCP support. */
sc->bxe_flags |= BXE_NO_MCP_FLAG;
- goto bxe_get_common_hwinfo_exit;
+ rc = ENODEV;
+ goto bxe_hwinfo_common_get_exit;
}
/* Make sure the shared memory contents are valid. */
@@ -2818,7 +2894,8 @@ bxe_get_common_hwinfo(struct bxe_softc *sc)
(SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
BXE_PRINTF("%s(%d): Invalid NVRAM! Bad validity "
"signature.\n", __FILE__, __LINE__);
- goto bxe_get_common_hwinfo_exit;
+ rc = ENODEV;
+ goto bxe_hwinfo_common_get_exit;
}
/* Read the device configuration from shared memory. */
@@ -2854,11 +2931,13 @@ bxe_get_common_hwinfo(struct bxe_softc *sc)
BXE_PRINTF("%s(%d): Warning: This driver needs bootcode "
"0x%08X but found 0x%08X, please upgrade!\n",
__FILE__, __LINE__, MIN_BXE_BC_VER, sc->common.bc_ver);
- goto bxe_get_common_hwinfo_exit;
+ rc = ENODEV;
+ goto bxe_hwinfo_common_get_exit;
}
-bxe_get_common_hwinfo_exit:
+bxe_hwinfo_common_get_exit:
DBEXIT(BXE_VERBOSE_LOAD);
+ return (rc);
}
@@ -2979,51 +3058,45 @@ bxe_undi_unload(struct bxe_softc *sc)
* Stops the controller, resets the controller, and releases resources.
*
* Returns:
- * 0 on success, positive value on failure.
+ * 0 = Success, !0 = Failure
*/
static int
bxe_detach(device_t dev)
{
struct bxe_softc *sc;
struct ifnet *ifp;
-#ifdef BXE_TASK
- struct bxe_fastpath *fp;
- int i;
-#endif
+ int rc;
sc = device_get_softc(dev);
- DBENTER(BXE_VERBOSE_RESET);
+ DBENTER(BXE_INFO_UNLOAD);
+
+ rc = 0;
ifp = sc->bxe_ifp;
if (ifp != NULL && ifp->if_vlantrunk != NULL) {
BXE_PRINTF("%s(%d): Cannot detach while VLANs are in use.\n",
__FILE__, __LINE__);
- return(EBUSY);
+ rc = EBUSY;
+ goto bxe_detach_exit;
}
/* Stop and reset the controller if it was open. */
if (sc->state != BXE_STATE_CLOSED) {
BXE_CORE_LOCK(sc);
- bxe_stop_locked(sc, UNLOAD_CLOSE);
+ rc = bxe_stop_locked(sc, UNLOAD_CLOSE);
BXE_CORE_UNLOCK(sc);
}
-#ifdef BXE_TASK
- /* Free the OS taskqueue resources. */
- for (i = 0; i < sc->num_queues; i++) {
- fp = &sc->fp[i];
+#ifdef BXE_DEBUG
+ /* Free memory buffer for grcdump output.*/
+ if (sc->grcdump_buffer != NULL)
+ free(sc->grcdump_buffer, M_TEMP);
+#endif
- if (fp->tq) {
- taskqueue_drain(fp->tq, &fp->task);
- taskqueue_free(fp->tq);
- }
- }
+ /* Clean-up any remaining interrupt resources. */
+ bxe_interrupt_detach(sc);
+ bxe_interrupt_free(sc);
- if (sc->tq) {
- taskqueue_drain(sc->tq, &sc->task);
- taskqueue_free(sc->tq);
- }
-#endif
/* Release the network interface. */
if (ifp != NULL)
ether_ifdetach(ifp);
@@ -3031,8 +3104,15 @@ bxe_detach(device_t dev)
/* Release all remaining resources. */
bxe_release_resources(sc);
+
+ /* Free all PCI resources. */
+ bxe_pci_resources_free(sc);
pci_disable_busmaster(dev);
+ bxe_mutexes_free(sc);
+
+bxe_detach_exit:
+ DBEXIT(BXE_INFO_UNLOAD);
-	return(0);
+	return (rc);
}
@@ -3079,9 +3159,8 @@ bxe_stop_leading(struct bxe_softc *sc)
uint16_t dsb_sp_prod_idx;
int rc, timeout;
- DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_RAMROD);
-
- DBPRINT(sc, BXE_VERBOSE_LOAD, "%s(): Stop client connection "
+ DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET |
+ BXE_VERBOSE_UNLOAD), "%s(): Stop client connection "
"on fp[00].\n", __FUNCTION__);
/* Send the ETH_HALT ramrod. */
@@ -3089,26 +3168,24 @@ bxe_stop_leading(struct bxe_softc *sc)
bxe_sp_post(sc,RAMROD_CMD_ID_ETH_HALT, 0, 0, sc->fp[0].cl_id, 0);
/* Poll for the ETH_HALT ramrod on the leading connection. */
- rc = bxe_wait_ramrod(sc, BXE_FP_STATE_HALTED, 0, &(sc->fp[0].state), 1);
- if (rc)
+ rc = bxe_wait_ramrod(sc, BXE_FP_STATE_HALTED,
+ 0, &(sc->fp[0].state), 1);
+ if (rc) {
+ DBPRINT(sc, BXE_FATAL, "%s(): Timeout waiting for "
+ "STATE_HALTED ramrod completion!\n", __FUNCTION__);
goto bxe_stop_leading_exit;
+ }
+ /* Get the default status block SP producer index. */
dsb_sp_prod_idx = *sc->dsb_sp_prod;
- /*
- * Now that the connection is in the
- * HALTED state send PORT_DELETE ramrod.
- */
+	/* After the HALT completes, send the PORT_DELETE ramrod. */
bxe_sp_post(sc, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
- /*
- * Wait for completion. This can take a * long time if the other port
- * is busy. Give the command some time to complete but don't wait for a
- * completion since there's nothing we can do.
- */
+ /* Be patient but don't wait forever. */
timeout = 500;
while (dsb_sp_prod_idx == *sc->dsb_sp_prod) {
- if (!timeout) {
+ if (timeout == 0) {
DBPRINT(sc, BXE_FATAL, "%s(): Timeout waiting for "
"PORT_DEL ramrod completion!\n", __FUNCTION__);
rc = EBUSY;
@@ -3124,8 +3201,7 @@ bxe_stop_leading(struct bxe_softc *sc)
sc->fp[0].state = BXE_FP_STATE_CLOSED;
bxe_stop_leading_exit:
- DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_RAMROD);
- return(rc);
+ return (rc);
}
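/*
 * A sketch of the bounded-poll idiom used above for ramrod completion:
 * watch a chip-updated producer index and give up after a fixed budget so
 * unload can never hang. Names are hypothetical; DELAY() and rmb() are the
 * real kernel primitives.
 */
static int
xx_poll_update(volatile uint16_t *idx, uint16_t old, int tries)
{
	while (*idx == old) {
		if (tries-- == 0)
			return (EBUSY);	/* firmware never answered */
		DELAY(1000);		/* 1ms between polls */
		rmb();			/* order the DMA'd index re-read */
	}
	return (0);
}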
/*
@@ -3140,9 +3216,8 @@ bxe_setup_multi(struct bxe_softc *sc, int index)
struct bxe_fastpath *fp;
int rc;
- DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_RAMROD);
-
- DBPRINT(sc, BXE_VERBOSE_LOAD, "%s(): Setup client connection "
+ DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET |
+ BXE_VERBOSE_UNLOAD), "%s(): Setup client connection "
"on fp[%02d].\n", __FUNCTION__, index);
fp = &sc->fp[index];
@@ -3154,10 +3229,9 @@ bxe_setup_multi(struct bxe_softc *sc, int index)
bxe_sp_post(sc, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, fp->cl_id, 0);
/* Wait for the ramrod to complete. */
- rc = bxe_wait_ramrod(sc, BXE_FP_STATE_OPEN, index, &(fp->state), 1);
+ rc = bxe_wait_ramrod(sc, BXE_FP_STATE_OPEN, index, &fp->state, 1);
- DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_RAMROD);
- return(rc);
+ return (rc);
}
/*
@@ -3175,9 +3249,8 @@ bxe_stop_multi(struct bxe_softc *sc, int index)
struct bxe_fastpath *fp;
int rc;
- DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_RAMROD);
-
- DBPRINT(sc, BXE_VERBOSE_LOAD, "%s(): Stop client connection "
+ DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET |
+ BXE_VERBOSE_UNLOAD), "%s(): Stop client connection "
"on fp[%02d].\n", __FUNCTION__, index);
fp = &sc->fp[index];
@@ -3186,8 +3259,8 @@ bxe_stop_multi(struct bxe_softc *sc, int index)
fp->state = BXE_FP_STATE_HALTING;
bxe_sp_post(sc, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
- /* Wait for the ramrod completion. */
- rc = bxe_wait_ramrod(sc, BXE_FP_STATE_HALTED, index, &(fp->state), 1);
+ /* Wait for the HALT ramrod completion. */
+ rc = bxe_wait_ramrod(sc, BXE_FP_STATE_HALTED, index, &fp->state, 1);
if (rc){
BXE_PRINTF("%s(%d): fp[%02d] client ramrod halt failed!\n",
__FILE__, __LINE__, index);
@@ -3196,12 +3269,11 @@ bxe_stop_multi(struct bxe_softc *sc, int index)
/* Delete the CFC entry. */
bxe_sp_post(sc, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
- /* Poll for the ramrod completion. */
- rc = bxe_wait_ramrod(sc, BXE_FP_STATE_CLOSED, index, &(fp->state), 1);
+ /* Poll for the DELETE ramrod completion. */
+ rc = bxe_wait_ramrod(sc, BXE_FP_STATE_CLOSED, index, &fp->state, 1);
bxe_stop_multi_exit:
- DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_RAMROD);
- return(rc);
+ return (rc);
}
/*
@@ -3268,7 +3340,7 @@ bxe__link_reset(struct bxe_softc *sc)
{
DBENTER(BXE_VERBOSE_PHY);
- if (!BP_NOMCP(sc)) {
+ if (!NOMCP(sc)) {
bxe_acquire_phy_lock(sc);
bxe_link_reset(&sc->link_params, &sc->link_vars, 1);
bxe_release_phy_lock(sc);
@@ -3285,7 +3357,7 @@ bxe__link_reset(struct bxe_softc *sc)
* Stop the controller.
*
* Returns:
- * Nothing.
+ * 0 = Success, !0 = Failure
*/
static int
bxe_stop_locked(struct bxe_softc *sc, int unload_mode)
@@ -3298,18 +3370,20 @@ bxe_stop_locked(struct bxe_softc *sc, int unload_mode)
uint8_t entry, *mac_addr;
int count, i, port, rc;
- DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
+ DBENTER(BXE_INFO_LOAD | BXE_INFO_RESET | BXE_INFO_UNLOAD);
+
ifp = sc->bxe_ifp;
port = BP_PORT(sc),
- reset_code = 0;
- rc = 0;
+ rc = reset_code = 0;
+
+ BXE_CORE_LOCK_ASSERT(sc);
/* Stop the periodic tick. */
callout_stop(&sc->bxe_tick_callout);
sc->state = BXE_STATE_CLOSING_WAIT4_HALT;
- /* Stop receiving all types of Ethernet traffic. */
+ /* Prevent any further RX traffic. */
sc->rx_mode = BXE_RX_MODE_NONE;
bxe_set_storm_rx_mode(sc);
@@ -3320,6 +3394,7 @@ bxe_stop_locked(struct bxe_softc *sc, int unload_mode)
/* Tell the bootcode to stop watching for a heartbeat. */
SHMEM_WR(sc, func_mb[BP_FUNC(sc)].drv_pulse_mb,
(DRV_PULSE_ALWAYS_ALIVE | sc->fw_drv_pulse_wr_seq));
+
/* Stop the statistics updates. */
bxe_stats_handle(sc, STATS_EVENT_STOP);
@@ -3327,6 +3402,9 @@ bxe_stop_locked(struct bxe_softc *sc, int unload_mode)
for (i = 0; i < sc->num_queues; i++) {
fp = &sc->fp[i];
+ if (fp == NULL || fp->tx_pkt_cons_sb == NULL)
+ break;
+
count = 1000;
while (bxe_has_tx_work(fp)) {
@@ -3334,7 +3412,7 @@ bxe_stop_locked(struct bxe_softc *sc, int unload_mode)
if (count == 0) {
BXE_PRINTF(
- "%s(%d): Timeout wating for fp[%d] transmits to complete!\n",
+ "%s(%d): Timeout wating for fp[%02d] transmits to complete!\n",
__FILE__, __LINE__, i);
break;
}
@@ -3351,8 +3429,8 @@ bxe_stop_locked(struct bxe_softc *sc, int unload_mode)
/* Disable Interrupts */
bxe_int_disable(sc);
-
DELAY(1000);
+
/* Clear the MAC addresses. */
if (CHIP_IS_E1(sc)) {
config = BXE_SP(sc, mcast_config);
@@ -3376,8 +3454,10 @@ bxe_stop_locked(struct bxe_softc *sc, int unload_mode)
REG_WR(sc, MC_HASH_OFFSET(sc, i), 0);
REG_WR(sc, MISC_REG_E1HMF_MODE, 0);
}
+
/* Determine if any WoL settings needed. */
if (unload_mode == UNLOAD_NORMAL)
+		/* Driver initiated WoL is disabled. */
reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
else if (sc->bxe_flags & BXE_NO_WOL_FLAG) {
/* Driver initiated WoL is disabled, use OOB WoL settings. */
@@ -3398,38 +3478,29 @@ bxe_stop_locked(struct bxe_softc *sc, int unload_mode)
/* Prevent WoL. */
reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
}
+
/* Stop all non-leading client connections. */
for (i = 1; i < sc->num_queues; i++) {
if (bxe_stop_multi(sc, i)){
goto bxe_stop_locked_exit;
}
}
+
/* Stop the leading client connection. */
rc = bxe_stop_leading(sc);
- if (rc) {
-#ifdef BXE_DEBUG
- if ((sc->state != BXE_STATE_CLOSING_WAIT4_UNLOAD) ||
- (sc->fp[0].state != BXE_FP_STATE_CLOSED)) {
- BXE_PRINTF("%s(%d): Failed to close leading "
- "client connection!\n", __FILE__, __LINE__);
- }
-#endif
- }
-
DELAY(10000);
bxe_stop_locked_exit:
-
- if (BP_NOMCP(sc)) {
+ if (NOMCP(sc)) {
DBPRINT(sc, BXE_INFO,
- "%s(): Old No MCP load counts: %d, %d, %d\n", __FUNCTION__,
- load_count[0], load_count[1], load_count[2]);
+ "%s(): Old No MCP load counts: %d, %d, %d\n",
+ __FUNCTION__, load_count[0], load_count[1], load_count[2]);
load_count[0]--;
load_count[1 + port]--;
DBPRINT(sc, BXE_INFO,
- "%s(): New No MCP load counts: %d, %d, %d\n", __FUNCTION__,
- load_count[0], load_count[1], load_count[2]);
+ "%s(): New No MCP load counts: %d, %d, %d\n",
+ __FUNCTION__, load_count[0], load_count[1], load_count[2]);
if (load_count[0] == 0)
reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
@@ -3454,32 +3525,31 @@ bxe_stop_locked_exit:
DELAY(10000);
/* Report UNLOAD_DONE to MCP */
- if (!BP_NOMCP(sc))
+ if (!NOMCP(sc))
bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE);
sc->port.pmf = 0;
/* Free RX chains and buffers. */
- bxe_free_rx_chains(sc);
+ bxe_clear_rx_chains(sc);
/* Free TX chains and buffers. */
- bxe_free_tx_chains(sc);
+ bxe_clear_tx_chains(sc);
sc->state = BXE_STATE_CLOSED;
bxe_ack_int(sc);
- DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET |BXE_VERBOSE_UNLOAD);
- return(rc);
+ DBEXIT(BXE_INFO_LOAD | BXE_INFO_RESET |BXE_INFO_UNLOAD);
+ return (rc);
}
-
/*
* Device shutdown function.
*
* Stops and resets the controller.
*
* Returns:
- * Nothing
+ * 0 = Success, !0 = Failure
*/
static int
bxe_shutdown(device_t dev)
@@ -3487,13 +3557,13 @@ bxe_shutdown(device_t dev)
struct bxe_softc *sc;
sc = device_get_softc(dev);
- DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
+ DBENTER(BXE_INFO_LOAD | BXE_INFO_RESET | BXE_INFO_UNLOAD);
BXE_CORE_LOCK(sc);
bxe_stop_locked(sc, UNLOAD_NORMAL);
BXE_CORE_UNLOCK(sc);
- DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
+ DBEXIT(BXE_INFO_LOAD | BXE_INFO_RESET | BXE_INFO_UNLOAD);
return (0);
}
@@ -3571,7 +3641,9 @@ bxe__link_status_update(struct bxe_softc *sc)
bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
else
bxe_stats_handle(sc, STATS_EVENT_STOP);
+
bxe_read_mf_cfg(sc);
+
/* Indicate link status. */
bxe_link_report(sc);
@@ -3630,7 +3702,7 @@ bxe_initial_phy_init(struct bxe_softc *sc)
DBENTER(BXE_VERBOSE_PHY);
rc = 0;
- if (!BP_NOMCP(sc)) {
+ if (!NOMCP(sc)) {
/*
* It is recommended to turn off RX flow control for 5771x
@@ -3646,6 +3718,7 @@ bxe_initial_phy_init(struct bxe_softc *sc)
bxe_release_phy_lock(sc);
bxe_calc_fc_adv(sc);
+
if (sc->link_vars.link_up) {
bxe_stats_handle(sc,STATS_EVENT_LINK_UP);
bxe_link_report(sc);
@@ -3673,9 +3746,10 @@ static int
bxe_alloc_buf_rings(struct bxe_softc *sc)
{
struct bxe_fastpath *fp;
- int i, rc = 0;
+ int i, rc;
DBENTER(BXE_VERBOSE_LOAD);
+ rc = 0;
for (i = 0; i < sc->num_queues; i++) {
fp = &sc->fp[i];
@@ -3685,14 +3759,15 @@ bxe_alloc_buf_rings(struct bxe_softc *sc)
M_DEVBUF, M_DONTWAIT, &fp->mtx);
if (fp->br == NULL) {
rc = ENOMEM;
- return(rc);
+ goto bxe_alloc_buf_rings_exit;
}
} else
BXE_PRINTF("%s(%d): Bug!\n", __FILE__, __LINE__);
}
+bxe_alloc_buf_rings_exit:
DBEXIT(BXE_VERBOSE_LOAD);
- return(rc);
+ return (rc);
}
/*
@@ -3737,9 +3812,9 @@ bxe_init_locked(struct bxe_softc *sc, int load_mode)
{
struct ifnet *ifp;
uint32_t load_code;
- int i, port;
+ int error, i, port;
- DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
+ DBENTER(BXE_INFO_LOAD | BXE_INFO_RESET);
BXE_CORE_LOCK_ASSERT(sc);
@@ -3753,7 +3828,7 @@ bxe_init_locked(struct bxe_softc *sc, int load_mode)
/* Check if the driver is still running and bail out if it is. */
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- DBPRINT(sc, BXE_INFO,
+ DBPRINT(sc, BXE_WARN,
"%s(): Init called while driver is running!\n",
__FUNCTION__);
goto bxe_init_locked_exit;
@@ -3770,7 +3845,7 @@ bxe_init_locked(struct bxe_softc *sc, int load_mode)
*/
sc->state = BXE_STATE_OPENING_WAIT4_LOAD;
- if (BP_NOMCP(sc)) {
+ if (NOMCP(sc)) {
port = BP_PORT(sc);
DBPRINT(sc, BXE_INFO,
@@ -3817,7 +3892,8 @@ bxe_init_locked(struct bxe_softc *sc, int load_mode)
sc->intr_sem = 1;
/* Initialize hardware. */
- if (bxe_init_hw(sc, load_code)){
+ error = bxe_init_hw(sc, load_code);
+	if (error != 0) {
BXE_PRINTF("%s(%d): Hardware initialization failed, "
"aborting!\n", __FILE__, __LINE__);
goto bxe_init_locked_failed1;
@@ -3826,6 +3902,7 @@ bxe_init_locked(struct bxe_softc *sc, int load_mode)
/* Calculate and save the Ethernet MTU size. */
sc->port.ether_mtu = ifp->if_mtu + ETHER_HDR_LEN +
(ETHER_VLAN_ENCAP_LEN * 2) + ETHER_CRC_LEN + 4;
+
DBPRINT(sc, BXE_INFO, "%s(): Setting MTU = %d\n",
__FUNCTION__, sc->port.ether_mtu);
@@ -3836,12 +3913,18 @@ bxe_init_locked(struct bxe_softc *sc, int load_mode)
sc->mbuf_alloc_size = PAGE_SIZE;
else
sc->mbuf_alloc_size = MJUM9BYTES;
+
DBPRINT(sc, BXE_INFO, "%s(): mbuf_alloc_size = %d, "
"max_frame_size = %d\n", __FUNCTION__,
sc->mbuf_alloc_size, sc->port.ether_mtu);
/* Setup NIC internals and enable interrupts. */
- bxe_init_nic(sc, load_code);
+ error = bxe_init_nic(sc, load_code);
+ if (error != 0) {
+ BXE_PRINTF("%s(%d): NIC initialization failed, "
+ "aborting!\n", __FILE__, __LINE__);
+ goto bxe_init_locked_failed1;
+ }
if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
(sc->common.shmem2_base)){
@@ -3855,7 +3938,8 @@ bxe_init_locked(struct bxe_softc *sc, int load_mode)
#if __FreeBSD_version >= 800000
/* Allocate buffer rings for multiqueue operation. */
- if (bxe_alloc_buf_rings(sc)) {
+ error = bxe_alloc_buf_rings(sc);
+ if (error != 0) {
BXE_PRINTF("%s(%d): Buffer ring initialization failed, "
"aborting!\n", __FILE__, __LINE__);
goto bxe_init_locked_failed1;
@@ -3863,7 +3947,7 @@ bxe_init_locked(struct bxe_softc *sc, int load_mode)
#endif
/* Tell MCP that driver load is done. */
- if (!BP_NOMCP(sc)) {
+ if (!NOMCP(sc)) {
load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE);
if (!load_code) {
BXE_PRINTF("%s(%d): Driver load failed! No MCP "
@@ -3878,10 +3962,12 @@ bxe_init_locked(struct bxe_softc *sc, int load_mode)
sc->intr_sem = 0;
/* Setup the leading connection for the controller. */
- if (bxe_setup_leading(sc))
+ error = bxe_setup_leading(sc);
+ if (error != 0) {
DBPRINT(sc, BXE_FATAL, "%s(): Initial PORT_SETUP ramrod "
"failed. State is not OPEN!\n", __FUNCTION__);
-
+ goto bxe_init_locked_failed3;
+ }
if (CHIP_IS_E1H(sc)) {
if (sc->mf_config[BP_E1HVN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) {
@@ -3917,7 +4003,6 @@ bxe_init_locked(struct bxe_softc *sc, int load_mode)
else
bxe_set_mac_addr_e1h(sc, 1);
-
DELAY(1000);
/* Perform PHY initialization for the primary port. */
@@ -3950,7 +4035,7 @@ bxe_init_locked(struct bxe_softc *sc, int load_mode)
bxe__link_status_update(sc);
DELAY(1000);
- /* Tell the stack the driver is running and the TX queue is open. */
+ /* Tell the stack the driver is running. */
ifp->if_drv_flags = IFF_DRV_RUNNING;
/* Schedule our periodic timer tick. */
@@ -3958,23 +4043,20 @@ bxe_init_locked(struct bxe_softc *sc, int load_mode)
/* Everything went OK, go ahead and exit. */
goto bxe_init_locked_exit;
- /* Try and gracefully shutdown the device because of a failure. */
bxe_init_locked_failed4:
-
+ /* Try and gracefully shutdown the device because of a failure. */
for (i = 1; i < sc->num_queues; i++)
bxe_stop_multi(sc, i);
+bxe_init_locked_failed3:
bxe_stop_leading(sc);
-
bxe_stats_handle(sc, STATS_EVENT_STOP);
bxe_init_locked_failed2:
-
bxe_int_disable(sc);
bxe_init_locked_failed1:
-
- if (!BP_NOMCP(sc)) {
+ if (!NOMCP(sc)) {
bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE);
bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE);
@@ -3985,11 +4067,10 @@ bxe_init_locked_failed1:
bxe_free_buf_rings(sc);
#endif
- DBPRINT(sc, BXE_INFO, "%s(): Initialization failed!\n", __FUNCTION__);
+ DBPRINT(sc, BXE_WARN, "%s(): Initialization failed!\n", __FUNCTION__);
bxe_init_locked_exit:
-
- DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
+ DBEXIT(BXE_INFO_LOAD | BXE_INFO_RESET);
}
/*
@@ -4039,7 +4120,7 @@ bxe_wait_ramrod(struct bxe_softc *sc, int state, int idx, int *state_p,
}
/* We timed out polling for a completion. */
- DBPRINT(sc, BXE_FATAL, "%s(): Timeout %s for state 0x%08X on fp[%d]. "
+ DBPRINT(sc, BXE_FATAL, "%s(): Timeout %s for state 0x%08X on fp[%02d]. "
"Got 0x%x instead\n", __FUNCTION__, poll ? "polling" : "waiting",
state, idx, *state_p);
@@ -4060,7 +4141,7 @@ bxe_write_dmae_phys_len(struct bxe_softc *sc, bus_addr_t phys_addr,
uint32_t addr, uint32_t len)
{
int dmae_wr_max, offset;
- DBENTER(BXE_VERBOSE_LOAD);
+ DBENTER(BXE_INSANE_REGS);
dmae_wr_max = DMAE_LEN32_WR_MAX(sc);
offset = 0;
@@ -4071,7 +4152,7 @@ bxe_write_dmae_phys_len(struct bxe_softc *sc, bus_addr_t phys_addr,
len -= dmae_wr_max;
}
bxe_write_dmae(sc, phys_addr + offset, addr + offset, len);
- DBEXIT(BXE_VERBOSE_LOAD);
+ DBEXIT(BXE_INSANE_REGS);
}
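/*
 * A sketch of the chunking loop in bxe_write_dmae_phys_len() above: one
 * DMAE transaction moves at most dmae_wr_max 32-bit words, so larger
 * buffers are cut into maximum-sized pieces plus a tail. In this sketch
 * all offsets and lengths are in 32-bit words, addr is a word-indexed GRC
 * address, and xx_dmae_write() is a hypothetical stand-in for the
 * single-shot write routine.
 */
static void
xx_dmae_write_len(struct xx_softc *sc, bus_addr_t phys, uint32_t addr,
    uint32_t len, uint32_t dmae_wr_max)
{
	uint32_t chunk, offset;

	for (offset = 0; len > 0; offset += chunk, len -= chunk) {
		chunk = (len > dmae_wr_max) ? dmae_wr_max : len;
		xx_dmae_write(sc, phys + offset * 4, addr + offset, chunk);
	}
}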
@@ -4119,17 +4200,17 @@ bxe_init_ind_wr(struct bxe_softc *sc, uint32_t addr, const uint32_t *data,
static void
bxe_write_big_buf(struct bxe_softc *sc, uint32_t addr, uint32_t len)
{
- DBENTER(BXE_VERBOSE_LOAD);
-#ifdef USE_DMAE
+ DBENTER(BXE_INSANE_REGS);
+#ifdef BXE_USE_DMAE
if (sc->dmae_ready)
- bxe_write_dmae_phys_len(sc, sc->gunzip_mapping, addr, len);
+ bxe_write_dmae_phys_len(sc, sc->gz_dma.paddr, addr, len);
else
- bxe_init_str_wr(sc, addr, sc->gunzip_buf, len);
+ bxe_init_str_wr(sc, addr, sc->gz, len);
#else
- bxe_init_str_wr(sc, addr, sc->gunzip_buf, len);
+ bxe_init_str_wr(sc, addr, sc->gz, len);
#endif
- DBEXIT(BXE_VERBOSE_LOAD);
+ DBEXIT(BXE_INSANE_REGS);
}
/*
@@ -4148,9 +4229,9 @@ bxe_init_fill(struct bxe_softc *sc, uint32_t addr, int fill, uint32_t len)
DBENTER(BXE_VERBOSE_LOAD);
- length = (((len * 4) > FW_BUF_SIZE) ? FW_BUF_SIZE : (len * 4));
+ length = (((len * 4) > BXE_FW_BUF_SIZE) ? BXE_FW_BUF_SIZE : (len * 4));
leftovers = length / 4;
- memset(sc->gunzip_buf, fill, length);
+ memset(sc->gz, fill, length);
for (i = 0; i < len; i += leftovers) {
cur_len = min(leftovers, len - i);
@@ -4173,13 +4254,15 @@ bxe_init_wr_64(struct bxe_softc *sc, uint32_t addr, const uint32_t *data,
uint32_t buf_len32, cur_len, len;
int i;
- buf_len32 = FW_BUF_SIZE / 4;
+ DBENTER(BXE_INSANE_REGS);
+
+ buf_len32 = BXE_FW_BUF_SIZE / 4;
len = len64 * 2;
/* 64 bit value is in a blob: first low DWORD, then high DWORD. */
data64 = HILO_U64((*(data + 1)), (*data));
- len64 = min((uint32_t)(FW_BUF_SIZE / 8), len64);
+ len64 = min((uint32_t)(BXE_FW_BUF_SIZE / 8), len64);
for (i = 0; i < len64; i++) {
- pdata = ((uint64_t *)(sc->gunzip_buf)) + i;
+ pdata = ((uint64_t *)(sc->gz)) + i;
*pdata = data64;
}
@@ -4187,6 +4270,8 @@ bxe_init_wr_64(struct bxe_softc *sc, uint32_t addr, const uint32_t *data,
cur_len = min(buf_len32, len - i);
bxe_write_big_buf(sc, addr + i*4, cur_len);
}
+
+ DBEXIT(BXE_INSANE_REGS);
}
@@ -4247,15 +4332,15 @@ static void
bxe_write_big_buf_wb(struct bxe_softc *sc, uint32_t addr, uint32_t len)
{
if (sc->dmae_ready)
- bxe_write_dmae_phys_len(sc, sc->gunzip_mapping, addr, len);
+ bxe_write_dmae_phys_len(sc, sc->gz_dma.paddr, addr, len);
else
- bxe_init_ind_wr(sc, addr, sc->gunzip_buf, len);
+ bxe_init_ind_wr(sc, addr, sc->gz, len);
}
#define VIRT_WR_DMAE_LEN(sc, data, addr, len32, le32_swap) \
do { \
- memcpy(sc->gunzip_buf, data, (len32)*4); \
+ memcpy(sc->gz, data, (len32)*4); \
bxe_write_big_buf_wb(sc, addr, len32); \
} while (0)
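/*
 * VIRT_WR_DMAE_LEN() above wraps two statements in do { } while (0) so the
 * macro expands to exactly one statement; without it, an unbraced if/else
 * around the macro would mis-parse:
 */
if (sc->dmae_ready)
	VIRT_WR_DMAE_LEN(sc, data, addr, len32, 0);	/* one statement */
else
	bxe_init_ind_wr(sc, addr, data, len32);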
@@ -4271,7 +4356,7 @@ bxe_init_wr_wb(struct bxe_softc *sc, uint32_t addr, const uint32_t *data,
{
const uint32_t *old_data;
- DBENTER(BXE_VERBOSE_LOAD);
+ DBENTER(BXE_INSANE_REGS);
old_data = data;
data = (const uint32_t *)bxe_sel_blob(sc, addr, (const uint8_t *)data);
if (sc->dmae_ready) {
@@ -4282,7 +4367,7 @@ bxe_init_wr_wb(struct bxe_softc *sc, uint32_t addr, const uint32_t *data,
} else
bxe_init_ind_wr(sc, addr, data, len);
- DBEXIT(BXE_VERBOSE_LOAD);
+ DBEXIT(BXE_INSANE_REGS);
}
static void
@@ -4316,6 +4401,8 @@ bxe_init_block(struct bxe_softc *sc, uint32_t block, uint32_t stage)
uint16_t op_end, op_start;
int hw_wr;
+ DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
+
op_start = INIT_OPS_OFFSETS(sc)[BLOCK_OPS_IDX(block, stage,
STAGE_START)];
op_end = INIT_OPS_OFFSETS(sc)[BLOCK_OPS_IDX(block, stage, STAGE_END)];
@@ -4370,11 +4457,14 @@ bxe_init_block(struct bxe_softc *sc, uint32_t block, uint32_t stage)
break;
}
}
+
+ DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
}
/*
* Handles controller initialization when called from an unlocked routine.
* ifconfig calls this function.
+ *
* Returns:
* None.
*/
@@ -4384,16 +4474,12 @@ bxe_init(void *xsc)
struct bxe_softc *sc;
sc = xsc;
- DBENTER(BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
BXE_CORE_LOCK(sc);
bxe_init_locked(sc, LOAD_NORMAL);
BXE_CORE_UNLOCK(sc);
-
- DBEXIT(BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
}
-
/*
* Release all resources used by the driver.
*
@@ -4407,7 +4493,6 @@ static void
bxe_release_resources(struct bxe_softc *sc)
{
device_t dev;
- int i;
DBENTER(BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
@@ -4417,57 +4502,14 @@ bxe_release_resources(struct bxe_softc *sc)
if (sc->bxe_ifp != NULL)
if_free(sc->bxe_ifp);
- /* Release interrupt resources. */
- bxe_interrupt_detach(sc);
-
- if ((sc->bxe_flags & BXE_USING_MSIX_FLAG) && sc->msix_count) {
-
- for (i = 0; i < sc->msix_count; i++) {
- DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET |
- BXE_VERBOSE_INTR), "%s(): Releasing MSI-X[%d] "
- "vector.\n", __FUNCTION__, i);
- if (sc->bxe_msix_res[i] && sc->bxe_msix_rid[i])
- bus_release_resource(dev, SYS_RES_IRQ,
- sc->bxe_msix_rid[i], sc->bxe_msix_res[i]);
- }
-
- pci_release_msi(dev);
-
- } else if ((sc->bxe_flags & BXE_USING_MSI_FLAG) && sc->msi_count) {
-
- for (i = 0; i < sc->msi_count; i++) {
- DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET |
- BXE_VERBOSE_INTR), "%s(): Releasing MSI[%d] "
- "vector.\n", __FUNCTION__, i);
- if (sc->bxe_msi_res[i] && sc->bxe_msi_rid[i])
- bus_release_resource(dev, SYS_RES_IRQ,
- sc->bxe_msi_rid[i], sc->bxe_msi_res[i]);
- }
-
- pci_release_msi(dev);
-
- } else {
-
- DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET |
- BXE_VERBOSE_INTR), "%s(): Releasing legacy interrupt.\n",
- __FUNCTION__);
- if (sc->bxe_irq_res != NULL)
- bus_release_resource(dev, SYS_RES_IRQ,
- sc->bxe_irq_rid, sc->bxe_irq_res);
- }
-
/* Free the DMA resources. */
- bxe_dma_free(sc);
-
- bxe_release_pci_resources(sc);
+ bxe_host_structures_free(sc);
#if __FreeBSD_version >= 800000
/* Free multiqueue buffer rings. */
bxe_free_buf_rings(sc);
#endif
- /* Free remaining fastpath resources. */
- bxe_free_mutexes(sc);
}
@@ -4484,7 +4526,7 @@ bxe_release_resources(struct bxe_softc *sc)
static void
bxe_reg_wr_ind(struct bxe_softc *sc, uint32_t offset, uint32_t val)
{
- DBPRINT(sc, BXE_INSANE, "%s(); offset = 0x%08X, val = 0x%08X\n",
+ DBPRINT(sc, BXE_INSANE_REGS, "%s(); offset = 0x%08X, val = 0x%08X\n",
__FUNCTION__, offset, val);
pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, offset, 4);
@@ -4518,7 +4560,7 @@ bxe_reg_rd_ind(struct bxe_softc *sc, uint32_t offset)
pci_write_config(sc->dev, PCICFG_GRC_ADDRESS,
PCICFG_VENDOR_ID_OFFSET, 4);
- DBPRINT(sc, BXE_INSANE, "%s(); offset = 0x%08X, val = 0x%08X\n",
+ DBPRINT(sc, BXE_INSANE_REGS, "%s(); offset = 0x%08X, val = 0x%08X\n",
__FUNCTION__, offset, val);
return (val);
}
@@ -4548,7 +4590,7 @@ bxe_post_dmae(struct bxe_softc *sc, struct dmae_command *dmae, int idx)
for (i = 0; i < (sizeof(struct dmae_command) / 4); i++) {
REG_WR(sc, cmd_offset + i * 4, *(((uint32_t *)dmae) + i));
- DBPRINT(sc, BXE_INSANE, "%s(): DMAE cmd[%d].%d : 0x%08X\n",
+ DBPRINT(sc, BXE_INSANE_REGS, "%s(): DMAE cmd[%d].%d : 0x%08X\n",
__FUNCTION__, idx, i, cmd_offset + i * 4);
}
@@ -4666,7 +4708,7 @@ bxe_read_dmae(struct bxe_softc *sc, uint32_t src_addr,
uint32_t *data, *wb_comp;
int i, timeout;
- DBENTER(BXE_INSANE);
+ DBENTER(BXE_INSANE_REGS);
wb_comp = BXE_SP(sc, wb_comp);
/* Fall back to indirect access if DMAE is not ready. */
@@ -4728,7 +4770,7 @@ bxe_read_dmae(struct bxe_softc *sc, uint32_t src_addr,
BXE_DMAE_UNLOCK(sc);
bxe_read_dmae_exit:
- DBEXIT(BXE_INSANE);
+ DBEXIT(BXE_INSANE_REGS);
}
/*
@@ -4962,7 +5004,7 @@ bxe_int_enable(struct bxe_softc *sc)
port = BP_PORT(sc);
hc_addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
val = REG_RD(sc, hc_addr);
- if (sc->bxe_flags & BXE_USING_MSIX_FLAG) {
+ if (sc->msix_count > 0) {
if (sc->msix_count == 1) {
/* Single interrupt, multiple queues.*/
@@ -4993,7 +5035,7 @@ bxe_int_enable(struct bxe_softc *sc)
HC_CONFIG_0_REG_ATTN_BIT_EN_0);
}
- } else if (sc->bxe_flags & BXE_USING_MSI_FLAG) {
+ } else if (sc->msi_count > 0) {
if (sc->msi_count == 1) {
@@ -5080,7 +5122,7 @@ bxe_int_disable(struct bxe_softc *sc)
uint32_t hc_addr, val;
int port;
- DBENTER(BXE_VERBOSE_INTR);
+ DBENTER(BXE_VERBOSE_INTR | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
port = BP_PORT(sc);
hc_addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
@@ -5097,7 +5139,7 @@ bxe_int_disable(struct bxe_softc *sc)
__FUNCTION__, val);
}
- DBEXIT(BXE_VERBOSE_INTR);
+ DBEXIT(BXE_VERBOSE_INTR | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
}
#define BXE_CRC32_RESIDUAL 0xdebb20e3
@@ -5592,10 +5634,6 @@ bxe_ack_sb(struct bxe_softc *sc, uint8_t sb_id, uint8_t storm, uint16_t index,
struct igu_ack_register igu_ack;
uint32_t hc_addr;
- DBPRINT(sc, BXE_VERBOSE_INTR, "%s(): sb_id = %d, storm = %d, "
- "index = %d, int_mode = %d, update = %d.\n", __FUNCTION__, sb_id,
- storm, index, int_mode, update);
-
hc_addr = (HC_REG_COMMAND_REG + BP_PORT(sc) * 32 + COMMAND_REG_INT_ACK);
igu_ack.status_block_index = index;
igu_ack.sb_id_and_flags =
@@ -5605,11 +5643,6 @@ bxe_ack_sb(struct bxe_softc *sc, uint8_t sb_id, uint8_t storm, uint16_t index,
(int_mode << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
rmb();
-
- DBPRINT(sc, BXE_VERBOSE_INTR,
- "%s(): Writing 0x%08X to HC addr 0x%08X\n", __FUNCTION__,
- (*(uint32_t *) &igu_ack), hc_addr);
-
REG_WR(sc, hc_addr, (*(uint32_t *) &igu_ack));
wmb();
}
@@ -5618,7 +5651,8 @@ bxe_ack_sb(struct bxe_softc *sc, uint8_t sb_id, uint8_t storm, uint16_t index,
* Update fastpath status block index.
*
* Returns:
- * 0
+ *   0 = No completes, 1 = TX completes, 2 = RX completes,
+ * 3 = RX & TX completes
*/
static __inline uint16_t
bxe_update_fpsb_idx(struct bxe_fastpath *fp)
@@ -5686,7 +5720,7 @@ bxe_sp_event(struct bxe_fastpath *fp, union eth_rx_cqe *rr_cqe)
cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
DBPRINT(sc, BXE_VERBOSE_RAMROD, "%s(): CID = %d, ramrod command = %d, "
- "device state = 0x%08X, fp[%d].state = 0x%08X, type = %d\n",
+ "device state = 0x%08X, fp[%02d].state = 0x%08X, type = %d\n",
__FUNCTION__, cid, command, sc->state, fp->index, fp->state,
rr_cqe->ramrod_cqe.ramrod_type);
@@ -5699,13 +5733,13 @@ bxe_sp_event(struct bxe_fastpath *fp, union eth_rx_cqe *rr_cqe)
switch (command | fp->state) {
case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BXE_FP_STATE_OPENING):
DBPRINT(sc, BXE_VERBOSE_RAMROD,
- "%s(): Completed fp[%d] CLIENT_SETUP Ramrod.\n",
+ "%s(): Completed fp[%02d] CLIENT_SETUP Ramrod.\n",
__FUNCTION__, cid);
fp->state = BXE_FP_STATE_OPEN;
break;
case (RAMROD_CMD_ID_ETH_HALT | BXE_FP_STATE_HALTING):
DBPRINT(sc, BXE_VERBOSE_RAMROD,
- "%s(): Completed fp[%d] ETH_HALT ramrod\n",
+ "%s(): Completed fp[%02d] ETH_HALT ramrod\n",
__FUNCTION__, cid);
fp->state = BXE_FP_STATE_HALTED;
break;
@@ -5734,7 +5768,7 @@ bxe_sp_event(struct bxe_fastpath *fp, union eth_rx_cqe *rr_cqe)
break;
case (RAMROD_CMD_ID_ETH_CFC_DEL | BXE_STATE_CLOSING_WAIT4_HALT):
DBPRINT(sc, BXE_VERBOSE_RAMROD,
- "%s(): Completed fp[%d] ETH_CFC_DEL ramrod.\n",
+ "%s(): Completed fp[%02d] ETH_CFC_DEL ramrod.\n",
__FUNCTION__, cid);
sc->fp[cid].state = BXE_FP_STATE_CLOSED;
break;
@@ -5787,7 +5821,7 @@ bxe_acquire_hw_lock(struct bxe_softc *sc, uint32_t resource)
/* Validating that the resource is within range. */
if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
- DBPRINT(sc, BXE_INFO, "%s(): Resource is out of range! "
+ DBPRINT(sc, BXE_WARN, "%s(): Resource is out of range! "
"resource(0x%08X) > HW_LOCK_MAX_RESOURCE_VALUE(0x%08X)\n",
__FUNCTION__, resource, HW_LOCK_MAX_RESOURCE_VALUE);
rc = EINVAL;
@@ -5797,7 +5831,7 @@ bxe_acquire_hw_lock(struct bxe_softc *sc, uint32_t resource)
/* Validating that the resource is not already taken. */
lock_status = REG_RD(sc, hw_lock_control_reg);
if (lock_status & resource_bit) {
- DBPRINT(sc, BXE_INFO, "%s(): Failed to acquire lock! "
+ DBPRINT(sc, BXE_WARN, "%s(): Failed to acquire lock! "
"lock_status = 0x%08X, resource_bit = 0x%08X\n",
__FUNCTION__, lock_status, resource_bit);
rc = EEXIST;
@@ -5815,7 +5849,7 @@ bxe_acquire_hw_lock(struct bxe_softc *sc, uint32_t resource)
DELAY(5000);
}
- DBPRINT(sc, BXE_INFO, "%s(): Timeout!\n", __FUNCTION__);
+ DBPRINT(sc, BXE_WARN, "%s(): Timeout!\n", __FUNCTION__);
rc = EAGAIN;
bxe_acquire_hw_lock_exit:
@@ -5846,7 +5880,7 @@ bxe_release_hw_lock(struct bxe_softc *sc, uint32_t resource)
rc = 0;
/* Validating that the resource is within range */
if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
- DBPRINT(sc, BXE_INFO, "%s(): Resource is out of range! "
+ DBPRINT(sc, BXE_WARN, "%s(): Resource is out of range! "
"resource(0x%08X) > HW_LOCK_MAX_RESOURCE_VALUE(0x%08X)\n",
__FUNCTION__, resource, HW_LOCK_MAX_RESOURCE_VALUE);
rc = EINVAL;
@@ -5861,7 +5895,7 @@ bxe_release_hw_lock(struct bxe_softc *sc, uint32_t resource)
/* Validating that the resource is currently taken */
lock_status = REG_RD(sc, hw_lock_control_reg);
if (!(lock_status & resource_bit)) {
- DBPRINT(sc, BXE_INFO, "%s(): The resource is not currently "
+ DBPRINT(sc, BXE_WARN, "%s(): The resource is not currently "
"locked! lock_status = 0x%08X, resource_bit = 0x%08X\n",
__FUNCTION__, lock_status, resource_bit);
rc = EFAULT;
@@ -6045,15 +6079,13 @@ bxe_set_spio(struct bxe_softc *sc, int spio_num, uint32_t mode)
uint32_t spio_reg, spio_mask;
int rc;
- DBENTER(BXE_VERBOSE_MISC);
-
rc = 0;
spio_mask = 1 << spio_num;
/* Validate the SPIO. */
if ((spio_num < MISC_REGISTERS_SPIO_4) ||
(spio_num > MISC_REGISTERS_SPIO_7)) {
- DBPRINT(sc, BXE_FATAL, "%s(): Invalid SPIO (%d)!\n",
+ DBPRINT(sc, BXE_WARN, "%s(): Invalid SPIO (%d)!\n",
__FUNCTION__, spio_num);
rc = EINVAL;
goto bxe_set_spio_exit;
@@ -6071,24 +6103,24 @@ bxe_set_spio(struct bxe_softc *sc, int spio_num, uint32_t mode)
switch (mode) {
case MISC_REGISTERS_SPIO_OUTPUT_LOW :
- DBPRINT(sc, BXE_INFO, "%s(): Set SPIO %d -> output low\n",
- __FUNCTION__, spio_num);
+ DBPRINT(sc, BXE_VERBOSE_MISC, "%s(): Set SPIO %d -> "
+ "output low\n", __FUNCTION__, spio_num);
spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
break;
case MISC_REGISTERS_SPIO_OUTPUT_HIGH :
- DBPRINT(sc, BXE_INFO, "%s(): Set SPIO %d -> output high\n",
- __FUNCTION__, spio_num);
+ DBPRINT(sc, BXE_VERBOSE_MISC, "%s(): Set SPIO %d -> "
+ "output high\n", __FUNCTION__, spio_num);
spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
break;
case MISC_REGISTERS_SPIO_INPUT_HI_Z:
- DBPRINT(sc, BXE_INFO, "%s(): Set SPIO %d -> input\n",
- __FUNCTION__, spio_num);
+ DBPRINT(sc, BXE_VERBOSE_MISC, "%s(): Set SPIO %d -> "
+ "input\n", __FUNCTION__, spio_num);
spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
break;
default:
- DBPRINT(sc, BXE_FATAL, "%s(): Unknown SPIO mode (0x%08X)!\n",
+ DBPRINT(sc, BXE_WARN, "%s(): Unknown SPIO mode (0x%08X)!\n",
__FUNCTION__, mode);
break;
}
@@ -6101,7 +6133,6 @@ bxe_set_spio(struct bxe_softc *sc, int spio_num, uint32_t mode)
}
bxe_set_spio_exit:
- DBEXIT(BXE_VERBOSE_MISC);
return (rc);
}
@@ -6202,9 +6233,6 @@ bxe_link_attn(struct bxe_softc *sc)
bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
}
- /* Report the new link status. */
- bxe_link_report(sc);
-
/* Need additional handling for multi-function devices. */
if (IS_E1HMF(sc)) {
port = BP_PORT(sc);
@@ -6254,11 +6282,9 @@ bxe_pmf_update(struct bxe_softc *sc)
uint32_t val;
int port;
- DBENTER(BXE_VERBOSE_INTR);
-
/* Record that this driver instance is managing the port. */
sc->port.pmf = 1;
- DBPRINT(sc, BXE_INFO, "%s(): Enabling port management function.\n",
+ DBPRINT(sc, BXE_INFO, "%s(): Enabling this port as PMF.\n",
__FUNCTION__);
/* Enable NIG attention. */
@@ -6268,8 +6294,6 @@ bxe_pmf_update(struct bxe_softc *sc)
REG_WR(sc, HC_REG_LEADING_EDGE_0 + port * 8, val);
bxe_stats_handle(sc, STATS_EVENT_PMF);
-
- DBEXIT(BXE_VERBOSE_INTR);
}
/* 8073 Download definitions */
@@ -6376,9 +6400,9 @@ bxe_sp_post(struct bxe_softc *sc, int command, int cid, uint32_t data_hi,
{
int func, rc;
- DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_RAMROD);
-
- DBRUNMSG(BXE_VERBOSE_RAMROD, bxe_decode_ramrod_cmd(sc, command));
+ DBRUNMSG((BXE_EXTREME_LOAD | BXE_EXTREME_RESET |
+ BXE_EXTREME_UNLOAD | BXE_EXTREME_RAMROD),
+ bxe_decode_ramrod_cmd(sc, command));
DBPRINT(sc, BXE_VERBOSE_RAMROD, "%s(): cid = %d, data_hi = 0x%08X, "
"data_low = 0x%08X, remaining spq entries = %d\n", __FUNCTION__,
@@ -6437,8 +6461,6 @@ bxe_sp_post(struct bxe_softc *sc, int command, int cid, uint32_t data_hi,
bxe_sp_post_exit:
BXE_SP_UNLOCK(sc);
- DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_RAMROD);
-
return (rc);
}
@@ -6512,7 +6534,7 @@ bxe_update_dsb_idx(struct bxe_softc *sc)
uint16_t rc;
rc = 0;
- dsb = sc->def_status_block;
+ dsb = sc->def_sb;
/* Read memory barrier since block is written by hardware. */
rmb();
@@ -7130,9 +7152,9 @@ bxe_attn_int(struct bxe_softc* sc)
DBENTER(BXE_VERBOSE_INTR);
- attn_bits = le32toh(sc->def_status_block->atten_status_block.attn_bits);
+ attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits);
attn_ack =
- le32toh(sc->def_status_block->atten_status_block.attn_bits_ack);
+ le32toh(sc->def_sb->atten_status_block.attn_bits_ack);
attn_state = sc->attn_state;
asserted = attn_bits & ~attn_ack & ~attn_state;
deasserted = ~attn_bits & attn_ack & attn_state;
@@ -7262,7 +7284,7 @@ bxe_attn_int(struct bxe_softc* sc)
#ifdef __i386__
#define BITS_PER_LONG 32
-#else /*Only support x86_64(AMD64 and EM64T)*/
+#else
#define BITS_PER_LONG 64
#endif
@@ -7290,19 +7312,19 @@ static void
bxe_stats_storm_post(struct bxe_softc *sc)
{
struct eth_query_ramrod_data ramrod_data = {0};
- int rc;
+ int i, rc;
DBENTER(BXE_INSANE_STATS);
if (!sc->stats_pending) {
ramrod_data.drv_counter = sc->stats_counter++;
ramrod_data.collect_port = sc->port.pmf ? 1 : 0;
- ramrod_data.ctr_id_vector = (1 << BP_CL_ID(sc));
+ for (i = 0; i < sc->num_queues; i++)
+ ramrod_data.ctr_id_vector |= (1 << sc->fp[i].cl_id);
rc = bxe_sp_post(sc, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
((uint32_t *)&ramrod_data)[1],
((uint32_t *)&ramrod_data)[0], 0);
-
if (rc == 0) {
/* Stats ramrod has its own slot on the SPQ. */
sc->spq_left++;
@@ -7313,22 +7335,32 @@ bxe_stats_storm_post(struct bxe_softc *sc)
DBEXIT(BXE_INSANE_STATS);
}
+/*
+ * Setup the address used by the driver to report port-based statistics
+ * back to the controller.
+ *
+ * Returns:
+ * None.
+ */
static void
bxe_stats_port_base_init(struct bxe_softc *sc)
{
uint32_t *stats_comp;
struct dmae_command *dmae;
- if (!sc->port.pmf || !sc->port.port_stx) {
+ DBENTER(BXE_VERBOSE_STATS);
+
+ /* Only the port management function (PMF) does this work. */
+ if ((sc->port.pmf == 0) || !sc->port.port_stx) {
BXE_PRINTF("%s(%d): Invalid statistcs port setup!\n",
__FILE__, __LINE__);
- return;
+ goto bxe_stats_port_base_init_exit;
}
stats_comp = BXE_SP(sc, stats_comp);
+ sc->executer_idx = 0;
- sc->executer_idx = 0; /* dmae clients */
-
+ /* DMA the address of the drivers port statistics block. */
dmae = BXE_SP(sc, dmae[sc->executer_idx++]);
dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
@@ -7352,8 +7384,18 @@ bxe_stats_port_base_init(struct bxe_softc *sc)
*stats_comp = 0;
bxe_stats_hw_post(sc);
bxe_stats_comp(sc);
+
+bxe_stats_port_base_init_exit:
+ DBEXIT(BXE_VERBOSE_STATS);
}
+/*
+ * Setup the address used by the driver to report function-based statistics
+ * back to the controller.
+ *
+ * Returns:
+ * None.
+ */
static void
bxe_stats_func_base_init(struct bxe_softc *sc)
{
@@ -7361,12 +7403,22 @@ bxe_stats_func_base_init(struct bxe_softc *sc)
int vn, vn_max;
uint32_t func_stx;
+ DBENTER(BXE_VERBOSE_STATS);
+
+ /* Only the port management function (PMF) does this work. */
+ if ((sc->port.pmf == 0) || !sc->func_stx) {
+ BXE_PRINTF("%s(%d): Invalid statistcs function setup!\n",
+ __FILE__, __LINE__);
+ goto bxe_stats_func_base_init_exit;
+ }
+
port = BP_PORT(sc);
func_stx = sc->func_stx;
vn_max = IS_E1HMF(sc) ? E1HVN_MAX : E1VN_MAX;
+ /* Initialize each function individually. */
for (vn = VN_0; vn < vn_max; vn++) {
- func = 2*vn + port;
+ func = 2 * vn + port;
sc->func_stx = SHMEM_RD(sc, func_mb[func].fw_mb_param);
bxe_stats_func_init(sc);
bxe_stats_hw_post(sc);
@@ -7374,20 +7426,38 @@ bxe_stats_func_base_init(struct bxe_softc *sc)
}
sc->func_stx = func_stx;
+
+bxe_stats_func_base_init_exit:
+ DBEXIT(BXE_VERBOSE_STATS);
}
+/*
+ * DMA the function-based statistics to the controller.
+ *
+ * Returns:
+ * None.
+ */
static void
bxe_stats_func_base_update(struct bxe_softc *sc)
{
uint32_t *stats_comp;
struct dmae_command *dmae;
+ DBENTER(BXE_VERBOSE_STATS);
+
+ /* Only the port management function (PMF) does this work. */
+ if ((sc->port.pmf == 0) || !sc->func_stx) {
+ BXE_PRINTF("%s(%d): Invalid statistcs function update!\n",
+ __FILE__, __LINE__);
+ goto bxe_stats_func_base_update_exit;
+ }
+
dmae = &sc->stats_dmae;
stats_comp = BXE_SP(sc, stats_comp);
-
sc->executer_idx = 0;
memset(dmae, 0, sizeof(struct dmae_command));
+ /* DMA the function statistics from the driver to the H/W. */
dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
@@ -7410,6 +7480,9 @@ bxe_stats_func_base_update(struct bxe_softc *sc)
*stats_comp = 0;
bxe_stats_hw_post(sc);
bxe_stats_comp(sc);
+
+bxe_stats_func_base_update_exit:
+ DBEXIT(BXE_VERBOSE_STATS);
}
@@ -7428,7 +7501,7 @@ bxe_stats_init(struct bxe_softc *sc)
DBENTER(BXE_VERBOSE_STATS);
if (sc->stats_enable == FALSE)
- return;
+ goto bxe_stats_init_exit;
port = BP_PORT(sc);
func = BP_FUNC(sc);
@@ -7436,19 +7509,21 @@ bxe_stats_init(struct bxe_softc *sc)
sc->stats_counter = 0;
sc->stats_pending = 0;
- /* Fetch the offset of port statistics in shared memory. */
- if (BP_NOMCP(sc)){
+ /* Fetch the offset of port & function statistics in shared memory. */
+ if (NOMCP(sc)){
sc->port.port_stx = 0;
sc->func_stx = 0;
} else{
sc->port.port_stx = SHMEM_RD(sc, port_mb[port].port_stx);
sc->func_stx = SHMEM_RD(sc, func_mb[func].fw_mb_param);
}
- /* If this is still 0 then no management firmware running. */
+
DBPRINT(sc, BXE_VERBOSE_STATS, "%s(): sc->port.port_stx = 0x%08X\n",
__FUNCTION__, sc->port.port_stx);
+ DBPRINT(sc, BXE_VERBOSE_STATS, "%s(): sc->func_stx = 0x%08X\n",
+ __FUNCTION__, sc->func_stx);
- /* port stats */
+ /* Port statistics. */
memset(&(sc->port.old_nig_stats), 0, sizeof(struct nig_stats));
sc->port.old_nig_stats.brb_discard = REG_RD(sc,
NIG_REG_STAT0_BRB_DISCARD + port * 0x38);
@@ -7459,10 +7534,11 @@ bxe_stats_init(struct bxe_softc *sc)
REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port * 0x50,
&(sc->port.old_nig_stats.egress_mac_pkt1_lo), 2);
- /* function stats */
+ /* Function statistics. */
for (i = 0; i < sc->num_queues; i++) {
fp = &sc->fp[i];
- /* Clear function statistics memory. */
+
+ /* Clear all per-queue statistics. */
memset(&fp->old_tclient, 0,
sizeof(struct tstorm_per_client_stats));
memset(&fp->old_uclient, 0,
@@ -7473,18 +7549,21 @@ bxe_stats_init(struct bxe_softc *sc)
sizeof(struct bxe_q_stats));
}
+ /* ToDo: Clear any driver specific statistics? */
+
sc->stats_state = STATS_STATE_DISABLED;
- /* Init port statistics if we're the port management function. */
- if (sc->port.pmf) {
- /* Port_stx are in 57710 when ncsi presnt & always in 57711.*/
+ if (sc->port.pmf == 1) {
+ /* Init port & function stats if we're PMF. */
if (sc->port.port_stx)
bxe_stats_port_base_init(sc);
if (sc->func_stx)
bxe_stats_func_base_init(sc);
} else if (sc->func_stx)
+ /* Update function stats if we're not PMF. */
bxe_stats_func_base_update(sc);
+bxe_stats_init_exit:
DBEXIT(BXE_VERBOSE_STATS);
}
@@ -7548,9 +7627,10 @@ bxe_stats_hw_post(struct bxe_softc *sc)
}
/*
+ * Delay routine which polls for the DMA engine to complete.
*
* Returns:
- * 1
+ * 0 = Failure, !0 = Success
*/
static int
bxe_stats_comp(struct bxe_softc *sc)
@@ -7562,6 +7642,7 @@ bxe_stats_comp(struct bxe_softc *sc)
stats_comp = BXE_SP(sc, stats_comp);
cnt = 10;
+
while (*stats_comp != DMAE_COMP_VAL) {
if (!cnt) {
BXE_PRINTF("%s(%d): Timeout waiting for statistics "
@@ -7573,11 +7654,12 @@ bxe_stats_comp(struct bxe_softc *sc)
}
DBEXIT(BXE_VERBOSE_STATS);
+ /* ToDo: Shouldn't this return the value of cnt? */
return (1);
}
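
The ToDo above notes that returning cnt would give bxe_stats_comp() the 0-on-failure semantics its header comment now documents. A small sketch of that countdown-poll pattern, with udelay() stubbed out and the completion magic value assumed:

#include <stdint.h>

#define DMAE_COMP_VAL	0xe0d0d0aeU	/* assumed completion magic */

static void udelay(int us) { (void)us; /* real code would spin/sleep */ }

/* Returns 0 on timeout (failure), non-zero on success. */
static int
poll_stats_comp(volatile uint32_t *stats_comp)
{
	int cnt = 10;

	while (*stats_comp != DMAE_COMP_VAL) {
		if (cnt == 0)
			return (0);
		cnt--;
		udelay(1000);
	}
	return (cnt > 0 ? cnt : 1);
}

int
main(void)
{
	volatile uint32_t comp = DMAE_COMP_VAL;

	return (poll_stats_comp(&comp) ? 0 : 1);
}
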
/*
- * Initialize port statistics.
+ * DMA port statistics from controller to driver.
*
* Returns:
* None.
@@ -7595,13 +7677,14 @@ bxe_stats_pmf_update(struct bxe_softc *sc)
loader_idx = PMF_DMAE_C(sc);
/* We shouldn't be here if any of the following are false. */
- if (!IS_E1HMF(sc) || !sc->port.pmf || !sc->port.port_stx) {
- DBPRINT(sc, BXE_WARN, "%s(): Bug!\n", __FUNCTION__);
+ if (!IS_E1HMF(sc) || (sc->port.pmf == 0) || !sc->port.port_stx) {
+ BXE_PRINTF("%s(%d): Statistics bug!\n", __FILE__, __LINE__);
goto bxe_stats_pmf_update_exit;
}
sc->executer_idx = 0;
+ /* Instruct DMA engine to copy port statistics from H/W to driver. */
opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
@@ -7638,6 +7721,7 @@ bxe_stats_pmf_update(struct bxe_softc *sc)
dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, stats_comp));
dmae->comp_val = DMAE_COMP_VAL;
+ /* Start the DMA and wait for the result. */
*stats_comp = 0;
bxe_stats_hw_post(sc);
bxe_stats_comp(sc);
@@ -7647,7 +7731,10 @@ bxe_stats_pmf_update_exit:
}
/*
- * Prepare the DMAE parameters required for port statistics.
+ * Prepare the DMAE parameters required for all statistics.
+ *
+ * This function should only be called by the driver instance
+ * that is designated as the port management function (PMF).
*
* Returns:
* None.
@@ -7666,8 +7753,8 @@ bxe_stats_port_init(struct bxe_softc *sc)
loader_idx = PMF_DMAE_C(sc);
stats_comp = BXE_SP(sc, stats_comp);
- /* Sanity check. */
- if (!sc->link_vars.link_up || !sc->port.pmf) {
+ /* Only the port management function (PMF) does this work. */
+ if (!sc->link_vars.link_up || (sc->port.pmf == 0)) {
BXE_PRINTF("%s(%d): Invalid statistics port setup!\n",
__FILE__, __LINE__);
goto bxe_stats_port_init_exit;
@@ -7675,7 +7762,7 @@ bxe_stats_port_init(struct bxe_softc *sc)
sc->executer_idx = 0;
- /* Setup statistics reporting to MCP. */
+ /* The same opcode is used for multiple DMA operations. */
opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
@@ -7728,7 +7815,7 @@ bxe_stats_port_init(struct bxe_softc *sc)
(vn << DMAE_CMD_E1HVN_SHIFT));
if (sc->link_vars.mac_type == MAC_TYPE_BMAC) {
- /* Enable statistics for the BMAC. */
+ /* Enable statistics for the 10Gb BMAC. */
mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
NIG_REG_INGRESS_BMAC0_MEM);
@@ -7764,7 +7851,7 @@ bxe_stats_port_init(struct bxe_softc *sc)
dmae->comp_val = 1;
} else if (sc->link_vars.mac_type == MAC_TYPE_EMAC) {
- /* Enable statistics for the EMAC. */
+ /* Enable statistics for the 1Gb EMAC. */
mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
@@ -7873,6 +7960,8 @@ bxe_stats_port_init_exit:
/*
* Prepare the DMAE parameters required for function statistics.
*
+ * This function is called by all driver instances.
+ *
* Returns:
* None.
*/
@@ -7884,15 +7973,14 @@ bxe_stats_func_init(struct bxe_softc *sc)
DBENTER(BXE_VERBOSE_STATS);
- dmae = &sc->stats_dmae;
- stats_comp = BXE_SP(sc, stats_comp);
-
if (!sc->func_stx) {
BXE_PRINTF("%s(%d): Invalid statistics function setup!\n",
__FILE__, __LINE__);
goto bxe_stats_func_init_exit;
}
+ dmae = &sc->stats_dmae;
+ stats_comp = BXE_SP(sc, stats_comp);
sc->executer_idx = 0;
memset(dmae, 0, sizeof(struct dmae_command));
@@ -7924,6 +8012,8 @@ bxe_stats_func_init_exit:
}
/*
+ * Starts a statistics update DMA and waits for completion.
+ *
* Returns:
* None.
*/
@@ -7933,9 +8023,8 @@ bxe_stats_start(struct bxe_softc *sc)
DBENTER(BXE_VERBOSE_STATS);
- if (sc->port.pmf)
+ if (sc->port.pmf == 1)
bxe_stats_port_init(sc);
-
else if (sc->func_stx)
bxe_stats_func_init(sc);
@@ -7978,6 +8067,7 @@ bxe_stats_restart(struct bxe_softc *sc)
}
/*
+ * Update the Big MAC (10Gb BMAC) statistics.
*
* Returns:
* None.
@@ -7987,7 +8077,7 @@ bxe_stats_bmac_update(struct bxe_softc *sc)
{
struct bmac_stats *new;
struct host_port_stats *pstats;
- struct bxe_eth_stats *estats;
+ struct bxe_port_stats *estats;
struct regpair diff;
DBENTER(BXE_INSANE_STATS);
@@ -7996,19 +8086,32 @@ bxe_stats_bmac_update(struct bxe_softc *sc)
pstats = BXE_SP(sc, port_stats);
estats = &sc->eth_stats;
- UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
- UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
- UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
- UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
- UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
- UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
- UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
- UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
- UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
- UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
- UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
- UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
- UPDATE_STAT64(tx_stat_gt127, tx_stat_etherstatspkts65octetsto127octets);
+ UPDATE_STAT64(rx_stat_grerb,
+ rx_stat_ifhcinbadoctets);
+ UPDATE_STAT64(rx_stat_grfcs,
+ rx_stat_dot3statsfcserrors);
+ UPDATE_STAT64(rx_stat_grund,
+ rx_stat_etherstatsundersizepkts);
+ UPDATE_STAT64(rx_stat_grovr,
+ rx_stat_dot3statsframestoolong);
+ UPDATE_STAT64(rx_stat_grfrg,
+ rx_stat_etherstatsfragments);
+ UPDATE_STAT64(rx_stat_grjbr,
+ rx_stat_etherstatsjabbers);
+ UPDATE_STAT64(rx_stat_grxcf,
+ rx_stat_maccontrolframesreceived);
+ UPDATE_STAT64(rx_stat_grxpf,
+ rx_stat_xoffstateentered);
+ UPDATE_STAT64(rx_stat_grxpf,
+ rx_stat_bmac_xpf);
+ UPDATE_STAT64(tx_stat_gtxpf,
+ tx_stat_outxoffsent);
+ UPDATE_STAT64(tx_stat_gtxpf,
+ tx_stat_flowcontroldone);
+ UPDATE_STAT64(tx_stat_gt64,
+ tx_stat_etherstatspkts64octets);
+ UPDATE_STAT64(tx_stat_gt127,
+ tx_stat_etherstatspkts65octetsto127octets);
UPDATE_STAT64(tx_stat_gt255,
tx_stat_etherstatspkts128octetsto255octets);
UPDATE_STAT64(tx_stat_gt511,
@@ -8017,19 +8120,23 @@ bxe_stats_bmac_update(struct bxe_softc *sc)
tx_stat_etherstatspkts512octetsto1023octets);
UPDATE_STAT64(tx_stat_gt1518,
tx_stat_etherstatspkts1024octetsto1522octets);
- UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
- UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
- UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
- UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
+ UPDATE_STAT64(tx_stat_gt2047,
+ tx_stat_bmac_2047);
+ UPDATE_STAT64(tx_stat_gt4095,
+ tx_stat_bmac_4095);
+ UPDATE_STAT64(tx_stat_gt9216,
+ tx_stat_bmac_9216);
+ UPDATE_STAT64(tx_stat_gt16383,
+ tx_stat_bmac_16383);
UPDATE_STAT64(tx_stat_gterr,
tx_stat_dot3statsinternalmactransmiterrors);
- UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
+ UPDATE_STAT64(tx_stat_gtufl,
+ tx_stat_bmac_ufl);
estats->pause_frames_received_hi =
pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
estats->pause_frames_received_lo =
pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
-
estats->pause_frames_sent_hi =
pstats->mac_stx[1].tx_stat_outxoffsent_hi;
estats->pause_frames_sent_lo =
@@ -8039,6 +8146,8 @@ bxe_stats_bmac_update(struct bxe_softc *sc)
}
/*
+ * Update the Ethernet MAC (1Gb EMAC) statistics.
+ *
* Returns:
* None.
*/
@@ -8047,7 +8156,7 @@ bxe_stats_emac_update(struct bxe_softc *sc)
{
struct emac_stats *new;
struct host_port_stats *pstats;
- struct bxe_eth_stats *estats;
+ struct bxe_port_stats *estats;
DBENTER(BXE_INSANE_STATS);
@@ -8092,9 +8201,9 @@ bxe_stats_emac_update(struct bxe_softc *sc)
estats->pause_frames_received_lo =
pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
ADD_64(estats->pause_frames_received_hi,
- pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
- estats->pause_frames_received_lo,
- pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
+ pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
+ estats->pause_frames_received_lo,
+ pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
estats->pause_frames_sent_hi =
pstats->mac_stx[1].tx_stat_outxonsent_hi;
@@ -8117,7 +8226,7 @@ bxe_stats_hw_update(struct bxe_softc *sc)
{
struct nig_stats *new, *old;
struct host_port_stats *pstats;
- struct bxe_eth_stats *estats;
+ struct bxe_port_stats *estats;
struct regpair diff;
uint32_t nig_timer_max;
int rc;
@@ -8162,12 +8271,15 @@ bxe_stats_hw_update(struct bxe_softc *sc)
pstats->host_port_stats_start = ++pstats->host_port_stats_end;
- nig_timer_max = SHMEM_RD(sc, port_mb[BP_PORT(sc)].stat_nig_timer);
- if (nig_timer_max != estats->nig_timer_max) {
- estats->nig_timer_max = nig_timer_max;
- DBPRINT(sc, BXE_WARN,
- "%s(): NIG timer reached max value (%u)!\n", __FUNCTION__,
- estats->nig_timer_max);
+ if (!NOMCP(sc)) {
+ nig_timer_max =
+ SHMEM_RD(sc, port_mb[BP_PORT(sc)].stat_nig_timer);
+ if (nig_timer_max != estats->nig_timer_max) {
+ estats->nig_timer_max = nig_timer_max;
+ DBPRINT(sc, BXE_WARN,
+ "%s(): NIG timer reached max value (%u)!\n",
+ __FUNCTION__, estats->nig_timer_max);
+ }
}
bxe_stats_hw_update_exit:
@@ -8179,12 +8291,15 @@ bxe_stats_hw_update_exit:
* Returns:
* 0 = Success, !0 = Failure.
*/
static int
bxe_stats_storm_update(struct bxe_softc *sc)
{
int rc, i, cl_id;
struct eth_stats_query *stats;
+ struct bxe_port_stats *estats;
struct host_func_stats *fstats;
+ struct bxe_q_stats *qstats;
struct tstorm_per_port_stats *tport;
struct tstorm_per_client_stats *tclient;
struct ustorm_per_client_stats *uclient;
@@ -8192,72 +8307,66 @@ bxe_stats_storm_update(struct bxe_softc *sc)
struct tstorm_per_client_stats *old_tclient;
struct ustorm_per_client_stats *old_uclient;
struct xstorm_per_client_stats *old_xclient;
- struct bxe_eth_stats *estats;
- struct bxe_q_stats *qstats;
struct bxe_fastpath * fp;
uint32_t diff;
DBENTER(BXE_INSANE_STATS);
rc = 0;
+ diff = 0;
stats = BXE_SP(sc, fw_stats);
tport = &stats->tstorm_common.port_statistics;
-
fstats = BXE_SP(sc, func_stats);
+
memcpy(&(fstats->total_bytes_received_hi),
&(BXE_SP(sc, func_stats_base)->total_bytes_received_hi),
- sizeof(struct host_func_stats) - 2*sizeof(uint32_t));
+ sizeof(struct host_func_stats) - 2 * sizeof(uint32_t));
- diff = 0;
estats = &sc->eth_stats;
estats->no_buff_discard_hi = 0;
estats->no_buff_discard_lo = 0;
estats->error_bytes_received_hi = 0;
estats->error_bytes_received_lo = 0;
-/* estats->etherstatsoverrsizepkts_hi = 0;
+ estats->etherstatsoverrsizepkts_hi = 0;
estats->etherstatsoverrsizepkts_lo = 0;
-*/
+
for (i = 0; i < sc->num_queues; i++) {
fp = &sc->fp[i];
cl_id = fp->cl_id;
tclient = &stats->tstorm_common.client_statistics[cl_id];
- uclient = &stats->ustorm_common.client_statistics[cl_id];
- xclient = &stats->xstorm_common.client_statistics[cl_id];
old_tclient = &fp->old_tclient;
+ uclient = &stats->ustorm_common.client_statistics[cl_id];
old_uclient = &fp->old_uclient;
+ xclient = &stats->xstorm_common.client_statistics[cl_id];
old_xclient = &fp->old_xclient;
qstats = &fp->eth_q_stats;
- /* Are STORM statistics valid? */
+ /* Are TSTORM statistics valid? */
if ((uint16_t)(le16toh(tclient->stats_counter) + 1) !=
sc->stats_counter) {
-#if 0
DBPRINT(sc, BXE_WARN, "%s(): Stats not updated by TSTORM "
"(tstorm counter (%d) != stats_counter (%d))!\n",
__FUNCTION__, tclient->stats_counter, sc->stats_counter);
-#endif
rc = 1;
goto bxe_stats_storm_update_exit;
}
+ /* Are USTORM statistics valid? */
if ((uint16_t)(le16toh(uclient->stats_counter) + 1) !=
sc->stats_counter) {
-#if 0
DBPRINT(sc, BXE_WARN, "%s(): Stats not updated by USTORM "
"(ustorm counter (%d) != stats_counter (%d))!\n",
__FUNCTION__, uclient->stats_counter, sc->stats_counter);
-#endif
rc = 2;
goto bxe_stats_storm_update_exit;
}
+ /* Are XSTORM statistics valid? */
if ((uint16_t)(le16toh(xclient->stats_counter) + 1) !=
sc->stats_counter) {
-#if 0
DBPRINT(sc, BXE_WARN, "%s(): Stats not updated by XSTORM "
"(xstorm counter (%d) != stats_counter (%d))!\n",
__FUNCTION__, xclient->stats_counter, sc->stats_counter);
-#endif
rc = 3;
goto bxe_stats_storm_update_exit;
}
@@ -8313,9 +8422,8 @@ bxe_stats_storm_update(struct bxe_softc *sc)
total_multicast_packets_received);
UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
total_broadcast_packets_received);
-/* UPDATE_EXTEND_TSTAT(packets_too_big_discard,
- etherstatsoverrsizepkts);
-*/
+ UPDATE_EXTEND_TSTAT(packets_too_big_discard,
+ etherstatsoverrsizepkts);
UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
SUB_EXTEND_USTAT(ucast_no_buff_pkts,
@@ -8329,19 +8437,19 @@ bxe_stats_storm_update(struct bxe_softc *sc)
UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
qstats->total_bytes_transmitted_hi =
- (xclient->unicast_bytes_sent.hi);
+ le32toh(xclient->unicast_bytes_sent.hi);
qstats->total_bytes_transmitted_lo =
- (xclient->unicast_bytes_sent.lo);
+ le32toh(xclient->unicast_bytes_sent.lo);
ADD_64(qstats->total_bytes_transmitted_hi,
- (xclient->multicast_bytes_sent.hi),
+ le32toh(xclient->multicast_bytes_sent.hi),
qstats->total_bytes_transmitted_lo,
- (xclient->multicast_bytes_sent.lo));
+ le32toh(xclient->multicast_bytes_sent.lo));
ADD_64(qstats->total_bytes_transmitted_hi,
- (xclient->broadcast_bytes_sent.hi),
+ le32toh(xclient->broadcast_bytes_sent.hi),
qstats->total_bytes_transmitted_lo,
- (xclient->broadcast_bytes_sent.lo));
+ le32toh(xclient->broadcast_bytes_sent.lo));
UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
total_unicast_packets_transmitted);
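
The UPDATE_EXTEND_* macros used above extend the firmware's wrapping 32-bit counters to 64 bits: remember the last raw value seen, take the unsigned difference (which is immune to wrap), and accumulate it. A hedged sketch of that technique; in the driver the le32toh() conversions added above would precede this step:

#include <stdint.h>
#include <stdio.h>

struct ext_counter {
	uint32_t old_raw;	/* last raw value read from firmware */
	uint64_t total;		/* accumulated 64-bit count */
};

static void
update_extend(struct ext_counter *c, uint32_t raw)
{
	uint32_t diff = raw - c->old_raw;   /* unsigned math absorbs wrap */

	c->old_raw = raw;
	c->total += diff;
}

int
main(void)
{
	struct ext_counter c = { 0xfffffffeU, 0xfffffffeULL };

	update_extend(&c, 3);	/* counter wrapped: 5 new events */
	printf("%llu\n", (unsigned long long)c.total);	/* 4294967299 */
	return (0);
}
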
@@ -8356,63 +8464,72 @@ bxe_stats_storm_update(struct bxe_softc *sc)
old_tclient->ttl0_discard = tclient->ttl0_discard;
ADD_64(fstats->total_bytes_received_hi,
- qstats->total_bytes_received_hi,
- fstats->total_bytes_received_lo,
- qstats->total_bytes_received_lo);
+ qstats->total_bytes_received_hi,
+ fstats->total_bytes_received_lo,
+ qstats->total_bytes_received_lo);
ADD_64(fstats->total_bytes_transmitted_hi,
- qstats->total_bytes_transmitted_hi,
- fstats->total_bytes_transmitted_lo,
- qstats->total_bytes_transmitted_lo);
+ qstats->total_bytes_transmitted_hi,
+ fstats->total_bytes_transmitted_lo,
+ qstats->total_bytes_transmitted_lo);
ADD_64(fstats->total_unicast_packets_received_hi,
- qstats->total_unicast_packets_received_hi,
- fstats->total_unicast_packets_received_lo,
- qstats->total_unicast_packets_received_lo);
+ qstats->total_unicast_packets_received_hi,
+ fstats->total_unicast_packets_received_lo,
+ qstats->total_unicast_packets_received_lo);
ADD_64(fstats->total_multicast_packets_received_hi,
- qstats->total_multicast_packets_received_hi,
- fstats->total_multicast_packets_received_lo,
- qstats->total_multicast_packets_received_lo);
+ qstats->total_multicast_packets_received_hi,
+ fstats->total_multicast_packets_received_lo,
+ qstats->total_multicast_packets_received_lo);
ADD_64(fstats->total_broadcast_packets_received_hi,
- qstats->total_broadcast_packets_received_hi,
- fstats->total_broadcast_packets_received_lo,
- qstats->total_broadcast_packets_received_lo);
+ qstats->total_broadcast_packets_received_hi,
+ fstats->total_broadcast_packets_received_lo,
+ qstats->total_broadcast_packets_received_lo);
ADD_64(fstats->total_unicast_packets_transmitted_hi,
- qstats->total_unicast_packets_transmitted_hi,
- fstats->total_unicast_packets_transmitted_lo,
- qstats->total_unicast_packets_transmitted_lo);
+ qstats->total_unicast_packets_transmitted_hi,
+ fstats->total_unicast_packets_transmitted_lo,
+ qstats->total_unicast_packets_transmitted_lo);
ADD_64(fstats->total_multicast_packets_transmitted_hi,
- qstats->total_multicast_packets_transmitted_hi,
- fstats->total_multicast_packets_transmitted_lo,
- qstats->total_multicast_packets_transmitted_lo);
+ qstats->total_multicast_packets_transmitted_hi,
+ fstats->total_multicast_packets_transmitted_lo,
+ qstats->total_multicast_packets_transmitted_lo);
ADD_64(fstats->total_broadcast_packets_transmitted_hi,
- qstats->total_broadcast_packets_transmitted_hi,
- fstats->total_broadcast_packets_transmitted_lo,
- qstats->total_broadcast_packets_transmitted_lo);
+ qstats->total_broadcast_packets_transmitted_hi,
+ fstats->total_broadcast_packets_transmitted_lo,
+ qstats->total_broadcast_packets_transmitted_lo);
ADD_64(fstats->valid_bytes_received_hi,
- qstats->valid_bytes_received_hi,
- fstats->valid_bytes_received_lo,
- qstats->valid_bytes_received_lo);
+ qstats->valid_bytes_received_hi,
+ fstats->valid_bytes_received_lo,
+ qstats->valid_bytes_received_lo);
ADD_64(estats->error_bytes_received_hi,
- qstats->error_bytes_received_hi,
- estats->error_bytes_received_lo,
- qstats->error_bytes_received_lo);
-
- ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
- estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
+ qstats->error_bytes_received_hi,
+ estats->error_bytes_received_lo,
+ qstats->error_bytes_received_lo);
+ ADD_64(estats->etherstatsoverrsizepkts_hi,
+ qstats->etherstatsoverrsizepkts_hi,
+ estats->etherstatsoverrsizepkts_lo,
+ qstats->etherstatsoverrsizepkts_lo);
+ ADD_64(estats->no_buff_discard_hi,
+ qstats->no_buff_discard_hi,
+ estats->no_buff_discard_lo,
+ qstats->no_buff_discard_lo);
}
ADD_64(fstats->total_bytes_received_hi,
- estats->rx_stat_ifhcinbadoctets_hi,
- fstats->total_bytes_received_lo,
- estats->rx_stat_ifhcinbadoctets_lo);
+ estats->rx_stat_ifhcinbadoctets_hi,
+ fstats->total_bytes_received_lo,
+ estats->rx_stat_ifhcinbadoctets_lo);
memcpy(estats, &(fstats->total_bytes_received_hi),
- sizeof(struct host_func_stats) - 2*sizeof(uint32_t));
+ sizeof(struct host_func_stats) - 2 * sizeof(uint32_t));
+ ADD_64(estats->etherstatsoverrsizepkts_hi,
+ estats->rx_stat_dot3statsframestoolong_hi,
+ estats->etherstatsoverrsizepkts_lo,
+ estats->rx_stat_dot3statsframestoolong_lo);
ADD_64(estats->error_bytes_received_hi,
- estats->rx_stat_ifhcinbadoctets_hi,
- estats->error_bytes_received_lo,
- estats->rx_stat_ifhcinbadoctets_lo);
+ estats->rx_stat_ifhcinbadoctets_hi,
+ estats->error_bytes_received_lo,
+ estats->rx_stat_ifhcinbadoctets_lo);
if (sc->port.pmf) {
estats->mac_filter_discard =
@@ -8431,7 +8548,7 @@ bxe_stats_storm_update(struct bxe_softc *sc)
bxe_stats_storm_update_exit:
DBEXIT(BXE_INSANE_STATS);
- return(rc);
+ return (rc);
}
/*
@@ -8444,7 +8561,7 @@ static void
bxe_stats_net_update(struct bxe_softc *sc)
{
struct tstorm_per_client_stats *old_tclient;
- struct bxe_eth_stats *estats;
+ struct bxe_port_stats *estats;
struct ifnet *ifp;
DBENTER(BXE_INSANE_STATS);
@@ -8469,7 +8586,6 @@ bxe_stats_net_update(struct bxe_softc *sc)
(u_long) estats->no_buff_discard_lo +
(u_long) estats->mac_discard +
(u_long) estats->rx_stat_etherstatsundersizepkts_lo +
- (u_long) estats->jabber_packets_received +
(u_long) estats->brb_drop_lo +
(u_long) estats->brb_truncate_discard +
(u_long) estats->rx_stat_dot3statsfcserrors_lo +
@@ -8515,7 +8631,7 @@ bxe_stats_update(struct bxe_softc *sc)
goto bxe_stats_update_exit;
/* Check for any hardware statistics updates. */
- if (sc->port.pmf)
+ if (sc->port.pmf == 1)
update = (bxe_stats_hw_update(sc) == 0);
/* Check for any STORM statistics updates. */
@@ -8637,10 +8753,11 @@ bxe_stats_stop(struct bxe_softc *sc)
DBENTER(BXE_VERBOSE_STATS);
update = 0;
+
/* Wait for any pending completions. */
bxe_stats_comp(sc);
- if (sc->port.pmf)
+ if (sc->port.pmf == 1)
update = (bxe_stats_hw_update(sc) == 0);
update |= (bxe_stats_storm_update(sc) == 0);
@@ -8648,7 +8765,7 @@ bxe_stats_stop(struct bxe_softc *sc)
if (update) {
bxe_stats_net_update(sc);
- if (sc->port.pmf)
+ if (sc->port.pmf == 1)
bxe_stats_port_stop(sc);
bxe_stats_hw_post(sc);
@@ -8667,7 +8784,8 @@ bxe_stats_stop(struct bxe_softc *sc)
static void
bxe_stats_do_nothing(struct bxe_softc *sc)
{
-
+ DBENTER(BXE_VERBOSE_STATS);
+ DBEXIT(BXE_VERBOSE_STATS);
}
static const struct {
@@ -8701,9 +8819,10 @@ bxe_stats_handle(struct bxe_softc *sc, enum bxe_stats_event event)
{
enum bxe_stats_state state;
- DBENTER(BXE_INSANE_STATS);
+ DBENTER(BXE_EXTREME_STATS);
state = sc->stats_state;
+
#ifdef BXE_DEBUG
if (event != STATS_EVENT_UPDATE)
DBPRINT(sc, BXE_VERBOSE_STATS,
@@ -8720,7 +8839,7 @@ bxe_stats_handle(struct bxe_softc *sc, enum bxe_stats_event event)
__FUNCTION__, sc->stats_state);
#endif
- DBEXIT(BXE_INSANE_STATS);
+ DBEXIT(BXE_EXTREME_STATS);
}
/*
@@ -8798,167 +8917,137 @@ bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head)
struct eth_tx_bd *tx_data_bd;
struct eth_tx_bd *tx_total_pkt_size_bd;
struct eth_tx_start_bd *tx_start_bd;
- uint16_t etype, bd_prod, pkt_prod, total_pkt_size;
+ uint16_t etype, sw_tx_bd_prod, sw_pkt_prod, total_pkt_size;
+// uint16_t bd_index, pkt_index;
uint8_t mac_type;
- int i, e_hlen, error, nsegs, rc, nbds, vlan_off, ovlan;
+ int i, defragged, e_hlen, error, nsegs, rc, nbds, vlan_off, ovlan;
struct bxe_softc *sc;
sc = fp->sc;
DBENTER(BXE_VERBOSE_SEND);
- rc = nbds = ovlan = vlan_off = total_pkt_size = 0;
+ DBRUN(M_ASSERTPKTHDR(*m_head));
m0 = *m_head;
-
- tx_total_pkt_size_bd = NULL;
+ rc = defragged = nbds = ovlan = vlan_off = total_pkt_size = 0;
tx_start_bd = NULL;
tx_data_bd = NULL;
tx_parse_bd = NULL;
+ tx_total_pkt_size_bd = NULL;
- pkt_prod = fp->tx_pkt_prod;
- bd_prod = TX_BD(fp->tx_bd_prod);
+ /* Get the H/W pointer (0 to 65535) for packets and BD's. */
+ sw_pkt_prod = fp->tx_pkt_prod;
+ sw_tx_bd_prod = fp->tx_bd_prod;
- mac_type = UNICAST_ADDRESS;
+ /* Create the S/W index (0 to MAX_TX_BD) for packets and BD's. */
+// pkt_index = TX_BD(sw_pkt_prod);
+// bd_index = TX_BD(sw_tx_bd_prod);
-#ifdef BXE_DEBUG
- int debug_prod;
- DBRUN(debug_prod = bd_prod);
-#endif
+ mac_type = UNICAST_ADDRESS;
/* Map the mbuf into the next open DMAable memory. */
- map = fp->tx_mbuf_map[TX_BD(pkt_prod)];
+ map = fp->tx_mbuf_map[TX_BD(sw_pkt_prod)];
error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag, map, m0,
segs, &nsegs, BUS_DMA_NOWAIT);
- do{
- /* Handle any mapping errors. */
- if(__predict_false(error)){
- fp->tx_dma_mapping_failure++;
- if (error == ENOMEM) {
- /* Resource issue, try again later. */
- rc = ENOMEM;
- }else if (error == EFBIG) {
- /* Possibly recoverable. */
- fp->mbuf_defrag_attempts++;
- m0 = m_defrag(*m_head, M_DONTWAIT);
- if (m0 == NULL) {
- fp->mbuf_defrag_failures++;
- rc = ENOBUFS;
- } else {
- /* Defrag was successful, try mapping again.*/
- fp->mbuf_defrag_successes++;
- *m_head = m0;
- error =
- bus_dmamap_load_mbuf_sg(
- fp->tx_mbuf_tag, map, m0,
- segs, &nsegs, BUS_DMA_NOWAIT);
- if (error) {
- fp->tx_dma_mapping_failure++;
- rc = error;
- }
- }
- }else {
- /* Unrecoverable. */
- DBPRINT(sc, BXE_WARN_SEND,
- "%s(): Unknown TX mapping error! "
- "rc = %d.\n", __FUNCTION__, error);
- DBRUN(bxe_dump_mbuf(sc, m0));
- rc = error;
- }
-
- break;
- }
-
- /* Make sure this enough room in the send queue. */
- if (__predict_false((nsegs + 2) >
- (USABLE_TX_BD - fp->used_tx_bd))) {
- fp->tx_queue_too_full++;
- bus_dmamap_unload(fp->tx_mbuf_tag, map);
- rc = ENOBUFS;
- break;
- }
-
- /* Now make sure it fits in the pkt window */
- if (__predict_false(nsegs > 12)) {
-
- /*
- * The mbuf may be to big for the controller
- * to handle. If the frame is a TSO frame
- * we'll need to do an additional check.
- */
- if(m0->m_pkthdr.csum_flags & CSUM_TSO){
- if (bxe_chktso_window(sc,nsegs,segs,m0) == 0)
- /* OK to send. */
- break;
- else
- fp->window_violation_tso++;
- } else
- fp->window_violation_std++;
-
- /*
- * If this is a standard frame then defrag is
- * required. Unmap the mbuf, defrag it, then
- * try mapping it again.
- */
+ /* Handle any mapping errors. */
+ if(__predict_false(error != 0)){
+ fp->tx_dma_mapping_failure++;
+ if (error == ENOMEM) {
+ /* Resource issue, try again later. */
+ rc = ENOMEM;
+ } else if (error == EFBIG) {
+ /* Possibly recoverable with defragmentation. */
fp->mbuf_defrag_attempts++;
- bus_dmamap_unload(fp->tx_mbuf_tag, map);
m0 = m_defrag(*m_head, M_DONTWAIT);
if (m0 == NULL) {
fp->mbuf_defrag_failures++;
rc = ENOBUFS;
- break;
+ } else {
+ /* Defrag successful, try mapping again. */
+ *m_head = m0;
+ error = bus_dmamap_load_mbuf_sg(
+ fp->tx_mbuf_tag, map, m0,
+ segs, &nsegs, BUS_DMA_NOWAIT);
+ if (error) {
+ fp->tx_dma_mapping_failure++;
+ rc = error;
+ }
}
+ } else {
+ /* Unknown, unrecoverable mapping error. */
+ DBPRINT(sc, BXE_WARN_SEND,
+ "%s(): Unknown TX mapping error! "
+ "rc = %d.\n", __FUNCTION__, error);
+ DBRUN(bxe_dump_mbuf(sc, m0));
+ rc = error;
+ }
- /* Defrag was successful, try mapping again. */
- fp->mbuf_defrag_successes++;
- *m_head = m0;
- error =
- bus_dmamap_load_mbuf_sg(
- fp->tx_mbuf_tag, map, m0,
- segs, &nsegs, BUS_DMA_NOWAIT);
-
- /* Handle any mapping errors. */
- if (__predict_false(error)) {
- fp->tx_dma_mapping_failure++;
- rc = error;
- break;
- }
+ goto bxe_tx_encap_continue;
+ }
- /* Last try */
- if (m0->m_pkthdr.csum_flags & CSUM_TSO){
- if (bxe_chktso_window(sc,nsegs,segs,m0) == 1)
- rc = ENOBUFS;
- } else if (nsegs > 12 ){
- rc = ENOBUFS;
- } else
- rc = 0;
- }
- }while (0);
+ /* Make sure there's enough room in the send queue. */
+ if (__predict_false((nsegs + 2) >
+ (USABLE_TX_BD - fp->tx_bd_used))) {
+ /* Recoverable, try again later. */
+ fp->tx_hw_queue_full++;
+ bus_dmamap_unload(fp->tx_mbuf_tag, map);
+ rc = ENOMEM;
+ goto bxe_tx_encap_continue;
+ }
+
+ /* Capture the current H/W TX chain high watermark. */
+ if (__predict_false(fp->tx_hw_max_queue_depth <
+ fp->tx_bd_used))
+ fp->tx_hw_max_queue_depth = fp->tx_bd_used;
+
+ /* Now make sure it fits in the packet window. */
+ if (__predict_false(nsegs > 12)) {
+ /*
+ * The mbuf may be too big for the controller
+ * to handle. If the frame is a TSO frame
+ * we'll need to do an additional check.
+ */
+ if(m0->m_pkthdr.csum_flags & CSUM_TSO){
+ if (bxe_chktso_window(sc,nsegs,segs,m0) == 0)
+ /* OK to send. */
+ goto bxe_tx_encap_continue;
+ else
+ fp->tx_window_violation_tso++;
+ } else
+ fp->tx_window_violation_std++;
+ /* No sense trying to defrag again, we'll drop the frame. */
+ if (defragged > 0)
+ rc = ENODEV;
+ }
+
+bxe_tx_encap_continue:
/* Check for errors */
if (rc){
if(rc == ENOMEM){
/* Recoverable try again later */
}else{
- fp->soft_tx_errors++;
- DBRUN(fp->tx_mbuf_alloc--);
+ fp->tx_soft_errors++;
+ fp->tx_mbuf_alloc--;
m_freem(*m_head);
*m_head = NULL;
}
- return (rc);
+ goto bxe_tx_encap_exit;
}
- /* We're committed to sending the frame, update the counter. */
- fp->tx_pkt_prod++;
+ /* Save the mbuf and mapping. */
+ fp->tx_mbuf_ptr[TX_BD(sw_pkt_prod)] = m0;
+ fp->tx_mbuf_map[TX_BD(sw_pkt_prod)] = map;
- /* set flag according to packet type (UNICAST_ADDRESS is default)*/
+ /* Set flag according to packet type (UNICAST_ADDRESS is default). */
if (m0->m_flags & M_BCAST)
mac_type = BROADCAST_ADDRESS;
else if (m0->m_flags & M_MCAST)
mac_type = MULTICAST_ADDRESS;
- /* Prepare the first transmit BD for the mbuf(Get a link from the chain). */
- tx_start_bd = &fp->tx_bd_chain[TX_PAGE(bd_prod)][TX_IDX(bd_prod)].start_bd;
+ /* Prepare the first transmit (Start) BD for the mbuf. */
+ tx_start_bd = &fp->tx_chain[TX_BD(sw_tx_bd_prod)].start_bd;
tx_start_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
tx_start_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
@@ -8970,32 +9059,29 @@ bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head)
tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
- nbds = nsegs + 1; /* Add 1 for parsing bd. Assuming nseg > 0 */
+ /* All frames have at least Start BD + Parsing BD. */
+ nbds = nsegs + 1;
tx_start_bd->nbd = htole16(nbds);
if (m0->m_flags & M_VLANTAG) {
-// vlan_off += ETHER_VLAN_ENCAP_LEN;
tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
tx_start_bd->vlan = htole16(m0->m_pkthdr.ether_vtag);
- DBPRINT(sc, BXE_VERBOSE_SEND, "%s(): Inserting VLAN tag %d\n",
- __FUNCTION__, m0->m_pkthdr.ether_vtag);
- }
- else
+ } else
/*
* In cases where the VLAN tag is not used the firmware
* expects to see a packet counter in the VLAN tag field
* Failure to do so will cause an assertion which will
* stop the controller.
*/
- tx_start_bd->vlan = htole16(pkt_prod);
+ tx_start_bd->vlan = htole16(fp->tx_pkt_prod);
/*
- * Add a parsing BD from the chain. The parsing bd is always added,
- * however, it is only used for tso & chksum.
+ * Add a parsing BD from the chain. The parsing BD is always added,
+ * however, it is only used for TSO & chksum.
*/
- bd_prod = TX_BD(NEXT_TX_BD(bd_prod));
+ sw_tx_bd_prod = NEXT_TX_BD(sw_tx_bd_prod);
tx_parse_bd = (struct eth_tx_parse_bd *)
- &fp->tx_bd_chain[TX_PAGE(bd_prod)][TX_IDX(bd_prod)].parse_bd;
+ &fp->tx_chain[TX_BD(sw_tx_bd_prod)].parse_bd;
memset(tx_parse_bd, 0, sizeof(struct eth_tx_parse_bd));
/* Gather all info about the packet and add to tx_parse_bd */
@@ -9006,7 +9092,7 @@ bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head)
uint16_t flags = 0;
struct udphdr *uh = NULL;
- /* Map the Ethernet header to find the type & header length. */
+ /* Map Ethernet header to find type & header length. */
eh = mtod(m0, struct ether_vlan_header *);
/* Handle VLAN encapsulation if present. */
@@ -9024,23 +9110,22 @@ bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head)
ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT);
switch (etype) {
- case ETHERTYPE_IP:{
- /* if mbuf's len < 20bytes, the ip_hdr is in next mbuf*/
+ case ETHERTYPE_IP:
+ /* If mbuf len < 20bytes, IP header is in next mbuf. */
if (m0->m_len < sizeof(struct ip))
- ip = (struct ip *)m0->m_next->m_data;
+ ip = (struct ip *) m0->m_next->m_data;
else
- ip = (struct ip *)(m0->m_data + e_hlen);
+ ip = (struct ip *) (m0->m_data + e_hlen);
/* Calculate IP header length (16 bit words). */
tx_parse_bd->ip_hlen = (ip->ip_hl << 1);
/* Calculate enet + IP header length (16 bit words). */
- tx_parse_bd->total_hlen = tx_parse_bd->ip_hlen + (e_hlen >> 1);
+ tx_parse_bd->total_hlen = tx_parse_bd->ip_hlen +
+ (e_hlen >> 1);
if (m0->m_pkthdr.csum_flags & CSUM_IP) {
- DBPRINT(sc, BXE_EXTREME_SEND, "%s(): IP checksum "
- "enabled.\n", __FUNCTION__);
- fp->offload_frames_csum_ip++;
+ fp->tx_offload_frames_csum_ip++;
flags |= ETH_TX_BD_FLAGS_IP_CSUM;
}
@@ -9048,132 +9133,130 @@ bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head)
if ((m0->m_pkthdr.csum_flags & CSUM_TCP)||
(m0->m_pkthdr.csum_flags & CSUM_TSO)){
- /* Perform TCP checksum offload. */
- DBPRINT(sc, BXE_EXTREME_SEND, "%s(): TCP checksum "
- "enabled.\n", __FUNCTION__);
-
/* Get the TCP header. */
- th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
+ th = (struct tcphdr *)((caddr_t)ip +
+ (ip->ip_hl << 2));
/* Add the TCP checksum offload flag. */
flags |= ETH_TX_BD_FLAGS_L4_CSUM;
- fp->offload_frames_csum_tcp++;
+ fp->tx_offload_frames_csum_tcp++;
/* Update the enet + IP + TCP header length. */
- tx_parse_bd->total_hlen += (uint16_t)(th->th_off << 1);
+ tx_parse_bd->total_hlen +=
+ (uint16_t)(th->th_off << 1);
/* Get the pseudo header checksum. */
- tx_parse_bd->tcp_pseudo_csum = ntohs(th->th_sum);
+ tx_parse_bd->tcp_pseudo_csum =
+ ntohs(th->th_sum);
+
} else if (m0->m_pkthdr.csum_flags & CSUM_UDP) {
/*
- * The hardware doesn't actually support UDP checksum
- * offload but we can fake it by doing TCP checksum
- * offload and factoring out the extra bytes that are
- * different between the TCP header and the UDP header.
- * calculation will begin 10 bytes before the actual
- * start of the UDP header. To work around this we
- * need to calculate the checksum of the 10 bytes
- * before the UDP header and factor that out of the
- * UDP pseudo header checksum before asking the H/W
- * to calculate the full UDP checksum.
+ * The hardware doesn't actually support UDP
+ * checksum offload but we can fake it by
+ * doing TCP checksum offload and factoring
+ * out the extra bytes that are different
+ * between the TCP header and the UDP header.
+ *
+ * Calculation will begin 10 bytes before the
+ * actual start of the UDP header. To work
+ * around this we need to calculate the
+ * checksum of the 10 bytes before the UDP
+ * header and factor that out of the UDP
+ * pseudo header checksum before asking the
+ * H/W to calculate the full UDP checksum.
*/
uint16_t tmp_csum;
uint32_t *tmp_uh;
/* This value is 10. */
- uint8_t fix = (uint8_t) (offsetof(struct tcphdr, th_sum) -
- (int) offsetof(struct udphdr, uh_sum));
-
- /* Perform UDP checksum offload. */
- DBPRINT(sc, BXE_EXTREME_SEND, "%s(): UDP checksum "
- "enabled.\n", __FUNCTION__);
+ uint8_t fix = (uint8_t) (offsetof(struct tcphdr, th_sum) -
+ (int) offsetof(struct udphdr, uh_sum));
- /* Add the TCP checksum offload flag for UDP frames too. */
+ /*
+ * Add the TCP checksum offload flag for
+ * UDP frames too.
+ */
flags |= ETH_TX_BD_FLAGS_L4_CSUM;
- fp->offload_frames_csum_udp++;
- tx_parse_bd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
+ fp->tx_offload_frames_csum_udp++;
+ tx_parse_bd->global_data |=
+ ETH_TX_PARSE_BD_UDP_CS_FLG;
/* Get a pointer to the UDP header. */
- uh = (struct udphdr *)((caddr_t)ip + (ip->ip_hl << 2));
+ uh = (struct udphdr *)((caddr_t)ip +
+ (ip->ip_hl << 2));
- /* Set a pointer 10 bytes before the actual UDP header. */
- tmp_uh = (uint32_t *)((uint8_t *)uh - fix);
+ /* Set pointer 10 bytes before UDP header. */
+ tmp_uh = (uint32_t *)((uint8_t *)uh -
+ fix);
/*
- * Calculate a pseudo header checksum over the 10 bytes
- * before the UDP header.
+ * Calculate a pseudo header checksum over
+ * the 10 bytes before the UDP header.
*/
tmp_csum = in_pseudo(ntohl(*tmp_uh),
- ntohl(*(tmp_uh + 1)),
- ntohl((*(tmp_uh + 2)) & 0x0000FFFF));
+ ntohl(*(tmp_uh + 1)),
+ ntohl((*(tmp_uh + 2)) & 0x0000FFFF));
/* Update the enet + IP + UDP header length. */
- tx_parse_bd->total_hlen += (sizeof(struct udphdr) >> 1);
- tx_parse_bd->tcp_pseudo_csum = ~in_addword(uh->uh_sum, ~tmp_csum);
+ tx_parse_bd->total_hlen +=
+ (sizeof(struct udphdr) >> 1);
+ tx_parse_bd->tcp_pseudo_csum =
+ ~in_addword(uh->uh_sum, ~tmp_csum);
}
- /* Update the flags settings for VLAN/Offload. */
+ /* Update the offload flags. */
tx_start_bd->bd_flags.as_bitfield |= flags;
-
break;
- }
+
case ETHERTYPE_IPV6:
- fp->unsupported_tso_request_ipv6++;
- /* DRC - How to handle this error? */
+ fp->tx_unsupported_tso_request_ipv6++;
+ /* ToDo: Add IPv6 support. */
break;
default:
- fp->unsupported_tso_request_not_tcp++;
- /* DRC - How to handle this error? */
+ fp->tx_unsupported_tso_request_not_tcp++;
+ /* ToDo - How to handle this error? */
}
/* Setup the Parsing BD with TSO specific info */
if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
-
uint16_t hdr_len = tx_parse_bd->total_hlen << 1;
- DBPRINT(sc, BXE_EXTREME_SEND, "%s(): TSO is enabled.\n",
- __FUNCTION__);
-
- tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
+ tx_start_bd->bd_flags.as_bitfield |=
+ ETH_TX_BD_FLAGS_SW_LSO;
+ fp->tx_offload_frames_tso++;
- fp->offload_frames_tso++;
- if (__predict_false(tx_start_bd->nbytes > hdr_len)) {
+ /* ToDo: Does this really help? */
+ if (__predict_false(tx_start_bd->nbytes > hdr_len)) {
+ fp->tx_header_splits++;
/*
* Split the first BD into 2 BDs to make the
- * FW job easy...
+ * firmware's job easy...
*/
tx_start_bd->nbd++;
DBPRINT(sc, BXE_EXTREME_SEND,
"%s(): TSO split headr size is %d (%x:%x) nbds %d\n",
- __FUNCTION__, tx_start_bd->nbytes, tx_start_bd->addr_hi,
+ __FUNCTION__, tx_start_bd->nbytes,
+ tx_start_bd->addr_hi,
tx_start_bd->addr_lo, nbds);
- bd_prod = TX_BD(NEXT_TX_BD(bd_prod));
-
- /* Get a new transmit BD (after the tx_parse_bd) and fill it. */
- tx_data_bd = &fp->tx_bd_chain[TX_PAGE(bd_prod)][TX_IDX(bd_prod)].reg_bd;
- tx_data_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr + hdr_len));
- tx_data_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr + hdr_len));
- tx_data_bd->nbytes = htole16(segs[0].ds_len) - hdr_len;
+ sw_tx_bd_prod = NEXT_TX_BD(sw_tx_bd_prod);
+
+ /* New transmit BD (after the tx_parse_bd). */
+ tx_data_bd =
+ &fp->tx_chain[TX_BD(sw_tx_bd_prod)].reg_bd;
+ tx_data_bd->addr_hi =
+ htole32(U64_HI(segs[0].ds_addr + hdr_len));
+ tx_data_bd->addr_lo =
+ htole32(U64_LO(segs[0].ds_addr + hdr_len));
+ tx_data_bd->nbytes =
+ htole16(segs[0].ds_len) - hdr_len;
if (tx_total_pkt_size_bd == NULL)
tx_total_pkt_size_bd = tx_data_bd;
-
- /*
- * This indicates that the transmit BD
- * has no individual mapping and the
- * FW ignores this flag in a BD that is
- * not marked with the start flag.
- */
-
- DBPRINT(sc, BXE_EXTREME_SEND,
- "%s(): TSO split data size is %d (%x:%x)\n",
- __FUNCTION__, tx_data_bd->nbytes,
- tx_data_bd->addr_hi, tx_data_bd->addr_lo);
}
/*
- * For TSO the controller needs the following info:
+ * The controller needs the following info for TSO:
* MSS, tcp_send_seq, ip_id, and tcp_pseudo_csum.
*/
tx_parse_bd->lso_mss = htole16(m0->m_pkthdr.tso_segsz);
@@ -9190,10 +9273,10 @@ bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head)
}
}
- /* Prepare Remaining BDs. Start_tx_bd contains first seg(frag). */
+ /* Prepare remaining BDs. Start_tx_bd contains first seg (frag). */
for (i = 1; i < nsegs ; i++) {
- bd_prod = TX_BD(NEXT_TX_BD(bd_prod));
- tx_data_bd = &fp->tx_bd_chain[TX_PAGE(bd_prod)][TX_IDX(bd_prod)].reg_bd;
+ sw_tx_bd_prod = NEXT_TX_BD(sw_tx_bd_prod);
+ tx_data_bd = &fp->tx_chain[TX_BD(sw_tx_bd_prod)].reg_bd;
tx_data_bd->addr_lo = htole32(U64_LO(segs[i].ds_addr));
tx_data_bd->addr_hi = htole32(U64_HI(segs[i].ds_addr));
tx_data_bd->nbytes = htole16(segs[i].ds_len);
@@ -9205,56 +9288,27 @@ bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head)
if(tx_total_pkt_size_bd != NULL)
tx_total_pkt_size_bd->total_pkt_bytes = total_pkt_size;
- /* Update bd producer index value for next tx */
- bd_prod = TX_BD(NEXT_TX_BD(bd_prod));
- DBRUNMSG(BXE_EXTREME_SEND, bxe_dump_tx_chain(fp, debug_prod, nbds));
-
- /*
- * Ensure that the mbuf pointer for this
- * transmission is placed at the array index
- * of the last descriptor in this chain.
- * This is done because a single map is used
- * for all segments of the mbuf and we don't
- * want to unload the map before all of the
- * segments have been freed.
- */
- fp->tx_mbuf_ptr[TX_BD(pkt_prod)] = m0;
+ /* Update TX BD producer index value for next TX */
+ sw_tx_bd_prod = NEXT_TX_BD(sw_tx_bd_prod);
- fp->used_tx_bd += nbds;
+ /* Update the used TX BD counter. */
+ fp->tx_bd_used += nbds;
/*
- * Ring the tx doorbell, counting the next
- * bd if the packet contains or ends with it.
+ * If the chain of tx_bd's describing this frame
+ * is adjacent to or spans an eth_tx_next_bd element
+ * then we need to increment the nbds value.
*/
- if(TX_IDX(bd_prod) < nbds)
+ if(TX_IDX(sw_tx_bd_prod) < nbds)
nbds++;
-//BXE_PRINTF("nsegs:%d, tpktsz:0x%x\n",nsegs, total_pkt_size) ;
-
- /*
- * Update the buffer descriptor producer count and the packet
- * producer count in doorbell data memory (eth_tx_db_data) then
- * ring the doorbell.
- */
-/* fp->hw_tx_prods->bds_prod =
- htole16(le16toh(fp->hw_tx_prods->bds_prod) + nbds);
-*/
-
-
/* Don't allow reordering of writes for nbd and packets. */
mb();
-/*
- fp->hw_tx_prods->packets_prod =
- htole32(le32toh(fp->hw_tx_prods->packets_prod) + 1);
-*/
-// DOORBELL(sc, fp->index, 0);
-
-// BXE_PRINTF("doorbell: nbd %d bd %u index %d\n", nbds, bd_prod, fp->index);
-
fp->tx_db.data.prod += nbds;
/* Producer points to the next free tx_bd at this point. */
- fp->tx_bd_prod = bd_prod;
+ fp->tx_pkt_prod++;
+ fp->tx_bd_prod = sw_tx_bd_prod;
DOORBELL(sc, fp->index, fp->tx_db.raw);
@@ -9268,8 +9322,9 @@ bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head)
bus_space_barrier(sc->bxe_db_btag, sc->bxe_db_bhandle,
0, 0, BUS_SPACE_BARRIER_READ);
+bxe_tx_encap_exit:
DBEXIT(BXE_VERBOSE_SEND);
- return(rc);
+ return (rc);
}
@@ -9291,7 +9346,7 @@ bxe_tx_start(struct ifnet *ifp)
/* Exit if the transmit queue is full or link down. */
if (((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
IFF_DRV_RUNNING) || !sc->link_vars.link_up) {
- DBPRINT(sc, BXE_VERBOSE_SEND,
+ DBPRINT(sc, BXE_WARN,
"%s(): No link or TX queue full, ignoring "
"transmit request.\n", __FUNCTION__);
goto bxe_tx_start_exit;
@@ -9336,7 +9391,7 @@ bxe_tx_start_locked(struct ifnet *ifp, struct bxe_fastpath *fp)
break;
/* The transmit mbuf now belongs to us, keep track of it. */
- DBRUN(fp->tx_mbuf_alloc++);
+ fp->tx_mbuf_alloc++;
/*
* Pack the data into the transmit ring. If we
@@ -9354,8 +9409,8 @@ bxe_tx_start_locked(struct ifnet *ifp, struct bxe_fastpath *fp)
*/
ifp->if_drv_flags |= IFF_DRV_OACTIVE;
IFQ_DRV_PREPEND(&ifp->if_snd, m);
- DBRUN(fp->tx_mbuf_alloc--);
- sc->eth_stats.driver_xoff++;
+ fp->tx_mbuf_alloc--;
+ fp->tx_queue_xoff++;
} else {
}
@@ -9375,8 +9430,6 @@ bxe_tx_start_locked(struct ifnet *ifp, struct bxe_fastpath *fp)
if (tx_count > 0)
/* Reset the TX watchdog timeout timer. */
fp->watchdog_timer = BXE_TX_TIMEOUT;
- else
- fp->tx_start_called_on_empty_queue++;
DBEXIT(BXE_EXTREME_SEND);
}
@@ -9391,41 +9444,27 @@ bxe_tx_start_locked(struct ifnet *ifp, struct bxe_fastpath *fp)
static int
bxe_tx_mq_start(struct ifnet *ifp, struct mbuf *m)
{
- struct bxe_softc *sc;
- struct bxe_fastpath *fp;
- int fp_index, rc;
+ struct bxe_softc *sc;
+ struct bxe_fastpath *fp;
+ int fp_index, rc;
sc = ifp->if_softc;
- fp_index = 0;
-
DBENTER(BXE_EXTREME_SEND);
- /* Map the flow ID to a queue number. */
- if ((m->m_flags & M_FLOWID) != 0) {
- fp_index = m->m_pkthdr.flowid % sc->num_queues;
- DBPRINT(sc, BXE_EXTREME_SEND,
- "%s(): Found flowid %d\n",
- __FUNCTION__, fp_index);
- } else {
- DBPRINT(sc, BXE_EXTREME_SEND,
- "%s(): No flowid found, using %d\n",
- __FUNCTION__, fp_index);
- }
+ fp_index = 0;
+ /* If using flow ID, assign the TX queue based on the flow ID. */
+ if ((m->m_flags & M_FLOWID) != 0)
+ fp_index = m->m_pkthdr.flowid % sc->num_queues;
/* Select the fastpath TX queue for the frame. */
fp = &sc->fp[fp_index];
- /* Exit if the transmit queue is full or link down. */
+ /* Skip H/W enqueue if transmit queue is full or link down. */
if (((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
IFF_DRV_RUNNING) || !sc->link_vars.link_up) {
- /* We're stuck with the mbuf. Stash it for now. */
- DBPRINT(sc, BXE_EXTREME_SEND,
- "%s(): TX queue full/link down, "
- "parking mbuf...\n", __FUNCTION__);
+ /* Stash the mbuf if we can. */
rc = drbr_enqueue(ifp, fp->br, m);
- /* DRC - Setup a task to try again. */
- /* taskqueue_enqueue(tq, task); */
goto bxe_tx_mq_start_exit;
}
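
Selecting the ring with a plain modulo over the flow ID, as the rewritten bxe_tx_mq_start() does above, pins every packet of a flow to one TX queue and so preserves per-flow ordering. A minimal sketch of that mapping:

#include <stdint.h>
#include <stdio.h>

/* Same flow always lands on the same TX ring; queue 0 otherwise. */
static int
select_tx_queue(uint32_t flowid, int has_flowid, int num_queues)
{
	return (has_flowid ? (int)(flowid % (uint32_t)num_queues) : 0);
}

int
main(void)
{
	printf("%d\n", select_tx_queue(0xdeadbeefU, 1, 4));	/* 3 */
	return (0);
}
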
@@ -9435,12 +9474,13 @@ bxe_tx_mq_start(struct ifnet *ifp, struct mbuf *m)
bxe_tx_mq_start_exit:
DBEXIT(BXE_EXTREME_SEND);
- return(rc);
+ return (rc);
}
/*
- * Multiqueue (TSS) transmit routine.
+ * Multiqueue (TSS) transmit routine. This routine is responsible
+ * for adding a frame to the hardware's transmit queue.
*
* Returns:
* 0 if transmit succeeds, !0 otherwise.
@@ -9451,55 +9491,40 @@ bxe_tx_mq_start_locked(struct ifnet *ifp,
{
struct bxe_softc *sc;
struct mbuf *next;
- int depth, rc = 0, tx_count = 0;
+ int depth, rc, tx_count;
sc = fp->sc;
-
DBENTER(BXE_EXTREME_SEND);
+
+ rc = tx_count = 0;
+
+ /* Fetch the depth of the driver queue. */
depth = drbr_inuse(ifp, fp->br);
- if (depth > fp->max_drbr_queue_depth) {
- fp->max_drbr_queue_depth = depth;
- }
- DBPRINT(sc, BXE_EXTREME_SEND,
- "%s(): fp[%02d], drbr queue depth=%d\n",
- __FUNCTION__, fp->index, depth);
+ if (depth > fp->tx_max_drbr_queue_depth)
+ fp->tx_max_drbr_queue_depth = depth;
BXE_FP_LOCK_ASSERT(fp);
if (m == NULL) {
- /* Check for any other work. */
- DBPRINT(sc, BXE_EXTREME_SEND,
- "%s(): No initial work, dequeue mbuf...\n",
- __FUNCTION__);
+ /* No new work, check for pending frames. */
next = drbr_dequeue(ifp, fp->br);
} else if (drbr_needs_enqueue(ifp, fp->br)) {
- /* Work pending, queue mbuf to maintain packet order. */
- DBPRINT(sc, BXE_EXTREME_SEND,
- "%s(): Found queued data pending...\n",
- __FUNCTION__);
- if ((rc = drbr_enqueue(ifp, fp->br, m)) != 0) {
- DBPRINT(sc, BXE_EXTREME_SEND,
- "%s(): Enqueue failed...\n",
- __FUNCTION__);
+ /* Both new and pending work, maintain packet order. */
+ rc = drbr_enqueue(ifp, fp->br, m);
+ if (rc != 0) {
+ fp->tx_soft_errors++;
goto bxe_tx_mq_start_locked_exit;
}
- DBPRINT(sc, BXE_EXTREME_SEND,
- "%s(): Dequeueing old mbuf...\n",
- __FUNCTION__);
next = drbr_dequeue(ifp, fp->br);
- } else {
- /* Work with the mbuf we have. */
- DBPRINT(sc, BXE_EXTREME_SEND,
- "%s(): Start with current mbuf...\n",
- __FUNCTION__);
+ } else
+ /* New work only, nothing pending. */
next = m;
- }
/* Keep adding entries while there are frames to send. */
while (next != NULL) {
/* The transmit mbuf now belongs to us, keep track of it. */
- DBRUN(fp->tx_mbuf_alloc++);
+ fp->tx_mbuf_alloc++;
/*
* Pack the data into the transmit ring. If we
@@ -9507,9 +9532,8 @@ bxe_tx_mq_start_locked(struct ifnet *ifp,
* head of the TX queue, set the OACTIVE flag,
* and wait for the NIC to drain the chain.
*/
- if (__predict_false(bxe_tx_encap(fp, &next))) {
- DBPRINT(sc, BXE_WARN, "%s(): TX encap failure...\n",
- __FUNCTION__);
+ rc = bxe_tx_encap(fp, &next);
+ if (__predict_false(rc != 0)) {
fp->tx_encap_failures++;
/* Very Bad Frames(tm) may have been dropped. */
if (next != NULL) {
@@ -9518,12 +9542,11 @@ bxe_tx_mq_start_locked(struct ifnet *ifp,
* the frame.
*/
ifp->if_drv_flags |= IFF_DRV_OACTIVE;
- DBPRINT(sc, BXE_EXTREME_SEND,
- "%s(): Save mbuf for another time...\n",
- __FUNCTION__);
+ fp->tx_frame_deferred++;
+
+ /* This may reorder the frame. */
rc = drbr_enqueue(ifp, fp->br, next);
- DBRUN(fp->tx_mbuf_alloc--);
- sc->eth_stats.driver_xoff++;
+ fp->tx_mbuf_alloc--;
}
/* Stop looking for more work. */
@@ -9536,27 +9559,27 @@ bxe_tx_mq_start_locked(struct ifnet *ifp,
/* Send a copy of the frame to any BPF listeners. */
BPF_MTAP(ifp, next);
- DBPRINT(sc, BXE_EXTREME_SEND,
- "%s(): Check for queued mbufs...\n",
- __FUNCTION__);
+ /* Handle any completions if we're running low. */
+ if (fp->tx_bd_used >= BXE_TX_CLEANUP_THRESHOLD)
+ bxe_txeof(fp);
+
+ /* Close TX since there's so little room left. */
+ if (fp->tx_bd_used >= BXE_TX_CLEANUP_THRESHOLD) {
+ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+ break;
+ }
+
next = drbr_dequeue(ifp, fp->br);
}
- DBPRINT(sc, BXE_EXTREME_SEND,
- "%s(): Enqueued %d mbufs...\n",
- __FUNCTION__, tx_count);
-
/* No TX packets were dequeued. */
- if (tx_count > 0) {
+ if (tx_count > 0)
/* Reset the TX watchdog timeout timer. */
fp->watchdog_timer = BXE_TX_TIMEOUT;
- } else {
- fp->tx_start_called_on_empty_queue++;
- }
bxe_tx_mq_start_locked_exit:
DBEXIT(BXE_EXTREME_SEND);
- return(rc);
+ return (rc);
}
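
The drbr enqueue/dequeue choreography in bxe_tx_mq_start_locked() exists to preserve ordering: a new frame may bypass the software ring only when nothing is already queued; otherwise it must line up behind the pending frames. A sketch of that decision with a plain FIFO standing in for the drbr buf_ring:

#include <stddef.h>

struct pkt { struct pkt *next; };
struct fifo { struct pkt *head, *tail; };

static void
fifo_put(struct fifo *q, struct pkt *p)
{
	p->next = NULL;
	if (q->tail != NULL)
		q->tail->next = p;
	else
		q->head = p;
	q->tail = p;
}

static struct pkt *
fifo_get(struct fifo *q)
{
	struct pkt *p = q->head;

	if (p != NULL && (q->head = p->next) == NULL)
		q->tail = NULL;
	return (p);
}

/* Next frame to transmit, preserving arrival order. */
static struct pkt *
next_to_send(struct fifo *q, struct pkt *m)
{
	if (m == NULL)			/* no new work: drain pending */
		return (fifo_get(q));
	if (q->head != NULL) {		/* pending work: queue behind it */
		fifo_put(q, m);
		return (fifo_get(q));
	}
	return (m);			/* ring empty: send directly */
}

int
main(void)
{
	struct fifo q = { NULL, NULL };
	struct pkt a, b;

	fifo_put(&q, &a);		/* a frame is already pending */
	return (next_to_send(&q, &b) == &a ? 0 : 1);
}
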
@@ -9575,10 +9598,11 @@ bxe_mq_flush(struct ifnet *ifp)
for (i = 0; i < sc->num_queues; i++) {
fp = &sc->fp[i];
- DBPRINT(sc, BXE_VERBOSE_UNLOAD, "%s(): Clearing fp[%02d]...\n",
- __FUNCTION__, fp->index);
-
if (fp->br != NULL) {
+ DBPRINT(sc, BXE_VERBOSE_UNLOAD,
+ "%s(): Clearing fp[%02d]...\n",
+ __FUNCTION__, fp->index);
+
BXE_FP_LOCK(fp);
while ((m = buf_ring_dequeue_sc(fp->br)) != NULL)
m_freem(m);
@@ -9607,7 +9631,7 @@ bxe_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
int error, mask, reinit;
sc = ifp->if_softc;
- DBENTER(BXE_EXTREME_MISC);
+ DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_MISC);
ifr = (struct ifreq *)data;
error = 0;
@@ -9616,72 +9640,65 @@ bxe_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
switch (command) {
case SIOCSIFMTU:
/* Set the MTU. */
- DBPRINT(sc, BXE_EXTREME_MISC, "%s(): Received SIOCSIFMTU\n",
+ DBPRINT(sc, BXE_VERBOSE_MISC, "%s(): Received SIOCSIFMTU\n",
__FUNCTION__);
/* Check that the MTU setting is supported. */
if ((ifr->ifr_mtu < BXE_MIN_MTU) ||
- (ifr->ifr_mtu > BXE_JUMBO_MTU)) {
- DBPRINT(sc, BXE_WARN, "%s(): Unsupported MTU "
- "(%d < %d < %d)!\n", __FUNCTION__, BXE_MIN_MTU,
- ifr->ifr_mtu, BXE_JUMBO_MTU);
+ (ifr->ifr_mtu > BXE_JUMBO_MTU)) {
error = EINVAL;
break;
}
BXE_CORE_LOCK(sc);
ifp->if_mtu = ifr->ifr_mtu;
- bxe_change_mtu(sc, ifp->if_drv_flags & IFF_DRV_RUNNING);
BXE_CORE_UNLOCK(sc);
+
+ reinit = 1;
break;
case SIOCSIFFLAGS:
/* Toggle the interface state up or down. */
- DBPRINT(sc, BXE_EXTREME_MISC, "%s(): Received SIOCSIFFLAGS\n",
+ DBPRINT(sc, BXE_VERBOSE_MISC, "%s(): Received SIOCSIFFLAGS\n",
__FUNCTION__);
BXE_CORE_LOCK(sc);
-
/* Check if the interface is up. */
if (ifp->if_flags & IFF_UP) {
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- /*
- * Change the promiscuous/multicast flags as
- * necessary.
- */
+ /* Set promiscuous/multicast flags. */
bxe_set_rx_mode(sc);
} else {
/* Start the HW */
bxe_init_locked(sc, LOAD_NORMAL);
}
} else {
- /*
- * The interface is down. Check if the driver is
- * running.
- */
+ /* Bring down the interface. */
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
bxe_stop_locked(sc, UNLOAD_NORMAL);
}
BXE_CORE_UNLOCK(sc);
+
break;
case SIOCADDMULTI:
case SIOCDELMULTI:
/* Add/Delete multicast addresses. */
- DBPRINT(sc, BXE_EXTREME_MISC,
+ DBPRINT(sc, BXE_VERBOSE_MISC,
"%s(): Received SIOCADDMULTI/SIOCDELMULTI\n", __FUNCTION__);
BXE_CORE_LOCK(sc);
-
- /* Don't bother unless the driver's running. */
+ /* Check if the interface is up. */
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ /* Set receive mode flags. */
bxe_set_rx_mode(sc);
-
BXE_CORE_UNLOCK(sc);
+
break;
case SIOCSIFMEDIA:
case SIOCGIFMEDIA:
/* Set/Get Interface media */
- DBPRINT(sc, BXE_EXTREME_MISC,
+ DBPRINT(sc, BXE_VERBOSE_MISC,
"%s(): Received SIOCSIFMEDIA/SIOCGIFMEDIA\n", __FUNCTION__);
+
error = ifmedia_ioctl(ifp, ifr, &sc->bxe_ifmedia, command);
break;
case SIOCSIFCAP:
@@ -9697,13 +9714,13 @@ bxe_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
/* Toggle the LRO capabilites enable flag. */
if (mask & IFCAP_LRO) {
- if (TPA_ENABLED(sc)) {
- ifp->if_capenable ^= IFCAP_LRO;
- sc->bxe_flags ^= BXE_TPA_ENABLE_FLAG;
- DBPRINT(sc, BXE_INFO_MISC,
- "%s(): Toggling LRO (bxe_flags = "
- "0x%08X).\n", __FUNCTION__, sc->bxe_flags);
- }
+ ifp->if_capenable ^= IFCAP_LRO;
+ sc->bxe_flags ^= BXE_TPA_ENABLE_FLAG;
+ DBPRINT(sc, BXE_INFO_MISC,
+ "%s(): Toggling LRO (bxe_flags = "
+ "0x%08X).\n", __FUNCTION__, sc->bxe_flags);
+
+ /* LRO requires different buffer setup. */
reinit = 1;
}
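
The SIOCSIFCAP handling above XORs the requested and current capability sets so that only the changed bits (mask) are examined, then flips each supported bit and schedules a reinit when the change affects buffer setup. A tiny sketch of the idiom with made-up flag values:

#include <stdint.h>

#define CAP_LRO		0x01	/* hypothetical flag values */
#define CAP_TSO4	0x02

static uint32_t
toggle_caps(uint32_t enabled, uint32_t requested, int *reinit)
{
	uint32_t mask = requested ^ enabled;	/* changed bits only */

	if (mask & CAP_LRO) {
		enabled ^= CAP_LRO;
		*reinit = 1;		/* LRO needs different RX buffers */
	}
	if (mask & CAP_TSO4)
		enabled ^= CAP_TSO4;
	return (enabled);
}

int
main(void)
{
	int reinit = 0;
	uint32_t en = toggle_caps(CAP_TSO4, CAP_TSO4 | CAP_LRO, &reinit);

	return ((en == (CAP_TSO4 | CAP_LRO) && reinit == 1) ? 0 : 1);
}
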
@@ -9735,6 +9752,7 @@ bxe_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
/* Toggle VLAN_MTU capabilities enable flag. */
if (mask & IFCAP_VLAN_MTU) {
+ /* ToDo: Is this really true? */
BXE_PRINTF("%s(%d): Changing VLAN_MTU not supported.\n",
__FILE__, __LINE__);
error = EINVAL;
@@ -9742,6 +9760,7 @@ bxe_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
/* Toggle VLANHWTAG capabilities enabled flag. */
if (mask & IFCAP_VLAN_HWTAGGING) {
+ /* ToDo: Is this really true? */
BXE_PRINTF(
"%s(%d): Changing VLAN_HWTAGGING not supported!\n",
__FILE__, __LINE__);
@@ -9758,27 +9777,22 @@ bxe_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
/* Toggle TSO6 capabilities enabled flag. */
if (mask & IFCAP_TSO6) {
- DBPRINT(sc, BXE_VERBOSE_MISC,
- "%s(): Toggling IFCAP_TSO6.\n", __FUNCTION__);
-
- ifp->if_capenable ^= IFCAP_TSO6;
- }
-
- /* Handle any other capabilities. */
- if (mask & ~(IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU |
- IFCAP_RXCSUM | IFCAP_TXCSUM)) {
- BXE_PRINTF("%s(%d): Unsupported capability!\n",
+ /* ToDo: Add TSO6 support. */
+ BXE_PRINTF(
+ "%s(%d): Changing TSO6 not supported!\n",
__FILE__, __LINE__);
- error = EINVAL;
- }
-
- /* Restart the controller with the new capabilities. */
- if (reinit) {
- bxe_stop_locked(sc, UNLOAD_NORMAL);
- bxe_init_locked(sc, LOAD_NORMAL);
}
-
BXE_CORE_UNLOCK(sc);
+
+ /*
+ * ToDo: Look into supporting:
+ * VLAN_HWFILTER
+ * VLAN_HWCSUM
+ * VLAN_HWTSO
+ * POLLING
+ * WOL[_UCAST|_MCAST|_MAGIC]
+ *
+ */
break;
default:
/* We don't know how to handle the IOCTL, pass it on. */
@@ -9786,7 +9800,15 @@ bxe_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
break;
}
- DBEXIT(BXE_EXTREME_MISC);
+ /* Restart the controller with the new capabilities. */
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && (reinit != 0)) {
+ BXE_CORE_LOCK(sc);
+ bxe_stop_locked(sc, UNLOAD_NORMAL);
+ bxe_init_locked(sc, LOAD_NORMAL);
+ BXE_CORE_UNLOCK(sc);
+ }
+
+ DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_MISC);
return (error);
}
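/*
 * A minimal userland sketch of the deferred-reinit pattern bxe_ioctl()
 * uses above, with hypothetical stop/start stand-ins: each ioctl case
 * only records that a restart is needed, and the restart itself runs
 * once, after the switch, and only if the interface was running.
 */
#include <stdio.h>

#define IFF_DRV_RUNNING	0x40	/* assumed flag value for the demo */

static void	example_stop(void)  { printf("stop\n"); }
static void	example_start(void) { printf("start\n"); }

static int
example_ioctl(unsigned long command, int drv_flags)
{
	int error = 0, reinit = 0;

	switch (command) {
	case 1:	/* e.g. SIOCSIFMTU: a new MTU needs new RX buffers. */
		reinit = 1;
		break;
	case 2:	/* e.g. an IFCAP_LRO toggle: different buffer setup. */
		reinit = 1;
		break;
	default:
		break;
	}

	/* One restart path, outside the switch, only when running. */
	if ((drv_flags & IFF_DRV_RUNNING) && reinit != 0) {
		example_stop();
		example_start();
	}
	return (error);
}

int
main(void)
{
	return (example_ioctl(1, IFF_DRV_RUNNING));
}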
@@ -9798,7 +9820,7 @@ bxe_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
* caller.
*
* Returns:
- * The adjusted value of *fp->rx_cons_sb.
+ * The adjusted value of *fp->rx_cons_sb.
*/
static __inline uint16_t
bxe_rx_cq_cons(struct bxe_fastpath *fp)
@@ -9816,7 +9838,7 @@ bxe_rx_cq_cons(struct bxe_fastpath *fp)
* need to adjust the value accordingly.
*/
if ((rx_cq_cons_sb & USABLE_RCQ_ENTRIES_PER_PAGE) ==
- USABLE_RCQ_ENTRIES_PER_PAGE)
+ USABLE_RCQ_ENTRIES_PER_PAGE)
rx_cq_cons_sb++;
return (rx_cq_cons_sb);
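/*
 * A minimal sketch of the completion-queue index fix-up performed in
 * bxe_rx_cq_cons() above, with assumed sizes: each 4 KB queue page
 * holds 128 entries, the last of which is a "next page" pointer rather
 * than a real completion, so 127 entries are usable.  Because 127 is a
 * power of two minus one, `idx & 127` isolates the offset within the
 * page, and an offset of 127 means the index sits on the pointer
 * element and must be skipped.
 */
#include <stdint.h>
#include <stdio.h>

#define RCQ_ENTRIES_PER_PAGE		128	/* assumed */
#define USABLE_RCQ_ENTRIES_PER_PAGE	(RCQ_ENTRIES_PER_PAGE - 1)

static uint16_t
adjust_cq_cons(uint16_t idx)
{
	/* Skip the "next page" element at the end of each page. */
	if ((idx & USABLE_RCQ_ENTRIES_PER_PAGE) ==
	    USABLE_RCQ_ENTRIES_PER_PAGE)
		idx++;
	return (idx);
}

int
main(void)
{
	/* 126 stays put; 127 lands on the pointer slot and becomes 128. */
	printf("%u -> %u\n", 126, adjust_cq_cons(126));
	printf("%u -> %u\n", 127, adjust_cq_cons(127));
	return (0);
}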
@@ -9827,7 +9849,7 @@ bxe_has_tx_work(struct bxe_fastpath *fp)
{
rmb();
- return (((fp->tx_pkt_prod != le16toh(*fp->tx_cons_sb)) || \
+ return (((fp->tx_pkt_prod != le16toh(*fp->tx_pkt_cons_sb)) || \
(fp->tx_pkt_prod != fp->tx_pkt_cons)));
}
@@ -9836,8 +9858,8 @@ bxe_has_tx_work(struct bxe_fastpath *fp)
* completion queue.
*
* Returns:
- * 0 = No received frames pending, !0 = Received frames
- * pending
+ * 0 = No received frames pending, !0 = Received frames
+ * pending
*/
static __inline int
bxe_has_rx_work(struct bxe_fastpath *fp)
@@ -9860,7 +9882,6 @@ bxe_task_sp(void *xsc, int pending)
uint32_t sp_status;
sc = xsc;
- DBENTER(BXE_EXTREME_INTR);
DBPRINT(sc, BXE_EXTREME_INTR, "%s(): pending = %d.\n", __FUNCTION__,
pending);
@@ -9897,8 +9918,6 @@ bxe_task_sp(void *xsc, int pending)
IGU_INT_NOP, 1);
bxe_ack_sb(sc, DEF_SB_ID, TSTORM_ID, le16toh(sc->def_t_idx),
IGU_INT_ENABLE, 1);
-
- DBEXIT(BXE_EXTREME_INTR);
}
@@ -9931,9 +9950,6 @@ bxe_intr_legacy(void *xsc)
if (fp_status == 0)
goto bxe_intr_legacy_exit;
- /* Need to weed out calls due to shared interrupts. */
- DBENTER(BXE_EXTREME_INTR);
-
/* Handle the fastpath interrupt. */
/*
* sb_id = 0 for ustorm, 1 for cstorm.
@@ -9945,9 +9961,8 @@ bxe_intr_legacy(void *xsc)
*/
mask = (0x2 << fp->sb_id);
- DBPRINT(sc, BXE_EXTREME_INTR,
- "%s(): fp_status = 0x%08X, mask = 0x%08X\n", __FUNCTION__,
- fp_status, mask);
+ DBPRINT(sc, BXE_INSANE_INTR, "%s(): fp_status = 0x%08X, mask = "
+ "0x%08X\n", __FUNCTION__, fp_status, mask);
/* CSTORM event means fastpath completion. */
if (fp_status & mask) {
@@ -10004,7 +10019,9 @@ bxe_intr_sp(void *xsc)
struct bxe_softc *sc;
sc = xsc;
- DBENTER(BXE_EXTREME_INTR);
+
+ DBPRINT(sc, BXE_INSANE_INTR, "%s(%d): Slowpath interrupt.\n",
+ __FUNCTION__, curcpu);
/* Don't handle any interrupts if we're not ready. */
if (__predict_false(sc->intr_sem != 0))
@@ -10021,7 +10038,7 @@ bxe_intr_sp(void *xsc)
#endif
bxe_intr_sp_exit:
- DBEXIT(BXE_EXTREME_INTR);
+ return;
}
/*
@@ -10041,10 +10058,8 @@ bxe_intr_fp (void *xfp)
fp = xfp;
sc = fp->sc;
- DBENTER(BXE_EXTREME_INTR);
-
- DBPRINT(sc, BXE_VERBOSE_INTR,
- "%s(%d): MSI-X vector on fp[%d].sb_id = %d\n",
+ DBPRINT(sc, BXE_INSANE_INTR,
+ "%s(%d): fp[%02d].sb_id = %d interrupt.\n",
__FUNCTION__, curcpu, fp->index, fp->sb_id);
/* Don't handle any interrupts if we're not ready. */
@@ -10060,7 +10075,7 @@ bxe_intr_fp (void *xfp)
#endif
bxe_intr_fp_exit:
- DBEXIT(BXE_EXTREME_INTR);
+ return;
}
/*
@@ -10080,12 +10095,7 @@ bxe_task_fp (void *xfp, int pending)
fp = xfp;
sc = fp->sc;
- DBENTER(BXE_EXTREME_INTR);
-
- DBPRINT(sc, BXE_EXTREME_INTR, "%s(): pending = %d.\n", __FUNCTION__,
- pending);
-
- DBPRINT(sc, BXE_EXTREME_INTR, "%s(%d): Fastpath task on fp[%d]"
+ DBPRINT(sc, BXE_EXTREME_INTR, "%s(%d): Fastpath task on fp[%02d]"
".sb_id = %d\n", __FUNCTION__, curcpu, fp->index, fp->sb_id);
/* Update the fast path indices */
@@ -10105,8 +10115,6 @@ bxe_task_fp (void *xfp, int pending)
/* Acknowledge the fastpath status block indices. */
bxe_ack_sb(sc, fp->sb_id, USTORM_ID, fp->fp_u_idx, IGU_INT_NOP, 1);
bxe_ack_sb(sc, fp->sb_id, CSTORM_ID, fp->fp_c_idx, IGU_INT_ENABLE, 1);
-
- DBEXIT(BXE_EXTREME_INTR);
}
/*
@@ -10120,12 +10128,8 @@ bxe_zero_sb(struct bxe_softc *sc, int sb_id)
{
int port;
- port = BP_PORT(sc);
-
DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_INTR);
- DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET),
- "%s(): Clearing sb_id = %d on port %d.\n", __FUNCTION__, sb_id,
- port);
+ port = BP_PORT(sc);
/* "CSTORM" */
bxe_init_fill(sc, CSEM_REG_FAST_MEMORY +
@@ -10151,13 +10155,14 @@ bxe_init_sb(struct bxe_softc *sc, struct host_status_block *sb,
uint64_t section;
int func, index, port;
+ DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_INTR);
+
port = BP_PORT(sc);
func = BP_FUNC(sc);
- DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_INTR);
DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_INTR),
- "%s(): Initializing sb_id = %d on port %d, function %d.\n",
- __FUNCTION__, sb_id, port, func);
+ "%s(): Initializing sb_id = %d on port %d, function %d.\n",
+ __FUNCTION__, sb_id, port, func);
/* Setup the USTORM status block. */
section = ((uint64_t)mapping) + offsetof(struct host_status_block,
@@ -10418,300 +10423,580 @@ bxe_update_coalesce(struct bxe_softc *sc)
}
/*
- * Free memory buffers from the TPA pool.
+ * Allocate an mbuf and assign it to the TPA pool.
*
* Returns:
- * None
+ * 0 = Success, !0 = Failure
+ *
+ * Modifies:
+ * fp->tpa_mbuf_ptr[queue]
+ * fp->tpa_mbuf_map[queue]
+ * fp->tpa_mbuf_segs[queue]
*/
-static __inline void
-bxe_free_tpa_pool(struct bxe_fastpath *fp, int last)
+static int
+bxe_alloc_tpa_mbuf(struct bxe_fastpath *fp, int queue)
{
struct bxe_softc *sc;
- int j;
+ bus_dma_segment_t segs[1];
+ bus_dmamap_t map;
+ struct mbuf *m;
+ int nsegs, rc;
sc = fp->sc;
+ DBENTER(BXE_INSANE_TPA);
+ rc = 0;
+
+ DBRUNIF((fp->disable_tpa == TRUE),
+ BXE_PRINTF("%s(): fp[%02d] TPA disabled!\n",
+ __FUNCTION__, fp->index));
+
#ifdef BXE_DEBUG
- int tpa_pool_max;
+ /* Simulate an mbuf allocation failure. */
+ if (DB_RANDOMTRUE(bxe_debug_mbuf_allocation_failure)) {
+ sc->debug_sim_mbuf_alloc_failed++;
+ fp->mbuf_tpa_alloc_failed++;
+ rc = ENOMEM;
+ goto bxe_alloc_tpa_mbuf_exit;
+ }
+#endif
- tpa_pool_max = CHIP_IS_E1H(sc) ? ETH_MAX_AGGREGATION_QUEUES_E1H :
- ETH_MAX_AGGREGATION_QUEUES_E1;
- DBRUNIF((last > tpa_pool_max), DBPRINT(sc, BXE_FATAL,
- "%s(): Index value out of range (%d > %d)!\n", __FUNCTION__, last,
- tpa_pool_max));
+ /* Allocate the new TPA mbuf. */
+ m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, sc->mbuf_alloc_size);
+ if (__predict_false(m == NULL)) {
+ fp->mbuf_tpa_alloc_failed++;
+ rc = ENOBUFS;
+ goto bxe_alloc_tpa_mbuf_exit;
+ }
+
+ DBRUN(fp->tpa_mbuf_alloc++);
+
+ /* Initialize the mbuf buffer length. */
+ m->m_pkthdr.len = m->m_len = sc->mbuf_alloc_size;
+
+#ifdef BXE_DEBUG
+ /* Simulate an mbuf mapping failure. */
+ if (DB_RANDOMTRUE(bxe_debug_dma_map_addr_failure)) {
+ sc->debug_sim_mbuf_map_failed++;
+ fp->mbuf_tpa_mapping_failed++;
+ m_freem(m);
+ DBRUN(fp->tpa_mbuf_alloc--);
+ rc = ENOMEM;
+ goto bxe_alloc_tpa_mbuf_exit;
+ }
#endif
- if (!(TPA_ENABLED(sc)))
- return;
+ /* Map the TPA mbuf into non-paged pool. */
+ rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
+ fp->tpa_mbuf_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT);
+ if (__predict_false(rc != 0)) {
+ fp->mbuf_tpa_mapping_failed++;
+ m_free(m);
+ DBRUN(fp->tpa_mbuf_alloc--);
+ goto bxe_alloc_tpa_mbuf_exit;
+ }
- for (j = 0; j < last; j++) {
- if (fp->rx_mbuf_tag) {
- if (fp->tpa_mbuf_map[j] != NULL) {
- bus_dmamap_sync(fp->rx_mbuf_tag,
- fp->tpa_mbuf_map[j], BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(fp->rx_mbuf_tag,
- fp->tpa_mbuf_map[j]);
- }
+	/* All mbufs must map to a single segment. */
+ KASSERT(nsegs == 1, ("%s(): Too many segments (%d) returned!",
+ __FUNCTION__, nsegs));
- if (fp->tpa_mbuf_ptr[j] != NULL) {
- m_freem(fp->tpa_mbuf_ptr[j]);
- DBRUN(fp->tpa_mbuf_alloc--);
- fp->tpa_mbuf_ptr[j] = NULL;
- } else {
- DBPRINT(sc, BXE_FATAL,
- "%s(): TPA bin %d empty on free!\n",
- __FUNCTION__, j);
- }
- }
+ /* Release any existing TPA mbuf mapping. */
+ if (fp->tpa_mbuf_map[queue] != NULL) {
+ bus_dmamap_sync(fp->rx_mbuf_tag,
+ fp->tpa_mbuf_map[queue], BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(fp->rx_mbuf_tag,
+ fp->tpa_mbuf_map[queue]);
}
+
+ /* Save the mbuf and mapping info for the TPA mbuf. */
+ map = fp->tpa_mbuf_map[queue];
+ fp->tpa_mbuf_map[queue] = fp->tpa_mbuf_spare_map;
+ fp->tpa_mbuf_spare_map = map;
+ bus_dmamap_sync(fp->rx_mbuf_tag,
+ fp->tpa_mbuf_map[queue], BUS_DMASYNC_PREREAD);
+ fp->tpa_mbuf_ptr[queue] = m;
+ fp->tpa_mbuf_segs[queue] = segs[0];
+
+bxe_alloc_tpa_mbuf_exit:
+ DBEXIT(BXE_INSANE_TPA);
+ return (rc);
}
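/*
 * A minimal sketch of the spare-map swap used above, with a
 * hypothetical map_t in place of bus_dma(9) maps: the new buffer is
 * loaded into a dedicated spare map first, and only after the load
 * succeeds is the spare exchanged with the ring slot's map, so a
 * failed allocation or load leaves the slot's existing buffer and
 * mapping untouched.
 */
#include <stddef.h>

typedef struct { void *res; } map_t;	/* hypothetical DMA map */

struct slot {
	map_t	*map;
	void	*buf;
};

static int
slot_refill(struct slot *s, map_t **spare, void *newbuf, int load_ok)
{
	map_t *tmp;

	/* "load_ok" stands in for bus_dmamap_load_mbuf_sg() succeeding. */
	if (!load_ok)
		return (-1);	/* slot untouched on failure */

	/* Success: swap the freshly loaded spare into the slot. */
	tmp = s->map;
	s->map = *spare;
	*spare = tmp;
	s->buf = newbuf;
	return (0);
}

int
main(void)
{
	map_t m0 = { NULL }, m1 = { NULL };
	map_t *spare = &m1;
	struct slot s = { &m0, NULL };
	char buf[64];

	(void)slot_refill(&s, &spare, buf, 0);	/* fails, slot unchanged */
	return (slot_refill(&s, &spare, buf, 1));
}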
/*
- * Free an entry in the receive scatter gather list.
+ * Allocate mbufs for a fastpath TPA pool.
*
* Returns:
- * None
+ * 0 = Success, !0 = Failure.
+ *
+ * Modifies:
+ * fp->tpa_state[]
+ * fp->disable_tpa
*/
-static __inline void
-bxe_free_rx_sge(struct bxe_softc *sc, struct bxe_fastpath *fp, uint16_t index)
+static int
+bxe_fill_tpa_pool(struct bxe_fastpath *fp)
{
- struct eth_rx_sge *sge;
+ struct bxe_softc *sc;
+ int max_agg_queues, queue, rc;
- sge = &fp->rx_sge_chain[RX_SGE_PAGE(index)][RX_SGE_IDX(index)];
- /* Skip "next page" elements */
- if (!sge)
- return;
+ sc = fp->sc;
+ DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
+ rc = 0;
- if (fp->rx_sge_buf_tag) {
- if (fp->rx_sge_buf_map[index]) {
- bus_dmamap_sync(fp->rx_sge_buf_tag,
- fp->rx_sge_buf_map[index], BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(fp->rx_sge_buf_tag,
- fp->rx_sge_buf_map[index]);
- }
+ if (!TPA_ENABLED(sc)) {
+ fp->disable_tpa = TRUE;
+ goto bxe_fill_tpa_pool_exit;
+ }
- if (fp->rx_sge_buf_ptr[index]) {
- DBRUN(fp->sge_mbuf_alloc--);
- m_freem(fp->rx_sge_buf_ptr[index]);
- fp->rx_sge_buf_ptr[index] = NULL;
- }
+ max_agg_queues = CHIP_IS_E1(sc) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
+ ETH_MAX_AGGREGATION_QUEUES_E1H;
+
+ /* Assume the fill operation worked. */
+ fp->disable_tpa = FALSE;
- sge->addr_hi = sge->addr_lo = 0;
+ /* Fill the TPA pool. */
+ for (queue = 0; queue < max_agg_queues; queue++) {
+ rc = bxe_alloc_tpa_mbuf(fp, queue);
+ if (rc != 0) {
+ BXE_PRINTF(
+ "%s(%d): fp[%02d] TPA disabled!\n",
+ __FILE__, __LINE__, fp->index);
+ fp->disable_tpa = TRUE;
+ break;
+ }
+ fp->tpa_state[queue] = BXE_TPA_STATE_STOP;
}
+
+bxe_fill_tpa_pool_exit:
+ DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
+ return (rc);
}
/*
- * Free a range of scatter gather elements from the ring.
+ * Free all mbufs from a fastpath TPA pool.
*
* Returns:
* None
+ *
+ * Modifies:
+ * fp->tpa_mbuf_ptr[]
+ * fp->tpa_mbuf_map[]
+ * fp->tpa_mbuf_alloc
*/
-static __inline void
-bxe_free_rx_sge_range(struct bxe_softc *sc, struct bxe_fastpath *fp, int last)
+static void
+bxe_free_tpa_pool(struct bxe_fastpath *fp)
{
- int i;
+ struct bxe_softc *sc;
+ int i, max_agg_queues;
+
+ sc = fp->sc;
+ DBENTER(BXE_INSANE_LOAD | BXE_INSANE_UNLOAD | BXE_INSANE_TPA);
- for (i = 0; i < last; i++)
- bxe_free_rx_sge(sc, fp, i);
+ if (fp->rx_mbuf_tag == NULL)
+ goto bxe_free_tpa_pool_exit;
+
+ max_agg_queues = CHIP_IS_E1H(sc) ?
+ ETH_MAX_AGGREGATION_QUEUES_E1H :
+ ETH_MAX_AGGREGATION_QUEUES_E1;
+
+	/* Release all mbufs and all DMA maps in the TPA pool. */
+ for (i = 0; i < max_agg_queues; i++) {
+ if (fp->tpa_mbuf_map[i] != NULL) {
+ bus_dmamap_sync(fp->rx_mbuf_tag, fp->tpa_mbuf_map[i],
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(fp->rx_mbuf_tag, fp->tpa_mbuf_map[i]);
+ }
+
+ if (fp->tpa_mbuf_ptr[i] != NULL) {
+ m_freem(fp->tpa_mbuf_ptr[i]);
+ DBRUN(fp->tpa_mbuf_alloc--);
+ fp->tpa_mbuf_ptr[i] = NULL;
+ }
+ }
+
+bxe_free_tpa_pool_exit:
+ DBEXIT(BXE_INSANE_LOAD | BXE_INSANE_UNLOAD | BXE_INSANE_TPA);
}
/*
- * Allocate an mbuf of the specified size for the caller.
+ * Allocate an mbuf and assign it to the receive scatter gather chain.
+ * The caller must take care to save a copy of the existing mbuf in the
+ * SG mbuf chain.
*
* Returns:
- * NULL on failure or an mbuf pointer on success.
+ *   0 = Success, !0 = Failure.
+ *
+ * Modifies:
+ * fp->sg_chain[index]
+ * fp->rx_sge_buf_ptr[index]
+ * fp->rx_sge_buf_map[index]
+ * fp->rx_sge_spare_map
*/
-static struct mbuf*
-bxe_alloc_mbuf(struct bxe_fastpath *fp, int size)
+static int
+bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp, uint16_t index)
{
struct bxe_softc *sc;
- struct mbuf *m_new;
+ struct eth_rx_sge *sge;
+ bus_dma_segment_t segs[1];
+ bus_dmamap_t map;
+ struct mbuf *m;
+ int nsegs, rc;
sc = fp->sc;
- DBENTER(BXE_INSANE);
+ DBENTER(BXE_INSANE_TPA);
+ rc = 0;
#ifdef BXE_DEBUG
/* Simulate an mbuf allocation failure. */
if (DB_RANDOMTRUE(bxe_debug_mbuf_allocation_failure)) {
- DBPRINT(sc, BXE_WARN,
- "%s(): Simulated mbuf allocation failure!\n", __FUNCTION__);
- fp->mbuf_alloc_failed++;
- sc->debug_mbuf_sim_alloc_failed++;
- m_new = NULL;
- goto bxe_alloc_mbuf_exit;
+ sc->debug_sim_mbuf_alloc_failed++;
+ fp->mbuf_sge_alloc_failed++;
+ rc = ENOMEM;
+ goto bxe_alloc_rx_sge_mbuf_exit;
}
#endif
- /* Allocate a new mbuf with memory attached. */
- if (size <= MCLBYTES)
- m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
- else
- m_new = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, size);
-
- /* Check whether the allocation succeeded and handle a failure. */
- if (__predict_false(m_new == NULL)) {
- DBPRINT(sc, BXE_WARN, "%s(): Failed to allocate %d byte "
- "mbuf on fp[%02d]!\n", __FUNCTION__, size, fp->index);
- fp->mbuf_alloc_failed++;
- goto bxe_alloc_mbuf_exit;
+ /* Allocate a new SGE mbuf. */
+ m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE);
+ if (__predict_false(m == NULL)) {
+ fp->mbuf_sge_alloc_failed++;
+ rc = ENOMEM;
+ goto bxe_alloc_rx_sge_mbuf_exit;
}
- /* Do a little extra error checking when debugging. */
- DBRUN(M_ASSERTPKTHDR(m_new));
+ DBRUN(fp->sge_mbuf_alloc++);
/* Initialize the mbuf buffer length. */
- m_new->m_pkthdr.len = m_new->m_len = size;
- DBRUN(sc->debug_memory_allocated += size);
+ m->m_pkthdr.len = m->m_len = SGE_PAGE_SIZE;
+
+#ifdef BXE_DEBUG
+ /* Simulate an mbuf mapping failure. */
+ if (DB_RANDOMTRUE(bxe_debug_dma_map_addr_failure)) {
+ sc->debug_sim_mbuf_map_failed++;
+ fp->mbuf_sge_mapping_failed++;
+ m_freem(m);
+ DBRUN(fp->sge_mbuf_alloc--);
+ rc = ENOMEM;
+ goto bxe_alloc_rx_sge_mbuf_exit;
+ }
+#endif
+
+ /* Map the SGE mbuf into non-paged pool. */
+ rc = bus_dmamap_load_mbuf_sg(fp->rx_sge_buf_tag,
+ fp->rx_sge_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT);
+ if (__predict_false(rc != 0)) {
+ fp->mbuf_sge_mapping_failed++;
+ m_freem(m);
+ DBRUN(fp->sge_mbuf_alloc--);
+ goto bxe_alloc_rx_sge_mbuf_exit;
+ }
-bxe_alloc_mbuf_exit:
- return (m_new);
+	/* All mbufs must map to a single segment. */
+ KASSERT(nsegs == 1, ("%s(): Too many segments (%d) returned!",
+ __FUNCTION__, nsegs));
+
+ /* Unload any existing SGE mbuf mapping. */
+ if (fp->rx_sge_buf_map[index] != NULL) {
+ bus_dmamap_sync(fp->rx_sge_buf_tag,
+ fp->rx_sge_buf_map[index], BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(fp->rx_sge_buf_tag,
+ fp->rx_sge_buf_map[index]);
+ }
+
+ /* Add the new SGE mbuf to the SGE ring. */
+ map = fp->rx_sge_buf_map[index];
+ fp->rx_sge_buf_map[index] = fp->rx_sge_spare_map;
+ fp->rx_sge_spare_map = map;
+ bus_dmamap_sync(fp->rx_sge_buf_tag,
+ fp->rx_sge_buf_map[index], BUS_DMASYNC_PREREAD);
+ fp->rx_sge_buf_ptr[index] = m;
+ sge = &fp->sg_chain[index];
+ sge->addr_hi = htole32(U64_HI(segs[0].ds_addr));
+ sge->addr_lo = htole32(U64_LO(segs[0].ds_addr));
+
+bxe_alloc_rx_sge_mbuf_exit:
+ DBEXIT(BXE_INSANE_TPA);
+ return (rc);
}
/*
- * Map an mbuf into non-paged memory for the caller.
+ * Allocate mbufs for a SGE chain.
*
* Returns:
* 0 = Success, !0 = Failure.
*
- * Side-effects:
- * The mbuf passed will be released if a mapping failure occurs.
- * The segment mapping will be udpated if the mapping is successful.
+ * Modifies:
+ * fp->disable_tpa
+ * fp->rx_sge_prod
*/
static int
-bxe_map_mbuf(struct bxe_fastpath *fp, struct mbuf *m, bus_dma_tag_t tag,
- bus_dmamap_t map, bus_dma_segment_t *seg)
+bxe_fill_sg_chain(struct bxe_fastpath *fp)
{
struct bxe_softc *sc;
- bus_dma_segment_t segs[4];
- int nsegs, rc;
+ uint16_t index;
+ int i, rc;
+
sc = fp->sc;
+ DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
rc = 0;
- DBENTER(BXE_INSANE);
-
-#ifdef BXE_DEBUG
- /* Simulate an mbuf mapping failure. */
- if (DB_RANDOMTRUE(bxe_debug_dma_map_addr_failure)) {
- DBPRINT(sc, BXE_WARN, "%s(): Simulated mbuf mapping failure!\n",
- __FUNCTION__);
- sc->debug_mbuf_sim_map_failed++;
- fp->mbuf_alloc_failed++;
- sc->debug_memory_allocated -= m->m_len;
- m_freem(m);
- rc = EINVAL;
- goto bxe_map_mbuf_exit;
+ if (!TPA_ENABLED(sc)) {
+ fp->disable_tpa = TRUE;
+ goto bxe_fill_sg_chain_exit;
}
-#endif
- /* Map the buffer memory into non-paged memory. */
- rc = bus_dmamap_load_mbuf_sg(tag, map, m, segs, &nsegs, BUS_DMA_NOWAIT);
+ /* Assume the fill operation works. */
+ fp->disable_tpa = FALSE;
- /* Handle any mapping errors. */
- if (__predict_false(rc)) {
- DBPRINT(sc, BXE_WARN, "%s(): mbuf mapping failure (%d) on "
- "fp[%02d]!\n", __FUNCTION__, rc, fp->index);
- fp->mbuf_alloc_failed++;
- DBRUN(sc->debug_memory_allocated -= m->m_len);
- m_freem(m);
- goto bxe_map_mbuf_exit;
+ /* Fill the RX SGE chain. */
+ index = 0;
+ for (i = 0; i < USABLE_RX_SGE; i++) {
+ rc = bxe_alloc_rx_sge_mbuf(fp, index);
+ if (rc != 0) {
+ BXE_PRINTF(
+ "%s(%d): fp[%02d] SGE memory allocation failure!\n",
+ __FILE__, __LINE__, fp->index);
+ index = 0;
+ fp->disable_tpa = TRUE;
+ break;
+ }
+ index = NEXT_SGE_IDX(index);
}
- /* All mubfs must map to a single segment. */
- KASSERT(nsegs == 1, ("%s(): Too many segments (%d) returned!",
- __FUNCTION__, nsegs));
-
- /* Save the DMA mapping tag for this memory buffer. */
- *seg = segs[0];
+ /* Update the driver's copy of the RX SGE producer index. */
+ fp->rx_sge_prod = index;
-bxe_map_mbuf_exit:
- DBEXIT(BXE_INSANE);
+bxe_fill_sg_chain_exit:
+ DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
return (rc);
}
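/*
 * A minimal sketch of the SGE fill loop's index arithmetic, with
 * assumed geometry: each ring page carries a few trailing "next page"
 * elements, so the producer index advances with a helper that jumps
 * over them instead of a plain increment.  The counts here are
 * illustrative, not the driver's.
 */
#include <stdio.h>

#define SGE_PER_PAGE		64	/* assumed entries per page */
#define NEXT_PAGE_SGE_CNT	2	/* assumed trailing pointers */
#define USABLE_SGE_PER_PAGE	(SGE_PER_PAGE - NEXT_PAGE_SGE_CNT)

static unsigned
next_sge_idx(unsigned idx)
{
	/* Skip the reserved elements at the end of each page. */
	if ((idx % SGE_PER_PAGE) == USABLE_SGE_PER_PAGE - 1)
		return (idx + NEXT_PAGE_SGE_CNT + 1);
	return (idx + 1);
}

int
main(void)
{
	unsigned idx = 0;
	int i;

	/* Walk one page boundary: 61 is the last usable slot, then 64. */
	for (i = 0; i < USABLE_SGE_PER_PAGE + 2; i++) {
		printf("%u ", idx);
		idx = next_sge_idx(idx);
	}
	printf("\n");
	return (0);
}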
/*
- * Allocate an mbuf for the TPA pool.
+ * Free all elements from the receive scatter gather chain.
*
* Returns:
- * NULL on failure or an mbuf pointer on success.
+ * None
+ *
+ * Modifies:
+ * fp->rx_sge_buf_ptr[]
+ * fp->rx_sge_buf_map[]
+ * fp->sge_mbuf_alloc
*/
-static struct mbuf *
-bxe_alloc_tpa_mbuf(struct bxe_fastpath *fp, int index, int size)
+static void
+bxe_free_sg_chain(struct bxe_fastpath *fp)
{
- bus_dma_segment_t seg;
- struct mbuf *m;
- int rc;
-
- /* Allocate the new mbuf. */
- if ((m = bxe_alloc_mbuf(fp, size)) == NULL)
- goto bxe_alloc_tpa_mbuf_exit;
+ struct bxe_softc *sc;
+ int i;
- /* Map the mbuf into non-paged pool. */
- rc = bxe_map_mbuf(fp, m, fp->rx_mbuf_tag, fp->tpa_mbuf_map[index],
- &seg);
+ sc = fp->sc;
+ DBENTER(BXE_INSANE_TPA);
- if (rc) {
- m = NULL;
- goto bxe_alloc_tpa_mbuf_exit;
- }
+ if (fp->rx_sge_buf_tag == NULL)
+ goto bxe_free_sg_chain_exit;
- DBRUN(fp->tpa_mbuf_alloc++);
+ /* Free all mbufs and unload all maps. */
+ for (i = 0; i < TOTAL_RX_SGE; i++) {
+ /* Free the map and the mbuf if they're allocated. */
+ if (fp->rx_sge_buf_map[i] != NULL) {
+ bus_dmamap_sync(fp->rx_sge_buf_tag,
+ fp->rx_sge_buf_map[i], BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(fp->rx_sge_buf_tag,
+ fp->rx_sge_buf_map[i]);
+ }
- /* Save the mapping info for the mbuf. */
- fp->tpa_mbuf_segs[index] = seg;
+ if (fp->rx_sge_buf_ptr[i] != NULL) {
+ m_freem(fp->rx_sge_buf_ptr[i]);
+ DBRUN(fp->sge_mbuf_alloc--);
+ fp->rx_sge_buf_ptr[i] = NULL;
+ }
+ }
-bxe_alloc_tpa_mbuf_exit:
- return (m);
+bxe_free_sg_chain_exit:
+ DBEXIT(BXE_INSANE_TPA);
}
/*
- * Allocate a receive scatter gather entry
+ * Allocate an mbuf, if necessary, and add it to the receive chain.
*
* Returns:
- * 0 = Success, != Failure.
+ * 0 = Success, !0 = Failure.
*/
static int
-bxe_alloc_rx_sge(struct bxe_softc *sc, struct bxe_fastpath *fp,
- uint16_t ring_prod)
+bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp, uint16_t index)
{
- struct eth_rx_sge *sge;
- bus_dma_segment_t seg;
+ struct bxe_softc *sc;
+ struct eth_rx_bd *rx_bd;
+ bus_dma_segment_t segs[1];
+ bus_dmamap_t map;
struct mbuf *m;
- int rc;
+ int nsegs, rc;
- sge = &fp->rx_sge_chain[RX_SGE_PAGE(ring_prod)][RX_SGE_IDX(ring_prod)];
+ sc = fp->sc;
+ DBENTER(BXE_INSANE_LOAD | BXE_INSANE_RESET | BXE_INSANE_RECV);
rc = 0;
- /* Allocate a new mbuf. */
- if ((m = bxe_alloc_mbuf(fp, PAGE_SIZE)) == NULL) {
+#ifdef BXE_DEBUG
+ /* Simulate an mbuf allocation failure. */
+ if (DB_RANDOMTRUE(bxe_debug_mbuf_allocation_failure)) {
+ sc->debug_sim_mbuf_alloc_failed++;
+ fp->mbuf_rx_bd_alloc_failed++;
rc = ENOMEM;
- goto bxe_alloc_rx_sge_exit;
+ goto bxe_alloc_rx_bd_mbuf_exit;
}
+#endif
- /* Map the mbuf into non-paged pool. */
- rc = bxe_map_mbuf(fp, m, fp->rx_sge_buf_tag,
- fp->rx_sge_buf_map[ring_prod], &seg);
+ /* Allocate the new RX BD mbuf. */
+ m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, sc->mbuf_alloc_size);
+ if (__predict_false(m == NULL)) {
+ fp->mbuf_rx_bd_alloc_failed++;
+ rc = ENOBUFS;
+ goto bxe_alloc_rx_bd_mbuf_exit;
+ }
- if (rc)
- goto bxe_alloc_rx_sge_exit;
+ DBRUN(fp->rx_mbuf_alloc++);
- DBRUN(fp->sge_mbuf_alloc++);
+ /* Initialize the mbuf buffer length. */
+ m->m_pkthdr.len = m->m_len = sc->mbuf_alloc_size;
- /* Add the SGE buffer to the SGE ring. */
- sge->addr_hi = htole32(U64_HI(seg.ds_addr));
- sge->addr_lo = htole32(U64_LO(seg.ds_addr));
- fp->rx_sge_buf_ptr[ring_prod] = m;
+#ifdef BXE_DEBUG
+ /* Simulate an mbuf mapping failure. */
+ if (DB_RANDOMTRUE(bxe_debug_dma_map_addr_failure)) {
+ sc->debug_sim_mbuf_map_failed++;
+ fp->mbuf_rx_bd_mapping_failed++;
+ m_freem(m);
+ DBRUN(fp->rx_mbuf_alloc--);
+ rc = ENOMEM;
+ goto bxe_alloc_rx_bd_mbuf_exit;
+ }
+#endif
-bxe_alloc_rx_sge_exit:
+	/* Map the RX BD mbuf into non-paged pool. */
+ rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
+ fp->rx_mbuf_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT);
+ if (__predict_false(rc != 0)) {
+ fp->mbuf_rx_bd_mapping_failed++;
+ m_freem(m);
+ DBRUN(fp->rx_mbuf_alloc--);
+ goto bxe_alloc_rx_bd_mbuf_exit;
+ }
+
+	/* All mbufs must map to a single segment. */
+ KASSERT(nsegs == 1, ("%s(): Too many segments (%d) returned!",
+ __FUNCTION__, nsegs));
+
+ /* Release any existing RX BD mbuf mapping. */
+ if (fp->rx_mbuf_map[index] != NULL) {
+ bus_dmamap_sync(fp->rx_mbuf_tag,
+ fp->rx_mbuf_map[index], BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(fp->rx_mbuf_tag,
+ fp->rx_mbuf_map[index]);
+ }
+
+ /* Save the mbuf and mapping info. */
+ map = fp->rx_mbuf_map[index];
+ fp->rx_mbuf_map[index] = fp->rx_mbuf_spare_map;
+ fp->rx_mbuf_spare_map = map;
+ bus_dmamap_sync(fp->rx_mbuf_tag,
+ fp->rx_mbuf_map[index], BUS_DMASYNC_PREREAD);
+ fp->rx_mbuf_ptr[index] = m;
+ rx_bd = &fp->rx_chain[index];
+ rx_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
+ rx_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
+
+bxe_alloc_rx_bd_mbuf_exit:
+ DBEXIT(BXE_INSANE_LOAD | BXE_INSANE_RESET | BXE_INSANE_RECV);
return (rc);
}
+
+/*
+ * Allocate mbufs for a receive chain.
+ *
+ * Returns:
+ * 0 = Success, !0 = Failure.
+ *
+ * Modifies:
+ * fp->rx_bd_prod
+ */
+static int
+bxe_fill_rx_bd_chain(struct bxe_fastpath *fp)
+{
+ struct bxe_softc *sc;
+ uint16_t index;
+ int i, rc;
+
+ sc = fp->sc;
+ DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
+ rc = index = 0;
+
+ /* Allocate buffers for all the RX BDs in RX BD Chain. */
+ for (i = 0; i < USABLE_RX_BD; i++) {
+ rc = bxe_alloc_rx_bd_mbuf(fp, index);
+ if (rc != 0) {
+ BXE_PRINTF(
+ "%s(%d): Memory allocation failure! Cannot fill fp[%02d] RX chain.\n",
+ __FILE__, __LINE__, fp->index);
+ index = 0;
+ break;
+ }
+ index = NEXT_RX_BD(index);
+ }
+
+ fp->rx_bd_prod = index;
+ DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
+ return (rc);
+}
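/*
 * A minimal sketch of the fill loop's failure contract above, with a
 * hypothetical alloc_slot(): on any failure the loop stops, the
 * producer index is forced back to 0 so the hardware is never told
 * about a partially built ring, and the error is returned so the
 * caller can unwind.
 */
#include <errno.h>
#include <stdio.h>

/* Hypothetical per-slot allocator that fails on slot 5 for the demo. */
static int
alloc_slot(unsigned i)
{
	return (i == 5 ? ENOMEM : 0);
}

static int
fill_chain(unsigned nslots, unsigned *prod)
{
	unsigned i, index = 0;
	int rc = 0;

	for (i = 0; i < nslots; i++) {
		rc = alloc_slot(i);
		if (rc != 0) {
			index = 0;	/* publish an empty ring */
			break;
		}
		index++;		/* NEXT_RX_BD(index) in the driver */
	}
	*prod = index;			/* fp->rx_bd_prod analogue */
	return (rc);
}

int
main(void)
{
	unsigned prod;
	int rc;

	rc = fill_chain(8, &prod);
	printf("rc = %d, prod = %u\n", rc, prod);	/* ENOMEM, prod = 0 */
	return (0);
}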
+
+/*
+ * Free all buffers from the receive chain.
+ *
+ * Returns:
+ * None
+ *
+ * Modifies:
+ * fp->rx_mbuf_ptr[]
+ * fp->rx_mbuf_map[]
+ * fp->rx_mbuf_alloc
+ */
+static void
+bxe_free_rx_bd_chain(struct bxe_fastpath *fp)
+{
+ struct bxe_softc *sc;
+ int i;
+
+ sc = fp->sc;
+ DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
+
+ if (fp->rx_mbuf_tag == NULL)
+ goto bxe_free_rx_bd_chain_exit;
+
+ /* Free all mbufs and unload all maps. */
+ for (i = 0; i < TOTAL_RX_BD; i++) {
+ if (fp->rx_mbuf_map[i] != NULL) {
+ bus_dmamap_sync(fp->rx_mbuf_tag, fp->rx_mbuf_map[i],
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_map[i]);
+ }
+
+ if (fp->rx_mbuf_ptr[i] != NULL) {
+ m_freem(fp->rx_mbuf_ptr[i]);
+ DBRUN(fp->rx_mbuf_alloc--);
+ fp->rx_mbuf_ptr[i] = NULL;
+ }
+ }
+
+bxe_free_rx_bd_chain_exit:
+ DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
+}
+
/*
+ * Setup mutexes used by the driver.
+ *
* Returns:
* None.
*/
static void
-bxe_alloc_mutexes(struct bxe_softc *sc)
+bxe_mutexes_alloc(struct bxe_softc *sc)
{
struct bxe_fastpath *fp;
int i;
@@ -10726,7 +11011,7 @@ bxe_alloc_mutexes(struct bxe_softc *sc)
BXE_PRINT_LOCK_INIT(sc, "bxe_print_lock");
/* Allocate one mutex for each fastpath structure. */
- for (i=0; i < sc->num_queues; i++ ) {
+ for (i = 0; i < sc->num_queues; i++ ) {
fp = &sc->fp[i];
/* Allocate per fastpath mutexes. */
@@ -10739,23 +11024,25 @@ bxe_alloc_mutexes(struct bxe_softc *sc)
}
/*
+ * Free mutexes used by the driver.
+ *
* Returns:
* None.
*/
static void
-bxe_free_mutexes(struct bxe_softc *sc)
+bxe_mutexes_free(struct bxe_softc *sc)
{
struct bxe_fastpath *fp;
int i;
DBENTER(BXE_VERBOSE_UNLOAD);
- for (i=0; i < sc->num_queues; i++ ) {
+ for (i = 0; i < sc->num_queues; i++ ) {
fp = &sc->fp[i];
/* Release per fastpath mutexes. */
- if (mtx_initialized(&(fp->mtx)))
- mtx_destroy(&(fp->mtx));
+ if (mtx_initialized(&fp->mtx))
+ mtx_destroy(&fp->mtx);
}
BXE_PRINT_LOCK_DESTROY(sc);
@@ -10769,7 +11056,42 @@ bxe_free_mutexes(struct bxe_softc *sc)
}
+/*
+ * Free memory and clear the RX data structures.
+ *
+ * Returns:
+ * Nothing.
+ */
+static void
+bxe_clear_rx_chains(struct bxe_softc *sc)
+{
+ struct bxe_fastpath *fp;
+ int i;
+ DBENTER(BXE_VERBOSE_RESET);
+
+ for (i = 0; i < sc->num_queues; i++) {
+ fp = &sc->fp[i];
+
+ /* Free all RX buffers. */
+ bxe_free_rx_bd_chain(fp);
+ bxe_free_tpa_pool(fp);
+ bxe_free_sg_chain(fp);
+
+		/* Check if any mbufs were lost in the process. */
+ DBRUNIF((fp->tpa_mbuf_alloc), DBPRINT(sc, BXE_FATAL,
+ "%s(): Memory leak! Lost %d mbufs from fp[%02d] TPA pool!\n",
+ __FUNCTION__, fp->tpa_mbuf_alloc, fp->index));
+ DBRUNIF((fp->sge_mbuf_alloc), DBPRINT(sc, BXE_FATAL,
+ "%s(): Memory leak! Lost %d mbufs from fp[%02d] SGE chain!\n",
+ __FUNCTION__, fp->sge_mbuf_alloc, fp->index));
+ DBRUNIF((fp->rx_mbuf_alloc), DBPRINT(sc, BXE_FATAL,
+ "%s(): Memory leak! Lost %d mbufs from fp[%02d] RX chain!\n",
+ __FUNCTION__, fp->rx_mbuf_alloc, fp->index));
+ }
+
+ DBEXIT(BXE_VERBOSE_RESET);
+}
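/*
 * A minimal sketch of the debug-build leak accounting used above:
 * every successful buffer allocation bumps a per-ring counter and
 * every free decrements it, so after a full teardown a non-zero
 * counter pinpoints exactly how many buffers a ring leaked.  Names
 * here are illustrative, not the driver's.
 */
#include <assert.h>

struct ring_dbg {
	int	mbuf_alloc;	/* outstanding buffers */
};

static void
dbg_alloc(struct ring_dbg *r)
{
	r->mbuf_alloc++;	/* DBRUN(fp->rx_mbuf_alloc++) analogue */
}

static void
dbg_free(struct ring_dbg *r)
{
	r->mbuf_alloc--;	/* DBRUN(fp->rx_mbuf_alloc--) analogue */
}

int
main(void)
{
	struct ring_dbg r = { 0 };

	dbg_alloc(&r);
	dbg_alloc(&r);
	dbg_free(&r);
	dbg_free(&r);
	assert(r.mbuf_alloc == 0);	/* DBRUNIF leak-check analogue */
	return (0);
}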
/*
* Initialize the receive rings.
@@ -10777,69 +11099,26 @@ bxe_free_mutexes(struct bxe_softc *sc)
* Returns:
- *   None.
+ *   0 = Success, !0 = Failure.
*/
-static void
+static int
bxe_init_rx_chains(struct bxe_softc *sc)
{
struct bxe_fastpath *fp;
- struct eth_rx_sge *sge;
- struct eth_rx_bd *rx_bd;
- struct eth_rx_cqe_next_page *nextpg;
- uint16_t rx_bd_prod, rx_sge_prod;
- int func, i, j, rcq_idx, rx_idx, rx_sge_idx, max_agg_queues;
+ int func, i, rc;
DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
-
+ rc = 0;
func = BP_FUNC(sc);
- max_agg_queues = CHIP_IS_E1(sc) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
- ETH_MAX_AGGREGATION_QUEUES_E1H;
-
- sc->rx_buf_size = sc->mbuf_alloc_size;
-
- /* Allocate memory for the TPA pool. */
- if (TPA_ENABLED(sc)) {
- DBPRINT(sc, (BXE_INFO_LOAD | BXE_INFO_RESET),
- "%s(): mtu = %d, rx_buf_size = %d\n", __FUNCTION__,
- (int)sc->bxe_ifp->if_mtu, sc->rx_buf_size);
-
- for (i = 0; i < sc->num_queues; i++) {
- fp = &sc->fp[i];
- DBPRINT(sc, (BXE_INSANE_LOAD | BXE_INSANE_RESET),
- "%s(): Initializing fp[%02d] TPA pool.\n",
- __FUNCTION__, i);
-
- for (j = 0; j < max_agg_queues; j++) {
- DBPRINT(sc,
- (BXE_INSANE_LOAD | BXE_INSANE_RESET),
- "%s(): Initializing fp[%02d] TPA "
- "pool[%d].\n", __FUNCTION__, i, j);
-
- fp->disable_tpa = 0;
- fp->tpa_mbuf_ptr[j] = bxe_alloc_tpa_mbuf(fp, j,
- sc->mbuf_alloc_size);
-
- if (fp->tpa_mbuf_ptr[j] == NULL) {
- fp->tpa_mbuf_alloc_failed++;
- BXE_PRINTF("TPA disabled on "
- "fp[%02d]!\n", i);
- bxe_free_tpa_pool(fp, j);
- fp->disable_tpa = 1;
- break;
- }
- fp->tpa_state[j] = BXE_TPA_STATE_STOP;
- }
- }
- }
/* Allocate memory for RX and CQ chains. */
for (i = 0; i < sc->num_queues; i++) {
fp = &sc->fp[i];
DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET),
- "%s(): Initializing fp[%d] RX chain.\n", __FUNCTION__, i);
+ "%s(): Initializing fp[%02d] RX chain.\n", __FUNCTION__, i);
fp->rx_bd_cons = fp->rx_bd_prod = 0;
fp->rx_cq_cons = fp->rx_cq_prod = 0;
- /* Status block's completion queue consumer index. */
+ /* Pointer to status block's CQ consumer index. */
fp->rx_cq_cons_sb = &fp->status_block->
u_status_block.index_values[HC_INDEX_U_ETH_RX_CQ_CONS];
@@ -10847,138 +11126,30 @@ bxe_init_rx_chains(struct bxe_softc *sc)
fp->rx_bd_cons_sb = &fp->status_block->
u_status_block.index_values[HC_INDEX_U_ETH_RX_BD_CONS];
- if (TPA_ENABLED(sc)) {
- DBPRINT(sc, (BXE_INSANE_LOAD | BXE_INSANE_RESET),
- "%s(): Linking fp[%d] SGE rings.\n", __FUNCTION__,
- i);
-
- /* Link the SGE Ring Pages to form SGE chain */
- for (j = 0; j < NUM_RX_SGE_PAGES; j++) {
- rx_sge_idx = ((j + 1) % NUM_RX_SGE_PAGES);
- sge = &fp->rx_sge_chain[j][MAX_RX_SGE_CNT];
-
- DBPRINT(sc, (BXE_EXTREME_LOAD | BXE_EXTREME_RESET),
- "%s(): fp[%02d].rx_sge_chain[%02d][0x%04X]=0x%jX\n",
- __FUNCTION__, i, j,
- (uint16_t) MAX_RX_SGE_CNT,
- (uintmax_t) fp->rx_sge_chain_paddr[rx_sge_idx]);
-
- sge->addr_hi =
- htole32(U64_HI(fp->rx_sge_chain_paddr[rx_sge_idx]));
- sge->addr_lo =
- htole32(U64_LO(fp->rx_sge_chain_paddr[rx_sge_idx]));
- }
-
- bxe_init_sge_ring_bit_mask(fp);
- }
-
- DBPRINT(sc, (BXE_INSANE_LOAD | BXE_INSANE_RESET),
- "%s(): Linking fp[%d] RX chain pages.\n", __FUNCTION__, i);
-
- /* Link the pages to form the RX BD Chain. */
- for (j = 0; j < NUM_RX_PAGES; j++) {
- rx_idx = ((j + 1) % NUM_RX_PAGES);
- rx_bd = &fp->rx_bd_chain[j][USABLE_RX_BD_PER_PAGE];
-
- DBPRINT(sc, (BXE_EXTREME_LOAD),
- "%s(): fp[%02d].rx_bd_chain[%02d][0x%04X]=0x%jX\n",
- __FUNCTION__, i, j,
- (uint16_t) USABLE_RX_BD_PER_PAGE,
- (uintmax_t) fp->rx_bd_chain_paddr[rx_idx]);
-
- rx_bd->addr_hi =
- htole32(U64_HI(fp->rx_bd_chain_paddr[rx_idx]));
- rx_bd->addr_lo =
- htole32(U64_LO(fp->rx_bd_chain_paddr[rx_idx]));
- }
-
- DBPRINT(sc, (BXE_INSANE_LOAD | BXE_INSANE_RESET),
- "%s(): Linking fp[%d] RX completion chain pages.\n",
- __FUNCTION__, i);
-
- /* Link the pages to form the RX Completion Queue.*/
- for (j = 0; j < NUM_RCQ_PAGES; j++) {
- rcq_idx = ((j + 1) % NUM_RCQ_PAGES);
- nextpg = (struct eth_rx_cqe_next_page *)
- &fp->rx_cq_chain[j][USABLE_RCQ_ENTRIES_PER_PAGE];
-
- DBPRINT(sc, (BXE_EXTREME_LOAD),
- "%s(): fp[%02d].rx_cq_chain[%02d][0x%04X]=0x%jX\n",
- __FUNCTION__, i, j,
- (uint16_t) USABLE_RCQ_ENTRIES_PER_PAGE,
- (uintmax_t) fp->rx_cq_chain_paddr[rcq_idx]);
-
- nextpg->addr_hi =
- htole32(U64_HI(fp->rx_cq_chain_paddr[rcq_idx]));
- nextpg->addr_lo =
- htole32(U64_LO(fp->rx_cq_chain_paddr[rcq_idx]));
- }
-
- if (TPA_ENABLED(sc)) {
- /* Allocate SGEs and initialize the ring elements. */
- rx_sge_prod = 0;
-
- while (rx_sge_prod < sc->rx_ring_size) {
- if (bxe_alloc_rx_sge(sc, fp, rx_sge_prod) != 0) {
- fp->tpa_mbuf_alloc_failed++;
- BXE_PRINTF(
- "%s(%d): Memory allocation failure! "
- "Disabling TPA for fp[%02d].\n",
- __FILE__, __LINE__, i);
-
- /* Cleanup already allocated elements */
- bxe_free_rx_sge_range(sc, fp,
- rx_sge_prod);
- fp->disable_tpa = 1;
- rx_sge_prod = 0;
- break;
- }
- rx_sge_prod = NEXT_SGE_IDX(rx_sge_prod);
- }
-
- fp->rx_sge_prod = rx_sge_prod;
- }
-
- /*
- * Allocate buffers for all the RX BDs in RX BD Chain.
- */
- rx_bd_prod = 0;
- DBRUN(fp->free_rx_bd = sc->rx_ring_size);
-
- for (j = 0; j < sc->rx_ring_size; j++) {
- if (bxe_get_buf(fp, NULL, rx_bd_prod)) {
- BXE_PRINTF(
- "%s(%d): Memory allocation failure! Cannot fill fp[%d] RX chain.\n",
- __FILE__, __LINE__, i);
- break;
- }
- rx_bd_prod = NEXT_RX_BD(rx_bd_prod);
- }
-
- /* Update the driver's copy of the producer indices. */
- fp->rx_bd_prod = rx_bd_prod;
fp->rx_cq_prod = TOTAL_RCQ_ENTRIES;
- fp->rx_pkts = fp->rx_calls = 0;
+ fp->rx_pkts = fp->rx_tpa_pkts = fp->rx_soft_errors = 0;
- DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET),
- "%s(): USABLE_RX_BD=0x%04X, USABLE_RCQ_ENTRIES=0x%04X\n",
- __FUNCTION__, (uint16_t) USABLE_RX_BD,
- (uint16_t) USABLE_RCQ_ENTRIES);
- DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET),
- "%s(): fp[%02d]->rx_bd_prod=0x%04X, rx_cq_prod=0x%04X\n",
- __FUNCTION__, i, fp->rx_bd_prod, fp->rx_cq_prod);
+ /* Allocate memory for the receive chain. */
+ rc = bxe_fill_rx_bd_chain(fp);
+ if (rc != 0)
+ goto bxe_init_rx_chains_exit;
+
+ /* Allocate memory for TPA pool. */
+ rc = bxe_fill_tpa_pool(fp);
+ if (rc != 0)
+ goto bxe_init_rx_chains_exit;
+ /* Allocate memory for scatter-gather chain. */
+ rc = bxe_fill_sg_chain(fp);
+ if (rc != 0)
+ goto bxe_init_rx_chains_exit;
- /* Prepare the recevie BD and CQ buffers for DMA access. */
- for (j = 0; j < NUM_RX_PAGES; j++)
- bus_dmamap_sync(fp->rx_bd_chain_tag,
- fp->rx_bd_chain_map[j], BUS_DMASYNC_PREREAD |
- BUS_DMASYNC_PREWRITE);
+ /* Prepare the receive BD and CQ buffers for DMA access. */
+ bus_dmamap_sync(fp->rx_dma.tag, fp->rx_dma.map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- for (j = 0; j < NUM_RCQ_PAGES; j++)
- bus_dmamap_sync(fp->rx_cq_chain_tag,
- fp->rx_cq_chain_map[j], BUS_DMASYNC_PREREAD |
- BUS_DMASYNC_PREWRITE);
+ bus_dmamap_sync(fp->rcq_dma.tag, fp->rcq_dma.map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/*
* Tell the controller that we have rx_bd's and CQE's
@@ -10989,6 +11160,7 @@ bxe_init_rx_chains(struct bxe_softc *sc)
bxe_update_rx_prod(sc, fp, fp->rx_bd_prod,
fp->rx_cq_prod, fp->rx_sge_prod);
+ /* ToDo - Move to dma_alloc(). */
/*
* Tell controller where the receive CQ
* chains start in physical memory.
@@ -10996,214 +11168,123 @@ bxe_init_rx_chains(struct bxe_softc *sc)
if (i == 0) {
REG_WR(sc, BAR_USTORM_INTMEM +
USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
- U64_LO(fp->rx_cq_chain_paddr[0]));
+ U64_LO(fp->rcq_dma.paddr));
REG_WR(sc, BAR_USTORM_INTMEM +
USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
- U64_HI(fp->rx_cq_chain_paddr[0]));
+ U64_HI(fp->rcq_dma.paddr));
}
}
- /*
- * ToDo: Need a cleanup path if memory allocation
- * fails during initializtion. This is especially
- * easy if multiqueue is used on a system with
- * jumbo frames and many CPUs. On my 16GB system
- * with 8 CPUs I get the following defaults:
- *
- * kern.ipc.nmbjumbo16: 3200
- * kern.ipc.nmbjumbo9: 6400
- * kern.ipc.nmbjumbop: 12800
- * kern.ipc.nmbclusters: 25600
- */
-
- DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
-}
-
-/*
- * Initialize the transmit chain.
- *
- * Returns:
- * None.
- */
-static void
-bxe_init_tx_chains(struct bxe_softc *sc)
-{
- struct bxe_fastpath *fp;
- struct eth_tx_next_bd *tx_n_bd;
- int i, j;
-
- DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
-
- for (i = 0; i < sc->num_queues; i++) {
- fp = &sc->fp[i];
-
- DBPRINT(sc, (BXE_INSANE_LOAD | BXE_INSANE_RESET),
- "%s(): Linking fp[%d] TX chain pages.\n", __FUNCTION__, i);
-
- for (j = 0; j < NUM_TX_PAGES; j++) {
- tx_n_bd =
- &fp->tx_bd_chain[j][USABLE_TX_BD_PER_PAGE].next_bd;
-
- DBPRINT(sc, (BXE_INSANE_LOAD | BXE_INSANE_RESET),
- "%s(): Linking fp[%d] TX BD chain page[%d].\n",
- __FUNCTION__, i, j);
-
- tx_n_bd->addr_hi =
- htole32(U64_HI(fp->tx_bd_chain_paddr[(j + 1) %
- NUM_TX_PAGES]));
- tx_n_bd->addr_lo =
- htole32(U64_LO(fp->tx_bd_chain_paddr[(j + 1) %
- NUM_TX_PAGES]));
- }
-
- fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
- fp->tx_db.data.zero_fill1 = 0;
- fp->tx_db.data.prod = 0;
-
- fp->tx_pkt_prod = 0;
- fp->tx_pkt_cons = 0;
- fp->tx_bd_prod = 0;
- fp->tx_bd_cons = 0;
- fp->used_tx_bd = 0;
-
- /*
- * Copy of TX BD Chain completion queue Consumer Index
- * from the Status Block.
- */
- fp->tx_cons_sb =
- &fp->status_block->c_status_block.index_values[C_SB_ETH_TX_CQ_INDEX];
-
- fp->tx_pkts = 0;
- }
+bxe_init_rx_chains_exit:
+ /* Release memory if an error occurred. */
+ if (rc != 0)
+ bxe_clear_rx_chains(sc);
DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
+ return (rc);
}
/*
- * Free memory and clear the RX data structures.
+ * Free memory and clear the TX data structures.
*
* Returns:
* Nothing.
*/
static void
-bxe_free_rx_chains(struct bxe_softc *sc)
+bxe_clear_tx_chains(struct bxe_softc *sc)
{
struct bxe_fastpath *fp;
- int i, j, max_agg_queues;
+ int i, j;
DBENTER(BXE_VERBOSE_RESET);
for (i = 0; i < sc->num_queues; i++) {
fp = &sc->fp[i];
- if (fp->rx_mbuf_tag) {
- /* Free any mbufs still in the RX mbuf chain. */
- for (j = 0; j < TOTAL_RX_BD; j++) {
- if (fp->rx_mbuf_ptr[j] != NULL) {
- if (fp->rx_mbuf_map[j] != NULL)
- bus_dmamap_sync(fp->rx_mbuf_tag,
- fp->rx_mbuf_map[j],
- BUS_DMASYNC_POSTREAD);
- DBRUN(fp->rx_mbuf_alloc--);
- m_freem(fp->rx_mbuf_ptr[j]);
- fp->rx_mbuf_ptr[j] = NULL;
- }
- }
-
- /* Clear each RX chain page. */
- for (j = 0; j < NUM_RX_PAGES; j++) {
- if (fp->rx_bd_chain[j] != NULL)
- bzero((char *)fp->rx_bd_chain[j],
- BXE_RX_CHAIN_PAGE_SZ);
- }
- /* Clear each RX completion queue page. */
- for (j = 0; j < NUM_RCQ_PAGES; j++) {
- if (fp->rx_cq_chain[j] != NULL)
- bzero((char *)fp->rx_cq_chain[j],
- BXE_RX_CHAIN_PAGE_SZ);
- }
-
- if (TPA_ENABLED(sc)) {
- max_agg_queues = CHIP_IS_E1H(sc) ?
- ETH_MAX_AGGREGATION_QUEUES_E1H :
- ETH_MAX_AGGREGATION_QUEUES_E1;
-
- /* Free the TPA Pool mbufs. */
- bxe_free_tpa_pool(fp, max_agg_queues);
-
- /*
- * Free any mbufs still in the RX SGE
- * buf chain.
- */
- bxe_free_rx_sge_range(fp->sc, fp, MAX_RX_SGE);
-
- /* Clear each RX SGE page. */
- for (j = 0; j < NUM_RX_SGE_PAGES; j++) {
- if (fp->rx_sge_chain[j] != NULL)
- bzero(
- (char *)fp->rx_sge_chain[j],
- BXE_RX_CHAIN_PAGE_SZ);
+ /* Free all mbufs and unload all maps. */
+ if (fp->tx_mbuf_tag) {
+ for (j = 0; j < TOTAL_TX_BD; j++) {
+ if (fp->tx_mbuf_ptr[j] != NULL) {
+ bus_dmamap_sync(fp->tx_mbuf_tag,
+ fp->tx_mbuf_map[j],
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(fp->tx_mbuf_tag,
+ fp->tx_mbuf_map[j]);
+ m_freem(fp->tx_mbuf_ptr[j]);
+ fp->tx_mbuf_alloc--;
+ fp->tx_mbuf_ptr[j] = NULL;
}
}
}
/* Check if we lost any mbufs in the process. */
- DBRUNIF((fp->rx_mbuf_alloc), DBPRINT(sc, BXE_FATAL,
- "%s(): Memory leak! Lost %d mbufs from fp[%d] RX chain!\n",
- __FUNCTION__, fp->rx_mbuf_alloc, fp->index));
+ DBRUNIF((fp->tx_mbuf_alloc), DBPRINT(sc, BXE_FATAL,
+ "%s(): Memory leak! Lost %d mbufs from fp[%02d] TX chain!\n",
+ __FUNCTION__, fp->tx_mbuf_alloc, fp->index));
}
DBEXIT(BXE_VERBOSE_RESET);
}
/*
- * Free memory and clear the TX data structures.
+ * Initialize the transmit chain.
*
* Returns:
- * Nothing.
+ * None.
*/
static void
-bxe_free_tx_chains(struct bxe_softc *sc)
+bxe_init_tx_chains(struct bxe_softc *sc)
{
struct bxe_fastpath *fp;
int i, j;
- DBENTER(BXE_VERBOSE_RESET);
+ DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
for (i = 0; i < sc->num_queues; i++) {
fp = &sc->fp[i];
- if (fp->tx_mbuf_tag) {
- /*
- * Unmap, unload, and free any mbufs in the
- * TX mbuf chain.
- */
- for (j = 0; j < TOTAL_TX_BD; j++) {
- if (fp->tx_mbuf_ptr[j] != NULL) {
- if (fp->tx_mbuf_map[j] != NULL)
- bus_dmamap_sync(fp->tx_mbuf_tag,
- fp->tx_mbuf_map[j],
- BUS_DMASYNC_POSTWRITE);
- DBRUN(fp->tx_mbuf_alloc--);
- m_freem(fp->tx_mbuf_ptr[j]);
- fp->tx_mbuf_ptr[j] = NULL;
- }
- }
- /* Clear each TX chain page. */
- for (j = 0; j < NUM_TX_PAGES; j++) {
- if (fp->tx_bd_chain[j] != NULL)
- bzero((char *)fp->tx_bd_chain[j],
- BXE_TX_CHAIN_PAGE_SZ);
- }
+ /* Initialize transmit doorbell. */
+ fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
+ fp->tx_db.data.zero_fill1 = 0;
+ fp->tx_db.data.prod = 0;
+
+		/* Initialize transmit producer/consumer indices. */
+ fp->tx_pkt_prod = fp->tx_pkt_cons = 0;
+ fp->tx_bd_prod = fp->tx_bd_cons = 0;
+ fp->tx_bd_used = 0;
- /* Check if we lost any mbufs in the process. */
- DBRUNIF((fp->tx_mbuf_alloc), DBPRINT(sc, BXE_FATAL,
- "%s(): Memory leak! Lost %d mbufs from fp[%d] TX chain!\n",
- __FUNCTION__, fp->tx_mbuf_alloc, fp->index));
+ /* Pointer to TX packet consumer in status block. */
+ fp->tx_pkt_cons_sb =
+ &fp->status_block->c_status_block.index_values[C_SB_ETH_TX_CQ_INDEX];
+
+ /* Soft TX counters. */
+ fp->tx_pkts = 0;
+ fp->tx_soft_errors = 0;
+ fp->tx_offload_frames_csum_ip = 0;
+ fp->tx_offload_frames_csum_tcp = 0;
+ fp->tx_offload_frames_csum_udp = 0;
+ fp->tx_offload_frames_tso = 0;
+ fp->tx_header_splits = 0;
+ fp->tx_encap_failures = 0;
+ fp->tx_hw_queue_full = 0;
+ fp->tx_hw_max_queue_depth = 0;
+ fp->tx_dma_mapping_failure = 0;
+ fp->tx_max_drbr_queue_depth = 0;
+ fp->tx_window_violation_std = 0;
+ fp->tx_window_violation_tso = 0;
+ fp->tx_unsupported_tso_request_ipv6 = 0;
+ fp->tx_unsupported_tso_request_not_tcp = 0;
+ fp->tx_chain_lost_mbuf = 0;
+ fp->tx_frame_deferred = 0;
+ fp->tx_queue_xoff = 0;
+
+ /* Clear all TX mbuf pointers. */
+ for (j = 0; j < TOTAL_TX_BD; j++) {
+ fp->tx_mbuf_ptr[j] = NULL;
}
}
- DBEXIT(BXE_VERBOSE_RESET);
+ DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
}
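/*
 * A minimal sketch of producer/consumer accounting on a TX ring like
 * the one initialized above: both indices are free-running 16-bit
 * counters, so unsigned subtraction yields the number of in-flight
 * descriptors even after the producer wraps past 0xffff.  The ring
 * size is assumed for illustration.
 */
#include <assert.h>
#include <stdint.h>

#define TOTAL_TX_BD	512	/* assumed ring size */

static uint16_t
tx_bd_used(uint16_t prod, uint16_t cons)
{
	return ((uint16_t)(prod - cons));
}

int
main(void)
{
	/* Producer wrapped: 0x0005 - 0xfffe == 7 in uint16 arithmetic. */
	assert(tx_bd_used(0x0005, 0xfffe) == 7);
	assert(tx_bd_used(100, 40) == 60);
	assert(tx_bd_used(100, 40) < TOTAL_TX_BD);
	return (0);
}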
/*
@@ -11232,9 +11313,9 @@ bxe_init_sp_ring(struct bxe_softc *sc)
/* Tell the controller the address of the slowpath ring. */
REG_WR(sc, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
- U64_LO(sc->spq_paddr));
+ U64_LO(sc->spq_dma.paddr));
REG_WR(sc, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
- U64_HI(sc->spq_paddr));
+ U64_HI(sc->spq_dma.paddr));
REG_WR(sc, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
sc->spq_prod_idx);
@@ -11283,27 +11364,33 @@ bxe_init_context(struct bxe_softc *sc)
context->ustorm_st_context.common.mc_alignment_log_size = 8;
/* Set the size of the receive buffers. */
context->ustorm_st_context.common.bd_buff_size =
- sc->rx_buf_size;
+ sc->mbuf_alloc_size;
/* Set the address of the receive chain base page. */
context->ustorm_st_context.common.bd_page_base_hi =
- U64_HI(fp->rx_bd_chain_paddr[0]);
+ U64_HI(fp->rx_dma.paddr);
context->ustorm_st_context.common.bd_page_base_lo =
- U64_LO(fp->rx_bd_chain_paddr[0]);
+ U64_LO(fp->rx_dma.paddr);
- if (TPA_ENABLED(sc) && !(fp->disable_tpa)) {
+ if (TPA_ENABLED(sc) && (fp->disable_tpa == FALSE)) {
/* Enable TPA and SGE chain support. */
context->ustorm_st_context.common.flags |=
USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
+
/* Set the size of the SGE buffer. */
context->ustorm_st_context.common.sge_buff_size =
- (uint16_t) (PAGES_PER_SGE * BCM_PAGE_SIZE);
+ (uint16_t) (SGE_PAGE_SIZE * PAGES_PER_SGE);
+
/* Set the address of the SGE chain base page. */
context->ustorm_st_context.common.sge_page_base_hi =
- U64_HI(fp->rx_sge_chain_paddr[0]);
+ U64_HI(fp->sg_dma.paddr);
context->ustorm_st_context.common.sge_page_base_lo =
- U64_LO(fp->rx_sge_chain_paddr[0]);
+ U64_LO(fp->sg_dma.paddr);
+
+ DBPRINT(sc, BXE_VERBOSE_TPA, "%s(): MTU = %d\n",
+ __FUNCTION__, (int) sc->bxe_ifp->if_mtu);
+			/* Number of SGE pages needed for an MTU-sized frame. */
context->ustorm_st_context.common.max_sges_for_packet =
SGE_PAGE_ALIGN(sc->bxe_ifp->if_mtu) >>
SGE_PAGE_SHIFT;
@@ -11311,6 +11398,10 @@ bxe_init_context(struct bxe_softc *sc)
((context->ustorm_st_context.common.
max_sges_for_packet + PAGES_PER_SGE - 1) &
(~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
+
+ DBPRINT(sc, BXE_VERBOSE_TPA,
+ "%s(): max_sges_for_packet = %d\n", __FUNCTION__,
+ context->ustorm_st_context.common.max_sges_for_packet);
}
/* Update USTORM context. */
@@ -11325,9 +11416,9 @@ bxe_init_context(struct bxe_softc *sc)
/* Set the address of the transmit chain base page. */
context->xstorm_st_context.tx_bd_page_base_hi =
- U64_HI(fp->tx_bd_chain_paddr[0]);
+ U64_HI(fp->tx_dma.paddr);
context->xstorm_st_context.tx_bd_page_base_lo =
- U64_LO(fp->tx_bd_chain_paddr[0]);
+ U64_LO(fp->tx_dma.paddr);
/* Enable XSTORM statistics. */
context->xstorm_st_context.statistics_data = (cl_id |
@@ -11592,7 +11683,7 @@ bxe_init_internal_func(struct bxe_softc *sc)
}
/* Enable TPA if needed */
- if (sc->bxe_flags & BXE_TPA_ENABLE_FLAG)
+ if (TPA_ENABLED(sc))
tstorm_config.config_flags |=
TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
@@ -11693,21 +11784,24 @@ bxe_init_internal_func(struct bxe_softc *sc)
}
/* Init completion queue mapping and TPA aggregation size. */
- max_agg_size = min((uint32_t)(sc->rx_buf_size + 8 * BCM_PAGE_SIZE *
- PAGES_PER_SGE), (uint32_t)0xffff);
+ max_agg_size = min((uint32_t)(sc->mbuf_alloc_size +
+ (8 * BCM_PAGE_SIZE * PAGES_PER_SGE)), (uint32_t)0xffff);
+
+ DBPRINT(sc, BXE_VERBOSE_TPA, "%s(): max_agg_size = 0x%08X\n",
+ __FUNCTION__, max_agg_size);
for (i = 0; i < sc->num_queues; i++) {
fp = &sc->fp[i];
nextpg = (struct eth_rx_cqe_next_page *)
- &fp->rx_cq_chain[0][USABLE_RCQ_ENTRIES_PER_PAGE];
+ &fp->rcq_chain[USABLE_RCQ_ENTRIES_PER_PAGE];
/* Program the completion queue address. */
REG_WR(sc, BAR_USTORM_INTMEM +
USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
- U64_LO(fp->rx_cq_chain_paddr[0]));
+ U64_LO(fp->rcq_dma.paddr));
REG_WR(sc, BAR_USTORM_INTMEM +
USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
- U64_HI(fp->rx_cq_chain_paddr[0]));
+ U64_HI(fp->rcq_dma.paddr));
/* Program the first CQ next page address. */
REG_WR(sc, BAR_USTORM_INTMEM +
@@ -11735,7 +11829,7 @@ bxe_init_internal_func(struct bxe_softc *sc)
for (i = 0; i < sc->num_queues; i++) {
fp = &sc->fp[i];
- if (!fp->disable_tpa) {
+ if (fp->disable_tpa == FALSE) {
rx_pause.sge_thr_low = 150;
rx_pause.sge_thr_high = 250;
}
@@ -11818,18 +11912,18 @@ bxe_init_internal(struct bxe_softc *sc, uint32_t load_code)
* Returns:
- * None
+ * 0 = Success, !0 = Failure
*/
-static void
+static int
bxe_init_nic(struct bxe_softc *sc, uint32_t load_code)
{
struct bxe_fastpath *fp;
- int i;
+ int i, rc;
DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
	/* Initialize fastpath structures and the status block. */
for (i = 0; i < sc->num_queues; i++) {
fp = &sc->fp[i];
- fp->disable_tpa = 1;
+ fp->disable_tpa = TRUE;
bzero((char *)fp->status_block, BXE_STATUS_BLK_SZ);
fp->fp_u_idx = 0;
@@ -11851,29 +11945,31 @@ bxe_init_nic(struct bxe_softc *sc, uint32_t load_code)
fp->sb_id = fp->cl_id;
DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET),
- "%s(): fp[%d]: cl_id = %d, sb_id = %d\n",
+ "%s(): fp[%02d]: cl_id = %d, sb_id = %d\n",
__FUNCTION__, fp->index, fp->cl_id, fp->sb_id);
/* Initialize the fastpath status block. */
- bxe_init_sb(sc, fp->status_block, fp->status_block_paddr,
+ bxe_init_sb(sc, fp->status_block, fp->sb_dma.paddr,
fp->sb_id);
bxe_update_fpsb_idx(fp);
}
rmb();
- bzero((char *)sc->def_status_block, BXE_DEF_STATUS_BLK_SZ);
+ bzero((char *)sc->def_sb, BXE_DEF_STATUS_BLK_SZ);
/* Initialize the Default Status Block. */
- bxe_init_def_sb(sc, sc->def_status_block, sc->def_status_block_paddr,
- DEF_SB_ID);
+ bxe_init_def_sb(sc, sc->def_sb, sc->def_sb_dma.paddr, DEF_SB_ID);
bxe_update_dsb_idx(sc);
/* Initialize the coalescence parameters. */
bxe_update_coalesce(sc);
- /* Intiialize the Receive BD Chain and Receive Completion Chain. */
- bxe_init_rx_chains(sc);
+ /* Initialize receive chains. */
+ rc = bxe_init_rx_chains(sc);
+ if (rc != 0) {
+ goto bxe_init_nic_exit;
+ }
/* Initialize the Transmit BD Chain. */
bxe_init_tx_chains(sc);
@@ -11895,46 +11991,7 @@ bxe_init_nic(struct bxe_softc *sc, uint32_t load_code)
/* Disable the interrupts from device until init is complete.*/
bxe_int_disable(sc);
- DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
-}
-
-/*
-*
-* Returns:
-* 0 = Success, !0 = Failure
-*/
-static int
-bxe_gunzip_init(struct bxe_softc *sc)
-{
- int rc;
-
- rc = 0;
-
- DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
-
- bxe_dmamem_alloc(sc, sc->gunzip_tag, sc->gunzip_map, sc->gunzip_buf,
- FW_BUF_SIZE, &sc->gunzip_mapping);
-
- if (sc->gunzip_buf == NULL)
- goto bxe_gunzip_init_nomem1;
-
- sc->strm = malloc(sizeof(*sc->strm), M_DEVBUF, M_NOWAIT);
- if (sc->strm == NULL)
- goto bxe_gunzip_init_nomem2;
-
- goto bxe_gunzip_init_exit;
-
-bxe_gunzip_init_nomem2:
- bxe_dmamem_free(sc, sc->gunzip_tag, sc->gunzip_buf, sc->gunzip_map);
- sc->gunzip_buf = NULL;
-
-bxe_gunzip_init_nomem1:
- BXE_PRINTF(
- "%s(%d): Cannot allocate firmware buffer for decompression!\n",
- __FILE__, __LINE__);
- rc = ENOMEM;
-
-bxe_gunzip_init_exit:
+bxe_init_nic_exit:
DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
return (rc);
}
@@ -11948,14 +12005,14 @@ bxe_gunzip_init_exit:
static void
bxe_lb_pckt(struct bxe_softc *sc)
{
-#ifdef USE_DMAE
+#ifdef BXE_USE_DMAE
uint32_t wb_write[3];
#endif
DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
/* Ethernet source and destination addresses. */
-#ifdef USE_DMAE
+#ifdef BXE_USE_DMAE
wb_write[0] = 0x55555555;
wb_write[1] = 0x55555555;
wb_write[2] = 0x20; /* SOP */
@@ -11967,7 +12024,7 @@ bxe_lb_pckt(struct bxe_softc *sc)
#endif
/* NON-IP protocol. */
-#ifdef USE_DMAE
+#ifdef BXE_USE_DMAE
wb_write[0] = 0x09000000;
wb_write[1] = 0x55555555;
wb_write[2] = 0x10; /* EOP */
@@ -12130,7 +12187,8 @@ bxe_int_mem_test(struct bxe_softc *sc)
val = REG_RD(sc, NIG_REG_INGRESS_EOP_LB_EMPTY);
if (val != 1) {
- DBPRINT(sc, BXE_INFO, "clear of NIG failed\n");
+ DBPRINT(sc, BXE_INFO, "%s(): Unable to clear NIG!\n",
+ __FUNCTION__);
rc = 6;
goto bxe_int_mem_test_exit;
}
@@ -12495,7 +12553,7 @@ bxe_setup_fan_failure_detection(struct bxe_softc *sc)
int is_required, port;
is_required = 0;
- if (BP_NOMCP(sc))
+ if (NOMCP(sc))
return;
val = SHMEM_RD(sc, dev_info.shared_hw_config.config2) &
@@ -12787,7 +12845,7 @@ bxe_init_common(struct bxe_softc *sc)
bxe_enable_blocks_attention(sc);
- if (!BP_NOMCP(sc)) {
+ if (!NOMCP(sc)) {
bxe_acquire_phy_lock(sc);
bxe_common_init_phy(sc, sc->common.shmem_base);
bxe_release_phy_lock(sc);
@@ -12813,7 +12871,7 @@ bxe_init_port(struct bxe_softc *sc)
uint32_t val, low, high;
uint32_t swap_val, swap_override, aeu_gpio_mask, offset;
uint32_t reg_addr;
- int i, init_stage, port;
+ int init_stage, port;
port = BP_PORT(sc);
init_stage = port ? PORT1_STAGE : PORT0_STAGE;
@@ -12855,14 +12913,6 @@ bxe_init_port(struct bxe_softc *sc)
REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port * 4, low);
REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port * 4, high);
- if (sc->bxe_flags & BXE_SAFC_TX_FLAG) {
- REG_WR(sc, BRB1_REG_HIGH_LLFC_LOW_THRESHOLD_0 + port * 4, 0xa0);
- REG_WR(sc, BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD_0 + port * 4,
- 0xd8);
- REG_WR(sc, BRB1_REG_LOW_LLFC_LOW_THRESHOLD_0 + port *4, 0xa0);
- REG_WR(sc, BRB1_REG_LOW_LLFC_HIGH_THRESHOLD_0 + port * 4, 0xd8);
- }
-
/* Port PRS comes here. */
bxe_init_block(sc, PRS_BLOCK, init_stage);
@@ -12901,6 +12951,7 @@ bxe_init_port(struct bxe_softc *sc)
REG_WR(sc, HC_REG_LEADING_EDGE_0 + port * 8, 0);
REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port * 8, 0);
}
+
bxe_init_block(sc, HC_BLOCK, init_stage);
bxe_init_block(sc, MISC_AEU_BLOCK, init_stage);
@@ -12927,33 +12978,12 @@ bxe_init_port(struct bxe_softc *sc)
/* Enable outer VLAN support if required. */
REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port * 4,
(IS_E1HOV(sc) ? 0x1 : 0x2));
-
- if (sc->bxe_flags & BXE_SAFC_TX_FLAG){
- high = 0;
- for (i = 0; i < BXE_MAX_PRIORITY; i++) {
- if (sc->pri_map[i] == 1)
- high |= (1 << i);
- }
- REG_WR(sc, NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0 +
- port * 4, high);
- low = 0;
- for (i = 0; i < BXE_MAX_PRIORITY; i++) {
- if (sc->pri_map[i] == 0)
- low |= (1 << i);
- }
- REG_WR(sc, NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0 +
- port * 4, low);
-
- REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port * 4, 0);
- REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port * 4, 1);
- REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port * 4, 1);
- } else {
- REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port * 4, 0);
- REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port * 4, 0);
- REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port * 4, 1);
- }
}
+ REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port * 4, 0);
+ REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port * 4, 0);
+ REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port * 4, 1);
+
bxe_init_block(sc, MCP_BLOCK, init_stage);
bxe_init_block(sc, DMAE_BLOCK, init_stage);
@@ -13127,7 +13157,6 @@ bxe_init_hw(struct bxe_softc *sc, uint32_t load_code)
DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
sc->dmae_ready = 0;
- bxe_gunzip_init(sc);
switch (load_code) {
case FW_MSG_CODE_DRV_LOAD_COMMON:
rc = bxe_init_common(sc);
@@ -13154,21 +13183,19 @@ bxe_init_hw(struct bxe_softc *sc, uint32_t load_code)
}
/* Fetch additional config data if the bootcode is running. */
- if (!BP_NOMCP(sc)) {
+ if (!NOMCP(sc)) {
func = BP_FUNC(sc);
/* Fetch the pulse sequence number. */
sc->fw_drv_pulse_wr_seq = (SHMEM_RD(sc,
func_mb[func].drv_pulse_mb) & DRV_PULSE_SEQ_MASK);
}
- /* This needs to be done before gunzip end. */
+ /* Clear the default status block. */
bxe_zero_def_sb(sc);
for (i = 0; i < sc->num_queues; i++)
bxe_zero_sb(sc, BP_L_ID(sc) + i);
bxe_init_hw_exit:
- bxe_gunzip_end(sc);
-
DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
return (rc);
@@ -13194,8 +13221,6 @@ bxe_fw_command(struct bxe_softc *sc, uint32_t command)
rc = 0;
cnt = 1;
- DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
-
DBRUNMSG(BXE_VERBOSE, bxe_decode_mb_msgs(sc, (command | seq), 0));
BXE_FWMB_LOCK(sc);
@@ -13225,321 +13250,285 @@ bxe_fw_command(struct bxe_softc *sc, uint32_t command)
}
BXE_FWMB_UNLOCK(sc);
-
- DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
return (rc);
}
/*
- * Free any DMA memory owned by the driver.
- *
- * Scans through each data structre that requires DMA memory and frees
- * the memory if allocated.
+ * Allocate a block of memory and map it for DMA. No partial
+ * completions allowed, release any resources acquired if we
+ * can't acquire all resources.
*
* Returns:
- * Nothing.
+ * 0 = Success, !0 = Failure
+ *
+ * Modifies:
+ * dma->paddr
+ * dma->vaddr
+ * dma->tag
+ * dma->map
+ * dma->size
+ *
*/
-static void
-bxe_dma_free(struct bxe_softc *sc)
+static int
+bxe_dma_malloc(struct bxe_softc *sc, bus_size_t size,
+ struct bxe_dma *dma, int mapflags, const char *msg)
{
- struct bxe_fastpath *fp;
- int i, j;
+ int rc;
DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
- if (sc->parent_tag != NULL) {
+ DBRUNIF(dma->size > 0,
+ BXE_PRINTF("%s(): Called for %s with size > 0 (%05d)!\n",
+ __FUNCTION__, msg, (int) dma->size));
- for (i = 0; i < sc->num_queues; i++) {
- fp = &sc->fp[i];
- /* Trust no one! */
- if (fp) {
- /* Free, unmap, and destroy the status block. */
- if (fp->status_block_tag != NULL) {
- if (fp->status_block_map != NULL) {
- if (fp->status_block != NULL)
- bus_dmamem_free(
- fp->status_block_tag,
- fp->status_block,
- fp->status_block_map);
-
- bus_dmamap_unload(
- fp->status_block_tag,
- fp->status_block_map);
- bus_dmamap_destroy(
- fp->status_block_tag,
- fp->status_block_map);
- }
+ rc = bus_dma_tag_create(
+ sc->parent_tag, /* parent */
+ BCM_PAGE_SIZE, /* alignment for segs */
+ BXE_DMA_BOUNDARY, /* cannot cross */
+ BUS_SPACE_MAXADDR, /* restricted low */
+ BUS_SPACE_MAXADDR, /* restricted hi */
+ NULL, NULL, /* filter f(), arg */
+ size, /* max size for this tag */
+ 1, /* # of discontinuities */
+ size, /* max seg size */
+ BUS_DMA_ALLOCNOW, /* flags */
+ NULL, NULL, /* lock f(), arg */
+ &dma->tag);
- bus_dma_tag_destroy(
- fp->status_block_tag);
- }
+ if (rc != 0) {
+ BXE_PRINTF("%s(%d): bus_dma_tag_create() "
+ "failed (rc = %d) for %s!\n",
+ __FILE__, __LINE__, rc, msg);
+ goto bxe_dma_malloc_fail_create;
+ }
- /*
- * Free, unmap and destroy all TX BD
- * chain pages.
- */
- if (fp->tx_bd_chain_tag != NULL) {
- for (j = 0; j < NUM_TX_PAGES; j++ ) {
- if (fp->tx_bd_chain_map[j] != NULL) {
- if (fp->tx_bd_chain[j] != NULL)
- bus_dmamem_free(fp->tx_bd_chain_tag,
- fp->tx_bd_chain[j],
- fp->tx_bd_chain_map[j]);
-
- bus_dmamap_unload(fp->tx_bd_chain_tag,
- fp->tx_bd_chain_map[j]);
- bus_dmamap_destroy(fp->tx_bd_chain_tag,
- fp->tx_bd_chain_map[j]);
- }
- }
+ rc = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
+ BUS_DMA_NOWAIT, &dma->map);
+ if (rc != 0) {
+ BXE_PRINTF("%s(%d): bus_dmamem_alloc() "
+ "failed (rc = %d) for %s!\n",
+ __FILE__, __LINE__, rc, msg);
+ goto bxe_dma_malloc_fail_alloc;
+ }
- bus_dma_tag_destroy(fp->tx_bd_chain_tag);
- }
+ rc = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
+ bxe_dma_map_addr, &dma->paddr, mapflags | BUS_DMA_NOWAIT);
+ if (rc != 0) {
+ BXE_PRINTF("%s(%d): bus_dmamap_load() "
+ "failed (rc = %d) for %s!\n",
+ __FILE__, __LINE__, rc, msg);
+ goto bxe_dma_malloc_fail_load;
+ }
- /* Free, unmap and destroy all RX BD chain pages. */
- if (fp->rx_bd_chain_tag != NULL) {
-
- for (j = 0; j < NUM_RX_PAGES; j++ ) {
- if (fp->rx_bd_chain_map[j] != NULL) {
- if (fp->rx_bd_chain[j] != NULL)
- bus_dmamem_free(fp->rx_bd_chain_tag,
- fp->rx_bd_chain[j],
- fp->rx_bd_chain_map[j]);
-
- bus_dmamap_unload(fp->rx_bd_chain_tag,
- fp->rx_bd_chain_map[j]);
- bus_dmamap_destroy(fp->rx_bd_chain_tag,
- fp->rx_bd_chain_map[j]);
- }
- }
+ dma->size = size;
- bus_dma_tag_destroy(fp->rx_bd_chain_tag);
- }
+ DBPRINT(sc, BXE_VERBOSE, "%s(): size=%06d, vaddr=%p, "
+ "paddr=0x%jX - %s\n", __FUNCTION__, (int) dma->size,
+ dma->vaddr, (uintmax_t) dma->paddr, msg);
- /*
- * Free, unmap and destroy all RX CQ
- * chain pages.
- */
- if (fp->rx_cq_chain_tag != NULL) {
- for (j = 0; j < NUM_RCQ_PAGES; j++ ) {
- if (fp->rx_cq_chain_map[j] != NULL) {
- if (fp->rx_cq_chain[j] != NULL)
- bus_dmamem_free(fp->rx_cq_chain_tag,
- fp->rx_cq_chain[j],
- fp->rx_cq_chain_map[j]);
-
- bus_dmamap_unload(fp->rx_cq_chain_tag,
- fp->rx_cq_chain_map[j]);
- bus_dmamap_destroy(fp->rx_cq_chain_tag,
- fp->rx_cq_chain_map[j]);
- }
- }
+ goto bxe_dma_malloc_exit;
- bus_dma_tag_destroy(fp->rx_cq_chain_tag);
- }
+bxe_dma_malloc_fail_load:
+ bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
- /* Unload and destroy the TX mbuf maps. */
- if (fp->tx_mbuf_tag != NULL) {
- for (j = 0; j < TOTAL_TX_BD; j++) {
- if (fp->tx_mbuf_map[j] != NULL) {
- bus_dmamap_unload(fp->tx_mbuf_tag,
- fp->tx_mbuf_map[j]);
- bus_dmamap_destroy(fp->tx_mbuf_tag,
- fp->tx_mbuf_map[j]);
- }
- }
+bxe_dma_malloc_fail_alloc:
+ bus_dma_tag_destroy(dma->tag);
+ dma->vaddr = NULL;
- bus_dma_tag_destroy(fp->tx_mbuf_tag);
- }
+bxe_dma_malloc_fail_create:
+ dma->map = NULL;
+ dma->tag = NULL;
+ dma->size = 0;
+bxe_dma_malloc_exit:
+ DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
+ return (rc);
+}
- if (TPA_ENABLED(sc)) {
- int tpa_pool_max = CHIP_IS_E1H(sc) ?
- ETH_MAX_AGGREGATION_QUEUES_E1H :
- ETH_MAX_AGGREGATION_QUEUES_E1;
+/*
+ * Release a block of DMA memory and its associated tag/map.
+ *
+ * Returns:
+ * None
+ */
+static void
+bxe_dma_free(struct bxe_softc *sc, struct bxe_dma *dma)
+{
+ DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_UNLOAD);
- /* Unload and destroy the TPA pool mbuf maps. */
- if (fp->rx_mbuf_tag != NULL) {
+ if (dma->size > 0) {
+ bus_dmamap_sync(dma->tag, dma->map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(dma->tag, dma->map);
+ bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
+ bus_dma_tag_destroy(dma->tag);
+ dma->size = 0;
+ }
- for (j = 0; j < tpa_pool_max; j++) {
+ DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_UNLOAD);
+}
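Taken together, bxe_dma_malloc() and bxe_dma_free() form an all-or-nothing pair, so a caller needs only one return-code check and one unconditional free. A minimal usage sketch follows; the struct bxe_dma field names are assumed from the "Modifies:" list above, and the helper name is hypothetical.

    /* Hypothetical caller showing the intended bxe_dma_malloc() protocol. */
    static int
    example_dma_block(struct bxe_softc *sc, struct bxe_dma *dma)
    {
        int rc;

        rc = bxe_dma_malloc(sc, BCM_PAGE_SIZE, dma, BUS_DMA_NOWAIT,
            "example buffer");
        if (rc != 0)
            return (rc);    /* Nothing to unwind; dma->size is 0. */

        /* ... program dma->paddr into the device, use dma->vaddr ... */

        bxe_dma_free(sc, dma);  /* Safe to call again; guarded by dma->size. */
        return (0);
    }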
- if (fp->tpa_mbuf_map[j] != NULL) {
- bus_dmamap_unload(fp->rx_mbuf_tag,
- fp->tpa_mbuf_map[j]);
- bus_dmamap_destroy(fp->rx_mbuf_tag,
- fp->tpa_mbuf_map[j]);
- }
- }
- }
+/*
+ * Free any DMA memory owned by the driver.
+ *
+ * Scans through each data structure that requires DMA memory and frees
+ * the memory if allocated.
+ *
+ * Returns:
+ * Nothing.
+ */
+static void
+bxe_host_structures_free(struct bxe_softc *sc)
+{
+ struct bxe_fastpath *fp;
+ int i, j, max_agg_queues;
- /* Free, unmap and destroy all RX SGE chain pages. */
- if (fp->rx_sge_chain_tag != NULL) {
- for (j = 0; j < NUM_RX_SGE_PAGES; j++ ) {
- if (fp->rx_sge_chain_map[j] != NULL) {
- if (fp->rx_sge_chain[j] != NULL)
- bus_dmamem_free(fp->rx_sge_chain_tag,
- fp->rx_sge_chain[j],
- fp->rx_sge_chain_map[j]);
-
- bus_dmamap_unload(fp->rx_sge_chain_tag,
- fp->rx_sge_chain_map[j]);
- bus_dmamap_destroy(fp->rx_sge_chain_tag,
- fp->rx_sge_chain_map[j]);
- }
- }
-
- bus_dma_tag_destroy(fp->rx_sge_chain_tag);
- }
+ DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
+ max_agg_queues = CHIP_IS_E1H(sc) ?
+ ETH_MAX_AGGREGATION_QUEUES_E1H :
+ ETH_MAX_AGGREGATION_QUEUES_E1;
- /* Unload and destroy the SGE Buf maps. */
- if (fp->rx_sge_buf_tag != NULL) {
+ if (sc->parent_tag == NULL)
+ goto bxe_host_structures_free_exit;
- for (j = 0; j < TOTAL_RX_SGE; j++) {
- if (fp->rx_sge_buf_map[j] != NULL) {
- bus_dmamap_unload(fp->rx_sge_buf_tag,
- fp->rx_sge_buf_map[j]);
- bus_dmamap_destroy(fp->rx_sge_buf_tag,
- fp->rx_sge_buf_map[j]);
- }
- }
+ for (i = 0; i < sc->num_queues; i++) {
+ fp = &sc->fp[i];
- bus_dma_tag_destroy(fp->rx_sge_buf_tag);
- }
- }
+ /* Trust no one! */
+ if (fp == NULL)
+ break;
- /* Unload and destroy the RX mbuf maps. */
- if (fp->rx_mbuf_tag != NULL) {
- for (j = 0; j < TOTAL_RX_BD; j++) {
- if (fp->rx_mbuf_map[j] != NULL) {
- bus_dmamap_unload(fp->rx_mbuf_tag,
- fp->rx_mbuf_map[j]);
- bus_dmamap_destroy(fp->rx_mbuf_tag,
- fp->rx_mbuf_map[j]);
- }
- }
+ /* Status block. */
+ bxe_dma_free(sc, &fp->sb_dma);
- bus_dma_tag_destroy(fp->rx_mbuf_tag);
- }
+ /* TX chain. */
+ bxe_dma_free(sc, &fp->tx_dma);
+ fp->tx_chain = NULL;
- }
- }
+ /* RX chain */
+ bxe_dma_free(sc, &fp->rx_dma);
+ fp->rx_chain = NULL;
- /* Destroy the def_status block. */
- if (sc->def_status_block_tag != NULL) {
- if (sc->def_status_block_map != NULL) {
- if (sc->def_status_block != NULL)
- bus_dmamem_free(
- sc->def_status_block_tag,
- sc->def_status_block,
- sc->def_status_block_map);
-
- bus_dmamap_unload(sc->def_status_block_tag,
- sc->def_status_block_map);
- bus_dmamap_destroy(sc->def_status_block_tag,
- sc->def_status_block_map);
- }
+ /* RCQ chain */
+ bxe_dma_free(sc, &fp->rcq_dma);
+ fp->rcq_chain = NULL;
- bus_dma_tag_destroy(sc->def_status_block_tag);
- }
+ /* SG chain */
+ bxe_dma_free(sc, &fp->sg_dma);
+ fp->sg_chain = NULL;
- /* Destroy the statistics block. */
- if (sc->stats_tag != NULL) {
- if (sc->stats_map != NULL) {
- if (sc->stats_block != NULL)
- bus_dmamem_free(sc->stats_tag,
- sc->stats_block, sc->stats_map);
- bus_dmamap_unload(sc->stats_tag, sc->stats_map);
- bus_dmamap_destroy(sc->stats_tag,
- sc->stats_map);
+ /* Unload and destroy the TX mbuf maps. */
+ if (fp->tx_mbuf_tag != NULL) {
+ for (j = 0; j < TOTAL_TX_BD; j++) {
+ if (fp->tx_mbuf_map[j] != NULL) {
+ bus_dmamap_unload(
+ fp->tx_mbuf_tag,
+ fp->tx_mbuf_map[j]);
+ bus_dmamap_destroy(
+ fp->tx_mbuf_tag,
+ fp->tx_mbuf_map[j]);
+ }
}
- bus_dma_tag_destroy(sc->stats_tag);
+ bus_dma_tag_destroy(fp->tx_mbuf_tag);
}
- /* Destroy the Slow Path block. */
- if (sc->slowpath_tag != NULL) {
- if (sc->slowpath_map != NULL) {
- if (sc->slowpath != NULL)
- bus_dmamem_free(sc->slowpath_tag,
- sc->slowpath, sc->slowpath_map);
-
- bus_dmamap_unload(sc->slowpath_tag,
- sc->slowpath_map);
- bus_dmamap_destroy(sc->slowpath_tag,
- sc->slowpath_map);
+ /* Unload and destroy the TPA pool mbuf maps. */
+ if (fp->rx_mbuf_tag != NULL) {
+ if (fp->tpa_mbuf_spare_map != NULL) {
+ bus_dmamap_unload(
+ fp->rx_mbuf_tag,
+ fp->tpa_mbuf_spare_map);
+ bus_dmamap_destroy(
+ fp->rx_mbuf_tag,
+ fp->tpa_mbuf_spare_map);
}
- bus_dma_tag_destroy(sc->slowpath_tag);
+ for (j = 0; j < max_agg_queues; j++) {
+ if (fp->tpa_mbuf_map[j] != NULL) {
+ bus_dmamap_unload(
+ fp->rx_mbuf_tag,
+ fp->tpa_mbuf_map[j]);
+ bus_dmamap_destroy(
+ fp->rx_mbuf_tag,
+ fp->tpa_mbuf_map[j]);
+ }
+ }
}
- /* Destroy the Slow Path Ring. */
- if (sc->spq_tag != NULL) {
- if (sc->spq_map != NULL) {
- if (sc->spq != NULL)
- bus_dmamem_free(sc->spq_tag, sc->spq,
- sc->spq_map);
+ /* Unload and destroy the SGE Buf maps. */
+ if (fp->rx_sge_buf_tag != NULL) {
+ if (fp->rx_sge_spare_map != NULL) {
+ bus_dmamap_unload(
+ fp->rx_sge_buf_tag,
+ fp->rx_sge_spare_map);
+ bus_dmamap_destroy(
+ fp->rx_sge_buf_tag,
+ fp->rx_sge_spare_map);
+ }
- bus_dmamap_unload(sc->spq_tag, sc->spq_map);
- bus_dmamap_destroy(sc->spq_tag, sc->spq_map);
+ for (j = 0; j < TOTAL_RX_SGE; j++) {
+ if (fp->rx_sge_buf_map[j] != NULL) {
+ bus_dmamap_unload(
+ fp->rx_sge_buf_tag,
+ fp->rx_sge_buf_map[j]);
+ bus_dmamap_destroy(
+ fp->rx_sge_buf_tag,
+ fp->rx_sge_buf_map[j]);
+ }
}
- bus_dma_tag_destroy(sc->spq_tag);
+ bus_dma_tag_destroy(fp->rx_sge_buf_tag);
}
+ /* Unload and destroy the RX mbuf maps. */
+ if (fp->rx_mbuf_tag != NULL) {
+ if (fp->rx_mbuf_spare_map != NULL) {
+ bus_dmamap_unload(fp->rx_mbuf_tag,
+ fp->rx_mbuf_spare_map);
+ bus_dmamap_destroy(fp->rx_mbuf_tag,
+ fp->rx_mbuf_spare_map);
+ }
- free(sc->strm, M_DEVBUF);
- sc->strm = NULL;
-
- if (sc->gunzip_tag != NULL) {
- if (sc->gunzip_map != NULL) {
- if (sc->gunzip_buf != NULL)
- bus_dmamem_free(sc->gunzip_tag,
- sc->gunzip_buf, sc->gunzip_map);
-
- bus_dmamap_unload(sc->gunzip_tag,
- sc->gunzip_map);
- bus_dmamap_destroy(sc->gunzip_tag,
- sc->gunzip_map);
+ for (j = 0; j < TOTAL_RX_BD; j++) {
+ if (fp->rx_mbuf_map[j] != NULL) {
+ bus_dmamap_unload(
+ fp->rx_mbuf_tag,
+ fp->rx_mbuf_map[j]);
+ bus_dmamap_destroy(
+ fp->rx_mbuf_tag,
+ fp->rx_mbuf_map[j]);
+ }
}
- bus_dma_tag_destroy(sc->gunzip_tag);
+ bus_dma_tag_destroy(fp->rx_mbuf_tag);
}
-
- bus_dma_tag_destroy(sc->parent_tag);
}
- DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
-}
+ /* Destroy the default status block. */
+ bxe_dma_free(sc, &sc->def_sb_dma);
+ sc->def_sb = NULL;
-/*
- * Free paged pool memory maps and tags.
- *
- * Returns:
- * Nothing.
- */
-
-static void
-bxe_dmamem_free(struct bxe_softc *sc, bus_dma_tag_t tag, caddr_t buf,
- bus_dmamap_t map)
-{
+ /* Destroy the statistics block. */
+ bxe_dma_free(sc, &sc->stats_dma);
+ sc->stats = NULL;
- DBENTER(BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
-
- if (tag) {
- if (sc->gunzip_buf != NULL)
- bus_dmamem_free(tag, buf, map);
+ /* Destroy the slowpath block. */
+ bxe_dma_free(sc, &sc->slowpath_dma);
+ sc->slowpath = NULL;
- if (map != NULL) {
- bus_dmamap_unload(tag, map);
- bus_dmamap_destroy(tag, map);
- }
-
- if (tag != NULL)
- bus_dma_tag_destroy(tag);
- }
+ /* Destroy the slowpath queue. */
+ bxe_dma_free(sc, &sc->spq_dma);
+ sc->spq = NULL;
+ /* Destroy the firmware decompression buffer. */
+ bxe_dma_free(sc, &sc->gz_dma);
+ sc->gz = NULL;
+ free(sc->strm, M_DEVBUF);
+ sc->strm = NULL;
- DBEXIT(BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
+bxe_host_structures_free_exit:
+ DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
}
/*
@@ -13575,31 +13564,30 @@ bxe_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
/*
* Allocate any non-paged DMA memory needed by the driver.
*
- * Allocates DMA memory needed for the various global structures which are
- * read or written by the hardware.
- *
* Returns:
* 0 = Success, !0 = Failure.
*/
static int
-bxe_dma_alloc(device_t dev)
+bxe_host_structures_alloc(device_t dev)
{
struct bxe_softc *sc;
struct bxe_fastpath *fp;
- int error, rc;
+ int rc, max_agg_queues;
bus_addr_t busaddr;
bus_size_t max_size, max_seg_size;
int i, j, max_segments;
sc = device_get_softc(dev);
- rc = 0;
-
DBENTER(BXE_VERBOSE_RESET);
+ rc = 0;
+ max_agg_queues = CHIP_IS_E1H(sc) ?
+     ETH_MAX_AGGREGATION_QUEUES_E1H :
+     ETH_MAX_AGGREGATION_QUEUES_E1;
/*
* Allocate the parent bus DMA tag appropriate for PCI.
*/
- if (bus_dma_tag_create(NULL, /* parent tag */
+ rc = bus_dma_tag_create(NULL, /* parent tag */
1, /* alignment for segs */
BXE_DMA_BOUNDARY, /* cannot cross */
BUS_SPACE_MAXADDR, /* restricted low */
@@ -13612,136 +13600,112 @@ bxe_dma_alloc(device_t dev)
0, /* flags */
NULL, /* lock f() */
NULL, /* lock f() arg */
- &sc->parent_tag) /* dma tag */
- ) {
+ &sc->parent_tag); /* dma tag */
+ if (rc != 0) {
BXE_PRINTF("%s(%d): Could not allocate parent DMA tag!\n",
__FILE__, __LINE__);
rc = ENOMEM;
- goto bxe_dma_alloc_exit;
+ goto bxe_host_structures_alloc_exit;
}
/* Allocate DMA memory for each fastpath structure. */
for (i = 0; i < sc->num_queues; i++) {
fp = &sc->fp[i];
- DBPRINT(sc, (BXE_EXTREME_LOAD | BXE_EXTREME_RESET),
- "%s(): fp[%d] virtual address = %p, size = %lu\n",
- __FUNCTION__, i, fp,
- (long unsigned int)sizeof(struct bxe_fastpath));
/*
- * Create a DMA tag for the status block, allocate and
- * clear the memory, map the memory into DMA space, and
- * fetch the physical address of the block.
- */
+ * Allocate status block.
+ */
+ rc = bxe_dma_malloc(sc, BXE_STATUS_BLK_SZ,
+ &fp->sb_dma, BUS_DMA_NOWAIT, "fp status block");
+ /* ToDo: Only using 32 bytes out of 4KB allocation! */
+ if (rc != 0)
+ goto bxe_host_structures_alloc_exit;
+ fp->status_block =
+ (struct host_status_block *) fp->sb_dma.vaddr;
- if (bus_dma_tag_create(sc->parent_tag,
- BCM_PAGE_SIZE, /* alignment for segs */
- BXE_DMA_BOUNDARY, /* cannot cross */
- BUS_SPACE_MAXADDR, /* restricted low */
- BUS_SPACE_MAXADDR, /* restricted hi */
- NULL, /* filter f() */
- NULL, /* filter f() arg */
- BXE_STATUS_BLK_SZ, /* max map for this tag */
- 1, /* # of discontinuities */
- BXE_STATUS_BLK_SZ, /* max seg size */
- 0, /* flags */
- NULL, /* lock f() */
- NULL, /* lock f() arg */
- &fp->status_block_tag)) {
- BXE_PRINTF(
- "%s(%d): Could not allocate fp[%d] status block DMA tag!\n",
- __FILE__, __LINE__, i);
- rc = ENOMEM;
- goto bxe_dma_alloc_exit;
- }
-
- if (bus_dmamem_alloc(fp->status_block_tag,
- (void **)&fp->status_block, BUS_DMA_NOWAIT,
- &fp->status_block_map)) {
- BXE_PRINTF(
- "%s(%d): Could not allocate fp[%d] status block DMA memory!\n",
- __FILE__, __LINE__, i);
- rc = ENOMEM;
- goto bxe_dma_alloc_exit;
+ /*
+ * Allocate TX chain.
+ */
+ rc = bxe_dma_malloc(sc, BXE_TX_CHAIN_PAGE_SZ *
+ NUM_TX_PAGES, &fp->tx_dma, BUS_DMA_NOWAIT,
+ "tx chain pages");
+ if (rc != 0)
+ goto bxe_host_structures_alloc_exit;
+ fp->tx_chain = (union eth_tx_bd_types *) fp->tx_dma.vaddr;
+
+ /* Link the TX chain pages. */
+ for (j = 1; j <= NUM_TX_PAGES; j++) {
+ struct eth_tx_next_bd *tx_n_bd =
+ &fp->tx_chain[TOTAL_TX_BD_PER_PAGE * j - 1].next_bd;
+
+ busaddr = fp->tx_dma.paddr +
+ BCM_PAGE_SIZE * (j % NUM_TX_PAGES);
+ tx_n_bd->addr_hi = htole32(U64_HI(busaddr));
+ tx_n_bd->addr_lo = htole32(U64_LO(busaddr));
}
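The modulo above is what closes the ring: with j running from 1 to NUM_TX_PAGES, the next-BD of page j-1 receives the bus address of page (j % NUM_TX_PAGES), so each page points to its successor and the final page wraps back to page 0. A hypothetical helper states the same math explicitly; the driver inlines it here and in the RX, RCQ, and SGE loops below.

    /* Bus address of the page that 0-based page 'cur' links to. */
    static inline bus_addr_t
    chain_next_page_paddr(bus_addr_t base, int cur, int npages)
    {
        return (base + BCM_PAGE_SIZE * ((cur + 1) % npages));
    }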
- bzero((char *)fp->status_block, BXE_STATUS_BLK_SZ);
-
- error = bus_dmamap_load(fp->status_block_tag,
- fp->status_block_map, fp->status_block, BXE_STATUS_BLK_SZ,
- bxe_dma_map_addr, &busaddr, BUS_DMA_NOWAIT);
-
- if (error) {
- BXE_PRINTF(
- "%s(%d): Could not map fp[%d] status block DMA memory!\n",
- __FILE__, __LINE__, i);
- rc = ENOMEM;
- goto bxe_dma_alloc_exit;
+ /*
+ * Allocate RX chain.
+ */
+ rc = bxe_dma_malloc(sc, BXE_RX_CHAIN_PAGE_SZ *
+ NUM_RX_PAGES, &fp->rx_dma, BUS_DMA_NOWAIT,
+ "rx chain pages");
+ if (rc != 0)
+ goto bxe_host_structures_alloc_exit;
+ fp->rx_chain = (struct eth_rx_bd *) fp->rx_dma.vaddr;
+
+ /* Link the RX chain pages. */
+ for (j = 1; j <= NUM_RX_PAGES; j++) {
+ struct eth_rx_bd *rx_bd =
+ &fp->rx_chain[TOTAL_RX_BD_PER_PAGE * j - 2];
+
+ busaddr = fp->rx_dma.paddr +
+ BCM_PAGE_SIZE * (j % NUM_RX_PAGES);
+ rx_bd->addr_hi = htole32(U64_HI(busaddr));
+ rx_bd->addr_lo = htole32(U64_LO(busaddr));
}
- /* Physical address of Status Block */
- fp->status_block_paddr = busaddr;
- DBPRINT(sc, (BXE_EXTREME_LOAD | BXE_EXTREME_RESET),
- "%s(): fp[%d] status block physical address = 0x%jX\n",
- __FUNCTION__, i, (uintmax_t) fp->status_block_paddr);
-
/*
- * Create a DMA tag for the TX buffer descriptor chain,
- * allocate and clear the memory, and fetch the
- * physical address of the block.
+ * Allocate CQ chain.
*/
- if (bus_dma_tag_create(sc->parent_tag,
- BCM_PAGE_SIZE, /* alignment for segs */
- BXE_DMA_BOUNDARY, /* cannot cross */
- BUS_SPACE_MAXADDR, /* restricted low */
- BUS_SPACE_MAXADDR, /* restricted hi */
- NULL, /* filter f() */
- NULL, /* filter f() arg */
- BXE_TX_CHAIN_PAGE_SZ,/* max map for this tag */
- 1, /* # of discontinuities */
- BXE_TX_CHAIN_PAGE_SZ,/* max seg size */
- 0, /* flags */
- NULL, /* lock f() */
- NULL, /* lock f() arg */
- &fp->tx_bd_chain_tag)) {
- BXE_PRINTF(
- "%s(%d): Could not allocate fp[%d] TX descriptor chain DMA tag!\n",
- __FILE__, __LINE__, i);
- rc = ENOMEM;
- goto bxe_dma_alloc_exit;
+ rc = bxe_dma_malloc(sc, BXE_RX_CHAIN_PAGE_SZ *
+ NUM_RCQ_PAGES, &fp->rcq_dma, BUS_DMA_NOWAIT,
+ "rcq chain pages");
+ if (rc != 0)
+ goto bxe_host_structures_alloc_exit;
+ fp->rcq_chain = (union eth_rx_cqe *) fp->rcq_dma.vaddr;
+
+ /* Link the CQ chain pages. */
+ for (j = 1; j <= NUM_RCQ_PAGES; j++) {
+ struct eth_rx_cqe_next_page *nextpg =
+ (struct eth_rx_cqe_next_page *)
+ &fp->rcq_chain[TOTAL_RCQ_ENTRIES_PER_PAGE * j - 1];
+
+ busaddr = fp->rcq_dma.paddr +
+ BCM_PAGE_SIZE * (j % NUM_RCQ_PAGES);
+ nextpg->addr_hi = htole32(U64_HI(busaddr));
+ nextpg->addr_lo = htole32(U64_LO(busaddr));
}
- for (j = 0; j < NUM_TX_PAGES; j++) {
- if (bus_dmamem_alloc(fp->tx_bd_chain_tag,
- (void **)&fp->tx_bd_chain[j], BUS_DMA_NOWAIT,
- &fp->tx_bd_chain_map[j])) {
- BXE_PRINTF(
- "%s(%d): Could not allocate fp[%d] TX descriptor chain DMA memory!\n",
- __FILE__, __LINE__, i);
- rc = ENOMEM;
- goto bxe_dma_alloc_exit;
- }
-
- bzero((char *)fp->tx_bd_chain[j], BXE_TX_CHAIN_PAGE_SZ);
-
- error = bus_dmamap_load(fp->tx_bd_chain_tag,
- fp->tx_bd_chain_map[j], fp->tx_bd_chain[j],
- BXE_TX_CHAIN_PAGE_SZ, bxe_dma_map_addr,
- &busaddr, BUS_DMA_NOWAIT);
-
- if (error) {
- BXE_PRINTF(
- "%s(%d): Could not map fp[%d] TX descriptor chain DMA memory!\n",
- __FILE__, __LINE__, i);
- rc = ENOMEM;
- goto bxe_dma_alloc_exit;
- }
-
- /* Physical Address of each page in the Tx BD Chain. */
- fp->tx_bd_chain_paddr[j] = busaddr;
- DBPRINT(sc, (BXE_EXTREME_LOAD | BXE_EXTREME_RESET),
- "%s(): fp[%d]->tx_bd_chain_paddr[%d] = 0x%jX\n",
- __FUNCTION__, i, j, (uintmax_t)busaddr);
+ /*
+ * Allocate SG chain.
+ */
+ rc = bxe_dma_malloc(sc, BXE_RX_CHAIN_PAGE_SZ *
+ NUM_RX_SGE_PAGES, &fp->sg_dma, BUS_DMA_NOWAIT,
+ "sg chain pages");
+ if (rc != 0)
+ goto bxe_host_structures_alloc_exit;
+ fp->sg_chain = (struct eth_rx_sge *) fp->sg_dma.vaddr;
+
+ /* Link the SG chain pages. */
+ for (j = 1; j <= NUM_RX_SGE_PAGES; j++) {
+ struct eth_rx_sge *nextpg =
+ &fp->sg_chain[TOTAL_RX_SGE_PER_PAGE * j - 2];
+
+ busaddr = fp->sg_dma.paddr +
+ BCM_PAGE_SIZE * (j % NUM_RX_SGE_PAGES);
+ nextpg->addr_hi = htole32(U64_HI(busaddr));
+ nextpg->addr_lo = htole32(U64_LO(busaddr));
}
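One asymmetry across the four linking loops above: TX and RCQ write their link into the last element of each page (index TOTAL * j - 1), while RX and SGE reserve the final two elements and write the link at index TOTAL * j - 2. A layout sketch, with the per-page counts assumed to come from the driver header:

    /*
     * Assumed page layout (exact counts are defined in if_bxe.h):
     *   TX/RCQ page: [0 .. TOTAL-2] usable, [TOTAL-1] next-page element
     *   RX/SGE page: [0 .. TOTAL-3] usable, [TOTAL-2 .. TOTAL-1] reserved,
     *                with the next-page pointer written at TOTAL-2
     */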
/*
@@ -13773,84 +13737,25 @@ bxe_dma_alloc(device_t dev)
NULL, /* lock f() arg */
&fp->tx_mbuf_tag)) {
BXE_PRINTF(
- "%s(%d): Could not allocate fp[%d] TX mbuf DMA tag!\n",
+ "%s(%d): Could not allocate fp[%d] "
+ "TX mbuf DMA tag!\n",
__FILE__, __LINE__, i);
rc = ENOMEM;
- goto bxe_dma_alloc_exit;
+ goto bxe_host_structures_alloc_exit;
}
/* Create DMA maps for each the TX mbuf cluster(ext buf). */
for (j = 0; j < TOTAL_TX_BD; j++) {
if (bus_dmamap_create(fp->tx_mbuf_tag,
BUS_DMA_NOWAIT,
- &(fp->tx_mbuf_map[j]))) {
- BXE_PRINTF(
- "%s(%d): Unable to create fp[%d] TX mbuf DMA map!\n",
- __FILE__, __LINE__, i);
- rc = ENOMEM;
- goto bxe_dma_alloc_exit;
- }
- }
-
- /*
- * Create a DMA tag for the RX buffer
- * descriptor chain, allocate and clear
- * the memory, and fetch the physical
- * address of the blocks.
- */
- if (bus_dma_tag_create(sc->parent_tag,
- BCM_PAGE_SIZE, /* alignment for segs */
- BXE_DMA_BOUNDARY, /* cannot cross */
- BUS_SPACE_MAXADDR, /* restricted low */
- BUS_SPACE_MAXADDR, /* restricted hi */
- NULL, /* filter f() */
- NULL, /* filter f() arg */
- BXE_RX_CHAIN_PAGE_SZ,/* max map for this tag */
- 1, /* # of discontinuities */
- BXE_RX_CHAIN_PAGE_SZ,/* max seg size */
- 0, /* flags */
- NULL, /* lock f() */
- NULL, /* lock f() arg */
- &fp->rx_bd_chain_tag)) {
- BXE_PRINTF(
- "%s(%d): Could not allocate fp[%d] RX BD chain DMA tag!\n",
- __FILE__, __LINE__, i);
- rc = ENOMEM;
- goto bxe_dma_alloc_exit;
- }
-
- for (j = 0; j < NUM_RX_PAGES; j++) {
- if (bus_dmamem_alloc(fp->rx_bd_chain_tag,
- (void **)&fp->rx_bd_chain[j], BUS_DMA_NOWAIT,
- &fp->rx_bd_chain_map[j])) {
+ &fp->tx_mbuf_map[j])) {
BXE_PRINTF(
- "%s(%d): Could not allocate fp[%d] RX BD chain[%d] DMA memory!\n",
+ "%s(%d): Unable to create fp[%02d]."
+ "tx_mbuf_map[%d] DMA map!\n",
__FILE__, __LINE__, i, j);
rc = ENOMEM;
- goto bxe_dma_alloc_exit;
+ goto bxe_host_structures_alloc_exit;
}
-
- bzero((char *)fp->rx_bd_chain[j], BXE_RX_CHAIN_PAGE_SZ);
-
- error = bus_dmamap_load(fp->rx_bd_chain_tag,
- fp->rx_bd_chain_map[j], fp->rx_bd_chain[j],
- BXE_RX_CHAIN_PAGE_SZ, bxe_dma_map_addr, &busaddr,
- BUS_DMA_NOWAIT);
-
- if (error) {
- BXE_PRINTF(
- "%s(%d): Could not map fp[%d] RX BD chain[%d] DMA memory!\n",
- __FILE__, __LINE__, i, j);
- rc = ENOMEM;
- goto bxe_dma_alloc_exit;
- }
-
- /* Physical address of each page in the RX BD chain */
- fp->rx_bd_chain_paddr[j] = busaddr;
-
- DBPRINT(sc, (BXE_EXTREME_LOAD | BXE_EXTREME_RESET),
- "%s(): fp[%d]->rx_bd_chain_paddr[%d] = 0x%jX\n",
- __FUNCTION__, i, j, (uintmax_t)busaddr);
}
/*
@@ -13871,431 +13776,152 @@ bxe_dma_alloc(device_t dev)
NULL, /* lock f() arg */
&fp->rx_mbuf_tag)) {
BXE_PRINTF(
- "%s(%d): Could not allocate fp[%d] RX mbuf DMA tag!\n",
+ "%s(%d): Could not allocate fp[%02d] "
+ "RX mbuf DMA tag!\n",
__FILE__, __LINE__, i);
rc = ENOMEM;
- goto bxe_dma_alloc_exit;
+ goto bxe_host_structures_alloc_exit;
}
/* Create DMA maps for the RX mbuf clusters. */
+ if (bus_dmamap_create(fp->rx_mbuf_tag,
+ BUS_DMA_NOWAIT, &fp->rx_mbuf_spare_map)) {
+ BXE_PRINTF(
+ "%s(%d): Unable to create fp[%02d]."
+ "rx_mbuf_spare_map DMA map!\n",
+ __FILE__, __LINE__, i);
+ rc = ENOMEM;
+ goto bxe_host_structures_alloc_exit;
+ }
+
for (j = 0; j < TOTAL_RX_BD; j++) {
if (bus_dmamap_create(fp->rx_mbuf_tag,
- BUS_DMA_NOWAIT, &(fp->rx_mbuf_map[j]))) {
+ BUS_DMA_NOWAIT, &fp->rx_mbuf_map[j])) {
BXE_PRINTF(
- "%s(%d): Unable to create fp[%d] RX mbuf DMA map!\n",
- __FILE__, __LINE__, i);
+ "%s(%d): Unable to create fp[%02d]."
+ "rx_mbuf_map[%d] DMA map!\n",
+ __FILE__, __LINE__, i, j);
rc = ENOMEM;
- goto bxe_dma_alloc_exit;
+ goto bxe_host_structures_alloc_exit;
}
}
/*
- * Create a DMA tag for the RX Completion
- * Queue, allocate and clear the memory,
- * map the memory into DMA space, and fetch
- * the physical address of the block.
+ * Create a DMA tag for RX SGE bufs.
*/
- if (bus_dma_tag_create(sc->parent_tag,
- BCM_PAGE_SIZE, /* alignment for segs */
- BXE_DMA_BOUNDARY, /* cannot cross */
- BUS_SPACE_MAXADDR, /* restricted low */
- BUS_SPACE_MAXADDR, /* restricted hi */
- NULL, /* filter f() */
- NULL, /* filter f() arg */
- BXE_RX_CHAIN_PAGE_SZ,/* max map for this tag */
- 1, /* # of discontinuities */
- BXE_RX_CHAIN_PAGE_SZ,/* max seg size */
- 0, /* flags */
- NULL, /* lock f() */
- NULL, /* lock f() arg */
- &fp->rx_cq_chain_tag)) {
+ if (bus_dma_tag_create(sc->parent_tag, 1,
+ BXE_DMA_BOUNDARY, BUS_SPACE_MAXADDR,
+ BUS_SPACE_MAXADDR, NULL, NULL, PAGE_SIZE, 1,
+ PAGE_SIZE, 0, NULL, NULL, &fp->rx_sge_buf_tag)) {
BXE_PRINTF(
- "%s(%d): Could not allocate fp[%d] RX Completion Queue DMA tag!\n",
+ "%s(%d): Could not allocate fp[%02d] "
+ "RX SGE mbuf DMA tag!\n",
__FILE__, __LINE__, i);
rc = ENOMEM;
- goto bxe_dma_alloc_exit;
+ goto bxe_host_structures_alloc_exit;
}
- for (j = 0; j < NUM_RCQ_PAGES; j++) {
- if (bus_dmamem_alloc(fp->rx_cq_chain_tag,
- (void **)&fp->rx_cq_chain[j], BUS_DMA_NOWAIT,
- &fp->rx_cq_chain_map[j])) {
- BXE_PRINTF(
- "%s(%d): Could not allocate fp[%d] RX Completion Queue DMA memory!\n",
- __FILE__, __LINE__, i);
- rc = ENOMEM;
- goto bxe_dma_alloc_exit;
- }
-
- bzero((char *)fp->rx_cq_chain[j],
- BXE_RX_CHAIN_PAGE_SZ);
-
- error = bus_dmamap_load(fp->rx_cq_chain_tag,
- fp->rx_cq_chain_map[j], fp->rx_cq_chain[j],
- BXE_RX_CHAIN_PAGE_SZ, bxe_dma_map_addr, &busaddr,
- BUS_DMA_NOWAIT);
-
- if (error) {
- BXE_PRINTF(
- "%s(%d): Could not map fp[%d] RX Completion Queue DMA memory!\n",
- __FILE__, __LINE__, i);
- rc = ENOMEM;
- goto bxe_dma_alloc_exit;
- }
-
- /*
- * Physical address of each page in the RX
- * Completion Chain.
- */
- fp->rx_cq_chain_paddr[j] = busaddr;
-
- DBPRINT(sc, (BXE_EXTREME_LOAD | BXE_EXTREME_RESET),
- "%s(): fp[%d]->rx_cq_chain_paddr[%d] = 0x%jX\n",
- __FUNCTION__, i, j, (uintmax_t)busaddr);
+ /* Create DMA maps for the SGE mbuf clusters. */
+ if (bus_dmamap_create(fp->rx_sge_buf_tag,
+ BUS_DMA_NOWAIT, &fp->rx_sge_spare_map)) {
+ BXE_PRINTF(
+ "%s(%d): Unable to create fp[%02d]."
+ "rx_sge_spare_map DMA map!\n",
+ __FILE__, __LINE__, i);
+ rc = ENOMEM;
+ goto bxe_host_structures_alloc_exit;
}
- if (TPA_ENABLED(sc)) {
- int tpa_pool_max = CHIP_IS_E1H(sc) ?
- ETH_MAX_AGGREGATION_QUEUES_E1H :
- ETH_MAX_AGGREGATION_QUEUES_E1;
-
- /*
- * Create a DMA tag for the RX SGE Ring,
- * allocate and clear the memory, map the
- * memory into DMA space, and fetch the
- * physical address of the block.
- */
- if (bus_dma_tag_create(sc->parent_tag,
- BCM_PAGE_SIZE, /* alignment for segs */
- BXE_DMA_BOUNDARY, /* cannot cross */
- BUS_SPACE_MAXADDR, /* restricted low */
- BUS_SPACE_MAXADDR, /* restricted hi */
- NULL, /* filter f() */
- NULL, /* filter f() arg */
- BXE_RX_CHAIN_PAGE_SZ,/* max map for this tag */
- 1, /* # of discontinuities */
- BXE_RX_CHAIN_PAGE_SZ,/* max seg size */
- 0, /* flags */
- NULL, /* lock f() */
- NULL, /* lock f() arg */
- &fp->rx_sge_chain_tag)) {
+ for (j = 0; j < TOTAL_RX_SGE; j++) {
+ if (bus_dmamap_create(fp->rx_sge_buf_tag,
+ BUS_DMA_NOWAIT, &fp->rx_sge_buf_map[j])) {
BXE_PRINTF(
- "%s(%d): Could not allocate fp[%d] RX SGE descriptor chain DMA tag!\n",
- __FILE__, __LINE__, i);
+ "%s(%d): Unable to create fp[%02d]."
+ "rx_sge_buf_map[%d] DMA map!\n",
+ __FILE__, __LINE__, i, j);
rc = ENOMEM;
- goto bxe_dma_alloc_exit;
+ goto bxe_host_structures_alloc_exit;
}
+ }
- for (j = 0; j < NUM_RX_SGE_PAGES; j++) {
- if (bus_dmamem_alloc(fp->rx_sge_chain_tag,
- (void **)&fp->rx_sge_chain[j],
- BUS_DMA_NOWAIT, &fp->rx_sge_chain_map[j])) {
- BXE_PRINTF(
- "%s(%d): Could not allocate fp[%d] RX SGE chain[%d] DMA memory!\n",
- __FILE__, __LINE__, i, j);
- rc = ENOMEM;
- goto bxe_dma_alloc_exit;
- }
-
- bzero((char *)fp->rx_sge_chain[j],
- BXE_RX_CHAIN_PAGE_SZ);
-
- error = bus_dmamap_load(fp->rx_sge_chain_tag,
- fp->rx_sge_chain_map[j],
- fp->rx_sge_chain[j], BXE_RX_CHAIN_PAGE_SZ,
- bxe_dma_map_addr, &busaddr, BUS_DMA_NOWAIT);
-
- if (error) {
- BXE_PRINTF(
- "%s(%d): Could not map fp[%d] RX SGE chain[%d] DMA memory!\n",
- __FILE__, __LINE__, i, j);
- rc = ENOMEM;
- goto bxe_dma_alloc_exit;
- }
-
- /*
- * Physical address of each page in the RX
- * SGE chain.
- */
- DBPRINT(sc,
- (BXE_EXTREME_LOAD | BXE_EXTREME_RESET),
- "%s(): fp[%d]->rx_sge_chain_paddr[%d] = 0x%jX\n",
- __FUNCTION__, i, j, (uintmax_t)busaddr);
- fp->rx_sge_chain_paddr[j] = busaddr;
- }
+ /* Create DMA maps for the TPA pool mbufs. */
+ if (bus_dmamap_create(fp->rx_mbuf_tag,
+ BUS_DMA_NOWAIT, &fp->tpa_mbuf_spare_map)) {
+ BXE_PRINTF(
+ "%s(%d): Unable to create fp[%02d]."
+ "tpa_mbuf_spare_map DMA map!\n",
+ __FILE__, __LINE__, i);
+ rc = ENOMEM;
+ goto bxe_host_structures_alloc_exit;
+ }
- /*
- * Create a DMA tag for RX SGE bufs.
- */
- if (bus_dma_tag_create(sc->parent_tag, 1,
- BXE_DMA_BOUNDARY, BUS_SPACE_MAXADDR,
- BUS_SPACE_MAXADDR, NULL, NULL, PAGE_SIZE, 1,
- PAGE_SIZE, 0, NULL, NULL, &fp->rx_sge_buf_tag)) {
+ for (j = 0; j < max_agg_queues; j++) {
+ if (bus_dmamap_create(fp->rx_mbuf_tag,
+ BUS_DMA_NOWAIT, &fp->tpa_mbuf_map[j])) {
BXE_PRINTF(
- "%s(%d): Could not allocate fp[%d] RX SGE mbuf DMA tag!\n",
- __FILE__, __LINE__, i);
+ "%s(%d): Unable to create fp[%02d]."
+ "tpa_mbuf_map[%d] DMA map!\n",
+ __FILE__, __LINE__, i, j);
rc = ENOMEM;
- goto bxe_dma_alloc_exit;
- }
-
- /* Create DMA maps for the SGE mbuf clusters. */
- for (j = 0; j < TOTAL_RX_SGE; j++) {
- if (bus_dmamap_create(fp->rx_sge_buf_tag,
- BUS_DMA_NOWAIT, &(fp->rx_sge_buf_map[j]))) {
- BXE_PRINTF(
- "%s(%d): Unable to create fp[%d] RX SGE mbuf DMA map!\n",
- __FILE__, __LINE__, i);
- rc = ENOMEM;
- goto bxe_dma_alloc_exit;
- }
- }
-
- /* Create DMA maps for the TPA pool mbufs. */
- for (j = 0; j < tpa_pool_max; j++) {
- if (bus_dmamap_create(fp->rx_mbuf_tag,
- BUS_DMA_NOWAIT, &(fp->tpa_mbuf_map[j]))) {
- BXE_PRINTF(
- "%s(%d): Unable to create fp[%d] TPA DMA map!\n",
- __FILE__, __LINE__, i);
- rc = ENOMEM;
- goto bxe_dma_alloc_exit;
- }
+ goto bxe_host_structures_alloc_exit;
}
}
+
+ bxe_init_sge_ring_bit_mask(fp);
}
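Each mbuf tag above also receives a single "spare" map (rx_mbuf_spare_map, rx_sge_spare_map, tpa_mbuf_spare_map) alongside the per-slot maps. The point of the spare is that a replacement mbuf can be loaded before the slot's current mapping is disturbed, so a failed allocation never leaves a ring slot unmapped. A hedged sketch of that swap; names and details are illustrative, the real logic lives in the bxe_alloc_*_mbuf() routines elsewhere in this file.

    static int
    example_replace_rx_mbuf(struct bxe_fastpath *fp, int index)
    {
        struct mbuf *m;
        bus_dmamap_t tmp;
        bus_dma_segment_t seg;
        int nsegs, rc;

        m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MCLBYTES);
        if (m == NULL)
            return (ENOBUFS);       /* Slot keeps its old mbuf/map. */
        m->m_pkthdr.len = m->m_len = MCLBYTES;

        /* Load the new mbuf into the spare map first. */
        rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
            fp->rx_mbuf_spare_map, m, &seg, &nsegs, BUS_DMA_NOWAIT);
        if (rc != 0) {
            m_freem(m);
            return (rc);            /* Slot is still intact. */
        }

        /* Only now is it safe to swap the spare and slot maps. */
        tmp = fp->rx_mbuf_map[index];
        fp->rx_mbuf_map[index] = fp->rx_mbuf_spare_map;
        fp->rx_mbuf_spare_map = tmp;
        fp->rx_mbuf_ptr[index] = m;
        return (0);
    }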
/*
- * Create a DMA tag for the def_status block, allocate and clear the
- * memory, map the memory into DMA space, and fetch the physical
- * address of the block.
+ * Allocate default status block.
*/
- if (bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, BXE_DMA_BOUNDARY,
- BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
- BXE_DEF_STATUS_BLK_SZ, 1, BXE_DEF_STATUS_BLK_SZ, 0, NULL, NULL,
- &sc->def_status_block_tag)) {
- BXE_PRINTF(
- "%s(%d): Could not allocate def_status block DMA tag!\n",
- __FILE__, __LINE__);
- rc = ENOMEM;
- goto bxe_dma_alloc_exit;
- }
-
- if (bus_dmamem_alloc(sc->def_status_block_tag,
- (void **)&sc->def_status_block, BUS_DMA_NOWAIT,
- &sc->def_status_block_map)) {
- BXE_PRINTF(
- "%s(%d): Could not allocate def_status block DMA memory!\n",
- __FILE__, __LINE__);
- rc = ENOMEM;
- goto bxe_dma_alloc_exit;
- }
-
- bzero((char *)sc->def_status_block, BXE_DEF_STATUS_BLK_SZ);
-
- error = bus_dmamap_load(sc->def_status_block_tag,
- sc->def_status_block_map, sc->def_status_block,
- BXE_DEF_STATUS_BLK_SZ, bxe_dma_map_addr, &busaddr, BUS_DMA_NOWAIT);
-
- if (error) {
- BXE_PRINTF(
- "%s(%d): Could not map def_status block DMA memory!\n",
- __FILE__, __LINE__);
- rc = ENOMEM;
- goto bxe_dma_alloc_exit;
- }
-
- /* Physical Address of Default Status Block. */
- sc->def_status_block_paddr = busaddr;
- DBPRINT(sc, (BXE_EXTREME_LOAD | BXE_EXTREME_RESET),
- "%s(): Default status block physical address = 0x%08X\n",
- __FUNCTION__, (uint32_t)sc->def_status_block_paddr);
+ rc = bxe_dma_malloc(sc, BXE_DEF_STATUS_BLK_SZ, &sc->def_sb_dma,
+ BUS_DMA_NOWAIT, "default status block");
+ if (rc != 0)
+ goto bxe_host_structures_alloc_exit;
+ sc->def_sb = (struct host_def_status_block *) sc->def_sb_dma.vaddr;
/*
- * Create a DMA tag for the statistics block, allocate and clear the
- * memory, map the memory into DMA space, and fetch the physical
- * address of the block.
+ * Allocate statistics block.
*/
- if (bus_dma_tag_create(sc->parent_tag, BXE_DMA_ALIGN, BXE_DMA_BOUNDARY,
- BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BXE_STATS_BLK_SZ,
- 1, BXE_STATS_BLK_SZ, 0, NULL, NULL, &sc->stats_tag)) {
- BXE_PRINTF(
- "%s(%d): Could not allocate statistics block DMA tag!\n",
- __FILE__, __LINE__);
- rc = ENOMEM;
- goto bxe_dma_alloc_exit;
- }
-
- if (bus_dmamem_alloc(sc->stats_tag, (void **)&sc->stats_block,
- BUS_DMA_NOWAIT, &sc->stats_map)) {
- BXE_PRINTF(
- "%s(%d): Could not allocate statistics block DMA memory!\n",
- __FILE__, __LINE__);
- rc = ENOMEM;
- goto bxe_dma_alloc_exit;
- }
-
- bzero((char *)sc->stats_block, BXE_STATS_BLK_SZ);
-
- error = bus_dmamap_load(sc->stats_tag, sc->stats_map, sc->stats_block,
- BXE_STATS_BLK_SZ, bxe_dma_map_addr, &busaddr, BUS_DMA_NOWAIT);
-
- if (error) {
- BXE_PRINTF(
- "%s(%d): Could not map statistics block DMA memory!\n",
- __FILE__, __LINE__);
- rc = ENOMEM;
- goto bxe_dma_alloc_exit;
- }
-
- /* Physical Address of Statistics Block. */
- sc->stats_block_paddr = busaddr;
- DBPRINT(sc, (BXE_EXTREME_LOAD | BXE_EXTREME_RESET),
- "%s(): Statistics block physical address = 0x%08X\n",
- __FUNCTION__, (uint32_t)sc->stats_block_paddr);
+ rc = bxe_dma_malloc(sc, BXE_STATS_BLK_SZ, &sc->stats_dma,
+ BUS_DMA_NOWAIT, "statistics block");
+ if (rc != 0)
+ goto bxe_host_structures_alloc_exit;
+ sc->stats = (struct statistics_block *) sc->stats_dma.vaddr;
/*
- * Create a DMA tag for slowpath memory, allocate and clear the
- * memory, map the memory into DMA space, and fetch the physical
- * address of the block.
+ * Allocate slowpath block.
*/
- if (bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, BXE_DMA_BOUNDARY,
- BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BXE_SLOWPATH_SZ,
- 1, BXE_SLOWPATH_SZ, 0, NULL, NULL, &sc->slowpath_tag)) {
- BXE_PRINTF(
- "%s(%d): Could not allocate slowpath DMA tag!\n",
- __FILE__, __LINE__);
- rc = ENOMEM;
- goto bxe_dma_alloc_exit;
- }
-
- if (bus_dmamem_alloc(sc->slowpath_tag, (void **)&sc->slowpath,
- BUS_DMA_NOWAIT, &sc->slowpath_map)) {
- BXE_PRINTF(
- "%s(%d): Could not allocate slowpath DMA memory!\n",
- __FILE__, __LINE__);
- rc = ENOMEM;
- goto bxe_dma_alloc_exit;
- }
-
- bzero((char *)sc->slowpath, BXE_SLOWPATH_SZ);
-
- error = bus_dmamap_load(sc->slowpath_tag, sc->slowpath_map,
- sc->slowpath, BXE_SLOWPATH_SZ, bxe_dma_map_addr, &busaddr,
- BUS_DMA_NOWAIT);
-
- if (error) {
- BXE_PRINTF("%s(%d): Could not map slowpath DMA memory!\n",
- __FILE__, __LINE__);
- rc = ENOMEM;
- goto bxe_dma_alloc_exit;
- }
-
- /* Physical Address For Slow Path Context. */
- sc->slowpath_paddr = busaddr;
- DBPRINT(sc, (BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET),
- "%s(): Slowpath context physical address = 0x%08X\n",
- __FUNCTION__, (uint32_t)sc->slowpath_paddr);
+ rc = bxe_dma_malloc(sc, BXE_SLOWPATH_SZ, &sc->slowpath_dma,
+ BUS_DMA_NOWAIT, "slowpath block");
+ if (rc != 0)
+ goto bxe_host_structures_alloc_exit;
+ sc->slowpath = (struct bxe_slowpath *) sc->slowpath_dma.vaddr;
/*
- * Create a DMA tag for the Slow Path Queue, allocate and clear the
- * memory, map the memory into DMA space, and fetch the physical
- * address of the block.
+ * Allocate slowpath queue.
*/
- if (bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, BXE_DMA_BOUNDARY,
- BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BXE_SPQ_SZ, 1,
- BXE_SPQ_SZ, 0, NULL, NULL, &sc->spq_tag)) {
- BXE_PRINTF("%s(%d): Could not allocate SPQ DMA tag!\n",
- __FILE__, __LINE__);
- rc = ENOMEM;
- goto bxe_dma_alloc_exit;
- }
-
- if (bus_dmamem_alloc(sc->spq_tag, (void **)&sc->spq, BUS_DMA_NOWAIT,
- &sc->spq_map)) {
- BXE_PRINTF("%s(%d): Could not allocate SPQ DMA memory!\n",
- __FILE__, __LINE__);
- rc = ENOMEM;
- goto bxe_dma_alloc_exit;
- }
-
- bzero((char *)sc->spq, BXE_SPQ_SZ);
-
- error = bus_dmamap_load(sc->spq_tag, sc->spq_map, sc->spq, BXE_SPQ_SZ,
- bxe_dma_map_addr, &busaddr, BUS_DMA_NOWAIT);
-
- if (error) {
- BXE_PRINTF("%s(%d): Could not map SPQ DMA memory!\n",
- __FILE__, __LINE__);
- rc = ENOMEM;
- goto bxe_dma_alloc_exit;
- }
-
- /* Physical address of slow path queue. */
- sc->spq_paddr = busaddr;
- DBPRINT(sc, (BXE_EXTREME_LOAD | BXE_EXTREME_RESET),
- "%s(): Slowpath queue physical address = 0x%08X\n",
- __FUNCTION__, (uint32_t)sc->spq_paddr);
-
- if (bxe_gunzip_init(sc)) {
- rc = ENOMEM;
- goto bxe_dma_alloc_exit;
- }
-
-bxe_dma_alloc_exit:
- DBEXIT(BXE_VERBOSE_RESET);
- return (rc);
-}
-
-/*
- * Allocate DMA memory used for the firmware gunzip memory.
- *
- * Returns:
- * 0 for success, !0 = Failure.
- */
-
-static int
-bxe_dmamem_alloc(struct bxe_softc *sc, bus_dma_tag_t tag, bus_dmamap_t map,
- void *buf, uint32_t buflen, bus_addr_t *busaddr)
-{
- int rc;
-
- rc = 0;
-
- DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
+ rc = bxe_dma_malloc(sc, BXE_SPQ_SZ, &sc->spq_dma,
+ BUS_DMA_NOWAIT, "slowpath queue");
+ if (rc != 0)
+ goto bxe_host_structures_alloc_exit;
+ sc->spq = (struct eth_spe *) sc->spq_dma.vaddr;
/*
- * Create a DMA tag for the block, allocate and clear the
- * memory, map the memory into DMA space, and fetch the physical
- * address of the block.
+ * Allocate firmware decompression buffer.
*/
- if (bus_dma_tag_create(sc->parent_tag, BXE_DMA_ALIGN, BXE_DMA_BOUNDARY,
- BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, buflen, 1, buflen,
- 0, NULL, NULL, &sc->gunzip_tag)) {
- BXE_PRINTF("%s(%d): Could not allocate DMA tag!\n",
- __FILE__, __LINE__);
- rc = ENOMEM;
- goto bxe_dmamem_alloc_exit;
- }
-
- if (bus_dmamem_alloc(sc->gunzip_tag, (void **)&sc->gunzip_buf,
- BUS_DMA_NOWAIT, &sc->gunzip_map)) {
- BXE_PRINTF("%s(%d): Could not allocate DMA memory!\n",
- __FILE__, __LINE__);
- rc = ENOMEM;
- goto bxe_dmamem_alloc_exit;
+ rc = bxe_dma_malloc(sc, BXE_FW_BUF_SIZE, &sc->gz_dma,
+ BUS_DMA_NOWAIT, "gunzip buffer");
+ if (rc != 0)
+ goto bxe_host_structures_alloc_exit;
+ sc->gz = sc->gz_dma.vaddr;
+ sc->strm = malloc(sizeof(*sc->strm), M_DEVBUF, M_NOWAIT);
+ if (sc->strm == NULL) {
+ rc = ENOMEM;
+ goto bxe_host_structures_alloc_exit;
+ }
- bzero((char *)sc->gunzip_buf, buflen);
-
- if (bus_dmamap_load(sc->gunzip_tag, sc->gunzip_map, sc->gunzip_buf,
- buflen, bxe_dma_map_addr, busaddr, BUS_DMA_NOWAIT)) {
- BXE_PRINTF("%s(%d): Could not map DMA memory!\n",
- __FILE__, __LINE__);
- rc = ENOMEM;
- }
-bxe_dmamem_alloc_exit:
- DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET);
+bxe_host_structures_alloc_exit:
+ DBEXIT(BXE_VERBOSE_RESET);
return (rc);
}
@@ -14313,7 +13939,7 @@ bxe_set_mac_addr_e1(struct bxe_softc *sc, int set)
uint8_t *eaddr;
int port;
- DBENTER(BXE_VERBOSE_MISC);
+ DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
config = BXE_SP(sc, mac_config);
port = BP_PORT(sc);
@@ -14371,7 +13997,7 @@ bxe_set_mac_addr_e1(struct bxe_softc *sc, int set)
U64_HI(BXE_SP_MAPPING(sc, mac_config)),
U64_LO(BXE_SP_MAPPING(sc, mac_config)), 0);
- DBEXIT(BXE_VERBOSE_MISC);
+ DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
}
/*
@@ -14388,7 +14014,7 @@ bxe_set_mac_addr_e1h(struct bxe_softc *sc, int set)
uint8_t *eaddr;
int func, port;
- DBENTER(BXE_VERBOSE_MISC);
+ DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
config = (struct mac_configuration_cmd_e1h *)BXE_SP(sc, mac_config);
port = BP_PORT(sc);
@@ -14428,7 +14054,7 @@ bxe_set_mac_addr_e1h(struct bxe_softc *sc, int set)
config_table->flags =
MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
- DBPRINT(sc, BXE_VERBOSE_MISC,
+ DBPRINT(sc, BXE_VERBOSE,
"%s(): %s MAC (%04x:%04x:%04x), E1HOV = %d, CLID = %d\n",
__FUNCTION__, (set ? "Setting" : "Clearing"),
config_table->msb_mac_addr, config_table->middle_mac_addr,
@@ -14439,7 +14065,7 @@ bxe_set_mac_addr_e1h(struct bxe_softc *sc, int set)
U64_LO(BXE_SP_MAPPING(sc, mac_config)), 0);
bxe_set_mac_addr_e1h_exit:
- DBEXIT(BXE_VERBOSE_MISC);
+ DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
}
/*
@@ -14481,24 +14107,14 @@ bxe_set_rx_mode(struct bxe_softc *sc)
* multicast address filtering.
*/
if (ifp->if_flags & IFF_PROMISC) {
- DBPRINT(sc, BXE_VERBOSE_MISC,
- "%s(): Enabling promiscuous mode.\n", __FUNCTION__);
-
/* Enable promiscuous mode. */
rx_mode = BXE_RX_MODE_PROMISC;
} else if (ifp->if_flags & IFF_ALLMULTI ||
ifp->if_amcount > BXE_MAX_MULTICAST) {
- DBPRINT(sc, BXE_VERBOSE_MISC,
- "%s(): Enabling all multicast mode.\n", __FUNCTION__);
-
/* Enable all multicast addresses. */
rx_mode = BXE_RX_MODE_ALLMULTI;
} else {
/* Enable selective multicast mode. */
- DBPRINT(sc, BXE_VERBOSE_MISC,
- "%s(): Enabling selective multicast mode.\n",
- __FUNCTION__);
-
if (CHIP_IS_E1(sc)) {
i = 0;
config = BXE_SP(sc, mcast_config);
@@ -14608,7 +14224,6 @@ bxe_reset_func(struct bxe_softc *sc)
/* Configure IGU. */
REG_WR(sc, HC_REG_LEADING_EDGE_0 + port * 8, 0);
REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port * 8, 0);
-
REG_WR(sc, HC_REG_CONFIG_0 + (port * 4), 0x1000);
/* Clear ILT. */
@@ -14670,8 +14285,10 @@ bxe_reset_common(struct bxe_softc *sc)
DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
- REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0xd3ffff7f);
- REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
+ REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
+ 0xd3ffff7f);
+ REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ 0x1403);
DBEXIT(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
}
@@ -14687,7 +14304,6 @@ bxe_reset_chip(struct bxe_softc *sc, uint32_t reset_code)
{
DBENTER(BXE_VERBOSE_LOAD | BXE_VERBOSE_RESET | BXE_VERBOSE_UNLOAD);
- DBRUNLV(BXE_INFO, bxe_decode_mb_msgs(sc, 0, reset_code));
switch (reset_code) {
case FW_MSG_CODE_DRV_UNLOAD_COMMON:
@@ -14712,10 +14328,12 @@ bxe_reset_chip(struct bxe_softc *sc, uint32_t reset_code)
}
/*
- * Called by the OS to set media options (link, speed, etc.).
+ * Called by the OS to set media options (link, speed, etc.)
+ * when the user specifies "ifconfig bxe media XXX" or
+ * "ifconfig bxe mediaopt XXX".
*
* Returns:
- * 0 = Success, positive value for failure.
+ * 0 = Success, !0 = Failure
*/
static int
bxe_ifmedia_upd(struct ifnet *ifp)
@@ -14730,44 +14348,32 @@ bxe_ifmedia_upd(struct ifnet *ifp)
ifm = &sc->bxe_ifmedia;
rc = 0;
- /* This is an Ethernet controller. */
+ /* We only support the Ethernet media type. */
if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
rc = EINVAL;
goto bxe_ifmedia_upd_exit;
}
- BXE_CORE_LOCK(sc);
-
switch (IFM_SUBTYPE(ifm->ifm_media)) {
case IFM_AUTO:
- DBPRINT(sc, BXE_VERBOSE_PHY,
- "%s(): Media set to IFM_AUTO, restarting autonegotiation.\n",
- __FUNCTION__);
+ /* ToDo: What to do here? */
+ /* Doing nothing translates to success here. */
break;
case IFM_10G_CX4:
- DBPRINT(sc, BXE_VERBOSE_PHY,
- "%s(): Media set to IFM_10G_CX4, forced mode.\n", __FUNCTION__);
- break;
+ /* Fall-through */
case IFM_10G_SR:
- DBPRINT(sc, BXE_VERBOSE_PHY,
- "%s(): Media set to IFM_10G_SR, forced mode.\n", __FUNCTION__);
- break;
+ /* Fall-through */
case IFM_10G_T:
- DBPRINT(sc, BXE_VERBOSE_PHY,
- "%s(): Media set to IFM_10G_T, forced mode.\n", __FUNCTION__);
- break;
+ /* Fall-through */
case IFM_10G_TWINAX:
- DBPRINT(sc, BXE_VERBOSE_PHY,
- "%s(): Media set to IFM_10G_TWINAX, forced mode.\n", __FUNCTION__);
- break;
+ /* Fall-through */
default:
+ /* We don't support changing the media type. */
DBPRINT(sc, BXE_WARN, "%s(): Invalid media type!\n",
__FUNCTION__);
rc = EINVAL;
}
- BXE_CORE_UNLOCK(sc);
-
bxe_ifmedia_upd_exit:
DBEXIT(BXE_VERBOSE_PHY);
return (rc);
@@ -14789,7 +14395,7 @@ bxe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
DBENTER(BXE_EXTREME_LOAD | BXE_EXTREME_RESET);
/* Report link down if the driver isn't running. */
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
ifmr->ifm_active |= IFM_NONE;
goto bxe_ifmedia_status_exit;
}
@@ -14824,13 +14430,13 @@ bxe_ifmedia_status_exit:
* None.
*/
static __inline void
-bxe_update_last_max_sge(struct bxe_fastpath *fp, uint16_t idx)
+bxe_update_last_max_sge(struct bxe_fastpath *fp, uint16_t index)
{
uint16_t last_max;
last_max = fp->last_max_sge;
- if (SUB_S16(idx, last_max) > 0)
- fp->last_max_sge = idx;
+ if (SUB_S16(index, last_max) > 0)
+ fp->last_max_sge = index;
}
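SUB_S16() is presumably a signed 16-bit subtraction, which is what makes this comparison safe across index wrap-around. A self-contained sketch of the idiom; the real macro is defined elsewhere in the driver.

    /* Wrap-safe comparison of 16-bit ring indices (serial-number style). */
    static inline int16_t
    sub_s16_sketch(uint16_t a, uint16_t b)
    {
        return ((int16_t)(a - b));
    }

For example, a = 0x0002 and b = 0xFFF0 gives (int16_t)0x0012 = +18, so an index that has wrapped past 65535 still compares as newer than last_max.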
/*
@@ -14842,13 +14448,13 @@ bxe_update_last_max_sge(struct bxe_fastpath *fp, uint16_t idx)
static void
bxe_clear_sge_mask_next_elems(struct bxe_fastpath *fp)
{
- int i, idx, j;
+ int i, index, j;
- for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
- idx = RX_SGE_CNT * i - 1;
+ for (i = 0; i < NUM_RX_SGE_PAGES; i++) {
+ index = i * TOTAL_RX_SGE_PER_PAGE + USABLE_RX_SGE_PER_PAGE;
for (j = 0; j < 2; j++) {
- SGE_MASK_CLEAR_BIT(fp, idx);
- idx--;
+ SGE_MASK_CLEAR_BIT(fp, index);
+ index++;
}
}
}
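The rewritten loop walks forward from the first reserved slot of each page instead of backward from the last, clearing the two "next page pointer" positions. A worked example, assuming TOTAL_RX_SGE_PER_PAGE is 512 and USABLE_RX_SGE_PER_PAGE is 510 (two reserved slots per page; the real values come from if_bxe.h):

    /*
     *   i = 0: index = 510, clears bits 510 and 511
     *   i = 1: index = 1022, clears bits 1022 and 1023
     *   ... one pair per SGE page ...
     */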
@@ -14864,7 +14470,7 @@ bxe_update_sge_prod(struct bxe_fastpath *fp,
struct eth_fast_path_rx_cqe *fp_cqe)
{
struct bxe_softc *sc;
- uint16_t delta, last_max, last_elem, first_elem, sge_len;
+ uint16_t delta, first_elem, last_max, last_elem, sge_len;
int i;
sc = fp->sc;
@@ -14874,7 +14480,7 @@ bxe_update_sge_prod(struct bxe_fastpath *fp,
sge_len = SGE_PAGE_ALIGN(le16toh(fp_cqe->pkt_len) -
le16toh(fp_cqe->len_on_bd)) >> SGE_PAGE_SHIFT;
if (!sge_len)
- return;
+ goto bxe_update_sge_prod_exit;
/* First mark all used pages. */
for (i = 0; i < sge_len; i++)
@@ -14893,10 +14499,10 @@ bxe_update_sge_prod(struct bxe_fastpath *fp,
/* Now update the producer index. */
for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
- if (fp->sge_mask[i])
+ if (fp->rx_sge_mask[i])
break;
- fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
+ fp->rx_sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
delta += RX_SGE_MASK_ELEM_SZ;
}
@@ -14906,16 +14512,18 @@ bxe_update_sge_prod(struct bxe_fastpath *fp,
bxe_clear_sge_mask_next_elems(fp);
}
+bxe_update_sge_prod_exit:
DBEXIT(BXE_EXTREME_RECV);
}
/*
* Initialize scatter gather ring bitmask.
*
- * Elements may be taken from the scatter gather ring out of order since
- * TCP frames may be out of order or intermingled among multiple TCP
- * flows on the wire. The SGE bitmask tracks which elements are used
- * or available.
+ * Each entry in the SGE is associated with an aggregation in process.
+ * Since there is no guarantee that all Ethernet frames associated with
+ * a particular TCP flow will arrive at the adapter and be placed into
+ * the SGE chain contiguously, we maintain a bitmask for each SGE element
+ * that identifies which aggregation an Ethernet frame belongs to.
*
* Returns:
* None
@@ -14925,13 +14533,15 @@ bxe_init_sge_ring_bit_mask(struct bxe_fastpath *fp)
{
/* Set the mask to all 1s, it's faster to compare to 0 than to 0xf. */
- memset(fp->sge_mask, 0xff,
+ memset(fp->rx_sge_mask, 0xff,
(TOTAL_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT) * sizeof(uint64_t));
/*
- * Clear the two last indices in the page to 1. These are the
- * indices that correspond to the "next" element which will
- * never be indicated and should be removed from calculations.
+ * The SGE chain is formatted just like the RX chain.
+ * The last two elements are reserved as a "next page pointer"
+ * to the next page of SGE elements. Clear the last two
+ * elements in each SGE chain page since they will never be
+ * used to track an aggregation.
*/
bxe_clear_sge_mask_next_elems(fp);
}
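For reference, the mask is an array of 64-bit words with one bit per SGE element. A sketch of the bit addressing that SGE_MASK_SET_BIT()/SGE_MASK_CLEAR_BIT() presumably implement (the real macros live in if_bxe.h; the shift of 6 assumes 64 bits per mask element):

    #define SGE_MASK_WORD_SKETCH(idx)   ((idx) >> 6)    /* which uint64_t */
    #define SGE_MASK_BIT_SKETCH(idx)    ((idx) & 63)    /* which bit in it */

    static inline void
    sge_mask_clear_bit_sketch(uint64_t *mask, int idx)
    {
        mask[SGE_MASK_WORD_SKETCH(idx)] &=
            ~(1ULL << SGE_MASK_BIT_SKETCH(idx));
    }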
@@ -14948,32 +14558,55 @@ static void
bxe_tpa_start(struct bxe_fastpath *fp, uint16_t queue, uint16_t cons,
uint16_t prod)
{
- struct bxe_softc *sc = fp->sc;
+ struct bxe_softc *sc;
struct mbuf *m_temp;
struct eth_rx_bd *rx_bd;
bus_dmamap_t map_temp;
+ int max_agg_queues;
sc = fp->sc;
- DBENTER(BXE_EXTREME_RECV);
+ DBENTER(BXE_INSANE_RECV | BXE_INSANE_TPA);
- /* Move the empty mbuf and mapping from the TPA pool. */
+
+ DBPRINT(sc, BXE_EXTREME_TPA,
+ "%s(): fp[%02d].tpa[%02d], cons=0x%04X, prod=0x%04X\n",
+ __FUNCTION__, fp->index, queue, cons, prod);
+
+ max_agg_queues = CHIP_IS_E1(sc) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
+ ETH_MAX_AGGREGATION_QUEUES_E1H;
+
+ DBRUNIF((queue >= max_agg_queues),
+ BXE_PRINTF("%s(): fp[%02d] illegal aggregation queue (%d >= %d)!\n",
+ __FUNCTION__, fp->index, queue, max_agg_queues));
+
+ DBRUNIF((fp->tpa_state[queue] != BXE_TPA_STATE_STOP),
+ BXE_PRINTF("%s(): Starting aggregation on "
+ "fp[%02d].tpa[%02d] even though queue is not in the "
+ "TPA_STOP state!\n", __FUNCTION__, fp->index, queue));
+
+ /* Remove the existing mbuf and mapping from the TPA pool. */
m_temp = fp->tpa_mbuf_ptr[queue];
map_temp = fp->tpa_mbuf_map[queue];
+ /* Only the paranoid survive! */
+ if (m_temp == NULL) {
+ BXE_PRINTF("%s(%d): fp[%02d].tpa[%02d] not allocated!\n",
+ __FILE__, __LINE__, fp->index, queue);
+ /* ToDo: Additional error handling! */
+ goto bxe_tpa_start_exit;
+ }
+
/* Move received mbuf and mapping to TPA pool. */
fp->tpa_mbuf_ptr[queue] = fp->rx_mbuf_ptr[cons];
fp->tpa_mbuf_map[queue] = fp->rx_mbuf_map[cons];
- DBRUNIF((fp->tpa_state[queue] != BXE_TPA_STATE_STOP),
- DBPRINT(sc, BXE_FATAL, "%s(): Starting bin[%d] even though queue "
- "is not in the TPA_STOP state!\n", __FUNCTION__, queue));
-
/* Place the TPA bin into the START state. */
fp->tpa_state[queue] = BXE_TPA_STATE_START;
DBRUN(fp->tpa_queue_used |= (1 << queue));
/* Get the rx_bd for the next open entry on the receive chain. */
- rx_bd = &fp->rx_bd_chain[RX_PAGE(prod)][RX_IDX(prod)];
+ rx_bd = &fp->rx_chain[prod];
/* Update the rx_bd with the empty mbuf from the TPA pool. */
rx_bd->addr_hi = htole32(U64_HI(fp->tpa_mbuf_segs[queue].ds_addr));
@@ -14981,13 +14614,14 @@ bxe_tpa_start(struct bxe_fastpath *fp, uint16_t queue, uint16_t cons,
fp->rx_mbuf_ptr[prod] = m_temp;
fp->rx_mbuf_map[prod] = map_temp;
- DBEXIT(BXE_EXTREME_RECV);
+bxe_tpa_start_exit:
+ DBEXIT(BXE_INSANE_RECV | BXE_INSANE_TPA);
}
/*
* When a TPA aggregation is completed, loop through the individual mbufs
* of the aggregation, combining them into a single mbuf which will be sent
- * up the stack. Refill all mbufs freed as we go along.
+ * up the stack. Refill all freed SGEs with mbufs as we go along.
*
* Returns:
* 0 = Success, !0 = Failure.
@@ -14996,22 +14630,27 @@ static int
bxe_fill_frag_mbuf(struct bxe_softc *sc, struct bxe_fastpath *fp,
struct mbuf *m, struct eth_fast_path_rx_cqe *fp_cqe, uint16_t cqe_idx)
{
+ struct mbuf *m_frag;
uint32_t frag_len, frag_size, pages, i;
uint16_t sge_idx, len_on_bd;
- int rc, j;
+ int j, rc;
- DBENTER(BXE_EXTREME_RECV);
+ DBENTER(BXE_EXTREME_RECV | BXE_EXTREME_TPA);
rc = 0;
len_on_bd = le16toh(fp_cqe->len_on_bd);
frag_size = le16toh(fp_cqe->pkt_len) - len_on_bd;
pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
+ DBPRINT(sc, BXE_VERBOSE_TPA,
+ "%s(): len_on_bd=%d, frag_size=%d, pages=%d\n",
+ __FUNCTION__, len_on_bd, frag_size, pages);
+
/* Make sure the aggregated frame is not too big to handle. */
if (pages > 8 * PAGES_PER_SGE) {
DBPRINT(sc, BXE_FATAL,
- "%s(): SGL length (%d) is too long! CQE index is %d\n",
- __FUNCTION__, pages, cqe_idx);
+ "%s(): fp[%02d].rx_sge[0x%04X] has too many pages (%d)!\n",
+ __FUNCTION__, fp->index, cqe_idx, pages);
DBPRINT(sc, BXE_FATAL,
"%s(): fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
__FUNCTION__, le16toh(fp_cqe->pkt_len), len_on_bd);
@@ -15021,7 +14660,7 @@ bxe_fill_frag_mbuf(struct bxe_softc *sc, struct bxe_fastpath *fp,
}
/*
- * Run through the scatter gather list, pulling the individual
+ * Scan through the scatter gather list, pulling individual
* mbufs into a single mbuf for the host stack.
*/
for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
@@ -15035,38 +14674,37 @@ bxe_fill_frag_mbuf(struct bxe_softc *sc, struct bxe_fastpath *fp,
frag_len = min(frag_size, (uint32_t)(BCM_PAGE_SIZE *
PAGES_PER_SGE));
- /* Update the mbuf with the fragment length. */
- fp->rx_sge_buf_ptr[sge_idx]->m_len = frag_len;
+ DBPRINT(sc, BXE_VERBOSE_TPA,
+ "%s(): i=%d, j=%d, frag_size=%d, frag_len=%d\n",
+ __FUNCTION__, i, j, frag_size, frag_len);
- /* Unmap the mbuf from DMA space. */
- bus_dmamap_sync(fp->rx_sge_buf_tag, fp->rx_sge_buf_map[sge_idx],
- BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(fp->rx_sge_buf_tag,
- fp->rx_sge_buf_map[sge_idx]);
+ m_frag = fp->rx_sge_buf_ptr[sge_idx];
- /* Concatenate the current fragment to the aggregated mbuf. */
- m_cat(m, fp->rx_sge_buf_ptr[sge_idx]);
+ /* Allocate a new mbuf for the SGE. */
+ rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
+ if (rc) {
+ /*
+ * Leave all remaining SGEs in the ring.
+ */
+ goto bxe_fill_frag_mbuf_exit;
+ }
- /* The SGE mbuf was freed in the call to m_cat(). */
- DBRUN(fp->sge_mbuf_alloc--);
- fp->rx_sge_buf_ptr[sge_idx] = NULL;
+ /* Update the fragment's length. */
+ m_frag->m_len = frag_len;
- /*
- * Try an allocate a new mbuf for the SGE that was just
- * released. If an allocation error occurs stop where we
- * are and drop the whole frame.
- */
- rc = bxe_alloc_rx_sge(sc, fp, sge_idx);
- if (rc)
- goto bxe_fill_frag_mbuf_exit;
+ /* Concatenate the fragment to the head mbuf. */
+ m_cat(m, m_frag);
+ DBRUN(fp->sge_mbuf_alloc--);
+ /* Update TPA mbuf size and remaining fragment size. */
m->m_pkthdr.len += frag_len;
-
frag_size -= frag_len;
}
bxe_fill_frag_mbuf_exit:
- DBEXIT(BXE_EXTREME_RECV);
+ DBPRINT(sc, BXE_VERBOSE_TPA,
+ "%s(): frag_size=%d\n", __FUNCTION__, frag_size);
+ DBEXIT(BXE_EXTREME_RECV | BXE_EXTREME_TPA);
return (rc);
}
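The per-iteration math caps each SGE at one aggregation page and charges the remainder to the final fragment. A worked example, assuming BCM_PAGE_SIZE is 4096 and PAGES_PER_SGE is 1 (illustrative values only):

    /*
     *   pkt_len = 9000, len_on_bd = 1000
     *   frag_size = 9000 - 1000 = 8000, pages = 2
     *   pass 1: frag_len = min(8000, 4096) = 4096, frag_size -> 3904
     *   pass 2: frag_len = min(3904, 4096) = 3904, frag_size -> 0
     */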
@@ -15082,102 +14720,70 @@ static void
bxe_tpa_stop(struct bxe_softc *sc, struct bxe_fastpath *fp, uint16_t queue,
int pad, int len, union eth_rx_cqe *cqe, uint16_t cqe_idx)
{
- struct mbuf *m_old, *m_new;
- struct ip *ip;
+ struct mbuf *m;
struct ifnet *ifp;
- struct ether_vlan_header *eh;
- bus_dma_segment_t seg;
- int rc, e_hlen;
+ int rc;
- DBENTER(BXE_EXTREME_RECV);
- DBPRINT(sc, BXE_VERBOSE_RECV,
- "%s(): fp[%d], tpa queue = %d, len = %d, pad = %d\n", __FUNCTION__,
- fp->index, queue, len, pad);
+ DBENTER(BXE_INSANE_RECV | BXE_INSANE_TPA);
+ DBPRINT(sc, (BXE_EXTREME_RECV | BXE_EXTREME_TPA),
+ "%s(): fp[%02d].tpa[%02d], len=%d, pad=%d\n",
+ __FUNCTION__, fp->index, queue, len, pad);
rc = 0;
ifp = sc->bxe_ifp;
- /* Unmap m_old from DMA space. */
- m_old = fp->tpa_mbuf_ptr[queue];
- bus_dmamap_sync(fp->rx_mbuf_tag, fp->tpa_mbuf_map[queue],
- BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(fp->rx_mbuf_tag, fp->tpa_mbuf_map[queue]);
+ m = fp->tpa_mbuf_ptr[queue];
- /* Skip over the pad when passing the data up the stack. */
- m_adj(m_old, pad);
+ /* Allocate a replacement before modifying existing mbuf. */
+ rc = bxe_alloc_tpa_mbuf(fp, queue);
+ if (rc) {
+ /* Drop the frame and log a soft error. */
+ fp->rx_soft_errors++;
+ goto bxe_tpa_stop_exit;
+ }
- /* Adjust the packet length to match the received data. */
- m_old->m_pkthdr.len = m_old->m_len = len;
+ /* We have a replacement, fixup the current mbuf. */
+ m_adj(m, pad);
+ m->m_pkthdr.len = m->m_len = len;
- /* Validate the checksum if offload enabled. */
- m_old->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID |
+ /* Mark the checksums valid (taken care of by firmware). */
+ m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID |
CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
- m_old->m_pkthdr.csum_data = 0xffff;
-
- /* Map the header and find the Ethernet type & header length. */
- eh = mtod(m_old, struct ether_vlan_header *);
- if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
- e_hlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
- else
- e_hlen = ETHER_HDR_LEN;
-
- /* Get the IP header pointer. */
- ip = (struct ip *)(m_old->m_data + e_hlen);
+ m->m_pkthdr.csum_data = 0xffff;
- ip->ip_sum = 0;
- ip->ip_sum = in_cksum_hdr(ip);
-
- /* Try and aggregate all of the receive mbufs into a single mbuf. */
- if (!bxe_fill_frag_mbuf(sc, fp, m_old, &cqe->fast_path_cqe, cqe_idx)) {
- /*
- * We have an aggregated frame. If the frame has a vlan tag
- * attach that information to the mbuf.
- */
+ /* Aggregate all of the SGEs into a single mbuf. */
+ rc = bxe_fill_frag_mbuf(sc, fp, m, &cqe->fast_path_cqe, cqe_idx);
+ if (rc) {
+ /* Drop the packet and log an error. */
+ fp->rx_soft_errors++;
+ m_freem(m);
+ } else {
+ /* Find VLAN tag and send frame up to the stack. */
if ((le16toh(cqe->fast_path_cqe.pars_flags.flags) &
PARSING_FLAGS_VLAN)) {
- m_old->m_pkthdr.ether_vtag =
+ m->m_pkthdr.ether_vtag =
cqe->fast_path_cqe.vlan_tag;
- m_old->m_flags |= M_VLANTAG;
+ m->m_flags |= M_VLANTAG;
}
- /* Send the packet to the appropriate interface. */
- m_old->m_pkthdr.rcvif = ifp;
+ /* Assign packet to the appropriate interface. */
+ m->m_pkthdr.rcvif = ifp;
- /* Pass the packet up to the stack. */
- fp->ipackets++;
- DBRUN(fp->tpa_pkts++);
- (*ifp->if_input)(ifp, m_old);
- } else {
- DBPRINT(sc, BXE_WARN,
- "%s(): Failed to allocate new SGE page, dropping frame!\n",
- __FUNCTION__);
- fp->soft_rx_errors++;
- m_freem(m_old);
+ /* Update packet statistics. */
+ fp->rx_tpa_pkts++;
+ ifp->if_ipackets++;
+
+ /* ToDo: Any potential locking issues here? */
+ /* Pass the frame to the stack. */
+ (*ifp->if_input)(ifp, m);
}
- /* We passed m_old up the stack or dropped the frame. */
+ /* We passed the mbuf up the stack or dropped the frame. */
DBRUN(fp->tpa_mbuf_alloc--);
- /* Allocate a replacement mbuf. */
- if (__predict_false((m_new = bxe_alloc_mbuf(fp,
- sc->mbuf_alloc_size)) == NULL))
- goto bxe_tpa_stop_exit;
-
- /* Map the new mbuf and place it in the pool. */
- rc = bxe_map_mbuf(fp, m_new, fp->rx_mbuf_tag,
- fp->tpa_mbuf_map[queue], &seg);
- if (rc)
- goto bxe_tpa_stop_exit;
-
- DBRUN(fp->tpa_mbuf_alloc++);
-
- fp->tpa_mbuf_ptr[queue] = m_new;
- fp->tpa_mbuf_segs[queue] = seg;
-
bxe_tpa_stop_exit:
fp->tpa_state[queue] = BXE_TPA_STATE_STOP;
DBRUN(fp->tpa_queue_used &= ~(1 << queue));
-
- DBEXIT(BXE_EXTREME_RECV);
+ DBEXIT(BXE_INSANE_RECV | BXE_INSANE_TPA);
}
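bxe_tpa_stop() above, and the rxeof path below, follow the same "allocate a replacement before touching the current mbuf" discipline, which turns an allocation failure into a counted drop instead of a hole in the ring. A condensed, hypothetical outline; the helper names stand in for the real bxe_alloc_*_mbuf() routines.

    static int example_alloc_replacement(struct bxe_fastpath *, uint16_t);
    static struct mbuf *example_detach_mbuf(struct bxe_fastpath *, uint16_t);

    static void
    example_rx_consume(struct bxe_fastpath *fp, uint16_t slot)
    {
        struct mbuf *m;

        if (example_alloc_replacement(fp, slot) != 0) {
            fp->rx_soft_errors++;   /* Slot keeps its old mbuf. */
            return;                 /* Drop; ring stays populated. */
        }
        m = example_detach_mbuf(fp, slot);
        (*fp->sc->bxe_ifp->if_input)(fp->sc->bxe_ifp, m);
    }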
/*
@@ -15195,7 +14801,7 @@ bxe_update_rx_prod(struct bxe_softc *sc, struct bxe_fastpath *fp,
int i;
/* Update producers. */
- rx_prods.bd_prod = bd_prod;
+ rx_prods.bd_prod = bd_prod;
rx_prods.cqe_prod = cqe_prod;
rx_prods.sge_prod = sge_prod;
@@ -15213,7 +14819,7 @@ bxe_update_rx_prod(struct bxe_softc *sc, struct bxe_fastpath *fp,
}
/*
- * Handles received frame interrupt events.
+ * Processes received frames.
*
* Returns:
* Nothing.
@@ -15228,6 +14834,7 @@ bxe_rxeof(struct bxe_fastpath *fp)
uint16_t rx_cq_cons, rx_cq_cons_idx;
uint16_t rx_cq_prod, rx_cq_cons_sb;
unsigned long rx_pkts = 0;
+ int rc;
sc = fp->sc;
ifp = sc->bxe_ifp;
@@ -15240,8 +14847,8 @@ bxe_rxeof(struct bxe_fastpath *fp)
/*
* Get working copies of the driver's view of the
* RX indices. These are 16 bit values that are
- * expected to increment from from 0 to 65535
- * and then wrap-around to 0 again.
+ * expected to increment from 0 to 65535 and then
+ * wrap-around to 0 again.
*/
rx_bd_cons = fp->rx_bd_cons;
rx_bd_prod = fp->rx_bd_prod;
@@ -15249,7 +14856,7 @@ bxe_rxeof(struct bxe_fastpath *fp)
rx_cq_prod = fp->rx_cq_prod;
DBPRINT(sc, (BXE_EXTREME_RECV),
- "%s(%d): BEFORE: fp[%d], rx_bd_cons = 0x%04X, rx_bd_prod = 0x%04X, "
+ "%s(%d): BEFORE: fp[%02d], rx_bd_cons = 0x%04X, rx_bd_prod = 0x%04X, "
"rx_cq_cons_sw = 0x%04X, rx_cq_prod_sw = 0x%04X\n", __FUNCTION__,
curcpu, fp->index, rx_bd_cons, rx_bd_prod, rx_cq_cons, rx_cq_prod);
@@ -15271,33 +14878,24 @@ bxe_rxeof(struct bxe_fastpath *fp)
/*
* Convert the 16 bit indices used by hardware
- * into values that map to the arrays used by
- * the driver (i.e. an index).
+ * into array indices used by the driver.
*/
- rx_cq_cons_idx = RCQ_ENTRY(rx_cq_cons);
+ rx_cq_cons_idx = RCQ_ENTRY(rx_cq_cons);
rx_bd_prod_idx = RX_BD(rx_bd_prod);
rx_bd_cons_idx = RX_BD(rx_bd_cons);
wmb();
- /* Fetch the cookie. */
+ /* Fetch the completion queue entry (i.e. cookie). */
cqe = (union eth_rx_cqe *)
- &fp->rx_cq_chain[RCQ_PAGE(rx_cq_cons_idx)][RCQ_IDX(rx_cq_cons_idx)];
+ &fp->rcq_chain[rx_cq_cons_idx];
cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
-#ifdef BXE_DEBUG
- /* Simulate an error on the received frame. */
- if (DB_RANDOMTRUE(bxe_debug_received_frame_error)) {
- DBPRINT(sc, BXE_WARN,
- "%s(): Simulated CQE error flags!\n", __FUNCTION__);
- cqe_fp_flags |= ETH_RX_ERROR_FLAGS;
- sc->debug_received_frame_error++;
+ /* Sanity check the cookie flags. */
+ if (__predict_false(cqe_fp_flags == 0)) {
+ fp->rx_null_cqe_flags++;
+ DBRUN(bxe_dump_cqe(fp, rx_cq_cons_idx, cqe));
+ /* ToDo: What error handling can be done here? */
}
-#endif
-
- DBRUNIF((cqe_fp_flags == 0),
- fp->null_cqe_flags++;
- bxe_dump_cqe(fp, rx_cq_cons_idx, cqe));
- /* DRC - ANything else to do here? */
/* Check the CQE type for slowpath or fastpath completion. */
if (__predict_false(CQE_TYPE(cqe_fp_flags) ==
@@ -15314,7 +14912,8 @@ bxe_rxeof(struct bxe_fastpath *fp)
pad = cqe->fast_path_cqe.placement_offset;
/* Check if the completion is for TPA. */
- if ((!fp->disable_tpa) && (TPA_TYPE(cqe_fp_flags) !=
+ if ((fp->disable_tpa == FALSE) &&
+ (TPA_TYPE(cqe_fp_flags) !=
(TPA_TYPE_START | TPA_TYPE_END))) {
uint16_t queue = cqe->fast_path_cqe.queue_index;
@@ -15325,21 +14924,19 @@ bxe_rxeof(struct bxe_fastpath *fp)
* the frames.
*/
- /*
- * Check if a TPA aggregation has been started.
- */
+ /* Check if TPA aggregation has started. */
if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
- bxe_tpa_start(fp, queue,
- rx_bd_cons_idx, rx_bd_prod_idx);
+ bxe_tpa_start(fp, queue, rx_bd_cons_idx,
+ rx_bd_prod_idx);
goto bxe_rxeof_next_rx;
}
- /* Check if a TPA aggregation has completed. */
+ /* Check if TPA aggregation has completed. */
if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
- if (!BXE_RX_SUM_FIX(cqe))
- DBPRINT(sc, BXE_FATAL,
- "%s(): STOP on non-TCP data.\n",
- __FUNCTION__);
+ DBRUNIF(!BXE_RX_SUM_FIX(cqe),
+ DBPRINT(sc, BXE_FATAL,
+ "%s(): STOP on non-TCP data.\n",
+ __FUNCTION__));
/*
* This is the size of the linear
@@ -15359,108 +14956,39 @@ bxe_rxeof(struct bxe_fastpath *fp)
}
}
- /* Remove the mbuf from the RX chain. */
m = fp->rx_mbuf_ptr[rx_bd_cons_idx];
- fp->rx_mbuf_ptr[rx_bd_cons_idx] = NULL;
- DBRUN(fp->free_rx_bd++);
- DBRUNIF((fp->free_rx_bd > USABLE_RX_BD),
- DBPRINT(sc, BXE_FATAL,
- "%s(): fp[%d] - Too many free rx_bd's (0x%04X)!\n",
- __FUNCTION__, fp->index, fp->free_rx_bd));
-
- /* Unmap the mbuf from DMA space. */
- bus_dmamap_sync(fp->rx_mbuf_tag,
- fp->rx_mbuf_map[rx_bd_cons_idx],
- BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(fp->rx_mbuf_tag,
- fp->rx_mbuf_map[rx_bd_cons_idx]);
+ /* Allocate a replacement before modifying the existing mbuf. */
+ rc = bxe_alloc_rx_bd_mbuf(fp, rx_bd_prod_idx);
+ if (rc) {
+ /* Drop the frame and log a soft error. */
+ fp->rx_soft_errors++;
+ goto bxe_rxeof_next_rx;
+ }
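The call above implements a replace-before-use policy: a fresh mbuf is allocated for the rx_bd first, and the received mbuf leaves the ring only once its slot can be refilled, so the ring never holds a stale pointer. A condensed sketch of that pattern, with an illustrative slot structure that is not the driver's:

    /* Requires <sys/param.h>, <sys/systm.h> and <sys/mbuf.h> in a kernel file. */
    struct rx_slot {
            struct mbuf *m;
    };

    static struct mbuf *
    rx_swap_buf(struct rx_slot *slot)
    {
            struct mbuf *m_new, *m_old;

            /* Allocate the replacement first... */
            m_new = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
            if (m_new == NULL)
                    return (NULL);  /* Ring untouched; caller drops the frame. */

            /* ...only then take the received mbuf off the ring. */
            m_old = slot->m;
            slot->m = m_new;
            return (m_old);
    }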
/* Check if the received frame has any errors. */
if (__predict_false(cqe_fp_flags &
ETH_RX_ERROR_FLAGS)) {
DBPRINT(sc, BXE_WARN ,
- "%s(): Found error flags (0x%08X) "
- "set in received frame on fp[%d]!\n",
- __FUNCTION__, cqe_fp_flags, fp->index);
-
- fp->soft_rx_errors++;
-
- /* Reuse the mbuf for a new frame. */
- if (bxe_get_buf(fp, m, rx_bd_prod_idx)) {
- DBPRINT(sc, BXE_FATAL,
- "%s(): Can't reuse RX mbuf!\n",
- __FUNCTION__);
- DBRUN(bxe_breakpoint(sc));
+ "%s(): fp[%02d].cqe[0x%04X] has errors "
+ "(0x%08X)!\n", __FUNCTION__, fp->index,
+ rx_cq_cons, cqe_fp_flags);
- /* ToDo: Find alterntive to panic(). */
- panic("bxe%d: Can't reuse RX mbuf!\n",
- sc->bxe_unit);
- }
-
- /* Go handle any additional received frames. */
+ fp->rx_soft_errors++;
goto bxe_rxeof_next_rx;
}
- /*
- * The high level logic used here is to
- * immediatley replace each receive buffer
- * as it is used so that the receive chain
- * is full at all times. First we try to
- * allocate a new receive buffer, but if
- * that fails then we will reuse the
- * existing mbuf and log an error for the
- * lost packet.
- */
-
- /* Allocate a new mbuf for the receive chain. */
- if (__predict_false(bxe_get_buf(fp,
- NULL, rx_bd_prod_idx))) {
- /*
- * Drop the current frame if we can't get
- * a new mbuf.
- */
- fp->soft_rx_errors++;
-
- /*
- * Place the current mbuf back in the
- * receive chain.
- */
- if (__predict_false(bxe_get_buf(fp, m,
- rx_bd_prod_idx))) {
- /* This is really bad! */
- DBPRINT(sc, BXE_FATAL,
- "%s(): Can't reuse RX mbuf!\n",
- __FUNCTION__);
- DBRUN(bxe_breakpoint(sc));
-
- /* ToDo: Find alterntive to panic(). */
- panic(
- "bxe%d: Double mbuf allocation failure!\n",
- sc->bxe_unit);
- }
-
- /* Go handle any additional received frames. */
- goto bxe_rxeof_next_rx;
- }
-
- /*
- * Skip over the pad when passing the data up the stack.
- */
+ /* We have a replacement; fix up the current mbuf. */
m_adj(m, pad);
-
- /*
- * Adjust the packet length to match the received data.
- */
m->m_pkthdr.len = m->m_len = len;
- /* Send the packet to the appropriate interface. */
+ /* Assign packet to the appropriate interface. */
m->m_pkthdr.rcvif = ifp;
- /* Assume no hardware checksum. */
+ /* Assume no hardware checksum was completed. */
m->m_pkthdr.csum_flags = 0;
- /* Validate the checksum if offload enabled. */
+ /* Validate checksum if offload enabled. */
if (ifp->if_capenable & IFCAP_RXCSUM) {
/* Check whether IP checksummed or not. */
if (sc->rx_csum &&
@@ -15517,8 +15045,9 @@ bxe_rxeof(struct bxe_fastpath *fp)
/* Last chance to check for problems. */
DBRUN(bxe_validate_rx_packet(fp, rx_cq_cons, cqe, m));
- /* Pass the mbuf off to the upper layers. */
+ /* Update packet statistics. */
ifp->if_ipackets++;
+ rx_pkts++;
/* ToDo: Any potential locking issues here? */
/* Pass the frame to the stack. */
@@ -15530,7 +15059,6 @@ bxe_rxeof(struct bxe_fastpath *fp)
bxe_rxeof_next_rx:
rx_bd_prod = NEXT_RX_BD(rx_bd_prod);
rx_bd_cons = NEXT_RX_BD(rx_bd_cons);
- rx_pkts++;
bxe_rxeof_next_cqe:
rx_cq_prod = NEXT_RCQ_IDX(rx_cq_prod);
@@ -15543,14 +15071,14 @@ bxe_rxeof_next_cqe:
rmb();
}
- /* Update the driver copy of the fastpath indices. */
+ /* Update driver copy of the fastpath indices. */
fp->rx_bd_cons = rx_bd_cons;
fp->rx_bd_prod = rx_bd_prod;
fp->rx_cq_cons = rx_cq_cons;
fp->rx_cq_prod = rx_cq_prod;
DBPRINT(sc, (BXE_EXTREME_RECV),
- "%s(%d): AFTER: fp[%d], rx_bd_cons = 0x%04X, rx_bd_prod = 0x%04X, "
+ "%s(%d): AFTER: fp[%02d], rx_bd_cons = 0x%04X, rx_bd_prod = 0x%04X, "
"rx_cq_cons_sw = 0x%04X, rx_cq_prod_sw = 0x%04X\n", __FUNCTION__,
curcpu, fp->index, rx_bd_cons, rx_bd_prod, rx_cq_cons, rx_cq_prod);
@@ -15561,12 +15089,11 @@ bxe_rxeof_next_cqe:
BUS_SPACE_BARRIER_READ);
fp->rx_pkts += rx_pkts;
- fp->rx_calls++;
DBEXIT(BXE_EXTREME_RECV);
}
/*
- * Handles transmit completion interrupt events.
+ * Processes transmit completions.
*
* Returns:
* Nothing.
@@ -15577,92 +15104,60 @@ bxe_txeof(struct bxe_fastpath *fp)
struct bxe_softc *sc;
struct ifnet *ifp;
struct eth_tx_start_bd *txbd;
- uint16_t hw_pkt_cons, sw_pkt_cons, sw_tx_bd_cons, sw_tx_chain_cons;
- uint16_t pkt_cons, nbds;
+ uint16_t hw_pkt_cons, sw_pkt_cons, sw_tx_bd_cons;
+ uint16_t bd_index, pkt_index, nbds;
int i;
sc = fp->sc;
ifp = sc->bxe_ifp;
DBENTER(BXE_EXTREME_SEND);
- DBPRINT(sc, BXE_EXTREME_SEND, "%s(): Servicing fp[%d]\n",
- __FUNCTION__, fp->index);
/* Get the hardware's view of the TX packet consumer index. */
- hw_pkt_cons = le16toh(*fp->tx_cons_sb);
+ hw_pkt_cons = le16toh(*fp->tx_pkt_cons_sb);
sw_pkt_cons = fp->tx_pkt_cons;
sw_tx_bd_cons = fp->tx_bd_cons;
/* Cycle through any completed TX chain page entries. */
while (sw_pkt_cons != hw_pkt_cons) {
- txbd = NULL;
- sw_tx_chain_cons = TX_BD(sw_tx_bd_cons);
- pkt_cons = TX_BD(sw_pkt_cons);
+ bd_index = TX_BD(sw_tx_bd_cons);
+ pkt_index = TX_BD(sw_pkt_cons);
-#ifdef BXE_DEBUG
- if (sw_tx_chain_cons > MAX_TX_BD) {
- BXE_PRINTF(
- "%s(): TX chain consumer out of range! 0x%04X > 0x%04X\n",
- __FUNCTION__, sw_tx_chain_cons, (int)MAX_TX_BD);
- bxe_breakpoint(sc);
- }
-#endif
-
- txbd =
-&fp->tx_bd_chain[TX_PAGE(sw_tx_chain_cons)][TX_IDX(sw_tx_chain_cons)].start_bd;
-
-#ifdef BXE_DEBUG
- if (txbd == NULL) {
- BXE_PRINTF("%s(): Unexpected NULL tx_bd[0x%04X]!\n",
- __FUNCTION__, sw_tx_chain_cons);
- bxe_breakpoint(sc);
- }
-#endif
-
- /*
- * Find the number of BD's that were used in the completed pkt.
- */
+ txbd = &fp->tx_chain[bd_index].start_bd;
nbds = txbd->nbd;
- /*
- * Free the ext mbuf cluster from the mbuf of the completed
- * frame.
- */
- if (__predict_true(fp->tx_mbuf_ptr[pkt_cons] != NULL)) {
- /* Unmap it from the mbuf. */
+ /* Free the completed frame's mbuf. */
+ if (__predict_true(fp->tx_mbuf_ptr[pkt_index] != NULL)) {
+ /* Unload the mbuf's DMA mapping. */
bus_dmamap_unload(fp->tx_mbuf_tag,
- fp->tx_mbuf_map[pkt_cons]);
+ fp->tx_mbuf_map[pkt_index]);
- /* Return the mbuf to the stack. */
- DBRUN(fp->tx_mbuf_alloc--);
- m_freem(fp->tx_mbuf_ptr[pkt_cons]);
- fp->tx_mbuf_ptr[pkt_cons] = NULL;
+ /* Return the mbuf to the system. */
+ m_freem(fp->tx_mbuf_ptr[pkt_index]);
+ fp->tx_mbuf_alloc--;
+ fp->tx_mbuf_ptr[pkt_index] = NULL;
fp->opackets++;
} else {
fp->tx_chain_lost_mbuf++;
}
- /* Skip over the remaining used buffer descriptors. */
- fp->used_tx_bd -= nbds;
+ /* Update the packet consumer index. */
+ sw_pkt_cons++;
+ /* Skip over the remaining used buffer descriptors. */
+ fp->tx_bd_used -= nbds;
for (i = 0; i < nbds; i++)
sw_tx_bd_cons = NEXT_TX_BD(sw_tx_bd_cons);
- /* Increment the software copy of packet consumer index */
- sw_pkt_cons++;
-
- /*
- * Refresh the hw packet consumer index to see if there's
- * new work.
- */
- hw_pkt_cons = le16toh(*fp->tx_cons_sb);
+ /* Check for new work since we started. */
+ hw_pkt_cons = le16toh(*fp->tx_pkt_cons_sb);
rmb();
}
/* Enable new transmits if we've made enough room. */
- if (fp->used_tx_bd < BXE_TX_CLEANUP_THRESHOLD) {
+ if (fp->tx_bd_used < BXE_TX_CLEANUP_THRESHOLD) {
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- if (fp->used_tx_bd == 0) {
+ if (fp->tx_bd_used == 0) {
/*
* Clear the watchdog timer if we've emptied
* the TX chain.
@@ -15684,78 +15179,6 @@ bxe_txeof(struct bxe_fastpath *fp)
}
/*
- * Encapsulate an mbuf cluster into the rx_bd.
- *
- * This routine will map an mbuf cluster into 1 rx_bd
- *
- * Returns:
- * 0 for success, positive value for failure.
- */
-static int
-bxe_get_buf(struct bxe_fastpath *fp, struct mbuf *m, uint16_t prod)
-{
- struct bxe_softc *sc;
- bus_dma_segment_t seg;
- struct mbuf *m_new;
- struct eth_rx_bd *rx_bd;
- int rc;
-
- sc = fp->sc;
- m_new = NULL;
- rc = 0;
-
- DBENTER(BXE_INSANE_LOAD | BXE_INSANE_RESET | BXE_INSANE_RECV);
-
- /* Make sure the inputs are valid. */
- DBRUNIF((prod > MAX_RX_BD),
- BXE_PRINTF("%s(): RX producer out of range: 0x%04X > 0x%04X\n",
- __FUNCTION__, prod, (uint16_t) MAX_RX_BD));
-
- /* Check whether this is a new mbuf allocation. */
- if (m == NULL) {
- if ((m_new = bxe_alloc_mbuf(fp, sc->mbuf_alloc_size)) == NULL) {
- rc = ENOBUFS;
- goto bxe_get_buf_exit;
- }
-
- DBRUN(fp->rx_mbuf_alloc++);
- } else {
- /* Reuse the existing mbuf. */
- m_new = m;
- m_new->m_pkthdr.len = m_new->m_len = sc->mbuf_alloc_size;
- }
-
- /* Do some additional sanity checks on the mbuf. */
- DBRUN(m_sanity(m_new, FALSE));
-
- rc = bxe_map_mbuf(fp, m_new, fp->rx_mbuf_tag,
- fp->rx_mbuf_map[prod], &seg);
-
- if (__predict_false(rc)) {
- DBRUN(fp->rx_mbuf_alloc--);
- rc = ENOBUFS;
- goto bxe_get_buf_exit;
- }
-
- /* Setup the rx_bd for the first segment. */
- rx_bd = &fp->rx_bd_chain[RX_PAGE(prod)][RX_IDX(prod)];
- rx_bd->addr_lo = htole32(U64_LO(seg.ds_addr));
- rx_bd->addr_hi = htole32(U64_HI(seg.ds_addr));
-
- /* Save the mbuf and update our counter. */
- fp->rx_mbuf_ptr[prod] = m_new;
-
- DBRUN(fp->free_rx_bd--);
- DBRUNIF((fp->free_rx_bd > USABLE_RX_BD),
- DBPRINT(sc, BXE_FATAL, "%s(): fp[%d] - Too many free rx_bd's "
- "(0x%04X)!\n", __FUNCTION__, fp->index, fp->free_rx_bd));
-
-bxe_get_buf_exit:
- DBEXIT(BXE_INSANE_LOAD | BXE_INSANE_RESET | BXE_INSANE_RECV);
- return (rc);
-}
-
-/*
* Transmit timeout handler.
*
* Returns:
@@ -15764,9 +15187,10 @@ bxe_get_buf_exit:
static int
bxe_watchdog(struct bxe_fastpath *fp)
{
- struct bxe_softc *sc = fp->sc;
+ struct bxe_softc *sc;
int rc = 0;
+ sc = fp->sc;
DBENTER(BXE_INSANE_SEND);
BXE_FP_LOCK(fp);
@@ -15795,39 +15219,10 @@ bxe_watchdog(struct bxe_fastpath *fp)
bxe_watchdog_exit:
DBEXIT(BXE_INSANE_SEND);
- return(rc);
-}
-
-
-/*
- * Change the MTU size for the port. The MTU should be validated before
- * calling this routine.
- *
- * Returns:
- * 0 = Success, !0 = Failure.
- */
-static int
-bxe_change_mtu(struct bxe_softc *sc, int if_drv_running)
-{
- struct ifnet *ifp;
- int rc;
-
- BXE_CORE_LOCK_ASSERT(sc);
-
- rc = 0;
- ifp = sc->bxe_ifp;
- sc->bxe_ifp->if_mtu = ifp->if_mtu;
- if (if_drv_running) {
- DBPRINT(sc, BXE_INFO_IOCTL, "%s(): Changing the MTU to %d.\n",
- __FUNCTION__, sc->bxe_ifp->if_mtu);
-
- bxe_stop_locked(sc, UNLOAD_NORMAL);
- bxe_init_locked(sc, LOAD_NORMAL);
- }
-
return (rc);
}
+
/*
* The periodic timer tick routine.
*
@@ -15850,21 +15245,22 @@ bxe_tick(void *xsc)
sc = xsc;
DBENTER(BXE_INSANE_MISC);
+
/* Check for TX timeouts on any fastpath. */
for (i = 0; i < sc->num_queues; i++) {
fp = &sc->fp[i];
+
if (bxe_watchdog(fp) != 0)
break;
}
- BXE_CORE_LOCK(sc);
func = BP_FUNC(sc);
/* Schedule the next tick. */
callout_reset(&sc->bxe_tick_callout, hz, bxe_tick, sc);
#if 0
- if (!BP_NOMCP(sc)) {
+ if (!NOMCP(sc)) {
func = BP_FUNC(sc);
++sc->fw_drv_pulse_wr_seq;
@@ -15894,8 +15290,6 @@ bxe_tick(void *xsc)
if ((sc->state == BXE_STATE_OPEN) || (sc->state == BXE_STATE_DISABLED))
bxe_stats_handle(sc, STATS_EVENT_UPDATE);
-
- BXE_CORE_UNLOCK(sc);
}
#ifdef BXE_DEBUG
@@ -16155,7 +15549,7 @@ bxe_add_sysctls(struct bxe_softc *sc)
device_get_sysctl_ctx(sc->dev);
struct sysctl_oid_list *children =
SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
- struct bxe_eth_stats *estats = &sc->eth_stats;
+ struct bxe_port_stats *estats = &sc->eth_stats;
SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
"estats_total_bytes_received_hi",
@@ -16275,95 +15669,110 @@ bxe_add_sysctls(struct bxe_softc *sc)
namebuf, CTLFLAG_RD, NULL, "Queue Name");
queue_list = SYSCTL_CHILDREN(queue_node);
+ /*
+ * Receive-related fastpath statistics.
+ */
SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
"rx_pkts",
CTLFLAG_RD, &fp->rx_pkts,
"Received packets");
SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
- "tx_pkts",
- CTLFLAG_RD, &fp->tx_pkts,
- "Transmitted packets");
-
- SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
- "mbuf_alloc_failed",
- CTLFLAG_RD, &fp->mbuf_alloc_failed,
- "Mbuf allocation failure count");
+ "rx_tpa_pkts",
+ CTLFLAG_RD, &fp->rx_tpa_pkts,
+ "Received TPA packets");
SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
- "tpa_mbuf_alloc_failed",
- CTLFLAG_RD, &fp->tpa_mbuf_alloc_failed,
- "TPA mbuf allocation failure count");
+ "rx_null_cqe_flags",
+ CTLFLAG_RD, &fp->rx_null_cqe_flags,
+ "CQEs with NULL flags count");
SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
- "mbuf_defrag_attempts",
- CTLFLAG_RD, &fp->mbuf_defrag_attempts,
- "Mbuf defrag attempt count");
+ "rx_soft_errors",
+ CTLFLAG_RD, &fp->rx_soft_errors,
+ "Received frames dropped by driver count");
+ /*
+ * Transmit-related fastpath statistics.
+ */
SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
- "mbuf_defrag_failures",
- CTLFLAG_RD, &fp->mbuf_defrag_failures,
- "Mbuf defrag failure count");
+ "tx_pkts",
+ CTLFLAG_RD, &fp->tx_pkts,
+ "Transmitted packets");
SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
- "mbuf_defrag_successes",
- CTLFLAG_RD, &fp->mbuf_defrag_successes,
- "Mbuf defrag success count");
+ "tx_soft_errors",
+ CTLFLAG_RD, &fp->tx_soft_errors,
+ "Transmit frames dropped by driver count");
SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
- "offload_frames_csum_ip",
- CTLFLAG_RD, &fp->offload_frames_csum_ip,
+ "tx_offload_frames_csum_ip",
+ CTLFLAG_RD, &fp->tx_offload_frames_csum_ip,
"IP checksum offload frame count");
SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
- "offload_frames_csum_tcp",
- CTLFLAG_RD, &fp->offload_frames_csum_tcp,
+ "tx_offload_frames_csum_tcp",
+ CTLFLAG_RD, &fp->tx_offload_frames_csum_tcp,
"TCP checksum offload frame count");
SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
- "offload_frames_csum_udp",
- CTLFLAG_RD, &fp->offload_frames_csum_udp,
+ "tx_offload_frames_csum_udp",
+ CTLFLAG_RD, &fp->tx_offload_frames_csum_udp,
"UDP checksum offload frame count");
SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
- "offload_frames_tso",
- CTLFLAG_RD, &fp->offload_frames_tso,
+ "tx_offload_frames_tso",
+ CTLFLAG_RD, &fp->tx_offload_frames_tso,
"TSO offload frame count");
SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
+ "tx_header_splits",
+ CTLFLAG_RD, &fp->tx_header_splits,
+ "TSO frame header/data split count");
+
+ SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
"tx_encap_failures",
CTLFLAG_RD, &fp->tx_encap_failures,
"TX encapsulation failure count");
SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
- "tx_start_called_on_empty_queue",
- CTLFLAG_RD, &fp->tx_start_called_on_empty_queue,
- "TX start function called on empty "
- "TX queue count");
+ "tx_hw_queue_full",
+ CTLFLAG_RD, &fp->tx_hw_queue_full,
+ "TX H/W queue too full to add a frame count");
SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
- "tx_queue_too_full",
- CTLFLAG_RD, &fp->tx_queue_too_full,
- "TX queue too full to add a TX frame count");
+ "tx_hw_max_queue_depth",
+ CTLFLAG_RD, &fp->tx_hw_max_queue_depth,
+ "TX H/W maximum queue depth count");
SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
- "window_violation_std",
- CTLFLAG_RD, &fp->window_violation_std,
+ "tx_dma_mapping_failure",
+ CTLFLAG_RD, &fp->tx_dma_mapping_failure,
+ "TX DMA mapping failure");
+
+ SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO,
+ "tx_max_drbr_queue_depth",
+ CTLFLAG_RD, &fp->tx_max_drbr_queue_depth,
+ 0, "TX S/W queue maximum depth");
+
+ SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
+ "tx_window_violation_std",
+ CTLFLAG_RD, &fp->tx_window_violation_std,
"Standard frame TX BD window violation count");
SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
- "window_violation_tso",
- CTLFLAG_RD, &fp->window_violation_tso,
+ "tx_window_violation_tso",
+ CTLFLAG_RD, &fp->tx_window_violation_tso,
"TSO frame TX BD window violation count");
SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
- "unsupported_tso_request_ipv6",
- CTLFLAG_RD, &fp->unsupported_tso_request_ipv6,
+ "tx_unsupported_tso_request_ipv6",
+ CTLFLAG_RD, &fp->tx_unsupported_tso_request_ipv6,
"TSO frames with unsupported IPv6 protocol count");
SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
- "unsupported_tso_request_not_tcp",
- CTLFLAG_RD, &fp->unsupported_tso_request_not_tcp,
+ "tx_unsupported_tso_request_not_tcp",
+ CTLFLAG_RD, &fp->tx_unsupported_tso_request_not_tcp,
"TSO frames with unsupported protocol count");
SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
@@ -16371,17 +15780,58 @@ bxe_add_sysctls(struct bxe_softc *sc)
CTLFLAG_RD, &fp->tx_chain_lost_mbuf,
"Mbufs lost on TX chain count");
- SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO,
- "max_drbr_queue_depth",
- CTLFLAG_RD, &fp->max_drbr_queue_depth,
- 0, "Driver queue maximum dpeth");
+ SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
+ "tx_frame_deferred",
+ CTLFLAG_RD, &fp->tx_frame_deferred,
+ "TX frame deferred from H/W queue to S/W queue count");
-#ifdef BXE_DEBUG
SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
- "null_cqe_flags",
- CTLFLAG_RD, &fp->null_cqe_flags,
- "CQEs with NULL flags count");
-#endif
+ "tx_queue_xoff",
+ CTLFLAG_RD, &fp->tx_queue_xoff,
+ "TX queue full count");
+
+ /*
+ * Memory-related fastpath statistics.
+ */
+ SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
+ "mbuf_rx_bd_alloc_failed",
+ CTLFLAG_RD, &fp->mbuf_rx_bd_alloc_failed,
+ "RX BD mbuf allocation failure count");
+
+ SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
+ "mbuf_rx_bd_mapping_failed",
+ CTLFLAG_RD, &fp->mbuf_rx_bd_mapping_failed,
+ "RX BD mbuf mapping failure count");
+
+ SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
+ "mbuf_tpa_alloc_failed",
+ CTLFLAG_RD, &fp->mbuf_tpa_alloc_failed,
+ "TPA mbuf allocation failure count");
+
+ SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
+ "mbuf_tpa_mapping_failed",
+ CTLFLAG_RD, &fp->mbuf_tpa_mapping_failed,
+ "TPA mbuf mapping failure count");
+
+ SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
+ "mbuf_sge_alloc_failed",
+ CTLFLAG_RD, &fp->mbuf_sge_alloc_failed,
+ "SGE mbuf allocation failure count");
+
+ SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
+ "mbuf_sge_mapping_failed",
+ CTLFLAG_RD, &fp->mbuf_sge_mapping_failed,
+ "SGE mbuf mapping failure count");
+
+ SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
+ "mbuf_defrag_attempts",
+ CTLFLAG_RD, &fp->mbuf_defrag_attempts,
+ "Mbuf defrag attempt count");
+
+ SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO,
+ "mbuf_defrag_failures",
+ CTLFLAG_RD, &fp->mbuf_defrag_failures,
+ "Mbuf defrag failure count");
}
} while (0);
@@ -16560,13 +16010,13 @@ bxe_dump_debug_reg_wread(struct bxe_softc *sc, uint32_t *index)
pwreg_addrs = NULL;
/* Read different registers for different controllers. */
- if (CHIP_IS_E1H(sc)) {
- wregs_count = wregs_count_e1h;
- pwreg_addrs = &wreg_addrs_e1h[0];
- } else {
- wregs_count = wregs_count_e1;
- pwreg_addrs = &wreg_addrs_e1[0];
- }
+ if (CHIP_IS_E1H(sc)) {
+ wregs_count = wregs_count_e1h;
+ pwreg_addrs = &wreg_addrs_e1h[0];
+ } else {
+ wregs_count = wregs_count_e1;
+ pwreg_addrs = &wreg_addrs_e1[0];
+ }
for (reg_addrs_index = 0; reg_addrs_index < wregs_count;
reg_addrs_index++) {
@@ -16646,22 +16096,23 @@ bxe_grcdump(struct bxe_softc *sc, int log)
* Returns:
* Nothing.
*/
-static __attribute__ ((noinline))
+static __noinline
void bxe_validate_rx_packet(struct bxe_fastpath *fp, uint16_t comp_cons,
union eth_rx_cqe *cqe, struct mbuf *m)
{
struct bxe_softc *sc;
+ int error;
sc = fp->sc;
- /* Check that the mbuf is sane. */
- m_sanity(m, FALSE);
- /* Make sure the packet has a valid length. */
- if ((m->m_len < ETHER_HDR_LEN) |
- (m->m_len > ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)) {
+ /* Check that the mbuf is sane. */
+ error = m_sanity(m, FALSE);
+ /* Make sure the packet has a valid length. */
+ if (error != 1 || ((m->m_len < ETHER_HDR_LEN) ||
+ (m->m_len > ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD))) {
m_print(m, 128);
bxe_dump_enet(sc, m);
bxe_dump_cqe(fp, comp_cons, cqe);
}
}
@@ -16673,7 +16124,7 @@ void bxe_validate_rx_packet(struct bxe_fastpath *fp, uint16_t comp_cons,
* Returns:
* Nothing.
*/
-static __attribute__ ((noinline))
+static __noinline
void bxe_dump_enet(struct bxe_softc *sc, struct mbuf *m)
{
struct ether_vlan_header *eh;
@@ -16803,7 +16254,7 @@ bxe_dump_mbuf_data(struct mbuf *m, int len)
* Returns:
* Nothing.
*/
-static __attribute__ ((noinline))
+static __noinline
void bxe_dump_mbuf(struct bxe_softc *sc, struct mbuf *m)
{
if (m == NULL) {
@@ -16868,17 +16319,19 @@ void bxe_dump_mbuf(struct bxe_softc *sc, struct mbuf *m)
* Returns:
* Nothing.
*/
-static __attribute__ ((noinline))
+static __noinline
void bxe_dump_rxbd(struct bxe_fastpath *fp, int idx,
struct eth_rx_bd *rx_bd)
{
- struct bxe_softc *sc = fp->sc;
+ struct bxe_softc *sc;
+
+ sc = fp->sc;
/* Check if index out of range. */
if (idx > MAX_RX_BD) {
BXE_PRINTF("fp[%02d].rx_bd[0x%04X] XX: Invalid rx_bd index!\n",
fp->index, idx);
- } else if ((idx & RX_DESC_MASK) >= USABLE_RX_BD_PER_PAGE) {
+ } else if ((idx & RX_BD_PER_PAGE_MASK) >= USABLE_RX_BD_PER_PAGE) {
/* RX Chain page pointer. */
BXE_PRINTF("fp[%02d].rx_bd[0x%04X] NP: haddr=0x%08X:%08X\n",
fp->index, idx, rx_bd->addr_hi, rx_bd->addr_lo);
@@ -16894,11 +16347,13 @@ void bxe_dump_rxbd(struct bxe_fastpath *fp, int idx,
* Returns:
* Nothing.
*/
-static __attribute__ ((noinline))
+static __noinline
void bxe_dump_cqe(struct bxe_fastpath *fp, int idx,
union eth_rx_cqe *cqe)
{
- struct bxe_softc *sc = fp->sc;
+ struct bxe_softc *sc;
+
+ sc = fp->sc;
if (idx > MAX_RCQ_ENTRIES) {
/* Index out of range. */
@@ -16931,26 +16386,28 @@ void bxe_dump_cqe(struct bxe_fastpath *fp, int idx,
* Returns:
* Nothing.
*/
-static __attribute__ ((noinline))
+static __noinline
void bxe_dump_tx_parsing_bd(struct bxe_fastpath *fp, int idx,
struct eth_tx_parse_bd *p_bd)
{
- struct bxe_softc *sc = fp->sc;
-
- if (idx > MAX_TX_BD){
- /* Index out of range. */
- BXE_PRINTF("fp[%02d].tx_bd[0x%04X] XX: Invalid tx_bd index!\n",
- fp->index, idx);
- } else {
- BXE_PRINTF("fp[%02d]:tx_bd[0x%04X] PB: global_data=0x%b, "
- "tcp_flags=0x%b, ip_hlen=%04d, total_hlen=%04d, "
- "tcp_pseudo_csum=0x%04X, lso_mss=0x%04X, ip_id=0x%04X, "
- "tcp_send_seq=0x%08X\n", fp->index, idx,
- p_bd->global_data, BXE_ETH_TX_PARSE_BD_GLOBAL_DATA_PRINTFB,
- p_bd->tcp_flags, BXE_ETH_TX_PARSE_BD_TCP_FLAGS_PRINTFB,
- p_bd->ip_hlen, p_bd->total_hlen, p_bd->tcp_pseudo_csum,
- p_bd->lso_mss, p_bd->ip_id, p_bd->tcp_send_seq);
- }
+ struct bxe_softc *sc;
+
+ sc = fp->sc;
+
+ if (idx > MAX_TX_BD){
+ /* Index out of range. */
+ BXE_PRINTF("fp[%02d].tx_bd[0x%04X] XX: Invalid tx_bd index!\n",
+ fp->index, idx);
+ } else {
+ BXE_PRINTF("fp[%02d]:tx_bd[0x%04X] PB: global_data=0x%b, "
+ "tcp_flags=0x%b, ip_hlen=%04d, total_hlen=%04d, "
+ "tcp_pseudo_csum=0x%04X, lso_mss=0x%04X, ip_id=0x%04X, "
+ "tcp_send_seq=0x%08X\n", fp->index, idx,
+ p_bd->global_data, BXE_ETH_TX_PARSE_BD_GLOBAL_DATA_PRINTFB,
+ p_bd->tcp_flags, BXE_ETH_TX_PARSE_BD_TCP_FLAGS_PRINTFB,
+ p_bd->ip_hlen, p_bd->total_hlen, p_bd->tcp_pseudo_csum,
+ p_bd->lso_mss, p_bd->ip_id, p_bd->tcp_send_seq);
+ }
}
/*
@@ -16959,11 +16416,13 @@ void bxe_dump_tx_parsing_bd(struct bxe_fastpath *fp, int idx,
* Returns:
* Nothing.
*/
-static __attribute__ ((noinline))
+static __noinline
void bxe_dump_txbd(struct bxe_fastpath *fp, int idx,
union eth_tx_bd_types *tx_bd)
{
- struct bxe_softc *sc = fp->sc;
+ struct bxe_softc *sc;
+
+ sc = fp->sc;
if (idx > MAX_TX_BD){
/* Index out of range. */
@@ -17002,24 +16461,26 @@ void bxe_dump_txbd(struct bxe_fastpath *fp, int idx,
* Returns:
* Nothing.
*/
-static __attribute__ ((noinline))
+static __noinline
void bxe_dump_tx_chain(struct bxe_fastpath * fp, int tx_bd_prod, int count)
{
- struct bxe_softc *sc = fp->sc;
+ struct bxe_softc *sc;
union eth_tx_bd_types *tx_bd;
uint32_t val_hi, val_lo;
int i, parsing_bd = 0;
+ sc = fp->sc;
+
/* First some info about the tx_bd chain structure. */
BXE_PRINTF(
"----------------------------"
" tx_bd chain "
"----------------------------\n");
- val_hi = U64_HI(fp->tx_bd_chain_paddr);
- val_lo = U64_LO(fp->tx_bd_chain_paddr);
+ val_hi = U64_HI(fp->tx_dma.paddr);
+ val_lo = U64_LO(fp->tx_dma.paddr);
BXE_PRINTF(
- "0x%08X:%08X - (fp[%02d]->tx_bd_chain_paddr) TX Chain physical address\n",
+ "0x%08X:%08X - (fp[%02d]->tx_dma.paddr) TX Chain physical address\n",
val_hi, val_lo, fp->index);
BXE_PRINTF(
"page size = 0x%08X, tx chain pages = 0x%08X\n",
@@ -17037,12 +16498,11 @@ void bxe_dump_tx_chain(struct bxe_fastpath * fp, int tx_bd_prod, int count)
/* Now print out the tx_bd's themselves. */
for (i = 0; i < count; i++) {
- tx_bd =
- &fp->tx_bd_chain[TX_PAGE(tx_bd_prod)][TX_IDX(tx_bd_prod)];
+ tx_bd = &fp->tx_chain[tx_bd_prod];
if (parsing_bd) {
struct eth_tx_parse_bd *p_bd;
p_bd = (struct eth_tx_parse_bd *)
- &fp->tx_bd_chain[TX_PAGE(tx_bd_prod)][TX_IDX(tx_bd_prod)].parse_bd;
+ &fp->tx_chain[tx_bd_prod].parse_bd;
bxe_dump_tx_parsing_bd(fp, tx_bd_prod, p_bd);
parsing_bd = 0;
} else {
@@ -17071,23 +16531,23 @@ void bxe_dump_tx_chain(struct bxe_fastpath * fp, int tx_bd_prod, int count)
* Returns:
* Nothing.
*/
-static __attribute__ ((noinline))
+static __noinline
void bxe_dump_rx_cq_chain(struct bxe_fastpath *fp, int rx_cq_prod, int count)
{
- struct bxe_softc *sc = fp->sc;
+ struct bxe_softc *sc;
union eth_rx_cqe *cqe;
int i;
+ sc = fp->sc;
+
/* First some info about the tx_bd chain structure. */
BXE_PRINTF(
"----------------------------"
" CQE Chain "
"----------------------------\n");
- for (i=0; i< NUM_RCQ_PAGES; i++) {
- BXE_PRINTF("fp[%02d]->rx_cq_chain_paddr[%d] = 0x%jX\n",
- fp->index, i, (uintmax_t) fp->rx_cq_chain_paddr[i]);
- }
+ BXE_PRINTF("fp[%02d]->rcq_dma.paddr = 0x%jX\n",
+ fp->index, (uintmax_t) fp->rcq_dma.paddr);
BXE_PRINTF("page size = 0x%08X, cq chain pages "
" = 0x%08X\n",
@@ -17107,9 +16567,10 @@ void bxe_dump_rx_cq_chain(struct bxe_fastpath *fp, int rx_cq_prod, int count)
"----------------------------\n");
for (i = 0; i < count; i++) {
- cqe = (union eth_rx_cqe *)&fp->rx_cq_chain
- [RCQ_PAGE(rx_cq_prod)][RCQ_IDX(rx_cq_prod)];
+ cqe = (union eth_rx_cqe *)&fp->rcq_chain[rx_cq_prod];
+
bxe_dump_cqe(fp, rx_cq_prod, cqe);
+
/* Don't skip next page pointers. */
rx_cq_prod = ((rx_cq_prod + 1) & MAX_RCQ_ENTRIES);
}
@@ -17126,8 +16587,8 @@ void bxe_dump_rx_cq_chain(struct bxe_fastpath *fp, int rx_cq_prod, int count)
* Returns:
* Nothing.
*/
-static __attribute__ ((noinline))
-void bxe_dump_rx_bd_chain(struct bxe_fastpath *fp, int rx_prod, int count)
+static __noinline
+void bxe_dump_rx_bd_chain(struct bxe_fastpath *fp, int prod, int count)
{
struct bxe_softc *sc;
struct eth_rx_bd *rx_bd;
@@ -17135,6 +16596,7 @@ void bxe_dump_rx_bd_chain(struct bxe_fastpath *fp, int rx_prod, int count)
int i;
sc = fp->sc;
+
/* First some info about the tx_bd chain structure. */
BXE_PRINTF(
"----------------------------"
@@ -17144,8 +16606,8 @@ void bxe_dump_rx_bd_chain(struct bxe_fastpath *fp, int rx_prod, int count)
BXE_PRINTF(
"----- RX_BD Chain -----\n");
- BXE_PRINTF("fp[%02d]->rx_cq_chain_paddr[0] = 0x%jX\n",
- fp->index, (uintmax_t) fp->rx_cq_chain_paddr[0]);
+ BXE_PRINTF("fp[%02d]->rx_dma.paddr = 0x%jX\n",
+ fp->index, (uintmax_t) fp->rx_dma.paddr);
BXE_PRINTF(
"page size = 0x%08X, rx chain pages = 0x%08X\n",
@@ -17166,15 +16628,14 @@ void bxe_dump_rx_bd_chain(struct bxe_fastpath *fp, int rx_prod, int count)
/* Now print out the rx_bd's themselves. */
for (i = 0; i < count; i++) {
- rx_bd = (struct eth_rx_bd *)
- (&fp->rx_bd_chain[RX_PAGE(rx_prod)][RX_IDX(rx_prod)]);
- m = sc->fp->rx_mbuf_ptr[rx_prod];
+ rx_bd = (struct eth_rx_bd *) (&fp->rx_chain[prod]);
+ m = sc->fp->rx_mbuf_ptr[prod];
- bxe_dump_rxbd(fp, rx_prod, rx_bd);
+ bxe_dump_rxbd(fp, prod, rx_bd);
bxe_dump_mbuf(sc, m);
/* Don't skip next page pointers. */
- rx_prod = ((rx_prod + 1) & MAX_RX_BD);
+ prod = ((prod + 1) & MAX_RX_BD);
}
BXE_PRINTF(
@@ -17189,7 +16650,7 @@ void bxe_dump_rx_bd_chain(struct bxe_fastpath *fp, int rx_prod, int count)
* Returns:
* Nothing.
*/
-static __attribute__ ((noinline))
+static __noinline
void bxe_dump_hw_state(struct bxe_softc *sc)
{
int i;
@@ -17216,7 +16677,7 @@ void bxe_dump_hw_state(struct bxe_softc *sc)
* Returns:
* Nothing.
*/
-static __attribute__ ((noinline))
+static __noinline
void bxe_dump_rx_mbuf_chain(struct bxe_softc *sc, int chain_prod, int count)
{
struct mbuf *m;
@@ -17246,7 +16707,7 @@ void bxe_dump_rx_mbuf_chain(struct bxe_softc *sc, int chain_prod, int count)
* Returns:
* Nothing.
*/
-static __attribute__ ((noinline))
+static __noinline
void bxe_dump_tx_mbuf_chain(struct bxe_softc *sc, int chain_prod, int count)
{
struct mbuf *m;
@@ -17276,15 +16737,15 @@ void bxe_dump_tx_mbuf_chain(struct bxe_softc *sc, int chain_prod, int count)
* Returns:
* Nothing.
*/
-static __attribute__ ((noinline))
+static __noinline
void bxe_dump_status_block(struct bxe_softc *sc)
{
struct bxe_fastpath *fp;
- struct host_def_status_block *dsb;
+ struct host_def_status_block *def_sb;
struct host_status_block *fpsb;
int i;
- dsb = sc->def_status_block;
+ def_sb = sc->def_sb;
BXE_PRINTF(
"----------------------------"
" Status Block "
@@ -17359,92 +16820,92 @@ void bxe_dump_status_block(struct bxe_softc *sc)
/* Print attention information. */
BXE_PRINTF(
" 0x%02X - Status Block ID\n",
- dsb->atten_status_block.status_block_id);
+ def_sb->atten_status_block.status_block_id);
BXE_PRINTF(
"0x%08X - Attn Bits\n",
- dsb->atten_status_block.attn_bits);
+ def_sb->atten_status_block.attn_bits);
BXE_PRINTF(
"0x%08X - Attn Bits Ack\n",
- dsb->atten_status_block.attn_bits_ack);
+ def_sb->atten_status_block.attn_bits_ack);
BXE_PRINTF(
" 0x%04X - Attn Block Index\n",
- le16toh(dsb->atten_status_block.attn_bits_index));
+ le16toh(def_sb->atten_status_block.attn_bits_index));
/* Print the USTORM fields (HC_USTORM_DEF_SB_NUM_INDICES). */
BXE_PRINTF(
" 0x%02X - USTORM Status Block ID\n",
- dsb->u_def_status_block.status_block_id);
+ def_sb->u_def_status_block.status_block_id);
BXE_PRINTF(
" 0x%04X - USTORM Status Block Index\n",
- le16toh(dsb->u_def_status_block.status_block_index));
+ le16toh(def_sb->u_def_status_block.status_block_index));
BXE_PRINTF(
" 0x%04X - USTORM [ETH_RDMA_RX_CQ_CONS]\n",
- le16toh(dsb->u_def_status_block.index_values[HC_INDEX_DEF_U_ETH_RDMA_RX_CQ_CONS]));
+ le16toh(def_sb->u_def_status_block.index_values[HC_INDEX_DEF_U_ETH_RDMA_RX_CQ_CONS]));
BXE_PRINTF(
" 0x%04X - USTORM [ETH_ISCSI_RX_CQ_CONS]\n",
- le16toh(dsb->u_def_status_block.index_values[HC_INDEX_DEF_U_ETH_ISCSI_RX_CQ_CONS]));
+ le16toh(def_sb->u_def_status_block.index_values[HC_INDEX_DEF_U_ETH_ISCSI_RX_CQ_CONS]));
BXE_PRINTF(
" 0x%04X - USTORM [ETH_RDMA_RX_BD_CONS]\n",
- le16toh(dsb->u_def_status_block.index_values[HC_INDEX_DEF_U_ETH_RDMA_RX_BD_CONS]));
+ le16toh(def_sb->u_def_status_block.index_values[HC_INDEX_DEF_U_ETH_RDMA_RX_BD_CONS]));
BXE_PRINTF(
" 0x%04X - USTORM [ETH_ISCSI_RX_BD_CONS]\n",
- le16toh(dsb->u_def_status_block.index_values[HC_INDEX_DEF_U_ETH_ISCSI_RX_BD_CONS]));
+ le16toh(def_sb->u_def_status_block.index_values[HC_INDEX_DEF_U_ETH_ISCSI_RX_BD_CONS]));
/* Print the CSTORM fields (HC_CSTORM_DEF_SB_NUM_INDICES). */
BXE_PRINTF(
" 0x%02X - CSTORM Status Block ID\n",
- dsb->c_def_status_block.status_block_id);
+ def_sb->c_def_status_block.status_block_id);
BXE_PRINTF(
" 0x%04X - CSTORM Status Block Index\n",
- le16toh(dsb->c_def_status_block.status_block_index));
+ le16toh(def_sb->c_def_status_block.status_block_index));
BXE_PRINTF(
" 0x%04X - CSTORM [RDMA_EQ_CONS]\n",
- le16toh(dsb->c_def_status_block.index_values[HC_INDEX_DEF_C_RDMA_EQ_CONS]));
+ le16toh(def_sb->c_def_status_block.index_values[HC_INDEX_DEF_C_RDMA_EQ_CONS]));
BXE_PRINTF(
" 0x%04X - CSTORM [RDMA_NAL_PROD]\n",
- le16toh(dsb->c_def_status_block.index_values[HC_INDEX_DEF_C_RDMA_NAL_PROD]));
+ le16toh(def_sb->c_def_status_block.index_values[HC_INDEX_DEF_C_RDMA_NAL_PROD]));
BXE_PRINTF(
" 0x%04X - CSTORM [ETH_FW_TX_CQ_CONS]\n",
- le16toh(dsb->c_def_status_block.index_values[HC_INDEX_DEF_C_ETH_FW_TX_CQ_CONS]));
+ le16toh(def_sb->c_def_status_block.index_values[HC_INDEX_DEF_C_ETH_FW_TX_CQ_CONS]));
BXE_PRINTF(
" 0x%04X - CSTORM [ETH_SLOW_PATH]\n",
- le16toh(dsb->c_def_status_block.index_values[HC_INDEX_DEF_C_ETH_SLOW_PATH]));
+ le16toh(def_sb->c_def_status_block.index_values[HC_INDEX_DEF_C_ETH_SLOW_PATH]));
BXE_PRINTF(
" 0x%04X - CSTORM [ETH_RDMA_CQ_CONS]\n",
- le16toh(dsb->c_def_status_block.index_values[HC_INDEX_DEF_C_ETH_RDMA_CQ_CONS]));
+ le16toh(def_sb->c_def_status_block.index_values[HC_INDEX_DEF_C_ETH_RDMA_CQ_CONS]));
BXE_PRINTF(
" 0x%04X - CSTORM [ETH_ISCSI_CQ_CONS]\n",
- le16toh(dsb->c_def_status_block.index_values[HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS]));
+ le16toh(def_sb->c_def_status_block.index_values[HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS]));
BXE_PRINTF(
" 0x%04X - CSTORM [UNUSED]\n",
- le16toh(dsb->c_def_status_block.index_values[6]));
+ le16toh(def_sb->c_def_status_block.index_values[6]));
BXE_PRINTF(
" 0x%04X - CSTORM [UNUSED]\n",
- le16toh(dsb->c_def_status_block.index_values[7]));
+ le16toh(def_sb->c_def_status_block.index_values[7]));
/* Print the TSTORM fields (HC_TSTORM_DEF_SB_NUM_INDICES). */
BXE_PRINTF(
" 0x%02X - TSTORM Status Block ID\n",
- dsb->t_def_status_block.status_block_id);
+ def_sb->t_def_status_block.status_block_id);
BXE_PRINTF(
" 0x%04X - TSTORM Status Block Index\n",
- le16toh(dsb->t_def_status_block.status_block_index));
+ le16toh(def_sb->t_def_status_block.status_block_index));
for (i = 0; i < HC_TSTORM_DEF_SB_NUM_INDICES; i++)
BXE_PRINTF(
" 0x%04X - TSTORM [UNUSED]\n",
- le16toh(dsb->t_def_status_block.index_values[i]));
+ le16toh(def_sb->t_def_status_block.index_values[i]));
/* Print the XSTORM fields (HC_XSTORM_DEF_SB_NUM_INDICES). */
BXE_PRINTF(
" 0x%02X - XSTORM Status Block ID\n",
- dsb->x_def_status_block.status_block_id);
+ def_sb->x_def_status_block.status_block_id);
BXE_PRINTF(
" 0x%04X - XSTORM Status Block Index\n",
- le16toh(dsb->x_def_status_block.status_block_index));
+ le16toh(def_sb->x_def_status_block.status_block_index));
for (i = 0; i < HC_XSTORM_DEF_SB_NUM_INDICES; i++)
BXE_PRINTF(
" 0x%04X - XSTORM [UNUSED]\n",
- le16toh(dsb->x_def_status_block.index_values[i]));
+ le16toh(def_sb->x_def_status_block.index_values[i]));
BXE_PRINTF(
"----------------------------"
@@ -17459,7 +16920,7 @@ void bxe_dump_status_block(struct bxe_softc *sc)
* Returns:
* Nothing.
*/
-static __attribute__ ((noinline))
+static __noinline
void bxe_dump_stats_block(struct bxe_softc *sc)
{
@@ -17471,7 +16932,7 @@ void bxe_dump_stats_block(struct bxe_softc *sc)
* Returns:
* Nothing.
*/
-static __attribute__ ((noinline))
+static __noinline
void bxe_dump_fp_state(struct bxe_fastpath *fp)
{
struct bxe_softc *sc;
@@ -17501,9 +16962,6 @@ void bxe_dump_fp_state(struct bxe_fastpath *fp)
/* Receive state. */
BXE_PRINTF(
- " 0x%04X - (fp[%02d]->free_rx_bd)\n",
- fp->free_rx_bd, fp->index);
- BXE_PRINTF(
" 0x%04X - (fp[%02d]->rx_bd_prod)\n",
fp->rx_bd_prod, fp->index);
BXE_PRINTF(
@@ -17525,13 +16983,13 @@ void bxe_dump_fp_state(struct bxe_fastpath *fp)
" %16lu - (fp[%02d]->ipackets)\n",
fp->ipackets, fp->index);
BXE_PRINTF(
- " %16lu - (fp[%02d]->soft_rx_errors)\n",
- fp->soft_rx_errors, fp->index);
+ " %16lu - (fp[%02d]->rx_soft_errors)\n",
+ fp->rx_soft_errors, fp->index);
/* Transmit state. */
BXE_PRINTF(
- " 0x%04X - (fp[%02d]->used_tx_bd)\n",
- fp->used_tx_bd, fp->index);
+ " 0x%04X - (fp[%02d]->tx_bd_used)\n",
+ fp->tx_bd_used, fp->index);
BXE_PRINTF(
" 0x%04X - (fp[%02d]->tx_bd_prod)\n",
fp->tx_bd_prod, fp->index);
@@ -17554,14 +17012,14 @@ void bxe_dump_fp_state(struct bxe_fastpath *fp)
" %16lu - (fp[%02d]->opackets)\n",
fp->opackets, fp->index);
BXE_PRINTF(
- " %16lu - (fp[%02d]->soft_tx_errors)\n",
- fp->soft_tx_errors, fp->index);
+ " %16lu - (fp[%02d]->tx_soft_errors)\n",
+ fp->tx_soft_errors, fp->index);
/* TPA state. */
if (TPA_ENABLED(sc)) {
BXE_PRINTF(
- " %16lu - (fp[%02d]->tpa_pkts)\n",
- fp->tpa_pkts, fp->index);
+ " %16lu - (fp[%02d]->rx_tpa_pkts)\n",
+ fp->rx_tpa_pkts, fp->index);
BXE_PRINTF(
" 0x%08X - (fp[%02d]->tpa_mbuf_alloc)\n",
fp->tpa_mbuf_alloc, fp->index);
@@ -17592,7 +17050,7 @@ void bxe_dump_fp_state(struct bxe_fastpath *fp)
* Returns:
* Nothing.
*/
-static __attribute__ ((noinline))
+static __noinline
void bxe_dump_port_state_locked(struct bxe_softc *sc)
{
@@ -17622,7 +17080,7 @@ void bxe_dump_port_state_locked(struct bxe_softc *sc)
* Returns:
* Nothing.
*/
-static __attribute__ ((noinline))
+static __noinline
void bxe_dump_link_vars_state_locked(struct bxe_softc *sc)
{
BXE_PRINTF(
@@ -17685,7 +17143,7 @@ void bxe_dump_link_vars_state_locked(struct bxe_softc *sc)
* Returns:
* Nothing.
*/
-static __attribute__ ((noinline))
+static __noinline
void bxe_dump_link_params_state_locked(struct bxe_softc *sc)
{
BXE_PRINTF(
@@ -17739,7 +17197,7 @@ void bxe_dump_link_params_state_locked(struct bxe_softc *sc)
* Returns:
* Nothing.
*/
-static __attribute__ ((noinline))
+static __noinline
void bxe_dump_driver_state(struct bxe_softc *sc)
{
uint32_t val_hi, val_lo;
@@ -17773,12 +17231,10 @@ void bxe_dump_driver_state(struct bxe_softc *sc)
sc->rx_lane_swap);
BXE_PRINTF(" 0x%08X - (sc->tx_lane_swap) TX XAUI lane swap\n",
sc->tx_lane_swap);
- BXE_PRINTF(" %16lu - (sc->debug_mbuf_sim_alloc_failed)\n",
- sc->debug_mbuf_sim_alloc_failed);
- BXE_PRINTF(" %16lu - (sc->debug_mbuf_sim_map_failed)\n",
- sc->debug_mbuf_sim_map_failed);
- BXE_PRINTF(" %16lu - (sc->debug_memory_allocated)\n",
- sc->debug_memory_allocated);
+ BXE_PRINTF(" %16lu - (sc->debug_sim_mbuf_alloc_failed)\n",
+ sc->debug_sim_mbuf_alloc_failed);
+ BXE_PRINTF(" %16lu - (sc->debug_sim_mbuf_map_failed)\n",
+ sc->debug_sim_mbuf_map_failed);
BXE_PRINTF(
"----------------------------"
@@ -17791,44 +17247,39 @@ void bxe_dump_driver_state(struct bxe_softc *sc)
}
/*
- * Dump bootcode debug buffer to the console.
+ * Dump bootcode (MCP) debug buffer to the console.
*
* Returns:
* None
*/
-static __attribute__ ((noinline))
+static __noinline
void bxe_dump_fw(struct bxe_softc *sc)
{
- uint32_t data[9], mark, offset;
- int word;
+ uint32_t addr, mark, data[9], offset;
+ int word;
- mark = REG_RD(sc, MCP_REG_MCPR_SCRATCH + 0xf104);
- mark = ((mark + 0x3) & ~0x3);
+ addr = sc->common.shmem_base - 0x0800 + 4;
+ mark = REG_RD(sc, addr);
+ mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
BXE_PRINTF(
- "----------------------------"
- " Bootcode State "
- "----------------------------\n");
- BXE_PRINTF("Begin MCP bootcode dump (mark = 0x%08X)\n", mark);
- BXE_PRINTF(
- "----------------------------"
- "----------------"
- "----------------------------\n");
+ "---------------------------"
+ " MCP Debug Buffer "
+ "---------------------------\n");
- for (offset = mark - 0x08000000; offset <= 0xF900;
+ /* Read from "mark" to the end of the buffer. */
+ for (offset = mark; offset <= sc->common.shmem_base;
offset += (0x8 * 4)) {
for (word = 0; word < 8; word++)
- data[word] = htonl(REG_RD(sc, MCP_REG_MCPR_SCRATCH +
- offset + 4 * word));
+ data[word] = htonl(REG_RD(sc, offset + 4 * word));
data[8] = 0x0;
printf("%s", (char *) data);
}
- for (offset = 0xF108; offset <= mark - 0x08000000;
- offset += (0x8 * 4)) {
+ /* Read from the start of the buffer to "mark". */
+ for (offset = addr + 4; offset <= mark; offset += (0x8 * 4)) {
for (word = 0; word < 8; word++)
- data[word] = htonl(REG_RD(sc, MCP_REG_MCPR_SCRATCH +
- offset + 4 * word));
+ data[word] = htonl(REG_RD(sc, offset + 4 * word));
data[8] = 0x0;
printf("%s", (char *) data);
}
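The two loops print the scratchpad oldest-first: the bytes from the mark to the end of the buffer are the oldest, those from the start of the buffer up to the mark are the newest. The same idea in a generic, self-contained form (the flat buffer layout is an assumption here, not the MCP's actual layout):

    #include <stddef.h>
    #include <stdio.h>

    /* Print a circular character log in order; 'mark' is the write cursor. */
    static void
    dump_ring_log(const char *buf, size_t size, size_t mark)
    {
            size_t i;

            for (i = mark; i < size; i++)   /* Oldest region: mark..end. */
                    putchar(buf[i]);
            for (i = 0; i < mark; i++)      /* Newest region: start..mark. */
                    putchar(buf[i]);
    }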
@@ -18129,26 +17580,9 @@ bxe_breakpoint(struct bxe_softc *sc)
bxe_dump_fp_state(&sc->fp[i]);
bxe_dump_status_block(sc);
+ bxe_dump_fw(sc);
/* Call the OS debugger. */
breakpoint();
}
#endif
-
-/*
- *
- * Returns:
- * Nothing.
- */
-static void
-bxe_gunzip_end(struct bxe_softc *sc)
-{
- free(sc->strm, M_DEVBUF);
- sc->strm = NULL;
-
- if (sc->gunzip_buf) {
- bxe_dmamem_free(sc, sc->gunzip_tag, sc->gunzip_buf,
- sc->gunzip_map);
- sc->gunzip_buf = NULL;
- }
-}
diff --git a/sys/dev/bxe/if_bxe.h b/sys/dev/bxe/if_bxe.h
index 8da3db4..a5af0bd 100644
--- a/sys/dev/bxe/if_bxe.h
+++ b/sys/dev/bxe/if_bxe.h
@@ -251,20 +251,22 @@ struct bxe_type {
#define SGE_PAGE_SHIFT PAGE_SHIFT
#define SGE_PAGE_ALIGN(addr) PAGE_ALIGN(addr)
-/* SGE ring related macros */
+/* NUM_RX_SGE_PAGES must be a power of 2. */
#define NUM_RX_SGE_PAGES 2
-#define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge))
-#define MAX_RX_SGE_CNT (RX_SGE_CNT - 2)
+#define TOTAL_RX_SGE_PER_PAGE (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge)) /* 512 */
+#define USABLE_RX_SGE_PER_PAGE (TOTAL_RX_SGE_PER_PAGE - 2) /* 510 */
+#define RX_SGE_PER_PAGE_MASK (TOTAL_RX_SGE_PER_PAGE - 1) /* 511 */
+#define TOTAL_RX_SGE (TOTAL_RX_SGE_PER_PAGE * NUM_RX_SGE_PAGES) /* 1024 */
+#define USABLE_RX_SGE (USABLE_RX_SGE_PER_PAGE * NUM_RX_SGE_PAGES) /* 1020 */
+#define MAX_RX_SGE (TOTAL_RX_SGE - 1) /* 1023 */
+
-/* RX_SGE_CNT is required to be a power of 2 */
-#define RX_SGE_MASK (RX_SGE_CNT - 1)
-#define TOTAL_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES)
-#define MAX_RX_SGE (TOTAL_RX_SGE - 1)
#define NEXT_SGE_IDX(x) \
- ((((x) & RX_SGE_MASK) == (MAX_RX_SGE_CNT - 1)) ? (x) + 3 : (x) + 1)
+ ((((x) & RX_SGE_PER_PAGE_MASK) == (USABLE_RX_SGE_PER_PAGE - 1)) \
+ ? (x) + 3 : (x) + 1)
#define RX_SGE(x) ((x) & MAX_RX_SGE)
-#define RX_SGE_PAGE(x) (((x) & ~RX_SGE_MASK) >> 9)
-#define RX_SGE_IDX(x) ((x) & RX_SGE_MASK)
+#define RX_SGE_PAGE(x) (((x) & ~RX_SGE_PER_PAGE_MASK) >> 9)
+#define RX_SGE_IDX(x) ((x) & RX_SGE_PER_PAGE_MASK)
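With 4KB pages and 8-byte SGE entries each page holds 512 entries, the last two of which are next-page pointers, and that is exactly what NEXT_SGE_IDX() steps over. A small userland self-test of the macro, with the definitions copied from above:

    #include <assert.h>

    #define TOTAL_RX_SGE_PER_PAGE   512
    #define USABLE_RX_SGE_PER_PAGE  (TOTAL_RX_SGE_PER_PAGE - 2)
    #define RX_SGE_PER_PAGE_MASK    (TOTAL_RX_SGE_PER_PAGE - 1)
    #define NEXT_SGE_IDX(x) \
            ((((x) & RX_SGE_PER_PAGE_MASK) == (USABLE_RX_SGE_PER_PAGE - 1)) \
            ? (x) + 3 : (x) + 1)

    int
    main(void)
    {
            assert(NEXT_SGE_IDX(508) == 509);  /* Normal advance. */
            assert(NEXT_SGE_IDX(509) == 512);  /* Skip both page-pointer slots. */
            return (0);
    }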
/* SGE producer mask related macros. */
/* Number of bits in one sge_mask array element. */
@@ -282,23 +284,23 @@ struct bxe_type {
/* Number of uint64_t elements in SGE mask array. */
#define RX_SGE_MASK_LEN \
- ((NUM_RX_SGE_PAGES * RX_SGE_CNT) / RX_SGE_MASK_ELEM_SZ)
+ ((NUM_RX_SGE_PAGES * TOTAL_RX_SGE_PER_PAGE) / RX_SGE_MASK_ELEM_SZ)
#define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1)
#define NEXT_SGE_MASK_ELEM(el) (((el) + 1) & RX_SGE_MASK_LEN_MASK)
+
/*
* Transmit Buffer Descriptor (tx_bd) definitions*
*/
-/* ToDo: Tune this value based on multi-queue/RSS enable/disable. */
-#define NUM_TX_PAGES 2
+/* NUM_TX_PAGES must be a power of 2. */
+#define NUM_TX_PAGES 1
+#define TOTAL_TX_BD_PER_PAGE (BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types)) /* 256 */
+#define USABLE_TX_BD_PER_PAGE (TOTAL_TX_BD_PER_PAGE - 1) /* 255 */
+#define TOTAL_TX_BD (TOTAL_TX_BD_PER_PAGE * NUM_TX_PAGES) /* 512 */
+#define USABLE_TX_BD (USABLE_TX_BD_PER_PAGE * NUM_TX_PAGES) /* 510 */
+#define MAX_TX_BD (TOTAL_TX_BD - 1) /* 511 */
-#define TOTAL_TX_BD_PER_PAGE (BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types))
-#define USABLE_TX_BD_PER_PAGE (TOTAL_TX_BD_PER_PAGE - 1)
-#define TOTAL_TX_BD (TOTAL_TX_BD_PER_PAGE * NUM_TX_PAGES)
-#define USABLE_TX_BD (USABLE_TX_BD_PER_PAGE * NUM_TX_PAGES)
-#define MAX_TX_AVAIL (USABLE_TX_BD_PER_PAGE * NUM_TX_PAGES - 2)
-#define MAX_TX_BD (TOTAL_TX_BD - 1)
#define NEXT_TX_BD(x) \
((((x) & USABLE_TX_BD_PER_PAGE) == \
(USABLE_TX_BD_PER_PAGE - 1)) ? (x) + 2 : (x) + 1)
@@ -309,55 +311,33 @@ struct bxe_type {
/*
* Receive Buffer Descriptor (rx_bd) definitions*
*/
-#define NUM_RX_PAGES 2
-
-/* 512 (0x200) of 8 byte bds in 4096 byte page. */
-#define TOTAL_RX_BD_PER_PAGE (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
-
-/* 510 (0x1fe) = 512 - 2 */
-#define USABLE_RX_BD_PER_PAGE (TOTAL_RX_BD_PER_PAGE - 2)
-
-/* 1024 (0x400) */
-#define TOTAL_RX_BD (TOTAL_RX_BD_PER_PAGE * NUM_RX_PAGES)
-/* 1020 (0x3fc) = 1024 - 4 */
-#define USABLE_RX_BD (USABLE_RX_BD_PER_PAGE * NUM_RX_PAGES)
-
-/* 1023 (0x3ff) = 1024 -1 */
-#define MAX_RX_BD (TOTAL_RX_BD - 1)
-
-/* 511 (0x1ff) = 512 - 1 */
-#define RX_DESC_MASK (TOTAL_RX_BD_PER_PAGE - 1)
+/* NUM_RX_PAGES must be a power of 2. */
+#define NUM_RX_PAGES 1
+#define TOTAL_RX_BD_PER_PAGE (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd)) /* 512 */
+#define USABLE_RX_BD_PER_PAGE (TOTAL_RX_BD_PER_PAGE - 2) /* 510 */
+#define RX_BD_PER_PAGE_MASK (TOTAL_RX_BD_PER_PAGE - 1) /* 511 */
+#define TOTAL_RX_BD (TOTAL_RX_BD_PER_PAGE * NUM_RX_PAGES) /* 1024 */
+#define USABLE_RX_BD (USABLE_RX_BD_PER_PAGE * NUM_RX_PAGES) /* 1020 */
+#define MAX_RX_BD (TOTAL_RX_BD - 1) /* 1023 */
#define NEXT_RX_BD(x) \
- ((((x) & RX_DESC_MASK) == \
+ ((((x) & RX_BD_PER_PAGE_MASK) == \
(USABLE_RX_BD_PER_PAGE - 1)) ? (x) + 3 : (x) + 1)
/* x & 0x3ff */
#define RX_BD(x) ((x) & MAX_RX_BD)
-#define RX_PAGE(x) (((x) & ~RX_DESC_MASK) >> 9)
-#define RX_IDX(x) ((x) & RX_DESC_MASK)
+#define RX_PAGE(x) (((x) & ~RX_BD_PER_PAGE_MASK) >> 9)
+#define RX_IDX(x) ((x) & RX_BD_PER_PAGE_MASK)
/*
* Receive Completion Queue definitions*
*/
-
-/* CQEs (32 bytes) are 4 times larger than rx_bd's (8 bytes). */
#define NUM_RCQ_PAGES (NUM_RX_PAGES * 4)
-
-/* 128 (0x80) */
-#define TOTAL_RCQ_ENTRIES_PER_PAGE (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
-
-/* 127 (0x7f)for the next page RCQ bd */
-#define USABLE_RCQ_ENTRIES_PER_PAGE (TOTAL_RCQ_ENTRIES_PER_PAGE - 1)
-
-/* 1024 (0x400) */
-#define TOTAL_RCQ_ENTRIES (TOTAL_RCQ_ENTRIES_PER_PAGE * NUM_RCQ_PAGES)
-
-/* 1016 (0x3f8) */
-#define USABLE_RCQ_ENTRIES (USABLE_RCQ_ENTRIES_PER_PAGE * NUM_RCQ_PAGES)
-
-/* 1023 (0x3ff) */
-#define MAX_RCQ_ENTRIES (TOTAL_RCQ_ENTRIES - 1)
+#define TOTAL_RCQ_ENTRIES_PER_PAGE (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe)) /* 128 */
+#define USABLE_RCQ_ENTRIES_PER_PAGE (TOTAL_RCQ_ENTRIES_PER_PAGE - 1) /* 127 */
+#define TOTAL_RCQ_ENTRIES (TOTAL_RCQ_ENTRIES_PER_PAGE * NUM_RCQ_PAGES) /* 1024 */
+#define USABLE_RCQ_ENTRIES (USABLE_RCQ_ENTRIES_PER_PAGE * NUM_RCQ_PAGES) /* 1016 */
+#define MAX_RCQ_ENTRIES (TOTAL_RCQ_ENTRIES - 1) /* 1023 */
#define NEXT_RCQ_IDX(x) \
((((x) & USABLE_RCQ_ENTRIES_PER_PAGE) == \
@@ -383,11 +363,11 @@ struct bxe_type {
} while (0)
#define SGE_MASK_SET_BIT(fp, idx) \
- __SGE_MASK_SET_BIT(fp->sge_mask[(idx) >> RX_SGE_MASK_ELEM_SHIFT], \
+ __SGE_MASK_SET_BIT(fp->rx_sge_mask[(idx) >> RX_SGE_MASK_ELEM_SHIFT], \
((idx) & RX_SGE_MASK_ELEM_MASK))
#define SGE_MASK_CLEAR_BIT(fp, idx) \
- __SGE_MASK_CLEAR_BIT(fp->sge_mask[(idx) >> RX_SGE_MASK_ELEM_SHIFT], \
+ __SGE_MASK_CLEAR_BIT(fp->rx_sge_mask[(idx) >> RX_SGE_MASK_ELEM_SHIFT], \
((idx) & RX_SGE_MASK_ELEM_MASK))
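SGE_MASK_SET_BIT() and SGE_MASK_CLEAR_BIT() split an SGE index into a uint64_t array element (idx >> 6) and a bit within that element (idx & 63). The same logic with the driver's macros stripped away, as a sketch:

    #include <stdint.h>

    #define MASK_ELEM_SHIFT 6       /* 64 bits per array element. */
    #define MASK_ELEM_MASK  63

    static void
    sge_bit_set(uint64_t *mask, unsigned int idx)
    {
            mask[idx >> MASK_ELEM_SHIFT] |= 1ULL << (idx & MASK_ELEM_MASK);
    }

    static void
    sge_bit_clear(uint64_t *mask, unsigned int idx)
    {
            mask[idx >> MASK_ELEM_SHIFT] &= ~(1ULL << (idx & MASK_ELEM_MASK));
    }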
#define BXE_TX_TIMEOUT 5
@@ -418,14 +398,14 @@ struct bxe_type {
* IFCAP_TSO6, IFCAP_WOL_UCAST.
*/
#if __FreeBSD_version < 700000
-#define BXE_IF_CAPABILITIES \
+#define BXE_IF_CAPABILITIES \
(IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | IFCAP_HWCSUM | \
IFCAP_JUMBO_MTU)
#else
- /* TSO was introduced in FreeBSD 7 */
-#define BXE_IF_CAPABILITIES \
+ /* TSO/LRO were introduced in FreeBSD 7. */
+#define BXE_IF_CAPABILITIES \
(IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | IFCAP_HWCSUM | \
- IFCAP_JUMBO_MTU | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM)
+ IFCAP_JUMBO_MTU | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM | IFCAP_LRO)
#endif
/* Some typical Ethernet frame sizes */
@@ -449,6 +429,10 @@ struct bxe_type {
/* Resolution of the rate shaping timer - 100 usec */
#define RS_PERIODIC_TIMEOUT_USEC 100
+#define BXE_MBUF_ALLOC_RETRY_COUNT 10
+#define BXE_MBUF_MAPPING_RETRY_COUNT 10
+#define BXE_MBUF_RETRY_DELAY 100
+
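The three new constants suggest a bounded retry loop with a short pause between attempts. A sketch of how they might be consumed; whether the driver really retries this way, and whether the delay is in microseconds, are assumptions here:

    struct mbuf *m = NULL;
    int retry;

    for (retry = 0; retry < BXE_MBUF_ALLOC_RETRY_COUNT; retry++) {
            if ((m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES)) != NULL)
                    break;
            DELAY(BXE_MBUF_RETRY_DELAY);    /* Spin-wait before retrying. */
    }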
/*
* Resolution of fairness algorithm, in usecs.
* Coefficient for calculating the actual t_fair.
@@ -546,7 +530,8 @@ enum bxe_stats_state {
STATS_STATE_MAX
};
-struct bxe_eth_stats {
+/* Statistics for an Ethernet port. */
+struct bxe_port_stats {
uint32_t total_bytes_received_hi;
uint32_t total_bytes_received_lo;
uint32_t total_bytes_transmitted_hi;
@@ -567,6 +552,12 @@ struct bxe_eth_stats {
uint32_t valid_bytes_received_lo;
uint32_t error_bytes_received_hi;
uint32_t error_bytes_received_lo;
+ uint32_t etherstatsoverrsizepkts_hi;
+ uint32_t etherstatsoverrsizepkts_lo;
+ uint32_t no_buff_discard_hi;
+ uint32_t no_buff_discard_lo;
+
+ /* Layout must match struct mac_stx. */
uint32_t rx_stat_ifhcinbadoctets_hi;
uint32_t rx_stat_ifhcinbadoctets_lo;
uint32_t tx_stat_ifhcoutbadoctets_hi;
@@ -643,39 +634,33 @@ struct bxe_eth_stats {
uint32_t tx_stat_dot3statsinternalmactransmiterrors_lo;
uint32_t tx_stat_bmac_ufl_hi;
uint32_t tx_stat_bmac_ufl_lo;
- uint32_t brb_drop_hi;
- uint32_t brb_drop_lo;
- uint32_t brb_truncate_hi;
- uint32_t brb_truncate_lo;
+ /* End of mac_stx. */
+
uint32_t pause_frames_received_hi;
uint32_t pause_frames_received_lo;
uint32_t pause_frames_sent_hi;
uint32_t pause_frames_sent_lo;
- uint32_t jabber_packets_received;
-
uint32_t etherstatspkts1024octetsto1522octets_hi;
uint32_t etherstatspkts1024octetsto1522octets_lo;
uint32_t etherstatspktsover1522octets_hi;
uint32_t etherstatspktsover1522octets_lo;
-
- uint32_t no_buff_discard_hi;
- uint32_t no_buff_discard_lo;
-
+ uint32_t brb_drop_hi;
+ uint32_t brb_drop_lo;
+ uint32_t brb_truncate_hi;
+ uint32_t brb_truncate_lo;
uint32_t mac_filter_discard;
uint32_t xxoverflow_discard;
uint32_t brb_truncate_discard;
uint32_t mac_discard;
-
uint32_t driver_xoff;
uint32_t rx_err_discard_pkt;
uint32_t rx_skb_alloc_failed;
uint32_t hw_csum_err;
-
uint32_t nig_timer_max;
};
#define STATS_OFFSET32(stat_name) \
- (offsetof(struct bxe_eth_stats, stat_name) / 4)
+ (offsetof(struct bxe_port_stats, stat_name) / 4)
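The "layout must match struct mac_stx" requirement above is the kind of constraint a compile-time assertion can enforce. A hedged sketch using FreeBSD's CTASSERT(); the mac_stx field names are assumed to mirror those in bxe_port_stats:

    /*
     * Illustrative only: a field's offset relative to the start of the
     * mac_stx region should equal its offset inside struct mac_stx.
     */
    CTASSERT(offsetof(struct bxe_port_stats, tx_stat_ifhcoutbadoctets_hi) -
        offsetof(struct bxe_port_stats, rx_stat_ifhcinbadoctets_hi) ==
        offsetof(struct mac_stx, tx_stat_ifhcoutbadoctets_hi));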
#define MAX_CONTEXT 16
@@ -841,6 +826,18 @@ struct bxe_port {
#define PMF_DMAE_C(sc) \
(BP_PORT(sc) * MAX_DMAE_C_PER_PORT + E1HVN_MAX)
+
+/* Used to manage DMA allocations. */
+struct bxe_dma {
+ bus_addr_t paddr;
+ void *vaddr;
+ bus_dma_tag_t tag;
+ bus_dmamap_t map;
+ bus_dma_segment_t seg;
+ bus_size_t size;
+ int nseg;
+};
+
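A structure like this is typically filled by one helper that creates the tag, allocates the memory, and loads the map in a single pass. A condensed sketch of what such a helper could look like (error unwinding omitted; the busdma callback copies the lone segment's address back into the structure):

    static void
    bxe_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
    {
            struct bxe_dma *dma = arg;

            if (error == 0) {
                    dma->paddr = segs[0].ds_addr;
                    dma->nseg = nseg;
            }
    }

    static int
    bxe_dma_alloc(struct bxe_softc *sc, bus_size_t size, struct bxe_dma *dma)
    {
            dma->size = size;
            if (bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 0,
                BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
                size, 1, size, 0, NULL, NULL, &dma->tag) != 0)
                    return (ENOMEM);
            if (bus_dmamem_alloc(dma->tag, &dma->vaddr,
                BUS_DMA_NOWAIT | BUS_DMA_ZERO, &dma->map) != 0)
                    return (ENOMEM);
            if (bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
                bxe_dma_map_addr, dma, BUS_DMA_NOWAIT) != 0)
                    return (ENOMEM);
            return (0);
    }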
/*
* This is the slowpath data structure. It is mapped into non-paged memory
* so that the hardware can access it's contents directly and must be page
@@ -884,7 +881,7 @@ struct bxe_slowpath {
#define BXE_SP(sc, var) (&sc->slowpath->var)
#define BXE_SP_CHECK(sc, var) ((sc->slowpath) ? (&sc->slowpath->var) : NULL)
#define BXE_SP_MAPPING(sc, var) \
- (sc->slowpath_paddr + offsetof(struct bxe_slowpath, var))
+ (sc->slowpath_dma.paddr + offsetof(struct bxe_slowpath, var))
union db_prod {
struct doorbell_set_prod data;
@@ -933,51 +930,33 @@ struct bxe_fastpath {
struct mtx mtx;
char mtx_name[16];
- /* Hardware maintained status block. */
- bus_dma_tag_t status_block_tag;
- bus_dmamap_t status_block_map;
+ /* Status block. */
+ struct bxe_dma sb_dma;
struct host_status_block *status_block;
- bus_addr_t status_block_paddr;
-#ifdef notyet
- /*
- * In this implementation the doorbell data block
- * (eth_tx_db_data) is mapped into memory immediately
- * following the status block and is part of the same
- * memory allocation.
- */
- struct eth_tx_db_data *hw_tx_prods;
- bus_addr_t tx_prods_paddr;
-#endif
- /* Hardware maintained TX buffer descriptor chains. */
- bus_dma_tag_t tx_bd_chain_tag;
- bus_dmamap_t tx_bd_chain_map[NUM_TX_PAGES];
+ /* Transmit chain. */
+ struct bxe_dma tx_dma;
+ union eth_tx_bd_types *tx_chain;
+
+ /* Receive chain. */
+ struct bxe_dma rx_dma;
+ struct eth_rx_bd *rx_chain;
- union eth_tx_bd_types *tx_bd_chain[NUM_TX_PAGES];
- bus_addr_t tx_bd_chain_paddr[NUM_TX_PAGES];
+ /* Receive completion queue chain. */
+ struct bxe_dma rcq_dma;
+ union eth_rx_cqe *rcq_chain;
- /* Bus resource tag for TX mbufs. */
+ /* Bus resource tag, map, and mbufs for TX chain. */
bus_dma_tag_t tx_mbuf_tag;
bus_dmamap_t tx_mbuf_map[TOTAL_TX_BD];
struct mbuf *tx_mbuf_ptr[TOTAL_TX_BD];
- /* Hardware maintained RX buffer descriptor chains. */
- bus_dma_tag_t rx_bd_chain_tag;
- bus_dmamap_t rx_bd_chain_map[NUM_RX_PAGES];
- struct eth_rx_bd *rx_bd_chain[NUM_RX_PAGES];
- bus_addr_t rx_bd_chain_paddr[NUM_RX_PAGES];
-
- /* Bus resource tag for RX mbufs. */
+ /* Bus resource tag, map, and mbufs for RX chain. */
bus_dma_tag_t rx_mbuf_tag;
bus_dmamap_t rx_mbuf_map[TOTAL_RX_BD];
+ bus_dmamap_t rx_mbuf_spare_map;
struct mbuf *rx_mbuf_ptr[TOTAL_RX_BD];
- /* Hardware maintained Completion Queue (CQ) chains. */
- bus_dma_tag_t rx_cq_chain_tag;
- bus_dmamap_t rx_cq_chain_map[NUM_RCQ_PAGES];
- union eth_rx_cqe *rx_cq_chain[NUM_RCQ_PAGES];
- bus_addr_t rx_cq_chain_paddr[NUM_RCQ_PAGES];
-
/* Ticks until chip reset. */
int watchdog_timer;
@@ -1014,8 +993,6 @@ struct bxe_fastpath {
/* Transmit packet producer index (used in eth_tx_bd). */
uint16_t tx_pkt_prod;
-
- /* Transmit packet consumer index. */
uint16_t tx_pkt_cons;
/* Transmit buffer descriptor prod/cons indices. */
@@ -1044,25 +1021,27 @@ struct bxe_fastpath {
uint16_t *rx_bd_cons_sb;
/* Pointer to the transmit consumer in the status block. */
- uint16_t *tx_cons_sb;
+ uint16_t *tx_pkt_cons_sb;
- /* Free/used buffer descriptor counters. */
- uint16_t used_tx_bd;
+ /* Used TX buffer descriptor counters. */
+ uint16_t tx_bd_used;
/* Begin: TPA Related data structure. */
- /* Hardware maintained RX Scatter Gather Entry chains. */
- bus_dma_tag_t rx_sge_chain_tag;
- bus_dmamap_t rx_sge_chain_map[NUM_RX_SGE_PAGES];
- struct eth_rx_sge *rx_sge_chain[NUM_RX_SGE_PAGES];
- bus_addr_t rx_sge_chain_paddr[NUM_RX_SGE_PAGES];
+ struct bxe_dma sg_dma;
+ struct eth_rx_sge *sg_chain;
/* Bus tag for RX SGE bufs. */
bus_dma_tag_t rx_sge_buf_tag;
bus_dmamap_t rx_sge_buf_map[TOTAL_RX_SGE];
+ bus_dmamap_t rx_sge_spare_map;
struct mbuf *rx_sge_buf_ptr[TOTAL_RX_SGE];
- uint64_t sge_mask[RX_SGE_MASK_LEN];
+ /*
+ * Bitmask with one bit per SGE element, indicating which
+ * aggregation each element belongs to.
+ */
+ uint64_t rx_sge_mask[RX_SGE_MASK_LEN];
uint16_t rx_sge_prod;
/* The last maximal completed SGE. */
@@ -1072,6 +1051,7 @@ struct bxe_fastpath {
/* Use the larger supported size for TPA queue length. */
bus_dmamap_t tpa_mbuf_map[ETH_MAX_AGGREGATION_QUEUES_E1H];
+ bus_dmamap_t tpa_mbuf_spare_map;
struct mbuf *tpa_mbuf_ptr[ETH_MAX_AGGREGATION_QUEUES_E1H];
bus_dma_segment_t tpa_mbuf_segs[ETH_MAX_AGGREGATION_QUEUES_E1H];
@@ -1088,21 +1068,46 @@ struct bxe_fastpath {
struct xstorm_per_client_stats old_xclient;
struct bxe_q_stats eth_q_stats;
- uint16_t free_rx_bd;
-
#if __FreeBSD_version >= 800000
struct buf_ring *br;
#endif
- /* Recieve/transmit packet counters. */
+ /* Receive path driver statistics. */
unsigned long rx_pkts;
+ unsigned long rx_tpa_pkts;
+ unsigned long rx_null_cqe_flags;
+ unsigned long rx_soft_errors;
+
+ /* Transmit path driver statistics. */
unsigned long tx_pkts;
- unsigned long tpa_pkts;
- unsigned long rx_calls;
- unsigned long mbuf_alloc_failed;
+ unsigned long tx_soft_errors;
+ unsigned long tx_offload_frames_csum_ip;
+ unsigned long tx_offload_frames_csum_tcp;
+ unsigned long tx_offload_frames_csum_udp;
+ unsigned long tx_offload_frames_tso;
+ unsigned long tx_header_splits;
+ unsigned long tx_encap_failures;
+ unsigned long tx_hw_queue_full;
+ unsigned long tx_hw_max_queue_depth;
+ unsigned long tx_dma_mapping_failure;
+ int tx_max_drbr_queue_depth;
+ unsigned long tx_window_violation_std;
+ unsigned long tx_window_violation_tso;
+ unsigned long tx_unsupported_tso_request_ipv6;
+ unsigned long tx_unsupported_tso_request_not_tcp;
+ unsigned long tx_chain_lost_mbuf;
+ unsigned long tx_frame_deferred;
+ unsigned long tx_queue_xoff;
+
+ /* Memory path driver statistics. */
unsigned long mbuf_defrag_attempts;
unsigned long mbuf_defrag_failures;
- unsigned long mbuf_defrag_successes;
+ unsigned long mbuf_rx_bd_alloc_failed;
+ unsigned long mbuf_rx_bd_mapping_failed;
+ unsigned long mbuf_tpa_alloc_failed;
+ unsigned long mbuf_tpa_mapping_failed;
+ unsigned long mbuf_sge_alloc_failed;
+ unsigned long mbuf_sge_mapping_failed;
/* Track the number of enqueued mbufs. */
int tx_mbuf_alloc;
@@ -1110,29 +1115,9 @@ struct bxe_fastpath {
int sge_mbuf_alloc;
int tpa_mbuf_alloc;
- int max_drbr_queue_depth;
-
uint64_t tpa_queue_used;
- unsigned long null_cqe_flags;
- unsigned long offload_frames_csum_ip;
- unsigned long offload_frames_csum_tcp;
- unsigned long offload_frames_csum_udp;
- unsigned long offload_frames_tso;
- unsigned long tx_encap_failures;
- unsigned long tx_start_called_on_empty_queue;
- unsigned long tx_queue_too_full;
- unsigned long tx_dma_mapping_failure;
- unsigned long window_violation_tso;
- unsigned long window_violation_std;
- unsigned long unsupported_tso_request_ipv6;
- unsigned long unsupported_tso_request_not_tcp;
- unsigned long tpa_mbuf_alloc_failed;
- unsigned long tx_chain_lost_mbuf;
-
/* FreeBSD interface statistics. */
- unsigned long soft_rx_errors;
- unsigned long soft_tx_errors;
unsigned long ipackets;
unsigned long opackets;
@@ -1144,7 +1129,7 @@ struct bxe_fastpath {
#define BXE_STATUS_BLK_SZ \
sizeof(struct host_status_block) /* +sizeof(struct eth_tx_db_data) */
#define BXE_DEF_STATUS_BLK_SZ sizeof(struct host_def_status_block)
-#define BXE_STATS_BLK_SZ sizeof(struct bxe_eth_stats)
+#define BXE_STATS_BLK_SZ sizeof(struct bxe_port_stats)
#define BXE_SLOWPATH_SZ sizeof(struct bxe_slowpath)
#define BXE_SPQ_SZ BCM_PAGE_SIZE
#define BXE_TX_CHAIN_PAGE_SZ BCM_PAGE_SIZE
@@ -1165,14 +1150,13 @@ struct bxe_softc {
/* Bus tag for the bxe controller. */
bus_dma_tag_t parent_tag;
+
/* OS resources for BAR0 memory. */
struct resource *bxe_res;
bus_space_tag_t bxe_btag;
bus_space_handle_t bxe_bhandle;
vm_offset_t bxe_vhandle;
- /* OS resources for BAR2 memory. */
-
/* OS resources for BAR1 doorbell memory. */
#define BXE_DB_SIZE (16 * 2048)
struct resource *bxe_db_res;
@@ -1216,7 +1200,6 @@ struct bxe_softc {
struct taskqueue *tq;
	/* RX Driver parameters */
uint32_t rx_csum;
- int rx_buf_size;
	/* ToDo: Replace with OS-specific definitions. */
#define ETH_HLEN 14
@@ -1225,11 +1208,8 @@ struct bxe_softc {
#define ETH_MAX_PACKET_SIZE 1500
#define ETH_MAX_JUMBO_PACKET_SIZE 9600
- /* Hardware Maintained Host Default Status Block. */
- bus_dma_tag_t def_status_block_tag;
- bus_dmamap_t def_status_block_map;
- struct host_def_status_block *def_status_block;
- bus_addr_t def_status_block_paddr;
+ struct bxe_dma def_sb_dma;
+ struct host_def_status_block *def_sb;
#define DEF_SB_ID 16
uint16_t def_c_idx;
@@ -1241,23 +1221,15 @@ struct bxe_softc {
uint32_t attn_state;
struct attn_route attn_group[MAX_DYNAMIC_ATTN_GRPS];
- /* H/W maintained statistics block. */
- bus_dma_tag_t stats_tag;
- bus_dmamap_t stats_map;
- struct statistics_block *stats_block;
- bus_addr_t stats_block_paddr;
+ struct bxe_dma stats_dma;
+ struct statistics_block *stats;
- /* H/W maintained slow path. */
- bus_dma_tag_t slowpath_tag;
- bus_dmamap_t slowpath_map;
+ struct bxe_dma slowpath_dma;
struct bxe_slowpath *slowpath;
- bus_addr_t slowpath_paddr;
- /* Slow path ring. */
- bus_dma_tag_t spq_tag;
- bus_dmamap_t spq_map;
+ struct bxe_dma spq_dma;
struct eth_spe *spq;
- bus_addr_t spq_paddr;
+
uint16_t spq_prod_idx;
struct eth_spe *spq_prod_bd;
struct eth_spe *spq_last_bd;
@@ -1273,17 +1245,15 @@ struct bxe_softc {
/* Device flags. */
uint32_t bxe_flags;
-#define BXE_ONE_PORT_FLAG 0x00000004
-#define BXE_NO_WOL_FLAG 0x00000008
-#define BXE_USING_DAC_FLAG 0x00000010
-#define BXE_USING_MSIX_FLAG 0x00000020
-#define BXE_USING_MSI_FLAG 0x00000040
-#define BXE_TPA_ENABLE_FLAG 0x00000080
-#define BXE_NO_MCP_FLAG 0x00000100
-#define BP_NOMCP(sc) (sc->bxe_flags & BXE_NO_MCP_FLAG)
-#define BXE_SAFC_TX_FLAG 0x00000200
+#define BXE_ONE_PORT_FLAG 0x00000001
+#define BXE_NO_WOL_FLAG 0x00000002
+#define BXE_USING_DAC_FLAG 0x00000004
+#define BXE_TPA_ENABLE_FLAG 0x00000008
+#define BXE_NO_MCP_FLAG 0x00000010
#define TPA_ENABLED(sc) (sc->bxe_flags & BXE_TPA_ENABLE_FLAG)
+#define NOMCP(sc) (sc->bxe_flags & BXE_NO_MCP_FLAG)
+
/* PCI Express function number for the device. */
int bxe_func;
@@ -1386,8 +1356,6 @@ struct bxe_softc {
int mrrs;
int dcc_enable;
-#define BXE_NUM_QUEUES(cos) \
- ((bxe_qs_per_cos & (0xff << (cos * 8))) >> (cos * 8))
#define BXE_MAX_QUEUES(sc) \
(IS_E1HMF(sc) ? (MAX_CONTEXT / E1HVN_MAX) : MAX_CONTEXT)
@@ -1396,18 +1364,6 @@ struct bxe_softc {
#define BXE_MAX_PRIORITY 8
#define BXE_MAX_ENTRIES_PER_PRI 16
- /* Number of queues per class of service. */
- uint8_t qs_per_cos[BXE_MAX_COS];
-
- /* Priority to class of service mapping. */
- uint8_t pri_map[BXE_MAX_PRIORITY];
-
- /* min rate per cos */
- uint16_t cos_min_rate[BXE_MAX_COS];
-
- /* Class of service to queue mapping. */
- uint8_t cos_map[BXE_MAX_COS];
-
/* Used for multiple function devices. */
uint32_t mf_config[E1HVN_MAX];
@@ -1449,15 +1405,13 @@ struct bxe_softc {
/* Statistics. */
uint16_t stats_counter;
- struct bxe_eth_stats eth_stats;
+ struct bxe_port_stats eth_stats;
+ /* Support for DMAE and compressed firmware. */
z_streamp strm;
- bus_dma_tag_t gunzip_tag;
- bus_dmamap_t gunzip_map;
- void *gunzip_buf;
- bus_addr_t gunzip_mapping;
- int gunzip_outlen;
-#define FW_BUF_SIZE 0x40000
+ struct bxe_dma gz_dma;
+ void *gz;
+#define BXE_FW_BUF_SIZE 0x40000
struct raw_op *init_ops;
/* Init blocks offsets inside init_ops */
@@ -1500,10 +1454,9 @@ struct bxe_softc {
uint8_t intr_sem;
#ifdef BXE_DEBUG
- unsigned long debug_mbuf_sim_alloc_failed;
- unsigned long debug_mbuf_sim_map_failed;
+ unsigned long debug_sim_mbuf_alloc_failed;
+ unsigned long debug_sim_mbuf_map_failed;
unsigned long debug_received_frame_error;
- unsigned long debug_memory_allocated;
/* A buffer for hardware/firmware state information (grcdump). */
uint32_t *grcdump_buffer;
@@ -1763,7 +1716,7 @@ struct bxe_softc {
(&fp->status_block->c_status_block.index_values[C_SB_ETH_TX_CQ_INDEX])
#define BXE_SP_DSB_INDEX \
- &sc->def_status_block->c_def_status_block.index_values[C_DEF_SB_SP_INDEX]
+ &sc->def_sb->c_def_status_block.index_values[C_DEF_SB_SP_INDEX]
#define BXE_RX_SB_INDEX_NUM \
(((U_SB_ETH_RX_CQ_INDEX << \
diff --git a/sys/dev/cardbus/cardbus_cis.c b/sys/dev/cardbus/cardbus_cis.c
index 2cfea19..3352a56 100644
--- a/sys/dev/cardbus/cardbus_cis.c
+++ b/sys/dev/cardbus/cardbus_cis.c
@@ -324,7 +324,7 @@ decode_tuple_bar(device_t cbdev, device_t child, int id,
* hint when the cardbus bridge is a child of pci0 (the main
* bus). The PC Card spec seems to indicate that this should
* only be done on x86 based machines, which suggests that on
- * non-x86 machines the adddresses can be anywhere. Since the
+ * non-x86 machines the addresses can be anywhere. Since the
* hardware can do it on non-x86 machines, it should be able
* to do it on x86 machines too. Therefore, we can and should
* ignore this hint. Furthermore, the PC Card spec recommends
@@ -430,7 +430,6 @@ cardbus_read_tuple_finish(device_t cbdev, device_t child, int rid,
{
if (res != CIS_CONFIG_SPACE) {
bus_release_resource(child, SYS_RES_MEMORY, rid, res);
- bus_delete_resource(child, SYS_RES_MEMORY, rid);
}
}
@@ -467,7 +466,7 @@ cardbus_read_tuple_init(device_t cbdev, device_t child, uint32_t *start,
}
/* allocate the memory space to read CIS */
- res = bus_alloc_resource(child, SYS_RES_MEMORY, rid, 0, ~0, 1,
+ res = bus_alloc_resource_any(child, SYS_RES_MEMORY, rid,
rman_make_alignment_flags(4096) | RF_ACTIVE);
if (res == NULL) {
device_printf(cbdev, "Unable to allocate resource "
diff --git a/sys/dev/cxgbe/adapter.h b/sys/dev/cxgbe/adapter.h
index 6b48325..8624fc1 100644
--- a/sys/dev/cxgbe/adapter.h
+++ b/sys/dev/cxgbe/adapter.h
@@ -110,6 +110,9 @@ enum {
FW_IQ_QSIZE = 256,
FW_IQ_ESIZE = 64, /* At least 64 mandated by the firmware spec */
+ INTR_IQ_QSIZE = 64,
+ INTR_IQ_ESIZE = 64, /* Handles some CPLs too, do not reduce */
+
CTRL_EQ_QSIZE = 128,
CTRL_EQ_ESIZE = 64,
@@ -141,7 +144,7 @@ enum {
/* adapter flags */
FULL_INIT_DONE = (1 << 0),
FW_OK = (1 << 1),
- INTR_FWD = (1 << 2),
+ INTR_SHARED = (1 << 2), /* one set of intrq's for all ports */
CXGBE_BUSY = (1 << 9),
@@ -294,7 +297,7 @@ struct sge_eq {
uint16_t pidx; /* producer idx (desc idx) */
uint16_t pending; /* # of descriptors used since last doorbell */
uint16_t iqid; /* iq that gets egr_update for the eq */
- uint32_t cntxt_id; /* SGE context id for the eq */
+ unsigned int cntxt_id; /* SGE context id for the eq */
};
struct sge_fl {
@@ -384,17 +387,16 @@ struct sge_ctrlq {
/* stats for common events first */
- uint64_t total_wrs; /* # of work requests sent down this queue */
/* stats for not-that-common events */
uint32_t no_desc; /* out of hardware descriptors */
- uint32_t too_long; /* WR longer than hardware max */
} __aligned(CACHE_LINE_SIZE);
struct sge {
uint16_t timer_val[SGE_NTIMERS];
uint8_t counter_val[SGE_NCOUNTERS];
+ int fl_starve_threshold;
int nrxq; /* total rx queues (all ports and the rest) */
int ntxq; /* total tx queues (all ports and the rest) */
@@ -403,7 +405,7 @@ struct sge {
struct sge_iq fwq; /* Firmware event queue */
struct sge_ctrlq *ctrlq;/* Control queues */
- struct sge_iq *fiq; /* Forwarded interrupt queues (INTR_FWD) */
+ struct sge_iq *intrq; /* Interrupt queues */
struct sge_txq *txq; /* NIC tx queues */
struct sge_rxq *rxq; /* NIC rx queues */
@@ -445,6 +447,7 @@ struct adapter {
struct port_info *port[MAX_NPORTS];
uint8_t chan_map[NCHAN];
+ struct l2t_data *l2t; /* L2 table */
struct tid_info tids;
int registered_device_map;
@@ -456,7 +459,9 @@ struct adapter {
struct t4_virt_res vres;
struct sysctl_ctx_list ctx; /* from first_port_up to last_port_down */
+ struct sysctl_oid *oid_fwq;
struct sysctl_oid *oid_ctrlq;
+ struct sysctl_oid *oid_intrq;
struct mtx sc_lock;
char lockname[16];
@@ -502,7 +507,10 @@ struct adapter {
rxq = &pi->adapter->sge.rxq[pi->first_rxq]; \
for (iter = 0; iter < pi->nrxq; ++iter, ++rxq)
-#define NFIQ(sc) ((sc)->intr_count > 1 ? (sc)->intr_count - 1 : 1)
+/* One for errors, one for firmware events */
+#define T4_EXTRA_INTR 2
+#define NINTRQ(sc) ((sc)->intr_count > T4_EXTRA_INTR ? \
+ (sc)->intr_count - T4_EXTRA_INTR : 1)
static inline uint32_t
t4_read_reg(struct adapter *sc, uint32_t reg)
@@ -599,12 +607,9 @@ int t4_teardown_adapter_queues(struct adapter *);
int t4_setup_eth_queues(struct port_info *);
int t4_teardown_eth_queues(struct port_info *);
void t4_intr_all(void *);
-void t4_intr_fwd(void *);
+void t4_intr(void *);
void t4_intr_err(void *);
void t4_intr_evt(void *);
-void t4_intr_data(void *);
-void t4_evt_rx(void *);
-void t4_eth_rx(void *);
int t4_mgmt_tx(struct adapter *, struct mbuf *);
int t4_eth_tx(struct ifnet *, struct sge_txq *, struct mbuf *);
void t4_update_fl_bufsize(struct ifnet *);
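
The NINTRQ() macro above sizes the pool of interrupt queues: T4_EXTRA_INTR
vectors are reserved for the error and firmware-event interrupts and each
remaining vector backs one interrupt queue, with a single shared queue as the
fallback when too few vectors are available. A minimal standalone sketch of
the same computation (the vector counts are illustrative only):

#include <stdio.h>

#define T4_EXTRA_INTR 2	/* one vector for errors, one for firmware events */

/* Mirrors NINTRQ(sc): vectors beyond the two reserved ones each back an
 * interrupt queue; with T4_EXTRA_INTR or fewer vectors a single queue is
 * shared by everything. */
static int
nintrq(int intr_count)
{
	return (intr_count > T4_EXTRA_INTR ? intr_count - T4_EXTRA_INTR : 1);
}

int
main(void)
{
	printf("%d\n", nintrq(18));	/* 16 interrupt queues */
	printf("%d\n", nintrq(1));	/* 1, e.g. INTx with everything shared */
	return (0);
}
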
diff --git a/sys/dev/cxgbe/common/common.h b/sys/dev/cxgbe/common/common.h
index fa5ac9f..913be9b 100644
--- a/sys/dev/cxgbe/common/common.h
+++ b/sys/dev/cxgbe/common/common.h
@@ -54,7 +54,7 @@ enum {
#define FW_VERSION_MAJOR 1
#define FW_VERSION_MINOR 3
-#define FW_VERSION_MICRO 8
+#define FW_VERSION_MICRO 10
struct port_stats {
u64 tx_octets; /* total # of octets in good frames */
diff --git a/sys/dev/cxgbe/common/jhash.h b/sys/dev/cxgbe/common/jhash.h
new file mode 100644
index 0000000..4546b7b
--- /dev/null
+++ b/sys/dev/cxgbe/common/jhash.h
@@ -0,0 +1,140 @@
+#ifndef _JHASH_H
+#define _JHASH_H
+
+/* jhash.h: Jenkins hash support.
+ *
+ * Copyright (C) 1996 Bob Jenkins (bob_jenkins@burtleburtle.net)
+ *
+ * http://burtleburtle.net/bob/hash/
+ *
+ * These are the credits from Bob's sources:
+ *
+ * lookup2.c, by Bob Jenkins, December 1996, Public Domain.
+ * hash(), hash2(), hash3, and mix() are externally useful functions.
+ * Routines to test the hash are included if SELF_TEST is defined.
+ * You can use this free for any purpose. It has no warranty.
+ *
+ * $FreeBSD$
+ */
+
+/* NOTE: Arguments are modified. */
+#define __jhash_mix(a, b, c) \
+{ \
+ a -= b; a -= c; a ^= (c>>13); \
+ b -= c; b -= a; b ^= (a<<8); \
+ c -= a; c -= b; c ^= (b>>13); \
+ a -= b; a -= c; a ^= (c>>12); \
+ b -= c; b -= a; b ^= (a<<16); \
+ c -= a; c -= b; c ^= (b>>5); \
+ a -= b; a -= c; a ^= (c>>3); \
+ b -= c; b -= a; b ^= (a<<10); \
+ c -= a; c -= b; c ^= (b>>15); \
+}
+
+/* The golden ratio: an arbitrary value */
+#define JHASH_GOLDEN_RATIO 0x9e3779b9
+
+/* The most generic version, hashes an arbitrary sequence
+ * of bytes. No alignment or length assumptions are made about
+ * the input key.
+ */
+static inline u32 jhash(const void *key, u32 length, u32 initval)
+{
+ u32 a, b, c, len;
+ const u8 *k = key;
+
+ len = length;
+ a = b = JHASH_GOLDEN_RATIO;
+ c = initval;
+
+ while (len >= 12) {
+ a += (k[0] +((u32)k[1]<<8) +((u32)k[2]<<16) +((u32)k[3]<<24));
+ b += (k[4] +((u32)k[5]<<8) +((u32)k[6]<<16) +((u32)k[7]<<24));
+ c += (k[8] +((u32)k[9]<<8) +((u32)k[10]<<16)+((u32)k[11]<<24));
+
+ __jhash_mix(a,b,c);
+
+ k += 12;
+ len -= 12;
+ }
+
+ c += length;
+ switch (len) {
+ case 11: c += ((u32)k[10]<<24);
+ case 10: c += ((u32)k[9]<<16);
+ case 9 : c += ((u32)k[8]<<8);
+ case 8 : b += ((u32)k[7]<<24);
+ case 7 : b += ((u32)k[6]<<16);
+ case 6 : b += ((u32)k[5]<<8);
+ case 5 : b += k[4];
+ case 4 : a += ((u32)k[3]<<24);
+ case 3 : a += ((u32)k[2]<<16);
+ case 2 : a += ((u32)k[1]<<8);
+ case 1 : a += k[0];
+ };
+
+ __jhash_mix(a,b,c);
+
+ return c;
+}
+
+/* A special optimized version that handles one or more u32s.
+ * The length parameter here is the number of u32s in the key.
+ */
+static inline u32 jhash2(u32 *k, u32 length, u32 initval)
+{
+ u32 a, b, c, len;
+
+ a = b = JHASH_GOLDEN_RATIO;
+ c = initval;
+ len = length;
+
+ while (len >= 3) {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ __jhash_mix(a, b, c);
+ k += 3; len -= 3;
+ }
+
+ c += length * 4;
+
+ switch (len) {
+ case 2 : b += k[1];
+ case 1 : a += k[0];
+ };
+
+ __jhash_mix(a,b,c);
+
+ return c;
+}
+
+/* Special ultra-optimized versions that know they are hashing exactly
+ * 3, 2 or 1 word(s).
+ *
+ * NOTE: In particular the "c += length; __jhash_mix(a,b,c);" normally
+ * done at the end is not done here.
+ */
+static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
+{
+ a += JHASH_GOLDEN_RATIO;
+ b += JHASH_GOLDEN_RATIO;
+ c += initval;
+
+ __jhash_mix(a, b, c);
+
+ return c;
+}
+
+static inline u32 jhash_2words(u32 a, u32 b, u32 initval)
+{
+ return jhash_3words(a, b, 0, initval);
+}
+
+static inline u32 jhash_1word(u32 a, u32 initval)
+{
+ return jhash_3words(a, 0, 0, initval);
+}
+
+#endif /* _JHASH_H */
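
For reference, a minimal user-space harness showing how the hash helpers
above are typically invoked (hypothetical, not part of the driver; the
u32/u8 typedefs normally come from osdep.h):

/* Hypothetical standalone harness for the jhash.h helpers. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint32_t u32;	/* provided by osdep.h inside the driver */
typedef uint8_t u8;

#include "jhash.h"

int
main(void)
{
	const char key[] = "example key";
	u32 words[2] = { 0xdeadbeef, 0xcafef00d };

	/* Generic form: arbitrary bytes, length in bytes. */
	printf("jhash:        0x%08x\n", jhash(key, strlen(key), 0));

	/* Word form: length is the number of u32s, not bytes. */
	printf("jhash2:       0x%08x\n", jhash2(words, 2, 0));

	/* Fixed-width form, as used by arp_hash() in t4_l2t.c below. */
	printf("jhash_2words: 0x%08x\n", jhash_2words(words[0], words[1], 0));
	return (0);
}
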
diff --git a/sys/dev/cxgbe/common/t4fw_interface.h b/sys/dev/cxgbe/common/t4fw_interface.h
index 88126be..3eb9615 100644
--- a/sys/dev/cxgbe/common/t4fw_interface.h
+++ b/sys/dev/cxgbe/common/t4fw_interface.h
@@ -43,6 +43,7 @@ enum fw_retval {
FW_ENOMEM = 12, /* out of memory */
FW_EFAULT = 14, /* bad address; fw bad */
FW_EBUSY = 16, /* resource busy */
+ FW_EEXIST = 17, /* File exists */
FW_EINVAL = 22, /* invalid argument */
FW_ENOSYS = 38, /* functionality not implemented */
FW_EPROTO = 71, /* protocol error */
@@ -59,6 +60,8 @@ enum fw_retval {
FW_FCOE_NO_XCHG = 136, /* */
FW_SCSI_RSP_ERR = 137, /* */
FW_ERR_RDEV_IMPL_LOGO = 138, /* */
+ FW_SCSI_UNDER_FLOW_ERR = 139, /* */
+ FW_SCSI_OVER_FLOW_ERR = 140, /* */
};
/******************************************************************************
@@ -85,7 +88,8 @@ enum fw_wr_opcodes {
FW_RI_FR_NSMR_WR = 0x19,
FW_RI_INV_LSTAG_WR = 0x1a,
FW_RI_WR = 0x0d,
- FW_LASTC2E_WR = 0x4a
+ FW_ISCSI_NODE_WR = 0x4a,
+ FW_LASTC2E_WR = 0x4b
};
/*
@@ -514,7 +518,7 @@ struct fw_eth_tx_pkts_wr {
__be32 r3;
__be16 plen;
__u8 npkt;
- __u8 r4;
+ __u8 type;
};
struct fw_eq_flush_wr {
@@ -1465,6 +1469,65 @@ struct fw_ri_wr {
#define G_FW_RI_WR_P2PTYPE(x) \
(((x) >> S_FW_RI_WR_P2PTYPE) & M_FW_RI_WR_P2PTYPE)
+#ifdef FOISCSI
+struct fw_iscsi_node_wr {
+ __u8 opcode;
+ __u8 subop;
+ __u8 node_attr_to_compl;
+ __u8 len16;
+ __u8 status;
+ __u8 r2;
+ __be16 immd_len;
+ __be64 cookie;
+ __be32 node_id;
+ __be32 ctrl_handle;
+ __be32 io_handle;
+ __be32 r3;
+};
+
+#define S_FW_ISCSI_NODE_WR_NODE_ATTR 7
+#define M_FW_ISCSI_NODE_WR_NODE_ATTR 0x1
+#define V_FW_ISCSI_NODE_WR_NODE_ATTR(x) ((x) << S_FW_ISCSI_NODE_WR_NODE_ATTR)
+#define G_FW_ISCSI_NODE_WR_NODE_ATTR(x) \
+ (((x) >> S_FW_ISCSI_NODE_WR_NODE_ATTR) & M_FW_ISCSI_NODE_WR_NODE_ATTR)
+#define F_FW_ISCSI_NODE_WR_NODE_ATTR V_FW_ISCSI_NODE_WR_NODE_ATTR(1U)
+
+#define S_FW_ISCSI_NODE_WR_SESS_ATTR 6
+#define M_FW_ISCSI_NODE_WR_SESS_ATTR 0x1
+#define V_FW_ISCSI_NODE_WR_SESS_ATTR(x) ((x) << S_FW_ISCSI_NODE_WR_SESS_ATTR)
+#define G_FW_ISCSI_NODE_WR_SESS_ATTR(x) \
+ (((x) >> S_FW_ISCSI_NODE_WR_SESS_ATTR) & M_FW_ISCSI_NODE_WR_SESS_ATTR)
+#define F_FW_ISCSI_NODE_WR_SESS_ATTR V_FW_ISCSI_NODE_WR_SESS_ATTR(1U)
+
+#define S_FW_ISCSI_NODE_WR_CONN_ATTR 5
+#define M_FW_ISCSI_NODE_WR_CONN_ATTR 0x1
+#define V_FW_ISCSI_NODE_WR_CONN_ATTR(x) ((x) << S_FW_ISCSI_NODE_WR_CONN_ATTR)
+#define G_FW_ISCSI_NODE_WR_CONN_ATTR(x) \
+ (((x) >> S_FW_ISCSI_NODE_WR_CONN_ATTR) & M_FW_ISCSI_NODE_WR_CONN_ATTR)
+#define F_FW_ISCSI_NODE_WR_CONN_ATTR V_FW_ISCSI_NODE_WR_CONN_ATTR(1U)
+
+#define S_FW_ISCSI_NODE_WR_TGT_ATTR 4
+#define M_FW_ISCSI_NODE_WR_TGT_ATTR 0x1
+#define V_FW_ISCSI_NODE_WR_TGT_ATTR(x) ((x) << S_FW_ISCSI_NODE_WR_TGT_ATTR)
+#define G_FW_ISCSI_NODE_WR_TGT_ATTR(x) \
+ (((x) >> S_FW_ISCSI_NODE_WR_TGT_ATTR) & M_FW_ISCSI_NODE_WR_TGT_ATTR)
+#define F_FW_ISCSI_NODE_WR_TGT_ATTR V_FW_ISCSI_NODE_WR_TGT_ATTR(1U)
+
+#define S_FW_ISCSI_NODE_WR_NODE_TYPE 3
+#define M_FW_ISCSI_NODE_WR_NODE_TYPE 0x1
+#define V_FW_ISCSI_NODE_WR_NODE_TYPE(x) ((x) << S_FW_ISCSI_NODE_WR_NODE_TYPE)
+#define G_FW_ISCSI_NODE_WR_NODE_TYPE(x) \
+ (((x) >> S_FW_ISCSI_NODE_WR_NODE_TYPE) & M_FW_ISCSI_NODE_WR_NODE_TYPE)
+#define F_FW_ISCSI_NODE_WR_NODE_TYPE V_FW_ISCSI_NODE_WR_NODE_TYPE(1U)
+
+#define S_FW_ISCSI_NODE_WR_COMPL 0
+#define M_FW_ISCSI_NODE_WR_COMPL 0x1
+#define V_FW_ISCSI_NODE_WR_COMPL(x) ((x) << S_FW_ISCSI_NODE_WR_COMPL)
+#define G_FW_ISCSI_NODE_WR_COMPL(x) \
+ (((x) >> S_FW_ISCSI_NODE_WR_COMPL) & M_FW_ISCSI_NODE_WR_COMPL)
+#define F_FW_ISCSI_NODE_WR_COMPL V_FW_ISCSI_NODE_WR_COMPL(1U)
+
+#endif
/******************************************************************************
* C O M M A N D s
@@ -1511,6 +1574,7 @@ enum fw_cmd_opcodes {
FW_RSS_VI_CONFIG_CMD = 0x23,
FW_SCHED_CMD = 0x24,
FW_DEVLOG_CMD = 0x25,
+ FW_NETIF_CMD = 0x26,
FW_LASTC2E_CMD = 0x40,
FW_ERROR_CMD = 0x80,
FW_DEBUG_CMD = 0x81,
@@ -1941,6 +2005,8 @@ enum fw_caps_config_iscsi {
FW_CAPS_CONFIG_ISCSI_TARGET_PDU = 0x00000002,
FW_CAPS_CONFIG_ISCSI_INITIATOR_CNXOFLD = 0x00000004,
FW_CAPS_CONFIG_ISCSI_TARGET_CNXOFLD = 0x00000008,
+ FW_CAPS_CONFIG_ISCSI_INITIATOR_SSNOFLD = 0x00000010,
+ FW_CAPS_CONFIG_ISCSI_TARGET_SSNOFLD = 0x00000020,
};
enum fw_caps_config_fcoe {
@@ -3941,6 +4007,39 @@ enum fw_port_cap {
FW_PORT_CAP_TECHKX4 = 0x2000,
};
+#define S_FW_PORT_AUXLINFO_MDI 3
+#define M_FW_PORT_AUXLINFO_MDI 0x3
+#define V_FW_PORT_AUXLINFO_MDI(x) ((x) << S_FW_PORT_AUXLINFO_MDI)
+#define G_FW_PORT_AUXLINFO_MDI(x) \
+ (((x) >> S_FW_PORT_AUXLINFO_MDI) & M_FW_PORT_AUXLINFO_MDI)
+
+#define S_FW_PORT_AUXLINFO_KX4 2
+#define M_FW_PORT_AUXLINFO_KX4 0x1
+#define V_FW_PORT_AUXLINFO_KX4(x) ((x) << S_FW_PORT_AUXLINFO_KX4)
+#define G_FW_PORT_AUXLINFO_KX4(x) \
+ (((x) >> S_FW_PORT_AUXLINFO_KX4) & M_FW_PORT_AUXLINFO_KX4)
+#define F_FW_PORT_AUXLINFO_KX4 V_FW_PORT_AUXLINFO_KX4(1U)
+
+#define S_FW_PORT_AUXLINFO_KR 1
+#define M_FW_PORT_AUXLINFO_KR 0x1
+#define V_FW_PORT_AUXLINFO_KR(x) ((x) << S_FW_PORT_AUXLINFO_KR)
+#define G_FW_PORT_AUXLINFO_KR(x) \
+ (((x) >> S_FW_PORT_AUXLINFO_KR) & M_FW_PORT_AUXLINFO_KR)
+#define F_FW_PORT_AUXLINFO_KR V_FW_PORT_AUXLINFO_KR(1U)
+
+#define S_FW_PORT_AUXLINFO_FEC 0
+#define M_FW_PORT_AUXLINFO_FEC 0x1
+#define V_FW_PORT_AUXLINFO_FEC(x) ((x) << S_FW_PORT_AUXLINFO_FEC)
+#define G_FW_PORT_AUXLINFO_FEC(x) \
+ (((x) >> S_FW_PORT_AUXLINFO_FEC) & M_FW_PORT_AUXLINFO_FEC)
+#define F_FW_PORT_AUXLINFO_FEC V_FW_PORT_AUXLINFO_FEC(1U)
+
+#define S_FW_PORT_RCAP_AUX 11
+#define M_FW_PORT_RCAP_AUX 0x7
+#define V_FW_PORT_RCAP_AUX(x) ((x) << S_FW_PORT_RCAP_AUX)
+#define G_FW_PORT_RCAP_AUX(x) \
+ (((x) >> S_FW_PORT_RCAP_AUX) & M_FW_PORT_RCAP_AUX)
+
#define S_FW_PORT_CAP_SPEED 0
#define M_FW_PORT_CAP_SPEED 0x3f
#define V_FW_PORT_CAP_SPEED(x) ((x) << S_FW_PORT_CAP_SPEED)
@@ -4002,11 +4101,23 @@ enum fw_port_l2cfg_ctlbf {
FW_PORT_L2_CTLBF_MTU = 0x40
};
+enum fw_port_dcb_cfg {
+ FW_PORT_DCB_CFG_PG = 0x01,
+ FW_PORT_DCB_CFG_PFC = 0x02,
+ FW_PORT_DCB_CFG_APPL = 0x04
+};
+
+enum fw_port_dcb_cfg_rc {
+ FW_PORT_DCB_CFG_SUCCESS = 0x0,
+ FW_PORT_DCB_CFG_ERROR = 0x1
+};
+
enum fw_port_dcb_type {
FW_PORT_DCB_TYPE_PGID = 0x00,
FW_PORT_DCB_TYPE_PGRATE = 0x01,
FW_PORT_DCB_TYPE_PRIORATE = 0x02,
- FW_PORT_DCB_TYPE_PFC = 0x03
+ FW_PORT_DCB_TYPE_PFC = 0x03,
+ FW_PORT_DCB_TYPE_APP_ID = 0x04,
};
struct fw_port_cmd {
@@ -4038,7 +4149,7 @@ struct fw_port_cmd {
__be16 acap;
__be16 mtu;
__u8 cbllen;
- __u8 r7;
+ __u8 auxlinfo;
__be32 r8;
__be64 r9;
} info;
@@ -4068,6 +4179,14 @@ struct fw_port_cmd {
__be16 r10[3];
__be64 r11;
} pfc;
+ struct fw_port_app_priority {
+ __u8 type;
+ __u8 r10_lo[3];
+ __u8 prio;
+ __u8 sel;
+ __be16 protocolid;
+ __u8 r12[8];
+ } app_priority;
} dcb;
} u;
};
@@ -5232,6 +5351,116 @@ struct fw_devlog_cmd {
(((x) >> S_FW_DEVLOG_CMD_MEMADDR16_DEVLOG) & \
M_FW_DEVLOG_CMD_MEMADDR16_DEVLOG)
+struct fw_netif_cmd {
+ __be32 op_portid;
+ __be32 retval_to_len16;
+ __be32 add_to_ipv4gw;
+ __be32 vlanid_mtuval;
+ __be32 gwaddr;
+ __be32 addr;
+ __be32 nmask;
+ __be32 bcaddr;
+};
+
+#define S_FW_NETIF_CMD_PORTID 0
+#define M_FW_NETIF_CMD_PORTID 0xf
+#define V_FW_NETIF_CMD_PORTID(x) ((x) << S_FW_NETIF_CMD_PORTID)
+#define G_FW_NETIF_CMD_PORTID(x) \
+ (((x) >> S_FW_NETIF_CMD_PORTID) & M_FW_NETIF_CMD_PORTID)
+
+#define S_FW_NETIF_CMD_RETVAL 24
+#define M_FW_NETIF_CMD_RETVAL 0xff
+#define V_FW_NETIF_CMD_RETVAL(x) ((x) << S_FW_NETIF_CMD_RETVAL)
+#define G_FW_NETIF_CMD_RETVAL(x) \
+ (((x) >> S_FW_NETIF_CMD_RETVAL) & M_FW_NETIF_CMD_RETVAL)
+
+#define S_FW_NETIF_CMD_IFIDX 16
+#define M_FW_NETIF_CMD_IFIDX 0xff
+#define V_FW_NETIF_CMD_IFIDX(x) ((x) << S_FW_NETIF_CMD_IFIDX)
+#define G_FW_NETIF_CMD_IFIDX(x) \
+ (((x) >> S_FW_NETIF_CMD_IFIDX) & M_FW_NETIF_CMD_IFIDX)
+
+#define S_FW_NETIF_CMD_LEN16 0
+#define M_FW_NETIF_CMD_LEN16 0xff
+#define V_FW_NETIF_CMD_LEN16(x) ((x) << S_FW_NETIF_CMD_LEN16)
+#define G_FW_NETIF_CMD_LEN16(x) \
+ (((x) >> S_FW_NETIF_CMD_LEN16) & M_FW_NETIF_CMD_LEN16)
+
+#define S_FW_NETIF_CMD_ADD 31
+#define M_FW_NETIF_CMD_ADD 0x1
+#define V_FW_NETIF_CMD_ADD(x) ((x) << S_FW_NETIF_CMD_ADD)
+#define G_FW_NETIF_CMD_ADD(x) \
+ (((x) >> S_FW_NETIF_CMD_ADD) & M_FW_NETIF_CMD_ADD)
+#define F_FW_NETIF_CMD_ADD V_FW_NETIF_CMD_ADD(1U)
+
+#define S_FW_NETIF_CMD_LINK 30
+#define M_FW_NETIF_CMD_LINK 0x1
+#define V_FW_NETIF_CMD_LINK(x) ((x) << S_FW_NETIF_CMD_LINK)
+#define G_FW_NETIF_CMD_LINK(x) \
+ (((x) >> S_FW_NETIF_CMD_LINK) & M_FW_NETIF_CMD_LINK)
+#define F_FW_NETIF_CMD_LINK V_FW_NETIF_CMD_LINK(1U)
+
+#define S_FW_NETIF_CMD_VLAN 29
+#define M_FW_NETIF_CMD_VLAN 0x1
+#define V_FW_NETIF_CMD_VLAN(x) ((x) << S_FW_NETIF_CMD_VLAN)
+#define G_FW_NETIF_CMD_VLAN(x) \
+ (((x) >> S_FW_NETIF_CMD_VLAN) & M_FW_NETIF_CMD_VLAN)
+#define F_FW_NETIF_CMD_VLAN V_FW_NETIF_CMD_VLAN(1U)
+
+#define S_FW_NETIF_CMD_MTU 28
+#define M_FW_NETIF_CMD_MTU 0x1
+#define V_FW_NETIF_CMD_MTU(x) ((x) << S_FW_NETIF_CMD_MTU)
+#define G_FW_NETIF_CMD_MTU(x) \
+ (((x) >> S_FW_NETIF_CMD_MTU) & M_FW_NETIF_CMD_MTU)
+#define F_FW_NETIF_CMD_MTU V_FW_NETIF_CMD_MTU(1U)
+
+#define S_FW_NETIF_CMD_DHCP 27
+#define M_FW_NETIF_CMD_DHCP 0x1
+#define V_FW_NETIF_CMD_DHCP(x) ((x) << S_FW_NETIF_CMD_DHCP)
+#define G_FW_NETIF_CMD_DHCP(x) \
+ (((x) >> S_FW_NETIF_CMD_DHCP) & M_FW_NETIF_CMD_DHCP)
+#define F_FW_NETIF_CMD_DHCP V_FW_NETIF_CMD_DHCP(1U)
+
+#define S_FW_NETIF_CMD_IPV4BCADDR 3
+#define M_FW_NETIF_CMD_IPV4BCADDR 0x1
+#define V_FW_NETIF_CMD_IPV4BCADDR(x) ((x) << S_FW_NETIF_CMD_IPV4BCADDR)
+#define G_FW_NETIF_CMD_IPV4BCADDR(x) \
+ (((x) >> S_FW_NETIF_CMD_IPV4BCADDR) & M_FW_NETIF_CMD_IPV4BCADDR)
+#define F_FW_NETIF_CMD_IPV4BCADDR V_FW_NETIF_CMD_IPV4BCADDR(1U)
+
+#define S_FW_NETIF_CMD_IPV4NMASK 2
+#define M_FW_NETIF_CMD_IPV4NMASK 0x1
+#define V_FW_NETIF_CMD_IPV4NMASK(x) ((x) << S_FW_NETIF_CMD_IPV4NMASK)
+#define G_FW_NETIF_CMD_IPV4NMASK(x) \
+ (((x) >> S_FW_NETIF_CMD_IPV4NMASK) & M_FW_NETIF_CMD_IPV4NMASK)
+#define F_FW_NETIF_CMD_IPV4NMASK V_FW_NETIF_CMD_IPV4NMASK(1U)
+
+#define S_FW_NETIF_CMD_IPV4ADDR 1
+#define M_FW_NETIF_CMD_IPV4ADDR 0x1
+#define V_FW_NETIF_CMD_IPV4ADDR(x) ((x) << S_FW_NETIF_CMD_IPV4ADDR)
+#define G_FW_NETIF_CMD_IPV4ADDR(x) \
+ (((x) >> S_FW_NETIF_CMD_IPV4ADDR) & M_FW_NETIF_CMD_IPV4ADDR)
+#define F_FW_NETIF_CMD_IPV4ADDR V_FW_NETIF_CMD_IPV4ADDR(1U)
+
+#define S_FW_NETIF_CMD_IPV4GW 0
+#define M_FW_NETIF_CMD_IPV4GW 0x1
+#define V_FW_NETIF_CMD_IPV4GW(x) ((x) << S_FW_NETIF_CMD_IPV4GW)
+#define G_FW_NETIF_CMD_IPV4GW(x) \
+ (((x) >> S_FW_NETIF_CMD_IPV4GW) & M_FW_NETIF_CMD_IPV4GW)
+#define F_FW_NETIF_CMD_IPV4GW V_FW_NETIF_CMD_IPV4GW(1U)
+
+#define S_FW_NETIF_CMD_VLANID 16
+#define M_FW_NETIF_CMD_VLANID 0xfff
+#define V_FW_NETIF_CMD_VLANID(x) ((x) << S_FW_NETIF_CMD_VLANID)
+#define G_FW_NETIF_CMD_VLANID(x) \
+ (((x) >> S_FW_NETIF_CMD_VLANID) & M_FW_NETIF_CMD_VLANID)
+
+#define S_FW_NETIF_CMD_MTUVAL 0
+#define M_FW_NETIF_CMD_MTUVAL 0xffff
+#define V_FW_NETIF_CMD_MTUVAL(x) ((x) << S_FW_NETIF_CMD_MTUVAL)
+#define G_FW_NETIF_CMD_MTUVAL(x) \
+ (((x) >> S_FW_NETIF_CMD_MTUVAL) & M_FW_NETIF_CMD_MTUVAL)
+
enum fw_error_type {
FW_ERROR_TYPE_EXCEPTION = 0x0,
FW_ERROR_TYPE_HWMODULE = 0x1,
diff --git a/sys/dev/cxgbe/offload.h b/sys/dev/cxgbe/offload.h
index fa58853..f31b840 100644
--- a/sys/dev/cxgbe/offload.h
+++ b/sys/dev/cxgbe/offload.h
@@ -31,6 +31,24 @@
#ifndef __T4_OFFLOAD_H__
#define __T4_OFFLOAD_H__
+/* CPL message priority levels */
+enum {
+ CPL_PRIORITY_DATA = 0, /* data messages */
+ CPL_PRIORITY_SETUP = 1, /* connection setup messages */
+ CPL_PRIORITY_TEARDOWN = 0, /* connection teardown messages */
+ CPL_PRIORITY_LISTEN = 1, /* listen start/stop messages */
+ CPL_PRIORITY_ACK = 1, /* RX ACK messages */
+ CPL_PRIORITY_CONTROL = 1 /* control messages */
+};
+
+#define INIT_TP_WR(w, tid) do { \
+ (w)->wr.wr_hi = htonl(V_FW_WR_OP(FW_TP_WR) | \
+ V_FW_WR_IMMDLEN(sizeof(*w) - sizeof(w->wr))); \
+ (w)->wr.wr_mid = htonl(V_FW_WR_LEN16(DIV_ROUND_UP(sizeof(*w), 16)) | \
+ V_FW_WR_FLOWID(tid)); \
+ (w)->wr.wr_lo = cpu_to_be64(0); \
+} while (0)
+
/*
* Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
*/
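
INIT_TP_WR() above fills in the common firmware work-request header (opcode,
immediate length, length in 16-byte units, and flow id) ahead of the
CPL-specific fields; write_l2e() in t4_l2t.c, added later in this patch, is a
concrete caller. A condensed sketch of the pattern, assuming the CPL types
and macros from common/t4_msg.h:

/* Condensed from write_l2e() below: build a CPL_L2T_WRITE_REQ in an mbuf. */
struct cpl_l2t_write_req *req;
struct mbuf *m;

if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
	return (ENOMEM);
req = mtod(m, struct cpl_l2t_write_req *);
m->m_pkthdr.len = m->m_len = sizeof(*req);

INIT_TP_WR(req, 0);	/* common WR header; the flowid (tid) is 0 here */
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, idx));
/* ... CPL-specific fields follow, then hand the mbuf to t4_mgmt_tx(). */
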
diff --git a/sys/dev/cxgbe/osdep.h b/sys/dev/cxgbe/osdep.h
index 438a434..bde1eb4 100644
--- a/sys/dev/cxgbe/osdep.h
+++ b/sys/dev/cxgbe/osdep.h
@@ -82,6 +82,7 @@ typedef boolean_t bool;
#define DIV_ROUND_UP(x, y) howmany(x, y)
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#define container_of(p, s, f) ((s *)(((uint8_t *)(p)) - offsetof(s, f)))
#define swab16(x) bswap16(x)
#define swab32(x) bswap32(x)
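
container_of() recovers a pointer to an enclosing structure from a pointer
to one of its members by subtracting the member's offset; t4_l2e_free() in
t4_l2t.c uses it to get from an l2t_entry back to the l2t_data that holds
it. A tiny self-contained illustration with hypothetical types:

#include <assert.h>
#include <stddef.h>	/* offsetof */
#include <stdint.h>

#define container_of(p, s, f) ((s *)(((uint8_t *)(p)) - offsetof(s, f)))

struct outer {
	int a;
	int member;
};

int
main(void)
{
	struct outer o;
	int *p = &o.member;

	/* Walk back from the member to the enclosing structure. */
	assert(container_of(p, struct outer, member) == &o);
	return (0);
}
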
diff --git a/sys/dev/cxgbe/t4_ioctl.h b/sys/dev/cxgbe/t4_ioctl.h
index 8f1d133..ecc2c3d 100644
--- a/sys/dev/cxgbe/t4_ioctl.h
+++ b/sys/dev/cxgbe/t4_ioctl.h
@@ -46,6 +46,7 @@ enum {
T4_GET_FILTER, /* get information about a filter */
T4_SET_FILTER, /* program a filter */
T4_DEL_FILTER, /* delete a filter */
+ T4_GET_SGE_CONTEXT, /* get SGE context for a queue */
};
struct t4_reg {
@@ -178,10 +179,26 @@ struct t4_filter_specification {
struct t4_filter {
uint32_t idx;
+ uint16_t l2tidx;
+ uint16_t smtidx;
uint64_t hits;
struct t4_filter_specification fs;
};
+#define T4_SGE_CONTEXT_SIZE 24
+enum {
+ SGE_CONTEXT_EGRESS,
+ SGE_CONTEXT_INGRESS,
+ SGE_CONTEXT_FLM,
+ SGE_CONTEXT_CNM
+};
+
+struct t4_sge_context {
+ uint32_t mem_id;
+ uint32_t cid;
+ uint32_t data[T4_SGE_CONTEXT_SIZE / 4];
+};
+
#define CHELSIO_T4_GETREG _IOWR('f', T4_GETREG, struct t4_reg)
#define CHELSIO_T4_SETREG _IOW('f', T4_SETREG, struct t4_reg)
#define CHELSIO_T4_REGDUMP _IOWR('f', T4_REGDUMP, struct t4_regdump)
@@ -190,4 +207,6 @@ struct t4_filter {
#define CHELSIO_T4_GET_FILTER _IOWR('f', T4_GET_FILTER, struct t4_filter)
#define CHELSIO_T4_SET_FILTER _IOW('f', T4_SET_FILTER, struct t4_filter)
#define CHELSIO_T4_DEL_FILTER _IOW('f', T4_DEL_FILTER, struct t4_filter)
+#define CHELSIO_T4_GET_SGE_CONTEXT _IOWR('f', T4_GET_SGE_CONTEXT, \
+ struct t4_sge_context)
#endif
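
A sketch of how a user-space tool might exercise the new
CHELSIO_T4_GET_SGE_CONTEXT ioctl; the /dev/t4nex0 device node name is an
assumption for illustration:

/* Hypothetical user-space caller of CHELSIO_T4_GET_SGE_CONTEXT. */
#include <sys/ioctl.h>
#include <err.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#include "t4_ioctl.h"

int
main(void)
{
	struct t4_sge_context cntxt = {
		.mem_id = SGE_CONTEXT_EGRESS,	/* which context type to read */
		.cid = 0,			/* SGE context id of the queue */
	};
	int fd, i;

	if ((fd = open("/dev/t4nex0", O_RDWR)) < 0)	/* assumed node name */
		err(1, "open");
	if (ioctl(fd, CHELSIO_T4_GET_SGE_CONTEXT, &cntxt) < 0)
		err(1, "ioctl");
	for (i = 0; i < T4_SGE_CONTEXT_SIZE / 4; i++)
		printf("%08x%c", cntxt.data[i],
		    i == T4_SGE_CONTEXT_SIZE / 4 - 1 ? '\n' : ' ');
	close(fd);
	return (0);
}
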
diff --git a/sys/dev/cxgbe/t4_l2t.c b/sys/dev/cxgbe/t4_l2t.c
new file mode 100644
index 0000000..31197b8
--- /dev/null
+++ b/sys/dev/cxgbe/t4_l2t.c
@@ -0,0 +1,361 @@
+/*-
+ * Copyright (c) 2011 Chelsio Communications, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_inet.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/rwlock.h>
+#include <sys/socket.h>
+#include <net/if.h>
+#include <net/ethernet.h>
+#include <net/if_vlan_var.h>
+#include <net/if_dl.h>
+#include <net/if_llatbl.h>
+#include <net/route.h>
+#include <netinet/in.h>
+#include <netinet/in_var.h>
+#include <netinet/if_ether.h>
+
+#include "common/common.h"
+#include "common/jhash.h"
+#include "common/t4_msg.h"
+#include "offload.h"
+#include "t4_l2t.h"
+
+/* identifies sync vs async L2T_WRITE_REQs */
+#define S_SYNC_WR 12
+#define V_SYNC_WR(x) ((x) << S_SYNC_WR)
+#define F_SYNC_WR V_SYNC_WR(1)
+
+enum {
+ L2T_STATE_VALID, /* entry is up to date */
+ L2T_STATE_STALE, /* entry may be used but needs revalidation */
+ L2T_STATE_RESOLVING, /* entry needs address resolution */
+ L2T_STATE_SYNC_WRITE, /* synchronous write of entry underway */
+
+ /* when state is one of the below the entry is not hashed */
+ L2T_STATE_SWITCHING, /* entry is being used by a switching filter */
+ L2T_STATE_UNUSED /* entry not in use */
+};
+
+struct l2t_data {
+ struct rwlock lock;
+ volatile int nfree; /* number of free entries */
+ struct l2t_entry *rover;/* starting point for next allocation */
+ struct l2t_entry l2tab[L2T_SIZE];
+};
+
+/*
+ * Module locking notes: There is a RW lock protecting the L2 table as a
+ * whole plus a spinlock per L2T entry. Entry lookups and allocations happen
+ * under the protection of the table lock; individual entry changes happen
+ * while holding that entry's spinlock. The table lock nests outside the
+ * entry locks. Allocations of new entries take the table lock as writers so
+ * no other lookups can happen while allocating new entries. Entry updates
+ * take the table lock as readers so multiple entries can be updated in
+ * parallel. An L2T entry is dropped by decrementing its reference count,
+ * so drops can happen in parallel with entry allocation, but no entry
+ * can change state or increment its ref count during allocation as both of
+ * these perform lookups.
+ *
+ * Note: We do not take references to ifnets in this module because both
+ * the TOE and the sockets already hold references to the interfaces and the
+ * lifetime of an L2T entry is fully contained in the lifetime of the TOE.
+ */
+static inline unsigned int
+vlan_prio(const struct l2t_entry *e)
+{
+ return e->vlan >> 13;
+}
+
+static inline void
+l2t_hold(struct l2t_data *d, struct l2t_entry *e)
+{
+ if (atomic_fetchadd_int(&e->refcnt, 1) == 0) /* 0 -> 1 transition */
+ atomic_add_int(&d->nfree, -1);
+}
+
+/*
+ * To avoid having to check address families we do not allow v4 and v6
+ * neighbors to be on the same hash chain. We keep v4 entries in the first
+ * half of available hash buckets and v6 in the second.
+ */
+enum {
+ L2T_SZ_HALF = L2T_SIZE / 2,
+ L2T_HASH_MASK = L2T_SZ_HALF - 1
+};
+
+static inline unsigned int
+arp_hash(const uint32_t *key, int ifindex)
+{
+ return jhash_2words(*key, ifindex, 0) & L2T_HASH_MASK;
+}
+
+static inline unsigned int
+ipv6_hash(const uint32_t *key, int ifindex)
+{
+ uint32_t xor = key[0] ^ key[1] ^ key[2] ^ key[3];
+
+ return L2T_SZ_HALF + (jhash_2words(xor, ifindex, 0) & L2T_HASH_MASK);
+}
+
+static inline unsigned int
+addr_hash(const uint32_t *addr, int addr_len, int ifindex)
+{
+ return addr_len == 4 ? arp_hash(addr, ifindex) :
+ ipv6_hash(addr, ifindex);
+}
+
+/*
+ * Checks if an L2T entry is for the given IP/IPv6 address. It does not check
+ * whether the L2T entry and the address are of the same address family.
+ * Callers ensure an address is only checked against L2T entries of the same
+ * family, something made trivial by the separation of IP and IPv6 hash chains
+ * mentioned above. Returns 0 if there's a match, non-zero otherwise.
+ */
+static inline int
+addreq(const struct l2t_entry *e, const uint32_t *addr)
+{
+ if (e->v6)
+ return (e->addr[0] ^ addr[0]) | (e->addr[1] ^ addr[1]) |
+ (e->addr[2] ^ addr[2]) | (e->addr[3] ^ addr[3]);
+ return e->addr[0] ^ addr[0];
+}
+
+/*
+ * Write an L2T entry. Must be called with the entry locked (XXX: really?).
+ * The write may be synchronous or asynchronous.
+ */
+static int
+write_l2e(struct adapter *sc, struct l2t_entry *e, int sync)
+{
+ struct mbuf *m;
+ struct cpl_l2t_write_req *req;
+
+ if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
+ return (ENOMEM);
+
+ req = mtod(m, struct cpl_l2t_write_req *);
+ m->m_pkthdr.len = m->m_len = sizeof(*req);
+
+ INIT_TP_WR(req, 0);
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx |
+ V_SYNC_WR(sync) | V_TID_QID(sc->sge.fwq.abs_id)));
+ req->params = htons(V_L2T_W_PORT(e->lport) | V_L2T_W_NOREPLY(!sync));
+ req->l2t_idx = htons(e->idx);
+ req->vlan = htons(e->vlan);
+ memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
+
+ t4_mgmt_tx(sc, m);
+
+ if (sync && e->state != L2T_STATE_SWITCHING)
+ e->state = L2T_STATE_SYNC_WRITE;
+
+ return (0);
+}
+
+/*
+ * Add a packet to an L2T entry's queue of packets awaiting resolution.
+ * Must be called with the entry's lock held.
+ */
+static inline void
+arpq_enqueue(struct l2t_entry *e, struct mbuf *m)
+{
+ mtx_assert(&e->lock, MA_OWNED);
+
+ m->m_next = NULL;
+ if (e->arpq_head)
+ e->arpq_tail->m_next = m;
+ else
+ e->arpq_head = m;
+ e->arpq_tail = m;
+}
+
+/*
+ * Allocate a free L2T entry. Must be called with l2t_data.lock held.
+ */
+static struct l2t_entry *
+alloc_l2e(struct l2t_data *d)
+{
+ struct l2t_entry *end, *e, **p;
+
+ rw_assert(&d->lock, RA_WLOCKED);
+
+ if (!atomic_load_acq_int(&d->nfree))
+ return (NULL);
+
+ /* there's definitely a free entry */
+ for (e = d->rover, end = &d->l2tab[L2T_SIZE]; e != end; ++e)
+ if (atomic_load_acq_int(&e->refcnt) == 0)
+ goto found;
+
+ for (e = d->l2tab; atomic_load_acq_int(&e->refcnt); ++e) ;
+found:
+ d->rover = e + 1;
+ atomic_add_int(&d->nfree, -1);
+
+ /*
+ * The entry we found may be an inactive entry that is
+ * presently in the hash table. We need to remove it.
+ */
+ if (e->state < L2T_STATE_SWITCHING) {
+ for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next) {
+ if (*p == e) {
+ *p = e->next;
+ e->next = NULL;
+ break;
+ }
+ }
+ }
+
+ e->state = L2T_STATE_UNUSED;
+ return e;
+}
+
+/*
+ * Called when an L2T entry has no more users. The entry is left in the hash
+ * table since it is likely to be reused but we also bump nfree to indicate
+ * that the entry can be reallocated for a different neighbor. We also drop
+ * the existing neighbor reference in case the neighbor is going away and is
+ * waiting on our reference.
+ *
+ * Because entries can be reallocated to other neighbors once their ref count
+ * drops to 0 we need to take the entry's lock to avoid races with a new
+ * incarnation.
+ */
+static void
+t4_l2e_free(struct l2t_entry *e)
+{
+ struct llentry *lle = NULL;
+ struct l2t_data *d;
+
+ mtx_lock(&e->lock);
+ if (atomic_load_acq_int(&e->refcnt) == 0) { /* hasn't been recycled */
+ lle = e->lle;
+ e->lle = NULL;
+ /*
+ * Don't need to worry about the arpq; an L2T entry can't be
+ * released if any packets are waiting for resolution as we
+ * need to be able to communicate with the device to close a
+ * connection.
+ */
+ }
+ mtx_unlock(&e->lock);
+
+ d = container_of(e, struct l2t_data, l2tab[e->idx]);
+ atomic_add_int(&d->nfree, 1);
+
+ if (lle)
+ LLE_FREE(lle);
+}
+
+void
+t4_l2t_release(struct l2t_entry *e)
+{
+ if (atomic_fetchadd_int(&e->refcnt, -1) == 1)
+ t4_l2e_free(e);
+}
+
+/*
+ * Allocate an L2T entry for use by a switching rule. Such entries need to be
+ * explicitly freed and while busy they are not on any hash chain, so normal
+ * address resolution updates do not see them.
+ */
+struct l2t_entry *
+t4_l2t_alloc_switching(struct l2t_data *d)
+{
+ struct l2t_entry *e;
+
+ rw_rlock(&d->lock);
+ e = alloc_l2e(d);
+ if (e) {
+ mtx_lock(&e->lock); /* avoid race with t4_l2t_free */
+ e->state = L2T_STATE_SWITCHING;
+ atomic_store_rel_int(&e->refcnt, 1);
+ mtx_unlock(&e->lock);
+ }
+ rw_runlock(&d->lock);
+ return e;
+}
+
+/*
+ * Sets/updates the contents of a switching L2T entry that has been allocated
+ * with an earlier call to @t4_l2t_alloc_switching.
+ */
+int
+t4_l2t_set_switching(struct adapter *sc, struct l2t_entry *e, uint16_t vlan,
+ uint8_t port, uint8_t *eth_addr)
+{
+ e->vlan = vlan;
+ e->lport = port;
+ memcpy(e->dmac, eth_addr, ETHER_ADDR_LEN);
+ return write_l2e(sc, e, 0);
+}
+
+struct l2t_data *
+t4_init_l2t(int flags)
+{
+ int i;
+ struct l2t_data *d;
+
+ d = malloc(sizeof(*d), M_CXGBE, M_ZERO | flags);
+ if (!d)
+ return (NULL);
+
+ d->rover = d->l2tab;
+ atomic_store_rel_int(&d->nfree, L2T_SIZE);
+ rw_init(&d->lock, "L2T");
+
+ for (i = 0; i < L2T_SIZE; i++) {
+ d->l2tab[i].idx = i;
+ d->l2tab[i].state = L2T_STATE_UNUSED;
+ mtx_init(&d->l2tab[i].lock, "L2T_E", NULL, MTX_DEF);
+ atomic_store_rel_int(&d->l2tab[i].refcnt, 0);
+ }
+
+ return (d);
+}
+
+int
+t4_free_l2t(struct l2t_data *d)
+{
+ int i;
+
+ for (i = 0; i < L2T_SIZE; i++)
+ mtx_destroy(&d->l2tab[i].lock);
+ rw_destroy(&d->lock);
+ free(d, M_CXGBE);
+
+ return (0);
+}
diff --git a/sys/dev/cxgbe/t4_l2t.h b/sys/dev/cxgbe/t4_l2t.h
new file mode 100644
index 0000000..c5520c6
--- /dev/null
+++ b/sys/dev/cxgbe/t4_l2t.h
@@ -0,0 +1,71 @@
+/*-
+ * Copyright (c) 2011 Chelsio Communications, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef __T4_L2T_H
+#define __T4_L2T_H
+
+enum { L2T_SIZE = 4096 }; /* # of L2T entries */
+
+/*
+ * Each L2T entry plays multiple roles. First of all, it keeps state for the
+ * corresponding entry of the HW L2 table and maintains a queue of offload
+ * packets awaiting address resolution. Second, it is a node of a hash table
+ * chain, where the nodes of the chain are linked together through their next
+ * pointer. Finally, each node is a bucket of a hash table, pointing to the
+ * first element in its chain through its first pointer.
+ */
+struct l2t_entry {
+ uint16_t state; /* entry state */
+ uint16_t idx; /* entry index */
+ uint32_t addr[4]; /* next hop IP or IPv6 address */
+ struct ifnet *ifp; /* outgoing interface */
+ uint16_t smt_idx; /* SMT index */
+ uint16_t vlan; /* VLAN TCI (id: 0-11, prio: 13-15) */
+ int ifindex; /* interface index */
+ struct llentry *lle; /* llentry for next hop */
+ struct l2t_entry *first; /* start of hash chain */
+ struct l2t_entry *next; /* next l2t_entry on chain */
+ struct mbuf *arpq_head; /* list of mbufs awaiting resolution */
+ struct mbuf *arpq_tail;
+ struct mtx lock;
+ volatile uint32_t refcnt; /* entry reference count */
+ uint16_t hash; /* hash bucket the entry is on */
+ uint8_t v6; /* whether entry is for IPv6 */
+ uint8_t lport; /* associated offload logical port */
+ uint8_t dmac[ETHER_ADDR_LEN]; /* next hop's MAC address */
+};
+
+struct l2t_data *t4_init_l2t(int);
+int t4_free_l2t(struct l2t_data *);
+struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *);
+int t4_l2t_set_switching(struct adapter *, struct l2t_entry *, uint16_t,
+ uint8_t, uint8_t *);
+void t4_l2t_release(struct l2t_entry *);
+
+#endif /* __T4_L2T_H */
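
Tying the header together, a sketch of the switching-entry lifecycle it
exposes, mirroring how set_filter_wr() in t4_main.c uses it later in this
patch:

/* Sketch: allocate, program, and eventually release a switching L2T entry. */
struct l2t_entry *e;

e = t4_l2t_alloc_switching(sc->l2t);	/* refcnt 1, state SWITCHING */
if (e == NULL)
	return (EAGAIN);		/* table full; the caller may retry */

/* Push VLAN, port, and destination MAC into the hardware L2 table. */
if (t4_l2t_set_switching(sc, e, vlan, port, dmac) != 0) {
	t4_l2t_release(e);		/* drop our reference on failure */
	return (ENOMEM);
}

/* ... reference e->idx in the filter work request ... */

t4_l2t_release(e);	/* on filter deletion; entry is freed at refcnt 0 */
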
diff --git a/sys/dev/cxgbe/t4_main.c b/sys/dev/cxgbe/t4_main.c
index 469af8d..18b813d 100644
--- a/sys/dev/cxgbe/t4_main.c
+++ b/sys/dev/cxgbe/t4_main.c
@@ -62,6 +62,7 @@ __FBSDID("$FreeBSD$");
#include "common/t4_regs_values.h"
#include "common/t4fw_interface.h"
#include "t4_ioctl.h"
+#include "t4_l2t.h"
/* T4 bus driver interface */
static int t4_probe(device_t);
@@ -213,12 +214,12 @@ SYSCTL_UINT(_hw_cxgbe, OID_AUTO, interrupt_types, CTLFLAG_RDTUN, &intr_types, 0,
"interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively)");
/*
- * Force the driver to use interrupt forwarding.
+ * Force the driver to use the same set of interrupts for all ports.
*/
-static int intr_fwd = 0;
-TUNABLE_INT("hw.cxgbe.interrupt_forwarding", &intr_fwd);
-SYSCTL_UINT(_hw_cxgbe, OID_AUTO, interrupt_forwarding, CTLFLAG_RDTUN,
- &intr_fwd, 0, "always use forwarded interrupts");
+static int intr_shared = 0;
+TUNABLE_INT("hw.cxgbe.interrupts_shared", &intr_shared);
+SYSCTL_UINT(_hw_cxgbe, OID_AUTO, interrupts_shared, CTLFLAG_RDTUN,
+ &intr_shared, 0, "interrupts shared between all ports");
static unsigned int filter_mode = HW_TPL_FR_MT_PR_IV_P_FC;
TUNABLE_INT("hw.cxgbe.filter_mode", &filter_mode);
@@ -228,7 +229,7 @@ SYSCTL_UINT(_hw_cxgbe, OID_AUTO, filter_mode, CTLFLAG_RDTUN,
struct intrs_and_queues {
int intr_type; /* INTx, MSI, or MSI-X */
int nirq; /* Number of vectors */
- int intr_fwd; /* Interrupts forwarded */
+ int intr_shared; /* Interrupts shared between all ports */
int ntxq10g; /* # of NIC txq's for each 10G port */
int nrxq10g; /* # of NIC rxq's for each 10G port */
int ntxq1g; /* # of NIC txq's for each 1G port */
@@ -240,6 +241,7 @@ struct filter_entry {
uint32_t locked:1; /* filter is administratively locked */
uint32_t pending:1; /* filter action is pending firmware reply */
uint32_t smtidx:8; /* Source MAC Table index for smac */
+ struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
struct t4_filter_specification fs;
};
@@ -269,6 +271,7 @@ static void setup_memwin(struct adapter *);
static int cfg_itype_and_nqueues(struct adapter *, int, int,
struct intrs_and_queues *);
static int prep_firmware(struct adapter *);
+static int get_devlog_params(struct adapter *, struct devlog_params *);
static int get_capabilities(struct adapter *, struct fw_caps_config_cmd *);
static int get_params(struct adapter *, struct fw_caps_config_cmd *);
static void t4_set_desc(struct adapter *);
@@ -295,19 +298,22 @@ static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
+static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
static inline void txq_start(struct ifnet *, struct sge_txq *);
static uint32_t fconf_to_mode(uint32_t);
static uint32_t mode_to_fconf(uint32_t);
static uint32_t fspec_to_fconf(struct t4_filter_specification *);
static int get_filter_mode(struct adapter *, uint32_t *);
static int set_filter_mode(struct adapter *, uint32_t);
+static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
static int get_filter(struct adapter *, struct t4_filter *);
static int set_filter(struct adapter *, struct t4_filter *);
static int del_filter(struct adapter *, struct t4_filter *);
-static void clear_filter(struct adapter *, struct filter_entry *);
+static void clear_filter(struct filter_entry *);
static int set_filter_wr(struct adapter *, int);
static int del_filter_wr(struct adapter *, int);
void filter_rpl(struct adapter *, const struct cpl_set_tcb_rpl *);
+static int get_sge_context(struct adapter *, struct t4_sge_context *);
static int t4_mod_event(module_t, int, void *);
struct t4_pciids {
@@ -400,6 +406,9 @@ t4_attach(device_t dev)
if (rc != 0)
goto done; /* error message displayed already */
+ /* Read firmware devlog parameters */
+ (void) get_devlog_params(sc, &sc->params.devlog);
+
/* Get device capabilities and select which ones we'll use */
rc = get_capabilities(sc, &caps);
if (rc != 0) {
@@ -484,6 +493,8 @@ t4_attach(device_t dev)
V_RXTSHIFTMAXR2(15) | V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
V_KEEPALIVEMAXR1(4) | V_KEEPALIVEMAXR2(9));
t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
+ t4_set_reg_field(sc, A_TP_PARA_REG3, F_TUNNELCNGDROP0 |
+ F_TUNNELCNGDROP1 | F_TUNNELCNGDROP2 | F_TUNNELCNGDROP3, 0);
setup_memwin(sc);
@@ -514,8 +525,8 @@ t4_attach(device_t dev)
device_printf(dev, "unable to initialize port %d: %d\n",
i, rc);
free(pi, M_CXGBE);
- sc->port[i] = NULL; /* indicates init failed */
- continue;
+ sc->port[i] = NULL;
+ goto done;
}
snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
@@ -582,15 +593,15 @@ t4_attach(device_t dev)
s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
s->neq = s->ntxq + s->nrxq; /* the free list in an rxq is an eq */
- s->neq += NCHAN; /* control queues, 1 per hw channel */
+ s->neq += sc->params.nports; /* control queues, 1 per port */
s->niq = s->nrxq + 1; /* 1 extra for firmware event queue */
- if (iaq.intr_fwd) {
- sc->flags |= INTR_FWD;
- s->niq += NFIQ(sc); /* forwarded interrupt queues */
- s->fiq = malloc(NFIQ(sc) * sizeof(struct sge_iq), M_CXGBE,
- M_ZERO | M_WAITOK);
- }
- s->ctrlq = malloc(NCHAN * sizeof(struct sge_ctrlq), M_CXGBE,
+ if (iaq.intr_shared)
+ sc->flags |= INTR_SHARED;
+ s->niq += NINTRQ(sc); /* interrupt queues */
+
+ s->intrq = malloc(NINTRQ(sc) * sizeof(struct sge_iq), M_CXGBE,
+ M_ZERO | M_WAITOK);
+ s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_ctrlq), M_CXGBE,
M_ZERO | M_WAITOK);
s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
M_ZERO | M_WAITOK);
@@ -604,6 +615,8 @@ t4_attach(device_t dev)
sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
M_ZERO | M_WAITOK);
+ sc->l2t = t4_init_l2t(M_WAITOK);
+
t4_sysctls(sc);
/*
@@ -691,11 +704,14 @@ t4_detach(device_t dev)
bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
sc->msix_res);
+ if (sc->l2t)
+ t4_free_l2t(sc->l2t);
+
free(sc->irq, M_CXGBE);
free(sc->sge.rxq, M_CXGBE);
free(sc->sge.txq, M_CXGBE);
free(sc->sge.ctrlq, M_CXGBE);
- free(sc->sge.fiq, M_CXGBE);
+ free(sc->sge.intrq, M_CXGBE);
free(sc->sge.iqmap, M_CXGBE);
free(sc->sge.eqmap, M_CXGBE);
free(sc->tids.ftid_tab, M_CXGBE);
@@ -1231,33 +1247,32 @@ cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
nrxq10g = min(nc, max_nrxq_10g);
nrxq1g = min(nc, max_nrxq_1g);
- /* Extra 2 is for a) error interrupt b) firmware event */
- iaq->nirq = n10g * nrxq10g + n1g * nrxq1g + 2;
- if (iaq->nirq <= navail && intr_fwd == 0) {
+ iaq->nirq = n10g * nrxq10g + n1g * nrxq1g + T4_EXTRA_INTR;
+ if (iaq->nirq <= navail && intr_shared == 0) {
if (itype == INTR_MSI && !powerof2(iaq->nirq))
- goto fwd;
+ goto share;
/* One for err, one for fwq, and one for each rxq */
- iaq->intr_fwd = 0;
+ iaq->intr_shared = 0;
iaq->nrxq10g = nrxq10g;
iaq->nrxq1g = nrxq1g;
} else {
-fwd:
- iaq->intr_fwd = 1;
+share:
+ iaq->intr_shared = 1;
- if (navail > nc) {
+ if (navail >= nc + T4_EXTRA_INTR) {
if (itype == INTR_MSIX)
- navail = nc + 1;
+ navail = nc + T4_EXTRA_INTR;
/* navail is and must remain a pow2 for MSI */
if (itype == INTR_MSI) {
KASSERT(powerof2(navail),
("%d not power of 2", navail));
- while (navail / 2 > nc)
+ while (navail / 2 >= nc + T4_EXTRA_INTR)
navail /= 2;
}
}
@@ -1290,7 +1305,7 @@ fwd:
* the kernel is willing to allocate (it's in navail).
*/
pci_release_msi(sc->dev);
- goto fwd;
+ goto share;
}
device_printf(sc->dev,
@@ -1414,6 +1429,34 @@ prep_firmware(struct adapter *sc)
}
static int
+get_devlog_params(struct adapter *sc, struct devlog_params *dlog)
+{
+ struct fw_devlog_cmd devlog_cmd;
+ uint32_t meminfo;
+ int rc;
+
+ bzero(&devlog_cmd, sizeof(devlog_cmd));
+ devlog_cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_READ);
+ devlog_cmd.retval_len16 = htobe32(FW_LEN16(devlog_cmd));
+ rc = -t4_wr_mbox(sc, sc->mbox, &devlog_cmd, sizeof(devlog_cmd),
+ &devlog_cmd);
+ if (rc != 0) {
+ device_printf(sc->dev,
+ "failed to get devlog parameters: %d.\n", rc);
+ bzero(dlog, sizeof (*dlog));
+ return (rc);
+ }
+
+ meminfo = be32toh(devlog_cmd.memtype_devlog_memaddr16_devlog);
+ dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(meminfo);
+ dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(meminfo) << 4;
+ dlog->size = be32toh(devlog_cmd.memsize_devlog);
+
+ return (0);
+}
+
+static int
get_capabilities(struct adapter *sc, struct fw_caps_config_cmd *caps)
{
int rc;
@@ -1923,16 +1966,18 @@ cxgbe_uninit_synchronized(struct port_info *pi)
return (0);
}
-#define T4_ALLOC_IRQ(sc, irqid, rid, handler, arg, name) do { \
- rc = t4_alloc_irq(sc, &sc->irq[irqid], rid, handler, arg, name); \
+#define T4_ALLOC_IRQ(sc, irq, rid, handler, arg, name) do { \
+ rc = t4_alloc_irq(sc, irq, rid, handler, arg, name); \
if (rc != 0) \
goto done; \
} while (0)
static int
first_port_up(struct adapter *sc)
{
- int rc, i;
- char name[8];
+ int rc, i, rid, p, q;
+ char s[8];
+ struct irq *irq;
+ struct sge_iq *intrq;
ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
@@ -1946,39 +1991,52 @@ first_port_up(struct adapter *sc)
/*
* Setup interrupts.
*/
+ irq = &sc->irq[0];
+ rid = sc->intr_type == INTR_INTX ? 0 : 1;
if (sc->intr_count == 1) {
- KASSERT(sc->flags & INTR_FWD,
- ("%s: single interrupt but not forwarded?", __func__));
- T4_ALLOC_IRQ(sc, 0, 0, t4_intr_all, sc, "all");
+ KASSERT(sc->flags & INTR_SHARED,
+ ("%s: single interrupt but not shared?", __func__));
+
+ T4_ALLOC_IRQ(sc, irq, rid, t4_intr_all, sc, "all");
} else {
/* Multiple interrupts. The first one is always error intr */
- T4_ALLOC_IRQ(sc, 0, 1, t4_intr_err, sc, "err");
-
- if (sc->flags & INTR_FWD) {
- /* The rest are shared by the fwq and all data intr */
- for (i = 1; i < sc->intr_count; i++) {
- snprintf(name, sizeof(name), "mux%d", i - 1);
- T4_ALLOC_IRQ(sc, i, i + 1, t4_intr_fwd,
- &sc->sge.fiq[i - 1], name);
+ T4_ALLOC_IRQ(sc, irq, rid, t4_intr_err, sc, "err");
+ irq++;
+ rid++;
+
+ /* Firmware event queue normally has an interrupt of its own */
+ if (sc->intr_count > T4_EXTRA_INTR) {
+ T4_ALLOC_IRQ(sc, irq, rid, t4_intr_evt, &sc->sge.fwq,
+ "evt");
+ irq++;
+ rid++;
+ }
+
+ intrq = &sc->sge.intrq[0];
+ if (sc->flags & INTR_SHARED) {
+
+ /* All ports share these interrupt queues */
+
+ for (i = 0; i < NINTRQ(sc); i++) {
+ snprintf(s, sizeof(s), "*.%d", i);
+ T4_ALLOC_IRQ(sc, irq, rid, t4_intr, intrq, s);
+ irq++;
+ rid++;
+ intrq++;
}
} else {
- struct port_info *pi;
- int p, q;
- T4_ALLOC_IRQ(sc, 1, 2, t4_intr_evt, &sc->sge.fwq,
- "evt");
+ /* Each port has its own set of interrupt queues */
- p = q = 0;
- pi = sc->port[p];
- for (i = 2; i < sc->intr_count; i++) {
- snprintf(name, sizeof(name), "p%dq%d", p, q);
- if (++q >= pi->nrxq) {
- p++;
- q = 0;
- pi = sc->port[p];
+ for (p = 0; p < sc->params.nports; p++) {
+ for (q = 0; q < sc->port[p]->nrxq; q++) {
+ snprintf(s, sizeof(s), "%d.%d", p, q);
+ T4_ALLOC_IRQ(sc, irq, rid, t4_intr,
+ intrq, s);
+ irq++;
+ rid++;
+ intrq++;
}
- T4_ALLOC_IRQ(sc, i, i + 1, t4_intr_data,
- &sc->sge.rxq[i - 2], name);
}
}
}
@@ -2366,6 +2424,10 @@ t4_sysctls(struct adapter *sc)
CTLTYPE_STRING | CTLFLAG_RD, &intr_pktcount, sizeof(intr_pktcount),
sysctl_int_array, "A", "interrupt holdoff packet counter values");
+ SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
+ CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
+ sysctl_devlog, "A", "device log");
+
return (0);
}
@@ -2709,6 +2771,120 @@ sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
return (sysctl_handle_64(oidp, &val, 0, req));
}
+const char *devlog_level_strings[] = {
+ [FW_DEVLOG_LEVEL_EMERG] = "EMERG",
+ [FW_DEVLOG_LEVEL_CRIT] = "CRIT",
+ [FW_DEVLOG_LEVEL_ERR] = "ERR",
+ [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE",
+ [FW_DEVLOG_LEVEL_INFO] = "INFO",
+ [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG"
+};
+
+const char *devlog_facility_strings[] = {
+ [FW_DEVLOG_FACILITY_CORE] = "CORE",
+ [FW_DEVLOG_FACILITY_SCHED] = "SCHED",
+ [FW_DEVLOG_FACILITY_TIMER] = "TIMER",
+ [FW_DEVLOG_FACILITY_RES] = "RES",
+ [FW_DEVLOG_FACILITY_HW] = "HW",
+ [FW_DEVLOG_FACILITY_FLR] = "FLR",
+ [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ",
+ [FW_DEVLOG_FACILITY_PHY] = "PHY",
+ [FW_DEVLOG_FACILITY_MAC] = "MAC",
+ [FW_DEVLOG_FACILITY_PORT] = "PORT",
+ [FW_DEVLOG_FACILITY_VI] = "VI",
+ [FW_DEVLOG_FACILITY_FILTER] = "FILTER",
+ [FW_DEVLOG_FACILITY_ACL] = "ACL",
+ [FW_DEVLOG_FACILITY_TM] = "TM",
+ [FW_DEVLOG_FACILITY_QFC] = "QFC",
+ [FW_DEVLOG_FACILITY_DCB] = "DCB",
+ [FW_DEVLOG_FACILITY_ETH] = "ETH",
+ [FW_DEVLOG_FACILITY_OFLD] = "OFLD",
+ [FW_DEVLOG_FACILITY_RI] = "RI",
+ [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI",
+ [FW_DEVLOG_FACILITY_FCOE] = "FCOE",
+ [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI",
+ [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE"
+};
+
+static int
+sysctl_devlog(SYSCTL_HANDLER_ARGS)
+{
+ struct adapter *sc = arg1;
+ struct devlog_params *dparams = &sc->params.devlog;
+ struct fw_devlog_e *buf, *e;
+ int i, j, rc, nentries, first = 0;
+ struct sbuf *sb;
+ uint64_t ftstamp = UINT64_MAX;
+
+ if (dparams->start == 0)
+ return (ENXIO);
+
+ nentries = dparams->size / sizeof(struct fw_devlog_e);
+
+ buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
+ if (buf == NULL)
+ return (ENOMEM);
+
+ rc = -t4_mem_read(sc, dparams->memtype, dparams->start, dparams->size,
+ (void *)buf);
+ if (rc != 0)
+ goto done;
+
+ for (i = 0; i < nentries; i++) {
+ e = &buf[i];
+
+ if (e->timestamp == 0)
+ break; /* end */
+
+ e->timestamp = be64toh(e->timestamp);
+ e->seqno = be32toh(e->seqno);
+ for (j = 0; j < 8; j++)
+ e->params[j] = be32toh(e->params[j]);
+
+ if (e->timestamp < ftstamp) {
+ ftstamp = e->timestamp;
+ first = i;
+ }
+ }
+
+ if (buf[first].timestamp == 0)
+ goto done; /* nothing in the log */
+
+ rc = sysctl_wire_old_buffer(req, 0);
+ if (rc != 0)
+ goto done;
+
+ sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
+ sbuf_printf(sb, "\n%10s %15s %8s %8s %s\n",
+ "Seq#", "Tstamp", "Level", "Facility", "Message");
+
+ i = first;
+ do {
+ e = &buf[i];
+ if (e->timestamp == 0)
+ break; /* end */
+
+ sbuf_printf(sb, "%10d %15ju %8s %8s ",
+ e->seqno, e->timestamp,
+ (e->level < ARRAY_SIZE(devlog_level_strings) ?
+ devlog_level_strings[e->level] : "UNKNOWN"),
+ (e->facility < ARRAY_SIZE(devlog_facility_strings) ?
+ devlog_facility_strings[e->facility] : "UNKNOWN"));
+ sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
+ e->params[2], e->params[3], e->params[4],
+ e->params[5], e->params[6], e->params[7]);
+
+ if (++i == nentries)
+ i = 0;
+ } while (i != first);
+
+ rc = sbuf_finish(sb);
+ sbuf_delete(sb);
+done:
+ free(buf, M_CXGBE);
+ return (rc);
+}
+
static inline void
txq_start(struct ifnet *ifp, struct sge_txq *txq)
{
@@ -2892,6 +3068,20 @@ done:
return (rc);
}
+static inline uint64_t
+get_filter_hits(struct adapter *sc, uint32_t fid)
+{
+ uint32_t tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
+ uint64_t hits;
+
+ t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 0),
+ tcb_base + (fid + sc->tids.ftid_base) * TCB_SIZE);
+ t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 0));
+ hits = t4_read_reg64(sc, MEMWIN0_BASE + 16);
+
+ return (be64toh(hits));
+}
+
static int
get_filter(struct adapter *sc, struct t4_filter *t)
{
@@ -2913,8 +3103,13 @@ get_filter(struct adapter *sc, struct t4_filter *t)
for (i = t->idx; i < nfilters; i++, f++) {
if (f->valid) {
t->idx = i;
+ t->l2tidx = f->l2t ? f->l2t->idx : 0;
+ t->smtidx = f->smtidx;
+ if (f->fs.hitcnts)
+ t->hits = get_filter_hits(sc, t->idx);
+ else
+ t->hits = UINT64_MAX;
t->fs = f->fs;
- t->hits = 0; /* XXX implement */
return (0);
}
@@ -3034,11 +3229,12 @@ del_filter(struct adapter *sc, struct t4_filter *t)
return (0);
}
-/* XXX: L2T */
static void
-clear_filter(struct adapter *sc, struct filter_entry *f)
+clear_filter(struct filter_entry *f)
{
- (void) sc;
+ if (f->l2t)
+ t4_l2t_release(f->l2t);
+
bzero(f, sizeof (*f));
}
@@ -3053,8 +3249,18 @@ set_filter_wr(struct adapter *sc, int fidx)
ADAPTER_LOCK_ASSERT_OWNED(sc);
- if (f->fs.newdmac || f->fs.newvlan)
- return (ENOTSUP); /* XXX: fix after L2T code */
+ if (f->fs.newdmac || f->fs.newvlan) {
+ /* This filter needs an L2T entry; allocate one. */
+ f->l2t = t4_l2t_alloc_switching(sc->l2t);
+ if (f->l2t == NULL)
+ return (EAGAIN);
+ if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
+ f->fs.dmac)) {
+ t4_l2t_release(f->l2t);
+ f->l2t = NULL;
+ return (ENOMEM);
+ }
+ }
ftid = sc->tids.ftid_base + fidx;
@@ -3089,7 +3295,7 @@ set_filter_wr(struct adapter *sc, int fidx)
V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
V_FW_FILTER_WR_PRIO(f->fs.prio) |
- V_FW_FILTER_WR_L2TIX(0)); /* XXX: L2T */
+ V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
fwr->ethtype = htobe16(f->fs.val.ethtype);
fwr->ethtypem = htobe16(f->fs.mask.ethtype);
fwr->frag_to_ovlan_vldm =
@@ -3101,7 +3307,7 @@ set_filter_wr(struct adapter *sc, int fidx)
V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
fwr->smac_sel = 0;
fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
- V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
+ V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.intrq[0].abs_id));
fwr->maci_to_matchtypem =
htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
@@ -3136,7 +3342,7 @@ set_filter_wr(struct adapter *sc, int fidx)
if (rc != 0) {
sc->tids.ftids_in_use--;
m_freem(m);
- clear_filter(sc, f);
+ clear_filter(f);
}
return (rc);
}
@@ -3161,7 +3367,7 @@ del_filter_wr(struct adapter *sc, int fidx)
m->m_len = m->m_pkthdr.len = sizeof(*fwr);
bzero(fwr, sizeof (*fwr));
- t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);
+ t4_mk_filtdelwr(ftid, fwr, sc->sge.intrq[0].abs_id);
f->pending = 1;
rc = t4_mgmt_tx(sc, m);
@@ -3188,12 +3394,12 @@ filter_rpl(struct adapter *sc, const struct cpl_set_tcb_rpl *rpl)
* Clear the filter when we get confirmation from the
* hardware that the filter has been deleted.
*/
- clear_filter(sc, f);
+ clear_filter(f);
sc->tids.ftids_in_use--;
} else if (rc == FW_FILTER_WR_SMT_TBL_FULL) {
device_printf(sc->dev,
"filter %u setup failed due to full SMT\n", idx);
- clear_filter(sc, f);
+ clear_filter(f);
sc->tids.ftids_in_use--;
} else if (rc == FW_FILTER_WR_FLT_ADDED) {
f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
@@ -3206,12 +3412,41 @@ filter_rpl(struct adapter *sc, const struct cpl_set_tcb_rpl *rpl)
*/
device_printf(sc->dev,
"filter %u setup failed with error %u\n", idx, rc);
- clear_filter(sc, f);
+ clear_filter(f);
sc->tids.ftids_in_use--;
}
}
}
+static int
+get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
+{
+ int rc = EINVAL;
+
+ if (cntxt->cid > M_CTXTQID)
+ return (rc);
+
+ if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
+ cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
+ return (rc);
+
+ if (sc->flags & FW_OK) {
+ ADAPTER_LOCK(sc); /* Avoid parallel t4_wr_mbox */
+ rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
+ &cntxt->data[0]);
+ ADAPTER_UNLOCK(sc);
+ }
+
+ if (rc != 0) {
+ /* Read via firmware failed or wasn't even attempted */
+
+ rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id,
+ &cntxt->data[0]);
+ }
+
+ return (rc);
+}
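+
+/*
+ * The fallback above keeps the ioctl usable in every adapter state: the
+ * context is read via the firmware mailbox when the firmware is up (with
+ * the adapter lock held to serialize t4_wr_mbox), and via the backdoor
+ * register interface when that read fails or was never attempted.
+ */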
+
int
t4_os_find_pci_capability(struct adapter *sc, int cap)
{
@@ -3375,6 +3610,9 @@ t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
rc = del_filter(sc, (struct t4_filter *)data);
ADAPTER_UNLOCK(sc);
break;
+ case CHELSIO_T4_GET_SGE_CONTEXT:
+ rc = get_sge_context(sc, (struct t4_sge_context *)data);
+ break;
default:
rc = EINVAL;
}
diff --git a/sys/dev/cxgbe/t4_sge.c b/sys/dev/cxgbe/t4_sge.c
index a0ef172..b676799 100644
--- a/sys/dev/cxgbe/t4_sge.c
+++ b/sys/dev/cxgbe/t4_sge.c
@@ -91,6 +91,8 @@ struct sgl {
bus_dma_segment_t seg[TX_SGL_SEGS];
};
+static void t4_evt_rx(void *);
+static void t4_eth_rx(void *);
static inline void init_iq(struct sge_iq *, struct adapter *, int, int, int,
int, iq_intr_handler_t *, char *);
static inline void init_fl(struct sge_fl *, int, char *);
@@ -102,8 +104,10 @@ static int free_ring(struct adapter *, bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
static int alloc_iq_fl(struct port_info *, struct sge_iq *, struct sge_fl *,
int, int);
static int free_iq_fl(struct port_info *, struct sge_iq *, struct sge_fl *);
-static int alloc_iq(struct sge_iq *, int);
-static int free_iq(struct sge_iq *);
+static int alloc_intrq(struct adapter *, int, int, int);
+static int free_intrq(struct sge_iq *);
+static int alloc_fwq(struct adapter *, int);
+static int free_fwq(struct sge_iq *);
static int alloc_rxq(struct port_info *, struct sge_rxq *, int, int);
static int free_rxq(struct port_info *, struct sge_rxq *);
static int alloc_ctrlq(struct adapter *, struct sge_ctrlq *, int);
@@ -139,9 +143,10 @@ static void write_eqflush_wr(struct sge_eq *);
static __be64 get_flit(bus_dma_segment_t *, int, int);
static int handle_sge_egr_update(struct adapter *,
const struct cpl_sge_egr_update *);
+static void handle_cpl(struct adapter *, struct sge_iq *);
static int ctrl_tx(struct adapter *, struct sge_ctrlq *, struct mbuf *);
-static int sysctl_abs_id(SYSCTL_HANDLER_ARGS);
+static int sysctl_uint16(SYSCTL_HANDLER_ARGS);
extern void filter_rpl(struct adapter *, const struct cpl_set_tcb_rpl *);
@@ -198,6 +203,9 @@ t4_sge_init(struct adapter *sc)
FL_BUF_SIZE(i));
}
+ i = t4_read_reg(sc, A_SGE_CONM_CTRL);
+ s->fl_starve_threshold = G_EGRTHRESHOLD(i) * 2 + 1;
+
t4_write_reg(sc, A_SGE_INGRESS_RX_THRESHOLD,
V_THRESHOLD_0(s->counter_val[0]) |
V_THRESHOLD_1(s->counter_val[1]) |
@@ -243,8 +251,7 @@ t4_destroy_dma_tag(struct adapter *sc)
/*
* Allocate and initialize the firmware event queue, control queues, and the
- * forwarded interrupt queues (if any). The adapter owns all these queues as
- * they are not associated with any particular port.
+ * interrupt queues. The adapter owns all of these queues.
*
* Returns errno on failure. Resources allocated up to that point may still be
* allocated. Caller is responsible for cleanup in case this function fails.
@@ -252,8 +259,8 @@ t4_destroy_dma_tag(struct adapter *sc)
int
t4_setup_adapter_queues(struct adapter *sc)
{
- int i, rc;
- struct sge_iq *iq, *fwq;
+ int i, j, rc, intr_idx, qsize;
+ struct sge_iq *iq;
struct sge_ctrlq *ctrlq;
iq_intr_handler_t *handler;
char name[16];
@@ -264,47 +271,76 @@ t4_setup_adapter_queues(struct adapter *sc)
struct sysctl_oid *oid = device_get_sysctl_tree(sc->dev);
struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
+ sc->oid_fwq = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO,
+ "fwq", CTLFLAG_RD, NULL, "firmware event queue");
sc->oid_ctrlq = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO,
"ctrlq", CTLFLAG_RD, NULL, "ctrl queues");
+ sc->oid_intrq = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO,
+ "intrq", CTLFLAG_RD, NULL, "interrupt queues");
}
- fwq = &sc->sge.fwq;
- if (sc->flags & INTR_FWD) {
- iq = &sc->sge.fiq[0];
-
- /*
- * Forwarded interrupt queues - allocate 1 if there's only 1
- * vector available, one less than the number of vectors
- * otherwise (the first vector is reserved for the error
- * interrupt in that case).
- */
- i = sc->intr_count > 1 ? 1 : 0;
- for (; i < sc->intr_count; i++, iq++) {
-
- snprintf(name, sizeof(name), "%s fiq%d",
+ /*
+ * Interrupt queues
+ */
+ intr_idx = sc->intr_count - NINTRQ(sc);
+ if (sc->flags & INTR_SHARED) {
+ qsize = max((sc->sge.nrxq + 1) * 2, INTR_IQ_QSIZE);
+ for (i = 0; i < NINTRQ(sc); i++, intr_idx++) {
+ snprintf(name, sizeof(name), "%s intrq%d",
device_get_nameunit(sc->dev), i);
- init_iq(iq, sc, 0, 0, (sc->sge.nrxq + 1) * 2, 16, NULL,
- name);
- rc = alloc_iq(iq, i);
+ iq = &sc->sge.intrq[i];
+ init_iq(iq, sc, 0, 0, qsize, INTR_IQ_ESIZE, NULL, name);
+ rc = alloc_intrq(sc, i % sc->params.nports, i,
+ intr_idx);
+
if (rc != 0) {
device_printf(sc->dev,
- "failed to create fwd intr queue %d: %d\n",
- i, rc);
+ "failed to create %s: %d\n", name, rc);
return (rc);
}
}
-
- handler = t4_evt_rx;
- i = 0; /* forward fwq's interrupt to the first fiq */
} else {
- handler = NULL;
- i = 1; /* fwq should use vector 1 (0 is used by error) */
+ int qidx = 0;
+ struct port_info *pi;
+
+ for (i = 0; i < sc->params.nports; i++) {
+ pi = sc->port[i];
+ qsize = max((pi->nrxq + 1) * 2, INTR_IQ_QSIZE);
+ for (j = 0; j < pi->nrxq; j++, qidx++, intr_idx++) {
+ snprintf(name, sizeof(name), "%s intrq%d",
+ device_get_nameunit(pi->dev), j);
+
+ iq = &sc->sge.intrq[qidx];
+ init_iq(iq, sc, 0, 0, qsize, INTR_IQ_ESIZE,
+ NULL, name);
+ rc = alloc_intrq(sc, i, qidx, intr_idx);
+
+ if (rc != 0) {
+ device_printf(sc->dev,
+ "failed to create %s: %d\n",
+ name, rc);
+ return (rc);
+ }
+ }
+ }
}
+ /*
+ * Firmware event queue
+ */
snprintf(name, sizeof(name), "%s fwq", device_get_nameunit(sc->dev));
- init_iq(fwq, sc, 0, 0, FW_IQ_QSIZE, FW_IQ_ESIZE, handler, name);
- rc = alloc_iq(fwq, i);
+ if (sc->intr_count > T4_EXTRA_INTR) {
+ handler = NULL;
+ intr_idx = 1;
+ } else {
+ handler = t4_evt_rx;
+ intr_idx = 0;
+ }
+
+ iq = &sc->sge.fwq;
+ init_iq(iq, sc, 0, 0, FW_IQ_QSIZE, FW_IQ_ESIZE, handler, name);
+ rc = alloc_fwq(sc, intr_idx);
if (rc != 0) {
device_printf(sc->dev,
"failed to create firmware event queue: %d\n", rc);
@@ -313,10 +349,10 @@ t4_setup_adapter_queues(struct adapter *sc)
}
/*
- * Control queues - one per hardware channel.
+ * Control queues - one per port.
*/
ctrlq = &sc->sge.ctrlq[0];
- for (i = 0; i < NCHAN; i++, ctrlq++) {
+ for (i = 0; i < sc->params.nports; i++, ctrlq++) {
snprintf(name, sizeof(name), "%s ctrlq%d",
device_get_nameunit(sc->dev), i);
init_eq(&ctrlq->eq, CTRL_EQ_QSIZE, name);
@@ -344,21 +380,22 @@ t4_teardown_adapter_queues(struct adapter *sc)
ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
/* Do this before freeing the queues */
- if (sc->oid_ctrlq) {
+ if (sc->oid_fwq || sc->oid_ctrlq || sc->oid_intrq) {
sysctl_ctx_free(&sc->ctx);
+ sc->oid_fwq = NULL;
sc->oid_ctrlq = NULL;
+ sc->oid_intrq = NULL;
}
- for (i = 0; i < NCHAN; i++)
+ for (i = 0; i < sc->params.nports; i++)
free_ctrlq(sc, &sc->sge.ctrlq[i]);
iq = &sc->sge.fwq;
- free_iq(iq);
- if (sc->flags & INTR_FWD) {
- for (i = 0; i < NFIQ(sc); i++) {
- iq = &sc->sge.fiq[i];
- free_iq(iq);
- }
+ free_fwq(iq);
+
+ for (i = 0; i < NINTRQ(sc); i++) {
+ iq = &sc->sge.intrq[i];
+ free_intrq(iq);
}
return (0);
@@ -388,23 +425,19 @@ t4_setup_eth_queues(struct port_info *pi)
snprintf(name, sizeof(name), "%s rxq%d-iq",
device_get_nameunit(pi->dev), i);
init_iq(&rxq->iq, sc, pi->tmr_idx, pi->pktc_idx,
- pi->qsize_rxq, RX_IQ_ESIZE,
- sc->flags & INTR_FWD ? t4_eth_rx : NULL, name);
+ pi->qsize_rxq, RX_IQ_ESIZE, t4_eth_rx, name);
snprintf(name, sizeof(name), "%s rxq%d-fl",
device_get_nameunit(pi->dev), i);
init_fl(&rxq->fl, pi->qsize_rxq / 8, name);
- if (sc->flags & INTR_FWD)
- intr_idx = (pi->first_rxq + i) % NFIQ(sc);
- else
- intr_idx = pi->first_rxq + i + 2;
+ intr_idx = pi->first_rxq + i;
+ if (sc->flags & INTR_SHARED)
+ intr_idx %= NINTRQ(sc);
rc = alloc_rxq(pi, rxq, intr_idx, i);
if (rc != 0)
goto done;
-
- intr_idx++;
}
for_each_txq(pi, i, txq) {
@@ -452,25 +485,26 @@ t4_teardown_eth_queues(struct port_info *pi)
return (0);
}
-/* Deals with errors and forwarded interrupts */
+/* Deals with errors and the first (and only) interrupt queue */
void
t4_intr_all(void *arg)
{
struct adapter *sc = arg;
t4_intr_err(arg);
- t4_intr_fwd(&sc->sge.fiq[0]);
+ t4_intr(&sc->sge.intrq[0]);
}
-/* Deals with forwarded interrupts on the given ingress queue */
+/* Deals with interrupts, and a few CPLs, on the given interrupt queue */
void
-t4_intr_fwd(void *arg)
+t4_intr(void *arg)
{
struct sge_iq *iq = arg, *q;
struct adapter *sc = iq->adapter;
struct rsp_ctrl *ctrl;
+ const struct rss_header *rss;
int ndesc_pending = 0, ndesc_total = 0;
- int qid;
+ int qid, rsp_type;
if (!atomic_cmpset_32(&iq->state, IQS_IDLE, IQS_BUSY))
return;
@@ -479,17 +513,23 @@ t4_intr_fwd(void *arg)
rmb();
- /* Only interrupt muxing expected on this queue */
- KASSERT(G_RSPD_TYPE(ctrl->u.type_gen) == X_RSPD_TYPE_INTR,
- ("unexpected event on forwarded interrupt queue: %x",
- G_RSPD_TYPE(ctrl->u.type_gen)));
+ rss = (const void *)iq->cdesc;
+ rsp_type = G_RSPD_TYPE(ctrl->u.type_gen);
+
+ if (__predict_false(rsp_type == X_RSPD_TYPE_CPL)) {
+ handle_cpl(sc, iq);
+ goto nextdesc;
+ }
qid = ntohl(ctrl->pldbuflen_qid) - sc->sge.iq_start;
q = sc->sge.iqmap[qid];
- q->handler(q);
+ if (atomic_cmpset_32(&q->state, IQS_IDLE, IQS_BUSY)) {
+ q->handler(q);
+ atomic_cmpset_32(&q->state, IQS_BUSY, IQS_IDLE);
+ }
- ndesc_total++;
+nextdesc: ndesc_total++;
if (++ndesc_pending >= iq->qsize / 4) {
t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
V_CIDXINC(ndesc_pending) |
@@ -514,9 +554,7 @@ t4_intr_err(void *arg)
{
struct adapter *sc = arg;
- if (sc->intr_type == INTR_INTX)
- t4_write_reg(sc, MYPF_REG(A_PCIE_PF_CLI), 0);
-
+ t4_write_reg(sc, MYPF_REG(A_PCIE_PF_CLI), 0);
t4_slow_intr_handler(sc);
}
@@ -526,70 +564,32 @@ t4_intr_evt(void *arg)
{
struct sge_iq *iq = arg;
- if (!atomic_cmpset_32(&iq->state, IQS_IDLE, IQS_BUSY))
- return;
-
- t4_evt_rx(arg);
-
- atomic_cmpset_32(&iq->state, IQS_BUSY, IQS_IDLE);
-}
-
-void
-t4_intr_data(void *arg)
-{
- struct sge_iq *iq = arg;
-
- if (!atomic_cmpset_32(&iq->state, IQS_IDLE, IQS_BUSY))
- return;
-
- t4_eth_rx(arg);
-
- atomic_cmpset_32(&iq->state, IQS_BUSY, IQS_IDLE);
+ if (atomic_cmpset_32(&iq->state, IQS_IDLE, IQS_BUSY)) {
+ t4_evt_rx(arg);
+ atomic_cmpset_32(&iq->state, IQS_BUSY, IQS_IDLE);
+ }
}
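+
+/*
+ * The IQS_IDLE -> IQS_BUSY atomic_cmpset guard used here and in t4_intr()
+ * serializes queue processing without a lock: a second interrupt that
+ * finds the queue busy returns immediately, and the current owner flips
+ * the state back to IQS_IDLE when it is done.
+ */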
-void
+static void
t4_evt_rx(void *arg)
{
struct sge_iq *iq = arg;
struct adapter *sc = iq->adapter;
struct rsp_ctrl *ctrl;
- const struct rss_header *rss;
int ndesc_pending = 0, ndesc_total = 0;
KASSERT(iq == &sc->sge.fwq, ("%s: unexpected ingress queue", __func__));
while (is_new_response(iq, &ctrl)) {
+ int rsp_type;
rmb();
- rss = (const void *)iq->cdesc;
-
- /* Should only get CPL on this queue */
- KASSERT(G_RSPD_TYPE(ctrl->u.type_gen) == X_RSPD_TYPE_CPL,
- ("%s: unexpected type %d", __func__,
- G_RSPD_TYPE(ctrl->u.type_gen)));
+ rsp_type = G_RSPD_TYPE(ctrl->u.type_gen);
+ if (__predict_false(rsp_type != X_RSPD_TYPE_CPL))
+ panic("%s: unexpected rsp_type %d", __func__, rsp_type);
- switch (rss->opcode) {
- case CPL_FW4_MSG:
- case CPL_FW6_MSG: {
- const struct cpl_fw6_msg *cpl;
-
- cpl = (const void *)(rss + 1);
- if (cpl->type == FW6_TYPE_CMD_RPL)
- t4_handle_fw_rpl(sc, cpl->data);
-
- break;
- }
- case CPL_SGE_EGR_UPDATE:
- handle_sge_egr_update(sc, (const void *)(rss + 1));
- break;
- case CPL_SET_TCB_RPL:
- filter_rpl(sc, (const void *) (rss + 1));
- break;
- default:
- device_printf(sc->dev,
- "can't handle CPL opcode %d.", rss->opcode);
- }
+ handle_cpl(sc, iq);
ndesc_total++;
if (++ndesc_pending >= iq->qsize / 4) {
@@ -600,6 +600,7 @@ t4_evt_rx(void *arg)
V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
ndesc_pending = 0;
}
+
iq_next(iq);
}
@@ -613,7 +614,7 @@ t4_evt_rx(void *arg)
#define RX_COPY_THRESHOLD MINCLSIZE
#endif
-void
+static void
t4_eth_rx(void *arg)
{
struct sge_rxq *rxq = arg;
@@ -644,17 +645,9 @@ t4_eth_rx(void *arg)
rss = (const void *)iq->cdesc;
i = G_RSPD_TYPE(ctrl->u.type_gen);
- if (__predict_false(i == X_RSPD_TYPE_CPL)) {
-
- /* Can't be anything except an egress update */
- KASSERT(rss->opcode == CPL_SGE_EGR_UPDATE,
- ("%s: unexpected CPL %x", __func__, rss->opcode));
-
- handle_sge_egr_update(sc, (const void *)(rss + 1));
- goto nextdesc;
- }
KASSERT(i == X_RSPD_TYPE_FLBUF && rss->opcode == CPL_RX_PKT,
- ("%s: unexpected CPL %x rsp %d", __func__, rss->opcode, i));
+ ("%s: unexpected type %d CPL opcode 0x%x",
+ __func__, i, rss->opcode));
sd_next = sd + 1;
if (__predict_false(fl->cidx + 1 == fl->cap))
@@ -786,16 +779,15 @@ t4_eth_rx(void *arg)
refill_fl(sc, fl, 64, 32);
FL_UNLOCK(fl);
-nextdesc: ndescs++;
- iq_next(iq);
-
- if (ndescs > 32) {
+ if (++ndescs > 32) {
t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
V_CIDXINC(ndescs) |
V_INGRESSQID((u32)iq->cntxt_id) |
V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
ndescs = 0;
}
+
+ iq_next(iq);
}
#ifdef INET
@@ -1008,7 +1000,7 @@ t4_update_fl_bufsize(struct ifnet *ifp)
/*
* A non-NULL handler indicates this iq will not receive direct interrupts, the
- * handler will be invoked by a forwarded interrupt queue.
+ * handler will be invoked by an interrupt queue.
*/
static inline void
init_iq(struct sge_iq *iq, struct adapter *sc, int tmr_idx, int pktc_idx,
@@ -1100,7 +1092,7 @@ free_ring(struct adapter *sc, bus_dma_tag_t tag, bus_dmamap_t map,
*
* If the ingress queue will take interrupts directly (iq->handler == NULL) then
* the intr_idx specifies the vector, starting from 0. Otherwise it specifies
- * the index of the queue to which its interrupts will be forwarded.
+ * the index of the interrupt queue to which its interrupts will be forwarded.
*/
static int
alloc_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl,
@@ -1112,10 +1104,6 @@ alloc_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl,
struct adapter *sc = iq->adapter;
__be32 v = 0;
- /* The adapter queues are nominally allocated in port[0]'s name */
- if (pi == NULL)
- pi = sc->port[0];
-
len = iq->qsize * iq->esize;
rc = alloc_ring(sc, len, &iq->desc_tag, &iq->desc_map, &iq->ba,
(void **)&iq->desc);
@@ -1135,10 +1123,10 @@ alloc_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl,
v |= F_FW_IQ_CMD_IQASYNCH;
if (iq->handler) {
- KASSERT(intr_idx < NFIQ(sc),
+ KASSERT(intr_idx < NINTRQ(sc),
("%s: invalid indirect intr_idx %d", __func__, intr_idx));
v |= F_FW_IQ_CMD_IQANDST;
- v |= V_FW_IQ_CMD_IQANDSTINDEX(sc->sge.fiq[intr_idx].abs_id);
+ v |= V_FW_IQ_CMD_IQANDSTINDEX(sc->sge.intrq[intr_idx].abs_id);
} else {
KASSERT(intr_idx < sc->intr_count,
("%s: invalid direct intr_idx %d", __func__, intr_idx));
@@ -1248,7 +1236,8 @@ alloc_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl,
sc->sge.eqmap[cntxt_id] = (void *)fl;
FL_LOCK(fl);
- refill_fl(sc, fl, -1, 8);
+ /* Just enough to make sure it doesn't starve right away. */
+ refill_fl(sc, fl, roundup(sc->sge.fl_starve_threshold, 8), 8);
FL_UNLOCK(fl);
}
@@ -1333,13 +1322,67 @@ free_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl)
}
static int
-alloc_iq(struct sge_iq *iq, int intr_idx)
+alloc_intrq(struct adapter *sc, int port_idx, int intrq_idx, int intr_idx)
+{
+ int rc;
+ struct sysctl_oid *oid;
+ struct sysctl_oid_list *children;
+ char name[16];
+ struct sge_iq *intrq = &sc->sge.intrq[intrq_idx];
+
+ rc = alloc_iq_fl(sc->port[port_idx], intrq, NULL, intr_idx, -1);
+ if (rc != 0)
+ return (rc);
+
+ children = SYSCTL_CHILDREN(sc->oid_intrq);
+
+ snprintf(name, sizeof(name), "%d", intrq_idx);
+ oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, name, CTLFLAG_RD,
+ NULL, "interrupt queue");
+ children = SYSCTL_CHILDREN(oid);
+
+ SYSCTL_ADD_PROC(&sc->ctx, children, OID_AUTO, "cidx",
+ CTLTYPE_INT | CTLFLAG_RD, &intrq->cidx, 0, sysctl_uint16, "I",
+ "consumer index");
+
+ return (rc);
+}
+
+static int
+free_intrq(struct sge_iq *iq)
+{
+ return free_iq_fl(NULL, iq, NULL);
+}
+
+static int
+alloc_fwq(struct adapter *sc, int intr_idx)
{
- return alloc_iq_fl(NULL, iq, NULL, intr_idx, -1);
+ int rc;
+ struct sysctl_oid_list *children;
+ struct sge_iq *fwq = &sc->sge.fwq;
+
+ rc = alloc_iq_fl(sc->port[0], fwq, NULL, intr_idx, -1);
+ if (rc != 0)
+ return (rc);
+
+ children = SYSCTL_CHILDREN(sc->oid_fwq);
+
+ SYSCTL_ADD_PROC(&sc->ctx, children, OID_AUTO, "abs_id",
+ CTLTYPE_INT | CTLFLAG_RD, &fwq->abs_id, 0, sysctl_uint16, "I",
+ "absolute id of the queue");
+ SYSCTL_ADD_PROC(&sc->ctx, children, OID_AUTO, "cntxt_id",
+ CTLTYPE_INT | CTLFLAG_RD, &fwq->cntxt_id, 0, sysctl_uint16, "I",
+ "SGE context id of the queue");
+ SYSCTL_ADD_PROC(&sc->ctx, children, OID_AUTO, "cidx",
+ CTLTYPE_INT | CTLFLAG_RD, &fwq->cidx, 0, sysctl_uint16, "I",
+ "consumer index");
+
+ return (rc);
}
static int
-free_iq(struct sge_iq *iq)
+free_fwq(struct sge_iq *iq)
{
return free_iq_fl(NULL, iq, NULL);
}
@@ -1356,6 +1399,10 @@ alloc_rxq(struct port_info *pi, struct sge_rxq *rxq, int intr_idx, int idx)
if (rc != 0)
return (rc);
+ FL_LOCK(&rxq->fl);
+ refill_fl(pi->adapter, &rxq->fl, rxq->fl.needed / 8, 8);
+ FL_UNLOCK(&rxq->fl);
+
#ifdef INET
rc = tcp_lro_init(&rxq->lro);
if (rc != 0)
@@ -1375,8 +1422,14 @@ alloc_rxq(struct port_info *pi, struct sge_rxq *rxq, int intr_idx, int idx)
children = SYSCTL_CHILDREN(oid);
SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "abs_id",
- CTLTYPE_INT | CTLFLAG_RD, &rxq->iq.abs_id, 0, sysctl_abs_id, "I",
+ CTLTYPE_INT | CTLFLAG_RD, &rxq->iq.abs_id, 0, sysctl_uint16, "I",
"absolute id of the queue");
+ SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cntxt_id",
+ CTLTYPE_INT | CTLFLAG_RD, &rxq->iq.cntxt_id, 0, sysctl_uint16, "I",
+ "SGE context id of the queue");
+ SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cidx",
+ CTLTYPE_INT | CTLFLAG_RD, &rxq->iq.cidx, 0, sysctl_uint16, "I",
+ "consumer index");
#ifdef INET
SYSCTL_ADD_INT(&pi->ctx, children, OID_AUTO, "lro_queued", CTLFLAG_RD,
&rxq->lro.lro_queued, 0, NULL);
@@ -1389,6 +1442,19 @@ alloc_rxq(struct port_info *pi, struct sge_rxq *rxq, int intr_idx, int idx)
CTLFLAG_RD, &rxq->vlan_extraction,
"# of times hardware extracted 802.1Q tag");
+ children = SYSCTL_CHILDREN(oid);
+ oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "fl", CTLFLAG_RD,
+ NULL, "freelist");
+ children = SYSCTL_CHILDREN(oid);
+
+ SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cntxt_id",
+ CTLTYPE_INT | CTLFLAG_RD, &rxq->fl.cntxt_id, 0, sysctl_uint16, "I",
+ "SGE context id of the queue");
+ SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "cidx", CTLFLAG_RD,
+ &rxq->fl.cidx, 0, "consumer index");
+ SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "pidx", CTLFLAG_RD,
+ &rxq->fl.pidx, 0, "producer index");
+
return (rc);
}
@@ -1433,7 +1499,10 @@ alloc_ctrlq(struct adapter *sc, struct sge_ctrlq *ctrlq, int idx)
eq->cap = eq->qsize - SPG_LEN / CTRL_EQ_ESIZE;
eq->spg = (void *)&eq->desc[eq->cap];
eq->avail = eq->cap - 1; /* one less to avoid cidx = pidx */
- eq->iqid = sc->sge.fwq.cntxt_id;
+ if (sc->flags & INTR_SHARED)
+ eq->iqid = sc->sge.intrq[idx % NINTRQ(sc)].cntxt_id;
+ else
+ eq->iqid = sc->sge.intrq[sc->port[idx]->first_rxq].cntxt_id;
bzero(&c, sizeof(c));
@@ -1446,8 +1515,8 @@ alloc_ctrlq(struct adapter *sc, struct sge_ctrlq *ctrlq, int idx)
c.physeqid_pkd = htobe32(0);
c.fetchszm_to_iqid =
htobe32(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
- V_FW_EQ_CTRL_CMD_PCIECHN(idx) | F_FW_EQ_CTRL_CMD_FETCHRO |
- V_FW_EQ_CTRL_CMD_IQID(eq->iqid));
+ V_FW_EQ_CTRL_CMD_PCIECHN(sc->port[idx]->tx_chan) |
+ F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(eq->iqid));
c.dcaen_to_eqsize =
htobe32(V_FW_EQ_CTRL_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
@@ -1479,13 +1548,12 @@ alloc_ctrlq(struct adapter *sc, struct sge_ctrlq *ctrlq, int idx)
NULL, "ctrl queue");
children = SYSCTL_CHILDREN(oid);
- SYSCTL_ADD_UQUAD(&sc->ctx, children, OID_AUTO, "total_wrs", CTLFLAG_RD,
- &ctrlq->total_wrs, "total # of work requests");
+ SYSCTL_ADD_PROC(&sc->ctx, children, OID_AUTO, "pidx",
+ CTLTYPE_INT | CTLFLAG_RD, &ctrlq->eq.pidx, 0, sysctl_uint16, "I",
+ "producer index");
SYSCTL_ADD_UINT(&sc->ctx, children, OID_AUTO, "no_desc", CTLFLAG_RD,
&ctrlq->no_desc, 0,
"# of times ctrlq ran out of hardware descriptors");
- SYSCTL_ADD_UINT(&sc->ctx, children, OID_AUTO, "too_long", CTLFLAG_RD,
- &ctrlq->too_long, 0, "# of oversized work requests");
return (rc);
}
@@ -1526,6 +1594,7 @@ alloc_txq(struct port_info *pi, struct sge_txq *txq, int idx)
char name[16];
struct sysctl_oid *oid;
struct sysctl_oid_list *children;
+ struct sge_iq *intrq;
txq->ifp = pi->ifp;
TASK_INIT(&txq->resume_tx, 0, cxgbe_txq_start, txq);
@@ -1544,7 +1613,12 @@ alloc_txq(struct port_info *pi, struct sge_txq *txq, int idx)
txq->sdesc = malloc(eq->cap * sizeof(struct tx_sdesc), M_CXGBE,
M_ZERO | M_WAITOK);
txq->br = buf_ring_alloc(eq->qsize, M_CXGBE, M_WAITOK, &eq->eq_lock);
- eq->iqid = sc->sge.rxq[pi->first_rxq].iq.cntxt_id;
+
+ intrq = &sc->sge.intrq[0];
+ if (sc->flags & INTR_SHARED)
+ eq->iqid = intrq[(pi->first_txq + idx) % NINTRQ(sc)].cntxt_id;
+ else
+ eq->iqid = intrq[pi->first_rxq + (idx % pi->nrxq)].cntxt_id;
rc = bus_dma_tag_create(sc->dmat, 1, 0, BUS_SPACE_MAXADDR,
BUS_SPACE_MAXADDR, NULL, NULL, 64 * 1024, TX_SGL_SEGS,
@@ -1603,6 +1677,15 @@ alloc_txq(struct port_info *pi, struct sge_txq *txq, int idx)
NULL, "tx queue");
children = SYSCTL_CHILDREN(oid);
+ SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD,
+ &eq->cntxt_id, 0, "SGE context id of the queue");
+ SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cidx",
+ CTLTYPE_INT | CTLFLAG_RD, &eq->cidx, 0, sysctl_uint16, "I",
+ "consumer index");
+ SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "pidx",
+ CTLTYPE_INT | CTLFLAG_RD, &eq->pidx, 0, sysctl_uint16, "I",
+ "producer index");
+
SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txcsum", CTLFLAG_RD,
&txq->txcsum, "# of times hardware assisted with checksum");
SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "vlan_insertion",
@@ -2324,7 +2407,7 @@ write_txpkts_wr(struct sge_txq *txq, struct txpkts *txpkts)
wr->equiq_to_len16 = htobe32(ctrl);
wr->plen = htobe16(txpkts->plen);
wr->npkt = txpkts->npkt;
- wr->r3 = wr->r4 = 0;
+ wr->r3 = wr->type = 0;
/* Everything else already written */
@@ -2695,6 +2778,32 @@ handle_sge_egr_update(struct adapter *sc, const struct cpl_sge_egr_update *cpl)
return (0);
}
+static void
+handle_cpl(struct adapter *sc, struct sge_iq *iq)
+{
+ const struct rss_header *rss = (const void *)iq->cdesc;
+ const struct cpl_fw6_msg *cpl = (const void *)(rss + 1);
+
+ switch (rss->opcode) {
+ case CPL_FW4_MSG:
+ case CPL_FW6_MSG:
+ if (cpl->type == FW6_TYPE_CMD_RPL)
+ t4_handle_fw_rpl(sc, cpl->data);
+ break;
+
+ case CPL_SGE_EGR_UPDATE:
+ handle_sge_egr_update(sc, (const void *)cpl);
+ break;
+
+ case CPL_SET_TCB_RPL:
+ filter_rpl(sc, (const void *)cpl);
+ break;
+
+ default:
+ panic("%s: unexpected CPL opcode 0x%x", __func__, rss->opcode);
+ }
+}
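+
+/*
+ * handle_cpl() centralizes dispatch that used to be duplicated: both the
+ * firmware event queue and the interrupt queues now funnel their CPLs
+ * through this one switch. An unexpected opcode panics rather than being
+ * logged, so a missing case shows up immediately during development.
+ */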
+
/*
* m0 is freed on successful transmission.
*/
@@ -2710,7 +2819,8 @@ ctrl_tx(struct adapter *sc, struct sge_ctrlq *ctrlq, struct mbuf *m0)
M_ASSERTPKTHDR(m0);
if (m0->m_pkthdr.len > SGE_MAX_WR_LEN) {
- ctrlq->too_long++;
+ log(LOG_ERR, "%s: %s work request too long (%d)\n",
+ device_get_nameunit(sc->dev), __func__, m0->m_pkthdr.len);
return (EMSGSIZE);
}
ndesc = howmany(m0->m_pkthdr.len, CTRL_EQ_ESIZE);
@@ -2738,7 +2848,6 @@ ctrl_tx(struct adapter *sc, struct sge_ctrlq *ctrlq, struct mbuf *m0)
eq->pidx -= eq->cap;
eq->pending += ndesc;
- ctrlq->total_wrs++;
ring_eq_db(sc, eq);
failed:
EQ_UNLOCK(eq);
@@ -2749,7 +2858,7 @@ failed:
}
static int
-sysctl_abs_id(SYSCTL_HANDLER_ARGS)
+sysctl_uint16(SYSCTL_HANDLER_ARGS)
{
uint16_t *id = arg1;
int i = *id;
diff --git a/sys/dev/e1000/if_igb.c b/sys/dev/e1000/if_igb.c
index 4aa08f6..08735ac 100644
--- a/sys/dev/e1000/if_igb.c
+++ b/sys/dev/e1000/if_igb.c
@@ -36,6 +36,7 @@
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#include "opt_inet.h"
+#include "opt_inet6.h"
#include "opt_altq.h"
#endif
@@ -99,7 +100,7 @@ int igb_display_debug_stats = 0;
/*********************************************************************
* Driver version:
*********************************************************************/
-char igb_driver_version[] = "version - 2.2.3";
+char igb_driver_version[] = "version - 2.2.5";
/*********************************************************************
@@ -170,13 +171,15 @@ static int igb_detach(device_t);
static int igb_shutdown(device_t);
static int igb_suspend(device_t);
static int igb_resume(device_t);
-static void igb_start(struct ifnet *);
-static void igb_start_locked(struct tx_ring *, struct ifnet *ifp);
#if __FreeBSD_version >= 800000
static int igb_mq_start(struct ifnet *, struct mbuf *);
static int igb_mq_start_locked(struct ifnet *,
struct tx_ring *, struct mbuf *);
static void igb_qflush(struct ifnet *);
+static void igb_deferred_mq_start(void *, int);
+#else
+static void igb_start(struct ifnet *);
+static void igb_start_locked(struct tx_ring *, struct ifnet *ifp);
#endif
static int igb_ioctl(struct ifnet *, u_long, caddr_t);
static void igb_init(void *);
@@ -263,6 +266,7 @@ static void igb_handle_link(void *context, int pending);
static void igb_set_sysctl_value(struct adapter *, const char *,
const char *, int *, int);
static int igb_set_flowcntl(SYSCTL_HANDLER_ARGS);
+static int igb_sysctl_dmac(SYSCTL_HANDLER_ARGS);
#ifdef DEVICE_POLLING
static poll_handler_t igb_poll;
@@ -342,25 +346,6 @@ TUNABLE_INT("hw.igb.hdr_split", &igb_header_split);
static int igb_num_queues = 0;
TUNABLE_INT("hw.igb.num_queues", &igb_num_queues);
-/* How many packets rxeof tries to clean at a time */
-static int igb_rx_process_limit = 100;
-TUNABLE_INT("hw.igb.rx_process_limit", &igb_rx_process_limit);
-
-/* Flow control setting - default to FULL */
-static int igb_fc_setting = e1000_fc_full;
-TUNABLE_INT("hw.igb.fc_setting", &igb_fc_setting);
-
-/* Energy Efficient Ethernet - default to off */
-static int igb_eee_disabled = TRUE;
-TUNABLE_INT("hw.igb.eee_disabled", &igb_eee_disabled);
-
-/*
-** DMA Coalescing, only for i350 - default to off,
-** this feature is for power savings
-*/
-static int igb_dma_coalesce = FALSE;
-TUNABLE_INT("hw.igb.dma_coalesce", &igb_dma_coalesce);
-
/*********************************************************************
* Device identification routine
*
@@ -431,6 +416,11 @@ igb_attach(device_t dev)
INIT_DEBUGOUT("igb_attach: begin");
+ if (resource_disabled("igb", device_get_unit(dev))) {
+ device_printf(dev, "Disabled by device hint\n");
+ return (ENXIO);
+ }
+
adapter = device_get_softc(dev);
adapter->dev = adapter->osdep.dev = dev;
IGB_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
@@ -448,7 +438,7 @@ igb_attach(device_t dev)
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "flow_control", CTLTYPE_INT|CTLFLAG_RW,
+ OID_AUTO, "fc", CTLTYPE_INT|CTLFLAG_RW,
adapter, 0, igb_set_flowcntl, "I", "Flow Control");
callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
@@ -474,8 +464,8 @@ igb_attach(device_t dev)
/* Sysctl for limiting the amount of work done in the taskqueue */
igb_set_sysctl_value(adapter, "rx_processing_limit",
- "max number of rx packets to process", &adapter->rx_process_limit,
- igb_rx_process_limit);
+ "max number of rx packets to process",
+ &adapter->rx_process_limit, 100);
/*
* Validate number of transmit and receive descriptors. It
@@ -550,13 +540,14 @@ igb_attach(device_t dev)
/* Some adapter-specific advanced features */
if (adapter->hw.mac.type >= e1000_i350) {
- igb_set_sysctl_value(adapter, "dma_coalesce",
- "configure dma coalesce",
- &adapter->dma_coalesce, igb_dma_coalesce);
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "dmac", CTLTYPE_INT|CTLFLAG_RW,
+ adapter, 0, igb_sysctl_dmac, "I", "DMA Coalesce");
igb_set_sysctl_value(adapter, "eee_disabled",
"enable Energy Efficient Ethernet",
&adapter->hw.dev_spec._82575.eee_disable,
- igb_eee_disabled);
+ TRUE);
e1000_set_eee_i350(&adapter->hw);
}
@@ -656,6 +647,7 @@ igb_attach(device_t dev)
return (0);
err_late:
+ igb_detach(dev);
igb_free_transmit_structures(adapter);
igb_free_receive_structures(adapter);
igb_release_hw_control(adapter);
@@ -693,6 +685,8 @@ igb_detach(device_t dev)
return (EBUSY);
}
+ ether_ifdetach(adapter->ifp);
+
if (adapter->led_dev != NULL)
led_destroy(adapter->led_dev);
@@ -724,8 +718,6 @@ igb_detach(device_t dev)
if (adapter->vlan_detach != NULL)
EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
- ether_ifdetach(adapter->ifp);
-
callout_drain(&adapter->timer);
igb_free_pci_resources(adapter);
@@ -734,7 +726,8 @@ igb_detach(device_t dev)
igb_free_transmit_structures(adapter);
igb_free_receive_structures(adapter);
- free(adapter->mta, M_DEVBUF);
+ if (adapter->mta != NULL)
+ free(adapter->mta, M_DEVBUF);
IGB_CORE_LOCK_DESTROY(adapter);
@@ -784,14 +777,27 @@ igb_resume(device_t dev)
{
struct adapter *adapter = device_get_softc(dev);
struct ifnet *ifp = adapter->ifp;
+#if __FreeBSD_version >= 800000
+ struct tx_ring *txr = adapter->tx_rings;
+#endif
IGB_CORE_LOCK(adapter);
igb_init_locked(adapter);
igb_init_manageability(adapter);
if ((ifp->if_flags & IFF_UP) &&
- (ifp->if_drv_flags & IFF_DRV_RUNNING))
+ (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+#if __FreeBSD_version < 800000
igb_start(ifp);
+#else
+ for (int i = 0; i < adapter->num_queues; i++, txr++) {
+ IGB_TX_LOCK(txr);
+ if (!drbr_empty(ifp, txr->br))
+ igb_mq_start_locked(ifp, txr, NULL);
+ IGB_TX_UNLOCK(txr);
+ }
+#endif
+ }
IGB_CORE_UNLOCK(adapter);
@@ -799,6 +805,7 @@ igb_resume(device_t dev)
}
+#if __FreeBSD_version < 800000
/*********************************************************************
* Transmit entry point
*
@@ -875,7 +882,7 @@ igb_start(struct ifnet *ifp)
return;
}
-#if __FreeBSD_version >= 800000
+#else /* __FreeBSD_version >= 800000 */
/*
** Multiqueue Transmit driver
**
@@ -900,7 +907,7 @@ igb_mq_start(struct ifnet *ifp, struct mbuf *m)
IGB_TX_UNLOCK(txr);
} else {
err = drbr_enqueue(ifp, txr->br, m);
- taskqueue_enqueue(que->tq, &que->que_task);
+ taskqueue_enqueue(que->tq, &txr->txq_task);
}
return (err);
@@ -961,6 +968,22 @@ igb_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
}
/*
+ * Called from a taskqueue to drain queued transmit packets.
+ */
+static void
+igb_deferred_mq_start(void *arg, int pending)
+{
+ struct tx_ring *txr = arg;
+ struct adapter *adapter = txr->adapter;
+ struct ifnet *ifp = adapter->ifp;
+
+ IGB_TX_LOCK(txr);
+ if (!drbr_empty(ifp, txr->br))
+ igb_mq_start_locked(ifp, txr, NULL);
+ IGB_TX_UNLOCK(txr);
+}
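+
+/*
+ * This handler pairs with the TASK_INIT(&txr->txq_task, 0,
+ * igb_deferred_mq_start, txr) calls added below in igb_allocate_legacy()
+ * and igb_allocate_msix(): when igb_mq_start() cannot take the TX lock it
+ * enqueues the mbuf on the buf_ring and schedules this task, so the drain
+ * runs later in taskqueue context with the lock held.
+ */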
+
+/*
** Flush all ring buffers
*/
static void
@@ -978,7 +1001,7 @@ igb_qflush(struct ifnet *ifp)
}
if_qflush(ifp);
}
-#endif /* __FreeBSD_version >= 800000 */
+#endif /* __FreeBSD_version < 800000 */
/*********************************************************************
* Ioctl entry point
@@ -993,11 +1016,12 @@ static int
igb_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
struct adapter *adapter = ifp->if_softc;
- struct ifreq *ifr = (struct ifreq *)data;
-#ifdef INET
- struct ifaddr *ifa = (struct ifaddr *)data;
+ struct ifreq *ifr = (struct ifreq *)data;
+#if defined(INET) || defined(INET6)
+ struct ifaddr *ifa = (struct ifaddr *)data;
+ bool avoid_reset = FALSE;
#endif
- int error = 0;
+ int error = 0;
if (adapter->in_detach)
return (error);
@@ -1005,20 +1029,22 @@ igb_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
switch (command) {
case SIOCSIFADDR:
#ifdef INET
- if (ifa->ifa_addr->sa_family == AF_INET) {
- /*
- * XXX
- * Since resetting hardware takes a very long time
- * and results in link renegotiation we only
- * initialize the hardware only when it is absolutely
- * required.
- */
+ if (ifa->ifa_addr->sa_family == AF_INET)
+ avoid_reset = TRUE;
+#endif
+#ifdef INET6
+ if (ifa->ifa_addr->sa_family == AF_INET6)
+ avoid_reset = TRUE;
+#endif
+#if defined(INET) || defined(INET6)
+ /*
+ ** Calling init results in link renegotiation,
+ ** so we avoid doing it when possible.
+ */
+ if (avoid_reset) {
ifp->if_flags |= IFF_UP;
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
- IGB_CORE_LOCK(adapter);
- igb_init_locked(adapter);
- IGB_CORE_UNLOCK(adapter);
- }
+ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
+ igb_init(adapter);
if (!(ifp->if_flags & IFF_NOARP))
arp_ifinit(ifp, ifa);
} else
@@ -1143,6 +1169,10 @@ igb_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
reinit = 1;
}
+ if (mask & IFCAP_VLAN_HWTSO) {
+ ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
+ reinit = 1;
+ }
if (mask & IFCAP_LRO) {
ifp->if_capenable ^= IFCAP_LRO;
reinit = 1;
@@ -2180,6 +2210,7 @@ igb_allocate_legacy(struct adapter *adapter)
{
device_t dev = adapter->dev;
struct igb_queue *que = adapter->queues;
+ struct tx_ring *txr = adapter->tx_rings;
int error, rid = 0;
/* Turn off all interrupts */
@@ -2198,6 +2229,10 @@ igb_allocate_legacy(struct adapter *adapter)
return (ENXIO);
}
+#if __FreeBSD_version >= 800000
+ TASK_INIT(&txr->txq_task, 0, igb_deferred_mq_start, txr);
+#endif
+
/*
* Try allocating a fast interrupt and the associated deferred
* processing contexts.
@@ -2268,9 +2303,13 @@ igb_allocate_msix(struct adapter *adapter)
*/
if (adapter->num_queues > 1)
bus_bind_intr(dev, que->res, i);
+#if __FreeBSD_version >= 800000
+ TASK_INIT(&que->txr->txq_task, 0, igb_deferred_mq_start,
+ que->txr);
+#endif
/* Make tasklet for deferred handling */
TASK_INIT(&que->que_task, 0, igb_handle_que, que);
- que->tq = taskqueue_create_fast("igb_que", M_NOWAIT,
+ que->tq = taskqueue_create("igb_que", M_NOWAIT,
taskqueue_thread_enqueue, &que->tq);
taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
device_get_nameunit(adapter->dev));
@@ -2477,13 +2516,24 @@ igb_free_pci_resources(struct adapter *adapter)
else
(adapter->msix != 0) ? (rid = 1):(rid = 0);
+ que = adapter->queues;
if (adapter->tag != NULL) {
+ taskqueue_drain(que->tq, &adapter->link_task);
bus_teardown_intr(dev, adapter->res, adapter->tag);
adapter->tag = NULL;
}
if (adapter->res != NULL)
bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
+ for (int i = 0; i < adapter->num_queues; i++, que++) {
+ if (que->tq != NULL) {
+#if __FreeBSD_version >= 800000
+ taskqueue_drain(que->tq, &que->txr->txq_task);
+#endif
+ taskqueue_drain(que->tq, &que->que_task);
+ taskqueue_free(que->tq);
+ }
+ }
mem:
if (adapter->msix)
pci_release_msi(dev);
@@ -2669,6 +2719,12 @@ igb_reset(struct adapter *adapter)
fc->pause_time = IGB_FC_PAUSE_TIME;
fc->send_xon = TRUE;
+ if (fc->requested_mode)
+ fc->current_mode = fc->requested_mode;
+ else
+ fc->current_mode = e1000_fc_full;
+
+ adapter->fc = fc->current_mode;
/* Issue a global reset */
e1000_reset_hw(hw);
@@ -2678,9 +2734,13 @@ igb_reset(struct adapter *adapter)
device_printf(dev, "Hardware Initialization Failed\n");
/* Setup DMA Coalescing */
- if ((hw->mac.type == e1000_i350) &&
- (adapter->dma_coalesce == TRUE)) {
- u32 reg;
+ if (hw->mac.type == e1000_i350) {
+ u32 reg = ~E1000_DMACR_DMAC_EN;
+
+ if (adapter->dmac == 0) { /* Disabling it */
+ E1000_WRITE_REG(hw, E1000_DMACR, reg);
+ goto reset_out;
+ }
hwm = (pba - 4) << 10;
reg = (((pba-6) << E1000_DMACR_DMACTHR_SHIFT)
@@ -2689,8 +2749,8 @@ igb_reset(struct adapter *adapter)
/* transition to L0x or L1 if available..*/
reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
- /* timer = +-1000 usec in 32usec intervals */
- reg |= (1000 >> 5);
+ /* timer = value in adapter->dmac in 32usec intervals */
+ reg |= (adapter->dmac >> 5);
E1000_WRITE_REG(hw, E1000_DMACR, reg);
/* No lower threshold */
@@ -2715,6 +2775,7 @@ igb_reset(struct adapter *adapter)
device_printf(dev, "DMA Coalescing enabled\n");
}
+reset_out:
E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
e1000_get_phy_info(hw);
e1000_check_for_link(hw);
@@ -2744,10 +2805,11 @@ igb_setup_interface(device_t dev, struct adapter *adapter)
ifp->if_softc = adapter;
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
ifp->if_ioctl = igb_ioctl;
- ifp->if_start = igb_start;
#if __FreeBSD_version >= 800000
ifp->if_transmit = igb_mq_start;
ifp->if_qflush = igb_qflush;
+#else
+ ifp->if_start = igb_start;
#endif
IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
@@ -2774,15 +2836,19 @@ igb_setup_interface(device_t dev, struct adapter *adapter)
* support full VLAN capability.
*/
ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
- ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
- ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
+ ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
+ | IFCAP_VLAN_HWTSO
+ | IFCAP_VLAN_MTU;
+ ifp->if_capenable |= IFCAP_VLAN_HWTAGGING
+ | IFCAP_VLAN_HWTSO
+ | IFCAP_VLAN_MTU;
/*
- ** Dont turn this on by default, if vlans are
+ ** Don't turn this on by default, if vlans are
** created on another pseudo device (eg. lagg)
** then vlan events are not passed thru, breaking
** operation, but with HW FILTER off it works. If
- ** using vlans directly on the em driver you can
+ ** using vlans directly on the igb driver you can
** enable this and get full hardware tag filtering.
*/
ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
@@ -5542,19 +5608,18 @@ static int
igb_set_flowcntl(SYSCTL_HANDLER_ARGS)
{
int error;
- struct adapter *adapter;
+ struct adapter *adapter = (struct adapter *) arg1;
- error = sysctl_handle_int(oidp, &igb_fc_setting, 0, req);
+ error = sysctl_handle_int(oidp, &adapter->fc, 0, req);
- if (error)
+ if ((error) || (req->newptr == NULL))
return (error);
- adapter = (struct adapter *) arg1;
- switch (igb_fc_setting) {
+ switch (adapter->fc) {
case e1000_fc_rx_pause:
case e1000_fc_tx_pause:
case e1000_fc_full:
- adapter->hw.fc.requested_mode = igb_fc_setting;
+ adapter->hw.fc.requested_mode = adapter->fc;
break;
case e1000_fc_none:
default:
@@ -5563,5 +5628,54 @@ igb_set_flowcntl(SYSCTL_HANDLER_ARGS)
adapter->hw.fc.current_mode = adapter->hw.fc.requested_mode;
e1000_force_mac_fc(&adapter->hw);
- return error;
+ return (error);
+}
+
+/*
+** Manage DMA Coalesce:
+** Control values:
+** 0/1 - off/on
+** Legal timer values are:
+** 250, 500, and 1000-10000 in steps of 1000 (microseconds)
+*/
+static int
+igb_sysctl_dmac(SYSCTL_HANDLER_ARGS)
+{
+ struct adapter *adapter = (struct adapter *) arg1;
+ int error;
+
+ error = sysctl_handle_int(oidp, &adapter->dmac, 0, req);
+
+ if ((error) || (req->newptr == NULL))
+ return (error);
+
+ switch (adapter->dmac) {
+ case 0:
+ /* Disabling */
+ break;
+ case 1: /* Just enable and use default */
+ adapter->dmac = 1000;
+ break;
+ case 250:
+ case 500:
+ case 1000:
+ case 2000:
+ case 3000:
+ case 4000:
+ case 5000:
+ case 6000:
+ case 7000:
+ case 8000:
+ case 9000:
+ case 10000:
+ /* Legal values - allow */
+ break;
+ default:
+ /* Illegal value, disable DMA coalescing */
+ adapter->dmac = 0;
+ return (error);
+ }
+ /* Reinit the interface */
+ igb_init(adapter);
+ return (error);
}
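+
+/*
+ * Both sysctl handlers above follow the standard FreeBSD read/write
+ * pattern. A minimal sketch with a hypothetical integer knob (names here
+ * are illustrative only):
+ */
+#if 0
+static int
+example_sysctl_knob(SYSCTL_HANDLER_ARGS)
+{
+ int error, val = *(int *)arg1; /* copy the current value out */
+
+ error = sysctl_handle_int(oidp, &val, 0, req);
+ if ((error) || (req->newptr == NULL))
+ return (error); /* error, or a pure read */
+ if (val < 0)
+ return (EINVAL); /* validate only after a write */
+ *(int *)arg1 = val; /* commit the new value */
+ return (0);
+}
+#endif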
diff --git a/sys/dev/e1000/if_igb.h b/sys/dev/e1000/if_igb.h
index 609d650..f0ab685 100644
--- a/sys/dev/e1000/if_igb.h
+++ b/sys/dev/e1000/if_igb.h
@@ -297,6 +297,7 @@ struct tx_ring {
struct buf_ring *br;
#endif
bus_dma_tag_t txtag;
+ struct task txq_task;
u32 bytes;
u32 packets;
@@ -395,11 +396,12 @@ struct adapter {
u32 shadow_vfta[IGB_VFTA_SIZE];
/* Info about the interface */
- u8 link_active;
+ u16 link_active;
+ u16 fc;
u16 link_speed;
u16 link_duplex;
u32 smartspeed;
- u32 dma_coalesce;
+ u32 dmac;
/* Interface queues */
struct igb_queue *queues;
diff --git a/sys/dev/hwpmc/hwpmc_mod.c b/sys/dev/hwpmc/hwpmc_mod.c
index d6225d8..4cfcea8 100644
--- a/sys/dev/hwpmc/hwpmc_mod.c
+++ b/sys/dev/hwpmc/hwpmc_mod.c
@@ -1991,7 +1991,7 @@ pmc_hook_handler(struct thread *td, int function, void *arg)
* had already processed the interrupt). We don't
* lose the interrupt sample.
*/
- atomic_clear_int(&pmc_cpumask, (1 << PCPU_GET(cpuid)));
+ CPU_CLR_ATOMIC(PCPU_GET(cpuid), &pmc_cpumask);
pmc_process_samples(PCPU_GET(cpuid));
break;
@@ -4083,7 +4083,7 @@ pmc_process_interrupt(int cpu, struct pmc *pm, struct trapframe *tf,
done:
/* mark CPU as needing processing */
- atomic_set_int(&pmc_cpumask, (1 << cpu));
+ CPU_SET_ATOMIC(cpu, &pmc_cpumask);
return (error);
}
@@ -4193,7 +4193,7 @@ pmc_process_samples(int cpu)
break;
if (ps->ps_nsamples == PMC_SAMPLE_INUSE) {
/* Need a rescan at a later time. */
- atomic_set_int(&pmc_cpumask, (1 << cpu));
+ CPU_SET_ATOMIC(cpu, &pmc_cpumask);
break;
}
@@ -4782,7 +4782,7 @@ pmc_cleanup(void)
PMCDBG(MOD,INI,0, "%s", "cleanup");
/* switch off sampling */
- pmc_cpumask = 0;
+ CPU_ZERO(&pmc_cpumask);
pmc_intr = NULL;
sx_xlock(&pmc_sx);
diff --git a/sys/dev/iicbus/ad7417.c b/sys/dev/iicbus/ad7417.c
new file mode 100644
index 0000000..6ae16be
--- /dev/null
+++ b/sys/dev/iicbus/ad7417.c
@@ -0,0 +1,621 @@
+/*-
+ * Copyright (c) 2010 Andreas Tobler
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/systm.h>
+#include <sys/module.h>
+#include <sys/callout.h>
+#include <sys/conf.h>
+#include <sys/cpu.h>
+#include <sys/ctype.h>
+#include <sys/kernel.h>
+#include <sys/reboot.h>
+#include <sys/rman.h>
+#include <sys/sysctl.h>
+#include <sys/limits.h>
+
+#include <machine/bus.h>
+#include <machine/md_var.h>
+
+#include <dev/iicbus/iicbus.h>
+#include <dev/iicbus/iiconf.h>
+
+#include <dev/ofw/openfirm.h>
+#include <dev/ofw/ofw_bus.h>
+#include <powerpc/powermac/powermac_thermal.h>
+
+/* CPU A/B sensors, temp and adc: AD7417. */
+
+#define AD7417_TEMP 0x00
+#define AD7417_CONFIG 0x01
+#define AD7417_ADC 0x04
+#define AD7417_CONFIG2 0x05
+#define AD7417_CONFMASK 0xe0
+
+static uint8_t adc741x_config;
+
+struct ad7417_sensor {
+ struct pmac_therm therm;
+ device_t dev;
+ int id;
+ enum {
+ ADC7417_TEMP_SENSOR,
+ ADC7417_ADC_SENSOR
+ } type;
+};
+
+struct write_data {
+ uint8_t reg;
+ uint8_t val;
+};
+
+struct read_data {
+ uint8_t reg;
+ uint16_t val;
+};
+
+/* Regular bus attachment functions */
+static int ad7417_probe(device_t);
+static int ad7417_attach(device_t);
+
+/* Utility functions */
+static int ad7417_sensor_sysctl(SYSCTL_HANDLER_ARGS);
+static int ad7417_write(device_t dev, uint32_t addr, uint8_t reg,
+ uint8_t *buf, int len);
+static int ad7417_read_1(device_t dev, uint32_t addr, uint8_t reg,
+ uint8_t *data);
+static int ad7417_read_2(device_t dev, uint32_t addr, uint8_t reg,
+ uint16_t *data);
+static int ad7417_write_read(device_t dev, uint32_t addr,
+ struct write_data out, struct read_data *in);
+static int ad7417_diode_read(struct ad7417_sensor *sens);
+static int ad7417_adc_read(struct ad7417_sensor *sens);
+static int ad7417_sensor_read(struct ad7417_sensor *sens);
+
+struct ad7417_softc {
+ device_t sc_dev;
+ uint32_t sc_addr;
+ struct ad7417_sensor *sc_sensors;
+ int sc_nsensors;
+};
+
+static device_method_t ad7417_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, ad7417_probe),
+ DEVMETHOD(device_attach, ad7417_attach),
+ { 0, 0 },
+};
+
+static driver_t ad7417_driver = {
+ "ad7417",
+ ad7417_methods,
+ sizeof(struct ad7417_softc)
+};
+
+static devclass_t ad7417_devclass;
+
+DRIVER_MODULE(ad7417, iicbus, ad7417_driver, ad7417_devclass, 0, 0);
+MALLOC_DEFINE(M_AD7417, "ad7417", "Supply-Monitor AD7417");
+
+static int
+ad7417_write(device_t dev, uint32_t addr, uint8_t reg, uint8_t *buff, int len)
+{
+ unsigned char buf[4];
+ int try = 0;
+
+ struct iic_msg msg[] = {
+ { addr, IIC_M_WR, 0, buf }
+ };
+
+ msg[0].len = len + 1;
+ buf[0] = reg;
+ memcpy(buf + 1, buff, len);
+
+ for (;;)
+ {
+ if (iicbus_transfer(dev, msg, 1) == 0)
+ return (0);
+
+ if (++try > 5) {
+ device_printf(dev, "iicbus write failed\n");
+ return (-1);
+ }
+ pause("ad7417_write", hz);
+ }
+}
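+
+/*
+ * All of the iicbus accessors in this driver use the same retry
+ * discipline seen above: up to five attempts, sleeping one second
+ * (pause(..., hz)) between tries, before giving up with -1.
+ */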
+
+static int
+ad7417_read_1(device_t dev, uint32_t addr, uint8_t reg, uint8_t *data)
+{
+ uint8_t buf[4];
+ int err, try = 0;
+
+ struct iic_msg msg[2] = {
+ { addr, IIC_M_WR | IIC_M_NOSTOP, 1, &reg },
+ { addr, IIC_M_RD, 1, buf },
+ };
+
+ for (;;)
+ {
+ err = iicbus_transfer(dev, msg, 2);
+ if (err != 0)
+ goto retry;
+
+ *data = *((uint8_t*)buf);
+ return (0);
+ retry:
+ if (++try > 5) {
+ device_printf(dev, "iicbus read failed\n");
+ return (-1);
+ }
+ pause("ad7417_read_1", hz);
+ }
+}
+
+static int
+ad7417_read_2(device_t dev, uint32_t addr, uint8_t reg, uint16_t *data)
+{
+ uint8_t buf[4];
+ int err, try = 0;
+
+ struct iic_msg msg[2] = {
+ { addr, IIC_M_WR | IIC_M_NOSTOP, 1, &reg },
+ { addr, IIC_M_RD, 2, buf },
+ };
+
+ for (;;)
+ {
+ err = iicbus_transfer(dev, msg, 2);
+ if (err != 0)
+ goto retry;
+
+ *data = *((uint16_t*)buf);
+ return (0);
+ retry:
+ if (++try > 5) {
+ device_printf(dev, "iicbus read failed\n");
+ return (-1);
+ }
+ pause("ad7417_read_2", hz);
+ }
+}
+
+static int
+ad7417_write_read(device_t dev, uint32_t addr, struct write_data out,
+ struct read_data *in)
+{
+ uint8_t buf[4];
+ int err, try = 0;
+
+ /* Do a combined write/read. */
+ struct iic_msg msg[3] = {
+ { addr, IIC_M_WR, 2, buf },
+ { addr, IIC_M_WR | IIC_M_NOSTOP, 1, &in->reg },
+ { addr, IIC_M_RD, 2, buf },
+ };
+
+ /* Prepare the write msg. */
+ buf[0] = out.reg;
+ buf[1] = out.val & 0xff;
+
+ for (;;)
+ {
+ err = iicbus_transfer(dev, msg, 3);
+ if (err != 0)
+ goto retry;
+
+ in->val = *((uint16_t*)buf);
+ return (0);
+ retry:
+ if (++try > 5) {
+ device_printf(dev, "iicbus write/read failed\n");
+ return (-1);
+ }
+ pause("ad7417_write_read", hz);
+ }
+}
+
+static int
+ad7417_init_adc(device_t dev, uint32_t addr)
+{
+ uint8_t buf;
+ int err;
+
+ adc741x_config = 0;
+ /* Clear Config2 */
+ buf = 0;
+
+ err = ad7417_write(dev, addr, AD7417_CONFIG2, &buf, 1);
+
+ /* Read & cache Config1 */
+ buf = 0;
+ err = ad7417_write(dev, addr, AD7417_CONFIG, &buf, 1);
+ err = ad7417_read_1(dev, addr, AD7417_CONFIG, &buf);
+ adc741x_config = (uint8_t)buf;
+
+ /* Disable shutdown mode */
+ adc741x_config &= 0xfe;
+ buf = adc741x_config;
+ err = ad7417_write(dev, addr, AD7417_CONFIG, &buf, 1);
+ if (err < 0)
+ return (-1);
+
+ return (0);
+}
+
+static int
+ad7417_probe(device_t dev)
+{
+ const char *name, *compatible;
+ struct ad7417_softc *sc;
+
+ name = ofw_bus_get_name(dev);
+ compatible = ofw_bus_get_compat(dev);
+
+ if (!name || !compatible)
+ return (ENXIO);
+
+ if (strcmp(name, "supply-monitor") != 0 ||
+ strcmp(compatible, "ad7417") != 0)
+ return (ENXIO);
+
+ sc = device_get_softc(dev);
+ sc->sc_dev = dev;
+ sc->sc_addr = iicbus_get_addr(dev);
+
+ device_set_desc(dev, "Supply-Monitor AD7417");
+
+ return (0);
+}
+
+/*
+ * On the first call this function only counts the sensors. Once memory for
+ * sc->sc_sensors has been allocated, a second call fills in the properties.
+ */
+static int
+ad7417_fill_sensor_prop(device_t dev)
+{
+ phandle_t child;
+ struct ad7417_softc *sc;
+ u_int id[10];
+ char location[96];
+ char type[32];
+ int i = 0, j, len = 0, prop_len, prev_len = 0;
+
+ sc = device_get_softc(dev);
+
+ child = ofw_bus_get_node(dev);
+
+ /* Fill the sensor location property. */
+ prop_len = OF_getprop(child, "hwsensor-location", location,
+ sizeof(location));
+ while (len < prop_len) {
+ if (sc->sc_sensors != NULL)
+ strcpy(sc->sc_sensors[i].therm.name, location + len);
+ prev_len = strlen(location + len) + 1;
+ len += prev_len;
+ i++;
+ }
+ if (sc->sc_sensors == NULL)
+ return (i);
+
+ /* Fill the sensor type property. */
+ len = 0;
+ i = 0;
+ prev_len = 0;
+ prop_len = OF_getprop(child, "hwsensor-type", type, sizeof(type));
+ while (len < prop_len) {
+ if (strcmp(type + len, "temperature") == 0)
+ sc->sc_sensors[i].type = ADC7417_TEMP_SENSOR;
+ else
+ sc->sc_sensors[i].type = ADC7417_ADC_SENSOR;
+ prev_len = strlen(type + len) + 1;
+ len += prev_len;
+ i++;
+ }
+
+ /* Fill the sensor id property. Taken from OF. */
+ prop_len = OF_getprop(child, "hwsensor-id", id, sizeof(id));
+ for (j = 0; j < i; j++)
+ sc->sc_sensors[j].id = id[j];
+
+ /* Fill the sensor zone property. Taken from OF. */
+ prop_len = OF_getprop(child, "hwsensor-zone", id, sizeof(id));
+ for (j = 0; j < i; j++)
+ sc->sc_sensors[j].therm.zone = id[j];
+
+ /* Finish setting up sensor properties */
+ for (j = 0; j < i; j++) {
+ sc->sc_sensors[j].dev = dev;
+
+ /* HACK: Apple wired a random diode to the ADC line */
+ if (strstr(sc->sc_sensors[j].therm.name, "DIODE TEMP")
+ != NULL) {
+ sc->sc_sensors[j].type = ADC7417_TEMP_SENSOR;
+ sc->sc_sensors[j].therm.read =
+ (int (*)(struct pmac_therm *))(ad7417_diode_read);
+ } else {
+ sc->sc_sensors[j].therm.read =
+ (int (*)(struct pmac_therm *))(ad7417_sensor_read);
+ }
+
+ if (sc->sc_sensors[j].type != ADC7417_TEMP_SENSOR)
+ continue;
+
+ /* Make up some ranges */
+ sc->sc_sensors[j].therm.target_temp = 500 + ZERO_C_TO_K;
+ sc->sc_sensors[j].therm.max_temp = 900 + ZERO_C_TO_K;
+
+ pmac_thermal_sensor_register(&sc->sc_sensors[j].therm);
+ }
+
+ return (i);
+}
+
+static int
+ad7417_attach(device_t dev)
+{
+ struct ad7417_softc *sc;
+ struct sysctl_oid *oid, *sensroot_oid;
+ struct sysctl_ctx_list *ctx;
+ char sysctl_name[32];
+ int i, j;
+ const char *unit;
+ const char *desc;
+
+ sc = device_get_softc(dev);
+
+ sc->sc_nsensors = 0;
+
+ /* Count the actual number of sensors. */
+ sc->sc_nsensors = ad7417_fill_sensor_prop(dev);
+
+ device_printf(dev, "%d sensors detected.\n", sc->sc_nsensors);
+
+ if (sc->sc_nsensors == 0)
+ device_printf(dev, "WARNING: No AD7417 sensors detected!\n");
+
+ sc->sc_sensors = malloc(sc->sc_nsensors * sizeof(struct ad7417_sensor),
+ M_AD7417, M_WAITOK | M_ZERO);
+
+ ctx = device_get_sysctl_ctx(dev);
+ sensroot_oid = SYSCTL_ADD_NODE(ctx,
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "sensor",
+ CTLFLAG_RD, 0, "AD7417 Sensor Information");
+
+ /* Now we can fill the properties into the allocated struct. */
+ sc->sc_nsensors = ad7417_fill_sensor_prop(dev);
+
+ /* Add sysctls for the sensors. */
+ for (i = 0; i < sc->sc_nsensors; i++) {
+ for (j = 0; j < strlen(sc->sc_sensors[i].therm.name); j++) {
+ sysctl_name[j] =
+ tolower(sc->sc_sensors[i].therm.name[j]);
+ if (isspace(sysctl_name[j]))
+ sysctl_name[j] = '_';
+ }
+ sysctl_name[j] = 0;
+
+ oid = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(sensroot_oid),
+ OID_AUTO,
+ sysctl_name, CTLFLAG_RD, 0,
+ "Sensor Information");
+
+ if (sc->sc_sensors[i].type == ADC7417_TEMP_SENSOR) {
+ unit = "temp";
+ desc = "Sensor temp in C";
+ } else {
+ unit = "volt";
+ desc = "Sensor Volt in V";
+ }
+ /* The sensor index i is passed to the handler via arg2. */
+ SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
+ unit, CTLTYPE_INT | CTLFLAG_RD, dev,
+ i, ad7417_sensor_sysctl,
+ sc->sc_sensors[i].type == ADC7417_TEMP_SENSOR ?
+ "IK" : "I", desc);
+ }
+ /* Dump sensor location, ID & type. */
+ if (bootverbose) {
+ device_printf(dev, "Sensors\n");
+ for (i = 0; i < sc->sc_nsensors; i++) {
+ device_printf(dev, "Location: %s ID: %d type: %d\n",
+ sc->sc_sensors[i].therm.name,
+ sc->sc_sensors[i].id,
+ sc->sc_sensors[i].type);
+ }
+ }
+
+ return (0);
+}
+
+static int
+ad7417_get_temp(device_t dev, uint32_t addr, int *temp)
+{
+ uint16_t buf[2];
+ uint16_t read;
+ int err;
+
+ err = ad7417_read_2(dev, addr, AD7417_TEMP, buf);
+
+ if (err < 0)
+ return (-1);
+
+ read = *((int16_t*)buf);
+
+ /*
+ * The ADC is 10 bit with a resolution of 0.25 C. The result is
+ * returned in tenths of a degree C; the caller converts it to
+ * tenths of a kelvin.
+ */
+ *temp = (((int16_t)(read & 0xffc0)) >> 6) * 25 / 10;
+ return (0);
+}
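+
+/*
+ * Worked example of the conversion above: the 10-bit result sits in the
+ * top bits of the 16-bit register, so (read & 0xffc0) >> 6 recovers the
+ * signed ADC code in 0.25 C steps. A code of 100 therefore yields
+ * 100 * 25 / 10 = 250, i.e. 25.0 C in tenths of a degree.
+ */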
+
+static int
+ad7417_get_adc(device_t dev, uint32_t addr, unsigned int *value,
+ uint8_t chan)
+{
+ uint8_t tmp;
+ int err;
+ struct write_data config;
+ struct read_data data;
+
+ tmp = chan << 5;
+ config.reg = AD7417_CONFIG;
+ data.reg = AD7417_ADC;
+ data.val = 0;
+
+ err = ad7417_read_1(dev, addr, AD7417_CONFIG, &config.val);
+
+ config.val = (config.val & ~AD7417_CONFMASK) | (tmp & AD7417_CONFMASK);
+
+ err = ad7417_write_read(dev, addr, config, &data);
+ if (err < 0)
+ return (-1);
+
+ *value = ((uint32_t)data.val) >> 6;
+
+ return (0);
+}
+
+static int
+ad7417_diode_read(struct ad7417_sensor *sens)
+{
+ static int eeprom_read = 0;
+ static cell_t eeprom[2][40];
+ phandle_t eeprom_node;
+ int rawval, diode_slope, diode_offset;
+ int temp;
+
+ if (!eeprom_read) {
+ eeprom_node = OF_finddevice("/u3/i2c/cpuid@a0");
+ OF_getprop(eeprom_node, "cpuid", eeprom[0], sizeof(eeprom[0]));
+ eeprom_node = OF_finddevice("/u3/i2c/cpuid@a2");
+ OF_getprop(eeprom_node, "cpuid", eeprom[1], sizeof(eeprom[1]));
+ eeprom_read = 1;
+ }
+
+ rawval = ad7417_adc_read(sens);
+ if (rawval < 0)
+ return (-1);
+
+ if (strstr(sens->therm.name, "CPU B") != NULL) {
+ diode_slope = eeprom[1][0x11] >> 16;
+ diode_offset = (int16_t)(eeprom[1][0x11] & 0xffff) << 12;
+ } else {
+ diode_slope = eeprom[0][0x11] >> 16;
+ diode_offset = (int16_t)(eeprom[0][0x11] & 0xffff) << 12;
+ }
+
+ temp = (rawval*diode_slope + diode_offset) >> 2;
+ temp = (10*(temp >> 16)) + ((10*(temp & 0xffff)) >> 16);
+
+ return (temp + ZERO_C_TO_K);
+}
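The arithmetic above is 16.16 fixed point: the EEPROM word holds the slope in
its high half and the offset in its low half (shifted up by 12 bits on load).
A standalone sketch with invented slope, offset, and ADC values:

    #include <stdio.h>

    int
    main(void)
    {
    	int rawval = 600;		/* invented ADC reading */
    	int diode_slope = 0x2000;	/* invented 16.16 slope */
    	int diode_offset = -0x10000;	/* invented 16.16 offset (-1.0) */
    	int temp;

    	temp = (rawval * diode_slope + diode_offset) >> 2;
    	/* Convert the 16.16 result to tenths of a degree. */
    	temp = (10 * (temp >> 16)) + ((10 * (temp & 0xffff)) >> 16);
    	printf("%d tenths C\n", temp);	/* prints "185 tenths C" */
    	return (0);
    }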
+
+static int
+ad7417_adc_read(struct ad7417_sensor *sens)
+{
+ struct ad7417_softc *sc;
+ uint8_t chan;
+ unsigned int temp;
+
+ sc = device_get_softc(sens->dev);
+
+ switch (sens->id) {
+ case 11:
+ case 16:
+ chan = 1;
+ break;
+ case 12:
+ case 17:
+ chan = 2;
+ break;
+ case 13:
+ case 18:
+ chan = 3;
+ break;
+ case 14:
+ case 19:
+ chan = 4;
+ break;
+ default:
+ chan = 1;
+ }
+
+ if (ad7417_get_adc(sc->sc_dev, sc->sc_addr, &temp, chan) < 0)
+ return (-1);
+
+ return (temp);
+}
+
+
+static int
+ad7417_sensor_read(struct ad7417_sensor *sens)
+{
+ struct ad7417_softc *sc;
+ int temp;
+
+ sc = device_get_softc(sens->dev);
+
+ /* Init the ADC. */
+ if (ad7417_init_adc(sc->sc_dev, sc->sc_addr) < 0)
+ return (-1);
+
+ if (sens->type == ADC7417_TEMP_SENSOR) {
+ if (ad7417_get_temp(sc->sc_dev, sc->sc_addr, &temp) < 0)
+ return (-1);
+ temp += ZERO_C_TO_K;
+ } else {
+ temp = ad7417_adc_read(sens);
+ }
+ return (temp);
+}
+
+static int
+ad7417_sensor_sysctl(SYSCTL_HANDLER_ARGS)
+{
+ device_t dev;
+ struct ad7417_softc *sc;
+ struct ad7417_sensor *sens;
+ int value = 0;
+ int error;
+
+ dev = arg1;
+ sc = device_get_softc(dev);
+ sens = &sc->sc_sensors[arg2];
+
+ value = sens->therm.read(&sens->therm);
+ if (value < 0)
+ return (ENXIO);
+
+ error = sysctl_handle_int(oidp, &value, 0, req);
+
+ return (error);
+}
diff --git a/sys/dev/iicbus/ds1775.c b/sys/dev/iicbus/ds1775.c
index 0edc074..23ad6f4 100644
--- a/sys/dev/iicbus/ds1775.c
+++ b/sys/dev/iicbus/ds1775.c
@@ -49,33 +49,29 @@ __FBSDID("$FreeBSD$");
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
-
-#define FCU_ZERO_C_TO_K 2732
+#include <powerpc/powermac/powermac_thermal.h>
/* Drivebay sensor: LM75/DS1775. */
#define DS1775_TEMP 0x0
-struct ds1775_sensor {
- char location[32];
-};
-
/* Regular bus attachment functions */
static int ds1775_probe(device_t);
static int ds1775_attach(device_t);
+struct ds1775_softc {
+ struct pmac_therm sc_sensor;
+ device_t sc_dev;
+ struct intr_config_hook enum_hook;
+ uint32_t sc_addr;
+};
+
/* Utility functions */
+static int ds1775_sensor_read(struct ds1775_softc *sc);
static int ds1775_sensor_sysctl(SYSCTL_HANDLER_ARGS);
static void ds1775_start(void *xdev);
static int ds1775_read_2(device_t dev, uint32_t addr, uint8_t reg,
uint16_t *data);
-struct ds1775_softc {
- device_t sc_dev;
- struct intr_config_hook enum_hook;
- uint32_t sc_addr;
- struct ds1775_sensor *sc_sensors;
-
-};
static device_method_t ds1775_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, ds1775_probe),
@@ -92,26 +88,33 @@ static driver_t ds1775_driver = {
static devclass_t ds1775_devclass;
DRIVER_MODULE(ds1755, iicbus, ds1775_driver, ds1775_devclass, 0, 0);
-MALLOC_DEFINE(M_DS1775, "ds1775", "Temp-Monitor DS1775");
static int
ds1775_read_2(device_t dev, uint32_t addr, uint8_t reg, uint16_t *data)
{
uint8_t buf[4];
+ int err, try = 0;
struct iic_msg msg[2] = {
{ addr, IIC_M_WR | IIC_M_NOSTOP, 1, &reg },
{ addr, IIC_M_RD, 2, buf },
};
- if (iicbus_transfer(dev, msg, 2) != 0) {
- device_printf(dev, "iicbus read failed\n");
- return (EIO);
+ for (;;) {
+ err = iicbus_transfer(dev, msg, 2);
+ if (err != 0)
+ goto retry;
+
+ *data = *((uint16_t*)buf);
+ return (0);
+ retry:
+ if (++try > 5) {
+ device_printf(dev, "iicbus read failed\n");
+ return (-1);
+ }
+ pause("ds1775_read_2", hz);
}
-
- *data = *((uint16_t*)buf);
-
- return (0);
}
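The loop above retries the transfer up to five extra times, sleeping one
second (hz ticks) between attempts, before reporting failure. A userland
sketch of the same bounded-retry shape, with do_transfer() standing in for
iicbus_transfer():

    #include <stdio.h>
    #include <unistd.h>

    static int
    do_transfer(void)
    {
    	return (-1);	/* placeholder: always fails */
    }

    int
    main(void)
    {
    	int try;

    	for (try = 0; try <= 5; try++) {
    		if (do_transfer() == 0) {
    			printf("ok after %d attempt(s)\n", try + 1);
    			return (0);
    		}
    		sleep(1);	/* stands in for pause("...", hz) */
    	}
    	fprintf(stderr, "read failed\n");
    	return (1);
    }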
static int
@@ -169,7 +172,6 @@ ds1775_start(void *xdev)
{
phandle_t child;
struct ds1775_softc *sc;
- struct ds1775_sensor *sens;
struct sysctl_oid *sensroot_oid;
struct sysctl_ctx_list *ctx;
ssize_t plen;
@@ -183,30 +185,43 @@ ds1775_start(void *xdev)
child = ofw_bus_get_node(dev);
- sc->sc_sensors = malloc (sizeof(struct ds1775_sensor),
- M_DS1775, M_WAITOK | M_ZERO);
-
- sens = sc->sc_sensors;
-
ctx = device_get_sysctl_ctx(dev);
sensroot_oid = device_get_sysctl_tree(dev);
- plen = OF_getprop(child, "hwsensor-location", sens->location,
- sizeof(sens->location));
+ if (OF_getprop(child, "hwsensor-zone", &sc->sc_sensor.zone,
+ sizeof(int)) < 0)
+ sc->sc_sensor.zone = 0;
+
+ plen = OF_getprop(child, "hwsensor-location", sc->sc_sensor.name,
+ sizeof(sc->sc_sensor.name));
units = "C";
if (plen == -1) {
strcpy(sysctl_name, "sensor");
} else {
- for (i = 0; i < strlen(sens->location); i++) {
- sysctl_name[i] = tolower(sens->location[i]);
+ for (i = 0; i < strlen(sc->sc_sensor.name); i++) {
+ sysctl_name[i] = tolower(sc->sc_sensor.name[i]);
if (isspace(sysctl_name[i]))
sysctl_name[i] = '_';
}
sysctl_name[i] = 0;
}
- sprintf(sysctl_desc,"%s (%s)", sens->location, units);
+ /* Make up target temperatures. These are low, for the drive bay. */
+ if (sc->sc_sensor.zone == 0) {
+ sc->sc_sensor.target_temp = 500 + ZERO_C_TO_K;
+ sc->sc_sensor.max_temp = 600 + ZERO_C_TO_K;
+ } else {
+ sc->sc_sensor.target_temp = 300 + ZERO_C_TO_K;
+ sc->sc_sensor.max_temp = 600 + ZERO_C_TO_K;
+ }
+
+ sc->sc_sensor.read =
+ (int (*)(struct pmac_therm *))(ds1775_sensor_read);
+ pmac_thermal_sensor_register(&sc->sc_sensor);
+
+ sprintf(sysctl_desc, "%s (%s)", sc->sc_sensor.name, units);
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(sensroot_oid), OID_AUTO,
sysctl_name,
CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, dev,
@@ -216,44 +231,38 @@ ds1775_start(void *xdev)
}
static int
-ds1775_sensor_read(device_t dev, struct ds1775_sensor *sens, int *temp)
+ds1775_sensor_read(struct ds1775_softc *sc)
{
- struct ds1775_softc *sc;
uint16_t buf[2];
uint16_t read;
+ int err;
- sc = device_get_softc(dev);
-
- ds1775_read_2(sc->sc_dev, sc->sc_addr, DS1775_TEMP, buf);
+ err = ds1775_read_2(sc->sc_dev, sc->sc_addr, DS1775_TEMP, buf);
+ if (err < 0)
+ return (-1);
read = *((int16_t *)buf);
/* The default mode of the ADC is 9 bit, the resolution is 0.5 C per
bit. The temperature is in tenth kelvin.
*/
- *temp = ((int16_t)(read) >> 7) * 5;
-
- return (0);
+ return (((int16_t)(read) >> 7) * 5 + ZERO_C_TO_K);
}
+
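Since the shift above is applied to a signed 16-bit value, the arithmetic
right shift sign-extends the 9-bit field and negative temperatures come out
right. A standalone sketch with an invented register value:

    #include <stdio.h>
    #include <stdint.h>

    int
    main(void)
    {
    	uint16_t read = 0xe680;	/* invented sample: -25.5 C */
    	int tenths;

    	/* Arithmetic shift keeps the sign; one bit is 0.5 C. */
    	tenths = ((int16_t)read >> 7) * 5;
    	printf("%d tenths C\n", tenths);	/* prints "-255 tenths C" */
    	return (0);
    }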
static int
ds1775_sensor_sysctl(SYSCTL_HANDLER_ARGS)
{
device_t dev;
struct ds1775_softc *sc;
- struct ds1775_sensor *sens;
- int value;
int error;
unsigned int temp;
dev = arg1;
sc = device_get_softc(dev);
- sens = &sc->sc_sensors[arg2];
- error = ds1775_sensor_read(dev, sens, &value);
- if (error != 0)
- return (error);
-
- temp = value + FCU_ZERO_C_TO_K;
+ temp = ds1775_sensor_read(sc);
+ if ((int)temp < 0)
+ return (EIO);
error = sysctl_handle_int(oidp, &temp, 0, req);
diff --git a/sys/dev/iicbus/max6690.c b/sys/dev/iicbus/max6690.c
index cbfdc26..83f3b50 100644
--- a/sys/dev/iicbus/max6690.c
+++ b/sys/dev/iicbus/max6690.c
@@ -49,20 +49,22 @@ __FBSDID("$FreeBSD$");
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
-
-#define FCU_ZERO_C_TO_K 2732
+#include <powerpc/powermac/powermac_thermal.h>
/* Inlet, Backside, U3 Heatsink sensor: MAX6690. */
#define MAX6690_INT_TEMP 0x0
#define MAX6690_EXT_TEMP 0x1
+#define MAX6690_RSL_STATUS 0x2
#define MAX6690_EEXT_TEMP 0x10
#define MAX6690_IEXT_TEMP 0x11
#define MAX6690_TEMP_MASK 0xe0
struct max6690_sensor {
+ struct pmac_therm therm;
+ device_t dev;
+
int id;
- char location[32];
};
/* Regular bus attachment functions */
@@ -70,10 +72,11 @@ static int max6690_probe(device_t);
static int max6690_attach(device_t);
/* Utility functions */
+static int max6690_sensor_read(struct max6690_sensor *sens);
static int max6690_sensor_sysctl(SYSCTL_HANDLER_ARGS);
static void max6690_start(void *xdev);
-static int max6690_read_1(device_t dev, uint32_t addr, uint8_t reg,
- uint8_t *data);
+static int max6690_read(device_t dev, uint32_t addr, uint8_t reg,
+ uint8_t *data);
struct max6690_softc {
device_t sc_dev;
@@ -101,23 +104,43 @@ DRIVER_MODULE(max6690, iicbus, max6690_driver, max6690_devclass, 0, 0);
MALLOC_DEFINE(M_MAX6690, "max6690", "Temp-Monitor MAX6690");
static int
-max6690_read_1(device_t dev, uint32_t addr, uint8_t reg, uint8_t *data)
+max6690_read(device_t dev, uint32_t addr, uint8_t reg, uint8_t *data)
{
uint8_t buf[4];
-
- struct iic_msg msg[2] = {
+ uint8_t busy[1];
+ /* Busy register RSL. */
+ uint8_t rsl = MAX6690_RSL_STATUS;
+ int err, try = 0;
+
+ /* First read the status register, 0x2; if busy, retry. */
+ struct iic_msg msg[4] = {
+ { addr, IIC_M_WR | IIC_M_NOSTOP, 1, &rsl },
+ { addr, IIC_M_RD, 1, busy },
{ addr, IIC_M_WR | IIC_M_NOSTOP, 1, &reg },
{ addr, IIC_M_RD, 1, buf },
};
- if (iicbus_transfer(dev, msg, 2) != 0) {
- device_printf(dev, "iicbus read failed\n");
- return (EIO);
+ for (;;) {
+ err = iicbus_transfer(dev, msg, 4);
+ if (err != 0)
+ goto retry;
+ if (busy[0] & 0x80)
+ goto retry;
+ /* Check for invalid value and retry. */
+ if (buf[0] == 0xff)
+ goto retry;
+
+ *data = *((uint8_t*)buf);
+ return (0);
+
+ retry:
+ if (++try > 5) {
+ device_printf(dev, "iicbus read failed\n");
+ return (-1);
+ }
+ pause("max6690_read", hz);
}
-
- *data = *((uint8_t*)buf);
-
- return (0);
}
static int
@@ -167,7 +190,7 @@ max6690_fill_sensor_prop(device_t dev)
sizeof(location));
while (len < prop_len) {
if (sc->sc_sensors != NULL)
- strcpy(sc->sc_sensors[i].location, location + len);
+ strcpy(sc->sc_sensors[i].therm.name, location + len);
prev_len = strlen(location + len) + 1;
len += prev_len;
i++;
@@ -180,6 +203,22 @@ max6690_fill_sensor_prop(device_t dev)
for (j = 0; j < i; j++)
sc->sc_sensors[j].id = (id[j] & 0xf);
+ /* Fill the sensor zone property. */
+ prop_len = OF_getprop(child, "hwsensor-zone", id, sizeof(id));
+ for (j = 0; j < i; j++)
+ sc->sc_sensors[j].therm.zone = id[j];
+
+ /* Set up remaining sensor properties */
+ for (j = 0; j < i; j++) {
+ sc->sc_sensors[j].dev = dev;
+
+ sc->sc_sensors[j].therm.target_temp = 400 + ZERO_C_TO_K;
+ sc->sc_sensors[j].therm.max_temp = 800 + ZERO_C_TO_K;
+
+ sc->sc_sensors[j].therm.read =
+ (int (*)(struct pmac_therm *))(max6690_sensor_read);
+ }
+
return (i);
}
static int
@@ -240,10 +279,15 @@ max6690_start(void *xdev)
/* Now we can fill the properties into the allocated struct. */
sc->sc_nsensors = max6690_fill_sensor_prop(dev);
+ /* Register with powermac_thermal */
+ for (i = 0; i < sc->sc_nsensors; i++)
+ pmac_thermal_sensor_register(&sc->sc_sensors[i].therm);
+
/* Add sysctls for the sensors. */
for (i = 0; i < sc->sc_nsensors; i++) {
- for (j = 0; j < strlen(sc->sc_sensors[i].location); j++) {
- sysctl_name[j] = tolower(sc->sc_sensors[i].location[j]);
+ for (j = 0; j < strlen(sc->sc_sensors[i].therm.name); j++) {
+ sysctl_name[j] =
+ tolower(sc->sc_sensors[i].therm.name[j]);
if (isspace(sysctl_name[j]))
sysctl_name[j] = '_';
}
@@ -265,7 +309,7 @@ max6690_start(void *xdev)
device_printf(dev, "Sensors\n");
for (i = 0; i < sc->sc_nsensors; i++) {
device_printf(dev, "Location : %s ID: %d\n",
- sc->sc_sensors[i].location,
+ sc->sc_sensors[i].therm.name,
sc->sc_sensors[i].id);
}
}
@@ -274,16 +318,18 @@ max6690_start(void *xdev)
}
static int
-max6690_sensor_read(device_t dev, struct max6690_sensor *sens, int *temp)
+max6690_sensor_read(struct max6690_sensor *sens)
{
uint8_t reg_int = 0, reg_ext = 0;
- uint8_t integer;
- uint8_t fraction;
+ uint8_t integer = 0;
+ uint8_t fraction = 0;
+ int err, temp;
+
struct max6690_softc *sc;
- sc = device_get_softc(dev);
+ sc = device_get_softc(sens->dev);
- /* The internal sensor id's are even, the external ar odd. */
+ /* The internal sensor id's are even, the external are odd. */
if ((sens->id % 2) == 0) {
reg_int = MAX6690_INT_TEMP;
reg_ext = MAX6690_IEXT_TEMP;
@@ -292,18 +338,20 @@ max6690_sensor_read(device_t dev, struct max6690_sensor *sens, int *temp)
reg_ext = MAX6690_EEXT_TEMP;
}
- max6690_read_1(sc->sc_dev, sc->sc_addr, reg_int, &integer);
+ err = max6690_read(sc->sc_dev, sc->sc_addr, reg_int, &integer);
+ if (err < 0)
+ return (-1);
+ err = max6690_read(sc->sc_dev, sc->sc_addr, reg_ext, &fraction);
- max6690_read_1(sc->sc_dev, sc->sc_addr, reg_ext, &fraction);
+ if (err < 0)
+ return (-1);
fraction &= MAX6690_TEMP_MASK;
/* The temperature is in tenth kelvin, the fractional part resolution
is 0.125.
*/
- *temp = (integer * 10) + (fraction >> 5) * 10 / 8;
+ temp = (integer * 10) + (fraction >> 5) * 10 / 8;
- return (0);
+ return (temp + ZERO_C_TO_K);
}
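One register supplies whole degrees and the other the eighths of a degree in
its top three bits, so the conversion above lands in tenths of a kelvin once
the offset is added. A sketch with invented register contents:

    #include <stdio.h>
    #include <stdint.h>

    #define MAX6690_TEMP_MASK	0xe0

    int
    main(void)
    {
    	uint8_t integer = 41;	/* invented: 41 whole degrees C */
    	uint8_t fraction = 0xbf;	/* invented: top 3 bits = 5/8 C */
    	int tenths;

    	fraction &= MAX6690_TEMP_MASK;
    	tenths = (integer * 10) + (fraction >> 5) * 10 / 8;
    	printf("%d.%d C\n", tenths / 10, tenths % 10);	/* prints "41.6 C" */
    	return (0);
    }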
static int
@@ -312,7 +360,6 @@ max6690_sensor_sysctl(SYSCTL_HANDLER_ARGS)
device_t dev;
struct max6690_softc *sc;
struct max6690_sensor *sens;
- int value = 0;
int error;
unsigned int temp;
@@ -320,11 +367,9 @@ max6690_sensor_sysctl(SYSCTL_HANDLER_ARGS)
sc = device_get_softc(dev);
sens = &sc->sc_sensors[arg2];
- error = max6690_sensor_read(dev, sens, &value);
- if (error != 0)
- return (error);
-
- temp = value + FCU_ZERO_C_TO_K;
+ temp = max6690_sensor_read(sens);
+ if ((int)temp < 0)
+ return (EIO);
error = sysctl_handle_int(oidp, &temp, 0, req);
diff --git a/sys/dev/ipw/if_ipw.c b/sys/dev/ipw/if_ipw.c
index db76bfa..7560430 100644
--- a/sys/dev/ipw/if_ipw.c
+++ b/sys/dev/ipw/if_ipw.c
@@ -199,6 +199,8 @@ static devclass_t ipw_devclass;
DRIVER_MODULE(ipw, pci, ipw_driver, ipw_devclass, 0, 0);
+MODULE_VERSION(ipw, 1);
+
static int
ipw_probe(device_t dev)
{
diff --git a/sys/dev/iwi/if_iwi.c b/sys/dev/iwi/if_iwi.c
index dc81309..73b861c 100644
--- a/sys/dev/iwi/if_iwi.c
+++ b/sys/dev/iwi/if_iwi.c
@@ -232,6 +232,8 @@ static devclass_t iwi_devclass;
DRIVER_MODULE(iwi, pci, iwi_driver, iwi_devclass, 0, 0);
+MODULE_VERSION(iwi, 1);
+
static __inline uint8_t
MEM_READ_1(struct iwi_softc *sc, uint32_t addr)
{
diff --git a/sys/dev/iwn/if_iwn.c b/sys/dev/iwn/if_iwn.c
index 29e391f..ab3dec7 100644
--- a/sys/dev/iwn/if_iwn.c
+++ b/sys/dev/iwn/if_iwn.c
@@ -401,6 +401,8 @@ static devclass_t iwn_devclass;
DRIVER_MODULE(iwn, pci, iwn_driver, iwn_devclass, 0, 0);
+MODULE_VERSION(iwn, 1);
+
MODULE_DEPEND(iwn, firmware, 1, 1, 1);
MODULE_DEPEND(iwn, pci, 1, 1, 1);
MODULE_DEPEND(iwn, wlan, 1, 1, 1);
@@ -565,6 +567,7 @@ iwn_attach(device_t dev)
ic->ic_caps =
IEEE80211_C_STA /* station mode supported */
| IEEE80211_C_MONITOR /* monitor mode supported */
+ | IEEE80211_C_BGSCAN /* background scanning */
| IEEE80211_C_TXPMGT /* tx power management */
| IEEE80211_C_SHSLOT /* short slot time supported */
| IEEE80211_C_WPA
@@ -574,8 +577,6 @@ iwn_attach(device_t dev)
#endif
| IEEE80211_C_WME /* WME */
;
- if (sc->hw_type != IWN_HW_REV_TYPE_4965)
- ic->ic_caps |= IEEE80211_C_BGSCAN; /* background scanning */
/* Read MAC address, channels, etc from EEPROM. */
if ((error = iwn_read_eeprom(sc, macaddr)) != 0) {
@@ -605,9 +606,9 @@ iwn_attach(device_t dev)
ic->ic_htcaps =
IEEE80211_HTCAP_SMPS_OFF /* SMPS mode disabled */
| IEEE80211_HTCAP_SHORTGI20 /* short GI in 20MHz */
-#ifdef notyet
| IEEE80211_HTCAP_CHWIDTH40 /* 40MHz channel width*/
| IEEE80211_HTCAP_SHORTGI40 /* short GI in 40MHz */
+#ifdef notyet
| IEEE80211_HTCAP_GREENFIELD
#if IWN_RBUF_SIZE == 8192
| IEEE80211_HTCAP_MAXAMSDU_7935 /* max A-MSDU length */
@@ -2104,6 +2105,7 @@ rate2plcp(int rate)
static void
iwn_newassoc(struct ieee80211_node *ni, int isnew)
{
+#define RV(v) ((v) & IEEE80211_RATE_VAL)
struct ieee80211com *ic = ni->ni_ic;
struct iwn_softc *sc = ic->ic_ifp->if_softc;
struct iwn_node *wn = (void *)ni;
@@ -2117,7 +2119,7 @@ iwn_newassoc(struct ieee80211_node *ni, int isnew)
if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) {
ridx = ni->ni_rates.rs_nrates - 1;
for (i = ni->ni_htrates.rs_nrates - 1; i >= 0; i--) {
- plcp = ni->ni_htrates.rs_rates[i] | IWN_RFLAG_MCS;
+ plcp = RV(ni->ni_htrates.rs_rates[i]) | IWN_RFLAG_MCS;
if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
plcp |= IWN_RFLAG_HT40;
if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40)
@@ -2129,8 +2131,7 @@ iwn_newassoc(struct ieee80211_node *ni, int isnew)
else
plcp |= IWN_RFLAG_ANT(txant1);
if (ridx >= 0) {
- rate = ni->ni_rates.rs_rates[ridx];
- rate &= IEEE80211_RATE_VAL;
+ rate = RV(ni->ni_rates.rs_rates[ridx]);
wn->ridx[rate] = plcp;
}
wn->ridx[IEEE80211_RATE_MCS | i] = plcp;
@@ -2138,8 +2139,7 @@ iwn_newassoc(struct ieee80211_node *ni, int isnew)
}
} else {
for (i = 0; i < ni->ni_rates.rs_nrates; i++) {
- rate = ni->ni_rates.rs_rates[i] & IEEE80211_RATE_VAL;
-
+ rate = RV(ni->ni_rates.rs_rates[i]);
plcp = rate2plcp(rate);
ridx = ic->ic_rt->rateCodeToIndex[rate];
if (ridx < IWN_RIDX_OFDM6 &&
@@ -2149,6 +2149,7 @@ iwn_newassoc(struct ieee80211_node *ni, int isnew)
wn->ridx[rate] = htole32(plcp);
}
}
+#undef RV
}
static int
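The RV() macro introduced in this hunk strips the net80211 basic-rate flag
(0x80) so only the rate value in 500 kbps units remains. A standalone sketch
of the masking, with the flag bit set on an invented rate byte:

    #include <stdio.h>
    #include <stdint.h>

    #define IEEE80211_RATE_BASIC	0x80
    #define IEEE80211_RATE_VAL	0x7f
    #define RV(v)	((v) & IEEE80211_RATE_VAL)

    int
    main(void)
    {
    	uint8_t rate = 12 | IEEE80211_RATE_BASIC;	/* 6 Mbps, marked basic */

    	/* Without the mask, the flag bit corrupts table lookups. */
    	printf("raw 0x%02x -> %u (%u.%u Mbps)\n", rate, RV(rate),
    	    RV(rate) / 2, (RV(rate) % 2) * 5);
    	return (0);
    }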
@@ -3313,7 +3314,8 @@ iwn_tx_data(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
}
ac = M_WME_GETAC(m);
- if (IEEE80211_AMPDU_RUNNING(&ni->ni_tx_ampdu[ac])) {
+ if (IEEE80211_QOS_HAS_SEQ(wh) &&
+ IEEE80211_AMPDU_RUNNING(&ni->ni_tx_ampdu[ac])) {
struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[ac];
ring = &sc->txq[*(int *)tap->txa_private];
@@ -3991,6 +3993,7 @@ iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
static int
iwn_set_link_quality(struct iwn_softc *sc, struct ieee80211_node *ni)
{
+#define RV(v) ((v) & IEEE80211_RATE_VAL)
struct iwn_node *wn = (void *)ni;
struct ieee80211_rateset *rs = &ni->ni_rates;
struct iwn_cmd_link_quality linkq;
@@ -4017,11 +4020,11 @@ iwn_set_link_quality(struct iwn_softc *sc, struct ieee80211_node *ni)
if (IEEE80211_IS_CHAN_HT(ni->ni_chan))
rate = IEEE80211_RATE_MCS | txrate;
else
- rate = rs->rs_rates[txrate] & IEEE80211_RATE_VAL;
+ rate = RV(rs->rs_rates[txrate]);
linkq.retry[i] = wn->ridx[rate];
if ((le32toh(wn->ridx[rate]) & IWN_RFLAG_MCS) &&
- (le32toh(wn->ridx[rate]) & 0xff) > 7)
+ RV(le32toh(wn->ridx[rate])) > 7)
linkq.mimo = i + 1;
/* Next retry at immediate lower bit-rate. */
@@ -4029,6 +4032,7 @@ iwn_set_link_quality(struct iwn_softc *sc, struct ieee80211_node *ni)
txrate--;
}
return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, 1);
+#undef RV
}
/*
@@ -5159,7 +5163,7 @@ iwn_scan(struct iwn_softc *sc)
if (IEEE80211_IS_CHAN_A(ic->ic_curchan) &&
sc->hw_type == IWN_HW_REV_TYPE_4965) {
/* Ant A must be avoided in 5GHz because of an HW bug. */
- rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_BC);
+ rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_B);
} else /* Use all available RX antennas. */
rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask);
hdr->rxchain = htole16(rxchain);
@@ -5170,14 +5174,19 @@ iwn_scan(struct iwn_softc *sc)
tx->id = sc->broadcast_id;
tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
- if (IEEE80211_IS_CHAN_A(ic->ic_curchan)) {
+ if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) {
/* Send probe requests at 6Mbps. */
tx->rate = htole32(0xd);
rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
} else {
hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO);
- /* Send probe requests at 1Mbps. */
- tx->rate = htole32(10 | IWN_RFLAG_CCK);
+ if (sc->hw_type == IWN_HW_REV_TYPE_4965 &&
+ sc->rxon.associd && sc->rxon.chan > 14)
+ tx->rate = htole32(0xd);
+ else {
+ /* Send probe requests at 1Mbps. */
+ tx->rate = htole32(10 | IWN_RFLAG_CCK);
+ }
rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
}
/* Use the first valid TX antenna. */
diff --git a/sys/dev/ixgbe/LICENSE b/sys/dev/ixgbe/LICENSE
index 0cf44c8..0d4f1db 100644
--- a/sys/dev/ixgbe/LICENSE
+++ b/sys/dev/ixgbe/LICENSE
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2010, Intel Corporation
+ Copyright (c) 2001-2011, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/ixgbe/README b/sys/dev/ixgbe/README
index 0b27deb..5e4921f 100644
--- a/sys/dev/ixgbe/README
+++ b/sys/dev/ixgbe/README
@@ -1,8 +1,8 @@
-FreeBSD Driver for 10 Gigabit PCI Express Server Adapters
-=============================================
+FreeBSD Driver for Intel(R) Ethernet 10 Gigabit PCI Express Server Adapters
+============================================================================
/*$FreeBSD$*/
-May 14, 2008
+November 12, 2010
Contents
@@ -11,15 +11,15 @@ Contents
- Overview
- Supported Adapters
- Building and Installation
-- Additional Configurations
+- Additional Configurations and Tuning
- Known Limitations
Overview
========
-This file describes the FreeBSD* driver for the 10 Gigabit PCIE Family of
-Adapters. Drivers has been developed for use with FreeBSD 7 or later.
+This file describes the FreeBSD* driver for the Intel(R) Ethernet 10 Gigabit
+Family of Adapters. The driver has been developed for use with FreeBSD 7.2
+or later.
For questions related to hardware requirements, refer to the documentation
supplied with your Intel 10GbE adapter. All hardware requirements listed
@@ -29,100 +29,98 @@ apply to use with FreeBSD.
Supported Adapters
==================
-The following Intel network adapters are compatible with the drivers in this
-release:
-
-Controller Adapter Name Physical Layer
----------- ------------ --------------
-82598EB Intel(R) 10 Gigabit XF SR/AF 10G Base -LR (850 nm optical fiber)
- Dual Port Server Adapter 10G Base -SR (1310 nm optical fiber)
-82598EB Intel(R) 10 Gigabit XF SR/LR
- Server Adapter
- Intel(R) 82598EB 10 Gigabit AF
- Network Connection
- Intel(R) 82598EB 10 Gigabit AT
- CX4 Network Connection
+The driver in this release is compatible with 82598 and 82599-based Intel
+Network Connections.
+SFP+ Devices with Pluggable Optics
+----------------------------------
-Building and Installation
-=========================
+82599-BASED ADAPTERS
-NOTE: You must have kernel sources installed in order to compile the driver
- module.
-
- In the instructions below, x.x.x is the driver version as indicated in
- the name of the driver tar.
-
-1. Move the base driver tar file to the directory of your choice. For
- example, use /home/username/ixgbe or /usr/local/src/ixgbe.
-
-2. Untar/unzip the archive:
- tar xfz ixgbe-x.x.x.tar.gz
-
-3. To install man page:
- cd ixgbe-x.x.x
- gzip -c ixgbe.4 > /usr/share/man/man4/ixgbee.4.gz
-
-4. To load the driver onto a running system:
- cd ixgbe-x.x.x/src
- make load
-
-5. To assign an IP address to the interface, enter the following:
- ifconfig ix<interface_num> <IP_address>
-
-6. Verify that the interface works. Enter the following, where <IP_address>
- is the IP address for another machine on the same subnet as the interface
- that is being tested:
- ping <IP_address>
-
-7. If you want the driver to load automatically when the system is booted:
-
- cd ixgbe-x.x.x/src
- make
- make install
-
- Edit /boot/loader.conf, and add the following line:
- ixgbe_load="YES"
-
- OR
-
- compile the driver into the kernel (see item 8).
-
-
- Edit /etc/rc.conf, and create the appropriate ifconfig_ixgbe<interface_num>
- entry:
-
- ifconfig_ix<interface_num>="<ifconfig_settings>"
-
- Example usage:
-
- ifconfig_ix0="inet 192.168.10.1 netmask 255.255.255.0"
-
- NOTE: For assistance, see the ifconfig man page.
-
-8. If you want to compile the driver into the kernel, enter:
-
- FreeBSD 7 or later:
-
- cd ixgbe-x.x.x/src
-
- cp *.[ch] /usr/src/sys/dev/ixgbe
-
- cp Makefile.kernel /usr/src/sys/modules/ixgbe/Makefile
-
- Edit the kernel configuration file (i.e., GENERIC or MYKERNEL) in
- /usr/src/sys/i386/conf (replace "i386" with the appropriate system
- architecture if necessary), and ensure the following line is present:
-
- device ixgbe
-
- Compile and install the kernel. The system must be reboot for the kernel
- updates to take affect. For additional information on compiling the kernel,
- consult the FreeBSD operating system documentation.
+NOTE: If your 82599-based Intel(R) Ethernet Network Adapter came with Intel
+optics, or is an Intel(R) Ethernet Server Adapter X520-2, then it only supports
+Intel optics and/or the direct attach cables listed below.
+When 82599-based SFP+ devices are connected back to back, they should be set to
+the same Speed setting via Ethtool. Results may vary if you mix speed settings.
+
+Supplier Type Part Numbers
+
+SR Modules
+Intel DUAL RATE 1G/10G SFP+ SR (bailed) FTLX8571D3BCV-IT
+Intel DUAL RATE 1G/10G SFP+ SR (bailed) AFBR-703SDZ-IN2
+Intel DUAL RATE 1G/10G SFP+ SR (bailed) AFBR-703SDDZ-IN1
+LR Modules
+Intel DUAL RATE 1G/10G SFP+ LR (bailed) FTLX1471D3BCV-IT
+Intel DUAL RATE 1G/10G SFP+ LR (bailed) AFCT-701SDZ-IN2
+Intel DUAL RATE 1G/10G SFP+ LR (bailed) AFCT-701SDDZ-IN1
+
+The following is a list of 3rd party SFP+ modules and direct attach cables that
+have received some testing. Not all modules are applicable to all devices.
+
+Supplier Type Part Numbers
+
+Finisar SFP+ SR bailed, 10g single rate FTLX8571D3BCL
+Avago SFP+ SR bailed, 10g single rate AFBR-700SDZ
+Finisar SFP+ LR bailed, 10g single rate FTLX1471D3BCL
+
+Finisar DUAL RATE 1G/10G SFP+ SR (No Bail) FTLX8571D3QCV-IT
+Avago DUAL RATE 1G/10G SFP+ SR (No Bail) AFBR-703SDZ-IN1
+Finisar DUAL RATE 1G/10G SFP+ LR (No Bail) FTLX1471D3QCV-IT
+Avago DUAL RATE 1G/10G SFP+ LR (No Bail) AFCT-701SDZ-IN1
+Finisar 1000BASE-T SFP FCLF8522P2BTL
+Avago 1000BASE-T SFP ABCU-5710RZ
+
+82599-based adapters support all passive and active limiting direct attach
+cables that comply with SFF-8431 v4.1 and SFF-8472 v10.4 specifications.
+
+Laser turns off for SFP+ when ifconfig down
+--------------------------------------------------------
+"ifconfig down" turns off the laser for 82599-based SFP+ fiber adapters.
+"ifconfig up" turns on the later.
+
+82598-BASED ADAPTERS
+
+NOTES for 82598-Based Adapters:
+- Intel(R) Ethernet Network Adapters that support removable optical modules
+ only support their original module type (i.e., the Intel(R) 10 Gigabit SR
+ Dual Port Express Module only supports SR optical modules). If you plug
+ in a different type of module, the driver will not load.
+- Hot Swapping/hot plugging optical modules is not supported.
+- Only single speed, 10 gigabit modules are supported.
+- LAN on Motherboard (LOMs) may support DA, SR, or LR modules. Other module
+ types are not supported. Please see your system documentation for details.
+
+The following is a list of 3rd party SFP+ modules and direct attach cables that
+have received some testing. Not all modules are applicable to all devices.
+
+Supplier Type Part Numbers
+
+Finisar SFP+ SR bailed, 10g single rate FTLX8571D3BCL
+Avago SFP+ SR bailed, 10g single rate AFBR-700SDZ
+Finisar SFP+ LR bailed, 10g single rate FTLX1471D3BCL
+
+82598-based adapters support all passive direct attach cables that comply
+with SFF-8431 v4.1 and SFF-8472 v10.4 specifications. Active direct attach
+cables are not supported.
+
+Third party optic modules and cables referred to above are listed only for the
+purpose of highlighting third party specifications and potential compatibility,
+and are not recommendations or endorsements or sponsorship of any third party's
+product by Intel. Intel is not endorsing or promoting products made by any
+third party and the third party reference is provided only to share information
+regarding certain optic modules and cables with the above specifications. There
+may be other manufacturers or suppliers, producing or supplying optic modules
+and cables with similar or matching descriptions. Customers must use their own
+discretion and diligence to purchase optic modules and cables from any third
+party of their choice. Customers are solely responsible for assessing the
+suitability of the product and/or devices and for the selection of the vendor
+for purchasing any product. INTEL ASSUMES NO LIABILITY WHATSOEVER, AND INTEL
+DISCLAIMS ANY EXPRESS OR IMPLIED WARRANTY, RELATING TO SALE AND/OR USE OF
+SUCH THIRD PARTY PRODUCTS OR SELECTION OF VENDOR BY CUSTOMERS.
Configuration and Tuning
-=========================
+========================
The driver supports Transmit/Receive Checksum Offload and Jumbo Frames on
all 10 Gigabit adapters.
@@ -143,7 +141,7 @@ all 10 Gigabit adapters.
The Jumbo Frames MTU range for Intel Adapters is 1500 to 16114. The default
MTU range is 1500. To modify the setting, enter the following:
- ifconfig ix <interface_num> <hostname or IP address> mtu 9000
+ ifconfig ix<interface_num> <hostname or IP address> mtu 9000
To confirm an interface's MTU value, use the ifconfig command. To confirm
the MTU used between two specific devices, use:
@@ -200,6 +198,8 @@ all 10 Gigabit adapters.
TSO
---
+ TSO is enabled by default.
+
To disable:
ifconfig <interface_num> -tso
@@ -209,23 +209,21 @@ all 10 Gigabit adapters.
ifconfig <interface_num> tso
LRO
- ___
+ ---
- Large Receive Offload is available in version 1.4.4, it is on
- by default. It can be toggled off and on by using:
- sysctl dev.ix.X.enable_lro=[0,1]
+ Large Receive Offload is available in the driver; it is on by default.
+ It can be disabled by using:
+ ifconfig <interface_num> -lro
+ To enable:
+ ifconfig <interface_num> lro
- NOTE: when changing this feature you MUST be sure the interface
- is reinitialized, it is easy to do this with ifconfig down/up.
- The LRO code will ultimately move into the kernel stack code,
- but for this first release it was included with the driver.
Important system configuration changes:
---------------------------------------
- When there is a choice run on a 64bit OS rather than 32, it makes
- a significant difference in improvement.
-
+ When there is a choice, run on a 64-bit OS rather than a 32-bit one; it
+ makes a significant difference in performance.
+
The default scheduler SCHED_4BSD is not smart about SMP locality issues.
Significant improvement can be achieved by switching to the ULE scheduler.
@@ -233,34 +231,79 @@ all 10 Gigabit adapters.
SCHED_ULE. Note that this is only advisable on FreeBSD 7, on 6.X there have
been stability problems with ULE.
- Change the file /etc/sysctl.conf, add the line:
+ The interface can generate a high number of interrupts. To avoid running
+ into the limit set by the kernel, adjust the hw.intr_storm_threshold
+ setting using sysctl:
- hw.intr_storm_threshold: 8000 (the default is 1000)
+ sysctl hw.intr_storm_threshold=9000 (the default is 1000)
+
+ For this change to take effect on boot, edit /etc/sysctl.conf and add the
+ line:
+ hw.intr_storm_threshold=9000
+
+ If you still see Interrupt Storm detected messages, increase the limit to a
+ higher number.
Best throughput results are seen with a large MTU; use 9000 if possible.
- The default number of descriptors is 256, increasing this to 1024 or even
- 2048 may improve performance.
+ The default number of descriptors is 1024; increasing this to 2K or even
+ 4K may improve performance in some workloads, but make such changes
+ carefully.
Known Limitations
=================
+
+For known hardware and troubleshooting issues, refer to the following website.
+
+ http://support.intel.com/support/go/network/adapter/home.htm
+
+Either select the link for your adapter or perform a search for the adapter
+number. The adapter's page lists many issues. For a complete list of hardware
+issues download your adapter's user guide and read the Release Notes.
+
+ UDP stress test with 10GbE driver
+ ---------------------------------
Under small packets UDP stress test with 10GbE driver, the FreeBSD system
will drop UDP packets due to the fullness of socket buffers. You may want
to change the driver's Flow Control variables to the minimum value for
controlling packet reception.
+ Attempting to configure larger MTUs with a large number of processors may
+ generate the error message "ix0:could not setup receive structures"
+ --------------------------------------------------------------------------
+ When using the ixgbe driver with RSS autoconfigured based on the number of
+ cores (the default setting) and that number is larger than 4, increase the
+ memory resources allocated for the mbuf pool as follows:
+
+ Add to the sysctl.conf file for the system:
+
+ kern.ipc.nmbclusters=262144
+ kern.ipc.nmbjumbop=262144
+
+ Lower than expected performance on dual port 10GbE devices
+ ----------------------------------------------------------
+ Some PCI-E x8 slots are actually configured as x4 slots. These slots have
+ insufficient bandwidth for full 10GbE line rate with dual port 10GbE devices.
+ The driver can detect this situation and will write the following message in
+ the system log: "PCI-Express bandwidth available for this card is not
+ sufficient for optimal performance. For optimal performance a x8 PCI-Express
+ slot is required."
+
+ If this error occurs, moving your adapter to a true x8 slot will resolve the
+ issue.
+
+
Support
=======
For general information and support, go to the Intel support website at:
- http://support.intel.com
+ www.intel.com/support/
If an issue is identified with the released source code on the supported
kernel with a supported adapter, email the specific information related to
-the issue to freebsd@intel.com.
+the issue to freebsd@intel.com
diff --git a/sys/dev/ixgbe/ixgbe.c b/sys/dev/ixgbe/ixgbe.c
index f58adbe..a382a53 100644
--- a/sys/dev/ixgbe/ixgbe.c
+++ b/sys/dev/ixgbe/ixgbe.c
@@ -34,6 +34,7 @@
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_inet.h"
+#include "opt_inet6.h"
#endif
#include "ixgbe.h"
@@ -46,7 +47,7 @@ int ixgbe_display_debug_stats = 0;
/*********************************************************************
* Driver version
*********************************************************************/
-char ixgbe_driver_version[] = "2.3.10";
+char ixgbe_driver_version[] = "2.3.11";
/*********************************************************************
* PCI Device ID Table
@@ -318,7 +319,7 @@ static int fdir_pballoc = 1;
* ixgbe_probe determines if the driver should be loaded on
* adapter based on PCI vendor/device id of the adapter.
*
- * return 0 on success, positive on failure
+ * return BUS_PROBE_DEFAULT on success, positive on failure
*********************************************************************/
static int
@@ -357,7 +358,7 @@ ixgbe_probe(device_t dev)
ixgbe_driver_version);
device_set_desc_copy(dev, adapter_name);
++ixgbe_total_ports;
- return (0);
+ return (BUS_PROBE_DEFAULT);
}
ent++;
}
@@ -385,6 +386,11 @@ ixgbe_attach(device_t dev)
INIT_DEBUGOUT("ixgbe_attach: begin");
+ if (resource_disabled("ixgbe", device_get_unit(dev))) {
+ device_printf(dev, "Disabled by device hint\n");
+ return (ENXIO);
+ }
+
/* Allocate, clear, and link in our adapter structure */
adapter = device_get_softc(dev);
adapter->dev = adapter->osdep.dev = dev;
@@ -862,8 +868,9 @@ ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
{
struct adapter *adapter = ifp->if_softc;
struct ifreq *ifr = (struct ifreq *) data;
-#ifdef INET
+#if defined(INET) || defined(INET6)
struct ifaddr *ifa = (struct ifaddr *)data;
+ bool avoid_reset = FALSE;
#endif
int error = 0;
@@ -871,26 +878,28 @@ ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
case SIOCSIFADDR:
#ifdef INET
- if (ifa->ifa_addr->sa_family == AF_INET) {
- /*
- * Since resetting hardware takes a very long time
- * and results in link renegotiation we only
- * initialize the hardware only when it is absolutely
- * required.
- */
+ if (ifa->ifa_addr->sa_family == AF_INET)
+ avoid_reset = TRUE;
+#endif
+#ifdef INET6
+ if (ifa->ifa_addr->sa_family == AF_INET6)
+ avoid_reset = TRUE;
+#endif
+#if defined(INET) || defined(INET6)
+ /*
+ ** Calling init results in link renegotiation,
+ ** so we avoid doing it when possible.
+ */
+ if (avoid_reset) {
ifp->if_flags |= IFF_UP;
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
- IXGBE_CORE_LOCK(adapter);
- ixgbe_init_locked(adapter);
- IXGBE_CORE_UNLOCK(adapter);
- }
+ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
+ ixgbe_init(adapter);
if (!(ifp->if_flags & IFF_NOARP))
arp_ifinit(ifp, ifa);
} else
-#endif
error = ether_ioctl(ifp, command, data);
break;
-
+#endif
case SIOCSIFMTU:
IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
@@ -951,6 +960,8 @@ ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
if (mask & IFCAP_VLAN_HWFILTER)
ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
+ if (mask & IFCAP_VLAN_HWTSO)
+ ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
IXGBE_CORE_LOCK(adapter);
ixgbe_init_locked(adapter);
@@ -1338,7 +1349,7 @@ ixgbe_legacy_irq(void *arg)
/*********************************************************************
*
- * MSI Queue Interrupt Service routine
+ * MSIX Queue Interrupt Service routine
*
**********************************************************************/
void
@@ -1357,6 +1368,17 @@ ixgbe_msix_que(void *arg)
IXGBE_TX_LOCK(txr);
more_tx = ixgbe_txeof(txr);
+ /*
+ ** Make certain that if the stack
+ ** has anything queued the task gets
+ ** scheduled to handle it.
+ */
+#if __FreeBSD_version < 800000
+ if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd))
+#else
+ if (!drbr_empty(adapter->ifp, txr->br))
+#endif
+ more_tx = 1;
IXGBE_TX_UNLOCK(txr);
/* Do AIM now? */
@@ -1570,7 +1592,7 @@ ixgbe_xmit(struct tx_ring *txr, struct mbuf **m_headp)
struct mbuf *m_head;
bus_dma_segment_t segs[adapter->num_segs];
bus_dmamap_t map;
- struct ixgbe_tx_buf *txbuf, *txbuf_mapped;
+ struct ixgbe_tx_buf *txbuf;
union ixgbe_adv_tx_desc *txd = NULL;
m_head = *m_headp;
@@ -1589,7 +1611,6 @@ ixgbe_xmit(struct tx_ring *txr, struct mbuf **m_headp)
*/
first = txr->next_avail_desc;
txbuf = &txr->tx_buffers[first];
- txbuf_mapped = txbuf;
map = txbuf->map;
/*
@@ -1708,6 +1729,8 @@ ixgbe_xmit(struct tx_ring *txr, struct mbuf **m_headp)
txr->next_avail_desc = i;
txbuf->m_head = m_head;
+ /* Swap the dma map between the first and last descriptor */
+ txr->tx_buffers[first].map = txbuf->map;
txbuf->map = map;
bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
@@ -2265,7 +2288,9 @@ ixgbe_setup_msix(struct adapter *adapter)
msi:
msgs = pci_msi_count(dev);
if (msgs == 1 && pci_alloc_msi(dev, &msgs) == 0)
- device_printf(adapter->dev,"Using MSI interrupt\n");
+ device_printf(adapter->dev,"Using an MSI interrupt\n");
+ else
+ device_printf(adapter->dev,"Using a Legacy interrupt\n");
return (msgs);
}
@@ -2412,19 +2437,21 @@ ixgbe_setup_interface(device_t dev, struct adapter *adapter)
ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
- ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
ifp->if_capabilities |= IFCAP_JUMBO_MTU;
+ ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
+ | IFCAP_VLAN_HWTSO
+ | IFCAP_VLAN_MTU;
ifp->if_capenable = ifp->if_capabilities;
/* Don't enable LRO by default */
ifp->if_capabilities |= IFCAP_LRO;
/*
- ** Dont turn this on by default, if vlans are
+ ** Don't turn this on by default, if vlans are
** created on another pseudo device (eg. lagg)
** then vlan events are not passed thru, breaking
** operation, but with HW FILTER off it works. If
- ** using vlans directly on the em driver you can
+ ** using vlans directly on the ixgbe driver you can
** enable this and get full hardware tag filtering.
*/
ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
@@ -5333,7 +5360,7 @@ ixgbe_add_rx_process_limit(struct adapter *adapter, const char *name,
static int
ixgbe_set_advertise(SYSCTL_HANDLER_ARGS)
{
- int error;
+ int error = 0;
struct adapter *adapter;
struct ixgbe_hw *hw;
ixgbe_link_speed speed, last;
diff --git a/sys/dev/ixgbe/ixv.c b/sys/dev/ixgbe/ixv.c
index f9f910a..e52a527 100644
--- a/sys/dev/ixgbe/ixv.c
+++ b/sys/dev/ixgbe/ixv.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2001-2010, Intel Corporation
+ Copyright (c) 2001-2011, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -33,7 +33,8 @@
/*$FreeBSD$*/
#ifdef HAVE_KERNEL_OPTION_HEADERS
-#include "opt_device_polling.h"
+#include "opt_inet.h"
+#include "opt_inet6.h"
#endif
#include "ixv.h"
@@ -41,7 +42,7 @@
/*********************************************************************
* Driver version
*********************************************************************/
-char ixv_driver_version[] = "1.0.0";
+char ixv_driver_version[] = "1.0.1";
/*********************************************************************
* PCI Device ID Table
@@ -234,7 +235,7 @@ static u32 ixv_shadow_vfta[VFTA_SIZE];
* ixv_probe determines if the driver should be loaded on
* adapter based on PCI vendor/device id of the adapter.
*
- * return 0 on success, positive on failure
+ * return BUS_PROBE_DEFAULT on success, positive on failure
*********************************************************************/
static int
@@ -271,7 +272,7 @@ ixv_probe(device_t dev)
ixv_strings[ent->index],
ixv_driver_version);
device_set_desc_copy(dev, adapter_name);
- return (0);
+ return (BUS_PROBE_DEFAULT);
}
ent++;
}
@@ -297,6 +298,11 @@ ixv_attach(device_t dev)
INIT_DEBUGOUT("ixv_attach: begin");
+ if (resource_disabled("ixgbe", device_get_unit(dev))) {
+ device_printf(dev, "Disabled by device hint\n");
+ return (ENXIO);
+ }
+
/* Allocate, clear, and link in our adapter structure */
adapter = device_get_softc(dev);
adapter->dev = adapter->osdep.dev = dev;
@@ -690,10 +696,38 @@ ixv_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
{
struct adapter *adapter = ifp->if_softc;
struct ifreq *ifr = (struct ifreq *) data;
+#if defined(INET) || defined(INET6)
+ struct ifaddr *ifa = (struct ifaddr *) data;
+ bool avoid_reset = FALSE;
+#endif
int error = 0;
switch (command) {
+ case SIOCSIFADDR:
+#ifdef INET
+ if (ifa->ifa_addr->sa_family == AF_INET)
+ avoid_reset = TRUE;
+#endif
+#ifdef INET6
+ if (ifa->ifa_addr->sa_family == AF_INET6)
+ avoid_reset = TRUE;
+#endif
+#if defined(INET) || defined(INET6)
+ /*
+ ** Calling init results in link renegotiation,
+ ** so we avoid doing it when possible.
+ */
+ if (avoid_reset) {
+ ifp->if_flags |= IFF_UP;
+ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
+ ixv_init(adapter);
+ if (!(ifp->if_flags & IFF_NOARP))
+ arp_ifinit(ifp, ifa);
+ } else
+ error = ether_ioctl(ifp, command, data);
+ break;
+#endif
case SIOCSIFMTU:
IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
if (ifr->ifr_mtu > IXV_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
@@ -1161,7 +1195,7 @@ ixv_xmit(struct tx_ring *txr, struct mbuf **m_headp)
struct mbuf *m_head;
bus_dma_segment_t segs[32];
bus_dmamap_t map;
- struct ixv_tx_buf *txbuf, *txbuf_mapped;
+ struct ixv_tx_buf *txbuf;
union ixgbe_adv_tx_desc *txd = NULL;
m_head = *m_headp;
@@ -1180,7 +1214,6 @@ ixv_xmit(struct tx_ring *txr, struct mbuf **m_headp)
*/
first = txr->next_avail_desc;
txbuf = &txr->tx_buffers[first];
- txbuf_mapped = txbuf;
map = txbuf->map;
/*
@@ -1283,6 +1316,7 @@ ixv_xmit(struct tx_ring *txr, struct mbuf **m_headp)
txr->next_avail_desc = i;
txbuf->m_head = m_head;
+ txr->tx_buffers[first].map = txbuf->map;
txbuf->map = map;
bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
@@ -1820,11 +1854,15 @@ ixv_setup_interface(device_t dev, struct adapter *adapter)
ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
- ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
- ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_LRO;
-
+ ifp->if_capabilities |= IFCAP_JUMBO_MTU;
+ ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
+ | IFCAP_VLAN_HWTSO
+ | IFCAP_VLAN_MTU;
ifp->if_capenable = ifp->if_capabilities;
+ /* Don't enable LRO by default */
+ ifp->if_capabilities |= IFCAP_LRO;
+
/*
* Specify the media types supported by this adapter and register
* callbacks to update media and link information
diff --git a/sys/dev/mfi/mfi.c b/sys/dev/mfi/mfi.c
index eb18ffe..1962648 100644
--- a/sys/dev/mfi/mfi.c
+++ b/sys/dev/mfi/mfi.c
@@ -788,7 +788,7 @@ mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
class_locale.members.reserved = 0;
class_locale.members.locale = mfi_event_locale;
- class_locale.members.class = mfi_event_class;
+ class_locale.members.evt_class = mfi_event_class;
if (seq_start == 0) {
error = mfi_get_log_state(sc, &log_state);
@@ -1082,8 +1082,8 @@ mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
{
device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
- format_timestamp(detail->time), detail->class.members.locale,
- format_class(detail->class.members.class), detail->description);
+ format_timestamp(detail->time), detail->evt_class.members.locale,
+ format_class(detail->evt_class.members.evt_class), detail->description);
}
static int
@@ -1099,16 +1099,16 @@ mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
if (sc->mfi_aen_cm != NULL) {
prior_aen.word =
((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
- if (prior_aen.members.class <= current_aen.members.class &&
+ if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
!((prior_aen.members.locale & current_aen.members.locale)
^current_aen.members.locale)) {
return (0);
} else {
prior_aen.members.locale |= current_aen.members.locale;
- if (prior_aen.members.class
- < current_aen.members.class)
- current_aen.members.class =
- prior_aen.members.class;
+ if (prior_aen.members.evt_class
+ < current_aen.members.evt_class)
+ current_aen.members.evt_class =
+ prior_aen.members.evt_class;
mfi_abort(sc, sc->mfi_aen_cm);
}
}
@@ -1199,7 +1199,7 @@ mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
class_locale.members.reserved = 0;
class_locale.members.locale = mfi_event_locale;
- class_locale.members.class = mfi_event_class;
+ class_locale.members.evt_class = mfi_event_class;
size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
* (MAX_EVENTS - 1);
diff --git a/sys/dev/mfi/mfireg.h b/sys/dev/mfi/mfireg.h
index e08a16d..efee827 100644
--- a/sys/dev/mfi/mfireg.h
+++ b/sys/dev/mfi/mfireg.h
@@ -719,7 +719,7 @@ union mfi_evt {
struct {
uint16_t locale;
uint8_t reserved;
- int8_t class;
+ int8_t evt_class;
} members;
uint32_t word;
} __packed;
@@ -755,7 +755,7 @@ struct mfi_evt_detail {
uint32_t seq;
uint32_t time;
uint32_t code;
- union mfi_evt class;
+ union mfi_evt evt_class;
uint8_t arg_type;
uint8_t reserved1[15];
diff --git a/sys/dev/mmc/mmc.c b/sys/dev/mmc/mmc.c
index 45ddd56..6ff47c3 100644
--- a/sys/dev/mmc/mmc.c
+++ b/sys/dev/mmc/mmc.c
@@ -1445,37 +1445,37 @@ mmc_read_ivar(device_t bus, device_t child, int which, uintptr_t *result)
default:
return (EINVAL);
case MMC_IVAR_DSR_IMP:
- *(int *)result = ivar->csd.dsr_imp;
+ *result = ivar->csd.dsr_imp;
break;
case MMC_IVAR_MEDIA_SIZE:
- *(off_t *)result = ivar->sec_count;
+ *result = ivar->sec_count;
break;
case MMC_IVAR_RCA:
- *(int *)result = ivar->rca;
+ *result = ivar->rca;
break;
case MMC_IVAR_SECTOR_SIZE:
- *(int *)result = MMC_SECTOR_SIZE;
+ *result = MMC_SECTOR_SIZE;
break;
case MMC_IVAR_TRAN_SPEED:
- *(int *)result = mmcbr_get_clock(bus);
+ *result = mmcbr_get_clock(bus);
break;
case MMC_IVAR_READ_ONLY:
- *(int *)result = ivar->read_only;
+ *result = ivar->read_only;
break;
case MMC_IVAR_HIGH_CAP:
- *(int *)result = ivar->high_cap;
+ *result = ivar->high_cap;
break;
case MMC_IVAR_CARD_TYPE:
- *(int *)result = ivar->mode;
+ *result = ivar->mode;
break;
case MMC_IVAR_BUS_WIDTH:
- *(int *)result = ivar->bus_width;
+ *result = ivar->bus_width;
break;
case MMC_IVAR_ERASE_SECTOR:
- *(int *)result = ivar->erase_sector;
+ *result = ivar->erase_sector;
break;
case MMC_IVAR_MAX_DATA:
- *(int *)result = mmcbr_get_max_data(bus);
+ *result = mmcbr_get_max_data(bus);
break;
}
return (0);
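The ivar accessors above now store through the uintptr_t pointer directly. A
standalone sketch (a little-endian LP64 machine is assumed) of why the old
narrow-cast stores were unsafe:

    #include <stdio.h>
    #include <stdint.h>

    int
    main(void)
    {
    	uintptr_t result = (uintptr_t)-1;	/* stale bits from the caller */

    	*(int *)&result = 42;	/* old style: fills only 4 of 8 bytes */
    	printf("via int cast: %#lx\n", (unsigned long)result);

    	result = 42;		/* full-width store, as mmc_read_ivar now does */
    	printf("full width:   %#lx\n", (unsigned long)result);
    	return (0);
    }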
diff --git a/sys/dev/mmc/mmcvar.h b/sys/dev/mmc/mmcvar.h
index 9126439..a28d3ac 100644
--- a/sys/dev/mmc/mmcvar.h
+++ b/sys/dev/mmc/mmcvar.h
@@ -79,7 +79,7 @@ enum mmc_device_ivars {
__BUS_ACCESSOR(mmc, var, MMC, ivar, type)
MMC_ACCESSOR(dsr_imp, DSR_IMP, int)
-MMC_ACCESSOR(media_size, MEDIA_SIZE, off_t)
+MMC_ACCESSOR(media_size, MEDIA_SIZE, long)
MMC_ACCESSOR(rca, RCA, int)
MMC_ACCESSOR(sector_size, SECTOR_SIZE, int)
MMC_ACCESSOR(tran_speed, TRAN_SPEED, int)
diff --git a/sys/dev/msk/if_msk.c b/sys/dev/msk/if_msk.c
index 2adbf1c..be4ac5d 100644
--- a/sys/dev/msk/if_msk.c
+++ b/sys/dev/msk/if_msk.c
@@ -566,7 +566,7 @@ msk_miibus_statchg(device_t dev)
msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
/* Disable Rx/Tx MAC. */
gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
- if ((GM_GPCR_RX_ENA | GM_GPCR_TX_ENA) != 0) {
+ if ((gmac & (GM_GPCR_RX_ENA | GM_GPCR_TX_ENA)) != 0) {
gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
/* Read again to ensure writing. */
@@ -1018,7 +1018,7 @@ msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN)
error = EINVAL;
else if (ifp->if_mtu != ifr->ifr_mtu) {
- if (ifr->ifr_mtu > ETHERMTU) {
+ if (ifr->ifr_mtu > ETHERMTU) {
if ((sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) {
error = EINVAL;
MSK_IF_UNLOCK(sc_if);
@@ -1636,7 +1636,7 @@ msk_attach(device_t dev)
* this workaround does not work so disable checksum offload
* for VLAN interface.
*/
- ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO;
+ ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO;
/*
* Enable Rx checksum offloading for VLAN tagged frames
* if controller support new descriptor format.
@@ -1921,7 +1921,8 @@ mskc_attach(device_t dev)
error = ENXIO;
goto fail;
}
- mmd = malloc(sizeof(struct msk_mii_data), M_DEVBUF, M_WAITOK | M_ZERO);
+ mmd = malloc(sizeof(struct msk_mii_data), M_DEVBUF, M_WAITOK |
+ M_ZERO);
if (mmd == NULL) {
device_printf(dev, "failed to allocate memory for "
"ivars of PORT_B\n");
@@ -1930,9 +1931,9 @@ mskc_attach(device_t dev)
}
mmd->port = MSK_PORT_B;
mmd->pmd = sc->msk_pmd;
- if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
+ if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
mmd->mii_flags |= MIIF_HAVEFIBER;
- if (sc->msk_pmd == 'P')
+ if (sc->msk_pmd == 'P')
mmd->mii_flags |= MIIF_HAVEFIBER | MIIF_MACPRIV0;
device_set_ivars(sc->msk_devs[MSK_PORT_B], mmd);
}
@@ -3741,10 +3742,10 @@ msk_init_locked(struct msk_if_softc *sc_if)
ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
}
- /* GMAC Control reset. */
- CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_SET);
- CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_CLR);
- CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_F_LOOPB_OFF);
+ /* GMAC Control reset. */
+ CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_SET);
+ CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_CLR);
+ CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_F_LOOPB_OFF);
if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
sc->msk_hw_id == CHIP_ID_YUKON_SUPR)
CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL),
@@ -3854,13 +3855,13 @@ msk_init_locked(struct msk_if_softc *sc_if)
msk_set_tx_stfwd(sc_if);
}
- if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
- sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
- /* Disable dynamic watermark - from Linux. */
- reg = CSR_READ_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA));
- reg &= ~0x03;
- CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA), reg);
- }
+ if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
+ sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
+ /* Disable dynamic watermark - from Linux. */
+ reg = CSR_READ_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA));
+ reg &= ~0x03;
+ CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA), reg);
+ }
/*
* Disable Force Sync bit and Alloc bit in Tx RAM interface
diff --git a/sys/dev/mvs/mvs.c b/sys/dev/mvs/mvs.c
index 5dbe30c..54808c5 100644
--- a/sys/dev/mvs/mvs.c
+++ b/sys/dev/mvs/mvs.c
@@ -1738,13 +1738,6 @@ mvs_end_transaction(struct mvs_slot *slot, enum mvs_err_type et)
ch->numhslots++;
} else
xpt_done(ccb);
- /* Unfreeze frozen command. */
- if (ch->frozen && !mvs_check_collision(dev, ch->frozen)) {
- union ccb *fccb = ch->frozen;
- ch->frozen = NULL;
- mvs_begin_transaction(dev, fccb);
- xpt_release_simq(ch->sim, TRUE);
- }
/* If we have no other active commands, ... */
if (ch->rslots == 0) {
/* if there was fatal error - reset port. */
@@ -1764,6 +1757,13 @@ mvs_end_transaction(struct mvs_slot *slot, enum mvs_err_type et)
} else if ((ch->rslots & ~ch->toslots) == 0 &&
et != MVS_ERR_TIMEOUT)
mvs_rearm_timeout(dev);
+ /* Unfreeze frozen command. */
+ if (ch->frozen && !mvs_check_collision(dev, ch->frozen)) {
+ union ccb *fccb = ch->frozen;
+ ch->frozen = NULL;
+ mvs_begin_transaction(dev, fccb);
+ xpt_release_simq(ch->sim, TRUE);
+ }
/* Start PM timer. */
if (ch->numrslots == 0 && ch->pm_level > 3 &&
(ch->curr[ch->pm_present ? 15 : 0].caps & CTS_SATA_CAPS_D_PMREQ)) {
@@ -2080,7 +2080,8 @@ mvs_softreset(device_t dev, union ccb *ccb)
{
struct mvs_channel *ch = device_get_softc(dev);
int port = ccb->ccb_h.target_id & 0x0f;
- int i;
+ int i, stuck;
+ uint8_t status;
mvs_set_edma_mode(dev, MVS_EDMA_OFF);
ATA_OUTB(ch->r_mem, SATA_SATAICTL, port << SATA_SATAICTL_PMPTX_SHIFT);
@@ -2089,12 +2090,35 @@ mvs_softreset(device_t dev, union ccb *ccb)
ATA_OUTB(ch->r_mem, ATA_CONTROL, 0);
ccb->ccb_h.status &= ~CAM_STATUS_MASK;
/* Wait for clearing busy status. */
- if ((i = mvs_wait(dev, 0, ATA_S_BUSY | ATA_S_DRQ, ccb->ccb_h.timeout)) < 0) {
+ if ((i = mvs_wait(dev, 0, ATA_S_BUSY, ccb->ccb_h.timeout)) < 0) {
ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
+ stuck = 1;
} else {
- ccb->ccb_h.status |= CAM_REQ_CMP;
+ status = mvs_getstatus(dev, 0);
+ if (status & ATA_S_ERROR)
+ ccb->ccb_h.status |= CAM_ATA_STATUS_ERROR;
+ else
+ ccb->ccb_h.status |= CAM_REQ_CMP;
+ if (status & ATA_S_DRQ)
+ stuck = 1;
+ else
+ stuck = 0;
}
mvs_tfd_read(dev, ccb);
+
+ /*
+ * XXX: If some device on PMP failed to soft-reset,
+ * try to recover by sending dummy soft-reset to PMP.
+ */
+ if (stuck && ch->pm_present && port != 15) {
+ ATA_OUTB(ch->r_mem, SATA_SATAICTL,
+ 15 << SATA_SATAICTL_PMPTX_SHIFT);
+ ATA_OUTB(ch->r_mem, ATA_CONTROL, ATA_A_RESET);
+ DELAY(10000);
+ ATA_OUTB(ch->r_mem, ATA_CONTROL, 0);
+ mvs_wait(dev, 0, ATA_S_BUSY | ATA_S_DRQ, ccb->ccb_h.timeout);
+ }
+
xpt_done(ccb);
}
diff --git a/sys/dev/nfe/if_nfe.c b/sys/dev/nfe/if_nfe.c
index 6cdfa34..28a3c01 100644
--- a/sys/dev/nfe/if_nfe.c
+++ b/sys/dev/nfe/if_nfe.c
@@ -1889,7 +1889,7 @@ nfe_int_task(void *arg, int pending)
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
NFE_UNLOCK(sc);
- nfe_enable_intr(sc);
+ nfe_disable_intr(sc);
return;
}
diff --git a/sys/dev/pccard/pccard.c b/sys/dev/pccard/pccard.c
index 00cd1dc..1de571c 100644
--- a/sys/dev/pccard/pccard.c
+++ b/sys/dev/pccard/pccard.c
@@ -1405,8 +1405,8 @@ pccard_ccr_read_impl(device_t brdev, device_t child, uint32_t offset,
struct pccard_ivar *devi = PCCARD_IVAR(child);
*val = pccard_ccr_read(devi->pf, offset);
- device_printf(child, "ccr_read of %#x (%#x) is %#x\n", offset,
- devi->pf->pf_ccr_offset, *val);
+ DEVPRINTF((child, "ccr_read of %#x (%#x) is %#x\n", offset,
+ devi->pf->pf_ccr_offset, *val));
return 0;
}
@@ -1421,8 +1421,8 @@ pccard_ccr_write_impl(device_t brdev, device_t child, uint32_t offset,
* Can't use pccard_ccr_write since client drivers may access
* registers not contained in the 'mask' if they are non-standard.
*/
- device_printf(child, "ccr_write of %#x to %#x (%#x)\n", val, offset,
- devi->pf->pf_ccr_offset);
+ DEVPRINTF((child, "ccr_write of %#x to %#x (%#x)\n", val, offset,
+ devi->pf->pf_ccr_offset));
bus_space_write_1(pf->pf_ccrt, pf->pf_ccrh, pf->pf_ccr_offset + offset,
val);
return 0;
diff --git a/sys/dev/pccbb/pccbb.c b/sys/dev/pccbb/pccbb.c
index bbb9eae..3c60f37 100644
--- a/sys/dev/pccbb/pccbb.c
+++ b/sys/dev/pccbb/pccbb.c
@@ -800,24 +800,36 @@ cbb_power(device_t brdev, int volts)
* We have a shortish timeout of 500ms here. Some bridges do
* not generate a POWER_CYCLE event for 16-bit cards. In
* those cases, we have to cope the best we can, and having
- * only a short delay is better than the alternatives.
+ * only a short delay is better than the alternatives. Other
+ * bridges raise the power cycle signal a bit before the card
+ * is really ready. We deal with those below.
*/
sane = 10;
while (!(cbb_get(sc, CBB_SOCKET_STATE) & CBB_STATE_POWER_CYCLE) &&
cnt == sc->powerintr && sane-- > 0)
msleep(&sc->powerintr, &sc->mtx, 0, "-", hz / 20);
mtx_unlock(&sc->mtx);
+
+ /*
+ * Relax for 100ms. Some bridges appear to assert this signal
+ * right away, but before the card has stabilized. Other
+ * cards need more time to come up reliably.
+ * Experiments with troublesome setups show this to be a
+ * "cheap" way to enhance reliability. We need not do this for
+ * "off" since we don't touch the card after we turn it off.
+ */
+ pause("cbbPwr", min(hz / 10, 1));
+
/*
* The TOPIC95B requires a little bit extra time to get its
* act together, so delay for an additional 100ms. Also as
* documented below, it doesn't seem to set the POWER_CYCLE
* bit, so don't whine if it never came on.
*/
- if (sc->chipset == CB_TOPIC95) {
+ if (sc->chipset == CB_TOPIC95)
pause("cbb95B", hz / 10);
- } else if (sane <= 0) {
+ else if (sane <= 0)
device_printf(sc->dev, "power timeout, doom?\n");
- }
}
/*
diff --git a/sys/dev/pci/pci.c b/sys/dev/pci/pci.c
index 22046c1..9cd5a1c 100644
--- a/sys/dev/pci/pci.c
+++ b/sys/dev/pci/pci.c
@@ -2576,6 +2576,17 @@ pci_add_map(device_t bus, device_t dev, int reg, struct resource_list *rl,
uint16_t cmd;
struct resource *res;
+ /*
+ * The BAR may already exist if the device is a CardBus card
+ * whose CIS is stored in this BAR.
+ */
+ pm = pci_find_bar(dev, reg);
+ if (pm != NULL) {
+ maprange = pci_maprange(pm->pm_value);
+ barlen = maprange == 64 ? 2 : 1;
+ return (barlen);
+ }
+
pci_read_bar(dev, reg, &map, &testval);
if (PCI_BAR_MEM(map)) {
type = SYS_RES_MEMORY;
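
A note on the early return added above: pci_maprange() distinguishes 32- and 64-bit BARs, and a 64-bit BAR occupies two consecutive 32-bit config registers, so the caller must advance by two register slots. A minimal standalone restatement of that bookkeeping (not the kernel code):

#include <assert.h>

/* Sketch: how many 32-bit BAR registers a mapping consumes. */
static int
bar_reg_count(int maprange)
{
	/* A 64-bit memory BAR spans two config registers; all else one. */
	return (maprange == 64 ? 2 : 1);
}

int
main(void)
{
	assert(bar_reg_count(64) == 2);
	assert(bar_reg_count(32) == 1);
	return (0);
}
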
diff --git a/sys/dev/pci/pci_pci.c b/sys/dev/pci/pci_pci.c
index f68973b..da8465c 100644
--- a/sys/dev/pci/pci_pci.c
+++ b/sys/dev/pci/pci_pci.c
@@ -916,7 +916,8 @@ pcib_grow_window(struct pcib_softc *sc, struct pcib_window *w, int type,
/* Move end_free down until it is properly aligned. */
end_free &= ~(align - 1);
- front = end_free - count;
+ end_free--;
+ front = end_free - (count - 1);
/*
* The resource would now be allocated at (front,
@@ -944,7 +945,7 @@ pcib_grow_window(struct pcib_softc *sc, struct pcib_window *w, int type,
/* Move start_free up until it is properly aligned. */
start_free = roundup2(start_free, align);
- back = start_free + count;
+ back = start_free + count - 1;
/*
* The resource would now be allocated at (start_free,
@@ -957,7 +958,7 @@ pcib_grow_window(struct pcib_softc *sc, struct pcib_window *w, int type,
if (bootverbose)
printf("\tback candidate range: %#lx-%#lx\n",
start_free, back);
- back = roundup2(back, w->step) - 1;
+ back = roundup2(back + 1, w->step) - 1;
back -= rman_get_end(w->res);
} else
back = 0;
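
The changed lines above move pcib_grow_window()'s candidate math to inclusive end addresses: a count-byte range that ends at end_free starts at end_free - (count - 1), and one that starts at start_free ends at start_free + count - 1, which is also why the rounding now happens on back + 1. A standalone sketch of the invariant, not the kernel code:

#include <assert.h>
#include <stdint.h>

/* Inclusive-address helpers: a range [front, back] holds count bytes. */
static uint64_t
range_front(uint64_t back, uint64_t count)
{
	return (back - (count - 1));
}

static uint64_t
range_back(uint64_t front, uint64_t count)
{
	return (front + count - 1);
}

int
main(void)
{
	/* A 0x1000-byte range ending at 0x3fff starts at 0x3000... */
	assert(range_front(0x3fff, 0x1000) == 0x3000);
	/* ...and one starting at 0x3000 ends at 0x3fff, not 0x4000. */
	assert(range_back(0x3000, 0x1000) == 0x3fff);
	return (0);
}
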
diff --git a/sys/dev/puc/puc.c b/sys/dev/puc/puc.c
index b6fa3c5..9bb3ceb 100644
--- a/sys/dev/puc/puc.c
+++ b/sys/dev/puc/puc.c
@@ -726,3 +726,41 @@ puc_bus_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
}
return (0);
}
+
+int
+puc_bus_print_child(device_t dev, device_t child)
+{
+ struct puc_port *port;
+ int retval;
+
+ port = device_get_ivars(child);
+ retval = 0;
+
+ retval += bus_print_child_header(dev, child);
+ retval += printf(" at port %d", port->p_nr);
+ retval += bus_print_child_footer(dev, child);
+
+ return (retval);
+}
+
+int
+puc_bus_child_location_str(device_t dev, device_t child, char *buf,
+ size_t buflen)
+{
+ struct puc_port *port;
+
+ port = device_get_ivars(child);
+ snprintf(buf, buflen, "port=%d", port->p_nr);
+ return (0);
+}
+
+int
+puc_bus_child_pnpinfo_str(device_t dev, device_t child, char *buf,
+ size_t buflen)
+{
+ struct puc_port *port;
+
+ port = device_get_ivars(child);
+ snprintf(buf, buflen, "type=%d", port->p_type);
+ return (0);
+}
diff --git a/sys/dev/puc/puc_bfe.h b/sys/dev/puc/puc_bfe.h
index f6d69c4..c67fab5 100644
--- a/sys/dev/puc/puc_bfe.h
+++ b/sys/dev/puc/puc_bfe.h
@@ -82,9 +82,12 @@ int puc_bfe_attach(device_t);
int puc_bfe_detach(device_t);
int puc_bfe_probe(device_t, const struct puc_cfg *);
+int puc_bus_child_location_str(device_t, device_t, char *, size_t);
+int puc_bus_child_pnpinfo_str(device_t, device_t, char *, size_t);
struct resource *puc_bus_alloc_resource(device_t, device_t, int, int *, u_long,
u_long, u_long, u_int);
int puc_bus_get_resource(device_t, device_t, int, int, u_long *, u_long *);
+int puc_bus_print_child(device_t, device_t);
int puc_bus_read_ivar(device_t, device_t, int, uintptr_t *);
int puc_bus_release_resource(device_t, device_t, int, int, struct resource *);
int puc_bus_setup_intr(device_t, device_t, struct resource *, int,
diff --git a/sys/dev/puc/puc_pccard.c b/sys/dev/puc/puc_pccard.c
index 2cb9513..63d5787 100644
--- a/sys/dev/puc/puc_pccard.c
+++ b/sys/dev/puc/puc_pccard.c
@@ -82,7 +82,9 @@ static device_method_t puc_pccard_methods[] = {
DEVMETHOD(bus_read_ivar, puc_bus_read_ivar),
DEVMETHOD(bus_setup_intr, puc_bus_setup_intr),
DEVMETHOD(bus_teardown_intr, puc_bus_teardown_intr),
- DEVMETHOD(bus_print_child, bus_generic_print_child),
+ DEVMETHOD(bus_print_child, puc_bus_print_child),
+ DEVMETHOD(bus_child_pnpinfo_str, puc_bus_child_pnpinfo_str),
+ DEVMETHOD(bus_child_location_str, puc_bus_child_location_str),
DEVMETHOD(bus_driver_added, bus_generic_driver_added),
{ 0, 0 }
};
diff --git a/sys/dev/puc/puc_pci.c b/sys/dev/puc/puc_pci.c
index 9a05b66..8c14717 100644
--- a/sys/dev/puc/puc_pci.c
+++ b/sys/dev/puc/puc_pci.c
@@ -132,7 +132,9 @@ static device_method_t puc_pci_methods[] = {
DEVMETHOD(bus_read_ivar, puc_bus_read_ivar),
DEVMETHOD(bus_setup_intr, puc_bus_setup_intr),
DEVMETHOD(bus_teardown_intr, puc_bus_teardown_intr),
- DEVMETHOD(bus_print_child, bus_generic_print_child),
+ DEVMETHOD(bus_print_child, puc_bus_print_child),
+ DEVMETHOD(bus_child_pnpinfo_str, puc_bus_child_pnpinfo_str),
+ DEVMETHOD(bus_child_location_str, puc_bus_child_location_str),
DEVMETHOD(bus_driver_added, bus_generic_driver_added),
{ 0, 0 }
};
diff --git a/sys/dev/puc/pucdata.c b/sys/dev/puc/pucdata.c
index 83b02ea..2b38d9b 100644
--- a/sys/dev/puc/pucdata.c
+++ b/sys/dev/puc/pucdata.c
@@ -48,15 +48,15 @@ __FBSDID("$FreeBSD$");
#include <dev/puc/puc_bfe.h>
static puc_config_f puc_config_amc;
-static puc_config_f puc_config_cronyx;
static puc_config_f puc_config_diva;
+static puc_config_f puc_config_exar;
static puc_config_f puc_config_icbook;
+static puc_config_f puc_config_oxford_pcie;
static puc_config_f puc_config_quatech;
static puc_config_f puc_config_syba;
static puc_config_f puc_config_siig;
static puc_config_f puc_config_timedia;
static puc_config_f puc_config_titan;
-static puc_config_f puc_config_oxford_pcie;
const struct puc_cfg puc_pci_devices[] = {
@@ -548,11 +548,25 @@ const struct puc_cfg puc_pci_devices[] = {
PUC_PORT_8S, 0x18, 0, 8,
},
+ { 0x13a8, 0x0152, 0xffff, 0,
+ "Exar XR17C/D152",
+ DEFAULT_RCLK * 8,
+ PUC_PORT_2S, 0x10, 0, -1,
+ .config_function = puc_config_exar
+ },
+
+ { 0x13a8, 0x0154, 0xffff, 0,
+ "Exar XR17C154",
+ DEFAULT_RCLK * 8,
+ PUC_PORT_4S, 0x10, 0, -1,
+ .config_function = puc_config_exar
+ },
+
{ 0x13a8, 0x0158, 0xffff, 0,
- "Cronyx Omega2-PCI",
+ "Exar XR17C158",
DEFAULT_RCLK * 8,
PUC_PORT_8S, 0x10, 0, -1,
- .config_function = puc_config_cronyx
+ .config_function = puc_config_exar
},
{ 0x13a8, 0x0258, 0xffff, 0,
@@ -1014,28 +1028,28 @@ puc_config_amc(struct puc_softc *sc, enum puc_cfg_cmd cmd, int port,
}
static int
-puc_config_cronyx(struct puc_softc *sc, enum puc_cfg_cmd cmd, int port,
+puc_config_diva(struct puc_softc *sc, enum puc_cfg_cmd cmd, int port,
intptr_t *res)
{
+ const struct puc_cfg *cfg = sc->sc_cfg;
+
if (cmd == PUC_CFG_GET_OFS) {
- *res = port * 0x200;
+ if (cfg->subdevice == 0x1282) /* Everest SP */
+ port <<= 1;
+ else if (cfg->subdevice == 0x104b) /* Maestro SP2 */
+ port = (port == 3) ? 4 : port;
+ *res = port * 8 + ((port > 2) ? 0x18 : 0);
return (0);
}
return (ENXIO);
}
static int
-puc_config_diva(struct puc_softc *sc, enum puc_cfg_cmd cmd, int port,
+puc_config_exar(struct puc_softc *sc, enum puc_cfg_cmd cmd, int port,
intptr_t *res)
{
- const struct puc_cfg *cfg = sc->sc_cfg;
-
if (cmd == PUC_CFG_GET_OFS) {
- if (cfg->subdevice == 0x1282) /* Everest SP */
- port <<= 1;
- else if (cfg->subdevice == 0x104b) /* Maestro SP2 */
- port = (port == 3) ? 4 : port;
- *res = port * 8 + ((port > 2) ? 0x18 : 0);
+ *res = port * 0x200;
return (0);
}
return (ENXIO);
@@ -1292,6 +1306,12 @@ puc_config_timedia(struct puc_softc *sc, enum puc_cfg_cmd cmd, int port,
uint16_t subdev;
switch (cmd) {
+ case PUC_CFG_GET_CLOCK:
+ if (port < 2)
+ *res = DEFAULT_RCLK * 8;
+ else
+ *res = DEFAULT_RCLK;
+ return (0);
case PUC_CFG_GET_DESC:
snprintf(desc, sizeof(desc),
"Timedia technology %d Port Serial", (int)sc->sc_cfg_data);
@@ -1346,14 +1366,12 @@ puc_config_oxford_pcie(struct puc_softc *sc, enum puc_cfg_cmd cmd, int port,
bar = puc_get_bar(sc, cfg->rid);
if (bar == NULL)
return (ENXIO);
-
for (idx = 0; idx < sc->sc_nports; idx++) {
- value = bus_read_1(bar->b_res, 0x1000 + (idx << 9)
- + 0x92);
+ value = bus_read_1(bar->b_res, 0x1000 + (idx << 9) +
+ 0x92);
bus_write_1(bar->b_res, 0x1000 + (idx << 9) + 0x92,
- value | 0x10);
+ value | 0x10);
}
-
return (0);
case PUC_CFG_GET_LEN:
*res = 0x200;
diff --git a/sys/dev/safe/safe.c b/sys/dev/safe/safe.c
index ac97098..18ef5e5 100644
--- a/sys/dev/safe/safe.c
+++ b/sys/dev/safe/safe.c
@@ -1580,9 +1580,12 @@ safe_callback(struct safe_softc *sc, struct safe_ringentry *re)
* SHA-1 ICV's are byte-swapped; fix 'em up
* before copy them to their destination.
*/
- bswap32(re->re_sastate.sa_saved_indigest[0]);
- bswap32(re->re_sastate.sa_saved_indigest[1]);
- bswap32(re->re_sastate.sa_saved_indigest[2]);
+ re->re_sastate.sa_saved_indigest[0] =
+ bswap32(re->re_sastate.sa_saved_indigest[0]);
+ re->re_sastate.sa_saved_indigest[1] =
+ bswap32(re->re_sastate.sa_saved_indigest[1]);
+ re->re_sastate.sa_saved_indigest[2] =
+ bswap32(re->re_sastate.sa_saved_indigest[2]);
}
crypto_copyback(crp->crp_flags, crp->crp_buf,
crd->crd_inject,
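
The fix above matters because bswap32() returns the swapped value rather than modifying its argument in place, so the original calls quietly discarded their results. A self-contained demonstration, with a local byte-swap helper standing in for the system's bswap32():

#include <assert.h>
#include <stdint.h>

/* Local stand-in for bswap32(): value in, swapped value out. */
static uint32_t
my_bswap32(uint32_t v)
{
	return ((v >> 24) | ((v >> 8) & 0xff00) |
	    ((v << 8) & 0xff0000) | (v << 24));
}

int
main(void)
{
	uint32_t digest = 0x01020304;

	my_bswap32(digest);		/* no effect: the result is discarded */
	assert(digest == 0x01020304);
	digest = my_bswap32(digest);	/* the assignment is what swaps it */
	assert(digest == 0x04030201);
	return (0);
}
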
diff --git a/sys/dev/sdhci/sdhci.c b/sys/dev/sdhci/sdhci.c
index 6bbc25f..24cba57 100644
--- a/sys/dev/sdhci/sdhci.c
+++ b/sys/dev/sdhci/sdhci.c
@@ -1443,46 +1443,46 @@ sdhci_read_ivar(device_t bus, device_t child, int which, uintptr_t *result)
default:
return (EINVAL);
case MMCBR_IVAR_BUS_MODE:
- *(int *)result = slot->host.ios.bus_mode;
+ *result = slot->host.ios.bus_mode;
break;
case MMCBR_IVAR_BUS_WIDTH:
- *(int *)result = slot->host.ios.bus_width;
+ *result = slot->host.ios.bus_width;
break;
case MMCBR_IVAR_CHIP_SELECT:
- *(int *)result = slot->host.ios.chip_select;
+ *result = slot->host.ios.chip_select;
break;
case MMCBR_IVAR_CLOCK:
- *(int *)result = slot->host.ios.clock;
+ *result = slot->host.ios.clock;
break;
case MMCBR_IVAR_F_MIN:
- *(int *)result = slot->host.f_min;
+ *result = slot->host.f_min;
break;
case MMCBR_IVAR_F_MAX:
- *(int *)result = slot->host.f_max;
+ *result = slot->host.f_max;
break;
case MMCBR_IVAR_HOST_OCR:
- *(int *)result = slot->host.host_ocr;
+ *result = slot->host.host_ocr;
break;
case MMCBR_IVAR_MODE:
- *(int *)result = slot->host.mode;
+ *result = slot->host.mode;
break;
case MMCBR_IVAR_OCR:
- *(int *)result = slot->host.ocr;
+ *result = slot->host.ocr;
break;
case MMCBR_IVAR_POWER_MODE:
- *(int *)result = slot->host.ios.power_mode;
+ *result = slot->host.ios.power_mode;
break;
case MMCBR_IVAR_VDD:
- *(int *)result = slot->host.ios.vdd;
+ *result = slot->host.ios.vdd;
break;
case MMCBR_IVAR_CAPS:
- *(int *)result = slot->host.caps;
+ *result = slot->host.caps;
break;
case MMCBR_IVAR_TIMING:
- *(int *)result = slot->host.ios.timing;
+ *result = slot->host.ios.timing;
break;
case MMCBR_IVAR_MAX_DATA:
- *(int *)result = 65535;
+ *result = 65535;
break;
}
return (0);
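
The *(int *)result change above is a correctness fix on LP64 machines: result points at a full uintptr_t, and storing through an int * writes only four of its eight bytes, leaving the rest stale. A standalone illustration of the hazard (it deliberately mirrors the old, buggy store pattern):

#include <assert.h>
#include <stdint.h>
#include <string.h>

int
main(void)
{
	uintptr_t result;

	/* Simulate stale garbage in the ivar slot. */
	memset(&result, 0xff, sizeof(result));

	*(int *)&result = 42;	/* writes only sizeof(int) bytes */
	/* On LP64 the upper 32 bits may still hold 0xff garbage here. */

	result = 42;		/* the fixed form writes the whole word */
	assert(result == 42);
	return (0);
}
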
diff --git a/sys/dev/siis/siis.c b/sys/dev/siis/siis.c
index 01edae3..a7b018a 100644
--- a/sys/dev/siis/siis.c
+++ b/sys/dev/siis/siis.c
@@ -1178,11 +1178,22 @@ siis_timeout(struct siis_slot *slot)
{
device_t dev = slot->dev;
struct siis_channel *ch = device_get_softc(dev);
+ union ccb *ccb = slot->ccb;
mtx_assert(&ch->mtx, MA_OWNED);
/* Check for stale timeout. */
if (slot->state < SIIS_SLOT_RUNNING)
return;
+
+ /* Handle soft-reset timeouts without doing hard-reset. */
+ if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
+ (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) &&
+ (ccb->ataio.cmd.control & ATA_A_RESET)) {
+ xpt_freeze_simq(ch->sim, ch->numrslots);
+ siis_end_transaction(slot, SIIS_ERR_TFE);
+ return;
+ }
+
device_printf(dev, "Timeout on slot %d\n", slot->slot);
device_printf(dev, "%s is %08x ss %08x rs %08x es %08x sts %08x serr %08x\n",
__func__, ATA_INL(ch->r_mem, SIIS_P_IS),
@@ -1331,13 +1342,6 @@ siis_end_transaction(struct siis_slot *slot, enum siis_err_type et)
ch->numhslots++;
} else
xpt_done(ccb);
- /* Unfreeze frozen command. */
- if (ch->frozen && !siis_check_collision(dev, ch->frozen)) {
- union ccb *fccb = ch->frozen;
- ch->frozen = NULL;
- siis_begin_transaction(dev, fccb);
- xpt_release_simq(ch->sim, TRUE);
- }
/* If we have no other active commands, ... */
if (ch->rslots == 0) {
/* if there were timeouts or fatal error - reset port. */
@@ -1355,6 +1359,13 @@ siis_end_transaction(struct siis_slot *slot, enum siis_err_type et)
} else if ((ch->rslots & ~ch->toslots) == 0 &&
et != SIIS_ERR_TIMEOUT)
siis_rearm_timeout(dev);
+ /* Unfreeze frozen command. */
+ if (ch->frozen && !siis_check_collision(dev, ch->frozen)) {
+ union ccb *fccb = ch->frozen;
+ ch->frozen = NULL;
+ siis_begin_transaction(dev, fccb);
+ xpt_release_simq(ch->sim, TRUE);
+ }
}
static void
diff --git a/sys/dev/sound/pci/hda/hdac.c b/sys/dev/sound/pci/hda/hdac.c
index 7af5303..bb0f385 100644
--- a/sys/dev/sound/pci/hda/hdac.c
+++ b/sys/dev/sound/pci/hda/hdac.c
@@ -754,7 +754,17 @@ static const struct {
#define HDA_CODEC_CX20561 HDA_CODEC_CONSTRUCT(CONEXANT, 0x5051)
#define HDA_CODEC_CX20582 HDA_CODEC_CONSTRUCT(CONEXANT, 0x5066)
#define HDA_CODEC_CX20583 HDA_CODEC_CONSTRUCT(CONEXANT, 0x5067)
+#define HDA_CODEC_CX20584 HDA_CODEC_CONSTRUCT(CONEXANT, 0x5068)
#define HDA_CODEC_CX20585 HDA_CODEC_CONSTRUCT(CONEXANT, 0x5069)
+#define HDA_CODEC_CX20590 HDA_CODEC_CONSTRUCT(CONEXANT, 0x506e)
+#define HDA_CODEC_CX20631 HDA_CODEC_CONSTRUCT(CONEXANT, 0x5097)
+#define HDA_CODEC_CX20632 HDA_CODEC_CONSTRUCT(CONEXANT, 0x5098)
+#define HDA_CODEC_CX20641 HDA_CODEC_CONSTRUCT(CONEXANT, 0x50a1)
+#define HDA_CODEC_CX20642 HDA_CODEC_CONSTRUCT(CONEXANT, 0x50a2)
+#define HDA_CODEC_CX20651 HDA_CODEC_CONSTRUCT(CONEXANT, 0x50ab)
+#define HDA_CODEC_CX20652 HDA_CODEC_CONSTRUCT(CONEXANT, 0x50ac)
+#define HDA_CODEC_CX20664 HDA_CODEC_CONSTRUCT(CONEXANT, 0x50b8)
+#define HDA_CODEC_CX20665 HDA_CODEC_CONSTRUCT(CONEXANT, 0x50b9)
#define HDA_CODEC_CXXXXX HDA_CODEC_CONSTRUCT(CONEXANT, 0xffff)
/* VIA */
@@ -826,12 +836,13 @@ static const struct {
#define HDA_CODEC_NVIDIAXXXX HDA_CODEC_CONSTRUCT(NVIDIA, 0xffff)
/* INTEL */
-#define HDA_CODEC_INTELG45_1 HDA_CODEC_CONSTRUCT(INTEL, 0x2801)
-#define HDA_CODEC_INTELG45_2 HDA_CODEC_CONSTRUCT(INTEL, 0x2802)
-#define HDA_CODEC_INTELG45_3 HDA_CODEC_CONSTRUCT(INTEL, 0x2803)
-#define HDA_CODEC_INTELG45_4 HDA_CODEC_CONSTRUCT(INTEL, 0x2804)
-#define HDA_CODEC_INTELG45_5 HDA_CODEC_CONSTRUCT(INTEL, 0x29fb)
-#define HDA_CODEC_INTELQ57 HDA_CODEC_CONSTRUCT(INTEL, 0x0054)
+#define HDA_CODEC_INTELIP HDA_CODEC_CONSTRUCT(INTEL, 0x0054)
+#define HDA_CODEC_INTELBL HDA_CODEC_CONSTRUCT(INTEL, 0x2801)
+#define HDA_CODEC_INTELCA HDA_CODEC_CONSTRUCT(INTEL, 0x2802)
+#define HDA_CODEC_INTELEL HDA_CODEC_CONSTRUCT(INTEL, 0x2803)
+#define HDA_CODEC_INTELIP2 HDA_CODEC_CONSTRUCT(INTEL, 0x2804)
+#define HDA_CODEC_INTELCPT HDA_CODEC_CONSTRUCT(INTEL, 0x2805)
+#define HDA_CODEC_INTELCL HDA_CODEC_CONSTRUCT(INTEL, 0x29fb)
#define HDA_CODEC_INTELXXXX HDA_CODEC_CONSTRUCT(INTEL, 0xffff)
/* Codecs */
@@ -938,7 +949,17 @@ static const struct {
{ HDA_CODEC_CX20561, "Conexant CX20561 (Hermosa)" },
{ HDA_CODEC_CX20582, "Conexant CX20582 (Pebble)" },
{ HDA_CODEC_CX20583, "Conexant CX20583 (Pebble HSF)" },
+ { HDA_CODEC_CX20584, "Conexant CX20584" },
{ HDA_CODEC_CX20585, "Conexant CX20585" },
+ { HDA_CODEC_CX20590, "Conexant CX20590" },
+ { HDA_CODEC_CX20631, "Conexant CX20631" },
+ { HDA_CODEC_CX20632, "Conexant CX20632" },
+ { HDA_CODEC_CX20641, "Conexant CX20641" },
+ { HDA_CODEC_CX20642, "Conexant CX20642" },
+ { HDA_CODEC_CX20651, "Conexant CX20651" },
+ { HDA_CODEC_CX20652, "Conexant CX20652" },
+ { HDA_CODEC_CX20664, "Conexant CX20664" },
+ { HDA_CODEC_CX20665, "Conexant CX20665" },
{ HDA_CODEC_VT1708_8, "VIA VT1708_8" },
{ HDA_CODEC_VT1708_9, "VIA VT1708_9" },
{ HDA_CODEC_VT1708_A, "VIA VT1708_A" },
@@ -998,12 +1019,13 @@ static const struct {
{ HDA_CODEC_NVIDIAGT21X, "NVidia GT21x HDMI" },
{ HDA_CODEC_NVIDIAMCP89, "NVidia MCP89 HDMI" },
{ HDA_CODEC_NVIDIAGT240, "NVidia GT240 HDMI" },
- { HDA_CODEC_INTELG45_1, "Intel G45 HDMI" },
- { HDA_CODEC_INTELG45_2, "Intel G45 HDMI" },
- { HDA_CODEC_INTELG45_3, "Intel G45 HDMI" },
- { HDA_CODEC_INTELG45_4, "Intel G45 HDMI" },
- { HDA_CODEC_INTELG45_5, "Intel G45 HDMI" },
- { HDA_CODEC_INTELQ57, "Intel Q57 HDMI" },
+ { HDA_CODEC_INTELIP, "Intel Ibex Peak HDMI" },
+ { HDA_CODEC_INTELBL, "Intel Bearlake HDMI" },
+ { HDA_CODEC_INTELCA, "Intel Cantiga HDMI" },
+ { HDA_CODEC_INTELEL, "Intel Eaglelake HDMI" },
+ { HDA_CODEC_INTELIP2, "Intel Ibex Peak HDMI" },
+ { HDA_CODEC_INTELCPT, "Intel Cougar Point HDMI" },
+ { HDA_CODEC_INTELCL, "Intel Crestline HDMI" },
{ HDA_CODEC_SII1390, "Silicon Image SiI1390 HDMI" },
{ HDA_CODEC_SII1392, "Silicon Image SiI1392 HDMI" },
/* Unknown codec */
@@ -4124,7 +4146,10 @@ hdac_attach(device_t dev)
uint16_t vendor;
uint8_t v;
- device_printf(dev, "HDA Driver Revision: %s\n", HDA_DRV_TEST_REV);
+ HDA_BOOTVERBOSE(
+ device_printf(dev, "HDA Driver Revision: %s\n",
+ HDA_DRV_TEST_REV);
+ );
model = (uint32_t)pci_get_device(dev) << 16;
model |= (uint32_t)pci_get_vendor(dev) & 0x0000ffff;
@@ -4919,6 +4944,25 @@ hdac_vendor_patch_parse(struct hdac_devinfo *devinfo)
if (w != NULL)
w->connsenable[0] = 0;
break;
+ case HDA_CODEC_CX20582:
+ case HDA_CODEC_CX20583:
+ case HDA_CODEC_CX20584:
+ case HDA_CODEC_CX20585:
+ case HDA_CODEC_CX20590:
+ /*
+	 * These codecs have extra connectivity on the record side
+	 * that is beyond the reach of the present parser.
+ */
+ w = hdac_widget_get(devinfo, 20);
+ if (w != NULL)
+ w->connsenable[1] = 0;
+ w = hdac_widget_get(devinfo, 21);
+ if (w != NULL)
+ w->connsenable[1] = 0;
+ w = hdac_widget_get(devinfo, 22);
+ if (w != NULL)
+ w->connsenable[0] = 0;
+ break;
}
}
diff --git a/sys/dev/sound/pcm/sound.c b/sys/dev/sound/pcm/sound.c
index caa7841..958065f 100644
--- a/sys/dev/sound/pcm/sound.c
+++ b/sys/dev/sound/pcm/sound.c
@@ -51,7 +51,7 @@ int pcm_veto_load = 1;
int snd_unit = -1;
TUNABLE_INT("hw.snd.default_unit", &snd_unit);
-static int snd_unit_auto = 0;
+static int snd_unit_auto = -1;
TUNABLE_INT("hw.snd.default_auto", &snd_unit_auto);
SYSCTL_INT(_hw_snd, OID_AUTO, default_auto, CTLFLAG_RW,
&snd_unit_auto, 0, "assign default unit to a newly attached device");
@@ -443,6 +443,7 @@ sysctl_hw_snd_default_unit(SYSCTL_HANDLER_ARGS)
if (!PCM_REGISTERED(d) || CHN_EMPTY(d, channels.pcm))
return EINVAL;
snd_unit = unit;
+ snd_unit_auto = 0;
}
return (error);
}
@@ -737,6 +738,32 @@ pcm_killchan(device_t dev)
return (pcm_chn_destroy(ch));
}
+static int
+pcm_best_unit(int old)
+{
+ struct snddev_info *d;
+ int i, best, bestprio, prio;
+
+ best = -1;
+ bestprio = -100;
+ for (i = 0; pcm_devclass != NULL &&
+ i < devclass_get_maxunit(pcm_devclass); i++) {
+ d = devclass_get_softc(pcm_devclass, i);
+ if (!PCM_REGISTERED(d))
+ continue;
+ prio = 0;
+ if (d->playcount == 0)
+ prio -= 10;
+ if (d->reccount == 0)
+ prio -= 2;
+ if (prio > bestprio || (prio == bestprio && i == old)) {
+ best = i;
+ bestprio = prio;
+ }
+ }
+ return (best);
+}
+
int
pcm_setstatus(device_t dev, char *str)
{
@@ -770,8 +797,12 @@ pcm_setstatus(device_t dev, char *str)
PCM_UNLOCK(d);
- if (snd_unit < 0 || snd_unit_auto != 0)
+ if (snd_unit_auto < 0)
+ snd_unit_auto = (snd_unit < 0) ? 1 : 0;
+ if (snd_unit < 0 || snd_unit_auto > 1)
snd_unit = device_get_unit(dev);
+ else if (snd_unit_auto == 1)
+ snd_unit = pcm_best_unit(snd_unit);
return (0);
}
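
The snd_unit_auto handling above is a small state machine that is easy to misread. One plausible reading, sketched as a standalone policy function (the function and its arguments are illustrative, not kernel API): -1 means the tunable was never set, 0 means the administrator pinned a unit via the sysctl, 1 means track the best-scoring unit, and larger values mean always adopt the newest device.

#include <assert.h>

static int
pick_default_unit(int snd_unit, int snd_unit_auto, int new_unit, int best_unit)
{
	if (snd_unit_auto < 0)			/* tunable left unset */
		snd_unit_auto = (snd_unit < 0) ? 1 : 0;
	if (snd_unit < 0 || snd_unit_auto > 1)	/* none yet, or "use newest" */
		return (new_unit);
	if (snd_unit_auto == 1)			/* follow best-scoring unit */
		return (best_unit);
	return (snd_unit);			/* pinned by the administrator */
}

int
main(void)
{
	/* Unset tunable, no default yet: adopt the newly attached unit. */
	assert(pick_default_unit(-1, -1, 5, 3) == 5);
	/* Auto mode 1: prefer the best-scoring unit. */
	assert(pick_default_unit(0, 1, 5, 3) == 3);
	return (0);
}
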
@@ -1113,7 +1144,6 @@ pcm_unregister(device_t dev)
struct snddev_info *d;
struct pcm_channel *ch;
struct thread *td;
- int i;
td = curthread;
d = device_get_softc(dev);
@@ -1216,21 +1246,9 @@ pcm_unregister(device_t dev)
sndstat_release(td);
if (snd_unit == device_get_unit(dev)) {
- /*
- * Reassign default unit to the next available dev, but
- * first, reset snd_unit to something ridiculous.
- */
- snd_unit = -1;
- for (i = 0; pcm_devclass != NULL &&
- i < devclass_get_maxunit(pcm_devclass); i++) {
- if (device_get_unit(dev) == i)
- continue;
- d = devclass_get_softc(pcm_devclass, i);
- if (PCM_REGISTERED(d)) {
- snd_unit = i;
- break;
- }
- }
+ snd_unit = pcm_best_unit(-1);
+ if (snd_unit_auto == 0)
+ snd_unit_auto = 1;
}
return (0);
diff --git a/sys/dev/uart/uart_dev_ns8250.c b/sys/dev/uart/uart_dev_ns8250.c
index 3cdd5ad..489be29 100644
--- a/sys/dev/uart/uart_dev_ns8250.c
+++ b/sys/dev/uart/uart_dev_ns8250.c
@@ -242,8 +242,14 @@ ns8250_probe(struct uart_bas *bas)
val = uart_getreg(bas, REG_IIR);
if (val & 0x30)
return (ENXIO);
+ /*
+ * Bit 6 of the MCR (= 0x40) appears to be 1 for the Sun1699
+ * chip, but otherwise doesn't seem to have a function. In
+ * other words, uart(4) works regardless. Ignore that bit so
+ * the probe succeeds.
+ */
val = uart_getreg(bas, REG_MCR);
- if (val & 0xe0)
+ if (val & 0xa0)
return (ENXIO);
return (0);
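
The probe change narrows the MCR test from 0xe0 to 0xa0: it still rejects chips with bits 7 and 5 set, but tolerates bit 6 (0x40), which the comment above reports is stuck high on the Sun1699. A standalone sketch of the two masks:

#include <assert.h>
#include <stdint.h>

/* Sketch: would an MCR value pass the old/new ns8250 probe check? */
static int
mcr_probe_ok(uint8_t mcr, uint8_t mask)
{
	return ((mcr & mask) == 0);
}

int
main(void)
{
	uint8_t sun1699_mcr = 0x40;	/* bit 6 high, per the comment above */

	assert(!mcr_probe_ok(sun1699_mcr, 0xe0));	/* old mask: fails */
	assert(mcr_probe_ok(sun1699_mcr, 0xa0));	/* new mask: passes */
	return (0);
}
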
diff --git a/sys/dev/usb/net/if_axe.c b/sys/dev/usb/net/if_axe.c
index fbe63de..00d1c0b 100644
--- a/sys/dev/usb/net/if_axe.c
+++ b/sys/dev/usb/net/if_axe.c
@@ -514,7 +514,7 @@ static void
axe_ax88178_init(struct axe_softc *sc)
{
struct usb_ether *ue;
- int gpio0, phymode;
+ int gpio0, ledmode, phymode;
uint16_t eeprom, val;
ue = &sc->sc_ue;
@@ -528,9 +528,11 @@ axe_ax88178_init(struct axe_softc *sc)
if (eeprom == 0xffff) {
phymode = AXE_PHY_MODE_MARVELL;
gpio0 = 1;
+ ledmode = 0;
} else {
phymode = eeprom & 0x7f;
gpio0 = (eeprom & 0x80) ? 0 : 1;
+ ledmode = eeprom >> 8;
}
if (bootverbose)
@@ -548,9 +550,22 @@ axe_ax88178_init(struct axe_softc *sc)
AXE_GPIO_WRITE(AXE_GPIO0_EN | AXE_GPIO2_EN, hz / 4);
AXE_GPIO_WRITE(AXE_GPIO0_EN | AXE_GPIO2 | AXE_GPIO2_EN,
hz / 32);
- } else
+ } else {
AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM | AXE_GPIO1 |
- AXE_GPIO1_EN, hz / 32);
+ AXE_GPIO1_EN, hz / 3);
+ if (ledmode == 1) {
+ AXE_GPIO_WRITE(AXE_GPIO1_EN, hz / 3);
+ AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN,
+ hz / 3);
+ } else {
+ AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN |
+ AXE_GPIO2 | AXE_GPIO2_EN, hz / 32);
+ AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN |
+ AXE_GPIO2_EN, hz / 4);
+ AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN |
+ AXE_GPIO2 | AXE_GPIO2_EN, hz / 32);
+ }
+ }
break;
case AXE_PHY_MODE_CICADA:
case AXE_PHY_MODE_CICADA_V2:
diff --git a/sys/dev/usb/net/if_udav.c b/sys/dev/usb/net/if_udav.c
index a1a0a8d..a6598ef 100644
--- a/sys/dev/usb/net/if_udav.c
+++ b/sys/dev/usb/net/if_udav.c
@@ -210,6 +210,7 @@ static const struct usb_device_id udav_devs[] = {
{USB_VPI(USB_VENDOR_SHANTOU, USB_PRODUCT_SHANTOU_ADM8515, 0)},
/* Kontron AG USB Ethernet */
{USB_VPI(USB_VENDOR_KONTRON, USB_PRODUCT_KONTRON_DM9601, 0)},
+ {USB_VPI(USB_VENDOR_KONTRON, USB_PRODUCT_KONTRON_JP1082, 0)},
};
static void
diff --git a/sys/dev/usb/serial/umcs.c b/sys/dev/usb/serial/umcs.c
new file mode 100644
index 0000000..c74044e
--- /dev/null
+++ b/sys/dev/usb/serial/umcs.c
@@ -0,0 +1,1075 @@
+/*-
+ * Copyright (c) 2010 Lev Serebryakov <lev@FreeBSD.org>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * This driver supports several multiport USB-to-RS232 serial adapters driven
+ * by the MosChip mos7820 and mos7840 bridge chips.
+ * The adapters are sold under many different brand names.
+ *
+ * Datasheets are available on the MosChip web site at
+ * http://www.moschip.com. The datasheets don't contain full
+ * programming information for the chip.
+ *
+ * It is normal to have only two enabled ports in devices based on
+ * the quad-port mos7840.
+ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/stdint.h>
+#include <sys/stddef.h>
+#include <sys/param.h>
+#include <sys/queue.h>
+#include <sys/types.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <sys/linker_set.h>
+#include <sys/module.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/condvar.h>
+#include <sys/sysctl.h>
+#include <sys/sx.h>
+#include <sys/unistd.h>
+#include <sys/callout.h>
+#include <sys/malloc.h>
+#include <sys/priv.h>
+
+#include <dev/usb/usb.h>
+#include <dev/usb/usbdi.h>
+#include <dev/usb/usbdi_util.h>
+#include <dev/usb/usb_cdc.h>
+#include "usbdevs.h"
+
+#define USB_DEBUG_VAR umcs_debug
+#include <dev/usb/usb_debug.h>
+#include <dev/usb/usb_process.h>
+
+#include <dev/usb/serial/usb_serial.h>
+
+#include <dev/usb/serial/umcs.h>
+
+#define UMCS7840_MODVER 1
+
+#ifdef USB_DEBUG
+static int umcs_debug = 0;
+
+SYSCTL_NODE(_hw_usb, OID_AUTO, umcs, CTLFLAG_RW, 0, "USB umcs quadport serial adapter");
+SYSCTL_INT(_hw_usb_umcs, OID_AUTO, debug, CTLFLAG_RW, &umcs_debug, 0, "Debug level");
+#endif /* USB_DEBUG */
+
+
+/*
+ * Two-port devices (both with 7820 chip and 7840 chip configured as two-port)
+ * have ports 0 and 2, with ports 1 and 3 omitted.
+ * So, PHYSICAL port numbers (indexes) on a two-port device will be 0 and 2.
+ * This driver tries to use physical numbers as much as possible.
+ */
+
+/*
+ * Indexed by PHYSICAL port number.
+ * Pack the non-regular registers into an array for easier, branch-free access.
+ */
+struct umcs7840_port_registers {
+ uint8_t reg_sp; /* SP register. */
+ uint8_t reg_control; /* CONTROL register. */
+ uint8_t reg_dcr; /* DCR0 register. DCR1 & DCR2 can be
+ * calculated */
+};
+
+static const struct umcs7840_port_registers umcs7840_port_registers[UMCS7840_MAX_PORTS] = {
+ {.reg_sp = MCS7840_DEV_REG_SP1,.reg_control = MCS7840_DEV_REG_CONTROL1,.reg_dcr = MCS7840_DEV_REG_DCR0_1},
+ {.reg_sp = MCS7840_DEV_REG_SP2,.reg_control = MCS7840_DEV_REG_CONTROL2,.reg_dcr = MCS7840_DEV_REG_DCR0_2},
+ {.reg_sp = MCS7840_DEV_REG_SP3,.reg_control = MCS7840_DEV_REG_CONTROL3,.reg_dcr = MCS7840_DEV_REG_DCR0_3},
+ {.reg_sp = MCS7840_DEV_REG_SP4,.reg_control = MCS7840_DEV_REG_CONTROL4,.reg_dcr = MCS7840_DEV_REG_DCR0_4},
+};
+
+enum {
+ UMCS7840_BULK_RD_EP,
+ UMCS7840_BULK_WR_EP,
+ UMCS7840_N_TRANSFERS
+};
+
+struct umcs7840_softc_oneport {
+ struct usb_xfer *sc_xfer[UMCS7840_N_TRANSFERS]; /* Control structures
+ * for two transfers */
+
+ uint8_t sc_lcr; /* local line control register */
+ uint8_t sc_mcr; /* local modem control register */
+ uint8_t sc_lsr; /* local line status register */
+ uint8_t sc_msr; /* local modem status register */
+};
+
+struct umcs7840_softc {
+ struct ucom_super_softc sc_super_ucom;
+	struct ucom_softc sc_ucom[UMCS7840_MAX_PORTS];	/* Needs to be a
+							 * contiguous array,
+							 * indexed by LOGICAL
+							 * port (subunit) number */
+
+ struct usb_xfer *sc_intr_xfer; /* Interrupt endpoint */
+
+ device_t sc_dev; /* Device for error prints */
+ struct usb_device *sc_udev; /* USB Device for all operations */
+ struct mtx sc_mtx; /* ucom requires this */
+
+ uint8_t sc_driver_done; /* Flag when enumeration is finished */
+
+ uint8_t sc_numports; /* Number of ports (subunits) */
+ struct umcs7840_softc_oneport sc_ports[UMCS7840_MAX_PORTS]; /* Indexed by PHYSICAL
+ * port number. */
+};
+
+/* prototypes */
+static usb_error_t umcs7840_get_reg_sync(struct umcs7840_softc *, uint8_t, uint8_t *);
+static usb_error_t umcs7840_set_reg_sync(struct umcs7840_softc *, uint8_t, uint8_t);
+static usb_error_t umcs7840_get_UART_reg_sync(struct umcs7840_softc *, uint8_t, uint8_t, uint8_t *);
+static usb_error_t umcs7840_set_UART_reg_sync(struct umcs7840_softc *, uint8_t, uint8_t, uint8_t);
+
+static usb_error_t umcs7840_set_baudrate(struct umcs7840_softc *, uint8_t, uint32_t);
+static usb_error_t umcs7840_calc_baudrate(uint32_t rate, uint16_t *, uint8_t *);
+
+static void umcs7840_cfg_get_status(struct ucom_softc *, uint8_t *, uint8_t *);
+static void umcs7840_cfg_set_dtr(struct ucom_softc *, uint8_t);
+static void umcs7840_cfg_set_rts(struct ucom_softc *, uint8_t);
+static void umcs7840_cfg_set_break(struct ucom_softc *, uint8_t);
+static void umcs7840_cfg_param(struct ucom_softc *, struct termios *);
+static void umcs7840_cfg_open(struct ucom_softc *);
+static void umcs7840_cfg_close(struct ucom_softc *);
+
+static int umcs7840_pre_param(struct ucom_softc *, struct termios *);
+
+static void umcs7840_start_read(struct ucom_softc *);
+static void umcs7840_stop_read(struct ucom_softc *);
+
+static void umcs7840_start_write(struct ucom_softc *);
+static void umcs7840_stop_write(struct ucom_softc *);
+
+static void umcs7840_poll(struct ucom_softc *ucom);
+
+static device_probe_t umcs7840_probe;
+static device_attach_t umcs7840_attach;
+static device_detach_t umcs7840_detach;
+
+static usb_callback_t umcs7840_intr_callback;
+static usb_callback_t umcs7840_read_callback1;
+static usb_callback_t umcs7840_read_callback2;
+static usb_callback_t umcs7840_read_callback3;
+static usb_callback_t umcs7840_read_callback4;
+static usb_callback_t umcs7840_write_callback1;
+static usb_callback_t umcs7840_write_callback2;
+static usb_callback_t umcs7840_write_callback3;
+static usb_callback_t umcs7840_write_callback4;
+
+static void umcs7840_read_callbackN(struct usb_xfer *, usb_error_t, uint8_t);
+static void umcs7840_write_callbackN(struct usb_xfer *, usb_error_t, uint8_t);
+
+/* Indexed by LOGICAL port number (subunit), so two-port device uses 0 & 1 */
+static usb_callback_t *umcs7840_rw_callbacks[UMCS7840_MAX_PORTS][UMCS7840_N_TRANSFERS] = {
+ {&umcs7840_read_callback1, &umcs7840_write_callback1},
+ {&umcs7840_read_callback2, &umcs7840_write_callback2},
+ {&umcs7840_read_callback3, &umcs7840_write_callback3},
+ {&umcs7840_read_callback4, &umcs7840_write_callback4},
+};
+
+static const struct usb_config umcs7840_bulk_config_data[UMCS7840_N_TRANSFERS] = {
+ [UMCS7840_BULK_RD_EP] = {
+ .type = UE_BULK,
+ .endpoint = 0x01,
+ .direction = UE_DIR_IN,
+ .flags = {.pipe_bof = 1,.short_xfer_ok = 1,},
+ .bufsize = 0, /* use wMaxPacketSize */
+ .callback = &umcs7840_read_callback1,
+ .if_index = 0,
+ },
+
+ [UMCS7840_BULK_WR_EP] = {
+ .type = UE_BULK,
+ .endpoint = 0x02,
+ .direction = UE_DIR_OUT,
+ .flags = {.pipe_bof = 1,.short_xfer_ok = 1,},
+ .bufsize = 0, /* use wMaxPacketSize */
+ .callback = &umcs7840_write_callback1,
+ .if_index = 0,
+ },
+};
+
+static const struct usb_config umcs7840_intr_config_data[1] = {
+ [0] = {
+ .type = UE_INTERRUPT,
+ .endpoint = 0x09,
+ .direction = UE_DIR_IN,
+ .flags = {.pipe_bof = 1,.short_xfer_ok = 1,},
+ .bufsize = 0, /* use wMaxPacketSize */
+ .callback = &umcs7840_intr_callback,
+ .if_index = 0,
+ },
+};
+
+static struct ucom_callback umcs7840_callback = {
+ .ucom_cfg_get_status = &umcs7840_cfg_get_status,
+
+ .ucom_cfg_set_dtr = &umcs7840_cfg_set_dtr,
+ .ucom_cfg_set_rts = &umcs7840_cfg_set_rts,
+ .ucom_cfg_set_break = &umcs7840_cfg_set_break,
+
+ .ucom_cfg_param = &umcs7840_cfg_param,
+ .ucom_cfg_open = &umcs7840_cfg_open,
+ .ucom_cfg_close = &umcs7840_cfg_close,
+
+ .ucom_pre_param = &umcs7840_pre_param,
+
+ .ucom_start_read = &umcs7840_start_read,
+ .ucom_stop_read = &umcs7840_stop_read,
+
+ .ucom_start_write = &umcs7840_start_write,
+ .ucom_stop_write = &umcs7840_stop_write,
+
+ .ucom_poll = &umcs7840_poll,
+};
+
+static const struct usb_device_id umcs7840_devs[] = {
+ {USB_VPI(USB_VENDOR_MOSCHIP, USB_PRODUCT_MOSCHIP_MCS7820, 0)},
+ {USB_VPI(USB_VENDOR_MOSCHIP, USB_PRODUCT_MOSCHIP_MCS7840, 0)},
+};
+
+static device_method_t umcs7840_methods[] = {
+ DEVMETHOD(device_probe, umcs7840_probe),
+ DEVMETHOD(device_attach, umcs7840_attach),
+ DEVMETHOD(device_detach, umcs7840_detach),
+ {0, 0}
+};
+
+static devclass_t umcs7840_devclass;
+
+static driver_t umcs7840_driver = {
+ .name = "umcs7840",
+ .methods = umcs7840_methods,
+ .size = sizeof(struct umcs7840_softc),
+};
+
+DRIVER_MODULE(umcs7840, uhub, umcs7840_driver, umcs7840_devclass, 0, 0);
+MODULE_DEPEND(umcs7840, ucom, 1, 1, 1);
+MODULE_DEPEND(umcs7840, usb, 1, 1, 1);
+MODULE_VERSION(umcs7840, UMCS7840_MODVER);
+
+static int
+umcs7840_probe(device_t dev)
+{
+ struct usb_attach_arg *uaa = device_get_ivars(dev);
+
+ if (uaa->usb_mode != USB_MODE_HOST)
+ return (ENXIO);
+ if (uaa->info.bConfigIndex != MCS7840_CONFIG_INDEX)
+ return (ENXIO);
+ if (uaa->info.bIfaceIndex != MCS7840_IFACE_INDEX)
+ return (ENXIO);
+ return (usbd_lookup_id_by_uaa(umcs7840_devs, sizeof(umcs7840_devs), uaa));
+}
+
+static int
+umcs7840_attach(device_t dev)
+{
+ struct usb_config umcs7840_config_tmp[UMCS7840_N_TRANSFERS];
+ struct usb_attach_arg *uaa = device_get_ivars(dev);
+ struct umcs7840_softc *sc = device_get_softc(dev);
+
+ uint8_t iface_index = MCS7840_IFACE_INDEX;
+ int error;
+ int subunit;
+ int n;
+ uint8_t data;
+
+ for (n = 0; n < UMCS7840_N_TRANSFERS; ++n)
+ umcs7840_config_tmp[n] = umcs7840_bulk_config_data[n];
+
+ device_set_usb_desc(dev);
+ mtx_init(&sc->sc_mtx, "umcs7840", NULL, MTX_DEF);
+
+ sc->sc_dev = dev;
+ sc->sc_udev = uaa->device;
+
+ /*
+	 * Get the number of ports.
+	 * The documentation (full datasheet) says that the number of ports
+	 * is reported by the MCS7840_DEV_MODE_SELECT24S bit in the read-only
+	 * MODE register, but the vendor driver uses this undocumented
+	 * register & bit instead.
+	 *
+	 * Experiments show that the MODE register can report `0'
+	 * (4 ports) on a 2-port device, so follow the vendor driver's way.
+	 *
+	 * Also, see the notes in the header file for these constants.
+ */
+ umcs7840_get_reg_sync(sc, MCS7840_DEV_REG_GPIO, &data);
+ if (data & MCS7840_DEV_GPIO_4PORTS) {
+ sc->sc_numports = 4;
+ /* Store physical port numbers in sc_portno */
+ sc->sc_ucom[0].sc_portno = 0;
+ sc->sc_ucom[1].sc_portno = 1;
+ sc->sc_ucom[2].sc_portno = 2;
+ sc->sc_ucom[3].sc_portno = 3;
+ } else {
+ sc->sc_numports = 2;
+ /* Store physical port numbers in sc_portno */
+ sc->sc_ucom[0].sc_portno = 0;
+ sc->sc_ucom[1].sc_portno = 2; /* '1' is skipped */
+ }
+ device_printf(dev, "Chip mcs%04x, found %d active ports\n", uaa->info.idProduct, sc->sc_numports);
+ if (!umcs7840_get_reg_sync(sc, MCS7840_DEV_REG_MODE, &data)) {
+ device_printf(dev, "On-die confguration: RST: active %s, HRD: %s, PLL: %s, POR: %s, Ports: %s, EEPROM write %s, IrDA is %savailable\n",
+ (data & MCS7840_DEV_MODE_RESET) ? "low" : "high",
+ (data & MCS7840_DEV_MODE_SER_PRSNT) ? "yes" : "no",
+ (data & MCS7840_DEV_MODE_PLLBYPASS) ? "bypassed" : "avail",
+ (data & MCS7840_DEV_MODE_PORBYPASS) ? "bypassed" : "avail",
+ (data & MCS7840_DEV_MODE_SELECT24S) ? "2" : "4",
+ (data & MCS7840_DEV_MODE_EEPROMWR) ? "enabled" : "disabled",
+ (data & MCS7840_DEV_MODE_IRDA) ? "" : "not ");
+ }
+ /* Setup all transfers */
+ for (subunit = 0; subunit < sc->sc_numports; ++subunit) {
+ for (n = 0; n < UMCS7840_N_TRANSFERS; ++n) {
+ /* Set endpoint address */
+ umcs7840_config_tmp[n].endpoint = umcs7840_bulk_config_data[n].endpoint + 2 * sc->sc_ucom[subunit].sc_portno;
+ umcs7840_config_tmp[n].callback = umcs7840_rw_callbacks[subunit][n];
+ }
+ error = usbd_transfer_setup(uaa->device,
+ &iface_index, sc->sc_ports[sc->sc_ucom[subunit].sc_portno].sc_xfer, umcs7840_config_tmp,
+ UMCS7840_N_TRANSFERS, sc, &sc->sc_mtx);
+ if (error) {
+ device_printf(dev, "allocating USB transfers failed for subunit %d of %d\n",
+ subunit + 1, sc->sc_numports);
+ goto detach;
+ }
+ }
+ error = usbd_transfer_setup(uaa->device,
+ &iface_index, &sc->sc_intr_xfer, umcs7840_intr_config_data,
+ 1, sc, &sc->sc_mtx);
+ if (error) {
+ device_printf(dev, "allocating USB transfers failed for interrupt\n");
+ goto detach;
+ }
+ /* clear stall at first run */
+ mtx_lock(&sc->sc_mtx);
+ for (subunit = 0; subunit < sc->sc_numports; ++subunit) {
+ usbd_xfer_set_stall(sc->sc_ports[sc->sc_ucom[subunit].sc_portno].sc_xfer[UMCS7840_BULK_RD_EP]);
+ usbd_xfer_set_stall(sc->sc_ports[sc->sc_ucom[subunit].sc_portno].sc_xfer[UMCS7840_BULK_WR_EP]);
+ }
+ mtx_unlock(&sc->sc_mtx);
+
+ error = ucom_attach(&sc->sc_super_ucom, sc->sc_ucom, sc->sc_numports, sc,
+ &umcs7840_callback, &sc->sc_mtx);
+ if (error)
+ goto detach;
+
+ ucom_set_pnpinfo_usb(&sc->sc_super_ucom, dev);
+
+ return (0);
+
+detach:
+ umcs7840_detach(dev);
+ return (ENXIO);
+}
+
+static int
+umcs7840_detach(device_t dev)
+{
+ struct umcs7840_softc *sc = device_get_softc(dev);
+ int subunit;
+
+ ucom_detach(&sc->sc_super_ucom, sc->sc_ucom);
+
+ for (subunit = 0; subunit < sc->sc_numports; ++subunit)
+ usbd_transfer_unsetup(sc->sc_ports[sc->sc_ucom[subunit].sc_portno].sc_xfer, UMCS7840_N_TRANSFERS);
+ usbd_transfer_unsetup(&sc->sc_intr_xfer, 1);
+
+ mtx_destroy(&sc->sc_mtx);
+ return (0);
+}
+
+static void
+umcs7840_cfg_open(struct ucom_softc *ucom)
+{
+ struct umcs7840_softc *sc = ucom->sc_parent;
+ uint16_t pn = ucom->sc_portno;
+ uint8_t data;
+
+	/* On the very first open, finish global configuration */
+ if (!sc->sc_driver_done) {
+ /*
+		 * USB enumeration is finished; hand internal memory over to
+		 * the FIFOs. If this is done at the end of "attach", the
+		 * kernel panics.
+ */
+ if (umcs7840_get_reg_sync(sc, MCS7840_DEV_REG_CONTROL1, &data))
+ return;
+ data |= MCS7840_DEV_CONTROL1_DRIVER_DONE;
+ if (umcs7840_set_reg_sync(sc, MCS7840_DEV_REG_CONTROL1, data))
+ return;
+ sc->sc_driver_done = 1;
+ }
+ /* Toggle reset bit on-off */
+ if (umcs7840_get_reg_sync(sc, umcs7840_port_registers[pn].reg_sp, &data))
+ return;
+ data |= MCS7840_DEV_SPx_UART_RESET;
+ if (umcs7840_set_reg_sync(sc, umcs7840_port_registers[pn].reg_sp, data))
+ return;
+ data &= ~MCS7840_DEV_SPx_UART_RESET;
+ if (umcs7840_set_reg_sync(sc, umcs7840_port_registers[pn].reg_sp, data))
+ return;
+
+ /* Set RS-232 mode */
+ if (umcs7840_set_UART_reg_sync(sc, pn, MCS7840_UART_REG_SCRATCHPAD, MCS7840_UART_SCRATCHPAD_RS232))
+ return;
+
+	/* Disable RX during initialization */
+ if (umcs7840_get_reg_sync(sc, umcs7840_port_registers[pn].reg_control, &data))
+ return;
+ data |= MCS7840_DEV_CONTROLx_RX_DISABLE;
+ if (umcs7840_set_reg_sync(sc, umcs7840_port_registers[pn].reg_control, data))
+ return;
+
+ /* Disable all interrupts */
+ if (umcs7840_set_UART_reg_sync(sc, pn, MCS7840_UART_REG_IER, 0))
+ return;
+
+ /* Reset FIFO -- documented */
+ if (umcs7840_set_UART_reg_sync(sc, pn, MCS7840_UART_REG_FCR, 0))
+ return;
+ if (umcs7840_set_UART_reg_sync(sc, pn, MCS7840_UART_REG_FCR,
+ MCS7840_UART_FCR_ENABLE | MCS7840_UART_FCR_FLUSHRHR |
+ MCS7840_UART_FCR_FLUSHTHR | MCS7840_UART_FCR_RTL_1_14))
+ return;
+
+ /* Set 8 bit, no parity, 1 stop bit -- documented */
+ sc->sc_ports[pn].sc_lcr = MCS7840_UART_LCR_DATALEN8 | MCS7840_UART_LCR_STOPB1;
+ if (umcs7840_set_UART_reg_sync(sc, pn, MCS7840_UART_REG_LCR, sc->sc_ports[pn].sc_lcr))
+ return;
+
+ /*
+ * Enable DTR/RTS on modem control, enable modem interrupts --
+ * documented
+ */
+ sc->sc_ports[pn].sc_mcr = MCS7840_UART_MCR_DTR | MCS7840_UART_MCR_RTS | MCS7840_UART_MCR_IE;
+ if (umcs7840_set_UART_reg_sync(sc, pn, MCS7840_UART_REG_MCR, sc->sc_ports[pn].sc_mcr))
+ return;
+
+	/* Clear the bulk-in and bulk-out FIFOs */
+ if (umcs7840_get_reg_sync(sc, umcs7840_port_registers[pn].reg_sp, &data))
+ return;
+ data |= MCS7840_DEV_SPx_RESET_OUT_FIFO | MCS7840_DEV_SPx_RESET_IN_FIFO;
+ if (umcs7840_set_reg_sync(sc, umcs7840_port_registers[pn].reg_sp, data))
+ return;
+ data &= ~(MCS7840_DEV_SPx_RESET_OUT_FIFO | MCS7840_DEV_SPx_RESET_IN_FIFO);
+ if (umcs7840_set_reg_sync(sc, umcs7840_port_registers[pn].reg_sp, data))
+ return;
+
+	/* Set the speed to 9600 baud */
+ if (umcs7840_set_baudrate(sc, pn, 9600))
+ return;
+
+ /* Finally enable all interrupts -- documented */
+ /*
+ * Copied from vendor driver, I don't know why we should read LCR
+ * here
+ */
+ if (umcs7840_get_UART_reg_sync(sc, pn, MCS7840_UART_REG_LCR, &sc->sc_ports[pn].sc_lcr))
+ return;
+ if (umcs7840_set_UART_reg_sync(sc, pn, MCS7840_UART_REG_IER,
+ MCS7840_UART_IER_RXSTAT | MCS7840_UART_IER_MODEM))
+ return;
+
+ /* Enable RX */
+ if (umcs7840_get_reg_sync(sc, umcs7840_port_registers[pn].reg_control, &data))
+ return;
+ data &= ~MCS7840_DEV_CONTROLx_RX_DISABLE;
+ if (umcs7840_set_reg_sync(sc, umcs7840_port_registers[pn].reg_control, data))
+ return;
+
+ /* Read LSR & MSR */
+ if (umcs7840_get_UART_reg_sync(sc, pn, MCS7840_UART_REG_LSR, &sc->sc_ports[pn].sc_lsr))
+ return;
+ if (umcs7840_get_UART_reg_sync(sc, pn, MCS7840_UART_REG_MSR, &sc->sc_ports[pn].sc_msr))
+ return;
+ DPRINTF("Port %d has been opened, LSR=%02x MSR=%02x\n", pn, sc->sc_ports[pn].sc_lsr, sc->sc_ports[pn].sc_msr);
+}
+
+static void
+umcs7840_cfg_close(struct ucom_softc *ucom)
+{
+ struct umcs7840_softc *sc = ucom->sc_parent;
+ uint16_t pn = ucom->sc_portno;
+ uint8_t data;
+
+ umcs7840_stop_read(ucom);
+ umcs7840_stop_write(ucom);
+
+ umcs7840_set_UART_reg_sync(sc, pn, MCS7840_UART_REG_MCR, 0);
+ umcs7840_set_UART_reg_sync(sc, pn, MCS7840_UART_REG_IER, 0);
+
+ /* Disable RX */
+ if (umcs7840_get_reg_sync(sc, umcs7840_port_registers[pn].reg_control, &data))
+ return;
+ data |= MCS7840_DEV_CONTROLx_RX_DISABLE;
+ if (umcs7840_set_reg_sync(sc, umcs7840_port_registers[pn].reg_control, data))
+ return;
+ DPRINTF("Port %d has been closed\n", pn);
+}
+
+static void
+umcs7840_cfg_set_dtr(struct ucom_softc *ucom, uint8_t onoff)
+{
+ struct umcs7840_softc *sc = ucom->sc_parent;
+ uint8_t pn = ucom->sc_portno;
+
+ if (onoff)
+ sc->sc_ports[pn].sc_mcr |= MCS7840_UART_MCR_DTR;
+ else
+ sc->sc_ports[pn].sc_mcr &= ~MCS7840_UART_MCR_DTR;
+
+ umcs7840_set_UART_reg_sync(sc, pn, MCS7840_UART_REG_MCR, sc->sc_ports[pn].sc_mcr);
+ DPRINTF("Port %d DTR set to: %s\n", pn, onoff ? "on" : "off");
+}
+
+static void
+umcs7840_cfg_set_rts(struct ucom_softc *ucom, uint8_t onoff)
+{
+ struct umcs7840_softc *sc = ucom->sc_parent;
+ uint8_t pn = ucom->sc_portno;
+
+ if (onoff)
+ sc->sc_ports[pn].sc_mcr |= MCS7840_UART_MCR_RTS;
+ else
+ sc->sc_ports[pn].sc_mcr &= ~MCS7840_UART_MCR_RTS;
+
+ umcs7840_set_UART_reg_sync(sc, pn, MCS7840_UART_REG_MCR, sc->sc_ports[pn].sc_mcr);
+ DPRINTF("Port %d RTS set to: %s\n", pn, onoff ? "on" : "off");
+}
+
+static void
+umcs7840_cfg_set_break(struct ucom_softc *ucom, uint8_t onoff)
+{
+ struct umcs7840_softc *sc = ucom->sc_parent;
+ uint8_t pn = ucom->sc_portno;
+
+ if (onoff)
+ sc->sc_ports[pn].sc_lcr |= MCS7840_UART_LCR_BREAK;
+ else
+ sc->sc_ports[pn].sc_lcr &= ~MCS7840_UART_LCR_BREAK;
+
+ umcs7840_set_UART_reg_sync(sc, pn, MCS7840_UART_REG_LCR, sc->sc_ports[pn].sc_lcr);
+ DPRINTF("Port %d BREAK set to: %s\n", pn, onoff ? "on" : "off");
+}
+
+static void
+umcs7840_cfg_param(struct ucom_softc *ucom, struct termios *t)
+{
+ struct umcs7840_softc *sc = ucom->sc_parent;
+ uint8_t pn = ucom->sc_portno;
+ uint8_t lcr = sc->sc_ports[pn].sc_lcr;
+ uint8_t mcr = sc->sc_ports[pn].sc_mcr;
+
+ DPRINTF("Port %d config:\n", pn);
+ if (t->c_cflag & CSTOPB) {
+ DPRINTF(" 2 stop bits\n");
+ lcr |= MCS7840_UART_LCR_STOPB2;
+ } else {
+ lcr |= MCS7840_UART_LCR_STOPB1;
+ DPRINTF(" 1 stop bit\n");
+ }
+
+ lcr &= ~MCS7840_UART_LCR_PARITYMASK;
+ if (t->c_cflag & PARENB) {
+ lcr |= MCS7840_UART_LCR_PARITYON;
+ if (t->c_cflag & PARODD) {
+			lcr |= MCS7840_UART_LCR_PARITYODD;
+ DPRINTF(" parity on - odd\n");
+ } else {
+			lcr |= MCS7840_UART_LCR_PARITYEVEN;
+ DPRINTF(" parity on - even\n");
+ }
+ } else {
+ lcr &= ~MCS7840_UART_LCR_PARITYON;
+ DPRINTF(" parity off\n");
+ }
+
+ lcr &= ~MCS7840_UART_LCR_DATALENMASK;
+ switch (t->c_cflag & CSIZE) {
+ case CS5:
+ lcr |= MCS7840_UART_LCR_DATALEN5;
+ DPRINTF(" 5 bit\n");
+ break;
+ case CS6:
+ lcr |= MCS7840_UART_LCR_DATALEN6;
+ DPRINTF(" 6 bit\n");
+ break;
+ case CS7:
+ lcr |= MCS7840_UART_LCR_DATALEN7;
+ DPRINTF(" 7 bit\n");
+ break;
+ case CS8:
+ lcr |= MCS7840_UART_LCR_DATALEN8;
+ DPRINTF(" 8 bit\n");
+ break;
+ }
+
+ if (t->c_cflag & CRTSCTS) {
+ mcr |= MCS7840_UART_MCR_CTSRTS;
+ DPRINTF(" CTS/RTS\n");
+ } else
+ mcr &= ~MCS7840_UART_MCR_CTSRTS;
+
+ if (t->c_cflag & (CDTR_IFLOW | CDSR_OFLOW)) {
+ mcr |= MCS7840_UART_MCR_DTRDSR;
+ DPRINTF(" DTR/DSR\n");
+ } else
+ mcr &= ~MCS7840_UART_MCR_DTRDSR;
+
+ sc->sc_ports[pn].sc_lcr = lcr;
+ umcs7840_set_UART_reg_sync(sc, pn, MCS7840_UART_REG_LCR, sc->sc_ports[pn].sc_lcr);
+ DPRINTF("Port %d LCR=%02x\n", pn, sc->sc_ports[pn].sc_lcr);
+
+ sc->sc_ports[pn].sc_mcr = mcr;
+ umcs7840_set_UART_reg_sync(sc, pn, MCS7840_UART_REG_MCR, sc->sc_ports[pn].sc_mcr);
+ DPRINTF("Port %d MCR=%02x\n", pn, sc->sc_ports[pn].sc_mcr);
+
+ umcs7840_set_baudrate(sc, pn, t->c_ospeed);
+}
+
+static int
+umcs7840_pre_param(struct ucom_softc *ucom, struct termios *t)
+{
+ uint8_t clk;
+ uint16_t divisor;
+
+ if (umcs7840_calc_baudrate(t->c_ospeed, &divisor, &clk) || !divisor)
+ return (EINVAL);
+ return (0);
+}
+
+static void
+umcs7840_start_read(struct ucom_softc *ucom)
+{
+ struct umcs7840_softc *sc = ucom->sc_parent;
+ uint8_t pn = ucom->sc_portno;
+
+ /* Start interrupt transfer */
+ usbd_transfer_start(sc->sc_intr_xfer);
+
+ /* Start read transfer */
+ usbd_transfer_start(sc->sc_ports[pn].sc_xfer[UMCS7840_BULK_RD_EP]);
+}
+
+static void
+umcs7840_stop_read(struct ucom_softc *ucom)
+{
+ struct umcs7840_softc *sc = ucom->sc_parent;
+ uint8_t pn = ucom->sc_portno;
+
+ /* Stop read transfer */
+ usbd_transfer_stop(sc->sc_ports[pn].sc_xfer[UMCS7840_BULK_RD_EP]);
+}
+
+static void
+umcs7840_start_write(struct ucom_softc *ucom)
+{
+ struct umcs7840_softc *sc = ucom->sc_parent;
+ uint8_t pn = ucom->sc_portno;
+
+ /* Start interrupt transfer */
+ usbd_transfer_start(sc->sc_intr_xfer);
+
+ /* Start write transfer */
+ usbd_transfer_start(sc->sc_ports[pn].sc_xfer[UMCS7840_BULK_WR_EP]);
+}
+
+static void
+umcs7840_stop_write(struct ucom_softc *ucom)
+{
+ struct umcs7840_softc *sc = ucom->sc_parent;
+ uint8_t pn = ucom->sc_portno;
+
+ /* Stop write transfer */
+ usbd_transfer_stop(sc->sc_ports[pn].sc_xfer[UMCS7840_BULK_WR_EP]);
+}
+
+static void
+umcs7840_cfg_get_status(struct ucom_softc *ucom, uint8_t *lsr, uint8_t *msr)
+{
+ struct umcs7840_softc *sc = ucom->sc_parent;
+
+ *lsr = sc->sc_ports[ucom->sc_portno].sc_lsr;
+ *msr = sc->sc_ports[ucom->sc_portno].sc_msr;
+ DPRINTF("Port %d status: LSR=%02x MSR=%02x\n", ucom->sc_portno, *lsr, *msr);
+}
+
+static void
+umcs7840_intr_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ struct umcs7840_softc *sc = usbd_xfer_softc(xfer);
+ struct usb_page_cache *pc;
+ uint8_t buf[13];
+ int actlen;
+ int subunit;
+
+ usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
+
+ switch (USB_GET_STATE(xfer)) {
+ case USB_ST_TRANSFERRED:
+ if (actlen == 5 || actlen == 13) {
+ pc = usbd_xfer_get_frame(xfer, 0);
+ usbd_copy_out(pc, 0, buf, actlen);
+ /* Check status of all ports */
+ for (subunit = 0; subunit < sc->sc_numports; ++subunit) {
+ uint8_t pn = sc->sc_ucom[subunit].sc_portno;
+
+ if (buf[pn] & MCS7840_UART_ISR_NOPENDING)
+ continue;
+ DPRINTF("Port %d has pending interrupt: %02x (FIFO: %02x)\n", pn, buf[pn] & MCS7840_UART_ISR_INTMASK, buf[pn] & (~MCS7840_UART_ISR_INTMASK));
+ switch (buf[pn] & MCS7840_UART_ISR_INTMASK) {
+ case MCS7840_UART_ISR_RXERR:
+ case MCS7840_UART_ISR_RXHASDATA:
+ case MCS7840_UART_ISR_RXTIMEOUT:
+ /* Read new LSR */
+ if (umcs7840_get_UART_reg_sync(sc, pn, MCS7840_UART_REG_LSR, &sc->sc_ports[pn].sc_lsr))
+ break; /* Inner switch */
+ ucom_status_change(&sc->sc_ucom[subunit]);
+ /* Inner switch */
+ break;
+ case MCS7840_UART_ISR_TXEMPTY:
+ /* Do nothing */
+ break; /* Inner switch */
+ case MCS7840_UART_ISR_MSCHANGE:
+ /* Read new MSR */
+ if (umcs7840_get_UART_reg_sync(sc, pn, MCS7840_UART_REG_MSR, &sc->sc_ports[pn].sc_msr))
+ break; /* Inner switch */
+ DPRINTF("Port %d: new MSR %02x\n", pn, sc->sc_ports[pn].sc_msr);
+ ucom_status_change(&sc->sc_ucom[subunit]);
+ break;
+ }
+ }
+ } else
+			device_printf(sc->sc_dev, "Invalid interrupt data length %d\n", actlen);
+ /* FALLTHROUGH */
+ case USB_ST_SETUP:
+tr_setup:
+ usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
+ usbd_transfer_submit(xfer);
+ return;
+
+ default: /* Error */
+ if (error != USB_ERR_CANCELLED) {
+ /* try to clear stall first */
+ usbd_xfer_set_stall(xfer);
+ goto tr_setup;
+ }
+ return;
+ }
+}
+
+static void
+umcs7840_read_callback1(struct usb_xfer *xfer, usb_error_t error)
+{
+ umcs7840_read_callbackN(xfer, error, 0);
+}
+
+static void
+umcs7840_read_callback2(struct usb_xfer *xfer, usb_error_t error)
+{
+ umcs7840_read_callbackN(xfer, error, 1);
+}
+
+static void
+umcs7840_read_callback3(struct usb_xfer *xfer, usb_error_t error)
+{
+ umcs7840_read_callbackN(xfer, error, 2);
+}
+
+static void
+umcs7840_read_callback4(struct usb_xfer *xfer, usb_error_t error)
+{
+ umcs7840_read_callbackN(xfer, error, 3);
+}
+
+static void
+umcs7840_read_callbackN(struct usb_xfer *xfer, usb_error_t error, uint8_t subunit)
+{
+ struct umcs7840_softc *sc = usbd_xfer_softc(xfer);
+ struct ucom_softc *ucom = &sc->sc_ucom[subunit];
+ struct usb_page_cache *pc;
+ int actlen;
+
+ usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
+
+ DPRINTF("Port %d read, state = %d, data length = %d\n", ucom->sc_portno, USB_GET_STATE(xfer), actlen);
+
+ switch (USB_GET_STATE(xfer)) {
+ case USB_ST_TRANSFERRED:
+ pc = usbd_xfer_get_frame(xfer, 0);
+ ucom_put_data(ucom, pc, 0, actlen);
+ /* FALLTHROUGH */
+ case USB_ST_SETUP:
+tr_setup:
+ usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
+ usbd_transfer_submit(xfer);
+ return;
+
+ default: /* Error */
+ if (error != USB_ERR_CANCELLED) {
+ /* try to clear stall first */
+ usbd_xfer_set_stall(xfer);
+ goto tr_setup;
+ }
+ return;
+ }
+}
+
+static void
+umcs7840_write_callback1(struct usb_xfer *xfer, usb_error_t error)
+{
+ umcs7840_write_callbackN(xfer, error, 0);
+}
+
+static void
+umcs7840_write_callback2(struct usb_xfer *xfer, usb_error_t error)
+{
+ umcs7840_write_callbackN(xfer, error, 1);
+}
+
+static void
+umcs7840_write_callback3(struct usb_xfer *xfer, usb_error_t error)
+{
+ umcs7840_write_callbackN(xfer, error, 2);
+}
+
+static void
+umcs7840_write_callback4(struct usb_xfer *xfer, usb_error_t error)
+{
+ umcs7840_write_callbackN(xfer, error, 3);
+}
+
+static void
+umcs7840_write_callbackN(struct usb_xfer *xfer, usb_error_t error, uint8_t subunit)
+{
+ struct umcs7840_softc *sc = usbd_xfer_softc(xfer);
+ struct ucom_softc *ucom = &sc->sc_ucom[subunit];
+ struct usb_page_cache *pc;
+ uint32_t actlen;
+
+ DPRINTF("Port %d write, state = %d\n", ucom->sc_portno, USB_GET_STATE(xfer));
+
+ switch (USB_GET_STATE(xfer)) {
+ case USB_ST_SETUP:
+ case USB_ST_TRANSFERRED:
+tr_setup:
+ pc = usbd_xfer_get_frame(xfer, 0);
+ if (ucom_get_data(ucom, pc, 0, usbd_xfer_max_len(xfer), &actlen)) {
+ DPRINTF("Port %d write, has %d bytes\n", ucom->sc_portno, actlen);
+ usbd_xfer_set_frame_len(xfer, 0, actlen);
+ usbd_transfer_submit(xfer);
+ }
+ return;
+
+ default: /* Error */
+ if (error != USB_ERR_CANCELLED) {
+ /* try to clear stall first */
+ usbd_xfer_set_stall(xfer);
+ goto tr_setup;
+ }
+ return;
+ }
+}
+
+static void
+umcs7840_poll(struct ucom_softc *ucom)
+{
+ struct umcs7840_softc *sc = ucom->sc_parent;
+
+ DPRINTF("Port %d poll\n", ucom->sc_portno);
+ usbd_transfer_poll(sc->sc_ports[ucom->sc_portno].sc_xfer, UMCS7840_N_TRANSFERS);
+ usbd_transfer_poll(&sc->sc_intr_xfer, 1);
+}
+
+static usb_error_t
+umcs7840_get_reg_sync(struct umcs7840_softc *sc, uint8_t reg, uint8_t *data)
+{
+ struct usb_device_request req;
+ usb_error_t err;
+ uint16_t len;
+
+ req.bmRequestType = UT_READ_VENDOR_DEVICE;
+ req.bRequest = MCS7840_RDREQ;
+ USETW(req.wValue, 0);
+ USETW(req.wIndex, reg);
+ USETW(req.wLength, UMCS7840_READ_LENGTH);
+
+ err = usbd_do_request_proc(sc->sc_udev, &sc->sc_super_ucom.sc_tq, &req, (void *)data, 0, &len, UMCS7840_CTRL_TIMEOUT);
+ if (err == USB_ERR_NORMAL_COMPLETION && len != 1) {
+ device_printf(sc->sc_dev, "Reading register %d failed: invalid length %d\n", reg, len);
+ return (USB_ERR_INVAL);
+ } else if (err)
+ device_printf(sc->sc_dev, "Reading register %d failed: %s\n", reg, usbd_errstr(err));
+ return (err);
+}
+
+static usb_error_t
+umcs7840_set_reg_sync(struct umcs7840_softc *sc, uint8_t reg, uint8_t data)
+{
+ struct usb_device_request req;
+ usb_error_t err;
+
+ req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
+ req.bRequest = MCS7840_WRREQ;
+ USETW(req.wValue, data);
+ USETW(req.wIndex, reg);
+ USETW(req.wLength, 0);
+
+ err = usbd_do_request_proc(sc->sc_udev, &sc->sc_super_ucom.sc_tq, &req, NULL, 0, NULL, UMCS7840_CTRL_TIMEOUT);
+ if (err)
+ device_printf(sc->sc_dev, "Writing register %d failed: %s\n", reg, usbd_errstr(err));
+
+ return (err);
+}
+
+static usb_error_t
+umcs7840_get_UART_reg_sync(struct umcs7840_softc *sc, uint8_t portno, uint8_t reg, uint8_t *data)
+{
+ struct usb_device_request req;
+ uint16_t wVal;
+ usb_error_t err;
+ uint16_t len;
+
+ /* portno is port number */
+ wVal = ((uint16_t)(portno + 1)) << 8;
+
+ req.bmRequestType = UT_READ_VENDOR_DEVICE;
+ req.bRequest = MCS7840_RDREQ;
+ USETW(req.wValue, wVal);
+ USETW(req.wIndex, reg);
+ USETW(req.wLength, UMCS7840_READ_LENGTH);
+
+ err = usbd_do_request_proc(sc->sc_udev, &sc->sc_super_ucom.sc_tq, &req, (void *)data, 0, &len, UMCS7840_CTRL_TIMEOUT);
+ if (err == USB_ERR_NORMAL_COMPLETION && len != 1) {
+ device_printf(sc->sc_dev, "Reading UART%d register %d failed: invalid length %d\n", portno, reg, len);
+ return (USB_ERR_INVAL);
+ } else if (err)
+ device_printf(sc->sc_dev, "Reading UART%d register %d failed: %s\n", portno, reg, usbd_errstr(err));
+ return (err);
+}
+
+static usb_error_t
+umcs7840_set_UART_reg_sync(struct umcs7840_softc *sc, uint8_t portno, uint8_t reg, uint8_t data)
+{
+ struct usb_device_request req;
+ usb_error_t err;
+ uint16_t wVal;
+
+ /* portno is port number */
+ wVal = ((uint16_t)(portno + 1)) << 8 | data;
+
+ req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
+ req.bRequest = MCS7840_WRREQ;
+ USETW(req.wValue, wVal);
+ USETW(req.wIndex, reg);
+ USETW(req.wLength, 0);
+
+ err = usbd_do_request_proc(sc->sc_udev, &sc->sc_super_ucom.sc_tq, &req, NULL, 0, NULL, UMCS7840_CTRL_TIMEOUT);
+ if (err)
+ device_printf(sc->sc_dev, "Writing UART%d register %d failed: %s\n", portno, reg, usbd_errstr(err));
+ return (err);
+}
+
+static usb_error_t
+umcs7840_set_baudrate(struct umcs7840_softc *sc, uint8_t portno, uint32_t rate)
+{
+ usb_error_t err;
+ uint16_t divisor;
+ uint8_t clk;
+ uint8_t data;
+
+ if (umcs7840_calc_baudrate(rate, &divisor, &clk)) {
+ DPRINTF("Port %d bad speed: %d\n", portno, rate);
+ return (-1);
+ }
+ if (divisor == 0 || (clk & MCS7840_DEV_SPx_CLOCK_MASK) != clk) {
+ DPRINTF("Port %d bad speed calculation: %d\n", portno, rate);
+ return (-1);
+ }
+ DPRINTF("Port %d set speed: %d (%02x / %d)\n", portno, rate, clk, divisor);
+
+	/* Set the clock source for standard baud frequencies */
+ err = umcs7840_get_reg_sync(sc, umcs7840_port_registers[portno].reg_sp, &data);
+ if (err)
+ return (err);
+ data &= MCS7840_DEV_SPx_CLOCK_MASK;
+ data |= clk;
+ err = umcs7840_set_reg_sync(sc, umcs7840_port_registers[portno].reg_sp, data);
+ if (err)
+ return (err);
+
+ /* Set divider */
+ sc->sc_ports[portno].sc_lcr |= MCS7840_UART_LCR_DIVISORS;
+ err = umcs7840_set_UART_reg_sync(sc, portno, MCS7840_UART_REG_LCR, sc->sc_ports[portno].sc_lcr);
+ if (err)
+ return (err);
+
+ err = umcs7840_set_UART_reg_sync(sc, portno, MCS7840_UART_REG_DLL, (uint8_t)(divisor & 0xff));
+ if (err)
+ return (err);
+ err = umcs7840_set_UART_reg_sync(sc, portno, MCS7840_UART_REG_DLM, (uint8_t)((divisor >> 8) & 0xff));
+ if (err)
+ return (err);
+
+ /* Turn off access to DLL/DLM registers of UART */
+ sc->sc_ports[portno].sc_lcr &= ~MCS7840_UART_LCR_DIVISORS;
+ err = umcs7840_set_UART_reg_sync(sc, portno, MCS7840_UART_REG_LCR, sc->sc_ports[portno].sc_lcr);
+ if (err)
+ return (err);
+ return (0);
+}
+
+/* Maximum speeds for standard frequencies, when the PLL is not used */
+static const uint32_t umcs7840_baudrate_divisors[] = {0, 115200, 230400, 403200, 460800, 806400, 921600, 1572864, 3145728,};
+static const uint8_t umcs7840_baudrate_divisors_len = sizeof(umcs7840_baudrate_divisors) / sizeof(umcs7840_baudrate_divisors[0]);
+
+static usb_error_t
+umcs7840_calc_baudrate(uint32_t rate, uint16_t *divisor, uint8_t *clk)
+{
+ uint8_t i = 0;
+
+ if (rate > umcs7840_baudrate_divisors[umcs7840_baudrate_divisors_len - 1])
+ return (-1);
+
+ for (i = 0; i < umcs7840_baudrate_divisors_len - 1 &&
+ !(rate > umcs7840_baudrate_divisors[i] && rate <= umcs7840_baudrate_divisors[i + 1]); ++i);
+ *divisor = umcs7840_baudrate_divisors[i + 1] / rate;
+ /* 0x00 .. 0x70 */
+ *clk = i << MCS7840_DEV_SPx_CLOCK_SHIFT;
+ return (0);
+}
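
The search above picks the smallest standard base frequency whose range contains the requested rate, then derives the 16-bit divisor and the clock-select code from the table index. A standalone restatement with two worked cases; the shift value is an assumption for illustration, not the header's constant:

#include <assert.h>
#include <stdint.h>

static const uint32_t bases[] =
    {0, 115200, 230400, 403200, 460800, 806400, 921600, 1572864, 3145728};
#define NBASES (sizeof(bases) / sizeof(bases[0]))
#define CLOCK_SHIFT 4	/* assumed stand-in for MCS7840_DEV_SPx_CLOCK_SHIFT */

static int
calc_baudrate(uint32_t rate, uint16_t *divisor, uint8_t *clk)
{
	uint8_t i;

	/* Unlike the driver, guard rate == 0 here instead of in the caller. */
	if (rate == 0 || rate > bases[NBASES - 1])
		return (-1);
	for (i = 0; i < NBASES - 1 &&
	    !(rate > bases[i] && rate <= bases[i + 1]); ++i)
		;
	*divisor = bases[i + 1] / rate;
	*clk = i << CLOCK_SHIFT;
	return (0);
}

int
main(void)
{
	uint16_t div;
	uint8_t clk;

	/* 9600 falls in (0, 115200]: clock source 0, divisor 115200/9600. */
	assert(calc_baudrate(9600, &div, &clk) == 0 && div == 12 && clk == 0);
	/* 460800 falls in (403200, 460800]: clock source 3, divisor 1. */
	assert(calc_baudrate(460800, &div, &clk) == 0 && div == 1 &&
	    clk == 3 << CLOCK_SHIFT);
	return (0);
}
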
diff --git a/sys/dev/usb/serial/umcs.h b/sys/dev/usb/serial/umcs.h
new file mode 100644
index 0000000..310b4af
--- /dev/null
+++ b/sys/dev/usb/serial/umcs.h
@@ -0,0 +1,644 @@
+/* $FreeBSD$ */
+/*-
+ * Copyright (c) 2010 Lev Serebryakov <lev@FreeBSD.org>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#ifndef _UMCS7840_H_
+#define _UMCS7840_H_
+
+#define UMCS7840_MAX_PORTS 4
+
+#define UMCS7840_READ_LENGTH 1 /* bytes */
+#define UMCS7840_CTRL_TIMEOUT 500 /* ms */
+
+/* Read/Write registers vendor commands */
+#define MCS7840_RDREQ 0x0d
+#define MCS7840_WRREQ 0x0e
+
+/* Read/Write EEPROM values */
+#define MCS7840_EEPROM_RW_WVALUE 0x0900
+
+/*
+ * All these registers are documented only in full datasheet,
+ * which can be requested from MosChip tech support.
+ */
+#define MCS7840_DEV_REG_SP1 0x00 /* Options for UART 1, R/W */
+#define MCS7840_DEV_REG_CONTROL1 0x01 /* Control bits for UART 1,
+ * R/W */
+#define MCS7840_DEV_REG_PINPONGHIGH 0x02 /* High bits of ping-pong
+ * register, R/W */
+#define MCS7840_DEV_REG_PINPONGLOW 0x03 /* Low bits of ping-pong
+ * register, R/W */
+/* DCRx_1 registers go here (see below; they are documented) */
+#define MCS7840_DEV_REG_GPIO 0x07 /* GPIO_0 and GPIO_1 bits,
+ * undocumented, see notes
+ * below R/W */
+#define MCS7840_DEV_REG_SP2 0x08 /* Options for UART 2, R/W */
+#define MCS7840_DEV_REG_CONTROL2 0x09 /* Control bits for UART 2,
+ * R/W */
+#define MCS7840_DEV_REG_SP3 0x0a /* Options for UART 3, R/W */
+#define MCS7840_DEV_REG_CONTROL3 0x0b /* Control bits for UART 3,
+ * R/W */
+#define MCS7840_DEV_REG_SP4 0x0c /* Options for UART 4, R/W */
+#define MCS7840_DEV_REG_CONTROL4 0x0d /* Control bits for UART 4,
+ * R/W */
+#define MCS7840_DEV_REG_PLL_DIV_M 0x0e /* Pre-divider for PLL, R/W */
+#define MCS7840_DEV_REG_UNKNOWN1 0x0f /* NOT MENTIONED AND NOT USED */
+#define MCS7840_DEV_REG_PLL_DIV_N 0x10 /* Loop divider for PLL, R/W */
+#define MCS7840_DEV_REG_UNKNOWN2 0x11 /* NOT MENTIONED AND NOT USED */
+#define MCS7840_DEV_REG_CLOCK_MUX 0x12 /* PLL input clock & Interrupt
+ * endpoint control, R/W */
+#define MCS7840_DEV_REG_CLOCK_SELECT12 0x13 /* Clock source for ports 1 &
+ * 2, R/W */
+#define MCS7840_DEV_REG_CLOCK_SELECT34 0x14 /* Clock source for ports 3 &
+ * 4, R/W */
+#define MCS7840_DEV_REG_UNKNOWN3 0x15 /* NOT MENTIONED AND NOT USED */
+/* DCRx_2-DCRx_4 registers go here (see below; they are documented) */
+#define MCS7840_DEV_REG_UNKNOWN4 0x1f /* NOT MENTIONED AND NOT USED */
+#define MCS7840_DEV_REG_UNKNOWN5 0x20 /* NOT MENTIONED AND NOT USED */
+#define MCS7840_DEV_REG_UNKNOWN6 0x21 /* NOT MENTIONED AND NOT USED */
+#define MCS7840_DEV_REG_UNKNOWN7 0x22 /* NOT MENTIONED AND NOT USED */
+#define MCS7840_DEV_REG_UNKNOWN8 0x23 /* NOT MENTIONED AND NOT USED */
+#define MCS7840_DEV_REG_UNKNOWN9 0x24 /* NOT MENTIONED AND NOT USED */
+#define MCS7840_DEV_REG_UNKNOWNA 0x25 /* NOT MENTIONED AND NOT USED */
+#define MCS7840_DEV_REG_UNKNOWNB 0x26 /* NOT MENTIONED AND NOT USED */
+#define MCS7840_DEV_REG_UNKNOWNC 0x27 /* NOT MENTIONED AND NOT USED */
+#define MCS7840_DEV_REG_UNKNOWND 0x28 /* NOT MENTIONED AND NOT USED */
+#define MCS7840_DEV_REG_UNKNOWNE 0x29 /* NOT MENTIONED AND NOT USED */
+#define MCS7840_DEV_REG_UNKNOWNF 0x2a /* NOT MENTIONED AND NOT USED */
+#define MCS7840_DEV_REG_MODE 0x2b /* Hardware configuration,
+ * R/Only */
+#define MCS7840_DEV_REG_SP1_ICG 0x2c /* Inter character gap
+ * configuration for Port 1,
+ * R/W */
+#define MCS7840_DEV_REG_SP2_ICG 0x2d /* Inter character gap
+ * configuration for Port 2,
+ * R/W */
+#define MCS7840_DEV_REG_SP3_ICG 0x2e /* Inter character gap
+ * configuration for Port 3,
+ * R/W */
+#define MCS7840_DEV_REG_SP4_ICG 0x2f /* Inter character gap
+ * configuration for Port 4,
+ * R/W */
+#define MCS7840_DEV_REG_RX_SAMPLING12 0x30 /* RX sampling for ports 1 &
+ * 2, R/W */
+#define MCS7840_DEV_REG_RX_SAMPLING34 0x31 /* RX sampling for ports 3 &
+ * 4, R/W */
+#define MCS7840_DEV_REG_BI_FIFO_STAT1 0x32 /* Bulk-In FIFO Stat for Port
+ * 1, contains number of
+ * available bytes, R/Only */
+#define MCS7840_DEV_REG_BO_FIFO_STAT1 0x33 /* Bulk-out FIFO Stat for Port
+ * 1, contains number of
+ * available bytes, R/Only */
+#define MCS7840_DEV_REG_BI_FIFO_STAT2 0x34 /* Bulk-In FIFO Stat for Port
+ * 2, contains number of
+ * available bytes, R/Only */
+#define MCS7840_DEV_REG_BO_FIFO_STAT2 0x35 /* Bulk-out FIFO Stat for Port
+ * 2, contains number of
+ * available bytes, R/Only */
+#define MCS7840_DEV_REG_BI_FIFO_STAT3 0x36 /* Bulk-In FIFO Stat for Port
+ * 3, contains number of
+ * available bytes, R/Only */
+#define MCS7840_DEV_REG_BO_FIFO_STAT3 0x37 /* Bulk-out FIFO Stat for Port
+ * 3, contains number of
+ * available bytes, R/Only */
+#define MCS7840_DEV_REG_BI_FIFO_STAT4 0x38 /* Bulk-In FIFO Stat for Port
+ * 4, contains number of
+ * available bytes, R/Only */
+#define MCS7840_DEV_REG_BO_FIFO_STAT4 0x39 /* Bulk-out FIFO Stat for Port
+ * 4, contains number of
+ * available bytes, R/Only */
+#define MCS7840_DEV_REG_ZERO_PERIOD1 0x3a /* Period between zero out
+ * frames for Port 1, R/W */
+#define MCS7840_DEV_REG_ZERO_PERIOD2 0x3b /* Period between zero out
+ * frames for Port 2, R/W */
+#define MCS7840_DEV_REG_ZERO_PERIOD3 0x3c /* Period between zero out
+ * frames for Port 3, R/W */
+#define MCS7840_DEV_REG_ZERO_PERIOD4 0x3d /* Period between zero out
+ * frames for Port 4, R/W */
+#define MCS7840_DEV_REG_ZERO_ENABLE 0x3e /* Enable/disable of zero out
+ * frames, R/W */
+#define MCS7840_DEV_REG_THR_VAL_LOW1 0x3f /* Low 8 bits of threshold
+ * value for Bulk-Out for Port
+ * 1, R/W */
+#define MCS7840_DEV_REG_THR_VAL_HIGH1 0x40 /* High 1 bit of threshold
+ * value for Bulk-Out and
+ * enable flag for Port 1, R/W */
+#define MCS7840_DEV_REG_THR_VAL_LOW2 0x41 /* Low 8 bits of threshold
+ * value for Bulk-Out for Port
+ * 2, R/W */
+#define MCS7840_DEV_REG_THR_VAL_HIGH2 0x42 /* High 1 bit of threshold
+ * value for Bulk-Out and
+ * enable flag for Port 2, R/W */
+#define MCS7840_DEV_REG_THR_VAL_LOW3 0x43 /* Low 8 bits of threshold
+ * value for Bulk-Out for Port
+ * 3, R/W */
+#define MCS7840_DEV_REG_THR_VAL_HIGH3 0x44 /* High 1 bit of threshold
+ * value for Bulk-Out and
+ * enable flag for Port 3, R/W */
+#define MCS7840_DEV_REG_THR_VAL_LOW4 0x45 /* Low 8 bits of threshold
+ * value for Bulk-Out for Port
+ * 4, R/W */
+#define MCS7840_DEV_REG_THR_VAL_HIGH4 0x46 /* High 1 bit of threshold
+ * value for Bulk-Out and
+ * enable flag for Port 4, R/W */
+
+/* Bits for SPx registers */
+#define MCS7840_DEV_SPx_LOOP_PIPES 0x01 /* Loop Bulk-Out FIFO to the
+ * Bulk-In FIFO, default = 0 */
+#define MCS7840_DEV_SPx_SKIP_ERR_DATA 0x02 /* Drop data bytes from UART,
+ * which were received with
+ * errors, default = 0 */
+#define MCS7840_DEV_SPx_RESET_OUT_FIFO 0x04 /* Reset Bulk-Out FIFO */
+#define MCS7840_DEV_SPx_RESET_IN_FIFO 0x08 /* Reset Bulk-In FIFO */
+#define MCS7840_DEV_SPx_CLOCK_MASK 0x70 /* Mask to extract Baud CLK
+ * source */
+#define MCS7840_DEV_SPx_CLOCK_X1 0x00 /* CLK = 1.8432Mhz, max speed
+ * = 115200 bps, default */
+#define MCS7840_DEV_SPx_CLOCK_X2 0x10 /* CLK = 3.6864Mhz, max speed
+ * = 230400 bps */
+#define MCS7840_DEV_SPx_CLOCK_X35 0x20 /* CLK = 6.4512Mhz, max speed
+ * = 403200 bps */
+#define MCS7840_DEV_SPx_CLOCK_X4 0x30 /* CLK = 7.3728Mhz, max speed
+ * = 460800 bps */
+#define MCS7840_DEV_SPx_CLOCK_X7 0x40 /* CLK = 12.9024Mhz, max speed
+ * = 806400 bps */
+#define MCS7840_DEV_SPx_CLOCK_X8 0x50 /* CLK = 14.7456Mhz, max speed
+ * = 921600 bps */
+#define MCS7840_DEV_SPx_CLOCK_24MHZ 0x60 /* CLK = 24.0000Mhz, max speed
+ * = 1.5 Mbps */
+#define MCS7840_DEV_SPx_CLOCK_48MHZ 0x70 /* CLK = 48.0000Mhz, max speed
+ * = 3.0 Mbps */
+#define MCS7840_DEV_SPx_CLOCK_SHIFT 4 /* Value 0..7 can be shifted
+ * to get clock value */
+#define MCS7840_DEV_SPx_UART_RESET 0x80 /* Reset UART */
+
+/* Bits for CONTROLx registers */
+#define MCS7840_DEV_CONTROLx_HWFC 0x01 /* Enable hardware flow
+ * control (when powered
+ * down? It is unclear
+ * in the documents),
+ * default = 0 */
+#define MCS7840_DEV_CONTROLx_UNUSED1 0x02 /* Reserved */
+#define MCS7840_DEV_CONTROLx_CTS_ENABLE 0x04 /* CTS changes are
+ * translated to MSR,
+ * default = 0 */
+#define MCS7840_DEV_CONTROLx_UNUSED2 0x08 /* Reserved for ports
+ * 2,3,4 */
+#define MCS7840_DEV_CONTROL1_DRIVER_DONE 0x08 /* USB enumeration is
+ * finished, USB
+ * enumeration memory
+ * can be used as FIFOs */
+#define MCS7840_DEV_CONTROLx_RX_NEGATE 0x10 /* Negate RX input,
+ * works for IrDA mode
+ * only, default = 0 */
+#define MCS7840_DEV_CONTROLx_RX_DISABLE 0x20 /* Disable RX logic,
+ * works only for
+ * RS-232/RS-485 mode,
+ * default = 0 */
+#define MCS7840_DEV_CONTROLx_FSM_CONTROL 0x40 /* Disable RX FSM when
+ * TX is in progress,
+ * works for IrDA mode
+ * only, default = 0 */
+#define MCS7840_DEV_CONTROLx_UNUSED3 0x80 /* Reserved */
+
+/*
+ * Bits for PINPONGx registers
+ * These registers control how often the two input buffers
+ * for the Bulk-In FIFOs are swapped. One of the buffers is used
+ * for the USB transfer, the other for receiving data from the UART.
+ * The exact meaning of the 15-bit value in these registers is unknown.
+ */
+#define MCS7840_DEV_PINPONGHIGH_MULT 128 /* Weight of the PINPONGHIGH
+ * value (PINPONGLOW holds
+ * only 7 bits) */
+#define MCS7840_DEV_PINPONGLOW_BITS 7 /* Only 7 bits in PINPONGLOW
+ * register */
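
A minimal sketch of how a 15-bit ping-pong value would be split across the two registers, inferred from the multiplier above (the helper name is hypothetical; the hardware meaning of the value itself remains unknown):

	static void
	umcs7840_split_pingpong(uint16_t value, uint8_t *high, uint8_t *low)
	{
		/* Low 7 bits go to PINPONGLOW, the remainder to PINPONGHIGH. */
		*low = value % MCS7840_DEV_PINPONGHIGH_MULT;
		*high = value / MCS7840_DEV_PINPONGHIGH_MULT;
	}
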
+
+/*
+ * THIS ONE IS UNDOCUMENTED IN THE FULL DATASHEET, but e-mail from tech
+ * support confirms that it is the register for GPIO_0 and GPIO_1 data
+ * input/output. The chip has 2 GPIOs, but the first one (lower bit) MUST
+ * be used by device authors as a "number of ports" indicator, grounded
+ * (0) for two-port devices and pulled up to 1 for four-port devices.
+ */
+#define MCS7840_DEV_GPIO_4PORTS 0x01 /* Device has 4 ports
+ * configured */
+#define MCS7840_DEV_GPIO_GPIO_0 0x01 /* The same as above */
+#define MCS7840_DEV_GPIO_GPIO_1 0x02 /* GPIO_1 data */
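
Accordingly, a driver could derive the port count from a GPIO register read (hypothetical helper, shown only to make the convention above concrete):

	static uint8_t
	umcs7840_port_count(uint8_t gpio_val)
	{
		/* GPIO_0 is grounded on 2-port and pulled up on 4-port devices. */
		return ((gpio_val & MCS7840_DEV_GPIO_4PORTS) ? 4 : 2);
	}
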
+
+/*
+ * Constants for PLL dividers
+ * The output frequency of the PLL is:
+ * Fout = (N/M) * Fin.
+ * The default PLL input frequency Fin is 12 MHz (on-chip).
+ */
+#define MCS7840_DEV_PLL_DIV_M_BITS 6 /* Number of useful bits for M
+ * divider */
+#define MCS7840_DEV_PLL_DIV_M_MASK 0x3f /* Mask for M divider */
+#define MCS7840_DEV_PLL_DIV_M_MIN 1 /* Minimum value for M, 0 is
+ * forbidden */
+#define MCS7840_DEV_PLL_DIV_M_DEF 1 /* Default value for M */
+#define MCS7840_DEV_PLL_DIV_M_MAX 63 /* Maximum value for M */
+#define MCS7840_DEV_PLL_DIV_N_BITS 6 /* Number of useful bits for N
+ * divider */
+#define MCS7840_DEV_PLL_DIV_N_MASK 0x3f /* Mask for N divider */
+#define MCS7840_DEV_PLL_DIV_N_MIN 1 /* Minimum value for N, 0 is
+ * forbidden */
+#define MCS7840_DEV_PLL_DIV_N_DEF 8 /* Default value for N */
+#define MCS7840_DEV_PLL_DIV_N_MAX 63 /* Maximum value for N */
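
With the default dividers this works out to Fout = (8 / 1) * 12 MHz = 96 MHz, matching the 96 MHz clock sources in the CLOCK_SELECT values below. A hypothetical helper, shown only to make the formula concrete:

	static uint32_t
	umcs7840_pll_fout(uint8_t m, uint8_t n)
	{
		const uint32_t fin = 12000000;	/* default on-chip Fin, in Hz */

		/* Fout = (N / M) * Fin; N and M must be in [1, 63]. */
		return ((uint32_t)n * fin / m);
	}
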
+
+/* Bits for CLOCK_MUX register */
+#define MCS7840_DEV_CLOCK_MUX_INPUTMASK 0x03 /* Mask to extract PLL clock
+ * input */
+#define MCS7840_DEV_CLOCK_MUX_IN12MHZ 0x00 /* 12Mhz PLL input, default */
+#define MCS7840_DEV_CLOCK_MUX_INEXTRN 0x01 /* External (device-dependent)
+ * PLL input */
+#define MCS7840_DEV_CLOCK_MUX_INRSV1 0x02 /* Reserved */
+#define MCS7840_DEV_CLOCK_MUX_INRSV2 0x03 /* Reserved */
+#define MCS7840_DEV_CLOCK_MUX_PLLHIGH 0x04 /* 0 = PLL Output is
+ * 20MHz-100MHz (default), 1 =
+ * 100MHz-300MHz range */
+#define MCS7840_DEV_CLOCK_MUX_INTRFIFOS 0x08 /* Enable 8 additional bytes
+ * for the Interrupt USB pipe
+ * with USB FIFO statuses,
+ * default = 0 */
+#define MCS7840_DEV_CLOCK_MUX_RESERVED1 0x10 /* Unused */
+#define MCS7840_DEV_CLOCK_MUX_RESERVED2 0x20 /* Unused */
+#define MCS7840_DEV_CLOCK_MUX_RESERVED3 0x40 /* Unused */
+#define MCS7840_DEV_CLOCK_MUX_RESERVED4 0x80 /* Unused */
+
+/* Bits for CLOCK_SELECTxx registers */
+#define MCS7840_DEV_CLOCK_SELECT1_MASK 0x07 /* Bits for port 1 in
+ * CLOCK_SELECT12 */
+#define MCS7840_DEV_CLOCK_SELECT1_SHIFT 0 /* Shift for port 1 in
+ * CLOCK_SELECT12 */
+#define MCS7840_DEV_CLOCK_SELECT2_MASK 0x38 /* Bits for port 2 in
+ * CLOCK_SELECT12 */
+#define MCS7840_DEV_CLOCK_SELECT2_SHIFT 3 /* Shift for port 2 in
+ * CLOCK_SELECT12 */
+#define MCS7840_DEV_CLOCK_SELECT3_MASK 0x07 /* Bits for port 3 in
+ * CLOCK_SELECT34 */
+#define MCS7840_DEV_CLOCK_SELECT3_SHIFT 0 /* Shift for port 3 in
+ * CLOCK_SELECT34 */
+#define MCS7840_DEV_CLOCK_SELECT4_MASK 0x38 /* Bits for port 4 in
+ * CLOCK_SELECT34 */
+#define MCS7840_DEV_CLOCK_SELECT4_SHIFT 3 /* Shift for port 4 in
+ * CLOCK_SELECT34 */
+#define MCS7840_DEV_CLOCK_SELECT_STD 0x00 /* STANDARD baudrate derived
+ * from 96Mhz, default for all
+ * ports */
+#define MCS7840_DEV_CLOCK_SELECT_30MHZ 0x01 /* 30Mhz */
+#define MCS7840_DEV_CLOCK_SELECT_96MHZ 0x02 /* 96Mhz direct */
+#define MCS7840_DEV_CLOCK_SELECT_120MHZ 0x03 /* 120Mhz */
+#define MCS7840_DEV_CLOCK_SELECT_PLL 0x04 /* PLL output (see the M and N
+ * dividers above) */
+#define MCS7840_DEV_CLOCK_SELECT_EXT 0x05 /* External clock input
+ * (device-dependent) */
+#define MCS7840_DEV_CLOCK_SELECT_RES1 0x06 /* Unused */
+#define MCS7840_DEV_CLOCK_SELECT_RES2 0x07 /* Unused */
+
+/* Bits for MODE register */
+#define MCS7840_DEV_MODE_RESERVED1 0x01 /* Unused */
+#define MCS7840_DEV_MODE_RESET 0x02 /* 0: RESET = Active High
+ * (default), 1: Reserved (?) */
+#define MCS7840_DEV_MODE_SER_PRSNT 0x04 /* 0: Reserved, 1: Do not use
+ * hardcoded values (default)
+ * (?) */
+#define MCS7840_DEV_MODE_PLLBYPASS 0x08 /* 1: PLL output is bypassed,
+ * default = 0 */
+#define MCS7840_DEV_MODE_PORBYPASS 0x10 /* 1: Power-On Reset is
+ * bypassed, default = 0 */
+#define MCS7840_DEV_MODE_SELECT24S 0x20 /* 0: 4 Serial Ports / IrDA
+ * active, 1: 2 Serial Ports /
+ * IrDA active */
+#define MCS7840_DEV_MODE_EEPROMWR 0x40 /* EEPROM write is enabled,
+ * default */
+#define MCS7840_DEV_MODE_IRDA 0x80 /* IrDA mode is activated
+ * (could be turned on),
+ * default */
+
+/* Bits for SPx ICG */
+#define MCS7840_DEV_SPx_ICG_DEF 0x24 /* All 8 bits are used as the
+ * number of BAUD clocks of
+ * pause */
+
+/*
+ * Bits for RX_SAMPLINGxx registers
+ * These registers control when the bit value is sampled within
+ * the baud period.
+ * 0 is the very beginning of the period, 15 is the very end, and 7 is
+ * the middle.
+ */
+#define MCS7840_DEV_RX_SAMPLING1_MASK 0x0f /* Bits for port 1 in
+ * RX_SAMPLING12 */
+#define MCS7840_DEV_RX_SAMPLING1_SHIFT 0 /* Shift for port 1 in
+ * RX_SAMPLING12 */
+#define MCS7840_DEV_RX_SAMPLING2_MASK 0xf0 /* Bits for port 2 in
+ * RX_SAMPLING12 */
+#define MCS7840_DEV_RX_SAMPLING2_SHIFT 4 /* Shift for port 2 in
+ * RX_SAMPLING12 */
+#define MCS7840_DEV_RX_SAMPLING3_MASK 0x0f /* Bits for port 3 in
+ * RX_SAMPLING23 */
+#define MCS7840_DEV_RX_SAMPLING3_SHIFT 0 /* Shift for port 3 in
+ * RX_SAMPLING23 */
+#define MCS7840_DEV_RX_SAMPLING4_MASK 0xf0 /* Bits for port 4 in
+ * RX_SAMPLING23 */
+#define MCS7840_DEV_RX_SAMPLING4_SHIFT 4 /* Shift for port 4 in
+ * RX_SAMPLING23 */
+#define MCS7840_DEV_RX_SAMPLINGx_MIN 0 /* Min for any RX Sampling */
+#define MCS7840_DEV_RX_SAMPLINGx_DEF 7 /* Default for any RX
+ * Sampling, center of period */
+#define MCS7840_DEV_RX_SAMPLINGx_MAX 15 /* Max for any RX Sampling */
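
For example, placing a sampling point into the port-2 field of RX_SAMPLING12 is a plain shift-and-mask (hypothetical helper, not in the driver):

	static uint8_t
	umcs7840_rx_sampling_port2(uint8_t reg, uint8_t sample)
	{
		/* Keep the port-1 nibble, replace the port-2 nibble. */
		reg &= ~MCS7840_DEV_RX_SAMPLING2_MASK;
		reg |= (sample << MCS7840_DEV_RX_SAMPLING2_SHIFT) &
		    MCS7840_DEV_RX_SAMPLING2_MASK;
		return (reg);
	}
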
+
+/* Bits for ZERO_PERIODx */
+#define MCS7840_DEV_ZERO_PERIODx_DEF 20 /* Number of Bulk-in requests
+ * before sending a zero-sized
+ * reply */
+
+/* Bits for ZERO_ENABLE */
+#define MCS7840_DEV_ZERO_ENABLE_PORT1 0x01 /* Enable sending of
+ * zero-sized replies for port
+ * 1, default */
+#define MCS7840_DEV_ZERO_ENABLE_PORT2 0x02 /* Enable sending of
+ * zero-sized replies for port
+ * 2, default */
+#define MCS7840_DEV_ZERO_ENABLE_PORT3 0x04 /* Enable sending of
+ * zero-sized replies for port
+ * 3, default */
+#define MCS7840_DEV_ZERO_ENABLE_PORT4 0x08 /* Enable sending of
+ * zero-sized replies for port
+ * 4, default */
+
+/* Bits for THR_VAL_HIGHx */
+#define MCS7840_DEV_THR_VAL_HIGH_MASK 0x01 /* Only one bit is used */
+#define MCS7840_DEV_THR_VAL_HIGH_MUL 256 /* This single bit means "256" */
+#define MCS7840_DEV_THR_VAL_HIGH_SHIFT 8 /* Shift for the high bit of
+ * the threshold value */
+#define MCS7840_DEV_THR_VAL_HIGH_ENABLE 0x80 /* Enable threshold */
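
The effective 9-bit Bulk-Out threshold is therefore reassembled from the two registers as follows (illustrative sketch; the register reads themselves are assumed to happen elsewhere):

	static uint16_t
	umcs7840_threshold(uint8_t low, uint8_t high)
	{
		/* Bit 0 of THR_VAL_HIGHx is worth 256; bit 7 is the enable flag. */
		return (((uint16_t)(high & MCS7840_DEV_THR_VAL_HIGH_MASK) <<
		    MCS7840_DEV_THR_VAL_HIGH_SHIFT) | low);
	}
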
+
+/* These are documented in the "public" datasheet */
+#define MCS7840_DEV_REG_DCR0_1 0x04 /* Device control register 0 for Port
+ * 1, R/W */
+#define MCS7840_DEV_REG_DCR1_1 0x05 /* Device control register 1 for Port
+ * 1, R/W */
+#define MCS7840_DEV_REG_DCR2_1 0x06 /* Device control register 2 for Port
+ * 1, R/W */
+#define MCS7840_DEV_REG_DCR0_2 0x16 /* Device control register 0 for Port
+ * 2, R/W */
+#define MCS7840_DEV_REG_DCR1_2 0x17 /* Device control register 1 for Port
+ * 2, R/W */
+#define MCS7840_DEV_REG_DCR2_2 0x18 /* Device control register 2 for Port
+ * 2, R/W */
+#define MCS7840_DEV_REG_DCR0_3 0x19 /* Device control register 0 for Port
+ * 3, R/W */
+#define MCS7840_DEV_REG_DCR1_3 0x1a /* Device control register 1 for Port
+ * 3, R/W */
+#define MCS7840_DEV_REG_DCR2_3 0x1b /* Device control register 2 for Port
+ * 3, R/W */
+#define MCS7840_DEV_REG_DCR0_4 0x1c /* Device control register 0 for Port
+ * 4, R/W */
+#define MCS7840_DEV_REG_DCR1_4 0x1d /* Device control register 1 for Port
+ * 4, R/W */
+#define MCS7840_DEV_REG_DCR2_4 0x1e /* Device control register 2 for Port
+ * 4, R/W */
+
+/* Bits of DCR0 registers, documented in datasheet */
+#define MCS7840_DEV_DCR0_PWRSAVE 0x01 /* Shut down transceiver
+ * when USB Suspend is
+ * engaged, default = 1 */
+#define MCS7840_DEV_DCR0_RESERVED1 0x02 /* Unused */
+#define MCS7840_DEV_DCR0_GPIO_MODE_MASK 0x0c /* GPIO Mode bits, WORKS
+ * ONLY FOR PORT 1 */
+#define MCS7840_DEV_DCR0_GPIO_MODE_IN 0x00 /* GPIO Mode - Input
+ * (0b00), WORKS ONLY
+ * FOR PORT 1 */
+#define MCS7840_DEV_DCR0_GPIO_MODE_OUT 0x08 /* GPIO Mode - Output
+ * (0b10), WORKS ONLY
+ * FOR PORT 1 */
+#define MCS7840_DEV_DCR0_RTS_ACTIVE_HIGH 0x10 /* RTS Active is HIGH,
+ * default = 0 (low) */
+#define MCS7840_DEV_DCR0_RTS_AUTO 0x20 /* RTS is controlled by
+ * state of TX buffer,
+ * default = 0
+ * (controlled by MCR) */
+#define MCS7840_DEV_DCR0_IRDA 0x40 /* IrDA mode */
+#define MCS7840_DEV_DCR0_RESERVED2 0x80 /* Unused */
+
+/* Bits of DCR1 registers, documented in datasheet */
+#define MCS7840_DEV_DCR1_GPIO_CURRENT_MASK 0x03 /* Mask to extract GPIO
+ * current value, WORKS
+ * ONLY FOR PORT 1 */
+#define MCS7840_DEV_DCR1_GPIO_CURRENT_6MA 0x00 /* GPIO output current
+ * 6mA, WORKS ONLY FOR
+ * PORT 1 */
+#define MCS7840_DEV_DCR1_GPIO_CURRENT_8MA 0x01 /* GPIO output current
+ * 8mA, default, WORKS
+ * ONLY FOR PORT 1 */
+#define MCS7840_DEV_DCR1_GPIO_CURRENT_10MA 0x02 /* GPIO output current
+ * 10mA, WORKS ONLY FOR
+ * PORT 1 */
+#define MCS7840_DEV_DCR1_GPIO_CURRENT_12MA 0x03 /* GPIO output current
+ * 12mA, WORKS ONLY FOR
+ * PORT 1 */
+#define MCS7840_DEV_DCR1_UART_CURRENT_MASK 0x0c /* Mask to extract UART
+ * signals current value */
+#define MCS7840_DEV_DCR1_UART_CURRENT_6MA 0x00 /* UART output current
+ * 6mA */
+#define MCS7840_DEV_DCR1_UART_CURRENT_8MA 0x04 /* UART output current
+ * 8mA, default */
+#define MCS7840_DEV_DCR1_UART_CURRENT_10MA 0x08 /* UART output current
+ * 10mA */
+#define MCS7840_DEV_DCR1_UART_CURRENT_12MA 0x0c /* UART output current
+ * 12mA */
+#define MCS7840_DEV_DCR1_WAKEUP_DISABLE 0x10 /* Disable Remote USB
+ * Wakeup */
+#define MCS7840_DEV_DCR1_PLLPWRDOWN_DISABLE 0x20 /* Disable PLL power
+ * down when not needed,
+ * WORKS ONLY FOR PORT 1 */
+#define MCS7840_DEV_DCR1_LONG_INTERRUPT 0x40 /* Enable 13 bytes of
+ * interrupt data, with
+ * FIFO statistics,
+ * WORKS ONLY FOR PORT 1 */
+#define MCS7840_DEV_DCR1_RESERVED1 0x80 /* Unused */
+
+/*
+ * Bits of DCR2 registers, documented in datasheet
+ * Wakeup will work only if DCR0_IRDA = 0 (RS-xxx mode) and
+ * DCR1_WAKEUP_DISABLE = 0 (wakeup enabled).
+ */
+#define MCS7840_DEV_DCR2_WAKEUP_CTS 0x01 /* Wakeup on CTS change,
+ * default = 0 */
+#define MCS7840_DEV_DCR2_WAKEUP_DCD 0x02 /* Wakeup on DCD change,
+ * default = 0 */
+#define MCS7840_DEV_DCR2_WAKEUP_RI 0x04 /* Wakeup on RI change,
+ * default = 1 */
+#define MCS7840_DEV_DCR2_WAKEUP_DSR 0x08 /* Wakeup on DSR change,
+ * default = 0 */
+#define MCS7840_DEV_DCR2_WAKEUP_RXD 0x10 /* Wakeup on RX Data change,
+ * default = 0 */
+#define MCS7840_DEV_DCR2_WAKEUP_RESUME 0x20 /* Wakeup issues RESUME
+ * signal, DISCONNECT
+ * otherwise, default = 1 */
+#define MCS7840_DEV_DCR2_RESERVED1 0x40 /* Unused */
+#define MCS7840_DEV_DCR2_SHDN_POLARITY 0x80 /* 0: Pin 12 Active Low, 1:
+ * Pin 12 Active High, default
+ * = 0 */
+
+/* Interrupt endpoint bytes & bits */
+#define MCS7840_IEP_FIFO_STATUS_INDEX 5
+/*
+ * These can be calculated as "1 << (portnumber * 2)" for Bulk-out and
+ * "1 << (portnumber * 2 + 1)" for Bulk-in, with zero-based port numbers
+ */
+#define MCS7840_IEP_BO_PORT1_HASDATA 0x01
+#define MCS7840_IEP_BI_PORT1_HASDATA 0x02
+#define MCS7840_IEP_BO_PORT2_HASDATA 0x04
+#define MCS7840_IEP_BI_PORT2_HASDATA 0x08
+#define MCS7840_IEP_BO_PORT3_HASDATA 0x10
+#define MCS7840_IEP_BI_PORT3_HASDATA 0x20
+#define MCS7840_IEP_BO_PORT4_HASDATA 0x40
+#define MCS7840_IEP_BI_PORT4_HASDATA 0x80
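
Equivalently, for a zero-based port number n the fixed masks above can be computed at run time (illustrative macros, not part of the header):

	#define MCS7840_IEP_BO_HASDATA(n)	(1U << ((n) * 2))	/* Bulk-Out */
	#define MCS7840_IEP_BI_HASDATA(n)	(1U << ((n) * 2 + 1))	/* Bulk-In */
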
+
+/* Documented UART registers (fully compatible with 16550 UART) */
+#define MCS7840_UART_REG_THR 0x00 /* Transmitter Holding
+ * Register W/Only */
+#define MCS7840_UART_REG_RHR 0x00 /* Receiver Holding Register
+ * R/Only */
+#define MCS7840_UART_REG_IER 0x01 /* Interrupt enable register -
+ * R/W */
+#define MCS7840_UART_REG_FCR 0x02 /* FIFO Control register -
+ * W/Only */
+#define MCS7840_UART_REG_ISR 0x02 /* Interrupt Status Register
+ * R/Only */
+#define MCS7840_UART_REG_LCR 0x03 /* Line control register R/W */
+#define MCS7840_UART_REG_MCR 0x04 /* Modem control register R/W */
+#define MCS7840_UART_REG_LSR 0x05 /* Line status register R/Only */
+#define MCS7840_UART_REG_MSR 0x06 /* Modem status register
+ * R/Only */
+#define MCS7840_UART_REG_SCRATCHPAD 0x07 /* Scratch pad register */
+
+#define MCS7840_UART_REG_DLL 0x00 /* Low bits of BAUD divider */
+#define MCS7840_UART_REG_DLM 0x01 /* High bits of BAUD divider */
+
+/* IER bits */
+#define MCS7840_UART_IER_RXREADY 0x01 /* RX Ready interrupt mask */
+#define MCS7840_UART_IER_TXREADY 0x02 /* TX Ready interrupt mask */
+#define MCS7840_UART_IER_RXSTAT 0x04 /* RX Status interrupt mask */
+#define MCS7840_UART_IER_MODEM 0x08 /* Modem status change
+ * interrupt mask */
+#define MCS7840_UART_IER_SLEEP 0x10 /* SLEEP enable */
+
+/* FCR bits */
+#define MCS7840_UART_FCR_ENABLE 0x01 /* Enable FIFO */
+#define MCS7840_UART_FCR_FLUSHRHR 0x02 /* Flush RHR and FIFO */
+#define MCS7840_UART_FCR_FLUSHTHR 0x04 /* Flush THR and FIFO */
+#define MCS7840_UART_FCR_RTLMASK 0xa0 /* Mask to select RHR
+ * Interrupt Trigger level */
+#define MCS7840_UART_FCR_RTL_1_1 0x00 /* L1 = 1, L2 = 1 */
+#define MCS7840_UART_FCR_RTL_1_4 0x40 /* L1 = 1, L2 = 4 */
+#define MCS7840_UART_FCR_RTL_1_8 0x80 /* L1 = 1, L2 = 8 */
+#define MCS7840_UART_FCR_RTL_1_14 0xa0 /* L1 = 1, L2 = 14 */
+
+/* ISR bits */
+#define MCS7840_UART_ISR_NOPENDING 0x01 /* No interrupt pending */
+#define MCS7840_UART_ISR_INTMASK 0x3f /* Mask to select interrupt
+ * source */
+#define MCS7840_UART_ISR_RXERR 0x06 /* Receiver error */
+#define MCS7840_UART_ISR_RXHASDATA 0x04 /* Receiver has data */
+#define MCS7840_UART_ISR_RXTIMEOUT 0x0c /* Receiver timeout */
+#define MCS7840_UART_ISR_TXEMPTY 0x02 /* Transmitter empty */
+#define MCS7840_UART_ISR_MSCHANGE 0x00 /* Modem status change */
+
+/* LCR bits */
+#define MCS7840_UART_LCR_DATALENMASK 0x03 /* Mask for data length */
+#define MCS7840_UART_LCR_DATALEN5 0x00 /* 5 data bits */
+#define MCS7840_UART_LCR_DATALEN6 0x01 /* 6 data bits */
+#define MCS7840_UART_LCR_DATALEN7 0x02 /* 7 data bits */
+#define MCS7840_UART_LCR_DATALEN8 0x03 /* 8 data bits */
+
+#define MCS7840_UART_LCR_STOPBMASK 0x04 /* Mask for stop bits */
+#define MCS7840_UART_LCR_STOPB1 0x00 /* 1 stop bit in any case */
+#define MCS7840_UART_LCR_STOPB2 0x04 /* 1.5 or 2 stop bits, depending
+ * on data length */
+
+#define MCS7840_UART_LCR_PARITYMASK 0x38 /* Mask for all parity data */
+#define MCS7840_UART_LCR_PARITYON 0x08 /* Parity ON/OFF - ON */
+#define MCS7840_UART_LCR_PARITYODD 0x00 /* Parity Odd */
+#define MCS7840_UART_LCR_PARITYEVEN 0x10 /* Parity Even */
+#define MCS7840_UART_LCR_PARITYFORCE 0x20 /* Force parity odd/even */
+
+#define MCS7840_UART_LCR_BREAK 0x40 /* Send BREAK */
+#define MCS7840_UART_LCR_DIVISORS 0x80 /* Map DLL/DLM instead of
+ * xHR/IER */
+
+/* LSR bits */
+#define MCS7840_UART_LSR_RHRAVAIL 0x01 /* Data available for read */
+#define MCS7840_UART_LSR_RHROVERRUN 0x02 /* Data FIFO/register overflow */
+#define MCS7840_UART_LSR_PARITYERR 0x04 /* Parity error */
+#define MCS7840_UART_LSR_FRAMEERR 0x10 /* Framing error */
+#define MCS7840_UART_LSR_BREAKERR 0x20 /* BREAK signal received */
+#define MCS7840_UART_LSR_THREMPTY 0x40 /* THR register is empty,
+ * ready for transmit */
+#define MCS7840_UART_LSR_HASERR 0x80 /* Has error in receiver FIFO */
+
+/* MCR bits */
+#define MCS7840_UART_MCR_DTR 0x01 /* Force DTR to be active
+ * (low) */
+#define MCS7840_UART_MCR_RTS 0x02 /* Force RTS to be active
+ * (low) */
+#define MCS7840_UART_MCR_IE 0x04 /* Enable interrupts (from
+ * code, not documented) */
+#define MCS7840_UART_MCR_LOOPBACK 0x10 /* Enable local loopback test
+ * mode */
+#define MCS7840_UART_MCR_CTSRTS 0x20 /* Enable CTS/RTS flow control
+ * in 550 (FIFO) mode */
+#define MCS7840_UART_MCR_DTRDSR 0x40 /* Enable DTR/DSR flow control
+ * in 550 (FIFO) mode */
+#define MCS7840_UART_MCR_DCD 0x80 /* Enable DCD flow control in
+ * 550 (FIFO) mode */
+
+/* MSR bits */
+#define MCS7840_UART_MSR_DELTACTS 0x01 /* CTS was changed since last
+ * read */
+#define MCS7840_UART_MSR_DELTADSR 0x02 /* DSR was changed since last
+ * read */
+#define MCS7840_UART_MSR_DELTARI 0x04 /* RI was changed from low to
+ * high since last read */
+#define MCS7840_UART_MSR_DELTADCD 0x08 /* DCD was changed since last
+ * read */
+#define MCS7840_UART_MSR_NEGCTS 0x10 /* Negated CTS signal */
+#define MCS7840_UART_MSR_NEGDSR 0x20 /* Negated DSR signal */
+#define MCS7840_UART_MSR_NEGRI 0x40 /* Negated RI signal */
+#define MCS7840_UART_MSR_NEGDCD 0x80 /* Negated DCD signal */
+
+/* SCRATCHPAD bits */
+#define MCS7840_UART_SCRATCHPAD_RS232 0x00 /* RS-485 disabled */
+#define MCS7840_UART_SCRATCHPAD_RS485_DTRRX 0x80 /* RS-485 mode, DTR High
+ * = RX */
+#define MCS7840_UART_SCRATCHPAD_RS485_DTRTX 0xc0 /* RS-485 mode, DTR High
+ * = TX */
+
+#define MCS7840_CONFIG_INDEX 0
+#define MCS7840_IFACE_INDEX 0
+
+#endif
diff --git a/sys/dev/usb/usb_device.h b/sys/dev/usb/usb_device.h
index c8bc5eb..bf41221 100644
--- a/sys/dev/usb/usb_device.h
+++ b/sys/dev/usb/usb_device.h
@@ -187,6 +187,8 @@ struct usb_device {
struct usb_host_endpoint *linux_endpoint_end;
uint16_t devnum;
#endif
+
+ uint32_t clear_stall_errors; /* number of clear-stall failures */
};
/* globals */
diff --git a/sys/dev/usb/usb_freebsd.h b/sys/dev/usb/usb_freebsd.h
index a44e530..ae69cdb 100644
--- a/sys/dev/usb/usb_freebsd.h
+++ b/sys/dev/usb/usb_freebsd.h
@@ -66,6 +66,7 @@
#define USB_HUB_MAX_DEPTH 5
#define USB_EP0_BUFSIZE 1024 /* bytes */
+#define USB_CS_RESET_LIMIT 20 /* failures = 20 * 50 ms = 1sec */
typedef uint32_t usb_timeout_t; /* milliseconds */
typedef uint32_t usb_frlength_t; /* bytes */
diff --git a/sys/dev/usb/usb_generic.c b/sys/dev/usb/usb_generic.c
index 714ee6f..d62f8f9 100644
--- a/sys/dev/usb/usb_generic.c
+++ b/sys/dev/usb/usb_generic.c
@@ -966,10 +966,8 @@ ugen_re_enumerate(struct usb_fifo *f)
/* ignore any errors */
DPRINTFN(6, "no FIFOs\n");
}
- if (udev->re_enumerate_wait == 0) {
- udev->re_enumerate_wait = 1;
- usb_needs_explore(udev->bus, 0);
- }
+ /* start re-enumeration of device */
+ usbd_start_re_enumerate(udev);
return (0);
}
diff --git a/sys/dev/usb/usb_hub.c b/sys/dev/usb/usb_hub.c
index ce8a4a5..351b134 100644
--- a/sys/dev/usb/usb_hub.c
+++ b/sys/dev/usb/usb_hub.c
@@ -242,9 +242,14 @@ uhub_explore_sub(struct uhub_softc *sc, struct usb_port *up)
if (child->flags.usb_mode == USB_MODE_HOST) {
usbd_enum_lock(child);
if (child->re_enumerate_wait) {
- err = usbd_set_config_index(child, USB_UNCONFIG_INDEX);
- if (err == 0)
- err = usbd_req_re_enumerate(child, NULL);
+ err = usbd_set_config_index(child,
+ USB_UNCONFIG_INDEX);
+ if (err != 0) {
+ DPRINTF("Unconfigure failed: "
+ "%s: Ignored.\n",
+ usbd_errstr(err));
+ }
+ err = usbd_req_re_enumerate(child, NULL);
if (err == 0)
err = usbd_set_config_index(child, 0);
if (err == 0) {
@@ -2471,3 +2476,19 @@ usbd_filter_power_mode(struct usb_device *udev, uint8_t power_mode)
/* use fixed power mode given by hardware driver */
return (temp);
}
+
+/*------------------------------------------------------------------------*
+ * usbd_start_re_enumerate
+ *
+ * This function starts re-enumeration of the given USB device. This
+ * function does not need to be called BUS-locked. This function does
+ * not wait until the re-enumeration is completed.
+ *------------------------------------------------------------------------*/
+void
+usbd_start_re_enumerate(struct usb_device *udev)
+{
+ if (udev->re_enumerate_wait == 0) {
+ udev->re_enumerate_wait = 1;
+ usb_needs_explore(udev->bus, 0);
+ }
+}
diff --git a/sys/dev/usb/usb_process.c b/sys/dev/usb/usb_process.c
index 0509ec2..051ded9 100644
--- a/sys/dev/usb/usb_process.c
+++ b/sys/dev/usb/usb_process.c
@@ -360,7 +360,12 @@ usb_proc_is_gone(struct usb_process *up)
if (up->up_gone)
return (1);
- mtx_assert(up->up_mtx, MA_OWNED);
+ /*
+ * Allow calls when up_mtx is NULL, before the USB process
+ * structure is initialised.
+ */
+ if (up->up_mtx != NULL)
+ mtx_assert(up->up_mtx, MA_OWNED);
return (0);
}
diff --git a/sys/dev/usb/usb_request.c b/sys/dev/usb/usb_request.c
index c099e71..4358ef4 100644
--- a/sys/dev/usb/usb_request.c
+++ b/sys/dev/usb/usb_request.c
@@ -238,6 +238,10 @@ usb_do_clear_stall_callback(struct usb_xfer *xfer, usb_error_t error)
switch (USB_GET_STATE(xfer)) {
case USB_ST_TRANSFERRED:
+
+ /* reset error counter */
+ udev->clear_stall_errors = 0;
+
if (ep == NULL)
goto tr_setup; /* device was unconfigured */
if (ep->edesc &&
@@ -289,8 +293,23 @@ tr_setup:
goto tr_setup;
default:
- if (xfer->error == USB_ERR_CANCELLED) {
+ if (error == USB_ERR_CANCELLED)
break;
+
+ DPRINTF("Clear stall failed.\n");
+ if (udev->clear_stall_errors == USB_CS_RESET_LIMIT)
+ goto tr_setup;
+
+ if (error == USB_ERR_TIMEOUT) {
+ udev->clear_stall_errors = USB_CS_RESET_LIMIT;
+ DPRINTF("Trying to re-enumerate.\n");
+ usbd_start_re_enumerate(udev);
+ } else {
+ udev->clear_stall_errors++;
+ if (udev->clear_stall_errors == USB_CS_RESET_LIMIT) {
+ DPRINTF("Trying to re-enumerate.\n");
+ usbd_start_re_enumerate(udev);
+ }
}
goto tr_setup;
}
@@ -1936,6 +1955,23 @@ usbd_req_re_enumerate(struct usb_device *udev, struct mtx *mtx)
return (USB_ERR_INVAL);
}
retry:
+ /*
+ * Try to reset the High Speed parent HUB of a LOW- or FULL-
+ * speed device, if any.
+ */
+ if (udev->parent_hs_hub != NULL &&
+ udev->speed != USB_SPEED_HIGH) {
+ DPRINTF("Trying to reset parent High Speed TT.\n");
+ err = usbd_req_reset_tt(udev->parent_hs_hub, NULL,
+ udev->hs_port_no);
+ if (err) {
+ DPRINTF("Resetting parent High "
+ "Speed TT failed (%s).\n",
+ usbd_errstr(err));
+ }
+ }
+
+ /* Try to reset the parent HUB port. */
err = usbd_req_reset_port(parent_hub, mtx, udev->port_no);
if (err) {
DPRINTFN(0, "addr=%d, port reset failed, %s\n",
@@ -2033,3 +2069,65 @@ usbd_req_set_device_feature(struct usb_device *udev, struct mtx *mtx,
USETW(req.wLength, 0);
return (usbd_do_request(udev, mtx, &req, 0));
}
+
+/*------------------------------------------------------------------------*
+ * usbd_req_reset_tt
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_req_reset_tt(struct usb_device *udev, struct mtx *mtx,
+ uint8_t port)
+{
+ struct usb_device_request req;
+
+ /* For single TT HUBs the port should be 1 */
+
+ if (udev->ddesc.bDeviceClass == UDCLASS_HUB &&
+ udev->ddesc.bDeviceProtocol == UDPROTO_HSHUBSTT)
+ port = 1;
+
+ req.bmRequestType = UT_WRITE_CLASS_OTHER;
+ req.bRequest = UR_RESET_TT;
+ USETW(req.wValue, 0);
+ req.wIndex[0] = port;
+ req.wIndex[1] = 0;
+ USETW(req.wLength, 0);
+ return (usbd_do_request(udev, mtx, &req, 0));
+}
+
+/*------------------------------------------------------------------------*
+ * usbd_req_clear_tt_buffer
+ *
+ * For single TT HUBs the port should be 1.
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+usb_error_t
+usbd_req_clear_tt_buffer(struct usb_device *udev, struct mtx *mtx,
+ uint8_t port, uint8_t addr, uint8_t type, uint8_t endpoint)
+{
+ struct usb_device_request req;
+ uint16_t wValue;
+
+ /* For single TT HUBs the port should be 1 */
+
+ if (udev->ddesc.bDeviceClass == UDCLASS_HUB &&
+ udev->ddesc.bDeviceProtocol == UDPROTO_HSHUBSTT)
+ port = 1;
+
+ wValue = (endpoint & 0xF) | ((addr & 0x7F) << 4) |
+ ((endpoint & 0x80) << 8) | ((type & 3) << 12);
+
+ req.bmRequestType = UT_WRITE_CLASS_OTHER;
+ req.bRequest = UR_CLEAR_TT_BUFFER;
+ USETW(req.wValue, wValue);
+ req.wIndex[0] = port;
+ req.wIndex[1] = 0;
+ USETW(req.wLength, 0);
+ return (usbd_do_request(udev, mtx, &req, 0));
+}
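
As assembled above, wValue carries the endpoint number in bits 3:0, the device address in bits 10:4, the endpoint type in bits 13:12 and the endpoint direction flag in bit 15. A hedged worked example, assuming FreeBSD's usual UE_BULK endpoint-type constant (value 2):

	/*
	 * Illustrative only: bulk IN endpoint 0x82 on device address 3.
	 * (0x82 & 0xF) = 0x0002, (3 & 0x7F) << 4 = 0x0030,
	 * (0x82 & 0x80) << 8 = 0x8000, (UE_BULK & 3) << 12 = 0x2000,
	 * so wValue = 0xa032.
	 */
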
diff --git a/sys/dev/usb/usb_request.h b/sys/dev/usb/usb_request.h
index 12f373d..ac7a7c1 100644
--- a/sys/dev/usb/usb_request.h
+++ b/sys/dev/usb/usb_request.h
@@ -85,5 +85,9 @@ usb_error_t usbd_req_set_hub_u2_timeout(struct usb_device *udev,
struct mtx *mtx, uint8_t port, uint8_t timeout);
usb_error_t usbd_req_set_hub_depth(struct usb_device *udev,
struct mtx *mtx, uint16_t depth);
+usb_error_t usbd_req_reset_tt(struct usb_device *udev, struct mtx *mtx,
+ uint8_t port);
+usb_error_t usbd_req_clear_tt_buffer(struct usb_device *udev, struct mtx *mtx,
+ uint8_t port, uint8_t addr, uint8_t type, uint8_t endpoint);
#endif /* _USB_REQUEST_H_ */
diff --git a/sys/dev/usb/usb_transfer.c b/sys/dev/usb/usb_transfer.c
index 5fd4f5a..d4c2408 100644
--- a/sys/dev/usb/usb_transfer.c
+++ b/sys/dev/usb/usb_transfer.c
@@ -2928,6 +2928,11 @@ repeat:
usbd_transfer_unsetup(udev->ctrl_xfer, USB_CTRL_XFER_MAX);
/*
+ * Reset clear stall error counter.
+ */
+ udev->clear_stall_errors = 0;
+
+ /*
* Try to setup a new USB transfer for the
* default control endpoint:
*/
diff --git a/sys/dev/usb/usbdevs b/sys/dev/usb/usbdevs
index eb49eb2..a5f4cbb 100644
--- a/sys/dev/usb/usbdevs
+++ b/sys/dev/usb/usbdevs
@@ -2014,6 +2014,7 @@ product KODAK DC280 0x0130 Digital Science DC280
/* Kontron AG products */
product KONTRON DM9601 0x8101 USB Ethernet
+product KONTRON JP1082 0x9700 USB Ethernet
/* Konica Corp. Products */
product KONICA CAMERA 0x0720 Digital Color Camera
@@ -2273,7 +2274,9 @@ product MOBILITY EASIDOCK 0x0304 EasiDock Ethernet
/* MosChip products */
product MOSCHIP MCS7703 0x7703 MCS7703 Serial Port Adapter
product MOSCHIP MCS7730 0x7730 MCS7730 Ethernet
+product MOSCHIP MCS7820 0x7820 MCS7820 Serial Port Adapter
product MOSCHIP MCS7830 0x7830 MCS7830 Ethernet
+product MOSCHIP MCS7840 0x7840 MCS7840 Serial Port Adapter
/* Motorola products */
product MOTOROLA MC141555 0x1555 MC141555 hub controller
diff --git a/sys/dev/usb/usbdi.h b/sys/dev/usb/usbdi.h
index 8f6da7c..91cd3fa 100644
--- a/sys/dev/usb/usbdi.h
+++ b/sys/dev/usb/usbdi.h
@@ -542,6 +542,7 @@ void usbd_m_copy_in(struct usb_page_cache *cache, usb_frlength_t dst_offset,
struct mbuf *m, usb_size_t src_offset, usb_frlength_t src_len);
void usbd_frame_zero(struct usb_page_cache *cache, usb_frlength_t offset,
usb_frlength_t len);
+void usbd_start_re_enumerate(struct usb_device *udev);
int usb_fifo_attach(struct usb_device *udev, void *priv_sc,
struct mtx *priv_mtx, struct usb_fifo_methods *pm,
diff --git a/sys/dev/wpi/if_wpi.c b/sys/dev/wpi/if_wpi.c
index e1fffe1..38ebb7e 100644
--- a/sys/dev/wpi/if_wpi.c
+++ b/sys/dev/wpi/if_wpi.c
@@ -273,6 +273,8 @@ static devclass_t wpi_devclass;
DRIVER_MODULE(wpi, pci, wpi_driver, wpi_devclass, 0, 0);
+MODULE_VERSION(wpi, 1);
+
static const uint8_t wpi_ridx_to_plcp[] = {
/* OFDM: IEEE Std 802.11a-1999, pp. 14 Table 80 */
/* R1-R4 (ral/ural is R4-R1) */
diff --git a/sys/dev/xen/blkback/blkback.c b/sys/dev/xen/blkback/blkback.c
index 458149d..e52c342 100644
--- a/sys/dev/xen/blkback/blkback.c
+++ b/sys/dev/xen/blkback/blkback.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2009-2010 Spectra Logic Corporation
+ * Copyright (c) 2009-2011 Spectra Logic Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -61,6 +61,8 @@ __FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <sys/vnode.h>
#include <sys/mount.h>
+#include <sys/sysctl.h>
+#include <sys/bitstring.h>
#include <geom/geom.h>
@@ -153,9 +155,19 @@ MALLOC_DEFINE(M_XENBLOCKBACK, "xbbd", "Xen Block Back Driver Data");
#define XBB_MAX_RING_PAGES \
BLKIF_RING_PAGES(BLKIF_SEGS_TO_BLOCKS(XBB_MAX_SEGMENTS_PER_REQUEST) \
* XBB_MAX_REQUESTS)
+/**
+ * The maximum number of segments that we can allow per request list.
+ * We limit this to the maximum number of segments per request, because
+ * that is already a reasonable number of segments to aggregate. This
+ * number should never be smaller than XBB_MAX_SEGMENTS_PER_REQUEST,
+ * because that would leave situations where we can't dispatch even one
+ * large request.
+ */
+#define XBB_MAX_SEGMENTS_PER_REQLIST XBB_MAX_SEGMENTS_PER_REQUEST
/*--------------------------- Forward Declarations ---------------------------*/
struct xbb_softc;
+struct xbb_xen_req;
static void xbb_attach_failed(struct xbb_softc *xbb, int err, const char *fmt,
...) __attribute__((format(printf, 3, 4)));
@@ -163,16 +175,15 @@ static int xbb_shutdown(struct xbb_softc *xbb);
static int xbb_detach(device_t dev);
/*------------------------------ Data Structures -----------------------------*/
-/**
- * \brief Object tracking an in-flight I/O from a Xen VBD consumer.
- */
-struct xbb_xen_req {
- /**
- * Linked list links used to aggregate idle request in the
- * request free pool (xbb->request_free_slist).
- */
- SLIST_ENTRY(xbb_xen_req) links;
+STAILQ_HEAD(xbb_xen_req_list, xbb_xen_req);
+
+typedef enum {
+ XBB_REQLIST_NONE = 0x00,
+ XBB_REQLIST_MAPPED = 0x01
+} xbb_reqlist_flags;
+
+struct xbb_xen_reqlist {
/**
* Back reference to the parent block back instance for this
* request. Used during bio_done handling.
@@ -180,17 +191,71 @@ struct xbb_xen_req {
struct xbb_softc *xbb;
/**
- * The remote domain's identifier for this I/O request.
+ * BLKIF_OP code for this request.
+ */
+ int operation;
+
+ /**
+ * Set to BLKIF_RSP_* to indicate request status.
+ *
+ * This field allows an error status to be recorded even if the
+ * delivery of this status must be deferred. Deferred reporting
+ * is necessary, for example, when an error is detected during
+ * completion processing of one bio when other bios for this
+ * request are still outstanding.
+ */
+ int status;
+
+ /**
+ * Number of 512 byte sectors not transferred.
*/
- uint64_t id;
+ int residual_512b_sectors;
+
+ /**
+ * Starting sector number of the first request in the list.
+ */
+ off_t starting_sector_number;
+
+ /**
+ * If we're going to coalesce, the next contiguous sector would be
+ * this one.
+ */
+ off_t next_contig_sector;
+
+ /**
+ * Number of child requests in the list.
+ */
+ int num_children;
+
+ /**
+ * Number of I/O requests dispatched to the backend.
+ */
+ int pendcnt;
+
+ /**
+ * Total number of segments for requests in the list.
+ */
+ int nr_segments;
+
+ /**
+ * Flags for this particular request list.
+ */
+ xbb_reqlist_flags flags;
/**
* Kernel virtual address space reserved for this request
- * structure and used to map the remote domain's pages for
+ * list structure and used to map the remote domain's pages for
* this I/O, into our domain's address space.
*/
uint8_t *kva;
+ /**
+ * Base, pseudo-physical address, corresponding to the start
+ * of this request's kva region.
+ */
+ uint64_t gnt_base;
+
+
#ifdef XBB_USE_BOUNCE_BUFFERS
/**
* Pre-allocated domain local memory used to proxy remote
@@ -200,53 +265,91 @@ struct xbb_xen_req {
#endif
/**
- * Base, psuedo-physical address, corresponding to the start
- * of this request's kva region.
+ * Array of grant handles (one per page) used to map this request.
*/
- uint64_t gnt_base;
+ grant_handle_t *gnt_handles;
+
+ /**
+ * Device statistics request ordering type (ordered or simple).
+ */
+ devstat_tag_type ds_tag_type;
+
+ /**
+ * Device statistics request type (read, write, no_data).
+ */
+ devstat_trans_flags ds_trans_type;
+
+ /**
+ * The start time for this request.
+ */
+ struct bintime ds_t0;
+
+ /**
+ * Linked list of contiguous requests with the same operation type.
+ */
+ struct xbb_xen_req_list contig_req_list;
+
+ /**
+ * Linked list links used to aggregate idle requests in the
+ * request list free pool (xbb->reqlist_free_stailq) and pending
+ * requests waiting for execution (xbb->reqlist_pending_stailq).
+ */
+ STAILQ_ENTRY(xbb_xen_reqlist) links;
+};
+
+STAILQ_HEAD(xbb_xen_reqlist_list, xbb_xen_reqlist);
+
+/**
+ * \brief Object tracking an in-flight I/O from a Xen VBD consumer.
+ */
+struct xbb_xen_req {
+ /**
+ * Linked list links used to aggregate requests into a reqlist
+ * and to store them in the request free pool.
+ */
+ STAILQ_ENTRY(xbb_xen_req) links;
+
+ /**
+ * The remote domain's identifier for this I/O request.
+ */
+ uint64_t id;
/**
* The number of pages currently mapped for this request.
*/
- int nr_pages;
+ int nr_pages;
/**
* The number of 512 byte sectors comprising this requests.
*/
- int nr_512b_sectors;
+ int nr_512b_sectors;
/**
* The number of struct bio requests still outstanding for this
* request on the backend device. This field is only used for
* device (rather than file) backed I/O.
*/
- int pendcnt;
+ int pendcnt;
/**
* BLKIF_OP code for this request.
*/
- int operation;
+ int operation;
/**
- * BLKIF_RSP status code for this request.
- *
- * This field allows an error status to be recorded even if the
- * delivery of this status must be deferred. Deferred reporting
- * is necessary, for example, when an error is detected during
- * completion processing of one bio when other bios for this
- * request are still outstanding.
+ * Storage used for non-native ring requests.
*/
- int status;
+ blkif_request_t ring_req_storage;
/**
- * Device statistics request ordering type (ordered or simple).
+ * Pointer to the Xen request in the ring.
*/
- devstat_tag_type ds_tag_type;
+ blkif_request_t *ring_req;
/**
- * Device statistics request type (read, write, no_data).
+ * Consumer index for this request.
*/
- devstat_trans_flags ds_trans_type;
+ RING_IDX req_ring_idx;
/**
* The start time for this request.
@@ -254,9 +357,9 @@ struct xbb_xen_req {
struct bintime ds_t0;
/**
- * Array of grant handles (one per page) used to map this request.
+ * Pointer back to our parent request list.
*/
- grant_handle_t *gnt_handles;
+ struct xbb_xen_reqlist *reqlist;
};
SLIST_HEAD(xbb_xen_req_slist, xbb_xen_req);
@@ -321,7 +424,10 @@ typedef enum
XBBF_RESOURCE_SHORTAGE = 0x04,
/** Connection teardown in progress. */
- XBBF_SHUTDOWN = 0x08
+ XBBF_SHUTDOWN = 0x08,
+
+ /** A thread is already performing shutdown processing. */
+ XBBF_IN_SHUTDOWN = 0x10
} xbb_flag_t;
/** Backend device type. */
@@ -399,7 +505,7 @@ struct xbb_file_data {
* Only a single file based request is outstanding per-xbb instance,
* so we only need one of these.
*/
- struct iovec xiovecs[XBB_MAX_SEGMENTS_PER_REQUEST];
+ struct iovec xiovecs[XBB_MAX_SEGMENTS_PER_REQLIST];
#ifdef XBB_USE_BOUNCE_BUFFERS
/**
@@ -411,7 +517,7 @@ struct xbb_file_data {
* bounce-out the read data. This array serves as the temporary
* storage for this saved data.
*/
- struct iovec saved_xiovecs[XBB_MAX_SEGMENTS_PER_REQUEST];
+ struct iovec saved_xiovecs[XBB_MAX_SEGMENTS_PER_REQLIST];
/**
* \brief Array of memoized bounce buffer kva offsets used
@@ -422,7 +528,7 @@ struct xbb_file_data {
* the request sg elements is unavoidable. We memoize the computed
* bounce address here to reduce the cost of the second walk.
*/
- void *xiovecs_vaddr[XBB_MAX_SEGMENTS_PER_REQUEST];
+ void *xiovecs_vaddr[XBB_MAX_SEGMENTS_PER_REQLIST];
#endif /* XBB_USE_BOUNCE_BUFFERS */
};
@@ -437,9 +543,9 @@ union xbb_backend_data {
/**
* Function signature of backend specific I/O handlers.
*/
-typedef int (*xbb_dispatch_t)(struct xbb_softc *xbb, blkif_request_t *ring_req,
- struct xbb_xen_req *req, int nseg,
- int operation, int flags);
+typedef int (*xbb_dispatch_t)(struct xbb_softc *xbb,
+ struct xbb_xen_reqlist *reqlist, int operation,
+ int flags);
/**
* Per-instance configuration data.
@@ -467,14 +573,23 @@ struct xbb_softc {
xbb_dispatch_t dispatch_io;
/** The number of requests outstanding on the backend device/file. */
- u_int active_request_count;
+ int active_request_count;
/** Free pool of request tracking structures. */
- struct xbb_xen_req_slist request_free_slist;
+ struct xbb_xen_req_list request_free_stailq;
/** Array, sized at connection time, of request tracking structures. */
struct xbb_xen_req *requests;
+ /** Free pool of request list structures. */
+ struct xbb_xen_reqlist_list reqlist_free_stailq;
+
+ /** List of pending request lists awaiting execution. */
+ struct xbb_xen_reqlist_list reqlist_pending_stailq;
+
+ /** Array, sized at connection time, of request list structures. */
+ struct xbb_xen_reqlist *request_lists;
+
/**
* Global pool of kva used for mapping remote domain ring
* and I/O transaction data.
@@ -487,6 +602,15 @@ struct xbb_softc {
/** The size of the global kva pool. */
int kva_size;
+ /** The size of the KVA area used for request lists. */
+ int reqlist_kva_size;
+
+ /** The number of pages of KVA used for request lists */
+ int reqlist_kva_pages;
+
+ /** Bitmap of free KVA pages */
+ bitstr_t *kva_free;
+
/**
* \brief Cached value of the front-end's domain id.
*
@@ -508,12 +632,12 @@ struct xbb_softc {
int abi;
/**
- * \brief The maximum number of requests allowed to be in
- * flight at a time.
+ * \brief The maximum number of requests and request lists allowed
+ * to be in flight at a time.
*
* This value is negotiated via the XenStore.
*/
- uint32_t max_requests;
+ u_int max_requests;
/**
* \brief The maximum number of segments (1 page per segment)
@@ -521,7 +645,15 @@ struct xbb_softc {
*
* This value is negotiated via the XenStore.
*/
- uint32_t max_request_segments;
+ u_int max_request_segments;
+
+ /**
+ * \brief Maximum number of segments per request list.
+ *
+ * This value is derived from and will generally be larger than
+ * max_request_segments.
+ */
+ u_int max_reqlist_segments;
/**
* The maximum size of any request to this back-end
@@ -529,7 +661,13 @@ struct xbb_softc {
*
* This value is negotiated via the XenStore.
*/
- uint32_t max_request_size;
+ u_int max_request_size;
+
+ /**
+ * The maximum size of any request list. This is derived directly
+ * from max_reqlist_segments.
+ */
+ u_int max_reqlist_size;
/** Various configuration and state bit flags. */
xbb_flag_t flags;
@@ -574,6 +712,7 @@ struct xbb_softc {
struct vnode *vn;
union xbb_backend_data backend;
+
/** The native sector size of the backend. */
u_int sector_size;
@@ -598,7 +737,14 @@ struct xbb_softc {
*
* Ring processing is serialized so we only need one of these.
*/
- struct xbb_sg xbb_sgs[XBB_MAX_SEGMENTS_PER_REQUEST];
+ struct xbb_sg xbb_sgs[XBB_MAX_SEGMENTS_PER_REQLIST];
+
+ /**
+ * Temporary grant table map used in xbb_dispatch_io(). When
+ * XBB_MAX_SEGMENTS_PER_REQLIST gets large, keeping this on the
+ * stack could cause a stack overflow.
+ */
+ struct gnttab_map_grant_ref maps[XBB_MAX_SEGMENTS_PER_REQLIST];
/** Mutex protecting per-instance data. */
struct mtx lock;
@@ -614,8 +760,51 @@ struct xbb_softc {
int pseudo_phys_res_id;
#endif
- /** I/O statistics. */
+ /**
+ * I/O statistics from BlockBack dispatch down. These are
+ * coalesced requests, and we start them right before execution.
+ */
struct devstat *xbb_stats;
+
+ /**
+ * I/O statistics coming into BlockBack. These are the requests as
+ * we get them from BlockFront. They are started as soon as we
+ * receive a request, and completed when the I/O is complete.
+ */
+ struct devstat *xbb_stats_in;
+
+ /** Disable sending flush to the backend */
+ int disable_flush;
+
+ /** Send a real flush for every N flush requests */
+ int flush_interval;
+
+ /** Count of flush requests in the interval */
+ int flush_count;
+
+ /** Don't coalesce requests if this is set */
+ int no_coalesce_reqs;
+
+ /** Number of requests we have received */
+ uint64_t reqs_received;
+
+ /** Number of requests we have completed */
+ uint64_t reqs_completed;
+
+ /** How many forced dispatches (i.e., without coalescing) have happened */
+ uint64_t forced_dispatch;
+
+ /** How many normal dispatches have happened */
+ uint64_t normal_dispatch;
+
+ /** How many total dispatches have happened */
+ uint64_t total_dispatch;
+
+ /** How many times we have run out of KVA */
+ uint64_t kva_shortages;
+
+ /** How many times we have run out of request structures */
+ uint64_t request_shortages;
};
/*---------------------------- Request Processing ----------------------------*/
@@ -633,21 +822,14 @@ xbb_get_req(struct xbb_softc *xbb)
struct xbb_xen_req *req;
req = NULL;
- mtx_lock(&xbb->lock);
- /*
- * Do not allow new requests to be allocated while we
- * are shutting down.
- */
- if ((xbb->flags & XBBF_SHUTDOWN) == 0) {
- if ((req = SLIST_FIRST(&xbb->request_free_slist)) != NULL) {
- SLIST_REMOVE_HEAD(&xbb->request_free_slist, links);
- xbb->active_request_count++;
- } else {
- xbb->flags |= XBBF_RESOURCE_SHORTAGE;
- }
+ mtx_assert(&xbb->lock, MA_OWNED);
+
+ if ((req = STAILQ_FIRST(&xbb->request_free_stailq)) != NULL) {
+ STAILQ_REMOVE_HEAD(&xbb->request_free_stailq, links);
+ xbb->active_request_count++;
}
- mtx_unlock(&xbb->lock);
+
return (req);
}
@@ -660,34 +842,40 @@ xbb_get_req(struct xbb_softc *xbb)
static inline void
xbb_release_req(struct xbb_softc *xbb, struct xbb_xen_req *req)
{
- int wake_thread;
+ mtx_assert(&xbb->lock, MA_OWNED);
- mtx_lock(&xbb->lock);
- wake_thread = xbb->flags & XBBF_RESOURCE_SHORTAGE;
- xbb->flags &= ~XBBF_RESOURCE_SHORTAGE;
- SLIST_INSERT_HEAD(&xbb->request_free_slist, req, links);
+ STAILQ_INSERT_HEAD(&xbb->request_free_stailq, req, links);
xbb->active_request_count--;
- if ((xbb->flags & XBBF_SHUTDOWN) != 0) {
- /*
- * Shutdown is in progress. See if we can
- * progress further now that one more request
- * has completed and been returned to the
- * free pool.
- */
- xbb_shutdown(xbb);
- }
- mtx_unlock(&xbb->lock);
+ KASSERT(xbb->active_request_count >= 0,
+ ("xbb_release_req: negative active count"));
+}
- if (wake_thread != 0)
- taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task);
+/**
+ * Return an xbb_xen_req_list of allocated xbb_xen_reqs to the free pool.
+ *
+ * \param xbb Per-instance xbb configuration structure.
+ * \param req_list The list of requests to free.
+ * \param nreqs The number of items in the list.
+ */
+static inline void
+xbb_release_reqs(struct xbb_softc *xbb, struct xbb_xen_req_list *req_list,
+ int nreqs)
+{
+ mtx_assert(&xbb->lock, MA_OWNED);
+
+ STAILQ_CONCAT(&xbb->request_free_stailq, req_list);
+ xbb->active_request_count -= nreqs;
+
+ KASSERT(xbb->active_request_count >= 0,
+ ("xbb_release_reqs: negative active count"));
}
/**
* Given a page index and 512b sector offset within that page,
* calculate an offset into a request's kva region.
*
- * \param req The request structure whose kva region will be accessed.
+ * \param reqlist The request structure whose kva region will be accessed.
* \param pagenr The page index used to compute the kva offset.
* \param sector The 512b sector index used to compute the page relative
* kva offset.
@@ -695,9 +883,9 @@ xbb_release_req(struct xbb_softc *xbb, struct xbb_xen_req *req)
* \return The computed global KVA offset.
*/
static inline uint8_t *
-xbb_req_vaddr(struct xbb_xen_req *req, int pagenr, int sector)
+xbb_reqlist_vaddr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector)
{
- return (req->kva + (PAGE_SIZE * pagenr) + (sector << 9));
+ return (reqlist->kva + (PAGE_SIZE * pagenr) + (sector << 9));
}
#ifdef XBB_USE_BOUNCE_BUFFERS
@@ -705,7 +893,7 @@ xbb_req_vaddr(struct xbb_xen_req *req, int pagenr, int sector)
* Given a page index and 512b sector offset within that page,
* calculate an offset into a request's local bounce memory region.
*
- * \param req The request structure whose bounce region will be accessed.
+ * \param reqlist The request structure whose bounce region will be accessed.
* \param pagenr The page index used to compute the bounce offset.
* \param sector The 512b sector index used to compute the page relative
* bounce offset.
@@ -713,9 +901,9 @@ xbb_req_vaddr(struct xbb_xen_req *req, int pagenr, int sector)
* \return The computed global bounce buffer address.
*/
static inline uint8_t *
-xbb_req_bounce_addr(struct xbb_xen_req *req, int pagenr, int sector)
+xbb_reqlist_bounce_addr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector)
{
- return (req->bounce + (PAGE_SIZE * pagenr) + (sector << 9));
+ return (reqlist->bounce + (PAGE_SIZE * pagenr) + (sector << 9));
}
#endif
@@ -724,7 +912,7 @@ xbb_req_bounce_addr(struct xbb_xen_req *req, int pagenr, int sector)
* calculate an offset into the request's memory region that the
* underlying backend device/file should use for I/O.
*
- * \param req The request structure whose I/O region will be accessed.
+ * \param reqlist The request structure whose I/O region will be accessed.
* \param pagenr The page index used to compute the I/O offset.
* \param sector The 512b sector index used to compute the page relative
* I/O offset.
@@ -736,12 +924,12 @@ xbb_req_bounce_addr(struct xbb_xen_req *req, int pagenr, int sector)
* this request.
*/
static inline uint8_t *
-xbb_req_ioaddr(struct xbb_xen_req *req, int pagenr, int sector)
+xbb_reqlist_ioaddr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector)
{
#ifdef XBB_USE_BOUNCE_BUFFERS
- return (xbb_req_bounce_addr(req, pagenr, sector));
+ return (xbb_reqlist_bounce_addr(reqlist, pagenr, sector));
#else
- return (xbb_req_vaddr(req, pagenr, sector));
+ return (xbb_reqlist_vaddr(reqlist, pagenr, sector));
#endif
}
@@ -750,7 +938,7 @@ xbb_req_ioaddr(struct xbb_xen_req *req, int pagenr, int sector)
* an offset into the local psuedo-physical address space used to map a
* front-end's request data into a request.
*
- * \param req The request structure whose pseudo-physical region
+ * \param reqlist The request list structure whose pseudo-physical region
* will be accessed.
* \param pagenr The page index used to compute the pseudo-physical offset.
* \param sector The 512b sector index used to compute the page relative
@@ -763,10 +951,126 @@ xbb_req_ioaddr(struct xbb_xen_req *req, int pagenr, int sector)
* this request.
*/
static inline uintptr_t
-xbb_req_gntaddr(struct xbb_xen_req *req, int pagenr, int sector)
+xbb_get_gntaddr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector)
{
- return ((uintptr_t)(req->gnt_base
- + (PAGE_SIZE * pagenr) + (sector << 9)));
+ struct xbb_softc *xbb;
+
+ xbb = reqlist->xbb;
+
+ return ((uintptr_t)(xbb->gnt_base_addr +
+ (uintptr_t)(reqlist->kva - xbb->kva) +
+ (PAGE_SIZE * pagenr) + (sector << 9)));
+}
+
+/**
+ * Get Kernel Virtual Address space for mapping requests.
+ *
+ * \param xbb Per-instance xbb configuration structure.
+ * \param nr_pages Number of pages needed.
+ * \param check_only If set, check for free KVA but don't allocate it.
+ * \param have_lock If set, xbb lock is already held.
+ *
+ * \return On success, a pointer to the allocated KVA region. Otherwise NULL.
+ *
+ * Note: This should be unnecessary once we have either chaining or
+ * scatter/gather support for struct bio. At that point we'll be able to
+ * put multiple addresses and lengths in one bio/bio chain and won't need
+ * to map everything into one virtual segment.
+ */
+static uint8_t *
+xbb_get_kva(struct xbb_softc *xbb, int nr_pages)
+{
+ intptr_t first_clear, num_clear;
+ uint8_t *free_kva;
+ int i;
+
+ KASSERT(nr_pages != 0, ("xbb_get_kva of zero length"));
+
+ first_clear = 0;
+ free_kva = NULL;
+
+ mtx_lock(&xbb->lock);
+
+ /*
+ * Look for the first available page. If there are none, we're done.
+ */
+ bit_ffc(xbb->kva_free, xbb->reqlist_kva_pages, &first_clear);
+
+ if (first_clear == -1)
+ goto bailout;
+
+ /*
+ * Starting at the first available page, look for consecutive free
+ * pages that will satisfy the user's request.
+ */
+ for (i = first_clear, num_clear = 0; i < xbb->reqlist_kva_pages; i++) {
+ /*
+ * If this is true, the page is used, so we have to reset
+ * the number of clear pages and the first clear page
+ * (since it pointed to a region with an insufficient number
+ * of clear pages).
+ */
+ if (bit_test(xbb->kva_free, i)) {
+ num_clear = 0;
+ first_clear = -1;
+ continue;
+ }
+
+ if (first_clear == -1)
+ first_clear = i;
+
+ /*
+ * If this is true, we've found a large enough free region
+ * to satisfy the request.
+ */
+ if (++num_clear == nr_pages) {
+
+ bit_nset(xbb->kva_free, first_clear,
+ first_clear + nr_pages - 1);
+
+ free_kva = xbb->kva +
+ (uint8_t *)(first_clear * PAGE_SIZE);
+
+ KASSERT(free_kva >= (uint8_t *)xbb->kva &&
+ free_kva + (nr_pages * PAGE_SIZE) <=
+ (uint8_t *)xbb->ring_config.va,
+ ("Free KVA %p len %d out of range, "
+ "kva = %#jx, ring VA = %#jx\n", free_kva,
+ nr_pages * PAGE_SIZE, (uintmax_t)xbb->kva,
+ (uintmax_t)xbb->ring_config.va));
+ break;
+ }
+ }
+
+bailout:
+
+ if (free_kva == NULL) {
+ xbb->flags |= XBBF_RESOURCE_SHORTAGE;
+ xbb->kva_shortages++;
+ }
+
+ mtx_unlock(&xbb->lock);
+
+ return (free_kva);
+}
+
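/*
 * Editor's sketch, not part of the patch: the scan above is the
 * standard bitstring(3) first-fit idiom.  A stand-alone userland
 * rendition with a fixed 64-page map and hypothetical names, for
 * illustration only:
 */
#include <bitstring.h>

static bitstr_t bit_decl(page_map, 64);	/* bit set == page in use */

static int
first_fit(int nr_pages)
{
	int first_clear, i, run;

	/* Find the first free page; -1 means the map is full. */
	bit_ffc(page_map, 64, &first_clear);
	if (first_clear == -1)
		return (-1);

	for (i = first_clear, run = 0; i < 64; i++) {
		if (bit_test(page_map, i)) {
			/* Run broken by an allocated page; restart. */
			run = 0;
			first_clear = -1;
			continue;
		}
		if (first_clear == -1)
			first_clear = i;
		if (++run == nr_pages) {
			/* Large enough run found; mark it allocated. */
			bit_nset(page_map, first_clear,
			    first_clear + nr_pages - 1);
			return (first_clear);
		}
	}
	return (-1);
}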
+/**
+ * Free allocated KVA.
+ *
+ * \param xbb Per-instance xbb configuration structure.
+ * \param kva_ptr Pointer to allocated KVA region.
+ * \param nr_pages Number of pages in the KVA region.
+ */
+static void
+xbb_free_kva(struct xbb_softc *xbb, uint8_t *kva_ptr, int nr_pages)
+{
+ intptr_t start_page;
+
+ mtx_assert(&xbb->lock, MA_OWNED);
+
+ start_page = (intptr_t)(kva_ptr - xbb->kva) >> PAGE_SHIFT;
+ bit_nclear(xbb->kva_free, start_page, start_page + nr_pages - 1);
}
/**
@@ -775,23 +1079,23 @@ xbb_req_gntaddr(struct xbb_xen_req *req, int pagenr, int sector)
- * \param req The request structure to unmap.
+ * \param reqlist The request list structure to unmap.
*/
static void
-xbb_unmap_req(struct xbb_xen_req *req)
+xbb_unmap_reqlist(struct xbb_xen_reqlist *reqlist)
{
- struct gnttab_unmap_grant_ref unmap[XBB_MAX_SEGMENTS_PER_REQUEST];
+ struct gnttab_unmap_grant_ref unmap[XBB_MAX_SEGMENTS_PER_REQLIST];
u_int i;
u_int invcount;
int error;
invcount = 0;
- for (i = 0; i < req->nr_pages; i++) {
+ for (i = 0; i < reqlist->nr_segments; i++) {
- if (req->gnt_handles[i] == GRANT_REF_INVALID)
+ if (reqlist->gnt_handles[i] == GRANT_REF_INVALID)
continue;
- unmap[invcount].host_addr = xbb_req_gntaddr(req, i, 0);
+ unmap[invcount].host_addr = xbb_get_gntaddr(reqlist, i, 0);
unmap[invcount].dev_bus_addr = 0;
- unmap[invcount].handle = req->gnt_handles[i];
- req->gnt_handles[i] = GRANT_REF_INVALID;
+ unmap[invcount].handle = reqlist->gnt_handles[i];
+ reqlist->gnt_handles[i] = GRANT_REF_INVALID;
invcount++;
}
@@ -801,6 +1105,175 @@ xbb_unmap_req(struct xbb_xen_req *req)
}
/**
+ * Allocate an internal transaction tracking structure from the free pool.
+ *
+ * \param xbb Per-instance xbb configuration structure.
+ *
+ * \return On success, a pointer to the allocated xbb_xen_reqlist structure.
+ * Otherwise NULL.
+ */
+static inline struct xbb_xen_reqlist *
+xbb_get_reqlist(struct xbb_softc *xbb)
+{
+ struct xbb_xen_reqlist *reqlist;
+
+ reqlist = NULL;
+
+ mtx_assert(&xbb->lock, MA_OWNED);
+
+ if ((reqlist = STAILQ_FIRST(&xbb->reqlist_free_stailq)) != NULL) {
+
+ STAILQ_REMOVE_HEAD(&xbb->reqlist_free_stailq, links);
+ reqlist->flags = XBB_REQLIST_NONE;
+ reqlist->kva = NULL;
+ reqlist->status = BLKIF_RSP_OKAY;
+ reqlist->residual_512b_sectors = 0;
+ reqlist->num_children = 0;
+ reqlist->nr_segments = 0;
+ STAILQ_INIT(&reqlist->contig_req_list);
+ }
+
+ return (reqlist);
+}
+
+/**
+ * Return an allocated transaction tracking structure to the free pool.
+ *
+ * \param xbb Per-instance xbb configuration structure.
+ * \param reqlist The request list structure to free.
+ * \param wakeup If set, wake up the work thread if freeing this reqlist
+ * during a resource shortage condition.
+ */
+static inline void
+xbb_release_reqlist(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
+ int wakeup)
+{
+
+ mtx_lock(&xbb->lock);
+
+ if (wakeup) {
+ wakeup = xbb->flags & XBBF_RESOURCE_SHORTAGE;
+ xbb->flags &= ~XBBF_RESOURCE_SHORTAGE;
+ }
+
+ if (reqlist->kva != NULL)
+ xbb_free_kva(xbb, reqlist->kva, reqlist->nr_segments);
+
+ xbb_release_reqs(xbb, &reqlist->contig_req_list, reqlist->num_children);
+
+ STAILQ_INSERT_TAIL(&xbb->reqlist_free_stailq, reqlist, links);
+
+ if ((xbb->flags & XBBF_SHUTDOWN) != 0) {
+ /*
+ * Shutdown is in progress. See if we can
+ * progress further now that one more request
+ * has completed and been returned to the
+ * free pool.
+ */
+ xbb_shutdown(xbb);
+ }
+
+ mtx_unlock(&xbb->lock);
+
+ if (wakeup != 0)
+ taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task);
+}
+
+/**
+ * Request resources and do basic request setup.
+ *
+ * \param xbb Per-instance xbb configuration structure.
+ * \param reqlist Pointer to reqlist pointer.
+ * \param ring_req Pointer to a block ring request.
+ * \param ring_idx The ring index of this request.
+ *
+ * \return 0 for success, non-zero for failure.
+ */
+static int
+xbb_get_resources(struct xbb_softc *xbb, struct xbb_xen_reqlist **reqlist,
+ blkif_request_t *ring_req, RING_IDX ring_idx)
+{
+ struct xbb_xen_reqlist *nreqlist;
+ struct xbb_xen_req *nreq;
+
+ nreqlist = NULL;
+ nreq = NULL;
+
+ mtx_lock(&xbb->lock);
+
+ /*
+ * We don't allow new resources to be allocated if we're in the
+ * process of shutting down.
+ */
+ if ((xbb->flags & XBBF_SHUTDOWN) != 0) {
+ mtx_unlock(&xbb->lock);
+ return (1);
+ }
+
+ /*
+ * Allocate a reqlist if the caller doesn't have one already.
+ */
+ if (*reqlist == NULL) {
+ nreqlist = xbb_get_reqlist(xbb);
+ if (nreqlist == NULL)
+ goto bailout_error;
+ }
+
+ /* We always allocate a request. */
+ nreq = xbb_get_req(xbb);
+ if (nreq == NULL)
+ goto bailout_error;
+
+ mtx_unlock(&xbb->lock);
+
+ if (*reqlist == NULL) {
+ *reqlist = nreqlist;
+ nreqlist->operation = ring_req->operation;
+ nreqlist->starting_sector_number = ring_req->sector_number;
+ STAILQ_INSERT_TAIL(&xbb->reqlist_pending_stailq, nreqlist,
+ links);
+ }
+
+ nreq->reqlist = *reqlist;
+ nreq->req_ring_idx = ring_idx;
+
+ if (xbb->abi != BLKIF_PROTOCOL_NATIVE) {
+ bcopy(ring_req, &nreq->ring_req_storage, sizeof(*ring_req));
+ nreq->ring_req = &nreq->ring_req_storage;
+ } else {
+ nreq->ring_req = ring_req;
+ }
+
+ binuptime(&nreq->ds_t0);
+ devstat_start_transaction(xbb->xbb_stats_in, &nreq->ds_t0);
+ STAILQ_INSERT_TAIL(&(*reqlist)->contig_req_list, nreq, links);
+ (*reqlist)->num_children++;
+ (*reqlist)->nr_segments += ring_req->nr_segments;
+
+ return (0);
+
+bailout_error:
+
+ /*
+ * We're out of resources, so set the shortage flag. The next time
+ * a request is released, we'll try waking up the work thread to
+ * see if we can allocate more resources.
+ */
+ xbb->flags |= XBBF_RESOURCE_SHORTAGE;
+ xbb->request_shortages++;
+
+ if (nreq != NULL)
+ xbb_release_req(xbb, nreq);
+
+ mtx_unlock(&xbb->lock);
+
+ if (nreqlist != NULL)
+ xbb_release_reqlist(xbb, nreqlist, /*wakeup*/ 0);
+
+ return (1);
+}
+
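/*
 * Editor's note, not part of the patch: xbb_get_resources() is called
 * once per ring entry.  The first call in a run of coalescible
 * requests enters with *reqlist == NULL, allocates the reqlist and
 * queues it on reqlist_pending_stailq; subsequent calls pass the same
 * reqlist back in and only attach another child request, growing
 * num_children and nr_segments.  A sketch of the calling pattern:
 *
 *	struct xbb_xen_reqlist *reqlist = NULL;
 *
 *	while ((ring_req = next_ring_request()) != NULL) {
 *		if (!contiguous_with(reqlist, ring_req))
 *			reqlist = NULL;		(start a new list)
 *		if (xbb_get_resources(xbb, &reqlist, ring_req, idx) != 0)
 *			break;			(resource shortage)
 *	}
 *
 * next_ring_request() and contiguous_with() are hypothetical
 * stand-ins for the ring-walking logic in xbb_run_queue().
 */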
+/**
* Create and transmit a response to a blkif request.
*
* \param xbb Per-instance xbb configuration structure.
@@ -862,6 +1335,8 @@ xbb_send_response(struct xbb_softc *xbb, struct xbb_xen_req *req, int status)
more_to_do = 1;
}
+ xbb->reqs_completed++;
+
mtx_unlock(&xbb->lock);
if (more_to_do)
@@ -872,6 +1347,70 @@ xbb_send_response(struct xbb_softc *xbb, struct xbb_xen_req *req, int status)
}
/**
+ * Complete a request list.
+ *
+ * \param xbb Per-instance xbb configuration structure.
+ * \param reqlist Allocated internal request list structure.
+ */
+static void
+xbb_complete_reqlist(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist)
+{
+ struct xbb_xen_req *nreq;
+ off_t sectors_sent;
+
+ sectors_sent = 0;
+
+ if (reqlist->flags & XBB_REQLIST_MAPPED)
+ xbb_unmap_reqlist(reqlist);
+
+ /*
+ * All I/O is done, send the response. A lock should not be
+ * necessary here because the request list is complete, and
+ * therefore this is the only context accessing this request
+ * right now. The functions we call do their own locking if
+ * necessary.
+ */
+ STAILQ_FOREACH(nreq, &reqlist->contig_req_list, links) {
+ off_t cur_sectors_sent;
+
+ xbb_send_response(xbb, nreq, reqlist->status);
+
+ /* We don't report bytes sent if there is an error. */
+ if (reqlist->status == BLKIF_RSP_OKAY)
+ cur_sectors_sent = nreq->nr_512b_sectors;
+ else
+ cur_sectors_sent = 0;
+
+ sectors_sent += cur_sectors_sent;
+
+ devstat_end_transaction(xbb->xbb_stats_in,
+ /*bytes*/cur_sectors_sent << 9,
+ reqlist->ds_tag_type,
+ reqlist->ds_trans_type,
+ /*now*/NULL,
+ /*then*/&nreq->ds_t0);
+ }
+
+ /*
+ * Take out any sectors not sent. If we wind up negative (which
+ * might happen if an error is reported as well as a residual), just
+ * report 0 sectors sent.
+ */
+ sectors_sent -= reqlist->residual_512b_sectors;
+ if (sectors_sent < 0)
+ sectors_sent = 0;
+
+ devstat_end_transaction(xbb->xbb_stats,
+ /*bytes*/ sectors_sent << 9,
+ reqlist->ds_tag_type,
+ reqlist->ds_trans_type,
+ /*now*/NULL,
+ /*then*/&reqlist->ds_t0);
+
+ xbb_release_reqlist(xbb, reqlist, /*wakeup*/ 1);
+}
+
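/*
 * Editor's worked example, not part of the patch: for a reqlist built
 * from two coalesced 4-sector (2KiB) front-end reads that complete
 * without error or residual, the loop above logs two 2KiB
 * transactions against xbb_stats_in (one per front-end request), and
 * the final call logs a single 4KiB transaction against xbb_stats
 * (one per aggregated backend dispatch).
 */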
+/**
* Completion handler for buffer I/O requests issued by the device
* backend driver.
*
@@ -881,18 +1420,34 @@ xbb_send_response(struct xbb_softc *xbb, struct xbb_xen_req *req, int status)
static void
xbb_bio_done(struct bio *bio)
{
- struct xbb_softc *xbb;
- struct xbb_xen_req *req;
+ struct xbb_softc *xbb;
+ struct xbb_xen_reqlist *reqlist;
+
+ reqlist = bio->bio_caller1;
+ xbb = reqlist->xbb;
- req = bio->bio_caller1;
- xbb = req->xbb;
+ reqlist->residual_512b_sectors += bio->bio_resid >> 9;
- /* Only include transferred I/O in stats. */
- req->nr_512b_sectors -= bio->bio_resid >> 9;
+ /*
+ * This is a bit imprecise. With aggregated I/O a single
+ * request list can contain multiple front-end requests and
+	 * multiple bios may point to a single request. By carefully
+ * walking the request list, we could map residuals and errors
+ * back to the original front-end request, but the interface
+ * isn't sufficiently rich for us to properly report the error.
+ * So, we just treat the entire request list as having failed if an
+ * error occurs on any part. And, if an error occurs, we treat
+ * the amount of data transferred as 0.
+ *
+ * For residuals, we report it on the overall aggregated device,
+ * but not on the individual requests, since we don't currently
+	 * do the work to determine the front-end request to which the
+ * residual applies.
+ */
if (bio->bio_error) {
DPRINTF("BIO returned error %d for operation on device %s\n",
bio->bio_error, xbb->dev_name);
- req->status = BLKIF_RSP_ERROR;
+ reqlist->status = BLKIF_RSP_ERROR;
if (bio->bio_error == ENXIO
&& xenbus_get_state(xbb->dev) == XenbusStateConnected) {
@@ -911,23 +1466,18 @@ xbb_bio_done(struct bio *bio)
vm_offset_t kva_offset;
kva_offset = (vm_offset_t)bio->bio_data
- - (vm_offset_t)req->bounce;
- memcpy((uint8_t *)req->kva + kva_offset,
+ - (vm_offset_t)reqlist->bounce;
+ memcpy((uint8_t *)reqlist->kva + kva_offset,
bio->bio_data, bio->bio_bcount);
}
#endif /* XBB_USE_BOUNCE_BUFFERS */
- if (atomic_fetchadd_int(&req->pendcnt, -1) == 1) {
- xbb_unmap_req(req);
- xbb_send_response(xbb, req, req->status);
- devstat_end_transaction(xbb->xbb_stats,
- /*bytes*/req->nr_512b_sectors << 9,
- req->ds_tag_type,
- req->ds_trans_type,
- /*now*/NULL,
- /*then*/&req->ds_t0);
- xbb_release_req(xbb, req);
- }
+ /*
+ * Decrement the pending count for the request list. When we're
+ * done with the requests, send status back for all of them.
+ */
+ if (atomic_fetchadd_int(&reqlist->pendcnt, -1) == 1)
+ xbb_complete_reqlist(xbb, reqlist);
g_destroy_bio(bio);
}
@@ -936,228 +1486,315 @@ xbb_bio_done(struct bio *bio)
* Parse a blkif request into an internal request structure and send
* it to the backend for processing.
*
- * \param xbb Per-instance xbb configuration structure.
- * \param ring_req Front-end's I/O request as pulled from the shared
- * communication ring.
- * \param req Allocated internal request structure.
- * \param req_ring_idx The location of ring_req within the shared
- * communication ring.
+ * \param xbb Per-instance xbb configuration structure.
+ * \param reqlist Allocated internal request list structure.
*
+ * \return On success, 0. For resource shortages, non-zero.
+ *
* This routine performs the backend common aspects of request parsing
* including compiling an internal request structure, parsing the S/G
* list and any secondary ring requests in which they may reside, and
* the mapping of front-end I/O pages into our domain.
*/
-static void
-xbb_dispatch_io(struct xbb_softc *xbb, blkif_request_t *ring_req,
- struct xbb_xen_req *req, RING_IDX req_ring_idx)
+static int
+xbb_dispatch_io(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist)
{
- struct gnttab_map_grant_ref maps[XBB_MAX_SEGMENTS_PER_REQUEST];
struct xbb_sg *xbb_sg;
struct gnttab_map_grant_ref *map;
struct blkif_request_segment *sg;
struct blkif_request_segment *last_block_sg;
+ struct xbb_xen_req *nreq;
u_int nseg;
u_int seg_idx;
u_int block_segs;
int nr_sects;
+ int total_sects;
int operation;
uint8_t bio_flags;
int error;
- nseg = ring_req->nr_segments;
- nr_sects = 0;
- req->xbb = xbb;
- req->id = ring_req->id;
- req->operation = ring_req->operation;
- req->status = BLKIF_RSP_OKAY;
- req->ds_tag_type = DEVSTAT_TAG_SIMPLE;
- req->nr_pages = nseg;
- req->nr_512b_sectors = 0;
+ reqlist->ds_tag_type = DEVSTAT_TAG_SIMPLE;
bio_flags = 0;
- sg = NULL;
+ total_sects = 0;
+ nr_sects = 0;
+
+ /*
+ * First determine whether we have enough free KVA to satisfy this
+ * request list. If not, tell xbb_run_queue() so it can go to
+ * sleep until we have more KVA.
+ */
+ reqlist->kva = NULL;
+ if (reqlist->nr_segments != 0) {
+ reqlist->kva = xbb_get_kva(xbb, reqlist->nr_segments);
+ if (reqlist->kva == NULL) {
+ /*
+ * If we're out of KVA, return ENOMEM.
+ */
+ return (ENOMEM);
+ }
+ }
- binuptime(&req->ds_t0);
- devstat_start_transaction(xbb->xbb_stats, &req->ds_t0);
+ binuptime(&reqlist->ds_t0);
+ devstat_start_transaction(xbb->xbb_stats, &reqlist->ds_t0);
- switch (req->operation) {
+ switch (reqlist->operation) {
case BLKIF_OP_WRITE_BARRIER:
bio_flags |= BIO_ORDERED;
- req->ds_tag_type = DEVSTAT_TAG_ORDERED;
+ reqlist->ds_tag_type = DEVSTAT_TAG_ORDERED;
/* FALLTHROUGH */
case BLKIF_OP_WRITE:
operation = BIO_WRITE;
- req->ds_trans_type = DEVSTAT_WRITE;
+ reqlist->ds_trans_type = DEVSTAT_WRITE;
if ((xbb->flags & XBBF_READ_ONLY) != 0) {
DPRINTF("Attempt to write to read only device %s\n",
xbb->dev_name);
- goto fail_send_response;
+ reqlist->status = BLKIF_RSP_ERROR;
+ goto send_response;
}
break;
case BLKIF_OP_READ:
operation = BIO_READ;
- req->ds_trans_type = DEVSTAT_READ;
+ reqlist->ds_trans_type = DEVSTAT_READ;
break;
case BLKIF_OP_FLUSH_DISKCACHE:
+ /*
+ * If this is true, the user has requested that we disable
+ * flush support. So we just complete the requests
+ * successfully.
+ */
+ if (xbb->disable_flush != 0) {
+ goto send_response;
+ }
+
+ /*
+		 * The user has requested that we send only one real flush
+ * for every N flush requests. So keep count, and either
+ * complete the request immediately or queue it for the
+ * backend.
+ */
+ if (xbb->flush_interval != 0) {
+ if (++(xbb->flush_count) < xbb->flush_interval) {
+ goto send_response;
+ } else
+ xbb->flush_count = 0;
+ }
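		/*
		 * Editor's worked example, not part of the patch: with
		 * flush_interval == 4, flush requests 1 through 3 bump
		 * flush_count and are completed without touching the
		 * backend; the 4th reaches the interval, resets
		 * flush_count to 0, and falls through to issue a real
		 * BIO_FLUSH.
		 */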
+
operation = BIO_FLUSH;
- req->ds_tag_type = DEVSTAT_TAG_ORDERED;
- req->ds_trans_type = DEVSTAT_NO_DATA;
+ reqlist->ds_tag_type = DEVSTAT_TAG_ORDERED;
+ reqlist->ds_trans_type = DEVSTAT_NO_DATA;
goto do_dispatch;
/*NOTREACHED*/
default:
DPRINTF("error: unknown block io operation [%d]\n",
- req->operation);
- goto fail_send_response;
+ reqlist->operation);
+ reqlist->status = BLKIF_RSP_ERROR;
+ goto send_response;
}
- /* Check that number of segments is sane. */
- if (unlikely(nseg == 0)
- || unlikely(nseg > xbb->max_request_segments)) {
- DPRINTF("Bad number of segments in request (%d)\n", nseg);
- goto fail_send_response;
- }
-
- map = maps;
+ reqlist->xbb = xbb;
xbb_sg = xbb->xbb_sgs;
- block_segs = MIN(req->nr_pages, BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK);
- sg = ring_req->seg;
- last_block_sg = sg + block_segs;
+ map = xbb->maps;
seg_idx = 0;
- while (1) {
- while (sg < last_block_sg) {
-
- xbb_sg->first_sect = sg->first_sect;
- xbb_sg->last_sect = sg->last_sect;
- xbb_sg->nsect =
- (int8_t)(sg->last_sect - sg->first_sect + 1);
-
- if ((sg->last_sect >= (PAGE_SIZE >> 9))
- || (xbb_sg->nsect <= 0))
- goto fail_send_response;
-
- nr_sects += xbb_sg->nsect;
- map->host_addr = xbb_req_gntaddr(req, seg_idx,
- /*sector*/0);
- map->flags = GNTMAP_host_map;
- map->ref = sg->gref;
- map->dom = xbb->otherend_id;
- if (operation == BIO_WRITE)
- map->flags |= GNTMAP_readonly;
- sg++;
- map++;
- xbb_sg++;
- seg_idx++;
+ STAILQ_FOREACH(nreq, &reqlist->contig_req_list, links) {
+ blkif_request_t *ring_req;
+ RING_IDX req_ring_idx;
+ u_int req_seg_idx;
+
+ ring_req = nreq->ring_req;
+ req_ring_idx = nreq->req_ring_idx;
+ nr_sects = 0;
+ nseg = ring_req->nr_segments;
+ nreq->id = ring_req->id;
+ nreq->nr_pages = nseg;
+ nreq->nr_512b_sectors = 0;
+ req_seg_idx = 0;
+ sg = NULL;
+
+ /* Check that number of segments is sane. */
+ if (unlikely(nseg == 0)
+ || unlikely(nseg > xbb->max_request_segments)) {
+ DPRINTF("Bad number of segments in request (%d)\n",
+ nseg);
+ reqlist->status = BLKIF_RSP_ERROR;
+ goto send_response;
}
- block_segs = MIN(nseg - seg_idx,
- BLKIF_MAX_SEGMENTS_PER_SEGMENT_BLOCK);
- if (block_segs == 0)
- break;
-
- /*
- * Fetch the next request block full of SG elements.
- * For now, only the spacing between entries is different
- * in the different ABIs, not the sg entry layout.
- */
- req_ring_idx++;
- switch (xbb->abi) {
- case BLKIF_PROTOCOL_NATIVE:
- sg = BLKRING_GET_SG_REQUEST(&xbb->rings.native,
- req_ring_idx);
- break;
- case BLKIF_PROTOCOL_X86_32:
- {
- sg = BLKRING_GET_SG_REQUEST(&xbb->rings.x86_32,
- req_ring_idx);
- break;
- }
- case BLKIF_PROTOCOL_X86_64:
- {
- sg = BLKRING_GET_SG_REQUEST(&xbb->rings.x86_64,
- req_ring_idx);
- break;
- }
- default:
- panic("Unexpected blkif protocol ABI.");
- /* NOTREACHED */
- }
+ block_segs = MIN(nreq->nr_pages,
+ BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK);
+ sg = ring_req->seg;
last_block_sg = sg + block_segs;
- }
+ while (1) {
+
+ while (sg < last_block_sg) {
+ KASSERT(seg_idx <
+ XBB_MAX_SEGMENTS_PER_REQLIST,
+ ("seg_idx %d is too large, max "
+ "segs %d\n", seg_idx,
+ XBB_MAX_SEGMENTS_PER_REQLIST));
+
+ xbb_sg->first_sect = sg->first_sect;
+ xbb_sg->last_sect = sg->last_sect;
+ xbb_sg->nsect =
+ (int8_t)(sg->last_sect -
+ sg->first_sect + 1);
+
+ if ((sg->last_sect >= (PAGE_SIZE >> 9))
+ || (xbb_sg->nsect <= 0)) {
+ reqlist->status = BLKIF_RSP_ERROR;
+ goto send_response;
+ }
+
+ nr_sects += xbb_sg->nsect;
+ map->host_addr = xbb_get_gntaddr(reqlist,
+ seg_idx, /*sector*/0);
+ KASSERT(map->host_addr + PAGE_SIZE <=
+ xbb->ring_config.gnt_addr,
+ ("Host address %#jx len %d overlaps "
+ "ring address %#jx\n",
+ (uintmax_t)map->host_addr, PAGE_SIZE,
+ (uintmax_t)xbb->ring_config.gnt_addr));
+
+ map->flags = GNTMAP_host_map;
+ map->ref = sg->gref;
+ map->dom = xbb->otherend_id;
+ if (operation == BIO_WRITE)
+ map->flags |= GNTMAP_readonly;
+ sg++;
+ map++;
+ xbb_sg++;
+ seg_idx++;
+ req_seg_idx++;
+ }
- /* Convert to the disk's sector size */
- req->nr_512b_sectors = nr_sects;
- nr_sects = (nr_sects << 9) >> xbb->sector_size_shift;
+ block_segs = MIN(nseg - req_seg_idx,
+ BLKIF_MAX_SEGMENTS_PER_SEGMENT_BLOCK);
+ if (block_segs == 0)
+ break;
- if ((req->nr_512b_sectors & ((xbb->sector_size >> 9) - 1)) != 0) {
- device_printf(xbb->dev, "%s: I/O size (%d) is not a multiple "
- "of the backing store sector size (%d)\n",
- __func__, req->nr_512b_sectors << 9,
- xbb->sector_size);
- goto fail_send_response;
+ /*
+ * Fetch the next request block full of SG elements.
+ * For now, only the spacing between entries is
+ * different in the different ABIs, not the sg entry
+ * layout.
+ */
+ req_ring_idx++;
+ switch (xbb->abi) {
+ case BLKIF_PROTOCOL_NATIVE:
+ sg = BLKRING_GET_SG_REQUEST(&xbb->rings.native,
+ req_ring_idx);
+ break;
+ case BLKIF_PROTOCOL_X86_32:
+ {
+ sg = BLKRING_GET_SG_REQUEST(&xbb->rings.x86_32,
+ req_ring_idx);
+ break;
+ }
+ case BLKIF_PROTOCOL_X86_64:
+ {
+ sg = BLKRING_GET_SG_REQUEST(&xbb->rings.x86_64,
+ req_ring_idx);
+ break;
+ }
+ default:
+ panic("Unexpected blkif protocol ABI.");
+ /* NOTREACHED */
+ }
+ last_block_sg = sg + block_segs;
+ }
+
+ /* Convert to the disk's sector size */
+ nreq->nr_512b_sectors = nr_sects;
+ nr_sects = (nr_sects << 9) >> xbb->sector_size_shift;
+ total_sects += nr_sects;
+
+ if ((nreq->nr_512b_sectors &
+ ((xbb->sector_size >> 9) - 1)) != 0) {
+ device_printf(xbb->dev, "%s: I/O size (%d) is not "
+ "a multiple of the backing store sector "
+ "size (%d)\n", __func__,
+ nreq->nr_512b_sectors << 9,
+ xbb->sector_size);
+ reqlist->status = BLKIF_RSP_ERROR;
+ goto send_response;
+ }
}
error = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
- maps, req->nr_pages);
+ xbb->maps, reqlist->nr_segments);
if (error != 0)
panic("Grant table operation failed (%d)", error);
- for (seg_idx = 0, map = maps; seg_idx < nseg; seg_idx++, map++) {
+ reqlist->flags |= XBB_REQLIST_MAPPED;
+
+ for (seg_idx = 0, map = xbb->maps; seg_idx < reqlist->nr_segments;
+ seg_idx++, map++){
if (unlikely(map->status != 0)) {
- DPRINTF("invalid buffer -- could not remap it (%d)\n",
- map->status);
- DPRINTF("Mapping(%d): Host Addr 0x%lx, flags 0x%x "
- "ref 0x%x, dom %d\n", seg_idx,
+ DPRINTF("invalid buffer -- could not remap "
+ "it (%d)\n", map->status);
+ DPRINTF("Mapping(%d): Host Addr 0x%lx, flags "
+ "0x%x ref 0x%x, dom %d\n", seg_idx,
map->host_addr, map->flags, map->ref,
map->dom);
- goto fail_unmap_req;
+ reqlist->status = BLKIF_RSP_ERROR;
+ goto send_response;
}
- req->gnt_handles[seg_idx] = map->handle;
+ reqlist->gnt_handles[seg_idx] = map->handle;
}
- if (ring_req->sector_number + nr_sects > xbb->media_num_sectors) {
+ if (reqlist->starting_sector_number + total_sects >
+ xbb->media_num_sectors) {
DPRINTF("%s of [%" PRIu64 ",%" PRIu64 "] "
"extends past end of device %s\n",
operation == BIO_READ ? "read" : "write",
- ring_req->sector_number,
- ring_req->sector_number + nr_sects, xbb->dev_name);
- goto fail_unmap_req;
+ reqlist->starting_sector_number,
+ reqlist->starting_sector_number + total_sects,
+ xbb->dev_name);
+ reqlist->status = BLKIF_RSP_ERROR;
+ goto send_response;
}
do_dispatch:
error = xbb->dispatch_io(xbb,
- ring_req,
- req,
- nseg,
+ reqlist,
operation,
bio_flags);
if (error != 0) {
- if (operation == BIO_FLUSH)
- goto fail_send_response;
- else
- goto fail_unmap_req;
+ reqlist->status = BLKIF_RSP_ERROR;
+ goto send_response;
}
- return;
+ return (0);
+send_response:
-fail_unmap_req:
- xbb_unmap_req(req);
- /* FALLTHROUGH */
+ xbb_complete_reqlist(xbb, reqlist);
-fail_send_response:
- xbb_send_response(xbb, req, BLKIF_RSP_ERROR);
- xbb_release_req(xbb, req);
- devstat_end_transaction(xbb->xbb_stats,
- /*bytes*/0,
- req->ds_tag_type,
- req->ds_trans_type,
- /*now*/NULL,
- /*then*/&req->ds_t0);
+ return (0);
+}
+
+static __inline int
+xbb_count_sects(blkif_request_t *ring_req)
+{
+ int i;
+ int cur_size = 0;
+
+ for (i = 0; i < ring_req->nr_segments; i++) {
+ int nsect;
+
+ nsect = (int8_t)(ring_req->seg[i].last_sect -
+ ring_req->seg[i].first_sect + 1);
+ if (nsect <= 0)
+ break;
+
+ cur_size += nsect;
+ }
+
+ return (cur_size);
}
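/*
 * Editor's worked example, not part of the patch: a front-end READ
 * covering 8 sectors starting at sector 100 yields
 * xbb_count_sects() == 8, so xbb_run_queue() records
 * next_contig_sector = 108.  A following READ at sector 108 is
 * appended to the same reqlist; a request at any other sector, with
 * a different operation, or that would push nr_segments past
 * max_reqlist_segments forces a new reqlist instead.
 */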
/**
@@ -1172,95 +1809,210 @@ fail_send_response:
static void
xbb_run_queue(void *context, int pending)
{
- struct xbb_softc *xbb;
- blkif_back_rings_t *rings;
- RING_IDX rp;
+ struct xbb_softc *xbb;
+ blkif_back_rings_t *rings;
+ RING_IDX rp;
+ uint64_t cur_sector;
+ int cur_operation;
+ struct xbb_xen_reqlist *reqlist;
- xbb = (struct xbb_softc *)context;
- rings = &xbb->rings;
+ xbb = (struct xbb_softc *)context;
+ rings = &xbb->rings;
/*
- * Cache req_prod to avoid accessing a cache line shared
- * with the frontend.
+ * Work gather and dispatch loop. Note that we have a bias here
+ * towards gathering I/O sent by blockfront. We first gather up
+ * everything in the ring, as long as we have resources. Then we
+ * dispatch one request, and then attempt to gather up any
+ * additional requests that have come in while we were dispatching
+ * the request.
+ *
+ * This allows us to get a clearer picture (via devstat) of how
+ * many requests blockfront is queueing to us at any given time.
*/
- rp = rings->common.sring->req_prod;
+ for (;;) {
+ int retval;
+
+ /*
+ * Initialize reqlist to the last element in the pending
+ * queue, if there is one. This allows us to add more
+ * requests to that request list, if we have room.
+ */
+ reqlist = STAILQ_LAST(&xbb->reqlist_pending_stailq,
+ xbb_xen_reqlist, links);
+ if (reqlist != NULL) {
+ cur_sector = reqlist->next_contig_sector;
+ cur_operation = reqlist->operation;
+ } else {
+ cur_operation = 0;
+ cur_sector = 0;
+ }
- /* Ensure we see queued requests up to 'rp'. */
- rmb();
+ /*
+ * Cache req_prod to avoid accessing a cache line shared
+ * with the frontend.
+ */
+ rp = rings->common.sring->req_prod;
+
+ /* Ensure we see queued requests up to 'rp'. */
+ rmb();
+
+ /**
+ * Run so long as there is work to consume and the generation
+ * of a response will not overflow the ring.
+ *
+ * @note There's a 1 to 1 relationship between requests and
+ * responses, so an overflow should never occur. This
+ * test is to protect our domain from digesting bogus
+ * data. Shouldn't we log this?
+ */
+ while (rings->common.req_cons != rp
+ && RING_REQUEST_CONS_OVERFLOW(&rings->common,
+ rings->common.req_cons) == 0){
+ blkif_request_t ring_req_storage;
+ blkif_request_t *ring_req;
+ int cur_size;
+
+ switch (xbb->abi) {
+ case BLKIF_PROTOCOL_NATIVE:
+ ring_req = RING_GET_REQUEST(&xbb->rings.native,
+ rings->common.req_cons);
+ break;
+ case BLKIF_PROTOCOL_X86_32:
+ {
+ struct blkif_x86_32_request *ring_req32;
+
+ ring_req32 = RING_GET_REQUEST(
+ &xbb->rings.x86_32, rings->common.req_cons);
+ blkif_get_x86_32_req(&ring_req_storage,
+ ring_req32);
+ ring_req = &ring_req_storage;
+ break;
+ }
+ case BLKIF_PROTOCOL_X86_64:
+ {
+ struct blkif_x86_64_request *ring_req64;
+
+				ring_req64 = RING_GET_REQUEST(&xbb->rings.x86_64,
+ rings->common.req_cons);
+ blkif_get_x86_64_req(&ring_req_storage,
+ ring_req64);
+ ring_req = &ring_req_storage;
+ break;
+ }
+ default:
+ panic("Unexpected blkif protocol ABI.");
+ /* NOTREACHED */
+ }
- /**
- * Run so long as there is work to consume and the generation
- * of a response will not overflow the ring.
- *
- * @note There's a 1 to 1 relationship between requests and responses,
- * so an overflow should never occur. This test is to protect
- * our domain from digesting bogus data. Shouldn't we log this?
- */
- while (rings->common.req_cons != rp
- && RING_REQUEST_CONS_OVERFLOW(&rings->common,
- rings->common.req_cons) == 0) {
- blkif_request_t ring_req_storage;
- blkif_request_t *ring_req;
- struct xbb_xen_req *req;
- RING_IDX req_ring_idx;
-
- req = xbb_get_req(xbb);
- if (req == NULL) {
/*
- * Resource shortage has been recorded.
- * We'll be scheduled to run once a request
- * object frees up due to a completion.
+ * Check for situations that would require closing
+ * off this I/O for further coalescing:
+ * - Coalescing is turned off.
+ * - Current I/O is out of sequence with the previous
+ * I/O.
+ * - Coalesced I/O would be too large.
*/
- break;
- }
+ if ((reqlist != NULL)
+ && ((xbb->no_coalesce_reqs != 0)
+ || ((xbb->no_coalesce_reqs == 0)
+ && ((ring_req->sector_number != cur_sector)
+ || (ring_req->operation != cur_operation)
+ || ((ring_req->nr_segments + reqlist->nr_segments) >
+ xbb->max_reqlist_segments))))) {
+ reqlist = NULL;
+ }
- switch (xbb->abi) {
- case BLKIF_PROTOCOL_NATIVE:
- ring_req = RING_GET_REQUEST(&xbb->rings.native,
- rings->common.req_cons);
- break;
- case BLKIF_PROTOCOL_X86_32:
- {
- struct blkif_x86_32_request *ring_req32;
-
- ring_req32 = RING_GET_REQUEST(&xbb->rings.x86_32,
- rings->common.req_cons);
- blkif_get_x86_32_req(&ring_req_storage, ring_req32);
- ring_req = &ring_req_storage;
- break;
+ /*
+ * Grab and check for all resources in one shot.
+ * If we can't get all of the resources we need,
+ * the shortage is noted and the thread will get
+ * woken up when more resources are available.
+ */
+ retval = xbb_get_resources(xbb, &reqlist, ring_req,
+ xbb->rings.common.req_cons);
+
+ if (retval != 0) {
+ /*
+ * Resource shortage has been recorded.
+ * We'll be scheduled to run once a request
+ * object frees up due to a completion.
+ */
+ break;
+ }
+
+ /*
+ * Signify that we can overwrite this request with
+ * a response by incrementing our consumer index.
+ * The response won't be generated until after
+ * we've already consumed all necessary data out
+ * of the version of the request in the ring buffer
+ * (for native mode). We must update the consumer
+			 * index before issuing back-end I/O so there is
+ * no possibility that it will complete and a
+ * response be generated before we make room in
+ * the queue for that response.
+ */
+ xbb->rings.common.req_cons +=
+ BLKIF_SEGS_TO_BLOCKS(ring_req->nr_segments);
+ xbb->reqs_received++;
+
+ cur_size = xbb_count_sects(ring_req);
+ cur_sector = ring_req->sector_number + cur_size;
+ reqlist->next_contig_sector = cur_sector;
+ cur_operation = ring_req->operation;
}
- case BLKIF_PROTOCOL_X86_64:
- {
- struct blkif_x86_64_request *ring_req64;
-
- ring_req64 = RING_GET_REQUEST(&xbb->rings.x86_64,
- rings->common.req_cons);
- blkif_get_x86_64_req(&ring_req_storage, ring_req64);
- ring_req = &ring_req_storage;
+
+ /* Check for I/O to dispatch */
+ reqlist = STAILQ_FIRST(&xbb->reqlist_pending_stailq);
+ if (reqlist == NULL) {
+ /*
+ * We're out of work to do, put the task queue to
+ * sleep.
+ */
break;
}
- default:
- panic("Unexpected blkif protocol ABI.");
- /* NOTREACHED */
- }
/*
- * Signify that we can overwrite this request with a
- * response by incrementing our consumer index. The
- * response won't be generated until after we've already
- * consumed all necessary data out of the version of the
- * request in the ring buffer (for native mode). We
- * must update the consumer index before issueing back-end
- * I/O so there is no possibility that it will complete
- * and a response be generated before we make room in
- * the queue for that response.
+ * Grab the first request off the queue and attempt
+ * to dispatch it.
*/
- req_ring_idx = xbb->rings.common.req_cons;
- xbb->rings.common.req_cons +=
- BLKIF_SEGS_TO_BLOCKS(ring_req->nr_segments);
+ STAILQ_REMOVE_HEAD(&xbb->reqlist_pending_stailq, links);
- xbb_dispatch_io(xbb, ring_req, req, req_ring_idx);
+ retval = xbb_dispatch_io(xbb, reqlist);
+ if (retval != 0) {
+ /*
+ * xbb_dispatch_io() returns non-zero only when
+ * there is a resource shortage. If that's the
+ * case, re-queue this request on the head of the
+ * queue, and go to sleep until we have more
+ * resources.
+ */
+ STAILQ_INSERT_HEAD(&xbb->reqlist_pending_stailq,
+ reqlist, links);
+ break;
+ } else {
+ /*
+ * If we still have anything on the queue after
+ * removing the head entry, that is because we
+ * met one of the criteria to create a new
+ * request list (outlined above), and we'll call
+ * that a forced dispatch for statistical purposes.
+ *
+ * Otherwise, if there is only one element on the
+ * queue, we coalesced everything available on
+ * the ring and we'll call that a normal dispatch.
+ */
+ reqlist = STAILQ_FIRST(&xbb->reqlist_pending_stailq);
+
+ if (reqlist != NULL)
+ xbb->forced_dispatch++;
+ else
+ xbb->normal_dispatch++;
+
+ xbb->total_dispatch++;
+ }
}
}
@@ -1285,11 +2037,7 @@ xbb_intr(void *arg)
* Backend handler for character device access.
*
* \param xbb Per-instance xbb configuration structure.
- * \param ring_req Front-end's I/O request as pulled from the shared
- * communication ring.
- * \param req Allocated internal request structure.
- * \param nseg The number of valid segments for this request in
- * xbb->xbb_sgs.
+ * \param reqlist Allocated internal request list structure.
* \param operation BIO_* I/O operation code.
* \param bio_flags Additional bio_flag data to pass to any generated
* bios (e.g. BIO_ORDERED).
@@ -1297,28 +2045,30 @@ xbb_intr(void *arg)
* \return 0 for success, errno codes for failure.
*/
static int
-xbb_dispatch_dev(struct xbb_softc *xbb, blkif_request_t *ring_req,
- struct xbb_xen_req *req, int nseg, int operation,
- int bio_flags)
+xbb_dispatch_dev(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
+ int operation, int bio_flags)
{
struct xbb_dev_data *dev_data;
- struct bio *bios[XBB_MAX_SEGMENTS_PER_REQUEST];
+ struct bio *bios[XBB_MAX_SEGMENTS_PER_REQLIST];
+ struct xbb_xen_req *nreq;
off_t bio_offset;
struct bio *bio;
struct xbb_sg *xbb_sg;
u_int nbio;
u_int bio_idx;
+ u_int nseg;
u_int seg_idx;
int error;
dev_data = &xbb->backend.dev;
- bio_offset = (off_t)ring_req->sector_number
+ bio_offset = (off_t)reqlist->starting_sector_number
<< xbb->sector_size_shift;
error = 0;
nbio = 0;
bio_idx = 0;
if (operation == BIO_FLUSH) {
+ nreq = STAILQ_FIRST(&reqlist->contig_req_list);
bio = g_new_bio();
if (unlikely(bio == NULL)) {
DPRINTF("Unable to allocate bio for BIO_FLUSH\n");
@@ -1332,19 +2082,21 @@ xbb_dispatch_dev(struct xbb_softc *xbb, blkif_request_t *ring_req,
bio->bio_offset = 0;
bio->bio_data = 0;
bio->bio_done = xbb_bio_done;
- bio->bio_caller1 = req;
+		bio->bio_caller1 = reqlist;
bio->bio_pblkno = 0;
- req->pendcnt = 1;
+		reqlist->pendcnt = 1;
- (*dev_data->csw->d_strategy)(bios[bio_idx]);
+ (*dev_data->csw->d_strategy)(bio);
return (0);
}
- for (seg_idx = 0, bio = NULL, xbb_sg = xbb->xbb_sgs;
- seg_idx < nseg;
- seg_idx++, xbb_sg++) {
+ xbb_sg = xbb->xbb_sgs;
+ bio = NULL;
+ nseg = reqlist->nr_segments;
+
+ for (seg_idx = 0; seg_idx < nseg; seg_idx++, xbb_sg++) {
/*
* KVA will not be contiguous, so any additional
@@ -1353,10 +2105,10 @@ xbb_dispatch_dev(struct xbb_softc *xbb, blkif_request_t *ring_req,
if ((bio != NULL)
&& (xbb_sg->first_sect != 0)) {
if ((bio->bio_length & (xbb->sector_size - 1)) != 0) {
- printf("%s: Discontiguous I/O request from "
- "domain %d ends on non-sector "
- "boundary\n", __func__,
- xbb->otherend_id);
+ printf("%s: Discontiguous I/O request "
+ "from domain %d ends on "
+ "non-sector boundary\n",
+ __func__, xbb->otherend_id);
error = EINVAL;
goto fail_free_bios;
}
@@ -1365,12 +2117,12 @@ xbb_dispatch_dev(struct xbb_softc *xbb, blkif_request_t *ring_req,
if (bio == NULL) {
/*
- * Make sure that the start of this bio is aligned
- * to a device sector.
+ * Make sure that the start of this bio is
+ * aligned to a device sector.
*/
- if ((bio_offset & (xbb->sector_size - 1)) != 0) {
- printf("%s: Misaligned I/O request from "
- "domain %d\n", __func__,
+ if ((bio_offset & (xbb->sector_size - 1)) != 0){
+ printf("%s: Misaligned I/O request "
+ "from domain %d\n", __func__,
xbb->otherend_id);
error = EINVAL;
goto fail_free_bios;
@@ -1385,12 +2137,11 @@ xbb_dispatch_dev(struct xbb_softc *xbb, blkif_request_t *ring_req,
bio->bio_flags |= bio_flags;
bio->bio_dev = dev_data->cdev;
bio->bio_offset = bio_offset;
- bio->bio_data = xbb_req_ioaddr(req, seg_idx,
- xbb_sg->first_sect);
+ bio->bio_data = xbb_reqlist_ioaddr(reqlist, seg_idx,
+ xbb_sg->first_sect);
bio->bio_done = xbb_bio_done;
- bio->bio_caller1 = req;
- bio->bio_pblkno = bio_offset
- >> xbb->sector_size_shift;
+ bio->bio_caller1 = reqlist;
+ bio->bio_pblkno = bio_offset >> xbb->sector_size_shift;
}
bio->bio_length += xbb_sg->nsect << 9;
@@ -1400,10 +2151,10 @@ xbb_dispatch_dev(struct xbb_softc *xbb, blkif_request_t *ring_req,
if (xbb_sg->last_sect != (PAGE_SIZE - 512) >> 9) {
if ((bio->bio_length & (xbb->sector_size - 1)) != 0) {
- printf("%s: Discontiguous I/O request from "
- "domain %d ends on non-sector "
- "boundary\n", __func__,
- xbb->otherend_id);
+ printf("%s: Discontiguous I/O request "
+ "from domain %d ends on "
+ "non-sector boundary\n",
+ __func__, xbb->otherend_id);
error = EINVAL;
goto fail_free_bios;
}
@@ -1415,7 +2166,7 @@ xbb_dispatch_dev(struct xbb_softc *xbb, blkif_request_t *ring_req,
}
}
- req->pendcnt = nbio;
+ reqlist->pendcnt = nbio;
for (bio_idx = 0; bio_idx < nbio; bio_idx++)
{
@@ -1423,10 +2174,10 @@ xbb_dispatch_dev(struct xbb_softc *xbb, blkif_request_t *ring_req,
vm_offset_t kva_offset;
kva_offset = (vm_offset_t)bios[bio_idx]->bio_data
- - (vm_offset_t)req->bounce;
+ - (vm_offset_t)reqlist->bounce;
if (operation == BIO_WRITE) {
memcpy(bios[bio_idx]->bio_data,
- (uint8_t *)req->kva + kva_offset,
+ (uint8_t *)reqlist->kva + kva_offset,
bios[bio_idx]->bio_bcount);
}
#endif
@@ -1438,7 +2189,7 @@ xbb_dispatch_dev(struct xbb_softc *xbb, blkif_request_t *ring_req,
fail_free_bios:
for (bio_idx = 0; bio_idx < (nbio-1); bio_idx++)
g_destroy_bio(bios[bio_idx]);
-
+
return (error);
}
@@ -1446,24 +2197,21 @@ fail_free_bios:
* Backend handler for file access.
*
* \param xbb Per-instance xbb configuration structure.
- * \param ring_req Front-end's I/O request as pulled from the shared
- * communication ring.
- * \param req Allocated internal request structure.
- * \param nseg The number of valid segments for this request in
- * xbb->xbb_sgs.
+ * \param reqlist Allocated internal request list.
* \param operation BIO_* I/O operation code.
- * \param bio_flags Additional bio_flag data to pass to any generated bios
+ * \param flags Additional bio_flag data to pass to any generated bios
* (e.g. BIO_ORDERED).
*
* \return 0 for success, errno codes for failure.
*/
static int
-xbb_dispatch_file(struct xbb_softc *xbb, blkif_request_t *ring_req,
- struct xbb_xen_req *req, int nseg, int operation,
- int flags)
+xbb_dispatch_file(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
+ int operation, int flags)
{
struct xbb_file_data *file_data;
u_int seg_idx;
+ u_int nseg;
+ off_t sectors_sent;
struct uio xuio;
struct xbb_sg *xbb_sg;
struct iovec *xiovec;
@@ -1475,11 +2223,10 @@ xbb_dispatch_file(struct xbb_softc *xbb, blkif_request_t *ring_req,
int error;
file_data = &xbb->backend.file;
+ sectors_sent = 0;
error = 0;
bzero(&xuio, sizeof(xuio));
- req->pendcnt = 0;
-
switch (operation) {
case BIO_READ:
xuio.uio_rw = UIO_READ;
@@ -1509,37 +2256,39 @@ xbb_dispatch_file(struct xbb_softc *xbb, blkif_request_t *ring_req,
panic("invalid operation %d", operation);
/* NOTREACHED */
}
- xuio.uio_offset = (vm_offset_t)ring_req->sector_number
+ xuio.uio_offset = (vm_offset_t)reqlist->starting_sector_number
<< xbb->sector_size_shift;
-
xuio.uio_segflg = UIO_SYSSPACE;
xuio.uio_iov = file_data->xiovecs;
xuio.uio_iovcnt = 0;
+ xbb_sg = xbb->xbb_sgs;
+ nseg = reqlist->nr_segments;
- for (seg_idx = 0, xiovec = NULL, xbb_sg = xbb->xbb_sgs;
- seg_idx < nseg; seg_idx++, xbb_sg++) {
+ for (xiovec = NULL, seg_idx = 0; seg_idx < nseg; seg_idx++, xbb_sg++) {
/*
- * If the first sector is not 0, the KVA will not be
- * contiguous and we'll need to go on to another segment.
+ * If the first sector is not 0, the KVA will
+ * not be contiguous and we'll need to go on
+ * to another segment.
*/
if (xbb_sg->first_sect != 0)
xiovec = NULL;
if (xiovec == NULL) {
xiovec = &file_data->xiovecs[xuio.uio_iovcnt];
- xiovec->iov_base = xbb_req_ioaddr(req, seg_idx,
- xbb_sg->first_sect);
+ xiovec->iov_base = xbb_reqlist_ioaddr(reqlist,
+ seg_idx, xbb_sg->first_sect);
#ifdef XBB_USE_BOUNCE_BUFFERS
/*
- * Store the address of the incoming buffer at this
- * particular offset as well, so we can do the copy
- * later without having to do more work to
- * recalculate this address.
+ * Store the address of the incoming
+ * buffer at this particular offset
+ * as well, so we can do the copy
+ * later without having to do more
+ * work to recalculate this address.
*/
p_vaddr = &file_data->xiovecs_vaddr[xuio.uio_iovcnt];
- *p_vaddr = xbb_req_vaddr(req, seg_idx,
- xbb_sg->first_sect);
+ *p_vaddr = xbb_reqlist_vaddr(reqlist, seg_idx,
+ xbb_sg->first_sect);
#endif /* XBB_USE_BOUNCE_BUFFERS */
xiovec->iov_len = 0;
xuio.uio_iovcnt++;
@@ -1550,9 +2299,9 @@ xbb_dispatch_file(struct xbb_softc *xbb, blkif_request_t *ring_req,
xuio.uio_resid += xbb_sg->nsect << 9;
/*
- * If the last sector is not the full page size count,
- * the next segment will not be contiguous in KVA and we
- * need a new iovec.
+ * If the last sector is not the full page
+ * size count, the next segment will not be
+ * contiguous in KVA and we need a new iovec.
*/
if (xbb_sg->last_sect != (PAGE_SIZE - 512) >> 9)
xiovec = NULL;
@@ -1676,23 +2425,10 @@ xbb_dispatch_file(struct xbb_softc *xbb, blkif_request_t *ring_req,
bailout_send_response:
- /*
- * All I/O is already done, send the response. A lock is not
- * necessary here because we're single threaded, and therefore the
- * only context accessing this request right now. If that changes,
- * we may need some locking here.
- */
- xbb_unmap_req(req);
- xbb_send_response(xbb, req, (error == 0) ? BLKIF_RSP_OKAY :
- BLKIF_RSP_ERROR);
- devstat_end_transaction(xbb->xbb_stats,
- /*bytes*/error == 0 ? req->nr_512b_sectors << 9
- : 0,
- req->ds_tag_type,
- req->ds_trans_type,
- /*now*/NULL,
- /*then*/&req->ds_t0);
- xbb_release_req(xbb, req);
+ if (error != 0)
+ reqlist->status = BLKIF_RSP_ERROR;
+
+ xbb_complete_reqlist(xbb, reqlist);
return (0);
}
@@ -1913,6 +2649,12 @@ xbb_open_backend(struct xbb_softc *xbb)
DPRINTF("opening dev=%s\n", xbb->dev_name);
+ if (rootvnode == NULL) {
+ xenbus_dev_fatal(xbb->dev, ENOENT,
+ "Root file system not mounted");
+ return (ENOENT);
+ }
+
if ((xbb->flags & XBBF_READ_ONLY) == 0)
flags |= FWRITE;
@@ -1996,11 +2738,39 @@ xbb_open_backend(struct xbb_softc *xbb)
/*------------------------ Inter-Domain Communication ------------------------*/
/**
- * Cleanup all inter-domain communication mechanisms.
+ * Free dynamically allocated KVA or pseudo-physical address allocations.
*
* \param xbb Per-instance xbb configuration structure.
*/
static void
+xbb_free_communication_mem(struct xbb_softc *xbb)
+{
+ if (xbb->kva != 0) {
+#ifndef XENHVM
+ kmem_free(kernel_map, xbb->kva, xbb->kva_size);
+#else
+ if (xbb->pseudo_phys_res != NULL) {
+ bus_release_resource(xbb->dev, SYS_RES_MEMORY,
+ xbb->pseudo_phys_res_id,
+ xbb->pseudo_phys_res);
+ xbb->pseudo_phys_res = NULL;
+ }
+#endif
+ }
+ xbb->kva = 0;
+ xbb->gnt_base_addr = 0;
+ if (xbb->kva_free != NULL) {
+ free(xbb->kva_free, M_XENBLOCKBACK);
+ xbb->kva_free = NULL;
+ }
+}
+
+/**
+ * Cleanup all inter-domain communication mechanisms.
+ *
+ * \param xbb Per-instance xbb configuration structure.
+ */
+static int
xbb_disconnect(struct xbb_softc *xbb)
{
struct gnttab_unmap_grant_ref ops[XBB_MAX_RING_PAGES];
@@ -2011,13 +2781,24 @@ xbb_disconnect(struct xbb_softc *xbb)
DPRINTF("\n");
if ((xbb->flags & XBBF_RING_CONNECTED) == 0)
- return;
+ return (0);
if (xbb->irq != 0) {
unbind_from_irqhandler(xbb->irq);
xbb->irq = 0;
}
+ mtx_unlock(&xbb->lock);
+ taskqueue_drain(xbb->io_taskqueue, &xbb->io_task);
+ mtx_lock(&xbb->lock);
+
+ /*
+ * No new interrupts can generate work, but we must wait
+ * for all currently active requests to drain.
+ */
+ if (xbb->active_request_count != 0)
+ return (EAGAIN);
+
for (ring_idx = 0, op = ops;
ring_idx < xbb->ring_config.ring_pages;
ring_idx++, op++) {
@@ -2033,7 +2814,37 @@ xbb_disconnect(struct xbb_softc *xbb)
if (error != 0)
panic("Grant table op failed (%d)", error);
+ xbb_free_communication_mem(xbb);
+
+ if (xbb->requests != NULL) {
+ free(xbb->requests, M_XENBLOCKBACK);
+ xbb->requests = NULL;
+ }
+
+ if (xbb->request_lists != NULL) {
+ struct xbb_xen_reqlist *reqlist;
+ int i;
+
+		/* There is one request list for every allocated request. */
+ for (i = 0, reqlist = xbb->request_lists;
+ i < xbb->max_requests; i++, reqlist++){
+#ifdef XBB_USE_BOUNCE_BUFFERS
+ if (reqlist->bounce != NULL) {
+ free(reqlist->bounce, M_XENBLOCKBACK);
+ reqlist->bounce = NULL;
+ }
+#endif
+ if (reqlist->gnt_handles != NULL) {
+ free(reqlist->gnt_handles, M_XENBLOCKBACK);
+ reqlist->gnt_handles = NULL;
+ }
+ }
+ free(xbb->request_lists, M_XENBLOCKBACK);
+ xbb->request_lists = NULL;
+ }
+
xbb->flags &= ~XBBF_RING_CONNECTED;
+ return (0);
}
/**
@@ -2135,7 +2946,7 @@ xbb_connect_ring(struct xbb_softc *xbb)
INTR_TYPE_BIO | INTR_MPSAFE,
&xbb->irq);
if (error) {
- xbb_disconnect(xbb);
+ (void)xbb_disconnect(xbb);
xenbus_dev_fatal(xbb->dev, error, "binding event channel");
return (error);
}
@@ -2145,6 +2956,10 @@ xbb_connect_ring(struct xbb_softc *xbb)
return 0;
}
+/* Needed to make bit_alloc() macro work */
+#define calloc(count, size) malloc((count)*(size), M_XENBLOCKBACK, \
+    M_NOWAIT|M_ZERO)
+
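/*
 * Editor's note, not part of the patch: bitstring(3)'s allocator is
 * (assuming the classic macro text)
 *
 *	#define bit_alloc(nbits) \
 *		(bitstr_t *)calloc((size_t)bitstr_size(nbits), sizeof(bitstr_t))
 *
 * and the kernel has no calloc(), so the define above rewrites the
 * embedded calloc() call into the three-argument malloc(9) with
 * M_ZERO, yielding zeroed (all-free) bitmap storage for kva_free.
 */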
/**
* Size KVA and pseudo-physical address allocations based on negotiated
* values for the size and number of I/O requests, and the size of our
@@ -2158,9 +2973,18 @@ xbb_connect_ring(struct xbb_softc *xbb)
static int
xbb_alloc_communication_mem(struct xbb_softc *xbb)
{
- xbb->kva_size = (xbb->ring_config.ring_pages
- + (xbb->max_requests * xbb->max_request_segments))
- * PAGE_SIZE;
+ xbb->reqlist_kva_pages = xbb->max_requests * xbb->max_request_segments;
+ xbb->reqlist_kva_size = xbb->reqlist_kva_pages * PAGE_SIZE;
+ xbb->kva_size = xbb->reqlist_kva_size +
+ (xbb->ring_config.ring_pages * PAGE_SIZE);
+
+ xbb->kva_free = bit_alloc(xbb->reqlist_kva_pages);
+ if (xbb->kva_free == NULL)
+ return (ENOMEM);
+
+ DPRINTF("%s: kva_size = %d, reqlist_kva_size = %d\n",
+ device_get_nameunit(xbb->dev), xbb->kva_size,
+ xbb->reqlist_kva_size);
#ifndef XENHVM
xbb->kva = kmem_alloc_nofault(kernel_map, xbb->kva_size);
if (xbb->kva == 0)
@@ -2185,31 +3009,11 @@ xbb_alloc_communication_mem(struct xbb_softc *xbb)
xbb->kva = (vm_offset_t)rman_get_virtual(xbb->pseudo_phys_res);
xbb->gnt_base_addr = rman_get_start(xbb->pseudo_phys_res);
#endif /* XENHVM */
- return (0);
-}
-/**
- * Free dynamically allocated KVA or pseudo-physical address allocations.
- *
- * \param xbb Per-instance xbb configuration structure.
- */
-static void
-xbb_free_communication_mem(struct xbb_softc *xbb)
-{
- if (xbb->kva != 0) {
-#ifndef XENHVM
- kmem_free(kernel_map, xbb->kva, xbb->kva_size);
-#else
- if (xbb->pseudo_phys_res != NULL) {
- bus_release_resource(xbb->dev, SYS_RES_MEMORY,
- xbb->pseudo_phys_res_id,
- xbb->pseudo_phys_res);
- xbb->pseudo_phys_res = NULL;
- }
-#endif
- }
- xbb->kva = 0;
- xbb->gnt_base_addr = 0;
+ DPRINTF("%s: kva: %#jx, gnt_base_addr: %#jx\n",
+ device_get_nameunit(xbb->dev), (uintmax_t)xbb->kva,
+ (uintmax_t)xbb->gnt_base_addr);
+ return (0);
}
/**
@@ -2228,6 +3032,14 @@ xbb_collect_frontend_info(struct xbb_softc *xbb)
otherend_path = xenbus_get_otherend_path(xbb->dev);
/*
+ * Protocol defaults valid even if all negotiation fails.
+ */
+ xbb->ring_config.ring_pages = 1;
+ xbb->max_requests = BLKIF_MAX_RING_REQUESTS(PAGE_SIZE);
+ xbb->max_request_segments = BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK;
+ xbb->max_request_size = xbb->max_request_segments * PAGE_SIZE;
+
+ /*
* Mandatory data (used in all versions of the protocol) first.
*/
error = xs_gather(XST_NIL, otherend_path,
@@ -2255,19 +3067,19 @@ xbb_collect_frontend_info(struct xbb_softc *xbb)
* tree.
*/
(void)xs_scanf(XST_NIL, otherend_path,
- "ring-pages", NULL, "%" PRIu32,
+ "ring-pages", NULL, "%u",
&xbb->ring_config.ring_pages);
(void)xs_scanf(XST_NIL, otherend_path,
- "max-requests", NULL, "%" PRIu32,
+ "max-requests", NULL, "%u",
&xbb->max_requests);
(void)xs_scanf(XST_NIL, otherend_path,
- "max-request-segments", NULL, "%" PRIu32,
+ "max-request-segments", NULL, "%u",
&xbb->max_request_segments);
(void)xs_scanf(XST_NIL, otherend_path,
- "max-request-size", NULL, "%" PRIu32,
+ "max-request-size", NULL, "%u",
&xbb->max_request_size);
if (xbb->ring_config.ring_pages > XBB_MAX_RING_PAGES) {
@@ -2360,8 +3172,6 @@ xbb_alloc_requests(struct xbb_softc *xbb)
{
struct xbb_xen_req *req;
struct xbb_xen_req *last_req;
- uint8_t *req_kva;
- u_long gnt_base;
/*
* Allocate request book keeping datastructures.
@@ -2374,43 +3184,68 @@ xbb_alloc_requests(struct xbb_softc *xbb)
return (ENOMEM);
}
- req_kva = (uint8_t *)xbb->kva;
- gnt_base = xbb->gnt_base_addr;
req = xbb->requests;
last_req = &xbb->requests[xbb->max_requests - 1];
+ STAILQ_INIT(&xbb->request_free_stailq);
while (req <= last_req) {
+ STAILQ_INSERT_TAIL(&xbb->request_free_stailq, req, links);
+ req++;
+ }
+ return (0);
+}
+
+static int
+xbb_alloc_request_lists(struct xbb_softc *xbb)
+{
+ int i;
+ struct xbb_xen_reqlist *reqlist;
+
+ /*
+ * If no requests can be merged, we need 1 request list per
+ * in flight request.
+ */
+ xbb->request_lists = malloc(xbb->max_requests *
+ sizeof(*xbb->request_lists), M_XENBLOCKBACK, M_NOWAIT|M_ZERO);
+ if (xbb->request_lists == NULL) {
+ xenbus_dev_fatal(xbb->dev, ENOMEM,
+ "Unable to allocate request list structures");
+ return (ENOMEM);
+ }
+
+ STAILQ_INIT(&xbb->reqlist_free_stailq);
+ STAILQ_INIT(&xbb->reqlist_pending_stailq);
+ for (i = 0; i < xbb->max_requests; i++) {
int seg;
- req->xbb = xbb;
- req->kva = req_kva;
- req->gnt_handles = malloc(xbb->max_request_segments
- * sizeof(*req->gnt_handles),
- M_XENBLOCKBACK, M_NOWAIT|M_ZERO);
- if (req->gnt_handles == NULL) {
- xenbus_dev_fatal(xbb->dev, ENOMEM,
- "Unable to allocate request "
- "grant references");
- return (ENOMEM);
- }
+ reqlist = &xbb->request_lists[i];
+
+ reqlist->xbb = xbb;
+
#ifdef XBB_USE_BOUNCE_BUFFERS
- req->bounce = malloc(xbb->max_request_size,
- M_XENBLOCKBACK, M_NOWAIT);
- if (req->bounce == NULL) {
+ reqlist->bounce = malloc(xbb->max_reqlist_size,
+ M_XENBLOCKBACK, M_NOWAIT);
+ if (reqlist->bounce == NULL) {
xenbus_dev_fatal(xbb->dev, ENOMEM,
"Unable to allocate request "
"bounce buffers");
return (ENOMEM);
}
#endif /* XBB_USE_BOUNCE_BUFFERS */
- req->gnt_base = gnt_base;
- req_kva += xbb->max_request_segments * PAGE_SIZE;
- gnt_base += xbb->max_request_segments * PAGE_SIZE;
- SLIST_INSERT_HEAD(&xbb->request_free_slist, req, links);
- for (seg = 0; seg < xbb->max_request_segments; seg++)
- req->gnt_handles[seg] = GRANT_REF_INVALID;
+ reqlist->gnt_handles = malloc(xbb->max_reqlist_segments *
+ sizeof(*reqlist->gnt_handles),
+ M_XENBLOCKBACK, M_NOWAIT|M_ZERO);
+ if (reqlist->gnt_handles == NULL) {
+ xenbus_dev_fatal(xbb->dev, ENOMEM,
+ "Unable to allocate request "
+ "grant references");
+ return (ENOMEM);
+ }
+
+ for (seg = 0; seg < xbb->max_reqlist_segments; seg++)
+ reqlist->gnt_handles[seg] = GRANT_REF_INVALID;
- req++;
+ STAILQ_INSERT_TAIL(&xbb->reqlist_free_stailq, reqlist, links);
}
return (0);
}
@@ -2491,6 +3326,22 @@ xbb_connect(struct xbb_softc *xbb)
if (xbb_collect_frontend_info(xbb) != 0)
return;
+ xbb->flags &= ~XBBF_SHUTDOWN;
+
+ /*
+ * We limit the maximum number of reqlist segments to the maximum
+ * number of segments in the ring, or our absolute maximum,
+ * whichever is smaller.
+ */
+ xbb->max_reqlist_segments = MIN(xbb->max_request_segments *
+ xbb->max_requests, XBB_MAX_SEGMENTS_PER_REQLIST);
+
+ /*
+ * The maximum size is simply a function of the number of segments
+ * we can handle.
+ */
+ xbb->max_reqlist_size = xbb->max_reqlist_segments * PAGE_SIZE;
+
/* Allocate resources whose size depends on front-end configuration. */
error = xbb_alloc_communication_mem(xbb);
if (error != 0) {
@@ -2505,6 +3356,12 @@ xbb_connect(struct xbb_softc *xbb)
return;
}
+ error = xbb_alloc_request_lists(xbb);
+ if (error != 0) {
+ /* Specific errors are reported by xbb_alloc_request_lists(). */
+ return;
+ }
+
/*
* Connect communication channel.
*/
@@ -2520,7 +3377,7 @@ xbb_connect(struct xbb_softc *xbb)
* in this connection, and waiting for a front-end state
* change will not help the situation.
*/
- xbb_disconnect(xbb);
+ (void)xbb_disconnect(xbb);
return;
}
@@ -2542,7 +3399,7 @@ xbb_connect(struct xbb_softc *xbb)
static int
xbb_shutdown(struct xbb_softc *xbb)
{
- static int in_shutdown;
+ int error;
DPRINTF("\n");
@@ -2553,7 +3410,7 @@ xbb_shutdown(struct xbb_softc *xbb)
* the same time. Tell the caller that hits this
* race to try back later.
*/
- if (in_shutdown != 0)
+ if ((xbb->flags & XBBF_IN_SHUTDOWN) != 0)
return (EAGAIN);
DPRINTF("\n");
@@ -2561,20 +3418,30 @@ xbb_shutdown(struct xbb_softc *xbb)
/* Indicate shutdown is in progress. */
xbb->flags |= XBBF_SHUTDOWN;
- /* Wait for requests to complete. */
- if (xbb->active_request_count != 0)
- return (EAGAIN);
-
- DPRINTF("\n");
-
/* Disconnect from the front-end. */
- xbb_disconnect(xbb);
+ error = xbb_disconnect(xbb);
+ if (error != 0) {
+ /*
+ * Requests still outstanding. We'll be called again
+ * once they complete.
+ */
+ KASSERT(error == EAGAIN,
+ ("%s: Unexpected xbb_disconnect() failure %d",
+ __func__, error));
+
+ return (error);
+ }
- in_shutdown = 1;
+ DPRINTF("\n");
+
+ xbb->flags |= XBBF_IN_SHUTDOWN;
mtx_unlock(&xbb->lock);
- xenbus_set_state(xbb->dev, XenbusStateClosed);
+
+ if (xenbus_get_state(xbb->dev) < XenbusStateClosing)
+ xenbus_set_state(xbb->dev, XenbusStateClosing);
+
mtx_lock(&xbb->lock);
- in_shutdown = 0;
+ xbb->flags &= ~XBBF_IN_SHUTDOWN;
/* Indicate to xbb_detach() that is it safe to proceed. */
wakeup(xbb);
@@ -2634,6 +3501,77 @@ xbb_probe(device_t dev)
}
/**
+ * Setup sysctl variables to control various Block Back parameters.
+ *
+ * \param xbb Xen Block Back softc.
+ *
+ */
+static void
+xbb_setup_sysctl(struct xbb_softc *xbb)
+{
+ struct sysctl_ctx_list *sysctl_ctx = NULL;
+ struct sysctl_oid *sysctl_tree = NULL;
+
+ sysctl_ctx = device_get_sysctl_ctx(xbb->dev);
+ if (sysctl_ctx == NULL)
+ return;
+
+ sysctl_tree = device_get_sysctl_tree(xbb->dev);
+ if (sysctl_tree == NULL)
+ return;
+
+ SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
+ "disable_flush", CTLFLAG_RW, &xbb->disable_flush, 0,
+ "fake the flush command");
+
+ SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
+ "flush_interval", CTLFLAG_RW, &xbb->flush_interval, 0,
+ "send a real flush for N flush requests");
+
+ SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
+			"no_coalesce_reqs", CTLFLAG_RW, &xbb->no_coalesce_reqs, 0,
+ "Don't coalesce contiguous requests");
+
+ SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
+ "reqs_received", CTLFLAG_RW, &xbb->reqs_received,
+ "how many I/O requests we have received");
+
+ SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
+ "reqs_completed", CTLFLAG_RW, &xbb->reqs_completed,
+ "how many I/O requests have been completed");
+
+ SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
+ "forced_dispatch", CTLFLAG_RW, &xbb->forced_dispatch,
+ "how many I/O dispatches were forced");
+
+ SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
+ "normal_dispatch", CTLFLAG_RW, &xbb->normal_dispatch,
+ "how many I/O dispatches were normal");
+
+ SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
+ "total_dispatch", CTLFLAG_RW, &xbb->total_dispatch,
+ "total number of I/O dispatches");
+
+ SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
+ "kva_shortages", CTLFLAG_RW, &xbb->kva_shortages,
+ "how many times we have run out of KVA");
+
+ SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
+ "request_shortages", CTLFLAG_RW,
+ &xbb->request_shortages,
+ "how many times we have run out of requests");
+
+ SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
+ "max_requests", CTLFLAG_RD, &xbb->max_requests, 0,
+ "maximum outstanding requests (negotiated)");
+
+ SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
+ "max_request_segments", CTLFLAG_RD,
+ &xbb->max_request_segments, 0,
+			"maximum number of pages per request (negotiated)");
+}
+
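/*
 * Editor's note, not part of the patch: with these nodes in place the
 * knobs can be tuned and the counters read from the control domain,
 * e.g. (the driver/unit name "xbbd.0" here is hypothetical):
 *
 *	sysctl dev.xbbd.0.flush_interval=32	# 1 real flush per 32
 *	sysctl dev.xbbd.0.no_coalesce_reqs=1	# disable aggregation
 *	sysctl dev.xbbd.0.total_dispatch	# read dispatch stats
 */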
+/**
* Attach to a XenBus device that has been claimed by our probe routine.
*
* \param dev NewBus device object representing this Xen Block Back instance.
@@ -2643,8 +3581,8 @@ xbb_probe(device_t dev)
static int
xbb_attach(device_t dev)
{
- struct xbb_softc *xbb;
- int error;
+ struct xbb_softc *xbb;
+ int error;
DPRINTF("Attaching to %s\n", xenbus_get_node(dev));
@@ -2658,15 +3596,6 @@ xbb_attach(device_t dev)
xbb->otherend_id = xenbus_get_otherend_id(dev);
TASK_INIT(&xbb->io_task, /*priority*/0, xbb_run_queue, xbb);
mtx_init(&xbb->lock, device_get_nameunit(dev), NULL, MTX_DEF);
- SLIST_INIT(&xbb->request_free_slist);
-
- /*
- * Protocol defaults valid even if all negotiation fails.
- */
- xbb->ring_config.ring_pages = 1;
- xbb->max_requests = BLKIF_MAX_RING_REQUESTS(PAGE_SIZE);
- xbb->max_request_segments = BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK;
- xbb->max_request_size = xbb->max_request_segments * PAGE_SIZE;
/*
* Publish protocol capabilities for consumption by the
@@ -2763,6 +3692,18 @@ xbb_attach(device_t dev)
DEVSTAT_TYPE_DIRECT
| DEVSTAT_TYPE_IF_OTHER,
DEVSTAT_PRIORITY_OTHER);
+
+ xbb->xbb_stats_in = devstat_new_entry("xbbi", device_get_unit(xbb->dev),
+ xbb->sector_size,
+ DEVSTAT_ALL_SUPPORTED,
+ DEVSTAT_TYPE_DIRECT
+ | DEVSTAT_TYPE_IF_OTHER,
+ DEVSTAT_PRIORITY_OTHER);
+ /*
+ * Setup sysctl variables.
+ */
+ xbb_setup_sysctl(xbb);
+
/*
* Create a taskqueue for doing work that must occur from a
* thread context.
@@ -2797,7 +3738,7 @@ xbb_attach(device_t dev)
}
/**
- * Detach from a block back device instanced.
+ * Detach from a block back device instance.
*
* \param dev NewBus device object representing this Xen Block Back instance.
*
@@ -2823,7 +3764,6 @@ xbb_detach(device_t dev)
"xbb_shutdown", 0);
}
mtx_unlock(&xbb->lock);
- mtx_destroy(&xbb->lock);
DPRINTF("\n");
@@ -2833,8 +3773,10 @@ xbb_detach(device_t dev)
if (xbb->xbb_stats != NULL)
devstat_remove_entry(xbb->xbb_stats);
+ if (xbb->xbb_stats_in != NULL)
+ devstat_remove_entry(xbb->xbb_stats_in);
+
xbb_close_backend(xbb);
- xbb_free_communication_mem(xbb);
if (xbb->dev_mode != NULL) {
free(xbb->dev_mode, M_XENBUS);
@@ -2851,29 +3793,7 @@ xbb_detach(device_t dev)
xbb->dev_name = NULL;
}
- if (xbb->requests != NULL) {
- struct xbb_xen_req *req;
- struct xbb_xen_req *last_req;
-
- req = xbb->requests;
- last_req = &xbb->requests[xbb->max_requests - 1];
- while (req <= last_req) {
-#ifdef XBB_USE_BOUNCE_BUFFERS
- if (req->bounce != NULL) {
- free(req->bounce, M_XENBLOCKBACK);
- req->bounce = NULL;
- }
-#endif
- if (req->gnt_handles != NULL) {
- free (req->gnt_handles, M_XENBLOCKBACK);
- req->gnt_handles = NULL;
- }
- req++;
- }
- free(xbb->requests, M_XENBLOCKBACK);
- xbb->requests = NULL;
- }
-
+ mtx_destroy(&xbb->lock);
return (0);
}
@@ -2921,34 +3841,35 @@ xbb_resume(device_t dev)
*
* \return 0 for success, errno codes for failure.
*/
-static int
+static void
xbb_frontend_changed(device_t dev, XenbusState frontend_state)
{
struct xbb_softc *xbb = device_get_softc(dev);
- DPRINTF("state=%s\n", xenbus_strstate(frontend_state));
+ DPRINTF("frontend_state=%s, xbb_state=%s\n",
+ xenbus_strstate(frontend_state),
+ xenbus_strstate(xenbus_get_state(xbb->dev)));
switch (frontend_state) {
case XenbusStateInitialising:
- case XenbusStateClosing:
break;
case XenbusStateInitialised:
case XenbusStateConnected:
xbb_connect(xbb);
break;
+ case XenbusStateClosing:
case XenbusStateClosed:
- case XenbusStateInitWait:
-
mtx_lock(&xbb->lock);
xbb_shutdown(xbb);
mtx_unlock(&xbb->lock);
+ if (frontend_state == XenbusStateClosed)
+ xenbus_set_state(xbb->dev, XenbusStateClosed);
break;
default:
xenbus_dev_fatal(xbb->dev, EINVAL, "saw state %d at frontend",
frontend_state);
break;
}
- return (0);
}
/*---------------------------- NewBus Registration ---------------------------*/
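All three Xen drivers touched by this change (blkback above, blkfront and netfront below) convert their XenBus state-change callbacks from int to void: there is no longer a return value to propagate, so errors are reported through xenbus_dev_fatal() instead. A schematic callback in the new shape, with illustrative names rather than ones taken from the tree:

	static void
	example_otherend_changed(device_t dev, XenbusState state)
	{

		switch (state) {
		case XenbusStateInitialised:
		case XenbusStateConnected:
			/* Peer is ready: connect rings, publish state. */
			break;
		case XenbusStateClosing:
		case XenbusStateClosed:
			/* Peer is going away: quiesce, then acknowledge. */
			xenbus_set_state(dev, XenbusStateClosed);
			break;
		default:
			/* No status to return; report fatally instead. */
			xenbus_dev_fatal(dev, EINVAL,
			    "saw state %d at otherend", state);
			break;
		}
	}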
diff --git a/sys/dev/xen/blkfront/blkfront.c b/sys/dev/xen/blkfront/blkfront.c
index 81c0e8b..2868313 100644
--- a/sys/dev/xen/blkfront/blkfront.c
+++ b/sys/dev/xen/blkfront/blkfront.c
@@ -739,7 +739,7 @@ setup_blkring(struct xb_softc *sc)
/**
* Callback received when the backend's state changes.
*/
-static int
+static void
blkfront_backend_changed(device_t dev, XenbusState backend_state)
{
struct xb_softc *sc = device_get_softc(dev);
@@ -772,8 +772,6 @@ blkfront_backend_changed(device_t dev, XenbusState backend_state)
blkfront_closing(dev);
break;
}
-
- return (0);
}
/*
diff --git a/sys/dev/xen/control/control.c b/sys/dev/xen/control/control.c
index c03d536..bc59fa0 100644
--- a/sys/dev/xen/control/control.c
+++ b/sys/dev/xen/control/control.c
@@ -173,8 +173,6 @@ static struct xctrl_shutdown_reason xctrl_shutdown_reasons[] = {
};
struct xctrl_softc {
-
- /** Must be first */
struct xs_watch xctrl_watch;
};
@@ -203,24 +201,29 @@ xctrl_suspend()
unsigned long max_pfn, start_info_mfn;
#ifdef SMP
- cpumask_t map;
+ struct thread *td;
+ cpuset_t map;
/*
* Bind us to CPU 0 and stop any other VCPUs.
*/
- thread_lock(curthread);
- sched_bind(curthread, 0);
- thread_unlock(curthread);
+ td = curthread;
+ thread_lock(td);
+ sched_bind(td, 0);
+ thread_unlock(td);
KASSERT(PCPU_GET(cpuid) == 0, ("xen_suspend: not running on cpu 0"));
- map = PCPU_GET(other_cpus) & ~stopped_cpus;
- if (map)
+ sched_pin();
+ map = PCPU_GET(other_cpus);
+ sched_unpin();
+ CPU_NAND(&map, &stopped_cpus);
+ if (!CPU_EMPTY(&map))
stop_cpus(map);
#endif
if (DEVICE_SUSPEND(root_bus) != 0) {
printf("xen_suspend: device_suspend failed\n");
#ifdef SMP
- if (map)
+ if (!CPU_EMPTY(&map))
restart_cpus(map);
#endif
return;
@@ -289,7 +292,7 @@ xctrl_suspend()
thread_lock(curthread);
sched_unbind(curthread);
thread_unlock(curthread);
- if (map)
+ if (!CPU_EMPTY(&map))
restart_cpus(map);
#endif
}
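The suspend path above also migrates from the old integer cpumask_t, which allowed direct bitwise arithmetic, to the structure-based cpuset_t, which must be manipulated through the <sys/cpuset.h> macros. A sketch of the correspondence used in the hunk, with the old expressions noted in comments:

	cpuset_t map;

	sched_pin();			/* the struct copy is not atomic,
					 * so stay on one CPU while reading
					 * the per-CPU field */
	map = PCPU_GET(other_cpus);
	sched_unpin();
	CPU_NAND(&map, &stopped_cpus);	/* was: map &= ~stopped_cpus; */
	if (!CPU_EMPTY(&map))		/* was: if (map) */
		stop_cpus(map);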
@@ -445,6 +448,7 @@ xctrl_attach(device_t dev)
/* Activate watch */
xctrl->xctrl_watch.node = "control/shutdown";
xctrl->xctrl_watch.callback = xctrl_on_watch_event;
+ xctrl->xctrl_watch.callback_data = (uintptr_t)xctrl;
xs_register_watch(&xctrl->xctrl_watch);
#ifndef XENHVM
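With the "Must be first" layout assumption gone from struct xctrl_softc, the watch callback can no longer cast the xs_watch pointer itself to a softc; the softc is threaded through callback_data instead. A sketch of a callback recovering it that way, assuming the signature matches FreeBSD's xs_watch_cb_t:

	static void
	example_on_watch_event(struct xs_watch *watch, const char **vec,
	    unsigned int len)
	{
		struct xctrl_softc *xctrl;

		xctrl = (struct xctrl_softc *)watch->callback_data;
		(void)vec;
		(void)len;
		/* ... act on the control/shutdown event using xctrl ... */
	}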
diff --git a/sys/dev/xen/netfront/netfront.c b/sys/dev/xen/netfront/netfront.c
index 40ff031..c694514 100644
--- a/sys/dev/xen/netfront/netfront.c
+++ b/sys/dev/xen/netfront/netfront.c
@@ -650,7 +650,7 @@ netfront_send_fake_arp(device_t dev, struct netfront_info *info)
/**
* Callback received when the backend's state changes.
*/
-static int
+static void
netfront_backend_changed(device_t dev, XenbusState newstate)
{
struct netfront_info *sc = device_get_softc(dev);
@@ -680,7 +680,6 @@ netfront_backend_changed(device_t dev, XenbusState newstate)
xenbus_set_state(dev, XenbusStateClosed);
break;
}
- return (0);
}
static void