summaryrefslogtreecommitdiffstats
path: root/sys/dev
diff options
context:
space:
mode:
Diffstat (limited to 'sys/dev')
-rw-r--r--sys/dev/acpica/acpi.c106
-rw-r--r--sys/dev/agp/agp.c34
-rw-r--r--sys/dev/agp/agp_apple.c302
-rw-r--r--sys/dev/alc/if_alc.c2
-rw-r--r--sys/dev/bge/if_bge.c317
-rw-r--r--sys/dev/bge/if_bgereg.h42
-rw-r--r--sys/dev/e1000/e1000_82571.c7
-rw-r--r--sys/dev/e1000/if_em.c368
-rw-r--r--sys/dev/e1000/if_em.h27
-rw-r--r--sys/dev/e1000/if_lem.c228
-rw-r--r--sys/dev/e1000/if_lem.h18
-rw-r--r--sys/dev/mii/brgphy.c5
-rw-r--r--sys/dev/mii/e1000phy.c2
-rw-r--r--sys/dev/mii/mii_physubr.c85
-rw-r--r--sys/dev/mii/miidevs2
-rw-r--r--sys/dev/mii/miivar.h2
-rw-r--r--sys/dev/usb/usb_device.c50
-rw-r--r--sys/dev/usb/usb_hub.c6
-rw-r--r--sys/dev/usb/usbdi.h3
-rw-r--r--sys/dev/xen/blkback/blkback.c7
20 files changed, 1201 insertions, 412 deletions
diff --git a/sys/dev/acpica/acpi.c b/sys/dev/acpica/acpi.c
index 24982fe..c0f17f6 100644
--- a/sys/dev/acpica/acpi.c
+++ b/sys/dev/acpica/acpi.c
@@ -86,6 +86,11 @@ static struct cdevsw acpi_cdevsw = {
.d_name = "acpi",
};
+struct acpi_interface {
+ ACPI_STRING *data;
+ int num;
+};
+
/* Global mutex for locking access to the ACPI subsystem. */
struct mtx acpi_mutex;
@@ -163,6 +168,7 @@ static void acpi_enable_pcie(void);
#endif
static void acpi_hint_device_unit(device_t acdev, device_t child,
const char *name, int *unitp);
+static void acpi_reset_interfaces(device_t dev);
static device_method_t acpi_methods[] = {
/* Device interface */
@@ -232,6 +238,16 @@ SYSCTL_STRING(_debug_acpi, OID_AUTO, acpi_ca_version, CTLFLAG_RD,
acpi_ca_version, 0, "Version of Intel ACPI-CA");
/*
+ * Allow overriding _OSI methods.
+ */
+static char acpi_install_interface[256];
+TUNABLE_STR("hw.acpi.install_interface", acpi_install_interface,
+ sizeof(acpi_install_interface));
+static char acpi_remove_interface[256];
+TUNABLE_STR("hw.acpi.remove_interface", acpi_remove_interface,
+ sizeof(acpi_remove_interface));
+
+/*
* Allow override of whether methods execute in parallel or not.
* Enable this for serial behavior, which fixes "AE_ALREADY_EXISTS"
* errors for AML that really can't handle parallel method execution.
@@ -467,6 +483,9 @@ acpi_attach(device_t dev)
goto out;
}
+ /* Override OS interfaces if the user requested. */
+ acpi_reset_interfaces(dev);
+
/* Load ACPI name space. */
status = AcpiLoadTables();
if (ACPI_FAILURE(status)) {
@@ -3473,6 +3492,93 @@ acpi_debug_objects_sysctl(SYSCTL_HANDLER_ARGS)
}
static int
+acpi_parse_interfaces(char *str, struct acpi_interface *iface)
+{
+ char *p;
+ size_t len;
+ int i, j;
+
+ p = str;
+ while (isspace(*p) || *p == ',')
+ p++;
+ len = strlen(p);
+ if (len == 0)
+ return (0);
+ p = strdup(p, M_TEMP);
+ for (i = 0; i < len; i++)
+ if (p[i] == ',')
+ p[i] = '\0';
+ i = j = 0;
+ while (i < len)
+ if (isspace(p[i]) || p[i] == '\0')
+ i++;
+ else {
+ i += strlen(p + i) + 1;
+ j++;
+ }
+ if (j == 0) {
+ free(p, M_TEMP);
+ return (0);
+ }
+ iface->data = malloc(sizeof(*iface->data) * j, M_TEMP, M_WAITOK);
+ iface->num = j;
+ i = j = 0;
+ while (i < len)
+ if (isspace(p[i]) || p[i] == '\0')
+ i++;
+ else {
+ iface->data[j] = p + i;
+ i += strlen(p + i) + 1;
+ j++;
+ }
+
+ return (j);
+}
+
+static void
+acpi_free_interfaces(struct acpi_interface *iface)
+{
+
+ free(iface->data[0], M_TEMP);
+ free(iface->data, M_TEMP);
+}
+
+static void
+acpi_reset_interfaces(device_t dev)
+{
+ struct acpi_interface list;
+ ACPI_STATUS status;
+ int i;
+
+ if (acpi_parse_interfaces(acpi_install_interface, &list) > 0) {
+ for (i = 0; i < list.num; i++) {
+ status = AcpiInstallInterface(list.data[i]);
+ if (ACPI_FAILURE(status))
+ device_printf(dev,
+ "failed to install _OSI(\"%s\"): %s\n",
+ list.data[i], AcpiFormatException(status));
+ else if (bootverbose)
+ device_printf(dev, "installed _OSI(\"%s\")\n",
+ list.data[i]);
+ }
+ acpi_free_interfaces(&list);
+ }
+ if (acpi_parse_interfaces(acpi_remove_interface, &list) > 0) {
+ for (i = 0; i < list.num; i++) {
+ status = AcpiRemoveInterface(list.data[i]);
+ if (ACPI_FAILURE(status))
+ device_printf(dev,
+ "failed to remove _OSI(\"%s\"): %s\n",
+ list.data[i], AcpiFormatException(status));
+ else if (bootverbose)
+ device_printf(dev, "removed _OSI(\"%s\")\n",
+ list.data[i]);
+ }
+ acpi_free_interfaces(&list);
+ }
+}
+
+static int
acpi_pm_func(u_long cmd, void *arg, ...)
{
int state, acpi_state;
diff --git a/sys/dev/agp/agp.c b/sys/dev/agp/agp.c
index ab48085..09e2848 100644
--- a/sys/dev/agp/agp.c
+++ b/sys/dev/agp/agp.c
@@ -219,13 +219,16 @@ agp_generic_attach(device_t dev)
* Find and map the aperture, RF_SHAREABLE for DRM but not RF_ACTIVE
* because the kernel doesn't need to map it.
*/
- if (sc->as_aperture_rid == 0)
- sc->as_aperture_rid = AGP_APBASE;
- sc->as_aperture = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
- &sc->as_aperture_rid, RF_SHAREABLE);
- if (!sc->as_aperture)
- return ENOMEM;
+ if (sc->as_aperture_rid != -1) {
+ if (sc->as_aperture_rid == 0)
+ sc->as_aperture_rid = AGP_APBASE;
+
+ sc->as_aperture = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
+ &sc->as_aperture_rid, RF_SHAREABLE);
+ if (!sc->as_aperture)
+ return ENOMEM;
+ }
/*
* Work out an upper bound for agp memory allocation. This
@@ -272,8 +275,9 @@ agp_free_res(device_t dev)
{
struct agp_softc *sc = device_get_softc(dev);
- bus_release_resource(dev, SYS_RES_MEMORY, sc->as_aperture_rid,
- sc->as_aperture);
+ if (sc->as_aperture != NULL)
+ bus_release_resource(dev, SYS_RES_MEMORY, sc->as_aperture_rid,
+ sc->as_aperture);
mtx_destroy(&sc->as_lock);
agp_flush_cache();
}
@@ -729,7 +733,10 @@ agp_info_user(device_t dev, agp_info *info)
info->bridge_id = pci_get_devid(dev);
info->agp_mode =
pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
- info->aper_base = rman_get_start(sc->as_aperture);
+ if (sc->as_aperture)
+ info->aper_base = rman_get_start(sc->as_aperture);
+ else
+ info->aper_base = 0;
info->aper_size = AGP_GET_APERTURE(dev) >> 20;
info->pg_total = info->pg_system = sc->as_maxmem >> AGP_PAGE_SHIFT;
info->pg_used = sc->as_allocated >> AGP_PAGE_SHIFT;
@@ -876,6 +883,8 @@ agp_mmap(struct cdev *kdev, vm_ooffset_t offset, vm_paddr_t *paddr,
if (offset > AGP_GET_APERTURE(dev))
return -1;
+ if (sc->as_aperture == NULL)
+ return -1;
*paddr = rman_get_start(sc->as_aperture) + offset;
return 0;
}
@@ -917,8 +926,11 @@ agp_get_info(device_t dev, struct agp_info *info)
info->ai_mode =
pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
- info->ai_aperture_base = rman_get_start(sc->as_aperture);
- info->ai_aperture_size = rman_get_size(sc->as_aperture);
+ if (sc->as_aperture != NULL)
+ info->ai_aperture_base = rman_get_start(sc->as_aperture);
+ else
+ info->ai_aperture_base = 0;
+ info->ai_aperture_size = AGP_GET_APERTURE(dev);
info->ai_memory_allowed = sc->as_maxmem;
info->ai_memory_used = sc->as_allocated;
}
diff --git a/sys/dev/agp/agp_apple.c b/sys/dev/agp/agp_apple.c
new file mode 100644
index 0000000..e16e114
--- /dev/null
+++ b/sys/dev/agp/agp_apple.c
@@ -0,0 +1,302 @@
+/*-
+ * Copyright (c) 2010 Nathan Whitehorn
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_bus.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+
+#include <machine/resource.h>
+
+#include <dev/agp/agppriv.h>
+#include <dev/agp/agpreg.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcireg.h>
+
+#include <vm/vm.h>
+#include <vm/vm_object.h>
+#include <vm/pmap.h>
+
+#define UNIN_AGP_GART_BASE 0x8c
+#define UNIN_AGP_BASE_ADDR 0x90
+#define UNIN_AGP_GART_CONTROL 0x94
+
+#define UNIN_AGP_GART_INVAL 0x00000001
+#define UNIN_AGP_GART_ENABLE 0x00000100
+#define UNIN_AGP_GART_2XRESET 0x00010000
+#define UNIN_AGP_U3_GART_PERFRD 0x00080000
+
+struct agp_apple_softc {
+ struct agp_softc agp;
+ uint32_t aperture;
+ struct agp_gatt *gatt;
+ int u3;
+ int needs_2x_reset;
+};
+
+static int
+agp_apple_probe(device_t dev)
+{
+
+ if (resource_disabled("agp", device_get_unit(dev)))
+ return (ENXIO);
+
+ if (pci_get_class(dev) != PCIC_BRIDGE
+ || pci_get_subclass(dev) != PCIS_BRIDGE_HOST)
+ return (ENXIO);
+
+ if (agp_find_caps(dev) == 0)
+ return (ENXIO);
+
+ if (pci_get_class(dev) != PCIC_BRIDGE
+ || pci_get_subclass(dev) != PCIS_BRIDGE_HOST)
+ return (ENXIO);
+
+ switch (pci_get_devid(dev)) {
+ case 0x0020106b:
+ case 0x0027106b:
+ device_set_desc(dev, "Apple UniNorth AGP Bridge");
+ return (BUS_PROBE_DEFAULT);
+ case 0x002d106b:
+ device_set_desc(dev, "Apple UniNorth 1.5 AGP Bridge");
+ return (BUS_PROBE_DEFAULT);
+ case 0x0034106b:
+ device_set_desc(dev, "Apple UniNorth 2 AGP Bridge");
+ return (BUS_PROBE_DEFAULT);
+ case 0x004b106b:
+ case 0x0058106b:
+ case 0x0059106b:
+ device_set_desc(dev, "Apple U3 AGP Bridge");
+ return (BUS_PROBE_DEFAULT);
+ case 0x0066106b:
+ device_set_desc(dev, "Apple Intrepid AGP Bridge");
+ return (BUS_PROBE_DEFAULT);
+ }
+
+ return (ENXIO);
+}
+
+static int
+agp_apple_attach(device_t dev)
+{
+ struct agp_apple_softc *sc = device_get_softc(dev);
+ int error;
+
+ /* Record quirks */
+ sc->needs_2x_reset = 0;
+ sc->u3 = 0;
+ switch (pci_get_devid(dev)) {
+ case 0x0020106b:
+ case 0x0027106b:
+ sc->needs_2x_reset = 1;
+ break;
+ case 0x004b106b:
+ case 0x0058106b:
+ case 0x0059106b:
+ sc->u3 = 1;
+ break;
+ }
+
+ /* Set the aperture bus address base (must be 0) */
+ pci_write_config(dev, UNIN_AGP_BASE_ADDR, 0, 4);
+ agp_set_aperture_resource(dev, -1);
+
+ error = agp_generic_attach(dev);
+ if (error)
+ return (error);
+
+ sc->aperture = 256*1024*1024;
+
+ for (sc->aperture = 256*1024*1024; sc->aperture >= 4*1024*1024;
+ sc->aperture /= 2) {
+ sc->gatt = agp_alloc_gatt(dev);
+ if (sc->gatt)
+ break;
+ }
+ if (sc->aperture < 4*1024*1024) {
+ agp_generic_detach(dev);
+ return ENOMEM;
+ }
+
+ /* Install the gatt. */
+ AGP_SET_APERTURE(dev, sc->aperture);
+
+ /* XXX: U3 scratch page? */
+
+ /* Enable the aperture and TLB. */
+ AGP_FLUSH_TLB(dev);
+
+ return (0);
+}
+
+static int
+agp_apple_detach(device_t dev)
+{
+ struct agp_apple_softc *sc = device_get_softc(dev);
+
+ agp_free_cdev(dev);
+
+ /* Disable the aperture and TLB */
+ pci_write_config(dev, UNIN_AGP_GART_CONTROL, UNIN_AGP_GART_INVAL, 4);
+ pci_write_config(dev, UNIN_AGP_GART_CONTROL, 0, 4);
+
+ if (sc->needs_2x_reset) {
+ pci_write_config(dev, UNIN_AGP_GART_CONTROL,
+ UNIN_AGP_GART_2XRESET, 4);
+ pci_write_config(dev, UNIN_AGP_GART_CONTROL, 0, 4);
+ }
+
+ AGP_SET_APERTURE(dev, 0);
+
+ agp_free_gatt(sc->gatt);
+ agp_free_res(dev);
+ return 0;
+}
+
+static uint32_t
+agp_apple_get_aperture(device_t dev)
+{
+ struct agp_apple_softc *sc = device_get_softc(dev);
+
+ return (sc->aperture);
+}
+
+static int
+agp_apple_set_aperture(device_t dev, uint32_t aperture)
+{
+ struct agp_apple_softc *sc = device_get_softc(dev);
+
+ /*
+ * Check for a multiple of 4 MB and make sure it is within the
+ * programmable range.
+ */
+ if (aperture % (4*1024*1024)
+ || aperture < 4*1024*1024
+ || aperture > ((sc->u3) ? 512 : 256)*1024*1024)
+ return EINVAL;
+
+ /* The aperture value is a multiple of 4 MB */
+ aperture /= (4*1024*1024);
+
+ pci_write_config(dev, UNIN_AGP_GART_BASE,
+ (sc->gatt->ag_physical & 0xfffff000) | aperture, 4);
+
+ return (0);
+}
+
+static int
+agp_apple_bind_page(device_t dev, vm_offset_t offset, vm_offset_t physical)
+{
+ struct agp_apple_softc *sc = device_get_softc(dev);
+
+ if (offset >= (sc->gatt->ag_entries << AGP_PAGE_SHIFT))
+ return EINVAL;
+
+ sc->gatt->ag_virtual[offset >> AGP_PAGE_SHIFT] = physical;
+ __asm __volatile("dcbst 0,%0; sync" ::
+ "r"(&sc->gatt->ag_virtual[offset >> AGP_PAGE_SHIFT]) : "memory");
+ return (0);
+}
+
+static int
+agp_apple_unbind_page(device_t dev, vm_offset_t offset)
+{
+ struct agp_apple_softc *sc = device_get_softc(dev);
+
+ if (offset >= (sc->gatt->ag_entries << AGP_PAGE_SHIFT))
+ return EINVAL;
+
+ sc->gatt->ag_virtual[offset >> AGP_PAGE_SHIFT] = 0;
+ __asm __volatile("dcbst 0,%0; sync" ::
+ "r"(&sc->gatt->ag_virtual[offset >> AGP_PAGE_SHIFT]) : "memory");
+ return (0);
+}
+
+static void
+agp_apple_flush_tlb(device_t dev)
+{
+ struct agp_apple_softc *sc = device_get_softc(dev);
+ uint32_t cntrl = UNIN_AGP_GART_ENABLE;
+
+ if (sc->u3)
+ cntrl |= UNIN_AGP_U3_GART_PERFRD;
+
+ pci_write_config(dev, UNIN_AGP_GART_CONTROL,
+ cntrl | UNIN_AGP_GART_INVAL, 4);
+ pci_write_config(dev, UNIN_AGP_GART_CONTROL, cntrl, 4);
+
+ if (sc->needs_2x_reset) {
+ pci_write_config(dev, UNIN_AGP_GART_CONTROL,
+ cntrl | UNIN_AGP_GART_2XRESET, 4);
+ pci_write_config(dev, UNIN_AGP_GART_CONTROL, cntrl, 4);
+ }
+}
+
+static device_method_t agp_apple_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, agp_apple_probe),
+ DEVMETHOD(device_attach, agp_apple_attach),
+ DEVMETHOD(device_detach, agp_apple_detach),
+ DEVMETHOD(device_shutdown, bus_generic_shutdown),
+ DEVMETHOD(device_suspend, bus_generic_suspend),
+ DEVMETHOD(device_resume, bus_generic_resume),
+
+ /* AGP interface */
+ DEVMETHOD(agp_get_aperture, agp_apple_get_aperture),
+ DEVMETHOD(agp_set_aperture, agp_apple_set_aperture),
+ DEVMETHOD(agp_bind_page, agp_apple_bind_page),
+ DEVMETHOD(agp_unbind_page, agp_apple_unbind_page),
+ DEVMETHOD(agp_flush_tlb, agp_apple_flush_tlb),
+ DEVMETHOD(agp_enable, agp_generic_enable),
+ DEVMETHOD(agp_alloc_memory, agp_generic_alloc_memory),
+ DEVMETHOD(agp_free_memory, agp_generic_free_memory),
+ DEVMETHOD(agp_bind_memory, agp_generic_bind_memory),
+ DEVMETHOD(agp_unbind_memory, agp_generic_unbind_memory),
+
+ { 0, 0 }
+};
+
+static driver_t agp_apple_driver = {
+ "agp",
+ agp_apple_methods,
+ sizeof(struct agp_apple_softc),
+};
+
+static devclass_t agp_devclass;
+
+DRIVER_MODULE(agp_apple, hostb, agp_apple_driver, agp_devclass, 0, 0);
+MODULE_DEPEND(agp_apple, agp, 1, 1, 1);
+MODULE_DEPEND(agp_apple, pci, 1, 1, 1);
diff --git a/sys/dev/alc/if_alc.c b/sys/dev/alc/if_alc.c
index 5f0f7ec..aee0e10 100644
--- a/sys/dev/alc/if_alc.c
+++ b/sys/dev/alc/if_alc.c
@@ -331,8 +331,8 @@ alc_miibus_statchg(device_t dev)
reg = CSR_READ_4(sc, ALC_MAC_CFG);
reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
+ alc_aspm(sc, IFM_SUBTYPE(mii->mii_media_active));
}
- alc_aspm(sc, IFM_SUBTYPE(mii->mii_media_active));
}
static void
diff --git a/sys/dev/bge/if_bge.c b/sys/dev/bge/if_bge.c
index 63c4c5d..59ce673 100644
--- a/sys/dev/bge/if_bge.c
+++ b/sys/dev/bge/if_bge.c
@@ -169,6 +169,8 @@ static const struct bge_type {
{ BCOM_VENDORID, BCOM_DEVICEID_BCM5714S },
{ BCOM_VENDORID, BCOM_DEVICEID_BCM5715 },
{ BCOM_VENDORID, BCOM_DEVICEID_BCM5715S },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5717 },
+ { BCOM_VENDORID, BCOM_DEVICEID_BCM5718 },
{ BCOM_VENDORID, BCOM_DEVICEID_BCM5720 },
{ BCOM_VENDORID, BCOM_DEVICEID_BCM5721 },
{ BCOM_VENDORID, BCOM_DEVICEID_BCM5722 },
@@ -289,6 +291,8 @@ static const struct bge_revision {
{ BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
{ BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
{ BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
+ { BGE_CHIPID_BCM5717_A0, "BCM5717 A0" },
+ { BGE_CHIPID_BCM5717_B0, "BCM5717 B0" },
{ BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
{ BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
{ BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
@@ -332,6 +336,7 @@ static const struct bge_revision bge_majorrevs[] = {
{ BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
{ BGE_ASICREV_BCM5906, "unknown BCM5906" },
{ BGE_ASICREV_BCM57780, "unknown BCM57780" },
+ { BGE_ASICREV_BCM5717, "unknown BCM5717" },
{ 0, NULL }
};
@@ -342,6 +347,7 @@ static const struct bge_revision bge_majorrevs[] = {
#define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
#define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_575X_PLUS)
#define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5755_PLUS)
+#define BGE_IS_5717_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5717_PLUS)
const struct bge_revision * bge_lookup_rev(uint32_t);
const struct bge_vendor * bge_lookup_vendor(uint16_t);
@@ -367,6 +373,7 @@ static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
static int bge_get_eaddr(struct bge_softc *, uint8_t[]);
static void bge_txeof(struct bge_softc *, uint16_t);
+static void bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *);
static int bge_rxeof(struct bge_softc *, uint16_t, int);
static void bge_asf_driver_up (struct bge_softc *);
@@ -376,7 +383,7 @@ static void bge_stats_update(struct bge_softc *);
static void bge_stats_update_regs(struct bge_softc *);
static struct mbuf *bge_check_short_dma(struct mbuf *);
static struct mbuf *bge_setup_tso(struct bge_softc *, struct mbuf *,
- uint16_t *);
+ uint16_t *, uint16_t *);
static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);
static void bge_intr(void *);
@@ -1349,12 +1356,15 @@ bge_stop_fw(struct bge_softc *sc)
static int
bge_chipinit(struct bge_softc *sc)
{
- uint32_t dma_rw_ctl;
+ uint32_t dma_rw_ctl, misc_ctl;
uint16_t val;
int i;
/* Set endianness before we access any non-PCI registers. */
- pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);
+ misc_ctl = BGE_INIT;
+ if (sc->bge_flags & BGE_FLAG_TAGGED_STATUS)
+ misc_ctl |= BGE_PCIMISCCTL_TAGGED_STATUS;
+ pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, misc_ctl, 4);
/* Clear the MAC control register */
CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
@@ -1446,6 +1456,8 @@ bge_chipinit(struct bge_softc *sc)
if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
sc->bge_asicrev == BGE_ASICREV_BCM5704)
dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
+ if (BGE_IS_5717_PLUS(sc))
+ dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
/*
@@ -1529,7 +1541,16 @@ bge_blockinit(struct bge_softc *sc)
}
/* Configure mbuf pool watermarks */
- if (!BGE_IS_5705_PLUS(sc)) {
+ if (sc->bge_asicrev == BGE_ASICREV_BCM5717) {
+ CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
+ if (sc->bge_ifp->if_mtu > ETHERMTU) {
+ CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
+ CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
+ } else {
+ CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
+ CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
+ }
+ } else if (!BGE_IS_5705_PLUS(sc)) {
CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
@@ -1621,7 +1642,16 @@ bge_blockinit(struct bge_softc *sc)
BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
- if (BGE_IS_5705_PLUS(sc)) {
+ if (BGE_IS_5717_PLUS(sc)) {
+ /*
+ * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
+ * Bits 15-2 : Maximum RX frame size
+	 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
+ * Bit 0 : Reserved
+ */
+ rcb->bge_maxlen_flags =
+ BGE_RCB_MAXLEN_FLAGS(512, BGE_MAX_FRAMELEN << 2);
+ } else if (BGE_IS_5705_PLUS(sc)) {
/*
* Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
* Bits 15-2 : Reserved (should be 0)
@@ -1640,7 +1670,10 @@ bge_blockinit(struct bge_softc *sc)
rcb->bge_maxlen_flags =
BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
}
- rcb->bge_nicaddr = BGE_STD_RX_RINGS;
+ if (sc->bge_asicrev == BGE_ASICREV_BCM5717)
+ rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
+ else
+ rcb->bge_nicaddr = BGE_STD_RX_RINGS;
/* Write the standard receive producer ring control block. */
CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
@@ -1669,7 +1702,10 @@ bge_blockinit(struct bge_softc *sc)
BUS_DMASYNC_PREREAD);
rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
- rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
+ if (sc->bge_asicrev == BGE_ASICREV_BCM5717)
+ rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
+ else
+ rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
rcb->bge_hostaddr.bge_addr_hi);
CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
@@ -1726,6 +1762,10 @@ bge_blockinit(struct bge_softc *sc)
if (BGE_IS_JUMBO_CAPABLE(sc))
CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
BGE_JUMBO_RX_RING_CNT/8);
+ if (BGE_IS_5717_PLUS(sc)) {
+ CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32);
+ CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16);
+ }
/*
* Disable all send rings by setting the 'ring disabled' bit
@@ -1750,8 +1790,11 @@ bge_blockinit(struct bge_softc *sc)
BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
- RCB_WRITE_4(sc, vrcb, bge_nicaddr,
- BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
+ if (sc->bge_asicrev == BGE_ASICREV_BCM5717)
+ RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_SEND_RING_5717);
+ else
+ RCB_WRITE_4(sc, vrcb, bge_nicaddr,
+ BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
@@ -1760,7 +1803,10 @@ bge_blockinit(struct bge_softc *sc)
* 'ring diabled' bit in the flags field of all the receive
* return ring control blocks, located in NIC memory.
*/
- if (!BGE_IS_5705_PLUS(sc))
+ if (sc->bge_asicrev == BGE_ASICREV_BCM5717) {
+ /* Should be 17, use 16 until we get an SRAM map. */
+ limit = 16;
+ } else if (!BGE_IS_5705_PLUS(sc))
limit = BGE_RX_RINGS_MAX;
else if (sc->bge_asicrev == BGE_ASICREV_BCM5755)
limit = 4;
@@ -1936,6 +1982,10 @@ bge_blockinit(struct bge_softc *sc)
/* Turn on read DMA state machine */
val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
+
+ if (sc->bge_asicrev == BGE_ASICREV_BCM5717)
+ val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
+
if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
sc->bge_asicrev == BGE_ASICREV_BCM57780)
@@ -1944,16 +1994,18 @@ bge_blockinit(struct bge_softc *sc)
BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
if (sc->bge_flags & BGE_FLAG_PCIE)
val |= BGE_RDMAMODE_FIFO_LONG_BURST;
- if (sc->bge_flags & BGE_FLAG_TSO) {
+ if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
val |= BGE_RDMAMODE_TSO4_ENABLE;
- if (sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
+ if (sc->bge_flags & BGE_FLAG_TSO3 ||
+ sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
sc->bge_asicrev == BGE_ASICREV_BCM57780)
val |= BGE_RDMAMODE_TSO6_ENABLE;
}
if (sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
- sc->bge_asicrev == BGE_ASICREV_BCM57780) {
+ sc->bge_asicrev == BGE_ASICREV_BCM57780 ||
+ BGE_IS_5717_PLUS(sc)) {
/*
* Enable fix for read DMA FIFO overruns.
* The fix is to limit the number of RX BDs
@@ -1989,8 +2041,9 @@ bge_blockinit(struct bge_softc *sc)
CSR_WRITE_4(sc, BGE_SDC_MODE, val);
/* Turn on send data initiator state machine */
- if (sc->bge_flags & BGE_FLAG_TSO)
- CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE | 0x08);
+ if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3))
+ CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE |
+ BGE_SDIMODE_HW_LSO_PRE_DMA);
else
CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
@@ -2104,9 +2157,22 @@ bge_probe(device_t dev)
id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
BGE_PCIMISCCTL_ASICREV_SHIFT;
- if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG)
- id = pci_read_config(dev,
- BGE_PCI_PRODID_ASICREV, 4);
+ if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG) {
+ /*
+			 * Find the ASIC revision. Different chips
+ * use different registers.
+ */
+ switch (pci_get_device(dev)) {
+ case BCOM_DEVICEID_BCM5717:
+ case BCOM_DEVICEID_BCM5718:
+ id = pci_read_config(dev,
+ BGE_PCI_GEN2_PRODID_ASICREV, 4);
+ break;
+ default:
+ id = pci_read_config(dev,
+ BGE_PCI_PRODID_ASICREV, 4);
+ }
+ }
br = bge_lookup_rev(id);
v = bge_lookup_vendor(vid);
{
@@ -2423,7 +2489,7 @@ bge_dma_alloc(struct bge_softc *sc)
return (ENOMEM);
}
/* Create tag for Tx mbufs. */
- if (sc->bge_flags & BGE_FLAG_TSO) {
+ if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
txsegsz = BGE_TSOSEG_SZ;
txmaxsegsz = 65535 + sizeof(struct ether_vlan_header);
} else {
@@ -2539,6 +2605,10 @@ bge_can_use_msi(struct bge_softc *sc)
{
int can_use_msi = 0;
+ /* Disable MSI for polling(4). */
+#ifdef DEVICE_POLLING
+ return (0);
+#endif
switch (sc->bge_asicrev) {
case BGE_ASICREV_BCM5714_A0:
case BGE_ASICREV_BCM5714:
@@ -2568,7 +2638,7 @@ bge_attach(device_t dev)
struct bge_softc *sc;
uint32_t hwcfg = 0, misccfg;
u_char eaddr[ETHER_ADDR_LEN];
- int error, msicount, phy_addr, reg, rid, trys;
+ int error, f, msicount, phy_addr, reg, rid, trys;
sc = device_get_softc(dev);
sc->bge_dev = dev;
@@ -2594,14 +2664,55 @@ bge_attach(device_t dev)
sc->bge_chipid =
pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
BGE_PCIMISCCTL_ASICREV_SHIFT;
- if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG)
- sc->bge_chipid = pci_read_config(dev, BGE_PCI_PRODID_ASICREV,
- 4);
+ if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG) {
+ /*
+		 * Find the ASIC revision. Different chips use different
+ * registers.
+ */
+ switch (pci_get_device(dev)) {
+ case BCOM_DEVICEID_BCM5717:
+ case BCOM_DEVICEID_BCM5718:
+ sc->bge_chipid = pci_read_config(dev,
+ BGE_PCI_GEN2_PRODID_ASICREV, 4);
+ break;
+ default:
+ sc->bge_chipid = pci_read_config(dev,
+ BGE_PCI_PRODID_ASICREV, 4);
+ }
+ }
sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
/* Set default PHY address. */
phy_addr = 1;
+ /*
+ * PHY address mapping for various devices.
+ *
+ * | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
+ * ---------+-------+-------+-------+-------+
+ * BCM57XX | 1 | X | X | X |
+ * BCM5704 | 1 | X | 1 | X |
+ * BCM5717 | 1 | 8 | 2 | 9 |
+ *
+ * Other addresses may respond but they are not
+ * IEEE compliant PHYs and should be ignored.
+ */
+ if (sc->bge_asicrev == BGE_ASICREV_BCM5717) {
+ f = pci_get_function(dev);
+ if (sc->bge_chipid == BGE_CHIPID_BCM5717_A0) {
+ if (CSR_READ_4(sc, BGE_SGDIG_STS) &
+ BGE_SGDIGSTS_IS_SERDES)
+ phy_addr = f + 8;
+ else
+ phy_addr = f + 1;
+ } else if (sc->bge_chipid == BGE_CHIPID_BCM5717_B0) {
+ if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
+ BGE_CPMU_PHY_STRAP_IS_SERDES)
+ phy_addr = f + 8;
+ else
+ phy_addr = f + 1;
+ }
+ }
/*
* Don't enable Ethernet@WireSpeed for the 5700, 5906, or the
@@ -2610,7 +2721,8 @@ bge_attach(device_t dev)
if (sc->bge_asicrev != BGE_ASICREV_BCM5700 &&
sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
- sc->bge_chipid != BGE_CHIPID_BCM5705_A1)
+ sc->bge_chipid != BGE_CHIPID_BCM5705_A1 &&
+ !BGE_IS_5717_PLUS(sc))
sc->bge_phy_flags |= BGE_PHY_WIRESPEED;
if (bge_has_eaddr(sc))
@@ -2618,6 +2730,11 @@ bge_attach(device_t dev)
/* Save chipset family. */
switch (sc->bge_asicrev) {
+ case BGE_ASICREV_BCM5717:
+ sc->bge_flags |= BGE_FLAG_5717_PLUS | BGE_FLAG_5755_PLUS |
+ BGE_FLAG_575X_PLUS | BGE_FLAG_5705_PLUS | BGE_FLAG_JUMBO |
+ BGE_FLAG_SHORT_DMA_BUG | BGE_FLAG_JUMBO_FRAME;
+ break;
case BGE_ASICREV_BCM5755:
case BGE_ASICREV_BCM5761:
case BGE_ASICREV_BCM5784:
@@ -2663,6 +2780,7 @@ bge_attach(device_t dev)
sc->bge_phy_flags |= BGE_PHY_NO_3LED;
if ((BGE_IS_5705_PLUS(sc)) &&
sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
+ sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
sc->bge_asicrev != BGE_ASICREV_BCM57780) {
if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
@@ -2679,7 +2797,8 @@ bge_attach(device_t dev)
}
/* Identify the chips that use an CPMU. */
- if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
+ if (BGE_IS_5717_PLUS(sc) ||
+ sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
sc->bge_asicrev == BGE_ASICREV_BCM57780)
@@ -2722,7 +2841,10 @@ bge_attach(device_t dev)
* the TSO to the controllers that are not affected TSO issues
* (e.g. 5755 or higher).
*/
- if (BGE_IS_5755_PLUS(sc)) {
+ if (BGE_IS_5717_PLUS(sc)) {
+ /* BCM5717 requires different TSO configuration. */
+ sc->bge_flags |= BGE_FLAG_TSO3;
+ } else if (BGE_IS_5755_PLUS(sc)) {
/*
* BCM5754 and BCM5787 shares the same ASIC id so
* explicit device id check is required.
@@ -2785,6 +2907,16 @@ bge_attach(device_t dev)
}
}
+ /*
+	 * All controllers except BCM5700 support tagged status but
+ * we use tagged status only for MSI case on BCM5717. Otherwise
+ * MSI on BCM5717 does not work.
+ */
+#ifndef DEVICE_POLLING
+ if (sc->bge_flags & BGE_FLAG_MSI && BGE_IS_5717_PLUS(sc))
+ sc->bge_flags |= BGE_FLAG_TAGGED_STATUS;
+#endif
+
sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
RF_SHAREABLE | RF_ACTIVE);
@@ -2848,7 +2980,9 @@ bge_attach(device_t dev)
}
/* 5705 limits RX return ring to 512 entries. */
- if (BGE_IS_5705_PLUS(sc))
+ if (BGE_IS_5717_PLUS(sc))
+ sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
+ else if (BGE_IS_5705_PLUS(sc))
sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
else
sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
@@ -2893,7 +3027,7 @@ bge_attach(device_t dev)
ifp->if_hwassist = sc->bge_csum_features;
ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
IFCAP_VLAN_MTU;
- if ((sc->bge_flags & BGE_FLAG_TSO) != 0) {
+ if ((sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) != 0) {
ifp->if_hwassist |= CSUM_TSO;
ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_VLAN_HWTSO;
}
@@ -3340,6 +3474,7 @@ bge_reset(struct bge_softc *sc)
/* XXX: Broadcom Linux driver. */
if (sc->bge_flags & BGE_FLAG_PCIE &&
+ sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
sc->bge_asicrev != BGE_ASICREV_BCM5785) {
/* Enable Data FIFO protection. */
@@ -3480,20 +3615,8 @@ bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
m->m_pkthdr.rcvif = ifp;
- if (ifp->if_capenable & IFCAP_RXCSUM) {
- if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
- m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
- if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0)
- m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
- }
- if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
- m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
- m->m_pkthdr.csum_data =
- cur_rx->bge_tcp_udp_csum;
- m->m_pkthdr.csum_flags |=
- CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
- }
- }
+ if (ifp->if_capenable & IFCAP_RXCSUM)
+ bge_rxcsum(sc, cur_rx, m);
/*
* If we received a packet with a vlan tag,
@@ -3552,6 +3675,41 @@ bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
}
static void
+bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m)
+{
+
+ if (BGE_IS_5717_PLUS(sc)) {
+ if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
+ if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
+ m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
+ if ((cur_rx->bge_error_flag &
+ BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
+ m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
+ }
+ if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
+ m->m_pkthdr.csum_data =
+ cur_rx->bge_tcp_udp_csum;
+ m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
+ CSUM_PSEUDO_HDR;
+ }
+ }
+ } else {
+ if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
+ m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
+ if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0)
+ m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
+ }
+ if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
+ m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
+ m->m_pkthdr.csum_data =
+ cur_rx->bge_tcp_udp_csum;
+ m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
+ CSUM_PSEUDO_HDR;
+ }
+ }
+}
+
+static void
bge_txeof(struct bge_softc *sc, uint16_t tx_cons)
{
struct bge_tx_bd *cur_tx;
@@ -3668,7 +3826,7 @@ bge_intr_task(void *arg, int pending)
{
struct bge_softc *sc;
struct ifnet *ifp;
- uint32_t status;
+ uint32_t status, status_tag;
uint16_t rx_prod, tx_cons;
sc = (struct bge_softc *)arg;
@@ -3689,16 +3847,19 @@ bge_intr_task(void *arg, int pending)
rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
status = sc->bge_ldata.bge_status_block->bge_status;
+ status_tag = sc->bge_ldata.bge_status_block->bge_status_tag << 24;
sc->bge_ldata.bge_status_block->bge_status = 0;
bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
sc->bge_cdata.bge_status_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ if ((sc->bge_flags & BGE_FLAG_TAGGED_STATUS) == 0)
+ status_tag = 0;
if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) != 0)
bge_link_upd(sc);
/* Let controller work. */
- bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
+ bge_writembx(sc, BGE_MBX_IRQ0_LO, status_tag);
if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
sc->bge_rx_saved_considx != rx_prod) {
@@ -4104,7 +4265,8 @@ bge_check_short_dma(struct mbuf *m)
}
static struct mbuf *
-bge_setup_tso(struct bge_softc *sc, struct mbuf *m, uint16_t *mss)
+bge_setup_tso(struct bge_softc *sc, struct mbuf *m, uint16_t *mss,
+ uint16_t *flags)
{
struct ip *ip;
struct tcphdr *tcp;
@@ -4147,14 +4309,30 @@ bge_setup_tso(struct bge_softc *sc, struct mbuf *m, uint16_t *mss)
* Broadcom controllers uses different descriptor format for
* TSO depending on ASIC revision. Due to TSO-capable firmware
* license issue and lower performance of firmware based TSO
- * we only support hardware based TSO which is applicable for
- * BCM5755 or newer controllers. Hardware based TSO uses 11
- * bits to store MSS and upper 5 bits are used to store IP/TCP
- * header length(including IP/TCP options). The header length
- * is expressed as 32 bits unit.
+ * we only support hardware based TSO.
*/
+ /* Calculate header length, incl. TCP/IP options, in 32 bit units. */
hlen = ((ip->ip_hl << 2) + (tcp->th_off << 2)) >> 2;
- *mss |= (hlen << 11);
+ if (sc->bge_flags & BGE_FLAG_TSO3) {
+ /*
+ * For BCM5717 and newer controllers, hardware based TSO
+ * uses the 14 lower bits of the bge_mss field to store the
+ * MSS and the upper 2 bits to store the lowest 2 bits of
+ * the IP/TCP header length. The upper 6 bits of the header
+ * length are stored in the bge_flags[14:10,4] field. Jumbo
+ * frames are supported.
+ */
+ *mss |= ((hlen & 0x3) << 14);
+ *flags |= ((hlen & 0xF8) << 7) | ((hlen & 0x4) << 2);
+ } else {
+ /*
+ * For BCM5755 and newer controllers, hardware based TSO uses
+ * the lower 11 bits to store the MSS and the upper 5 bits to
+ * store the IP/TCP header length. Jumbo frames are not
+ * supported.
+ */
+ *mss |= (hlen << 11);
+ }
return (m);
}
@@ -4184,7 +4362,7 @@ bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
m = *m_head;
}
if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
- *m_head = m = bge_setup_tso(sc, m, &mss);
+ *m_head = m = bge_setup_tso(sc, m, &mss, &csum_flags);
if (*m_head == NULL)
return (ENOBUFS);
csum_flags |= BGE_TXBDFLAG_CPU_PRE_DMA |
@@ -4207,21 +4385,26 @@ bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
csum_flags |= BGE_TXBDFLAG_IP_FRAG;
}
- if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0 &&
- sc->bge_forced_collapse > 0 &&
- (sc->bge_flags & BGE_FLAG_PCIE) != 0 && m->m_next != NULL) {
- /*
- * Forcedly collapse mbuf chains to overcome hardware
- * limitation which only support a single outstanding
- * DMA read operation.
- */
- if (sc->bge_forced_collapse == 1)
- m = m_defrag(m, M_DONTWAIT);
- else
- m = m_collapse(m, M_DONTWAIT, sc->bge_forced_collapse);
- if (m == NULL)
- m = *m_head;
- *m_head = m;
+ if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0) {
+ if (sc->bge_flags & BGE_FLAG_JUMBO_FRAME &&
+ m->m_pkthdr.len > ETHER_MAX_LEN)
+ csum_flags |= BGE_TXBDFLAG_JUMBO_FRAME;
+ if (sc->bge_forced_collapse > 0 &&
+ (sc->bge_flags & BGE_FLAG_PCIE) != 0 && m->m_next != NULL) {
+ /*
+ * Forcedly collapse mbuf chains to overcome hardware
+ * limitation which only support a single outstanding
+ * DMA read operation.
+ */
+ if (sc->bge_forced_collapse == 1)
+ m = m_defrag(m, M_DONTWAIT);
+ else
+ m = m_collapse(m, M_DONTWAIT,
+ sc->bge_forced_collapse);
+ if (m == NULL)
+ m = *m_head;
+ *m_head = m;
+ }
}
map = sc->bge_cdata.bge_tx_dmamap[idx];
diff --git a/sys/dev/bge/if_bgereg.h b/sys/dev/bge/if_bgereg.h
index a50bf59..6a9c2e8 100644
--- a/sys/dev/bge/if_bgereg.h
+++ b/sys/dev/bge/if_bgereg.h
@@ -82,6 +82,7 @@
#define BGE_UNMAPPED_END 0x00001FFF
#define BGE_DMA_DESCRIPTORS 0x00002000
#define BGE_DMA_DESCRIPTORS_END 0x00003FFF
+#define BGE_SEND_RING_5717 0x00004000
#define BGE_SEND_RING_1_TO_4 0x00004000
#define BGE_SEND_RING_1_TO_4_END 0x00005FFF
@@ -100,6 +101,8 @@
#define BGE_BUFFPOOL_2_END 0x00017FFF
#define BGE_BUFFPOOL_3 0x00018000 /* or expansion ROM */
#define BGE_BUFFPOOL_3_END 0x0001FFFF
+#define BGE_STD_RX_RINGS_5717 0x00040000
+#define BGE_JUMBO_RX_RINGS_5717 0x00044400
/* Mappings for external SSRAM configurations */
#define BGE_SEND_RING_5_TO_6 0x00006000
@@ -219,6 +222,7 @@
#define BGE_PCI_ISR_MBX_HI 0xB0
#define BGE_PCI_ISR_MBX_LO 0xB4
#define BGE_PCI_PRODID_ASICREV 0xBC
+#define BGE_PCI_GEN2_PRODID_ASICREV 0xF4
/* PCI Misc. Host control register */
#define BGE_PCIMISCCTL_CLEAR_INTA 0x00000001
@@ -229,6 +233,7 @@
#define BGE_PCIMISCCTL_CLOCKCTL_RW 0x00000020
#define BGE_PCIMISCCTL_REG_WORDSWAP 0x00000040
#define BGE_PCIMISCCTL_INDIRECT_ACCESS 0x00000080
+#define BGE_PCIMISCCTL_TAGGED_STATUS 0x00000200
#define BGE_PCIMISCCTL_ASICREV 0xFFFF0000
#define BGE_PCIMISCCTL_ASICREV_SHIFT 16
@@ -311,6 +316,8 @@
#define BGE_CHIPID_BCM5906_A2 0xc002
#define BGE_CHIPID_BCM57780_A0 0x57780000
#define BGE_CHIPID_BCM57780_A1 0x57780001
+#define BGE_CHIPID_BCM5717_A0 0x05717000
+#define BGE_CHIPID_BCM5717_B0 0x05717100
/* shorthand one */
#define BGE_ASICREV(x) ((x) >> 12)
@@ -331,6 +338,7 @@
/* Should consult BGE_PCI_PRODID_ASICREV for ChipID */
#define BGE_ASICREV_USE_PRODID_REG 0x0f
/* BGE_PCI_PRODID_ASICREV ASIC rev. identifiers. */
+#define BGE_ASICREV_BCM5717 0x5717
#define BGE_ASICREV_BCM5761 0x5761
#define BGE_ASICREV_BCM5784 0x5784
#define BGE_ASICREV_BCM5785 0x5785
@@ -348,11 +356,14 @@
#define BGE_CHIPREV_5750_AX 0x40
#define BGE_CHIPREV_5750_BX 0x41
/* BGE_PCI_PRODID_ASICREV chip rev. identifiers. */
+#define BGE_CHIPREV_5717_AX 0x57170
+#define BGE_CHIPREV_5717_BX 0x57171
#define BGE_CHIPREV_5761_AX 0x57611
#define BGE_CHIPREV_5784_AX 0x57841
/* PCI DMA Read/Write Control register */
#define BGE_PCIDMARWCTL_MINDMA 0x000000FF
+#define BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT 0x00000001
#define BGE_PCIDMARWCTL_RDADRR_BNDRY 0x00000700
#define BGE_PCIDMARWCTL_WRADDR_BNDRY 0x00003800
#define BGE_PCIDMARWCTL_ONEDMA_ATONCE 0x0000C000
@@ -566,6 +577,7 @@
#define BGE_TX_RINGS_MAX 4
#define BGE_TX_RINGS_EXTSSRAM_MAX 16
#define BGE_RX_RINGS_MAX 16
+#define BGE_RX_RINGS_MAX_5717 17
/* Ethernet MAC control registers */
#define BGE_MAC_MODE 0x0400
@@ -843,9 +855,10 @@
#define BGE_SGDIGCFG_AUTO 0x80000000
/* SGDIG status (not documented) */
+#define BGE_SGDIGSTS_DONE 0x00000002
+#define BGE_SGDIGSTS_IS_SERDES 0x00000100
#define BGE_SGDIGSTS_PAUSE_CAP 0x00080000
#define BGE_SGDIGSTS_ASYM_PAUSE 0x00100000
-#define BGE_SGDIGSTS_DONE 0x00000002
/* MI communication register */
@@ -911,6 +924,7 @@
#define BGE_SDIMODE_RESET 0x00000001
#define BGE_SDIMODE_ENABLE 0x00000002
#define BGE_SDIMODE_STATS_OFLOW_ATTN 0x00000004
+#define BGE_SDIMODE_HW_LSO_PRE_DMA 0x00000008
/* Send Data Initiator stats register */
#define BGE_SDISTAT_STATS_OFLOW_ATTN 0x00000004
@@ -1188,6 +1202,9 @@
#define BGE_RBDI_STD_REPL_THRESH 0x2C18
#define BGE_RBDI_JUMBO_REPL_THRESH 0x2C1C
+#define BGE_STD_REPLENISH_LWM 0x2D00
+#define BGE_JMB_REPLENISH_LWM 0x2D04
+
/* Receive BD Initiator Mode register */
#define BGE_RBDIMODE_RESET 0x00000001
#define BGE_RBDIMODE_ENABLE 0x00000002
@@ -1501,6 +1518,7 @@
#define BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN 0x00002000
#define BGE_RDMAMODE_FIFO_SIZE_128 0x00020000
#define BGE_RDMAMODE_FIFO_LONG_BURST 0x00030000
+#define BGE_RDMAMODE_MULT_DMA_RD_DIS 0x01000000
#define BGE_RDMAMODE_TSO4_ENABLE 0x08000000
#define BGE_RDMAMODE_TSO6_ENABLE 0x10000000
@@ -2068,15 +2086,27 @@ struct bge_tx_bd {
#define BGE_TXBDFLAG_IP_CSUM 0x0002
#define BGE_TXBDFLAG_END 0x0004
#define BGE_TXBDFLAG_IP_FRAG 0x0008
+#define BGE_TXBDFLAG_JUMBO_FRAME 0x0008 /* 5717 */
#define BGE_TXBDFLAG_IP_FRAG_END 0x0010
+#define BGE_TXBDFLAG_HDRLEN_BIT2 0x0010 /* 5717 */
+#define BGE_TXBDFLAG_SNAP 0x0020 /* 5717 */
#define BGE_TXBDFLAG_VLAN_TAG 0x0040
#define BGE_TXBDFLAG_COAL_NOW 0x0080
#define BGE_TXBDFLAG_CPU_PRE_DMA 0x0100
#define BGE_TXBDFLAG_CPU_POST_DMA 0x0200
+#define BGE_TXBDFLAG_HDRLEN_BIT3 0x0400 /* 5717 */
+#define BGE_TXBDFLAG_HDRLEN_BIT4 0x0800 /* 5717 */
#define BGE_TXBDFLAG_INSERT_SRC_ADDR 0x1000
+#define BGE_TXBDFLAG_HDRLEN_BIT5 0x1000 /* 5717 */
+#define BGE_TXBDFLAG_HDRLEN_BIT6 0x2000 /* 5717 */
+#define BGE_TXBDFLAG_HDRLEN_BIT7 0x4000 /* 5717 */
#define BGE_TXBDFLAG_CHOOSE_SRC_ADDR 0x6000
#define BGE_TXBDFLAG_NO_CRC 0x8000
+#define BGE_TXBDFLAG_MSS_SIZE_MASK 0x3FFF /* 5717 */
+/* Upper 2 bits of bge_mss hold bits [1:0] of the TCP/IP header length. */
+#define BGE_TXBDFLAG_MSS_HDRLEN_MASK 0xC000 /* 5717 */
+
#define BGE_NIC_TXRING_ADDR(ringno, size) \
BGE_SEND_RING_1_TO_4 + \
((ringno * sizeof(struct bge_tx_bd) * size) / 4)
@@ -2153,6 +2183,7 @@ struct bge_extrx_bd {
#define BGE_RXBDFLAG_IP_CSUM 0x1000
#define BGE_RXBDFLAG_TCP_UDP_CSUM 0x2000
#define BGE_RXBDFLAG_TCP_UDP_IS_TCP 0x4000
+#define BGE_RXBDFLAG_IPV6 0x8000
#define BGE_RXERRFLAG_BAD_CRC 0x0001
#define BGE_RXERRFLAG_COLL_DETECT 0x0002
@@ -2162,6 +2193,7 @@ struct bge_extrx_bd {
#define BGE_RXERRFLAG_RUNT 0x0020
#define BGE_RXERRFLAG_TRUNC_NO_RSRCS 0x0040
#define BGE_RXERRFLAG_GIANT 0x0080
+#define BGE_RXERRFLAG_IP_CSUM_NOK 0x1000 /* 5717 */
struct bge_sts_idx {
#if BYTE_ORDER == LITTLE_ENDIAN
@@ -2175,7 +2207,7 @@ struct bge_sts_idx {
struct bge_status_block {
uint32_t bge_status;
- uint32_t bge_rsvd0;
+ uint32_t bge_status_tag;
#if BYTE_ORDER == LITTLE_ENDIAN
uint16_t bge_rx_jumbo_cons_idx;
uint16_t bge_rx_std_cons_idx;
@@ -2221,6 +2253,8 @@ struct bge_status_block {
#define BCOM_DEVICEID_BCM5714S 0x1669
#define BCOM_DEVICEID_BCM5715 0x1678
#define BCOM_DEVICEID_BCM5715S 0x1679
+#define BCOM_DEVICEID_BCM5717 0x1655
+#define BCOM_DEVICEID_BCM5718 0x1656
#define BCOM_DEVICEID_BCM5720 0x1658
#define BCOM_DEVICEID_BCM5721 0x1659
#define BCOM_DEVICEID_BCM5722 0x165A
@@ -2717,16 +2751,20 @@ struct bge_softc {
#define BGE_FLAG_EADDR 0x00000008
#define BGE_FLAG_MII_SERDES 0x00000010
#define BGE_FLAG_CPMU_PRESENT 0x00000020
+#define BGE_FLAG_TAGGED_STATUS 0x00000040
#define BGE_FLAG_MSI 0x00000100
#define BGE_FLAG_PCIX 0x00000200
#define BGE_FLAG_PCIE 0x00000400
#define BGE_FLAG_TSO 0x00000800
+#define BGE_FLAG_TSO3 0x00001000
+#define BGE_FLAG_JUMBO_FRAME 0x00002000
#define BGE_FLAG_5700_FAMILY 0x00010000
#define BGE_FLAG_5705_PLUS 0x00020000
#define BGE_FLAG_5714_FAMILY 0x00040000
#define BGE_FLAG_575X_PLUS 0x00080000
#define BGE_FLAG_5755_PLUS 0x00100000
#define BGE_FLAG_5788 0x00200000
+#define BGE_FLAG_5717_PLUS 0x00400000
#define BGE_FLAG_40BIT_BUG 0x01000000
#define BGE_FLAG_4G_BNDRY_BUG 0x02000000
#define BGE_FLAG_RX_ALIGNBUG 0x04000000
diff --git a/sys/dev/e1000/e1000_82571.c b/sys/dev/e1000/e1000_82571.c
index 3554dbe..5e12c49 100644
--- a/sys/dev/e1000/e1000_82571.c
+++ b/sys/dev/e1000/e1000_82571.c
@@ -666,10 +666,15 @@ static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw)
**/
static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw)
{
+ s32 ret_val;
+
DEBUGFUNC("e1000_get_hw_semaphore_82574");
E1000_MUTEX_LOCK(&hw->dev_spec._82571.swflag_mutex);
- return e1000_get_hw_semaphore_82573(hw);
+ ret_val = e1000_get_hw_semaphore_82573(hw);
+ if (ret_val)
+ E1000_MUTEX_UNLOCK(&hw->dev_spec._82571.swflag_mutex);
+ return ret_val;
}
/**
diff --git a/sys/dev/e1000/if_em.c b/sys/dev/e1000/if_em.c
index af6100a..c41e144 100644
--- a/sys/dev/e1000/if_em.c
+++ b/sys/dev/e1000/if_em.c
@@ -93,8 +93,7 @@ int em_display_debug_stats = 0;
/*********************************************************************
* Driver version:
*********************************************************************/
-char em_driver_version[] = "7.0.8";
-
+char em_driver_version[] = "7.1.7";
/*********************************************************************
* PCI Device ID Table
@@ -170,6 +169,8 @@ static em_vendor_info_t em_vendor_info_array[] =
{ 0x8086, E1000_DEV_ID_PCH_M_HV_LC, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_PCH_D_HV_DM, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_PCH_D_HV_DC, PCI_ANY_ID, PCI_ANY_ID, 0},
+ { 0x8086, E1000_DEV_ID_PCH2_LV_LM, PCI_ANY_ID, PCI_ANY_ID, 0},
+ { 0x8086, E1000_DEV_ID_PCH2_LV_V, PCI_ANY_ID, PCI_ANY_ID, 0},
/* required last entry */
{ 0, 0, 0, 0, 0}
};
@@ -256,6 +257,8 @@ static int em_dma_malloc(struct adapter *, bus_size_t,
static void em_dma_free(struct adapter *, struct em_dma_alloc *);
static int em_sysctl_nvm_info(SYSCTL_HANDLER_ARGS);
static void em_print_nvm_info(struct adapter *);
+static int em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
+static void em_print_debug_info(struct adapter *);
static int em_is_valid_ether_addr(u8 *);
static int em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
static void em_add_int_delay_sysctl(struct adapter *, const char *,
@@ -282,6 +285,8 @@ static void em_handle_link(void *context, int pending);
static void em_add_rx_process_limit(struct adapter *, const char *,
const char *, int *, int);
+static void em_set_flow_cntrl(struct adapter *, const char *,
+ const char *, int *, int);
static __inline void em_rx_discard(struct rx_ring *, int);
@@ -359,14 +364,6 @@ TUNABLE_INT("hw.em.rx_process_limit", &em_rx_process_limit);
static int em_fc_setting = e1000_fc_full;
TUNABLE_INT("hw.em.fc_setting", &em_fc_setting);
-/*
-** Shadow VFTA table, this is needed because
-** the real vlan filter table gets cleared during
-** a soft reset and the driver needs to be able
-** to repopulate it.
-*/
-static u32 em_shadow_vfta[EM_VFTA_SIZE];
-
/* Global used in WOL setup with multiport cards */
static int global_quad_port_a = 0;
@@ -449,6 +446,11 @@ em_attach(device_t dev)
OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
em_sysctl_nvm_info, "I", "NVM Information");
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
+ em_sysctl_debug_info, "I", "Debug Information");
+
callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
/* Determine hardware and mac info */
@@ -468,9 +470,10 @@ em_attach(device_t dev)
** identified
*/
if ((adapter->hw.mac.type == e1000_ich8lan) ||
- (adapter->hw.mac.type == e1000_pchlan) ||
(adapter->hw.mac.type == e1000_ich9lan) ||
- (adapter->hw.mac.type == e1000_ich10lan)) {
+ (adapter->hw.mac.type == e1000_ich10lan) ||
+ (adapter->hw.mac.type == e1000_pchlan) ||
+ (adapter->hw.mac.type == e1000_pch2lan)) {
int rid = EM_BAR_TYPE_FLASH;
adapter->flash = bus_alloc_resource_any(dev,
SYS_RES_MEMORY, &rid, RF_ACTIVE);
@@ -514,11 +517,16 @@ em_attach(device_t dev)
E1000_REGISTER(&adapter->hw, E1000_TADV),
em_tx_abs_int_delay_dflt);
- /* Sysctls for limiting the amount of work done in the taskqueue */
+ /* Sysctl for limiting the amount of work done in the taskqueue */
em_add_rx_process_limit(adapter, "rx_processing_limit",
"max number of rx packets to process", &adapter->rx_process_limit,
em_rx_process_limit);
+ /* Sysctl for setting the interface flow control */
+ em_set_flow_cntrl(adapter, "flow_control",
+ "set flow control: 0=off, 1=rx pause, 2=tx pause, 3=full",
+ &adapter->fc_setting, em_fc_setting);
+
/*
* Validate number of transmit and receive descriptors. It
* must not exceed hardware maximum, and must be multiple
@@ -581,6 +589,11 @@ em_attach(device_t dev)
goto err_late;
}
+ /* Check SOL/IDER usage */
+ if (e1000_check_reset_block(&adapter->hw))
+ device_printf(dev, "PHY reset is blocked"
+ " due to SOL/IDER session.\n");
+
/*
** Start from a known state, this is
** important in reading the nvm and
@@ -644,11 +657,6 @@ em_attach(device_t dev)
adapter->hw.mac.get_link_status = 1;
em_update_link_status(adapter);
- /* Indicate SOL/IDER usage */
- if (e1000_check_reset_block(&adapter->hw))
- device_printf(dev,
- "PHY reset is blocked due to SOL/IDER session.\n");
-
/* Register for VLAN events */
adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
em_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
@@ -857,7 +865,7 @@ em_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
if (enq > 0) {
/* Set the watchdog */
- txr->watchdog_check = TRUE;
+ txr->queue_status = EM_QUEUE_WORKING;
txr->watchdog_time = ticks;
}
return (err);
@@ -870,14 +878,8 @@ static int
em_mq_start(struct ifnet *ifp, struct mbuf *m)
{
struct adapter *adapter = ifp->if_softc;
- struct tx_ring *txr;
- int i = 0, error = 0;
-
- /* Which queue to use */
- if ((m->m_flags & M_FLOWID) != 0)
- i = m->m_pkthdr.flowid % adapter->num_queues;
-
- txr = &adapter->tx_rings[i];
+ struct tx_ring *txr = adapter->tx_rings;
+ int error;
if (EM_TX_TRYLOCK(txr)) {
error = em_mq_start_locked(ifp, txr, m);
@@ -953,7 +955,7 @@ em_start_locked(struct ifnet *ifp, struct tx_ring *txr)
/* Set timeout in case hardware has problems transmitting. */
txr->watchdog_time = ticks;
- txr->watchdog_check = TRUE;
+ txr->queue_status = EM_QUEUE_WORKING;
}
return;
@@ -1029,6 +1031,7 @@ em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
case e1000_82572:
case e1000_ich9lan:
case e1000_ich10lan:
+ case e1000_pch2lan:
case e1000_82574:
case e1000_80003es2lan: /* 9K Jumbo Frame size */
max_frame_size = 9234;
@@ -1092,6 +1095,11 @@ em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
}
break;
case SIOCSIFMEDIA:
+ /*
+ ** As the speed/duplex settings are being
+ ** changed, we need to reset the PHY.
+ */
+ adapter->hw.phy.reset_disable = FALSE;
/* Check SOL/IDER usage */
EM_CORE_LOCK(adapter);
if (e1000_check_reset_block(&adapter->hw)) {
@@ -1101,6 +1109,7 @@ em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
break;
}
EM_CORE_UNLOCK(adapter);
+ /* falls thru */
case SIOCGIFMEDIA:
IOCTL_DEBUGOUT("ioctl rcv'd: \
SIOCxIFMEDIA (Get/Set Interface Media)");
@@ -1215,13 +1224,16 @@ em_init_locked(struct adapter *adapter)
case e1000_82583:
pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
break;
+ case e1000_ich8lan:
+ pba = E1000_PBA_8K;
+ break;
case e1000_ich9lan:
case e1000_ich10lan:
case e1000_pchlan:
pba = E1000_PBA_10K;
break;
- case e1000_ich8lan:
- pba = E1000_PBA_8K;
+ case e1000_pch2lan:
+ pba = E1000_PBA_26K;
break;
default:
if (adapter->max_frame_size > 8192)
@@ -1259,19 +1271,6 @@ em_init_locked(struct adapter *adapter)
/* Setup VLAN support, basic and offload if available */
E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
- /* Use real VLAN Filter support? */
- if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
- if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
- /* Use real VLAN Filter support */
- em_setup_vlan_hw_support(adapter);
- else {
- u32 ctrl;
- ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
- ctrl |= E1000_CTRL_VME;
- E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
- }
- }
-
/* Set hardware offload abilities */
ifp->if_hwassist = 0;
if (ifp->if_capenable & IFCAP_TXCSUM)
@@ -1289,6 +1288,17 @@ em_init_locked(struct adapter *adapter)
/* Setup Multicast table */
em_set_multi(adapter);
+ /*
+ ** Figure out the desired mbuf
+ ** pool for doing jumbos
+ */
+ if (adapter->max_frame_size <= 2048)
+ adapter->rx_mbuf_sz = MCLBYTES;
+ else if (adapter->max_frame_size <= 4096)
+ adapter->rx_mbuf_sz = MJUMPAGESIZE;
+ else
+ adapter->rx_mbuf_sz = MJUM9BYTES;
+
/* Prepare receive descriptors and buffers */
if (em_setup_receive_structures(adapter)) {
device_printf(dev, "Could not setup receive structures\n");
@@ -1297,6 +1307,19 @@ em_init_locked(struct adapter *adapter)
}
em_initialize_receive_unit(adapter);
+ /* Use real VLAN Filter support? */
+ if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
+ if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
+ /* Use real VLAN Filter support */
+ em_setup_vlan_hw_support(adapter);
+ else {
+ u32 ctrl;
+ ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
+ ctrl |= E1000_CTRL_VME;
+ E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
+ }
+ }
+
/* Don't lose promiscuous settings */
em_set_promisc(adapter);
@@ -1707,11 +1730,6 @@ em_media_change(struct ifnet *ifp)
device_printf(adapter->dev, "Unsupported media type\n");
}
- /* As the speed/duplex settings my have changed we need to
- * reset the PHY.
- */
- adapter->hw.phy.reset_disable = FALSE;
-
em_init_locked(adapter);
EM_CORE_UNLOCK(adapter);
@@ -1748,19 +1766,6 @@ em_xmit(struct tx_ring *txr, struct mbuf **m_headp)
ip_off = poff = 0;
/*
- ** When doing checksum offload, it is critical to
- ** make sure the first mbuf has more than header,
- ** because that routine expects data to be present.
- */
- if ((m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) &&
- (m_head->m_len < ETHER_HDR_LEN + sizeof(struct ip))) {
- m_head = m_pullup(m_head, ETHER_HDR_LEN + sizeof(struct ip));
- *m_headp = m_head;
- if (m_head == NULL)
- return (ENOBUFS);
- }
-
- /*
* Intel recommends entire IP/TCP header length reside in a single
* buffer. If multiple descriptors are used to describe the IP and
* TCP header, each descriptor should describe one or more
@@ -1830,6 +1835,7 @@ em_xmit(struct tx_ring *txr, struct mbuf **m_headp)
*m_headp = NULL;
return (ENOBUFS);
}
+ ip = (struct ip *)(mtod(m_head, char *) + ip_off);
ip->ip_len = 0;
ip->ip_sum = 0;
/*
@@ -1838,6 +1844,7 @@ em_xmit(struct tx_ring *txr, struct mbuf **m_headp)
* what hardware expect to see. This is adherence of
* Microsoft's Large Send specification.
*/
+ tp = (struct tcphdr *)(mtod(m_head, char *) + poff);
tp->th_sum = in_pseudo(ip->ip_src.s_addr,
ip->ip_dst.s_addr, htons(IPPROTO_TCP));
} else if (m_head->m_pkthdr.csum_flags & CSUM_TCP) {
@@ -1847,12 +1854,15 @@ em_xmit(struct tx_ring *txr, struct mbuf **m_headp)
*m_headp = NULL;
return (ENOBUFS);
}
+ ip = (struct ip *)(mtod(m_head, char *) + ip_off);
+ tp = (struct tcphdr *)(mtod(m_head, char *) + poff);
} else if (m_head->m_pkthdr.csum_flags & CSUM_UDP) {
m_head = m_pullup(m_head, poff + sizeof(struct udphdr));
if (m_head == NULL) {
*m_headp = NULL;
return (ENOBUFS);
}
+ ip = (struct ip *)(mtod(m_head, char *) + ip_off);
}
*m_headp = m_head;
}
@@ -1929,15 +1939,12 @@ em_xmit(struct tx_ring *txr, struct mbuf **m_headp)
m_head = *m_headp;
/* Do hardware assists */
-#if __FreeBSD_version >= 700000
if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
- em_tso_setup(txr, m_head, ip_off, ip, tp, &txd_upper,
- &txd_lower);
+ em_tso_setup(txr, m_head, ip_off, ip, tp,
+ &txd_upper, &txd_lower);
/* we need to make a final sentinel transmit desc */
tso_desc = TRUE;
- } else
-#endif
- if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)
+ } else if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)
em_transmit_checksum_setup(txr, m_head,
ip_off, ip, &txd_upper, &txd_lower);
@@ -2164,34 +2171,30 @@ em_local_timer(void *arg)
em_update_stats_counters(adapter);
/* Reset LAA into RAR[0] on 82571 */
- if (e1000_get_laa_state_82571(&adapter->hw) == TRUE)
+ if ((adapter->hw.mac.type == e1000_82571) &&
+ e1000_get_laa_state_82571(&adapter->hw))
e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
/*
- ** If flow control has paused us since last checking
- ** it invalidates the watchdog timing, so dont run it.
+ ** Don't do TX watchdog check if we've been paused
*/
if (adapter->pause_frames) {
adapter->pause_frames = 0;
goto out;
}
/*
- ** Check for time since any descriptor was cleaned
+ ** Check on the state of the TX queue(s), this
+ ** can be done without the lock because it's RO
+ ** and the HUNG state will be static if set.
*/
- for (int i = 0; i < adapter->num_queues; i++, txr++) {
- EM_TX_LOCK(txr);
- if (txr->watchdog_check == FALSE) {
- EM_TX_UNLOCK(txr);
- continue;
- }
- if ((ticks - txr->watchdog_time) > EM_WATCHDOG)
+ for (int i = 0; i < adapter->num_queues; i++, txr++)
+ if (txr->queue_status == EM_QUEUE_HUNG)
goto hung;
- EM_TX_UNLOCK(txr);
- }
out:
callout_reset(&adapter->timer, hz, em_local_timer, adapter);
return;
hung:
+ /* Looks like we're hung */
device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
device_printf(adapter->dev,
"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
@@ -2272,7 +2275,7 @@ em_update_link_status(struct adapter *adapter)
adapter->link_active = 0;
/* Link down, disable watchdog */
for (int i = 0; i < adapter->num_queues; i++, txr++)
- txr->watchdog_check = FALSE;
+ txr->queue_status = EM_QUEUE_IDLE;
if_link_state_change(ifp, LINK_STATE_DOWN);
}
}
@@ -2306,7 +2309,7 @@ em_stop(void *arg)
/* Unarm watchdog timer. */
for (int i = 0; i < adapter->num_queues; i++, txr++) {
EM_TX_LOCK(txr);
- txr->watchdog_check = FALSE;
+ txr->queue_status = EM_QUEUE_IDLE;
EM_TX_UNLOCK(txr);
}
@@ -2571,6 +2574,9 @@ em_free_pci_resources(struct adapter *adapter)
for (int i = 0; i < adapter->num_queues; i++) {
txr = &adapter->tx_rings[i];
rxr = &adapter->rx_rings[i];
+ /* an early abort? */
+ if ((txr == NULL) || (rxr == NULL))
+ break;
rid = txr->msix +1;
if (txr->tag != NULL) {
bus_teardown_intr(dev, txr->res, txr->tag);
@@ -2689,6 +2695,7 @@ static void
em_reset(struct adapter *adapter)
{
device_t dev = adapter->dev;
+ struct ifnet *ifp = adapter->ifp;
struct e1000_hw *hw = &adapter->hw;
u16 rx_buffer_size;
@@ -2733,15 +2740,25 @@ em_reset(struct adapter *adapter)
hw->fc.send_xon = TRUE;
/* Set Flow control, use the tunable location if sane */
- if ((em_fc_setting >= 0) || (em_fc_setting < 4))
- hw->fc.requested_mode = em_fc_setting;
- else
- hw->fc.requested_mode = e1000_fc_none;
+ hw->fc.requested_mode = adapter->fc_setting;
- /* Override - workaround for PCHLAN issue */
+ /* Workaround: no TX flow ctrl for PCH */
if (hw->mac.type == e1000_pchlan)
hw->fc.requested_mode = e1000_fc_rx_pause;
+ /* Override - settings for PCH2LAN; yes, it's magic :) */
+ if (hw->mac.type == e1000_pch2lan) {
+ hw->fc.high_water = 0x5C20;
+ hw->fc.low_water = 0x5048;
+ hw->fc.pause_time = 0x0650;
+ hw->fc.refresh_time = 0x0400;
+ /* Jumbos need adjusted PBA */
+ if (ifp->if_mtu > ETHERMTU)
+ E1000_WRITE_REG(hw, E1000_PBA, 12);
+ else
+ E1000_WRITE_REG(hw, E1000_PBA, 26);
+ }
+
/* Issue a global reset */
e1000_reset_hw(hw);
E1000_WRITE_REG(hw, E1000_WUC, 0);
@@ -3173,6 +3190,7 @@ em_setup_transmit_ring(struct tx_ring *txr)
/* Set number of descriptors available */
txr->tx_avail = adapter->num_tx_desc;
+ txr->queue_status = EM_QUEUE_IDLE;
/* Clear checksum offload context. */
txr->last_hw_offload = 0;
@@ -3233,7 +3251,7 @@ em_initialize_transmit_unit(struct adapter *adapter)
E1000_READ_REG(&adapter->hw, E1000_TDBAL(i)),
E1000_READ_REG(&adapter->hw, E1000_TDLEN(i)));
- txr->watchdog_check = FALSE;
+ txr->queue_status = EM_QUEUE_IDLE;
}
/* Set the default values for the Tx Inter Packet Gap timer */
@@ -3610,16 +3628,20 @@ static bool
em_txeof(struct tx_ring *txr)
{
struct adapter *adapter = txr->adapter;
- int first, last, done;
+ int first, last, done, processed;
struct em_buffer *tx_buffer;
struct e1000_tx_desc *tx_desc, *eop_desc;
struct ifnet *ifp = adapter->ifp;
EM_TX_LOCK_ASSERT(txr);
- if (txr->tx_avail == adapter->num_tx_desc)
+ /* No work, make sure watchdog is off */
+ if (txr->tx_avail == adapter->num_tx_desc) {
+ txr->queue_status = EM_QUEUE_IDLE;
return (FALSE);
+ }
+ processed = 0;
first = txr->next_to_clean;
tx_desc = &txr->tx_base[first];
tx_buffer = &txr->tx_buffers[first];
@@ -3646,6 +3668,7 @@ em_txeof(struct tx_ring *txr)
tx_desc->lower.data = 0;
tx_desc->buffer_addr = 0;
++txr->tx_avail;
+ ++processed;
if (tx_buffer->m_head) {
bus_dmamap_sync(txr->txtag,
@@ -3681,6 +3704,16 @@ em_txeof(struct tx_ring *txr)
txr->next_to_clean = first;
+ /*
+ ** Watchdog calculation, we know there's
+ ** work outstanding or the first return
+ ** would have been taken, so none processed
+ ** for too long indicates a hang. local timer
+ ** will examine this and do a reset if needed.
+ */
+ if ((!processed) && ((ticks - txr->watchdog_time) > EM_WATCHDOG))
+ txr->queue_status = EM_QUEUE_HUNG;
+
/*
* If we have enough room, clear IFF_DRV_OACTIVE
* to tell the stack that it is OK to send packets.
@@ -3689,7 +3722,7 @@ em_txeof(struct tx_ring *txr)
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
/* Disable watchdog if all clean */
if (txr->tx_avail == adapter->num_tx_desc) {
- txr->watchdog_check = FALSE;
+ txr->queue_status = EM_QUEUE_IDLE;
return (FALSE);
}
}
@@ -3723,7 +3756,8 @@ em_refresh_mbufs(struct rx_ring *rxr, int limit)
*/
if (rxbuf->m_head != NULL)
goto reuse;
- m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
+ m = m_getjcl(M_DONTWAIT, MT_DATA,
+ M_PKTHDR, adapter->rx_mbuf_sz);
/*
** If we have a temporary resource shortage
** that causes a failure, just abort refresh
@@ -3732,10 +3766,7 @@ em_refresh_mbufs(struct rx_ring *rxr, int limit)
*/
if (m == NULL)
goto update;
- m->m_len = m->m_pkthdr.len = MCLBYTES;
-
- if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
- m_adj(m, ETHER_ALIGN);
+ m->m_len = m->m_pkthdr.len = adapter->rx_mbuf_sz;
/* Use bus_dma machinery to setup the memory mapping */
error = bus_dmamap_load_mbuf_sg(rxr->rxtag, rxbuf->map,
@@ -3801,9 +3832,9 @@ em_allocate_receive_buffers(struct rx_ring *rxr)
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
- MCLBYTES, /* maxsize */
+ MJUM9BYTES, /* maxsize */
1, /* nsegments */
- MCLBYTES, /* maxsegsize */
+ MJUM9BYTES, /* maxsegsize */
0, /* flags */
NULL, /* lockfunc */
NULL, /* lockarg */
@@ -3871,12 +3902,13 @@ em_setup_receive_ring(struct rx_ring *rxr)
for (int j = 0; j != adapter->num_rx_desc; ++j) {
rxbuf = &rxr->rx_buffers[j];
- rxbuf->m_head = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
+ rxbuf->m_head = m_getjcl(M_DONTWAIT, MT_DATA,
+ M_PKTHDR, adapter->rx_mbuf_sz);
if (rxbuf->m_head == NULL)
return (ENOBUFS);
- rxbuf->m_head->m_len = MCLBYTES;
+ rxbuf->m_head->m_len = adapter->rx_mbuf_sz;
rxbuf->m_head->m_flags &= ~M_HASFCS; /* we strip it */
- rxbuf->m_head->m_pkthdr.len = MCLBYTES;
+ rxbuf->m_head->m_pkthdr.len = adapter->rx_mbuf_sz;
/* Get the memory mapping */
error = bus_dmamap_load_mbuf_sg(rxr->rxtag,
@@ -4082,6 +4114,23 @@ em_initialize_receive_unit(struct adapter *adapter)
E1000_WRITE_REG(hw, E1000_RDT(i), adapter->num_rx_desc - 1);
}
+ /* Set early receive threshold on appropriate hw */
+ if (((adapter->hw.mac.type == e1000_ich9lan) ||
+ (adapter->hw.mac.type == e1000_pch2lan) ||
+ (adapter->hw.mac.type == e1000_ich10lan)) &&
+ (ifp->if_mtu > ETHERMTU)) {
+ u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
+ E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 3);
+ E1000_WRITE_REG(hw, E1000_ERT, 0x100 | (1 << 13));
+ }
+
+ if (adapter->hw.mac.type == e1000_pch2lan) {
+ if (ifp->if_mtu > ETHERMTU)
+ e1000_lv_jumbo_workaround_ich8lan(hw, TRUE);
+ else
+ e1000_lv_jumbo_workaround_ich8lan(hw, FALSE);
+ }
+
/* Setup the Receive Control Register */
rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
@@ -4094,7 +4143,14 @@ em_initialize_receive_unit(struct adapter *adapter)
/* Make sure VLAN Filters are off */
rctl &= ~E1000_RCTL_VFE;
rctl &= ~E1000_RCTL_SBP;
- rctl |= E1000_RCTL_SZ_2048;
+
+ if (adapter->rx_mbuf_sz == MCLBYTES)
+ rctl |= E1000_RCTL_SZ_2048;
+ else if (adapter->rx_mbuf_sz == MJUMPAGESIZE)
+ rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX;
+ else if (adapter->rx_mbuf_sz > MJUMPAGESIZE)
+ rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX;
+
if (ifp->if_mtu > ETHERMTU)
rctl |= E1000_RCTL_LPE;
else
@@ -4190,7 +4246,7 @@ em_rxeof(struct rx_ring *rxr, int count, int *done)
rxr->fmp->m_flags |= M_VLANTAG;
}
#ifdef EM_MULTIQUEUE
- rxr->fmp->m_pkthdr.flowid = curcpu;
+ rxr->fmp->m_pkthdr.flowid = rxr->msix;
rxr->fmp->m_flags |= M_FLOWID;
#endif
#ifndef __NO_STRICT_ALIGNMENT
@@ -4253,6 +4309,7 @@ skip:
static __inline void
em_rx_discard(struct rx_ring *rxr, int i)
{
+ struct adapter *adapter = rxr->adapter;
struct em_buffer *rbuf;
struct mbuf *m;
@@ -4267,7 +4324,7 @@ em_rx_discard(struct rx_ring *rxr, int i)
/* Reset state, keep loaded DMA map and reuse */
m = rbuf->m_head;
- m->m_len = m->m_pkthdr.len = MCLBYTES;
+ m->m_len = m->m_pkthdr.len = adapter->rx_mbuf_sz;
m->m_flags |= M_PKTHDR;
m->m_data = m->m_ext.ext_buf;
m->m_next = NULL;
@@ -4378,12 +4435,15 @@ em_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
if ((vtag == 0) || (vtag > 4095)) /* Invalid ID */
return;
+ EM_CORE_LOCK(adapter);
index = (vtag >> 5) & 0x7F;
bit = vtag & 0x1F;
- em_shadow_vfta[index] |= (1 << bit);
+ adapter->shadow_vfta[index] |= (1 << bit);
++adapter->num_vlans;
/* Re-init to load the changes */
- em_init(adapter);
+ if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
+ em_init_locked(adapter);
+ EM_CORE_UNLOCK(adapter);
}
/*
@@ -4402,12 +4462,15 @@ em_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
if ((vtag == 0) || (vtag > 4095)) /* Invalid */
return;
+ EM_CORE_LOCK(adapter);
index = (vtag >> 5) & 0x7F;
bit = vtag & 0x1F;
- em_shadow_vfta[index] &= ~(1 << bit);
+ adapter->shadow_vfta[index] &= ~(1 << bit);
--adapter->num_vlans;
/* Re-init to load the changes */
- em_init(adapter);
+ if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
+ em_init_locked(adapter);
+ EM_CORE_UNLOCK(adapter);
}
static void
@@ -4430,9 +4493,9 @@ em_setup_vlan_hw_support(struct adapter *adapter)
** we need to repopulate it now.
*/
for (int i = 0; i < EM_VFTA_SIZE; i++)
- if (em_shadow_vfta[i] != 0)
+ if (adapter->shadow_vfta[i] != 0)
E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
- i, em_shadow_vfta[i]);
+ i, adapter->shadow_vfta[i]);
reg = E1000_READ_REG(hw, E1000_CTRL);
reg |= E1000_CTRL_VME;
@@ -4443,10 +4506,6 @@ em_setup_vlan_hw_support(struct adapter *adapter)
reg &= ~E1000_RCTL_CFIEN;
reg |= E1000_RCTL_VFE;
E1000_WRITE_REG(hw, E1000_RCTL, reg);
-
- /* Update the frame size */
- E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
- adapter->max_frame_size + VLAN_TAG_SIZE);
}
static void
@@ -4615,6 +4674,7 @@ em_get_wakeup(device_t dev)
case e1000_ich9lan:
case e1000_ich10lan:
case e1000_pchlan:
+ case e1000_pch2lan:
apme_mask = E1000_WUC_APME;
adapter->has_amt = TRUE;
eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC);
@@ -4706,7 +4766,8 @@ em_enable_wakeup(device_t dev)
E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
}
- if (adapter->hw.mac.type == e1000_pchlan) {
+ if ((adapter->hw.mac.type == e1000_pchlan) ||
+ (adapter->hw.mac.type == e1000_pch2lan)) {
if (em_enable_phy_wakeup(adapter))
return;
} else {
@@ -4739,16 +4800,7 @@ em_enable_phy_wakeup(struct adapter *adapter)
u16 preg;
/* copy MAC RARs to PHY RARs */
- for (int i = 0; i < adapter->hw.mac.rar_entry_count; i++) {
- mreg = E1000_READ_REG(hw, E1000_RAL(i));
- e1000_write_phy_reg(hw, BM_RAR_L(i), (u16)(mreg & 0xFFFF));
- e1000_write_phy_reg(hw, BM_RAR_M(i),
- (u16)((mreg >> 16) & 0xFFFF));
- mreg = E1000_READ_REG(hw, E1000_RAH(i));
- e1000_write_phy_reg(hw, BM_RAR_H(i), (u16)(mreg & 0xFFFF));
- e1000_write_phy_reg(hw, BM_RAR_CTRL(i),
- (u16)((mreg >> 16) & 0xFFFF));
- }
+ e1000_copy_rx_addrs_to_phy_ich8lan(hw);
/* copy MAC MTA to PHY MTA */
for (int i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
@@ -5359,4 +5411,70 @@ em_add_rx_process_limit(struct adapter *adapter, const char *name,
OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
}
+static void
+em_set_flow_cntrl(struct adapter *adapter, const char *name,
+ const char *description, int *limit, int value)
+{
+ *limit = value;
+ SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
+ OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
+}
+
+static int
+em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
+{
+ struct adapter *adapter;
+ int error;
+ int result;
+
+ result = -1;
+ error = sysctl_handle_int(oidp, &result, 0, req);
+
+ if (error || !req->newptr)
+ return (error);
+
+ if (result == 1) {
+ adapter = (struct adapter *)arg1;
+ em_print_debug_info(adapter);
+ }
+
+ return (error);
+}
+
+/*
+** This routine is meant to be fluid, add whatever is
+** needed for debugging a problem. -jfv
+*/
+static void
+em_print_debug_info(struct adapter *adapter)
+{
+ device_t dev = adapter->dev;
+ struct tx_ring *txr = adapter->tx_rings;
+ struct rx_ring *rxr = adapter->rx_rings;
+ if (adapter->ifp->if_drv_flags & IFF_DRV_RUNNING)
+ printf("Interface is RUNNING ");
+ else
+ printf("Interface is NOT RUNNING\n");
+ if (adapter->ifp->if_drv_flags & IFF_DRV_OACTIVE)
+ printf("and ACTIVE\n");
+ else
+ printf("and INACTIVE\n");
+
+ device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
+ E1000_READ_REG(&adapter->hw, E1000_TDH(0)),
+ E1000_READ_REG(&adapter->hw, E1000_TDT(0)));
+ device_printf(dev, "hw rdh = %d, hw rdt = %d\n",
+ E1000_READ_REG(&adapter->hw, E1000_RDH(0)),
+ E1000_READ_REG(&adapter->hw, E1000_RDT(0)));
+ device_printf(dev, "Tx Queue Status = %d\n", txr->queue_status);
+ device_printf(dev, "TX descriptors avail = %d\n",
+ txr->tx_avail);
+ device_printf(dev, "Tx Descriptors avail failure = %ld\n",
+ txr->no_desc_avail);
+ device_printf(dev, "RX discarded packets = %ld\n",
+ rxr->rx_discarded);
+ device_printf(dev, "RX Next to Check = %d\n", rxr->next_to_check);
+ device_printf(dev, "RX Next to Refresh = %d\n", rxr->next_to_refresh);
+}
diff --git a/sys/dev/e1000/if_em.h b/sys/dev/e1000/if_em.h
index fec34ac..8bfd600 100644
--- a/sys/dev/e1000/if_em.h
+++ b/sys/dev/e1000/if_em.h
@@ -188,6 +188,10 @@
#define EM_EEPROM_APME 0x400;
#define EM_82544_APME 0x0004;
+#define EM_QUEUE_IDLE 0
+#define EM_QUEUE_WORKING 1
+#define EM_QUEUE_HUNG 2
+
/*
* TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
* multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary. This will
@@ -272,7 +276,7 @@ struct tx_ring {
u32 me;
u32 msix;
u32 ims;
- bool watchdog_check;
+ int queue_status;
int watchdog_time;
struct em_dma_alloc txdma;
struct e1000_tx_desc *tx_base;
@@ -391,6 +395,7 @@ struct adapter {
struct rx_ring *rx_rings;
int num_rx_desc;
u32 rx_process_limit;
+ u32 rx_mbuf_sz;
/* Management and WOL features */
u32 wol;
@@ -400,11 +405,21 @@ struct adapter {
/* Multicast array memory */
u8 *mta;
- /* Info about the board itself */
- uint8_t link_active;
- uint16_t link_speed;
- uint16_t link_duplex;
- uint32_t smartspeed;
+ /*
+ ** Shadow VFTA table, this is needed because
+ ** the real vlan filter table gets cleared during
+ ** a soft reset and the driver needs to be able
+ ** to repopulate it.
+ */
+ u32 shadow_vfta[EM_VFTA_SIZE];
+
+ /* Info about the interface */
+ u8 link_active;
+ u16 link_speed;
+ u16 link_duplex;
+ u32 smartspeed;
+ u32 fc_setting;
+
struct em_int_delay_info tx_int_delay;
struct em_int_delay_info tx_abs_int_delay;
struct em_int_delay_info rx_int_delay;
diff --git a/sys/dev/e1000/if_lem.c b/sys/dev/e1000/if_lem.c
index 846d6bf..21686e6 100644
--- a/sys/dev/e1000/if_lem.c
+++ b/sys/dev/e1000/if_lem.c
@@ -51,9 +51,7 @@
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
-#if __FreeBSD_version >= 700029
#include <sys/eventhandler.h>
-#endif
#include <machine/bus.h>
#include <machine/resource.h>
@@ -86,8 +84,7 @@
/*********************************************************************
* Legacy Em Driver version:
*********************************************************************/
-char lem_driver_version[] = "1.0.2";
-
+char lem_driver_version[] = "1.0.3";
/*********************************************************************
* PCI Device ID Table
@@ -209,11 +206,9 @@ static void lem_disable_promisc(struct adapter *);
static void lem_set_multi(struct adapter *);
static void lem_update_link_status(struct adapter *);
static int lem_get_buf(struct adapter *, int);
-#if __FreeBSD_version >= 700029
static void lem_register_vlan(void *, struct ifnet *, u16);
static void lem_unregister_vlan(void *, struct ifnet *, u16);
static void lem_setup_vlan_hw_support(struct adapter *);
-#endif
static int lem_xmit(struct adapter *, struct mbuf **);
static void lem_smartspeed(struct adapter *);
static int lem_82547_fifo_workaround(struct adapter *, int);
@@ -231,6 +226,8 @@ static u32 lem_fill_descriptors (bus_addr_t address, u32 length,
static int lem_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
static void lem_add_int_delay_sysctl(struct adapter *, const char *,
const char *, struct em_int_delay_info *, int, int);
+static void lem_set_flow_cntrl(struct adapter *, const char *,
+ const char *, int *, int);
/* Management and WOL Support */
static void lem_init_manageability(struct adapter *);
static void lem_release_manageability(struct adapter *);
@@ -244,11 +241,7 @@ static void lem_led_func(void *, int);
#ifdef EM_LEGACY_IRQ
static void lem_intr(void *);
#else /* FAST IRQ */
-#if __FreeBSD_version < 700000
-static void lem_irq_fast(void *);
-#else
static int lem_irq_fast(void *);
-#endif
static void lem_handle_rxtx(void *context, int pending);
static void lem_handle_link(void *context, int pending);
static void lem_add_rx_process_limit(struct adapter *, const char *,
@@ -320,14 +313,6 @@ TUNABLE_INT("hw.em.rx_process_limit", &lem_rx_process_limit);
static int lem_fc_setting = e1000_fc_full;
TUNABLE_INT("hw.em.fc_setting", &lem_fc_setting);
-/*
-** Shadow VFTA table, this is needed because
-** the real vlan filter table gets cleared during
-** a soft reset and the driver needs to be able
-** to repopulate it.
-*/
-static u32 lem_shadow_vfta[EM_VFTA_SIZE];
-
/* Global used in WOL setup with multiport cards */
static int global_quad_port_a = 0;
@@ -462,6 +447,11 @@ lem_attach(device_t dev)
lem_rx_process_limit);
#endif
+ /* Sysctl for setting the interface flow control */
+ lem_set_flow_cntrl(adapter, "flow_control",
+ "max number of rx packets to process",
+ &adapter->fc_setting, lem_fc_setting);
+
/*
* Validate number of transmit and receive descriptors. It
* must not exceed hardware maximum, and must be multiple
@@ -638,13 +628,11 @@ lem_attach(device_t dev)
else
adapter->pcix_82544 = FALSE;
-#if __FreeBSD_version >= 700029
/* Register for VLAN events */
adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
lem_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
lem_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
-#endif
lem_add_hw_stats(adapter);
@@ -702,11 +690,7 @@ lem_detach(device_t dev)
INIT_DEBUGOUT("em_detach: begin");
/* Make sure VLANS are not using driver */
-#if __FreeBSD_version >= 700000
if (adapter->ifp->if_vlantrunk != NULL) {
-#else
- if (adapter->ifp->if_nvlans != 0) {
-#endif
device_printf(dev,"Vlan in use, detach first\n");
return (EBUSY);
}
@@ -730,13 +714,11 @@ lem_detach(device_t dev)
EM_TX_UNLOCK(adapter);
EM_CORE_UNLOCK(adapter);
-#if __FreeBSD_version >= 700029
/* Unregister VLAN events */
if (adapter->vlan_attach != NULL)
EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
if (adapter->vlan_detach != NULL)
EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
-#endif
ether_ifdetach(adapter->ifp);
callout_drain(&adapter->timer);
@@ -831,6 +813,19 @@ lem_start_locked(struct ifnet *ifp)
if (!adapter->link_active)
return;
+ /*
+ * Force a cleanup if number of TX descriptors
+ * available hits the threshold
+ */
+ if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
+ lem_txeof(adapter);
+ /* Now do we at least have a minimal number free? */
+ if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
+ adapter->no_tx_desc_avail1++;
+ return;
+ }
+ }
+
while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
@@ -1043,9 +1038,7 @@ lem_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
}
if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
lem_init(adapter);
-#if __FreeBSD_version >= 700000
VLAN_CAPABILITIES(ifp);
-#endif
break;
}
@@ -1135,17 +1128,8 @@ lem_init_locked(struct adapter *adapter)
/* Setup VLAN support, basic and offload if available */
E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
-#if __FreeBSD_version < 700029
- if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
- u32 ctrl;
- ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
- ctrl |= E1000_CTRL_VME;
- E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
- }
-#else
/* Use real VLAN Filter support */
lem_setup_vlan_hw_support(adapter);
-#endif
/* Set hardware offload abilities */
ifp->if_hwassist = 0;
@@ -1174,6 +1158,19 @@ lem_init_locked(struct adapter *adapter)
}
lem_initialize_receive_unit(adapter);
+ /* Use real VLAN Filter support? */
+ if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
+ if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
+ /* Use real VLAN Filter support */
+ lem_setup_vlan_hw_support(adapter);
+ else {
+ u32 ctrl;
+ ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
+ ctrl |= E1000_CTRL_VME;
+ E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
+ }
+ }
+
/* Don't lose promiscuous settings */
lem_set_promisc(adapter);
@@ -1276,7 +1273,6 @@ lem_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
* Legacy Interrupt Service routine
*
*********************************************************************/
-
static void
lem_intr(void *arg)
{
@@ -1311,7 +1307,6 @@ lem_intr(void *arg)
}
EM_TX_LOCK(adapter);
- lem_txeof(adapter);
lem_rxeof(adapter, -1, NULL);
lem_txeof(adapter);
if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
@@ -1354,8 +1349,7 @@ lem_handle_rxtx(void *context, int pending)
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- if (lem_rxeof(adapter, adapter->rx_process_limit, NULL) != 0)
- taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
+ lem_rxeof(adapter, adapter->rx_process_limit, NULL);
EM_TX_LOCK(adapter);
lem_txeof(adapter);
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
@@ -1363,7 +1357,8 @@ lem_handle_rxtx(void *context, int pending)
EM_TX_UNLOCK(adapter);
}
- lem_enable_intr(adapter);
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ lem_enable_intr(adapter);
}
/*********************************************************************
@@ -1371,13 +1366,7 @@ lem_handle_rxtx(void *context, int pending)
* Fast Legacy/MSI Combined Interrupt Service routine
*
*********************************************************************/
-#if __FreeBSD_version < 700000
-#define FILTER_STRAY
-#define FILTER_HANDLED
-static void
-#else
static int
-#endif
lem_irq_fast(void *arg)
{
struct adapter *adapter = arg;
@@ -1550,25 +1539,10 @@ lem_xmit(struct adapter *adapter, struct mbuf **m_headp)
struct mbuf *m_head;
u32 txd_upper, txd_lower, txd_used, txd_saved;
int error, nsegs, i, j, first, last = 0;
-#if __FreeBSD_version < 700000
- struct m_tag *mtag;
-#endif
+
m_head = *m_headp;
txd_upper = txd_lower = txd_used = txd_saved = 0;
- /*
- * Force a cleanup if number of TX descriptors
- * available hits the threshold
- */
- if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
- lem_txeof(adapter);
- /* Now do we at least have a minimal? */
- if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
- adapter->no_tx_desc_avail1++;
- return (ENOBUFS);
- }
- }
-
/*
** When doing checksum offload, it is critical to
** make sure the first mbuf has more than header,
@@ -1712,20 +1686,6 @@ lem_xmit(struct adapter *adapter, struct mbuf **m_headp)
else
adapter->num_tx_desc_avail -= nsegs;
- /*
- ** Handle VLAN tag, this is the
- ** biggest difference between
- ** 6.x and 7
- */
-#if __FreeBSD_version < 700000
- /* Find out if we are in vlan mode. */
- mtag = VLAN_OUTPUT_TAG(ifp, m_head);
- if (mtag != NULL) {
- ctxd->upper.fields.special =
- htole16(VLAN_TAG_VALUE(mtag));
- ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
- }
-#else /* FreeBSD 7 */
if (m_head->m_flags & M_VLANTAG) {
/* Set the vlan id. */
ctxd->upper.fields.special =
@@ -1733,7 +1693,6 @@ lem_xmit(struct adapter *adapter, struct mbuf **m_headp)
/* Tell hardware to add tag */
ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
}
-#endif
tx_buffer->m_head = m_head;
tx_buffer_mapped->map = tx_buffer->map;
@@ -2249,11 +2208,7 @@ lem_allocate_irq(struct adapter *adapter)
#ifdef EM_LEGACY_IRQ
/* We do Legacy setup */
if ((error = bus_setup_intr(dev, adapter->res[0],
-#if __FreeBSD_version > 700000
INTR_TYPE_NET | INTR_MPSAFE, NULL, lem_intr, adapter,
-#else /* 6.X */
- INTR_TYPE_NET | INTR_MPSAFE, lem_intr, adapter,
-#endif
&adapter->tag[0])) != 0) {
device_printf(dev, "Failed to register interrupt handler");
return (error);
@@ -2270,13 +2225,8 @@ lem_allocate_irq(struct adapter *adapter)
taskqueue_thread_enqueue, &adapter->tq);
taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
device_get_nameunit(adapter->dev));
-#if __FreeBSD_version < 700000
- if ((error = bus_setup_intr(dev, adapter->res[0],
- INTR_TYPE_NET | INTR_FAST, lem_irq_fast, adapter,
-#else
if ((error = bus_setup_intr(dev, adapter->res[0],
INTR_TYPE_NET, lem_irq_fast, NULL, adapter,
-#endif
&adapter->tag[0])) != 0) {
device_printf(dev, "Failed to register fast interrupt "
"handler: %d\n", error);
@@ -2362,7 +2312,7 @@ lem_hardware_init(struct adapter *adapter)
adapter->hw.fc.send_xon = TRUE;
/* Set Flow control, use the tunable location if sane */
- if ((lem_fc_setting >= 0) || (lem_fc_setting < 4))
+ if ((lem_fc_setting >= 0) && (lem_fc_setting < 4))
adapter->hw.fc.requested_mode = lem_fc_setting;
else
adapter->hw.fc.requested_mode = e1000_fc_none;
@@ -2410,14 +2360,8 @@ lem_setup_interface(device_t dev, struct adapter *adapter)
ifp->if_capabilities = ifp->if_capenable = 0;
if (adapter->hw.mac.type >= e1000_82543) {
- int version_cap;
-#if __FreeBSD_version < 700000
- version_cap = IFCAP_HWCSUM;
-#else
- version_cap = IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
-#endif
- ifp->if_capabilities |= version_cap;
- ifp->if_capenable |= version_cap;
+ ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
+ ifp->if_capenable |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
}
/*
@@ -2427,6 +2371,16 @@ lem_setup_interface(device_t dev, struct adapter *adapter)
ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
+ /*
+ ** Don't turn this on by default: if vlans are
+ ** created on another pseudo device (e.g. lagg)
+ ** then vlan events are not passed through, breaking
+ ** operation, but with HW FILTER off it works. If
+ ** using vlans directly on the em driver you can
+ ** enable this and get full hardware tag filtering.
+ */
+ ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
+
#ifdef DEVICE_POLLING
ifp->if_capabilities |= IFCAP_POLLING;
#endif
@@ -2551,11 +2505,7 @@ lem_dma_malloc(struct adapter *adapter, bus_size_t size,
{
int error;
-#if __FreeBSD_version >= 700000
error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
-#else
- error = bus_dma_tag_create(NULL, /* parent */
-#endif
EM_DBA_ALIGN, 0, /* alignment, bounds */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
@@ -2640,21 +2590,17 @@ lem_allocate_transmit_structures(struct adapter *adapter)
/*
* Create DMA tags for tx descriptors
*/
-#if __FreeBSD_version >= 700000
if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
-#else
- if ((error = bus_dma_tag_create(NULL, /* parent */
-#endif
1, 0, /* alignment, bounds */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
- EM_TSO_SIZE, /* maxsize */
+ MCLBYTES * EM_MAX_SCATTER, /* maxsize */
EM_MAX_SCATTER, /* nsegments */
- EM_TSO_SEG_SIZE, /* maxsegsize */
+ MCLBYTES, /* maxsegsize */
0, /* flags */
- NULL, /* lockfunc */
- NULL, /* lockarg */
+ NULL, /* lockfunc */
+ NULL, /* lockarg */
&adapter->txtag)) != 0) {
device_printf(dev, "Unable to allocate TX DMA tag\n");
goto fail;
@@ -3072,23 +3018,20 @@ lem_txeof(struct adapter *adapter)
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
adapter->next_tx_to_clean = first;
+ adapter->num_tx_desc_avail = num_avail;
/*
* If we have enough room, clear IFF_DRV_OACTIVE to
* tell the stack that it is OK to send packets.
* If there are no pending descriptors, clear the watchdog.
*/
- if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
+ if (adapter->num_tx_desc_avail > EM_TX_CLEANUP_THRESHOLD) {
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- if (num_avail == adapter->num_tx_desc) {
+ if (adapter->num_tx_desc_avail == adapter->num_tx_desc) {
adapter->watchdog_check = FALSE;
- adapter->num_tx_desc_avail = num_avail;
return;
}
}
-
- adapter->num_tx_desc_avail = num_avail;
- return;
}
/*********************************************************************
@@ -3185,11 +3128,7 @@ lem_allocate_receive_structures(struct adapter *adapter)
return (ENOMEM);
}
-#if __FreeBSD_version >= 700000
error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
-#else
- error = bus_dma_tag_create(NULL, /* parent */
-#endif
1, 0, /* alignment, bounds */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
@@ -3459,7 +3398,7 @@ lem_rxeof(struct adapter *adapter, int count, int *done)
{
struct ifnet *ifp = adapter->ifp;;
struct mbuf *mp;
- u8 status, accept_frame = 0, eop = 0;
+ u8 status = 0, accept_frame = 0, eop = 0;
u16 len, desc_len, prev_len_adj;
int i, rx_sent = 0;
struct e1000_rx_desc *current_desc;
@@ -3477,11 +3416,13 @@ lem_rxeof(struct adapter *adapter, int count, int *done)
return (FALSE);
}
- while ((current_desc->status & E1000_RXD_STAT_DD) &&
- (count != 0) &&
- (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ while (count != 0 && ifp->if_drv_flags & IFF_DRV_RUNNING) {
struct mbuf *m = NULL;
+ status = current_desc->status;
+ if ((status & E1000_RXD_STAT_DD) == 0)
+ break;
+
mp = adapter->rx_buffer_area[i].m_head;
/*
* Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
@@ -3493,7 +3434,6 @@ lem_rxeof(struct adapter *adapter, int count, int *done)
accept_frame = 1;
prev_len_adj = 0;
desc_len = le16toh(current_desc->length);
- status = current_desc->status;
if (status & E1000_RXD_STAT_EOP) {
count--;
eop = 1;
@@ -3571,16 +3511,10 @@ lem_rxeof(struct adapter *adapter, int count, int *done)
goto skip;
#endif
if (status & E1000_RXD_STAT_VP) {
-#if __FreeBSD_version < 700000
- VLAN_INPUT_TAG_NEW(ifp, adapter->fmp,
- (le16toh(current_desc->special) &
- E1000_RXD_SPC_VLAN_MASK));
-#else
adapter->fmp->m_pkthdr.ether_vtag =
(le16toh(current_desc->special) &
E1000_RXD_SPC_VLAN_MASK);
adapter->fmp->m_flags |= M_VLANTAG;
-#endif
}
#ifndef __NO_STRICT_ALIGNMENT
skip:
@@ -3636,7 +3570,7 @@ discard:
if (done != NULL)
*done = rx_sent;
EM_RX_UNLOCK(adapter);
- return (current_desc->status & E1000_RXD_STAT_DD);
+ return ((status & E1000_RXD_STAT_DD) ? TRUE : FALSE);
}
#ifndef __NO_STRICT_ALIGNMENT
@@ -3728,7 +3662,6 @@ lem_receive_checksum(struct adapter *adapter,
}
}
-#if __FreeBSD_version >= 700029
/*
* This routine is run via an vlan
* config EVENT
@@ -3745,12 +3678,15 @@ lem_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
if ((vtag == 0) || (vtag > 4095)) /* Invalid ID */
return;
+ EM_CORE_LOCK(adapter);
index = (vtag >> 5) & 0x7F;
bit = vtag & 0x1F;
- lem_shadow_vfta[index] |= (1 << bit);
+ adapter->shadow_vfta[index] |= (1 << bit);
++adapter->num_vlans;
/* Re-init to load the changes */
- lem_init(adapter);
+ if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
+ lem_init_locked(adapter);
+ EM_CORE_UNLOCK(adapter);
}
/*
@@ -3769,12 +3705,15 @@ lem_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
if ((vtag == 0) || (vtag > 4095)) /* Invalid */
return;
+ EM_CORE_LOCK(adapter);
index = (vtag >> 5) & 0x7F;
bit = vtag & 0x1F;
- lem_shadow_vfta[index] &= ~(1 << bit);
+ adapter->shadow_vfta[index] &= ~(1 << bit);
--adapter->num_vlans;
/* Re-init to load the changes */
- lem_init(adapter);
+ if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
+ lem_init_locked(adapter);
+ EM_CORE_UNLOCK(adapter);
}
static void
@@ -3797,9 +3736,9 @@ lem_setup_vlan_hw_support(struct adapter *adapter)
** we need to repopulate it now.
*/
for (int i = 0; i < EM_VFTA_SIZE; i++)
- if (lem_shadow_vfta[i] != 0)
+ if (adapter->shadow_vfta[i] != 0)
E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
- i, lem_shadow_vfta[i]);
+ i, adapter->shadow_vfta[i]);
reg = E1000_READ_REG(hw, E1000_CTRL);
reg |= E1000_CTRL_VME;
@@ -3815,7 +3754,6 @@ lem_setup_vlan_hw_support(struct adapter *adapter)
E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
adapter->max_frame_size + VLAN_TAG_SIZE);
}
-#endif
static void
lem_enable_intr(struct adapter *adapter)
@@ -4661,6 +4599,16 @@ lem_add_int_delay_sysctl(struct adapter *adapter, const char *name,
info, 0, lem_sysctl_int_delay, "I", description);
}
+static void
+lem_set_flow_cntrl(struct adapter *adapter, const char *name,
+ const char *description, int *limit, int value)
+{
+ *limit = value;
+ SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
+ OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
+}
+
#ifndef EM_LEGACY_IRQ
static void
lem_add_rx_process_limit(struct adapter *adapter, const char *name,
@@ -4672,5 +4620,3 @@ lem_add_rx_process_limit(struct adapter *adapter, const char *name,
OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
}
#endif
-
-
diff --git a/sys/dev/e1000/if_lem.h b/sys/dev/e1000/if_lem.h
index 2f76aa8..e866d07e 100644
--- a/sys/dev/e1000/if_lem.h
+++ b/sys/dev/e1000/if_lem.h
@@ -328,11 +328,9 @@ struct adapter {
struct task tx_task;
struct taskqueue *tq; /* private task queue */
-#if __FreeBSD_version >= 700029
eventhandler_tag vlan_attach;
eventhandler_tag vlan_detach;
u32 num_vlans;
-#endif
/* Management and WOL features */
u32 wol;
@@ -341,11 +339,22 @@ struct adapter {
/* Multicast array memory */
u8 *mta;
- /* Info about the board itself */
+
+ /*
+ ** Shadow VFTA table, this is needed because
+ ** the real vlan filter table gets cleared during
+ ** a soft reset and the driver needs to be able
+ ** to repopulate it.
+ */
+ u32 shadow_vfta[EM_VFTA_SIZE];
+
+ /* Info about the interface */
uint8_t link_active;
uint16_t link_speed;
uint16_t link_duplex;
uint32_t smartspeed;
+ uint32_t fc_setting;
+
struct em_int_delay_info tx_int_delay;
struct em_int_delay_info tx_abs_int_delay;
struct em_int_delay_info rx_int_delay;
@@ -407,6 +416,9 @@ struct adapter {
unsigned long no_tx_dma_setup;
unsigned long watchdog_events;
unsigned long rx_overruns;
+ unsigned long rx_irq;
+ unsigned long tx_irq;
+ unsigned long link_irq;
/* 82547 workaround */
uint32_t tx_fifo_size;
diff --git a/sys/dev/mii/brgphy.c b/sys/dev/mii/brgphy.c
index 9f915df..454ee22 100644
--- a/sys/dev/mii/brgphy.c
+++ b/sys/dev/mii/brgphy.c
@@ -141,6 +141,7 @@ static const struct mii_phydesc brgphys[] = {
MII_PHY_DESC(xxBROADCOM_ALT1, BCM5709C),
MII_PHY_DESC(xxBROADCOM_ALT1, BCM5761),
MII_PHY_DESC(xxBROADCOM_ALT1, BCM5709S),
+ MII_PHY_DESC(xxBROADCOM_ALT2, BCM5717C),
MII_PHY_DESC(BROADCOM2, BCM5906),
MII_PHY_END
};
@@ -253,6 +254,9 @@ brgphy_attach(device_t dev)
break;
}
break;
+ case MII_OUI_xxBROADCOM_ALT2:
+ /* No special handling yet. */
+ break;
default:
device_printf(dev, "Unrecognized OUI for PHY!\n");
}
@@ -1011,6 +1015,7 @@ brgphy_reset(struct mii_softc *sc)
}
break;
case MII_OUI_xxBROADCOM_ALT1:
+ case MII_OUI_xxBROADCOM_ALT2:
break;
}
diff --git a/sys/dev/mii/e1000phy.c b/sys/dev/mii/e1000phy.c
index e763c21..f49f7c4 100644
--- a/sys/dev/mii/e1000phy.c
+++ b/sys/dev/mii/e1000phy.c
@@ -206,7 +206,7 @@ e1000phy_reset(struct mii_softc *sc)
reg &= ~E1000_SCR_MODE_MASK;
reg |= E1000_SCR_MODE_1000BX;
PHY_WRITE(sc, E1000_SCR, reg);
- if ((sc->mii_flags & MIIF_MACPRIV0) != 0) {
+ if ((sc->mii_flags & MIIF_PHYPRIV0) != 0) {
/* Set SIGDET polarity low for SFP module. */
PHY_WRITE(sc, E1000_EADR, 1);
reg = PHY_READ(sc, E1000_SCR);
diff --git a/sys/dev/mii/mii_physubr.c b/sys/dev/mii/mii_physubr.c
index 015d53c..d1cc4f5 100644
--- a/sys/dev/mii/mii_physubr.c
+++ b/sys/dev/mii/mii_physubr.c
@@ -107,8 +107,8 @@ mii_phy_setmedia(struct mii_softc *sc)
if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
if ((PHY_READ(sc, MII_BMCR) & BMCR_AUTOEN) == 0 ||
- (sc->mii_flags & MIIF_FORCEANEG))
- (void) mii_phy_auto(sc);
+ (sc->mii_flags & MIIF_FORCEANEG) != 0)
+ (void)mii_phy_auto(sc);
return;
}
@@ -124,64 +124,59 @@ mii_phy_setmedia(struct mii_softc *sc)
bmcr = mii_media_table[ife->ifm_data].mm_bmcr;
gtcr = mii_media_table[ife->ifm_data].mm_gtcr;
- if (mii->mii_media.ifm_media & IFM_ETH_MASTER) {
+ if ((mii->mii_media.ifm_media & IFM_ETH_MASTER) != 0) {
switch (IFM_SUBTYPE(ife->ifm_media)) {
case IFM_1000_T:
- gtcr |= GTCR_MAN_MS|GTCR_ADV_MS;
+ gtcr |= GTCR_MAN_MS | GTCR_ADV_MS;
break;
default:
- panic("mii_phy_setmedia: MASTER on wrong media");
+ printf("mii_phy_setmedia: MASTER on wrong media\n");
}
}
- if (ife->ifm_media & IFM_LOOP)
+ if ((ife->ifm_media & IFM_LOOP) != 0)
bmcr |= BMCR_LOOP;
PHY_WRITE(sc, MII_ANAR, anar);
PHY_WRITE(sc, MII_BMCR, bmcr);
- if (sc->mii_flags & MIIF_HAVE_GTCR)
+ if ((sc->mii_flags & MIIF_HAVE_GTCR) != 0)
PHY_WRITE(sc, MII_100T2CR, gtcr);
}
int
mii_phy_auto(struct mii_softc *sc)
{
+ int anar, gtcr;
/*
* Check for 1000BASE-X. Autonegotiation is a bit
* different on such devices.
*/
- if (sc->mii_flags & MIIF_IS_1000X) {
- uint16_t anar = 0;
-
- if (sc->mii_extcapabilities & EXTSR_1000XFDX)
+ if ((sc->mii_flags & MIIF_IS_1000X) != 0) {
+ anar = 0;
+ if ((sc->mii_extcapabilities & EXTSR_1000XFDX) != 0)
anar |= ANAR_X_FD;
- if (sc->mii_extcapabilities & EXTSR_1000XHDX)
+ if ((sc->mii_extcapabilities & EXTSR_1000XHDX) != 0)
anar |= ANAR_X_HD;
- if (sc->mii_flags & MIIF_DOPAUSE) {
+ if ((sc->mii_flags & MIIF_DOPAUSE) != 0) {
/* XXX Asymmetric vs. symmetric? */
anar |= ANLPAR_X_PAUSE_TOWARDS;
}
-
PHY_WRITE(sc, MII_ANAR, anar);
} else {
- uint16_t anar;
-
anar = BMSR_MEDIA_TO_ANAR(sc->mii_capabilities) |
ANAR_CSMA;
- if (sc->mii_flags & MIIF_DOPAUSE)
+ if ((sc->mii_flags & MIIF_DOPAUSE) != 0)
anar |= ANAR_FC;
PHY_WRITE(sc, MII_ANAR, anar);
- if (sc->mii_flags & MIIF_HAVE_GTCR) {
- uint16_t gtcr = 0;
-
- if (sc->mii_extcapabilities & EXTSR_1000TFDX)
+ if ((sc->mii_flags & MIIF_HAVE_GTCR) != 0) {
+ gtcr = 0;
+ if ((sc->mii_extcapabilities & EXTSR_1000TFDX) != 0)
gtcr |= GTCR_ADV_1000TFDX;
- if (sc->mii_extcapabilities & EXTSR_1000THDX)
+ if ((sc->mii_extcapabilities & EXTSR_1000THDX) != 0)
gtcr |= GTCR_ADV_1000THDX;
-
PHY_WRITE(sc, MII_100T2CR, gtcr);
}
}
@@ -213,7 +208,7 @@ mii_phy_tick(struct mii_softc *sc)
/* Read the status register twice; BMSR_LINK is latch-low. */
reg = PHY_READ(sc, MII_BMSR) | PHY_READ(sc, MII_BMSR);
- if (reg & BMSR_LINK) {
+ if ((reg & BMSR_LINK) != 0) {
sc->mii_ticks = 0; /* reset autonegotiation timer. */
/* See above. */
return (0);
@@ -243,7 +238,7 @@ mii_phy_reset(struct mii_softc *sc)
struct ifmedia_entry *ife = sc->mii_pdata->mii_media.ifm_cur;
int reg, i;
- if (sc->mii_flags & MIIF_NOISOLATE)
+ if ((sc->mii_flags & MIIF_NOISOLATE) != 0)
reg = BMCR_RESET;
else
reg = BMCR_RESET | BMCR_ISO;
@@ -303,7 +298,10 @@ mii_phy_add_media(struct mii_softc *sc)
return;
}
- /* Set aneg timer for 10/100 media. Gigabit media handled below. */
+ /*
+ * Set the autonegotiation timer for 10/100 media. Gigabit media is
+ * handled below.
+ */
sc->mii_anegticks = MII_ANEGTICKS;
#define ADD(m, c) ifmedia_add(&mii->mii_media, (m), (c), NULL)
@@ -318,55 +316,54 @@ mii_phy_add_media(struct mii_softc *sc)
* HomePNA PHYs. And there is really only one media type
* that is supported.
*/
- if (sc->mii_flags & MIIF_IS_HPNA) {
- if (sc->mii_capabilities & BMSR_10THDX) {
+ if ((sc->mii_flags & MIIF_IS_HPNA) != 0) {
+ if ((sc->mii_capabilities & BMSR_10THDX) != 0) {
ADD(IFM_MAKEWORD(IFM_ETHER, IFM_HPNA_1, 0,
- sc->mii_inst),
- MII_MEDIA_10_T);
+ sc->mii_inst), MII_MEDIA_10_T);
PRINT("HomePNA1");
}
return;
}
- if (sc->mii_capabilities & BMSR_10THDX) {
+ if ((sc->mii_capabilities & BMSR_10THDX) != 0) {
ADD(IFM_MAKEWORD(IFM_ETHER, IFM_10_T, 0, sc->mii_inst),
MII_MEDIA_10_T);
PRINT("10baseT");
}
- if (sc->mii_capabilities & BMSR_10TFDX) {
+ if ((sc->mii_capabilities & BMSR_10TFDX) != 0) {
ADD(IFM_MAKEWORD(IFM_ETHER, IFM_10_T, IFM_FDX, sc->mii_inst),
MII_MEDIA_10_T_FDX);
PRINT("10baseT-FDX");
}
- if (sc->mii_capabilities & BMSR_100TXHDX) {
+ if ((sc->mii_capabilities & BMSR_100TXHDX) != 0) {
ADD(IFM_MAKEWORD(IFM_ETHER, IFM_100_TX, 0, sc->mii_inst),
MII_MEDIA_100_TX);
PRINT("100baseTX");
}
- if (sc->mii_capabilities & BMSR_100TXFDX) {
+ if ((sc->mii_capabilities & BMSR_100TXFDX) != 0) {
ADD(IFM_MAKEWORD(IFM_ETHER, IFM_100_TX, IFM_FDX, sc->mii_inst),
MII_MEDIA_100_TX_FDX);
PRINT("100baseTX-FDX");
}
- if (sc->mii_capabilities & BMSR_100T4) {
+ if ((sc->mii_capabilities & BMSR_100T4) != 0) {
ADD(IFM_MAKEWORD(IFM_ETHER, IFM_100_T4, 0, sc->mii_inst),
MII_MEDIA_100_T4);
PRINT("100baseT4");
}
- if (sc->mii_extcapabilities & EXTSR_MEDIAMASK) {
+ if ((sc->mii_extcapabilities & EXTSR_MEDIAMASK) != 0) {
/*
* XXX Right now only handle 1000SX and 1000TX. Need
- * XXX to handle 1000LX and 1000CX some how.
+ * XXX to handle 1000LX and 1000CX somehow.
*/
- if (sc->mii_extcapabilities & EXTSR_1000XHDX) {
+ if ((sc->mii_extcapabilities & EXTSR_1000XHDX) != 0) {
sc->mii_anegticks = MII_ANEGTICKS_GIGE;
sc->mii_flags |= MIIF_IS_1000X;
ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_SX, 0,
sc->mii_inst), MII_MEDIA_1000_X);
PRINT("1000baseSX");
}
- if (sc->mii_extcapabilities & EXTSR_1000XFDX) {
+ if ((sc->mii_extcapabilities & EXTSR_1000XFDX) != 0) {
sc->mii_anegticks = MII_ANEGTICKS_GIGE;
sc->mii_flags |= MIIF_IS_1000X;
ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_SX, IFM_FDX,
@@ -382,7 +379,7 @@ mii_phy_add_media(struct mii_softc *sc)
*
* All 1000baseT PHYs have a 1000baseT control register.
*/
- if (sc->mii_extcapabilities & EXTSR_1000THDX) {
+ if ((sc->mii_extcapabilities & EXTSR_1000THDX) != 0) {
sc->mii_anegticks = MII_ANEGTICKS_GIGE;
sc->mii_flags |= MIIF_HAVE_GTCR;
mii->mii_media.ifm_mask |= IFM_ETH_MASTER;
@@ -390,7 +387,7 @@ mii_phy_add_media(struct mii_softc *sc)
sc->mii_inst), MII_MEDIA_1000_T);
PRINT("1000baseT");
}
- if (sc->mii_extcapabilities & EXTSR_1000TFDX) {
+ if ((sc->mii_extcapabilities & EXTSR_1000TFDX) != 0) {
sc->mii_anegticks = MII_ANEGTICKS_GIGE;
sc->mii_flags |= MIIF_HAVE_GTCR;
mii->mii_media.ifm_mask |= IFM_ETH_MASTER;
@@ -400,7 +397,7 @@ mii_phy_add_media(struct mii_softc *sc)
}
}
- if (sc->mii_capabilities & BMSR_ANEG) {
+ if ((sc->mii_capabilities & BMSR_ANEG) != 0) {
ADD(IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, sc->mii_inst),
MII_NMEDIA); /* intentionally invalid index */
PRINT("auto");
@@ -418,8 +415,7 @@ mii_phy_detach(device_t dev)
mii_phy_down(sc);
sc->mii_dev = NULL;
LIST_REMOVE(sc, mii_list);
-
- return(0);
+ return (0);
}
const struct mii_phydesc *
@@ -452,6 +448,5 @@ mii_phy_dev_probe(device_t dev, const struct mii_phydesc *mpd, int mrv)
device_set_desc(dev, mpd->mpd_name);
return (mrv);
}
-
return (ENXIO);
}
diff --git a/sys/dev/mii/miidevs b/sys/dev/mii/miidevs
index e0ea555..34c5f94 100644
--- a/sys/dev/mii/miidevs
+++ b/sys/dev/mii/miidevs
@@ -81,6 +81,7 @@ oui xxINTEL 0x00f800 Intel
oui xxALTIMA 0x000895 Altima Communications
oui xxBROADCOM 0x000818 Broadcom Corporation
oui xxBROADCOM_ALT1 0x0050ef Broadcom Corporation
+oui xxBROADCOM_ALT2 0x00d897 Broadcom Corporation
oui xxICS 0x00057d Integrated Circuit Systems
oui xxSEEQ 0x0005be Seeq
oui xxSIS 0x000760 Silicon Integrated Systems
@@ -153,6 +154,7 @@ model xxBROADCOM_ALT1 BCM5784 0x003a BCM5784 10/100/1000baseTX PHY
model xxBROADCOM_ALT1 BCM5709C 0x003c BCM5709C 10/100/1000baseTX PHY
model xxBROADCOM_ALT1 BCM5761 0x003d BCM5761 10/100/1000baseTX PHY
model xxBROADCOM_ALT1 BCM5709S 0x003f BCM5709S 1000/2500baseSX PHY
+model xxBROADCOM_ALT2 BCM5717C 0x0020 BCM5717C 10/100/1000baseTX PHY
model BROADCOM2 BCM5906 0x0004 BCM5906 10/100baseTX PHY
/* Cicada Semiconductor PHYs (now owned by Vitesse?) */
diff --git a/sys/dev/mii/miivar.h b/sys/dev/mii/miivar.h
index 15a3f3f..6783cf5 100644
--- a/sys/dev/mii/miivar.h
+++ b/sys/dev/mii/miivar.h
@@ -148,7 +148,7 @@ typedef struct mii_softc mii_softc_t;
/*
* Special `locators' passed to mii_attach(). If one of these is not
* an `any' value, we look for *that* PHY and configure it. If both
- * are not `any', that is an error, and mii_attach() will panic.
+ * are not `any', that is an error, and mii_attach() will fail.
*/
#define MII_OFFSET_ANY -1
#define MII_PHY_ANY -1
diff --git a/sys/dev/usb/usb_device.c b/sys/dev/usb/usb_device.c
index 8887fc1..495bd82 100644
--- a/sys/dev/usb/usb_device.c
+++ b/sys/dev/usb/usb_device.c
@@ -88,7 +88,7 @@ static void usb_init_endpoint(struct usb_device *, uint8_t,
struct usb_endpoint *);
static void usb_unconfigure(struct usb_device *, uint8_t);
static void usb_detach_device_sub(struct usb_device *, device_t *,
- uint8_t);
+ char **, uint8_t);
static uint8_t usb_probe_and_attach_sub(struct usb_device *,
struct usb_attach_arg *);
static void usb_init_attach_arg(struct usb_device *,
@@ -1035,9 +1035,10 @@ usb_reset_iface_endpoints(struct usb_device *udev, uint8_t iface_index)
*------------------------------------------------------------------------*/
static void
usb_detach_device_sub(struct usb_device *udev, device_t *ppdev,
- uint8_t flag)
+ char **ppnpinfo, uint8_t flag)
{
device_t dev;
+ char *pnpinfo;
int err;
dev = *ppdev;
@@ -1069,11 +1070,17 @@ usb_detach_device_sub(struct usb_device *udev, device_t *ppdev,
goto error;
}
}
+
+ pnpinfo = *ppnpinfo;
+ if (pnpinfo != NULL) {
+ *ppnpinfo = NULL;
+ free(pnpinfo, M_USBDEV);
+ }
return;
error:
/* Detach is not allowed to fail in the USB world */
- panic("A USB driver would not detach\n");
+ panic("usb_detach_device_sub: A USB driver would not detach\n");
}
/*------------------------------------------------------------------------*
@@ -1122,7 +1129,8 @@ usb_detach_device(struct usb_device *udev, uint8_t iface_index,
/* looks like the end of the USB interfaces */
break;
}
- usb_detach_device_sub(udev, &iface->subdev, flag);
+ usb_detach_device_sub(udev, &iface->subdev,
+ &iface->pnpinfo, flag);
}
}
@@ -2714,3 +2722,37 @@ usbd_enum_is_locked(struct usb_device *udev)
{
return (sx_xlocked(&udev->enum_sx));
}
+
+/*
+ * The following function is used to set the per-interface specific
+ * plug and play information. The string referred to by the pnpinfo
+ * argument can safely be freed after calling this function. The
+ * pnpinfo of an interface will be reset at device detach or when
+ * passing a NULL argument to this function. This function
+ * returns zero on success, else a USB_ERR_XXX failure code.
+ */
+
+usb_error_t
+usbd_set_pnpinfo(struct usb_device *udev, uint8_t iface_index, const char *pnpinfo)
+{
+ struct usb_interface *iface;
+
+ iface = usbd_get_iface(udev, iface_index);
+ if (iface == NULL)
+ return (USB_ERR_INVAL);
+
+ if (iface->pnpinfo != NULL) {
+ free(iface->pnpinfo, M_USBDEV);
+ iface->pnpinfo = NULL;
+ }
+
+ if (pnpinfo == NULL || pnpinfo[0] == 0)
+ return (0); /* success */
+
+ iface->pnpinfo = strdup(pnpinfo, M_USBDEV);
+ if (iface->pnpinfo == NULL)
+ return (USB_ERR_NOMEM);
+
+ return (0); /* success */
+}
+
diff --git a/sys/dev/usb/usb_hub.c b/sys/dev/usb/usb_hub.c
index afeadaa..b2870a3 100644
--- a/sys/dev/usb/usb_hub.c
+++ b/sys/dev/usb/usb_hub.c
@@ -1330,7 +1330,7 @@ uhub_child_pnpinfo_string(device_t parent, device_t child,
"devclass=0x%02x devsubclass=0x%02x "
"sernum=\"%s\" "
"release=0x%04x "
- "intclass=0x%02x intsubclass=0x%02x",
+ "intclass=0x%02x intsubclass=0x%02x" "%s%s",
UGETW(res.udev->ddesc.idVendor),
UGETW(res.udev->ddesc.idProduct),
res.udev->ddesc.bDeviceClass,
@@ -1338,7 +1338,9 @@ uhub_child_pnpinfo_string(device_t parent, device_t child,
usb_get_serial(res.udev),
UGETW(res.udev->ddesc.bcdDevice),
iface->idesc->bInterfaceClass,
- iface->idesc->bInterfaceSubClass);
+ iface->idesc->bInterfaceSubClass,
+ iface->pnpinfo ? " " : "",
+ iface->pnpinfo ? iface->pnpinfo : "");
} else {
if (buflen) {
buf[0] = '\0';
diff --git a/sys/dev/usb/usbdi.h b/sys/dev/usb/usbdi.h
index 1c2d412..6d4a911 100644
--- a/sys/dev/usb/usbdi.h
+++ b/sys/dev/usb/usbdi.h
@@ -171,6 +171,7 @@ struct usb_interface {
struct usb_host_interface *cur_altsetting;
struct usb_device *linux_udev;
void *bsd_priv_sc; /* device specific information */
+ char *pnpinfo; /* additional PnP-info for this interface */
uint8_t num_altsetting; /* number of alternate settings */
uint8_t bsd_iface_index;
};
@@ -444,6 +445,8 @@ enum usb_hc_mode usbd_get_mode(struct usb_device *udev);
enum usb_dev_speed usbd_get_speed(struct usb_device *udev);
void device_set_usb_desc(device_t dev);
void usb_pause_mtx(struct mtx *mtx, int _ticks);
+usb_error_t usbd_set_pnpinfo(struct usb_device *udev,
+ uint8_t iface_index, const char *pnpinfo);
const struct usb_device_id *usbd_lookup_id_by_info(
const struct usb_device_id *id, usb_size_t sizeof_id,
diff --git a/sys/dev/xen/blkback/blkback.c b/sys/dev/xen/blkback/blkback.c
index 72087f5..458149d 100644
--- a/sys/dev/xen/blkback/blkback.c
+++ b/sys/dev/xen/blkback/blkback.c
@@ -2827,8 +2827,11 @@ xbb_detach(device_t dev)
DPRINTF("\n");
- taskqueue_free(xbb->io_taskqueue);
- devstat_remove_entry(xbb->xbb_stats);
+ if (xbb->io_taskqueue != NULL)
+ taskqueue_free(xbb->io_taskqueue);
+
+ if (xbb->xbb_stats != NULL)
+ devstat_remove_entry(xbb->xbb_stats);
xbb_close_backend(xbb);
xbb_free_communication_mem(xbb);
OpenPOWER on IntegriCloud