path: root/sys/dev
author      Luiz Otavio O Souza <luiz@netgate.com>    2016-02-13 15:42:03 -0600
committer   Luiz Otavio O Souza <luiz@netgate.com>    2016-02-13 15:42:03 -0600
commit      c5df3d0f11ca3b170da878cdd4782fc603c15460 (patch)
tree        8b6e061b2541f563a4393ef37dd69d20a350681d /sys/dev
parent      3469b6e4cbedf6789ee84a4ad5ce13a9286c971e (diff)
parent      a3f0577ed04b6c469cd8c282522893364bebcf3f (diff)
download    FreeBSD-src-c5df3d0f11ca3b170da878cdd4782fc603c15460.zip
            FreeBSD-src-c5df3d0f11ca3b170da878cdd4782fc603c15460.tar.gz
Merge remote-tracking branch 'origin/stable/10' into devel
Diffstat (limited to 'sys/dev')
-rw-r--r--  sys/dev/ixgbe/if_ix.c                              | 728
-rw-r--r--  sys/dev/ixgbe/if_ixv.c                             | 134
-rw-r--r--  sys/dev/ixgbe/ix_txrx.c                            | 123
-rw-r--r--  sys/dev/ixgbe/ixgbe.h                              |  37
-rw-r--r--  sys/dev/ixgbe/ixgbe_82598.c                        |   4
-rw-r--r--  sys/dev/ixgbe/ixgbe_82599.c                        |  61
-rw-r--r--  sys/dev/ixgbe/ixgbe_api.c                          |   6
-rw-r--r--  sys/dev/ixgbe/ixgbe_api.h                          |   9
-rw-r--r--  sys/dev/ixgbe/ixgbe_common.c                       |  69
-rw-r--r--  sys/dev/ixgbe/ixgbe_dcb.c                          |   7
-rw-r--r--  sys/dev/ixgbe/ixgbe_osdep.c (renamed from sys/dev/ixgbe/LICENSE) | 55
-rw-r--r--  sys/dev/ixgbe/ixgbe_osdep.h                        |  67
-rw-r--r--  sys/dev/ixgbe/ixgbe_phy.c                          |  85
-rw-r--r--  sys/dev/ixgbe/ixgbe_phy.h                          |  13
-rw-r--r--  sys/dev/ixgbe/ixgbe_type.h                         |  64
-rw-r--r--  sys/dev/ixgbe/ixgbe_vf.c                           |   2
-rw-r--r--  sys/dev/ixgbe/ixgbe_x540.c                         |  44
-rw-r--r--  sys/dev/ixgbe/ixgbe_x550.c                         | 392
-rw-r--r--  sys/dev/ixgbe/ixgbe_x550.h                         |   3
-rw-r--r--  sys/dev/random/live_entropy_sources.c              |   4
-rw-r--r--  sys/dev/random/live_entropy_sources.h              |   2
-rw-r--r--  sys/dev/random/random_adaptors.c                   |   4
-rw-r--r--  sys/dev/random/random_adaptors.h                   |   4
-rw-r--r--  sys/dev/random/random_harvestq.c                   |  30
-rw-r--r--  sys/dev/random/randomdev.c                         |  23
-rw-r--r--  sys/dev/random/randomdev_soft.c                    |   6
-rw-r--r--  sys/dev/sfxge/common/efx.h                         |   8
-rw-r--r--  sys/dev/sfxge/common/efx_mcdi.c                    | 212
-rw-r--r--  sys/dev/sfxge/common/efx_mcdi.h                    |   8
-rw-r--r--  sys/dev/sfxge/common/efx_phy.c                     |  32
-rw-r--r--  sys/dev/sfxge/sfxge.c                              |  24
31 files changed, 1513 insertions, 747 deletions
diff --git a/sys/dev/ixgbe/if_ix.c b/sys/dev/ixgbe/if_ix.c
index e85c066..48ee76b 100644
--- a/sys/dev/ixgbe/if_ix.c
+++ b/sys/dev/ixgbe/if_ix.c
@@ -46,14 +46,10 @@
#endif
/*********************************************************************
- * Set this to one to display debug statistics
- *********************************************************************/
-int ixgbe_display_debug_stats = 0;
-
-/*********************************************************************
* Driver version
*********************************************************************/
-char ixgbe_driver_version[] = "3.1.0";
+char ixgbe_driver_version[] = "3.1.13-k";
+
/*********************************************************************
* PCI Device ID Table
@@ -94,9 +90,11 @@ static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
+ {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
+ {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
/* required last entry */
{0, 0, 0, 0, 0}
};
@@ -130,7 +128,7 @@ static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
static int ixgbe_media_change(struct ifnet *);
static void ixgbe_identify_hardware(struct adapter *);
static int ixgbe_allocate_pci_resources(struct adapter *);
-static void ixgbe_get_slot_info(struct ixgbe_hw *);
+static void ixgbe_get_slot_info(struct adapter *);
static int ixgbe_allocate_msix(struct adapter *);
static int ixgbe_allocate_legacy(struct adapter *);
static int ixgbe_setup_msix(struct adapter *);
@@ -141,7 +139,6 @@ static void ixgbe_config_gpie(struct adapter *);
static void ixgbe_config_dmac(struct adapter *);
static void ixgbe_config_delay_values(struct adapter *);
static void ixgbe_config_link(struct adapter *);
-static void ixgbe_check_eee_support(struct adapter *);
static void ixgbe_check_wol_support(struct adapter *);
static int ixgbe_setup_low_power_mode(struct adapter *);
static void ixgbe_rearm_queues(struct adapter *, u64);
@@ -150,6 +147,7 @@ static void ixgbe_initialize_transmit_units(struct adapter *);
static void ixgbe_initialize_receive_units(struct adapter *);
static void ixgbe_enable_rx_drop(struct adapter *);
static void ixgbe_disable_rx_drop(struct adapter *);
+static void ixgbe_initialize_rss_mapping(struct adapter *);
static void ixgbe_enable_intr(struct adapter *);
static void ixgbe_disable_intr(struct adapter *);
@@ -167,22 +165,29 @@ static void ixgbe_unregister_vlan(void *, struct ifnet *, u16);
static void ixgbe_add_device_sysctls(struct adapter *);
static void ixgbe_add_hw_stats(struct adapter *);
+static int ixgbe_set_flowcntl(struct adapter *, int);
+static int ixgbe_set_advertise(struct adapter *, int);
/* Sysctl handlers */
static void ixgbe_set_sysctl_value(struct adapter *, const char *,
- const char *, int *, int);
-static int ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS);
-static int ixgbe_set_advertise(SYSCTL_HANDLER_ARGS);
+ const char *, int *, int);
+static int ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
+static int ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
+#ifdef IXGBE_DEBUG
+static int ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
+static int ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
+#endif
static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS);
+static int ixgbe_sysctl_eee_tx_lpi_delay(SYSCTL_HANDLER_ARGS);
/* Support for pluggable optic modules */
static bool ixgbe_sfp_probe(struct adapter *);
@@ -290,6 +295,16 @@ SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
"Maximum number of sent packets to process at a time,"
"-1 means unlimited");
+/* Flow control setting, default to full */
+static int ixgbe_flow_control = ixgbe_fc_full;
+SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
+ &ixgbe_flow_control, 0, "Default flow control used for all adapters");
+
+/* Advertise Speed, default to 0 (auto) */
+static int ixgbe_advertise_speed = 0;
+SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
+ &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
+
/*
** Smart speed setting, default to on
** this only works as a compile option
@@ -317,7 +332,8 @@ SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
static int ixgbe_num_queues = 0;
TUNABLE_INT("hw.ix.num_queues", &ixgbe_num_queues);
SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
- "Number of queues to configure, 0 indicates autoconfigure");
+ "Number of queues to configure up to a maximum of 8,"
+ "0 indicates autoconfigure");
/*
** Number of TX descriptors per ring,
@@ -446,9 +462,13 @@ ixgbe_attach(device_t dev)
/* Allocate, clear, and link in our adapter structure */
adapter = device_get_softc(dev);
- adapter->dev = adapter->osdep.dev = dev;
+ adapter->dev = dev;
hw = &adapter->hw;
+#ifdef DEV_NETMAP
+ adapter->init_locked = ixgbe_init_locked;
+ adapter->stop_locked = ixgbe_stop;
+#endif
/* Core Lock Init*/
IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
@@ -531,18 +551,18 @@ ixgbe_attach(device_t dev)
adapter->sfp_probe = TRUE;
error = 0;
} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- device_printf(dev,"Unsupported SFP+ module detected!\n");
+ device_printf(dev, "Unsupported SFP+ module detected!\n");
error = EIO;
goto err_late;
} else if (error) {
- device_printf(dev,"Unable to initialize the shared code\n");
+ device_printf(dev, "Unable to initialize the shared code\n");
error = EIO;
goto err_late;
}
/* Make sure we have a good EEPROM before we read from it */
if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
- device_printf(dev,"The EEPROM Checksum Is Not Valid\n");
+ device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
error = EIO;
goto err_late;
}
@@ -552,23 +572,25 @@ ixgbe_attach(device_t dev)
case IXGBE_ERR_EEPROM_VERSION:
device_printf(dev, "This device is a pre-production adapter/"
"LOM. Please be aware there may be issues associated "
- "with your hardware.\n If you are experiencing problems "
+ "with your hardware.\nIf you are experiencing problems "
"please contact your Intel or hardware representative "
"who provided you with this hardware.\n");
break;
case IXGBE_ERR_SFP_NOT_SUPPORTED:
- device_printf(dev,"Unsupported SFP+ Module\n");
+ device_printf(dev, "Unsupported SFP+ Module\n");
error = EIO;
goto err_late;
case IXGBE_ERR_SFP_NOT_PRESENT:
- device_printf(dev,"No SFP+ Module found\n");
+ device_printf(dev, "No SFP+ Module found\n");
/* falls thru */
default:
break;
}
- /* Detect and set physical type */
- ixgbe_setup_optics(adapter);
+ /* hw.ix defaults init */
+ ixgbe_set_advertise(adapter, ixgbe_advertise_speed);
+ ixgbe_set_flowcntl(adapter, ixgbe_flow_control);
+ adapter->enable_aim = ixgbe_enable_aim;
if ((adapter->msix > 1) && (ixgbe_enable_msix))
error = ixgbe_allocate_msix(adapter);
@@ -577,6 +599,12 @@ ixgbe_attach(device_t dev)
if (error)
goto err_late;
+ /* Enable the optics for 82599 SFP+ fiber */
+ ixgbe_enable_tx_laser(hw);
+
+ /* Enable power to the phy. */
+ ixgbe_set_phy_power(hw, TRUE);
+
/* Setup OS specific network interface */
if (ixgbe_setup_interface(dev, adapter) != 0)
goto err_late;
@@ -591,11 +619,12 @@ ixgbe_attach(device_t dev)
ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
/* Check PCIE slot type/speed/width */
- ixgbe_get_slot_info(hw);
-
+ ixgbe_get_slot_info(adapter);
- /* Set an initial default flow control value */
+ /* Set an initial default flow control & dmac value */
adapter->fc = ixgbe_fc_full;
+ adapter->dmac = 0;
+ adapter->eee_enabled = 0;
#ifdef PCI_IOV
if ((hw->mac.type != ixgbe_mac_82598EB) && (adapter->msix > 1)) {
@@ -621,7 +650,6 @@ ixgbe_attach(device_t dev)
/* Check for certain supported features */
ixgbe_check_wol_support(adapter);
- ixgbe_check_eee_support(adapter);
/* Add sysctls */
ixgbe_add_device_sysctls(adapter);
@@ -682,6 +710,7 @@ ixgbe_detach(device_t dev)
}
#endif /* PCI_IOV */
+ ether_ifdetach(adapter->ifp);
/* Stop the adapter */
IXGBE_CORE_LOCK(adapter);
ixgbe_setup_low_power_mode(adapter);
@@ -721,7 +750,6 @@ ixgbe_detach(device_t dev)
if (adapter->vlan_detach != NULL)
EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
- ether_ifdetach(adapter->ifp);
callout_drain(&adapter->timer);
#ifdef DEV_NETMAP
netmap_detach(adapter->ifp);
@@ -776,10 +804,6 @@ ixgbe_suspend(device_t dev)
error = ixgbe_setup_low_power_mode(adapter);
- /* Save state and power down */
- pci_save_state(dev);
- pci_set_powerstate(dev, PCI_POWERSTATE_D3);
-
IXGBE_CORE_UNLOCK(adapter);
return (error);
@@ -797,9 +821,6 @@ ixgbe_resume(device_t dev)
IXGBE_CORE_LOCK(adapter);
- pci_set_powerstate(dev, PCI_POWERSTATE_D0);
- pci_restore_state(dev);
-
/* Read & clear WUS register */
wus = IXGBE_READ_REG(hw, IXGBE_WUS);
if (wus)
@@ -818,7 +839,6 @@ ixgbe_resume(device_t dev)
IXGBE_CORE_UNLOCK(adapter);
- INIT_DEBUGOUT("ixgbe_resume: end");
return (0);
}
@@ -862,7 +882,7 @@ ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
ifp->if_flags |= IFF_UP;
if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
ixgbe_init(adapter);
-#if defined(INET)
+#ifdef INET
if (!(ifp->if_flags & IFF_NOARP))
arp_ifinit(ifp, ifa);
#endif
@@ -920,10 +940,21 @@ ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
break;
case SIOCSIFCAP:
{
- int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
- if (mask & IFCAP_HWCSUM)
- ifp->if_capenable ^= IFCAP_HWCSUM;
+
+ int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
+ if (!mask)
+ break;
+
+ /* HW cannot turn these on/off separately */
+ if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
+ ifp->if_capenable ^= IFCAP_RXCSUM;
+ ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
+ }
+ if (mask & IFCAP_TXCSUM)
+ ifp->if_capenable ^= IFCAP_TXCSUM;
+ if (mask & IFCAP_TXCSUM_IPV6)
+ ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
if (mask & IFCAP_TSO4)
ifp->if_capenable ^= IFCAP_TSO4;
if (mask & IFCAP_TSO6)
@@ -936,6 +967,7 @@ ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
if (mask & IFCAP_VLAN_HWTSO)
ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
+
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
IXGBE_CORE_LOCK(adapter);
ixgbe_init_locked(adapter);
@@ -979,6 +1011,42 @@ ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
return (error);
}
+/*
+ * Set the various hardware offload abilities.
+ *
+ * This takes the ifnet's if_capenable flags (e.g. set by the user using
+ * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
+ * mbuf offload flags the driver will understand.
+ */
+static void
+ixgbe_set_if_hwassist(struct adapter *adapter)
+{
+ struct ifnet *ifp = adapter->ifp;
+
+ ifp->if_hwassist = 0;
+#if __FreeBSD_version >= 1000000
+ if (ifp->if_capenable & IFCAP_TSO4)
+ ifp->if_hwassist |= CSUM_IP_TSO;
+ if (ifp->if_capenable & IFCAP_TSO6)
+ ifp->if_hwassist |= CSUM_IP6_TSO;
+ if (ifp->if_capenable & IFCAP_TXCSUM)
+ ifp->if_hwassist |= (CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP |
+ CSUM_IP_SCTP);
+ if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
+ ifp->if_hwassist |= (CSUM_IP6_UDP | CSUM_IP6_TCP |
+ CSUM_IP6_SCTP);
+#else
+ if (ifp->if_capenable & IFCAP_TSO)
+ ifp->if_hwassist |= CSUM_TSO;
+ if (ifp->if_capenable & IFCAP_TXCSUM) {
+ ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
+ struct ixgbe_hw *hw = &adapter->hw;
+ if (hw->mac.type != ixgbe_mac_82598EB)
+ ifp->if_hwassist |= CSUM_SCTP;
+ }
+#endif
+}
+
/*********************************************************************
* Init entry point
*
@@ -1001,6 +1069,7 @@ ixgbe_init_locked(struct adapter *adapter)
struct rx_ring *rxr;
u32 txdctl, mhadd;
u32 rxdctl, rxctrl;
+ int err = 0;
#ifdef PCI_IOV
enum ixgbe_iov_mode mode;
#endif
@@ -1029,17 +1098,8 @@ ixgbe_init_locked(struct adapter *adapter)
ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
hw->addr_ctrl.rar_used_count = 1;
- /* Set the various hardware offload abilities */
- ifp->if_hwassist = 0;
- if (ifp->if_capenable & IFCAP_TSO)
- ifp->if_hwassist |= CSUM_TSO;
- if (ifp->if_capenable & IFCAP_TXCSUM) {
- ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
-#if __FreeBSD_version >= 800000
- if (hw->mac.type != ixgbe_mac_82598EB)
- ifp->if_hwassist |= CSUM_SCTP;
-#endif
- }
+ /* Set hardware offload abilities from ifnet flags */
+ ixgbe_set_if_hwassist(adapter);
/* Prepare transmit descriptors and buffers */
if (ixgbe_setup_transmit_structures(adapter)) {
@@ -1057,10 +1117,7 @@ ixgbe_init_locked(struct adapter *adapter)
/* Setup Multicast table */
ixgbe_set_multi(adapter);
- /*
- ** Determine the correct mbuf pool
- ** for doing jumbo frames
- */
+ /* Determine the correct mbuf pool, based on frame size */
if (adapter->max_frame_size <= MCLBYTES)
adapter->rx_mbuf_sz = MCLBYTES;
else
@@ -1196,7 +1253,7 @@ ixgbe_init_locked(struct adapter *adapter)
* need to be kick-started
*/
if (hw->phy.type == ixgbe_phy_none) {
- int err = hw->phy.ops.identify(hw);
+ err = hw->phy.ops.identify(hw);
if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
device_printf(dev,
"Unsupported SFP+ module type was detected.\n");
@@ -1208,7 +1265,14 @@ ixgbe_init_locked(struct adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
/* Configure Energy Efficient Ethernet for supported devices */
- ixgbe_setup_eee(hw, adapter->eee_enabled);
+ if (hw->mac.ops.setup_eee) {
+ err = hw->mac.ops.setup_eee(hw, adapter->eee_enabled);
+ if (err)
+ device_printf(dev, "Error setting up EEE: %d\n", err);
+ }
+
+ /* Enable power to the phy. */
+ ixgbe_set_phy_power(hw, TRUE);
/* Config/Enable Link */
ixgbe_config_link(adapter);
@@ -1278,7 +1342,7 @@ ixgbe_config_gpie(struct adapter *adapter)
/*
* Thermal Failure Detection (X540)
- * Link Detection (X557)
+ * Link Detection (X552 SFP+, X552/X557-AT)
*/
if (hw->mac.type == ixgbe_mac_X540 ||
hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
@@ -1511,7 +1575,7 @@ ixgbe_msix_que(void *arg)
/* Do AIM now? */
- if (ixgbe_enable_aim == FALSE)
+ if (adapter->enable_aim == FALSE)
goto no_calc;
/*
** Do Adaptive Interrupt Moderation:
@@ -1577,6 +1641,9 @@ ixgbe_msix_link(void *arg)
++adapter->link_irq;
+ /* Pause other interrupts */
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
+
/* First get the cause */
reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
/* Be sure the queue bits are not cleared */
@@ -1585,8 +1652,10 @@ ixgbe_msix_link(void *arg)
IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);
/* Link status change */
- if (reg_eicr & IXGBE_EICR_LSC)
+ if (reg_eicr & IXGBE_EICR_LSC) {
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
taskqueue_enqueue(adapter->tq, &adapter->link_task);
+ }
if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
#ifdef IXGBE_FDIR
@@ -1600,14 +1669,14 @@ ixgbe_msix_link(void *arg)
} else
#endif
if (reg_eicr & IXGBE_EICR_ECC) {
- device_printf(adapter->dev, "\nCRITICAL: ECC ERROR!! "
+ device_printf(adapter->dev, "CRITICAL: ECC ERROR!! "
"Please Reboot!!\n");
IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
}
/* Check for over temp condition */
if (reg_eicr & IXGBE_EICR_TS) {
- device_printf(adapter->dev, "\nCRITICAL: OVER TEMP!! "
+ device_printf(adapter->dev, "CRITICAL: OVER TEMP!! "
"PHY IS SHUT DOWN!!\n");
device_printf(adapter->dev, "System shutdown required!\n");
IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
@@ -1649,6 +1718,7 @@ ixgbe_msix_link(void *arg)
taskqueue_enqueue(adapter->tq, &adapter->phy_task);
}
+ /* Re-enable other interrupts */
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
return;
}
@@ -1742,6 +1812,7 @@ ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
** XXX: These need to use the proper media types once
** they're added.
*/
+#ifndef IFM_ETH_XTYPE
if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
switch (adapter->link_speed) {
case IXGBE_LINK_SPEED_10GB_FULL:
@@ -1767,6 +1838,33 @@ ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
break;
}
+#else
+ if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
+ switch (adapter->link_speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
+ break;
+ case IXGBE_LINK_SPEED_2_5GB_FULL:
+ ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
+ break;
+ }
+ else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4
+ || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
+ switch (adapter->link_speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
+ break;
+ case IXGBE_LINK_SPEED_2_5GB_FULL:
+ ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
+ break;
+ }
+#endif
/* If nothing is recognized... */
if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
@@ -1809,13 +1907,14 @@ ixgbe_media_change(struct ifnet * ifp)
return (EINVAL);
if (hw->phy.media_type == ixgbe_media_type_backplane)
- return (EPERM);
+ return (ENODEV);
/*
** We don't actually need to check against the supported
** media types of the adapter; ifmedia will take care of
** that for us.
*/
+#ifndef IFM_ETH_XTYPE
switch (IFM_SUBTYPE(ifm->ifm_media)) {
case IFM_AUTO:
case IFM_10G_T:
@@ -1841,6 +1940,33 @@ ixgbe_media_change(struct ifnet * ifp)
default:
goto invalid;
}
+#else
+ switch (IFM_SUBTYPE(ifm->ifm_media)) {
+ case IFM_AUTO:
+ case IFM_10G_T:
+ speed |= IXGBE_LINK_SPEED_100_FULL;
+ case IFM_10G_LRM:
+ case IFM_10G_KR:
+ case IFM_10G_LR:
+ case IFM_10G_KX4:
+ speed |= IXGBE_LINK_SPEED_1GB_FULL;
+ case IFM_10G_TWINAX:
+ speed |= IXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ case IFM_1000_T:
+ speed |= IXGBE_LINK_SPEED_100_FULL;
+ case IFM_1000_LX:
+ case IFM_1000_SX:
+ case IFM_1000_KX:
+ speed |= IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case IFM_100_TX:
+ speed |= IXGBE_LINK_SPEED_100_FULL;
+ break;
+ default:
+ goto invalid;
+ }
+#endif
hw->mac.autotry_restart = TRUE;
hw->mac.ops.setup_link(hw, speed, TRUE);
@@ -2377,7 +2503,7 @@ ixgbe_allocate_msix(struct adapter *adapter)
return (error);
}
#if __FreeBSD_version >= 800504
- bus_describe_intr(dev, que->res, que->tag, "que %d", i);
+ bus_describe_intr(dev, que->res, que->tag, "q%d", i);
#endif
que->msix = vector;
adapter->active_queues |= (u64)(1 << que->msix);
@@ -2426,8 +2552,8 @@ ixgbe_allocate_msix(struct adapter *adapter)
device_get_nameunit(adapter->dev),
cpu_id);
#else
- taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
- device_get_nameunit(adapter->dev));
+ taskqueue_start_threads(&que->tq, 1, PI_NET, "%s:q%d",
+ device_get_nameunit(adapter->dev), i);
#endif
}
@@ -2505,7 +2631,7 @@ ixgbe_setup_msix(struct adapter *adapter)
}
/* Figure out a reasonable auto config value */
- queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus;
+ queues = (mp_ncpus > (msgs - 1)) ? (msgs - 1) : mp_ncpus;
#ifdef RSS
/* If we're doing RSS, clamp at the number of RSS buckets */
@@ -2515,6 +2641,9 @@ ixgbe_setup_msix(struct adapter *adapter)
if (ixgbe_num_queues != 0)
queues = ixgbe_num_queues;
+ /* Set max queues to 8 when autoconfiguring */
+ else if ((ixgbe_num_queues == 0) && (queues > 8))
+ queues = 8;
/* reflect correct sysctl value */
ixgbe_num_queues = queues;
@@ -2551,12 +2680,12 @@ msi:
rid, adapter->msix_mem);
adapter->msix_mem = NULL;
}
- msgs = 1;
- if (pci_alloc_msi(dev, &msgs) == 0) {
- device_printf(adapter->dev,"Using an MSI interrupt\n");
+ msgs = 1;
+ if (pci_alloc_msi(dev, &msgs) == 0) {
+ device_printf(adapter->dev, "Using an MSI interrupt\n");
return (msgs);
}
- device_printf(adapter->dev,"Using a Legacy interrupt\n");
+ device_printf(adapter->dev, "Using a Legacy interrupt\n");
return (0);
}
@@ -2572,22 +2701,24 @@ ixgbe_allocate_pci_resources(struct adapter *adapter)
&rid, RF_ACTIVE);
if (!(adapter->pci_mem)) {
- device_printf(dev,"Unable to allocate bus resource: memory\n");
+ device_printf(dev, "Unable to allocate bus resource: memory\n");
return (ENXIO);
}
+ /* Save bus_space values for READ/WRITE_REG macros */
adapter->osdep.mem_bus_space_tag =
rman_get_bustag(adapter->pci_mem);
adapter->osdep.mem_bus_space_handle =
rman_get_bushandle(adapter->pci_mem);
+ /* Set hw values for shared code */
adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
+ adapter->hw.back = adapter;
- /* Legacy defaults */
+ /* Default to 1 queue if MSI-X setup fails */
adapter->num_queues = 1;
- adapter->hw.back = &adapter->osdep;
/*
- ** Now setup MSI or MSI/X, should
+ ** Now setup MSI or MSI-X, should
** return us the number of supported
** vectors. (Will be 1 for MSI)
*/
@@ -2712,13 +2843,22 @@ ixgbe_setup_interface(device_t dev, struct adapter *adapter)
*/
ifp->if_hdrlen = sizeof(struct ether_vlan_header);
- ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO | IFCAP_VLAN_HWCSUM;
- ifp->if_capabilities |= IFCAP_JUMBO_MTU;
- ifp->if_capabilities |= IFCAP_LRO;
- ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
+ /* Set capability flags */
+ ifp->if_capabilities |= IFCAP_RXCSUM
+ | IFCAP_TXCSUM
+ | IFCAP_RXCSUM_IPV6
+ | IFCAP_TXCSUM_IPV6
+ | IFCAP_TSO4
+ | IFCAP_TSO6
+ | IFCAP_LRO
+ | IFCAP_VLAN_HWTAGGING
| IFCAP_VLAN_HWTSO
+ | IFCAP_VLAN_HWCSUM
+ | IFCAP_JUMBO_MTU
| IFCAP_VLAN_MTU
| IFCAP_HWSTATS;
+
+ /* Enable the above capabilities by default */
ifp->if_capenable = ifp->if_capabilities;
/*
@@ -2738,9 +2878,10 @@ ixgbe_setup_interface(device_t dev, struct adapter *adapter)
ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
ixgbe_media_status);
+ adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
ixgbe_add_media_types(adapter);
- /* Autoselect media by default */
+ /* Set autoselect media by default */
ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
return (0);
@@ -2753,7 +2894,7 @@ ixgbe_add_media_types(struct adapter *adapter)
device_t dev = adapter->dev;
int layer;
- layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
+ layer = adapter->phy_layer;
/* Media types with matching FreeBSD media defines */
if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
@@ -2767,20 +2908,28 @@ ixgbe_add_media_types(struct adapter *adapter)
layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
- if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
+ if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
- if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR)
+ if (hw->phy.multispeed_fiber)
+ ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
+ }
+ if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
+ if (hw->phy.multispeed_fiber)
+ ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
+ } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
+ ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
- if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
- ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
- /*
- ** Other (no matching FreeBSD media type):
- ** To workaround this, we'll assign these completely
- ** inappropriate media types.
- */
+#ifdef IFM_ETH_XTYPE
+ if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
+ ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
+ if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
+ ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
+ if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
+ ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
+#else
if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
device_printf(dev, "Media supported: 10GbaseKR\n");
device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
@@ -2796,10 +2945,9 @@ ixgbe_add_media_types(struct adapter *adapter)
device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
}
- if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) {
- /* Someday, someone will care about you... */
+#endif
+ if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
device_printf(dev, "Media supported: 1000baseBX\n");
- }
if (hw->device_id == IXGBE_DEV_ID_82598AT) {
ifmedia_add(&adapter->media,
@@ -2821,12 +2969,7 @@ ixgbe_config_link(struct adapter *adapter)
sfp = ixgbe_is_sfp(hw);
if (sfp) {
- if (hw->phy.multispeed_fiber) {
- hw->mac.ops.setup_sfp(hw);
- ixgbe_enable_tx_laser(hw);
- taskqueue_enqueue(adapter->tq, &adapter->msf_task);
- } else
- taskqueue_enqueue(adapter->tq, &adapter->mod_task);
+ taskqueue_enqueue(adapter->tq, &adapter->mod_task);
} else {
if (hw->mac.ops.check_link)
err = ixgbe_check_link(hw, &adapter->link_speed,
@@ -2860,7 +3003,6 @@ ixgbe_initialize_transmit_units(struct adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
/* Setup the Base and Length of the Tx Descriptor Ring */
-
for (int i = 0; i < adapter->num_queues; i++, txr++) {
u64 tdba = txr->txdma.dma_paddr;
u32 txctrl = 0;
@@ -2880,12 +3022,15 @@ ixgbe_initialize_transmit_units(struct adapter *adapter)
txr->tail = IXGBE_TDT(j);
/* Disable Head Writeback */
+ /*
+ * Note: for X550 series devices, these registers are actually
+ * prefixed with TPH_ instead of DCA_, but the addresses and
+ * fields remain the same.
+ */
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
break;
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
default:
txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
break;
@@ -2895,8 +3040,6 @@ ixgbe_initialize_transmit_units(struct adapter *adapter)
case ixgbe_mac_82598EB:
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
break;
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
default:
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
break;
@@ -2929,7 +3072,7 @@ ixgbe_initialize_transmit_units(struct adapter *adapter)
}
static void
-ixgbe_initialise_rss_mapping(struct adapter *adapter)
+ixgbe_initialize_rss_mapping(struct adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 reta = 0, mrqc, rss_key[10];
@@ -3064,7 +3207,6 @@ ixgbe_initialize_receive_units(struct adapter *adapter)
u32 bufsz, fctrl, srrctl, rxcsum;
u32 hlreg;
-
/*
* Make sure receives are disabled while
* setting up the descriptor ring
@@ -3129,11 +3271,11 @@ ixgbe_initialize_receive_units(struct adapter *adapter)
srrctl &= ~IXGBE_SRRCTL_DROP_EN;
}
- IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
+ IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
/* Setup the HW Rx Head and Tail Descriptor Pointers */
- IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
- IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
/* Set the driver rx tail address */
rxr->tail = IXGBE_RDT(rxr->me);
@@ -3149,7 +3291,7 @@ ixgbe_initialize_receive_units(struct adapter *adapter)
rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
- ixgbe_initialise_rss_mapping(adapter);
+ ixgbe_initialize_rss_mapping(adapter);
if (adapter->num_queues > 1) {
/* RSS and RX IPP Checksum are mutually exclusive */
@@ -3159,6 +3301,7 @@ ixgbe_initialize_receive_units(struct adapter *adapter)
if (ifp->if_capenable & IFCAP_RXCSUM)
rxcsum |= IXGBE_RXCSUM_PCSD;
+ /* This is useful for calculating UDP/IP fragment checksums */
if (!(rxcsum & IXGBE_RXCSUM_PCSD))
rxcsum |= IXGBE_RXCSUM_IPPCSE;
@@ -3380,9 +3523,10 @@ ixgbe_disable_intr(struct adapter *adapter)
** the slot this adapter is plugged into.
*/
static void
-ixgbe_get_slot_info(struct ixgbe_hw *hw)
+ixgbe_get_slot_info(struct adapter *adapter)
{
- device_t dev = ((struct ixgbe_osdep *)hw->back)->dev;
+ device_t dev = adapter->dev;
+ struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_mac_info *mac = &hw->mac;
u16 link;
u32 offset;
@@ -3589,12 +3733,12 @@ ixgbe_sfp_probe(struct adapter *adapter)
goto out;
ret = hw->phy.ops.reset(hw);
if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- device_printf(dev,"Unsupported SFP+ module detected!");
- printf(" Reload driver with supported module.\n");
+ device_printf(dev, "Unsupported SFP+ module detected!");
+ device_printf(dev, "Reload driver with supported module.\n");
adapter->sfp_probe = FALSE;
goto out;
} else
- device_printf(dev,"SFP+ module detected!\n");
+ device_printf(dev, "SFP+ module detected!\n");
/* We now have supported optics */
adapter->sfp_probe = FALSE;
/* Set the optics type so system reports correctly */
@@ -3613,10 +3757,14 @@ static void
ixgbe_handle_link(void *context, int pending)
{
struct adapter *adapter = context;
+ struct ixgbe_hw *hw = &adapter->hw;
- ixgbe_check_link(&adapter->hw,
+ ixgbe_check_link(hw,
&adapter->link_speed, &adapter->link_up, 0);
ixgbe_update_link_status(adapter);
+
+ /* Re-enable link interrupts */
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
}
/*
@@ -3627,23 +3775,66 @@ ixgbe_handle_mod(void *context, int pending)
{
struct adapter *adapter = context;
struct ixgbe_hw *hw = &adapter->hw;
+ enum ixgbe_phy_type orig_type = hw->phy.type;
device_t dev = adapter->dev;
u32 err;
+ IXGBE_CORE_LOCK(adapter);
+
+ /* Check to see if the PHY type changed */
+ if (hw->phy.ops.identify) {
+ hw->phy.type = ixgbe_phy_unknown;
+ hw->phy.ops.identify(hw);
+ }
+
+ if (hw->phy.type != orig_type) {
+ device_printf(dev, "Detected phy_type %d\n", hw->phy.type);
+
+ if (hw->phy.type == ixgbe_phy_none) {
+ hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+ goto out;
+ }
+
+ /* Try to do the initialization that was skipped before */
+ if (hw->phy.ops.init)
+ hw->phy.ops.init(hw);
+ if (hw->phy.ops.reset)
+ hw->phy.ops.reset(hw);
+ }
+
err = hw->phy.ops.identify_sfp(hw);
if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
device_printf(dev,
"Unsupported SFP+ module type was detected.\n");
- return;
+ goto out;
}
err = hw->mac.ops.setup_sfp(hw);
if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
device_printf(dev,
"Setup failure - unsupported SFP+ module type.\n");
- return;
+ goto out;
+ }
+ if (hw->phy.multispeed_fiber)
+ taskqueue_enqueue(adapter->tq, &adapter->msf_task);
+out:
+ /* Update media type */
+ switch (hw->mac.ops.get_media_type(hw)) {
+ case ixgbe_media_type_fiber:
+ adapter->optics = IFM_10G_SR;
+ break;
+ case ixgbe_media_type_copper:
+ adapter->optics = IFM_10G_TWINAX;
+ break;
+ case ixgbe_media_type_cx4:
+ adapter->optics = IFM_10G_CX4;
+ break;
+ default:
+ adapter->optics = 0;
+ break;
}
- taskqueue_enqueue(adapter->tq, &adapter->msf_task);
+
+ IXGBE_CORE_UNLOCK(adapter);
return;
}
@@ -3658,13 +3849,10 @@ ixgbe_handle_msf(void *context, int pending)
struct ixgbe_hw *hw = &adapter->hw;
u32 autoneg;
bool negotiate;
- int err;
- err = hw->phy.ops.identify_sfp(hw);
- if (!err) {
- ixgbe_setup_optics(adapter);
- INIT_DEBUGOUT1("ixgbe_sfp_probe: flags: %X\n", adapter->optics);
- }
+ IXGBE_CORE_LOCK(adapter);
+ /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
+ adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
autoneg = hw->phy.autoneg_advertised;
if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
@@ -3672,8 +3860,10 @@ ixgbe_handle_msf(void *context, int pending)
if (hw->mac.ops.setup_link)
hw->mac.ops.setup_link(hw, autoneg, TRUE);
+ /* Adjust media types shown in ifconfig */
ifmedia_removeall(&adapter->media);
ixgbe_add_media_types(adapter);
+ IXGBE_CORE_UNLOCK(adapter);
return;
}
@@ -3751,18 +3941,6 @@ ixgbe_config_dmac(struct adapter *adapter)
}
/*
- * Checks whether the adapter supports Energy Efficient Ethernet
- * or not, based on device ID.
- */
-static void
-ixgbe_check_eee_support(struct adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
-
- adapter->eee_enabled = !!(hw->mac.ops.setup_eee);
-}
-
-/*
* Checks whether the adapter's ports are capable of
* Wake On LAN by reading the adapter's NVM.
*
@@ -3780,8 +3958,8 @@ ixgbe_check_wol_support(struct adapter *adapter)
ixgbe_get_device_caps(hw, &dev_caps);
if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
- hw->bus.func == 0))
- adapter->wol_support = hw->wol_enabled = 1;
+ hw->bus.func == 0))
+ adapter->wol_support = hw->wol_enabled = 1;
/* Save initial wake up filter configuration */
adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
@@ -3801,6 +3979,9 @@ ixgbe_setup_low_power_mode(struct adapter *adapter)
mtx_assert(&adapter->core_mtx, MA_OWNED);
+ if (!hw->wol_enabled)
+ ixgbe_set_phy_power(hw, FALSE);
+
/* Limit power management flow to X550EM baseT */
if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T
&& hw->phy.ops.enter_lplu) {
@@ -4114,7 +4295,7 @@ ixgbe_add_device_sysctls(struct adapter *adapter)
/* Sysctls for all devices */
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc",
CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
- ixgbe_set_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC);
+ ixgbe_sysctl_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC);
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "enable_aim",
CTLFLAG_RW,
@@ -4122,20 +4303,30 @@ ixgbe_add_device_sysctls(struct adapter *adapter)
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "advertise_speed",
CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
- ixgbe_set_advertise, "I", IXGBE_SYSCTL_DESC_ADV_SPEED);
+ ixgbe_sysctl_advertise, "I", IXGBE_SYSCTL_DESC_ADV_SPEED);
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "thermal_test",
CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
ixgbe_sysctl_thermal_test, "I", "Thermal Test");
- /* for X550 devices */
+#ifdef IXGBE_DEBUG
+ /* testing sysctls (for all devices) */
+ SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "power_state",
+ CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
+ ixgbe_sysctl_power_state, "I", "PCI Power State");
+
+ SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "print_rss_config",
+ CTLTYPE_STRING | CTLFLAG_RD, adapter, 0,
+ ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
+#endif
+ /* for X550 series devices */
if (hw->mac.type >= ixgbe_mac_X550)
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dmac",
CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
ixgbe_sysctl_dmac, "I", "DMA Coalesce");
- /* for X550T and X550EM backplane devices */
- if (hw->mac.ops.setup_eee) {
+ /* for X552 backplane devices */
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) {
struct sysctl_oid *eee_node;
struct sysctl_oid_list *eee_list;
@@ -4163,11 +4354,15 @@ ixgbe_add_device_sysctls(struct adapter *adapter)
CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
ixgbe_sysctl_eee_rx_lpi_status, "I",
"Whether or not RX link is in LPI state");
+
+ SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "tx_lpi_delay",
+ CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
+ ixgbe_sysctl_eee_tx_lpi_delay, "I",
+ "TX LPI entry delay in microseconds");
}
- /* for certain 10GBaseT devices */
- if (hw->device_id == IXGBE_DEV_ID_X550T ||
- hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
+ /* for WoL-capable devices */
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wol_enable",
CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
ixgbe_sysctl_wol_enable, "I",
@@ -4179,7 +4374,7 @@ ixgbe_add_device_sysctls(struct adapter *adapter)
"Enable/Disable Wake Up Filters");
}
- /* for X550EM 10GBaseT devices */
+ /* for X552/X557-AT devices */
if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
struct sysctl_oid *phy_node;
struct sysctl_oid_list *phy_list;
@@ -4473,41 +4668,51 @@ ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
** 3 - full
*/
static int
-ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS)
+ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
{
- int error, last;
- struct adapter *adapter = (struct adapter *) arg1;
+ int error, fc;
+ struct adapter *adapter;
- last = adapter->fc;
- error = sysctl_handle_int(oidp, &adapter->fc, 0, req);
+ adapter = (struct adapter *) arg1;
+ fc = adapter->fc;
+
+ error = sysctl_handle_int(oidp, &fc, 0, req);
if ((error) || (req->newptr == NULL))
return (error);
/* Don't bother if it's not changed */
- if (adapter->fc == last)
+ if (adapter->fc == fc)
return (0);
- switch (adapter->fc) {
- case ixgbe_fc_rx_pause:
- case ixgbe_fc_tx_pause:
- case ixgbe_fc_full:
- adapter->hw.fc.requested_mode = adapter->fc;
- if (adapter->num_queues > 1)
- ixgbe_disable_rx_drop(adapter);
- break;
- case ixgbe_fc_none:
- adapter->hw.fc.requested_mode = ixgbe_fc_none;
- if (adapter->num_queues > 1)
- ixgbe_enable_rx_drop(adapter);
- break;
- default:
- adapter->fc = last;
- return (EINVAL);
+ return ixgbe_set_flowcntl(adapter, fc);
+}
+
+
+static int
+ixgbe_set_flowcntl(struct adapter *adapter, int fc)
+{
+
+ switch (fc) {
+ case ixgbe_fc_rx_pause:
+ case ixgbe_fc_tx_pause:
+ case ixgbe_fc_full:
+ adapter->hw.fc.requested_mode = adapter->fc;
+ if (adapter->num_queues > 1)
+ ixgbe_disable_rx_drop(adapter);
+ break;
+ case ixgbe_fc_none:
+ adapter->hw.fc.requested_mode = ixgbe_fc_none;
+ if (adapter->num_queues > 1)
+ ixgbe_enable_rx_drop(adapter);
+ break;
+ default:
+ return (EINVAL);
}
+ adapter->fc = fc;
/* Don't autoneg if forcing a value */
adapter->hw.fc.disable_fc_autoneg = TRUE;
ixgbe_fc_enable(&adapter->hw);
- return error;
+ return (0);
}
/*
@@ -4518,27 +4723,39 @@ ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS)
** 0x4 - advertise 10G
*/
static int
-ixgbe_set_advertise(SYSCTL_HANDLER_ARGS)
+ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
{
- int error = 0, requested;
- struct adapter *adapter;
- device_t dev;
- struct ixgbe_hw *hw;
- ixgbe_link_speed speed = 0;
+ int error, advertise;
+ struct adapter *adapter;
adapter = (struct adapter *) arg1;
- dev = adapter->dev;
- hw = &adapter->hw;
+ advertise = adapter->advertise;
- requested = adapter->advertise;
- error = sysctl_handle_int(oidp, &requested, 0, req);
+ error = sysctl_handle_int(oidp, &advertise, 0, req);
if ((error) || (req->newptr == NULL))
return (error);
/* Checks to validate new value */
- if (adapter->advertise == requested) /* no change */
+ if (adapter->advertise == advertise) /* no change */
return (0);
+ return ixgbe_set_advertise(adapter, advertise);
+}
+
+static int
+ixgbe_set_advertise(struct adapter *adapter, int advertise)
+{
+ device_t dev;
+ struct ixgbe_hw *hw;
+ ixgbe_link_speed speed;
+
+ hw = &adapter->hw;
+ dev = adapter->dev;
+
+ /* No speed changes for backplane media */
+ if (hw->phy.media_type == ixgbe_media_type_backplane)
+ return (ENODEV);
+
if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
(hw->phy.multispeed_fiber))) {
device_printf(dev,
@@ -4547,13 +4764,13 @@ ixgbe_set_advertise(SYSCTL_HANDLER_ARGS)
return (EINVAL);
}
- if (requested < 0x1 || requested > 0x7) {
+ if (advertise < 0x1 || advertise > 0x7) {
device_printf(dev,
"Invalid advertised speed; valid modes are 0x1 through 0x7\n");
return (EINVAL);
}
- if ((requested & 0x1)
+ if ((advertise & 0x1)
&& (hw->mac.type != ixgbe_mac_X540)
&& (hw->mac.type != ixgbe_mac_X550)) {
device_printf(dev, "Set Advertise: 100Mb on X540/X550 only\n");
@@ -4561,22 +4778,23 @@ ixgbe_set_advertise(SYSCTL_HANDLER_ARGS)
}
/* Set new value and report new advertised mode */
- if (requested & 0x1)
+ speed = 0;
+ if (advertise & 0x1)
speed |= IXGBE_LINK_SPEED_100_FULL;
- if (requested & 0x2)
+ if (advertise & 0x2)
speed |= IXGBE_LINK_SPEED_1GB_FULL;
- if (requested & 0x4)
+ if (advertise & 0x4)
speed |= IXGBE_LINK_SPEED_10GB_FULL;
+ adapter->advertise = advertise;
hw->mac.autotry_restart = TRUE;
hw->mac.ops.setup_link(hw, speed, TRUE);
- adapter->advertise = requested;
- return (error);
+ return (0);
}
/*
- * The following two sysctls are for X550 BaseT devices;
+ * The following two sysctls are for X552/X557-AT devices;
* they deal with the external PHY used in them.
*/
static int
@@ -4675,31 +4893,22 @@ static int
ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
{
struct adapter *adapter = (struct adapter *) arg1;
- struct ixgbe_hw *hw = &adapter->hw;
struct ifnet *ifp = adapter->ifp;
int error;
- u16 oldval;
+ u32 newval;
- oldval = adapter->dmac;
- error = sysctl_handle_int(oidp, &adapter->dmac, 0, req);
+ newval = adapter->dmac;
+ error = sysctl_handle_int(oidp, &newval, 0, req);
if ((error) || (req->newptr == NULL))
return (error);
- switch (hw->mac.type) {
- case ixgbe_mac_X550:
- case ixgbe_mac_X550EM_x:
- break;
- default:
- device_printf(adapter->dev,
- "DMA Coalescing is only supported on X550 devices\n");
- return (ENODEV);
- }
-
- switch (adapter->dmac) {
+ switch (newval) {
case 0:
/* Disabled */
+ adapter->dmac = 0;
break;
- case 1: /* Enable and use default */
+ case 1:
+ /* Enable and use default */
adapter->dmac = 1000;
break;
case 50:
@@ -4711,10 +4920,10 @@ ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
case 5000:
case 10000:
/* Legal values - allow */
+ adapter->dmac = newval;
break;
default:
/* Do nothing, illegal value */
- adapter->dmac = oldval;
return (EINVAL);
}
@@ -4725,6 +4934,42 @@ ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
return (0);
}
+#ifdef IXGBE_DEBUG
+/**
+ * Sysctl to test power states
+ * Values:
+ * 0 - set device to D0
+ * 3 - set device to D3
+ * (none) - get current device power state
+ */
+static int
+ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
+{
+ struct adapter *adapter = (struct adapter *) arg1;
+ device_t dev = adapter->dev;
+ int curr_ps, new_ps, error = 0;
+
+ curr_ps = new_ps = pci_get_powerstate(dev);
+
+ error = sysctl_handle_int(oidp, &new_ps, 0, req);
+ if ((error) || (req->newptr == NULL))
+ return (error);
+
+ if (new_ps == curr_ps)
+ return (0);
+
+ if (new_ps == 3 && curr_ps == 0)
+ error = DEVICE_SUSPEND(dev);
+ else if (new_ps == 0 && curr_ps == 3)
+ error = DEVICE_RESUME(dev);
+ else
+ return (EINVAL);
+
+ device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
+
+ return (error);
+}
+#endif
/*
* Sysctl to enable/disable the WoL capability, if supported by the adapter.
* Values:
@@ -4743,13 +4988,14 @@ ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
if ((error) || (req->newptr == NULL))
return (error);
+ new_wol_enabled = !!(new_wol_enabled);
if (new_wol_enabled == hw->wol_enabled)
return (0);
if (new_wol_enabled > 0 && !adapter->wol_support)
return (ENODEV);
else
- hw->wol_enabled = !!(new_wol_enabled);
+ hw->wol_enabled = new_wol_enabled;
return (0);
}
@@ -4773,13 +5019,14 @@ ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)
error = sysctl_handle_int(oidp, &new_eee_enabled, 0, req);
if ((error) || (req->newptr == NULL))
return (error);
+ new_eee_enabled = !!(new_eee_enabled);
if (new_eee_enabled == adapter->eee_enabled)
return (0);
if (new_eee_enabled > 0 && !hw->mac.ops.setup_eee)
return (ENODEV);
else
- adapter->eee_enabled = !!(new_eee_enabled);
+ adapter->eee_enabled = new_eee_enabled;
/* Re-initialize hardware if it's already running */
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
@@ -4837,6 +5084,21 @@ ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS)
}
/*
+ * Read-only sysctl indicating TX Link LPI delay
+ */
+static int
+ixgbe_sysctl_eee_tx_lpi_delay(SYSCTL_HANDLER_ARGS)
+{
+ struct adapter *adapter = (struct adapter *) arg1;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 reg;
+
+ reg = IXGBE_READ_REG(hw, IXGBE_EEE_SU);
+
+ return (sysctl_handle_int(oidp, 0, reg >> 26, req));
+}
+
+/*
* Sysctl to enable/disable the types of packets that the
* adapter will wake up on upon receipt.
* WUFC - Wake Up Filter Control
@@ -4879,6 +5141,58 @@ ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
return (0);
}
+#ifdef IXGBE_DEBUG
+static int
+ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
+{
+ struct adapter *adapter = (struct adapter *)arg1;
+ struct ixgbe_hw *hw = &adapter->hw;
+ device_t dev = adapter->dev;
+ int error = 0, reta_size;
+ struct sbuf *buf;
+ u32 reg;
+
+ buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+ if (!buf) {
+ device_printf(dev, "Could not allocate sbuf for output.\n");
+ return (ENOMEM);
+ }
+
+ // TODO: use sbufs to make a string to print out
+ /* Set multiplier for RETA setup and table size based on MAC */
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ reta_size = 128;
+ break;
+ default:
+ reta_size = 32;
+ break;
+ }
+
+ /* Print out the redirection table */
+ sbuf_cat(buf, "\n");
+ for (int i = 0; i < reta_size; i++) {
+ if (i < 32) {
+ reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
+ sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
+ } else {
+ reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
+ sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
+ }
+ }
+
+ // TODO: print more config
+
+ error = sbuf_finish(buf);
+ if (error)
+ device_printf(dev, "Error finishing sbuf: %d\n", error);
+
+ sbuf_delete(buf);
+ return (0);
+}
+#endif /* IXGBE_DEBUG */
+
/*
** Enable the hardware to drop packets when the buffer is
** full. This is useful when multiqueue, so that no single
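Several hunks above split the flow-control and advertise-speed sysctls into a thin SYSCTL_HANDLER_ARGS wrapper (ixgbe_sysctl_flowcntl, ixgbe_sysctl_advertise) plus a plain setter (ixgbe_set_flowcntl, ixgbe_set_advertise) that can also be called at attach time from the new hw.ix.flow_control and hw.ix.advertise_speed tunables. The fragment below is a minimal illustrative sketch of that shape for a hypothetical "foo" driver; the softc layout, names, and setter body are invented for the example and are not code from this commit.

/*
 * Illustrative sketch only (hypothetical "foo" driver): the same
 * handler/setter split used above for fc and advertise_speed.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

struct foo_softc {
	int	speed;			/* currently applied value */
};

static int	foo_set_speed(struct foo_softc *, int);

/* Runtime sysctl handler: copy the value in/out, delegate to the setter. */
static int
foo_sysctl_speed(SYSCTL_HANDLER_ARGS)
{
	struct foo_softc *sc = arg1;
	int error, speed;

	speed = sc->speed;
	error = sysctl_handle_int(oidp, &speed, 0, req);
	if (error || req->newptr == NULL)
		return (error);		/* read-only access or copyin error */
	if (speed == sc->speed)
		return (0);		/* no change requested */
	return (foo_set_speed(sc, speed));
}

/* Shared setter: also callable at attach time from a loader tunable. */
static int
foo_set_speed(struct foo_softc *sc, int speed)
{
	if (speed < 0x1 || speed > 0x7)
		return (EINVAL);	/* validate before touching hardware */
	sc->speed = speed;
	/* ... program the hardware from sc->speed here ... */
	return (0);
}

With this split, consuming the loader tunable at attach time reduces to a single call such as foo_set_speed(sc, tunable_value), mirroring the ixgbe_set_advertise(adapter, ixgbe_advertise_speed) and ixgbe_set_flowcntl(adapter, ixgbe_flow_control) calls added to ixgbe_attach() above.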
diff --git a/sys/dev/ixgbe/if_ixv.c b/sys/dev/ixgbe/if_ixv.c
index a3971cc..f47cf60 100644
--- a/sys/dev/ixgbe/if_ixv.c
+++ b/sys/dev/ixgbe/if_ixv.c
@@ -43,7 +43,7 @@
/*********************************************************************
* Driver version
*********************************************************************/
-char ixv_driver_version[] = "1.4.0";
+char ixv_driver_version[] = "1.4.6-k";
/*********************************************************************
* PCI Device ID Table
@@ -292,9 +292,14 @@ ixv_attach(device_t dev)
/* Allocate, clear, and link in our adapter structure */
adapter = device_get_softc(dev);
- adapter->dev = adapter->osdep.dev = dev;
+ adapter->dev = dev;
hw = &adapter->hw;
+#ifdef DEV_NETMAP
+ adapter->init_locked = ixv_init_locked;
+ adapter->stop_locked = ixv_stop;
+#endif
+
/* Core Lock Init*/
IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
@@ -317,7 +322,7 @@ ixv_attach(device_t dev)
/* Do base PCI setup - map BAR0 */
if (ixv_allocate_pci_resources(adapter)) {
- device_printf(dev, "Allocation of PCI resources failed\n");
+ device_printf(dev, "ixv_allocate_pci_resources() failed!\n");
error = ENXIO;
goto err_out;
}
@@ -357,6 +362,7 @@ ixv_attach(device_t dev)
/* Allocate our TX/RX Queues */
if (ixgbe_allocate_queues(adapter)) {
+ device_printf(dev, "ixgbe_allocate_queues() failed!\n");
error = ENOMEM;
goto err_out;
}
@@ -367,7 +373,7 @@ ixv_attach(device_t dev)
*/
error = ixgbe_init_shared_code(hw);
if (error) {
- device_printf(dev,"Shared Code Initialization Failure\n");
+ device_printf(dev, "ixgbe_init_shared_code() failed!\n");
error = EIO;
goto err_late;
}
@@ -375,23 +381,37 @@ ixv_attach(device_t dev)
/* Setup the mailbox */
ixgbe_init_mbx_params_vf(hw);
- ixgbe_reset_hw(hw);
+ /* Reset mbox api to 1.0 */
+ error = ixgbe_reset_hw(hw);
+ if (error == IXGBE_ERR_RESET_FAILED)
+ device_printf(dev, "ixgbe_reset_hw() failure: Reset Failed!\n");
+ else if (error)
+ device_printf(dev, "ixgbe_reset_hw() failed with error %d\n", error);
+ if (error) {
+ error = EIO;
+ goto err_late;
+ }
- /* Get the Mailbox API version */
- device_printf(dev,"MBX API %d negotiation: %d\n",
- ixgbe_mbox_api_11,
- ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11));
+ /* Negotiate mailbox API version */
+ error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
+ if (error) {
+ device_printf(dev, "MBX API 1.1 negotiation failed! Error %d\n", error);
+ error = EIO;
+ goto err_late;
+ }
error = ixgbe_init_hw(hw);
if (error) {
- device_printf(dev,"Hardware Initialization Failure\n");
+ device_printf(dev, "ixgbe_init_hw() failed!\n");
error = EIO;
goto err_late;
}
error = ixv_allocate_msix(adapter);
- if (error)
+ if (error) {
+ device_printf(dev, "ixv_allocate_msix() failed!\n");
goto err_late;
+ }
/* If no mac address was assigned, make a random one */
if (!ixv_check_ether_addr(hw->mac.addr)) {
@@ -451,7 +471,7 @@ ixv_detach(device_t dev)
/* Make sure VLANS are not using driver */
if (adapter->ifp->if_vlantrunk != NULL) {
- device_printf(dev,"Vlan in use, detach first\n");
+ device_printf(dev, "Vlan in use, detach first\n");
return (EBUSY);
}
@@ -560,13 +580,13 @@ ixv_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
#endif
case SIOCSIFMTU:
IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
- if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
+ if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR) {
error = EINVAL;
} else {
IXGBE_CORE_LOCK(adapter);
ifp->if_mtu = ifr->ifr_mtu;
adapter->max_frame_size =
- ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ ifp->if_mtu + IXGBE_MTU_HDR;
ixv_init_locked(adapter);
IXGBE_CORE_UNLOCK(adapter);
}
@@ -647,9 +667,9 @@ ixv_init_locked(struct adapter *adapter)
struct ifnet *ifp = adapter->ifp;
device_t dev = adapter->dev;
struct ixgbe_hw *hw = &adapter->hw;
- u32 mhadd, gpie;
+ int error = 0;
- INIT_DEBUGOUT("ixv_init: begin");
+ INIT_DEBUGOUT("ixv_init_locked: begin");
mtx_assert(&adapter->core_mtx, MA_OWNED);
hw->adapter_stopped = FALSE;
ixgbe_stop_adapter(hw);
@@ -666,12 +686,17 @@ ixv_init_locked(struct adapter *adapter)
/* Prepare transmit descriptors and buffers */
if (ixgbe_setup_transmit_structures(adapter)) {
- device_printf(dev,"Could not setup transmit structures\n");
+ device_printf(dev, "Could not setup transmit structures\n");
ixv_stop(adapter);
return;
}
+ /* Reset VF and renegotiate mailbox API version */
ixgbe_reset_hw(hw);
+ error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
+ if (error)
+ device_printf(dev, "MBX API 1.1 negotiation failed! Error %d\n", error);
+
ixv_initialize_transmit_units(adapter);
/* Setup Multicast table */
@@ -688,7 +713,7 @@ ixv_init_locked(struct adapter *adapter)
/* Prepare receive descriptors and buffers */
if (ixgbe_setup_receive_structures(adapter)) {
- device_printf(dev,"Could not setup receive structures\n");
+ device_printf(dev, "Could not setup receive structures\n");
ixv_stop(adapter);
return;
}
@@ -696,12 +721,6 @@ ixv_init_locked(struct adapter *adapter)
/* Configure RX settings */
ixv_initialize_receive_units(adapter);
- /* Enable Enhanced MSIX mode */
- gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
- gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME;
- gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD;
- IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
-
/* Set the various hardware offload abilities */
ifp->if_hwassist = 0;
if (ifp->if_capenable & IFCAP_TSO4)
@@ -713,19 +732,9 @@ ixv_init_locked(struct adapter *adapter)
#endif
}
- /* Set MTU size */
- if (ifp->if_mtu > ETHERMTU) {
- mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
- mhadd &= ~IXGBE_MHADD_MFS_MASK;
- mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
- IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
- }
-
/* Set up VLAN offload and filter */
ixv_setup_vlan_support(adapter);
- callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
-
/* Set up MSI/X routing */
ixv_configure_ivars(adapter);
@@ -741,6 +750,9 @@ ixv_init_locked(struct adapter *adapter)
/* Config/Enable Link */
ixv_config_link(adapter);
+ /* Start watchdog */
+ callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
+
/* And now turn on interrupts */
ixv_enable_intr(adapter);
@@ -1415,7 +1427,7 @@ ixv_allocate_pci_resources(struct adapter *adapter)
&rid, RF_ACTIVE);
if (!(adapter->pci_mem)) {
- device_printf(dev,"Unable to allocate bus resource: memory\n");
+ device_printf(dev, "Unable to allocate bus resource: memory\n");
return (ENXIO);
}
@@ -1423,12 +1435,11 @@ ixv_allocate_pci_resources(struct adapter *adapter)
rman_get_bustag(adapter->pci_mem);
adapter->osdep.mem_bus_space_handle =
rman_get_bushandle(adapter->pci_mem);
- adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
+ adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
/* Pick up the tuneable queues */
adapter->num_queues = ixv_num_queues;
-
- adapter->hw.back = &adapter->osdep;
+ adapter->hw.back = adapter;
/*
** Now setup MSI/X, should
@@ -1536,7 +1547,7 @@ ixv_setup_interface(device_t dev, struct adapter *adapter)
ether_ifattach(ifp, adapter->hw.mac.addr);
adapter->max_frame_size =
- ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ ifp->if_mtu + IXGBE_MTU_HDR_VLAN;
/*
* Tell the upper layer(s) we support long frames.
@@ -1557,7 +1568,6 @@ ixv_setup_interface(device_t dev, struct adapter *adapter)
*/
ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
ixv_media_status);
- ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL);
ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
@@ -1568,19 +1578,11 @@ static void
ixv_config_link(struct adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 autoneg, err = 0;
+ u32 autoneg;
if (hw->mac.ops.check_link)
- err = hw->mac.ops.check_link(hw, &autoneg,
+ hw->mac.ops.check_link(hw, &autoneg,
&adapter->link_up, FALSE);
- if (err)
- goto out;
-
- if (hw->mac.ops.setup_link)
- err = hw->mac.ops.setup_link(hw,
- autoneg, adapter->link_up);
-out:
- return;
}
@@ -1647,7 +1649,6 @@ ixv_initialize_receive_units(struct adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
struct ifnet *ifp = adapter->ifp;
u32 bufsz, rxcsum, psrtype;
- int max_frame;
if (ifp->if_mtu > ETHERMTU)
bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
@@ -1660,9 +1661,8 @@ ixv_initialize_receive_units(struct adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
- /* Tell PF our expected packet-size */
- max_frame = ifp->if_mtu + IXGBE_MTU_HDR;
- ixgbevf_rlpml_set_vf(hw, max_frame);
+ /* Tell PF our max_frame size */
+ ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size);
for (int i = 0; i < adapter->num_queues; i++, rxr++) {
u64 rdba = rxr->rxdma.dma_paddr;
@@ -1670,7 +1670,7 @@ ixv_initialize_receive_units(struct adapter *adapter)
/* Disable the queue */
rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
- rxdctl &= ~(IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME);
+ rxdctl &= ~IXGBE_RXDCTL_ENABLE;
IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
for (int j = 0; j < 10; j++) {
if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
@@ -1700,17 +1700,11 @@ ixv_initialize_receive_units(struct adapter *adapter)
reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
- /* Setup the HW Rx Head and Tail Descriptor Pointers */
- IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
- IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
- adapter->num_rx_desc - 1);
-
- /* Set Rx Tail register */
+ /* Capture Rx Tail register */
rxr->tail = IXGBE_VFRDT(rxr->me);
/* Do the queue enabling last */
- rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
- rxdctl |= IXGBE_RXDCTL_ENABLE;
+ rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
for (int k = 0; k < 10; k++) {
if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
@@ -1769,7 +1763,7 @@ ixv_setup_vlan_support(struct adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 ctrl, vid, vfta, retry;
-
+ struct rx_ring *rxr;
/*
** We get here thru init_locked, meaning
@@ -1785,6 +1779,12 @@ ixv_setup_vlan_support(struct adapter *adapter)
ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
ctrl |= IXGBE_RXDCTL_VME;
IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
+ /*
+ * Let Rx path know that it needs to store VLAN tag
+ * as part of extra mbuf info.
+ */
+ rxr = &adapter->rx_rings[i];
+ rxr->vtag_strip = TRUE;
}
/*
@@ -1800,7 +1800,7 @@ ixv_setup_vlan_support(struct adapter *adapter)
** based on the bits set in each
** of the array ints.
*/
- for ( int j = 0; j < 32; j++) {
+ for (int j = 0; j < 32; j++) {
retry = 0;
if ((vfta & (1 << j)) == 0)
continue;
@@ -1827,10 +1827,10 @@ ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
struct adapter *adapter = ifp->if_softc;
u16 index, bit;
- if (ifp->if_softc != arg) /* Not our event */
+ if (ifp->if_softc != arg) /* Not our event */
return;
- if ((vtag == 0) || (vtag > 4095)) /* Invalid */
+ if ((vtag == 0) || (vtag > 4095)) /* Invalid */
return;
IXGBE_CORE_LOCK(adapter);
diff --git a/sys/dev/ixgbe/ix_txrx.c b/sys/dev/ixgbe/ix_txrx.c
index 068048f..ef42714 100644
--- a/sys/dev/ixgbe/ix_txrx.c
+++ b/sys/dev/ixgbe/ix_txrx.c
@@ -80,27 +80,6 @@ static bool ixgbe_rsc_enable = FALSE;
static int atr_sample_rate = 20;
#endif
-/* Shared PCI config read/write */
-inline u16
-ixgbe_read_pci_cfg(struct ixgbe_hw *hw, u32 reg)
-{
- u16 value;
-
- value = pci_read_config(((struct ixgbe_osdep *)hw->back)->dev,
- reg, 2);
-
- return (value);
-}
-
-inline void
-ixgbe_write_pci_cfg(struct ixgbe_hw *hw, u32 reg, u16 value)
-{
- pci_write_config(((struct ixgbe_osdep *)hw->back)->dev,
- reg, value, 2);
-
- return;
-}
-
/*********************************************************************
* Local Function prototypes
*********************************************************************/
@@ -186,8 +165,8 @@ ixgbe_start(struct ifnet *ifp)
/*
-** Multiqueue Transmit driver
-**
+** Multiqueue Transmit Entry Point
+** (if_transmit function)
*/
int
ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
@@ -210,10 +189,14 @@ ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
#ifdef RSS
if (rss_hash2bucket(m->m_pkthdr.flowid,
- M_HASHTYPE_GET(m), &bucket_id) == 0)
- /* TODO: spit out something if bucket_id > num_queues? */
+ M_HASHTYPE_GET(m), &bucket_id) == 0) {
i = bucket_id % adapter->num_queues;
- else
+#ifdef IXGBE_DEBUG
+ if (bucket_id > adapter->num_queues)
+ if_printf(ifp, "bucket_id (%d) > num_queues "
+ "(%d)\n", bucket_id, adapter->num_queues);
+#endif
+ } else
#endif
i = m->m_pkthdr.flowid % adapter->num_queues;
} else
@@ -444,6 +427,7 @@ retry:
}
#endif
+ olinfo_status |= IXGBE_ADVTXD_CC;
i = txr->next_avail_desc;
for (j = 0; j < nsegs; j++) {
bus_size_t seglen;
@@ -736,8 +720,12 @@ ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
struct adapter *adapter = txr->adapter;
struct ixgbe_adv_tx_context_desc *TXD;
struct ether_vlan_header *eh;
+#ifdef INET
struct ip *ip;
+#endif
+#ifdef INET6
struct ip6_hdr *ip6;
+#endif
u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
int ehdrlen, ip_hlen = 0;
u16 etype;
@@ -745,9 +733,11 @@ ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
int offload = TRUE;
int ctxd = txr->next_avail_desc;
u16 vtag = 0;
+ caddr_t l3d;
+
/* First check if TSO is to be used */
- if (mp->m_pkthdr.csum_flags & CSUM_TSO)
+ if (mp->m_pkthdr.csum_flags & (CSUM_IP_TSO|CSUM_IP6_TSO))
return (ixgbe_tso_setup(txr, mp, cmd_type_len, olinfo_status));
if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
@@ -790,20 +780,38 @@ ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
if (offload == FALSE)
goto no_offloads;
+ /*
+ * If the first mbuf only includes the ethernet header, jump to the next one
+ * XXX: This assumes the stack splits mbufs containing headers on header boundaries
+ * XXX: And assumes the entire IP header is contained in one mbuf
+ */
+ if (mp->m_len == ehdrlen && mp->m_next)
+ l3d = mtod(mp->m_next, caddr_t);
+ else
+ l3d = mtod(mp, caddr_t) + ehdrlen;
+
switch (etype) {
+#ifdef INET
case ETHERTYPE_IP:
- ip = (struct ip *)(mp->m_data + ehdrlen);
+ ip = (struct ip *)(l3d);
ip_hlen = ip->ip_hl << 2;
ipproto = ip->ip_p;
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
+ /* Insert IPv4 checksum into data descriptors */
+ if (mp->m_pkthdr.csum_flags & CSUM_IP) {
+ ip->ip_sum = 0;
+ *olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
+ }
break;
+#endif
+#ifdef INET6
case ETHERTYPE_IPV6:
- ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
+ ip6 = (struct ip6_hdr *)(l3d);
ip_hlen = sizeof(struct ip6_hdr);
- /* XXX-BZ this will go badly in case of ext hdrs. */
ipproto = ip6->ip6_nxt;
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
break;
+#endif
default:
offload = FALSE;
break;
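
The l3d selection above handles the case where the first mbuf carries only the Ethernet header, so the IP header begins in the following mbuf. A minimal sketch of the same check, with a hypothetical struct buf standing in for struct mbuf; like the XXX comments, it assumes header-only mbufs are split on header boundaries and the whole IP header sits in one buffer:

	#include <stddef.h>

	/* Hypothetical stand-in for struct mbuf: data pointer, length, next link. */
	struct buf {
		char		*data;
		size_t		 len;
		struct buf	*next;
	};

	/*
	 * Return a pointer to the L3 (IP) header.  If the first buffer holds
	 * exactly the Ethernet header and another buffer follows, the IP
	 * header starts at the beginning of that next buffer; otherwise it
	 * follows the Ethernet header in the same buffer.
	 */
	static char *
	l3_header(struct buf *b, size_t ehdrlen)
	{
		if (b->len == ehdrlen && b->next != NULL)
			return (b->next->data);
		return (b->data + ehdrlen);
	}
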
@@ -811,29 +819,32 @@ ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
vlan_macip_lens |= ip_hlen;
+ /* No support for offloads for non-L4 next headers */
switch (ipproto) {
case IPPROTO_TCP:
- if (mp->m_pkthdr.csum_flags & CSUM_TCP)
+ if (mp->m_pkthdr.csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP))
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ else
+ offload = false;
break;
-
case IPPROTO_UDP:
- if (mp->m_pkthdr.csum_flags & CSUM_UDP)
+ if (mp->m_pkthdr.csum_flags & (CSUM_IP_UDP | CSUM_IP6_UDP))
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
+ else
+ offload = false;
break;
-
-#if __FreeBSD_version >= 800000
case IPPROTO_SCTP:
- if (mp->m_pkthdr.csum_flags & CSUM_SCTP)
+ if (mp->m_pkthdr.csum_flags & (CSUM_IP_SCTP | CSUM_IP6_SCTP))
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
+ else
+ offload = false;
break;
-#endif
default:
- offload = FALSE;
+ offload = false;
break;
}
- if (offload) /* For the TX descriptor setup */
+ if (offload) /* Insert L4 checksum into data descriptors */
*olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
no_offloads:
@@ -878,7 +889,6 @@ ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp,
#endif
struct tcphdr *th;
-
/*
* Determine where frame payload starts.
* Jump over vlan headers if already present
@@ -1035,7 +1045,7 @@ ixgbe_txeof(struct tx_ring *txr)
BUS_DMASYNC_POSTREAD);
do {
- union ixgbe_adv_tx_desc *eop= buf->eop;
+ union ixgbe_adv_tx_desc *eop = buf->eop;
if (eop == NULL) /* No work */
break;
@@ -1277,6 +1287,7 @@ ixgbe_setup_hw_rsc(struct rx_ring *rxr)
rxr->hw_rsc = TRUE;
}
+
/*********************************************************************
*
* Refresh mbuf buffers for RX descriptor rings
@@ -1412,7 +1423,6 @@ fail:
return (error);
}
-
static void
ixgbe_free_receive_ring(struct rx_ring *rxr)
{
@@ -1432,7 +1442,6 @@ ixgbe_free_receive_ring(struct rx_ring *rxr)
}
}
-
/*********************************************************************
*
* Initialize a receive ring and its buffers.
@@ -1980,34 +1989,28 @@ ixgbe_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype)
{
u16 status = (u16) staterr;
u8 errors = (u8) (staterr >> 24);
- bool sctp = FALSE;
+ bool sctp = false;
if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
(ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
- sctp = TRUE;
+ sctp = true;
+ /* IPv4 checksum */
if (status & IXGBE_RXD_STAT_IPCS) {
- if (!(errors & IXGBE_RXD_ERR_IPE)) {
- /* IP Checksum Good */
- mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
- mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
-
- } else
- mp->m_pkthdr.csum_flags = 0;
+ mp->m_pkthdr.csum_flags |= CSUM_L3_CALC;
+ /* IP Checksum Good */
+ if (!(errors & IXGBE_RXD_ERR_IPE))
+ mp->m_pkthdr.csum_flags |= CSUM_L3_VALID;
}
+ /* TCP/UDP/SCTP checksum */
if (status & IXGBE_RXD_STAT_L4CS) {
- u64 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
-#if __FreeBSD_version >= 800000
- if (sctp)
- type = CSUM_SCTP_VALID;
-#endif
+ mp->m_pkthdr.csum_flags |= CSUM_L4_CALC;
if (!(errors & IXGBE_RXD_ERR_TCPE)) {
- mp->m_pkthdr.csum_flags |= type;
+ mp->m_pkthdr.csum_flags |= CSUM_L4_VALID;
if (!sctp)
mp->m_pkthdr.csum_data = htons(0xffff);
- }
+ }
}
- return;
}
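
The rewritten ixgbe_rx_checksum() reports results through the newer receive-side flag names. A short summary of the convention it relies on (treat the exact stack-side behaviour as an assumption here, not something this diff establishes):

	/*
	 * CSUM_L3_CALC  - hardware examined the IPv4 header checksum
	 * CSUM_L3_VALID - ... and found it correct
	 * CSUM_L4_CALC  - hardware examined the TCP/UDP/SCTP checksum
	 * CSUM_L4_VALID - ... and found it correct
	 *
	 * For TCP/UDP the driver also sets csum_data = 0xffff, the
	 * conventional "fully verified, pseudo-header included" value, so the
	 * stack can skip its own verification.  SCTP uses CRC32c rather than
	 * the Internet checksum, which is why csum_data is left alone when
	 * sctp is true.
	 */
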
/********************************************************************
diff --git a/sys/dev/ixgbe/ixgbe.h b/sys/dev/ixgbe/ixgbe.h
index f7721f6..008ced3 100644
--- a/sys/dev/ixgbe/ixgbe.h
+++ b/sys/dev/ixgbe/ixgbe.h
@@ -141,12 +141,6 @@
#define DBA_ALIGN 128
/*
- * This parameter controls the maximum no of times the driver will loop in
- * the isr. Minimum Value = 1
- */
-#define MAX_LOOP 10
-
-/*
* This is the max watchdog interval, ie. the time that can
* pass between any two TX clean operations, such only happening
* when the TX hardware is functioning.
@@ -162,9 +156,11 @@
/* These defines are used in MTU calculations */
#define IXGBE_MAX_FRAME_SIZE 9728
-#define IXGBE_MTU_HDR (ETHER_HDR_LEN + ETHER_CRC_LEN + \
+#define IXGBE_MTU_HDR (ETHER_HDR_LEN + ETHER_CRC_LEN)
+#define IXGBE_MTU_HDR_VLAN (ETHER_HDR_LEN + ETHER_CRC_LEN + \
ETHER_VLAN_ENCAP_LEN)
#define IXGBE_MAX_MTU (IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR)
+#define IXGBE_MAX_MTU_VLAN (IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR_VLAN)
/* Flow control constants */
#define IXGBE_FC_PAUSE 0xFFFF
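
With the header macros split into plain and VLAN variants above, the arithmetic works out as follows (standard Ethernet sizes: 14-byte header, 4-byte CRC, 4-byte VLAN tag):

	IXGBE_MTU_HDR       = 14 + 4      = 18
	IXGBE_MTU_HDR_VLAN  = 14 + 4 + 4  = 22
	IXGBE_MAX_MTU       = 9728 - 18   = 9710
	IXGBE_MAX_MTU_VLAN  = 9728 - 22   = 9706
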
@@ -181,6 +177,9 @@
* modern Intel CPUs, results in 40 bytes wasted and a significant drop
* in observed efficiency of the optimization, 97.9% -> 81.8%.
*/
+#if __FreeBSD_version < 1002000
+#define MPKTHSIZE (sizeof(struct m_hdr) + sizeof(struct pkthdr))
+#endif
#define IXGBE_RX_COPY_HDR_PADDED ((((MPKTHSIZE - 1) / 32) + 1) * 32)
#define IXGBE_RX_COPY_LEN (MSIZE - IXGBE_RX_COPY_HDR_PADDED)
#define IXGBE_RX_COPY_ALIGN (IXGBE_RX_COPY_HDR_PADDED - MPKTHSIZE)
@@ -211,7 +210,6 @@
#define MSIX_82598_BAR 3
#define MSIX_82599_BAR 4
#define IXGBE_TSO_SIZE 262140
-#define IXGBE_TX_BUFFER_SIZE ((u32) 1514)
#define IXGBE_RX_HDR 128
#define IXGBE_VFTA_SIZE 128
#define IXGBE_BR_SIZE 4096
@@ -221,8 +219,12 @@
#define IXV_EITR_DEFAULT 128
-/* Offload bits in mbuf flag */
-#if __FreeBSD_version >= 800000
+/* Supported offload bits in mbuf flag */
+#if __FreeBSD_version >= 1000000
+#define CSUM_OFFLOAD (CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP| \
+ CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \
+ CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP)
+#elif __FreeBSD_version >= 800000
#define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP)
#else
#define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP)
@@ -243,7 +245,11 @@
#define IXGBE_LOW_LATENCY 128
#define IXGBE_AVE_LATENCY 400
#define IXGBE_BULK_LATENCY 1200
-#define IXGBE_LINK_ITR 2000
+
+/* Using 1FF (the max value), the interval is ~1.05ms */
+#define IXGBE_LINK_ITR_QUANTA 0x1FF
+#define IXGBE_LINK_ITR ((IXGBE_LINK_ITR_QUANTA << 3) & \
+ IXGBE_EITR_ITR_INT_MASK)
/* MAC type macros */
#define IXGBE_IS_X550VF(_adapter) \
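
The new IXGBE_LINK_ITR shifts the quanta into the EITR interval field (bits 11:3). The "~1.05ms" in the comment assumes the roughly 2.048 us per-count EITR granularity these MACs use; under that assumption:

	IXGBE_LINK_ITR = (0x1FF << 3) & IXGBE_EITR_ITR_INT_MASK = 0xFF8
	0x1FF (511) quanta * ~2.048 us = ~1046 us = ~1.05 ms between link interrupts
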
@@ -449,11 +455,11 @@ struct ixgbe_vf {
/* Our adapter structure */
struct adapter {
- struct ifnet *ifp;
struct ixgbe_hw hw;
-
struct ixgbe_osdep osdep;
+
struct device *dev;
+ struct ifnet *ifp;
struct resource *pci_mem;
struct resource *msix_mem;
@@ -491,6 +497,7 @@ struct adapter {
u32 optics;
u32 fc; /* local flow ctrl setting */
int advertise; /* link speeds */
+ bool enable_aim; /* adaptive interrupt moderation */
bool link_active;
u16 max_frame_size;
u16 num_segs;
@@ -555,6 +562,10 @@ struct adapter {
#ifdef PCI_IOV
struct ixgbe_vf *vfs;
#endif
+#ifdef DEV_NETMAP
+ void (*init_locked)(struct adapter *);
+ void (*stop_locked)(void *);
+#endif
/* Misc stats maintained by the driver */
unsigned long dropped_pkts;
diff --git a/sys/dev/ixgbe/ixgbe_82598.c b/sys/dev/ixgbe/ixgbe_82598.c
index 46e64c5..c10e23a 100644
--- a/sys/dev/ixgbe/ixgbe_82598.c
+++ b/sys/dev/ixgbe/ixgbe_82598.c
@@ -660,7 +660,7 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
&adapt_comp_reg);
if (link_up_wait_to_complete) {
- for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
+ for (i = 0; i < hw->mac.max_link_up_time; i++) {
if ((link_reg & 1) &&
((adapt_comp_reg & 1) == 0)) {
*link_up = TRUE;
@@ -689,7 +689,7 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
if (link_up_wait_to_complete) {
- for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
+ for (i = 0; i < hw->mac.max_link_up_time; i++) {
if (links_reg & IXGBE_LINKS_UP) {
*link_up = TRUE;
break;
diff --git a/sys/dev/ixgbe/ixgbe_82599.c b/sys/dev/ixgbe/ixgbe_82599.c
index b38620f..109ed95 100644
--- a/sys/dev/ixgbe/ixgbe_82599.c
+++ b/sys/dev/ixgbe/ixgbe_82599.c
@@ -382,8 +382,8 @@ s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES;
mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
- mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) &
- IXGBE_FWSM_MODE_MASK) ? TRUE : FALSE;
+ mac->arc_subsystem_valid = !!(IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw))
+ & IXGBE_FWSM_MODE_MASK);
hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
@@ -1370,7 +1370,7 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl,
* Continue setup of fdirctrl register bits:
* Turn perfect match filtering on
* Report hash in RSS field of Rx wb descriptor
- * Initialize the drop queue
+ * Initialize the drop queue to queue 127
* Move the flexible bytes to use the ethertype - shift 6 words
* Set the maximum length per hash bucket to 0xA filters
* Send interrupt when 64 (0x4 * 16) filters are left
@@ -1381,6 +1381,9 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl,
(0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
(0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
(4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
+ if ((hw->mac.type == ixgbe_mac_X550) ||
+ (hw->mac.type == ixgbe_mac_X550EM_x))
+ fdirctrl |= IXGBE_FDIRCTRL_DROP_NO_MATCH;
if (cloud_mode)
fdirctrl |=(IXGBE_FDIRCTRL_FILTERMODE_CLOUD <<
@@ -1392,6 +1395,39 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl,
return IXGBE_SUCCESS;
}
+/**
+ * ixgbe_set_fdir_drop_queue_82599 - Set Flow Director drop queue
+ * @hw: pointer to hardware structure
+ * @dropqueue: Rx queue index used for the dropped packets
+ **/
+void ixgbe_set_fdir_drop_queue_82599(struct ixgbe_hw *hw, u8 dropqueue)
+{
+ u32 fdirctrl;
+
+ DEBUGFUNC("ixgbe_set_fdir_drop_queue_82599");
+ /* Clear init done bit and drop queue field */
+ fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
+ fdirctrl &= ~(IXGBE_FDIRCTRL_DROP_Q_MASK | IXGBE_FDIRCTRL_INIT_DONE);
+
+ /* Set drop queue */
+ fdirctrl |= (dropqueue << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
+ if ((hw->mac.type == ixgbe_mac_X550) ||
+ (hw->mac.type == ixgbe_mac_X550EM_x))
+ fdirctrl |= IXGBE_FDIRCTRL_DROP_NO_MATCH;
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
+ (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
+ IXGBE_FDIRCMD_CLEARHT));
+ IXGBE_WRITE_FLUSH(hw);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
+ (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
+ ~IXGBE_FDIRCMD_CLEARHT));
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* write hashes and fdirctrl register, poll for completion */
+ ixgbe_fdir_enable_82599(hw, fdirctrl);
+}
+
/*
* These defines allow us to quickly generate all of the necessary instructions
* in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
@@ -1492,16 +1528,15 @@ u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
* Note that the tunnel bit in input must not be set when the hardware
* tunneling support does not exist.
**/
-s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
- union ixgbe_atr_hash_dword input,
- union ixgbe_atr_hash_dword common,
- u8 queue)
+void ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_hash_dword input,
+ union ixgbe_atr_hash_dword common,
+ u8 queue)
{
u64 fdirhashcmd;
u8 flow_type;
bool tunnel;
u32 fdircmd;
- s32 err;
DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");
@@ -1523,7 +1558,7 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
break;
default:
DEBUGOUT(" Error on flow type input\n");
- return IXGBE_ERR_CONFIG;
+ return;
}
/* configure FDIRCMD register */
@@ -1542,15 +1577,9 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
- err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
- if (err) {
- DEBUGOUT("Flow Director command did not complete!\n");
- return err;
- }
-
DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
- return IXGBE_SUCCESS;
+ return;
}
#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
diff --git a/sys/dev/ixgbe/ixgbe_api.c b/sys/dev/ixgbe/ixgbe_api.c
index 9784e3c..0f4d973 100644
--- a/sys/dev/ixgbe/ixgbe_api.c
+++ b/sys/dev/ixgbe/ixgbe_api.c
@@ -35,8 +35,10 @@
#include "ixgbe_api.h"
#include "ixgbe_common.h"
+#define IXGBE_EMPTY_PARAM
+
static const u32 ixgbe_mvals_base[IXGBE_MVALS_IDX_LIMIT] = {
- IXGBE_MVALS_INIT()
+ IXGBE_MVALS_INIT(IXGBE_EMPTY_PARAM)
};
static const u32 ixgbe_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = {
@@ -113,6 +115,7 @@ s32 ixgbe_init_shared_code(struct ixgbe_hw *hw)
status = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
break;
}
+ hw->mac.max_link_up_time = IXGBE_LINK_UP_TIME;
return status;
}
@@ -187,6 +190,7 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
hw->mvals = ixgbe_mvals_X540;
break;
case IXGBE_DEV_ID_X550T:
+ case IXGBE_DEV_ID_X550T1:
hw->mac.type = ixgbe_mac_X550;
hw->mvals = ixgbe_mvals_X550;
break;
diff --git a/sys/dev/ixgbe/ixgbe_api.h b/sys/dev/ixgbe/ixgbe_api.h
index 8c2c4a8..24d5070 100644
--- a/sys/dev/ixgbe/ixgbe_api.h
+++ b/sys/dev/ixgbe/ixgbe_api.h
@@ -148,10 +148,10 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl,
bool cloud_mode);
-s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
- union ixgbe_atr_hash_dword input,
- union ixgbe_atr_hash_dword common,
- u8 queue);
+void ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_hash_dword input,
+ union ixgbe_atr_hash_dword common,
+ u8 queue);
s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input_mask, bool cloud_mode);
s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
@@ -180,6 +180,7 @@ s32 ixgbe_read_i2c_combined_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg,
u16 *val);
s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
u8 data);
+void ixgbe_set_fdir_drop_queue_82599(struct ixgbe_hw *hw, u8 dropqueue);
s32 ixgbe_write_i2c_byte_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data);
s32 ixgbe_write_i2c_combined(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val);
diff --git a/sys/dev/ixgbe/ixgbe_common.c b/sys/dev/ixgbe/ixgbe_common.c
index f0a0776..feb74f6 100644
--- a/sys/dev/ixgbe/ixgbe_common.c
+++ b/sys/dev/ixgbe/ixgbe_common.c
@@ -70,7 +70,7 @@ s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
{
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
struct ixgbe_mac_info *mac = &hw->mac;
- u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
DEBUGFUNC("ixgbe_init_ops_generic");
@@ -188,6 +188,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X540T1:
case IXGBE_DEV_ID_X540_BYPASS:
case IXGBE_DEV_ID_X550T:
+ case IXGBE_DEV_ID_X550T1:
case IXGBE_DEV_ID_X550EM_X_10G_T:
supported = TRUE;
break;
@@ -198,9 +199,12 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
break;
}
- ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
+ if (!supported) {
+ ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
"Device %x does not support flow control autoneg",
hw->device_id);
+ }
+
return supported;
}
@@ -1038,7 +1042,7 @@ void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
bus->lan_id = bus->func;
/* check for a port swap */
- reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
+ reg = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
if (reg & IXGBE_FACTPS_LFS)
bus->func ^= 0x1;
}
@@ -1164,7 +1168,7 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
* Check for EEPROM present first.
* If not present leave as none
*/
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
if (eec & IXGBE_EEC_PRES) {
eeprom->type = ixgbe_eeprom_spi;
@@ -1725,14 +1729,14 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
status = IXGBE_ERR_SWFW_SYNC;
if (status == IXGBE_SUCCESS) {
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
/* Request EEPROM Access */
eec |= IXGBE_EEC_REQ;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
if (eec & IXGBE_EEC_GNT)
break;
usec_delay(5);
@@ -1741,7 +1745,7 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
/* Release if grant not acquired */
if (!(eec & IXGBE_EEC_GNT)) {
eec &= ~IXGBE_EEC_REQ;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
DEBUGOUT("Could not acquire EEPROM grant\n");
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
@@ -1752,7 +1756,7 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
if (status == IXGBE_SUCCESS) {
/* Clear CS and SK */
eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
IXGBE_WRITE_FLUSH(hw);
usec_delay(1);
}
@@ -1782,7 +1786,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
* If the SMBI bit is 0 when we read it, then the bit will be
* set and we have the semaphore
*/
- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
if (!(swsm & IXGBE_SWSM_SMBI)) {
status = IXGBE_SUCCESS;
break;
@@ -1807,7 +1811,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
* If the SMBI bit is 0 when we read it, then the bit will be
* set and we have the semaphore
*/
- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
if (!(swsm & IXGBE_SWSM_SMBI))
status = IXGBE_SUCCESS;
}
@@ -1815,17 +1819,17 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
/* Now get the semaphore between SW/FW through the SWESMBI bit */
if (status == IXGBE_SUCCESS) {
for (i = 0; i < timeout; i++) {
- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
/* Set the SW EEPROM semaphore bit to request access */
swsm |= IXGBE_SWSM_SWESMBI;
- IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
+ IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm);
/*
* If we set the bit successfully then we got the
* semaphore.
*/
- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
if (swsm & IXGBE_SWSM_SWESMBI)
break;
@@ -1922,15 +1926,15 @@ static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_standby_eeprom");
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
/* Toggle CS to flush commands */
eec |= IXGBE_EEC_CS;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
IXGBE_WRITE_FLUSH(hw);
usec_delay(1);
eec &= ~IXGBE_EEC_CS;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
IXGBE_WRITE_FLUSH(hw);
usec_delay(1);
}
@@ -1950,7 +1954,7 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
DEBUGFUNC("ixgbe_shift_out_eeprom_bits");
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
/*
* Mask is used to shift "count" bits of "data" out to the EEPROM
@@ -1971,7 +1975,7 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
else
eec &= ~IXGBE_EEC_DI;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
IXGBE_WRITE_FLUSH(hw);
usec_delay(1);
@@ -1988,7 +1992,7 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
/* We leave the "DI" bit set to "0" when we leave this routine. */
eec &= ~IXGBE_EEC_DI;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
IXGBE_WRITE_FLUSH(hw);
}
@@ -2011,7 +2015,7 @@ static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
* the value of the "DO" bit. During this "shifting in" process the
* "DI" bit should always be clear.
*/
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
@@ -2019,7 +2023,7 @@ static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
data = data << 1;
ixgbe_raise_eeprom_clk(hw, &eec);
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
eec &= ~(IXGBE_EEC_DI);
if (eec & IXGBE_EEC_DO)
@@ -2045,7 +2049,7 @@ static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
* (setting the SK bit), then delay
*/
*eec = *eec | IXGBE_EEC_SK;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
IXGBE_WRITE_FLUSH(hw);
usec_delay(1);
}
@@ -2064,7 +2068,7 @@ static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
* delay
*/
*eec = *eec & ~IXGBE_EEC_SK;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
IXGBE_WRITE_FLUSH(hw);
usec_delay(1);
}
@@ -2079,19 +2083,19 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_release_eeprom");
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
eec |= IXGBE_EEC_CS; /* Pull CS high */
eec &= ~IXGBE_EEC_SK; /* Lower SCK */
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
IXGBE_WRITE_FLUSH(hw);
usec_delay(1);
/* Stop requesting EEPROM access */
eec &= ~IXGBE_EEC_REQ;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
@@ -3147,6 +3151,9 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+ if (hw->mac.type >= ixgbe_mac_X550)
+ goto out;
+
/*
* Before proceeding, make sure that the PCIe block does not have
* transactions pending.
@@ -4069,7 +4076,7 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
}
if (link_up_wait_to_complete) {
- for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
+ for (i = 0; i < hw->mac.max_link_up_time; i++) {
if (links_reg & IXGBE_LINKS_UP) {
*link_up = TRUE;
break;
@@ -4715,7 +4722,7 @@ bool ixgbe_mng_present(struct ixgbe_hw *hw)
if (hw->mac.type < ixgbe_mac_82599EB)
return FALSE;
- fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
+ fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
fwsm &= IXGBE_FWSM_MODE_MASK;
return fwsm == IXGBE_FWSM_FW_MODE_PT;
}
@@ -4730,7 +4737,7 @@ bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
{
u32 fwsm, manc, factps;
- fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
+ fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
return FALSE;
@@ -4739,7 +4746,7 @@ bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
return FALSE;
if (hw->mac.type <= ixgbe_mac_X540) {
- factps = IXGBE_READ_REG(hw, IXGBE_FACTPS);
+ factps = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
if (factps & IXGBE_FACTPS_MNGCG)
return FALSE;
}
diff --git a/sys/dev/ixgbe/ixgbe_dcb.c b/sys/dev/ixgbe/ixgbe_dcb.c
index 3659d17..437d336 100644
--- a/sys/dev/ixgbe/ixgbe_dcb.c
+++ b/sys/dev/ixgbe/ixgbe_dcb.c
@@ -148,6 +148,11 @@ s32 ixgbe_dcb_calculate_tc_credits_cee(struct ixgbe_hw *hw,
/* Calculate credit refill ratio using multiplier */
credit_refill = min(link_percentage * min_multiplier,
(u32)IXGBE_DCB_MAX_CREDIT_REFILL);
+
+ /* Refill at least minimum credit */
+ if (credit_refill < min_credit)
+ credit_refill = min_credit;
+
p->data_credits_refill = (u16)credit_refill;
/* Calculate maximum credit for the TC */
@@ -158,7 +163,7 @@ s32 ixgbe_dcb_calculate_tc_credits_cee(struct ixgbe_hw *hw,
* of a TC is too small, the maximum credit may not be
* enough to send out a jumbo frame in data plane arbitration.
*/
- if (credit_max && (credit_max < min_credit))
+ if (credit_max < min_credit)
credit_max = min_credit;
if (direction == IXGBE_DCB_TX_CONFIG) {
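
The added clamp ensures each traffic class is refilled with at least enough credit for one maximum-size frame. A minimal sketch of the resulting refill computation (illustrative names, not the shared-code function itself):

	static unsigned int
	tc_credit_refill(unsigned int link_percentage, unsigned int min_multiplier,
	    unsigned int min_credit, unsigned int max_refill)
	{
		unsigned int refill = link_percentage * min_multiplier;

		if (refill > max_refill)	/* IXGBE_DCB_MAX_CREDIT_REFILL cap */
			refill = max_refill;
		if (refill < min_credit)	/* new: at least one max frame */
			refill = min_credit;
		return (refill);
	}
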
diff --git a/sys/dev/ixgbe/LICENSE b/sys/dev/ixgbe/ixgbe_osdep.c
index 394b094..6a4e2fc 100644
--- a/sys/dev/ixgbe/LICENSE
+++ b/sys/dev/ixgbe/ixgbe_osdep.c
@@ -31,3 +31,58 @@
******************************************************************************/
/*$FreeBSD$*/
+
+#include "ixgbe_osdep.h"
+#include "ixgbe.h"
+
+inline device_t
+ixgbe_dev_from_hw(struct ixgbe_hw *hw)
+{
+ return ((struct adapter *)hw->back)->dev;
+}
+
+inline u16
+ixgbe_read_pci_cfg(struct ixgbe_hw *hw, u32 reg)
+{
+ return pci_read_config(((struct adapter *)hw->back)->dev,
+ reg, 2);
+}
+
+inline void
+ixgbe_write_pci_cfg(struct ixgbe_hw *hw, u32 reg, u16 value)
+{
+ pci_write_config(((struct adapter *)hw->back)->dev,
+ reg, value, 2);
+}
+
+inline u32
+ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
+{
+ return bus_space_read_4(((struct adapter *)hw->back)->osdep.mem_bus_space_tag,
+ ((struct adapter *)hw->back)->osdep.mem_bus_space_handle,
+ reg);
+}
+
+inline void
+ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 val)
+{
+ bus_space_write_4(((struct adapter *)hw->back)->osdep.mem_bus_space_tag,
+ ((struct adapter *)hw->back)->osdep.mem_bus_space_handle,
+ reg, val);
+}
+
+inline u32
+ixgbe_read_reg_array(struct ixgbe_hw *hw, u32 reg, u32 offset)
+{
+ return bus_space_read_4(((struct adapter *)hw->back)->osdep.mem_bus_space_tag,
+ ((struct adapter *)hw->back)->osdep.mem_bus_space_handle,
+ reg + (offset << 2));
+}
+
+inline void
+ixgbe_write_reg_array(struct ixgbe_hw *hw, u32 reg, u32 offset, u32 val)
+{
+ bus_space_write_4(((struct adapter *)hw->back)->osdep.mem_bus_space_tag,
+ ((struct adapter *)hw->back)->osdep.mem_bus_space_handle,
+ reg + (offset << 2), val);
+}
diff --git a/sys/dev/ixgbe/ixgbe_osdep.h b/sys/dev/ixgbe/ixgbe_osdep.h
index 95f6ed5c..79d1166 100644
--- a/sys/dev/ixgbe/ixgbe_osdep.h
+++ b/sys/dev/ixgbe/ixgbe_osdep.h
@@ -57,11 +57,20 @@
#define ASSERT(x) if(!(x)) panic("IXGBE: x")
#define EWARN(H, W, S) printf(W)
+enum {
+ IXGBE_ERROR_SOFTWARE,
+ IXGBE_ERROR_POLLING,
+ IXGBE_ERROR_INVALID_STATE,
+ IXGBE_ERROR_UNSUPPORTED,
+ IXGBE_ERROR_ARGUMENT,
+ IXGBE_ERROR_CAUTION,
+};
+
/* The happy-fun DELAY macro is defined in /usr/src/sys/i386/include/clock.h */
#define usec_delay(x) DELAY(x)
#define msec_delay(x) DELAY(1000*(x))
-#define DBG 0
+#define DBG 0
#define MSGOUT(S, A, B) printf(S "\n", A, B)
#define DEBUGFUNC(F) DEBUGOUT(F);
#if DBG
@@ -73,9 +82,23 @@
#define DEBUGOUT5(S,A,B,C,D,E) printf(S "\n",A,B,C,D,E)
#define DEBUGOUT6(S,A,B,C,D,E,F) printf(S "\n",A,B,C,D,E,F)
#define DEBUGOUT7(S,A,B,C,D,E,F,G) printf(S "\n",A,B,C,D,E,F,G)
- #define ERROR_REPORT1(S,A) printf(S "\n",A)
- #define ERROR_REPORT2(S,A,B) printf(S "\n",A,B)
- #define ERROR_REPORT3(S,A,B,C) printf(S "\n",A,B,C)
+ #define ERROR_REPORT1 ERROR_REPORT
+ #define ERROR_REPORT2 ERROR_REPORT
+ #define ERROR_REPORT3 ERROR_REPORT
+ #define ERROR_REPORT(level, format, arg...) do { \
+ switch (level) { \
+ case IXGBE_ERROR_SOFTWARE: \
+ case IXGBE_ERROR_CAUTION: \
+ case IXGBE_ERROR_POLLING: \
+ case IXGBE_ERROR_INVALID_STATE: \
+ case IXGBE_ERROR_UNSUPPORTED: \
+ case IXGBE_ERROR_ARGUMENT: \
+ device_printf(ixgbe_dev_from_hw(hw), format, ## arg); \
+ break; \
+ default: \
+ break; \
+ } \
+ } while (0)
#else
#define DEBUGOUT(S)
#define DEBUGOUT1(S,A)
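
Note that the DBG variant of ERROR_REPORT expands to device_printf(ixgbe_dev_from_hw(hw), ...) without taking hw as a parameter, so it only compiles where a local named hw is in scope. That matches the shared-code call sites, e.g.:

	ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
	    "Device %x does not support flow control autoneg", hw->device_id);
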
@@ -165,7 +188,7 @@ void prefetch(void *x)
* non-overlapping regions and 32-byte padding on both src and dst.
*/
static __inline int
-ixgbe_bcopy(void *_src, void *_dst, int l)
+ixgbe_bcopy(void *restrict _src, void *restrict _dst, int l)
{
uint64_t *src = _src;
uint64_t *dst = _dst;
@@ -183,11 +206,13 @@ struct ixgbe_osdep
{
bus_space_tag_t mem_bus_space_tag;
bus_space_handle_t mem_bus_space_handle;
- struct device *dev;
};
-/* These routines are needed by the shared code */
+/* These routines need struct ixgbe_hw declared */
struct ixgbe_hw;
+device_t ixgbe_dev_from_hw(struct ixgbe_hw *hw);
+
+/* These routines are needed by the shared code */
extern u16 ixgbe_read_pci_cfg(struct ixgbe_hw *, u32);
#define IXGBE_READ_PCIE_WORD ixgbe_read_pci_cfg
@@ -196,26 +221,18 @@ extern void ixgbe_write_pci_cfg(struct ixgbe_hw *, u32, u16);
#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
-#define IXGBE_READ_REG(a, reg) (\
- bus_space_read_4( ((struct ixgbe_osdep *)(a)->back)->mem_bus_space_tag, \
- ((struct ixgbe_osdep *)(a)->back)->mem_bus_space_handle, \
- reg))
-
-#define IXGBE_WRITE_REG(a, reg, value) (\
- bus_space_write_4( ((struct ixgbe_osdep *)(a)->back)->mem_bus_space_tag, \
- ((struct ixgbe_osdep *)(a)->back)->mem_bus_space_handle, \
- reg, value))
-
+extern u32 ixgbe_read_reg(struct ixgbe_hw *, u32);
+#define IXGBE_READ_REG(a, reg) ixgbe_read_reg(a, reg)
-#define IXGBE_READ_REG_ARRAY(a, reg, offset) (\
- bus_space_read_4( ((struct ixgbe_osdep *)(a)->back)->mem_bus_space_tag, \
- ((struct ixgbe_osdep *)(a)->back)->mem_bus_space_handle, \
- (reg + ((offset) << 2))))
+extern void ixgbe_write_reg(struct ixgbe_hw *, u32, u32);
+#define IXGBE_WRITE_REG(a, reg, val) ixgbe_write_reg(a, reg, val)
-#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) (\
- bus_space_write_4( ((struct ixgbe_osdep *)(a)->back)->mem_bus_space_tag, \
- ((struct ixgbe_osdep *)(a)->back)->mem_bus_space_handle, \
- (reg + ((offset) << 2)), value))
+extern u32 ixgbe_read_reg_array(struct ixgbe_hw *, u32, u32);
+#define IXGBE_READ_REG_ARRAY(a, reg, offset) \
+ ixgbe_read_reg_array(a, reg, offset)
+extern void ixgbe_write_reg_array(struct ixgbe_hw *, u32, u32, u32);
+#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, val) \
+ ixgbe_write_reg_array(a, reg, offset, val)
#endif /* _IXGBE_OS_H_ */
diff --git a/sys/dev/ixgbe/ixgbe_phy.c b/sys/dev/ixgbe/ixgbe_phy.c
index 88206c7..f5d22b2 100644
--- a/sys/dev/ixgbe/ixgbe_phy.c
+++ b/sys/dev/ixgbe/ixgbe_phy.c
@@ -508,7 +508,9 @@ enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
case TN1010_PHY_ID:
phy_type = ixgbe_phy_tn;
break;
- case X550_PHY_ID:
+ case X550_PHY_ID1:
+ case X550_PHY_ID2:
+ case X550_PHY_ID3:
case X540_PHY_ID:
phy_type = ixgbe_phy_aq;
break;
@@ -830,7 +832,7 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
if (hw->mac.type == ixgbe_mac_X550) {
if (speed & IXGBE_LINK_SPEED_5GB_FULL) {
- /* Set or unset auto-negotiation 1G advertisement */
+ /* Set or unset auto-negotiation 5G advertisement */
hw->phy.ops.read_reg(hw,
IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
@@ -848,7 +850,7 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
}
if (speed & IXGBE_LINK_SPEED_2_5GB_FULL) {
- /* Set or unset auto-negotiation 1G advertisement */
+ /* Set or unset auto-negotiation 2.5G advertisement */
hw->phy.ops.read_reg(hw,
IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
@@ -950,54 +952,70 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
/* Setup link based on the new speed settings */
- hw->phy.ops.setup_link(hw);
+ ixgbe_setup_phy_link(hw);
return IXGBE_SUCCESS;
}
/**
- * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities
- * @hw: pointer to hardware structure
- * @speed: pointer to link speed
- * @autoneg: boolean auto-negotiation value
+ * ixgbe_get_copper_speeds_supported - Get copper link speeds from phy
+ * @hw: pointer to hardware structure
*
- * Determines the supported link capabilities by reading the PHY auto
- * negotiation register.
+ * Determines the supported link capabilities by reading the PHY auto
+ * negotiation register.
**/
-s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *autoneg)
+static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw)
{
s32 status;
u16 speed_ability;
- DEBUGFUNC("ixgbe_get_copper_link_capabilities_generic");
-
- *speed = 0;
- *autoneg = TRUE;
-
status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY,
IXGBE_MDIO_PMA_PMD_DEV_TYPE,
&speed_ability);
+ if (status)
+ return status;
- if (status == IXGBE_SUCCESS) {
- if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G)
- *speed |= IXGBE_LINK_SPEED_10GB_FULL;
- if (speed_ability & IXGBE_MDIO_PHY_SPEED_1G)
- *speed |= IXGBE_LINK_SPEED_1GB_FULL;
- if (speed_ability & IXGBE_MDIO_PHY_SPEED_100M)
- *speed |= IXGBE_LINK_SPEED_100_FULL;
+ if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL;
+ if (speed_ability & IXGBE_MDIO_PHY_SPEED_1G)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL;
+ if (speed_ability & IXGBE_MDIO_PHY_SPEED_100M)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550:
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
+ break;
+ case ixgbe_mac_X550EM_x:
+ hw->phy.speeds_supported &= ~IXGBE_LINK_SPEED_100_FULL;
+ break;
+ default:
+ break;
}
- /* Internal PHY does not support 100 Mbps */
- if (hw->mac.type == ixgbe_mac_X550EM_x)
- *speed &= ~IXGBE_LINK_SPEED_100_FULL;
+ return status;
+}
+
+/**
+ * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: boolean auto-negotiation value
+ **/
+s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ s32 status = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_get_copper_link_capabilities_generic");
- if (hw->mac.type == ixgbe_mac_X550) {
- *speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
- *speed |= IXGBE_LINK_SPEED_5GB_FULL;
- }
+ *autoneg = TRUE;
+ if (!hw->phy.speeds_supported)
+ status = ixgbe_get_copper_speeds_supported(hw);
+ *speed = hw->phy.speeds_supported;
return status;
}
@@ -2716,6 +2734,9 @@ s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on)
u32 status;
u16 reg;
+ if (!on && ixgbe_mng_present(hw))
+ return 0;
+
status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL,
IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
&reg);
diff --git a/sys/dev/ixgbe/ixgbe_phy.h b/sys/dev/ixgbe/ixgbe_phy.h
index fad31bd..3713e28 100644
--- a/sys/dev/ixgbe/ixgbe_phy.h
+++ b/sys/dev/ixgbe/ixgbe_phy.h
@@ -92,16 +92,23 @@
#define IXGBE_CS4227_GLOBAL_ID_LSB 0
#define IXGBE_CS4227_SCRATCH 2
#define IXGBE_CS4227_GLOBAL_ID_VALUE 0x03E5
-#define IXGBE_CS4227_SCRATCH_VALUE 0x5aa5
-#define IXGBE_CS4227_RETRIES 5
+#define IXGBE_CS4227_RESET_PENDING 0x1357
+#define IXGBE_CS4227_RESET_COMPLETE 0x5AA5
+#define IXGBE_CS4227_RETRIES 15
+#define IXGBE_CS4227_EFUSE_STATUS 0x0181
#define IXGBE_CS4227_LINE_SPARE22_MSB 0x12AD /* Reg to program speed */
#define IXGBE_CS4227_LINE_SPARE24_LSB 0x12B0 /* Reg to program EDC */
#define IXGBE_CS4227_HOST_SPARE22_MSB 0x1AAD /* Reg to program speed */
#define IXGBE_CS4227_HOST_SPARE24_LSB 0x1AB0 /* Reg to program EDC */
+#define IXGBE_CS4227_EEPROM_STATUS 0x5001
+#define IXGBE_CS4227_EEPROM_LOAD_OK 0x0001
+#define IXGBE_CS4227_SPEED_1G 0x8000
+#define IXGBE_CS4227_SPEED_10G 0
#define IXGBE_CS4227_EDC_MODE_CX1 0x0002
#define IXGBE_CS4227_EDC_MODE_SR 0x0004
+#define IXGBE_CS4227_EDC_MODE_DIAG 0x0008
#define IXGBE_CS4227_RESET_HOLD 500 /* microseconds */
-#define IXGBE_CS4227_RESET_DELAY 500 /* milliseconds */
+#define IXGBE_CS4227_RESET_DELAY 450 /* milliseconds */
#define IXGBE_CS4227_CHECK_DELAY 30 /* milliseconds */
#define IXGBE_PE 0xE0 /* Port expander address */
#define IXGBE_PE_OUTPUT 1 /* Output register offset */
diff --git a/sys/dev/ixgbe/ixgbe_type.h b/sys/dev/ixgbe/ixgbe_type.h
index 2a53952..da03f79 100644
--- a/sys/dev/ixgbe/ixgbe_type.h
+++ b/sys/dev/ixgbe/ixgbe_type.h
@@ -131,6 +131,7 @@
#define IXGBE_DEV_ID_X540_BYPASS 0x155C
#define IXGBE_DEV_ID_X540T1 0x1560
#define IXGBE_DEV_ID_X550T 0x1563
+#define IXGBE_DEV_ID_X550T1 0x15D1
#define IXGBE_DEV_ID_X550EM_X_KX4 0x15AA
#define IXGBE_DEV_ID_X550EM_X_KR 0x15AB
#define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC
@@ -1478,7 +1479,10 @@ struct ixgbe_dmac_config {
#define IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK 0xFF01 /* int chip-wide mask */
#define IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG 0xFC01 /* int chip-wide mask */
#define IXGBE_MDIO_GLOBAL_ALARM_1 0xCC00 /* Global alarm 1 */
+#define IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT 0x0010 /* device fault */
#define IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL 0x4000 /* high temp failure */
+#define IXGBE_MDIO_GLOBAL_FAULT_MSG 0xC850 /* Global Fault Message */
+#define IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP 0x8007 /* high temp failure */
#define IXGBE_MDIO_GLOBAL_INT_MASK 0xD400 /* Global int mask */
#define IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN 0x1000 /* autoneg vendor alarm int enable */
#define IXGBE_MDIO_GLOBAL_ALARM_1_INT 0x4 /* int in Global alarm 1 */
@@ -1544,7 +1548,9 @@ struct ixgbe_dmac_config {
#define TN1010_PHY_ID 0x00A19410
#define TNX_FW_REV 0xB
#define X540_PHY_ID 0x01540200
-#define X550_PHY_ID 0x01540220
+#define X550_PHY_ID1 0x01540220
+#define X550_PHY_ID2 0x01540223
+#define X550_PHY_ID3 0x01540221
#define X557_PHY_ID 0x01540240
#define AQ_FW_REV 0x20
#define QT2022_PHY_ID 0x0043A400
@@ -1937,6 +1943,7 @@ enum {
* FIP (0x8914): Filter 4
* LLDP (0x88CC): Filter 5
* LACP (0x8809): Filter 6
+ * FC (0x8808): Filter 7
*/
#define IXGBE_ETQF_FILTER_EAPOL 0
#define IXGBE_ETQF_FILTER_FCOE 2
@@ -1944,6 +1951,7 @@ enum {
#define IXGBE_ETQF_FILTER_FIP 4
#define IXGBE_ETQF_FILTER_LLDP 5
#define IXGBE_ETQF_FILTER_LACP 6
+#define IXGBE_ETQF_FILTER_FC 7
/* VLAN Control Bit Masks */
#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */
#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */
@@ -2803,7 +2811,9 @@ enum ixgbe_fdir_pballoc_type {
#define IXGBE_FDIRCTRL_REPORT_STATUS 0x00000020
#define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS 0x00000080
#define IXGBE_FDIRCTRL_DROP_Q_SHIFT 8
+#define IXGBE_FDIRCTRL_DROP_Q_MASK 0x00007F00
#define IXGBE_FDIRCTRL_FLEX_SHIFT 16
+#define IXGBE_FDIRCTRL_DROP_NO_MATCH 0x00008000
#define IXGBE_FDIRCTRL_FILTERMODE_SHIFT 21
#define IXGBE_FDIRCTRL_FILTERMODE_MACVLAN 0x0001 /* bit 23:21, 001b */
#define IXGBE_FDIRCTRL_FILTERMODE_CLOUD 0x0002 /* bit 23:21, 010b */
@@ -2907,6 +2917,11 @@ enum ixgbe_fdir_pballoc_type {
#define FW_DISABLE_RXEN_CMD 0xDE
#define FW_DISABLE_RXEN_LEN 0x1
#define FW_PHY_MGMT_REQ_CMD 0x20
+#define FW_INT_PHY_REQ_CMD 0xB
+#define FW_INT_PHY_REQ_LEN 10
+#define FW_INT_PHY_REQ_READ 0
+#define FW_INT_PHY_REQ_WRITE 1
+
/* Host Interface Command Structures */
struct ixgbe_hic_hdr {
@@ -2975,6 +2990,21 @@ struct ixgbe_hic_disable_rxen {
u16 pad3;
};
+struct ixgbe_hic_internal_phy_req {
+ struct ixgbe_hic_hdr hdr;
+ u8 port_number;
+ u8 command_type;
+ u16 address;
+ u16 rsv1;
+ u32 write_data;
+ u16 pad;
+};
+
+struct ixgbe_hic_internal_phy_resp {
+ struct ixgbe_hic_hdr hdr;
+ u32 read_data;
+};
+
/* Transmit Descriptor - Legacy */
struct ixgbe_legacy_tx_desc {
@@ -3310,6 +3340,7 @@ union ixgbe_atr_hash_dword {
IXGBE_CAT(SRAMREL, m), \
IXGBE_CAT(FACTPS, m), \
IXGBE_CAT(SWSM, m), \
+ IXGBE_CAT(SWFW_SYNC, m), \
IXGBE_CAT(FWSM, m), \
IXGBE_CAT(SDP0_GPIEN, m), \
IXGBE_CAT(SDP1_GPIEN, m), \
@@ -3792,6 +3823,7 @@ struct ixgbe_mac_info {
u8 flags;
struct ixgbe_dmac_config dmac_config;
bool set_lben;
+ u32 max_link_up_time;
};
struct ixgbe_phy_info {
@@ -3806,6 +3838,7 @@ struct ixgbe_phy_info {
u32 phy_semaphore_mask;
bool reset_disable;
ixgbe_autoneg_advertised autoneg_advertised;
+ ixgbe_link_speed speeds_supported;
enum ixgbe_smart_speed smart_speed;
bool smart_speed_active;
bool multispeed_fiber;
@@ -3918,15 +3951,15 @@ struct ixgbe_hw {
#define IXGBE_FUSES0_300MHZ (1 << 5)
#define IXGBE_FUSES0_REV1 (1 << 6)
-#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P == 0) ? (0x4010) : (0x8010))
-#define IXGBE_KRM_LINK_CTRL_1(P) ((P == 0) ? (0x420C) : (0x820C))
-#define IXGBE_KRM_AN_CNTL_1(P) ((P == 0) ? (0x422C) : (0x822C))
-#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P == 0) ? (0x4634) : (0x8634))
-#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P == 0) ? (0x4638) : (0x8638))
-#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P == 0) ? (0x4B00) : (0x8B00))
-#define IXGBE_KRM_PMD_DFX_BURNIN(P) ((P == 0) ? (0x4E00) : (0x8E00))
-#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P == 0) ? (0x5520) : (0x9520))
-#define IXGBE_KRM_RX_ANA_CTL(P) ((P == 0) ? (0x5A00) : (0x9A00))
+#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010)
+#define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C)
+#define IXGBE_KRM_AN_CNTL_1(P) ((P) ? 0x822C : 0x422C)
+#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634)
+#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P) ? 0x8638 : 0x4638)
+#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P) ? 0x8B00 : 0x4B00)
+#define IXGBE_KRM_PMD_DFX_BURNIN(P) ((P) ? 0x8E00 : 0x4E00)
+#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P) ? 0x9520 : 0x5520)
+#define IXGBE_KRM_RX_ANA_CTL(P) ((P) ? 0x9A00 : 0x5A00)
#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B (1 << 9)
#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS (1 << 11)
@@ -3960,15 +3993,6 @@ struct ixgbe_hw {
#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN (1 << 3)
#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN (1 << 31)
-#define IXGBE_KX4_LINK_CNTL_1 0x4C
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX (1 << 16)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 (1 << 17)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX (1 << 24)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX4 (1 << 25)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE (1 << 29)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_FORCE_LINK_UP (1 << 30)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART (1 << 31)
-
#define IXGBE_SB_IOSF_INDIRECT_CTRL 0x00011144
#define IXGBE_SB_IOSF_INDIRECT_DATA 0x00011148
@@ -3985,8 +4009,6 @@ struct ixgbe_hw {
#define IXGBE_SB_IOSF_CTRL_BUSY_SHIFT 31
#define IXGBE_SB_IOSF_CTRL_BUSY (1 << IXGBE_SB_IOSF_CTRL_BUSY_SHIFT)
#define IXGBE_SB_IOSF_TARGET_KR_PHY 0
-#define IXGBE_SB_IOSF_TARGET_KX4_PHY 1
-#define IXGBE_SB_IOSF_TARGET_KX4_PCS 2
#define IXGBE_NW_MNG_IF_SEL 0x00011178
#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE (1 << 24)
diff --git a/sys/dev/ixgbe/ixgbe_vf.c b/sys/dev/ixgbe/ixgbe_vf.c
index a00b8be..2ce4d32 100644
--- a/sys/dev/ixgbe/ixgbe_vf.c
+++ b/sys/dev/ixgbe/ixgbe_vf.c
@@ -225,8 +225,6 @@ s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw)
if (ret_val)
return ret_val;
- msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
-
if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) &&
msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK))
return IXGBE_ERR_INVALID_MAC_ADDR;
diff --git a/sys/dev/ixgbe/ixgbe_x540.c b/sys/dev/ixgbe/ixgbe_x540.c
index ddf0674..fd52ebe 100644
--- a/sys/dev/ixgbe/ixgbe_x540.c
+++ b/sys/dev/ixgbe/ixgbe_x540.c
@@ -82,8 +82,7 @@ s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw)
/* PHY */
phy->ops.init = ixgbe_init_phy_ops_generic;
phy->ops.reset = NULL;
- if (!ixgbe_mng_present(hw))
- phy->ops.set_phy_power = ixgbe_set_copper_phy_power;
+ phy->ops.set_phy_power = ixgbe_set_copper_phy_power;
/* MAC */
mac->ops.reset_hw = ixgbe_reset_hw_X540;
@@ -138,8 +137,8 @@ s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw)
* ARC supported; valid only if manageability features are
* enabled.
*/
- mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) &
- IXGBE_FWSM_MODE_MASK) ? TRUE : FALSE;
+ mac->arc_subsystem_valid = !!(IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw))
+ & IXGBE_FWSM_MODE_MASK);
hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
@@ -356,7 +355,7 @@ s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
eeprom->semaphore_delay = 10;
eeprom->type = ixgbe_flash;
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
IXGBE_EEC_SIZE_SHIFT);
eeprom->word_size = 1 << (eeprom_size +
@@ -681,8 +680,8 @@ s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
goto out;
}
- flup = IXGBE_READ_REG(hw, IXGBE_EEC) | IXGBE_EEC_FLUP;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
+ flup = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)) | IXGBE_EEC_FLUP;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), flup);
status = ixgbe_poll_flash_update_done_X540(hw);
if (status == IXGBE_SUCCESS)
@@ -691,11 +690,11 @@ s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
DEBUGOUT("Flash update time out\n");
if (hw->mac.type == ixgbe_mac_X540 && hw->revision_id == 0) {
- flup = IXGBE_READ_REG(hw, IXGBE_EEC);
+ flup = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
if (flup & IXGBE_EEC_SEC1VAL) {
flup |= IXGBE_EEC_FLUP;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), flup);
}
status = ixgbe_poll_flash_update_done_X540(hw);
@@ -724,7 +723,7 @@ static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_poll_flash_update_done_X540");
for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) {
- reg = IXGBE_READ_REG(hw, IXGBE_EEC);
+ reg = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
if (reg & IXGBE_EEC_FLUDONE) {
status = IXGBE_SUCCESS;
break;
@@ -775,10 +774,11 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
if (ixgbe_get_swfw_sync_semaphore(hw))
return IXGBE_ERR_SWFW_SYNC;
- swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw));
if (!(swfw_sync & (fwmask | swmask | hwmask))) {
swfw_sync |= swmask;
- IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw),
+ swfw_sync);
ixgbe_release_swfw_sync_semaphore(hw);
msec_delay(5);
return IXGBE_SUCCESS;
@@ -805,10 +805,10 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
*/
if (ixgbe_get_swfw_sync_semaphore(hw))
return IXGBE_ERR_SWFW_SYNC;
- swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw));
if (swfw_sync & (fwmask | hwmask)) {
swfw_sync |= swmask;
- IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), swfw_sync);
ixgbe_release_swfw_sync_semaphore(hw);
msec_delay(5);
return IXGBE_SUCCESS;
@@ -852,9 +852,9 @@ void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
swmask |= mask & IXGBE_GSSR_I2C_MASK;
ixgbe_get_swfw_sync_semaphore(hw);
- swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw));
swfw_sync &= ~swmask;
- IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), swfw_sync);
ixgbe_release_swfw_sync_semaphore(hw);
msec_delay(5);
@@ -881,7 +881,7 @@ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
* If the SMBI bit is 0 when we read it, then the bit will be
* set and we have the semaphore
*/
- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
if (!(swsm & IXGBE_SWSM_SMBI)) {
status = IXGBE_SUCCESS;
break;
@@ -892,7 +892,7 @@ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
/* Now get the semaphore between SW/FW through the REGSMP bit */
if (status == IXGBE_SUCCESS) {
for (i = 0; i < timeout; i++) {
- swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw));
if (!(swsm & IXGBE_SWFW_REGSMP))
break;
@@ -932,13 +932,13 @@ static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
/* Release both semaphores by writing 0 to the bits REGSMP and SMBI */
- swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw));
swsm &= ~IXGBE_SWFW_REGSMP;
- IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swsm);
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), swsm);
- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
swsm &= ~IXGBE_SWSM_SMBI;
- IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
+ IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm);
IXGBE_WRITE_FLUSH(hw);
}
diff --git a/sys/dev/ixgbe/ixgbe_x550.c b/sys/dev/ixgbe/ixgbe_x550.c
index 65daa17..1199d38 100644
--- a/sys/dev/ixgbe/ixgbe_x550.c
+++ b/sys/dev/ixgbe/ixgbe_x550.c
@@ -115,119 +115,6 @@ static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value)
}
/**
- * ixgbe_get_cs4227_status - Return CS4227 status
- * @hw: pointer to hardware structure
- *
- * Returns error if CS4227 not successfully initialized
- **/
-static s32 ixgbe_get_cs4227_status(struct ixgbe_hw *hw)
-{
- s32 status;
- u16 value = 0;
- u16 reg_slice, reg_val;
- u8 retry;
-
- for (retry = 0; retry < IXGBE_CS4227_RETRIES; ++retry) {
- status = ixgbe_read_cs4227(hw, IXGBE_CS4227_GLOBAL_ID_LSB,
- &value);
- if (status != IXGBE_SUCCESS)
- return status;
- if (value == IXGBE_CS4227_GLOBAL_ID_VALUE)
- break;
- msec_delay(IXGBE_CS4227_CHECK_DELAY);
- }
- if (value != IXGBE_CS4227_GLOBAL_ID_VALUE)
- return IXGBE_ERR_PHY;
-
- status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value);
- if (status != IXGBE_SUCCESS)
- return status;
-
- /* If this is the first time after power-on, check the ucode.
- * Otherwise, this will disrupt link on all ports. Because we
- * can only do this the first time, we must check all ports,
- * not just our own.
- */
- if (value != IXGBE_CS4227_SCRATCH_VALUE) {
- reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB;
- reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
- status = ixgbe_write_cs4227(hw, reg_slice,
- reg_val);
- if (status != IXGBE_SUCCESS)
- return status;
-
- reg_slice = IXGBE_CS4227_HOST_SPARE24_LSB;
- reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
- status = ixgbe_write_cs4227(hw, reg_slice,
- reg_val);
- if (status != IXGBE_SUCCESS)
- return status;
-
- reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + (1 << 12);
- reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
- status = ixgbe_write_cs4227(hw, reg_slice,
- reg_val);
- if (status != IXGBE_SUCCESS)
- return status;
-
- reg_slice = IXGBE_CS4227_HOST_SPARE24_LSB + (1 << 12);
- reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
- status = ixgbe_write_cs4227(hw, reg_slice,
- reg_val);
- if (status != IXGBE_SUCCESS)
- return status;
-
- msec_delay(10);
- }
-
- /* Verify that the ucode is operational on all ports. */
- reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB;
- reg_val = 0xFFFF;
- status = ixgbe_read_cs4227(hw, reg_slice, &reg_val);
- if (status != IXGBE_SUCCESS)
- return status;
- if (reg_val != 0)
- return IXGBE_ERR_PHY;
-
- reg_slice = IXGBE_CS4227_HOST_SPARE24_LSB;
- reg_val = 0xFFFF;
- status = ixgbe_read_cs4227(hw, reg_slice, &reg_val);
- if (status != IXGBE_SUCCESS)
- return status;
- if (reg_val != 0)
- return IXGBE_ERR_PHY;
-
- reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + (1 << 12);
- reg_val = 0xFFFF;
- status = ixgbe_read_cs4227(hw, reg_slice, &reg_val);
- if (status != IXGBE_SUCCESS)
- return status;
- if (reg_val != 0)
- return IXGBE_ERR_PHY;
-
- reg_slice = IXGBE_CS4227_HOST_SPARE24_LSB + (1 << 12);
- reg_val = 0xFFFF;
- status = ixgbe_read_cs4227(hw, reg_slice, &reg_val);
- if (status != IXGBE_SUCCESS)
- return status;
- if (reg_val != 0)
- return IXGBE_ERR_PHY;
-
- /* Set scratch for next time. */
- status = ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
- IXGBE_CS4227_SCRATCH_VALUE);
- if (status != IXGBE_SUCCESS)
- return status;
- status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value);
- if (status != IXGBE_SUCCESS)
- return status;
- if (value != IXGBE_CS4227_SCRATCH_VALUE)
- return IXGBE_ERR_PHY;
-
- return IXGBE_SUCCESS;
-}
-
-/**
* ixgbe_read_pe - Read register from port expander
* @hw: pointer to hardware structure
* @reg: register number to read
@@ -269,13 +156,17 @@ static s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value)
* ixgbe_reset_cs4227 - Reset CS4227 using port expander
* @hw: pointer to hardware structure
*
+ * This function assumes that the caller has acquired the proper semaphore.
* Returns error code
**/
static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
{
s32 status;
+ u32 retry;
+ u16 value;
u8 reg;
+ /* Trigger hard reset. */
status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
if (status != IXGBE_SUCCESS)
return status;
@@ -310,7 +201,29 @@ static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
if (status != IXGBE_SUCCESS)
return status;
+ /* Wait for the reset to complete. */
msec_delay(IXGBE_CS4227_RESET_DELAY);
+ for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
+ status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EFUSE_STATUS,
+ &value);
+ if (status == IXGBE_SUCCESS &&
+ value == IXGBE_CS4227_EEPROM_LOAD_OK)
+ break;
+ msec_delay(IXGBE_CS4227_CHECK_DELAY);
+ }
+ if (retry == IXGBE_CS4227_RETRIES) {
+ ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
+ "CS4227 reset did not complete.");
+ return IXGBE_ERR_PHY;
+ }
+
+ status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EEPROM_STATUS, &value);
+ if (status != IXGBE_SUCCESS ||
+ !(value & IXGBE_CS4227_EEPROM_LOAD_OK)) {
+ ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
+ "CS4227 EEPROM did not load successfully.");
+ return IXGBE_ERR_PHY;
+ }
return IXGBE_SUCCESS;
}
@@ -321,29 +234,75 @@ static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
**/
static void ixgbe_check_cs4227(struct ixgbe_hw *hw)
{
+ s32 status = IXGBE_SUCCESS;
u32 swfw_mask = hw->phy.phy_semaphore_mask;
- s32 status;
+ u16 value = 0;
u8 retry;
for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
if (status != IXGBE_SUCCESS) {
ERROR_REPORT2(IXGBE_ERROR_CAUTION,
- "semaphore failed with %d\n", status);
- return;
+ "semaphore failed with %d", status);
+ msec_delay(IXGBE_CS4227_CHECK_DELAY);
+ continue;
}
- status = ixgbe_get_cs4227_status(hw);
- if (status == IXGBE_SUCCESS) {
- hw->mac.ops.release_swfw_sync(hw, swfw_mask);
- msec_delay(hw->eeprom.semaphore_delay);
+
+ /* Get status of reset flow. */
+ status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value);
+
+ if (status == IXGBE_SUCCESS &&
+ value == IXGBE_CS4227_RESET_COMPLETE)
+ goto out;
+
+ if (status != IXGBE_SUCCESS ||
+ value != IXGBE_CS4227_RESET_PENDING)
+ break;
+
+ /* Reset is pending. Wait and check again. */
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ msec_delay(IXGBE_CS4227_CHECK_DELAY);
+ }
+
+ /* If still pending, assume other instance failed. */
+ if (retry == IXGBE_CS4227_RETRIES) {
+ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (status != IXGBE_SUCCESS) {
+ ERROR_REPORT2(IXGBE_ERROR_CAUTION,
+ "semaphore failed with %d", status);
return;
}
- ixgbe_reset_cs4227(hw);
- hw->mac.ops.release_swfw_sync(hw, swfw_mask);
- msec_delay(hw->eeprom.semaphore_delay);
}
- ERROR_REPORT2(IXGBE_ERROR_CAUTION,
- "Unable to initialize CS4227, err=%d\n", status);
+
+ /* Reset the CS4227. */
+ status = ixgbe_reset_cs4227(hw);
+ if (status != IXGBE_SUCCESS) {
+ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+ "CS4227 reset failed: %d", status);
+ goto out;
+ }
+
+	/* The reset takes so long that we temporarily release the semaphore
+	 * in case the other driver instance is waiting for the reset indication.
+	 */
+ ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
+ IXGBE_CS4227_RESET_PENDING);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ msec_delay(10);
+ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (status != IXGBE_SUCCESS) {
+ ERROR_REPORT2(IXGBE_ERROR_CAUTION,
+ "semaphore failed with %d", status);
+ return;
+ }
+
+ /* Record completion for next time. */
+ status = ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
+ IXGBE_CS4227_RESET_COMPLETE);
+
+out:
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ msec_delay(hw->eeprom.semaphore_delay);
}
/**
@@ -452,8 +411,11 @@ s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw)
hw->bus.type = ixgbe_bus_type_internal;
mac->ops.get_bus_info = ixgbe_get_bus_info_X550em;
- mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
- mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
+ if (hw->mac.type == ixgbe_mac_X550EM_x) {
+ mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
+ mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
+ }
+
mac->ops.get_media_type = ixgbe_get_media_type_X550em;
mac->ops.setup_sfp = ixgbe_setup_sfp_modules_X550em;
mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_X550em;
@@ -679,7 +641,7 @@ s32 ixgbe_setup_eee_X550(struct ixgbe_hw *hw, bool enable_eee)
if (enable_eee) {
eeer |= (IXGBE_EEER_TX_LPI_EN | IXGBE_EEER_RX_LPI_EN);
- if (hw->device_id == IXGBE_DEV_ID_X550T) {
+ if (hw->mac.type == ixgbe_mac_X550) {
/* Advertise EEE capability */
hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_eee_reg);
@@ -717,7 +679,7 @@ s32 ixgbe_setup_eee_X550(struct ixgbe_hw *hw, bool enable_eee)
} else {
eeer &= ~(IXGBE_EEER_TX_LPI_EN | IXGBE_EEER_RX_LPI_EN);
- if (hw->device_id == IXGBE_DEV_ID_X550T) {
+ if (hw->mac.type == ixgbe_mac_X550) {
/* Disable advertised EEE capability */
hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_eee_reg);
@@ -816,7 +778,7 @@ void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
**/
static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
{
- u32 i, command;
+ u32 i, command = 0;
/* Check every 10 usec to see if the address cycle completed.
* The SB IOSF BUSY bit will clear when the operation is
@@ -1003,7 +965,7 @@ void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf)
num_qs = 4; /* 32 VFs / pools */
bitmask = 0x0000000F;
break;
- default: /* 64 VFs / pools */
+ default: /* 64 VFs / pools */
num_qs = 2;
bitmask = 0x00000003;
break;
@@ -1302,7 +1264,7 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
IXGBE_MDIO_GLOBAL_ALARM_1_INT)))
return status;
- /* High temperature failure alarm triggered */
+ /* Global alarm triggered */
status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1,
IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
&reg);
@@ -1315,6 +1277,21 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
/* power down the PHY in case the PHY FW didn't already */
ixgbe_set_copper_phy_power(hw, FALSE);
return IXGBE_ERR_OVERTEMP;
+ } else if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) {
+ /* device fault alarm triggered */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_FAULT_MSG,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+		/* if device fault was due to high temp alarm, handle and exit */
+ if (reg == IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP) {
+ /* power down the PHY in case the PHY FW didn't */
+ ixgbe_set_copper_phy_power(hw, FALSE);
+ return IXGBE_ERR_OVERTEMP;
+ }
}
/* Vendor alarm 2 triggered */
@@ -1444,8 +1421,6 @@ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
return status;
reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
- reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ |
- IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC);
reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
@@ -1492,14 +1467,10 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
* to determine internal PHY mode.
*/
phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
-
- /* If internal PHY mode is KR, then initialize KR link */
if (phy->nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE) {
speed = IXGBE_LINK_SPEED_10GB_FULL |
IXGBE_LINK_SPEED_1GB_FULL;
- ret_val = ixgbe_setup_kr_speed_x550em(hw, speed);
}
-
phy->ops.identify_sfp = ixgbe_identify_sfp_module_X550em;
}
@@ -1516,7 +1487,7 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
/* Set functions pointers based on phy type */
switch (hw->phy.type) {
case ixgbe_phy_x550em_kx4:
- phy->ops.setup_link = ixgbe_setup_kx4_x550em;
+ phy->ops.setup_link = NULL;
phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
break;
@@ -1543,7 +1514,11 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
ret_val = ixgbe_setup_kr_speed_x550em(hw, speed);
}
- phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em;
+ /* setup SW LPLU only for first revision */
+ if (!(IXGBE_FUSES0_REV1 & IXGBE_READ_REG(hw,
+ IXGBE_FUSES0_GROUP(0))))
+ phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em;
+
phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em;
phy->ops.reset = ixgbe_reset_phy_t_X550em;
break;
@@ -1580,9 +1555,14 @@ s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
/* flush pending Tx transactions */
ixgbe_clear_tx_pending(hw);
- /* PHY ops must be identified and initialized prior to reset */
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
+ /* Config MDIO clock speed before the first MDIO PHY access */
+ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+ }
- /* Identify PHY and related function pointers */
+ /* PHY ops must be identified and initialized prior to reset */
status = hw->phy.ops.init(hw);
if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
@@ -1659,13 +1639,6 @@ mac_reset_top:
hw->mac.num_rar_entries = 128;
hw->mac.ops.init_rx_addrs(hw);
- if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
- /* Config MDIO clock speed. */
- hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
- hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
- IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
- }
-
if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
ixgbe_setup_mux_ctl(hw);
@@ -1727,43 +1700,6 @@ s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
}
/**
- * ixgbe_setup_kx4_x550em - Configure the KX4 PHY.
- * @hw: pointer to hardware structure
- *
- * Configures the integrated KX4 PHY.
- **/
-s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw)
-{
- s32 status;
- u32 reg_val;
-
- status = ixgbe_read_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1,
- IXGBE_SB_IOSF_TARGET_KX4_PCS, &reg_val);
- if (status)
- return status;
-
- reg_val &= ~(IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 |
- IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX);
-
- reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE;
-
- /* Advertise 10G support. */
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
- reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4;
-
- /* Advertise 1G support. */
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
- reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX;
-
- /* Restart auto-negotiation. */
- reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART;
- status = ixgbe_write_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1,
- IXGBE_SB_IOSF_TARGET_KX4_PCS, reg_val);
-
- return status;
-}
-
-/**
* ixgbe_setup_mac_link_sfp_x550em - Setup internal/external the PHY for SFP
* @hw: pointer to hardware structure
*
@@ -1791,38 +1727,53 @@ s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
if (ret_val != IXGBE_SUCCESS)
return ret_val;
- /* Configure CS4227 for LINE connection rate then type. */
- reg_slice = IXGBE_CS4227_LINE_SPARE22_MSB + (hw->bus.lan_id << 12);
- reg_val = (speed & IXGBE_LINK_SPEED_10GB_FULL) ? 0 : 0x8000;
- ret_val = ixgbe_write_i2c_combined(hw, IXGBE_CS4227, reg_slice,
- reg_val);
-
- reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + (hw->bus.lan_id << 12);
- if (setup_linear)
- reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
- else
- reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
- ret_val = ixgbe_write_i2c_combined(hw, IXGBE_CS4227, reg_slice,
- reg_val);
-
- /* Configure CS4227 for HOST connection rate then type. */
- reg_slice = IXGBE_CS4227_HOST_SPARE22_MSB + (hw->bus.lan_id << 12);
- reg_val = (speed & IXGBE_LINK_SPEED_10GB_FULL) ? 0 : 0x8000;
- ret_val = ixgbe_write_i2c_combined(hw, IXGBE_CS4227, reg_slice,
- reg_val);
-
- reg_slice = IXGBE_CS4227_HOST_SPARE24_LSB + (hw->bus.lan_id << 12);
- if (setup_linear)
- reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
- else
+ if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
+ /* Configure CS4227 LINE side to 10G SR. */
+ reg_slice = IXGBE_CS4227_LINE_SPARE22_MSB +
+ (hw->bus.lan_id << 12);
+ reg_val = IXGBE_CS4227_SPEED_10G;
+ ret_val = ixgbe_write_i2c_combined(hw, IXGBE_CS4227, reg_slice,
+ reg_val);
+
+ reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB +
+ (hw->bus.lan_id << 12);
reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
- ret_val = ixgbe_write_i2c_combined(hw, IXGBE_CS4227, reg_slice,
- reg_val);
+ ret_val = ixgbe_write_i2c_combined(hw, IXGBE_CS4227, reg_slice,
+ reg_val);
+
+ /* Configure CS4227 for HOST connection rate then type. */
+ reg_slice = IXGBE_CS4227_HOST_SPARE22_MSB +
+ (hw->bus.lan_id << 12);
+ reg_val = (speed & IXGBE_LINK_SPEED_10GB_FULL) ?
+ IXGBE_CS4227_SPEED_10G : IXGBE_CS4227_SPEED_1G;
+ ret_val = ixgbe_write_i2c_combined(hw, IXGBE_CS4227, reg_slice,
+ reg_val);
+
+ reg_slice = IXGBE_CS4227_HOST_SPARE24_LSB +
+ (hw->bus.lan_id << 12);
+ if (setup_linear)
+ reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
+ else
+ reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
+ ret_val = ixgbe_write_i2c_combined(hw, IXGBE_CS4227, reg_slice,
+ reg_val);
- /* If internal link mode is XFI, then setup XFI internal link. */
- if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE))
+ /* Setup XFI internal link. */
ret_val = ixgbe_setup_ixfi_x550em(hw, &speed);
-
+ } else {
+ /* Configure internal PHY for KR/KX. */
+ ixgbe_setup_kr_speed_x550em(hw, speed);
+
+ /* Configure CS4227 LINE side to proper mode. */
+ reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB +
+ (hw->bus.lan_id << 12);
+ if (setup_linear)
+ reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
+ else
+ reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
+ ret_val = ixgbe_write_i2c_combined(hw, IXGBE_CS4227, reg_slice,
+ reg_val);
+ }
return ret_val;
}
@@ -2743,6 +2694,10 @@ s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
u32 save_autoneg;
bool link_up;
+ /* SW LPLU not required on later HW revisions. */
+ if (IXGBE_FUSES0_REV1 & IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)))
+ return IXGBE_SUCCESS;
+
/* If blocked by MNG FW, then don't restart AN */
if (ixgbe_check_reset_blocked(hw))
return IXGBE_SUCCESS;
@@ -2924,7 +2879,7 @@ s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw)
goto out;
}
- if (hw->phy.media_type == ixgbe_media_type_backplane) {
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) {
ret_val = ixgbe_read_iosf_sb_reg_x550(hw,
IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
@@ -2940,9 +2895,8 @@ s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw)
IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
- /* Not all devices fully support AN. */
- if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR)
- hw->fc.disable_fc_autoneg = TRUE;
+ /* This device does not fully support AN. */
+ hw->fc.disable_fc_autoneg = TRUE;
}
out:
diff --git a/sys/dev/ixgbe/ixgbe_x550.h b/sys/dev/ixgbe/ixgbe_x550.h
index 8a544ec..b590bb7 100644
--- a/sys/dev/ixgbe/ixgbe_x550.h
+++ b/sys/dev/ixgbe/ixgbe_x550.h
@@ -69,7 +69,7 @@ void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u32 data);
s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u32 *data);
+ u32 device_type, u32 *data);
void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw);
void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw);
void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap);
@@ -82,7 +82,6 @@ void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw);
s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw);
s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw);
s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw);
-s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw);
s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw);
s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw);
s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw);
diff --git a/sys/dev/random/live_entropy_sources.c b/sys/dev/random/live_entropy_sources.c
index d406ebd..d25201a 100644
--- a/sys/dev/random/live_entropy_sources.c
+++ b/sys/dev/random/live_entropy_sources.c
@@ -189,7 +189,7 @@ live_entropy_sources_deinit(void *unused)
sx_destroy(&les_lock);
}
-SYSINIT(random_adaptors, SI_SUB_DRIVERS, SI_ORDER_FIRST,
+SYSINIT(random_adaptors, SI_SUB_RANDOM, SI_ORDER_FIRST,
live_entropy_sources_init, NULL);
-SYSUNINIT(random_adaptors, SI_SUB_DRIVERS, SI_ORDER_FIRST,
+SYSUNINIT(random_adaptors, SI_SUB_RANDOM, SI_ORDER_FIRST,
live_entropy_sources_deinit, NULL);
diff --git a/sys/dev/random/live_entropy_sources.h b/sys/dev/random/live_entropy_sources.h
index 9a23070..a1eec3e 100644
--- a/sys/dev/random/live_entropy_sources.h
+++ b/sys/dev/random/live_entropy_sources.h
@@ -52,7 +52,7 @@ void live_entropy_sources_feed(int, event_proc_f);
modevent, \
0 \
}; \
- DECLARE_MODULE(name, name##_mod, SI_SUB_DRIVERS, \
+ DECLARE_MODULE(name, name##_mod, SI_SUB_RANDOM, \
SI_ORDER_SECOND); \
MODULE_VERSION(name, ver); \
MODULE_DEPEND(name, random, 1, 1, 1);
diff --git a/sys/dev/random/random_adaptors.c b/sys/dev/random/random_adaptors.c
index 4f5ad2c..d17abf7 100644
--- a/sys/dev/random/random_adaptors.c
+++ b/sys/dev/random/random_adaptors.c
@@ -233,9 +233,9 @@ random_adaptors_init(void *unused)
SYSCTL_NODE(_kern, OID_AUTO, random, CTLFLAG_RW, 0, "Random Number Generator");
-SYSINIT(random_adaptors, SI_SUB_DRIVERS, SI_ORDER_FIRST, random_adaptors_init,
+SYSINIT(random_adaptors, SI_SUB_RANDOM, SI_ORDER_FIRST, random_adaptors_init,
NULL);
-SYSUNINIT(random_adaptors, SI_SUB_DRIVERS, SI_ORDER_FIRST,
+SYSUNINIT(random_adaptors, SI_SUB_RANDOM, SI_ORDER_FIRST,
random_adaptors_deinit, NULL);
static void
diff --git a/sys/dev/random/random_adaptors.h b/sys/dev/random/random_adaptors.h
index 4765694..b878b28 100644
--- a/sys/dev/random/random_adaptors.h
+++ b/sys/dev/random/random_adaptors.h
@@ -47,7 +47,7 @@ extern struct random_adaptor *random_adaptor;
/*
* random_adaptor's should be registered prior to
- * random module (SI_SUB_DRIVERS/SI_ORDER_MIDDLE)
+ * random module (SI_SUB_RANDOM/SI_ORDER_MIDDLE)
*/
#define RANDOM_ADAPTOR_MODULE(name, modevent, ver) \
static moduledata_t name##_mod = { \
@@ -55,7 +55,7 @@ extern struct random_adaptor *random_adaptor;
modevent, \
0 \
}; \
- DECLARE_MODULE(name, name##_mod, SI_SUB_DRIVERS, \
+ DECLARE_MODULE(name, name##_mod, SI_SUB_RANDOM, \
SI_ORDER_SECOND); \
MODULE_VERSION(name, ver); \
MODULE_DEPEND(name, random, 1, 1, 1);
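With the SYSINITs and module-declaration macros above moved from SI_SUB_DRIVERS to SI_SUB_RANDOM, adaptor modules now register before ordinary device drivers run their initializers, which the relocated kthread and devfs SYSINITs later in this change rely on. A minimal sketch of a consumer of the macro, assuming a hypothetical adaptor named example_adaptor whose modevent handler does nothing useful yet:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>

#include <dev/random/random_adaptors.h>

static int
example_adaptor_modevent(module_t mod __unused, int type, void *unused __unused)
{

	switch (type) {
	case MOD_LOAD:
		/* A real adaptor would register its struct random_adaptor here. */
		return (0);
	case MOD_UNLOAD:
		return (0);
	default:
		return (EOPNOTSUPP);
	}
}

/* Expands to DECLARE_MODULE(..., SI_SUB_RANDOM, SI_ORDER_SECOND). */
RANDOM_ADAPTOR_MODULE(example_adaptor, example_adaptor_modevent, 1);

The DECLARE_MODULE change in randomdev.c below follows the same pattern for the random(4) pseudo-device itself.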
diff --git a/sys/dev/random/random_harvestq.c b/sys/dev/random/random_harvestq.c
index b7b8381..2638ea3 100644
--- a/sys/dev/random/random_harvestq.c
+++ b/sys/dev/random/random_harvestq.c
@@ -81,6 +81,8 @@ int random_kthread_control = 0;
static struct proc *random_kthread_proc;
+static event_proc_f random_cb;
+
#ifdef RANDOM_RWFILE
static const char *entropy_files[] = {
"/entropy",
@@ -219,7 +221,7 @@ random_kthread(void *arg)
void
random_harvestq_init(event_proc_f cb)
{
- int error, i;
+ int i;
struct harvest *np;
/* Initialise the harvest fifos */
@@ -238,13 +240,26 @@ random_harvestq_init(event_proc_f cb)
mtx_init(&harvest_mtx, "entropy harvest mutex", NULL, MTX_SPIN);
+ random_cb = cb;
+}
+
+static void
+random_harvestq_start_kproc(void *arg __unused)
+{
+ int error;
+
+ if (random_cb == NULL)
+ return;
+
/* Start the hash/reseed thread */
- error = kproc_create(random_kthread, cb,
+ error = kproc_create(random_kthread, random_cb,
&random_kthread_proc, RFHIGHPID, 0, "rand_harvestq"); /* RANDOM_CSPRNG_NAME */
if (error != 0)
panic("Cannot create entropy maintenance thread.");
}
+SYSINIT(random_kthread, SI_SUB_DRIVERS, SI_ORDER_ANY,
+ random_harvestq_start_kproc, NULL);
void
random_harvestq_deinit(void)
@@ -265,6 +280,17 @@ random_harvestq_deinit(void)
}
harvestfifo.count = 0;
+ /*
+ * Command the hash/reseed thread to end and wait for it to finish
+ */
+ mtx_lock_spin(&harvest_mtx);
+ if (random_kthread_proc != NULL) {
+ random_kthread_control = -1;
+ msleep_spin((void *)&random_kthread_control, &harvest_mtx,
+ "term", 0);
+ }
+ mtx_unlock_spin(&harvest_mtx);
+
mtx_destroy(&harvest_mtx);
}
diff --git a/sys/dev/random/randomdev.c b/sys/dev/random/randomdev.c
index b76cb83..a220e7f 100644
--- a/sys/dev/random/randomdev.c
+++ b/sys/dev/random/randomdev.c
@@ -175,15 +175,24 @@ random_initialize(void *p, struct random_adaptor *s)
printf("random: <%s> initialized\n", s->ident);
+ /* mark random(4) as initialized, to avoid being called again */
+ random_inited = 1;
+}
+
+static void
+random_makedev(void *arg __unused)
+{
+
+ if (random_adaptor == NULL)
+ return;
+
/* Use an appropriately evil mode for those who are concerned
* with daemons */
random_dev = make_dev_credf(MAKEDEV_ETERNAL_KLD, &random_cdevsw,
RANDOM_MINOR, NULL, UID_ROOT, GID_WHEEL, 0666, "random");
make_dev_alias(random_dev, "urandom"); /* compatibility */
-
- /* mark random(4) as initialized, to avoid being called again */
- random_inited = 1;
}
+SYSINIT(random_makedev, SI_SUB_DRIVERS, SI_ORDER_ANY, random_makedev, NULL);
/* ARGSUSED */
static int
@@ -229,5 +238,11 @@ random_modevent(module_t mod __unused, int type, void *data __unused)
return (error);
}
-DEV_MODULE(random, random_modevent, NULL);
+static moduledata_t random_mod = {
+ "random",
+ random_modevent,
+ NULL
+};
+
+DECLARE_MODULE(random, random_mod, SI_SUB_RANDOM, SI_ORDER_MIDDLE);
MODULE_VERSION(random, 1);
diff --git a/sys/dev/random/randomdev_soft.c b/sys/dev/random/randomdev_soft.c
index 0929704..6badecc 100644
--- a/sys/dev/random/randomdev_soft.c
+++ b/sys/dev/random/randomdev_soft.c
@@ -182,12 +182,6 @@ randomdev_deinit(void)
/* Deregister the randomness harvesting routine */
randomdev_deinit_harvester();
- /*
- * Command the hash/reseed thread to end and wait for it to finish
- */
- random_kthread_control = -1;
- tsleep((void *)&random_kthread_control, 0, "term", 0);
-
#if defined(RANDOM_YARROW)
random_yarrow_deinit_alg();
#endif
diff --git a/sys/dev/sfxge/common/efx.h b/sys/dev/sfxge/common/efx.h
index ce70546..14374cf 100644
--- a/sys/dev/sfxge/common/efx.h
+++ b/sys/dev/sfxge/common/efx.h
@@ -879,6 +879,14 @@ efx_phy_media_type_get(
__in efx_nic_t *enp,
__out efx_phy_media_type_t *typep);
+extern efx_rc_t
+efx_phy_module_get_info(
+ __in efx_nic_t *enp,
+ __in uint8_t dev_addr,
+ __in uint8_t offset,
+ __in uint8_t len,
+ __out_bcount(len) uint8_t *data);
+
#if EFSYS_OPT_PHY_STATS
/* START MKCONFIG GENERATED PhyHeaderStatsBlock 30ed56ad501f8e36 */
diff --git a/sys/dev/sfxge/common/efx_mcdi.c b/sys/dev/sfxge/common/efx_mcdi.c
index 9f7a9ff..bb9d9cf 100644
--- a/sys/dev/sfxge/common/efx_mcdi.c
+++ b/sys/dev/sfxge/common/efx_mcdi.c
@@ -2078,5 +2078,217 @@ fail1:
return (rc);
}
+/*
+ * Size of media information page in accordance with SFF-8472 and SFF-8436.
+ * It is used in the MCDI interface as well.
+ */
+#define EFX_PHY_MEDIA_INFO_PAGE_SIZE 0x80
+
+static __checkReturn efx_rc_t
+efx_mcdi_get_phy_media_info(
+ __in efx_nic_t *enp,
+ __in uint32_t mcdi_page,
+ __in uint8_t offset,
+ __in uint8_t len,
+ __out_bcount(len) uint8_t *data)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN,
+ MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(
+ EFX_PHY_MEDIA_INFO_PAGE_SIZE))];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT((uint32_t)offset + len <= EFX_PHY_MEDIA_INFO_PAGE_SIZE);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PHY_MEDIA_INFO;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length =
+ MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(EFX_PHY_MEDIA_INFO_PAGE_SIZE);
+
+ MCDI_IN_SET_DWORD(req, GET_PHY_MEDIA_INFO_IN_PAGE, mcdi_page);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used !=
+ MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(EFX_PHY_MEDIA_INFO_PAGE_SIZE)) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (MCDI_OUT_DWORD(req, GET_PHY_MEDIA_INFO_OUT_DATALEN) !=
+ EFX_PHY_MEDIA_INFO_PAGE_SIZE) {
+ rc = EIO;
+ goto fail3;
+ }
+
+ memcpy(data,
+ MCDI_OUT2(req, uint8_t, GET_PHY_MEDIA_INFO_OUT_DATA) + offset,
+ len);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * 2-wire device address of the base information in accordance with SFF-8472
+ * Diagnostic Monitoring Interface for Optical Transceivers section
+ * 4 Memory Organization.
+ */
+#define EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_BASE 0xA0
+
+/*
+ * 2-wire device address of the digital diagnostics monitoring interface
+ * in accordance with SFF-8472 Diagnostic Monitoring Interface for Optical
+ * Transceivers section 4 Memory Organization.
+ */
+#define EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_DDM 0xA2
+
+/*
+ * Hard wired 2-wire device address for QSFP+ in accordance with SFF-8436
+ * QSFP+ 10 Gbs 4X PLUGGABLE TRANSCEIVER section 7.4 Device Addressing and
+ * Operation.
+ */
+#define EFX_PHY_MEDIA_INFO_DEV_ADDR_QSFP 0xA0
+
+ __checkReturn efx_rc_t
+efx_mcdi_phy_module_get_info(
+ __in efx_nic_t *enp,
+ __in uint8_t dev_addr,
+ __in uint8_t offset,
+ __in uint8_t len,
+ __out_bcount(len) uint8_t *data)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_rc_t rc;
+ uint32_t mcdi_lower_page;
+ uint32_t mcdi_upper_page;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+
+ /*
+ * Map device address to MC_CMD_GET_PHY_MEDIA_INFO pages.
+	 * The offset plus length interface only allows access to page 0,
+	 * i.e. non-zero upper pages are not accessible.
+ * See SFF-8472 section 4 Memory Organization and SFF-8436 section 7.6
+ * QSFP+ Memory Map for details on how information is structured
+ * and accessible.
+ */
+ switch (epp->ep_fixed_port_type) {
+ case EFX_PHY_MEDIA_SFP_PLUS:
+ /*
+ * In accordance with SFF-8472 Diagnostic Monitoring
+ * Interface for Optical Transceivers section 4 Memory
+ * Organization two 2-wire addresses are defined.
+ */
+ switch (dev_addr) {
+ /* Base information */
+ case EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_BASE:
+ /*
+ * MCDI page 0 should be used to access lower
+ * page 0 (0x00 - 0x7f) at the device address 0xA0.
+ */
+ mcdi_lower_page = 0;
+ /*
+ * MCDI page 1 should be used to access upper
+ * page 0 (0x80 - 0xff) at the device address 0xA0.
+ */
+ mcdi_upper_page = 1;
+ break;
+ /* Diagnostics */
+ case EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_DDM:
+ /*
+ * MCDI page 2 should be used to access lower
+ * page 0 (0x00 - 0x7f) at the device address 0xA2.
+ */
+ mcdi_lower_page = 2;
+ /*
+ * MCDI page 3 should be used to access upper
+ * page 0 (0x80 - 0xff) at the device address 0xA2.
+ */
+ mcdi_upper_page = 3;
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ break;
+ case EFX_PHY_MEDIA_QSFP_PLUS:
+ switch (dev_addr) {
+ case EFX_PHY_MEDIA_INFO_DEV_ADDR_QSFP:
+ /*
+ * MCDI page -1 should be used to access lower page 0
+ * (0x00 - 0x7f).
+ */
+ mcdi_lower_page = (uint32_t)-1;
+ /*
+ * MCDI page 0 should be used to access upper page 0
+			 * (0x80 - 0xff).
+ */
+ mcdi_upper_page = 0;
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if (offset < EFX_PHY_MEDIA_INFO_PAGE_SIZE) {
+ uint8_t read_len =
+ MIN(len, EFX_PHY_MEDIA_INFO_PAGE_SIZE - offset);
+
+ rc = efx_mcdi_get_phy_media_info(enp,
+ mcdi_lower_page, offset, read_len, data);
+ if (rc != 0)
+ goto fail2;
+
+ data += read_len;
+ len -= read_len;
+
+ offset = 0;
+ } else {
+ offset -= EFX_PHY_MEDIA_INFO_PAGE_SIZE;
+ }
+
+ if (len > 0) {
+ EFSYS_ASSERT3U(len, <=, EFX_PHY_MEDIA_INFO_PAGE_SIZE);
+ EFSYS_ASSERT3U(offset, <, EFX_PHY_MEDIA_INFO_PAGE_SIZE);
+
+ rc = efx_mcdi_get_phy_media_info(enp,
+ mcdi_upper_page, offset, len, data);
+ if (rc != 0)
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
#endif /* EFSYS_OPT_MCDI */
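To summarize the split logic in efx_mcdi_phy_module_get_info() above: a caller's (offset, len) request against a single 2-wire device address is served from at most two MCDI pages of EFX_PHY_MEDIA_INFO_PAGE_SIZE (0x80) bytes each. For a hypothetical read of 0x30 bytes starting at offset 0x70, the first efx_mcdi_get_phy_media_info() call fetches MIN(0x30, 0x80 - 0x70) = 0x10 bytes from mcdi_lower_page at offset 0x70, and the remaining 0x20 bytes come from mcdi_upper_page starting at offset 0.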
diff --git a/sys/dev/sfxge/common/efx_mcdi.h b/sys/dev/sfxge/common/efx_mcdi.h
index dd1d76e..a96bd77 100644
--- a/sys/dev/sfxge/common/efx_mcdi.h
+++ b/sys/dev/sfxge/common/efx_mcdi.h
@@ -228,6 +228,14 @@ efx_mcdi_get_loopback_modes(
__in efx_nic_t *enp);
#endif /* EFSYS_OPT_LOOPBACK */
+extern __checkReturn efx_rc_t
+efx_mcdi_phy_module_get_info(
+ __in efx_nic_t *enp,
+ __in uint8_t dev_addr,
+ __in uint8_t offset,
+ __in uint8_t len,
+ __out_bcount(len) uint8_t *data);
+
#define MCDI_IN(_emr, _type, _ofst) \
((_type *)((_emr).emr_in_buf + (_ofst)))
diff --git a/sys/dev/sfxge/common/efx_phy.c b/sys/dev/sfxge/common/efx_phy.c
index 51e1ccb..3fd4e08 100644
--- a/sys/dev/sfxge/common/efx_phy.c
+++ b/sys/dev/sfxge/common/efx_phy.c
@@ -560,6 +560,38 @@ efx_phy_media_type_get(
*typep = epp->ep_fixed_port_type;
}
+ __checkReturn efx_rc_t
+efx_phy_module_get_info(
+ __in efx_nic_t *enp,
+ __in uint8_t dev_addr,
+ __in uint8_t offset,
+ __in uint8_t len,
+ __out_bcount(len) uint8_t *data)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT(data != NULL);
+
+ if ((uint32_t)offset + len > 0xff) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if ((rc = efx_mcdi_phy_module_get_info(enp, dev_addr,
+ offset, len, data)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
#if EFSYS_OPT_PHY_STATS
#if EFSYS_OPT_NAMES
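As a usage sketch only (the helper name and buffer handling are illustrative; the 0xA0 device address and byte range follow SFF-8472, where the vendor name occupies bytes 20-35 of the base page), a caller of the new efx_phy_module_get_info() might look like:

#include "efx.h"

/* Read the SFP+ vendor name; 0xA0 is the SFF-8472 base (A0h) page. */
static efx_rc_t
example_read_sfp_vendor(efx_nic_t *enp, char vendor[17])
{
	efx_rc_t rc;

	rc = efx_phy_module_get_info(enp, 0xA0, 20, 16, (uint8_t *)vendor);
	if (rc != 0)
		return (rc);
	vendor[16] = '\0';	/* the field is space-padded ASCII */
	return (0);
}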
diff --git a/sys/dev/sfxge/sfxge.c b/sys/dev/sfxge/sfxge.c
index b82fba4..b112a1e 100644
--- a/sys/dev/sfxge/sfxge.c
+++ b/sys/dev/sfxge/sfxge.c
@@ -507,6 +507,30 @@ sfxge_if_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
case SIOCGIFMEDIA:
error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
break;
+#ifdef SIOCGI2C
+ case SIOCGI2C:
+ {
+ struct ifi2creq i2c;
+
+ error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
+ if (error != 0)
+ break;
+
+ if (i2c.len > sizeof(i2c.data)) {
+ error = EINVAL;
+ break;
+ }
+
+ SFXGE_ADAPTER_LOCK(sc);
+ error = efx_phy_module_get_info(sc->enp, i2c.dev_addr,
+ i2c.offset, i2c.len,
+ &i2c.data[0]);
+ SFXGE_ADAPTER_UNLOCK(sc);
+ if (error == 0)
+ error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
+ break;
+ }
+#endif
case SIOCGPRIVATE_0:
error = priv_check(curthread, PRIV_DRIVER);
if (error != 0)
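The new SIOCGI2C case above matches what ifconfig(8) expects: userland fills a struct ifi2creq, points ifr_data at it, and gets back at most sizeof(i2c.data) bytes per request. A hedged userspace sketch (interface name, socket creation and error handling are placeholders; SIOCGI2C and struct ifi2creq must be provided by the installed net/if.h, hence the #ifdef in the driver):

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/if.h>

#include <stdint.h>
#include <string.h>

/* Read up to 8 bytes from a module's 0xA0 (SFF-8472 base) page. */
static int
read_module_bytes(int s, const char *ifname, uint8_t off, uint8_t buf[8])
{
	struct ifreq ifr;
	struct ifi2creq i2c;

	memset(&ifr, 0, sizeof(ifr));
	memset(&i2c, 0, sizeof(i2c));
	strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
	i2c.dev_addr = 0xA0;
	i2c.offset = off;
	i2c.len = 8;		/* at most sizeof(i2c.data) */
	ifr.ifr_data = (caddr_t)&i2c;
	if (ioctl(s, SIOCGI2C, &ifr) == -1)
		return (-1);
	memcpy(buf, i2c.data, i2c.len);
	return (0);
}

The socket passed in can be any datagram socket opened with socket(2), as used for other interface ioctls.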