| author | erj <erj@FreeBSD.org> | 2015-05-27 17:44:11 +0000 |
|---|---|---|
| committer | erj <erj@FreeBSD.org> | 2015-05-27 17:44:11 +0000 |
| commit | e12c5d1ed6e67d05e92e7300d4b38cff76215ae7 (patch) | |
| tree | c721017df7b05eabfe8fc6f76e956a5ae58ff508 /sys/dev | |
| parent | 38c6ad8c69b6e6ab7fd5b8a9089fa30ea3855bed (diff) | |
MFC ixgbe commits for 10.2:
- r280182 - Split the driver into independent pf/vf loadables (see the module-registration sketch below)
- r280197 - Resolve build issues
- r280204 - Fix multiple same-name devclasses
- r280228 - Fix i386 LINT build issues / remove unused variable
- r280252 - Fix building ixgbe with gcc
- r280962 - Make changes to busdma code similar to r257541
- r281772 & r281773 - Remove unused variable
- partial r282280 - stats counter update (ix-only)
- r282289 - Add X550 support
- r282290 - Add X550 makefile updates
- r282293 - Add ixgbe_x550.c to conf/files
- r282299 - Fix gcc compile (extraneous extern declaration)
Finally, add ix_txrx.c to conf/files because it is required to compile the driver in stable/10.
Approved by: jfv (mentor)
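Two of the MFC'd revisions above (r280182 and r280204) hinge on one newbus detail that the diff below makes visible: once the PF ("ix") and VF ("ixv") drivers become independent loadables, each module must register its own driver_t and devclass_t under a distinct name, which is why ixgbe_methods/ixgbe_devclass are renamed to ix_methods/ix_devclass. A minimal sketch of the PF-side registration as it reads after this change — prototypes are included only for self-containment, and struct adapter is the driver softc defined in ixgbe.h:

```c
/* Device interface entry points; bodies live elsewhere in if_ix.c. */
static int  ixgbe_probe(device_t);
static int  ixgbe_attach(device_t);
static int  ixgbe_detach(device_t);
static int  ixgbe_shutdown(device_t);
static int  ixgbe_suspend(device_t);
static int  ixgbe_resume(device_t);

static device_method_t ix_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		ixgbe_probe),
	DEVMETHOD(device_attach,	ixgbe_attach),
	DEVMETHOD(device_detach,	ixgbe_detach),
	DEVMETHOD(device_shutdown,	ixgbe_shutdown),
	DEVMETHOD(device_suspend,	ixgbe_suspend),
	DEVMETHOD(device_resume,	ixgbe_resume),
	DEVMETHOD_END
};

static driver_t ix_driver = {
	"ix", ix_methods, sizeof(struct adapter),
};

/* One devclass per loadable: sharing a single devclass name between
 * the pf and vf modules is the clash that r280204 resolves. */
devclass_t ix_devclass;
DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);

MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
```

The VF module repeats the same pattern under its own "ixv" name and devclass, so both drivers can be loaded side by side.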
Diffstat (limited to 'sys/dev')
33 files changed, 12903 insertions, 8199 deletions
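Before the diff itself, one cleanup pattern that repeats throughout the tunables section is worth flagging: the pre-10.x two-step of TUNABLE_INT() plus SYSCTL_INT(..., CTLFLAG_RW, ...) collapses into a single SYSCTL_INT() with CTLFLAG_RWTUN, which both reads the loader.conf tunable at boot and exposes a writable sysctl. A before/after sketch using a hypothetical hw.ix.example knob (not a real driver tunable):

```c
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0,
    "IXGBE driver parameters");

static int ixgbe_example = 1;

#ifdef OLD_STYLE	/* pre-change pattern, shown for contrast */
TUNABLE_INT("hw.ix.example", &ixgbe_example);
SYSCTL_INT(_hw_ix, OID_AUTO, example, CTLFLAG_RW,
    &ixgbe_example, 0, "Example knob");
#else			/* consolidated pattern used by this commit */
SYSCTL_INT(_hw_ix, OID_AUTO, example, CTLFLAG_RWTUN,
    &ixgbe_example, 0, "Example knob");
#endif
```

Knobs that should stay read-only after boot keep CTLFLAG_RDTUN instead, as max_interrupt_rate does in the diff below.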
diff --git a/sys/dev/ixgbe/LICENSE b/sys/dev/ixgbe/LICENSE index d446282..394b094 100644 --- a/sys/dev/ixgbe/LICENSE +++ b/sys/dev/ixgbe/LICENSE @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2013, Intel Corporation + Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without diff --git a/sys/dev/ixgbe/ixgbe.c b/sys/dev/ixgbe/if_ix.c index 0fe0f01..2f119bc 100644 --- a/sys/dev/ixgbe/ixgbe.c +++ b/sys/dev/ixgbe/if_ix.c @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2013, Intel Corporation + Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without @@ -33,8 +33,11 @@ /*$FreeBSD$*/ +#ifndef IXGBE_STANDALONE_BUILD #include "opt_inet.h" #include "opt_inet6.h" +#endif + #include "ixgbe.h" /********************************************************************* @@ -45,7 +48,7 @@ int ixgbe_display_debug_stats = 0; /********************************************************************* * Driver version *********************************************************************/ -char ixgbe_driver_version[] = "2.5.15"; +char ixgbe_driver_version[] = "2.8.3"; /********************************************************************* * PCI Device ID Table @@ -82,7 +85,13 @@ static ixgbe_vendor_info_t ixgbe_vendor_info_array[] = {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0}, {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0}, {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0}, + {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0}, {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0}, + {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0}, + {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0}, + {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0}, + {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0}, + {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0}, /* required last entry */ {0, 0, 0, 0, 0} }; @@ -102,19 +111,16 @@ static int ixgbe_probe(device_t); static int ixgbe_attach(device_t); static int ixgbe_detach(device_t); static int ixgbe_shutdown(device_t); -#ifdef IXGBE_LEGACY_TX -static void ixgbe_start(struct ifnet *); -static void ixgbe_start_locked(struct tx_ring *, struct ifnet *); -#else /* ! 
IXGBE_LEGACY_TX */ -static int ixgbe_mq_start(struct ifnet *, struct mbuf *); -static int ixgbe_mq_start_locked(struct ifnet *, struct tx_ring *); -static void ixgbe_qflush(struct ifnet *); -static void ixgbe_deferred_mq_start(void *, int); -#endif /* IXGBE_LEGACY_TX */ +static int ixgbe_suspend(device_t); +static int ixgbe_resume(device_t); static int ixgbe_ioctl(struct ifnet *, u_long, caddr_t); static void ixgbe_init(void *); static void ixgbe_init_locked(struct adapter *); static void ixgbe_stop(void *); +#if __FreeBSD_version >= 1100036 +static uint64_t ixgbe_get_counter(struct ifnet *, ift_counter); +#endif +static void ixgbe_add_media_types(struct adapter *); static void ixgbe_media_status(struct ifnet *, struct ifmediareq *); static int ixgbe_media_change(struct ifnet *); static void ixgbe_identify_hardware(struct adapter *); @@ -122,49 +128,29 @@ static int ixgbe_allocate_pci_resources(struct adapter *); static void ixgbe_get_slot_info(struct ixgbe_hw *); static int ixgbe_allocate_msix(struct adapter *); static int ixgbe_allocate_legacy(struct adapter *); -static int ixgbe_allocate_queues(struct adapter *); static int ixgbe_setup_msix(struct adapter *); static void ixgbe_free_pci_resources(struct adapter *); static void ixgbe_local_timer(void *); static int ixgbe_setup_interface(device_t, struct adapter *); +static void ixgbe_config_dmac(struct adapter *); +static void ixgbe_config_delay_values(struct adapter *); static void ixgbe_config_link(struct adapter *); +static void ixgbe_check_eee_support(struct adapter *); +static void ixgbe_check_wol_support(struct adapter *); +static int ixgbe_setup_low_power_mode(struct adapter *); +static void ixgbe_rearm_queues(struct adapter *, u64); -static int ixgbe_allocate_transmit_buffers(struct tx_ring *); -static int ixgbe_setup_transmit_structures(struct adapter *); -static void ixgbe_setup_transmit_ring(struct tx_ring *); static void ixgbe_initialize_transmit_units(struct adapter *); -static void ixgbe_free_transmit_structures(struct adapter *); -static void ixgbe_free_transmit_buffers(struct tx_ring *); - -static int ixgbe_allocate_receive_buffers(struct rx_ring *); -static int ixgbe_setup_receive_structures(struct adapter *); -static int ixgbe_setup_receive_ring(struct rx_ring *); static void ixgbe_initialize_receive_units(struct adapter *); -static void ixgbe_free_receive_structures(struct adapter *); -static void ixgbe_free_receive_buffers(struct rx_ring *); -static void ixgbe_setup_hw_rsc(struct rx_ring *); +static void ixgbe_enable_rx_drop(struct adapter *); +static void ixgbe_disable_rx_drop(struct adapter *); static void ixgbe_enable_intr(struct adapter *); static void ixgbe_disable_intr(struct adapter *); static void ixgbe_update_stats_counters(struct adapter *); -static void ixgbe_txeof(struct tx_ring *); -static bool ixgbe_rxeof(struct ix_queue *); -static void ixgbe_rx_checksum(u32, struct mbuf *, u32); static void ixgbe_set_promisc(struct adapter *); static void ixgbe_set_multi(struct adapter *); static void ixgbe_update_link_status(struct adapter *); -static void ixgbe_refresh_mbufs(struct rx_ring *, int); -static int ixgbe_xmit(struct tx_ring *, struct mbuf **); -static int ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS); -static int ixgbe_set_advertise(SYSCTL_HANDLER_ARGS); -static int ixgbe_set_thermal_test(SYSCTL_HANDLER_ARGS); -static int ixgbe_dma_malloc(struct adapter *, bus_size_t, - struct ixgbe_dma_alloc *, int); -static void ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *); -static int ixgbe_tx_ctx_setup(struct 
tx_ring *, - struct mbuf *, u32 *, u32 *); -static int ixgbe_tso_setup(struct tx_ring *, - struct mbuf *, u32 *, u32 *); static void ixgbe_set_ivar(struct adapter *, u8, u8, s8); static void ixgbe_configure_ivars(struct adapter *); static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *); @@ -173,14 +159,22 @@ static void ixgbe_setup_vlan_hw_support(struct adapter *); static void ixgbe_register_vlan(void *, struct ifnet *, u16); static void ixgbe_unregister_vlan(void *, struct ifnet *, u16); -static void ixgbe_add_hw_stats(struct adapter *adapter); - -static __inline void ixgbe_rx_discard(struct rx_ring *, int); -static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *, - struct mbuf *, u32); +static void ixgbe_add_device_sysctls(struct adapter *); +static void ixgbe_add_hw_stats(struct adapter *); -static void ixgbe_enable_rx_drop(struct adapter *); -static void ixgbe_disable_rx_drop(struct adapter *); +/* Sysctl handlers */ +static int ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS); +static int ixgbe_set_advertise(SYSCTL_HANDLER_ARGS); +static int ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS); +static int ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS); +static int ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS); +static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS); +static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS); +static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS); +static int ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS); +static int ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS); +static int ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS); +static int ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS); /* Support for pluggable optic modules */ static bool ixgbe_sfp_probe(struct adapter *); @@ -198,37 +192,36 @@ static void ixgbe_handle_que(void *, int); static void ixgbe_handle_link(void *, int); static void ixgbe_handle_msf(void *, int); static void ixgbe_handle_mod(void *, int); +static void ixgbe_handle_phy(void *, int); #ifdef IXGBE_FDIR -static void ixgbe_atr(struct tx_ring *, struct mbuf *); static void ixgbe_reinit_fdir(void *, int); #endif -/* Missing shared code prototype */ -extern void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw); - /********************************************************************* * FreeBSD Device Interface Entry Points *********************************************************************/ -static device_method_t ixgbe_methods[] = { +static device_method_t ix_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ixgbe_probe), DEVMETHOD(device_attach, ixgbe_attach), DEVMETHOD(device_detach, ixgbe_detach), DEVMETHOD(device_shutdown, ixgbe_shutdown), + DEVMETHOD(device_suspend, ixgbe_suspend), + DEVMETHOD(device_resume, ixgbe_resume), DEVMETHOD_END }; -static driver_t ixgbe_driver = { - "ix", ixgbe_methods, sizeof(struct adapter), +static driver_t ix_driver = { + "ix", ix_methods, sizeof(struct adapter), }; -devclass_t ixgbe_devclass; -DRIVER_MODULE(ixgbe, pci, ixgbe_driver, ixgbe_devclass, 0, 0); +devclass_t ix_devclass; +DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0); -MODULE_DEPEND(ixgbe, pci, 1, 1, 1); -MODULE_DEPEND(ixgbe, ether, 1, 1, 1); +MODULE_DEPEND(ix, pci, 1, 1, 1); +MODULE_DEPEND(ix, ether, 1, 1, 1); /* ** TUNEABLE PARAMETERS: @@ -244,18 +237,16 @@ static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0, ** traffic for that interrupt vector */ static int ixgbe_enable_aim = TRUE; -TUNABLE_INT("hw.ix.enable_aim", &ixgbe_enable_aim); -SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RW, 
&ixgbe_enable_aim, 0, +SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0, "Enable adaptive interrupt moderation"); static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY); -TUNABLE_INT("hw.ix.max_interrupt_rate", &ixgbe_max_interrupt_rate); SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN, &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second"); /* How many packets rxeof tries to clean at a time */ static int ixgbe_rx_process_limit = 256; -TUNABLE_INT("hw.ix.rx_process_limit", &ixgbe_rx_process_limit); +TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit); SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN, &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time," @@ -263,7 +254,7 @@ SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN, /* How many packets txeof tries to clean at a time */ static int ixgbe_tx_process_limit = 256; -TUNABLE_INT("hw.ix.tx_process_limit", &ixgbe_tx_process_limit); +TUNABLE_INT("hw.ixgbe.tx_process_limit", &ixgbe_tx_process_limit); SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN, &ixgbe_tx_process_limit, 0, "Maximum number of sent packets to process at a time," @@ -283,7 +274,6 @@ static int ixgbe_smart_speed = ixgbe_smart_speed_on; * but this allows it to be forced off for testing. */ static int ixgbe_enable_msix = 1; -TUNABLE_INT("hw.ix.enable_msix", &ixgbe_enable_msix); SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0, "Enable MSI-X interrupts"); @@ -294,7 +284,6 @@ SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0, * can be overriden manually here. */ static int ixgbe_num_queues = 0; -TUNABLE_INT("hw.ix.num_queues", &ixgbe_num_queues); SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0, "Number of queues to configure, 0 indicates autoconfigure"); @@ -304,13 +293,11 @@ SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0, ** the better performing choice. */ static int ixgbe_txd = PERFORM_TXD; -TUNABLE_INT("hw.ix.txd", &ixgbe_txd); SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0, "Number of transmit descriptors per queue"); /* Number of RX descriptors per ring */ static int ixgbe_rxd = PERFORM_RXD; -TUNABLE_INT("hw.ix.rxd", &ixgbe_rxd); SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0, "Number of receive descriptors per queue"); @@ -322,34 +309,10 @@ SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0, static int allow_unsupported_sfp = FALSE; TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp); -/* -** HW RSC control: -** this feature only works with -** IPv4, and only on 82599 and later. -** Also this will cause IP forwarding to -** fail and that can't be controlled by -** the stack as LRO can. For all these -** reasons I've deemed it best to leave -** this off and not bother with a tuneable -** interface, this would need to be compiled -** to enable. -*/ -static bool ixgbe_rsc_enable = FALSE; - /* Keep running tab on them for sanity check */ static int ixgbe_total_ports; #ifdef IXGBE_FDIR -/* -** For Flow Director: this is the -** number of TX packets we sample -** for the filter pool, this means -** every 20th packet will be probed. -** -** This feature can be disabled by -** setting this to 0. 
-*/ -static int atr_sample_rate = 20; /* ** Flow Director actually 'steals' ** part of the packet buffer as its @@ -453,33 +416,6 @@ ixgbe_attach(device_t dev) /* Core Lock Init*/ IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev)); - /* SYSCTL APIs */ - - SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), - SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), - OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW, - adapter, 0, ixgbe_set_flowcntl, "I", "Flow Control"); - - SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), - SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), - OID_AUTO, "enable_aim", CTLFLAG_RW, - &ixgbe_enable_aim, 1, "Interrupt Moderation"); - - /* - ** Allow a kind of speed control by forcing the autoneg - ** advertised speed list to only a certain value, this - ** supports 1G on 82599 devices, and 100Mb on x540. - */ - SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), - SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), - OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW, - adapter, 0, ixgbe_set_advertise, "I", "Link Speed"); - - SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), - SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), - OID_AUTO, "ts", CTLTYPE_INT | CTLFLAG_RW, adapter, - 0, ixgbe_set_thermal_test, "I", "Thermal Test"); - /* Set up the timer callout */ callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0); @@ -506,7 +442,7 @@ ixgbe_attach(device_t dev) ** system mbuf allocation. Tuning nmbclusters ** can alleviate this. */ - if (nmbclusters > 0 ) { + if (nmbclusters > 0) { int s; s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports; if (s > nmbclusters) { @@ -609,26 +545,32 @@ ixgbe_attach(device_t dev) adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST); - /* - ** Check PCIE slot type/speed/width - */ + /* Check PCIE slot type/speed/width */ ixgbe_get_slot_info(hw); + /* Set an initial default flow control value */ - adapter->fc = ixgbe_fc_full; + adapter->fc = ixgbe_fc_full; + + /* Check for certain supported features */ + ixgbe_check_wol_support(adapter); + ixgbe_check_eee_support(adapter); + + /* Add sysctls */ + ixgbe_add_device_sysctls(adapter); + ixgbe_add_hw_stats(adapter); /* let hardware know driver is loaded */ ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD; IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); - ixgbe_add_hw_stats(adapter); - #ifdef DEV_NETMAP ixgbe_netmap_attach(adapter); #endif /* DEV_NETMAP */ INIT_DEBUGOUT("ixgbe_attach: end"); return (0); + err_late: ixgbe_free_transmit_structures(adapter); ixgbe_free_receive_structures(adapter); @@ -638,7 +580,6 @@ err_out: ixgbe_free_pci_resources(adapter); free(adapter->mta, M_DEVBUF); return (error); - } /********************************************************************* @@ -667,8 +608,9 @@ ixgbe_detach(device_t dev) return (EBUSY); } + /* Stop the adapter */ IXGBE_CORE_LOCK(adapter); - ixgbe_stop(adapter); + ixgbe_setup_low_power_mode(adapter); IXGBE_CORE_UNLOCK(adapter); for (int i = 0; i < adapter->num_queues; i++, que++, txr++) { @@ -686,6 +628,7 @@ ixgbe_detach(device_t dev) taskqueue_drain(adapter->tq, &adapter->link_task); taskqueue_drain(adapter->tq, &adapter->mod_task); taskqueue_drain(adapter->tq, &adapter->msf_task); + taskqueue_drain(adapter->tq, &adapter->phy_task); #ifdef IXGBE_FDIR taskqueue_drain(adapter->tq, &adapter->fdir_task); #endif @@ -730,204 +673,80 @@ static int ixgbe_shutdown(device_t dev) { struct adapter *adapter = device_get_softc(dev); - IXGBE_CORE_LOCK(adapter); - ixgbe_stop(adapter); - 
IXGBE_CORE_UNLOCK(adapter); - return (0); -} + int error = 0; + INIT_DEBUGOUT("ixgbe_shutdown: begin"); -#ifdef IXGBE_LEGACY_TX -/********************************************************************* - * Transmit entry point - * - * ixgbe_start is called by the stack to initiate a transmit. - * The driver will remain in this routine as long as there are - * packets to transmit and transmit resources are available. - * In case resources are not available stack is notified and - * the packet is requeued. - **********************************************************************/ - -static void -ixgbe_start_locked(struct tx_ring *txr, struct ifnet * ifp) -{ - struct mbuf *m_head; - struct adapter *adapter = txr->adapter; - - IXGBE_TX_LOCK_ASSERT(txr); - - if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) - return; - if (!adapter->link_active) - return; - - while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { - if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE) - break; - - IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); - if (m_head == NULL) - break; - - if (ixgbe_xmit(txr, &m_head)) { - if (m_head != NULL) - IFQ_DRV_PREPEND(&ifp->if_snd, m_head); - break; - } - /* Send a copy of the frame to the BPF listener */ - ETHER_BPF_MTAP(ifp, m_head); - - /* Set watchdog on */ - txr->watchdog_time = ticks; - txr->queue_status = IXGBE_QUEUE_WORKING; + IXGBE_CORE_LOCK(adapter); + error = ixgbe_setup_low_power_mode(adapter); + IXGBE_CORE_UNLOCK(adapter); - } - return; + return (error); } -/* - * Legacy TX start - called by the stack, this - * always uses the first tx ring, and should - * not be used with multiqueue tx enabled. +/** + * Methods for going from: + * D0 -> D3: ixgbe_suspend + * D3 -> D0: ixgbe_resume */ -static void -ixgbe_start(struct ifnet *ifp) +static int +ixgbe_suspend(device_t dev) { - struct adapter *adapter = ifp->if_softc; - struct tx_ring *txr = adapter->tx_rings; - - if (ifp->if_drv_flags & IFF_DRV_RUNNING) { - IXGBE_TX_LOCK(txr); - ixgbe_start_locked(txr, ifp); - IXGBE_TX_UNLOCK(txr); - } - return; -} + struct adapter *adapter = device_get_softc(dev); + int error = 0; -#else /* ! 
IXGBE_LEGACY_TX */ + INIT_DEBUGOUT("ixgbe_suspend: begin"); -/* -** Multiqueue Transmit driver -** -*/ -static int -ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m) -{ - struct adapter *adapter = ifp->if_softc; - struct ix_queue *que; - struct tx_ring *txr; - int i, err = 0; + IXGBE_CORE_LOCK(adapter); - /* Which queue to use */ - if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) - i = m->m_pkthdr.flowid % adapter->num_queues; - else - i = curcpu % adapter->num_queues; + error = ixgbe_setup_low_power_mode(adapter); - txr = &adapter->tx_rings[i]; - que = &adapter->queues[i]; + /* Save state and power down */ + pci_save_state(dev); + pci_set_powerstate(dev, PCI_POWERSTATE_D3); - err = drbr_enqueue(ifp, txr->br, m); - if (err) - return (err); - if (IXGBE_TX_TRYLOCK(txr)) { - ixgbe_mq_start_locked(ifp, txr); - IXGBE_TX_UNLOCK(txr); - } else - taskqueue_enqueue(que->tq, &txr->txq_task); + IXGBE_CORE_UNLOCK(adapter); - return (0); + return (error); } static int -ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr) +ixgbe_resume(device_t dev) { - struct adapter *adapter = txr->adapter; - struct mbuf *next; - int enqueued = 0, err = 0; - - if (((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) || - adapter->link_active == 0) - return (ENETDOWN); - - /* Process the queue */ -#if __FreeBSD_version < 901504 - next = drbr_dequeue(ifp, txr->br); - while (next != NULL) { - if ((err = ixgbe_xmit(txr, &next)) != 0) { - if (next != NULL) - err = drbr_enqueue(ifp, txr->br, next); -#else - while ((next = drbr_peek(ifp, txr->br)) != NULL) { - if ((err = ixgbe_xmit(txr, &next)) != 0) { - if (next == NULL) { - drbr_advance(ifp, txr->br); - } else { - drbr_putback(ifp, txr->br, next); - } -#endif - break; - } -#if __FreeBSD_version >= 901504 - drbr_advance(ifp, txr->br); -#endif - enqueued++; - /* Send a copy of the frame to the BPF listener */ - ETHER_BPF_MTAP(ifp, next); - if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) - break; -#if __FreeBSD_version < 901504 - next = drbr_dequeue(ifp, txr->br); -#endif - } + struct adapter *adapter = device_get_softc(dev); + struct ifnet *ifp = adapter->ifp; + struct ixgbe_hw *hw = &adapter->hw; + u32 wus; - if (enqueued > 0) { - /* Set watchdog on */ - txr->queue_status = IXGBE_QUEUE_WORKING; - txr->watchdog_time = ticks; - } + INIT_DEBUGOUT("ixgbe_resume: begin"); - if (txr->tx_avail < IXGBE_TX_CLEANUP_THRESHOLD) - ixgbe_txeof(txr); + IXGBE_CORE_LOCK(adapter); - return (err); -} + pci_set_powerstate(dev, PCI_POWERSTATE_D0); + pci_restore_state(dev); -/* - * Called from a taskqueue to drain queued transmit packets. 
- */ -static void -ixgbe_deferred_mq_start(void *arg, int pending) -{ - struct tx_ring *txr = arg; - struct adapter *adapter = txr->adapter; - struct ifnet *ifp = adapter->ifp; + /* Read & clear WUS register */ + wus = IXGBE_READ_REG(hw, IXGBE_WUS); + if (wus) + device_printf(dev, "Woken up by (WUS): %#010x\n", + IXGBE_READ_REG(hw, IXGBE_WUS)); + IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff); + /* And clear WUFC until next low-power transition */ + IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); - IXGBE_TX_LOCK(txr); - if (!drbr_empty(ifp, txr->br)) - ixgbe_mq_start_locked(ifp, txr); - IXGBE_TX_UNLOCK(txr); -} + /* + * Required after D3->D0 transition; + * will re-advertise all previous advertised speeds + */ + if (ifp->if_flags & IFF_UP) + ixgbe_init_locked(adapter); -/* -** Flush all ring buffers -*/ -static void -ixgbe_qflush(struct ifnet *ifp) -{ - struct adapter *adapter = ifp->if_softc; - struct tx_ring *txr = adapter->tx_rings; - struct mbuf *m; + IXGBE_CORE_UNLOCK(adapter); - for (int i = 0; i < adapter->num_queues; i++, txr++) { - IXGBE_TX_LOCK(txr); - while ((m = buf_ring_dequeue_sc(txr->br)) != NULL) - m_freem(m); - IXGBE_TX_UNLOCK(txr); - } - if_qflush(ifp); + INIT_DEBUGOUT("ixgbe_resume: end"); + return (0); } -#endif /* IXGBE_LEGACY_TX */ + /********************************************************************* * Ioctl entry point @@ -942,7 +761,6 @@ static int ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data) { struct adapter *adapter = ifp->if_softc; - struct ixgbe_hw *hw = &adapter->hw; struct ifreq *ifr = (struct ifreq *) data; #if defined(INET) || defined(INET6) struct ifaddr *ifa = (struct ifaddr *)data; @@ -978,13 +796,13 @@ ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data) break; case SIOCSIFMTU: IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)"); - if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - ETHER_HDR_LEN) { + if (ifr->ifr_mtu > IXGBE_MAX_MTU) { error = EINVAL; } else { IXGBE_CORE_LOCK(adapter); ifp->if_mtu = ifr->ifr_mtu; adapter->max_frame_size = - ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; + ifp->if_mtu + IXGBE_MTU_HDR; ixgbe_init_locked(adapter); IXGBE_CORE_UNLOCK(adapter); } @@ -1048,22 +866,32 @@ ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data) VLAN_CAPABILITIES(ifp); break; } +#if __FreeBSD_version >= 1100036 case SIOCGI2C: { - struct ixgbe_i2c_req i2c; + struct ixgbe_hw *hw = &adapter->hw; + struct ifi2creq i2c; + int i; IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)"); error = copyin(ifr->ifr_data, &i2c, sizeof(i2c)); - if (error) + if (error != 0) + break; + if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) { + error = EINVAL; break; - if ((i2c.dev_addr != 0xA0) || (i2c.dev_addr != 0xA2)){ + } + if (i2c.len > sizeof(i2c.data)) { error = EINVAL; break; } - hw->phy.ops.read_i2c_byte(hw, i2c.offset, - i2c.dev_addr, i2c.data); + + for (i = 0; i < i2c.len; i++) + hw->phy.ops.read_i2c_byte(hw, i2c.offset + i, + i2c.dev_addr, &i2c.data[i]); error = copyout(&i2c, ifr->ifr_data, sizeof(i2c)); break; } +#endif default: IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command); error = ether_ioctl(ifp, command, data); @@ -1123,7 +951,7 @@ ixgbe_init_locked(struct adapter *adapter) /* Prepare transmit descriptors and buffers */ if (ixgbe_setup_transmit_structures(adapter)) { - device_printf(dev,"Could not setup transmit structures\n"); + device_printf(dev, "Could not setup transmit structures\n"); ixgbe_stop(adapter); return; } @@ -1149,7 +977,7 @@ ixgbe_init_locked(struct adapter *adapter) /* Prepare receive descriptors and buffers */ if 
(ixgbe_setup_receive_structures(adapter)) { - device_printf(dev,"Could not setup receive structures\n"); + device_printf(dev, "Could not setup receive structures\n"); ixgbe_stop(adapter); return; } @@ -1160,15 +988,20 @@ ixgbe_init_locked(struct adapter *adapter) gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE); /* Enable Fan Failure Interrupt */ - gpie |= IXGBE_SDP1_GPIEN; + gpie |= IXGBE_SDP1_GPIEN_BY_MAC(hw); /* Add for Module detection */ if (hw->mac.type == ixgbe_mac_82599EB) gpie |= IXGBE_SDP2_GPIEN; - /* Thermal Failure Detection */ - if (hw->mac.type == ixgbe_mac_X540) - gpie |= IXGBE_SDP0_GPIEN; + /* + * Thermal Failure Detection (X540) + * Link Detection (X552) + */ + if (hw->mac.type == ixgbe_mac_X540 || + hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP || + hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) + gpie |= IXGBE_SDP0_GPIEN_X540; if (adapter->msix > 1) { /* Enable Enhanced MSIX mode */ @@ -1180,6 +1013,7 @@ ixgbe_init_locked(struct adapter *adapter) /* Set MTU size */ if (ifp->if_mtu > ETHERMTU) { + /* aka IXGBE_MAXFRS on 82599 and newer */ mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); mhadd &= ~IXGBE_MHADD_MFS_MASK; mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT; @@ -1187,7 +1021,6 @@ ixgbe_init_locked(struct adapter *adapter) } /* Now enable all the queues */ - for (int i = 0; i < adapter->num_queues; i++) { txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); txdctl |= IXGBE_TXDCTL_ENABLE; @@ -1302,43 +1135,27 @@ ixgbe_init_locked(struct adapter *adapter) } /* Set moderation on the Link interrupt */ - IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->linkvec), IXGBE_LINK_ITR); + IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR); + + /* Configure Energy Efficient Ethernet for supported devices */ + if (adapter->eee_support) + ixgbe_setup_eee(hw, adapter->eee_enabled); /* Config/Enable Link */ ixgbe_config_link(adapter); /* Hardware Packet Buffer & Flow Control setup */ - { - u32 rxpb, frame, size, tmp; + ixgbe_config_delay_values(adapter); - frame = adapter->max_frame_size; - - /* Calculate High Water */ - if (hw->mac.type == ixgbe_mac_X540) - tmp = IXGBE_DV_X540(frame, frame); - else - tmp = IXGBE_DV(frame, frame); - size = IXGBE_BT2KB(tmp); - rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10; - hw->fc.high_water[0] = rxpb - size; - - /* Now calculate Low Water */ - if (hw->mac.type == ixgbe_mac_X540) - tmp = IXGBE_LOW_DV_X540(frame); - else - tmp = IXGBE_LOW_DV(frame); - hw->fc.low_water[0] = IXGBE_BT2KB(tmp); - - hw->fc.requested_mode = adapter->fc; - hw->fc.pause_time = IXGBE_FC_PAUSE; - hw->fc.send_xon = TRUE; - } /* Initialize the FC settings */ ixgbe_start_hw(hw); /* Set up VLAN support and filter */ ixgbe_setup_vlan_hw_support(adapter); + /* Setup DMA Coalescing */ + ixgbe_config_dmac(adapter); + /* And now turn on interrupts */ ixgbe_enable_intr(adapter); @@ -1359,6 +1176,46 @@ ixgbe_init(void *arg) return; } +static void +ixgbe_config_delay_values(struct adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 rxpb, frame, size, tmp; + + frame = adapter->max_frame_size; + + /* Calculate High Water */ + switch (hw->mac.type) { + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + tmp = IXGBE_DV_X540(frame, frame); + break; + default: + tmp = IXGBE_DV(frame, frame); + break; + } + size = IXGBE_BT2KB(tmp); + rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10; + hw->fc.high_water[0] = rxpb - size; + + /* Now calculate Low Water */ + switch (hw->mac.type) { + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + tmp = 
IXGBE_LOW_DV_X540(frame); + break; + default: + tmp = IXGBE_LOW_DV(frame); + break; + } + hw->fc.low_water[0] = IXGBE_BT2KB(tmp); + + hw->fc.requested_mode = adapter->fc; + hw->fc.pause_time = IXGBE_FC_PAUSE; + hw->fc.send_xon = TRUE; +} /* ** @@ -1479,16 +1336,21 @@ ixgbe_legacy_irq(void *arg) /* Check for fan failure */ if ((hw->phy.media_type == ixgbe_media_type_copper) && - (reg_eicr & IXGBE_EICR_GPI_SDP1)) { + (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) { device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! " "REPLACE IMMEDIATELY!!\n"); - IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1); + IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); } /* Link status change */ if (reg_eicr & IXGBE_EICR_LSC) taskqueue_enqueue(adapter->tq, &adapter->link_task); + /* External PHY interrupt */ + if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T && + (reg_eicr & IXGBE_EICR_GPI_SDP0_X540)) + taskqueue_enqueue(adapter->tq, &adapter->phy_task); + if (more) taskqueue_enqueue(que->tq, &que->que_task); else @@ -1597,7 +1459,7 @@ ixgbe_msix_link(void *arg) { struct adapter *adapter = arg; struct ixgbe_hw *hw = &adapter->hw; - u32 reg_eicr; + u32 reg_eicr, mod_mask; ++adapter->link_irq; @@ -1627,34 +1489,46 @@ ixgbe_msix_link(void *arg) device_printf(adapter->dev, "\nCRITICAL: ECC ERROR!! " "Please Reboot!!\n"); IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); - } else + } + + /* Check for over temp condition */ + if (reg_eicr & IXGBE_EICR_TS) { + device_printf(adapter->dev, "\nCRITICAL: OVER TEMP!! " + "PHY IS SHUT DOWN!!\n"); + device_printf(adapter->dev, "System shutdown required!\n"); + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS); + } + } - if (reg_eicr & IXGBE_EICR_GPI_SDP1) { - /* Clear the interrupt */ - IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); + /* Pluggable optics-related interrupt */ + if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) + mod_mask = IXGBE_EICR_GPI_SDP0_X540; + else + mod_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw); + + if (ixgbe_is_sfp(hw)) { + if (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) { + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); taskqueue_enqueue(adapter->tq, &adapter->msf_task); - } else if (reg_eicr & IXGBE_EICR_GPI_SDP2) { - /* Clear the interrupt */ - IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2); + } else if (reg_eicr & mod_mask) { + IXGBE_WRITE_REG(hw, IXGBE_EICR, mod_mask); taskqueue_enqueue(adapter->tq, &adapter->mod_task); } - } + } /* Check for fan failure */ if ((hw->device_id == IXGBE_DEV_ID_82598AT) && (reg_eicr & IXGBE_EICR_GPI_SDP1)) { + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! " "REPLACE IMMEDIATELY!!\n"); - IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); } - /* Check for over temp condition */ - if ((hw->mac.type == ixgbe_mac_X540) && - (reg_eicr & IXGBE_EICR_TS)) { - device_printf(adapter->dev, "\nCRITICAL: OVER TEMP!! 
" - "PHY IS SHUT DOWN!!\n"); - device_printf(adapter->dev, "System shutdown required\n"); - IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS); + /* External PHY interrupt */ + if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T && + (reg_eicr & IXGBE_EICR_GPI_SDP0_X540)) { + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540); + taskqueue_enqueue(adapter->tq, &adapter->phy_task); } IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); @@ -1674,6 +1548,7 @@ ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr) { struct adapter *adapter = ifp->if_softc; struct ixgbe_hw *hw = &adapter->hw; + int layer; INIT_DEBUGOUT("ixgbe_media_status: begin"); IXGBE_CORE_LOCK(adapter); @@ -1688,29 +1563,106 @@ ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr) } ifmr->ifm_status |= IFM_ACTIVE; + layer = ixgbe_get_supported_physical_layer(hw); - /* - * Not all NIC are 1000baseSX as an example X540T. - * We must set properly the media based on NIC model. - */ - switch (hw->device_id) { - case IXGBE_DEV_ID_X540T: - if (adapter->link_speed == IXGBE_LINK_SPEED_100_FULL) - ifmr->ifm_active |= IFM_100_TX | IFM_FDX; - else if (adapter->link_speed == IXGBE_LINK_SPEED_1GB_FULL) + if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T || + layer & IXGBE_PHYSICAL_LAYER_1000BASE_T || + layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) + switch (adapter->link_speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + ifmr->ifm_active |= IFM_10G_T | IFM_FDX; + break; + case IXGBE_LINK_SPEED_1GB_FULL: ifmr->ifm_active |= IFM_1000_T | IFM_FDX; - else if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) - ifmr->ifm_active |= adapter->optics | IFM_FDX; - break; - default: - if (adapter->link_speed == IXGBE_LINK_SPEED_100_FULL) + break; + case IXGBE_LINK_SPEED_100_FULL: ifmr->ifm_active |= IFM_100_TX | IFM_FDX; - else if (adapter->link_speed == IXGBE_LINK_SPEED_1GB_FULL) + break; + } + if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU || + layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) + switch (adapter->link_speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX; + break; + } + if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) + switch (adapter->link_speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + ifmr->ifm_active |= IFM_10G_LR | IFM_FDX; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + ifmr->ifm_active |= IFM_1000_LX | IFM_FDX; + break; + } + if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM) + switch (adapter->link_speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + ifmr->ifm_active |= IFM_1000_LX | IFM_FDX; + break; + } + if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR || + layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) + switch (adapter->link_speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + ifmr->ifm_active |= IFM_10G_SR | IFM_FDX; + break; + case IXGBE_LINK_SPEED_1GB_FULL: ifmr->ifm_active |= IFM_1000_SX | IFM_FDX; - else if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) - ifmr->ifm_active |= adapter->optics | IFM_FDX; - break; - } + break; + } + if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) + switch (adapter->link_speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX; + break; + } + /* + ** XXX: These need to use the proper media types once + ** they're added. 
+ */ + if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) + switch (adapter->link_speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + ifmr->ifm_active |= IFM_10G_SR | IFM_FDX; + break; + case IXGBE_LINK_SPEED_2_5GB_FULL: + ifmr->ifm_active |= IFM_2500_SX | IFM_FDX; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + ifmr->ifm_active |= IFM_1000_CX | IFM_FDX; + break; + } + else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 + || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) + switch (adapter->link_speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX; + break; + case IXGBE_LINK_SPEED_2_5GB_FULL: + ifmr->ifm_active |= IFM_2500_SX | IFM_FDX; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + ifmr->ifm_active |= IFM_1000_CX | IFM_FDX; + break; + } + + /* If nothing is recognized... */ + if (IFM_SUBTYPE(ifmr->ifm_active) == 0) + ifmr->ifm_active |= IFM_UNKNOWN; + +#if __FreeBSD_version >= 900025 + /* Display current flow control setting used on link */ + if (hw->fc.current_mode == ixgbe_fc_rx_pause || + hw->fc.current_mode == ixgbe_fc_full) + ifmr->ifm_active |= IFM_ETH_RXPAUSE; + if (hw->fc.current_mode == ixgbe_fc_tx_pause || + hw->fc.current_mode == ixgbe_fc_full) + ifmr->ifm_active |= IFM_ETH_TXPAUSE; +#endif IXGBE_CORE_UNLOCK(adapter); @@ -1730,184 +1682,60 @@ ixgbe_media_change(struct ifnet * ifp) { struct adapter *adapter = ifp->if_softc; struct ifmedia *ifm = &adapter->media; + struct ixgbe_hw *hw = &adapter->hw; + ixgbe_link_speed speed = 0; INIT_DEBUGOUT("ixgbe_media_change: begin"); if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); - switch (IFM_SUBTYPE(ifm->ifm_media)) { - case IFM_AUTO: - adapter->hw.phy.autoneg_advertised = - IXGBE_LINK_SPEED_100_FULL | - IXGBE_LINK_SPEED_1GB_FULL | - IXGBE_LINK_SPEED_10GB_FULL; - break; - default: - device_printf(adapter->dev, "Only auto media type\n"); - return (EINVAL); - } - - return (0); -} - -/********************************************************************* - * - * This routine maps the mbufs to tx descriptors, allowing the - * TX engine to transmit the packets. - * - return 0 on success, positive on failure - * - **********************************************************************/ - -static int -ixgbe_xmit(struct tx_ring *txr, struct mbuf **m_headp) -{ - struct adapter *adapter = txr->adapter; - u32 olinfo_status = 0, cmd_type_len; - int i, j, error, nsegs; - int first; - bool remap = TRUE; - struct mbuf *m_head; - bus_dma_segment_t segs[adapter->num_segs]; - bus_dmamap_t map; - struct ixgbe_tx_buf *txbuf; - union ixgbe_adv_tx_desc *txd = NULL; - - m_head = *m_headp; - - /* Basic descriptor defines */ - cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA | - IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT); - - if (m_head->m_flags & M_VLANTAG) - cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE; - - /* - * Important to capture the first descriptor - * used because it will contain the index of - * the one we tell the hardware to report back - */ - first = txr->next_avail_desc; - txbuf = &txr->tx_buffers[first]; - map = txbuf->map; + if (hw->phy.media_type == ixgbe_media_type_backplane) + return (EPERM); /* - * Map the packet for DMA. - */ -retry: - error = bus_dmamap_load_mbuf_sg(txr->txtag, map, - *m_headp, segs, &nsegs, BUS_DMA_NOWAIT); - - if (__predict_false(error)) { - struct mbuf *m; - - switch (error) { - case EFBIG: - /* Try it again? 
- one try */ - if (remap == TRUE) { - remap = FALSE; - m = m_defrag(*m_headp, M_NOWAIT); - if (m == NULL) { - adapter->mbuf_defrag_failed++; - m_freem(*m_headp); - *m_headp = NULL; - return (ENOBUFS); - } - *m_headp = m; - goto retry; - } else - return (error); - case ENOMEM: - txr->no_tx_dma_setup++; - return (error); - default: - txr->no_tx_dma_setup++; - m_freem(*m_headp); - *m_headp = NULL; - return (error); - } - } - - /* Make certain there are enough descriptors */ - if (nsegs > txr->tx_avail - 2) { - txr->no_desc_avail++; - bus_dmamap_unload(txr->txtag, map); - return (ENOBUFS); - } - m_head = *m_headp; - - /* - ** Set up the appropriate offload context - ** this will consume the first descriptor + ** We don't actually need to check against the supported + ** media types of the adapter; ifmedia will take care of + ** that for us. */ - error = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status); - if (__predict_false(error)) { - if (error == ENOBUFS) - *m_headp = NULL; - return (error); - } - -#ifdef IXGBE_FDIR - /* Do the flow director magic */ - if ((txr->atr_sample) && (!adapter->fdir_reinit)) { - ++txr->atr_count; - if (txr->atr_count >= atr_sample_rate) { - ixgbe_atr(txr, m_head); - txr->atr_count = 0; - } - } -#endif - - i = txr->next_avail_desc; - for (j = 0; j < nsegs; j++) { - bus_size_t seglen; - bus_addr_t segaddr; - - txbuf = &txr->tx_buffers[i]; - txd = &txr->tx_base[i]; - seglen = segs[j].ds_len; - segaddr = htole64(segs[j].ds_addr); - - txd->read.buffer_addr = segaddr; - txd->read.cmd_type_len = htole32(txr->txd_cmd | - cmd_type_len |seglen); - txd->read.olinfo_status = htole32(olinfo_status); - - if (++i == txr->num_desc) - i = 0; + switch (IFM_SUBTYPE(ifm->ifm_media)) { + case IFM_AUTO: + case IFM_10G_T: + speed |= IXGBE_LINK_SPEED_100_FULL; + case IFM_10G_LRM: + case IFM_10G_SR: /* KR, too */ + case IFM_10G_LR: + case IFM_10G_CX4: /* KX4 */ + speed |= IXGBE_LINK_SPEED_1GB_FULL; + case IFM_10G_TWINAX: + speed |= IXGBE_LINK_SPEED_10GB_FULL; + break; + case IFM_1000_T: + speed |= IXGBE_LINK_SPEED_100_FULL; + case IFM_1000_LX: + case IFM_1000_SX: + case IFM_1000_CX: /* KX */ + speed |= IXGBE_LINK_SPEED_1GB_FULL; + break; + case IFM_100_TX: + speed |= IXGBE_LINK_SPEED_100_FULL; + break; + default: + goto invalid; } - txd->read.cmd_type_len |= - htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS); - txr->tx_avail -= nsegs; - txr->next_avail_desc = i; - - txbuf->m_head = m_head; - /* - ** Here we swap the map so the last descriptor, - ** which gets the completion interrupt has the - ** real map, and the first descriptor gets the - ** unused map from this descriptor. - */ - txr->tx_buffers[first].map = txbuf->map; - txbuf->map = map; - bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE); - - /* Set the EOP descriptor that will be marked done */ - txbuf = &txr->tx_buffers[first]; - txbuf->eop = txd; - - bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, - BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); - /* - * Advance the Transmit Descriptor Tail (Tdt), this tells the - * hardware that this frame is available to transmit. 
- */ - ++txr->total_packets; - IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(txr->me), i); + hw->mac.autotry_restart = TRUE; + hw->mac.ops.setup_link(hw, speed, TRUE); + adapter->advertise = + ((speed & IXGBE_LINK_SPEED_10GB_FULL) << 2) | + ((speed & IXGBE_LINK_SPEED_1GB_FULL) << 1) | + ((speed & IXGBE_LINK_SPEED_100_FULL) << 0); return (0); +invalid: + device_printf(adapter->dev, "Invalid media type!\n"); + return (EINVAL); } static void @@ -2056,8 +1884,8 @@ ixgbe_local_timer(void *arg) struct adapter *adapter = arg; device_t dev = adapter->dev; struct ix_queue *que = adapter->queues; - struct tx_ring *txr = adapter->tx_rings; - int hung = 0, paused = 0; + u64 queues = 0; + int hung = 0; mtx_assert(&adapter->core_mtx, MA_OWNED); @@ -2070,26 +1898,44 @@ ixgbe_local_timer(void *arg) ixgbe_update_stats_counters(adapter); /* - * If the interface has been paused - * then don't do the watchdog check - */ - if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF) - paused = 1; - - /* ** Check the TX queues status + ** - mark hung queues so we don't schedule on them ** - watchdog only if all queues show hung */ - for (int i = 0; i < adapter->num_queues; i++, que++, txr++) { - if ((txr->queue_status == IXGBE_QUEUE_HUNG) && - (paused == 0)) + for (int i = 0; i < adapter->num_queues; i++, que++) { + /* Keep track of queues with work for soft irq */ + if (que->txr->busy) + queues |= ((u64)1 << que->me); + /* + ** Each time txeof runs without cleaning, but there + ** are uncleaned descriptors it increments busy. If + ** we get to the MAX we declare it hung. + */ + if (que->busy == IXGBE_QUEUE_HUNG) { ++hung; - else if (txr->queue_status == IXGBE_QUEUE_WORKING) - taskqueue_enqueue(que->tq, &txr->txq_task); - } - /* Only truely watchdog if all queues show hung */ - if (hung == adapter->num_queues) - goto watchdog; + /* Mark the queue as inactive */ + adapter->active_queues &= ~((u64)1 << que->me); + continue; + } else { + /* Check if we've come back from hung */ + if ((adapter->active_queues & ((u64)1 << que->me)) == 0) + adapter->active_queues |= ((u64)1 << que->me); + } + if (que->busy >= IXGBE_MAX_TX_BUSY) { + device_printf(dev,"Warning queue %d " + "appears to be hung!\n", i); + que->txr->busy = IXGBE_QUEUE_HUNG; + ++hung; + } + + } + + /* Only truly watchdog if all queues show hung */ + if (hung == adapter->num_queues) + goto watchdog; + else if (queues != 0) { /* Force an IRQ on queues with work */ + ixgbe_rearm_queues(adapter, queues); + } out: callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter); @@ -2097,12 +1943,6 @@ out: watchdog: device_printf(adapter->dev, "Watchdog timeout -- resetting\n"); - device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me, - IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me)), - IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me))); - device_printf(dev,"TX(%d) desc avail = %d," - "Next TX to Clean = %d\n", - txr->me, txr->tx_avail, txr->next_to_clean); adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING; adapter->watchdog_events++; ixgbe_init_locked(adapter); @@ -2119,7 +1959,6 @@ ixgbe_update_link_status(struct adapter *adapter) struct ifnet *ifp = adapter->ifp; device_t dev = adapter->dev; - if (adapter->link_up){ if (adapter->link_active == FALSE) { if (bootverbose) @@ -2129,6 +1968,8 @@ ixgbe_update_link_status(struct adapter *adapter) adapter->link_active = TRUE; /* Update any Flow Control changes */ ixgbe_fc_enable(&adapter->hw); + /* Update DMA coalescing config */ + ixgbe_config_dmac(adapter); if_link_state_change(ifp, LINK_STATE_UP); } } else { /* 
Link down */ @@ -2207,10 +2048,15 @@ ixgbe_identify_hardware(struct adapter *adapter) hw->subsystem_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2); + /* + ** Make sure BUSMASTER is set + */ + pci_enable_busmaster(dev); + /* We need this here to set the num_segs below */ ixgbe_set_mac_type(hw); - /* Pick up the 82599 and VF settings */ + /* Pick up the 82599 settings */ if (hw->mac.type != ixgbe_mac_82598EB) { hw->phy.smart_speed = ixgbe_smart_speed; adapter->num_segs = IXGBE_82599_SCATTER; @@ -2320,6 +2166,7 @@ ixgbe_allocate_legacy(struct adapter *adapter) TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter); TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter); TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter); + TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter); #ifdef IXGBE_FDIR TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter); #endif @@ -2340,7 +2187,7 @@ ixgbe_allocate_legacy(struct adapter *adapter) return (error); } /* For simplicity in the handlers */ - adapter->que_mask = IXGBE_EIMS_ENABLE_MASK; + adapter->active_queues = IXGBE_EIMS_ENABLE_MASK; return (0); } @@ -2358,6 +2205,7 @@ ixgbe_allocate_msix(struct adapter *adapter) struct ix_queue *que = adapter->queues; struct tx_ring *txr = adapter->tx_rings; int error, rid, vector = 0; + int cpu_id = 0; for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) { rid = vector + 1; @@ -2381,13 +2229,19 @@ ixgbe_allocate_msix(struct adapter *adapter) bus_describe_intr(dev, que->res, que->tag, "que %d", i); #endif que->msix = vector; - adapter->que_mask |= (u64)(1 << que->msix); + adapter->active_queues |= (u64)(1 << que->msix); /* - ** Bind the msix vector, and thus the - ** ring to the corresponding cpu. - */ + * Bind the msix vector, and thus the + * rings to the corresponding cpu. + * + * This just happens to match the default RSS round-robin + * bucket -> queue -> CPU allocation. + */ + if (adapter->num_queues > 1) + cpu_id = i; + if (adapter->num_queues > 1) - bus_bind_intr(dev, que->res, i); + bus_bind_intr(dev, que->res, cpu_id); #ifndef IXGBE_LEGACY_TX TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr); @@ -2420,11 +2274,12 @@ ixgbe_allocate_msix(struct adapter *adapter) #if __FreeBSD_version >= 800504 bus_describe_intr(dev, adapter->res, adapter->tag, "link"); #endif - adapter->linkvec = vector; + adapter->vector = vector; /* Tasklets for Link, SFP and Multispeed Fiber */ TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter); TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter); TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter); + TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter); #ifdef IXGBE_FDIR TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter); #endif @@ -2473,9 +2328,6 @@ ixgbe_setup_msix(struct adapter *adapter) if (ixgbe_num_queues != 0) queues = ixgbe_num_queues; - /* Set max queues to 8 when autoconfiguring */ - else if ((ixgbe_num_queues == 0) && (queues > 8)) - queues = 8; /* reflect correct sysctl value */ ixgbe_num_queues = queues; @@ -2594,8 +2446,8 @@ ixgbe_free_pci_resources(struct adapter * adapter) /* Clean the Legacy or Link interrupt last */ - if (adapter->linkvec) /* we are doing MSIX */ - rid = adapter->linkvec + 1; + if (adapter->vector) /* we are doing MSIX */ + rid = adapter->vector + 1; else (adapter->msix != 0) ? 
(rid = 1):(rid = 0); @@ -2629,7 +2481,6 @@ mem: static int ixgbe_setup_interface(device_t dev, struct adapter *adapter) { - struct ixgbe_hw *hw = &adapter->hw; struct ifnet *ifp; INIT_DEBUGOUT("ixgbe_setup_interface: begin"); @@ -2640,15 +2491,20 @@ ixgbe_setup_interface(device_t dev, struct adapter *adapter) return (-1); } if_initname(ifp, device_get_name(dev), device_get_unit(dev)); -#if __FreeBSD_version < 1000025 - ifp->if_baudrate = 1000000000; -#else - if_initbaudrate(ifp, IF_Gbps(10)); -#endif + ifp->if_baudrate = IF_Gbps(10); ifp->if_init = ixgbe_init; ifp->if_softc = adapter; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = ixgbe_ioctl; +#if __FreeBSD_version >= 1100036 + if_setgetcounterfn(ifp, ixgbe_get_counter); +#endif +#if __FreeBSD_version >= 1100045 + /* TSO parameters */ + ifp->if_hw_tsomax = 65518; + ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER; + ifp->if_hw_tsomaxsegsize = 2048; +#endif #ifndef IXGBE_LEGACY_TX ifp->if_transmit = ixgbe_mq_start; ifp->if_qflush = ixgbe_qflush; @@ -2667,7 +2523,7 @@ ixgbe_setup_interface(device_t dev, struct adapter *adapter) /* * Tell the upper layer(s) we support long frames. */ - ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); + ifp->if_hdrlen = sizeof(struct ether_vlan_header); ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO | IFCAP_VLAN_HWCSUM; ifp->if_capabilities |= IFCAP_JUMBO_MTU; @@ -2693,19 +2549,79 @@ ixgbe_setup_interface(device_t dev, struct adapter *adapter) * callbacks to update media and link information */ ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change, - ixgbe_media_status); - ifmedia_add(&adapter->media, IFM_ETHER | adapter->optics, 0, NULL); - ifmedia_set(&adapter->media, IFM_ETHER | adapter->optics); + ixgbe_media_status); + + ixgbe_add_media_types(adapter); + + /* Autoselect media by default */ + ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); + + return (0); +} + +static void +ixgbe_add_media_types(struct adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + device_t dev = adapter->dev; + int layer; + + layer = ixgbe_get_supported_physical_layer(hw); + + /* Media types with matching FreeBSD media defines */ + if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) + ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL); + if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) + ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL); + if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) + ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL); + + if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU || + layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) + ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL); + + if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) + ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL); + if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) + ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL); + if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) + ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL); + if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) + ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL); + + /* + ** Other (no matching FreeBSD media type): + ** To workaround this, we'll assign these completely + ** inappropriate media types. 
+ */ + if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) { + device_printf(dev, "Media supported: 10GbaseKR\n"); + device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n"); + ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL); + } + if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) { + device_printf(dev, "Media supported: 10GbaseKX4\n"); + device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n"); + ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL); + } + if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) { + device_printf(dev, "Media supported: 1000baseKX\n"); + device_printf(dev, "1000baseKX mapped to 1000baseCX\n"); + ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL); + } + if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) { + /* Someday, someone will care about you... */ + device_printf(dev, "Media supported: 1000baseBX\n"); + } + if (hw->device_id == IXGBE_DEV_ID_82598AT) { ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL); } - ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); - ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); - return (0); + ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); } static void @@ -2744,393 +2660,10 @@ out: return; } -/******************************************************************** - * Manage DMA'able memory. - *******************************************************************/ -static void -ixgbe_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error) -{ - if (error) - return; - *(bus_addr_t *) arg = segs->ds_addr; - return; -} - -static int -ixgbe_dma_malloc(struct adapter *adapter, bus_size_t size, - struct ixgbe_dma_alloc *dma, int mapflags) -{ - device_t dev = adapter->dev; - int r; - - r = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */ - DBA_ALIGN, 0, /* alignment, bounds */ - BUS_SPACE_MAXADDR, /* lowaddr */ - BUS_SPACE_MAXADDR, /* highaddr */ - NULL, NULL, /* filter, filterarg */ - size, /* maxsize */ - 1, /* nsegments */ - size, /* maxsegsize */ - BUS_DMA_ALLOCNOW, /* flags */ - NULL, /* lockfunc */ - NULL, /* lockfuncarg */ - &dma->dma_tag); - if (r != 0) { - device_printf(dev,"ixgbe_dma_malloc: bus_dma_tag_create failed; " - "error %u\n", r); - goto fail_0; - } - r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr, - BUS_DMA_NOWAIT, &dma->dma_map); - if (r != 0) { - device_printf(dev,"ixgbe_dma_malloc: bus_dmamem_alloc failed; " - "error %u\n", r); - goto fail_1; - } - r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, - size, - ixgbe_dmamap_cb, - &dma->dma_paddr, - mapflags | BUS_DMA_NOWAIT); - if (r != 0) { - device_printf(dev,"ixgbe_dma_malloc: bus_dmamap_load failed; " - "error %u\n", r); - goto fail_2; - } - dma->dma_size = size; - return (0); -fail_2: - bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); -fail_1: - bus_dma_tag_destroy(dma->dma_tag); -fail_0: - dma->dma_map = NULL; - dma->dma_tag = NULL; - return (r); -} - -static void -ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma) -{ - bus_dmamap_sync(dma->dma_tag, dma->dma_map, - BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); - bus_dmamap_unload(dma->dma_tag, dma->dma_map); - bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); - bus_dma_tag_destroy(dma->dma_tag); -} - - -/********************************************************************* - * - * Allocate memory for the transmit and receive rings, and then - * the descriptors associated with each, called only once at 
attach. - * - **********************************************************************/ -static int -ixgbe_allocate_queues(struct adapter *adapter) -{ - device_t dev = adapter->dev; - struct ix_queue *que; - struct tx_ring *txr; - struct rx_ring *rxr; - int rsize, tsize, error = IXGBE_SUCCESS; - int txconf = 0, rxconf = 0; - - /* First allocate the top level queue structs */ - if (!(adapter->queues = - (struct ix_queue *) malloc(sizeof(struct ix_queue) * - adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { - device_printf(dev, "Unable to allocate queue memory\n"); - error = ENOMEM; - goto fail; - } - - /* First allocate the TX ring struct memory */ - if (!(adapter->tx_rings = - (struct tx_ring *) malloc(sizeof(struct tx_ring) * - adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { - device_printf(dev, "Unable to allocate TX ring memory\n"); - error = ENOMEM; - goto tx_fail; - } - - /* Next allocate the RX */ - if (!(adapter->rx_rings = - (struct rx_ring *) malloc(sizeof(struct rx_ring) * - adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { - device_printf(dev, "Unable to allocate RX ring memory\n"); - error = ENOMEM; - goto rx_fail; - } - - /* For the ring itself */ - tsize = roundup2(adapter->num_tx_desc * - sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN); - - /* - * Now set up the TX queues, txconf is needed to handle the - * possibility that things fail midcourse and we need to - * undo memory gracefully - */ - for (int i = 0; i < adapter->num_queues; i++, txconf++) { - /* Set up some basics */ - txr = &adapter->tx_rings[i]; - txr->adapter = adapter; - txr->me = i; - txr->num_desc = adapter->num_tx_desc; - - /* Initialize the TX side lock */ - snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)", - device_get_nameunit(dev), txr->me); - mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF); - - if (ixgbe_dma_malloc(adapter, tsize, - &txr->txdma, BUS_DMA_NOWAIT)) { - device_printf(dev, - "Unable to allocate TX Descriptor memory\n"); - error = ENOMEM; - goto err_tx_desc; - } - txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr; - bzero((void *)txr->tx_base, tsize); - - /* Now allocate transmit buffers for the ring */ - if (ixgbe_allocate_transmit_buffers(txr)) { - device_printf(dev, - "Critical Failure setting up transmit buffers\n"); - error = ENOMEM; - goto err_tx_desc; - } -#ifndef IXGBE_LEGACY_TX - /* Allocate a buf ring */ - txr->br = buf_ring_alloc(IXGBE_BR_SIZE, M_DEVBUF, - M_WAITOK, &txr->tx_mtx); - if (txr->br == NULL) { - device_printf(dev, - "Critical Failure setting up buf ring\n"); - error = ENOMEM; - goto err_tx_desc; - } -#endif - } - - /* - * Next the RX queues... 
- */ - rsize = roundup2(adapter->num_rx_desc * - sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN); - for (int i = 0; i < adapter->num_queues; i++, rxconf++) { - rxr = &adapter->rx_rings[i]; - /* Set up some basics */ - rxr->adapter = adapter; - rxr->me = i; - rxr->num_desc = adapter->num_rx_desc; - - /* Initialize the RX side lock */ - snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)", - device_get_nameunit(dev), rxr->me); - mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF); - - if (ixgbe_dma_malloc(adapter, rsize, - &rxr->rxdma, BUS_DMA_NOWAIT)) { - device_printf(dev, - "Unable to allocate RxDescriptor memory\n"); - error = ENOMEM; - goto err_rx_desc; - } - rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr; - bzero((void *)rxr->rx_base, rsize); - - /* Allocate receive buffers for the ring*/ - if (ixgbe_allocate_receive_buffers(rxr)) { - device_printf(dev, - "Critical Failure setting up receive buffers\n"); - error = ENOMEM; - goto err_rx_desc; - } - } - - /* - ** Finally set up the queue holding structs - */ - for (int i = 0; i < adapter->num_queues; i++) { - que = &adapter->queues[i]; - que->adapter = adapter; - que->txr = &adapter->tx_rings[i]; - que->rxr = &adapter->rx_rings[i]; - } - - return (0); - -err_rx_desc: - for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--) - ixgbe_dma_free(adapter, &rxr->rxdma); -err_tx_desc: - for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--) - ixgbe_dma_free(adapter, &txr->txdma); - free(adapter->rx_rings, M_DEVBUF); -rx_fail: - free(adapter->tx_rings, M_DEVBUF); -tx_fail: - free(adapter->queues, M_DEVBUF); -fail: - return (error); -} - -/********************************************************************* - * - * Allocate memory for tx_buffer structures. The tx_buffer stores all - * the information needed to transmit a packet on the wire. This is - * called only once at attach, setup is done every reset. - * - **********************************************************************/ -static int -ixgbe_allocate_transmit_buffers(struct tx_ring *txr) -{ - struct adapter *adapter = txr->adapter; - device_t dev = adapter->dev; - struct ixgbe_tx_buf *txbuf; - int error, i; - - /* - * Setup DMA descriptor areas. - */ - if ((error = bus_dma_tag_create( - bus_get_dma_tag(adapter->dev), /* parent */ - 1, 0, /* alignment, bounds */ - BUS_SPACE_MAXADDR, /* lowaddr */ - BUS_SPACE_MAXADDR, /* highaddr */ - NULL, NULL, /* filter, filterarg */ - IXGBE_TSO_SIZE, /* maxsize */ - adapter->num_segs, /* nsegments */ - PAGE_SIZE, /* maxsegsize */ - 0, /* flags */ - NULL, /* lockfunc */ - NULL, /* lockfuncarg */ - &txr->txtag))) { - device_printf(dev,"Unable to allocate TX DMA tag\n"); - goto fail; - } - - if (!(txr->tx_buffers = - (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) * - adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) { - device_printf(dev, "Unable to allocate tx_buffer memory\n"); - error = ENOMEM; - goto fail; - } - - /* Create the descriptor buffer dma maps */ - txbuf = txr->tx_buffers; - for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) { - error = bus_dmamap_create(txr->txtag, 0, &txbuf->map); - if (error != 0) { - device_printf(dev, "Unable to create TX DMA map\n"); - goto fail; - } - } - - return 0; -fail: - /* We free all, it handles case where we are in the middle */ - ixgbe_free_transmit_structures(adapter); - return (error); -} - -/********************************************************************* - * - * Initialize a transmit ring. 
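The allocation path above unwinds with the txconf/rxconf counters: each counter is bumped once per fully completed loop iteration, so on failure the error labels free exactly the rings that were set up and nothing more. The idiom reduced to one resource type, all names illustrative stand-ins:

    #include <errno.h>
    #include <stdio.h>

    struct ring { int ready; };

    static int  setup_ring(struct ring *r)    { r->ready = 1; return (0); }
    static void teardown_ring(struct ring *r) { r->ready = 0; }

    static int
    alloc_rings(struct ring *rings, int n)
    {
            int conf = 0, error = 0;

            for (int i = 0; i < n; i++, conf++) {
                    if (setup_ring(&rings[i]) != 0) {
                            error = ENOMEM;
                            goto err;
                    }
            }
            return (0);
    err:
            /* Free exactly the 'conf' rings that completed setup. */
            for (struct ring *r = rings; conf > 0; r++, conf--)
                    teardown_ring(r);
            return (error);
    }

    int
    main(void)
    {
            struct ring rings[4];

            printf("alloc_rings: %d\n", alloc_rings(rings, 4));
            return (0);
    }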
- * - **********************************************************************/ -static void -ixgbe_setup_transmit_ring(struct tx_ring *txr) -{ - struct adapter *adapter = txr->adapter; - struct ixgbe_tx_buf *txbuf; - int i; -#ifdef DEV_NETMAP - struct netmap_adapter *na = NA(adapter->ifp); - struct netmap_slot *slot; -#endif /* DEV_NETMAP */ - - /* Clear the old ring contents */ - IXGBE_TX_LOCK(txr); -#ifdef DEV_NETMAP - /* - * (under lock): if in netmap mode, do some consistency - * checks and set slot to entry 0 of the netmap ring. - */ - slot = netmap_reset(na, NR_TX, txr->me, 0); -#endif /* DEV_NETMAP */ - bzero((void *)txr->tx_base, - (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc); - /* Reset indices */ - txr->next_avail_desc = 0; - txr->next_to_clean = 0; - - /* Free any existing tx buffers. */ - txbuf = txr->tx_buffers; - for (i = 0; i < txr->num_desc; i++, txbuf++) { - if (txbuf->m_head != NULL) { - bus_dmamap_sync(txr->txtag, txbuf->map, - BUS_DMASYNC_POSTWRITE); - bus_dmamap_unload(txr->txtag, txbuf->map); - m_freem(txbuf->m_head); - txbuf->m_head = NULL; - } -#ifdef DEV_NETMAP - /* - * In netmap mode, set the map for the packet buffer. - * NOTE: Some drivers (not this one) also need to set - * the physical buffer address in the NIC ring. - * Slots in the netmap ring (indexed by "si") are - * kring->nkr_hwofs positions "ahead" wrt the - * corresponding slot in the NIC ring. In some drivers - * (not here) nkr_hwofs can be negative. Function - * netmap_idx_n2k() handles wraparounds properly. - */ - if (slot) { - int si = netmap_idx_n2k(&na->tx_rings[txr->me], i); - netmap_load_map(na, txr->txtag, txbuf->map, NMB(na, slot + si)); - } -#endif /* DEV_NETMAP */ - /* Clear the EOP descriptor pointer */ - txbuf->eop = NULL; - } - -#ifdef IXGBE_FDIR - /* Set the rate at which we sample packets */ - if (adapter->hw.mac.type != ixgbe_mac_82598EB) - txr->atr_sample = atr_sample_rate; -#endif - - /* Set number of descriptors available */ - txr->tx_avail = adapter->num_tx_desc; - - bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, - BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); - IXGBE_TX_UNLOCK(txr); -} - -/********************************************************************* - * - * Initialize all transmit rings. - * - **********************************************************************/ -static int -ixgbe_setup_transmit_structures(struct adapter *adapter) -{ - struct tx_ring *txr = adapter->tx_rings; - - for (int i = 0; i < adapter->num_queues; i++, txr++) - ixgbe_setup_transmit_ring(txr); - - return (0); -} /********************************************************************* * - * Enable transmit unit. + * Enable transmit units. 
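The hunk that follows programs each ring's descriptor area into the hardware. The descriptor base is a 64-bit bus address while the registers are 32 bits wide, so it is written as a low/high pair with the ring length in bytes alongside; in sketch form (register macros as in this driver, txr being the driver's ring struct):

    u64 tdba = txr->txdma.dma_paddr;

    IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i), (u32)(tdba & 0x00000000ffffffffULL));
    IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (u32)(tdba >> 32));
    IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
        adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));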
* **********************************************************************/ static void @@ -3143,7 +2676,7 @@ ixgbe_initialize_transmit_units(struct adapter *adapter) for (int i = 0; i < adapter->num_queues; i++, txr++) { u64 tdba = txr->txdma.dma_paddr; - u32 txctrl; + u32 txctrl = 0; IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i), (tdba & 0x00000000ffffffffULL)); @@ -3155,9 +2688,8 @@ ixgbe_initialize_transmit_units(struct adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0); IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0); - /* Setup Transmit Descriptor Cmd Settings */ - txr->txd_cmd = IXGBE_TXD_CMD_IFCS; - txr->queue_status = IXGBE_QUEUE_IDLE; + /* Cache the tail address */ + txr->tail = IXGBE_TDT(txr->me); /* Set the processing limit */ txr->process_limit = ixgbe_tx_process_limit; @@ -3204,936 +2736,85 @@ ixgbe_initialize_transmit_units(struct adapter *adapter) return; } -/********************************************************************* - * - * Free all transmit rings. - * - **********************************************************************/ -static void -ixgbe_free_transmit_structures(struct adapter *adapter) -{ - struct tx_ring *txr = adapter->tx_rings; - - for (int i = 0; i < adapter->num_queues; i++, txr++) { - IXGBE_TX_LOCK(txr); - ixgbe_free_transmit_buffers(txr); - ixgbe_dma_free(adapter, &txr->txdma); - IXGBE_TX_UNLOCK(txr); - IXGBE_TX_LOCK_DESTROY(txr); - } - free(adapter->tx_rings, M_DEVBUF); -} - -/********************************************************************* - * - * Free transmit ring related data structures. - * - **********************************************************************/ static void -ixgbe_free_transmit_buffers(struct tx_ring *txr) +ixgbe_initialise_rss_mapping(struct adapter *adapter) { - struct adapter *adapter = txr->adapter; - struct ixgbe_tx_buf *tx_buffer; - int i; - - INIT_DEBUGOUT("ixgbe_free_transmit_ring: begin"); - - if (txr->tx_buffers == NULL) - return; - - tx_buffer = txr->tx_buffers; - for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) { - if (tx_buffer->m_head != NULL) { - bus_dmamap_sync(txr->txtag, tx_buffer->map, - BUS_DMASYNC_POSTWRITE); - bus_dmamap_unload(txr->txtag, - tx_buffer->map); - m_freem(tx_buffer->m_head); - tx_buffer->m_head = NULL; - if (tx_buffer->map != NULL) { - bus_dmamap_destroy(txr->txtag, - tx_buffer->map); - tx_buffer->map = NULL; - } - } else if (tx_buffer->map != NULL) { - bus_dmamap_unload(txr->txtag, - tx_buffer->map); - bus_dmamap_destroy(txr->txtag, - tx_buffer->map); - tx_buffer->map = NULL; - } - } -#ifdef IXGBE_LEGACY_TX - if (txr->br != NULL) - buf_ring_free(txr->br, M_DEVBUF); -#endif - if (txr->tx_buffers != NULL) { - free(txr->tx_buffers, M_DEVBUF); - txr->tx_buffers = NULL; - } - if (txr->txtag != NULL) { - bus_dma_tag_destroy(txr->txtag); - txr->txtag = NULL; - } - return; -} - -/********************************************************************* - * - * Advanced Context Descriptor setup for VLAN, CSUM or TSO - * - **********************************************************************/ - -static int -ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp, - u32 *cmd_type_len, u32 *olinfo_status) -{ - struct ixgbe_adv_tx_context_desc *TXD; - struct ether_vlan_header *eh; - struct ip *ip; - struct ip6_hdr *ip6; - u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0; - int ehdrlen, ip_hlen = 0; - u16 etype; - u8 ipproto = 0; - int offload = TRUE; - int ctxd = txr->next_avail_desc; - u16 vtag = 0; - - /* First check if TSO is to be used */ - if (mp->m_pkthdr.csum_flags & CSUM_TSO) - return (ixgbe_tso_setup(txr, 
mp, cmd_type_len, olinfo_status)); - - if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0) - offload = FALSE; - - /* Indicate the whole packet as payload when not doing TSO */ - *olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT; - - /* Now ready a context descriptor */ - TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd]; - - /* - ** In advanced descriptors the vlan tag must - ** be placed into the context descriptor. Hence - ** we need to make one even if not doing offloads. - */ - if (mp->m_flags & M_VLANTAG) { - vtag = htole16(mp->m_pkthdr.ether_vtag); - vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT); - } else if (offload == FALSE) /* ... no offload to do */ - return (0); - - /* - * Determine where frame payload starts. - * Jump over vlan headers if already present, - * helpful for QinQ too. - */ - eh = mtod(mp, struct ether_vlan_header *); - if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { - etype = ntohs(eh->evl_proto); - ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; - } else { - etype = ntohs(eh->evl_encap_proto); - ehdrlen = ETHER_HDR_LEN; - } - - /* Set the ether header length */ - vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT; - - switch (etype) { - case ETHERTYPE_IP: - ip = (struct ip *)(mp->m_data + ehdrlen); - ip_hlen = ip->ip_hl << 2; - ipproto = ip->ip_p; - type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; - break; - case ETHERTYPE_IPV6: - ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); - ip_hlen = sizeof(struct ip6_hdr); - /* XXX-BZ this will go badly in case of ext hdrs. */ - ipproto = ip6->ip6_nxt; - type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6; - break; - default: - offload = FALSE; - break; - } - - vlan_macip_lens |= ip_hlen; - type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; - - switch (ipproto) { - case IPPROTO_TCP: - if (mp->m_pkthdr.csum_flags & CSUM_TCP) - type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; - break; - - case IPPROTO_UDP: - if (mp->m_pkthdr.csum_flags & CSUM_UDP) - type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP; - break; - -#if __FreeBSD_version >= 800000 - case IPPROTO_SCTP: - if (mp->m_pkthdr.csum_flags & CSUM_SCTP) - type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP; - break; -#endif - default: - offload = FALSE; - break; - } - - if (offload) /* For the TX descriptor setup */ - *olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8; - - /* Now copy bits into descriptor */ - TXD->vlan_macip_lens = htole32(vlan_macip_lens); - TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl); - TXD->seqnum_seed = htole32(0); - TXD->mss_l4len_idx = htole32(0); - - /* We've consumed the first desc, adjust counters */ - if (++ctxd == txr->num_desc) - ctxd = 0; - txr->next_avail_desc = ctxd; - --txr->tx_avail; - - return (0); -} - -/********************************************************************** - * - * Setup work for hardware segmentation offload (TSO) on - * adapters using advanced tx descriptors - * - **********************************************************************/ -static int -ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, - u32 *cmd_type_len, u32 *olinfo_status) -{ - struct ixgbe_adv_tx_context_desc *TXD; - u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0; - u32 mss_l4len_idx = 0, paylen; - u16 vtag = 0, eh_type; - int ctxd, ehdrlen, ip_hlen, tcp_hlen; - struct ether_vlan_header *eh; -#ifdef INET6 - struct ip6_hdr *ip6; -#endif -#ifdef INET - struct ip *ip; -#endif - struct tcphdr *th; + struct ixgbe_hw *hw = &adapter->hw; + uint32_t reta; + int i, j, queue_id, table_size; + int index_mult; + uint32_t rss_key[10]; 
+ uint32_t mrqc; + /* Setup RSS */ + reta = 0; - /* - * Determine where frame payload starts. - * Jump over vlan headers if already present - */ - eh = mtod(mp, struct ether_vlan_header *); - if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { - ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; - eh_type = eh->evl_proto; - } else { - ehdrlen = ETHER_HDR_LEN; - eh_type = eh->evl_encap_proto; - } + /* set up random bits */ + arc4rand(&rss_key, sizeof(rss_key), 0); - switch (ntohs(eh_type)) { -#ifdef INET6 - case ETHERTYPE_IPV6: - ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); - /* XXX-BZ For now we do not pretend to support ext. hdrs. */ - if (ip6->ip6_nxt != IPPROTO_TCP) - return (ENXIO); - ip_hlen = sizeof(struct ip6_hdr); - ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); - th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen); - th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0); - type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6; + /* Set multiplier for RETA setup and table size based on MAC */ + index_mult = 0x1; + table_size = 128; + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + index_mult = 0x11; break; -#endif -#ifdef INET - case ETHERTYPE_IP: - ip = (struct ip *)(mp->m_data + ehdrlen); - if (ip->ip_p != IPPROTO_TCP) - return (ENXIO); - ip->ip_sum = 0; - ip_hlen = ip->ip_hl << 2; - th = (struct tcphdr *)((caddr_t)ip + ip_hlen); - th->th_sum = in_pseudo(ip->ip_src.s_addr, - ip->ip_dst.s_addr, htons(IPPROTO_TCP)); - type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; - /* Tell transmit desc to also do IPv4 checksum. */ - *olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8; + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + table_size = 512; break; -#endif default: - panic("%s: CSUM_TSO but no supported IP version (0x%04x)", - __func__, ntohs(eh_type)); - break; - } - - ctxd = txr->next_avail_desc; - TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd]; - - tcp_hlen = th->th_off << 2; - - /* This is used in the transmit desc in encap */ - paylen = mp->m_pkthdr.len - ehdrlen - ip_hlen - tcp_hlen; - - /* VLAN MACLEN IPLEN */ - if (mp->m_flags & M_VLANTAG) { - vtag = htole16(mp->m_pkthdr.ether_vtag); - vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT); - } - - vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT; - vlan_macip_lens |= ip_hlen; - TXD->vlan_macip_lens = htole32(vlan_macip_lens); - - /* ADV DTYPE TUCMD */ - type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; - type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; - TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl); - - /* MSS L4LEN IDX */ - mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT); - mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT); - TXD->mss_l4len_idx = htole32(mss_l4len_idx); - - TXD->seqnum_seed = htole32(0); - - if (++ctxd == txr->num_desc) - ctxd = 0; - - txr->tx_avail--; - txr->next_avail_desc = ctxd; - *cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; - *olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8; - *olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT; - ++txr->tso_tx; - return (0); -} - -#ifdef IXGBE_FDIR -/* -** This routine parses packet headers so that Flow -** Director can make a hashed filter table entry -** allowing traffic flows to be identified and kept -** on the same cpu. This would be a performance -** hit, but we only do it at IXGBE_FDIR_RATE of -** packets. 
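One detail worth pulling out of the removed TSO path above: before handing a packet to the hardware, the driver zeroes the IP checksum and seeds the TCP checksum with only the pseudo-header sum, because the device computes the final checksum per generated segment and the TCP length must therefore be left out. For IPv4 that priming is:

    ip->ip_sum = 0;                 /* device fills the IP checksum */
    th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
    th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
        htons(IPPROTO_TCP));        /* pseudo-header sum, length omitted */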
-*/ -static void -ixgbe_atr(struct tx_ring *txr, struct mbuf *mp) -{ - struct adapter *adapter = txr->adapter; - struct ix_queue *que; - struct ip *ip; - struct tcphdr *th; - struct udphdr *uh; - struct ether_vlan_header *eh; - union ixgbe_atr_hash_dword input = {.dword = 0}; - union ixgbe_atr_hash_dword common = {.dword = 0}; - int ehdrlen, ip_hlen; - u16 etype; - - eh = mtod(mp, struct ether_vlan_header *); - if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { - ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; - etype = eh->evl_proto; - } else { - ehdrlen = ETHER_HDR_LEN; - etype = eh->evl_encap_proto; - } - - /* Only handling IPv4 */ - if (etype != htons(ETHERTYPE_IP)) - return; - - ip = (struct ip *)(mp->m_data + ehdrlen); - ip_hlen = ip->ip_hl << 2; - - /* check if we're UDP or TCP */ - switch (ip->ip_p) { - case IPPROTO_TCP: - th = (struct tcphdr *)((caddr_t)ip + ip_hlen); - /* src and dst are inverted */ - common.port.dst ^= th->th_sport; - common.port.src ^= th->th_dport; - input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_TCPV4; break; - case IPPROTO_UDP: - uh = (struct udphdr *)((caddr_t)ip + ip_hlen); - /* src and dst are inverted */ - common.port.dst ^= uh->uh_sport; - common.port.src ^= uh->uh_dport; - input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_UDPV4; - break; - default: - return; - } - - input.formatted.vlan_id = htobe16(mp->m_pkthdr.ether_vtag); - if (mp->m_pkthdr.ether_vtag) - common.flex_bytes ^= htons(ETHERTYPE_VLAN); - else - common.flex_bytes ^= etype; - common.ip ^= ip->ip_src.s_addr ^ ip->ip_dst.s_addr; - - que = &adapter->queues[txr->me]; - /* - ** This assumes the Rx queue and Tx - ** queue are bound to the same CPU - */ - ixgbe_fdir_add_signature_filter_82599(&adapter->hw, - input, common, que->msix); -} -#endif /* IXGBE_FDIR */ - -/********************************************************************** - * - * Examine each tx_buffer in the used queue. If the hardware is done - * processing the packet then free associated resources. The - * tx_buffer is put back on the free queue. - * - **********************************************************************/ -static void -ixgbe_txeof(struct tx_ring *txr) -{ - struct adapter *adapter = txr->adapter; - struct ifnet *ifp = adapter->ifp; - u32 work, processed = 0; - u16 limit = txr->process_limit; - struct ixgbe_tx_buf *buf; - union ixgbe_adv_tx_desc *txd; - - mtx_assert(&txr->tx_mtx, MA_OWNED); - -#ifdef DEV_NETMAP - if (ifp->if_capenable & IFCAP_NETMAP) { - struct netmap_adapter *na = NA(ifp); - struct netmap_kring *kring = &na->tx_rings[txr->me]; - txd = txr->tx_base; - bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, - BUS_DMASYNC_POSTREAD); - /* - * In netmap mode, all the work is done in the context - * of the client thread. Interrupt handlers only wake up - * clients, which may be sleeping on individual rings - * or on a global resource for all rings. - * To implement tx interrupt mitigation, we wake up the client - * thread roughly every half ring, even if the NIC interrupts - * more frequently. This is implemented as follows: - * - ixgbe_txsync() sets kring->nr_kflags with the index of - * the slot that should wake up the thread (nkr_num_slots - * means the user thread should not be woken up); - * - the driver ignores tx interrupts unless netmap_mitigate=0 - * or the slot has the DD bit set. - * - * When the driver has separate locks, we need to - * release and re-acquire txlock to avoid deadlocks. - * XXX see if we can find a better way. 
- */ - if (!netmap_mitigate || - (kring->nr_kflags < kring->nkr_num_slots && - txd[kring->nr_kflags].wb.status & IXGBE_TXD_STAT_DD)) { - netmap_tx_irq(ifp, txr->me); - } - return; - } -#endif /* DEV_NETMAP */ - - if (txr->tx_avail == txr->num_desc) { - txr->queue_status = IXGBE_QUEUE_IDLE; - return; - } - - /* Get work starting point */ - work = txr->next_to_clean; - buf = &txr->tx_buffers[work]; - txd = &txr->tx_base[work]; - work -= txr->num_desc; /* The distance to ring end */ - bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, - BUS_DMASYNC_POSTREAD); - - do { - union ixgbe_adv_tx_desc *eop= buf->eop; - if (eop == NULL) /* No work */ - break; - - if ((eop->wb.status & IXGBE_TXD_STAT_DD) == 0) - break; /* I/O not complete */ - - if (buf->m_head) { - txr->bytes += - buf->m_head->m_pkthdr.len; - bus_dmamap_sync(txr->txtag, - buf->map, - BUS_DMASYNC_POSTWRITE); - bus_dmamap_unload(txr->txtag, - buf->map); - m_freem(buf->m_head); - buf->m_head = NULL; - buf->map = NULL; - } - buf->eop = NULL; - ++txr->tx_avail; - - /* We clean the range if multi segment */ - while (txd != eop) { - ++txd; - ++buf; - ++work; - /* wrap the ring? */ - if (__predict_false(!work)) { - work -= txr->num_desc; - buf = txr->tx_buffers; - txd = txr->tx_base; - } - if (buf->m_head) { - txr->bytes += - buf->m_head->m_pkthdr.len; - bus_dmamap_sync(txr->txtag, - buf->map, - BUS_DMASYNC_POSTWRITE); - bus_dmamap_unload(txr->txtag, - buf->map); - m_freem(buf->m_head); - buf->m_head = NULL; - buf->map = NULL; - } - ++txr->tx_avail; - buf->eop = NULL; - - } - ++txr->packets; - ++processed; - ++ifp->if_opackets; - txr->watchdog_time = ticks; - - /* Try the next packet */ - ++txd; - ++buf; - ++work; - /* reset with a wrap */ - if (__predict_false(!work)) { - work -= txr->num_desc; - buf = txr->tx_buffers; - txd = txr->tx_base; - } - prefetch(txd); - } while (__predict_true(--limit)); - - bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, - BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); - - work += txr->num_desc; - txr->next_to_clean = work; - - /* - ** Watchdog calculation, we know there's - ** work outstanding or the first return - ** would have been taken, so none processed - ** for too long indicates a hang. - */ - if ((!processed) && ((ticks - txr->watchdog_time) > IXGBE_WATCHDOG)) - txr->queue_status = IXGBE_QUEUE_HUNG; - - if (txr->tx_avail == txr->num_desc) - txr->queue_status = IXGBE_QUEUE_IDLE; - - return; -} - -/********************************************************************* - * - * Refresh mbuf buffers for RX descriptor rings - * - now keeps its own state so discards due to resource - * exhaustion are unnecessary, if an mbuf cannot be obtained - * it just returns, keeping its placeholder, thus it can simply - * be recalled to try again. 
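A small indexing trick in the removed ixgbe_txeof() above: the cursor 'work' is biased by -num_desc at the start, so it climbs toward zero and the ring-wrap test becomes a cheap '!work' instead of a compare against num_desc; adding num_desc back at the end recovers the real index for next_to_clean. A standalone model of the walk:

    #include <stdio.h>

    #define NUM_DESC 8

    int
    main(void)
    {
            int work, next_to_clean = 6;

            work = next_to_clean;
            work -= NUM_DESC;       /* now minus the distance to ring end */

            for (int steps = 0; steps < 5; steps++) {
                    printf("ring index %d\n", work + NUM_DESC);
                    ++work;
                    if (!work)      /* hit the end: rewind to slot 0 */
                            work -= NUM_DESC;
            }
            printf("next_to_clean = %d\n", work + NUM_DESC);
            return (0);
    }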
- * - **********************************************************************/ -static void -ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit) -{ - struct adapter *adapter = rxr->adapter; - bus_dma_segment_t seg[1]; - struct ixgbe_rx_buf *rxbuf; - struct mbuf *mp; - int i, j, nsegs, error; - bool refreshed = FALSE; - - i = j = rxr->next_to_refresh; - /* Control the loop with one beyond */ - if (++j == rxr->num_desc) - j = 0; - - while (j != limit) { - rxbuf = &rxr->rx_buffers[i]; - if (rxbuf->buf == NULL) { - mp = m_getjcl(M_NOWAIT, MT_DATA, - M_PKTHDR, rxr->mbuf_sz); - if (mp == NULL) - goto update; - if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN)) - m_adj(mp, ETHER_ALIGN); - } else - mp = rxbuf->buf; - - mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz; - - /* If we're dealing with an mbuf that was copied rather - * than replaced, there's no need to go through busdma. - */ - if ((rxbuf->flags & IXGBE_RX_COPY) == 0) { - /* Get the memory mapping */ - error = bus_dmamap_load_mbuf_sg(rxr->ptag, - rxbuf->pmap, mp, seg, &nsegs, BUS_DMA_NOWAIT); - if (error != 0) { - printf("Refresh mbufs: payload dmamap load" - " failure - %d\n", error); - m_free(mp); - rxbuf->buf = NULL; - goto update; - } - rxbuf->buf = mp; - bus_dmamap_sync(rxr->ptag, rxbuf->pmap, - BUS_DMASYNC_PREREAD); - rxbuf->addr = rxr->rx_base[i].read.pkt_addr = - htole64(seg[0].ds_addr); - } else { - rxr->rx_base[i].read.pkt_addr = rxbuf->addr; - rxbuf->flags &= ~IXGBE_RX_COPY; - } - - refreshed = TRUE; - /* Next is precalculated */ - i = j; - rxr->next_to_refresh = i; - if (++j == rxr->num_desc) - j = 0; - } -update: - if (refreshed) /* Update hardware tail index */ - IXGBE_WRITE_REG(&adapter->hw, - IXGBE_RDT(rxr->me), rxr->next_to_refresh); - return; -} - -/********************************************************************* - * - * Allocate memory for rx_buffer structures. Since we use one - * rx_buffer per received packet, the maximum number of rx_buffer's - * that we'll need is equal to the number of receive descriptors - * that we've allocated. - * - **********************************************************************/ -static int -ixgbe_allocate_receive_buffers(struct rx_ring *rxr) -{ - struct adapter *adapter = rxr->adapter; - device_t dev = adapter->dev; - struct ixgbe_rx_buf *rxbuf; - int i, bsize, error; - - bsize = sizeof(struct ixgbe_rx_buf) * rxr->num_desc; - if (!(rxr->rx_buffers = - (struct ixgbe_rx_buf *) malloc(bsize, - M_DEVBUF, M_NOWAIT | M_ZERO))) { - device_printf(dev, "Unable to allocate rx_buffer memory\n"); - error = ENOMEM; - goto fail; - } - - if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ - 1, 0, /* alignment, bounds */ - BUS_SPACE_MAXADDR, /* lowaddr */ - BUS_SPACE_MAXADDR, /* highaddr */ - NULL, NULL, /* filter, filterarg */ - MJUM16BYTES, /* maxsize */ - 1, /* nsegments */ - MJUM16BYTES, /* maxsegsize */ - 0, /* flags */ - NULL, /* lockfunc */ - NULL, /* lockfuncarg */ - &rxr->ptag))) { - device_printf(dev, "Unable to create RX DMA tag\n"); - goto fail; - } - - for (i = 0; i < rxr->num_desc; i++, rxbuf++) { - rxbuf = &rxr->rx_buffers[i]; - error = bus_dmamap_create(rxr->ptag, - BUS_DMA_NOWAIT, &rxbuf->pmap); - if (error) { - device_printf(dev, "Unable to create RX dma map\n"); - goto fail; - } - } - - return (0); - -fail: - /* Frees all, but can handle partial completion */ - ixgbe_free_receive_structures(adapter); - return (error); -} - -/* -** Used to detect a descriptor that has -** been merged by Hardware RSC. 
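ixgbe_refresh_mbufs() above runs two cursors: i is the slot being rearmed and j stays one ahead, so the loop stops one slot short of 'limit' and next_to_refresh always names the last slot that actually received a buffer, which is also the value written to the hardware tail (RDT). The cursor movement in isolation:

    #include <stdio.h>

    #define NUM_DESC 8

    int
    main(void)
    {
            int i, j, next_to_refresh = 5, limit = 2;

            i = j = next_to_refresh;
            if (++j == NUM_DESC)    /* control the loop with one beyond */
                    j = 0;
            while (j != limit) {
                    printf("arm slot %d\n", i);
                    i = j;          /* next slot is precalculated */
                    next_to_refresh = i;
                    if (++j == NUM_DESC)
                            j = 0;
            }
            printf("tail -> %d\n", next_to_refresh);
            return (0);
    }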
-*/ -static inline u32 -ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx) -{ - return (le32toh(rx->wb.lower.lo_dword.data) & - IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT; -} - -/********************************************************************* - * - * Initialize Hardware RSC (LRO) feature on 82599 - * for an RX ring, this is toggled by the LRO capability - * even though it is transparent to the stack. - * - * NOTE: since this HW feature only works with IPV4 and - * our testing has shown soft LRO to be as effective - * I have decided to disable this by default. - * - **********************************************************************/ -static void -ixgbe_setup_hw_rsc(struct rx_ring *rxr) -{ - struct adapter *adapter = rxr->adapter; - struct ixgbe_hw *hw = &adapter->hw; - u32 rscctrl, rdrxctl; - - /* If turning LRO/RSC off we need to disable it */ - if ((adapter->ifp->if_capenable & IFCAP_LRO) == 0) { - rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me)); - rscctrl &= ~IXGBE_RSCCTL_RSCEN; - return; - } - - rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); - rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; -#ifdef DEV_NETMAP /* crcstrip is optional in netmap */ - if (adapter->ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip) -#endif /* DEV_NETMAP */ - rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP; - rdrxctl |= IXGBE_RDRXCTL_RSCACKC; - IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); - - rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me)); - rscctrl |= IXGBE_RSCCTL_RSCEN; - /* - ** Limit the total number of descriptors that - ** can be combined, so it does not exceed 64K - */ - if (rxr->mbuf_sz == MCLBYTES) - rscctrl |= IXGBE_RSCCTL_MAXDESC_16; - else if (rxr->mbuf_sz == MJUMPAGESIZE) - rscctrl |= IXGBE_RSCCTL_MAXDESC_8; - else if (rxr->mbuf_sz == MJUM9BYTES) - rscctrl |= IXGBE_RSCCTL_MAXDESC_4; - else /* Using 16K cluster */ - rscctrl |= IXGBE_RSCCTL_MAXDESC_1; - - IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl); - - /* Enable TCP header recognition */ - IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), - (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) | - IXGBE_PSRTYPE_TCPHDR)); - - /* Disable RSC for ACK packets */ - IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, - (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU))); - - rxr->hw_rsc = TRUE; -} - - -static void -ixgbe_free_receive_ring(struct rx_ring *rxr) -{ - struct ixgbe_rx_buf *rxbuf; - int i; - - for (i = 0; i < rxr->num_desc; i++) { - rxbuf = &rxr->rx_buffers[i]; - if (rxbuf->buf != NULL) { - bus_dmamap_sync(rxr->ptag, rxbuf->pmap, - BUS_DMASYNC_POSTREAD); - bus_dmamap_unload(rxr->ptag, rxbuf->pmap); - rxbuf->buf->m_flags |= M_PKTHDR; - m_freem(rxbuf->buf); - rxbuf->buf = NULL; - rxbuf->flags = 0; - } } -} - - -/********************************************************************* - * - * Initialize a receive ring and its buffers. 
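The removed ixgbe_setup_hw_rsc() above caps how many receive descriptors the hardware may merge so that a coalesced frame stays under the 64K limit its comment mentions: the cap shrinks as the cluster size grows. A quick sanity check of those pairings, assuming the standard FreeBSD 2K/4K/9K/16K cluster sizes:

    #include <stdio.h>

    int
    main(void)
    {
            const int clbytes[] = { 2048, 4096, 9216, 16384 };
            const int maxdesc[] = {   16,    8,    4,     1 };

            for (int i = 0; i < 4; i++)
                    printf("%5d-byte clusters x %2d descriptors = %6d bytes\n",
                        clbytes[i], maxdesc[i], clbytes[i] * maxdesc[i]);
            return (0);
    }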
- * - **********************************************************************/ -static int -ixgbe_setup_receive_ring(struct rx_ring *rxr) -{ - struct adapter *adapter; - struct ifnet *ifp; - device_t dev; - struct ixgbe_rx_buf *rxbuf; - bus_dma_segment_t seg[1]; - struct lro_ctrl *lro = &rxr->lro; - int rsize, nsegs, error = 0; -#ifdef DEV_NETMAP - struct netmap_adapter *na = NA(rxr->adapter->ifp); - struct netmap_slot *slot; -#endif /* DEV_NETMAP */ - adapter = rxr->adapter; - ifp = adapter->ifp; - dev = adapter->dev; - - /* Clear the ring contents */ - IXGBE_RX_LOCK(rxr); -#ifdef DEV_NETMAP - /* same as in ixgbe_setup_transmit_ring() */ - slot = netmap_reset(na, NR_RX, rxr->me, 0); -#endif /* DEV_NETMAP */ - rsize = roundup2(adapter->num_rx_desc * - sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN); - bzero((void *)rxr->rx_base, rsize); - /* Cache the size */ - rxr->mbuf_sz = adapter->rx_mbuf_sz; - - /* Free current RX buffer structs and their mbufs */ - ixgbe_free_receive_ring(rxr); - - /* Now replenish the mbufs */ - for (int j = 0; j != rxr->num_desc; ++j) { - struct mbuf *mp; - - rxbuf = &rxr->rx_buffers[j]; -#ifdef DEV_NETMAP + /* Set up the redirection table */ + for (i = 0, j = 0; i < table_size; i++, j++) { + if (j == adapter->num_queues) j = 0; + queue_id = (j * index_mult); /* - * In netmap mode, fill the map and set the buffer - * address in the NIC ring, considering the offset - * between the netmap and NIC rings (see comment in - * ixgbe_setup_transmit_ring() ). No need to allocate - * an mbuf, so end the block with a continue; + * The low 8 bits are for hash value (n+0); + * The next 8 bits are for hash value (n+1), etc. */ - if (slot) { - int sj = netmap_idx_n2k(&na->rx_rings[rxr->me], j); - uint64_t paddr; - void *addr; - - addr = PNMB(na, slot + sj, &paddr); - netmap_load_map(na, rxr->ptag, rxbuf->pmap, addr); - /* Update descriptor and the cached value */ - rxr->rx_base[j].read.pkt_addr = htole64(paddr); - rxbuf->addr = htole64(paddr); - continue; - } -#endif /* DEV_NETMAP */ - rxbuf->flags = 0; - rxbuf->buf = m_getjcl(M_NOWAIT, MT_DATA, - M_PKTHDR, adapter->rx_mbuf_sz); - if (rxbuf->buf == NULL) { - error = ENOBUFS; - goto fail; + reta = reta >> 8; + reta = reta | ( ((uint32_t) queue_id) << 24); + if ((i & 3) == 3) { + if (i < 128) + IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); + else + IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), reta); + reta = 0; } - mp = rxbuf->buf; - mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz; - /* Get the memory mapping */ - error = bus_dmamap_load_mbuf_sg(rxr->ptag, - rxbuf->pmap, mp, seg, - &nsegs, BUS_DMA_NOWAIT); - if (error != 0) - goto fail; - bus_dmamap_sync(rxr->ptag, - rxbuf->pmap, BUS_DMASYNC_PREREAD); - /* Update the descriptor and the cached value */ - rxr->rx_base[j].read.pkt_addr = htole64(seg[0].ds_addr); - rxbuf->addr = htole64(seg[0].ds_addr); } + /* Now fill our hash function seeds */ + for (int i = 0; i < 10; i++) + IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]); - /* Setup our descriptor indices */ - rxr->next_to_check = 0; - rxr->next_to_refresh = 0; - rxr->lro_enabled = FALSE; - rxr->rx_copies = 0; - rxr->rx_bytes = 0; - rxr->vtag_strip = FALSE; - - bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, - BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); - + /* Perform hash on these packet types */ /* - ** Now set up the LRO interface: - */ - if (ixgbe_rsc_enable) - ixgbe_setup_hw_rsc(rxr); - else if (ifp->if_capenable & IFCAP_LRO) { - int err = tcp_lro_init(lro); - if (err) { - device_printf(dev, "LRO Initialization 
failed!\n"); - goto fail; - } - INIT_DEBUGOUT("RX Soft LRO Initialized\n"); - rxr->lro_enabled = TRUE; - lro->ifp = adapter->ifp; - } - - IXGBE_RX_UNLOCK(rxr); - return (0); - -fail: - ixgbe_free_receive_ring(rxr); - IXGBE_RX_UNLOCK(rxr); - return (error); -} - -/********************************************************************* - * - * Initialize all receive rings. - * - **********************************************************************/ -static int -ixgbe_setup_receive_structures(struct adapter *adapter) -{ - struct rx_ring *rxr = adapter->rx_rings; - int j; - - for (j = 0; j < adapter->num_queues; j++, rxr++) - if (ixgbe_setup_receive_ring(rxr)) - goto fail; - - return (0); -fail: - /* - * Free RX buffers allocated so far, we will only handle - * the rings that completed, the failing case will have - * cleaned up for itself. 'j' failed, so its the terminus. + * Disable UDP - IP fragments aren't currently being handled + * and so we end up with a mix of 2-tuple and 4-tuple + * traffic. */ - for (int i = 0; i < j; ++i) { - rxr = &adapter->rx_rings[i]; - ixgbe_free_receive_ring(rxr); - } - - return (ENOBUFS); + mrqc = IXGBE_MRQC_RSSEN + | IXGBE_MRQC_RSS_FIELD_IPV4 + | IXGBE_MRQC_RSS_FIELD_IPV4_TCP +#if 0 + | IXGBE_MRQC_RSS_FIELD_IPV4_UDP +#endif + | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP + | IXGBE_MRQC_RSS_FIELD_IPV6_EX + | IXGBE_MRQC_RSS_FIELD_IPV6 + | IXGBE_MRQC_RSS_FIELD_IPV6_TCP +#if 0 + | IXGBE_MRQC_RSS_FIELD_IPV6_UDP + | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP +#endif + ; + IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); } + /********************************************************************* * * Setup receive registers and features. @@ -4149,23 +2830,23 @@ ixgbe_initialize_receive_units(struct adapter *adapter) struct rx_ring *rxr = adapter->rx_rings; struct ixgbe_hw *hw = &adapter->hw; struct ifnet *ifp = adapter->ifp; - u32 bufsz, rxctrl, fctrl, srrctl, rxcsum; - u32 reta, mrqc = 0, hlreg, random[10]; + u32 bufsz, fctrl, srrctl, rxcsum; + u32 hlreg; /* * Make sure receives are disabled while * setting up the descriptor ring */ - rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); - IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, - rxctrl & ~IXGBE_RXCTRL_RXEN); + ixgbe_disable_rx(hw); /* Enable broadcasts */ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); fctrl |= IXGBE_FCTRL_BAM; - fctrl |= IXGBE_FCTRL_DPF; - fctrl |= IXGBE_FCTRL_PMCF; + if (adapter->hw.mac.type == ixgbe_mac_82598EB) { + fctrl |= IXGBE_FCTRL_DPF; + fctrl |= IXGBE_FCTRL_PMCF; + } IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); /* Set for Jumbo Frames? 
*/ @@ -4224,6 +2905,9 @@ ixgbe_initialize_receive_units(struct adapter *adapter) /* Set the processing limit */ rxr->process_limit = ixgbe_rx_process_limit; + + /* Set the driver rx tail address */ + rxr->tail = IXGBE_RDT(rxr->me); } if (adapter->hw.mac.type != ixgbe_mac_82598EB) { @@ -4236,39 +2920,9 @@ ixgbe_initialize_receive_units(struct adapter *adapter) rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); - /* Setup RSS */ - if (adapter->num_queues > 1) { - int i, j; - reta = 0; - - /* set up random bits */ - arc4rand(&random, sizeof(random), 0); - - /* Set up the redirection table */ - for (i = 0, j = 0; i < 128; i++, j++) { - if (j == adapter->num_queues) j = 0; - reta = (reta << 8) | (j * 0x11); - if ((i & 3) == 3) - IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); - } - - /* Now fill our hash function seeds */ - for (int i = 0; i < 10; i++) - IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random[i]); - - /* Perform hash on these packet types */ - mrqc = IXGBE_MRQC_RSSEN - | IXGBE_MRQC_RSS_FIELD_IPV4 - | IXGBE_MRQC_RSS_FIELD_IPV4_TCP - | IXGBE_MRQC_RSS_FIELD_IPV4_UDP - | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP - | IXGBE_MRQC_RSS_FIELD_IPV6_EX - | IXGBE_MRQC_RSS_FIELD_IPV6 - | IXGBE_MRQC_RSS_FIELD_IPV6_TCP - | IXGBE_MRQC_RSS_FIELD_IPV6_UDP - | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; - IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); + ixgbe_initialise_rss_mapping(adapter); + if (adapter->num_queues > 1) { /* RSS and RX IPP Checksum are mutually exclusive */ rxcsum |= IXGBE_RXCSUM_PCSD; } @@ -4284,410 +2938,6 @@ ixgbe_initialize_receive_units(struct adapter *adapter) return; } -/********************************************************************* - * - * Free all receive rings. - * - **********************************************************************/ -static void -ixgbe_free_receive_structures(struct adapter *adapter) -{ - struct rx_ring *rxr = adapter->rx_rings; - - INIT_DEBUGOUT("ixgbe_free_receive_structures: begin"); - - for (int i = 0; i < adapter->num_queues; i++, rxr++) { - struct lro_ctrl *lro = &rxr->lro; - ixgbe_free_receive_buffers(rxr); - /* Free LRO memory */ - tcp_lro_free(lro); - /* Free the ring memory as well */ - ixgbe_dma_free(adapter, &rxr->rxdma); - } - - free(adapter->rx_rings, M_DEVBUF); -} - - -/********************************************************************* - * - * Free receive ring data structures - * - **********************************************************************/ -static void -ixgbe_free_receive_buffers(struct rx_ring *rxr) -{ - struct adapter *adapter = rxr->adapter; - struct ixgbe_rx_buf *rxbuf; - - INIT_DEBUGOUT("ixgbe_free_receive_buffers: begin"); - - /* Cleanup any existing buffers */ - if (rxr->rx_buffers != NULL) { - for (int i = 0; i < adapter->num_rx_desc; i++) { - rxbuf = &rxr->rx_buffers[i]; - if (rxbuf->buf != NULL) { - bus_dmamap_sync(rxr->ptag, rxbuf->pmap, - BUS_DMASYNC_POSTREAD); - bus_dmamap_unload(rxr->ptag, rxbuf->pmap); - rxbuf->buf->m_flags |= M_PKTHDR; - m_freem(rxbuf->buf); - } - rxbuf->buf = NULL; - if (rxbuf->pmap != NULL) { - bus_dmamap_destroy(rxr->ptag, rxbuf->pmap); - rxbuf->pmap = NULL; - } - } - if (rxr->rx_buffers != NULL) { - free(rxr->rx_buffers, M_DEVBUF); - rxr->rx_buffers = NULL; - } - } - - if (rxr->ptag != NULL) { - bus_dma_tag_destroy(rxr->ptag); - rxr->ptag = NULL; - } - - return; -} - -static __inline void -ixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype) -{ - - /* - * ATM LRO is only for IP/TCP packets and TCP checksum of the packet - * should be computed by hardware. 
Also it should not have VLAN tag in - * ethernet header. In case of IPv6 we do not yet support ext. hdrs. - */ - if (rxr->lro_enabled && - (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 && - (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 && - ((ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) == - (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) || - (ptype & (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) == - (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) && - (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) == - (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) { - /* - * Send to the stack if: - ** - LRO not enabled, or - ** - no LRO resources, or - ** - lro enqueue fails - */ - if (rxr->lro.lro_cnt != 0) - if (tcp_lro_rx(&rxr->lro, m, 0) == 0) - return; - } - IXGBE_RX_UNLOCK(rxr); - (*ifp->if_input)(ifp, m); - IXGBE_RX_LOCK(rxr); -} - -static __inline void -ixgbe_rx_discard(struct rx_ring *rxr, int i) -{ - struct ixgbe_rx_buf *rbuf; - - rbuf = &rxr->rx_buffers[i]; - - - /* - ** With advanced descriptors the writeback - ** clobbers the buffer addrs, so its easier - ** to just free the existing mbufs and take - ** the normal refresh path to get new buffers - ** and mapping. - */ - - if (rbuf->fmp != NULL) {/* Partial chain ? */ - rbuf->fmp->m_flags |= M_PKTHDR; - m_freem(rbuf->fmp); - rbuf->fmp = NULL; - rbuf->buf = NULL; /* rbuf->buf is part of fmp's chain */ - } else if (rbuf->buf) { - m_free(rbuf->buf); - rbuf->buf = NULL; - } - - rbuf->flags = 0; - - return; -} - - -/********************************************************************* - * - * This routine executes in interrupt context. It replenishes - * the mbufs in the descriptor and sends data which has been - * dma'ed into host memory to upper layer. - * - * We loop at most count times if count is > 0, or until done if - * count < 0. - * - * Return TRUE for more work, FALSE for all clean. - *********************************************************************/ -static bool -ixgbe_rxeof(struct ix_queue *que) -{ - struct adapter *adapter = que->adapter; - struct rx_ring *rxr = que->rxr; - struct ifnet *ifp = adapter->ifp; - struct lro_ctrl *lro = &rxr->lro; - struct lro_entry *queued; - int i, nextp, processed = 0; - u32 staterr = 0; - u16 count = rxr->process_limit; - union ixgbe_adv_rx_desc *cur; - struct ixgbe_rx_buf *rbuf, *nbuf; - - IXGBE_RX_LOCK(rxr); - -#ifdef DEV_NETMAP - /* Same as the txeof routine: wakeup clients on intr. */ - if (netmap_rx_irq(ifp, rxr->me, &processed)) { - IXGBE_RX_UNLOCK(rxr); - return (FALSE); - } -#endif /* DEV_NETMAP */ - - for (i = rxr->next_to_check; count != 0;) { - struct mbuf *sendmp, *mp; - u32 rsc, ptype; - u16 len; - u16 vtag = 0; - bool eop; - - /* Sync the ring. 
*/ - bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, - BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); - - cur = &rxr->rx_base[i]; - staterr = le32toh(cur->wb.upper.status_error); - - if ((staterr & IXGBE_RXD_STAT_DD) == 0) - break; - if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) - break; - - count--; - sendmp = NULL; - nbuf = NULL; - rsc = 0; - cur->wb.upper.status_error = 0; - rbuf = &rxr->rx_buffers[i]; - mp = rbuf->buf; - - len = le16toh(cur->wb.upper.length); - ptype = le32toh(cur->wb.lower.lo_dword.data) & - IXGBE_RXDADV_PKTTYPE_MASK; - eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0); - - /* Make sure bad packets are discarded */ - if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) { - rxr->rx_discarded++; - ixgbe_rx_discard(rxr, i); - goto next_desc; - } - - /* - ** On 82599 which supports a hardware - ** LRO (called HW RSC), packets need - ** not be fragmented across sequential - ** descriptors, rather the next descriptor - ** is indicated in bits of the descriptor. - ** This also means that we might proceses - ** more than one packet at a time, something - ** that has never been true before, it - ** required eliminating global chain pointers - ** in favor of what we are doing here. -jfv - */ - if (!eop) { - /* - ** Figure out the next descriptor - ** of this frame. - */ - if (rxr->hw_rsc == TRUE) { - rsc = ixgbe_rsc_count(cur); - rxr->rsc_num += (rsc - 1); - } - if (rsc) { /* Get hardware index */ - nextp = ((staterr & - IXGBE_RXDADV_NEXTP_MASK) >> - IXGBE_RXDADV_NEXTP_SHIFT); - } else { /* Just sequential */ - nextp = i + 1; - if (nextp == adapter->num_rx_desc) - nextp = 0; - } - nbuf = &rxr->rx_buffers[nextp]; - prefetch(nbuf); - } - /* - ** Rather than using the fmp/lmp global pointers - ** we now keep the head of a packet chain in the - ** buffer struct and pass this along from one - ** descriptor to the next, until we get EOP. - */ - mp->m_len = len; - /* - ** See if there is a stored head - ** that determines what we are - */ - sendmp = rbuf->fmp; - if (sendmp != NULL) { /* secondary frag */ - rbuf->buf = rbuf->fmp = NULL; - mp->m_flags &= ~M_PKTHDR; - sendmp->m_pkthdr.len += mp->m_len; - } else { - /* - * Optimize. This might be a small packet, - * maybe just a TCP ACK. Do a fast copy that - * is cache aligned into a new mbuf, and - * leave the old mbuf+cluster for re-use. 
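Multi-descriptor frames in the loop above are stitched together by storing the chain head in the buffer struct itself (rbuf->fmp) and handing it forward to the next descriptor's buffer, which is what lets hardware RSC point "next" at a non-sequential descriptor. The essential moves, extracted from the surrounding code with the small-packet copy shortcut omitted:

    mp->m_len = len;
    sendmp = rbuf->fmp;                     /* stored head, if any */
    if (sendmp != NULL) {                   /* continuation fragment */
            rbuf->buf = rbuf->fmp = NULL;
            mp->m_flags &= ~M_PKTHDR;
            sendmp->m_pkthdr.len += mp->m_len;
    } else {                                /* first fragment */
            rbuf->buf = rbuf->fmp = NULL;   /* cluster leaves the ring */
            sendmp = mp;
            sendmp->m_flags |= M_PKTHDR;
            sendmp->m_pkthdr.len = mp->m_len;
    }
    if (!eop) {                             /* pass the head onward */
            nbuf->fmp = sendmp;
            sendmp = NULL;
            mp->m_next = nbuf->buf;
    }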
- */ - if (eop && len <= IXGBE_RX_COPY_LEN) { - sendmp = m_gethdr(M_NOWAIT, MT_DATA); - if (sendmp != NULL) { - sendmp->m_data += - IXGBE_RX_COPY_ALIGN; - ixgbe_bcopy(mp->m_data, - sendmp->m_data, len); - sendmp->m_len = len; - rxr->rx_copies++; - rbuf->flags |= IXGBE_RX_COPY; - } - } - if (sendmp == NULL) { - rbuf->buf = rbuf->fmp = NULL; - sendmp = mp; - } - - /* first desc of a non-ps chain */ - sendmp->m_flags |= M_PKTHDR; - sendmp->m_pkthdr.len = mp->m_len; - } - ++processed; - - /* Pass the head pointer on */ - if (eop == 0) { - nbuf->fmp = sendmp; - sendmp = NULL; - mp->m_next = nbuf->buf; - } else { /* Sending this frame */ - sendmp->m_pkthdr.rcvif = ifp; - ifp->if_ipackets++; - rxr->rx_packets++; - /* capture data for AIM */ - rxr->bytes += sendmp->m_pkthdr.len; - rxr->rx_bytes += sendmp->m_pkthdr.len; - /* Process vlan info */ - if ((rxr->vtag_strip) && - (staterr & IXGBE_RXD_STAT_VP)) - vtag = le16toh(cur->wb.upper.vlan); - if (vtag) { - sendmp->m_pkthdr.ether_vtag = vtag; - sendmp->m_flags |= M_VLANTAG; - } - if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) - ixgbe_rx_checksum(staterr, sendmp, ptype); -#if __FreeBSD_version >= 800000 - sendmp->m_pkthdr.flowid = que->msix; - M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE); -#endif - } -next_desc: - bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, - BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); - - /* Advance our pointers to the next descriptor. */ - if (++i == rxr->num_desc) - i = 0; - - /* Now send to the stack or do LRO */ - if (sendmp != NULL) { - rxr->next_to_check = i; - ixgbe_rx_input(rxr, ifp, sendmp, ptype); - i = rxr->next_to_check; - } - - /* Every 8 descriptors we go to refresh mbufs */ - if (processed == 8) { - ixgbe_refresh_mbufs(rxr, i); - processed = 0; - } - } - - /* Refresh any remaining buf structs */ - if (ixgbe_rx_unrefreshed(rxr)) - ixgbe_refresh_mbufs(rxr, i); - - rxr->next_to_check = i; - - /* - * Flush any outstanding LRO work - */ - while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) { - SLIST_REMOVE_HEAD(&lro->lro_active, next); - tcp_lro_flush(lro, queued); - } - - IXGBE_RX_UNLOCK(rxr); - - /* - ** Still have cleaning to do? - */ - if ((staterr & IXGBE_RXD_STAT_DD) != 0) - return (TRUE); - else - return (FALSE); -} - - -/********************************************************************* - * - * Verify that the hardware indicated that the checksum is valid. - * Inform the stack about the status of checksum so that stack - * doesn't spend time verifying the checksum. 
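In the routine below, the 32-bit status/error word from the descriptor packs status flags in the low bits and the error byte in bits 24-31; the driver splits it once and then pairs IPCS with IPE and L4CS with TCPE. The decode, minus the SCTP special case:

    u16 status = (u16)staterr;              /* IXGBE_RXD_STAT_* */
    u8  errors = (u8)(staterr >> 24);       /* IXGBE_RXD_ERR_* */

    if ((status & IXGBE_RXD_STAT_IPCS) && !(errors & IXGBE_RXD_ERR_IPE))
            mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID;
    if ((status & IXGBE_RXD_STAT_L4CS) && !(errors & IXGBE_RXD_ERR_TCPE)) {
            mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
            mp->m_pkthdr.csum_data = htons(0xffff);
    }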
- * - *********************************************************************/ -static void -ixgbe_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype) -{ - u16 status = (u16) staterr; - u8 errors = (u8) (staterr >> 24); - bool sctp = FALSE; - - if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 && - (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0) - sctp = TRUE; - - if (status & IXGBE_RXD_STAT_IPCS) { - if (!(errors & IXGBE_RXD_ERR_IPE)) { - /* IP Checksum Good */ - mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED; - mp->m_pkthdr.csum_flags |= CSUM_IP_VALID; - - } else - mp->m_pkthdr.csum_flags = 0; - } - if (status & IXGBE_RXD_STAT_L4CS) { - u64 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); -#if __FreeBSD_version >= 800000 - if (sctp) - type = CSUM_SCTP_VALID; -#endif - if (!(errors & IXGBE_RXD_ERR_TCPE)) { - mp->m_pkthdr.csum_flags |= type; - if (!sctp) - mp->m_pkthdr.csum_data = htons(0xffff); - } - } - return; -} - /* ** This routine is run via an vlan config EVENT, @@ -4811,7 +3061,9 @@ ixgbe_enable_intr(struct adapter *adapter) switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: mask |= IXGBE_EIMS_ECC; + /* Temperature sensor on some adapters */ mask |= IXGBE_EIMS_GPI_SDP0; + /* SFP+ (RX_LOS_N & MOD_ABS_N) */ mask |= IXGBE_EIMS_GPI_SDP1; mask |= IXGBE_EIMS_GPI_SDP2; #ifdef IXGBE_FDIR @@ -4819,11 +3071,24 @@ ixgbe_enable_intr(struct adapter *adapter) #endif break; case ixgbe_mac_X540: - mask |= IXGBE_EIMS_ECC; /* Detect if Thermal Sensor is enabled */ fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM); if (fwsm & IXGBE_FWSM_TS_ENABLED) mask |= IXGBE_EIMS_TS; + mask |= IXGBE_EIMS_ECC; +#ifdef IXGBE_FDIR + mask |= IXGBE_EIMS_FLOW_DIR; +#endif + break; + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + /* MAC thermal sensor is automatically enabled */ + mask |= IXGBE_EIMS_TS; + /* Some devices use SDP0 for important information */ + if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP || + hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) + mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw); + mask |= IXGBE_EIMS_ECC; #ifdef IXGBE_FDIR mask |= IXGBE_EIMS_FLOW_DIR; #endif @@ -4834,7 +3099,7 @@ ixgbe_enable_intr(struct adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); - /* With RSS we use auto clear */ + /* With MSI-X we use auto clear */ if (adapter->msix_mem) { mask = IXGBE_EIMS_ENABLE_MASK; /* Don't autoclear Link */ @@ -4872,26 +3137,6 @@ ixgbe_disable_intr(struct adapter *adapter) return; } -u16 -ixgbe_read_pci_cfg(struct ixgbe_hw *hw, u32 reg) -{ - u16 value; - - value = pci_read_config(((struct ixgbe_osdep *)hw->back)->dev, - reg, 2); - - return (value); -} - -void -ixgbe_write_pci_cfg(struct ixgbe_hw *hw, u32 reg, u16 value) -{ - pci_write_config(((struct ixgbe_osdep *)hw->back)->dev, - reg, value, 2); - - return; -} - /* ** Get the width and transaction speed of ** the slot this adapter is plugged into. 
@@ -4907,7 +3152,13 @@ ixgbe_get_slot_info(struct ixgbe_hw *hw) /* For most devices simply call the shared code routine */ if (hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) { ixgbe_get_bus_info(hw); - goto display; + /* These devices don't use PCI-E */ + switch (hw->mac.type) { + case ixgbe_mac_X550EM_x: + return; + default: + goto display; + } } /* @@ -5028,6 +3279,8 @@ ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type) case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: if (type == -1) { /* MISC IVAR */ index = (entry & 1) * 8; ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); @@ -5055,8 +3308,14 @@ ixgbe_configure_ivars(struct adapter *adapter) if (ixgbe_max_interrupt_rate > 0) newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8; - else + else { + /* + ** Disable DMA coalescing if interrupt moderation is + ** disabled. + */ + adapter->dmac = 0; newitr = 0; + } for (int i = 0; i < adapter->num_queues; i++, que++) { /* First the RX queue entry */ @@ -5069,7 +3328,7 @@ ixgbe_configure_ivars(struct adapter *adapter) } /* For the Link interrupt */ - ixgbe_set_ivar(adapter, 1, adapter->linkvec, -1); + ixgbe_set_ivar(adapter, 1, adapter->vector, -1); } /* @@ -5116,7 +3375,7 @@ ixgbe_handle_link(void *context, int pending) ixgbe_check_link(&adapter->hw, &adapter->link_speed, &adapter->link_up, 0); - ixgbe_update_link_status(adapter); + ixgbe_update_link_status(adapter); } /* @@ -5157,12 +3416,44 @@ ixgbe_handle_msf(void *context, int pending) struct ixgbe_hw *hw = &adapter->hw; u32 autoneg; bool negotiate; + int err; + + err = hw->phy.ops.identify_sfp(hw); + if (!err) { + ixgbe_setup_optics(adapter); + INIT_DEBUGOUT1("ixgbe_sfp_probe: flags: %X\n", adapter->optics); + } autoneg = hw->phy.autoneg_advertised; if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate); if (hw->mac.ops.setup_link) hw->mac.ops.setup_link(hw, autoneg, TRUE); + + ifmedia_removeall(&adapter->media); + ixgbe_add_media_types(adapter); + return; +} + +/* +** Tasklet for handling interrupts from an external PHY +*/ +static void +ixgbe_handle_phy(void *context, int pending) +{ + struct adapter *adapter = context; + struct ixgbe_hw *hw = &adapter->hw; + int error; + + error = hw->phy.ops.handle_lasi(hw); + if (error == IXGBE_ERR_OVERTEMP) + device_printf(adapter->dev, + "CRITICAL: EXTERNAL PHY OVER TEMP!! 
" + " PHY will downshift to lower power state!\n"); + else if (error) + device_printf(adapter->dev, + "Error handling LASI interrupt: %d\n", + error); return; } @@ -5188,6 +3479,127 @@ ixgbe_reinit_fdir(void *context, int pending) } #endif +/********************************************************************* + * + * Configure DMA Coalescing + * + **********************************************************************/ +static void +ixgbe_config_dmac(struct adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config; + + if (hw->mac.type < ixgbe_mac_X550 || + !hw->mac.ops.dmac_config) + return; + + if (dcfg->watchdog_timer ^ adapter->dmac || + dcfg->link_speed ^ adapter->link_speed) { + dcfg->watchdog_timer = adapter->dmac; + dcfg->fcoe_en = false; + dcfg->link_speed = adapter->link_speed; + dcfg->num_tcs = 1; + + INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n", + dcfg->watchdog_timer, dcfg->link_speed); + + hw->mac.ops.dmac_config(hw); + } +} + +/* + * Checks whether the adapter supports Energy Efficient Ethernet + * or not, based on device ID. + */ +static void +ixgbe_check_eee_support(struct adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + + adapter->eee_support = adapter->eee_enabled = + (hw->device_id == IXGBE_DEV_ID_X550T || + hw->device_id == IXGBE_DEV_ID_X550EM_X_KR); +} + +/* + * Checks whether the adapter's ports are capable of + * Wake On LAN by reading the adapter's NVM. + * + * Sets each port's hw->wol_enabled value depending + * on the value read here. + */ +static void +ixgbe_check_wol_support(struct adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u16 dev_caps = 0; + + /* Find out WoL support for port */ + adapter->wol_support = hw->wol_enabled = 0; + ixgbe_get_device_caps(hw, &dev_caps); + if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) || + ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) && + hw->bus.func == 0)) + adapter->wol_support = hw->wol_enabled = 1; + + /* Save initial wake up filter configuration */ + adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC); + + return; +} + +/* + * Prepare the adapter/port for LPLU and/or WoL + */ +static int +ixgbe_setup_low_power_mode(struct adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + device_t dev = adapter->dev; + s32 error = 0; + + mtx_assert(&adapter->core_mtx, MA_OWNED); + + /* Limit power management flow to X550EM baseT */ + if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T + && hw->phy.ops.enter_lplu) { + /* Turn off support for APM wakeup. (Using ACPI instead) */ + IXGBE_WRITE_REG(hw, IXGBE_GRC, + IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2); + + /* + * Clear Wake Up Status register to prevent any previous wakeup + * events from waking us up immediately after we suspend. 
+ */ + IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff); + + /* + * Program the Wakeup Filter Control register with user filter + * settings + */ + IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc); + + /* Enable wakeups and power management in Wakeup Control */ + IXGBE_WRITE_REG(hw, IXGBE_WUC, + IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN); + + /* X550EM baseT adapters need a special LPLU flow */ + hw->phy.reset_disable = true; + ixgbe_stop(adapter); + error = hw->phy.ops.enter_lplu(hw); + if (error) + device_printf(dev, + "Error entering LPLU: %d\n", error); + hw->phy.reset_disable = false; + } else { + /* Just stop for other adapters */ + ixgbe_stop(adapter); + } + + return error; +} + /********************************************************************** * * Update the board statistics counters. @@ -5196,80 +3608,44 @@ ixgbe_reinit_fdir(void *context, int pending) static void ixgbe_update_stats_counters(struct adapter *adapter) { - struct ifnet *ifp = adapter->ifp; struct ixgbe_hw *hw = &adapter->hw; - u32 missed_rx = 0, bprc, lxon, lxoff, total; - u64 total_missed_rx = 0; + u32 missed_rx = 0, bprc, lxon, lxoff, total; + u64 total_missed_rx = 0; - adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); - adapter->stats.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC); - adapter->stats.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC); - adapter->stats.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC); + adapter->stats.pf.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); + adapter->stats.pf.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC); + adapter->stats.pf.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC); + adapter->stats.pf.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC); - /* - ** Note: these are for the 8 possible traffic classes, - ** which in current implementation is unused, - ** therefore only 0 should read real data. 
- */ - for (int i = 0; i < 8; i++) { - u32 mp; - mp = IXGBE_READ_REG(hw, IXGBE_MPC(i)); - /* missed_rx tallies misses for the gprc workaround */ - missed_rx += mp; - /* global total per queue */ - adapter->stats.mpc[i] += mp; - /* Running comprehensive total for stats display */ - total_missed_rx += adapter->stats.mpc[i]; - if (hw->mac.type == ixgbe_mac_82598EB) { - adapter->stats.rnbc[i] += - IXGBE_READ_REG(hw, IXGBE_RNBC(i)); - adapter->stats.qbtc[i] += - IXGBE_READ_REG(hw, IXGBE_QBTC(i)); - adapter->stats.qbrc[i] += - IXGBE_READ_REG(hw, IXGBE_QBRC(i)); - adapter->stats.pxonrxc[i] += - IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); - } else - adapter->stats.pxonrxc[i] += - IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); - adapter->stats.pxontxc[i] += - IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); - adapter->stats.pxofftxc[i] += - IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); - adapter->stats.pxoffrxc[i] += - IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); - adapter->stats.pxon2offc[i] += - IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i)); - } for (int i = 0; i < 16; i++) { - adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); - adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); - adapter->stats.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); + adapter->stats.pf.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); + adapter->stats.pf.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); + adapter->stats.pf.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); } - adapter->stats.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC); - adapter->stats.mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC); - adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); + adapter->stats.pf.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC); + adapter->stats.pf.mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC); + adapter->stats.pf.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); /* Hardware workaround, gprc counts missed packets */ - adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC); - adapter->stats.gprc -= missed_rx; + adapter->stats.pf.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC); + adapter->stats.pf.gprc -= missed_rx; if (hw->mac.type != ixgbe_mac_82598EB) { - adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) + + adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) + ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32); - adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) + + adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) + ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32); - adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL) + + adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORL) + ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32); - adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); - adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); + adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); + adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); } else { - adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); - adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); + adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); + adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); /* 82598 only has a counter in the high register */ - adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); - adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); - adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH); + adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); + adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); + adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORH); } /* @@ -5277,73 +3653,113 @@ 
ixgbe_update_stats_counters(struct adapter *adapter) * broadcasts, so for now we subtract those. */ bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); - adapter->stats.bprc += bprc; - adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); + adapter->stats.pf.bprc += bprc; + adapter->stats.pf.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); if (hw->mac.type == ixgbe_mac_82598EB) - adapter->stats.mprc -= bprc; + adapter->stats.pf.mprc -= bprc; - adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); - adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); - adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); - adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); - adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); - adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); + adapter->stats.pf.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); + adapter->stats.pf.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); + adapter->stats.pf.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); + adapter->stats.pf.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); + adapter->stats.pf.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); + adapter->stats.pf.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); - adapter->stats.lxontxc += lxon; + adapter->stats.pf.lxontxc += lxon; lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); - adapter->stats.lxofftxc += lxoff; + adapter->stats.pf.lxofftxc += lxoff; total = lxon + lxoff; - adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC); - adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); - adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); - adapter->stats.gptc -= total; - adapter->stats.mptc -= total; - adapter->stats.ptc64 -= total; - adapter->stats.gotc -= total * ETHER_MIN_LEN; - - adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC); - adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC); - adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC); - adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC); - adapter->stats.mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC); - adapter->stats.mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC); - adapter->stats.mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC); - adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR); - adapter->stats.tpt += IXGBE_READ_REG(hw, IXGBE_TPT); - adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); - adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); - adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); - adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); - adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); - adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); - adapter->stats.xec += IXGBE_READ_REG(hw, IXGBE_XEC); - adapter->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); - adapter->stats.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST); + adapter->stats.pf.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC); + adapter->stats.pf.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); + adapter->stats.pf.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); + adapter->stats.pf.gptc -= total; + adapter->stats.pf.mptc -= total; + adapter->stats.pf.ptc64 -= total; + adapter->stats.pf.gotc -= total * ETHER_MIN_LEN; + + adapter->stats.pf.ruc += IXGBE_READ_REG(hw, IXGBE_RUC); + adapter->stats.pf.rfc += IXGBE_READ_REG(hw, IXGBE_RFC); + adapter->stats.pf.roc += IXGBE_READ_REG(hw, IXGBE_ROC); + adapter->stats.pf.rjc += IXGBE_READ_REG(hw, IXGBE_RJC); + adapter->stats.pf.mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC); + adapter->stats.pf.mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC); + adapter->stats.pf.mngptc += 
IXGBE_READ_REG(hw, IXGBE_MNGPTC); + adapter->stats.pf.tpr += IXGBE_READ_REG(hw, IXGBE_TPR); + adapter->stats.pf.tpt += IXGBE_READ_REG(hw, IXGBE_TPT); + adapter->stats.pf.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); + adapter->stats.pf.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); + adapter->stats.pf.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); + adapter->stats.pf.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); + adapter->stats.pf.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); + adapter->stats.pf.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); + adapter->stats.pf.xec += IXGBE_READ_REG(hw, IXGBE_XEC); + adapter->stats.pf.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); + adapter->stats.pf.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST); /* Only read FCOE on 82599 */ if (hw->mac.type != ixgbe_mac_82598EB) { - adapter->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); - adapter->stats.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); - adapter->stats.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); - adapter->stats.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); - adapter->stats.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); + adapter->stats.pf.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); + adapter->stats.pf.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); + adapter->stats.pf.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); + adapter->stats.pf.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); + adapter->stats.pf.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); } /* Fill out the OS statistics structure */ - ifp->if_ipackets = adapter->stats.gprc; - ifp->if_opackets = adapter->stats.gptc; - ifp->if_ibytes = adapter->stats.gorc; - ifp->if_obytes = adapter->stats.gotc; - ifp->if_imcasts = adapter->stats.mprc; - ifp->if_omcasts = adapter->stats.mptc; - ifp->if_collisions = 0; - - /* Rx Errors */ - ifp->if_iqdrops = total_missed_rx; - ifp->if_ierrors = adapter->stats.crcerrs + adapter->stats.rlec; + IXGBE_SET_IPACKETS(adapter, adapter->stats.pf.gprc); + IXGBE_SET_OPACKETS(adapter, adapter->stats.pf.gptc); + IXGBE_SET_IBYTES(adapter, adapter->stats.pf.gorc); + IXGBE_SET_OBYTES(adapter, adapter->stats.pf.gotc); + IXGBE_SET_IMCASTS(adapter, adapter->stats.pf.mprc); + IXGBE_SET_OMCASTS(adapter, adapter->stats.pf.mptc); + IXGBE_SET_COLLISIONS(adapter, 0); + IXGBE_SET_IQDROPS(adapter, total_missed_rx); + IXGBE_SET_IERRORS(adapter, adapter->stats.pf.crcerrs + + adapter->stats.pf.rlec); } +#if __FreeBSD_version >= 1100036 +static uint64_t +ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt) +{ + struct adapter *adapter; + struct tx_ring *txr; + uint64_t rv; + + adapter = if_getsoftc(ifp); + + switch (cnt) { + case IFCOUNTER_IPACKETS: + return (adapter->ipackets); + case IFCOUNTER_OPACKETS: + return (adapter->opackets); + case IFCOUNTER_IBYTES: + return (adapter->ibytes); + case IFCOUNTER_OBYTES: + return (adapter->obytes); + case IFCOUNTER_IMCASTS: + return (adapter->imcasts); + case IFCOUNTER_OMCASTS: + return (adapter->omcasts); + case IFCOUNTER_COLLISIONS: + return (0); + case IFCOUNTER_IQDROPS: + return (adapter->iqdrops); + case IFCOUNTER_OQDROPS: + rv = 0; + txr = adapter->tx_rings; + for (int i = 0; i < adapter->num_queues; i++, txr++) + rv += txr->br->br_drops; + return (rv); + case IFCOUNTER_IERRORS: + return (adapter->ierrors); + default: + return (if_get_counter_default(ifp, cnt)); + } +} +#endif + /** ixgbe_sysctl_tdh_handler - Handler function * Retrieves the TDH value from the hardware */ @@ -5444,13 +3860,114 @@ ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS) return 0; } +static void +ixgbe_add_device_sysctls(struct 
adapter *adapter) +{ + device_t dev = adapter->dev; + struct ixgbe_hw *hw = &adapter->hw; + struct sysctl_oid_list *child; + struct sysctl_ctx_list *ctx; + + ctx = device_get_sysctl_ctx(dev); + child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); + + /* Sysctls for all devices */ + SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc", + CTLTYPE_INT | CTLFLAG_RW, adapter, 0, + ixgbe_set_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC); + + SYSCTL_ADD_INT(ctx, child, OID_AUTO, "enable_aim", + CTLFLAG_RW, + &ixgbe_enable_aim, 1, "Interrupt Moderation"); + + SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "advertise_speed", + CTLTYPE_INT | CTLFLAG_RW, adapter, 0, + ixgbe_set_advertise, "I", IXGBE_SYSCTL_DESC_ADV_SPEED); + + SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "thermal_test", + CTLTYPE_INT | CTLFLAG_RW, adapter, 0, + ixgbe_sysctl_thermal_test, "I", "Thermal Test"); + + /* for X550 devices */ + if (hw->mac.type >= ixgbe_mac_X550) + SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dmac", + CTLTYPE_INT | CTLFLAG_RW, adapter, 0, + ixgbe_sysctl_dmac, "I", "DMA Coalesce"); + + /* for X550T and X550EM backplane devices */ + if (hw->device_id == IXGBE_DEV_ID_X550T || + hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) { + struct sysctl_oid *eee_node; + struct sysctl_oid_list *eee_list; + + eee_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "eee", + CTLFLAG_RD, NULL, + "Energy Efficient Ethernet sysctls"); + eee_list = SYSCTL_CHILDREN(eee_node); + + SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "enable", + CTLTYPE_INT | CTLFLAG_RW, adapter, 0, + ixgbe_sysctl_eee_enable, "I", + "Enable or Disable EEE"); + + SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "negotiated", + CTLTYPE_INT | CTLFLAG_RD, adapter, 0, + ixgbe_sysctl_eee_negotiated, "I", + "EEE negotiated on link"); + + SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "tx_lpi_status", + CTLTYPE_INT | CTLFLAG_RD, adapter, 0, + ixgbe_sysctl_eee_tx_lpi_status, "I", + "Whether or not TX link is in LPI state"); + + SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "rx_lpi_status", + CTLTYPE_INT | CTLFLAG_RD, adapter, 0, + ixgbe_sysctl_eee_rx_lpi_status, "I", + "Whether or not RX link is in LPI state"); + } + + /* for certain 10GBaseT devices */ + if (hw->device_id == IXGBE_DEV_ID_X550T || + hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) { + SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wol_enable", + CTLTYPE_INT | CTLFLAG_RW, adapter, 0, + ixgbe_sysctl_wol_enable, "I", + "Enable/Disable Wake on LAN"); + + SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wufc", + CTLTYPE_INT | CTLFLAG_RW, adapter, 0, + ixgbe_sysctl_wufc, "I", + "Enable/Disable Wake Up Filters"); + } + + /* for X550EM 10GBaseT devices */ + if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) { + struct sysctl_oid *phy_node; + struct sysctl_oid_list *phy_list; + + phy_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "phy", + CTLFLAG_RD, NULL, + "External PHY sysctls"); + phy_list = SYSCTL_CHILDREN(phy_node); + + SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "temp", + CTLTYPE_INT | CTLFLAG_RD, adapter, 0, + ixgbe_sysctl_phy_temp, "I", + "Current External PHY Temperature (Celsius)"); + + SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "overtemp_occurred", + CTLTYPE_INT | CTLFLAG_RD, adapter, 0, + ixgbe_sysctl_phy_overtemp_occurred, "I", + "External PHY High Temperature Event Occurred"); + } +} + /* * Add sysctl variables, one per statistic, to the system. 
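 *
 * As a usage sketch (node names assumed from the SYSCTL_ADD_* calls
 * below, with unit 0 assumed), the per-queue counters surface as:
 *
 *	dev.ix.0.queue0.tx_packets
 *	dev.ix.0.queue0.br_drops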
*/ static void ixgbe_add_hw_stats(struct adapter *adapter) { - device_t dev = adapter->dev; struct tx_ring *txr = adapter->tx_rings; @@ -5459,7 +3976,7 @@ ixgbe_add_hw_stats(struct adapter *adapter) struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); struct sysctl_oid *tree = device_get_sysctl_tree(dev); struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); - struct ixgbe_hw_stats *stats = &adapter->stats; + struct ixgbe_hw_stats *stats = &adapter->stats.pf; struct sysctl_oid *stat_node, *queue_node; struct sysctl_oid_list *stat_list, *queue_list; @@ -5515,6 +4032,9 @@ ixgbe_add_hw_stats(struct adapter *adapter) SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets", CTLFLAG_RD, &txr->total_packets, "Queue Packets Transmitted"); + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops", + CTLFLAG_RD, &txr->br->br_drops, + "Packets dropped in buf_ring"); } for (int i = 0; i < adapter->num_queues; i++, rxr++) { @@ -5742,74 +4262,142 @@ ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS) } /* -** Control link advertise speed: -** 1 - advertise only 1G -** 2 - advertise 100Mb -** 3 - advertise normal +** Control advertised link speed: +** Flags: +** 0x1 - advertise 100 Mb +** 0x2 - advertise 1G +** 0x4 - advertise 10G */ static int ixgbe_set_advertise(SYSCTL_HANDLER_ARGS) { - int error = 0; + int error = 0, requested; struct adapter *adapter; device_t dev; struct ixgbe_hw *hw; - ixgbe_link_speed speed, last; + ixgbe_link_speed speed = 0; adapter = (struct adapter *) arg1; dev = adapter->dev; hw = &adapter->hw; - last = adapter->advertise; - error = sysctl_handle_int(oidp, &adapter->advertise, 0, req); + requested = adapter->advertise; + error = sysctl_handle_int(oidp, &requested, 0, req); if ((error) || (req->newptr == NULL)) return (error); - if (adapter->advertise == last) /* no change */ + /* Checks to validate new value */ + if (adapter->advertise == requested) /* no change */ return (0); if (!((hw->phy.media_type == ixgbe_media_type_copper) || - (hw->phy.multispeed_fiber))) + (hw->phy.multispeed_fiber))) { + device_printf(dev, + "Advertised speed can only be set on copper or " + "multispeed fiber media types.\n"); return (EINVAL); + } - if ((adapter->advertise == 2) && (hw->mac.type != ixgbe_mac_X540)) { - device_printf(dev, "Set Advertise: 100Mb on X540 only\n"); + if (requested < 0x1 || requested > 0x7) { + device_printf(dev, + "Invalid advertised speed; valid modes are 0x1 through 0x7\n"); return (EINVAL); } - if (adapter->advertise == 1) - speed = IXGBE_LINK_SPEED_1GB_FULL; - else if (adapter->advertise == 2) - speed = IXGBE_LINK_SPEED_100_FULL; - else if (adapter->advertise == 3) - speed = IXGBE_LINK_SPEED_1GB_FULL | - IXGBE_LINK_SPEED_10GB_FULL; - else { /* bogus value */ - adapter->advertise = last; + if ((requested & 0x1) + && (hw->mac.type != ixgbe_mac_X540) + && (hw->mac.type != ixgbe_mac_X550)) { + device_printf(dev, "Set Advertise: 100Mb on X540/X550 only\n"); return (EINVAL); } + /* Set new value and report new advertised mode */ + if (requested & 0x1) + speed |= IXGBE_LINK_SPEED_100_FULL; + if (requested & 0x2) + speed |= IXGBE_LINK_SPEED_1GB_FULL; + if (requested & 0x4) + speed |= IXGBE_LINK_SPEED_10GB_FULL; + hw->mac.autotry_restart = TRUE; hw->mac.ops.setup_link(hw, speed, TRUE); + adapter->advertise = requested; return (error); } /* -** Thermal Shutdown Trigger -** - cause a Thermal Overtemp IRQ -** - this now requires firmware enabling -*/ + * The following two sysctls are for X550 BaseT devices; + * they deal with the external PHY used in them. 
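+ *
+ * A usage sketch (unit number assumed): the temperature handler below
+ * keeps only the high byte of the PHY register, so the value reads
+ * back as whole degrees Celsius, e.g.
+ *
+ *	# sysctl dev.ix.0.phy.temp
+ *	dev.ix.0.phy.temp: 45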
+ */ static int -ixgbe_set_thermal_test(SYSCTL_HANDLER_ARGS) +ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS) { - int error, fire = 0; struct adapter *adapter = (struct adapter *) arg1; struct ixgbe_hw *hw = &adapter->hw; + u16 reg; + + if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) { + device_printf(adapter->dev, + "Device has no supported external thermal sensor.\n"); + return (ENODEV); + } + if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + ®)) { + device_printf(adapter->dev, + "Error reading from PHY's current temperature register\n"); + return (EAGAIN); + } - if (hw->mac.type != ixgbe_mac_X540) - return (0); + /* Shift temp for output */ + reg = reg >> 8; + + return (sysctl_handle_int(oidp, NULL, reg, req)); +} + +/* + * Reports whether the current PHY temperature is over + * the overtemp threshold. + * - This is reported directly from the PHY + */ +static int +ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS) +{ + struct adapter *adapter = (struct adapter *) arg1; + struct ixgbe_hw *hw = &adapter->hw; + u16 reg; + + if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) { + device_printf(adapter->dev, + "Device has no supported external thermal sensor.\n"); + return (ENODEV); + } + + if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + ®)) { + device_printf(adapter->dev, + "Error reading from PHY's temperature status register\n"); + return (EAGAIN); + } + + /* Get occurrence bit */ + reg = !!(reg & 0x4000); + return (sysctl_handle_int(oidp, 0, reg, req)); +} + +/* +** Thermal Shutdown Trigger (internal MAC) +** - Set this to 1 to cause an overtemp event to occur +*/ +static int +ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS) +{ + struct adapter *adapter = (struct adapter *) arg1; + struct ixgbe_hw *hw = &adapter->hw; + int error, fire = 0; error = sysctl_handle_int(oidp, &fire, 0, req); if ((error) || (req->newptr == NULL)) @@ -5825,6 +4413,223 @@ ixgbe_set_thermal_test(SYSCTL_HANDLER_ARGS) } /* +** Manage DMA Coalescing. +** Control values: +** 0/1 - off / on (use default value of 1000) +** +** Legal timer values are: +** 50,100,250,500,1000,2000,5000,10000 +** +** Turning off interrupt moderation will also turn this off. +*/ +static int +ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS) +{ + struct adapter *adapter = (struct adapter *) arg1; + struct ixgbe_hw *hw = &adapter->hw; + struct ifnet *ifp = adapter->ifp; + int error; + u16 oldval; + + oldval = adapter->dmac; + error = sysctl_handle_int(oidp, &adapter->dmac, 0, req); + if ((error) || (req->newptr == NULL)) + return (error); + + switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + break; + default: + device_printf(adapter->dev, + "DMA Coalescing is only supported on X550 devices\n"); + return (ENODEV); + } + + switch (adapter->dmac) { + case 0: + /* Disabled */ + break; + case 1: /* Enable and use default */ + adapter->dmac = 1000; + break; + case 50: + case 100: + case 250: + case 500: + case 1000: + case 2000: + case 5000: + case 10000: + /* Legal values - allow */ + break; + default: + /* Do nothing, illegal value */ + adapter->dmac = oldval; + return (EINVAL); + } + + /* Re-initialize hardware if it's already running */ + if (ifp->if_drv_flags & IFF_DRV_RUNNING) + ixgbe_init(adapter); + + return (0); +} + +/* + * Sysctl to enable/disable the WoL capability, if supported by the adapter. 
+ * Values: + * 0 - disabled + * 1 - enabled + */ +static int +ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS) +{ + struct adapter *adapter = (struct adapter *) arg1; + struct ixgbe_hw *hw = &adapter->hw; + int new_wol_enabled; + int error = 0; + + new_wol_enabled = hw->wol_enabled; + error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req); + if ((error) || (req->newptr == NULL)) + return (error); + if (new_wol_enabled == hw->wol_enabled) + return (0); + + if (new_wol_enabled > 0 && !adapter->wol_support) + return (ENODEV); + else + hw->wol_enabled = !!(new_wol_enabled); + + return (0); +} + +/* + * Sysctl to enable/disable the Energy Efficient Ethernet capability, + * if supported by the adapter. + * Values: + * 0 - disabled + * 1 - enabled + */ +static int +ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS) +{ + struct adapter *adapter = (struct adapter *) arg1; + struct ifnet *ifp = adapter->ifp; + int new_eee_enabled, error = 0; + + new_eee_enabled = adapter->eee_enabled; + error = sysctl_handle_int(oidp, &new_eee_enabled, 0, req); + if ((error) || (req->newptr == NULL)) + return (error); + if (new_eee_enabled == adapter->eee_enabled) + return (0); + + if (new_eee_enabled > 0 && !adapter->eee_support) + return (ENODEV); + else + adapter->eee_enabled = !!(new_eee_enabled); + + /* Re-initialize hardware if it's already running */ + if (ifp->if_drv_flags & IFF_DRV_RUNNING) + ixgbe_init(adapter); + + return (0); +} + +/* + * Read-only sysctl indicating whether EEE support was negotiated + * on the link. + */ +static int +ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS) +{ + struct adapter *adapter = (struct adapter *) arg1; + struct ixgbe_hw *hw = &adapter->hw; + bool status; + + status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) & IXGBE_EEE_STAT_NEG); + + return (sysctl_handle_int(oidp, 0, status, req)); +} + +/* + * Read-only sysctl indicating whether RX Link is in LPI state. + */ +static int +ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS) +{ + struct adapter *adapter = (struct adapter *) arg1; + struct ixgbe_hw *hw = &adapter->hw; + bool status; + + status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) & + IXGBE_EEE_RX_LPI_STATUS); + + return (sysctl_handle_int(oidp, 0, status, req)); +} + +/* + * Read-only sysctl indicating whether TX Link is in LPI state. + */ +static int +ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS) +{ + struct adapter *adapter = (struct adapter *) arg1; + struct ixgbe_hw *hw = &adapter->hw; + bool status; + + status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) & + IXGBE_EEE_TX_LPI_STATUS); + + return (sysctl_handle_int(oidp, 0, status, req)); +} + +/* + * Sysctl to enable/disable the types of packets that the + * adapter will wake up on upon receipt. + * WUFC - Wake Up Filter Control + * Flags: + * 0x1 - Link Status Change + * 0x2 - Magic Packet + * 0x4 - Direct Exact + * 0x8 - Directed Multicast + * 0x10 - Broadcast + * 0x20 - ARP/IPv4 Request Packet + * 0x40 - Direct IPv4 Packet + * 0x80 - Direct IPv6 Packet + * + * Setting another flag will cause the sysctl to return an + * error. 
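+ *
+ * For example (unit number assumed), to wake on Link Status Change
+ * plus Magic Packet, OR the flags together:
+ *
+ *	# sysctl dev.ix.0.wufc=0x3	(0x1 | 0x2)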
+ */ +static int +ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS) +{ + struct adapter *adapter = (struct adapter *) arg1; + int error = 0; + u32 new_wufc; + + new_wufc = adapter->wufc; + + error = sysctl_handle_int(oidp, &new_wufc, 0, req); + if ((error) || (req->newptr == NULL)) + return (error); + if (new_wufc == adapter->wufc) + return (0); + + if (new_wufc & 0xffffff00) + return (EINVAL); + else { + new_wufc &= 0xff; + new_wufc |= (0xffffff & adapter->wufc); + adapter->wufc = new_wufc; + } + + return (0); +} + +/* ** Enable the hardware to drop packets when the buffer is ** full. This is useful when multiqueue,so that no single ** queue being full stalls the entire RX engine. We only @@ -5854,3 +4659,29 @@ ixgbe_disable_rx_drop(struct adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl); } } + +static void +ixgbe_rearm_queues(struct adapter *adapter, u64 queues) +{ + u32 mask; + + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + mask = (IXGBE_EIMS_RTX_QUEUE & queues); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + mask = (queues & 0xFFFFFFFF); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask); + mask = (queues >> 32); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask); + break; + default: + break; + } +} + + diff --git a/sys/dev/ixgbe/if_ixv.c b/sys/dev/ixgbe/if_ixv.c new file mode 100644 index 0000000..a550a85 --- /dev/null +++ b/sys/dev/ixgbe/if_ixv.c @@ -0,0 +1,2107 @@ +/****************************************************************************** + + Copyright (c) 2001-2015, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. 
+ +******************************************************************************/ +/*$FreeBSD$*/ + + +#ifndef IXGBE_STANDALONE_BUILD +#include "opt_inet.h" +#include "opt_inet6.h" +#endif + +#include "ixgbe.h" + +/********************************************************************* + * Driver version + *********************************************************************/ +char ixv_driver_version[] = "1.2.5"; + +/********************************************************************* + * PCI Device ID Table + * + * Used by probe to select devices to load on + * Last field stores an index into ixv_strings + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } + *********************************************************************/ + +static ixgbe_vendor_info_t ixv_vendor_info_array[] = +{ + {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0}, + {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0}, + {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0}, + {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0}, + /* required last entry */ + {0, 0, 0, 0, 0} +}; + +/********************************************************************* + * Table of branding strings + *********************************************************************/ + +static char *ixv_strings[] = { + "Intel(R) PRO/10GbE Virtual Function Network Driver" +}; + +/********************************************************************* + * Function prototypes + *********************************************************************/ +static int ixv_probe(device_t); +static int ixv_attach(device_t); +static int ixv_detach(device_t); +static int ixv_shutdown(device_t); +static int ixv_ioctl(struct ifnet *, u_long, caddr_t); +static void ixv_init(void *); +static void ixv_init_locked(struct adapter *); +static void ixv_stop(void *); +static void ixv_media_status(struct ifnet *, struct ifmediareq *); +static int ixv_media_change(struct ifnet *); +static void ixv_identify_hardware(struct adapter *); +static int ixv_allocate_pci_resources(struct adapter *); +static int ixv_allocate_msix(struct adapter *); +static int ixv_setup_msix(struct adapter *); +static void ixv_free_pci_resources(struct adapter *); +static void ixv_local_timer(void *); +static void ixv_setup_interface(device_t, struct adapter *); +static void ixv_config_link(struct adapter *); + +static void ixv_initialize_transmit_units(struct adapter *); +static void ixv_initialize_receive_units(struct adapter *); + +static void ixv_enable_intr(struct adapter *); +static void ixv_disable_intr(struct adapter *); +static void ixv_set_multi(struct adapter *); +static void ixv_update_link_status(struct adapter *); +static int ixv_sysctl_debug(SYSCTL_HANDLER_ARGS); +static void ixv_set_ivar(struct adapter *, u8, u8, s8); +static void ixv_configure_ivars(struct adapter *); +static u8 * ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *); + +static void ixv_setup_vlan_support(struct adapter *); +static void ixv_register_vlan(void *, struct ifnet *, u16); +static void ixv_unregister_vlan(void *, struct ifnet *, u16); + +static void ixv_save_stats(struct adapter *); +static void ixv_init_stats(struct adapter *); +static void ixv_update_stats(struct adapter *); +static void ixv_add_stats_sysctls(struct adapter *); + +/* The MSI/X Interrupt handlers */ +static void ixv_msix_que(void *); +static void ixv_msix_mbx(void *); + +/* Deferred interrupt tasklets */ +static void ixv_handle_que(void *, int); +static void ixv_handle_mbx(void 
*, int);
+
+/*********************************************************************
+ *  FreeBSD Device Interface Entry Points
+ *********************************************************************/
+
+static device_method_t ixv_methods[] = {
+	/* Device interface */
+	DEVMETHOD(device_probe, ixv_probe),
+	DEVMETHOD(device_attach, ixv_attach),
+	DEVMETHOD(device_detach, ixv_detach),
+	DEVMETHOD(device_shutdown, ixv_shutdown),
+	DEVMETHOD_END
+};
+
+static driver_t ixv_driver = {
+	"ixv", ixv_methods, sizeof(struct adapter),
+};
+
+devclass_t ixv_devclass;
+DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
+MODULE_DEPEND(ixv, pci, 1, 1, 1);
+MODULE_DEPEND(ixv, ether, 1, 1, 1);
+/* XXX depend on 'ix' ? */
+
+/*
+** TUNEABLE PARAMETERS:
+*/
+
+/*
+** AIM: Adaptive Interrupt Moderation
+** which means that the interrupt rate
+** is varied over time based on the
+** traffic for that interrupt vector
+*/
+static int ixv_enable_aim = FALSE;
+TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
+
+/* How many packets rxeof tries to clean at a time */
+static int ixv_rx_process_limit = 256;
+TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
+
+/* How many packets txeof tries to clean at a time */
+static int ixv_tx_process_limit = 256;
+TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
+
+/* Flow control setting, default to full */
+static int ixv_flow_control = ixgbe_fc_full;
+TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
+
+/*
+ * Header split: this causes the hardware to DMA
+ * the header into a separate mbuf from the payload.
+ * It can be a performance win in some workloads, but
+ * in others it actually hurts; it's off by default.
+ */
+static int ixv_header_split = FALSE;
+TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
+
+/*
+** Number of TX descriptors per ring,
+** setting higher than RX as this seems
+** the better performing choice.
+*/
+static int ixv_txd = DEFAULT_TXD;
+TUNABLE_INT("hw.ixv.txd", &ixv_txd);
+
+/* Number of RX descriptors per ring */
+static int ixv_rxd = DEFAULT_RXD;
+TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
+
+/*
+** Shadow VFTA table, this is needed because
+** the real filter table gets cleared during
+** a soft reset and we need to repopulate it.
+*/
+static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
+
+/*********************************************************************
+ *  Device identification routine
+ *
+ *  ixv_probe determines if the driver should be loaded on
+ *  adapter based on PCI vendor/device id of the adapter.
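+ *
+ *  For illustration: an 82599 virtual function presents Intel's
+ *  vendor ID (0x8086) together with IXGBE_DEV_ID_82599_VF; since the
+ *  table above leaves the subvendor/subdevice fields zero, any
+ *  subsystem IDs are accepted for a match.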
+ * + * return BUS_PROBE_DEFAULT on success, positive on failure + *********************************************************************/ + +static int +ixv_probe(device_t dev) +{ + ixgbe_vendor_info_t *ent; + + u16 pci_vendor_id = 0; + u16 pci_device_id = 0; + u16 pci_subvendor_id = 0; + u16 pci_subdevice_id = 0; + char adapter_name[256]; + + + pci_vendor_id = pci_get_vendor(dev); + if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID) + return (ENXIO); + + pci_device_id = pci_get_device(dev); + pci_subvendor_id = pci_get_subvendor(dev); + pci_subdevice_id = pci_get_subdevice(dev); + + ent = ixv_vendor_info_array; + while (ent->vendor_id != 0) { + if ((pci_vendor_id == ent->vendor_id) && + (pci_device_id == ent->device_id) && + + ((pci_subvendor_id == ent->subvendor_id) || + (ent->subvendor_id == 0)) && + + ((pci_subdevice_id == ent->subdevice_id) || + (ent->subdevice_id == 0))) { + sprintf(adapter_name, "%s, Version - %s", + ixv_strings[ent->index], + ixv_driver_version); + device_set_desc_copy(dev, adapter_name); + return (BUS_PROBE_DEFAULT); + } + ent++; + } + return (ENXIO); +} + +/********************************************************************* + * Device initialization routine + * + * The attach entry point is called when the driver is being loaded. + * This routine identifies the type of hardware, allocates all resources + * and initializes the hardware. + * + * return 0 on success, positive on failure + *********************************************************************/ + +static int +ixv_attach(device_t dev) +{ + struct adapter *adapter; + struct ixgbe_hw *hw; + int error = 0; + + INIT_DEBUGOUT("ixv_attach: begin"); + + /* Allocate, clear, and link in our adapter structure */ + adapter = device_get_softc(dev); + adapter->dev = adapter->osdep.dev = dev; + hw = &adapter->hw; + + /* Core Lock Init*/ + IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev)); + + /* SYSCTL APIs */ + SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), + SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), + OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW, + adapter, 0, ixv_sysctl_debug, "I", "Debug Info"); + + SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), + SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), + OID_AUTO, "enable_aim", CTLFLAG_RW, + &ixv_enable_aim, 1, "Interrupt Moderation"); + + /* Set up the timer callout */ + callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0); + + /* Determine hardware revision */ + ixv_identify_hardware(adapter); + + /* Do base PCI setup - map BAR0 */ + if (ixv_allocate_pci_resources(adapter)) { + device_printf(dev, "Allocation of PCI resources failed\n"); + error = ENXIO; + goto err_out; + } + + /* Do descriptor calc and sanity checks */ + if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 || + ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) { + device_printf(dev, "TXD config issue, using default!\n"); + adapter->num_tx_desc = DEFAULT_TXD; + } else + adapter->num_tx_desc = ixv_txd; + + if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 || + ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) { + device_printf(dev, "RXD config issue, using default!\n"); + adapter->num_rx_desc = DEFAULT_RXD; + } else + adapter->num_rx_desc = ixv_rxd; + + /* Allocate our TX/RX Queues */ + if (ixgbe_allocate_queues(adapter)) { + error = ENOMEM; + goto err_out; + } + + /* + ** Initialize the shared code: its + ** at this point the mac type is set. 
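+ ** (For a VF this is where hw->mac.type is set, e.g. to
+ ** ixgbe_mac_82599_vf, the value tested later in
+ ** ixv_allocate_msix(), and where the mac/phy ops
+ ** pointers get filled in.)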
+ */ + error = ixgbe_init_shared_code(hw); + if (error) { + device_printf(dev,"Shared Code Initialization Failure\n"); + error = EIO; + goto err_late; + } + + /* Setup the mailbox */ + ixgbe_init_mbx_params_vf(hw); + + ixgbe_reset_hw(hw); + + error = ixgbe_init_hw(hw); + if (error) { + device_printf(dev,"Hardware Initialization Failure\n"); + error = EIO; + goto err_late; + } + + error = ixv_allocate_msix(adapter); + if (error) + goto err_late; + + /* If no mac address was assigned, make a random one */ + if (!ixv_check_ether_addr(hw->mac.addr)) { + u8 addr[ETHER_ADDR_LEN]; + arc4rand(&addr, sizeof(addr), 0); + addr[0] &= 0xFE; + addr[0] |= 0x02; + bcopy(addr, hw->mac.addr, sizeof(addr)); + } + + /* Setup OS specific network interface */ + ixv_setup_interface(dev, adapter); + + /* Do the stats setup */ + ixv_save_stats(adapter); + ixv_init_stats(adapter); + ixv_add_stats_sysctls(adapter); + + /* Register for VLAN events */ + adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, + ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST); + adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, + ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST); + + INIT_DEBUGOUT("ixv_attach: end"); + return (0); + +err_late: + ixgbe_free_transmit_structures(adapter); + ixgbe_free_receive_structures(adapter); +err_out: + ixv_free_pci_resources(adapter); + return (error); + +} + +/********************************************************************* + * Device removal routine + * + * The detach entry point is called when the driver is being removed. + * This routine stops the adapter and deallocates all the resources + * that were allocated for driver operation. + * + * return 0 on success, positive on failure + *********************************************************************/ + +static int +ixv_detach(device_t dev) +{ + struct adapter *adapter = device_get_softc(dev); + struct ix_queue *que = adapter->queues; + + INIT_DEBUGOUT("ixv_detach: begin"); + + /* Make sure VLANS are not using driver */ + if (adapter->ifp->if_vlantrunk != NULL) { + device_printf(dev,"Vlan in use, detach first\n"); + return (EBUSY); + } + + IXGBE_CORE_LOCK(adapter); + ixv_stop(adapter); + IXGBE_CORE_UNLOCK(adapter); + + for (int i = 0; i < adapter->num_queues; i++, que++) { + if (que->tq) { + struct tx_ring *txr = que->txr; + taskqueue_drain(que->tq, &txr->txq_task); + taskqueue_drain(que->tq, &que->que_task); + taskqueue_free(que->tq); + } + } + + /* Drain the Mailbox(link) queue */ + if (adapter->tq) { + taskqueue_drain(adapter->tq, &adapter->link_task); + taskqueue_free(adapter->tq); + } + + /* Unregister VLAN events */ + if (adapter->vlan_attach != NULL) + EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach); + if (adapter->vlan_detach != NULL) + EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach); + + ether_ifdetach(adapter->ifp); + callout_drain(&adapter->timer); + ixv_free_pci_resources(adapter); + bus_generic_detach(dev); + if_free(adapter->ifp); + + ixgbe_free_transmit_structures(adapter); + ixgbe_free_receive_structures(adapter); + + IXGBE_CORE_LOCK_DESTROY(adapter); + return (0); +} + +/********************************************************************* + * + * Shutdown entry point + * + **********************************************************************/ +static int +ixv_shutdown(device_t dev) +{ + struct adapter *adapter = device_get_softc(dev); + IXGBE_CORE_LOCK(adapter); + ixv_stop(adapter); + IXGBE_CORE_UNLOCK(adapter); + return (0); +} + + 
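A side note on the randomized station address in ixv_attach() above: the two bit operations make the address unicast (clear bit 0 of the first octet) and locally administered (set bit 1), so it cannot collide with any vendor-assigned OUI. A minimal userland sketch of the same idea, with rand() standing in for the kernel's arc4rand():

#include <stdio.h>
#include <stdlib.h>

/* Sketch only: rand() stands in for arc4rand(), and seeding is
 * omitted for brevity. */
int
main(void)
{
	unsigned char addr[6];

	for (int i = 0; i < 6; i++)
		addr[i] = rand() & 0xff;
	addr[0] &= 0xFE;	/* clear the group bit: unicast */
	addr[0] |= 0x02;	/* set the locally-administered bit */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	    addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
	return (0);
}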
+/********************************************************************* + * Ioctl entry point + * + * ixv_ioctl is called when the user wants to configure the + * interface. + * + * return 0 on success, positive on failure + **********************************************************************/ + +static int +ixv_ioctl(struct ifnet * ifp, u_long command, caddr_t data) +{ + struct adapter *adapter = ifp->if_softc; + struct ifreq *ifr = (struct ifreq *) data; +#if defined(INET) || defined(INET6) + struct ifaddr *ifa = (struct ifaddr *) data; + bool avoid_reset = FALSE; +#endif + int error = 0; + + switch (command) { + + case SIOCSIFADDR: +#ifdef INET + if (ifa->ifa_addr->sa_family == AF_INET) + avoid_reset = TRUE; +#endif +#ifdef INET6 + if (ifa->ifa_addr->sa_family == AF_INET6) + avoid_reset = TRUE; +#endif +#if defined(INET) || defined(INET6) + /* + ** Calling init results in link renegotiation, + ** so we avoid doing it when possible. + */ + if (avoid_reset) { + ifp->if_flags |= IFF_UP; + if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) + ixv_init(adapter); + if (!(ifp->if_flags & IFF_NOARP)) + arp_ifinit(ifp, ifa); + } else + error = ether_ioctl(ifp, command, data); + break; +#endif + case SIOCSIFMTU: + IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)"); + if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - ETHER_HDR_LEN) { + error = EINVAL; + } else { + IXGBE_CORE_LOCK(adapter); + ifp->if_mtu = ifr->ifr_mtu; + adapter->max_frame_size = + ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; + ixv_init_locked(adapter); + IXGBE_CORE_UNLOCK(adapter); + } + break; + case SIOCSIFFLAGS: + IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)"); + IXGBE_CORE_LOCK(adapter); + if (ifp->if_flags & IFF_UP) { + if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) + ixv_init_locked(adapter); + } else + if (ifp->if_drv_flags & IFF_DRV_RUNNING) + ixv_stop(adapter); + adapter->if_flags = ifp->if_flags; + IXGBE_CORE_UNLOCK(adapter); + break; + case SIOCADDMULTI: + case SIOCDELMULTI: + IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI"); + if (ifp->if_drv_flags & IFF_DRV_RUNNING) { + IXGBE_CORE_LOCK(adapter); + ixv_disable_intr(adapter); + ixv_set_multi(adapter); + ixv_enable_intr(adapter); + IXGBE_CORE_UNLOCK(adapter); + } + break; + case SIOCSIFMEDIA: + case SIOCGIFMEDIA: + IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)"); + error = ifmedia_ioctl(ifp, ifr, &adapter->media, command); + break; + case SIOCSIFCAP: + { + int mask = ifr->ifr_reqcap ^ ifp->if_capenable; + IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)"); + if (mask & IFCAP_HWCSUM) + ifp->if_capenable ^= IFCAP_HWCSUM; + if (mask & IFCAP_TSO4) + ifp->if_capenable ^= IFCAP_TSO4; + if (mask & IFCAP_LRO) + ifp->if_capenable ^= IFCAP_LRO; + if (mask & IFCAP_VLAN_HWTAGGING) + ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; + if (ifp->if_drv_flags & IFF_DRV_RUNNING) { + IXGBE_CORE_LOCK(adapter); + ixv_init_locked(adapter); + IXGBE_CORE_UNLOCK(adapter); + } + VLAN_CAPABILITIES(ifp); + break; + } + + default: + IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command); + error = ether_ioctl(ifp, command, data); + break; + } + + return (error); +} + +/********************************************************************* + * Init entry point + * + * This routine is used in two ways. It is used by the stack as + * init entry point in network interface structure. It is also used + * by the driver as a hw/sw initialization routine to get to a + * consistent state. 
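+ *
+ * ixv_init() is the unlocked wrapper handed to the stack via
+ * if_init; internal callers that already hold the core mutex
+ * call ixv_init_locked() directly (see ixv_ioctl above).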
+ * + * return 0 on success, positive on failure + **********************************************************************/ +#define IXGBE_MHADD_MFS_SHIFT 16 + +static void +ixv_init_locked(struct adapter *adapter) +{ + struct ifnet *ifp = adapter->ifp; + device_t dev = adapter->dev; + struct ixgbe_hw *hw = &adapter->hw; + u32 mhadd, gpie; + + INIT_DEBUGOUT("ixv_init: begin"); + mtx_assert(&adapter->core_mtx, MA_OWNED); + hw->adapter_stopped = FALSE; + ixgbe_stop_adapter(hw); + callout_stop(&adapter->timer); + + /* reprogram the RAR[0] in case user changed it. */ + ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); + + /* Get the latest mac address, User can use a LAA */ + bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr, + IXGBE_ETH_LENGTH_OF_ADDRESS); + ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1); + hw->addr_ctrl.rar_used_count = 1; + + /* Prepare transmit descriptors and buffers */ + if (ixgbe_setup_transmit_structures(adapter)) { + device_printf(dev,"Could not setup transmit structures\n"); + ixv_stop(adapter); + return; + } + + ixgbe_reset_hw(hw); + ixv_initialize_transmit_units(adapter); + + /* Setup Multicast table */ + ixv_set_multi(adapter); + + /* + ** Determine the correct mbuf pool + ** for doing jumbo/headersplit + */ + if (ifp->if_mtu > ETHERMTU) + adapter->rx_mbuf_sz = MJUMPAGESIZE; + else + adapter->rx_mbuf_sz = MCLBYTES; + + /* Prepare receive descriptors and buffers */ + if (ixgbe_setup_receive_structures(adapter)) { + device_printf(dev,"Could not setup receive structures\n"); + ixv_stop(adapter); + return; + } + + /* Configure RX settings */ + ixv_initialize_receive_units(adapter); + + /* Enable Enhanced MSIX mode */ + gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE); + gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME; + gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD; + IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); + + /* Set the various hardware offload abilities */ + ifp->if_hwassist = 0; + if (ifp->if_capenable & IFCAP_TSO4) + ifp->if_hwassist |= CSUM_TSO; + if (ifp->if_capenable & IFCAP_TXCSUM) { + ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP); +#if __FreeBSD_version >= 800000 + ifp->if_hwassist |= CSUM_SCTP; +#endif + } + + /* Set MTU size */ + if (ifp->if_mtu > ETHERMTU) { + mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); + mhadd &= ~IXGBE_MHADD_MFS_MASK; + mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT; + IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); + } + + /* Set up VLAN offload and filter */ + ixv_setup_vlan_support(adapter); + + callout_reset(&adapter->timer, hz, ixv_local_timer, adapter); + + /* Set up MSI/X routing */ + ixv_configure_ivars(adapter); + + /* Set up auto-mask */ + IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE); + + /* Set moderation on the Link interrupt */ + IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR); + + /* Stats init */ + ixv_init_stats(adapter); + + /* Config/Enable Link */ + ixv_config_link(adapter); + + /* And now turn on interrupts */ + ixv_enable_intr(adapter); + + /* Now inform the stack we're ready */ + ifp->if_drv_flags |= IFF_DRV_RUNNING; + ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; + + return; +} + +static void +ixv_init(void *arg) +{ + struct adapter *adapter = arg; + + IXGBE_CORE_LOCK(adapter); + ixv_init_locked(adapter); + IXGBE_CORE_UNLOCK(adapter); + return; +} + + +/* +** +** MSIX Interrupt Handlers and Tasklets +** +*/ + +static inline void +ixv_enable_queue(struct adapter *adapter, u32 vector) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 queue = 1 << vector; + u32 mask; + + mask = (IXGBE_EIMS_RTX_QUEUE & queue); 
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); +} + +static inline void +ixv_disable_queue(struct adapter *adapter, u32 vector) +{ + struct ixgbe_hw *hw = &adapter->hw; + u64 queue = (u64)(1 << vector); + u32 mask; + + mask = (IXGBE_EIMS_RTX_QUEUE & queue); + IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask); +} + +static inline void +ixv_rearm_queues(struct adapter *adapter, u64 queues) +{ + u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask); +} + + +static void +ixv_handle_que(void *context, int pending) +{ + struct ix_queue *que = context; + struct adapter *adapter = que->adapter; + struct tx_ring *txr = que->txr; + struct ifnet *ifp = adapter->ifp; + bool more; + + if (ifp->if_drv_flags & IFF_DRV_RUNNING) { + more = ixgbe_rxeof(que); + IXGBE_TX_LOCK(txr); + ixgbe_txeof(txr); +#if __FreeBSD_version >= 800000 + if (!drbr_empty(ifp, txr->br)) + ixgbe_mq_start_locked(ifp, txr); +#else + if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) + ixgbe_start_locked(txr, ifp); +#endif + IXGBE_TX_UNLOCK(txr); + if (more) { + taskqueue_enqueue(que->tq, &que->que_task); + return; + } + } + + /* Reenable this interrupt */ + ixv_enable_queue(adapter, que->msix); + return; +} + +/********************************************************************* + * + * MSI Queue Interrupt Service routine + * + **********************************************************************/ +void +ixv_msix_que(void *arg) +{ + struct ix_queue *que = arg; + struct adapter *adapter = que->adapter; + struct ifnet *ifp = adapter->ifp; + struct tx_ring *txr = que->txr; + struct rx_ring *rxr = que->rxr; + bool more; + u32 newitr = 0; + + ixv_disable_queue(adapter, que->msix); + ++que->irqs; + + more = ixgbe_rxeof(que); + + IXGBE_TX_LOCK(txr); + ixgbe_txeof(txr); + /* + ** Make certain that if the stack + ** has anything queued the task gets + ** scheduled to handle it. + */ +#ifdef IXGBE_LEGACY_TX + if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd)) + ixgbe_start_locked(txr, ifp); +#else + if (!drbr_empty(adapter->ifp, txr->br)) + ixgbe_mq_start_locked(ifp, txr); +#endif + IXGBE_TX_UNLOCK(txr); + + /* Do AIM now? */ + + if (ixv_enable_aim == FALSE) + goto no_calc; + /* + ** Do Adaptive Interrupt Moderation: + ** - Write out last calculated setting + ** - Calculate based on average size over + ** the last interval. 
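+	** Worked example (numbers hypothetical): 64 packets and
+	** 96000 bytes in the interval give 1500 bytes/packet; +24
+	** for frame overhead is 1524, which is under the 3000 cap
+	** but above the mid range, so it is halved to 762 and
+	** written to VTEITR on the next pass through here.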
+	*/
+	if (que->eitr_setting)
+		IXGBE_WRITE_REG(&adapter->hw,
+		    IXGBE_VTEITR(que->msix), que->eitr_setting);
+
+	que->eitr_setting = 0;
+
+	/* Idle, do nothing */
+	if ((txr->bytes == 0) && (rxr->bytes == 0))
+		goto no_calc;
+
+	if ((txr->bytes) && (txr->packets))
+		newitr = txr->bytes/txr->packets;
+	if ((rxr->bytes) && (rxr->packets))
+		newitr = max(newitr,
+		    (rxr->bytes / rxr->packets));
+	newitr += 24; /* account for hardware frame, crc */
+
+	/* set an upper boundary */
+	newitr = min(newitr, 3000);
+
+	/* Be nice to the mid range */
+	if ((newitr > 300) && (newitr < 1200))
+		newitr = (newitr / 3);
+	else
+		newitr = (newitr / 2);
+
+	newitr |= newitr << 16;
+
+	/* save for next interrupt */
+	que->eitr_setting = newitr;
+
+	/* Reset state */
+	txr->bytes = 0;
+	txr->packets = 0;
+	rxr->bytes = 0;
+	rxr->packets = 0;
+
+no_calc:
+	if (more)
+		taskqueue_enqueue(que->tq, &que->que_task);
+	else /* Reenable this interrupt */
+		ixv_enable_queue(adapter, que->msix);
+	return;
+}
+
+static void
+ixv_msix_mbx(void *arg)
+{
+	struct adapter	*adapter = arg;
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32		reg;
+
+	++adapter->link_irq;
+
+	/* First get the cause */
+	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
+	/* Clear interrupt with write */
+	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
+
+	/* Link status change */
+	if (reg & IXGBE_EICR_LSC)
+		taskqueue_enqueue(adapter->tq, &adapter->link_task);
+
+	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
+	return;
+}
+
+/*********************************************************************
+ *
+ *  Media Ioctl callback
+ *
+ *  This routine is called whenever the user queries the status of
+ *  the interface using ifconfig.
+ *
+ **********************************************************************/
+static void
+ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
+{
+	struct adapter *adapter = ifp->if_softc;
+
+	INIT_DEBUGOUT("ixv_media_status: begin");
+	IXGBE_CORE_LOCK(adapter);
+	ixv_update_link_status(adapter);
+
+	ifmr->ifm_status = IFM_AVALID;
+	ifmr->ifm_active = IFM_ETHER;
+
+	if (!adapter->link_active) {
+		IXGBE_CORE_UNLOCK(adapter);
+		return;
+	}
+
+	ifmr->ifm_status |= IFM_ACTIVE;
+
+	switch (adapter->link_speed) {
+		case IXGBE_LINK_SPEED_1GB_FULL:
+			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
+			break;
+		case IXGBE_LINK_SPEED_10GB_FULL:
+			ifmr->ifm_active |= IFM_FDX;
+			break;
+	}
+
+	IXGBE_CORE_UNLOCK(adapter);
+
+	return;
+}
+
+/*********************************************************************
+ *
+ *  Media Ioctl callback
+ *
+ *  This routine is called when the user changes speed/duplex using
+ *  media/mediaopt option with ifconfig.
+ *
+ **********************************************************************/
+static int
+ixv_media_change(struct ifnet * ifp)
+{
+	struct adapter *adapter = ifp->if_softc;
+	struct ifmedia *ifm = &adapter->media;
+
+	INIT_DEBUGOUT("ixv_media_change: begin");
+
+	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
+		return (EINVAL);
+
+	switch (IFM_SUBTYPE(ifm->ifm_media)) {
+	case IFM_AUTO:
+		break;
+	default:
+		device_printf(adapter->dev, "Only auto media type\n");
+		return (EINVAL);
+	}
+
+	return (0);
+}
+
+
+/*********************************************************************
+ *  Multicast Update
+ *
+ *  This routine is called whenever the multicast address list is updated.
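+ *
+ *  The routine below flattens the list into the mta byte array, six
+ *  bytes (IXGBE_ETH_LENGTH_OF_ADDRESS) per entry, so the Nth address
+ *  starts at mta[N * 6]; ixv_mc_array_itr() then walks that array on
+ *  behalf of the shared code.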
+ *
+ **********************************************************************/
+#define IXGBE_RAR_ENTRIES 16
+
+static void
+ixv_set_multi(struct adapter *adapter)
+{
+	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
+	u8	*update_ptr;
+	struct	ifmultiaddr *ifma;
+	int	mcnt = 0;
+	struct ifnet	*ifp = adapter->ifp;
+
+	IOCTL_DEBUGOUT("ixv_set_multi: begin");
+
+#if __FreeBSD_version < 800000
+	IF_ADDR_LOCK(ifp);
+#else
+	if_maddr_rlock(ifp);
+#endif
+	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+		if (ifma->ifma_addr->sa_family != AF_LINK)
+			continue;
+		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
+		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
+		    IXGBE_ETH_LENGTH_OF_ADDRESS);
+		mcnt++;
+	}
+#if __FreeBSD_version < 800000
+	IF_ADDR_UNLOCK(ifp);
+#else
+	if_maddr_runlock(ifp);
+#endif
+
+	update_ptr = mta;
+
+	ixgbe_update_mc_addr_list(&adapter->hw,
+	    update_ptr, mcnt, ixv_mc_array_itr, TRUE);
+
+	return;
+}
+
+/*
+ * This is an iterator function now needed by the multicast
+ * shared code. It simply feeds the shared code routine the
+ * addresses in the array of ixv_set_multi() one by one.
+ */
+static u8 *
+ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
+{
+	u8 *addr = *update_ptr;
+	u8 *newptr;
+	*vmdq = 0;
+
+	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
+	*update_ptr = newptr;
+	return addr;
+}
+
+/*********************************************************************
+ *  Timer routine
+ *
+ *  This routine checks for link status, updates statistics,
+ *  and runs the watchdog check.
+ *
+ **********************************************************************/
+
+static void
+ixv_local_timer(void *arg)
+{
+	struct adapter	*adapter = arg;
+	device_t	dev = adapter->dev;
+	struct ix_queue	*que = adapter->queues;
+	u64		queues = 0;
+	int		hung = 0;
+
+	mtx_assert(&adapter->core_mtx, MA_OWNED);
+
+	ixv_update_link_status(adapter);
+
+	/* Stats Update */
+	ixv_update_stats(adapter);
+
+	/*
+	** Check the TX queues status
+	**	- mark hung queues so we don't schedule on them
+	**	- watchdog only if all queues show hung
+	*/
+	for (int i = 0; i < adapter->num_queues; i++, que++) {
+		/* Keep track of queues with work for soft irq */
+		if (que->txr->busy)
+			queues |= ((u64)1 << que->me);
+		/*
+		** Each time txeof runs without cleaning, while there
+		** are uncleaned descriptors, it increments busy; if
+		** we get to the MAX we declare it hung.
+		*/
+		if (que->busy == IXGBE_QUEUE_HUNG) {
+			++hung;
+			/* Mark the queue as inactive */
+			adapter->active_queues &= ~((u64)1 << que->me);
+			continue;
+		} else {
+			/* Check if we've come back from hung */
+			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
+				adapter->active_queues |= ((u64)1 << que->me);
+		}
+		if (que->busy >= IXGBE_MAX_TX_BUSY) {
+			device_printf(dev, "Warning queue %d "
+			    "appears to be hung!\n", i);
+			que->txr->busy = IXGBE_QUEUE_HUNG;
+			++hung;
+		}
+
+	}
+
+	/* Only truly watchdog if all queues show hung */
+	if (hung == adapter->num_queues)
+		goto watchdog;
+	else if (queues != 0) { /* Force an IRQ on queues with work */
+		ixv_rearm_queues(adapter, queues);
+	}
+
+	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
+	return;
+
+watchdog:
+	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
+	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+	adapter->watchdog_events++;
+	ixv_init_locked(adapter);
+}
+
+/*
+** Note: this routine updates the OS on the link state;
+** the real check of the hardware only happens with
+** a link interrupt.
+*/ +static void +ixv_update_link_status(struct adapter *adapter) +{ + struct ifnet *ifp = adapter->ifp; + device_t dev = adapter->dev; + + if (adapter->link_up){ + if (adapter->link_active == FALSE) { + if (bootverbose) + device_printf(dev,"Link is up %d Gbps %s \n", + ((adapter->link_speed == 128)? 10:1), + "Full Duplex"); + adapter->link_active = TRUE; + if_link_state_change(ifp, LINK_STATE_UP); + } + } else { /* Link down */ + if (adapter->link_active == TRUE) { + if (bootverbose) + device_printf(dev,"Link is Down\n"); + if_link_state_change(ifp, LINK_STATE_DOWN); + adapter->link_active = FALSE; + } + } + + return; +} + + +/********************************************************************* + * + * This routine disables all traffic on the adapter by issuing a + * global reset on the MAC and deallocates TX/RX buffers. + * + **********************************************************************/ + +static void +ixv_stop(void *arg) +{ + struct ifnet *ifp; + struct adapter *adapter = arg; + struct ixgbe_hw *hw = &adapter->hw; + ifp = adapter->ifp; + + mtx_assert(&adapter->core_mtx, MA_OWNED); + + INIT_DEBUGOUT("ixv_stop: begin\n"); + ixv_disable_intr(adapter); + + /* Tell the stack that the interface is no longer active */ + ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); + + ixgbe_reset_hw(hw); + adapter->hw.adapter_stopped = FALSE; + ixgbe_stop_adapter(hw); + callout_stop(&adapter->timer); + + /* reprogram the RAR[0] in case user changed it. */ + ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); + + return; +} + + +/********************************************************************* + * + * Determine hardware revision. + * + **********************************************************************/ +static void +ixv_identify_hardware(struct adapter *adapter) +{ + device_t dev = adapter->dev; + struct ixgbe_hw *hw = &adapter->hw; + + /* + ** Make sure BUSMASTER is set, on a VM under + ** KVM it may not be and will break things. 
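+	** pci_enable_busmaster() simply sets the bus-master bit in
+	** the PCI command register; without it the VF cannot DMA
+	** descriptors or packets, so every ring appears dead.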
+ */ + pci_enable_busmaster(dev); + + /* Save off the information about this board */ + hw->vendor_id = pci_get_vendor(dev); + hw->device_id = pci_get_device(dev); + hw->revision_id = pci_read_config(dev, PCIR_REVID, 1); + hw->subsystem_vendor_id = + pci_read_config(dev, PCIR_SUBVEND_0, 2); + hw->subsystem_device_id = + pci_read_config(dev, PCIR_SUBDEV_0, 2); + + /* We need this to determine device-specific things */ + ixgbe_set_mac_type(hw); + + /* Set the right number of segments */ + adapter->num_segs = IXGBE_82599_SCATTER; + + return; +} + +/********************************************************************* + * + * Setup MSIX Interrupt resources and handlers + * + **********************************************************************/ +static int +ixv_allocate_msix(struct adapter *adapter) +{ + device_t dev = adapter->dev; + struct ix_queue *que = adapter->queues; + struct tx_ring *txr = adapter->tx_rings; + int error, rid, vector = 0; + + for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) { + rid = vector + 1; + que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, + RF_SHAREABLE | RF_ACTIVE); + if (que->res == NULL) { + device_printf(dev,"Unable to allocate" + " bus resource: que interrupt [%d]\n", vector); + return (ENXIO); + } + /* Set the handler function */ + error = bus_setup_intr(dev, que->res, + INTR_TYPE_NET | INTR_MPSAFE, NULL, + ixv_msix_que, que, &que->tag); + if (error) { + que->res = NULL; + device_printf(dev, "Failed to register QUE handler"); + return (error); + } +#if __FreeBSD_version >= 800504 + bus_describe_intr(dev, que->res, que->tag, "que %d", i); +#endif + que->msix = vector; + adapter->active_queues |= (u64)(1 << que->msix); + /* + ** Bind the msix vector, and thus the + ** ring to the corresponding cpu. + */ + if (adapter->num_queues > 1) + bus_bind_intr(dev, que->res, i); + TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr); + TASK_INIT(&que->que_task, 0, ixv_handle_que, que); + que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT, + taskqueue_thread_enqueue, &que->tq); + taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que", + device_get_nameunit(adapter->dev)); + } + + /* and Mailbox */ + rid = vector + 1; + adapter->res = bus_alloc_resource_any(dev, + SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); + if (!adapter->res) { + device_printf(dev,"Unable to allocate" + " bus resource: MBX interrupt [%d]\n", rid); + return (ENXIO); + } + /* Set the mbx handler function */ + error = bus_setup_intr(dev, adapter->res, + INTR_TYPE_NET | INTR_MPSAFE, NULL, + ixv_msix_mbx, adapter, &adapter->tag); + if (error) { + adapter->res = NULL; + device_printf(dev, "Failed to register LINK handler"); + return (error); + } +#if __FreeBSD_version >= 800504 + bus_describe_intr(dev, adapter->res, adapter->tag, "mbx"); +#endif + adapter->vector = vector; + /* Tasklets for Mailbox */ + TASK_INIT(&adapter->link_task, 0, ixv_handle_mbx, adapter); + adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT, + taskqueue_thread_enqueue, &adapter->tq); + taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq", + device_get_nameunit(adapter->dev)); + /* + ** Due to a broken design QEMU will fail to properly + ** enable the guest for MSIX unless the vectors in + ** the table are all set up, so we must rewrite the + ** ENABLE in the MSIX control register again at this + ** point to cause it to successfully initialize us. 
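+	** Concretely, the block below re-reads the capability's
+	** control word at PCIR_MSIX_CTRL and ORs in
+	** PCIM_MSIXCTRL_MSIX_ENABLE, which should be a no-op on
+	** hosts that already latched the enable bit.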
+ */ + if (adapter->hw.mac.type == ixgbe_mac_82599_vf) { + int msix_ctrl; + pci_find_cap(dev, PCIY_MSIX, &rid); + rid += PCIR_MSIX_CTRL; + msix_ctrl = pci_read_config(dev, rid, 2); + msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE; + pci_write_config(dev, rid, msix_ctrl, 2); + } + + return (0); +} + +/* + * Setup MSIX resources, note that the VF + * device MUST use MSIX, there is no fallback. + */ +static int +ixv_setup_msix(struct adapter *adapter) +{ + device_t dev = adapter->dev; + int rid, want; + + + /* First try MSI/X */ + rid = PCIR_BAR(3); + adapter->msix_mem = bus_alloc_resource_any(dev, + SYS_RES_MEMORY, &rid, RF_ACTIVE); + if (adapter->msix_mem == NULL) { + device_printf(adapter->dev, + "Unable to map MSIX table \n"); + goto out; + } + + /* + ** Want two vectors: one for a queue, + ** plus an additional for mailbox. + */ + want = 2; + if ((pci_alloc_msix(dev, &want) == 0) && (want == 2)) { + device_printf(adapter->dev, + "Using MSIX interrupts with %d vectors\n", want); + return (want); + } + /* Release in case alloc was insufficient */ + pci_release_msi(dev); +out: + if (adapter->msix_mem != NULL) { + bus_release_resource(dev, SYS_RES_MEMORY, + rid, adapter->msix_mem); + adapter->msix_mem = NULL; + } + device_printf(adapter->dev,"MSIX config error\n"); + return (ENXIO); +} + + +static int +ixv_allocate_pci_resources(struct adapter *adapter) +{ + int rid; + device_t dev = adapter->dev; + + rid = PCIR_BAR(0); + adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, + &rid, RF_ACTIVE); + + if (!(adapter->pci_mem)) { + device_printf(dev,"Unable to allocate bus resource: memory\n"); + return (ENXIO); + } + + adapter->osdep.mem_bus_space_tag = + rman_get_bustag(adapter->pci_mem); + adapter->osdep.mem_bus_space_handle = + rman_get_bushandle(adapter->pci_mem); + adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle; + + adapter->num_queues = 1; + adapter->hw.back = &adapter->osdep; + + /* + ** Now setup MSI/X, should + ** return us the number of + ** configured vectors. + */ + adapter->msix = ixv_setup_msix(adapter); + if (adapter->msix == ENXIO) + return (ENXIO); + else + return (0); +} + +static void +ixv_free_pci_resources(struct adapter * adapter) +{ + struct ix_queue *que = adapter->queues; + device_t dev = adapter->dev; + int rid, memrid; + + memrid = PCIR_BAR(MSIX_82598_BAR); + + /* + ** There is a slight possibility of a failure mode + ** in attach that will result in entering this function + ** before interrupt resources have been initialized, and + ** in that case we do not want to execute the loops below + ** We can detect this reliably by the state of the adapter + ** res pointer. + */ + if (adapter->res == NULL) + goto mem; + + /* + ** Release all msix queue resources: + */ + for (int i = 0; i < adapter->num_queues; i++, que++) { + rid = que->msix + 1; + if (que->tag != NULL) { + bus_teardown_intr(dev, que->res, que->tag); + que->tag = NULL; + } + if (que->res != NULL) + bus_release_resource(dev, SYS_RES_IRQ, rid, que->res); + } + + + /* Clean the Legacy or Link interrupt last */ + if (adapter->vector) /* we are doing MSIX */ + rid = adapter->vector + 1; + else + (adapter->msix != 0) ? 
(rid = 1):(rid = 0); + + if (adapter->tag != NULL) { + bus_teardown_intr(dev, adapter->res, adapter->tag); + adapter->tag = NULL; + } + if (adapter->res != NULL) + bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res); + +mem: + if (adapter->msix) + pci_release_msi(dev); + + if (adapter->msix_mem != NULL) + bus_release_resource(dev, SYS_RES_MEMORY, + memrid, adapter->msix_mem); + + if (adapter->pci_mem != NULL) + bus_release_resource(dev, SYS_RES_MEMORY, + PCIR_BAR(0), adapter->pci_mem); + + return; +} + +/********************************************************************* + * + * Setup networking device structure and register an interface. + * + **********************************************************************/ +static void +ixv_setup_interface(device_t dev, struct adapter *adapter) +{ + struct ifnet *ifp; + + INIT_DEBUGOUT("ixv_setup_interface: begin"); + + ifp = adapter->ifp = if_alloc(IFT_ETHER); + if (ifp == NULL) + panic("%s: can not if_alloc()\n", device_get_nameunit(dev)); + if_initname(ifp, device_get_name(dev), device_get_unit(dev)); + ifp->if_baudrate = 1000000000; + ifp->if_init = ixv_init; + ifp->if_softc = adapter; + ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; + ifp->if_ioctl = ixv_ioctl; +#if __FreeBSD_version >= 800000 + ifp->if_transmit = ixgbe_mq_start; + ifp->if_qflush = ixgbe_qflush; +#else + ifp->if_start = ixgbe_start; +#endif + ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2; + + ether_ifattach(ifp, adapter->hw.mac.addr); + + adapter->max_frame_size = + ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; + + /* + * Tell the upper layer(s) we support long frames. + */ + ifp->if_hdrlen = sizeof(struct ether_vlan_header); + + ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM; + ifp->if_capabilities |= IFCAP_JUMBO_MTU; + ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING + | IFCAP_VLAN_HWTSO + | IFCAP_VLAN_MTU; + ifp->if_capabilities |= IFCAP_LRO; + ifp->if_capenable = ifp->if_capabilities; + + /* + * Specify the media types supported by this adapter and register + * callbacks to update media and link information + */ + ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change, + ixv_media_status); + ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL); + ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); + ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); + + return; +} + +static void +ixv_config_link(struct adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 autoneg, err = 0; + + if (hw->mac.ops.check_link) + err = hw->mac.ops.check_link(hw, &autoneg, + &adapter->link_up, FALSE); + if (err) + goto out; + + if (hw->mac.ops.setup_link) + err = hw->mac.ops.setup_link(hw, + autoneg, adapter->link_up); +out: + return; +} + + +/********************************************************************* + * + * Enable transmit unit. 
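+ * (For each ring this programs the base/length registers
+ * VFTDBAL/VFTDBAH/VFTDLEN, zeroes the head and tail indices,
+ * and finally sets the ENABLE bit in VFTXDCTL.)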
+ * + **********************************************************************/ +static void +ixv_initialize_transmit_units(struct adapter *adapter) +{ + struct tx_ring *txr = adapter->tx_rings; + struct ixgbe_hw *hw = &adapter->hw; + + + for (int i = 0; i < adapter->num_queues; i++, txr++) { + u64 tdba = txr->txdma.dma_paddr; + u32 txctrl, txdctl; + + /* Set WTHRESH to 8, burst writeback */ + txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i)); + txdctl |= (8 << 16); + IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl); + + /* Set the HW Tx Head and Tail indices */ + IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0); + + /* Set Tx Tail register */ + txr->tail = IXGBE_VFTDT(i); + + /* Set the processing limit */ + txr->process_limit = ixv_tx_process_limit; + + /* Set Ring parameters */ + IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i), + (tdba & 0x00000000ffffffffULL)); + IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32)); + IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i), + adapter->num_tx_desc * + sizeof(struct ixgbe_legacy_tx_desc)); + txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i)); + txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; + IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl); + + /* Now enable */ + txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i)); + txdctl |= IXGBE_TXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl); + } + + return; +} + + +/********************************************************************* + * + * Setup receive registers and features. + * + **********************************************************************/ +#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 + +static void +ixv_initialize_receive_units(struct adapter *adapter) +{ + struct rx_ring *rxr = adapter->rx_rings; + struct ixgbe_hw *hw = &adapter->hw; + struct ifnet *ifp = adapter->ifp; + u32 bufsz, fctrl, rxcsum, hlreg; + + + /* Enable broadcasts */ + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + fctrl |= IXGBE_FCTRL_BAM; + fctrl |= IXGBE_FCTRL_DPF; + fctrl |= IXGBE_FCTRL_PMCF; + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + + /* Set for Jumbo Frames? 
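+	   (JUMBOEN also selects the 4K receive buffer size
+	   below; the default is 2K.)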
*/
+	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+	if (ifp->if_mtu > ETHERMTU) {
+		hlreg |= IXGBE_HLREG0_JUMBOEN;
+		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+	} else {
+		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
+		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+	}
+	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
+
+	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
+		u64 rdba = rxr->rxdma.dma_paddr;
+		u32 reg, rxdctl;
+
+		/* Setup the Base and Length of the Rx Descriptor Ring */
+		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
+		    (rdba & 0x00000000ffffffffULL));
+		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
+		    (rdba >> 32));
+		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
+		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
+
+		/* Set up the SRRCTL register */
+		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
+		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
+		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
+		reg |= bufsz;
+		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
+
+		/* Setup the HW Rx Head and Tail Descriptor Pointers */
+		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
+		    adapter->num_rx_desc - 1);
+		/* Set the processing limit */
+		rxr->process_limit = ixv_rx_process_limit;
+
+		/* Set Rx Tail register */
+		rxr->tail = IXGBE_VFRDT(rxr->me);
+
+		/* Do the queue enabling last */
+		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
+		rxdctl |= IXGBE_RXDCTL_ENABLE;
+		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
+		for (int k = 0; k < 10; k++) {
+			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
+			    IXGBE_RXDCTL_ENABLE)
+				break;
+			else
+				msec_delay(1);
+		}
+		wmb();
+	}
+
+	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
+
+	if (ifp->if_capenable & IFCAP_RXCSUM)
+		rxcsum |= IXGBE_RXCSUM_PCSD;
+
+	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
+		rxcsum |= IXGBE_RXCSUM_IPPCSE;
+
+	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
+
+	return;
+}
+
+static void
+ixv_setup_vlan_support(struct adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 ctrl, vid, vfta, retry;
+
+
+	/*
+	** We get here through init_locked, meaning
+	** a soft reset; that has already cleared
+	** the VFTA and other state, so if no
+	** VLANs have been registered do nothing.
+	*/
+	if (adapter->num_vlans == 0)
+		return;
+
+	/* Enable the queues */
+	for (int i = 0; i < adapter->num_queues; i++) {
+		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
+		ctrl |= IXGBE_RXDCTL_VME;
+		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
+	}
+
+	/*
+	** A soft reset zeroes out the VFTA, so
+	** we need to repopulate it now.
+	*/
+	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
+		if (ixv_shadow_vfta[i] == 0)
+			continue;
+		vfta = ixv_shadow_vfta[i];
+		/*
+		** Reconstruct the VLAN ids
+		** based on the bits set in each
+		** of the array ints.
+		*/
+		for (int j = 0; j < 32; j++) {
+			retry = 0;
+			if ((vfta & (1 << j)) == 0)
+				continue;
+			vid = (i * 32) + j;
+			/* Call the shared code mailbox routine */
+			while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
+				if (++retry > 5)
+					break;
+			}
+		}
+	}
+}
+
+/*
+** This routine is run via a VLAN config EVENT; it
+** lets us use the HW filter table since we can get
+** the VLAN id from the event. It just creates the
+** entry in the soft version of the VFTA; init will
+** repopulate the real table. 
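+** (Worked example: vtag 100 gives index 3 (100 >> 5) and
+** bit 4 (100 & 0x1F), so ixv_shadow_vfta[3] |= (1 << 4).)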
+*/
+static void
+ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
+{
+	struct adapter	*adapter = ifp->if_softc;
+	u16		index, bit;
+
+	if (ifp->if_softc != arg)   /* Not our event */
+		return;
+
+	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
+		return;
+
+	IXGBE_CORE_LOCK(adapter);
+	index = (vtag >> 5) & 0x7F;
+	bit = vtag & 0x1F;
+	ixv_shadow_vfta[index] |= (1 << bit);
+	++adapter->num_vlans;
+	/* Re-init to load the changes */
+	ixv_init_locked(adapter);
+	IXGBE_CORE_UNLOCK(adapter);
+}
+
+/*
+** This routine is run via a VLAN
+** unconfig EVENT; remove our entry
+** from the soft VFTA.
+*/
+static void
+ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
+{
+	struct adapter	*adapter = ifp->if_softc;
+	u16		index, bit;
+
+	if (ifp->if_softc != arg)
+		return;
+
+	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
+		return;
+
+	IXGBE_CORE_LOCK(adapter);
+	index = (vtag >> 5) & 0x7F;
+	bit = vtag & 0x1F;
+	ixv_shadow_vfta[index] &= ~(1 << bit);
+	--adapter->num_vlans;
+	/* Re-init to load the changes */
+	ixv_init_locked(adapter);
+	IXGBE_CORE_UNLOCK(adapter);
+}
+
+static void
+ixv_enable_intr(struct adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	struct ix_queue *que = adapter->queues;
+	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
+
+
+	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+
+	mask = IXGBE_EIMS_ENABLE_MASK;
+	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
+	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
+
+	for (int i = 0; i < adapter->num_queues; i++, que++)
+		ixv_enable_queue(adapter, que->msix);
+
+	IXGBE_WRITE_FLUSH(hw);
+
+	return;
+}
+
+static void
+ixv_disable_intr(struct adapter *adapter)
+{
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
+	IXGBE_WRITE_FLUSH(&adapter->hw);
+	return;
+}
+
+/*
+** Setup the correct IVAR register for a particular MSIX interrupt
+** - entry is the register array entry
+** - vector is the MSIX vector for this queue
+** - type is RX/TX/MISC
+*/
+static void
+ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 ivar, index;
+
+	vector |= IXGBE_IVAR_ALLOC_VAL;
+
+	if (type == -1) { /* MISC IVAR */
+		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
+		ivar &= ~0xFF;
+		ivar |= vector;
+		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
+	} else {	/* RX/TX IVARS */
+		index = (16 * (entry & 1)) + (8 * type);
+		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
+		ivar &= ~(0xFF << index);
+		ivar |= (vector << index);
+		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
+	}
+}
+
+static void
+ixv_configure_ivars(struct adapter *adapter)
+{
+	struct ix_queue *que = adapter->queues;
+
+	for (int i = 0; i < adapter->num_queues; i++, que++) {
+		/* First the RX queue entry */
+		ixv_set_ivar(adapter, i, que->msix, 0);
+		/* ... 
and the TX */
+		ixv_set_ivar(adapter, i, que->msix, 1);
+		/* Set an initial value in EITR */
+		IXGBE_WRITE_REG(&adapter->hw,
+		    IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
+	}
+
+	/* For the mailbox interrupt */
+	ixv_set_ivar(adapter, 1, adapter->vector, -1);
+}
+
+
+/*
+** Tasklet handler for MSIX MBX interrupts
+** - run outside interrupt context since it might sleep
+*/
+static void
+ixv_handle_mbx(void *context, int pending)
+{
+	struct adapter *adapter = context;
+
+	ixgbe_check_link(&adapter->hw,
+	    &adapter->link_speed, &adapter->link_up, 0);
+	ixv_update_link_status(adapter);
+}
+
+/*
+** The VF stats registers never have a truly virgin
+** starting point, so this routine tries to make an
+** artificial one, marking ground zero on attach, as
+** it were.
+*/
+static void
+ixv_save_stats(struct adapter *adapter)
+{
+	if (adapter->stats.vf.vfgprc || adapter->stats.vf.vfgptc) {
+		adapter->stats.vf.saved_reset_vfgprc +=
+		    adapter->stats.vf.vfgprc - adapter->stats.vf.base_vfgprc;
+		adapter->stats.vf.saved_reset_vfgptc +=
+		    adapter->stats.vf.vfgptc - adapter->stats.vf.base_vfgptc;
+		adapter->stats.vf.saved_reset_vfgorc +=
+		    adapter->stats.vf.vfgorc - adapter->stats.vf.base_vfgorc;
+		adapter->stats.vf.saved_reset_vfgotc +=
+		    adapter->stats.vf.vfgotc - adapter->stats.vf.base_vfgotc;
+		adapter->stats.vf.saved_reset_vfmprc +=
+		    adapter->stats.vf.vfmprc - adapter->stats.vf.base_vfmprc;
+	}
+}
+
+static void
+ixv_init_stats(struct adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
+	adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
+	adapter->stats.vf.last_vfgorc |=
+	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
+
+	adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
+	adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
+	adapter->stats.vf.last_vfgotc |=
+	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
+
+	adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
+
+	adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
+	adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
+	adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
+	adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
+	adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
+}
+
+#define UPDATE_STAT_32(reg, last, count)		\
+{							\
+	u32 current = IXGBE_READ_REG(hw, reg);		\
+	if (current < last)				\
+		count += 0x100000000LL;			\
+	last = current;					\
+	count &= 0xFFFFFFFF00000000LL;			\
+	count |= current;				\
+}
+
+#define UPDATE_STAT_36(lsb, msb, last, count)		\
+{							\
+	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);		\
+	u64 cur_msb = IXGBE_READ_REG(hw, msb);		\
+	u64 current = ((cur_msb << 32) | cur_lsb);	\
+	if (current < last)				\
+		count += 0x1000000000LL;		\
+	last = current;					\
+	count &= 0xFFFFFFF000000000LL;			\
+	count |= current;				\
+}
+
+/*
+** ixv_update_stats - Update the board statistics counters. 
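+** The HW counters are only 32 (or, for the octet counts, 36)
+** bits wide and wrap; the UPDATE_STAT macros above detect a
+** wrap when the current reading is below the last one and
+** carry 2^32 (or 2^36) into the upper bits of the 64-bit
+** software count.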
+*/ +void +ixv_update_stats(struct adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + + UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc, + adapter->stats.vf.vfgprc); + UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc, + adapter->stats.vf.vfgptc); + UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, + adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc); + UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, + adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc); + UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc, + adapter->stats.vf.vfmprc); +} + +/* + * Add statistic sysctls for the VF. + */ +static void +ixv_add_stats_sysctls(struct adapter *adapter) +{ + device_t dev = adapter->dev; + struct ix_queue *que = &adapter->queues[0]; + struct tx_ring *txr = que->txr; + struct rx_ring *rxr = que->rxr; + + struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); + struct sysctl_oid *tree = device_get_sysctl_tree(dev); + struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); + struct ixgbevf_hw_stats *stats = &adapter->stats.vf; + + struct sysctl_oid *stat_node, *queue_node; + struct sysctl_oid_list *stat_list, *queue_list; + + /* Driver Statistics */ + SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped", + CTLFLAG_RD, &adapter->dropped_pkts, + "Driver dropped packets"); + SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed", + CTLFLAG_RD, &adapter->mbuf_defrag_failed, + "m_defrag() failed"); + SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events", + CTLFLAG_RD, &adapter->watchdog_events, + "Watchdog timeouts"); + + stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac", + CTLFLAG_RD, NULL, + "VF Statistics (read from HW registers)"); + stat_list = SYSCTL_CHILDREN(stat_node); + + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd", + CTLFLAG_RD, &stats->vfgprc, + "Good Packets Received"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd", + CTLFLAG_RD, &stats->vfgorc, + "Good Octets Received"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd", + CTLFLAG_RD, &stats->vfmprc, + "Multicast Packets Received"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd", + CTLFLAG_RD, &stats->vfgptc, + "Good Packets Transmitted"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd", + CTLFLAG_RD, &stats->vfgotc, + "Good Octets Transmitted"); + + queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "que", + CTLFLAG_RD, NULL, + "Queue Statistics (collected by SW)"); + queue_list = SYSCTL_CHILDREN(queue_node); + + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs", + CTLFLAG_RD, &(que->irqs), + "IRQs on queue"); + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_irqs", + CTLFLAG_RD, &(rxr->rx_irq), + "RX irqs on queue"); + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets", + CTLFLAG_RD, &(rxr->rx_packets), + "RX packets"); + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes", + CTLFLAG_RD, &(rxr->rx_bytes), + "RX bytes"); + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded", + CTLFLAG_RD, &(rxr->rx_discarded), + "Discarded RX packets"); + + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets", + CTLFLAG_RD, &(txr->total_packets), + "TX Packets"); + SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_bytes", + CTLFLAG_RD, &(txr->bytes), 0, + "TX Bytes"); + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_no_desc", + CTLFLAG_RD, &(txr->no_desc_avail), + "# of times not enough descriptors were available during TX"); +} + 
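+/*
+** (Usage sketch: once attached, the counters registered above
+** can be read from userland with sysctl(8), e.g.
+**	sysctl dev.ixv.0.mac.good_pkts_rcvd
+** where the "dev.ixv.0" prefix is an assumption that depends
+** on the driver name and unit number of the instance.)
+*/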
+/**********************************************************************
+ *
+ * This routine is invoked from the debug sysctl handler below and
+ * provides a way to take a look at important statistics
+ * maintained by the driver and hardware.
+ *
+ **********************************************************************/
+static void
+ixv_print_debug_info(struct adapter *adapter)
+{
+	device_t dev = adapter->dev;
+	struct ixgbe_hw *hw = &adapter->hw;
+	struct ix_queue *que = adapter->queues;
+	struct rx_ring *rxr;
+	struct tx_ring *txr;
+	struct lro_ctrl *lro;
+
+	device_printf(dev, "Error Byte Count = %u \n",
+	    IXGBE_READ_REG(hw, IXGBE_ERRBC));
+
+	for (int i = 0; i < adapter->num_queues; i++, que++) {
+		txr = que->txr;
+		rxr = que->rxr;
+		lro = &rxr->lro;
+		device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
+		    que->msix, (long)que->irqs);
+		device_printf(dev, "RX(%d) Packets Received: %lld\n",
+		    rxr->me, (long long)rxr->rx_packets);
+		device_printf(dev, "RX(%d) Bytes Received: %lu\n",
+		    rxr->me, (long)rxr->rx_bytes);
+		device_printf(dev, "RX(%d) LRO Queued= %d\n",
+		    rxr->me, lro->lro_queued);
+		device_printf(dev, "RX(%d) LRO Flushed= %d\n",
+		    rxr->me, lro->lro_flushed);
+		device_printf(dev, "TX(%d) Packets Sent: %lu\n",
+		    txr->me, (long)txr->total_packets);
+		device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
+		    txr->me, (long)txr->no_desc_avail);
+	}
+
+	device_printf(dev, "MBX IRQ Handled: %lu\n",
+	    (long)adapter->link_irq);
+	return;
+}
+
+static int
+ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
+{
+	int error, result;
+	struct adapter *adapter;
+
+	result = -1;
+	error = sysctl_handle_int(oidp, &result, 0, req);
+
+	if (error || !req->newptr)
+		return (error);
+
+	if (result == 1) {
+		adapter = (struct adapter *) arg1;
+		ixv_print_debug_info(adapter);
+	}
+	return (error);
+}
+
diff --git a/sys/dev/ixgbe/ix_txrx.c b/sys/dev/ixgbe/ix_txrx.c
new file mode 100644
index 0000000..3311f33
--- /dev/null
+++ b/sys/dev/ixgbe/ix_txrx.c
@@ -0,0 +1,2220 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2015, Intel Corporation
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions are met:
+
+   1. Redistributions of source code must retain the above copyright notice,
+      this list of conditions and the following disclaimer.
+
+   2. Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in the
+      documentation and/or other materials provided with the distribution.
+
+   3. Neither the name of the Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived from
+      this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + +******************************************************************************/ +/*$FreeBSD$*/ + + +#ifndef IXGBE_STANDALONE_BUILD +#include "opt_inet.h" +#include "opt_inet6.h" +#endif + +#include "ixgbe.h" + +#ifdef DEV_NETMAP +#include <net/netmap.h> +#include <sys/selinfo.h> +#include <dev/netmap/netmap_kern.h> + +extern int ix_crcstrip; +#endif + +/* +** HW RSC control: +** this feature only works with +** IPv4, and only on 82599 and later. +** Also this will cause IP forwarding to +** fail and that can't be controlled by +** the stack as LRO can. For all these +** reasons I've deemed it best to leave +** this off and not bother with a tuneable +** interface, this would need to be compiled +** to enable. +*/ +static bool ixgbe_rsc_enable = FALSE; + +#ifdef IXGBE_FDIR +/* +** For Flow Director: this is the +** number of TX packets we sample +** for the filter pool, this means +** every 20th packet will be probed. +** +** This feature can be disabled by +** setting this to 0. +*/ +static int atr_sample_rate = 20; +#endif + +/* Shared PCI config read/write */ +inline u16 +ixgbe_read_pci_cfg(struct ixgbe_hw *hw, u32 reg) +{ + u16 value; + + value = pci_read_config(((struct ixgbe_osdep *)hw->back)->dev, + reg, 2); + + return (value); +} + +inline void +ixgbe_write_pci_cfg(struct ixgbe_hw *hw, u32 reg, u16 value) +{ + pci_write_config(((struct ixgbe_osdep *)hw->back)->dev, + reg, value, 2); + + return; +} + +/********************************************************************* + * Local Function prototypes + *********************************************************************/ +static void ixgbe_setup_transmit_ring(struct tx_ring *); +static void ixgbe_free_transmit_buffers(struct tx_ring *); +static int ixgbe_setup_receive_ring(struct rx_ring *); +static void ixgbe_free_receive_buffers(struct rx_ring *); + +static void ixgbe_rx_checksum(u32, struct mbuf *, u32); +static void ixgbe_refresh_mbufs(struct rx_ring *, int); +static int ixgbe_xmit(struct tx_ring *, struct mbuf **); +static int ixgbe_tx_ctx_setup(struct tx_ring *, + struct mbuf *, u32 *, u32 *); +static int ixgbe_tso_setup(struct tx_ring *, + struct mbuf *, u32 *, u32 *); +#ifdef IXGBE_FDIR +static void ixgbe_atr(struct tx_ring *, struct mbuf *); +#endif +static __inline void ixgbe_rx_discard(struct rx_ring *, int); +static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *, + struct mbuf *, u32); + +#ifdef IXGBE_LEGACY_TX +/********************************************************************* + * Transmit entry point + * + * ixgbe_start is called by the stack to initiate a transmit. + * The driver will remain in this routine as long as there are + * packets to transmit and transmit resources are available. + * In case resources are not available stack is notified and + * the packet is requeued. 
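+ * (ixgbe_start() below is the legacy if_start entry point; it
+ * takes the TX lock and calls ixgbe_start_locked(), which drains
+ * if_snd through ixgbe_xmit() one packet at a time.)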
+ **********************************************************************/
+
+void
+ixgbe_start_locked(struct tx_ring *txr, struct ifnet *ifp)
+{
+	struct mbuf    *m_head;
+	struct adapter *adapter = txr->adapter;
+
+	IXGBE_TX_LOCK_ASSERT(txr);
+
+	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+		return;
+	if (!adapter->link_active)
+		return;
+
+	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
+		if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE)
+			break;
+
+		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
+		if (m_head == NULL)
+			break;
+
+		if (ixgbe_xmit(txr, &m_head)) {
+			if (m_head != NULL)
+				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
+			break;
+		}
+		/* Send a copy of the frame to the BPF listener */
+		ETHER_BPF_MTAP(ifp, m_head);
+	}
+	return;
+}
+
+/*
+ * Legacy TX start - called by the stack; this
+ * always uses the first tx ring, and should
+ * not be used with multiqueue tx enabled.
+ */
+void
+ixgbe_start(struct ifnet *ifp)
+{
+	struct adapter *adapter = ifp->if_softc;
+	struct tx_ring	*txr = adapter->tx_rings;
+
+	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+		IXGBE_TX_LOCK(txr);
+		ixgbe_start_locked(txr, ifp);
+		IXGBE_TX_UNLOCK(txr);
+	}
+	return;
+}
+
+#else /* ! IXGBE_LEGACY_TX */
+
+/*
+** Multiqueue Transmit driver
+**
+*/
+int
+ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
+{
+	struct adapter	*adapter = ifp->if_softc;
+	struct ix_queue	*que;
+	struct tx_ring	*txr;
+	int 		i, err = 0;
+
+	/*
+	 * When doing RSS, map it to the same outbound queue
+	 * as the incoming flow would be mapped to.
+	 *
+	 * If everything is set up correctly, it should be the
+	 * same bucket as the one the current CPU is in.
+	 */
+	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
+		i = m->m_pkthdr.flowid % adapter->num_queues;
+	else
+		i = curcpu % adapter->num_queues;
+
+	/* Check for a hung queue and pick alternative */
+	if (((1 << i) & adapter->active_queues) == 0)
+		i = ffsl(adapter->active_queues);
+
+	txr = &adapter->tx_rings[i];
+	que = &adapter->queues[i];
+
+	err = drbr_enqueue(ifp, txr->br, m);
+	if (err)
+		return (err);
+	if (IXGBE_TX_TRYLOCK(txr)) {
+		ixgbe_mq_start_locked(ifp, txr);
+		IXGBE_TX_UNLOCK(txr);
+	} else
+		taskqueue_enqueue(que->tq, &txr->txq_task);
+
+	return (0);
+}
+
+int
+ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr)
+{
+	struct adapter  *adapter = txr->adapter;
+	struct mbuf     *next;
+	int             enqueued = 0, err = 0;
+
+	if (((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) ||
+	    adapter->link_active == 0)
+		return (ENETDOWN);
+
+	/* Process the queue */
+#if __FreeBSD_version < 901504
+	next = drbr_dequeue(ifp, txr->br);
+	while (next != NULL) {
+		if ((err = ixgbe_xmit(txr, &next)) != 0) {
+			if (next != NULL)
+				err = drbr_enqueue(ifp, txr->br, next);
+#else
+	while ((next = drbr_peek(ifp, txr->br)) != NULL) {
+		if ((err = ixgbe_xmit(txr, &next)) != 0) {
+			if (next == NULL) {
+				drbr_advance(ifp, txr->br);
+			} else {
+				drbr_putback(ifp, txr->br, next);
+			}
+#endif
+			break;
+		}
+#if __FreeBSD_version >= 901504
+		drbr_advance(ifp, txr->br);
+#endif
+		enqueued++;
+#if 0 // this is VF-only
+#if __FreeBSD_version >= 1100036
+		/*
+		 * Since we're looking at the tx ring, we can check
+		 * to see if we're a VF by examining our tail register
+		 * address. 
+ */ + if (txr->tail < IXGBE_TDT(0) && next->m_flags & M_MCAST) + if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1); +#endif +#endif + /* Send a copy of the frame to the BPF listener */ + ETHER_BPF_MTAP(ifp, next); + if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) + break; +#if __FreeBSD_version < 901504 + next = drbr_dequeue(ifp, txr->br); +#endif + } + + if (txr->tx_avail < IXGBE_TX_CLEANUP_THRESHOLD) + ixgbe_txeof(txr); + + return (err); +} + +/* + * Called from a taskqueue to drain queued transmit packets. + */ +void +ixgbe_deferred_mq_start(void *arg, int pending) +{ + struct tx_ring *txr = arg; + struct adapter *adapter = txr->adapter; + struct ifnet *ifp = adapter->ifp; + + IXGBE_TX_LOCK(txr); + if (!drbr_empty(ifp, txr->br)) + ixgbe_mq_start_locked(ifp, txr); + IXGBE_TX_UNLOCK(txr); +} + +/* + * Flush all ring buffers + */ +void +ixgbe_qflush(struct ifnet *ifp) +{ + struct adapter *adapter = ifp->if_softc; + struct tx_ring *txr = adapter->tx_rings; + struct mbuf *m; + + for (int i = 0; i < adapter->num_queues; i++, txr++) { + IXGBE_TX_LOCK(txr); + while ((m = buf_ring_dequeue_sc(txr->br)) != NULL) + m_freem(m); + IXGBE_TX_UNLOCK(txr); + } + if_qflush(ifp); +} +#endif /* IXGBE_LEGACY_TX */ + + +/********************************************************************* + * + * This routine maps the mbufs to tx descriptors, allowing the + * TX engine to transmit the packets. + * - return 0 on success, positive on failure + * + **********************************************************************/ + +static int +ixgbe_xmit(struct tx_ring *txr, struct mbuf **m_headp) +{ + struct adapter *adapter = txr->adapter; + u32 olinfo_status = 0, cmd_type_len; + int i, j, error, nsegs; + int first; + bool remap = TRUE; + struct mbuf *m_head; + bus_dma_segment_t segs[adapter->num_segs]; + bus_dmamap_t map; + struct ixgbe_tx_buf *txbuf; + union ixgbe_adv_tx_desc *txd = NULL; + + m_head = *m_headp; + + /* Basic descriptor defines */ + cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA | + IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT); + + if (m_head->m_flags & M_VLANTAG) + cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE; + + /* + * Important to capture the first descriptor + * used because it will contain the index of + * the one we tell the hardware to report back + */ + first = txr->next_avail_desc; + txbuf = &txr->tx_buffers[first]; + map = txbuf->map; + + /* + * Map the packet for DMA. + */ +retry: + error = bus_dmamap_load_mbuf_sg(txr->txtag, map, + *m_headp, segs, &nsegs, BUS_DMA_NOWAIT); + + if (__predict_false(error)) { + struct mbuf *m; + + switch (error) { + case EFBIG: + /* Try it again? 
- one try */ + if (remap == TRUE) { + remap = FALSE; + /* + * XXX: m_defrag will choke on + * non-MCLBYTES-sized clusters + */ + m = m_defrag(*m_headp, M_NOWAIT); + if (m == NULL) { + adapter->mbuf_defrag_failed++; + m_freem(*m_headp); + *m_headp = NULL; + return (ENOBUFS); + } + *m_headp = m; + goto retry; + } else + return (error); + case ENOMEM: + txr->no_tx_dma_setup++; + return (error); + default: + txr->no_tx_dma_setup++; + m_freem(*m_headp); + *m_headp = NULL; + return (error); + } + } + + /* Make certain there are enough descriptors */ + if (nsegs > txr->tx_avail - 2) { + txr->no_desc_avail++; + bus_dmamap_unload(txr->txtag, map); + return (ENOBUFS); + } + m_head = *m_headp; + + /* + * Set up the appropriate offload context + * this will consume the first descriptor + */ + error = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status); + if (__predict_false(error)) { + if (error == ENOBUFS) + *m_headp = NULL; + return (error); + } + +#ifdef IXGBE_FDIR + /* Do the flow director magic */ + if ((txr->atr_sample) && (!adapter->fdir_reinit)) { + ++txr->atr_count; + if (txr->atr_count >= atr_sample_rate) { + ixgbe_atr(txr, m_head); + txr->atr_count = 0; + } + } +#endif + + i = txr->next_avail_desc; + for (j = 0; j < nsegs; j++) { + bus_size_t seglen; + bus_addr_t segaddr; + + txbuf = &txr->tx_buffers[i]; + txd = &txr->tx_base[i]; + seglen = segs[j].ds_len; + segaddr = htole64(segs[j].ds_addr); + + txd->read.buffer_addr = segaddr; + txd->read.cmd_type_len = htole32(txr->txd_cmd | + cmd_type_len |seglen); + txd->read.olinfo_status = htole32(olinfo_status); + + if (++i == txr->num_desc) + i = 0; + } + + txd->read.cmd_type_len |= + htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS); + txr->tx_avail -= nsegs; + txr->next_avail_desc = i; + + txbuf->m_head = m_head; + /* + * Here we swap the map so the last descriptor, + * which gets the completion interrupt has the + * real map, and the first descriptor gets the + * unused map from this descriptor. + */ + txr->tx_buffers[first].map = txbuf->map; + txbuf->map = map; + bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE); + + /* Set the EOP descriptor that will be marked done */ + txbuf = &txr->tx_buffers[first]; + txbuf->eop = txd; + + bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + /* + * Advance the Transmit Descriptor Tail (Tdt), this tells the + * hardware that this frame is available to transmit. + */ + ++txr->total_packets; + IXGBE_WRITE_REG(&adapter->hw, txr->tail, i); + + /* Mark queue as having work */ + if (txr->busy == 0) + txr->busy = 1; + + return (0); +} + + +/********************************************************************* + * + * Allocate memory for tx_buffer structures. The tx_buffer stores all + * the information needed to transmit a packet on the wire. This is + * called only once at attach, setup is done every reset. + * + **********************************************************************/ +int +ixgbe_allocate_transmit_buffers(struct tx_ring *txr) +{ + struct adapter *adapter = txr->adapter; + device_t dev = adapter->dev; + struct ixgbe_tx_buf *txbuf; + int error, i; + + /* + * Setup DMA descriptor areas. 
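+	 * (One tag covers all TX buffers on this ring: at most
+	 * IXGBE_TSO_SIZE bytes per mapping, split into up to
+	 * adapter->num_segs segments of PAGE_SIZE each.)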
+ */ + if ((error = bus_dma_tag_create( + bus_get_dma_tag(adapter->dev), /* parent */ + 1, 0, /* alignment, bounds */ + BUS_SPACE_MAXADDR, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + IXGBE_TSO_SIZE, /* maxsize */ + adapter->num_segs, /* nsegments */ + PAGE_SIZE, /* maxsegsize */ + 0, /* flags */ + NULL, /* lockfunc */ + NULL, /* lockfuncarg */ + &txr->txtag))) { + device_printf(dev,"Unable to allocate TX DMA tag\n"); + goto fail; + } + + if (!(txr->tx_buffers = + (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) * + adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) { + device_printf(dev, "Unable to allocate tx_buffer memory\n"); + error = ENOMEM; + goto fail; + } + + /* Create the descriptor buffer dma maps */ + txbuf = txr->tx_buffers; + for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) { + error = bus_dmamap_create(txr->txtag, 0, &txbuf->map); + if (error != 0) { + device_printf(dev, "Unable to create TX DMA map\n"); + goto fail; + } + } + + return 0; +fail: + /* We free all, it handles case where we are in the middle */ + ixgbe_free_transmit_structures(adapter); + return (error); +} + +/********************************************************************* + * + * Initialize a transmit ring. + * + **********************************************************************/ +static void +ixgbe_setup_transmit_ring(struct tx_ring *txr) +{ + struct adapter *adapter = txr->adapter; + struct ixgbe_tx_buf *txbuf; + int i; +#ifdef DEV_NETMAP + struct netmap_adapter *na = NA(adapter->ifp); + struct netmap_slot *slot; +#endif /* DEV_NETMAP */ + + /* Clear the old ring contents */ + IXGBE_TX_LOCK(txr); +#ifdef DEV_NETMAP + /* + * (under lock): if in netmap mode, do some consistency + * checks and set slot to entry 0 of the netmap ring. + */ + slot = netmap_reset(na, NR_TX, txr->me, 0); +#endif /* DEV_NETMAP */ + bzero((void *)txr->tx_base, + (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc); + /* Reset indices */ + txr->next_avail_desc = 0; + txr->next_to_clean = 0; + + /* Free any existing tx buffers. */ + txbuf = txr->tx_buffers; + for (i = 0; i < txr->num_desc; i++, txbuf++) { + if (txbuf->m_head != NULL) { + bus_dmamap_sync(txr->txtag, txbuf->map, + BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(txr->txtag, txbuf->map); + m_freem(txbuf->m_head); + txbuf->m_head = NULL; + } +#ifdef DEV_NETMAP + /* + * In netmap mode, set the map for the packet buffer. + * NOTE: Some drivers (not this one) also need to set + * the physical buffer address in the NIC ring. + * Slots in the netmap ring (indexed by "si") are + * kring->nkr_hwofs positions "ahead" wrt the + * corresponding slot in the NIC ring. In some drivers + * (not here) nkr_hwofs can be negative. Function + * netmap_idx_n2k() handles wraparounds properly. + */ + if (slot) { + int si = netmap_idx_n2k(&na->tx_rings[txr->me], i); + netmap_load_map(na, txr->txtag, txbuf->map, NMB(na, slot + si)); + } +#endif /* DEV_NETMAP */ + /* Clear the EOP descriptor pointer */ + txbuf->eop = NULL; + } + +#ifdef IXGBE_FDIR + /* Set the rate at which we sample packets */ + if (adapter->hw.mac.type != ixgbe_mac_82598EB) + txr->atr_sample = atr_sample_rate; +#endif + + /* Set number of descriptors available */ + txr->tx_avail = adapter->num_tx_desc; + + bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + IXGBE_TX_UNLOCK(txr); +} + +/********************************************************************* + * + * Initialize all transmit rings. 
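+ * (A thin wrapper: calls ixgbe_setup_transmit_ring() on each
+ * of the adapter's TX rings in turn.)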
+ * + **********************************************************************/ +int +ixgbe_setup_transmit_structures(struct adapter *adapter) +{ + struct tx_ring *txr = adapter->tx_rings; + + for (int i = 0; i < adapter->num_queues; i++, txr++) + ixgbe_setup_transmit_ring(txr); + + return (0); +} + +/********************************************************************* + * + * Free all transmit rings. + * + **********************************************************************/ +void +ixgbe_free_transmit_structures(struct adapter *adapter) +{ + struct tx_ring *txr = adapter->tx_rings; + + for (int i = 0; i < adapter->num_queues; i++, txr++) { + IXGBE_TX_LOCK(txr); + ixgbe_free_transmit_buffers(txr); + ixgbe_dma_free(adapter, &txr->txdma); + IXGBE_TX_UNLOCK(txr); + IXGBE_TX_LOCK_DESTROY(txr); + } + free(adapter->tx_rings, M_DEVBUF); +} + +/********************************************************************* + * + * Free transmit ring related data structures. + * + **********************************************************************/ +static void +ixgbe_free_transmit_buffers(struct tx_ring *txr) +{ + struct adapter *adapter = txr->adapter; + struct ixgbe_tx_buf *tx_buffer; + int i; + + INIT_DEBUGOUT("ixgbe_free_transmit_ring: begin"); + + if (txr->tx_buffers == NULL) + return; + + tx_buffer = txr->tx_buffers; + for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) { + if (tx_buffer->m_head != NULL) { + bus_dmamap_sync(txr->txtag, tx_buffer->map, + BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(txr->txtag, + tx_buffer->map); + m_freem(tx_buffer->m_head); + tx_buffer->m_head = NULL; + if (tx_buffer->map != NULL) { + bus_dmamap_destroy(txr->txtag, + tx_buffer->map); + tx_buffer->map = NULL; + } + } else if (tx_buffer->map != NULL) { + bus_dmamap_unload(txr->txtag, + tx_buffer->map); + bus_dmamap_destroy(txr->txtag, + tx_buffer->map); + tx_buffer->map = NULL; + } + } +#ifdef IXGBE_LEGACY_TX + if (txr->br != NULL) + buf_ring_free(txr->br, M_DEVBUF); +#endif + if (txr->tx_buffers != NULL) { + free(txr->tx_buffers, M_DEVBUF); + txr->tx_buffers = NULL; + } + if (txr->txtag != NULL) { + bus_dma_tag_destroy(txr->txtag); + txr->txtag = NULL; + } + return; +} + +/********************************************************************* + * + * Advanced Context Descriptor setup for VLAN, CSUM or TSO + * + **********************************************************************/ + +static int +ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp, + u32 *cmd_type_len, u32 *olinfo_status) +{ + struct adapter *adapter = txr->adapter; + struct ixgbe_adv_tx_context_desc *TXD; + struct ether_vlan_header *eh; + struct ip *ip; + struct ip6_hdr *ip6; + u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0; + int ehdrlen, ip_hlen = 0; + u16 etype; + u8 ipproto = 0; + int offload = TRUE; + int ctxd = txr->next_avail_desc; + u16 vtag = 0; + + /* First check if TSO is to be used */ + if (mp->m_pkthdr.csum_flags & CSUM_TSO) + return (ixgbe_tso_setup(txr, mp, cmd_type_len, olinfo_status)); + + if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0) + offload = FALSE; + + /* Indicate the whole packet as payload when not doing TSO */ + *olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT; + + /* Now ready a context descriptor */ + TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd]; + + /* + ** In advanced descriptors the vlan tag must + ** be placed into the context descriptor. Hence + ** we need to make one even if not doing offloads. 
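+	** (One exception below: with no VLAN tag and no offloads
+	** we return early and skip it, unless this is an X550 VF,
+	** which always gets a context descriptor.)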
+ */ + if (mp->m_flags & M_VLANTAG) { + vtag = htole16(mp->m_pkthdr.ether_vtag); + vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT); + } + else if (!IXGBE_IS_X550VF(adapter) && (offload == FALSE)) + return (0); + + /* + * Determine where frame payload starts. + * Jump over vlan headers if already present, + * helpful for QinQ too. + */ + eh = mtod(mp, struct ether_vlan_header *); + if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { + etype = ntohs(eh->evl_proto); + ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; + } else { + etype = ntohs(eh->evl_encap_proto); + ehdrlen = ETHER_HDR_LEN; + } + + /* Set the ether header length */ + vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT; + + if (offload == FALSE) + goto no_offloads; + + switch (etype) { + case ETHERTYPE_IP: + ip = (struct ip *)(mp->m_data + ehdrlen); + ip_hlen = ip->ip_hl << 2; + ipproto = ip->ip_p; + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; + break; + case ETHERTYPE_IPV6: + ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); + ip_hlen = sizeof(struct ip6_hdr); + /* XXX-BZ this will go badly in case of ext hdrs. */ + ipproto = ip6->ip6_nxt; + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6; + break; + default: + offload = FALSE; + break; + } + + vlan_macip_lens |= ip_hlen; + + switch (ipproto) { + case IPPROTO_TCP: + if (mp->m_pkthdr.csum_flags & CSUM_TCP) + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; + break; + + case IPPROTO_UDP: + if (mp->m_pkthdr.csum_flags & CSUM_UDP) + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP; + break; + +#if __FreeBSD_version >= 800000 + case IPPROTO_SCTP: + if (mp->m_pkthdr.csum_flags & CSUM_SCTP) + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP; + break; +#endif + default: + offload = FALSE; + break; + } + + if (offload) /* For the TX descriptor setup */ + *olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8; + +no_offloads: + type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; + + /* Now copy bits into descriptor */ + TXD->vlan_macip_lens = htole32(vlan_macip_lens); + TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl); + TXD->seqnum_seed = htole32(0); + TXD->mss_l4len_idx = htole32(0); + + /* We've consumed the first desc, adjust counters */ + if (++ctxd == txr->num_desc) + ctxd = 0; + txr->next_avail_desc = ctxd; + --txr->tx_avail; + + return (0); +} + +/********************************************************************** + * + * Setup work for hardware segmentation offload (TSO) on + * adapters using advanced tx descriptors + * + **********************************************************************/ +static int +ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, + u32 *cmd_type_len, u32 *olinfo_status) +{ + struct ixgbe_adv_tx_context_desc *TXD; + u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0; + u32 mss_l4len_idx = 0, paylen; + u16 vtag = 0, eh_type; + int ctxd, ehdrlen, ip_hlen, tcp_hlen; + struct ether_vlan_header *eh; +#ifdef INET6 + struct ip6_hdr *ip6; +#endif +#ifdef INET + struct ip *ip; +#endif + struct tcphdr *th; + + + /* + * Determine where frame payload starts. + * Jump over vlan headers if already present + */ + eh = mtod(mp, struct ether_vlan_header *); + if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { + ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; + eh_type = eh->evl_proto; + } else { + ehdrlen = ETHER_HDR_LEN; + eh_type = eh->evl_encap_proto; + } + + switch (ntohs(eh_type)) { +#ifdef INET6 + case ETHERTYPE_IPV6: + ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); + /* XXX-BZ For now we do not pretend to support ext. hdrs. 
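+		   (a next header other than TCP is rejected with
+		   ENXIO just below)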
*/ + if (ip6->ip6_nxt != IPPROTO_TCP) + return (ENXIO); + ip_hlen = sizeof(struct ip6_hdr); + ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); + th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen); + th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0); + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6; + break; +#endif +#ifdef INET + case ETHERTYPE_IP: + ip = (struct ip *)(mp->m_data + ehdrlen); + if (ip->ip_p != IPPROTO_TCP) + return (ENXIO); + ip->ip_sum = 0; + ip_hlen = ip->ip_hl << 2; + th = (struct tcphdr *)((caddr_t)ip + ip_hlen); + th->th_sum = in_pseudo(ip->ip_src.s_addr, + ip->ip_dst.s_addr, htons(IPPROTO_TCP)); + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; + /* Tell transmit desc to also do IPv4 checksum. */ + *olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8; + break; +#endif + default: + panic("%s: CSUM_TSO but no supported IP version (0x%04x)", + __func__, ntohs(eh_type)); + break; + } + + ctxd = txr->next_avail_desc; + TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd]; + + tcp_hlen = th->th_off << 2; + + /* This is used in the transmit desc in encap */ + paylen = mp->m_pkthdr.len - ehdrlen - ip_hlen - tcp_hlen; + + /* VLAN MACLEN IPLEN */ + if (mp->m_flags & M_VLANTAG) { + vtag = htole16(mp->m_pkthdr.ether_vtag); + vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT); + } + + vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= ip_hlen; + TXD->vlan_macip_lens = htole32(vlan_macip_lens); + + /* ADV DTYPE TUCMD */ + type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; + TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl); + + /* MSS L4LEN IDX */ + mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT); + mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT); + TXD->mss_l4len_idx = htole32(mss_l4len_idx); + + TXD->seqnum_seed = htole32(0); + + if (++ctxd == txr->num_desc) + ctxd = 0; + + txr->tx_avail--; + txr->next_avail_desc = ctxd; + *cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; + *olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8; + *olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT; + ++txr->tso_tx; + return (0); +} + + +/********************************************************************** + * + * Examine each tx_buffer in the used queue. If the hardware is done + * processing the packet then free associated resources. The + * tx_buffer is put back on the free queue. + * + **********************************************************************/ +void +ixgbe_txeof(struct tx_ring *txr) +{ +#ifdef DEV_NETMAP + struct adapter *adapter = txr->adapter; + struct ifnet *ifp = adapter->ifp; +#endif + u32 work, processed = 0; + u16 limit = txr->process_limit; + struct ixgbe_tx_buf *buf; + union ixgbe_adv_tx_desc *txd; + + mtx_assert(&txr->tx_mtx, MA_OWNED); + +#ifdef DEV_NETMAP + if (ifp->if_capenable & IFCAP_NETMAP) { + struct netmap_adapter *na = NA(ifp); + struct netmap_kring *kring = &na->tx_rings[txr->me]; + txd = txr->tx_base; + bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, + BUS_DMASYNC_POSTREAD); + /* + * In netmap mode, all the work is done in the context + * of the client thread. Interrupt handlers only wake up + * clients, which may be sleeping on individual rings + * or on a global resource for all rings. + * To implement tx interrupt mitigation, we wake up the client + * thread roughly every half ring, even if the NIC interrupts + * more frequently. 
This is implemented as follows: + * - ixgbe_txsync() sets kring->nr_kflags with the index of + * the slot that should wake up the thread (nkr_num_slots + * means the user thread should not be woken up); + * - the driver ignores tx interrupts unless netmap_mitigate=0 + * or the slot has the DD bit set. + */ + if (!netmap_mitigate || + (kring->nr_kflags < kring->nkr_num_slots && + txd[kring->nr_kflags].wb.status & IXGBE_TXD_STAT_DD)) { + netmap_tx_irq(ifp, txr->me); + } + return; + } +#endif /* DEV_NETMAP */ + + if (txr->tx_avail == txr->num_desc) { + txr->busy = 0; + return; + } + + /* Get work starting point */ + work = txr->next_to_clean; + buf = &txr->tx_buffers[work]; + txd = &txr->tx_base[work]; + work -= txr->num_desc; /* The distance to ring end */ + bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, + BUS_DMASYNC_POSTREAD); + + do { + union ixgbe_adv_tx_desc *eop= buf->eop; + if (eop == NULL) /* No work */ + break; + + if ((eop->wb.status & IXGBE_TXD_STAT_DD) == 0) + break; /* I/O not complete */ + + if (buf->m_head) { + txr->bytes += + buf->m_head->m_pkthdr.len; + bus_dmamap_sync(txr->txtag, + buf->map, + BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(txr->txtag, + buf->map); + m_freem(buf->m_head); + buf->m_head = NULL; + } + buf->eop = NULL; + ++txr->tx_avail; + + /* We clean the range if multi segment */ + while (txd != eop) { + ++txd; + ++buf; + ++work; + /* wrap the ring? */ + if (__predict_false(!work)) { + work -= txr->num_desc; + buf = txr->tx_buffers; + txd = txr->tx_base; + } + if (buf->m_head) { + txr->bytes += + buf->m_head->m_pkthdr.len; + bus_dmamap_sync(txr->txtag, + buf->map, + BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(txr->txtag, + buf->map); + m_freem(buf->m_head); + buf->m_head = NULL; + } + ++txr->tx_avail; + buf->eop = NULL; + + } + ++txr->packets; + ++processed; + + /* Try the next packet */ + ++txd; + ++buf; + ++work; + /* reset with a wrap */ + if (__predict_false(!work)) { + work -= txr->num_desc; + buf = txr->tx_buffers; + txd = txr->tx_base; + } + prefetch(txd); + } while (__predict_true(--limit)); + + bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + + work += txr->num_desc; + txr->next_to_clean = work; + + /* + ** Queue Hang detection, we know there's + ** work outstanding or the first return + ** would have been taken, so increment busy + ** if nothing managed to get cleaned, then + ** in local_timer it will be checked and + ** marked as HUNG if it exceeds a MAX attempt. + */ + if ((processed == 0) && (txr->busy != IXGBE_QUEUE_HUNG)) + ++txr->busy; + /* + ** If anything gets cleaned we reset state to 1, + ** note this will turn off HUNG if its set. + */ + if (processed) + txr->busy = 1; + + if (txr->tx_avail == txr->num_desc) + txr->busy = 0; + + return; +} + + +#ifdef IXGBE_FDIR +/* +** This routine parses packet headers so that Flow +** Director can make a hashed filter table entry +** allowing traffic flows to be identified and kept +** on the same cpu. This would be a performance +** hit, but we only do it at IXGBE_FDIR_RATE of +** packets. 
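+** (With the default atr_sample_rate of 20 above, every 20th
+** packet on a ring is parsed and handed to
+** ixgbe_fdir_add_signature_filter_82599().)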
+*/ +static void +ixgbe_atr(struct tx_ring *txr, struct mbuf *mp) +{ + struct adapter *adapter = txr->adapter; + struct ix_queue *que; + struct ip *ip; + struct tcphdr *th; + struct udphdr *uh; + struct ether_vlan_header *eh; + union ixgbe_atr_hash_dword input = {.dword = 0}; + union ixgbe_atr_hash_dword common = {.dword = 0}; + int ehdrlen, ip_hlen; + u16 etype; + + eh = mtod(mp, struct ether_vlan_header *); + if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { + ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; + etype = eh->evl_proto; + } else { + ehdrlen = ETHER_HDR_LEN; + etype = eh->evl_encap_proto; + } + + /* Only handling IPv4 */ + if (etype != htons(ETHERTYPE_IP)) + return; + + ip = (struct ip *)(mp->m_data + ehdrlen); + ip_hlen = ip->ip_hl << 2; + + /* check if we're UDP or TCP */ + switch (ip->ip_p) { + case IPPROTO_TCP: + th = (struct tcphdr *)((caddr_t)ip + ip_hlen); + /* src and dst are inverted */ + common.port.dst ^= th->th_sport; + common.port.src ^= th->th_dport; + input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_TCPV4; + break; + case IPPROTO_UDP: + uh = (struct udphdr *)((caddr_t)ip + ip_hlen); + /* src and dst are inverted */ + common.port.dst ^= uh->uh_sport; + common.port.src ^= uh->uh_dport; + input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_UDPV4; + break; + default: + return; + } + + input.formatted.vlan_id = htobe16(mp->m_pkthdr.ether_vtag); + if (mp->m_pkthdr.ether_vtag) + common.flex_bytes ^= htons(ETHERTYPE_VLAN); + else + common.flex_bytes ^= etype; + common.ip ^= ip->ip_src.s_addr ^ ip->ip_dst.s_addr; + + que = &adapter->queues[txr->me]; + /* + ** This assumes the Rx queue and Tx + ** queue are bound to the same CPU + */ + ixgbe_fdir_add_signature_filter_82599(&adapter->hw, + input, common, que->msix); +} +#endif /* IXGBE_FDIR */ + +/* +** Used to detect a descriptor that has +** been merged by Hardware RSC. +*/ +static inline u32 +ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx) +{ + return (le32toh(rx->wb.lower.lo_dword.data) & + IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT; +} + +/********************************************************************* + * + * Initialize Hardware RSC (LRO) feature on 82599 + * for an RX ring, this is toggled by the LRO capability + * even though it is transparent to the stack. + * + * NOTE: since this HW feature only works with IPV4 and + * our testing has shown soft LRO to be as effective + * I have decided to disable this by default. 
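+ *       (To experiment with it, flip the compile-time
+ *       ixgbe_rsc_enable flag above to TRUE.)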
+ *
+ **********************************************************************/
+static void
+ixgbe_setup_hw_rsc(struct rx_ring *rxr)
+{
+	struct adapter *adapter = rxr->adapter;
+	struct ixgbe_hw	*hw = &adapter->hw;
+	u32 rscctrl, rdrxctl;
+
+	/* If turning LRO/RSC off we need to disable it */
+	if ((adapter->ifp->if_capenable & IFCAP_LRO) == 0) {
+		rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
+		rscctrl &= ~IXGBE_RSCCTL_RSCEN;
+		/* Write the cleared enable bit back out */
+		IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
+		return;
+	}
+
+	rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+	rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
+#ifdef DEV_NETMAP /* crcstrip is optional in netmap */
+	if (adapter->ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip)
+#endif /* DEV_NETMAP */
+	rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
+	rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
+	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
+
+	rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
+	rscctrl |= IXGBE_RSCCTL_RSCEN;
+	/*
+	** Limit the total number of descriptors that
+	** can be combined, so it does not exceed 64K
+	*/
+	if (rxr->mbuf_sz == MCLBYTES)
+		rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
+	else if (rxr->mbuf_sz == MJUMPAGESIZE)
+		rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
+	else if (rxr->mbuf_sz == MJUM9BYTES)
+		rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
+	else  /* Using 16K cluster */
+		rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
+
+	IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
+
+	/* Enable TCP header recognition */
+	IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0),
+	    (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) |
+	    IXGBE_PSRTYPE_TCPHDR));
+
+	/* Disable RSC for ACK packets */
+	IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
+	    (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
+
+	rxr->hw_rsc = TRUE;
+}
+
+/*********************************************************************
+ *
+ * Refresh mbuf buffers for RX descriptor rings.
+ * - now keeps its own state, so discards due to resource
+ *   exhaustion are unnecessary; if an mbuf cannot be obtained
+ *   it just returns, keeping its placeholder, and can simply
+ *   be recalled to try again.
+ *
+ **********************************************************************/
+static void
+ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
+{
+	struct adapter		*adapter = rxr->adapter;
+	bus_dma_segment_t	seg[1];
+	struct ixgbe_rx_buf	*rxbuf;
+	struct mbuf		*mp;
+	int			i, j, nsegs, error;
+	bool			refreshed = FALSE;
+
+	i = j = rxr->next_to_refresh;
+	/* Control the loop with one beyond */
+	if (++j == rxr->num_desc)
+		j = 0;
+
+	while (j != limit) {
+		rxbuf = &rxr->rx_buffers[i];
+		if (rxbuf->buf == NULL) {
+			mp = m_getjcl(M_NOWAIT, MT_DATA,
+			    M_PKTHDR, rxr->mbuf_sz);
+			if (mp == NULL)
+				goto update;
+			if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
+				m_adj(mp, ETHER_ALIGN);
+		} else
+			mp = rxbuf->buf;
+
+		mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
+
+		/* If we're dealing with an mbuf that was copied rather
+		 * than replaced, there's no need to go through busdma. 
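+		 * (The RX path sets IXGBE_RX_COPY when a small frame
+		 * was copied into a fresh mbuf; the cached DMA address
+		 * in rxbuf->addr is then still valid and is reused.)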
+/*********************************************************************
+ *
+ *  Refresh mbuf buffers for RX descriptor rings.
+ *   - Now keeps its own state, so discards due to resource
+ *     exhaustion are unnecessary: if an mbuf cannot be obtained
+ *     it just returns, keeping its placeholder, and thus it can
+ *     simply be recalled to try again.
+ *
+ **********************************************************************/
+static void
+ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
+{
+	struct adapter		*adapter = rxr->adapter;
+	bus_dma_segment_t	seg[1];
+	struct ixgbe_rx_buf	*rxbuf;
+	struct mbuf		*mp;
+	int			i, j, nsegs, error;
+	bool			refreshed = FALSE;
+
+	i = j = rxr->next_to_refresh;
+	/* Control the loop with one beyond */
+	if (++j == rxr->num_desc)
+		j = 0;
+
+	while (j != limit) {
+		rxbuf = &rxr->rx_buffers[i];
+		if (rxbuf->buf == NULL) {
+			mp = m_getjcl(M_NOWAIT, MT_DATA,
+			    M_PKTHDR, rxr->mbuf_sz);
+			if (mp == NULL)
+				goto update;
+			if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
+				m_adj(mp, ETHER_ALIGN);
+		} else
+			mp = rxbuf->buf;
+
+		mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
+
+		/* If we're dealing with an mbuf that was copied rather
+		 * than replaced, there's no need to go through busdma.
+		 */
+		if ((rxbuf->flags & IXGBE_RX_COPY) == 0) {
+			/* Get the memory mapping */
+			bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
+			error = bus_dmamap_load_mbuf_sg(rxr->ptag,
+			    rxbuf->pmap, mp, seg, &nsegs, BUS_DMA_NOWAIT);
+			if (error != 0) {
+				printf("Refresh mbufs: payload dmamap load"
+				    " failure - %d\n", error);
+				m_free(mp);
+				rxbuf->buf = NULL;
+				goto update;
+			}
+			rxbuf->buf = mp;
+			bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
+			    BUS_DMASYNC_PREREAD);
+			rxbuf->addr = rxr->rx_base[i].read.pkt_addr =
+			    htole64(seg[0].ds_addr);
+		} else {
+			rxr->rx_base[i].read.pkt_addr = rxbuf->addr;
+			rxbuf->flags &= ~IXGBE_RX_COPY;
+		}
+
+		refreshed = TRUE;
+		/* Next is precalculated */
+		i = j;
+		rxr->next_to_refresh = i;
+		if (++j == rxr->num_desc)
+			j = 0;
+	}
+update:
+	if (refreshed) /* Update hardware tail index */
+		IXGBE_WRITE_REG(&adapter->hw,
+		    rxr->tail, rxr->next_to_refresh);
+	return;
+}
+
+/*********************************************************************
+ *
+ *  Allocate memory for rx_buffer structures. Since we use one
+ *  rx_buffer per received packet, the maximum number of rx_buffers
+ *  that we'll need is equal to the number of receive descriptors
+ *  that we've allocated.
+ *
+ **********************************************************************/
+int
+ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
+{
+	struct adapter		*adapter = rxr->adapter;
+	device_t		dev = adapter->dev;
+	struct ixgbe_rx_buf	*rxbuf;
+	int			i, bsize, error;
+
+	bsize = sizeof(struct ixgbe_rx_buf) * rxr->num_desc;
+	if (!(rxr->rx_buffers = (struct ixgbe_rx_buf *) malloc(bsize,
+	    M_DEVBUF, M_NOWAIT | M_ZERO))) {
+		device_printf(dev, "Unable to allocate rx_buffer memory\n");
+		error = ENOMEM;
+		goto fail;
+	}
+
+	if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
+	    1, 0,		/* alignment, bounds */
+	    BUS_SPACE_MAXADDR,	/* lowaddr */
+	    BUS_SPACE_MAXADDR,	/* highaddr */
+	    NULL, NULL,		/* filter, filterarg */
+	    MJUM16BYTES,	/* maxsize */
+	    1,			/* nsegments */
+	    MJUM16BYTES,	/* maxsegsize */
+	    0,			/* flags */
+	    NULL,		/* lockfunc */
+	    NULL,		/* lockfuncarg */
+	    &rxr->ptag))) {
+		device_printf(dev, "Unable to create RX DMA tag\n");
+		goto fail;
+	}
+
+	for (i = 0; i < rxr->num_desc; i++) {
+		rxbuf = &rxr->rx_buffers[i];
+		error = bus_dmamap_create(rxr->ptag, 0, &rxbuf->pmap);
+		if (error) {
+			device_printf(dev, "Unable to create RX dma map\n");
+			goto fail;
+		}
+	}
+
+	return (0);
+
+fail:
+	/* Frees all, but can handle partial completion */
+	ixgbe_free_receive_structures(adapter);
+	return (error);
+}
+
+static void
+ixgbe_free_receive_ring(struct rx_ring *rxr)
+{
+	struct ixgbe_rx_buf	*rxbuf;
+	int			i;
+
+	for (i = 0; i < rxr->num_desc; i++) {
+		rxbuf = &rxr->rx_buffers[i];
+		if (rxbuf->buf != NULL) {
+			bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
+			    BUS_DMASYNC_POSTREAD);
+			bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
+			rxbuf->buf->m_flags |= M_PKTHDR;
+			m_freem(rxbuf->buf);
+			rxbuf->buf = NULL;
+			rxbuf->flags = 0;
+		}
+	}
+}
+
+/*********************************************************************
+ *
+ *  Initialize a receive ring and its buffers.
+ * + **********************************************************************/ +static int +ixgbe_setup_receive_ring(struct rx_ring *rxr) +{ + struct adapter *adapter; + struct ifnet *ifp; + device_t dev; + struct ixgbe_rx_buf *rxbuf; + bus_dma_segment_t seg[1]; + struct lro_ctrl *lro = &rxr->lro; + int rsize, nsegs, error = 0; +#ifdef DEV_NETMAP + struct netmap_adapter *na = NA(rxr->adapter->ifp); + struct netmap_slot *slot; +#endif /* DEV_NETMAP */ + + adapter = rxr->adapter; + ifp = adapter->ifp; + dev = adapter->dev; + + /* Clear the ring contents */ + IXGBE_RX_LOCK(rxr); +#ifdef DEV_NETMAP + /* same as in ixgbe_setup_transmit_ring() */ + slot = netmap_reset(na, NR_RX, rxr->me, 0); +#endif /* DEV_NETMAP */ + rsize = roundup2(adapter->num_rx_desc * + sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN); + bzero((void *)rxr->rx_base, rsize); + /* Cache the size */ + rxr->mbuf_sz = adapter->rx_mbuf_sz; + + /* Free current RX buffer structs and their mbufs */ + ixgbe_free_receive_ring(rxr); + + /* Now replenish the mbufs */ + for (int j = 0; j != rxr->num_desc; ++j) { + struct mbuf *mp; + + rxbuf = &rxr->rx_buffers[j]; +#ifdef DEV_NETMAP + /* + * In netmap mode, fill the map and set the buffer + * address in the NIC ring, considering the offset + * between the netmap and NIC rings (see comment in + * ixgbe_setup_transmit_ring() ). No need to allocate + * an mbuf, so end the block with a continue; + */ + if (slot) { + int sj = netmap_idx_n2k(&na->rx_rings[rxr->me], j); + uint64_t paddr; + void *addr; + + addr = PNMB(na, slot + sj, &paddr); + netmap_load_map(na, rxr->ptag, rxbuf->pmap, addr); + /* Update descriptor and the cached value */ + rxr->rx_base[j].read.pkt_addr = htole64(paddr); + rxbuf->addr = htole64(paddr); + continue; + } +#endif /* DEV_NETMAP */ + rxbuf->flags = 0; + rxbuf->buf = m_getjcl(M_NOWAIT, MT_DATA, + M_PKTHDR, adapter->rx_mbuf_sz); + if (rxbuf->buf == NULL) { + error = ENOBUFS; + goto fail; + } + mp = rxbuf->buf; + mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz; + /* Get the memory mapping */ + error = bus_dmamap_load_mbuf_sg(rxr->ptag, + rxbuf->pmap, mp, seg, + &nsegs, BUS_DMA_NOWAIT); + if (error != 0) + goto fail; + bus_dmamap_sync(rxr->ptag, + rxbuf->pmap, BUS_DMASYNC_PREREAD); + /* Update the descriptor and the cached value */ + rxr->rx_base[j].read.pkt_addr = htole64(seg[0].ds_addr); + rxbuf->addr = htole64(seg[0].ds_addr); + } + + + /* Setup our descriptor indices */ + rxr->next_to_check = 0; + rxr->next_to_refresh = 0; + rxr->lro_enabled = FALSE; + rxr->rx_copies = 0; + rxr->rx_bytes = 0; + rxr->vtag_strip = FALSE; + + bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + + /* + ** Now set up the LRO interface: + */ + if (ixgbe_rsc_enable) + ixgbe_setup_hw_rsc(rxr); + else if (ifp->if_capenable & IFCAP_LRO) { + int err = tcp_lro_init(lro); + if (err) { + device_printf(dev, "LRO Initialization failed!\n"); + goto fail; + } + INIT_DEBUGOUT("RX Soft LRO Initialized\n"); + rxr->lro_enabled = TRUE; + lro->ifp = adapter->ifp; + } + + IXGBE_RX_UNLOCK(rxr); + return (0); + +fail: + ixgbe_free_receive_ring(rxr); + IXGBE_RX_UNLOCK(rxr); + return (error); +} + +/********************************************************************* + * + * Initialize all receive rings. 
+ *
+ **********************************************************************/
+int
+ixgbe_setup_receive_structures(struct adapter *adapter)
+{
+	struct rx_ring *rxr = adapter->rx_rings;
+	int j;
+
+	for (j = 0; j < adapter->num_queues; j++, rxr++)
+		if (ixgbe_setup_receive_ring(rxr))
+			goto fail;
+
+	return (0);
+fail:
+	/*
+	 * Free RX buffers allocated so far; we only handle the rings
+	 * that completed, since the failing case will have cleaned up
+	 * for itself. 'j' is the ring that failed, so it's the terminus.
+	 */
+	for (int i = 0; i < j; ++i) {
+		rxr = &adapter->rx_rings[i];
+		ixgbe_free_receive_ring(rxr);
+	}
+
+	return (ENOBUFS);
+}
+
+/*********************************************************************
+ *
+ *  Free all receive rings.
+ *
+ **********************************************************************/
+void
+ixgbe_free_receive_structures(struct adapter *adapter)
+{
+	struct rx_ring *rxr = adapter->rx_rings;
+
+	INIT_DEBUGOUT("ixgbe_free_receive_structures: begin");
+
+	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
+		struct lro_ctrl *lro = &rxr->lro;
+		ixgbe_free_receive_buffers(rxr);
+		/* Free LRO memory */
+		tcp_lro_free(lro);
+		/* Free the ring memory as well */
+		ixgbe_dma_free(adapter, &rxr->rxdma);
+	}
+
+	free(adapter->rx_rings, M_DEVBUF);
+}
+
+/*********************************************************************
+ *
+ *  Free receive ring data structures
+ *
+ **********************************************************************/
+void
+ixgbe_free_receive_buffers(struct rx_ring *rxr)
+{
+	struct adapter		*adapter = rxr->adapter;
+	struct ixgbe_rx_buf	*rxbuf;
+
+	INIT_DEBUGOUT("ixgbe_free_receive_buffers: begin");
+
+	/* Cleanup any existing buffers */
+	if (rxr->rx_buffers != NULL) {
+		for (int i = 0; i < adapter->num_rx_desc; i++) {
+			rxbuf = &rxr->rx_buffers[i];
+			if (rxbuf->buf != NULL) {
+				bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
+				    BUS_DMASYNC_POSTREAD);
+				bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
+				rxbuf->buf->m_flags |= M_PKTHDR;
+				m_freem(rxbuf->buf);
+			}
+			rxbuf->buf = NULL;
+			if (rxbuf->pmap != NULL) {
+				bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
+				rxbuf->pmap = NULL;
+			}
+		}
+		free(rxr->rx_buffers, M_DEVBUF);
+		rxr->rx_buffers = NULL;
+	}
+
+	if (rxr->ptag != NULL) {
+		bus_dma_tag_destroy(rxr->ptag);
+		rxr->ptag = NULL;
+	}
+
+	return;
+}
+
+static __inline void
+ixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
+{
+
+	/*
+	 * At the moment, LRO is only for IP/TCP packets, and the TCP
+	 * checksum of the packet must have been computed by hardware.
+	 * The packet must also not have a VLAN tag in its Ethernet
+	 * header. For IPv6 we do not yet support extension headers.
+	 */
+	if (rxr->lro_enabled &&
+	    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
+	    (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
+	    ((ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
+	    (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) ||
+	    (ptype & (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
+	    (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) &&
+	    (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
+	    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
+		/*
+		 * Fall through to if_input below if there are no LRO
+		 * resources, or if the LRO enqueue fails.
+		 */
+		if (rxr->lro.lro_cnt != 0)
+			if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
+				return;
+	}
+	IXGBE_RX_UNLOCK(rxr);
+	(*ifp->if_input)(ifp, m);
+	IXGBE_RX_LOCK(rxr);
+}
+
+static __inline void
+ixgbe_rx_discard(struct rx_ring *rxr, int i)
+{
+	struct ixgbe_rx_buf	*rbuf;
+
+	rbuf = &rxr->rx_buffers[i];
+
+	/*
+	** With advanced descriptors the writeback
+	** clobbers the buffer addrs, so it's easier
+	** to just free the existing mbufs and take
+	** the normal refresh path to get new buffers
+	** and mapping.
+	*/
+	if (rbuf->fmp != NULL) {	/* Partial chain ? */
+		rbuf->fmp->m_flags |= M_PKTHDR;
+		m_freem(rbuf->fmp);
+		rbuf->fmp = NULL;
+		rbuf->buf = NULL;	/* rbuf->buf is part of fmp's chain */
+	} else if (rbuf->buf) {
+		m_free(rbuf->buf);
+		rbuf->buf = NULL;
+	}
+	bus_dmamap_unload(rxr->ptag, rbuf->pmap);
+
+	rbuf->flags = 0;
+
+	return;
+}
+
+/*********************************************************************
+ *
+ *  This routine executes in interrupt context. It replenishes
+ *  the mbufs in the descriptor ring and sends data which has been
+ *  DMA'ed into host memory to the upper layer.
+ *
+ *  Return TRUE for more work, FALSE for all clean.
+ *********************************************************************/
+bool
+ixgbe_rxeof(struct ix_queue *que)
+{
+	struct adapter		*adapter = que->adapter;
+	struct rx_ring		*rxr = que->rxr;
+	struct ifnet		*ifp = adapter->ifp;
+	struct lro_ctrl		*lro = &rxr->lro;
+	struct lro_entry	*queued;
+	int			i, nextp, processed = 0;
+	u32			staterr = 0;
+	u16			count = rxr->process_limit;
+	union ixgbe_adv_rx_desc	*cur;
+	struct ixgbe_rx_buf	*rbuf, *nbuf;
+	u16			pkt_info;
+
+	IXGBE_RX_LOCK(rxr);
+
+#ifdef DEV_NETMAP
+	/* Same as the txeof routine: wakeup clients on intr. */
+	if (netmap_rx_irq(ifp, rxr->me, &processed)) {
+		IXGBE_RX_UNLOCK(rxr);
+		return (FALSE);
+	}
+#endif /* DEV_NETMAP */
+
+	for (i = rxr->next_to_check; count != 0;) {
+		struct mbuf	*sendmp, *mp;
+		u32		rsc, ptype;
+		u16		len;
+		u16		vtag = 0;
+		bool		eop;
+
+		/* Sync the ring. */
+		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
+		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+		cur = &rxr->rx_base[i];
+		staterr = le32toh(cur->wb.upper.status_error);
+		pkt_info = le16toh(cur->wb.lower.lo_dword.hs_rss.pkt_info);
+
+		if ((staterr & IXGBE_RXD_STAT_DD) == 0)
+			break;
+		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+			break;
+
+		count--;
+		sendmp = NULL;
+		nbuf = NULL;
+		rsc = 0;
+		cur->wb.upper.status_error = 0;
+		rbuf = &rxr->rx_buffers[i];
+		mp = rbuf->buf;
+
+		len = le16toh(cur->wb.upper.length);
+		ptype = le32toh(cur->wb.lower.lo_dword.data) &
+		    IXGBE_RXDADV_PKTTYPE_MASK;
+		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
+
+		/* Make sure bad packets are discarded */
+		if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) {
+#if __FreeBSD_version >= 1100036
+			if (IXGBE_IS_VF(adapter))
+				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+#endif
+			rxr->rx_discarded++;
+			ixgbe_rx_discard(rxr, i);
+			goto next_desc;
+		}
+
+		/*
+		** On 82599, which supports hardware LRO (called HW RSC),
+		** packets need not be fragmented across sequential
+		** descriptors; rather, the next descriptor of a frame
+		** is indicated in bits of the current descriptor.
+		** This also means that we might process more than one
+		** packet at a time, something that has never been true
+		** before; it required eliminating the global chain
+		** pointers in favor of what we are doing here. -jfv
+		*/
+		if (!eop) {
+			/*
+			** Figure out the next descriptor
+			** of this frame.
+			*/
+			if (rxr->hw_rsc == TRUE) {
+				rsc = ixgbe_rsc_count(cur);
+				rxr->rsc_num += (rsc - 1);
+			}
+			if (rsc) { /* Get hardware index */
+				nextp = ((staterr &
+				    IXGBE_RXDADV_NEXTP_MASK) >>
+				    IXGBE_RXDADV_NEXTP_SHIFT);
+			} else { /* Just sequential */
+				nextp = i + 1;
+				if (nextp == adapter->num_rx_desc)
+					nextp = 0;
+			}
+			nbuf = &rxr->rx_buffers[nextp];
+			prefetch(nbuf);
+		}
+		/*
+		** Rather than using the fmp/lmp global pointers
+		** we now keep the head of a packet chain in the
+		** buffer struct and pass this along from one
+		** descriptor to the next, until we get EOP.
+		*/
+		mp->m_len = len;
+		/*
+		** See if there is a stored head that tells us
+		** whether this is a secondary fragment.
+		*/
+		sendmp = rbuf->fmp;
+		if (sendmp != NULL) { /* secondary frag */
+			rbuf->buf = rbuf->fmp = NULL;
+			mp->m_flags &= ~M_PKTHDR;
+			sendmp->m_pkthdr.len += mp->m_len;
+		} else {
+			/*
+			 * Optimize. This might be a small packet,
+			 * maybe just a TCP ACK. Do a fast copy that
+			 * is cache aligned into a new mbuf, and
+			 * leave the old mbuf+cluster for re-use.
+			 */
+			if (eop && len <= IXGBE_RX_COPY_LEN) {
+				sendmp = m_gethdr(M_NOWAIT, MT_DATA);
+				if (sendmp != NULL) {
+					sendmp->m_data +=
+					    IXGBE_RX_COPY_ALIGN;
+					ixgbe_bcopy(mp->m_data,
+					    sendmp->m_data, len);
+					sendmp->m_len = len;
+					rxr->rx_copies++;
+					rbuf->flags |= IXGBE_RX_COPY;
+				}
+			}
+			if (sendmp == NULL) {
+				rbuf->buf = rbuf->fmp = NULL;
+				sendmp = mp;
+			}
+
+			/* first desc of a non-ps chain */
+			sendmp->m_flags |= M_PKTHDR;
+			sendmp->m_pkthdr.len = mp->m_len;
+		}
+		++processed;
+
+		/* Pass the head pointer on */
+		if (eop == 0) {
+			nbuf->fmp = sendmp;
+			sendmp = NULL;
+			mp->m_next = nbuf->buf;
+		} else { /* Sending this frame */
+			sendmp->m_pkthdr.rcvif = ifp;
+			rxr->rx_packets++;
+			/* capture data for AIM */
+			rxr->bytes += sendmp->m_pkthdr.len;
+			rxr->rx_bytes += sendmp->m_pkthdr.len;
+			/* Process vlan info */
+			if ((rxr->vtag_strip) &&
+			    (staterr & IXGBE_RXD_STAT_VP))
+				vtag = le16toh(cur->wb.upper.vlan);
+			if (vtag) {
+				sendmp->m_pkthdr.ether_vtag = vtag;
+				sendmp->m_flags |= M_VLANTAG;
+			}
+			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
+				ixgbe_rx_checksum(staterr, sendmp, ptype);
+#if __FreeBSD_version >= 800000
+			sendmp->m_pkthdr.flowid = que->msix;
+#endif /* FreeBSD_version */
+		}
+next_desc:
+		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
+		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+		/* Advance our pointers to the next descriptor. */
+		if (++i == rxr->num_desc)
+			i = 0;
+
+		/* Now send to the stack or do LRO */
+		if (sendmp != NULL) {
+			rxr->next_to_check = i;
+			ixgbe_rx_input(rxr, ifp, sendmp, ptype);
+			i = rxr->next_to_check;
+		}
+
+		/* Every 8 descriptors we go to refresh mbufs */
+		if (processed == 8) {
+			ixgbe_refresh_mbufs(rxr, i);
+			processed = 0;
+		}
+	}
+
+	/* Refresh any remaining buf structs */
+	if (ixgbe_rx_unrefreshed(rxr))
+		ixgbe_refresh_mbufs(rxr, i);
+
+	rxr->next_to_check = i;
+
+	/*
+	 * Flush any outstanding LRO work
+	 */
+	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
+		SLIST_REMOVE_HEAD(&lro->lro_active, next);
+		tcp_lro_flush(lro, queued);
+	}
+
+	IXGBE_RX_UNLOCK(rxr);
+
+	/*
+	** Still have cleaning to do?
+	*/
+	if ((staterr & IXGBE_RXD_STAT_DD) != 0)
+		return (TRUE);
+	else
+		return (FALSE);
+}
+
+/*********************************************************************
+ *
+ *  Verify that the hardware indicated that the checksum is valid.
+ *  Inform the stack about the status of the checksum so that it
+ *  doesn't spend time verifying it.
+ *
+ *********************************************************************/
+static void
+ixgbe_rx_checksum(u32 staterr, struct mbuf *mp, u32 ptype)
+{
+	u16	status = (u16)staterr;
+	u8	errors = (u8)(staterr >> 24);
+	bool	sctp = FALSE;
+
+	if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
+	    (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
+		sctp = TRUE;
+
+	if (status & IXGBE_RXD_STAT_IPCS) {
+		if (!(errors & IXGBE_RXD_ERR_IPE)) {
+			/* IP Checksum Good */
+			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
+			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
+		} else
+			mp->m_pkthdr.csum_flags = 0;
+	}
+	if (status & IXGBE_RXD_STAT_L4CS) {
+		u64 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
+#if __FreeBSD_version >= 800000
+		if (sctp)
+			type = CSUM_SCTP_VALID;
+#endif
+		if (!(errors & IXGBE_RXD_ERR_TCPE)) {
+			mp->m_pkthdr.csum_flags |= type;
+			if (!sctp)
+				mp->m_pkthdr.csum_data = htons(0xffff);
+		}
+	}
+	return;
+}
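ixgbe_rx_checksum() translates the descriptor's IPCS/L4CS status and error bits into the standard mbuf csum flags. A minimal sketch of the stack-side test those flags enable (illustrative only; these are the stock FreeBSD mbuf flags):

	/* Illustrative only: how a consumer above the driver reads the result. */
	static int
	rx_l4_checksum_verified(const struct mbuf *m)
	{
		const int ok = CSUM_DATA_VALID | CSUM_PSEUDO_HDR;

		/* csum_data was set to 0xffff above, so the pseudo-header
		 * checksum needs no further work either. */
		return ((m->m_pkthdr.csum_flags & ok) == ok);
	}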
+/********************************************************************
+ * Manage DMA'able memory.
+ *******************************************************************/
+static void
+ixgbe_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+	if (error)
+		return;
+	*(bus_addr_t *)arg = segs->ds_addr;
+	return;
+}
+
+int
+ixgbe_dma_malloc(struct adapter *adapter, bus_size_t size,
+    struct ixgbe_dma_alloc *dma, int mapflags)
+{
+	device_t dev = adapter->dev;
+	int r;
+
+	r = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
+	    DBA_ALIGN, 0,	/* alignment, bounds */
+	    BUS_SPACE_MAXADDR,	/* lowaddr */
+	    BUS_SPACE_MAXADDR,	/* highaddr */
+	    NULL, NULL,		/* filter, filterarg */
+	    size,		/* maxsize */
+	    1,			/* nsegments */
+	    size,		/* maxsegsize */
+	    BUS_DMA_ALLOCNOW,	/* flags */
+	    NULL,		/* lockfunc */
+	    NULL,		/* lockfuncarg */
+	    &dma->dma_tag);
+	if (r != 0) {
+		device_printf(dev, "ixgbe_dma_malloc: bus_dma_tag_create failed; "
+		    "error %u\n", r);
+		goto fail_0;
+	}
+	r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
+	    BUS_DMA_NOWAIT, &dma->dma_map);
+	if (r != 0) {
+		device_printf(dev, "ixgbe_dma_malloc: bus_dmamem_alloc failed; "
+		    "error %u\n", r);
+		goto fail_1;
+	}
+	r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
+	    size,
+	    ixgbe_dmamap_cb,
+	    &dma->dma_paddr,
+	    mapflags | BUS_DMA_NOWAIT);
+	if (r != 0) {
+		device_printf(dev, "ixgbe_dma_malloc: bus_dmamap_load failed; "
+		    "error %u\n", r);
+		goto fail_2;
+	}
+	dma->dma_size = size;
+	return (0);
+fail_2:
+	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
+fail_1:
+	bus_dma_tag_destroy(dma->dma_tag);
+fail_0:
+	dma->dma_tag = NULL;
+	return (r);
+}
+
+void
+ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma)
+{
+	bus_dmamap_sync(dma->dma_tag, dma->dma_map,
+	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
+	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
+	bus_dma_tag_destroy(dma->dma_tag);
+}
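ixgbe_dma_malloc() follows the usual busdma idiom: bus_dmamap_load() reports the bus address through a callback, so ixgbe_dmamap_cb() simply stores segs[0].ds_addr into dma_paddr. A minimal sketch of a caller (illustrative only; the ring allocations below do exactly this with DBA_ALIGN-rounded sizes):

	struct ixgbe_dma_alloc dma;
	bus_size_t size = 4096;		/* illustrative size */

	if (ixgbe_dma_malloc(adapter, size, &dma, BUS_DMA_NOWAIT) == 0) {
		/* dma.dma_vaddr is the CPU's view of the memory;
		 * dma.dma_paddr is what gets programmed into the NIC. */
		bzero(dma.dma_vaddr, size);
		/* ... use the ring, then on teardown: */
		ixgbe_dma_free(adapter, &dma);
	}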
+/*********************************************************************
+ *
+ *  Allocate memory for the transmit and receive rings, and then
+ *  the descriptors associated with each; called only once at attach.
+ *
+ **********************************************************************/
+int
+ixgbe_allocate_queues(struct adapter *adapter)
+{
+	device_t	dev = adapter->dev;
+	struct ix_queue	*que;
+	struct tx_ring	*txr;
+	struct rx_ring	*rxr;
+	int		rsize, tsize, error = IXGBE_SUCCESS;
+	int		txconf = 0, rxconf = 0;
+
+	/* First allocate the top level queue structs */
+	if (!(adapter->queues =
+	    (struct ix_queue *) malloc(sizeof(struct ix_queue) *
+	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+		device_printf(dev, "Unable to allocate queue memory\n");
+		error = ENOMEM;
+		goto fail;
+	}
+
+	/* Next allocate the TX ring struct memory */
+	if (!(adapter->tx_rings =
+	    (struct tx_ring *) malloc(sizeof(struct tx_ring) *
+	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+		device_printf(dev, "Unable to allocate TX ring memory\n");
+		error = ENOMEM;
+		goto tx_fail;
+	}
+
+	/* Then allocate the RX */
+	if (!(adapter->rx_rings =
+	    (struct rx_ring *) malloc(sizeof(struct rx_ring) *
+	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+		device_printf(dev, "Unable to allocate RX ring memory\n");
+		error = ENOMEM;
+		goto rx_fail;
+	}
+
+	/* For the ring itself */
+	tsize = roundup2(adapter->num_tx_desc *
+	    sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
+
+	/*
+	 * Now set up the TX queues; txconf is needed to handle the
+	 * possibility that things fail midcourse and we need to
+	 * undo memory gracefully
+	 */
+	for (int i = 0; i < adapter->num_queues; i++, txconf++) {
+		/* Set up some basics */
+		txr = &adapter->tx_rings[i];
+		txr->adapter = adapter;
+		txr->me = i;
+		txr->num_desc = adapter->num_tx_desc;
+
+		/* Initialize the TX side lock */
+		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
+		    device_get_nameunit(dev), txr->me);
+		mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);
+
+		if (ixgbe_dma_malloc(adapter, tsize,
+		    &txr->txdma, BUS_DMA_NOWAIT)) {
+			device_printf(dev,
+			    "Unable to allocate TX Descriptor memory\n");
+			error = ENOMEM;
+			goto err_tx_desc;
+		}
+		txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
+		bzero((void *)txr->tx_base, tsize);
+
+		/* Now allocate transmit buffers for the ring */
+		if (ixgbe_allocate_transmit_buffers(txr)) {
+			device_printf(dev,
+			    "Critical Failure setting up transmit buffers\n");
+			error = ENOMEM;
+			goto err_tx_desc;
+		}
+#ifndef IXGBE_LEGACY_TX
+		/* Allocate a buf ring */
+		txr->br = buf_ring_alloc(IXGBE_BR_SIZE, M_DEVBUF,
+		    M_WAITOK, &txr->tx_mtx);
+		if (txr->br == NULL) {
+			device_printf(dev,
+			    "Critical Failure setting up buf ring\n");
+			error = ENOMEM;
+			goto err_tx_desc;
+		}
+#endif
+	}
+
+	/*
+	 * Next the RX queues...
+ */ + rsize = roundup2(adapter->num_rx_desc * + sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN); + for (int i = 0; i < adapter->num_queues; i++, rxconf++) { + rxr = &adapter->rx_rings[i]; + /* Set up some basics */ + rxr->adapter = adapter; + rxr->me = i; + rxr->num_desc = adapter->num_rx_desc; + + /* Initialize the RX side lock */ + snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)", + device_get_nameunit(dev), rxr->me); + mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF); + + if (ixgbe_dma_malloc(adapter, rsize, + &rxr->rxdma, BUS_DMA_NOWAIT)) { + device_printf(dev, + "Unable to allocate RxDescriptor memory\n"); + error = ENOMEM; + goto err_rx_desc; + } + rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr; + bzero((void *)rxr->rx_base, rsize); + + /* Allocate receive buffers for the ring*/ + if (ixgbe_allocate_receive_buffers(rxr)) { + device_printf(dev, + "Critical Failure setting up receive buffers\n"); + error = ENOMEM; + goto err_rx_desc; + } + } + + /* + ** Finally set up the queue holding structs + */ + for (int i = 0; i < adapter->num_queues; i++) { + que = &adapter->queues[i]; + que->adapter = adapter; + que->me = i; + que->txr = &adapter->tx_rings[i]; + que->rxr = &adapter->rx_rings[i]; + } + + return (0); + +err_rx_desc: + for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--) + ixgbe_dma_free(adapter, &rxr->rxdma); +err_tx_desc: + for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--) + ixgbe_dma_free(adapter, &txr->txdma); + free(adapter->rx_rings, M_DEVBUF); +rx_fail: + free(adapter->tx_rings, M_DEVBUF); +tx_fail: + free(adapter->queues, M_DEVBUF); +fail: + return (error); +} diff --git a/sys/dev/ixgbe/ixgbe.h b/sys/dev/ixgbe/ixgbe.h index 72290d1..0023517 100644 --- a/sys/dev/ixgbe/ixgbe.h +++ b/sys/dev/ixgbe/ixgbe.h @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2013, Intel Corporation + Copyright (c) 2001-2015, Intel Corporation All rights reserved. 
Redistribution and use in source and binary forms, with or without @@ -88,8 +88,12 @@ #include <sys/pcpu.h> #include <sys/smp.h> #include <machine/smp.h> +#include <sys/sbuf.h> #include "ixgbe_api.h" +#include "ixgbe_common.h" +#include "ixgbe_phy.h" +#include "ixgbe_vf.h" /* Tunables */ @@ -143,7 +147,11 @@ #define IXGBE_TX_CLEANUP_THRESHOLD (adapter->num_tx_desc / 8) #define IXGBE_TX_OP_THRESHOLD (adapter->num_tx_desc / 32) -#define IXGBE_MAX_FRAME_SIZE 0x3F00 +/* These defines are used in MTU calculations */ +#define IXGBE_MAX_FRAME_SIZE 9728 +#define IXGBE_MTU_HDR (ETHER_HDR_LEN + ETHER_CRC_LEN + \ + ETHER_VLAN_ENCAP_LEN) +#define IXGBE_MAX_MTU (IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR) /* Flow control constants */ #define IXGBE_FC_PAUSE 0xFFFF @@ -195,9 +203,10 @@ #define IXGBE_VFTA_SIZE 128 #define IXGBE_BR_SIZE 4096 #define IXGBE_QUEUE_MIN_FREE 32 +#define IXGBE_MAX_TX_BUSY 10 +#define IXGBE_QUEUE_HUNG 0x80000000 -/* IOCTL define to gather SFP+ Diagnostic data */ -#define SIOCGI2C SIOCGIFGENERIC +#define IXV_EITR_DEFAULT 128 /* Offload bits in mbuf flag */ #if __FreeBSD_version >= 800000 @@ -206,6 +215,15 @@ #define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP) #endif +/* Backward compatibility items for very old versions */ +#ifndef pci_find_cap +#define pci_find_cap pci_find_extcap +#endif + +#ifndef DEVMETHOD_END +#define DEVMETHOD_END { NULL, NULL } +#endif + /* * Interrupt Moderation parameters */ @@ -214,6 +232,16 @@ #define IXGBE_BULK_LATENCY 1200 #define IXGBE_LINK_ITR 2000 +/* MAC type macros */ +#define IXGBE_IS_X550VF(_adapter) \ + ((_adapter->hw.mac.type == ixgbe_mac_X550_vf) || \ + (_adapter->hw.mac.type == ixgbe_mac_X550EM_x_vf)) + +#define IXGBE_IS_VF(_adapter) \ + (IXGBE_IS_X550VF(_adapter) || \ + (_adapter->hw.mac.type == ixgbe_mac_X540_vf) || \ + (_adapter->hw.mac.type == ixgbe_mac_82599_vf)) + /* ***************************************************************************** @@ -278,8 +306,10 @@ struct ix_queue { u32 msix; /* This queue's MSIX vector */ u32 eims; /* This queue's EIMS bit */ u32 eitr_setting; + u32 me; struct resource *res; void *tag; + int busy; struct tx_ring *txr; struct rx_ring *rxr; struct task que_task; @@ -294,7 +324,8 @@ struct tx_ring { struct adapter *adapter; struct mtx tx_mtx; u32 me; - int watchdog_time; + u32 tail; + int busy; union ixgbe_adv_tx_desc *tx_base; struct ixgbe_tx_buf *tx_buffers; struct ixgbe_dma_alloc txdma; @@ -303,11 +334,6 @@ struct tx_ring { u16 next_to_clean; u16 process_limit; u16 num_desc; - enum { - IXGBE_QUEUE_IDLE, - IXGBE_QUEUE_WORKING, - IXGBE_QUEUE_HUNG, - } queue_status; u32 txd_cmd; bus_dma_tag_t txtag; char mtx_name[16]; @@ -337,6 +363,7 @@ struct rx_ring { struct adapter *adapter; struct mtx rx_mtx; u32 me; + u32 tail; union ixgbe_adv_rx_desc *rx_base; struct ixgbe_dma_alloc rxdma; struct lro_ctrl lro; @@ -416,7 +443,14 @@ struct adapter { u16 num_segs; u32 link_speed; bool link_up; - u32 linkvec; + u32 vector; + u16 dmac; + bool eee_support; + bool eee_enabled; + + /* Power management-related */ + bool wol_support; + u32 wufc; /* Mbuf cluster size */ u32 rx_mbuf_sz; @@ -430,6 +464,7 @@ struct adapter { int fdir_reinit; struct task fdir_task; #endif + struct task phy_task; /* PHY intr tasklet */ struct taskqueue *tq; /* @@ -452,7 +487,7 @@ struct adapter { * Allocated at run time, an array of rings. 
*/ struct rx_ring *rx_rings; - u64 que_mask; + u64 active_queues; u32 num_rx_desc; /* Multicast array memory */ @@ -466,8 +501,23 @@ struct adapter { unsigned long mbuf_packet_failed; unsigned long watchdog_events; unsigned long link_irq; - - struct ixgbe_hw_stats stats; + union { + struct ixgbe_hw_stats pf; + struct ixgbevf_hw_stats vf; + } stats; +#if __FreeBSD_version >= 1100036 + /* counter(9) stats */ + u64 ipackets; + u64 ierrors; + u64 opackets; + u64 oerrors; + u64 ibytes; + u64 obytes; + u64 imcasts; + u64 omcasts; + u64 iqdrops; + u64 noproto; +#endif }; @@ -498,6 +548,50 @@ struct adapter { #define PCIER_LINK_STA PCIR_EXPRESS_LINK_STA #endif +/* Stats macros */ +#if __FreeBSD_version >= 1100036 +#define IXGBE_SET_IPACKETS(sc, count) (sc)->ipackets = (count) +#define IXGBE_SET_IERRORS(sc, count) (sc)->ierrors = (count) +#define IXGBE_SET_OPACKETS(sc, count) (sc)->opackets = (count) +#define IXGBE_SET_OERRORS(sc, count) (sc)->oerrors = (count) +#define IXGBE_SET_COLLISIONS(sc, count) +#define IXGBE_SET_IBYTES(sc, count) (sc)->ibytes = (count) +#define IXGBE_SET_OBYTES(sc, count) (sc)->obytes = (count) +#define IXGBE_SET_IMCASTS(sc, count) (sc)->imcasts = (count) +#define IXGBE_SET_OMCASTS(sc, count) (sc)->omcasts = (count) +#define IXGBE_SET_IQDROPS(sc, count) (sc)->iqdrops = (count) +#else +#define IXGBE_SET_IPACKETS(sc, count) (sc)->ifp->if_ipackets = (count) +#define IXGBE_SET_IERRORS(sc, count) (sc)->ifp->if_ierrors = (count) +#define IXGBE_SET_OPACKETS(sc, count) (sc)->ifp->if_opackets = (count) +#define IXGBE_SET_OERRORS(sc, count) (sc)->ifp->if_oerrors = (count) +#define IXGBE_SET_COLLISIONS(sc, count) (sc)->ifp->if_collisions = (count) +#define IXGBE_SET_IBYTES(sc, count) (sc)->ifp->if_ibytes = (count) +#define IXGBE_SET_OBYTES(sc, count) (sc)->ifp->if_obytes = (count) +#define IXGBE_SET_IMCASTS(sc, count) (sc)->ifp->if_imcasts = (count) +#define IXGBE_SET_OMCASTS(sc, count) (sc)->ifp->if_omcasts = (count) +#define IXGBE_SET_IQDROPS(sc, count) (sc)->ifp->if_iqdrops = (count) +#endif + +/* External PHY register addresses */ +#define IXGBE_PHY_CURRENT_TEMP 0xC820 +#define IXGBE_PHY_OVERTEMP_STATUS 0xC830 + +/* Sysctl help messages; displayed with sysctl -d */ +#define IXGBE_SYSCTL_DESC_ADV_SPEED \ + "\nControl advertised link speed using these flags:\n" \ + "\t0x1 - advertise 100M\n" \ + "\t0x2 - advertise 1G\n" \ + "\t0x4 - advertise 10G\n\n" \ + "\t100M is only supported on certain 10GBaseT adapters.\n" + +#define IXGBE_SYSCTL_DESC_SET_FC \ + "\nSet flow control mode using these values:\n" \ + "\t0 - off\n" \ + "\t1 - rx pause\n" \ + "\t2 - tx pause\n" \ + "\t3 - tx and rx pause" + static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) { @@ -508,6 +602,10 @@ ixgbe_is_sfp(struct ixgbe_hw *hw) case ixgbe_phy_sfp_unknown: case ixgbe_phy_sfp_passive_tyco: case ixgbe_phy_sfp_passive_unknown: + case ixgbe_phy_qsfp_passive_unknown: + case ixgbe_phy_qsfp_active_unknown: + case ixgbe_phy_qsfp_intel: + case ixgbe_phy_qsfp_unknown: return TRUE; default: return FALSE; @@ -540,4 +638,44 @@ ixgbe_rx_unrefreshed(struct rx_ring *rxr) rxr->next_to_refresh - 1); } +/* +** This checks for a zero mac addr, something that will be likely +** unless the Admin on the Host has created one. 
+*/ +static inline bool +ixv_check_ether_addr(u8 *addr) +{ + bool status = TRUE; + + if ((addr[0] == 0 && addr[1]== 0 && addr[2] == 0 && + addr[3] == 0 && addr[4]== 0 && addr[5] == 0)) + status = FALSE; + return (status); +} + +/* Shared Prototypes */ + +#ifdef IXGBE_LEGACY_TX +void ixgbe_start(struct ifnet *); +void ixgbe_start_locked(struct tx_ring *, struct ifnet *); +#else /* ! IXGBE_LEGACY_TX */ +int ixgbe_mq_start(struct ifnet *, struct mbuf *); +int ixgbe_mq_start_locked(struct ifnet *, struct tx_ring *); +void ixgbe_qflush(struct ifnet *); +void ixgbe_deferred_mq_start(void *, int); +#endif /* IXGBE_LEGACY_TX */ + +int ixgbe_allocate_queues(struct adapter *); +int ixgbe_allocate_transmit_buffers(struct tx_ring *); +int ixgbe_setup_transmit_structures(struct adapter *); +void ixgbe_free_transmit_structures(struct adapter *); +int ixgbe_allocate_receive_buffers(struct rx_ring *); +int ixgbe_setup_receive_structures(struct adapter *); +void ixgbe_free_receive_structures(struct adapter *); +void ixgbe_txeof(struct tx_ring *); +bool ixgbe_rxeof(struct ix_queue *); + +int ixgbe_dma_malloc(struct adapter *, + bus_size_t, struct ixgbe_dma_alloc *, int); +void ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *); #endif /* _IXGBE_H_ */ diff --git a/sys/dev/ixgbe/ixgbe_82598.c b/sys/dev/ixgbe/ixgbe_82598.c index e32f270..46e64c5 100644 --- a/sys/dev/ixgbe/ixgbe_82598.c +++ b/sys/dev/ixgbe/ixgbe_82598.c @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2013, Intel Corporation + Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without @@ -38,6 +38,13 @@ #include "ixgbe_common.h" #include "ixgbe_phy.h" +#define IXGBE_82598_MAX_TX_QUEUES 32 +#define IXGBE_82598_MAX_RX_QUEUES 64 +#define IXGBE_82598_RAR_ENTRIES 16 +#define IXGBE_82598_MC_TBL_SIZE 128 +#define IXGBE_82598_VFT_TBL_SIZE 128 +#define IXGBE_82598_RX_PB_SIZE 512 + static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *autoneg); @@ -121,47 +128,48 @@ s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw) ret_val = ixgbe_init_ops_generic(hw); /* PHY */ - phy->ops.init = &ixgbe_init_phy_ops_82598; + phy->ops.init = ixgbe_init_phy_ops_82598; /* MAC */ - mac->ops.start_hw = &ixgbe_start_hw_82598; - mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_82598; - mac->ops.reset_hw = &ixgbe_reset_hw_82598; - mac->ops.get_media_type = &ixgbe_get_media_type_82598; + mac->ops.start_hw = ixgbe_start_hw_82598; + mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_82598; + mac->ops.reset_hw = ixgbe_reset_hw_82598; + mac->ops.get_media_type = ixgbe_get_media_type_82598; mac->ops.get_supported_physical_layer = - &ixgbe_get_supported_physical_layer_82598; - mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598; - mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598; - mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598; + ixgbe_get_supported_physical_layer_82598; + mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82598; + mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82598; + mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie_82598; + mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82598; /* RAR, Multicast, VLAN */ - mac->ops.set_vmdq = &ixgbe_set_vmdq_82598; - mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598; - mac->ops.set_vfta = &ixgbe_set_vfta_82598; + mac->ops.set_vmdq = ixgbe_set_vmdq_82598; + mac->ops.clear_vmdq = 
ixgbe_clear_vmdq_82598; + mac->ops.set_vfta = ixgbe_set_vfta_82598; mac->ops.set_vlvf = NULL; - mac->ops.clear_vfta = &ixgbe_clear_vfta_82598; + mac->ops.clear_vfta = ixgbe_clear_vfta_82598; /* Flow Control */ - mac->ops.fc_enable = &ixgbe_fc_enable_82598; - - mac->mcft_size = 128; - mac->vft_size = 128; - mac->num_rar_entries = 16; - mac->rx_pb_size = 512; - mac->max_tx_queues = 32; - mac->max_rx_queues = 64; + mac->ops.fc_enable = ixgbe_fc_enable_82598; + + mac->mcft_size = IXGBE_82598_MC_TBL_SIZE; + mac->vft_size = IXGBE_82598_VFT_TBL_SIZE; + mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES; + mac->rx_pb_size = IXGBE_82598_RX_PB_SIZE; + mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES; + mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES; mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); /* SFP+ Module */ - phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598; - phy->ops.read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_82598; + phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_82598; + phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_82598; /* Link */ - mac->ops.check_link = &ixgbe_check_mac_link_82598; - mac->ops.setup_link = &ixgbe_setup_mac_link_82598; + mac->ops.check_link = ixgbe_check_mac_link_82598; + mac->ops.setup_link = ixgbe_setup_mac_link_82598; mac->ops.flap_tx_laser = NULL; - mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82598; - mac->ops.setup_rxpba = &ixgbe_set_rxpba_82598; + mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82598; + mac->ops.setup_rxpba = ixgbe_set_rxpba_82598; /* Manageability interface */ mac->ops.set_fw_drv_ver = NULL; @@ -194,20 +202,20 @@ s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) /* Overwrite the link function pointers if copper PHY */ if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { - mac->ops.setup_link = &ixgbe_setup_copper_link_82598; + mac->ops.setup_link = ixgbe_setup_copper_link_82598; mac->ops.get_link_capabilities = - &ixgbe_get_copper_link_capabilities_generic; + ixgbe_get_copper_link_capabilities_generic; } switch (hw->phy.type) { case ixgbe_phy_tn: - phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; - phy->ops.check_link = &ixgbe_check_phy_link_tnx; + phy->ops.setup_link = ixgbe_setup_phy_link_tnx; + phy->ops.check_link = ixgbe_check_phy_link_tnx; phy->ops.get_firmware_version = - &ixgbe_get_phy_firmware_version_tnx; + ixgbe_get_phy_firmware_version_tnx; break; case ixgbe_phy_nl: - phy->ops.reset = &ixgbe_reset_phy_nl; + phy->ops.reset = ixgbe_reset_phy_nl; /* Call SFP+ identify routine to get the SFP+ module type */ ret_val = phy->ops.identify_sfp(hw); @@ -252,6 +260,8 @@ s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) DEBUGFUNC("ixgbe_start_hw_82598"); ret_val = ixgbe_start_hw_generic(hw); + if (ret_val) + return ret_val; /* Disable relaxed ordering */ for (i = 0; ((i < hw->mac.max_tx_queues) && @@ -270,8 +280,7 @@ s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) } /* set the completion timeout for interface */ - if (ret_val == IXGBE_SUCCESS) - ixgbe_set_pcie_completion_timeout(hw); + ixgbe_set_pcie_completion_timeout(hw); return ret_val; } @@ -1409,6 +1418,20 @@ static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, /* Setup Tx packet buffer sizes */ for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB); +} + +/** + * ixgbe_enable_rx_dma_82598 - Enable the Rx DMA unit + * @hw: pointer to hardware structure + * @regval: register value to write to RXCTRL + * + * Enables the Rx DMA unit + **/ +s32 
ixgbe_enable_rx_dma_82598(struct ixgbe_hw *hw, u32 regval) +{ + DEBUGFUNC("ixgbe_enable_rx_dma_82598"); - return; + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval); + + return IXGBE_SUCCESS; } diff --git a/sys/dev/ixgbe/ixgbe_82598.h b/sys/dev/ixgbe/ixgbe_82598.h index a195b15..d2241c7 100644 --- a/sys/dev/ixgbe/ixgbe_82598.h +++ b/sys/dev/ixgbe/ixgbe_82598.h @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2012, Intel Corporation + Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without @@ -49,4 +49,5 @@ u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw); s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw); void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw); void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw); +s32 ixgbe_enable_rx_dma_82598(struct ixgbe_hw *hw, u32 regval); #endif /* _IXGBE_82598_H_ */ diff --git a/sys/dev/ixgbe/ixgbe_82599.c b/sys/dev/ixgbe/ixgbe_82599.c index 3cc8cd7..b38620f 100644 --- a/sys/dev/ixgbe/ixgbe_82599.c +++ b/sys/dev/ixgbe/ixgbe_82599.c @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2013, Intel Corporation + Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without @@ -38,6 +38,13 @@ #include "ixgbe_common.h" #include "ixgbe_phy.h" +#define IXGBE_82599_MAX_TX_QUEUES 128 +#define IXGBE_82599_MAX_RX_QUEUES 128 +#define IXGBE_82599_RAR_ENTRIES 128 +#define IXGBE_82599_MC_TBL_SIZE 128 +#define IXGBE_82599_VFT_TBL_SIZE 128 +#define IXGBE_82599_RX_PB_SIZE 512 + static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete); @@ -46,25 +53,10 @@ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw, u16 offset, u16 *data); static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data); - -static bool ixgbe_mng_enabled(struct ixgbe_hw *hw) -{ - u32 fwsm, manc, factps; - - fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM); - if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) - return FALSE; - - manc = IXGBE_READ_REG(hw, IXGBE_MANC); - if (!(manc & IXGBE_MANC_RCV_TCO_EN)) - return FALSE; - - factps = IXGBE_READ_REG(hw, IXGBE_FACTPS); - if (factps & IXGBE_FACTPS_MNGCG) - return FALSE; - - return TRUE; -} +static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data); +static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) { @@ -77,12 +69,12 @@ void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) * and MNG not enabled */ if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) && - !hw->mng_fw_enabled) { + !ixgbe_mng_enabled(hw)) { mac->ops.disable_tx_laser = - &ixgbe_disable_tx_laser_multispeed_fiber; + ixgbe_disable_tx_laser_multispeed_fiber; mac->ops.enable_tx_laser = - &ixgbe_enable_tx_laser_multispeed_fiber; - mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber; + ixgbe_enable_tx_laser_multispeed_fiber; + mac->ops.flap_tx_laser = ixgbe_flap_tx_laser_multispeed_fiber; } else { mac->ops.disable_tx_laser = NULL; @@ -92,15 +84,21 @@ void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) if (hw->phy.multispeed_fiber) { /* Set up dual speed SFP+ support */ - mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber; + 
mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber; + mac->ops.setup_mac_link = ixgbe_setup_mac_link_82599; + mac->ops.set_rate_select_speed = + ixgbe_set_hard_rate_select_speed; + if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber_fixed) + mac->ops.set_rate_select_speed = + ixgbe_set_soft_rate_select_speed; } else { if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) && (hw->phy.smart_speed == ixgbe_smart_speed_auto || hw->phy.smart_speed == ixgbe_smart_speed_on) && !ixgbe_verify_lesm_fw_enabled_82599(hw)) { - mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed; + mac->ops.setup_link = ixgbe_setup_mac_link_smartspeed; } else { - mac->ops.setup_link = &ixgbe_setup_mac_link_82599; + mac->ops.setup_link = ixgbe_setup_mac_link_82599; } } } @@ -119,9 +117,27 @@ s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) struct ixgbe_mac_info *mac = &hw->mac; struct ixgbe_phy_info *phy = &hw->phy; s32 ret_val = IXGBE_SUCCESS; + u32 esdp; DEBUGFUNC("ixgbe_init_phy_ops_82599"); + if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) { + /* Store flag indicating I2C bus access control unit. */ + hw->phy.qsfp_shared_i2c_bus = TRUE; + + /* Initialize access to QSFP+ I2C bus */ + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + esdp |= IXGBE_ESDP_SDP0_DIR; + esdp &= ~IXGBE_ESDP_SDP1_DIR; + esdp &= ~IXGBE_ESDP_SDP0; + esdp &= ~IXGBE_ESDP_SDP0_NATIVE; + esdp &= ~IXGBE_ESDP_SDP1_NATIVE; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); + + phy->ops.read_i2c_byte = ixgbe_read_i2c_byte_82599; + phy->ops.write_i2c_byte = ixgbe_write_i2c_byte_82599; + } /* Identify the PHY or SFP module */ ret_val = phy->ops.identify(hw); if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED) @@ -134,18 +150,18 @@ s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) /* If copper media, overwrite with copper function pointers */ if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { - mac->ops.setup_link = &ixgbe_setup_copper_link_82599; + mac->ops.setup_link = ixgbe_setup_copper_link_82599; mac->ops.get_link_capabilities = - &ixgbe_get_copper_link_capabilities_generic; + ixgbe_get_copper_link_capabilities_generic; } - /* Set necessary function pointers based on phy type */ + /* Set necessary function pointers based on PHY type */ switch (hw->phy.type) { case ixgbe_phy_tn: - phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; - phy->ops.check_link = &ixgbe_check_phy_link_tnx; + phy->ops.setup_link = ixgbe_setup_phy_link_tnx; + phy->ops.check_link = ixgbe_check_phy_link_tnx; phy->ops.get_firmware_version = - &ixgbe_get_phy_firmware_version_tnx; + ixgbe_get_phy_firmware_version_tnx; break; default: break; @@ -158,7 +174,6 @@ s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) { s32 ret_val = IXGBE_SUCCESS; u16 list_offset, data_offset, data_value; - bool got_lock = FALSE; DEBUGFUNC("ixgbe_setup_sfp_modules_82599"); @@ -191,35 +206,15 @@ s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) /* Release the semaphore */ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); - /* Delay obtaining semaphore again to allow FW access */ - msec_delay(hw->eeprom.semaphore_delay); - - /* Need SW/FW semaphore around AUTOC writes if LESM on, - * likewise reset_pipeline requires lock as it also writes - * AUTOC. + /* Delay obtaining semaphore again to allow FW access + * prot_autoc_write uses the semaphore too. 
 	 */
-	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
-		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
-					IXGBE_GSSR_MAC_CSR_SM);
-		if (ret_val != IXGBE_SUCCESS) {
-			ret_val = IXGBE_ERR_SWFW_SYNC;
-			goto setup_sfp_out;
-		}
-
-		got_lock = TRUE;
-	}
+	msec_delay(hw->eeprom.semaphore_delay);
 
 	/* Restart DSP and set SFI mode */
-	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((hw->mac.orig_autoc) |
-			IXGBE_AUTOC_LMS_10G_SERIAL));
-	hw->mac.cached_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-	ret_val = ixgbe_reset_pipeline_82599(hw);
-
-	if (got_lock) {
-		hw->mac.ops.release_swfw_sync(hw,
-			IXGBE_GSSR_MAC_CSR_SM);
-		got_lock = FALSE;
-	}
+	ret_val = hw->mac.ops.prot_autoc_write(hw,
+	    hw->mac.orig_autoc | IXGBE_AUTOC_LMS_10G_SERIAL,
+	    FALSE);
 
 	if (ret_val) {
 		DEBUGOUT("sfp module setup not complete\n");
@@ -243,6 +238,79 @@ setup_sfp_err:
 }
 
 /**
+ * prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read
+ * @hw: pointer to hardware structure
+ * @locked: Return whether we took the SW/FW lock for this read.
+ * @reg_val: Value we read from AUTOC
+ *
+ * For this part (82599) we need to wrap read-modify-writes with a possible
+ * FW/SW lock. It is assumed this lock will be freed with the next
+ * prot_autoc_write_82599().
+ */
+s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
+{
+	s32 ret_val;
+
+	*locked = FALSE;
+	/* If LESM is on then we need to hold the SW/FW semaphore. */
+	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+		    IXGBE_GSSR_MAC_CSR_SM);
+		if (ret_val != IXGBE_SUCCESS)
+			return IXGBE_ERR_SWFW_SYNC;
+
+		*locked = TRUE;
+	}
+
+	*reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+	return IXGBE_SUCCESS;
+}
+
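The two helpers are designed to be used as a pair: the lock state from the read is threaded through to the write, which always releases the semaphore and performs the pipeline reset. A minimal sketch of the intended read-modify-write pattern (illustrative only; the LMS constants are the ones used elsewhere in this file):

	bool locked = FALSE;
	u32 autoc;
	s32 ret;

	ret = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc);
	if (ret == IXGBE_SUCCESS) {
		autoc &= ~IXGBE_AUTOC_LMS_MASK;
		autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
		/* Releases the SW/FW semaphore whether or not we took it */
		ret = hw->mac.ops.prot_autoc_write(hw, autoc, locked);
	}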
+/**
+ * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write
+ * @hw: pointer to hardware structure
+ * @reg_val: value to write to AUTOC
+ * @locked: bool to indicate whether the SW/FW lock was already taken by
+ *	    a previous prot_autoc_read_82599.
+ *
+ * This part (82599) may need to hold the SW/FW lock around all writes to
+ * AUTOC. Likewise after a write we need to do a pipeline reset.
+ */
+s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
+{
+	s32 ret_val = IXGBE_SUCCESS;
+
+	/* Blocked by MNG FW so bail */
+	if (ixgbe_check_reset_blocked(hw))
+		goto out;
+
+	/* We only need to get the lock if:
+	 *  - We didn't do it already (in the read part of a read-modify-write)
+	 *  - LESM is enabled.
+	 */
+	if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+		    IXGBE_GSSR_MAC_CSR_SM);
+		if (ret_val != IXGBE_SUCCESS)
+			return IXGBE_ERR_SWFW_SYNC;
+
+		locked = TRUE;
+	}
+
+	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
+	ret_val = ixgbe_reset_pipeline_82599(hw);
+
+out:
+	/* Free the SW/FW semaphore as we either grabbed it here or
+	 * already had it when this function was called.
+	 */
+	if (locked)
+		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+
+	return ret_val;
+}
+
+/**
  * ixgbe_init_ops_82599 - Inits func ptrs and MAC type
  * @hw: pointer to hardware structure
  *
@@ -263,53 +331,55 @@ s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
 	ret_val = ixgbe_init_ops_generic(hw);
 
 	/* PHY */
-	phy->ops.identify = &ixgbe_identify_phy_82599;
-	phy->ops.init = &ixgbe_init_phy_ops_82599;
+	phy->ops.identify = ixgbe_identify_phy_82599;
+	phy->ops.init = ixgbe_init_phy_ops_82599;
 
 	/* MAC */
-	mac->ops.reset_hw = &ixgbe_reset_hw_82599;
-	mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_gen2;
-	mac->ops.get_media_type = &ixgbe_get_media_type_82599;
+	mac->ops.reset_hw = ixgbe_reset_hw_82599;
+	mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_gen2;
+	mac->ops.get_media_type = ixgbe_get_media_type_82599;
 	mac->ops.get_supported_physical_layer =
-	    &ixgbe_get_supported_physical_layer_82599;
-	mac->ops.disable_sec_rx_path = &ixgbe_disable_sec_rx_path_generic;
-	mac->ops.enable_sec_rx_path = &ixgbe_enable_sec_rx_path_generic;
-	mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599;
-	mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599;
-	mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599;
-	mac->ops.start_hw = &ixgbe_start_hw_82599;
-	mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
-	mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
-	mac->ops.get_device_caps = &ixgbe_get_device_caps_generic;
-	mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
-	mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;
+	    ixgbe_get_supported_physical_layer_82599;
+	mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic;
+	mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic;
+	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82599;
+	mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82599;
+	mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82599;
+	mac->ops.start_hw = ixgbe_start_hw_82599;
+	mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic;
+	mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic;
+	mac->ops.get_device_caps = ixgbe_get_device_caps_generic;
+	mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic;
+	mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic;
+	mac->ops.prot_autoc_read = prot_autoc_read_82599;
+	mac->ops.prot_autoc_write = prot_autoc_write_82599;
 
 	/* RAR, Multicast, VLAN */
-	mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
-	mac->ops.set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic;
-	mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
-	mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
+	mac->ops.set_vmdq = ixgbe_set_vmdq_generic;
+	mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic;
+	mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic;
+	mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic;
 	mac->rar_highwater = 1;
-	mac->ops.set_vfta = &ixgbe_set_vfta_generic;
-	mac->ops.set_vlvf = &ixgbe_set_vlvf_generic;
-	mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
-	mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
-	mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;
-	mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
-	mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing;
+	mac->ops.set_vfta = ixgbe_set_vfta_generic;
+	mac->ops.set_vlvf = ixgbe_set_vlvf_generic;
+	mac->ops.clear_vfta = ixgbe_clear_vfta_generic;
+	mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic;
+	mac->ops.setup_sfp = 
ixgbe_setup_sfp_modules_82599; + mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing; + mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing; /* Link */ - mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599; - mac->ops.check_link = &ixgbe_check_mac_link_generic; - mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic; + mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82599; + mac->ops.check_link = ixgbe_check_mac_link_generic; + mac->ops.setup_rxpba = ixgbe_set_rxpba_generic; ixgbe_init_mac_link_ops_82599(hw); - mac->mcft_size = 128; - mac->vft_size = 128; - mac->num_rar_entries = 128; - mac->rx_pb_size = 512; - mac->max_tx_queues = 128; - mac->max_rx_queues = 128; + mac->mcft_size = IXGBE_82599_MC_TBL_SIZE; + mac->vft_size = IXGBE_82599_VFT_TBL_SIZE; + mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES; + mac->rx_pb_size = IXGBE_82599_RX_PB_SIZE; + mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES; + mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES; mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) & @@ -318,17 +388,14 @@ s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw) hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf; /* EEPROM */ - eeprom->ops.read = &ixgbe_read_eeprom_82599; - eeprom->ops.read_buffer = &ixgbe_read_eeprom_buffer_82599; + eeprom->ops.read = ixgbe_read_eeprom_82599; + eeprom->ops.read_buffer = ixgbe_read_eeprom_buffer_82599; /* Manageability interface */ - mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic; - + mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic; - mac->ops.get_rtrup2tc = &ixgbe_dcb_get_rtrup2tc_generic; - /* Cache if MNG FW is up */ - hw->mng_fw_enabled = ixgbe_mng_enabled(hw); + mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic; return ret_val; } @@ -354,6 +421,8 @@ s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, /* Check if 1G SFP module. 
*/ if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 || hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) { *speed = IXGBE_LINK_SPEED_1GB_FULL; @@ -429,7 +498,14 @@ s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, if (hw->phy.multispeed_fiber) { *speed |= IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL; - *autoneg = TRUE; + + /* QSFP must not enable full auto-negotiation + * Limited autoneg is enabled at 1G + */ + if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp) + *autoneg = FALSE; + else + *autoneg = TRUE; } out: @@ -482,6 +558,9 @@ enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) case IXGBE_DEV_ID_82599_T3_LOM: media_type = ixgbe_media_type_copper; break; + case IXGBE_DEV_ID_82599_QSFP_SF_QP: + media_type = ixgbe_media_type_fiber_qsfp; + break; case IXGBE_DEV_ID_82599_BYPASS: media_type = ixgbe_media_type_fiber_fixed; hw->phy.multispeed_fiber = TRUE; @@ -509,8 +588,8 @@ void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw) DEBUGFUNC("ixgbe_stop_mac_link_on_d3_82599"); ixgbe_read_eeprom(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2); - if (!hw->mng_fw_enabled && !hw->wol_enabled && - ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) { + if (!ixgbe_mng_present(hw) && !hw->wol_enabled && + ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) { autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2); autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK; IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg); @@ -597,7 +676,11 @@ void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) { u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); - /* Disable tx laser; allow 100us to go dark per spec */ + /* Blocked by MNG FW so bail */ + if (ixgbe_check_reset_blocked(hw)) + return; + + /* Disable Tx laser; allow 100us to go dark per spec */ esdp_reg |= IXGBE_ESDP_SDP3; IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); IXGBE_WRITE_FLUSH(hw); @@ -616,7 +699,7 @@ void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) { u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); - /* Enable tx laser; allow 100ms to light up */ + /* Enable Tx laser; allow 100ms to light up */ esdp_reg &= ~IXGBE_ESDP_SDP3; IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); IXGBE_WRITE_FLUSH(hw); @@ -630,7 +713,7 @@ void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) * When the driver changes the link speeds that it can support, * it sets autotry_restart to TRUE to indicate that we need to * initiate a new autotry session with the link partner. To do - * so, we set the speed then disable and re-enable the tx laser, to + * so, we set the speed then disable and re-enable the Tx laser, to * alert the link partner that it also needs to restart autotry on its * end. This is consistent with TRUE clause 37 autoneg, which also * involves a loss of signal. 
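In practice the restart described above is just a controlled loss of signal, driven by the two laser helpers shown earlier. A minimal sketch of how a speed-setup path triggers it (illustrative only; the timings follow the comments above):

	/* Illustrative only: force the loss of signal that restarts
	 * autotry on both ends of the link. */
	hw->mac.autotry_restart = TRUE;
	ixgbe_flap_tx_laser(hw);	/* disable the laser (~100us to go dark),
					 * then re-enable it (~100ms to light up) */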
@@ -639,6 +722,10 @@ void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) { DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber"); + /* Blocked by MNG FW so bail */ + if (ixgbe_check_reset_blocked(hw)) + return; + if (hw->mac.autotry_restart) { ixgbe_disable_tx_laser_multispeed_fiber(hw); ixgbe_enable_tx_laser_multispeed_fiber(hw); @@ -647,229 +734,32 @@ void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) } /** - * ixgbe_set_fiber_fixed_speed - Set module link speed for fixed fiber + * ixgbe_set_hard_rate_select_speed - Set module link speed * @hw: pointer to hardware structure * @speed: link speed to set * - * We set the module speed differently for fixed fiber. For other - * multi-speed devices we don't have an error value so here if we - * detect an error we just log it and exit. + * Set module link speed via RS0/RS1 rate select pins. */ -static void ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw, +void ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed) { - s32 status; - u8 rs, eeprom_data; + u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); switch (speed) { case IXGBE_LINK_SPEED_10GB_FULL: - /* one bit mask same as setting on */ - rs = IXGBE_SFF_SOFT_RS_SELECT_10G; + esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5); break; case IXGBE_LINK_SPEED_1GB_FULL: - rs = IXGBE_SFF_SOFT_RS_SELECT_1G; + esdp_reg &= ~IXGBE_ESDP_SDP5; + esdp_reg |= IXGBE_ESDP_SDP5_DIR; break; default: DEBUGOUT("Invalid fixed module speed\n"); return; } - /* Set RS0 */ - status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB, - IXGBE_I2C_EEPROM_DEV_ADDR2, - &eeprom_data); - if (status) { - DEBUGOUT("Failed to read Rx Rate Select RS0\n"); - goto out; - } - - eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs; - - status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB, - IXGBE_I2C_EEPROM_DEV_ADDR2, - eeprom_data); - if (status) { - DEBUGOUT("Failed to write Rx Rate Select RS0\n"); - goto out; - } - - /* Set RS1 */ - status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB, - IXGBE_I2C_EEPROM_DEV_ADDR2, - &eeprom_data); - if (status) { - DEBUGOUT("Failed to read Rx Rate Select RS1\n"); - goto out; - } - - eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs; - - status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB, - IXGBE_I2C_EEPROM_DEV_ADDR2, - eeprom_data); - if (status) { - DEBUGOUT("Failed to write Rx Rate Select RS1\n"); - goto out; - } -out: - return; -} - -/** - * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed - * @hw: pointer to hardware structure - * @speed: new link speed - * @autoneg_wait_to_complete: TRUE when waiting for completion is needed - * - * Set the link speed in the AUTOC register and restarts link. - **/ -s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete) -{ - s32 status = IXGBE_SUCCESS; - ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; - ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN; - u32 speedcnt = 0; - u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); - u32 i = 0; - bool autoneg, link_up = FALSE; - - DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber"); - - /* Mask off requested but non-supported speeds */ - status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg); - if (status != IXGBE_SUCCESS) - return status; - - speed &= link_speed; - - /* - * Try each speed one by one, highest priority first. 
We do this in - * software because 10gb fiber doesn't support speed autonegotiation. - */ - if (speed & IXGBE_LINK_SPEED_10GB_FULL) { - speedcnt++; - highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL; - - /* If we already have link at this speed, just jump out */ - status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE); - if (status != IXGBE_SUCCESS) - return status; - - if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up) - goto out; - - /* Set the module link speed */ - if (hw->phy.media_type == ixgbe_media_type_fiber_fixed) { - ixgbe_set_fiber_fixed_speed(hw, - IXGBE_LINK_SPEED_10GB_FULL); - } else { - esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5); - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); - IXGBE_WRITE_FLUSH(hw); - } - - /* Allow module to change analog characteristics (1G->10G) */ - msec_delay(40); - - status = ixgbe_setup_mac_link_82599(hw, - IXGBE_LINK_SPEED_10GB_FULL, - autoneg_wait_to_complete); - if (status != IXGBE_SUCCESS) - return status; - - /* Flap the tx laser if it has not already been done */ - ixgbe_flap_tx_laser(hw); - - /* - * Wait for the controller to acquire link. Per IEEE 802.3ap, - * Section 73.10.2, we may have to wait up to 500ms if KR is - * attempted. 82599 uses the same timing for 10g SFI. - */ - for (i = 0; i < 5; i++) { - /* Wait for the link partner to also set speed */ - msec_delay(100); - - /* If we have link, just jump out */ - status = ixgbe_check_link(hw, &link_speed, - &link_up, FALSE); - if (status != IXGBE_SUCCESS) - return status; - - if (link_up) - goto out; - } - } - - if (speed & IXGBE_LINK_SPEED_1GB_FULL) { - speedcnt++; - if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN) - highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL; - - /* If we already have link at this speed, just jump out */ - status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE); - if (status != IXGBE_SUCCESS) - return status; - - if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up) - goto out; - - /* Set the module link speed */ - if (hw->phy.media_type == ixgbe_media_type_fiber_fixed) { - ixgbe_set_fiber_fixed_speed(hw, - IXGBE_LINK_SPEED_1GB_FULL); - } else { - esdp_reg &= ~IXGBE_ESDP_SDP5; - esdp_reg |= IXGBE_ESDP_SDP5_DIR; - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); - IXGBE_WRITE_FLUSH(hw); - } - - /* Allow module to change analog characteristics (10G->1G) */ - msec_delay(40); - - status = ixgbe_setup_mac_link_82599(hw, - IXGBE_LINK_SPEED_1GB_FULL, - autoneg_wait_to_complete); - if (status != IXGBE_SUCCESS) - return status; - - /* Flap the tx laser if it has not already been done */ - ixgbe_flap_tx_laser(hw); - - /* Wait for the link partner to also set speed */ - msec_delay(100); - - /* If we have link, just jump out */ - status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE); - if (status != IXGBE_SUCCESS) - return status; - - if (link_up) - goto out; - } - - /* - * We didn't get link. Configure back to the highest speed we tried, - * (if there was more than one). We call ourselves back with just the - * single highest speed that the user requested. 
- */ - if (speedcnt > 1) - status = ixgbe_setup_mac_link_multispeed_fiber(hw, - highest_link_speed, autoneg_wait_to_complete); - -out: - /* Set autoneg_advertised value based on input link speed */ - hw->phy.autoneg_advertised = 0; - - if (speed & IXGBE_LINK_SPEED_10GB_FULL) - hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; - - if (speed & IXGBE_LINK_SPEED_1GB_FULL) - hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; - - return status; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); + IXGBE_WRITE_FLUSH(hw); } /** @@ -998,14 +888,15 @@ s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, { bool autoneg = FALSE; s32 status = IXGBE_SUCCESS; - u32 autoc, pma_pmd_1g, link_mode, start_autoc; + u32 pma_pmd_1g, link_mode; + u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); /* holds the value of AUTOC register at this current point in time */ + u32 orig_autoc = 0; /* holds the cached value of AUTOC register */ + u32 autoc = current_autoc; /* Temporary variable used for comparison purposes */ u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); - u32 orig_autoc = 0; u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; u32 links_reg; u32 i; ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; - bool got_lock = FALSE; DEBUGFUNC("ixgbe_setup_mac_link_82599"); @@ -1023,12 +914,10 @@ s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/ if (hw->mac.orig_link_settings_stored) - autoc = hw->mac.orig_autoc; + orig_autoc = hw->mac.orig_autoc; else - autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + orig_autoc = autoc; - orig_autoc = autoc; - start_autoc = hw->mac.cached_autoc; link_mode = autoc & IXGBE_AUTOC_LMS_MASK; pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; @@ -1061,39 +950,18 @@ s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) { autoc &= ~IXGBE_AUTOC_LMS_MASK; - if (autoneg) + if (autoneg || hw->phy.type == ixgbe_phy_qsfp_intel) autoc |= IXGBE_AUTOC_LMS_1G_AN; else autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN; } } - if (autoc != start_autoc) { - /* Need SW/FW semaphore around AUTOC writes if LESM is on, - * likewise reset_pipeline requires us to hold this lock as - * it also writes to AUTOC. 
- */ - if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { - status = hw->mac.ops.acquire_swfw_sync(hw, - IXGBE_GSSR_MAC_CSR_SM); - if (status != IXGBE_SUCCESS) { - status = IXGBE_ERR_SWFW_SYNC; - goto out; - } - - got_lock = TRUE; - } - + if (autoc != current_autoc) { /* Restart link */ - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); - hw->mac.cached_autoc = autoc; - ixgbe_reset_pipeline_82599(hw); - - if (got_lock) { - hw->mac.ops.release_swfw_sync(hw, - IXGBE_GSSR_MAC_CSR_SM); - got_lock = FALSE; - } + status = hw->mac.ops.prot_autoc_write(hw, autoc, FALSE); + if (status != IXGBE_SUCCESS) + goto out; /* Only poll for autoneg to complete if specified to do so */ if (autoneg_wait_to_complete) { @@ -1161,7 +1029,8 @@ s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) { ixgbe_link_speed link_speed; s32 status; - u32 ctrl, i, autoc2; + u32 ctrl = 0; + u32 i, autoc, autoc2; u32 curr_lms; bool link_up = FALSE; @@ -1197,11 +1066,7 @@ s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) hw->phy.ops.reset(hw); /* remember AUTOC from before we reset */ - if (hw->mac.cached_autoc) - curr_lms = hw->mac.cached_autoc & IXGBE_AUTOC_LMS_MASK; - else - curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & - IXGBE_AUTOC_LMS_MASK; + curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & IXGBE_AUTOC_LMS_MASK; mac_reset_top: /* @@ -1221,7 +1086,7 @@ mac_reset_top: IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); IXGBE_WRITE_FLUSH(hw); - /* Poll for reset bit to self-clear indicating reset is complete */ + /* Poll for reset bit to self-clear meaning reset is complete */ for (i = 0; i < 10; i++) { usec_delay(1); ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); @@ -1238,8 +1103,8 @@ mac_reset_top: /* * Double resets are required for recovery from certain error - * conditions. Between resets, it is necessary to stall to allow time - * for any pending HW events to complete. + * conditions. Between resets, it is necessary to stall to + * allow time for any pending HW events to complete. */ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; @@ -1251,7 +1116,7 @@ mac_reset_top: * stored off yet. Otherwise restore the stored original * values since the reset operation sets back to defaults. */ - hw->mac.cached_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); /* Enable link if disabled in NVM */ @@ -1262,7 +1127,7 @@ mac_reset_top: } if (hw->mac.orig_link_settings_stored == FALSE) { - hw->mac.orig_autoc = hw->mac.cached_autoc; + hw->mac.orig_autoc = autoc; hw->mac.orig_autoc2 = autoc2; hw->mac.orig_link_settings_stored = TRUE; } else { @@ -1273,36 +1138,18 @@ mac_reset_top: * Likewise if we support WoL we don't want change the * LMS state. */ - if ((hw->phy.multispeed_fiber && hw->mng_fw_enabled) || + if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) || hw->wol_enabled) hw->mac.orig_autoc = (hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) | curr_lms; - if (hw->mac.cached_autoc != hw->mac.orig_autoc) { - /* Need SW/FW semaphore around AUTOC writes if LESM is - * on, likewise reset_pipeline requires us to hold - * this lock as it also writes to AUTOC. 
- */ - bool got_lock = FALSE; - if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { - status = hw->mac.ops.acquire_swfw_sync(hw, - IXGBE_GSSR_MAC_CSR_SM); - if (status != IXGBE_SUCCESS) { - status = IXGBE_ERR_SWFW_SYNC; - goto reset_hw_out; - } - - got_lock = TRUE; - } - - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc); - hw->mac.cached_autoc = hw->mac.orig_autoc; - ixgbe_reset_pipeline_82599(hw); - - if (got_lock) - hw->mac.ops.release_swfw_sync(hw, - IXGBE_GSSR_MAC_CSR_SM); + if (autoc != hw->mac.orig_autoc) { + status = hw->mac.ops.prot_autoc_write(hw, + hw->mac.orig_autoc, + FALSE); + if (status != IXGBE_SUCCESS) + goto reset_hw_out; } if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) != @@ -1349,13 +1196,34 @@ reset_hw_out: } /** + * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete + * @hw: pointer to hardware structure + * @fdircmd: current value of FDIRCMD register + */ +static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd) +{ + int i; + + for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) { + *fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD); + if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK)) + return IXGBE_SUCCESS; + usec_delay(10); + } + + return IXGBE_ERR_FDIR_CMD_INCOMPLETE; +} + +/** * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables. * @hw: pointer to hardware structure **/ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) { + s32 err; int i; u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); + u32 fdircmd; fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE; DEBUGFUNC("ixgbe_reinit_fdir_tables_82599"); @@ -1364,16 +1232,10 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) * Before starting reinitialization process, * FDIRCMD.CMD must be zero. */ - for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) { - if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & - IXGBE_FDIRCMD_CMD_MASK)) - break; - usec_delay(10); - } - if (i >= IXGBE_FDIRCMD_CMD_POLL) { - DEBUGOUT("Flow Director previous command isn't complete, " - "aborting table re-initialization.\n"); - return IXGBE_ERR_FDIR_REINIT_FAILED; + err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd); + if (err) { + DEBUGOUT("Flow Director previous command did not complete, aborting table re-initialization.\n"); + return err; } IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0); @@ -1497,8 +1359,10 @@ s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl) * @hw: pointer to hardware structure * @fdirctrl: value to write to flow director control register, initially * contains just the value of the Rx packet buffer allocation + * @cloud_mode: TRUE - cloud mode, FALSE - other mode **/ -s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl) +s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl, + bool cloud_mode) { DEBUGFUNC("ixgbe_init_fdir_perfect_82599"); @@ -1518,6 +1382,10 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl) (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) | (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT); + if (cloud_mode) + fdirctrl |=(IXGBE_FDIRCTRL_FILTERMODE_CLOUD << + IXGBE_FDIRCTRL_FILTERMODE_SHIFT); + /* write hashes and fdirctrl register, poll for completion */ ixgbe_fdir_enable_82599(hw, fdirctrl); @@ -1546,14 +1414,14 @@ do { \ bucket_hash ^= hi_hash_dword >> n; \ else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \ sig_hash ^= hi_hash_dword << (16 - n); \ -} while (0); +} while (0) /** * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash * @stream: input bitstream to compute the hash on * * This function is almost identical to the 
function above but contains - * several optomizations such as unwinding all of the loops, letting the + * several optimizations such as unwinding all of the loops, letting the * compiler work out all of the conditional ifs since the keys are static * defines, and computing two keys at once since the hashed dword stream * will be the same for both keys. @@ -1582,7 +1450,7 @@ u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, /* * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to * delay this because bit 0 of the stream should not be processed - * so we do not add the vlan until after bit 0 was processed + * so we do not add the VLAN until after bit 0 was processed */ lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); @@ -1620,22 +1488,32 @@ u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, * @input: unique input dword * @common: compressed common input dword * @queue: queue index to direct traffic to + * + * Note that the tunnel bit in input must not be set when the hardware + * tunneling support does not exist. **/ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, union ixgbe_atr_hash_dword input, union ixgbe_atr_hash_dword common, u8 queue) { - u64 fdirhashcmd; - u32 fdircmd; + u64 fdirhashcmd; + u8 flow_type; + bool tunnel; + u32 fdircmd; + s32 err; DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599"); /* * Get the flow_type in order to program FDIRCMD properly * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 + * fifth is FDIRCMD.TUNNEL_FILTER */ - switch (input.formatted.flow_type) { + tunnel = !!(input.formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK); + flow_type = input.formatted.flow_type & + (IXGBE_ATR_L4TYPE_TUNNEL_MASK - 1); + switch (flow_type) { case IXGBE_ATR_FLOW_TYPE_TCPV4: case IXGBE_ATR_FLOW_TYPE_UDPV4: case IXGBE_ATR_FLOW_TYPE_SCTPV4: @@ -1651,8 +1529,10 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, /* configure FDIRCMD register */ fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; - fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; + fdircmd |= (u32)flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; + if (tunnel) + fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER; /* * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits @@ -1662,6 +1542,12 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common); IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd); + err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd); + if (err) { + DEBUGOUT("Flow Director command did not complete!\n"); + return err; + } + DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd); return IXGBE_SUCCESS; @@ -1674,14 +1560,14 @@ do { \ bucket_hash ^= lo_hash_dword >> n; \ if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ bucket_hash ^= hi_hash_dword >> n; \ -} while (0); +} while (0) /** * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash * @atr_input: input bitstream to compute the hash on * @input_mask: mask for the input bitstream * - * This function serves two main purposes. First it applys the input_mask + * This function serves two main purposes. First it applies the input_mask * to the atr_input resulting in a cleaned up atr_input data stream. * Secondly it computes the hash and stores it in the bkt_hash field at * the end of the input byte stream. 
This way it will be available for @@ -1693,34 +1579,20 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; u32 bucket_hash = 0; + u32 hi_dword = 0; + u32 i = 0; /* Apply masks to input data */ - input->dword_stream[0] &= input_mask->dword_stream[0]; - input->dword_stream[1] &= input_mask->dword_stream[1]; - input->dword_stream[2] &= input_mask->dword_stream[2]; - input->dword_stream[3] &= input_mask->dword_stream[3]; - input->dword_stream[4] &= input_mask->dword_stream[4]; - input->dword_stream[5] &= input_mask->dword_stream[5]; - input->dword_stream[6] &= input_mask->dword_stream[6]; - input->dword_stream[7] &= input_mask->dword_stream[7]; - input->dword_stream[8] &= input_mask->dword_stream[8]; - input->dword_stream[9] &= input_mask->dword_stream[9]; - input->dword_stream[10] &= input_mask->dword_stream[10]; + for (i = 0; i < 14; i++) + input->dword_stream[i] &= input_mask->dword_stream[i]; /* record the flow_vm_vlan bits as they are a key part to the hash */ flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]); /* generate common hash dword */ - hi_hash_dword = IXGBE_NTOHL(input->dword_stream[1] ^ - input->dword_stream[2] ^ - input->dword_stream[3] ^ - input->dword_stream[4] ^ - input->dword_stream[5] ^ - input->dword_stream[6] ^ - input->dword_stream[7] ^ - input->dword_stream[8] ^ - input->dword_stream[9] ^ - input->dword_stream[10]); + for (i = 1; i <= 13; i++) + hi_dword ^= input->dword_stream[i]; + hi_hash_dword = IXGBE_NTOHL(hi_dword); /* low dword is word swapped version of common */ lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); @@ -1734,26 +1606,13 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, /* * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to * delay this because bit 0 of the stream should not be processed - * so we do not add the vlan until after bit 0 was processed + * so we do not add the VLAN until after bit 0 was processed */ lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); /* Process remaining 30 bit of the key */ - IXGBE_COMPUTE_BKT_HASH_ITERATION(1); - IXGBE_COMPUTE_BKT_HASH_ITERATION(2); - IXGBE_COMPUTE_BKT_HASH_ITERATION(3); - IXGBE_COMPUTE_BKT_HASH_ITERATION(4); - IXGBE_COMPUTE_BKT_HASH_ITERATION(5); - IXGBE_COMPUTE_BKT_HASH_ITERATION(6); - IXGBE_COMPUTE_BKT_HASH_ITERATION(7); - IXGBE_COMPUTE_BKT_HASH_ITERATION(8); - IXGBE_COMPUTE_BKT_HASH_ITERATION(9); - IXGBE_COMPUTE_BKT_HASH_ITERATION(10); - IXGBE_COMPUTE_BKT_HASH_ITERATION(11); - IXGBE_COMPUTE_BKT_HASH_ITERATION(12); - IXGBE_COMPUTE_BKT_HASH_ITERATION(13); - IXGBE_COMPUTE_BKT_HASH_ITERATION(14); - IXGBE_COMPUTE_BKT_HASH_ITERATION(15); + for (i = 1; i <= 15; i++) + IXGBE_COMPUTE_BKT_HASH_ITERATION(i); /* * Limit hash to 13 bits since max bucket count is 8K. 
@@ -1763,7 +1622,7 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
 }

 /**
- * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks
+ * ixgbe_get_fdirtcpm_82599 - generate a TCP port from atr_input_masks
  * @input_mask: mask to be bit swapped
  *
  * The source and destination port masks for flow director are bit swapped
@@ -1800,12 +1659,12 @@ static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
 	IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))

 s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
-				    union ixgbe_atr_input *input_mask)
+				    union ixgbe_atr_input *input_mask, bool cloud_mode)
 {
 	/* mask IPv6 since it is currently not supported */
 	u32 fdirm = IXGBE_FDIRM_DIPv6;
 	u32 fdirtcpm;
-
+	u32 fdirip6m;
 	DEBUGFUNC("ixgbe_fdir_set_atr_input_mask_82599");

 	/*
@@ -1878,59 +1737,146 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
 		return IXGBE_ERR_CONFIG;
 	}

-	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
+	if (cloud_mode) {
+		fdirm |= IXGBE_FDIRM_L3P;
+		fdirip6m = ((u32) 0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
+		fdirip6m |= IXGBE_FDIRIP6M_ALWAYS_MASK;
+
+		switch (input_mask->formatted.inner_mac[0] & 0xFF) {
+		case 0x00:
+			/* Mask inner MAC, fall through */
+			fdirip6m |= IXGBE_FDIRIP6M_INNER_MAC;
+		case 0xFF:
+			break;
+		default:
+			DEBUGOUT(" Error on inner_mac byte mask\n");
+			return IXGBE_ERR_CONFIG;
+		}
+
+		switch (input_mask->formatted.tni_vni & 0xFFFFFFFF) {
+		case 0x0:
+			/* Mask vxlan id */
+			fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI;
+			break;
+		case 0x00FFFFFF:
+			fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI_24;
+			break;
+		case 0xFFFFFFFF:
+			break;
+		default:
+			DEBUGOUT(" Error on TNI/VNI byte mask\n");
+			return IXGBE_ERR_CONFIG;
+		}
+
+		switch (input_mask->formatted.tunnel_type & 0xFFFF) {
+		case 0x0:
+			/* Mask tunnel type, fall through */
+			fdirip6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
+		case 0xFFFF:
+			break;
+		default:
+			DEBUGOUT(" Error on tunnel type byte mask\n");
+			return IXGBE_ERR_CONFIG;
+		}
+		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, fdirip6m);
-	/* store the TCP/UDP port masks, bit reversed from port layout */
-	fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);
+
+		/* Set all bits in FDIRTCPM, FDIRUDPM, FDIRSIP4M and
+		 * FDIRDIP4M in cloud mode to allow L3/L3 packets to
+		 * tunnel.
+		 */
+		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF);
+		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF);
+		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF);
+		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF);
+	}

-	/* write both the same so that UDP and TCP use the same mask */
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
+	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);

-	/* store source and destination IP masks (big-enian) */
-	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
-			     ~input_mask->formatted.src_ip[0]);
-	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
-			     ~input_mask->formatted.dst_ip[0]);
+	if (!cloud_mode) {
+		/* store the TCP/UDP port masks, bit reversed from port
+		 * layout */
+		fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);
+
+		/* write both the same so that UDP and TCP use the same mask */
+		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
+		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
+		/* also use it for SCTP */
+		switch (hw->mac.type) {
+		case ixgbe_mac_X550:
+		case ixgbe_mac_X550EM_x:
+			IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
+			break;
+		default:
+			break;
+		}
+		/* store source and destination IP masks (big-endian) */
+		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
+				     ~input_mask->formatted.src_ip[0]);
+		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
+				     ~input_mask->formatted.dst_ip[0]);
+	}
 	return IXGBE_SUCCESS;
 }

 s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
 					  union ixgbe_atr_input *input,
-					  u16 soft_id, u8 queue)
+					  u16 soft_id, u8 queue, bool cloud_mode)
 {
 	u32 fdirport, fdirvlan, fdirhash, fdircmd;
+	u32 addr_low, addr_high;
+	u32 cloud_type = 0;
+	s32 err;

 	DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");

+	if (!cloud_mode) {
+		/* currently IPv6 is not supported, must be programmed with 0 */
+		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
+				     input->formatted.src_ip[0]);
+		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
+				     input->formatted.src_ip[1]);
+		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
+				     input->formatted.src_ip[2]);
+
+		/* record the source address (big-endian) */
+		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA,
+				     input->formatted.src_ip[0]);
+
+		/* record the first 32 bits of the destination address
+		 * (big-endian) */
+		IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA,
+				     input->formatted.dst_ip[0]);
+
+		/* record source and destination port (little-endian)*/
+		fdirport = IXGBE_NTOHS(input->formatted.dst_port);
+		fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
+		fdirport |= IXGBE_NTOHS(input->formatted.src_port);
+		IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
+	}

-	/* currently IPv6 is not supported, must be programmed with 0 */
-	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
-			     input->formatted.src_ip[0]);
-	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
-			     input->formatted.src_ip[1]);
-	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
-			     input->formatted.src_ip[2]);
-
-	/* record the source address (big-endian) */
-	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
-
-	/* record the first 32 bits of the destination address (big-endian) */
-	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
-
-	/* record source and destination port (little-endian)*/
-	fdirport = IXGBE_NTOHS(input->formatted.dst_port);
-	fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
-	fdirport |= IXGBE_NTOHS(input->formatted.src_port);
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
-
-	/* record vlan (little-endian) and flex_bytes(big-endian) */
+	/*
record VLAN (little-endian) and flex_bytes(big-endian) */ fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes); fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT; fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id); IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan); + if (cloud_mode) { + if (input->formatted.tunnel_type != 0) + cloud_type = 0x80000000; + + addr_low = ((u32)input->formatted.inner_mac[0] | + ((u32)input->formatted.inner_mac[1] << 8) | + ((u32)input->formatted.inner_mac[2] << 16) | + ((u32)input->formatted.inner_mac[3] << 24)); + addr_high = ((u32)input->formatted.inner_mac[4] | + ((u32)input->formatted.inner_mac[5] << 8)); + cloud_type |= addr_high; + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), addr_low); + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), cloud_type); + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), input->formatted.tni_vni); + } + /* configure FDIRHASH register */ fdirhash = input->formatted.bkt_hash; fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT; @@ -1947,11 +1893,18 @@ s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; if (queue == IXGBE_FDIR_DROP_QUEUE) fdircmd |= IXGBE_FDIRCMD_DROP; + if (input->formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK) + fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER; fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT; IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd); + err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd); + if (err) { + DEBUGOUT("Flow Director command did not complete!\n"); + return err; + } return IXGBE_SUCCESS; } @@ -1961,9 +1914,8 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, u16 soft_id) { u32 fdirhash; - u32 fdircmd = 0; - u32 retry_count; - s32 err = IXGBE_SUCCESS; + u32 fdircmd; + s32 err; /* configure FDIRHASH register */ fdirhash = input->formatted.bkt_hash; @@ -1976,18 +1928,12 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, /* Query if filter is present */ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT); - for (retry_count = 10; retry_count; retry_count--) { - /* allow 10us for query to process */ - usec_delay(10); - /* verify query completed successfully */ - fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD); - if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK)) - break; + err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd); + if (err) { + DEBUGOUT("Flow Director command did not complete!\n"); + return err; } - if (!retry_count) - err = IXGBE_ERR_FDIR_REINIT_FAILED; - /* if filter exists in hardware then remove it */ if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) { IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); @@ -1996,7 +1942,7 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, IXGBE_FDIRCMD_CMD_REMOVE_FLOW); } - return err; + return IXGBE_SUCCESS; } /** @@ -2013,7 +1959,7 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, union ixgbe_atr_input *input, union ixgbe_atr_input *input_mask, - u16 soft_id, u8 queue) + u16 soft_id, u8 queue, bool cloud_mode) { s32 err = IXGBE_ERR_CONFIG; @@ -2025,6 +1971,7 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, */ switch (input->formatted.flow_type) { case IXGBE_ATR_FLOW_TYPE_IPV4: + case IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4: input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK; if (input->formatted.dst_port || 
input->formatted.src_port) { DEBUGOUT(" Error on src/dst port\n"); @@ -2032,12 +1979,15 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, } break; case IXGBE_ATR_FLOW_TYPE_SCTPV4: + case IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4: if (input->formatted.dst_port || input->formatted.src_port) { DEBUGOUT(" Error on src/dst port\n"); return IXGBE_ERR_CONFIG; } case IXGBE_ATR_FLOW_TYPE_TCPV4: + case IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4: case IXGBE_ATR_FLOW_TYPE_UDPV4: + case IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4: input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK | IXGBE_ATR_L4TYPE_MASK; break; @@ -2047,7 +1997,7 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, } /* program input mask into the HW */ - err = ixgbe_fdir_set_input_mask_82599(hw, input_mask); + err = ixgbe_fdir_set_input_mask_82599(hw, input_mask, cloud_mode); if (err) return err; @@ -2056,7 +2006,7 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, /* program filters to filter memory */ return ixgbe_fdir_write_perfect_filter_82599(hw, input, - soft_id, queue); + soft_id, queue, cloud_mode); } /** @@ -2146,7 +2096,7 @@ out: **/ s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) { - s32 status = IXGBE_ERR_PHY_ADDR_INVALID; + s32 status; DEBUGFUNC("ixgbe_identify_phy_82599"); @@ -2155,7 +2105,7 @@ s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) if (status != IXGBE_SUCCESS) { /* 82599 10GBASE-T requires an external PHY */ if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) - goto out; + return status; else status = ixgbe_identify_module_generic(hw); } @@ -2163,14 +2113,13 @@ s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) /* Set PHY type none if no PHY detected */ if (hw->phy.type == ixgbe_phy_unknown) { hw->phy.type = ixgbe_phy_none; - status = IXGBE_SUCCESS; + return IXGBE_SUCCESS; } /* Return error if SFP module has been detected but is not supported */ if (hw->phy.type == ixgbe_phy_sfp_unsupported) - status = IXGBE_ERR_SFP_NOT_SUPPORTED; + return IXGBE_ERR_SFP_NOT_SUPPORTED; -out: return status; } @@ -2189,8 +2138,6 @@ u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw) u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK; u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; u16 ext_ability = 0; - u8 comp_codes_10g = 0; - u8 comp_codes_1g = 0; DEBUGFUNC("ixgbe_get_support_physical_layer_82599"); @@ -2258,40 +2205,7 @@ sfp_check: /* SFP check must be done last since DA modules are sometimes used to * test KR mode - we need to id KR mode correctly before SFP module. 
* Call identify_sfp because the pluggable module may have changed */ - hw->phy.ops.identify_sfp(hw); - if (hw->phy.sfp_type == ixgbe_sfp_type_not_present) - goto out; - - switch (hw->phy.type) { - case ixgbe_phy_sfp_passive_tyco: - case ixgbe_phy_sfp_passive_unknown: - physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; - break; - case ixgbe_phy_sfp_ftl_active: - case ixgbe_phy_sfp_active_unknown: - physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA; - break; - case ixgbe_phy_sfp_avago: - case ixgbe_phy_sfp_ftl: - case ixgbe_phy_sfp_intel: - case ixgbe_phy_sfp_unknown: - hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g); - hw->phy.ops.read_i2c_eeprom(hw, - IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g); - if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) - physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; - else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) - physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; - else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) - physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T; - else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) - physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_SX; - break; - default: - break; - } - + physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw); out: return physical_layer; } @@ -2317,7 +2231,10 @@ s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval) hw->mac.ops.disable_sec_rx_path(hw); - IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval); + if (regval & IXGBE_RXCTRL_RXEN) + ixgbe_enable_rx(hw); + else + ixgbe_disable_rx(hw); hw->mac.ops.enable_sec_rx_path(hw); @@ -2325,7 +2242,7 @@ s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval) } /** - * ixgbe_verify_fw_version_82599 - verify fw version for 82599 + * ixgbe_verify_fw_version_82599 - verify FW version for 82599 * @hw: pointer to hardware structure * * Verifies that installed the firmware version is 0.6 or higher @@ -2419,7 +2336,7 @@ bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw) (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF)) goto out; - /* get the lesm state word */ + /* get the LESM state word */ status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset + IXGBE_FW_LESM_STATE_1), &fw_lesm_state); @@ -2504,7 +2421,7 @@ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw, * @hw: pointer to hardware structure * * Reset pipeline by asserting Restart_AN together with LMS change to ensure - * full pipeline reset + * full pipeline reset. This function assumes the SW/FW lock is held. **/ s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw) { @@ -2520,10 +2437,11 @@ s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw) IXGBE_WRITE_FLUSH(hw); } - autoc_reg = hw->mac.cached_autoc; + autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); autoc_reg |= IXGBE_AUTOC_AN_RESTART; /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */ - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg ^ IXGBE_AUTOC_LMS_1G_AN); + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, + autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT)); /* Wait for AN to leave state 0 */ for (i = 0; i < 10; i++) { msec_delay(4); @@ -2548,5 +2466,116 @@ reset_pipeline_out: return ret_val; } +/** + * ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface at + * a specified device address. 
+ **/
+static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+				     u8 dev_addr, u8 *data)
+{
+	u32 esdp;
+	s32 status;
+	s32 timeout = 200;
+
+	DEBUGFUNC("ixgbe_read_i2c_byte_82599");
+
+	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
+		/* Acquire I2C bus ownership. */
+		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+		esdp |= IXGBE_ESDP_SDP0;
+		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+		IXGBE_WRITE_FLUSH(hw);
+
+		while (timeout) {
+			esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+			if (esdp & IXGBE_ESDP_SDP1)
+				break;
+
+			msec_delay(5);
+			timeout--;
+		}
+
+		if (!timeout) {
+			DEBUGOUT("Driver can't access resource:"
+				 " I2C bus acquisition timed out.\n");
+			status = IXGBE_ERR_I2C;
+			goto release_i2c_access;
+		}
+	}
+
+	status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data);
+
+release_i2c_access:
+
+	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
+		/* Release I2C bus ownership. */
+		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+		esdp &= ~IXGBE_ESDP_SDP0;
+		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+		IXGBE_WRITE_FLUSH(hw);
+	}
+
+	return status;
+}
+
+/**
+ * ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @data: value to write
+ *
+ * Performs byte write operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ **/
+static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+				      u8 dev_addr, u8 data)
+{
+	u32 esdp;
+	s32 status;
+	s32 timeout = 200;
+
+	DEBUGFUNC("ixgbe_write_i2c_byte_82599");
+
+	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
+		/* Acquire I2C bus ownership. */
+		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+		esdp |= IXGBE_ESDP_SDP0;
+		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+		IXGBE_WRITE_FLUSH(hw);
+
+		while (timeout) {
+			esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+			if (esdp & IXGBE_ESDP_SDP1)
+				break;
+
+			msec_delay(5);
+			timeout--;
+		}
+
+		if (!timeout) {
+			DEBUGOUT("Driver can't access resource:"
+				 " I2C bus acquisition timed out.\n");
+			status = IXGBE_ERR_I2C;
+			goto release_i2c_access;
+		}
+	}
+
+	status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data);
+release_i2c_access:
+	if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
+		/* Release I2C bus ownership. */
+		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+		esdp &= ~IXGBE_ESDP_SDP0;
+		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+		IXGBE_WRITE_FLUSH(hw);
+	}
+
+	return status;
+}
diff --git a/sys/dev/ixgbe/ixgbe_82599.h b/sys/dev/ixgbe/ixgbe_82599.h
index a77d153..bcfb043 100644
--- a/sys/dev/ixgbe/ixgbe_82599.h
+++ b/sys/dev/ixgbe/ixgbe_82599.h
@@ -1,6 +1,6 @@
 /******************************************************************************

-  Copyright (c) 2001-2013, Intel Corporation
+  Copyright (c) 2001-2015, Intel Corporation
   All rights reserved.
Redistribution and use in source and binary forms, with or without @@ -41,9 +41,8 @@ enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw); void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); -s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete); +void ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw, + ixgbe_link_speed speed); s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete); @@ -61,4 +60,6 @@ s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw); s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw); u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw); s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval); +s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val); +s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 reg_val, bool locked); #endif /* _IXGBE_82599_H_ */ diff --git a/sys/dev/ixgbe/ixgbe_api.c b/sys/dev/ixgbe/ixgbe_api.c index 925866b..9784e3c 100644 --- a/sys/dev/ixgbe/ixgbe_api.c +++ b/sys/dev/ixgbe/ixgbe_api.c @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2013, Intel Corporation + Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without @@ -35,6 +35,22 @@ #include "ixgbe_api.h" #include "ixgbe_common.h" +static const u32 ixgbe_mvals_base[IXGBE_MVALS_IDX_LIMIT] = { + IXGBE_MVALS_INIT() +}; + +static const u32 ixgbe_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = { + IXGBE_MVALS_INIT(_X540) +}; + +static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = { + IXGBE_MVALS_INIT(_X550) +}; + +static const u32 ixgbe_mvals_X550EM_x[IXGBE_MVALS_IDX_LIMIT] = { + IXGBE_MVALS_INIT(_X550EM_x) +}; + /** * ixgbe_dcb_get_rtrup2tc - read rtrup2tc reg * @hw: pointer to hardware structure @@ -78,13 +94,21 @@ s32 ixgbe_init_shared_code(struct ixgbe_hw *hw) case ixgbe_mac_82599EB: status = ixgbe_init_ops_82599(hw); break; + case ixgbe_mac_X540: + status = ixgbe_init_ops_X540(hw); + break; + case ixgbe_mac_X550: + status = ixgbe_init_ops_X550(hw); + break; + case ixgbe_mac_X550EM_x: + status = ixgbe_init_ops_X550EM(hw); + break; case ixgbe_mac_82599_vf: case ixgbe_mac_X540_vf: + case ixgbe_mac_X550_vf: + case ixgbe_mac_X550EM_x_vf: status = ixgbe_init_ops_vf(hw); break; - case ixgbe_mac_X540: - status = ixgbe_init_ops_X540(hw); - break; default: status = IXGBE_ERR_DEVICE_NOT_SUPPORTED; break; @@ -112,6 +136,8 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw) return IXGBE_ERR_DEVICE_NOT_SUPPORTED; } + hw->mvals = ixgbe_mvals_base; + switch (hw->device_id) { case IXGBE_DEV_ID_82598: case IXGBE_DEV_ID_82598_BX: @@ -138,6 +164,7 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw) case IXGBE_DEV_ID_82599_SFP_EM: case IXGBE_DEV_ID_82599_SFP_SF2: case IXGBE_DEV_ID_82599_SFP_SF_QP: + case IXGBE_DEV_ID_82599_QSFP_SF_QP: case IXGBE_DEV_ID_82599EN_SFP: case IXGBE_DEV_ID_82599_CX4: case IXGBE_DEV_ID_82599_BYPASS: @@ -151,10 +178,35 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw) case IXGBE_DEV_ID_X540_VF: case IXGBE_DEV_ID_X540_VF_HV: hw->mac.type = ixgbe_mac_X540_vf; + hw->mvals = ixgbe_mvals_X540; break; case IXGBE_DEV_ID_X540T: + case IXGBE_DEV_ID_X540T1: case IXGBE_DEV_ID_X540_BYPASS: hw->mac.type = ixgbe_mac_X540; + hw->mvals = ixgbe_mvals_X540; + break; 
+	case IXGBE_DEV_ID_X550T:
+		hw->mac.type = ixgbe_mac_X550;
+		hw->mvals = ixgbe_mvals_X550;
+		break;
+	case IXGBE_DEV_ID_X550EM_X_KX4:
+	case IXGBE_DEV_ID_X550EM_X_KR:
+	case IXGBE_DEV_ID_X550EM_X_10G_T:
+	case IXGBE_DEV_ID_X550EM_X_1G_T:
+	case IXGBE_DEV_ID_X550EM_X_SFP:
+		hw->mac.type = ixgbe_mac_X550EM_x;
+		hw->mvals = ixgbe_mvals_X550EM_x;
+		break;
+	case IXGBE_DEV_ID_X550_VF:
+	case IXGBE_DEV_ID_X550_VF_HV:
+		hw->mac.type = ixgbe_mac_X550_vf;
+		hw->mvals = ixgbe_mvals_X550;
+		break;
+	case IXGBE_DEV_ID_X550EM_X_VF:
+	case IXGBE_DEV_ID_X550EM_X_VF_HV:
+		hw->mac.type = ixgbe_mac_X550EM_x_vf;
+		hw->mvals = ixgbe_mvals_X550EM_x;
 		break;
 	default:
 		ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
@@ -512,6 +564,20 @@ s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw)
 }

 /**
+ * ixgbe_setup_internal_phy - Configure integrated PHY
+ * @hw: pointer to hardware structure
+ *
+ * Reconfigure the integrated PHY in order to enable talking to the external
+ * PHY. Returns success if not implemented, since nothing needs to be done in
+ * this case.
+ */
+s32 ixgbe_setup_internal_phy(struct ixgbe_hw *hw)
+{
+	return ixgbe_call_func(hw, hw->phy.ops.setup_internal_link, (hw),
+			       IXGBE_SUCCESS);
+}
+
+/**
  * ixgbe_check_phy_link - Determine link and speed status
  * @hw: pointer to hardware structure
  *
@@ -541,6 +607,17 @@ s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed,
 }

 /**
+ * ixgbe_set_phy_power - Control the PHY power state
+ * @hw: pointer to hardware structure
+ * @on: TRUE for on, FALSE for off
+ */
+s32 ixgbe_set_phy_power(struct ixgbe_hw *hw, bool on)
+{
+	return ixgbe_call_func(hw, hw->phy.ops.set_phy_power, (hw, on),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
  * ixgbe_check_link - Get link and speed status
  * @hw: pointer to hardware structure
  *
@@ -609,6 +686,22 @@ s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
 }

 /**
+ * ixgbe_setup_mac_link - Set link speed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ *
+ * Configures link settings. Restarts the link.
+ * Performs autonegotiation if needed.
+ **/
+s32 ixgbe_setup_mac_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+			 bool autoneg_wait_to_complete)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.setup_mac_link, (hw, speed,
+			       autoneg_wait_to_complete),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
  * ixgbe_get_link_capabilities - Returns link capabilities
  * @hw: pointer to hardware structure
  *
@@ -1002,6 +1095,18 @@ s32 ixgbe_fc_enable(struct ixgbe_hw *hw)
 }

 /**
+ * ixgbe_setup_fc - Set up flow control
+ * @hw: pointer to hardware structure
+ *
+ * Called at init time to set up flow control.
+ **/
+s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.setup_fc, (hw),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
  * ixgbe_set_fw_drv_ver - Try to send the driver version number FW
  * @hw: pointer to hardware structure
  * @maj: driver major number to be sent to firmware
@@ -1018,6 +1123,194 @@ s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,

+/**
+ * ixgbe_dmac_config - Configure DMA Coalescing registers.
+ * @hw: pointer to hardware structure
+ *
+ * Configure DMA coalescing. If enabling dmac, dmac is activated.
+ * When disabling dmac, the dmac enable bit is cleared.
+ **/
+s32 ixgbe_dmac_config(struct ixgbe_hw *hw)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.dmac_config, (hw),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_dmac_update_tcs - Configure DMA Coalescing registers.
+ * @hw: pointer to hardware structure
+ *
+ * Disables dmac, updates per TC settings, and then enables dmac.
+ **/
+s32 ixgbe_dmac_update_tcs(struct ixgbe_hw *hw)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.dmac_update_tcs, (hw),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_dmac_config_tcs - Configure DMA Coalescing registers.
+ * @hw: pointer to hardware structure
+ *
+ * Configure DMA coalescing threshold per TC and set high priority bit for
+ * FCOE TC. The dmac enable bit must be cleared before configuring.
+ **/
+s32 ixgbe_dmac_config_tcs(struct ixgbe_hw *hw)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.dmac_config_tcs, (hw),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_setup_eee - Enable/disable EEE support
+ * @hw: pointer to the HW structure
+ * @enable_eee: boolean flag to enable EEE
+ *
+ * Enable/disable EEE based on the enable_eee flag.
+ * Auto-negotiation must be started after BASE-T EEE bits in PHY register 7.3C
+ * are modified.
+ *
+ **/
+s32 ixgbe_setup_eee(struct ixgbe_hw *hw, bool enable_eee)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.setup_eee, (hw, enable_eee),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_set_source_address_pruning - Enable/Disable source address pruning
+ * @hw: pointer to hardware structure
+ * @enable: enable or disable source address pruning
+ * @pool: Rx pool - Rx pool to toggle source address pruning
+ **/
+void ixgbe_set_source_address_pruning(struct ixgbe_hw *hw, bool enable,
+				      unsigned int pool)
+{
+	if (hw->mac.ops.set_source_address_pruning)
+		hw->mac.ops.set_source_address_pruning(hw, enable, pool);
+}
+
+/**
+ * ixgbe_set_ethertype_anti_spoofing - Enable/Disable Ethertype anti-spoofing
+ * @hw: pointer to hardware structure
+ * @enable: enable or disable switch for Ethertype anti-spoofing
+ * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
+ *
+ **/
+void ixgbe_set_ethertype_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
+{
+	if (hw->mac.ops.set_ethertype_anti_spoofing)
+		hw->mac.ops.set_ethertype_anti_spoofing(hw, enable, vf);
+}
+
+/**
+ * ixgbe_read_iosf_sb_reg - Read 32 bit PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit address of PHY register to read
+ * @device_type: type of device you want to communicate with
+ * @phy_data: Pointer to read data from PHY register
+ *
+ * Reads a value from a specified PHY register
+ **/
+s32 ixgbe_read_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr,
+			   u32 device_type, u32 *phy_data)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.read_iosf_sb_reg, (hw, reg_addr,
+			       device_type, phy_data), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_write_iosf_sb_reg - Write 32 bit register through IOSF Sideband
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: type of device you want to communicate with
+ * @phy_data: Data to write to the PHY register
+ *
+ * Writes a value to specified PHY register
+ **/
+s32 ixgbe_write_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr,
+			    u32 device_type, u32 phy_data)
+{
+	return ixgbe_call_func(hw, hw->mac.ops.write_iosf_sb_reg, (hw, reg_addr,
+			       device_type, phy_data), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_disable_mdd - Disable malicious driver detection
+ * @hw: pointer to hardware structure
+ *
+ **/
+void ixgbe_disable_mdd(struct ixgbe_hw *hw)
+{
+	if (hw->mac.ops.disable_mdd)
+		hw->mac.ops.disable_mdd(hw);
+}
+
+/**
+ * ixgbe_enable_mdd - Enable malicious driver detection
+ * @hw: pointer to hardware structure
+ *
+ **/
+void ixgbe_enable_mdd(struct ixgbe_hw *hw)
+{
+	if
(hw->mac.ops.enable_mdd)
+		hw->mac.ops.enable_mdd(hw);
+}
+
+/**
+ * ixgbe_mdd_event - Handle malicious driver detection event
+ * @hw: pointer to hardware structure
+ * @vf_bitmap: bitmap of malicious VFs
+ *
+ **/
+void ixgbe_mdd_event(struct ixgbe_hw *hw, u32 *vf_bitmap)
+{
+	if (hw->mac.ops.mdd_event)
+		hw->mac.ops.mdd_event(hw, vf_bitmap);
+}
+
+/**
+ * ixgbe_restore_mdd_vf - Restore VF that was disabled during malicious driver
+ * detection event
+ * @hw: pointer to hardware structure
+ * @vf: VF index
+ *
+ **/
+void ixgbe_restore_mdd_vf(struct ixgbe_hw *hw, u32 vf)
+{
+	if (hw->mac.ops.restore_mdd_vf)
+		hw->mac.ops.restore_mdd_vf(hw, vf);
+}
+
+/**
+ * ixgbe_enter_lplu - Transition to low power states
+ * @hw: pointer to hardware structure
+ *
+ * Configures Low Power Link Up on transition to low power states
+ * (from D0 to non-D0).
+ **/
+s32 ixgbe_enter_lplu(struct ixgbe_hw *hw)
+{
+	return ixgbe_call_func(hw, hw->phy.ops.enter_lplu, (hw),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_handle_lasi - Handle external Base T PHY interrupt
+ * @hw: pointer to hardware structure
+ *
+ * Handle external Base T PHY interrupt. If a high temperature failure alarm
+ * is raised, return an error; if the link status changed, set up the
+ * internal/external PHY link.
+ *
+ * Return IXGBE_ERR_OVERTEMP if the interrupt is a high temperature
+ * failure alarm, else return the PHY access status.
+ */
+s32 ixgbe_handle_lasi(struct ixgbe_hw *hw)
+{
+	return ixgbe_call_func(hw, hw->phy.ops.handle_lasi, (hw),
+			       IXGBE_NOT_IMPLEMENTED);
+}

 /**
  * ixgbe_read_analog_reg8 - Reads 8 bit analog register
@@ -1064,6 +1357,7 @@ s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw)
  * ixgbe_read_i2c_byte - Reads 8 bit word over I2C at specified device address
  * @hw: pointer to hardware structure
  * @byte_offset: byte offset to read
+ * @dev_addr: I2C bus address to read from
  * @data: value read
  *
  * Performs byte read operation to SFP module's EEPROM over I2C interface.
@@ -1076,9 +1370,59 @@ s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
 }

 /**
+ * ixgbe_read_i2c_byte_unlocked - Reads 8 bit word via I2C from device address
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to read
+ * @dev_addr: I2C bus address to read from
+ * @data: value read
+ *
+ * Performs byte read operation to SFP module's EEPROM over I2C interface.
+ **/
+s32 ixgbe_read_i2c_byte_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+				 u8 dev_addr, u8 *data)
+{
+	return ixgbe_call_func(hw, hw->phy.ops.read_i2c_byte_unlocked,
+			       (hw, byte_offset, dev_addr, data),
+			       IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_read_i2c_combined - Perform I2C read combined operation
+ * @hw: pointer to the hardware structure
+ * @addr: I2C bus address to read from
+ * @reg: I2C device register to read from
+ * @val: pointer to location to receive read value
+ *
+ * Returns an error code on error.
+ */
+s32 ixgbe_read_i2c_combined(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val)
+{
+	return ixgbe_call_func(hw, hw->phy.ops.read_i2c_combined, (hw, addr,
+			       reg, val), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_read_i2c_combined_unlocked - Perform I2C read combined operation
+ * @hw: pointer to the hardware structure
+ * @addr: I2C bus address to read from
+ * @reg: I2C device register to read from
+ * @val: pointer to location to receive read value
+ *
+ * Returns an error code on error.
+ **/ +s32 ixgbe_read_i2c_combined_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg, + u16 *val) +{ + return ixgbe_call_func(hw, hw->phy.ops.read_i2c_combined_unlocked, + (hw, addr, reg, val), + IXGBE_NOT_IMPLEMENTED); +} + +/** * ixgbe_write_i2c_byte - Writes 8 bit word over I2C * @hw: pointer to hardware structure * @byte_offset: byte offset to write + * @dev_addr: I2C bus address to write to * @data: value to write * * Performs byte write operation to SFP module's EEPROM over I2C interface @@ -1092,6 +1436,55 @@ s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, } /** + * ixgbe_write_i2c_byte_unlocked - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: I2C bus address to write to + * @data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface + * at a specified device address. + **/ +s32 ixgbe_write_i2c_byte_unlocked(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data) +{ + return ixgbe_call_func(hw, hw->phy.ops.write_i2c_byte_unlocked, + (hw, byte_offset, dev_addr, data), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_write_i2c_combined - Perform I2C write combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to write to + * @reg: I2C device register to write to + * @val: value to write + * + * Returns an error code on error. + */ +s32 ixgbe_write_i2c_combined(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val) +{ + return ixgbe_call_func(hw, hw->phy.ops.write_i2c_combined, (hw, addr, + reg, val), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_write_i2c_combined_unlocked - Perform I2C write combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to write to + * @reg: I2C device register to write to + * @val: value to write + * + * Returns an error code on error. + **/ +s32 ixgbe_write_i2c_combined_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg, + u16 val) +{ + return ixgbe_call_func(hw, hw->phy.ops.write_i2c_combined_unlocked, + (hw, addr, reg, val), IXGBE_NOT_IMPLEMENTED); +} + +/** * ixgbe_write_i2c_eeprom - Writes 8 bit EEPROM word over I2C interface * @hw: pointer to hardware structure * @byte_offset: EEPROM byte offset to write @@ -1179,7 +1572,7 @@ s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw) * Acquires the SWFW semaphore through SW_FW_SYNC register for the specified * function (CSR, PHY0, PHY1, EEPROM, Flash) **/ -s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u16 mask) +s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u32 mask) { return ixgbe_call_func(hw, hw->mac.ops.acquire_swfw_sync, (hw, mask), IXGBE_NOT_IMPLEMENTED); @@ -1193,9 +1586,34 @@ s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u16 mask) * Releases the SWFW semaphore through SW_FW_SYNC register for the specified * function (CSR, PHY0, PHY1, EEPROM, Flash) **/ -void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u16 mask) +void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u32 mask) { if (hw->mac.ops.release_swfw_sync) hw->mac.ops.release_swfw_sync(hw, mask); } + +void ixgbe_disable_rx(struct ixgbe_hw *hw) +{ + if (hw->mac.ops.disable_rx) + hw->mac.ops.disable_rx(hw); +} + +void ixgbe_enable_rx(struct ixgbe_hw *hw) +{ + if (hw->mac.ops.enable_rx) + hw->mac.ops.enable_rx(hw); +} + +/** + * ixgbe_set_rate_select_speed - Set module link speed + * @hw: pointer to hardware structure + * @speed: link speed to set + * + * Set module link speed via the rate select. 
+ */ +void ixgbe_set_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed) +{ + if (hw->mac.ops.set_rate_select_speed) + hw->mac.ops.set_rate_select_speed(hw, speed); +} diff --git a/sys/dev/ixgbe/ixgbe_api.h b/sys/dev/ixgbe/ixgbe_api.h index 91023ae..8c2c4a8 100644 --- a/sys/dev/ixgbe/ixgbe_api.h +++ b/sys/dev/ixgbe/ixgbe_api.h @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2013, Intel Corporation + Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without @@ -44,6 +44,8 @@ s32 ixgbe_init_shared_code(struct ixgbe_hw *hw); extern s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw); extern s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw); extern s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw); +extern s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw); +extern s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw); extern s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw); s32 ixgbe_set_mac_type(struct ixgbe_hw *hw); @@ -69,17 +71,21 @@ s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 phy_data); s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw); +s32 ixgbe_setup_internal_phy(struct ixgbe_hw *hw); s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *link_up); s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete); +s32 ixgbe_set_phy_power(struct ixgbe_hw *, bool on); void ixgbe_disable_tx_laser(struct ixgbe_hw *hw); void ixgbe_enable_tx_laser(struct ixgbe_hw *hw); void ixgbe_flap_tx_laser(struct ixgbe_hw *hw); s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete); +s32 ixgbe_setup_mac_link(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait_to_complete); s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *link_up, bool link_up_wait_to_complete); s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed, @@ -123,6 +129,7 @@ s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on, bool *vfta_changed); s32 ixgbe_fc_enable(struct ixgbe_hw *hw); +s32 ixgbe_setup_fc(struct ixgbe_hw *hw); s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build, u8 ver); void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr); @@ -139,16 +146,17 @@ s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw); s32 ixgbe_mng_fw_enabled(struct ixgbe_hw *hw); s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw); s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl); -s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl); +s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl, + bool cloud_mode); s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, union ixgbe_atr_hash_dword input, union ixgbe_atr_hash_dword common, u8 queue); s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, - union ixgbe_atr_input *input_mask); + union ixgbe_atr_input *input_mask, bool cloud_mode); s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, union ixgbe_atr_input *input, - u16 soft_id, u8 queue); + u16 soft_id, u8 queue, bool cloud_mode); s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, union ixgbe_atr_input *input, u16 soft_id); @@ -156,7 +164,8 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, union ixgbe_atr_input *input, union ixgbe_atr_input *mask, u16 
soft_id, - u8 queue); + u8 queue, + bool cloud_mode); void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, union ixgbe_atr_input *mask); u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, @@ -164,16 +173,47 @@ u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw); s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 *data); +s32 ixgbe_read_i2c_byte_unlocked(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data); +s32 ixgbe_read_i2c_combined(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val); +s32 ixgbe_read_i2c_combined_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg, + u16 *val); s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 data); +s32 ixgbe_write_i2c_byte_unlocked(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); +s32 ixgbe_write_i2c_combined(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val); +s32 ixgbe_write_i2c_combined_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg, + u16 val); s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 eeprom_data); s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr); s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr); s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps); -s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u16 mask); -void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u16 mask); +s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u32 mask); +void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u32 mask); s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix, u16 *wwpn_prefix); s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs); +s32 ixgbe_dmac_config(struct ixgbe_hw *hw); +s32 ixgbe_dmac_update_tcs(struct ixgbe_hw *hw); +s32 ixgbe_dmac_config_tcs(struct ixgbe_hw *hw); +s32 ixgbe_setup_eee(struct ixgbe_hw *hw, bool enable_eee); +void ixgbe_set_source_address_pruning(struct ixgbe_hw *hw, bool enable, + unsigned int vf); +void ixgbe_set_ethertype_anti_spoofing(struct ixgbe_hw *hw, bool enable, + int vf); +s32 ixgbe_read_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 *phy_data); +s32 ixgbe_write_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 phy_data); +void ixgbe_disable_mdd(struct ixgbe_hw *hw); +void ixgbe_enable_mdd(struct ixgbe_hw *hw); +void ixgbe_mdd_event(struct ixgbe_hw *hw, u32 *vf_bitmap); +void ixgbe_restore_mdd_vf(struct ixgbe_hw *hw, u32 vf); +s32 ixgbe_enter_lplu(struct ixgbe_hw *hw); +s32 ixgbe_handle_lasi(struct ixgbe_hw *hw); +void ixgbe_set_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed); +void ixgbe_disable_rx(struct ixgbe_hw *hw); +void ixgbe_enable_rx(struct ixgbe_hw *hw); #endif /* _IXGBE_API_H_ */ diff --git a/sys/dev/ixgbe/ixgbe_common.c b/sys/dev/ixgbe/ixgbe_common.c index 1734345..f0a0776 100644 --- a/sys/dev/ixgbe/ixgbe_common.c +++ b/sys/dev/ixgbe/ixgbe_common.c @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2013, Intel Corporation + Copyright (c) 2001-2015, Intel Corporation All rights reserved. 
Redistribution and use in source and binary forms, with or without @@ -75,62 +75,67 @@ s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw) DEBUGFUNC("ixgbe_init_ops_generic"); /* EEPROM */ - eeprom->ops.init_params = &ixgbe_init_eeprom_params_generic; + eeprom->ops.init_params = ixgbe_init_eeprom_params_generic; /* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */ if (eec & IXGBE_EEC_PRES) { - eeprom->ops.read = &ixgbe_read_eerd_generic; - eeprom->ops.read_buffer = &ixgbe_read_eerd_buffer_generic; + eeprom->ops.read = ixgbe_read_eerd_generic; + eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_generic; } else { - eeprom->ops.read = &ixgbe_read_eeprom_bit_bang_generic; + eeprom->ops.read = ixgbe_read_eeprom_bit_bang_generic; eeprom->ops.read_buffer = - &ixgbe_read_eeprom_buffer_bit_bang_generic; + ixgbe_read_eeprom_buffer_bit_bang_generic; } - eeprom->ops.write = &ixgbe_write_eeprom_generic; - eeprom->ops.write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic; + eeprom->ops.write = ixgbe_write_eeprom_generic; + eeprom->ops.write_buffer = ixgbe_write_eeprom_buffer_bit_bang_generic; eeprom->ops.validate_checksum = - &ixgbe_validate_eeprom_checksum_generic; - eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_generic; - eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_generic; + ixgbe_validate_eeprom_checksum_generic; + eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_generic; + eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_generic; /* MAC */ - mac->ops.init_hw = &ixgbe_init_hw_generic; + mac->ops.init_hw = ixgbe_init_hw_generic; mac->ops.reset_hw = NULL; - mac->ops.start_hw = &ixgbe_start_hw_generic; - mac->ops.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic; + mac->ops.start_hw = ixgbe_start_hw_generic; + mac->ops.clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic; mac->ops.get_media_type = NULL; mac->ops.get_supported_physical_layer = NULL; - mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_generic; - mac->ops.get_mac_addr = &ixgbe_get_mac_addr_generic; - mac->ops.stop_adapter = &ixgbe_stop_adapter_generic; - mac->ops.get_bus_info = &ixgbe_get_bus_info_generic; - mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie; - mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync; - mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync; + mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_generic; + mac->ops.get_mac_addr = ixgbe_get_mac_addr_generic; + mac->ops.stop_adapter = ixgbe_stop_adapter_generic; + mac->ops.get_bus_info = ixgbe_get_bus_info_generic; + mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie; + mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync; + mac->ops.release_swfw_sync = ixgbe_release_swfw_sync; + mac->ops.prot_autoc_read = prot_autoc_read_generic; + mac->ops.prot_autoc_write = prot_autoc_write_generic; /* LEDs */ - mac->ops.led_on = &ixgbe_led_on_generic; - mac->ops.led_off = &ixgbe_led_off_generic; - mac->ops.blink_led_start = &ixgbe_blink_led_start_generic; - mac->ops.blink_led_stop = &ixgbe_blink_led_stop_generic; + mac->ops.led_on = ixgbe_led_on_generic; + mac->ops.led_off = ixgbe_led_off_generic; + mac->ops.blink_led_start = ixgbe_blink_led_start_generic; + mac->ops.blink_led_stop = ixgbe_blink_led_stop_generic; /* RAR, Multicast, VLAN */ - mac->ops.set_rar = &ixgbe_set_rar_generic; - mac->ops.clear_rar = &ixgbe_clear_rar_generic; + mac->ops.set_rar = ixgbe_set_rar_generic; + mac->ops.clear_rar = ixgbe_clear_rar_generic; mac->ops.insert_mac_addr = NULL; mac->ops.set_vmdq = NULL; mac->ops.clear_vmdq = NULL; - 
mac->ops.init_rx_addrs = &ixgbe_init_rx_addrs_generic; - mac->ops.update_uc_addr_list = &ixgbe_update_uc_addr_list_generic; - mac->ops.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic; - mac->ops.enable_mc = &ixgbe_enable_mc_generic; - mac->ops.disable_mc = &ixgbe_disable_mc_generic; + mac->ops.init_rx_addrs = ixgbe_init_rx_addrs_generic; + mac->ops.update_uc_addr_list = ixgbe_update_uc_addr_list_generic; + mac->ops.update_mc_addr_list = ixgbe_update_mc_addr_list_generic; + mac->ops.enable_mc = ixgbe_enable_mc_generic; + mac->ops.disable_mc = ixgbe_disable_mc_generic; mac->ops.clear_vfta = NULL; mac->ops.set_vfta = NULL; mac->ops.set_vlvf = NULL; mac->ops.init_uta_tables = NULL; + mac->ops.enable_rx = ixgbe_enable_rx_generic; + mac->ops.disable_rx = ixgbe_disable_rx_generic; /* Flow Control */ - mac->ops.fc_enable = &ixgbe_fc_enable_generic; + mac->ops.fc_enable = ixgbe_fc_enable_generic; + mac->ops.setup_fc = ixgbe_setup_fc_generic; /* Link */ mac->ops.get_link_capabilities = NULL; @@ -162,6 +167,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) switch (hw->phy.media_type) { case ixgbe_media_type_fiber_fixed: + case ixgbe_media_type_fiber_qsfp: case ixgbe_media_type_fiber: hw->mac.ops.check_link(hw, &speed, &link_up, FALSE); /* if link is down, assume supported */ @@ -179,7 +185,10 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) switch (hw->device_id) { case IXGBE_DEV_ID_82599_T3_LOM: case IXGBE_DEV_ID_X540T: + case IXGBE_DEV_ID_X540T1: case IXGBE_DEV_ID_X540_BYPASS: + case IXGBE_DEV_ID_X550T: + case IXGBE_DEV_ID_X550EM_X_10G_T: supported = TRUE; break; default: @@ -196,24 +205,21 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) } /** - * ixgbe_setup_fc - Set up flow control + * ixgbe_setup_fc_generic - Set up flow control * @hw: pointer to hardware structure * * Called at init time to set up flow control. **/ -static s32 ixgbe_setup_fc(struct ixgbe_hw *hw) +s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw) { s32 ret_val = IXGBE_SUCCESS; u32 reg = 0, reg_bp = 0; u16 reg_cu = 0; - bool got_lock = FALSE; + bool locked = FALSE; - DEBUGFUNC("ixgbe_setup_fc"); + DEBUGFUNC("ixgbe_setup_fc_generic"); - /* - * Validate the requested mode. Strict IEEE mode does not allow - * ixgbe_fc_rx_pause because it will cause us to fail at UNH. - */ + /* Validate the requested mode */ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); @@ -234,11 +240,18 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw) * we link at 10G, the 1G advertisement is harmless and vice versa. */ switch (hw->phy.media_type) { + case ixgbe_media_type_backplane: + /* some MACs need RMW protection on AUTOC */ + ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp); + if (ret_val != IXGBE_SUCCESS) + goto out; + + /* only backplane uses autoc so fall through */ case ixgbe_media_type_fiber_fixed: + case ixgbe_media_type_fiber_qsfp: case ixgbe_media_type_fiber: - reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); - reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC); + break; case ixgbe_media_type_copper: hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT, @@ -310,7 +323,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw) break; } - if (hw->mac.type != ixgbe_mac_X540) { + if (hw->mac.type < ixgbe_mac_X540) { /* * Enable auto-negotiation between the MAC & PHY; * the MAC will advertise clause 37 flow control. 
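This hunk routes all AUTOC access through the new prot_autoc_read/prot_autoc_write MAC ops (implemented later in this patch as prot_autoc_read_generic and prot_autoc_write_generic) instead of open-coding the 82599 LESM semaphore handling at every call site. A minimal sketch of the resulting read-modify-write pattern, assuming the driver's ixgbe_type.h definitions; the helper name is illustrative only:

	/*
	 * Hypothetical caller: restart autonegotiation on a backplane MAC.
	 * The ops hide whether the SW/FW semaphore must be held for AUTOC.
	 */
	static s32 example_autoc_restart_an(struct ixgbe_hw *hw)
	{
		u32 autoc;
		bool locked;
		s32 ret_val;

		/* May take the SW/FW semaphore; reports that via 'locked' */
		ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc);
		if (ret_val != IXGBE_SUCCESS)
			return ret_val;

		autoc |= IXGBE_AUTOC_AN_RESTART;

		/* Writes AUTOC back; drops the semaphore if the read took it */
		return hw->mac.ops.prot_autoc_write(hw, autoc, locked);
	}

This mirrors how ixgbe_setup_fc_generic and the blink_led routines below use the pair, keeping the LESM special case in one place.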
@@ -333,35 +346,16 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw) */ if (hw->phy.media_type == ixgbe_media_type_backplane) { reg_bp |= IXGBE_AUTOC_AN_RESTART; - /* Need the SW/FW semaphore around AUTOC writes if 82599 and - * LESM is on, likewise reset_pipeline requries the lock as - * it also writes AUTOC. - */ - if ((hw->mac.type == ixgbe_mac_82599EB) && - ixgbe_verify_lesm_fw_enabled_82599(hw)) { - ret_val = hw->mac.ops.acquire_swfw_sync(hw, - IXGBE_GSSR_MAC_CSR_SM); - if (ret_val != IXGBE_SUCCESS) { - ret_val = IXGBE_ERR_SWFW_SYNC; - goto out; - } - got_lock = TRUE; - } - - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp); - if (hw->mac.type == ixgbe_mac_82599EB) - ixgbe_reset_pipeline_82599(hw); - - if (got_lock) - hw->mac.ops.release_swfw_sync(hw, - IXGBE_GSSR_MAC_CSR_SM); + ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked); + if (ret_val) + goto out; } else if ((hw->phy.media_type == ixgbe_media_type_copper) && (ixgbe_device_supports_autoneg_fc(hw))) { hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT, IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu); } - DEBUGOUT1("Set up FC; IXGBE_AUTOC = 0x%08X\n", reg); + DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg); out: return ret_val; } @@ -575,7 +569,7 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw) } } - if (hw->mac.type == ixgbe_mac_X540) { + if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) { if (hw->phy.id == 0) ixgbe_identify_phy(hw); hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, @@ -793,7 +787,7 @@ s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf, return ret_val; } else { if (eeprom_buf_size > (u32)(pba->word[1] + - pba->pba_block[0])) { + pba_block_size)) { memcpy(pba->pba_block, &eeprom_buf[pba->word[1]], pba_block_size * sizeof(u16)); @@ -965,7 +959,8 @@ void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status) { struct ixgbe_mac_info *mac = &hw->mac; - hw->bus.type = ixgbe_bus_type_pci_express; + if (hw->bus.type == ixgbe_bus_type_unknown) + hw->bus.type = ixgbe_bus_type_pci_express; switch (link_status & IXGBE_PCI_LINK_WIDTH) { case IXGBE_PCI_LINK_WIDTH_1: @@ -1071,7 +1066,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) hw->adapter_stopped = TRUE; /* Disable the receive unit */ - IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, 0); + ixgbe_disable_rx(hw); /* Clear interrupt mask to stop interrupts from being generated */ IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); @@ -1096,7 +1091,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) msec_delay(2); /* - * Prevent the PCI-E bus from from hanging by disabling PCI-E master + * Prevent the PCI-E bus from hanging by disabling PCI-E master * access and verify no pending requests */ return ixgbe_disable_pcie_master(hw); @@ -2107,8 +2102,10 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw) /** * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum * @hw: pointer to hardware structure + * + * Returns a negative error code on error, or the 16-bit checksum **/ -u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) +s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) { u16 i; u16 j; @@ -2121,33 +2118,44 @@ u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) /* Include 0x0-0x3F in the checksum */ for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { - if (hw->eeprom.ops.read(hw, i, &word) != IXGBE_SUCCESS) { + if (hw->eeprom.ops.read(hw, i, &word)) { DEBUGOUT("EEPROM read failed\n"); - break; + return IXGBE_ERR_EEPROM; } checksum += word; } /* Include all data from pointers except for the fw 
pointer */ for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) { - hw->eeprom.ops.read(hw, i, &pointer); + if (hw->eeprom.ops.read(hw, i, &pointer)) { + DEBUGOUT("EEPROM read failed\n"); + return IXGBE_ERR_EEPROM; + } + + /* If the pointer seems invalid */ + if (pointer == 0xFFFF || pointer == 0) + continue; + + if (hw->eeprom.ops.read(hw, pointer, &length)) { + DEBUGOUT("EEPROM read failed\n"); + return IXGBE_ERR_EEPROM; + } - /* Make sure the pointer seems valid */ - if (pointer != 0xFFFF && pointer != 0) { - hw->eeprom.ops.read(hw, pointer, &length); + if (length == 0xFFFF || length == 0) + continue; - if (length != 0xFFFF && length != 0) { - for (j = pointer+1; j <= pointer+length; j++) { - hw->eeprom.ops.read(hw, j, &word); - checksum += word; - } + for (j = pointer + 1; j <= pointer + length; j++) { + if (hw->eeprom.ops.read(hw, j, &word)) { + DEBUGOUT("EEPROM read failed\n"); + return IXGBE_ERR_EEPROM; } + checksum += word; } } checksum = (u16)IXGBE_EEPROM_SUM - checksum; - return checksum; + return (s32)checksum; } /** @@ -2167,32 +2175,38 @@ s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic"); - /* - * Read the first word from the EEPROM. If this times out or fails, do + /* Read the first word from the EEPROM. If this times out or fails, do * not continue or we could be in for a very long wait while every * EEPROM read fails */ status = hw->eeprom.ops.read(hw, 0, &checksum); + if (status) { + DEBUGOUT("EEPROM read failed\n"); + return status; + } - if (status == IXGBE_SUCCESS) { - checksum = hw->eeprom.ops.calc_checksum(hw); + status = hw->eeprom.ops.calc_checksum(hw); + if (status < 0) + return status; - hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); + checksum = (u16)(status & 0xffff); - /* - * Verify read checksum from EEPROM is the same as - * calculated checksum - */ - if (read_checksum != checksum) - status = IXGBE_ERR_EEPROM_CHECKSUM; - - /* If the user cares, return the calculated checksum */ - if (checksum_val) - *checksum_val = checksum; - } else { + status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); + if (status) { DEBUGOUT("EEPROM read failed\n"); + return status; } + /* Verify read checksum from EEPROM is the same as + * calculated checksum + */ + if (read_checksum != checksum) + status = IXGBE_ERR_EEPROM_CHECKSUM; + + /* If the user cares, return the calculated checksum */ + if (checksum_val) + *checksum_val = checksum; + return status; } @@ -2207,21 +2221,24 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw) DEBUGFUNC("ixgbe_update_eeprom_checksum_generic"); - /* - * Read the first word from the EEPROM. If this times out or fails, do + /* Read the first word from the EEPROM. 
If this times out or fails, do * not continue or we could be in for a very long wait while every * EEPROM read fails */ status = hw->eeprom.ops.read(hw, 0, &checksum); - - if (status == IXGBE_SUCCESS) { - checksum = hw->eeprom.ops.calc_checksum(hw); - status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, - checksum); - } else { + if (status) { DEBUGOUT("EEPROM read failed\n"); + return status; } + status = hw->eeprom.ops.calc_checksum(hw); + if (status < 0) + return status; + + checksum = (u16)(status & 0xffff); + + status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum); + return status; } @@ -2793,10 +2810,11 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) /* * In order to prevent Tx hangs when the internal Tx * switch is enabled we must set the high water mark - * to the maximum FCRTH value. This allows the Tx - * switch to function even under heavy Rx workloads. + * to the Rx packet buffer size - 24KB. This allows + * the Tx switch to function even under heavy Rx + * workloads. */ - fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32; + fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576; } IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth); @@ -2888,8 +2906,7 @@ static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw) linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) || (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) { - ERROR_REPORT1(IXGBE_ERROR_POLLING, - "Auto-Negotiation did not complete or timed out"); + DEBUGOUT("Auto-Negotiation did not complete or timed out\n"); goto out; } @@ -2924,16 +2941,14 @@ static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw) */ links = IXGBE_READ_REG(hw, IXGBE_LINKS); if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) { - ERROR_REPORT1(IXGBE_ERROR_POLLING, - "Auto-Negotiation did not complete"); + DEBUGOUT("Auto-Negotiation did not complete\n"); goto out; } if (hw->mac.type == ixgbe_mac_82599EB) { links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2); if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) { - ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, - "Link partner is not AN enabled"); + DEBUGOUT("Link partner is not AN enabled\n"); goto out; } } @@ -3012,6 +3027,7 @@ void ixgbe_fc_autoneg(struct ixgbe_hw *hw) switch (hw->phy.media_type) { /* Autoneg flow control on fiber adapters */ case ixgbe_media_type_fiber_fixed: + case ixgbe_media_type_fiber_qsfp: case ixgbe_media_type_fiber: if (speed == IXGBE_LINK_SPEED_1GB_FULL) ret_val = ixgbe_fc_autoneg_fiber(hw); @@ -3101,6 +3117,7 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) { s32 status = IXGBE_SUCCESS; u32 i, poll; + u16 value; DEBUGFUNC("ixgbe_disable_pcie_master"); @@ -3108,7 +3125,8 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS); /* Exit if master requests are blocked */ - if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) + if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) || + IXGBE_REMOVED(hw->hw_addr)) goto out; /* Poll for master request bit to clear */ @@ -3136,8 +3154,10 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) poll = ixgbe_pcie_timeout_poll(hw); for (i = 0; i < poll; i++) { usec_delay(100); - if (!(IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS) & - IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING)) + value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS); + if (IXGBE_REMOVED(hw->hw_addr)) + goto out; + if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING)) goto out; } @@ -3157,7 +3177,7 @@ out: * Acquires the SWFW semaphore through the GSSR 
register for the specified * function (CSR, PHY0, PHY1, EEPROM, Flash) **/ -s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) +s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask) { u32 gssr = 0; u32 swmask = mask; @@ -3204,7 +3224,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) * Releases the SWFW semaphore through the GSSR register for the specified * function (CSR, PHY0, PHY1, EEPROM, Flash) **/ -void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask) +void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask) { u32 gssr; u32 swmask = mask; @@ -3258,6 +3278,37 @@ s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw) } /** + * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read + * @hw: pointer to hardware structure + * @reg_val: Value we read from AUTOC + * + * The default case requires no protection so just do the register read. + */ +s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val) +{ + *locked = FALSE; + *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC); + return IXGBE_SUCCESS; +} + +/** + * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write + * @hw: pointer to hardware structure + * @reg_val: value to write to AUTOC + * @locked: bool to indicate whether the SW/FW lock was already taken by + * previous read. + * + * The default case requires no protection so just do the register write. + */ +s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked) +{ + UNREFERENCED_1PARAMETER(locked); + + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val); + return IXGBE_SUCCESS; +} + +/** * ixgbe_enable_sec_rx_path_generic - Enables the receive data path * @hw: pointer to hardware structure * @@ -3288,7 +3339,10 @@ s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval) { DEBUGFUNC("ixgbe_enable_rx_dma_generic"); - IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval); + if (regval & IXGBE_RXCTRL_RXEN) + ixgbe_enable_rx(hw); + else + ixgbe_disable_rx(hw); return IXGBE_SUCCESS; } @@ -3302,9 +3356,10 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) { ixgbe_link_speed speed = 0; bool link_up = 0; - u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + u32 autoc_reg = 0; u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); s32 ret_val = IXGBE_SUCCESS; + bool locked = FALSE; DEBUGFUNC("ixgbe_blink_led_start_generic"); @@ -3315,29 +3370,18 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) hw->mac.ops.check_link(hw, &speed, &link_up, FALSE); if (!link_up) { - /* Need the SW/FW semaphore around AUTOC writes if 82599 and - * LESM is on. 
- */ - bool got_lock = FALSE; - if ((hw->mac.type == ixgbe_mac_82599EB) && - ixgbe_verify_lesm_fw_enabled_82599(hw)) { - ret_val = hw->mac.ops.acquire_swfw_sync(hw, - IXGBE_GSSR_MAC_CSR_SM); - if (ret_val != IXGBE_SUCCESS) { - ret_val = IXGBE_ERR_SWFW_SYNC; - goto out; - } - got_lock = TRUE; - } + ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg); + if (ret_val != IXGBE_SUCCESS) + goto out; autoc_reg |= IXGBE_AUTOC_AN_RESTART; autoc_reg |= IXGBE_AUTOC_FLU; - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); - IXGBE_WRITE_FLUSH(hw); - if (got_lock) - hw->mac.ops.release_swfw_sync(hw, - IXGBE_GSSR_MAC_CSR_SM); + ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked); + if (ret_val != IXGBE_SUCCESS) + goto out; + + IXGBE_WRITE_FLUSH(hw); msec_delay(10); } @@ -3357,36 +3401,23 @@ out: **/ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index) { - u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + u32 autoc_reg = 0; u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); s32 ret_val = IXGBE_SUCCESS; - bool got_lock = FALSE; + bool locked = FALSE; DEBUGFUNC("ixgbe_blink_led_stop_generic"); - /* Need the SW/FW semaphore around AUTOC writes if 82599 and - * LESM is on. - */ - if ((hw->mac.type == ixgbe_mac_82599EB) && - ixgbe_verify_lesm_fw_enabled_82599(hw)) { - ret_val = hw->mac.ops.acquire_swfw_sync(hw, - IXGBE_GSSR_MAC_CSR_SM); - if (ret_val != IXGBE_SUCCESS) { - ret_val = IXGBE_ERR_SWFW_SYNC; - goto out; - } - got_lock = TRUE; - } + ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg); + if (ret_val != IXGBE_SUCCESS) + goto out; autoc_reg &= ~IXGBE_AUTOC_FLU; autoc_reg |= IXGBE_AUTOC_AN_RESTART; - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); - - if (hw->mac.type == ixgbe_mac_82599EB) - ixgbe_reset_pipeline_82599(hw); - if (got_lock) - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); + ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked); + if (ret_val != IXGBE_SUCCESS) + goto out; led_reg &= ~IXGBE_LED_MODE_MASK(index); led_reg &= ~IXGBE_LED_BLINK(index); @@ -3541,6 +3572,8 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS; max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599; break; @@ -3550,6 +3583,8 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) DEBUGFUNC("ixgbe_get_pcie_msix_count_generic"); msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset); + if (IXGBE_REMOVED(hw->hw_addr)) + msix_count = 0; msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK; /* MSI-X count is zero-based in HW */ @@ -3653,6 +3688,9 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); + if (IXGBE_REMOVED(hw->hw_addr)) + goto done; + if (!mpsar_lo && !mpsar_hi) goto done; @@ -4048,17 +4086,27 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, *link_up = FALSE; } - if ((links_reg & IXGBE_LINKS_SPEED_82599) == - IXGBE_LINKS_SPEED_10G_82599) + switch (links_reg & IXGBE_LINKS_SPEED_82599) { + case IXGBE_LINKS_SPEED_10G_82599: *speed = IXGBE_LINK_SPEED_10GB_FULL; - else if ((links_reg & IXGBE_LINKS_SPEED_82599) == - IXGBE_LINKS_SPEED_1G_82599) + if (hw->mac.type >= ixgbe_mac_X550) { + if (links_reg & IXGBE_LINKS_SPEED_NON_STD) + *speed = IXGBE_LINK_SPEED_2_5GB_FULL; + } + break; + case IXGBE_LINKS_SPEED_1G_82599: *speed = IXGBE_LINK_SPEED_1GB_FULL; - else if ((links_reg & 
IXGBE_LINKS_SPEED_82599) == - IXGBE_LINKS_SPEED_100_82599) + break; + case IXGBE_LINKS_SPEED_100_82599: *speed = IXGBE_LINK_SPEED_100_FULL; - else + if (hw->mac.type >= ixgbe_mac_X550) { + if (links_reg & IXGBE_LINKS_SPEED_NON_STD) + *speed = IXGBE_LINK_SPEED_5GB_FULL; + } + break; + default: *speed = IXGBE_LINK_SPEED_UNKNOWN; + } return IXGBE_SUCCESS; } @@ -4216,7 +4264,7 @@ void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf) * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing * @hw: pointer to hardware structure * @enable: enable or disable switch for VLAN anti-spoofing - * @pf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing + * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing * **/ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) @@ -4310,41 +4358,51 @@ u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) * @buffer: contains the command to write and where the return status will * be placed * @length: length of buffer, must be multiple of 4 bytes + * @timeout: time in ms to wait for command completion + * @return_data: read and return data from the buffer (TRUE) or not (FALSE) + * Needed because FW structures are big endian and decoding of + * these fields can be 8 bit or 16 bit based on command. Decoding + * is not easily understood without making a table of commands. + * So we will leave this up to the caller to read back the data + * in these cases. * * Communicates with the manageability block. On success return IXGBE_SUCCESS * else return IXGBE_ERR_HOST_INTERFACE_COMMAND. **/ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, - u32 length) + u32 length, u32 timeout, bool return_data) { - u32 hicr, i, bi; + u32 hicr, i, bi, fwsts; u32 hdr_size = sizeof(struct ixgbe_hic_hdr); - u8 buf_len, dword_len; - - s32 ret_val = IXGBE_SUCCESS; + u16 buf_len; + u16 dword_len; DEBUGFUNC("ixgbe_host_interface_command"); - if (length == 0 || length & 0x3 || - length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { - DEBUGOUT("Buffer length failure.\n"); - ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; - goto out; + if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { + DEBUGOUT1("Buffer length failure buffersize=%d.\n", length); + return IXGBE_ERR_HOST_INTERFACE_COMMAND; } + /* Set bit 9 of FWSTS clearing FW reset indication */ + fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS); + IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI); /* Check that the host interface is enabled. */ hicr = IXGBE_READ_REG(hw, IXGBE_HICR); if ((hicr & IXGBE_HICR_EN) == 0) { DEBUGOUT("IXGBE_HOST_EN bit disabled.\n"); - ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; - goto out; + return IXGBE_ERR_HOST_INTERFACE_COMMAND; + } + + /* Calculate length in DWORDs. We must be DWORD aligned */ + if ((length % (sizeof(u32))) != 0) { + DEBUGOUT("Buffer length failure, not aligned to dword"); + return IXGBE_ERR_INVALID_ARGUMENT; } - /* Calculate length in DWORDs */ dword_len = length >> 2; - /* - * The device driver writes the relevant command block + /* The device driver writes the relevant command block * into the ram area. */ for (i = 0; i < dword_len; i++) @@ -4354,21 +4412,24 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, /* Setting this bit tells the ARC that a new command is pending. 
*/ IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C); - for (i = 0; i < IXGBE_HI_COMMAND_TIMEOUT; i++) { + for (i = 0; i < timeout; i++) { hicr = IXGBE_READ_REG(hw, IXGBE_HICR); if (!(hicr & IXGBE_HICR_C)) break; msec_delay(1); } - /* Check command successful completion. */ - if (i == IXGBE_HI_COMMAND_TIMEOUT || - (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) { - DEBUGOUT("Command has failed with no status valid.\n"); - ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; - goto out; + /* Check command completion */ + if ((timeout != 0 && i == timeout) || + !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) { + ERROR_REPORT1(IXGBE_ERROR_CAUTION, + "Command has failed with no status valid.\n"); + return IXGBE_ERR_HOST_INTERFACE_COMMAND; } + if (!return_data) + return 0; + /* Calculate length in DWORDs */ dword_len = hdr_size >> 2; @@ -4381,25 +4442,23 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, /* If there is any thing in data position pull it in */ buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len; if (buf_len == 0) - goto out; + return 0; - if (length < (buf_len + hdr_size)) { + if (length < buf_len + hdr_size) { DEBUGOUT("Buffer not large enough for reply message.\n"); - ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; - goto out; + return IXGBE_ERR_HOST_INTERFACE_COMMAND; } /* Calculate length in DWORDs, add 3 for odd lengths */ dword_len = (buf_len + 3) >> 2; - /* Pull in the rest of the buffer (bi is where we left off)*/ + /* Pull in the rest of the buffer (bi is where we left off) */ for (; bi <= dword_len; bi++) { buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); IXGBE_LE32_TO_CPUS(&buffer[bi]); } -out: - return ret_val; + return 0; } /** @@ -4446,7 +4505,9 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, - sizeof(fw_cmd)); + sizeof(fw_cmd), + IXGBE_HI_COMMAND_TIMEOUT, + TRUE); if (ret_val != IXGBE_SUCCESS) continue; @@ -4533,7 +4594,8 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom, **/ void ixgbe_clear_tx_pending(struct ixgbe_hw *hw) { - u32 gcr_ext, hlreg0; + u32 gcr_ext, hlreg0, i, poll; + u16 value; /* * If double reset is not requested then all transactions should @@ -4550,6 +4612,25 @@ void ixgbe_clear_tx_pending(struct ixgbe_hw *hw) hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK); + /* Wait for a last completion before clearing buffers */ + IXGBE_WRITE_FLUSH(hw); + msec_delay(3); + + /* + * Before proceeding, make sure that the PCIe block does not have + * transactions pending. 
+ */ + poll = ixgbe_pcie_timeout_poll(hw); + for (i = 0; i < poll; i++) { + usec_delay(100); + value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS); + if (IXGBE_REMOVED(hw->hw_addr)) + goto out; + if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING)) + goto out; + } + +out: /* initiate cleaning flow for buffers in the PCIe transaction layer */ gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, @@ -4582,3 +4663,316 @@ void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map) (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT)); return; } + +void ixgbe_disable_rx_generic(struct ixgbe_hw *hw) +{ + u32 pfdtxgswc; + u32 rxctrl; + + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + if (rxctrl & IXGBE_RXCTRL_RXEN) { + if (hw->mac.type != ixgbe_mac_82598EB) { + pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); + if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) { + pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN; + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); + hw->mac.set_lben = TRUE; + } else { + hw->mac.set_lben = FALSE; + } + } + rxctrl &= ~IXGBE_RXCTRL_RXEN; + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl); + } +} + +void ixgbe_enable_rx_generic(struct ixgbe_hw *hw) +{ + u32 pfdtxgswc; + u32 rxctrl; + + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN)); + + if (hw->mac.type != ixgbe_mac_82598EB) { + if (hw->mac.set_lben) { + pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); + pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN; + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); + hw->mac.set_lben = FALSE; + } + } +} + +/** + * ixgbe_mng_present - returns TRUE when management capability is present + * @hw: pointer to hardware structure + */ +bool ixgbe_mng_present(struct ixgbe_hw *hw) +{ + u32 fwsm; + + if (hw->mac.type < ixgbe_mac_82599EB) + return FALSE; + + fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM); + fwsm &= IXGBE_FWSM_MODE_MASK; + return fwsm == IXGBE_FWSM_FW_MODE_PT; +} + +/** + * ixgbe_mng_enabled - Is the manageability engine enabled? + * @hw: pointer to hardware structure + * + * Returns TRUE if the manageability engine is enabled. + **/ +bool ixgbe_mng_enabled(struct ixgbe_hw *hw) +{ + u32 fwsm, manc, factps; + + fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM); + if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) + return FALSE; + + manc = IXGBE_READ_REG(hw, IXGBE_MANC); + if (!(manc & IXGBE_MANC_RCV_TCO_EN)) + return FALSE; + + if (hw->mac.type <= ixgbe_mac_X540) { + factps = IXGBE_READ_REG(hw, IXGBE_FACTPS); + if (factps & IXGBE_FACTPS_MNGCG) + return FALSE; + } + + return TRUE; +} + +/** + * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: TRUE when waiting for completion is needed + * + * Set the link speed in the MAC and/or PHY register and restarts link. + **/ +s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; + ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN; + s32 status = IXGBE_SUCCESS; + u32 speedcnt = 0; + u32 i = 0; + bool autoneg, link_up = FALSE; + + DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber"); + + /* Mask off requested but non-supported speeds */ + status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg); + if (status != IXGBE_SUCCESS) + return status; + + speed &= link_speed; + + /* Try each speed one by one, highest priority first. 
We do this in + * software because 10Gb fiber doesn't support speed autonegotiation. + */ + if (speed & IXGBE_LINK_SPEED_10GB_FULL) { + speedcnt++; + highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL; + + /* If we already have link at this speed, just jump out */ + status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE); + if (status != IXGBE_SUCCESS) + return status; + + if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up) + goto out; + + /* Set the module link speed */ + switch (hw->phy.media_type) { + case ixgbe_media_type_fiber_fixed: + case ixgbe_media_type_fiber: + ixgbe_set_rate_select_speed(hw, + IXGBE_LINK_SPEED_10GB_FULL); + break; + case ixgbe_media_type_fiber_qsfp: + /* QSFP module automatically detects MAC link speed */ + break; + default: + DEBUGOUT("Unexpected media type.\n"); + break; + } + + /* Allow module to change analog characteristics (1G->10G) */ + msec_delay(40); + + status = ixgbe_setup_mac_link(hw, + IXGBE_LINK_SPEED_10GB_FULL, + autoneg_wait_to_complete); + if (status != IXGBE_SUCCESS) + return status; + + /* Flap the Tx laser if it has not already been done */ + ixgbe_flap_tx_laser(hw); + + /* Wait for the controller to acquire link. Per IEEE 802.3ap, + * Section 73.10.2, we may have to wait up to 500ms if KR is + * attempted. 82599 uses the same timing for 10g SFI. + */ + for (i = 0; i < 5; i++) { + /* Wait for the link partner to also set speed */ + msec_delay(100); + + /* If we have link, just jump out */ + status = ixgbe_check_link(hw, &link_speed, + &link_up, FALSE); + if (status != IXGBE_SUCCESS) + return status; + + if (link_up) + goto out; + } + } + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) { + speedcnt++; + if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN) + highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL; + + /* If we already have link at this speed, just jump out */ + status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE); + if (status != IXGBE_SUCCESS) + return status; + + if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up) + goto out; + + /* Set the module link speed */ + switch (hw->phy.media_type) { + case ixgbe_media_type_fiber_fixed: + case ixgbe_media_type_fiber: + ixgbe_set_rate_select_speed(hw, + IXGBE_LINK_SPEED_1GB_FULL); + break; + case ixgbe_media_type_fiber_qsfp: + /* QSFP module automatically detects link speed */ + break; + default: + DEBUGOUT("Unexpected media type.\n"); + break; + } + + /* Allow module to change analog characteristics (10G->1G) */ + msec_delay(40); + + status = ixgbe_setup_mac_link(hw, + IXGBE_LINK_SPEED_1GB_FULL, + autoneg_wait_to_complete); + if (status != IXGBE_SUCCESS) + return status; + + /* Flap the Tx laser if it has not already been done */ + ixgbe_flap_tx_laser(hw); + + /* Wait for the link partner to also set speed */ + msec_delay(100); + + /* If we have link, just jump out */ + status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE); + if (status != IXGBE_SUCCESS) + return status; + + if (link_up) + goto out; + } + + /* We didn't get link. Configure back to the highest speed we tried, + * (if there was more than one). We call ourselves back with just the + * single highest speed that the user requested. 
+ */ + if (speedcnt > 1) + status = ixgbe_setup_mac_link_multispeed_fiber(hw, + highest_link_speed, + autoneg_wait_to_complete); + +out: + /* Set autoneg_advertised value based on input link speed */ + hw->phy.autoneg_advertised = 0; + + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; + + return status; +} + +/** + * ixgbe_set_soft_rate_select_speed - Set module link speed + * @hw: pointer to hardware structure + * @speed: link speed to set + * + * Set module link speed via the soft rate select. + */ +void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw, + ixgbe_link_speed speed) +{ + s32 status; + u8 rs, eeprom_data; + + switch (speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + /* one bit mask same as setting on */ + rs = IXGBE_SFF_SOFT_RS_SELECT_10G; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + rs = IXGBE_SFF_SOFT_RS_SELECT_1G; + break; + default: + DEBUGOUT("Invalid fixed module speed\n"); + return; + } + + /* Set RS0 */ + status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB, + IXGBE_I2C_EEPROM_DEV_ADDR2, + &eeprom_data); + if (status) { + DEBUGOUT("Failed to read Rx Rate Select RS0\n"); + goto out; + } + + eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs; + + status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB, + IXGBE_I2C_EEPROM_DEV_ADDR2, + eeprom_data); + if (status) { + DEBUGOUT("Failed to write Rx Rate Select RS0\n"); + goto out; + } + + /* Set RS1 */ + status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB, + IXGBE_I2C_EEPROM_DEV_ADDR2, + &eeprom_data); + if (status) { + DEBUGOUT("Failed to read Rx Rate Select RS1\n"); + goto out; + } + + eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs; + + status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB, + IXGBE_I2C_EEPROM_DEV_ADDR2, + eeprom_data); + if (status) { + DEBUGOUT("Failed to write Rx Rate Select RS1\n"); + goto out; + } +out: + return; +} diff --git a/sys/dev/ixgbe/ixgbe_common.h b/sys/dev/ixgbe/ixgbe_common.h index 7d1ee82..e685f5b 100644 --- a/sys/dev/ixgbe/ixgbe_common.h +++ b/sys/dev/ixgbe/ixgbe_common.h @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2013, Intel Corporation + Copyright (c) 2001-2015, Intel Corporation All rights reserved. 
Redistribution and use in source and binary forms, with or without @@ -41,6 +41,7 @@ IXGBE_WRITE_REG(hw, reg, (u32) value); \ IXGBE_WRITE_REG(hw, reg + 4, (u32) (value >> 32)); \ } while (0) +#define IXGBE_REMOVED(a) (0) #if !defined(NO_READ_PBA_RAW) || !defined(NO_WRITE_PBA_RAW) struct ixgbe_pba { u16 word[2]; @@ -89,7 +90,7 @@ s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, u16 *data); s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data); -u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw); +s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw); s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, u16 *checksum_val); s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw); @@ -113,12 +114,16 @@ s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw); s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw); bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw); void ixgbe_fc_autoneg(struct ixgbe_hw *hw); +s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw); s32 ixgbe_validate_mac_addr(u8 *mac_addr); -s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask); -void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask); +s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask); +void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask); s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw); +s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val); +s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked); + s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index); s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index); @@ -155,10 +160,20 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build, u8 ver); u8 ixgbe_calculate_checksum(u8 *buffer, u32 length); s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, - u32 length); + u32 length, u32 timeout, bool return_data); + void ixgbe_clear_tx_pending(struct ixgbe_hw *hw); extern s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw); extern void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw); - +bool ixgbe_mng_present(struct ixgbe_hw *hw); +bool ixgbe_mng_enabled(struct ixgbe_hw *hw); + +void ixgbe_disable_rx_generic(struct ixgbe_hw *hw); +void ixgbe_enable_rx_generic(struct ixgbe_hw *hw); +s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw, + ixgbe_link_speed speed); #endif /* IXGBE_COMMON */ diff --git a/sys/dev/ixgbe/ixgbe_dcb.c b/sys/dev/ixgbe/ixgbe_dcb.c index 149aad1..3659d17 100644 --- a/sys/dev/ixgbe/ixgbe_dcb.c +++ b/sys/dev/ixgbe/ixgbe_dcb.c @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2013, Intel Corporation + Copyright (c) 2001-2015, Intel Corporation All rights reserved. 
Redistribution and use in source and binary forms, with or without @@ -394,6 +394,8 @@ s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats, break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: #if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT) ret = ixgbe_dcb_get_tc_stats_82599(hw, stats, tc_count); break; @@ -422,6 +424,8 @@ s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats, break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: #if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT) ret = ixgbe_dcb_get_pfc_stats_82599(hw, stats, tc_count); break; @@ -461,6 +465,8 @@ s32 ixgbe_dcb_config_rx_arbiter_cee(struct ixgbe_hw *hw, break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: #if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT) ret = ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwgid, tsa, map); @@ -500,6 +506,8 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_cee(struct ixgbe_hw *hw, break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: #if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT) ret = ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwgid, tsa); @@ -541,6 +549,8 @@ s32 ixgbe_dcb_config_tx_data_arbiter_cee(struct ixgbe_hw *hw, break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: #if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT) ret = ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwgid, tsa, @@ -576,6 +586,8 @@ s32 ixgbe_dcb_config_pfc_cee(struct ixgbe_hw *hw, break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: #if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT) ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en, map); break; @@ -602,6 +614,8 @@ s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *hw) break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: #if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT) ret = ixgbe_dcb_config_tc_stats_82599(hw, NULL); break; @@ -647,6 +661,8 @@ s32 ixgbe_dcb_hw_config_cee(struct ixgbe_hw *hw, break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: #if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT) ixgbe_dcb_config_82599(hw, dcb_config); ret = ixgbe_dcb_hw_config_82599(hw, dcb_config->link_speed, @@ -679,6 +695,8 @@ s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *hw, u8 pfc_en, u8 *map) break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: #if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT) ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en, map); break; @@ -702,6 +720,8 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, u16 *refill, u16 *max, break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: #if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT) ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, tsa, map); diff --git a/sys/dev/ixgbe/ixgbe_dcb.h b/sys/dev/ixgbe/ixgbe_dcb.h index 05e548e..871b784 100644 --- a/sys/dev/ixgbe/ixgbe_dcb.h +++ b/sys/dev/ixgbe/ixgbe_dcb.h @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2013, Intel Corporation + Copyright (c) 2001-2015, Intel Corporation All 
rights reserved. Redistribution and use in source and binary forms, with or without @@ -35,7 +35,6 @@ #ifndef _IXGBE_DCB_H_ #define _IXGBE_DCB_H_ - #include "ixgbe_type.h" /* DCB defines */ diff --git a/sys/dev/ixgbe/ixgbe_dcb_82598.c b/sys/dev/ixgbe/ixgbe_dcb_82598.c index aee808f..fb946c9 100644 --- a/sys/dev/ixgbe/ixgbe_dcb_82598.c +++ b/sys/dev/ixgbe/ixgbe_dcb_82598.c @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2013, Intel Corporation + Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without @@ -347,6 +347,8 @@ s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, int link_speed, u16 *refill, u16 *max, u8 *bwg_id, u8 *tsa) { + UNREFERENCED_1PARAMETER(link_speed); + ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa); ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id, tsa); diff --git a/sys/dev/ixgbe/ixgbe_dcb_82598.h b/sys/dev/ixgbe/ixgbe_dcb_82598.h index 9b634a3..35974f7 100644 --- a/sys/dev/ixgbe/ixgbe_dcb_82598.h +++ b/sys/dev/ixgbe/ixgbe_dcb_82598.h @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2013, Intel Corporation + Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without diff --git a/sys/dev/ixgbe/ixgbe_dcb_82599.c b/sys/dev/ixgbe/ixgbe_dcb_82599.c index 085ca0c..4443411 100644 --- a/sys/dev/ixgbe/ixgbe_dcb_82599.c +++ b/sys/dev/ixgbe/ixgbe_dcb_82599.c @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2013, Intel Corporation + Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without @@ -299,7 +299,7 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *map) */ reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE); - if (hw->mac.type == ixgbe_mac_X540) + if (hw->mac.type >= ixgbe_mac_X540) reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT; if (pfc_en) @@ -329,7 +329,14 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *map) fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl); } else { - reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32; + /* + * In order to prevent Tx hangs when the internal Tx + * switch is enabled we must set the high water mark + * to the Rx packet buffer size - 24KB. This allows + * the Tx switch to function even under heavy Rx + * workloads. + */ + reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576; IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0); } @@ -573,6 +580,7 @@ s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, int link_speed, u16 *refill, u16 *max, u8 *bwg_id, u8 *tsa, u8 *map) { + UNREFERENCED_1PARAMETER(link_speed); ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, tsa, map); diff --git a/sys/dev/ixgbe/ixgbe_dcb_82599.h b/sys/dev/ixgbe/ixgbe_dcb_82599.h index 91428c0..bab7628 100644 --- a/sys/dev/ixgbe/ixgbe_dcb_82599.h +++ b/sys/dev/ixgbe/ixgbe_dcb_82599.h @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2013, Intel Corporation + Copyright (c) 2001-2015, Intel Corporation All rights reserved. 
Redistribution and use in source and binary forms, with or without diff --git a/sys/dev/ixgbe/ixgbe_mbx.c b/sys/dev/ixgbe/ixgbe_mbx.c index f9c1efa..d8ba55a 100644 --- a/sys/dev/ixgbe/ixgbe_mbx.c +++ b/sys/dev/ixgbe/ixgbe_mbx.c @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2012, Intel Corporation + Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without @@ -77,10 +77,11 @@ s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) DEBUGFUNC("ixgbe_write_mbx"); - if (size > mbx->size) + if (size > mbx->size) { ret_val = IXGBE_ERR_MBX; - - else if (mbx->ops.write) + ERROR_REPORT2(IXGBE_ERROR_ARGUMENT, + "Invalid mailbox message size %d", size); + } else if (mbx->ops.write) ret_val = mbx->ops.write(hw, msg, size, mbx_id); return ret_val; @@ -170,6 +171,10 @@ static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id) usec_delay(mbx->usec_delay); } + if (countdown == 0) + ERROR_REPORT2(IXGBE_ERROR_POLLING, + "Polling for VF%d mailbox message timed out", mbx_id); + out: return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX; } @@ -198,6 +203,10 @@ static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id) usec_delay(mbx->usec_delay); } + if (countdown == 0) + ERROR_REPORT2(IXGBE_ERROR_POLLING, + "Polling for VF%d mailbox ack timed out", mbx_id); + out: return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX; } @@ -596,6 +605,8 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number) case ixgbe_mac_82599EB: vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset)); break; + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: case ixgbe_mac_X540: vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset)); break; @@ -633,6 +644,10 @@ static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number) p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number)); if (p2v_mailbox & IXGBE_PFMAILBOX_PFU) ret_val = IXGBE_SUCCESS; + else + ERROR_REPORT2(IXGBE_ERROR_POLLING, + "Failed to obtain mailbox lock for VF%d", vf_number); + return ret_val; } @@ -727,6 +742,8 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw) struct ixgbe_mbx_info *mbx = &hw->mbx; if (hw->mac.type != ixgbe_mac_82599EB && + hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && hw->mac.type != ixgbe_mac_X540) return; diff --git a/sys/dev/ixgbe/ixgbe_mbx.h b/sys/dev/ixgbe/ixgbe_mbx.h index adcba9e..ea75cbe 100644 --- a/sys/dev/ixgbe/ixgbe_mbx.h +++ b/sys/dev/ixgbe/ixgbe_mbx.h @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2013, Intel Corporation + Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without diff --git a/sys/dev/ixgbe/ixgbe_osdep.h b/sys/dev/ixgbe/ixgbe_osdep.h index ebb55f4..95f6ed5c 100644 --- a/sys/dev/ixgbe/ixgbe_osdep.h +++ b/sys/dev/ixgbe/ixgbe_osdep.h @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2013, Intel Corporation + Copyright (c) 2001-2015, Intel Corporation All rights reserved. 
Redistribution and use in source and binary forms, with or without @@ -108,13 +108,14 @@ #define UNREFERENCED_3PARAMETER(_p, _q, _r) #define UNREFERENCED_4PARAMETER(_p, _q, _r, _s) - #define IXGBE_NTOHL(_i) ntohl(_i) #define IXGBE_NTOHS(_i) ntohs(_i) /* XXX these need to be revisited */ -#define IXGBE_CPU_TO_LE32 le32toh -#define IXGBE_LE32_TO_CPUS le32dec +#define IXGBE_CPU_TO_LE32 htole32 +#define IXGBE_LE32_TO_CPUS(x) +#define IXGBE_CPU_TO_BE16 htobe16 +#define IXGBE_CPU_TO_BE32 htobe32 typedef uint8_t u8; typedef int8_t s8; diff --git a/sys/dev/ixgbe/ixgbe_phy.c b/sys/dev/ixgbe/ixgbe_phy.c index cb237fd..88206c7 100644 --- a/sys/dev/ixgbe/ixgbe_phy.c +++ b/sys/dev/ixgbe/ixgbe_phy.c @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2013, Intel Corporation + Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without @@ -46,11 +46,268 @@ static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data); static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data); -static bool ixgbe_get_i2c_data(u32 *i2cctl); +static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl); static s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 *sff8472_data); /** + * ixgbe_out_i2c_byte_ack - Send I2C byte with ack + * @hw: pointer to the hardware structure + * @byte: byte to send + * + * Returns an error code on error. + */ +static s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte) +{ + s32 status; + + status = ixgbe_clock_out_i2c_byte(hw, byte); + if (status) + return status; + return ixgbe_get_i2c_ack(hw); +} + +/** + * ixgbe_in_i2c_byte_ack - Receive an I2C byte and send ack + * @hw: pointer to the hardware structure + * @byte: pointer to a u8 to receive the byte + * + * Returns an error code on error. + */ +static s32 ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte) +{ + s32 status; + + status = ixgbe_clock_in_i2c_byte(hw, byte); + if (status) + return status; + /* ACK */ + return ixgbe_clock_out_i2c_bit(hw, FALSE); +} + +/** + * ixgbe_ones_comp_byte_add - Perform one's complement addition + * @add1 - addend 1 + * @add2 - addend 2 + * + * Returns one's complement 8-bit sum. + */ +static u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2) +{ + u16 sum = add1 + add2; + + sum = (sum & 0xFF) + (sum >> 8); + return sum & 0xFF; +} + +/** + * ixgbe_read_i2c_combined_generic_int - Perform I2C read combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to read from + * @reg: I2C device register to read from + * @val: pointer to location to receive read value + * @lock: TRUE if to take and release semaphore + * + * Returns an error code on error. 
+ */ +static s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, + u16 reg, u16 *val, bool lock) +{ + u32 swfw_mask = hw->phy.phy_semaphore_mask; + int max_retry = 10; + int retry = 0; + u8 csum_byte; + u8 high_bits; + u8 low_bits; + u8 reg_high; + u8 csum; + + if (hw->mac.type >= ixgbe_mac_X550) + max_retry = 3; + reg_high = ((reg >> 7) & 0xFE) | 1; /* Indicate read combined */ + csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF); + csum = ~csum; + do { + if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) + return IXGBE_ERR_SWFW_SYNC; + ixgbe_i2c_start(hw); + /* Device Address and write indication */ + if (ixgbe_out_i2c_byte_ack(hw, addr)) + goto fail; + /* Write bits 14:8 */ + if (ixgbe_out_i2c_byte_ack(hw, reg_high)) + goto fail; + /* Write bits 7:0 */ + if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF)) + goto fail; + /* Write csum */ + if (ixgbe_out_i2c_byte_ack(hw, csum)) + goto fail; + /* Re-start condition */ + ixgbe_i2c_start(hw); + /* Device Address and read indication */ + if (ixgbe_out_i2c_byte_ack(hw, addr | 1)) + goto fail; + /* Get upper bits */ + if (ixgbe_in_i2c_byte_ack(hw, &high_bits)) + goto fail; + /* Get low bits */ + if (ixgbe_in_i2c_byte_ack(hw, &low_bits)) + goto fail; + /* Get csum */ + if (ixgbe_clock_in_i2c_byte(hw, &csum_byte)) + goto fail; + /* NACK */ + if (ixgbe_clock_out_i2c_bit(hw, FALSE)) + goto fail; + ixgbe_i2c_stop(hw); + if (lock) + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + *val = (high_bits << 8) | low_bits; + return 0; + +fail: + ixgbe_i2c_bus_clear(hw); + if (lock) + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + retry++; + if (retry < max_retry) + DEBUGOUT("I2C byte read combined error - Retrying.\n"); + else + DEBUGOUT("I2C byte read combined error.\n"); + } while (retry < max_retry); + + return IXGBE_ERR_I2C; +} + +/** + * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to read from + * @reg: I2C device register to read from + * @val: pointer to location to receive read value + * + * Returns an error code on error. + **/ +static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, + u16 reg, u16 *val) +{ + return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, TRUE); +} + +/** + * ixgbe_read_i2c_combined_generic_unlocked - Do I2C read combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to read from + * @reg: I2C device register to read from + * @val: pointer to location to receive read value + * + * Returns an error code on error. + **/ +static s32 +ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, + u16 reg, u16 *val) +{ + return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, FALSE); +} + +/** + * ixgbe_write_i2c_combined_generic_int - Perform I2C write combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to write to + * @reg: I2C device register to write to + * @val: value to write + * @lock: TRUE if to take and release semaphore + * + * Returns an error code on error. 
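
The _int worker plus locked/unlocked wrapper split used above (and repeated for writes below) is worth calling out: one function performs the transaction and takes a bool deciding whether it owns the SWFW semaphore, so callers that already hold the semaphore reuse the same code path. A condensed sketch of the pattern, with illustrative names:

static s32 xfer_int(struct ixgbe_hw *hw, u16 reg, u16 *val, bool lock)
{
	u32 mask = hw->phy.phy_semaphore_mask;

	if (lock && hw->mac.ops.acquire_swfw_sync(hw, mask))
		return IXGBE_ERR_SWFW_SYNC;
	/* ... bit-banged transfer ... */
	if (lock)
		hw->mac.ops.release_swfw_sync(hw, mask);
	return IXGBE_SUCCESS;
}

static s32 xfer(struct ixgbe_hw *hw, u16 reg, u16 *val)
{
	return xfer_int(hw, reg, val, TRUE);	/* takes the semaphore */
}

static s32 xfer_unlocked(struct ixgbe_hw *hw, u16 reg, u16 *val)
{
	return xfer_int(hw, reg, val, FALSE);	/* caller already holds it */
}
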
+ */ +static s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, + u16 reg, u16 val, bool lock) +{ + u32 swfw_mask = hw->phy.phy_semaphore_mask; + int max_retry = 1; + int retry = 0; + u8 reg_high; + u8 csum; + + reg_high = (reg >> 7) & 0xFE; /* Indicate write combined */ + csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF); + csum = ixgbe_ones_comp_byte_add(csum, val >> 8); + csum = ixgbe_ones_comp_byte_add(csum, val & 0xFF); + csum = ~csum; + do { + if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) + return IXGBE_ERR_SWFW_SYNC; + ixgbe_i2c_start(hw); + /* Device Address and write indication */ + if (ixgbe_out_i2c_byte_ack(hw, addr)) + goto fail; + /* Write bits 14:8 */ + if (ixgbe_out_i2c_byte_ack(hw, reg_high)) + goto fail; + /* Write bits 7:0 */ + if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF)) + goto fail; + /* Write data 15:8 */ + if (ixgbe_out_i2c_byte_ack(hw, val >> 8)) + goto fail; + /* Write data 7:0 */ + if (ixgbe_out_i2c_byte_ack(hw, val & 0xFF)) + goto fail; + /* Write csum */ + if (ixgbe_out_i2c_byte_ack(hw, csum)) + goto fail; + ixgbe_i2c_stop(hw); + if (lock) + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + return 0; + +fail: + ixgbe_i2c_bus_clear(hw); + if (lock) + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + retry++; + if (retry < max_retry) + DEBUGOUT("I2C byte write combined error - Retrying.\n"); + else + DEBUGOUT("I2C byte write combined error.\n"); + } while (retry < max_retry); + + return IXGBE_ERR_I2C; +} + +/** + * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to write to + * @reg: I2C device register to write to + * @val: value to write + * + * Returns an error code on error. + **/ +static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, + u8 addr, u16 reg, u16 val) +{ + return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, TRUE); +} + +/** + * ixgbe_write_i2c_combined_generic_unlocked - Do I2C write combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to write to + * @reg: I2C device register to write to + * @val: value to write + * + * Returns an error code on error. 
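
Both combined operations share one failure shape: any missing ACK jumps to fail, which clears the bus, drops the semaphore if held, and retries (10 attempts for reads, 3 on X550, a single attempt for writes). Reduced to a skeleton using the file's static helpers (the wrapper and the 0xA0 address byte are illustrative):

static s32 example_i2c_retry(struct ixgbe_hw *hw, int max_retry)
{
	int retry = 0;

	do {
		ixgbe_i2c_start(hw);
		/* a NACK on the (example) address byte aborts this attempt */
		if (ixgbe_clock_out_i2c_byte(hw, 0xA0) ||
		    ixgbe_get_i2c_ack(hw))
			goto fail;
		ixgbe_i2c_stop(hw);
		return IXGBE_SUCCESS;
fail:
		ixgbe_i2c_bus_clear(hw);	/* pulses SCL to free a stuck slave */
		retry++;
		if (retry < max_retry)
			DEBUGOUT("I2C error - Retrying.\n");
	} while (retry < max_retry);

	return IXGBE_ERR_I2C;
}
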
+ **/ +static s32 +ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, + u8 addr, u16 reg, u16 val) +{ + return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, FALSE); +} + +/** * ixgbe_init_phy_ops_generic - Inits PHY function ptrs * @hw: pointer to the hardware structure * @@ -63,25 +320,34 @@ s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw) DEBUGFUNC("ixgbe_init_phy_ops_generic"); /* PHY */ - phy->ops.identify = &ixgbe_identify_phy_generic; - phy->ops.reset = &ixgbe_reset_phy_generic; - phy->ops.read_reg = &ixgbe_read_phy_reg_generic; - phy->ops.write_reg = &ixgbe_write_phy_reg_generic; - phy->ops.read_reg_mdi = &ixgbe_read_phy_reg_mdi; - phy->ops.write_reg_mdi = &ixgbe_write_phy_reg_mdi; - phy->ops.setup_link = &ixgbe_setup_phy_link_generic; - phy->ops.setup_link_speed = &ixgbe_setup_phy_link_speed_generic; + phy->ops.identify = ixgbe_identify_phy_generic; + phy->ops.reset = ixgbe_reset_phy_generic; + phy->ops.read_reg = ixgbe_read_phy_reg_generic; + phy->ops.write_reg = ixgbe_write_phy_reg_generic; + phy->ops.read_reg_mdi = ixgbe_read_phy_reg_mdi; + phy->ops.write_reg_mdi = ixgbe_write_phy_reg_mdi; + phy->ops.setup_link = ixgbe_setup_phy_link_generic; + phy->ops.setup_link_speed = ixgbe_setup_phy_link_speed_generic; phy->ops.check_link = NULL; phy->ops.get_firmware_version = ixgbe_get_phy_firmware_version_generic; - phy->ops.read_i2c_byte = &ixgbe_read_i2c_byte_generic; - phy->ops.write_i2c_byte = &ixgbe_write_i2c_byte_generic; - phy->ops.read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic; - phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic; - phy->ops.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic; - phy->ops.i2c_bus_clear = &ixgbe_i2c_bus_clear; - phy->ops.identify_sfp = &ixgbe_identify_module_generic; + phy->ops.read_i2c_byte = ixgbe_read_i2c_byte_generic; + phy->ops.write_i2c_byte = ixgbe_write_i2c_byte_generic; + phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_generic; + phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_generic; + phy->ops.write_i2c_eeprom = ixgbe_write_i2c_eeprom_generic; + phy->ops.i2c_bus_clear = ixgbe_i2c_bus_clear; + phy->ops.identify_sfp = ixgbe_identify_module_generic; phy->sfp_type = ixgbe_sfp_type_unknown; - phy->ops.check_overtemp = &ixgbe_tn_check_overtemp; + phy->ops.read_i2c_combined = ixgbe_read_i2c_combined_generic; + phy->ops.write_i2c_combined = ixgbe_write_i2c_combined_generic; + phy->ops.read_i2c_combined_unlocked = + ixgbe_read_i2c_combined_generic_unlocked; + phy->ops.write_i2c_combined_unlocked = + ixgbe_write_i2c_combined_generic_unlocked; + phy->ops.read_i2c_byte_unlocked = ixgbe_read_i2c_byte_generic_unlocked; + phy->ops.write_i2c_byte_unlocked = + ixgbe_write_i2c_byte_generic_unlocked; + phy->ops.check_overtemp = ixgbe_tn_check_overtemp; return IXGBE_SUCCESS; } @@ -99,6 +365,13 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) DEBUGFUNC("ixgbe_identify_phy_generic"); + if (!hw->phy.phy_semaphore_mask) { + if (hw->bus.lan_id) + hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM; + else + hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; + } + if (hw->phy.type == ixgbe_phy_unknown) { for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { if (ixgbe_validate_phy_addr(hw, phy_addr)) { @@ -126,11 +399,13 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) break; } } - /* clear value if nothing found */ + + /* Certain media types do not have a phy so an address will not + * be found and the code will take this path. Caller has to + * decide if it is an error or not. 
+	 */
 	if (status != IXGBE_SUCCESS) {
 		hw->phy.addr = 0;
-		ERROR_REPORT1(IXGBE_ERROR_SOFTWARE,
-			      "Could not identify valid PHY address");
 	}
 	} else {
 		status = IXGBE_SUCCESS;
 	}
 }

 /**
@@ -140,6 +415,35 @@
 }

 /**
+ * ixgbe_check_reset_blocked - check status of MNG FW veto bit
+ * @hw: pointer to the hardware structure
+ *
+ * This function checks the MMNGC.MNG_VETO bit to see if there are
+ * any constraints on link from manageability.  For MACs that don't
+ * have this bit just return false since the link can not be blocked
+ * via this method.
+ **/
+s32 ixgbe_check_reset_blocked(struct ixgbe_hw *hw)
+{
+	u32 mmngc;
+
+	DEBUGFUNC("ixgbe_check_reset_blocked");
+
+	/* If we don't have this bit, it can't be blocking */
+	if (hw->mac.type == ixgbe_mac_82598EB)
+		return FALSE;
+
+	mmngc = IXGBE_READ_REG(hw, IXGBE_MMNGC);
+	if (mmngc & IXGBE_MMNGC_MNG_VETO) {
+		ERROR_REPORT1(IXGBE_ERROR_SOFTWARE,
+			      "MNG_VETO bit detected.\n");
+		return TRUE;
+	}
+
+	return FALSE;
+}
+
+/**
  * ixgbe_validate_phy_addr - Determines phy address is valid
  * @hw: pointer to hardware structure
  *
@@ -204,6 +508,7 @@ enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
 	case TN1010_PHY_ID:
 		phy_type = ixgbe_phy_tn;
 		break;
+	case X550_PHY_ID:
 	case X540_PHY_ID:
 		phy_type = ixgbe_phy_aq;
 		break;
@@ -213,6 +518,9 @@
 	case ATH_PHY_ID:
 		phy_type = ixgbe_phy_nl;
 		break;
+	case X557_PHY_ID:
+		phy_type = ixgbe_phy_x550em_ext_t;
+		break;
 	default:
 		phy_type = ixgbe_phy_unknown;
 		break;
@@ -245,6 +553,10 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
 	    (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
 		goto out;

+	/* Blocked by MNG FW so bail */
+	if (ixgbe_check_reset_blocked(hw))
+		goto out;
+
 	/*
 	 * Perform soft PHY reset to the PHY_XS.
 	 * This will cause a soft reset to the PHY
@@ -368,15 +680,10 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
 			       u32 device_type, u16 *phy_data)
 {
 	s32 status;
-	u16 gssr;
+	u32 gssr = hw->phy.phy_semaphore_mask;

 	DEBUGFUNC("ixgbe_read_phy_reg_generic");

-	if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
-		gssr = IXGBE_GSSR_PHY1_SM;
-	else
-		gssr = IXGBE_GSSR_PHY0_SM;
-
 	if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == IXGBE_SUCCESS) {
 		status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type,
 						phy_data);
@@ -474,15 +781,10 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
 				u32 device_type, u16 phy_data)
 {
 	s32 status;
-	u16 gssr;
+	u32 gssr = hw->phy.phy_semaphore_mask;

 	DEBUGFUNC("ixgbe_write_phy_reg_generic");

-	if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
-		gssr = IXGBE_GSSR_PHY1_SM;
-	else
-		gssr = IXGBE_GSSR_PHY0_SM;
-
 	if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == IXGBE_SUCCESS) {
 		status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type,
 						 phy_data);
@@ -495,16 +797,14 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
 }

 /**
- * ixgbe_setup_phy_link_generic - Set and restart autoneg
+ * ixgbe_setup_phy_link_generic - Set and restart auto-neg
  * @hw: pointer to hardware structure
  *
- * Restart autonegotiation and PHY and waits for completion.
+ * Restart auto-negotiation and PHY and waits for completion.
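
ixgbe_check_reset_blocked() is then consulted before every PHY-disturbing step in this file: the generic and tnx autoneg restarts, ixgbe_reset_phy_nl(), and the new power-control helper. The guard always degrades to a silent no-op rather than an error, e.g. (condensed illustration, not a function from the commit):

static s32 example_phy_disturb(struct ixgbe_hw *hw)
{
	/* MNG FW owns the link; skip the reset but do not report failure */
	if (ixgbe_check_reset_blocked(hw))
		return IXGBE_SUCCESS;

	/* ... soft PHY reset / autoneg restart would go here ... */
	return IXGBE_SUCCESS;
}
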
**/ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) { s32 status = IXGBE_SUCCESS; - u32 time_out; - u32 max_time_out = 10; u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; bool autoneg = FALSE; ixgbe_link_speed speed; @@ -528,6 +828,44 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) autoneg_reg); } + if (hw->mac.type == ixgbe_mac_X550) { + if (speed & IXGBE_LINK_SPEED_5GB_FULL) { + /* Set or unset auto-negotiation 1G advertisement */ + hw->phy.ops.read_reg(hw, + IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + + autoneg_reg &= ~IXGBE_MII_5GBASE_T_ADVERTISE; + if (hw->phy.autoneg_advertised & + IXGBE_LINK_SPEED_5GB_FULL) + autoneg_reg |= IXGBE_MII_5GBASE_T_ADVERTISE; + + hw->phy.ops.write_reg(hw, + IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + autoneg_reg); + } + + if (speed & IXGBE_LINK_SPEED_2_5GB_FULL) { + /* Set or unset auto-negotiation 1G advertisement */ + hw->phy.ops.read_reg(hw, + IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + + autoneg_reg &= ~IXGBE_MII_2_5GBASE_T_ADVERTISE; + if (hw->phy.autoneg_advertised & + IXGBE_LINK_SPEED_2_5GB_FULL) + autoneg_reg |= IXGBE_MII_2_5GBASE_T_ADVERTISE; + + hw->phy.ops.write_reg(hw, + IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + autoneg_reg); + } + } + if (speed & IXGBE_LINK_SPEED_1GB_FULL) { /* Set or unset auto-negotiation 1G advertisement */ hw->phy.ops.read_reg(hw, @@ -561,7 +899,11 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) autoneg_reg); } - /* Restart PHY autonegotiation and wait for completion */ + /* Blocked by MNG FW so don't reset PHY */ + if (ixgbe_check_reset_blocked(hw)) + return status; + + /* Restart PHY auto-negotiation. */ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); @@ -570,25 +912,6 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg); - /* Wait for autonegotiation to finish */ - for (time_out = 0; time_out < max_time_out; time_out++) { - usec_delay(10); - /* Restart PHY autonegotiation and wait for completion */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, - &autoneg_reg); - - autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE; - if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE) - break; - } - - if (time_out == max_time_out) { - status = IXGBE_ERR_LINK_SETUP; - ERROR_REPORT1(IXGBE_ERROR_POLLING, - "PHY autonegotiation time out"); - } - return status; } @@ -614,6 +937,12 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, if (speed & IXGBE_LINK_SPEED_10GB_FULL) hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; + if (speed & IXGBE_LINK_SPEED_5GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_5GB_FULL; + + if (speed & IXGBE_LINK_SPEED_2_5GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_2_5GB_FULL; + if (speed & IXGBE_LINK_SPEED_1GB_FULL) hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; @@ -632,13 +961,14 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, * @speed: pointer to link speed * @autoneg: boolean auto-negotiation value * - * Determines the link capabilities by reading the AUTOC register. + * Determines the supported link capabilities by reading the PHY auto + * negotiation register. 
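
Each advertised speed above is handled with the same MDIO read-modify-write; note that the comments inside the new 5G and 2.5G hunks still say "1G advertisement", a copy-paste from the 1G block. The per-speed pattern, isolated into a hypothetical helper (register and bit names are the ones from the hunks):

static void example_advertise_5g(struct ixgbe_hw *hw, bool enable)
{
	u16 reg;

	hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
	if (enable)
		reg |= IXGBE_MII_5GBASE_T_ADVERTISE;
	else
		reg &= ~IXGBE_MII_5GBASE_T_ADVERTISE;
	hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
			      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg);
}
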
**/ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *autoneg) { - s32 status = IXGBE_ERR_LINK_SETUP; + s32 status; u16 speed_ability; DEBUGFUNC("ixgbe_get_copper_link_capabilities_generic"); @@ -659,6 +989,15 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, *speed |= IXGBE_LINK_SPEED_100_FULL; } + /* Internal PHY does not support 100 Mbps */ + if (hw->mac.type == ixgbe_mac_X550EM_x) + *speed &= ~IXGBE_LINK_SPEED_100_FULL; + + if (hw->mac.type == ixgbe_mac_X550) { + *speed |= IXGBE_LINK_SPEED_2_5GB_FULL; + *speed |= IXGBE_LINK_SPEED_5GB_FULL; + } + return status; } @@ -712,16 +1051,14 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, } /** - * ixgbe_setup_phy_link_tnx - Set and restart autoneg + * ixgbe_setup_phy_link_tnx - Set and restart auto-neg * @hw: pointer to hardware structure * - * Restart autonegotiation and PHY and waits for completion. + * Restart auto-negotiation and PHY and waits for completion. **/ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) { s32 status = IXGBE_SUCCESS; - u32 time_out; - u32 max_time_out = 10; u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; bool autoneg = FALSE; ixgbe_link_speed speed; @@ -775,7 +1112,11 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) autoneg_reg); } - /* Restart PHY autonegotiation and wait for completion */ + /* Blocked by MNG FW so don't reset PHY */ + if (ixgbe_check_reset_blocked(hw)) + return status; + + /* Restart PHY auto-negotiation. */ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); @@ -784,24 +1125,6 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg); - /* Wait for autonegotiation to finish */ - for (time_out = 0; time_out < max_time_out; time_out++) { - usec_delay(10); - /* Restart PHY autonegotiation and wait for completion */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, - &autoneg_reg); - - autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE; - if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE) - break; - } - - if (time_out == max_time_out) { - status = IXGBE_ERR_LINK_SETUP; - DEBUGOUT("ixgbe_setup_phy_link_tnx: time out"); - } - return status; } @@ -813,7 +1136,7 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, u16 *firmware_version) { - s32 status = IXGBE_SUCCESS; + s32 status; DEBUGFUNC("ixgbe_get_phy_firmware_version_tnx"); @@ -832,7 +1155,7 @@ s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, u16 *firmware_version) { - s32 status = IXGBE_SUCCESS; + s32 status; DEBUGFUNC("ixgbe_get_phy_firmware_version_generic"); @@ -858,6 +1181,10 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw) DEBUGFUNC("ixgbe_reset_phy_nl"); + /* Blocked by MNG FW so bail */ + if (ixgbe_check_reset_blocked(hw)) + goto out; + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data); @@ -972,6 +1299,9 @@ s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw) status = ixgbe_identify_sfp_module_generic(hw); break; + case ixgbe_media_type_fiber_qsfp: + status = ixgbe_identify_qsfp_module_generic(hw); + break; default: hw->phy.sfp_type = ixgbe_sfp_type_not_present; @@ -1009,6 +1339,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) goto out; } + /* LAN ID is needed for I2C access */ + 
hw->mac.ops.set_lan_id(hw); + status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER, &identifier); @@ -1016,9 +1349,6 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) if (status != IXGBE_SUCCESS) goto err_read_i2c_eeprom; - /* LAN ID is needed for sfp_type determination */ - hw->mac.ops.set_lan_id(hw); - if (identifier != IXGBE_SFF_IDENTIFIER_SFP) { hw->phy.type = ixgbe_phy_sfp_unsupported; status = IXGBE_ERR_SFP_NOT_SUPPORTED; @@ -1068,7 +1398,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) hw->phy.sfp_type = ixgbe_sfp_type_lr; else hw->phy.sfp_type = ixgbe_sfp_type_unknown; - } else if (hw->mac.type == ixgbe_mac_82599EB) { + } else { if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) { if (hw->bus.lan_id == 0) hw->phy.sfp_type = @@ -1115,6 +1445,13 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) else hw->phy.sfp_type = ixgbe_sfp_type_1g_sx_core1; + } else if (comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_1g_lx_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_1g_lx_core1; } else { hw->phy.sfp_type = ixgbe_sfp_type_unknown; } @@ -1202,6 +1539,8 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) if (comp_codes_10g == 0 && !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 || hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) { hw->phy.type = ixgbe_phy_sfp_unsupported; @@ -1219,6 +1558,8 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) && !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 || hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) { /* Make sure we're a supported PHY type */ @@ -1261,6 +1602,266 @@ err_read_i2c_eeprom: return IXGBE_ERR_SFP_NOT_PRESENT; } +/** + * ixgbe_get_supported_phy_sfp_layer_generic - Returns physical layer type + * @hw: pointer to hardware structure + * + * Determines physical layer capabilities of the current SFP. 
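
A detail that is easy to miss in the reordering above: set_lan_id() now runs before the first EEPROM byte is fetched, because, per the new comment, the I2C access path itself depends on the LAN ID, not just the later sfp_type core0/core1 choice. A minimal sketch of the identification preamble (the wrapper name is illustrative):

static s32 example_peek_module(struct ixgbe_hw *hw, u8 *id)
{
	/* LAN ID first: it is needed for I2C access itself */
	hw->mac.ops.set_lan_id(hw);

	/* 0x03 = SFP, 0x0D = QSFP+ (IXGBE_SFF_IDENTIFIER_QSFP_PLUS) */
	return hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER, id);
}
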
+ */ +s32 ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw) +{ + u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + u8 comp_codes_10g = 0; + u8 comp_codes_1g = 0; + + DEBUGFUNC("ixgbe_get_supported_phy_sfp_layer_generic"); + + hw->phy.ops.identify_sfp(hw); + if (hw->phy.sfp_type == ixgbe_sfp_type_not_present) + return physical_layer; + + switch (hw->phy.type) { + case ixgbe_phy_sfp_passive_tyco: + case ixgbe_phy_sfp_passive_unknown: + case ixgbe_phy_qsfp_passive_unknown: + physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; + break; + case ixgbe_phy_sfp_ftl_active: + case ixgbe_phy_sfp_active_unknown: + case ixgbe_phy_qsfp_active_unknown: + physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA; + break; + case ixgbe_phy_sfp_avago: + case ixgbe_phy_sfp_ftl: + case ixgbe_phy_sfp_intel: + case ixgbe_phy_sfp_unknown: + hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g); + hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g); + if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; + else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; + else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) + physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T; + else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) + physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_SX; + break; + case ixgbe_phy_qsfp_intel: + case ixgbe_phy_qsfp_unknown: + hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_10GBE_COMP, &comp_codes_10g); + if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; + else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; + break; + default: + break; + } + + return physical_layer; +} + +/** + * ixgbe_identify_qsfp_module_generic - Identifies QSFP modules + * @hw: pointer to hardware structure + * + * Searches for and identifies the QSFP module and assigns appropriate PHY type + **/ +s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_ERR_PHY_ADDR_INVALID; + u32 vendor_oui = 0; + enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type; + u8 identifier = 0; + u8 comp_codes_1g = 0; + u8 comp_codes_10g = 0; + u8 oui_bytes[3] = {0, 0, 0}; + u16 enforce_sfp = 0; + u8 connector = 0; + u8 cable_length = 0; + u8 device_tech = 0; + bool active_cable = FALSE; + + DEBUGFUNC("ixgbe_identify_qsfp_module_generic"); + + if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) { + hw->phy.sfp_type = ixgbe_sfp_type_not_present; + status = IXGBE_ERR_SFP_NOT_PRESENT; + goto out; + } + + /* LAN ID is needed for I2C access */ + hw->mac.ops.set_lan_id(hw); + + status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER, + &identifier); + + if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + if (identifier != IXGBE_SFF_IDENTIFIER_QSFP_PLUS) { + hw->phy.type = ixgbe_phy_sfp_unsupported; + status = IXGBE_ERR_SFP_NOT_SUPPORTED; + goto out; + } + + hw->phy.id = identifier; + + status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_10GBE_COMP, + &comp_codes_10g); + + if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_1GBE_COMP, + &comp_codes_1g); + + if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + if (comp_codes_10g & IXGBE_SFF_QSFP_DA_PASSIVE_CABLE) { + hw->phy.type = ixgbe_phy_qsfp_passive_unknown; + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = 
ixgbe_sfp_type_da_cu_core0; + else + hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core1; + } else if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE | + IXGBE_SFF_10GBASELR_CAPABLE)) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = ixgbe_sfp_type_srlr_core0; + else + hw->phy.sfp_type = ixgbe_sfp_type_srlr_core1; + } else { + if (comp_codes_10g & IXGBE_SFF_QSFP_DA_ACTIVE_CABLE) + active_cable = TRUE; + + if (!active_cable) { + /* check for active DA cables that pre-date + * SFF-8436 v3.6 */ + hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_CONNECTOR, + &connector); + + hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_CABLE_LENGTH, + &cable_length); + + hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_DEVICE_TECH, + &device_tech); + + if ((connector == + IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE) && + (cable_length > 0) && + ((device_tech >> 4) == + IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL)) + active_cable = TRUE; + } + + if (active_cable) { + hw->phy.type = ixgbe_phy_qsfp_active_unknown; + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_da_act_lmt_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_da_act_lmt_core1; + } else { + /* unsupported module type */ + hw->phy.type = ixgbe_phy_sfp_unsupported; + status = IXGBE_ERR_SFP_NOT_SUPPORTED; + goto out; + } + } + + if (hw->phy.sfp_type != stored_sfp_type) + hw->phy.sfp_setup_needed = TRUE; + + /* Determine if the QSFP+ PHY is dual speed or not. */ + hw->phy.multispeed_fiber = FALSE; + if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) && + (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) || + ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) && + (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE))) + hw->phy.multispeed_fiber = TRUE; + + /* Determine PHY vendor for optical modules */ + if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE | + IXGBE_SFF_10GBASELR_CAPABLE)) { + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0, + &oui_bytes[0]); + + if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1, + &oui_bytes[1]); + + if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2, + &oui_bytes[2]); + + if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + vendor_oui = + ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) | + (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) | + (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT)); + + if (vendor_oui == IXGBE_SFF_VENDOR_OUI_INTEL) + hw->phy.type = ixgbe_phy_qsfp_intel; + else + hw->phy.type = ixgbe_phy_qsfp_unknown; + + ixgbe_get_device_caps(hw, &enforce_sfp); + if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) { + /* Make sure we're a supported PHY type */ + if (hw->phy.type == ixgbe_phy_qsfp_intel) { + status = IXGBE_SUCCESS; + } else { + if (hw->allow_unsupported_sfp == TRUE) { + EWARN(hw, "WARNING: Intel (R) Network " + "Connections are quality tested " + "using Intel (R) Ethernet Optics." + " Using untested modules is not " + "supported and may cause unstable" + " operation or damage to the " + "module or the adapter. 
Intel " + "Corporation is not responsible " + "for any harm caused by using " + "untested modules.\n", status); + status = IXGBE_SUCCESS; + } else { + DEBUGOUT("QSFP module not supported\n"); + hw->phy.type = + ixgbe_phy_sfp_unsupported; + status = IXGBE_ERR_SFP_NOT_SUPPORTED; + } + } + } else { + status = IXGBE_SUCCESS; + } + } + +out: + return status; + +err_read_i2c_eeprom: + hw->phy.sfp_type = ixgbe_sfp_type_not_present; + hw->phy.id = 0; + hw->phy.type = ixgbe_phy_unknown; + + return IXGBE_ERR_SFP_NOT_PRESENT; +} /** @@ -1296,10 +1897,12 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, * SR modules */ if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 || + sfp_type == ixgbe_sfp_type_1g_lx_core0 || sfp_type == ixgbe_sfp_type_1g_cu_core0 || sfp_type == ixgbe_sfp_type_1g_sx_core0) sfp_type = ixgbe_sfp_type_srlr_core0; else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 || + sfp_type == ixgbe_sfp_type_1g_lx_core1 || sfp_type == ixgbe_sfp_type_1g_cu_core1 || sfp_type == ixgbe_sfp_type_1g_sx_core1) sfp_type = ixgbe_sfp_type_srlr_core1; @@ -1409,37 +2012,50 @@ s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, } /** - * ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C + * ixgbe_is_sfp_probe - Returns TRUE if SFP is being detected + * @hw: pointer to hardware structure + * @offset: eeprom offset to be read + * @addr: I2C address to be read + */ +static bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr) +{ + if (addr == IXGBE_I2C_EEPROM_DEV_ADDR && + offset == IXGBE_SFF_IDENTIFIER && + hw->phy.sfp_type == ixgbe_sfp_type_not_present) + return TRUE; + return FALSE; +} + +/** + * ixgbe_read_i2c_byte_generic_int - Reads 8 bit word over I2C * @hw: pointer to hardware structure * @byte_offset: byte offset to read * @data: value read + * @lock: TRUE if to take and release semaphore * * Performs byte read operation to SFP module's EEPROM over I2C interface at * a specified device address. 
**/ -s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, - u8 dev_addr, u8 *data) +static s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data, bool lock) { - s32 status = IXGBE_SUCCESS; + s32 status; u32 max_retry = 10; u32 retry = 0; - u16 swfw_mask = 0; + u32 swfw_mask = hw->phy.phy_semaphore_mask; bool nack = 1; *data = 0; DEBUGFUNC("ixgbe_read_i2c_byte_generic"); - if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) - swfw_mask = IXGBE_GSSR_PHY1_SM; - else - swfw_mask = IXGBE_GSSR_PHY0_SM; + if (hw->mac.type >= ixgbe_mac_X550) + max_retry = 3; + if (ixgbe_is_sfp_probe(hw, byte_offset, dev_addr)) + max_retry = IXGBE_SFP_DETECT_RETRIES; do { - if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) - != IXGBE_SUCCESS) { - status = IXGBE_ERR_SWFW_SYNC; - goto read_byte_out; - } + if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) + return IXGBE_ERR_SWFW_SYNC; ixgbe_i2c_start(hw); @@ -1480,12 +2096,16 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, goto fail; ixgbe_i2c_stop(hw); - break; + if (lock) + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + return IXGBE_SUCCESS; fail: ixgbe_i2c_bus_clear(hw); - hw->mac.ops.release_swfw_sync(hw, swfw_mask); - msec_delay(100); + if (lock) { + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + msec_delay(100); + } retry++; if (retry < max_retry) DEBUGOUT("I2C byte read error - Retrying.\n"); @@ -1494,40 +2114,64 @@ fail: } while (retry < max_retry); - hw->mac.ops.release_swfw_sync(hw, swfw_mask); - -read_byte_out: return status; } /** - * ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C + * ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface at + * a specified device address. + **/ +s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data) +{ + return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr, + data, TRUE); +} + +/** + * ixgbe_read_i2c_byte_generic_unlocked - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface at + * a specified device address. + **/ +s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data) +{ + return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr, + data, FALSE); +} + +/** + * ixgbe_write_i2c_byte_generic_int - Writes 8 bit word over I2C * @hw: pointer to hardware structure * @byte_offset: byte offset to write * @data: value to write + * @lock: TRUE if to take and release semaphore * * Performs byte write operation to SFP module's EEPROM over I2C interface at * a specified device address. 
**/ -s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, - u8 dev_addr, u8 data) +static s32 ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data, bool lock) { - s32 status = IXGBE_SUCCESS; + s32 status; u32 max_retry = 1; u32 retry = 0; - u16 swfw_mask = 0; + u32 swfw_mask = hw->phy.phy_semaphore_mask; DEBUGFUNC("ixgbe_write_i2c_byte_generic"); - if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) - swfw_mask = IXGBE_GSSR_PHY1_SM; - else - swfw_mask = IXGBE_GSSR_PHY0_SM; - - if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != IXGBE_SUCCESS) { - status = IXGBE_ERR_SWFW_SYNC; - goto write_byte_out; - } + if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != + IXGBE_SUCCESS) + return IXGBE_ERR_SWFW_SYNC; do { ixgbe_i2c_start(hw); @@ -1557,7 +2201,9 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, goto fail; ixgbe_i2c_stop(hw); - break; + if (lock) + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + return IXGBE_SUCCESS; fail: ixgbe_i2c_bus_clear(hw); @@ -1568,24 +2214,59 @@ fail: DEBUGOUT("I2C byte write error.\n"); } while (retry < max_retry); - hw->mac.ops.release_swfw_sync(hw, swfw_mask); + if (lock) + hw->mac.ops.release_swfw_sync(hw, swfw_mask); -write_byte_out: return status; } /** + * ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface at + * a specified device address. + **/ +s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data) +{ + return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr, + data, TRUE); +} + +/** + * ixgbe_write_i2c_byte_generic_unlocked - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface at + * a specified device address. + **/ +s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data) +{ + return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr, + data, FALSE); +} + +/** * ixgbe_i2c_start - Sets I2C start condition * @hw: pointer to hardware structure * * Sets I2C start condition (High -> Low on SDA while SCL is High) + * Set bit-bang mode on X550 hardware. **/ static void ixgbe_i2c_start(struct ixgbe_hw *hw) { - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); DEBUGFUNC("ixgbe_i2c_start"); + i2cctl |= IXGBE_I2C_BB_EN_BY_MAC(hw); + /* Start condition must begin with data and clock high */ ixgbe_set_i2c_data(hw, &i2cctl, 1); ixgbe_raise_i2c_clk(hw, &i2cctl); @@ -1610,10 +2291,15 @@ static void ixgbe_i2c_start(struct ixgbe_hw *hw) * @hw: pointer to hardware structure * * Sets I2C stop condition (Low -> High on SDA while SCL is High) + * Disables bit-bang mode and negates data output enable on X550 + * hardware. 
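
The output-enable handling sprinkled through these I2C routines is free on older parts: IXGBE_I2C_DATA_OE_N_EN, IXGBE_I2C_CLK_OE_N_EN and IXGBE_I2C_BB_EN are defined as 0 for 82599/X540 (see the ixgbe_type.h hunks below), so the _BY_MAC() lookups resolve to 0 at runtime and every if (oe_bit) block is simply skipped. For example, condensed from the new ixgbe_i2c_stop() tail:

static void example_stop_tail(struct ixgbe_hw *hw, u32 i2cctl)
{
	u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);
	u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw);
	u32 bb_en_bit = IXGBE_I2C_BB_EN_BY_MAC(hw);

	/* all three are 0 on 82599/X540, so this never runs there */
	if (bb_en_bit || data_oe_bit || clk_oe_bit) {
		i2cctl &= ~bb_en_bit;
		i2cctl |= data_oe_bit | clk_oe_bit;
		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl);
		IXGBE_WRITE_FLUSH(hw);
	}
}
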
**/ static void ixgbe_i2c_stop(struct ixgbe_hw *hw) { - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); + u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw); + u32 bb_en_bit = IXGBE_I2C_BB_EN_BY_MAC(hw); DEBUGFUNC("ixgbe_i2c_stop"); @@ -1628,6 +2314,13 @@ static void ixgbe_i2c_stop(struct ixgbe_hw *hw) /* bus free time between stop and start (4.7us)*/ usec_delay(IXGBE_I2C_T_BUF); + + if (bb_en_bit || data_oe_bit || clk_oe_bit) { + i2cctl &= ~bb_en_bit; + i2cctl |= data_oe_bit | clk_oe_bit; + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl); + IXGBE_WRITE_FLUSH(hw); + } } /** @@ -1644,6 +2337,7 @@ static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data) DEBUGFUNC("ixgbe_clock_in_i2c_byte"); + *data = 0; for (i = 7; i >= 0; i--) { ixgbe_clock_in_i2c_bit(hw, &bit); *data |= bit << i; @@ -1664,7 +2358,7 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data) s32 status = IXGBE_SUCCESS; s32 i; u32 i2cctl; - bool bit = 0; + bool bit; DEBUGFUNC("ixgbe_clock_out_i2c_byte"); @@ -1677,9 +2371,10 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data) } /* Release SDA line (set high) */ - i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); - i2cctl |= IXGBE_I2C_DATA_OUT; - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, i2cctl); + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw); + i2cctl |= IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl); IXGBE_WRITE_FLUSH(hw); return status; @@ -1693,34 +2388,39 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data) **/ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw) { + u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); s32 status = IXGBE_SUCCESS; u32 i = 0; - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); u32 timeout = 10; bool ack = 1; DEBUGFUNC("ixgbe_get_i2c_ack"); + if (data_oe_bit) { + i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw); + i2cctl |= data_oe_bit; + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl); + IXGBE_WRITE_FLUSH(hw); + } ixgbe_raise_i2c_clk(hw, &i2cctl); - /* Minimum high period of clock is 4us */ usec_delay(IXGBE_I2C_T_HIGH); /* Poll for ACK. 
Note that ACK in I2C spec is * transition from 1 to 0 */ for (i = 0; i < timeout; i++) { - i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); - ack = ixgbe_get_i2c_data(&i2cctl); + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + ack = ixgbe_get_i2c_data(hw, &i2cctl); usec_delay(1); - if (ack == 0) + if (!ack) break; } - if (ack == 1) { - ERROR_REPORT1(IXGBE_ERROR_POLLING, - "I2C ack was not received.\n"); + if (ack) { + DEBUGOUT("I2C ack was not received.\n"); status = IXGBE_ERR_I2C; } @@ -1741,17 +2441,24 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw) **/ static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data) { - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); DEBUGFUNC("ixgbe_clock_in_i2c_bit"); + if (data_oe_bit) { + i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw); + i2cctl |= data_oe_bit; + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl); + IXGBE_WRITE_FLUSH(hw); + } ixgbe_raise_i2c_clk(hw, &i2cctl); /* Minimum high period of clock is 4us */ usec_delay(IXGBE_I2C_T_HIGH); - i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); - *data = ixgbe_get_i2c_data(&i2cctl); + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + *data = ixgbe_get_i2c_data(hw, &i2cctl); ixgbe_lower_i2c_clk(hw, &i2cctl); @@ -1771,7 +2478,7 @@ static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data) static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data) { s32 status; - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); DEBUGFUNC("ixgbe_clock_out_i2c_bit"); @@ -1796,31 +2503,39 @@ static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data) return status; } + /** * ixgbe_raise_i2c_clk - Raises the I2C SCL clock * @hw: pointer to hardware structure * @i2cctl: Current value of I2CCTL register * * Raises the I2C clock line '0'->'1' + * Negates the I2C clock output enable on X550 hardware. **/ static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) { + u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw); u32 i = 0; u32 timeout = IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT; u32 i2cctl_r = 0; DEBUGFUNC("ixgbe_raise_i2c_clk"); + if (clk_oe_bit) { + *i2cctl |= clk_oe_bit; + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); + } + for (i = 0; i < timeout; i++) { - *i2cctl |= IXGBE_I2C_CLK_OUT; + *i2cctl |= IXGBE_I2C_CLK_OUT_BY_MAC(hw); - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); IXGBE_WRITE_FLUSH(hw); /* SCL rise time (1000ns) */ usec_delay(IXGBE_I2C_T_RISE); - i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL); - if (i2cctl_r & IXGBE_I2C_CLK_IN) + i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + if (i2cctl_r & IXGBE_I2C_CLK_IN_BY_MAC(hw)) break; } } @@ -1831,15 +2546,16 @@ static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) * @i2cctl: Current value of I2CCTL register * * Lowers the I2C clock line '1'->'0' + * Asserts the I2C clock output enable on X550 hardware. 
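
ixgbe_raise_i2c_clk() above also implements I2C clock stretching: after driving SCL high the master reads the line back and keeps retrying while a slave holds it low, bounded by IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT. Its core loop, condensed into a standalone sketch:

static void example_raise_scl(struct ixgbe_hw *hw, u32 *i2cctl)
{
	u32 i;

	for (i = 0; i < IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT; i++) {
		*i2cctl |= IXGBE_I2C_CLK_OUT_BY_MAC(hw);
		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
		IXGBE_WRITE_FLUSH(hw);
		usec_delay(IXGBE_I2C_T_RISE);	/* SCL rise time (1000ns) */
		/* done once the slave stops stretching and SCL reads back high */
		if (IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)) &
		    IXGBE_I2C_CLK_IN_BY_MAC(hw))
			break;
	}
}
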
**/ static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) { - DEBUGFUNC("ixgbe_lower_i2c_clk"); - *i2cctl &= ~IXGBE_I2C_CLK_OUT; + *i2cctl &= ~(IXGBE_I2C_CLK_OUT_BY_MAC(hw)); + *i2cctl &= ~IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw); - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); IXGBE_WRITE_FLUSH(hw); /* SCL fall time (300ns) */ @@ -1853,27 +2569,38 @@ static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) * @data: I2C data value (0 or 1) to set * * Sets the I2C data bit + * Asserts the I2C data output enable on X550 hardware. **/ static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data) { + u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); s32 status = IXGBE_SUCCESS; DEBUGFUNC("ixgbe_set_i2c_data"); if (data) - *i2cctl |= IXGBE_I2C_DATA_OUT; + *i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw); else - *i2cctl &= ~IXGBE_I2C_DATA_OUT; + *i2cctl &= ~(IXGBE_I2C_DATA_OUT_BY_MAC(hw)); + *i2cctl &= ~data_oe_bit; - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); IXGBE_WRITE_FLUSH(hw); /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */ usec_delay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA); + if (!data) /* Can't verify data in this case */ + return IXGBE_SUCCESS; + if (data_oe_bit) { + *i2cctl |= data_oe_bit; + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); + IXGBE_WRITE_FLUSH(hw); + } + /* Verify data was set correctly */ - *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); - if (data != ixgbe_get_i2c_data(i2cctl)) { + *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + if (data != ixgbe_get_i2c_data(hw, i2cctl)) { status = IXGBE_ERR_I2C; ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, "Error - I2C data was not set to %X.\n", @@ -1889,14 +2616,23 @@ static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data) * @i2cctl: Current value of I2CCTL register * * Returns the I2C data bit value + * Negates the I2C data output enable on X550 hardware. 
**/ -static bool ixgbe_get_i2c_data(u32 *i2cctl) +static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl) { + u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); bool data; DEBUGFUNC("ixgbe_get_i2c_data"); - if (*i2cctl & IXGBE_I2C_DATA_IN) + if (data_oe_bit) { + *i2cctl |= data_oe_bit; + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); + IXGBE_WRITE_FLUSH(hw); + usec_delay(IXGBE_I2C_T_FALL); + } + + if (*i2cctl & IXGBE_I2C_DATA_IN_BY_MAC(hw)) data = 1; else data = 0; @@ -1913,12 +2649,13 @@ static bool ixgbe_get_i2c_data(u32 *i2cctl) **/ void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw) { - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + u32 i2cctl; u32 i; DEBUGFUNC("ixgbe_i2c_bus_clear"); ixgbe_i2c_start(hw); + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); ixgbe_set_i2c_data(hw, &i2cctl, 1); @@ -1968,3 +2705,33 @@ s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw) out: return status; } + +/** + * ixgbe_set_copper_phy_power - Control power for copper phy + * @hw: pointer to hardware structure + * @on: TRUE for on, FALSE for off + */ +s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on) +{ + u32 status; + u16 reg; + + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + ®); + if (status) + return status; + + if (on) { + reg &= ~IXGBE_MDIO_PHY_SET_LOW_POWER_MODE; + } else { + if (ixgbe_check_reset_blocked(hw)) + return 0; + reg |= IXGBE_MDIO_PHY_SET_LOW_POWER_MODE; + } + + status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + reg); + return status; +} diff --git a/sys/dev/ixgbe/ixgbe_phy.h b/sys/dev/ixgbe/ixgbe_phy.h index dd57d26..fad31bd 100644 --- a/sys/dev/ixgbe/ixgbe_phy.h +++ b/sys/dev/ixgbe/ixgbe_phy.h @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2013, Intel Corporation + Copyright (c) 2001-2015, Intel Corporation All rights reserved. 
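
ixgbe_set_copper_phy_power() closes out the ixgbe_phy.c changes: a read-modify-write of the vendor-specific low-power bit, with the power-down side deferring to the MNG FW veto. A hypothetical caller from an interface up/down path (the wrapper is illustrative):

static s32 example_iface_power(struct ixgbe_hw *hw, bool up)
{
	/* TRUE clears IXGBE_MDIO_PHY_SET_LOW_POWER_MODE, FALSE sets it,
	 * unless ixgbe_check_reset_blocked() vetoes the power-down */
	return ixgbe_set_copper_phy_power(hw, up);
}
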
Redistribution and use in source and binary forms, with or without @@ -54,6 +54,15 @@ #define IXGBE_SFF_SFF_8472_COMP 0x5E #define IXGBE_SFF_SFF_8472_OSCB 0x6E #define IXGBE_SFF_SFF_8472_ESCB 0x76 +#define IXGBE_SFF_IDENTIFIER_QSFP_PLUS 0xD +#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0 0xA5 +#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1 0xA6 +#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2 0xA7 +#define IXGBE_SFF_QSFP_CONNECTOR 0x82 +#define IXGBE_SFF_QSFP_10GBE_COMP 0x83 +#define IXGBE_SFF_QSFP_1GBE_COMP 0x86 +#define IXGBE_SFF_QSFP_CABLE_LENGTH 0x92 +#define IXGBE_SFF_QSFP_DEVICE_TECH 0x93 /* Bitmasks */ #define IXGBE_SFF_DA_PASSIVE_CABLE 0x4 @@ -67,6 +76,11 @@ #define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8 #define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8 #define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0 +#define IXGBE_SFF_ADDRESSING_MODE 0x4 +#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1 +#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8 +#define IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23 +#define IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL 0x0 #define IXGBE_I2C_EEPROM_READ_MASK 0x100 #define IXGBE_I2C_EEPROM_STATUS_MASK 0x3 #define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0 @@ -74,6 +88,26 @@ #define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2 #define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 +#define IXGBE_CS4227 0xBE /* CS4227 address */ +#define IXGBE_CS4227_GLOBAL_ID_LSB 0 +#define IXGBE_CS4227_SCRATCH 2 +#define IXGBE_CS4227_GLOBAL_ID_VALUE 0x03E5 +#define IXGBE_CS4227_SCRATCH_VALUE 0x5aa5 +#define IXGBE_CS4227_RETRIES 5 +#define IXGBE_CS4227_LINE_SPARE22_MSB 0x12AD /* Reg to program speed */ +#define IXGBE_CS4227_LINE_SPARE24_LSB 0x12B0 /* Reg to program EDC */ +#define IXGBE_CS4227_HOST_SPARE22_MSB 0x1AAD /* Reg to program speed */ +#define IXGBE_CS4227_HOST_SPARE24_LSB 0x1AB0 /* Reg to program EDC */ +#define IXGBE_CS4227_EDC_MODE_CX1 0x0002 +#define IXGBE_CS4227_EDC_MODE_SR 0x0004 +#define IXGBE_CS4227_RESET_HOLD 500 /* microseconds */ +#define IXGBE_CS4227_RESET_DELAY 500 /* milliseconds */ +#define IXGBE_CS4227_CHECK_DELAY 30 /* milliseconds */ +#define IXGBE_PE 0xE0 /* Port expander address */ +#define IXGBE_PE_OUTPUT 1 /* Output register offset */ +#define IXGBE_PE_CONFIG 3 /* Config register offset */ +#define IXGBE_PE_BIT1 (1 << 1) + /* Flow control defines */ #define IXGBE_TAF_SYM_PAUSE 0x400 #define IXGBE_TAF_ASM_PAUSE 0x800 @@ -101,16 +135,15 @@ #define IXGBE_I2C_T_SU_STO 4 #define IXGBE_I2C_T_BUF 5 +#ifndef IXGBE_SFP_DETECT_RETRIES +#define IXGBE_SFP_DETECT_RETRIES 10 + +#endif /* IXGBE_SFP_DETECT_RETRIES */ #define IXGBE_TN_LASI_STATUS_REG 0x9005 #define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008 /* SFP+ SFF-8472 Compliance */ #define IXGBE_SFF_SFF_8472_UNSUP 0x00 -#define IXGBE_SFF_SFF_8472_REV_9_3 0x01 -#define IXGBE_SFF_SFF_8472_REV_9_5 0x02 -#define IXGBE_SFF_SFF_8472_REV_10_2 0x03 -#define IXGBE_SFF_SFF_8472_REV_10_4 0x04 -#define IXGBE_SFF_SFF_8472_REV_11_0 0x05 s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw); bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr); @@ -133,6 +166,7 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *autoneg); +s32 ixgbe_check_reset_blocked(struct ixgbe_hw *hw); /* PHY specific */ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, @@ -145,16 +179,23 @@ s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, u16 *firmware_version); s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); +s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on); s32 
ixgbe_identify_module_generic(struct ixgbe_hw *hw); s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); +s32 ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw); +s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw); s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, u16 *list_offset, u16 *data_offset); s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw); s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 *data); +s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data); s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 data); +s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data); s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, diff --git a/sys/dev/ixgbe/ixgbe_type.h b/sys/dev/ixgbe/ixgbe_type.h index 49f5bc0..2a53952 100644 --- a/sys/dev/ixgbe/ixgbe_type.h +++ b/sys/dev/ixgbe/ixgbe_type.h @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2013, Intel Corporation + Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without @@ -75,6 +75,8 @@ #include "ixgbe_osdep.h" +/* Override this by setting IOMEM in your ixgbe_osdep.h header */ +#define IOMEM /* Vendor ID */ #define IXGBE_INTEL_VENDOR_ID 0x8086 @@ -100,16 +102,22 @@ #define IXGBE_DEV_ID_82599_CX4 0x10F9 #define IXGBE_DEV_ID_82599_SFP 0x10FB #define IXGBE_SUBDEV_ID_82599_SFP 0x11A9 +#define IXGBE_SUBDEV_ID_82599_SFP_WOL0 0x1071 #define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72 #define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0 #define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470 #define IXGBE_SUBDEV_ID_82599_SP_560FLR 0x211B #define IXGBE_SUBDEV_ID_82599_LOM_SFP 0x8976 +#define IXGBE_SUBDEV_ID_82599_LOM_SNAP6 0x2159 +#define IXGBE_SUBDEV_ID_82599_SFP_1OCP 0x000D +#define IXGBE_SUBDEV_ID_82599_SFP_2OCP 0x0008 +#define IXGBE_SUBDEV_ID_82599_SFP_LOM 0x06EE #define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152A #define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529 #define IXGBE_DEV_ID_82599_SFP_EM 0x1507 #define IXGBE_DEV_ID_82599_SFP_SF2 0x154D #define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A +#define IXGBE_DEV_ID_82599_QSFP_SF_QP 0x1558 #define IXGBE_DEV_ID_82599EN_SFP 0x1557 #define IXGBE_SUBDEV_ID_82599EN_SFP_OCP1 0x0001 #define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC @@ -121,6 +129,21 @@ #define IXGBE_DEV_ID_X540_VF 0x1515 #define IXGBE_DEV_ID_X540_VF_HV 0x1530 #define IXGBE_DEV_ID_X540_BYPASS 0x155C +#define IXGBE_DEV_ID_X540T1 0x1560 +#define IXGBE_DEV_ID_X550T 0x1563 +#define IXGBE_DEV_ID_X550EM_X_KX4 0x15AA +#define IXGBE_DEV_ID_X550EM_X_KR 0x15AB +#define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC +#define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD +#define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE +#define IXGBE_DEV_ID_X550_VF_HV 0x1564 +#define IXGBE_DEV_ID_X550_VF 0x1565 +#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 +#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9 + +#define IXGBE_CAT(r,m) IXGBE_##r##m + +#define IXGBE_BY_MAC(_hw, r) ((_hw)->mvals[IXGBE_CAT(r, _IDX)]) /* General Registers */ #define IXGBE_CTRL 0x00000 @@ -128,7 +151,12 @@ #define IXGBE_CTRL_EXT 0x00018 #define IXGBE_ESDP 0x00020 #define IXGBE_EODSDP 0x00028 -#define IXGBE_I2CCTL 0x00028 +#define IXGBE_I2CCTL_82599 0x00028 +#define IXGBE_I2CCTL IXGBE_I2CCTL_82599 +#define IXGBE_I2CCTL_X540 
IXGBE_I2CCTL_82599 +#define IXGBE_I2CCTL_X550 0x15F5C +#define IXGBE_I2CCTL_X550EM_x IXGBE_I2CCTL_X550 +#define IXGBE_I2CCTL_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2CCTL) #define IXGBE_PHY_GPIO 0x00028 #define IXGBE_MAC_GPIO 0x00030 #define IXGBE_PHYINT_STATUS0 0x00100 @@ -141,18 +169,40 @@ #define IXGBE_EXVET 0x05078 /* NVM Registers */ -#define IXGBE_EEC 0x10010 -#define IXGBE_EERD 0x10014 -#define IXGBE_EEWR 0x10018 -#define IXGBE_FLA 0x1001C +#define IXGBE_EEC 0x10010 +#define IXGBE_EEC_X540 IXGBE_EEC +#define IXGBE_EEC_X550 IXGBE_EEC +#define IXGBE_EEC_X550EM_x IXGBE_EEC +#define IXGBE_EEC_BY_MAC(_hw) IXGBE_EEC + +#define IXGBE_EERD 0x10014 +#define IXGBE_EEWR 0x10018 + +#define IXGBE_FLA 0x1001C +#define IXGBE_FLA_X540 IXGBE_FLA +#define IXGBE_FLA_X550 IXGBE_FLA +#define IXGBE_FLA_X550EM_x IXGBE_FLA +#define IXGBE_FLA_BY_MAC(_hw) IXGBE_FLA + #define IXGBE_EEMNGCTL 0x10110 #define IXGBE_EEMNGDATA 0x10114 #define IXGBE_FLMNGCTL 0x10118 #define IXGBE_FLMNGDATA 0x1011C #define IXGBE_FLMNGCNT 0x10120 #define IXGBE_FLOP 0x1013C -#define IXGBE_GRC 0x10200 -#define IXGBE_SRAMREL 0x10210 + +#define IXGBE_GRC 0x10200 +#define IXGBE_GRC_X540 IXGBE_GRC +#define IXGBE_GRC_X550 IXGBE_GRC +#define IXGBE_GRC_X550EM_x IXGBE_GRC +#define IXGBE_GRC_BY_MAC(_hw) IXGBE_GRC + +#define IXGBE_SRAMREL 0x10210 +#define IXGBE_SRAMREL_X540 IXGBE_SRAMREL +#define IXGBE_SRAMREL_X550 IXGBE_SRAMREL +#define IXGBE_SRAMREL_X550EM_x IXGBE_SRAMREL +#define IXGBE_SRAMREL_BY_MAC(_hw) IXGBE_SRAMREL + #define IXGBE_PHYDBG 0x10218 /* General Receive Control */ @@ -163,10 +213,48 @@ #define IXGBE_VPDDIAG1 0x10208 /* I2CCTL Bit Masks */ -#define IXGBE_I2C_CLK_IN 0x00000001 -#define IXGBE_I2C_CLK_OUT 0x00000002 -#define IXGBE_I2C_DATA_IN 0x00000004 -#define IXGBE_I2C_DATA_OUT 0x00000008 +#define IXGBE_I2C_CLK_IN 0x00000001 +#define IXGBE_I2C_CLK_IN_X540 IXGBE_I2C_CLK_IN +#define IXGBE_I2C_CLK_IN_X550 0x00004000 +#define IXGBE_I2C_CLK_IN_X550EM_x IXGBE_I2C_CLK_IN_X550 +#define IXGBE_I2C_CLK_IN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_IN) + +#define IXGBE_I2C_CLK_OUT 0x00000002 +#define IXGBE_I2C_CLK_OUT_X540 IXGBE_I2C_CLK_OUT +#define IXGBE_I2C_CLK_OUT_X550 0x00000200 +#define IXGBE_I2C_CLK_OUT_X550EM_x IXGBE_I2C_CLK_OUT_X550 +#define IXGBE_I2C_CLK_OUT_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_OUT) + +#define IXGBE_I2C_DATA_IN 0x00000004 +#define IXGBE_I2C_DATA_IN_X540 IXGBE_I2C_DATA_IN +#define IXGBE_I2C_DATA_IN_X550 0x00001000 +#define IXGBE_I2C_DATA_IN_X550EM_x IXGBE_I2C_DATA_IN_X550 +#define IXGBE_I2C_DATA_IN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_IN) + +#define IXGBE_I2C_DATA_OUT 0x00000008 +#define IXGBE_I2C_DATA_OUT_X540 IXGBE_I2C_DATA_OUT +#define IXGBE_I2C_DATA_OUT_X550 0x00000400 +#define IXGBE_I2C_DATA_OUT_X550EM_x IXGBE_I2C_DATA_OUT_X550 +#define IXGBE_I2C_DATA_OUT_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_OUT) + +#define IXGBE_I2C_DATA_OE_N_EN 0 +#define IXGBE_I2C_DATA_OE_N_EN_X540 IXGBE_I2C_DATA_OE_N_EN +#define IXGBE_I2C_DATA_OE_N_EN_X550 0x00000800 +#define IXGBE_I2C_DATA_OE_N_EN_X550EM_x IXGBE_I2C_DATA_OE_N_EN_X550 +#define IXGBE_I2C_DATA_OE_N_EN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_OE_N_EN) + +#define IXGBE_I2C_BB_EN 0 +#define IXGBE_I2C_BB_EN_X540 IXGBE_I2C_BB_EN +#define IXGBE_I2C_BB_EN_X550 0x00000100 +#define IXGBE_I2C_BB_EN_X550EM_x IXGBE_I2C_BB_EN_X550 + +#define IXGBE_I2C_BB_EN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_BB_EN) + +#define IXGBE_I2C_CLK_OE_N_EN 0 +#define IXGBE_I2C_CLK_OE_N_EN_X540 IXGBE_I2C_CLK_OE_N_EN +#define IXGBE_I2C_CLK_OE_N_EN_X550 0x00002000 +#define IXGBE_I2C_CLK_OE_N_EN_X550EM_x 
IXGBE_I2C_CLK_OE_N_EN_X550 +#define IXGBE_I2C_CLK_OE_N_EN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_OE_N_EN) #define IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT 500 @@ -301,6 +389,8 @@ #define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */ #define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */ #define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */ +#define IXGBE_PFFLPL 0x050B0 +#define IXGBE_PFFLPH 0x050B4 #define IXGBE_VT_CTL 0x051B0 #define IXGBE_PFMAILBOX(_i) (0x04B00 + (4 * (_i))) /* 64 total */ /* 64 Mailboxes, 16 DW each */ @@ -317,6 +407,12 @@ #define IXGBE_MRCTL(_i) (0x0F600 + ((_i) * 4)) #define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4)) #define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4)) +#define IXGBE_LVMMC_RX 0x2FA8 +#define IXGBE_LVMMC_TX 0x8108 +#define IXGBE_LMVM_RX 0x2FA4 +#define IXGBE_LMVM_TX 0x8124 +#define IXGBE_WQBR_RX(_i) (0x2FB0 + ((_i) * 4)) /* 4 total */ +#define IXGBE_WQBR_TX(_i) (0x8130 + ((_i) * 4)) /* 4 total */ #define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/ #define IXGBE_RXFECCERR0 0x051B8 #define IXGBE_LLITHRESH 0x0EC90 @@ -325,8 +421,16 @@ #define IXGBE_IMIRVP 0x05AC0 #define IXGBE_VMD_CTL 0x0581C #define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */ +#define IXGBE_ERETA(_i) (0x0EE80 + ((_i) * 4)) /* 96 of these (0-95) */ #define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */ +/* Registers for setting up RSS on X550 with SRIOV + * _p - pool number (0..63) + * _i - index (0..10 for PFVFRSSRK, 0..15 for PFVFRETA) + */ +#define IXGBE_PFVFMRQC(_p) (0x03400 + ((_p) * 4)) +#define IXGBE_PFVFRSSRK(_i, _p) (0x018000 + ((_i) * 4) + ((_p) * 0x40)) +#define IXGBE_PFVFRETA(_i, _p) (0x019000 + ((_i) * 4) + ((_p) * 0x40)) /* Flow Director registers */ #define IXGBE_FDIRCTRL 0x0EE00 @@ -336,6 +440,7 @@ #define IXGBE_FDIRSIP4M 0x0EE40 #define IXGBE_FDIRTCPM 0x0EE44 #define IXGBE_FDIRUDPM 0x0EE48 +#define IXGBE_FDIRSCTPM 0x0EE78 #define IXGBE_FDIRIP6M 0x0EE74 #define IXGBE_FDIRM 0x0EE70 @@ -379,6 +484,8 @@ #define IXGBE_DMATXCTL_TE 0x1 /* Transmit Enable */ #define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */ #define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */ +#define IXGBE_DMATXCTL_MDP_EN 0x20 /* Bit 5 */ +#define IXGBE_DMATXCTL_MBINTEN 0x40 /* Bit 6 */ #define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */ #define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */ @@ -387,6 +494,8 @@ #define IXGBE_SPOOF_MACAS_MASK 0xFF #define IXGBE_SPOOF_VLANAS_MASK 0xFF00 #define IXGBE_SPOOF_VLANAS_SHIFT 8 +#define IXGBE_SPOOF_ETHERTYPEAS 0xFF000000 +#define IXGBE_SPOOF_ETHERTYPEAS_SHIFT 16 #define IXGBE_PFVFSPOOF_REG_COUNT 8 /* 16 of these (0-15) */ #define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) @@ -408,16 +517,22 @@ #define IXGBE_WUPL 0x05900 #define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */ +#define IXGBE_PROXYS 0x05F60 /* Proxying Status Register */ +#define IXGBE_PROXYFC 0x05F64 /* Proxying Filter Control Register */ +#define IXGBE_VXLANCTRL 0x0000507C /* Rx filter VXLAN UDPPORT Register */ -#define IXGBE_FHFT(_n) (0x09000 + (_n * 0x100)) /* Flex host filter table */ +#define IXGBE_FHFT(_n) (0x09000 + ((_n) * 0x100)) /* Flex host filter table */ /* Ext Flexible Host Filter Table */ -#define IXGBE_FHFT_EXT(_n) (0x09800 + (_n * 0x100)) +#define IXGBE_FHFT_EXT(_n) (0x09800 + ((_n) * 0x100)) +#define IXGBE_FHFT_EXT_X550(_n) (0x09600 + ((_n) * 0x100)) /* Four Flexible Filters are supported */ #define IXGBE_FLEXIBLE_FILTER_COUNT_MAX 4 /* Six Flexible 
Filters are supported */ #define IXGBE_FLEXIBLE_FILTER_COUNT_MAX_6 6 +/* Eight Flexible Filters are supported */ +#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX_8 8 #define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX 2 /* Each Flexible Filter is at most 128 (0x80) bytes in length */ @@ -450,10 +565,14 @@ #define IXGBE_WUFC_FLX4 0x00100000 /* Flexible Filter 4 Enable */ #define IXGBE_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */ #define IXGBE_WUFC_FLX_FILTERS 0x000F0000 /* Mask for 4 flex filters */ +#define IXGBE_WUFC_FLX_FILTERS_6 0x003F0000 /* Mask for 6 flex filters */ +#define IXGBE_WUFC_FLX_FILTERS_8 0x00FF0000 /* Mask for 8 flex filters */ +#define IXGBE_WUFC_FW_RST_WK 0x80000000 /* Ena wake on FW reset assertion */ /* Mask for Ext. flex filters */ #define IXGBE_WUFC_EXT_FLX_FILTERS 0x00300000 #define IXGBE_WUFC_ALL_FILTERS 0x000F00FF /* Mask all 4 flex filters */ #define IXGBE_WUFC_ALL_FILTERS_6 0x003F00FF /* Mask all 6 flex filters */ +#define IXGBE_WUFC_ALL_FILTERS_8 0x00FF00FF /* Mask all 8 flex filters */ #define IXGBE_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */ /* Wake Up Status */ @@ -473,6 +592,23 @@ #define IXGBE_WUS_FLX4 IXGBE_WUFC_FLX4 #define IXGBE_WUS_FLX5 IXGBE_WUFC_FLX5 #define IXGBE_WUS_FLX_FILTERS IXGBE_WUFC_FLX_FILTERS +#define IXGBE_WUS_FW_RST_WK IXGBE_WUFC_FW_RST_WK +/* Proxy Status */ +#define IXGBE_PROXYS_EX 0x00000004 /* Exact packet received */ +#define IXGBE_PROXYS_ARP_DIR 0x00000020 /* ARP w/filter match received */ +#define IXGBE_PROXYS_NS 0x00000200 /* IPV6 NS received */ +#define IXGBE_PROXYS_NS_DIR 0x00000400 /* IPV6 NS w/DA match received */ +#define IXGBE_PROXYS_ARP 0x00000800 /* ARP request packet received */ +#define IXGBE_PROXYS_MLD 0x00001000 /* IPv6 MLD packet received */ + +/* Proxying Filter Control */ +#define IXGBE_PROXYFC_ENABLE 0x00000001 /* Port Proxying Enable */ +#define IXGBE_PROXYFC_EX 0x00000004 /* Directed Exact Proxy Enable */ +#define IXGBE_PROXYFC_ARP_DIR 0x00000020 /* Directed ARP Proxy Enable */ +#define IXGBE_PROXYFC_NS 0x00000200 /* IPv6 Neighbor Solicitation */ +#define IXGBE_PROXYFC_ARP 0x00000800 /* ARP Request Proxy Enable */ +#define IXGBE_PROXYFC_MLD 0x00000800 /* IPv6 MLD Proxy Enable */ +#define IXGBE_PROXYFC_NO_TCO 0x00008000 /* Ignore TCO packets */ #define IXGBE_WUPL_LENGTH_MASK 0xFFFF @@ -489,6 +625,57 @@ #define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */ #define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */ +/* Power Management */ +/* DMA Coalescing configuration */ +struct ixgbe_dmac_config { + u16 watchdog_timer; /* usec units */ + bool fcoe_en; + u32 link_speed; + u8 fcoe_tc; + u8 num_tcs; +}; + +/* + * DMA Coalescing threshold Rx PB TC[n] value in Kilobyte by link speed. 
+ * DMACRXT = 10Gbps = 10,000 bits / usec = 1250 bytes / usec; a 70 usec watchdog gives 70 * 1250 == + * 87500 bytes [85KB] + */ +#define IXGBE_DMACRXT_10G 0x55 +#define IXGBE_DMACRXT_1G 0x09 +#define IXGBE_DMACRXT_100M 0x01 + +/* DMA Coalescing registers */ +#define IXGBE_DMCMNGTH 0x15F20 /* Management Threshold */ +#define IXGBE_DMACR 0x02400 /* Control register */ +#define IXGBE_DMCTH(_i) (0x03300 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_DMCTLX 0x02404 /* Time to Lx request */ +/* DMA Coalescing register fields */ +#define IXGBE_DMCMNGTH_DMCMNGTH_MASK 0x000FFFF0 /* Mng Threshold mask */ +#define IXGBE_DMCMNGTH_DMCMNGTH_SHIFT 4 /* Management Threshold shift */ +#define IXGBE_DMACR_DMACWT_MASK 0x0000FFFF /* Watchdog Timer mask */ +#define IXGBE_DMACR_HIGH_PRI_TC_MASK 0x00FF0000 +#define IXGBE_DMACR_HIGH_PRI_TC_SHIFT 16 +#define IXGBE_DMACR_EN_MNG_IND 0x10000000 /* Enable Mng Indications */ +#define IXGBE_DMACR_LX_COAL_IND 0x40000000 /* Lx Coalescing indicate */ +#define IXGBE_DMACR_DMAC_EN 0x80000000 /* DMA Coalescing Enable */ +#define IXGBE_DMCTH_DMACRXT_MASK 0x000001FF /* Receive Threshold mask */ +#define IXGBE_DMCTLX_TTLX_MASK 0x00000FFF /* Time to Lx request mask */ + +/* EEE registers */ +#define IXGBE_EEER 0x043A0 /* EEE register */ +#define IXGBE_EEE_STAT 0x04398 /* EEE Status */ +#define IXGBE_EEE_SU 0x04380 /* EEE Set up */ +#define IXGBE_EEE_SU_TEEE_DLY_SHIFT 26 +#define IXGBE_TLPIC 0x041F4 /* EEE Tx LPI count */ +#define IXGBE_RLPIC 0x041F8 /* EEE Rx LPI count */ + +/* EEE register fields */ +#define IXGBE_EEER_TX_LPI_EN 0x00010000 /* Enable EEE LPI TX path */ +#define IXGBE_EEER_RX_LPI_EN 0x00020000 /* Enable EEE LPI RX path */ +#define IXGBE_EEE_STAT_NEG 0x20000000 /* EEE support neg on link */ +#define IXGBE_EEE_RX_LPI_STATUS 0x40000000 /* RX Link in LPI status */ +#define IXGBE_EEE_TX_LPI_STATUS 0x80000000 /* TX Link in LPI status */ + /* Security Control Registers */ @@ -630,6 +817,8 @@ /* FCoE DMA Context Registers */ +/* FCoE Direct DMA Context */ +#define IXGBE_FCDDC(_i, _j) (0x20000 + ((_i) * 0x4) + ((_j) * 0x10)) #define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */ #define IXGBE_FCPTRH 0x02414 /* FC User Desc. 
PTR High */ #define IXGBE_FCBUFF 0x02418 /* FC Buffer Control */ @@ -653,6 +842,12 @@ #define IXGBE_REOFF 0x05158 /* Rx FC EOF */ #define IXGBE_RSOFF 0x051F8 /* Rx FC SOF */ /* FCoE Filter Context Registers */ +#define IXGBE_FCD_ID 0x05114 /* FCoE D_ID */ +#define IXGBE_FCSMAC 0x0510C /* FCoE Source MAC */ +#define IXGBE_FCFLTRW_SMAC_HIGH_SHIFT 16 +/* FCoE Direct Filter Context */ +#define IXGBE_FCDFC(_i, _j) (0x28000 + ((_i) * 0x4) + ((_j) * 0x10)) +#define IXGBE_FCDFCD(_i) (0x30000 + ((_i) * 0x4)) #define IXGBE_FCFLT 0x05108 /* FC FLT Context */ #define IXGBE_FCFLTRW 0x05110 /* FC Filter RW Control */ #define IXGBE_FCPARAM 0x051d8 /* FC Offset Parameter */ @@ -683,6 +878,10 @@ #define IXGBE_FCRETASEL_ENA 0x2 /* FCoE FCRETASEL bit */ #define IXGBE_FCRETA_SIZE 8 /* Max entries in FCRETA */ #define IXGBE_FCRETA_ENTRY_MASK 0x0000007f /* 7 bits for the queue index */ +#define IXGBE_FCRETA_SIZE_X550 32 /* Max entries in FCRETA */ +/* Higher 7 bits for the queue index */ +#define IXGBE_FCRETA_ENTRY_HIGH_MASK 0x007F0000 +#define IXGBE_FCRETA_ENTRY_HIGH_SHIFT 16 /* Stats registers */ #define IXGBE_CRCERRS 0x04000 @@ -781,11 +980,6 @@ #define IXGBE_BXOFFRXC 0x041E0 #define IXGBE_BXONTXC 0x041E4 #define IXGBE_BXOFFTXC 0x041E8 -#define IXGBE_PCRC8ECL 0x0E810 -#define IXGBE_PCRC8ECH 0x0E811 -#define IXGBE_PCRC8ECH_MASK 0x1F -#define IXGBE_LDPCECL 0x0E820 -#define IXGBE_LDPCECH 0x0E821 /* Management */ #define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */ @@ -807,6 +1001,7 @@ #define IXGBE_BMCIP_IPADDR_VALID 0x00000002 /* Management Bit Fields and Masks */ +#define IXGBE_MANC_MPROXYE 0x40000000 /* Management Proxy Enable */ #define IXGBE_MANC_RCV_TCO_EN 0x00020000 /* Rcv TCO packet enable */ #define IXGBE_MANC_EN_BMC2OS 0x10000000 /* Ena BMC2OS and OS2BMC traffic */ #define IXGBE_MANC_EN_BMC2OS_SHIFT 28 @@ -848,14 +1043,34 @@ #define IXGBE_GSCN_2 0x11028 #define IXGBE_GSCN_3 0x1102C #define IXGBE_FACTPS 0x10150 +#define IXGBE_FACTPS_X540 IXGBE_FACTPS +#define IXGBE_FACTPS_X550 IXGBE_FACTPS +#define IXGBE_FACTPS_X550EM_x IXGBE_FACTPS +#define IXGBE_FACTPS_BY_MAC(_hw) IXGBE_FACTPS + #define IXGBE_PCIEANACTL 0x11040 #define IXGBE_SWSM 0x10140 +#define IXGBE_SWSM_X540 IXGBE_SWSM +#define IXGBE_SWSM_X550 IXGBE_SWSM +#define IXGBE_SWSM_X550EM_x IXGBE_SWSM +#define IXGBE_SWSM_BY_MAC(_hw) IXGBE_SWSM + #define IXGBE_FWSM 0x10148 +#define IXGBE_FWSM_X540 IXGBE_FWSM +#define IXGBE_FWSM_X550 IXGBE_FWSM +#define IXGBE_FWSM_X550EM_x IXGBE_FWSM +#define IXGBE_FWSM_BY_MAC(_hw) IXGBE_FWSM + +#define IXGBE_SWFW_SYNC IXGBE_GSSR +#define IXGBE_SWFW_SYNC_X540 IXGBE_SWFW_SYNC +#define IXGBE_SWFW_SYNC_X550 IXGBE_SWFW_SYNC +#define IXGBE_SWFW_SYNC_X550EM_x IXGBE_SWFW_SYNC +#define IXGBE_SWFW_SYNC_BY_MAC(_hw) IXGBE_SWFW_SYNC + #define IXGBE_GSSR 0x10160 #define IXGBE_MREVID 0x11064 #define IXGBE_DCA_ID 0x11070 #define IXGBE_DCA_CTRL 0x11074 -#define IXGBE_SWFW_SYNC IXGBE_GSSR /* PCI-E registers 82599-Specific */ #define IXGBE_GCR_EXT 0x11050 @@ -867,8 +1082,18 @@ #define IXGBE_PHYDAT_82599 0x11044 #define IXGBE_PHYCTL_82599 0x11048 #define IXGBE_PBACLR_82599 0x11068 -#define IXGBE_CIAA_82599 0x11088 -#define IXGBE_CIAD_82599 0x1108C +#define IXGBE_CIAA 0x11088 +#define IXGBE_CIAD 0x1108C +#define IXGBE_CIAA_82599 IXGBE_CIAA +#define IXGBE_CIAD_82599 IXGBE_CIAD +#define IXGBE_CIAA_X540 IXGBE_CIAA +#define IXGBE_CIAD_X540 IXGBE_CIAD +#define IXGBE_CIAA_X550 0x11508 +#define IXGBE_CIAD_X550 0x11510 +#define IXGBE_CIAA_X550EM_x IXGBE_CIAA_X550 +#define IXGBE_CIAD_X550EM_x IXGBE_CIAD_X550 +#define 
IXGBE_CIAA_BY_MAC(_hw) IXGBE_BY_MAC((_hw), CIAA) +#define IXGBE_CIAD_BY_MAC(_hw) IXGBE_BY_MAC((_hw), CIAD) #define IXGBE_PICAUSE 0x110B0 #define IXGBE_PIENA 0x110B8 #define IXGBE_CDQ_MBR_82599 0x110B4 @@ -905,6 +1130,7 @@ #define IXGBE_TXSTMPH 0x08C08 /* Tx timestamp value High - RO */ #define IXGBE_SYSTIML 0x08C0C /* System time register Low - RO */ #define IXGBE_SYSTIMH 0x08C10 /* System time register High - RO */ +#define IXGBE_SYSTIMR 0x08C58 /* System time register Residue - RO */ #define IXGBE_TIMINCA 0x08C14 /* Increment attributes register - RW */ #define IXGBE_TIMADJL 0x08C18 /* Time Adjustment Offset register Low - RW */ #define IXGBE_TIMADJH 0x08C1C /* Time Adjustment Offset register High - RW */ @@ -921,6 +1147,9 @@ #define IXGBE_AUXSTMPH0 0x08C40 /* Auxiliary Time Stamp 0 register High - RO */ #define IXGBE_AUXSTMPL1 0x08C44 /* Auxiliary Time Stamp 1 register Low - RO */ #define IXGBE_AUXSTMPH1 0x08C48 /* Auxiliary Time Stamp 1 register High - RO */ +#define IXGBE_TSIM 0x08C68 /* TimeSync Interrupt Mask Register - RW */ +#define IXGBE_TSICR 0x08C60 /* TimeSync Interrupt Cause Register - WO */ +#define IXGBE_TSSDP 0x0003C /* TimeSync SDP Configuration Register - RW */ /* Diagnostic Registers */ #define IXGBE_RDSTATCTL 0x02C20 @@ -1077,6 +1306,7 @@ /* RDRXCTL Bit Masks */ #define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 /* Rx Desc Min THLD Size */ #define IXGBE_RDRXCTL_CRCSTRIP 0x00000002 /* CRC Strip */ +#define IXGBE_RDRXCTL_PSP 0x00000004 /* Pad Small Packet */ #define IXGBE_RDRXCTL_MVMEN 0x00000020 #define IXGBE_RDRXCTL_RSC_PUSH_DIS 0x00000020 #define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */ @@ -1086,6 +1316,8 @@ #define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disable RSC compl on LLI*/ #define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC ena */ #define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC ena */ +#define IXGBE_RDRXCTL_MBINTEN 0x10000000 +#define IXGBE_RDRXCTL_MDP_EN 0x20000000 /* RQTC Bit Masks and Shifts */ #define IXGBE_RQTC_SHIFT_TC(_i) ((_i) * 4) @@ -1209,8 +1441,16 @@ #define IXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */ #define IXGBE_MDIO_AUTO_NEG_STATUS 0x1 /* AUTO_NEG Status Reg */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STAT 0xC800 /* AUTO_NEG Vendor Status Reg */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM 0xCC00 /* AUTO_NEG Vendor TX Reg */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2 0xCC01 /* AUTO_NEG Vendor Tx Reg */ +#define IXGBE_MDIO_AUTO_NEG_VEN_LSC 0x1 /* AUTO_NEG Vendor Tx LSC */ #define IXGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */ #define IXGBE_MDIO_AUTO_NEG_LP 0x13 /* AUTO_NEG LP Status Reg */ +#define IXGBE_MDIO_AUTO_NEG_EEE_ADVT 0x3C /* AUTO_NEG EEE Advt Reg */ +#define IXGBE_AUTO_NEG_10GBASE_EEE_ADVT 0x8 /* AUTO NEG EEE 10GBaseT Advt */ +#define IXGBE_AUTO_NEG_1000BASE_EEE_ADVT 0x4 /* AUTO NEG EEE 1000BaseT Advt */ +#define IXGBE_AUTO_NEG_100BASE_EEE_ADVT 0x2 /* AUTO NEG EEE 100BaseT Advt */ #define IXGBE_MDIO_PHY_XS_CONTROL 0x0 /* PHY_XS Control Reg */ #define IXGBE_MDIO_PHY_XS_RESET 0x8000 /* PHY_XS Reset */ #define IXGBE_MDIO_PHY_ID_HIGH 0x2 /* PHY ID High Reg*/ @@ -1224,15 +1464,63 @@ #define IXGBE_MDIO_PHY_1000BASET_ABILITY 0x0020 /* 1000BaseT capable */ #define IXGBE_MDIO_PHY_100BASETX_ABILITY 0x0080 /* 100BaseTX capable */ #define IXGBE_MDIO_PHY_SET_LOW_POWER_MODE 0x0800 /* Set low power mode */ - +#define IXGBE_AUTO_NEG_LP_STATUS 0xE820 /* AUTO NEG Rx LP Status Reg */ +#define IXGBE_AUTO_NEG_LP_1000BASE_CAP 0x8000 /* AUTO NEG Rx LP 1000BaseT Cap */ +#define 
IXGBE_AUTO_NEG_LP_10GBASE_CAP 0x0800 /* AUTO NEG Rx LP 10GBaseT Cap */ +#define IXGBE_AUTO_NEG_10GBASET_STAT 0x0021 /* AUTO NEG 10G BaseT Stat */ + +#define IXGBE_MDIO_TX_VENDOR_ALARMS_3 0xCC02 /* Vendor Alarms 3 Reg */ +#define IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK 0x3 /* PHY Reset Complete Mask */ +#define IXGBE_MDIO_GLOBAL_RES_PR_10 0xC479 /* Global Resv Provisioning 10 Reg */ +#define IXGBE_MDIO_POWER_UP_STALL 0x8000 /* Power Up Stall */ +#define IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK 0xFF00 /* int std mask */ +#define IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG 0xFC00 /* chip std int flag */ +#define IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK 0xFF01 /* int chip-wide mask */ +#define IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG 0xFC01 /* int chip-wide mask */ +#define IXGBE_MDIO_GLOBAL_ALARM_1 0xCC00 /* Global alarm 1 */ +#define IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL 0x4000 /* high temp failure */ +#define IXGBE_MDIO_GLOBAL_INT_MASK 0xD400 /* Global int mask */ +#define IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN 0x1000 /* autoneg vendor alarm int enable */ +#define IXGBE_MDIO_GLOBAL_ALARM_1_INT 0x4 /* int in Global alarm 1 */ +#define IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN 0x1 /* vendor alarm int enable */ +#define IXGBE_MDIO_GLOBAL_STD_ALM2_INT 0x200 /* vendor alarm2 int mask */ +#define IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN 0x4000 /* int high temp enable */ #define IXGBE_MDIO_PMA_PMD_CONTROL_ADDR 0x0000 /* PMA/PMD Control Reg */ #define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */ #define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */ #define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */ +#define IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK 0xD401 /* PHY TX Vendor LASI */ +#define IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN 0x1 /* PHY TX Vendor LASI enable */ +#define IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR 0x9 /* Standard Transmit Dis Reg */ +#define IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE 0x0001 /* PMD Global Transmit Dis */ + +#define IXGBE_PCRC8ECL 0x0E810 /* PCR CRC-8 Error Count Lo */ +#define IXGBE_PCRC8ECH 0x0E811 /* PCR CRC-8 Error Count Hi */ +#define IXGBE_PCRC8ECH_MASK 0x1F +#define IXGBE_LDPCECL 0x0E820 /* PCR Uncorrected Error Count Lo */ +#define IXGBE_LDPCECH 0x0E821 /* PCR Uncorrected Error Count Hi */ /* MII clause 22/28 definitions */ #define IXGBE_MDIO_PHY_LOW_POWER_MODE 0x0800 +#define IXGBE_MDIO_XENPAK_LASI_STATUS 0x9005 /* XENPAK LASI Status register*/ +#define IXGBE_XENPAK_LASI_LINK_STATUS_ALARM 0x1 /* Link Status Alarm change */ + +#define IXGBE_MDIO_AUTO_NEG_LINK_STATUS 0x4 /* Indicates if link is up */ + +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK 0x7 /* Speed/Duplex Mask */ +#define IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK 0x6 /* Speed Mask */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_HALF 0x0 /* 10Mb/s Half Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_FULL 0x1 /* 10Mb/s Full Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_HALF 0x2 /* 100Mb/s Half Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_FULL 0x3 /* 100Mb/s Full Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_HALF 0x4 /* 1Gb/s Half Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL 0x5 /* 1Gb/s Full Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_HALF 0x6 /* 10Gb/s Half Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL 0x7 /* 10Gb/s Full Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB 0x4 /* 1Gb/s */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB 0x6 /* 10Gb/s */ + #define IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG 0x20 
/* 10G Control Reg */ #define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */ #define IXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */ @@ -1240,6 +1528,8 @@ #define IXGBE_MII_10GBASE_T_ADVERTISE 0x1000 /* full duplex, bit:12*/ #define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/ #define IXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/ +#define IXGBE_MII_2_5GBASE_T_ADVERTISE 0x0400 +#define IXGBE_MII_5GBASE_T_ADVERTISE 0x0800 #define IXGBE_MII_100BASE_T_ADVERTISE 0x0100 /* full duplex, bit:8 */ #define IXGBE_MII_100BASE_T_ADVERTISE_HALF 0x0080 /* half duplex, bit:7 */ #define IXGBE_MII_RESTART 0x200 @@ -1254,6 +1544,8 @@ #define TN1010_PHY_ID 0x00A19410 #define TNX_FW_REV 0xB #define X540_PHY_ID 0x01540200 +#define X550_PHY_ID 0x01540220 +#define X557_PHY_ID 0x01540240 #define AQ_FW_REV 0x20 #define QT2022_PHY_ID 0x0043A400 #define ATH_PHY_ID 0x03429050 @@ -1277,6 +1569,19 @@ #define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */ #define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */ #define IXGBE_SDP2_GPIEN 0x00000004 /* SDP2 */ +#define IXGBE_SDP0_GPIEN_X540 0x00000002 /* SDP0 on X540 and X550 */ +#define IXGBE_SDP1_GPIEN_X540 0x00000004 /* SDP1 on X540 and X550 */ +#define IXGBE_SDP2_GPIEN_X540 0x00000008 /* SDP2 on X540 and X550 */ +#define IXGBE_SDP0_GPIEN_X550 IXGBE_SDP0_GPIEN_X540 +#define IXGBE_SDP1_GPIEN_X550 IXGBE_SDP1_GPIEN_X540 +#define IXGBE_SDP2_GPIEN_X550 IXGBE_SDP2_GPIEN_X540 +#define IXGBE_SDP0_GPIEN_X550EM_x IXGBE_SDP0_GPIEN_X540 +#define IXGBE_SDP1_GPIEN_X550EM_x IXGBE_SDP1_GPIEN_X540 +#define IXGBE_SDP2_GPIEN_X550EM_x IXGBE_SDP2_GPIEN_X540 +#define IXGBE_SDP0_GPIEN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SDP0_GPIEN) +#define IXGBE_SDP1_GPIEN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SDP1_GPIEN) +#define IXGBE_SDP2_GPIEN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SDP2_GPIEN) + #define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */ #define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */ #define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */ @@ -1454,6 +1759,19 @@ enum { #define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */ #define IXGBE_EICR_GPI_SDP2 0x04000000 /* Gen Purpose Interrupt on SDP2 */ #define IXGBE_EICR_ECC 0x10000000 /* ECC Error */ +#define IXGBE_EICR_GPI_SDP0_X540 0x02000000 /* Gen Purpose Interrupt on SDP0 */ +#define IXGBE_EICR_GPI_SDP1_X540 0x04000000 /* Gen Purpose Interrupt on SDP1 */ +#define IXGBE_EICR_GPI_SDP2_X540 0x08000000 /* Gen Purpose Interrupt on SDP2 */ +#define IXGBE_EICR_GPI_SDP0_X550 IXGBE_EICR_GPI_SDP0_X540 +#define IXGBE_EICR_GPI_SDP1_X550 IXGBE_EICR_GPI_SDP1_X540 +#define IXGBE_EICR_GPI_SDP2_X550 IXGBE_EICR_GPI_SDP2_X540 +#define IXGBE_EICR_GPI_SDP0_X550EM_x IXGBE_EICR_GPI_SDP0_X540 +#define IXGBE_EICR_GPI_SDP1_X550EM_x IXGBE_EICR_GPI_SDP1_X540 +#define IXGBE_EICR_GPI_SDP2_X550EM_x IXGBE_EICR_GPI_SDP2_X540 +#define IXGBE_EICR_GPI_SDP0_BY_MAC(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP0) +#define IXGBE_EICR_GPI_SDP1_BY_MAC(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP1) +#define IXGBE_EICR_GPI_SDP2_BY_MAC(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP2) + #define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */ #define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */ #define IXGBE_EICR_TCP_TIMER 0x40000000 /* TCP Timer */ @@ -1472,6 +1790,9 @@ enum { #define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ #define IXGBE_EICS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ #define IXGBE_EICS_ECC IXGBE_EICR_ECC /* ECC Error */ +#define 
IXGBE_EICS_GPI_SDP0_BY_MAC(_hw) IXGBE_EICR_GPI_SDP0_BY_MAC(_hw) +#define IXGBE_EICS_GPI_SDP1_BY_MAC(_hw) IXGBE_EICR_GPI_SDP1_BY_MAC(_hw) +#define IXGBE_EICS_GPI_SDP2_BY_MAC(_hw) IXGBE_EICR_GPI_SDP2_BY_MAC(_hw) #define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ #define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */ #define IXGBE_EICS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ @@ -1491,6 +1812,9 @@ enum { #define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ #define IXGBE_EIMS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ #define IXGBE_EIMS_ECC IXGBE_EICR_ECC /* ECC Error */ +#define IXGBE_EIMS_GPI_SDP0_BY_MAC(_hw) IXGBE_EICR_GPI_SDP0_BY_MAC(_hw) +#define IXGBE_EIMS_GPI_SDP1_BY_MAC(_hw) IXGBE_EICR_GPI_SDP1_BY_MAC(_hw) +#define IXGBE_EIMS_GPI_SDP2_BY_MAC(_hw) IXGBE_EICR_GPI_SDP2_BY_MAC(_hw) #define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ #define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */ #define IXGBE_EIMS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ @@ -1509,6 +1833,9 @@ enum { #define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ #define IXGBE_EIMC_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ #define IXGBE_EIMC_ECC IXGBE_EICR_ECC /* ECC Error */ +#define IXGBE_EIMC_GPI_SDP0_BY_MAC(_hw) IXGBE_EICR_GPI_SDP0_BY_MAC(_hw) +#define IXGBE_EIMC_GPI_SDP1_BY_MAC(_hw) IXGBE_EICR_GPI_SDP1_BY_MAC(_hw) +#define IXGBE_EIMC_GPI_SDP2_BY_MAC(_hw) IXGBE_EICR_GPI_SDP2_BY_MAC(_hw) #define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ #define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Err */ #define IXGBE_EIMC_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ @@ -1587,6 +1914,7 @@ enum { #define IXGBE_MAX_ETQF_FILTERS 8 #define IXGBE_ETQF_FCOE 0x08000000 /* bit 27 */ #define IXGBE_ETQF_BCN 0x10000000 /* bit 28 */ +#define IXGBE_ETQF_TX_ANTISPOOF 0x20000000 /* bit 29 */ #define IXGBE_ETQF_1588 0x40000000 /* bit 30 */ #define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */ #define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */ @@ -1607,11 +1935,15 @@ enum { * FCoE (0x8906): Filter 2 * 1588 (0x88f7): Filter 3 * FIP (0x8914): Filter 4 + * LLDP (0x88CC): Filter 5 + * LACP (0x8809): Filter 6 */ #define IXGBE_ETQF_FILTER_EAPOL 0 #define IXGBE_ETQF_FILTER_FCOE 2 #define IXGBE_ETQF_FILTER_1588 3 #define IXGBE_ETQF_FILTER_FIP 4 +#define IXGBE_ETQF_FILTER_LLDP 5 +#define IXGBE_ETQF_FILTER_LACP 6 /* VLAN Control Bit Masks */ #define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */ #define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */ @@ -1667,6 +1999,9 @@ enum { #define IXGBE_LED_IVRT(_i) IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i) #define IXGBE_LED_BLINK(_i) IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i) #define IXGBE_LED_MODE_MASK(_i) IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i) +#define IXGBE_X557_LED_MANUAL_SET_MASK (1 << 8) +#define IXGBE_X557_MAX_LED_INDEX 3 +#define IXGBE_X557_LED_PROVISIONING 0xC430 /* LED modes */ #define IXGBE_LED_LINK_UP 0x0 @@ -1735,6 +2070,9 @@ enum { #define IXGBE_MACC_FS 0x00040000 #define IXGBE_MAC_RX2TX_LPBK 0x00000002 +/* Veto Bit definition */ +#define IXGBE_MMNGC_MNG_VETO 0x00000001 + /* LINKS Bit Masks */ #define IXGBE_LINKS_KX_AN_COMP 0x80000000 #define IXGBE_LINKS_UP 0x40000000 @@ -1753,6 +2091,7 @@ enum { #define IXGBE_LINKS_TL_FAULT 0x00001000 #define IXGBE_LINKS_SIGNAL 0x00000F00 +#define IXGBE_LINKS_SPEED_NON_STD 0x08000000 #define IXGBE_LINKS_SPEED_82599 0x30000000 #define IXGBE_LINKS_SPEED_10G_82599 0x30000000 #define IXGBE_LINKS_SPEED_1G_82599 0x20000000 
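
The *_BY_MAC() macros added throughout these hunks replace fixed register constants with a per-MAC lookup, since X540/X550/X550EM_x move or renumber several registers and bit positions. For registers whose location does not change (e.g. IXGBE_EEC_BY_MAC above), the macro collapses directly to the original constant. The definitions of IXGBE_CAT() and IXGBE_BY_MAC() are not visible in this excerpt; the following is a minimal sketch of the presumed mechanism, inferred from the IXGBE_MVALS_INIT() table, enum ixgbe_mvals, and the "const u32 *mvals" member added to struct ixgbe_hw further down in this diff. The macro bodies and the ixgbe_mvals_X550 table shown here are reconstructions, not verbatim driver source:

/* Token-paste a register name with a suffix, e.g.
 * IXGBE_CAT(I2CCTL, _X550) -> IXGBE_I2CCTL_X550 and
 * IXGBE_CAT(I2CCTL, _IDX) -> IXGBE_I2CCTL_IDX (an enum ixgbe_mvals index). */
#define IXGBE_CAT(r, m)		IXGBE_##r##m

/* Resolve a register for the running MAC by indexing the per-MAC value
 * table that hw->mvals points at. */
#define IXGBE_BY_MAC(_hw, r)	((_hw)->mvals[IXGBE_CAT(r, _IDX)])

/* Hypothetical per-MAC table, one per MAC family: IXGBE_MVALS_INIT(_X550)
 * expands to IXGBE_EEC_X550, IXGBE_FLA_X550, ..., IXGBE_I2CCTL_X550,
 * i.e. exactly the _X550-suffixed #defines added in the hunks above. */
static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = {
	IXGBE_MVALS_INIT(_X550)
};

/* Attach-time code would then select the table once, after which, e.g.,
 * IXGBE_I2CCTL_BY_MAC(hw) reads as hw->mvals[IXGBE_I2CCTL_IDX]:
 *
 *	hw->mvals = ixgbe_mvals_X550;
 *	i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
 */
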
@@ -1795,12 +2134,16 @@ enum { #define IXGBE_SWFW_REGSMP 0x80000000 /* Register Semaphore bit 31 */ /* SW_FW_SYNC/GSSR definitions */ -#define IXGBE_GSSR_EEP_SM 0x0001 -#define IXGBE_GSSR_PHY0_SM 0x0002 -#define IXGBE_GSSR_PHY1_SM 0x0004 -#define IXGBE_GSSR_MAC_CSR_SM 0x0008 -#define IXGBE_GSSR_FLASH_SM 0x0010 -#define IXGBE_GSSR_SW_MNG_SM 0x0400 +#define IXGBE_GSSR_EEP_SM 0x0001 +#define IXGBE_GSSR_PHY0_SM 0x0002 +#define IXGBE_GSSR_PHY1_SM 0x0004 +#define IXGBE_GSSR_MAC_CSR_SM 0x0008 +#define IXGBE_GSSR_FLASH_SM 0x0010 +#define IXGBE_GSSR_NVM_UPDATE_SM 0x0200 +#define IXGBE_GSSR_SW_MNG_SM 0x0400 +#define IXGBE_GSSR_SHARED_I2C_SM 0x1806 /* Wait for both phys and both I2Cs */ +#define IXGBE_GSSR_I2C_MASK 0x1800 +#define IXGBE_GSSR_NVM_PHY_MASK 0xF /* FW Status register bitmask */ #define IXGBE_FWSTS_FWRI 0x00000200 /* Firmware Reset Indication */ @@ -1830,32 +2173,40 @@ enum { #define IXGBE_EEPROM_WORD_SIZE_SHIFT 6 #define IXGBE_EEPROM_OPCODE_BITS 8 +/* FLA Register */ +#define IXGBE_FLA_LOCKED 0x00000040 + /* Part Number String Length */ #define IXGBE_PBANUM_LENGTH 11 /* Checksum and EEPROM pointers */ -#define IXGBE_PBANUM_PTR_GUARD 0xFAFA -#define IXGBE_EEPROM_CHECKSUM 0x3F -#define IXGBE_EEPROM_SUM 0xBABA -#define IXGBE_PCIE_ANALOG_PTR 0x03 -#define IXGBE_ATLAS0_CONFIG_PTR 0x04 -#define IXGBE_PHY_PTR 0x04 -#define IXGBE_ATLAS1_CONFIG_PTR 0x05 -#define IXGBE_OPTION_ROM_PTR 0x05 -#define IXGBE_PCIE_GENERAL_PTR 0x06 -#define IXGBE_PCIE_CONFIG0_PTR 0x07 -#define IXGBE_PCIE_CONFIG1_PTR 0x08 -#define IXGBE_CORE0_PTR 0x09 -#define IXGBE_CORE1_PTR 0x0A -#define IXGBE_MAC0_PTR 0x0B -#define IXGBE_MAC1_PTR 0x0C -#define IXGBE_CSR0_CONFIG_PTR 0x0D -#define IXGBE_CSR1_CONFIG_PTR 0x0E -#define IXGBE_FW_PTR 0x0F -#define IXGBE_PBANUM0_PTR 0x15 -#define IXGBE_PBANUM1_PTR 0x16 -#define IXGBE_ALT_MAC_ADDR_PTR 0x37 -#define IXGBE_FREE_SPACE_PTR 0X3E +#define IXGBE_PBANUM_PTR_GUARD 0xFAFA +#define IXGBE_EEPROM_CHECKSUM 0x3F +#define IXGBE_EEPROM_SUM 0xBABA +#define IXGBE_PCIE_ANALOG_PTR 0x03 +#define IXGBE_ATLAS0_CONFIG_PTR 0x04 +#define IXGBE_PHY_PTR 0x04 +#define IXGBE_ATLAS1_CONFIG_PTR 0x05 +#define IXGBE_OPTION_ROM_PTR 0x05 +#define IXGBE_PCIE_GENERAL_PTR 0x06 +#define IXGBE_PCIE_CONFIG0_PTR 0x07 +#define IXGBE_PCIE_CONFIG1_PTR 0x08 +#define IXGBE_CORE0_PTR 0x09 +#define IXGBE_CORE1_PTR 0x0A +#define IXGBE_MAC0_PTR 0x0B +#define IXGBE_MAC1_PTR 0x0C +#define IXGBE_CSR0_CONFIG_PTR 0x0D +#define IXGBE_CSR1_CONFIG_PTR 0x0E +#define IXGBE_PCIE_ANALOG_PTR_X550 0x02 +#define IXGBE_SHADOW_RAM_SIZE_X550 0x4000 +#define IXGBE_IXGBE_PCIE_GENERAL_SIZE 0x24 +#define IXGBE_PCIE_CONFIG_SIZE 0x08 +#define IXGBE_EEPROM_LAST_WORD 0x41 +#define IXGBE_FW_PTR 0x0F +#define IXGBE_PBANUM0_PTR 0x15 +#define IXGBE_PBANUM1_PTR 0x16 +#define IXGBE_ALT_MAC_ADDR_PTR 0x37 +#define IXGBE_FREE_SPACE_PTR 0X3E #define IXGBE_SAN_MAC_ADDR_PTR 0x28 #define IXGBE_DEVICE_CAPS 0x2C @@ -1896,6 +2247,11 @@ enum { #define IXGBE_NVM_POLL_WRITE 1 /* Flag for polling for wr complete */ #define IXGBE_NVM_POLL_READ 0 /* Flag for polling for rd complete */ +#define NVM_INIT_CTRL_3 0x38 +#define NVM_INIT_CTRL_3_LPLU 0x8 +#define NVM_INIT_CTRL_3_D10GMP_PORT0 0x40 +#define NVM_INIT_CTRL_3_D10GMP_PORT1 0x100 + #define IXGBE_ETH_LENGTH_OF_ADDRESS 6 #define IXGBE_EEPROM_PAGE_SIZE_MAX 128 @@ -2010,7 +2366,7 @@ enum { #define IXGBE_RFCTL_ISCSI_DIS 0x00000001 #define IXGBE_RFCTL_ISCSI_DWC_MASK 0x0000003E #define IXGBE_RFCTL_ISCSI_DWC_SHIFT 1 -#define IXGBE_RFCTL_RSC_DIS 0x00000010 +#define IXGBE_RFCTL_RSC_DIS 0x00000020 #define IXGBE_RFCTL_NFSW_DIS 
0x00000040 #define IXGBE_RFCTL_NFSR_DIS 0x00000080 #define IXGBE_RFCTL_NFS_VER_MASK 0x00000300 @@ -2049,6 +2405,14 @@ enum { #define IXGBE_TSAUXC_EN_CLK 0x00000004 #define IXGBE_TSAUXC_SYNCLK 0x00000008 #define IXGBE_TSAUXC_SDP0_INT 0x00000040 +#define IXGBE_TSAUXC_EN_TT0 0x00000001 +#define IXGBE_TSAUXC_EN_TT1 0x00000002 +#define IXGBE_TSAUXC_ST0 0x00000010 +#define IXGBE_TSAUXC_DISABLE_SYSTIME 0x80000000 + +#define IXGBE_TSSDP_TS_SDP0_SEL_MASK 0x000000C0 +#define IXGBE_TSSDP_TS_SDP0_CLK0 0x00000080 +#define IXGBE_TSSDP_TS_SDP0_EN 0x00000100 #define IXGBE_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */ #define IXGBE_TSYNCTXCTL_ENABLED 0x00000010 /* Tx timestamping enabled */ @@ -2058,8 +2422,19 @@ enum { #define IXGBE_TSYNCRXCTL_TYPE_L2_V2 0x00 #define IXGBE_TSYNCRXCTL_TYPE_L4_V1 0x02 #define IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 +#define IXGBE_TSYNCRXCTL_TYPE_ALL 0x08 #define IXGBE_TSYNCRXCTL_TYPE_EVENT_V2 0x0A #define IXGBE_TSYNCRXCTL_ENABLED 0x00000010 /* Rx Timestamping enabled */ +#define IXGBE_TSYNCRXCTL_TSIP_UT_EN 0x00800000 /* Rx Timestamp in Packet */ +#define IXGBE_TSYNCRXCTL_TSIP_UP_MASK 0xFF000000 /* Rx Timestamp UP Mask */ + +#define IXGBE_TSIM_SYS_WRAP 0x00000001 +#define IXGBE_TSIM_TXTS 0x00000002 +#define IXGBE_TSIM_TADJ 0x00000080 + +#define IXGBE_TSICR_SYS_WRAP IXGBE_TSIM_SYS_WRAP +#define IXGBE_TSICR_TXTS IXGBE_TSIM_TXTS +#define IXGBE_TSICR_TADJ IXGBE_TSIM_TADJ #define IXGBE_RXMTRL_V1_CTRLT_MASK 0x000000FF #define IXGBE_RXMTRL_V1_SYNC_MSG 0x00 @@ -2118,10 +2493,12 @@ enum { #define IXGBE_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 #define IXGBE_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 #define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000 +#define IXGBE_MRQC_MULTIPLE_RSS 0x00002000 #define IXGBE_MRQC_L3L4TXSWEN 0x00008000 /* Queue Drop Enable */ #define IXGBE_QDE_ENABLE 0x00000001 +#define IXGBE_QDE_HIDE_VLAN 0x00000002 #define IXGBE_QDE_IDX_MASK 0x00007F00 #define IXGBE_QDE_IDX_SHIFT 8 #define IXGBE_QDE_WRITE 0x00010000 @@ -2163,10 +2540,12 @@ enum { #define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ #define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */ #define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */ +#define IXGBE_RXD_STAT_OUTERIPCS 0x100 /* Cloud IP xsum calculated */ #define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */ #define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ #define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ #define IXGBE_RXD_STAT_LLINT 0x800 /* Pkt caused Low Latency Interrupt */ +#define IXGBE_RXD_STAT_TSIP 0x08000 /* Time Stamp in packet buffer */ #define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */ #define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */ #define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */ @@ -2180,8 +2559,9 @@ enum { #define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */ #define IXGBE_RXDADV_ERR_MASK 0xfff00000 /* RDESC.ERRORS mask */ #define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */ +#define IXGBE_RXDADV_ERR_OUTERIPER 0x04000000 /* CRC IP Header error */ #define IXGBE_RXDADV_ERR_RXE 0x20000000 /* Any MAC Error */ -#define IXGBE_RXDADV_ERR_FCEOFE 0x80000000 /* FCoEFe/IPE */ +#define IXGBE_RXDADV_ERR_FCEOFE 0x80000000 /* FCEOFe/IPE */ #define IXGBE_RXDADV_ERR_FCERR 0x00700000 /* FCERR/FDIRERR */ #define IXGBE_RXDADV_ERR_FDIR_LEN 0x00100000 /* FDIR Length error */ #define IXGBE_RXDADV_ERR_FDIR_DROP 0x00200000 /* FDIR Drop error */ @@ -2212,6 +2592,7 @@ enum { #define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. 
FCP_RSP */ #define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */ #define IXGBE_RXDADV_STAT_TS 0x00010000 /* IEEE1588 Time Stamp */ +#define IXGBE_RXDADV_STAT_TSIP 0x00008000 /* Time Stamp in packet buffer */ /* PSRTYPE bit definitions */ #define IXGBE_PSRTYPE_TCPHDR 0x00000010 @@ -2222,6 +2603,10 @@ enum { /* SRRCTL bit definitions */ #define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */ +#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* 64byte resolution (>> 6) + * + at bit 8 offset (<< 8) + * = (<< 2) + */ #define IXGBE_SRRCTL_RDMTS_SHIFT 22 #define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000 #define IXGBE_SRRCTL_DROP_EN 0x10000000 @@ -2269,6 +2654,8 @@ enum { #define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */ #define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */ #define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */ +#define IXGBE_RXDADV_PKTTYPE_VXLAN 0x00000800 /* VXLAN hdr present */ +#define IXGBE_RXDADV_PKTTYPE_TUNNEL 0x00010000 /* Tunnel type */ #define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */ #define IXGBE_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */ #define IXGBE_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */ @@ -2319,6 +2706,68 @@ enum { #define IXGBE_MBVFICR(_i) (0x00710 + ((_i) * 4)) #define IXGBE_VFLRE(_i) (((_i & 1) ? 0x001C0 : 0x00600)) #define IXGBE_VFLREC(_i) (0x00700 + ((_i) * 4)) +/* Translated register #defines */ +#define IXGBE_PVFCTRL(P) (0x00300 + (4 * (P))) +#define IXGBE_PVFSTATUS(P) (0x00008 + (0 * (P))) +#define IXGBE_PVFLINKS(P) (0x042A4 + (0 * (P))) +#define IXGBE_PVFRTIMER(P) (0x00048 + (0 * (P))) +#define IXGBE_PVFMAILBOX(P) (0x04C00 + (4 * (P))) +#define IXGBE_PVFRXMEMWRAP(P) (0x03190 + (0 * (P))) +#define IXGBE_PVTEICR(P) (0x00B00 + (4 * (P))) +#define IXGBE_PVTEICS(P) (0x00C00 + (4 * (P))) +#define IXGBE_PVTEIMS(P) (0x00D00 + (4 * (P))) +#define IXGBE_PVTEIMC(P) (0x00E00 + (4 * (P))) +#define IXGBE_PVTEIAC(P) (0x00F00 + (4 * (P))) +#define IXGBE_PVTEIAM(P) (0x04D00 + (4 * (P))) +#define IXGBE_PVTEITR(P) (((P) < 24) ? (0x00820 + ((P) * 4)) : \ + (0x012300 + (((P) - 24) * 4))) +#define IXGBE_PVTIVAR(P) (0x12500 + (4 * (P))) +#define IXGBE_PVTIVAR_MISC(P) (0x04E00 + (4 * (P))) +#define IXGBE_PVTRSCINT(P) (0x12000 + (4 * (P))) +#define IXGBE_VFPBACL(P) (0x110C8 + (4 * (P))) +#define IXGBE_PVFRDBAL(P) ((P < 64) ? (0x01000 + (0x40 * (P))) \ + : (0x0D000 + (0x40 * ((P) - 64)))) +#define IXGBE_PVFRDBAH(P) ((P < 64) ? (0x01004 + (0x40 * (P))) \ + : (0x0D004 + (0x40 * ((P) - 64)))) +#define IXGBE_PVFRDLEN(P) ((P < 64) ? (0x01008 + (0x40 * (P))) \ + : (0x0D008 + (0x40 * ((P) - 64)))) +#define IXGBE_PVFRDH(P) ((P < 64) ? (0x01010 + (0x40 * (P))) \ + : (0x0D010 + (0x40 * ((P) - 64)))) +#define IXGBE_PVFRDT(P) ((P < 64) ? (0x01018 + (0x40 * (P))) \ + : (0x0D018 + (0x40 * ((P) - 64)))) +#define IXGBE_PVFRXDCTL(P) ((P < 64) ? (0x01028 + (0x40 * (P))) \ + : (0x0D028 + (0x40 * ((P) - 64)))) +#define IXGBE_PVFSRRCTL(P) ((P < 64) ? (0x01014 + (0x40 * (P))) \ + : (0x0D014 + (0x40 * ((P) - 64)))) +#define IXGBE_PVFPSRTYPE(P) (0x0EA00 + (4 * (P))) +#define IXGBE_PVFTDBAL(P) (0x06000 + (0x40 * (P))) +#define IXGBE_PVFTDBAH(P) (0x06004 + (0x40 * (P))) +#define IXGBE_PVFTTDLEN(P) (0x06008 + (0x40 * (P))) +#define IXGBE_PVFTDH(P) (0x06010 + (0x40 * (P))) +#define IXGBE_PVFTDT(P) (0x06018 + (0x40 * (P))) +#define IXGBE_PVFTXDCTL(P) (0x06028 + (0x40 * (P))) +#define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * (P))) +#define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * (P))) +#define IXGBE_PVFDCA_RXCTRL(P) (((P) < 64) ? 
(0x0100C + (0x40 * (P))) \ + : (0x0D00C + (0x40 * ((P) - 64)))) +#define IXGBE_PVFDCA_TXCTRL(P) (0x0600C + (0x40 * (P))) +#define IXGBE_PVFGPRC(x) (0x0101C + (0x40 * (x))) +#define IXGBE_PVFGPTC(x) (0x08300 + (0x04 * (x))) +#define IXGBE_PVFGORC_LSB(x) (0x01020 + (0x40 * (x))) +#define IXGBE_PVFGORC_MSB(x) (0x0D020 + (0x40 * (x))) +#define IXGBE_PVFGOTC_LSB(x) (0x08400 + (0x08 * (x))) +#define IXGBE_PVFGOTC_MSB(x) (0x08404 + (0x08 * (x))) +#define IXGBE_PVFMPRC(x) (0x0D01C + (0x40 * (x))) + +#define IXGBE_PVFTDWBALn(q_per_pool, vf_number, vf_q_index) \ + (IXGBE_PVFTDWBAL((q_per_pool)*(vf_number) + (vf_q_index))) +#define IXGBE_PVFTDWBAHn(q_per_pool, vf_number, vf_q_index) \ + (IXGBE_PVFTDWBAH((q_per_pool)*(vf_number) + (vf_q_index))) + +#define IXGBE_PVFTDHn(q_per_pool, vf_number, vf_q_index) \ + (IXGBE_PVFTDH((q_per_pool)*(vf_number) + (vf_q_index))) +#define IXGBE_PVFTDTn(q_per_pool, vf_number, vf_q_index) \ + (IXGBE_PVFTDT((q_per_pool)*(vf_number) + (vf_q_index))) /* Little Endian defines */ #ifndef __le16 @@ -2355,7 +2804,11 @@ enum ixgbe_fdir_pballoc_type { #define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS 0x00000080 #define IXGBE_FDIRCTRL_DROP_Q_SHIFT 8 #define IXGBE_FDIRCTRL_FLEX_SHIFT 16 +#define IXGBE_FDIRCTRL_FILTERMODE_SHIFT 21 +#define IXGBE_FDIRCTRL_FILTERMODE_MACVLAN 0x0001 /* bit 23:21, 001b */ +#define IXGBE_FDIRCTRL_FILTERMODE_CLOUD 0x0002 /* bit 23:21, 010b */ #define IXGBE_FDIRCTRL_SEARCHLIM 0x00800000 +#define IXGBE_FDIRCTRL_FILTERMODE_MASK 0x00E00000 #define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT 24 #define IXGBE_FDIRCTRL_FULL_THRESH_MASK 0xF0000000 #define IXGBE_FDIRCTRL_FULL_THRESH_SHIFT 28 @@ -2369,6 +2822,13 @@ enum ixgbe_fdir_pballoc_type { #define IXGBE_FDIRM_L4P 0x00000008 #define IXGBE_FDIRM_FLEX 0x00000010 #define IXGBE_FDIRM_DIPv6 0x00000020 +#define IXGBE_FDIRM_L3P 0x00000040 + +#define IXGBE_FDIRIP6M_INNER_MAC 0x03F0 /* bit 9:4 */ +#define IXGBE_FDIRIP6M_TUNNEL_TYPE 0x0800 /* bit 11 */ +#define IXGBE_FDIRIP6M_TNI_VNI 0xF000 /* bit 15:12 */ +#define IXGBE_FDIRIP6M_TNI_VNI_24 0x1000 /* bit 12 */ +#define IXGBE_FDIRIP6M_ALWAYS_MASK 0x040F /* bit 10, 3:0 */ #define IXGBE_FDIRFREE_FREE_MASK 0xFFFF #define IXGBE_FDIRFREE_FREE_SHIFT 0 @@ -2410,26 +2870,22 @@ enum ixgbe_fdir_pballoc_type { #define IXGBE_FDIRCMD_QUEUE_EN 0x00008000 #define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT 5 #define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16 +#define IXGBE_FDIRCMD_TUNNEL_FILTER_SHIFT 23 #define IXGBE_FDIRCMD_VT_POOL_SHIFT 24 #define IXGBE_FDIR_INIT_DONE_POLL 10 #define IXGBE_FDIRCMD_CMD_POLL 10 - +#define IXGBE_FDIRCMD_TUNNEL_FILTER 0x00800000 #define IXGBE_FDIR_DROP_QUEUE 127 -#define IXGBE_STATUS_OVERHEATING_BIT 20 /* STATUS overtemp bit num */ -/* iTS sensor related defines*/ -#define IXGBE_TEMP_STATUS_ADDR_X540 0xC830 -#define IXGBE_TEMP_VALUE_ADDR_X540 0xC820 -#define IXGBE_TEMP_PROV_2_ADDR_X540 0xC421 -#define IXGBE_TEMP_PROV_4_ADDR_X540 0xC423 -#define IXGBE_TEMP_STATUS_PAGE_X540 0x1E -#define IXGBE_TEMP_HIGH_FAILURE_BIT_X540 0xE -#define IXGBE_TEMP_HIGH_WARNING_BIT_X540 0xC /* Manageability Host Interface defines */ #define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */ #define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */ #define IXGBE_HI_COMMAND_TIMEOUT 500 /* Process HI command limit */ +#define IXGBE_HI_FLASH_ERASE_TIMEOUT 1000 /* Process Erase command limit */ +#define IXGBE_HI_FLASH_UPDATE_TIMEOUT 5000 /* Process Update command limit */ +#define IXGBE_HI_FLASH_APPLY_TIMEOUT 0 /* Process Apply command limit */ +#define IXGBE_HI_PHY_MGMT_REQ_TIMEOUT 2000 /* Wait 
up to 2 seconds */ /* CEM Support */ #define FW_CEM_HDR_LEN 0x4 @@ -2439,7 +2895,18 @@ enum ixgbe_fdir_pballoc_type { #define FW_CEM_UNUSED_VER 0x0 #define FW_CEM_MAX_RETRIES 3 #define FW_CEM_RESP_STATUS_SUCCESS 0x1 - +#define FW_READ_SHADOW_RAM_CMD 0x31 +#define FW_READ_SHADOW_RAM_LEN 0x6 +#define FW_WRITE_SHADOW_RAM_CMD 0x33 +#define FW_WRITE_SHADOW_RAM_LEN 0xA /* 8 plus 1 WORD to write */ +#define FW_SHADOW_RAM_DUMP_CMD 0x36 +#define FW_SHADOW_RAM_DUMP_LEN 0 +#define FW_DEFAULT_CHECKSUM 0xFF /* checksum always 0xFF */ +#define FW_NVM_DATA_OFFSET 3 +#define FW_MAX_READ_BUFFER_SIZE 1024 +#define FW_DISABLE_RXEN_CMD 0xDE +#define FW_DISABLE_RXEN_LEN 0x1 +#define FW_PHY_MGMT_REQ_CMD 0x20 /* Host Interface Command Structures */ struct ixgbe_hic_hdr { @@ -2452,6 +2919,25 @@ struct ixgbe_hic_hdr { u8 checksum; }; +struct ixgbe_hic_hdr2_req { + u8 cmd; + u8 buf_lenh; + u8 buf_lenl; + u8 checksum; +}; + +struct ixgbe_hic_hdr2_rsp { + u8 cmd; + u8 buf_lenl; + u8 buf_lenh_status; /* 7-5: high bits of buf_len, 4-0: status */ + u8 checksum; +}; + +union ixgbe_hic_hdr2 { + struct ixgbe_hic_hdr2_req req; + struct ixgbe_hic_hdr2_rsp rsp; +}; + struct ixgbe_hic_drv_info { struct ixgbe_hic_hdr hdr; u8 port_num; @@ -2463,6 +2949,33 @@ struct ixgbe_hic_drv_info { u16 pad2; /* end spacing to ensure length is mult. of dword2 */ }; +/* These need to be dword aligned */ +struct ixgbe_hic_read_shadow_ram { + union ixgbe_hic_hdr2 hdr; + u32 address; + u16 length; + u16 pad2; + u16 data; + u16 pad3; +}; + +struct ixgbe_hic_write_shadow_ram { + union ixgbe_hic_hdr2 hdr; + u32 address; + u16 length; + u16 pad2; + u16 data; + u16 pad3; +}; + +struct ixgbe_hic_disable_rxen { + struct ixgbe_hic_hdr hdr; + u8 port_number; + u8 pad2; + u16 pad3; +}; + + /* Transmit Descriptor - Legacy */ struct ixgbe_legacy_tx_desc { u64 buffer_addr; /* Address of the descriptor's data buffer */ @@ -2604,6 +3117,12 @@ struct ixgbe_adv_tx_context_desc { #define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ #define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ +#define IXGBE_ADVTXD_OUTER_IPLEN 16 /* Adv ctxt OUTERIPLEN shift */ +#define IXGBE_ADVTXD_TUNNEL_LEN 24 /* Adv ctxt TUNNELLEN shift */ +#define IXGBE_ADVTXD_TUNNEL_TYPE_SHIFT 16 /* Adv Tx Desc Tunnel Type shift */ +#define IXGBE_ADVTXD_OUTERIPCS_SHIFT 17 /* Adv Tx Desc OUTERIPCS Shift */ +#define IXGBE_ADVTXD_TUNNEL_TYPE_NVGRE 1 /* Adv Tx Desc Tunnel Type NVGRE */ + /* Autonegotiation advertised speeds */ typedef u32 ixgbe_autoneg_advertised; /* Link speed */ @@ -2611,6 +3130,8 @@ typedef u32 ixgbe_link_speed; #define IXGBE_LINK_SPEED_UNKNOWN 0 #define IXGBE_LINK_SPEED_100_FULL 0x0008 #define IXGBE_LINK_SPEED_1GB_FULL 0x0020 +#define IXGBE_LINK_SPEED_2_5GB_FULL 0x0400 +#define IXGBE_LINK_SPEED_5GB_FULL 0x0800 #define IXGBE_LINK_SPEED_10GB_FULL 0x0080 #define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \ IXGBE_LINK_SPEED_10GB_FULL) @@ -2710,6 +3231,7 @@ typedef u32 ixgbe_physical_layer; #define IXGBE_ATR_L4TYPE_TCP 0x2 #define IXGBE_ATR_L4TYPE_SCTP 0x3 #define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4 +#define IXGBE_ATR_L4TYPE_TUNNEL_MASK 0x10 enum ixgbe_atr_flow_type { IXGBE_ATR_FLOW_TYPE_IPV4 = 0x0, IXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1, @@ -2719,6 +3241,14 @@ enum ixgbe_atr_flow_type { IXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5, IXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6, IXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7, + IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4 = 0x10, + IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4 = 0x11, + IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4 = 0x12, + IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4 = 0x13, + 
IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV6 = 0x14, + IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV6 = 0x15, + IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV6 = 0x16, + IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV6 = 0x17, }; /* Flow Director ATR input struct. */ @@ -2730,6 +3260,9 @@ union ixgbe_atr_input { * flow_type - 1 byte * vlan_id - 2 bytes * src_ip - 16 bytes + * inner_mac - 6 bytes + * cloud_mode - 2 bytes + * tni_vni - 4 bytes * dst_ip - 16 bytes * src_port - 2 bytes * dst_port - 2 bytes @@ -2742,12 +3275,15 @@ union ixgbe_atr_input { __be16 vlan_id; __be32 dst_ip[4]; __be32 src_ip[4]; + u8 inner_mac[6]; + __be16 tunnel_type; + __be32 tni_vni; __be16 src_port; __be16 dst_port; __be16 flex_bytes; __be16 bkt_hash; } formatted; - __be32 dword_stream[11]; + __be32 dword_stream[14]; }; /* Flow Director compressed ATR hash input struct */ @@ -2767,6 +3303,36 @@ union ixgbe_atr_hash_dword { }; +#define IXGBE_MVALS_INIT(m) \ + IXGBE_CAT(EEC, m), \ + IXGBE_CAT(FLA, m), \ + IXGBE_CAT(GRC, m), \ + IXGBE_CAT(SRAMREL, m), \ + IXGBE_CAT(FACTPS, m), \ + IXGBE_CAT(SWSM, m), \ + IXGBE_CAT(FWSM, m), \ + IXGBE_CAT(SDP0_GPIEN, m), \ + IXGBE_CAT(SDP1_GPIEN, m), \ + IXGBE_CAT(SDP2_GPIEN, m), \ + IXGBE_CAT(EICR_GPI_SDP0, m), \ + IXGBE_CAT(EICR_GPI_SDP1, m), \ + IXGBE_CAT(EICR_GPI_SDP2, m), \ + IXGBE_CAT(CIAA, m), \ + IXGBE_CAT(CIAD, m), \ + IXGBE_CAT(I2C_CLK_IN, m), \ + IXGBE_CAT(I2C_CLK_OUT, m), \ + IXGBE_CAT(I2C_DATA_IN, m), \ + IXGBE_CAT(I2C_DATA_OUT, m), \ + IXGBE_CAT(I2C_DATA_OE_N_EN, m), \ + IXGBE_CAT(I2C_BB_EN, m), \ + IXGBE_CAT(I2C_CLK_OE_N_EN, m), \ + IXGBE_CAT(I2CCTL, m) + +enum ixgbe_mvals { + IXGBE_MVALS_INIT(_IDX), + IXGBE_MVALS_IDX_LIMIT +}; + /* * Unavailable: The FCoE Boot Option ROM is not present in the flash. * Disabled: Present; boot order is not set for any targets on the port. @@ -2792,6 +3358,10 @@ enum ixgbe_mac_type { ixgbe_mac_82599_vf, ixgbe_mac_X540, ixgbe_mac_X540_vf, + ixgbe_mac_X550, + ixgbe_mac_X550EM_x, + ixgbe_mac_X550_vf, + ixgbe_mac_X550EM_x_vf, ixgbe_num_macs }; @@ -2800,6 +3370,9 @@ enum ixgbe_phy_type { ixgbe_phy_none, ixgbe_phy_tn, ixgbe_phy_aq, + ixgbe_phy_x550em_kr, + ixgbe_phy_x550em_kx4, + ixgbe_phy_x550em_ext_t, ixgbe_phy_cu_unknown, ixgbe_phy_qt, ixgbe_phy_xaui, @@ -2812,6 +3385,10 @@ enum ixgbe_phy_type { ixgbe_phy_sfp_ftl_active, ixgbe_phy_sfp_unknown, ixgbe_phy_sfp_intel, + ixgbe_phy_qsfp_passive_unknown, + ixgbe_phy_qsfp_active_unknown, + ixgbe_phy_qsfp_intel, + ixgbe_phy_qsfp_unknown, ixgbe_phy_sfp_unsupported, /*Enforce bit set with unsupported module*/ ixgbe_phy_generic }; @@ -2843,6 +3420,8 @@ enum ixgbe_sfp_type { ixgbe_sfp_type_1g_cu_core1 = 10, ixgbe_sfp_type_1g_sx_core0 = 11, ixgbe_sfp_type_1g_sx_core1 = 12, + ixgbe_sfp_type_1g_lx_core0 = 13, + ixgbe_sfp_type_1g_lx_core1 = 14, ixgbe_sfp_type_not_present = 0xFFFE, ixgbe_sfp_type_unknown = 0xFFFF }; @@ -2851,6 +3430,7 @@ enum ixgbe_media_type { ixgbe_media_type_unknown = 0, ixgbe_media_type_fiber, ixgbe_media_type_fiber_fixed, + ixgbe_media_type_fiber_qsfp, ixgbe_media_type_copper, ixgbe_media_type_backplane, ixgbe_media_type_cx4, @@ -2880,6 +3460,7 @@ enum ixgbe_bus_type { ixgbe_bus_type_pci, ixgbe_bus_type_pcix, ixgbe_bus_type_pci_express, + ixgbe_bus_type_internal, ixgbe_bus_type_reserved }; @@ -3036,7 +3617,7 @@ struct ixgbe_eeprom_operations { s32 (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *); s32 (*validate_checksum)(struct ixgbe_hw *, u16 *); s32 (*update_checksum)(struct ixgbe_hw *); - u16 (*calc_checksum)(struct ixgbe_hw *); + s32 (*calc_checksum)(struct ixgbe_hw *); }; struct ixgbe_mac_operations { @@ -3062,17 +3643,21 @@ 
struct ixgbe_mac_operations { s32 (*enable_rx_dma)(struct ixgbe_hw *, u32); s32 (*disable_sec_rx_path)(struct ixgbe_hw *); s32 (*enable_sec_rx_path)(struct ixgbe_hw *); - s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16); - void (*release_swfw_sync)(struct ixgbe_hw *, u16); + s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u32); + void (*release_swfw_sync)(struct ixgbe_hw *, u32); + s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *); + s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool); /* Link */ void (*disable_tx_laser)(struct ixgbe_hw *); void (*enable_tx_laser)(struct ixgbe_hw *); void (*flap_tx_laser)(struct ixgbe_hw *); s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool); + s32 (*setup_mac_link)(struct ixgbe_hw *, ixgbe_link_speed, bool); s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *, bool *); + void (*set_rate_select_speed)(struct ixgbe_hw *, ixgbe_link_speed); /* Packet Buffer manipulation */ void (*setup_rxpba)(struct ixgbe_hw *, int, u32, int); @@ -3107,13 +3692,26 @@ struct ixgbe_mac_operations { /* Flow Control */ s32 (*fc_enable)(struct ixgbe_hw *); + s32 (*setup_fc)(struct ixgbe_hw *); /* Manageability interface */ s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8); - s32 (*dmac_config)(struct ixgbe_hw *hw); + void (*get_rtrup2tc)(struct ixgbe_hw *hw, u8 *map); + void (*disable_rx)(struct ixgbe_hw *hw); + void (*enable_rx)(struct ixgbe_hw *hw); + void (*set_source_address_pruning)(struct ixgbe_hw *, bool, + unsigned int); + void (*set_ethertype_anti_spoofing)(struct ixgbe_hw *, bool, int); s32 (*dmac_update_tcs)(struct ixgbe_hw *hw); s32 (*dmac_config_tcs)(struct ixgbe_hw *hw); - void (*get_rtrup2tc)(struct ixgbe_hw *hw, u8 *map); + s32 (*dmac_config)(struct ixgbe_hw *hw); + s32 (*setup_eee)(struct ixgbe_hw *hw, bool enable_eee); + s32 (*read_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32 *); + s32 (*write_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32); + void (*disable_mdd)(struct ixgbe_hw *hw); + void (*enable_mdd)(struct ixgbe_hw *hw); + void (*mdd_event)(struct ixgbe_hw *hw, u32 *vf_bitmap); + void (*restore_mdd_vf)(struct ixgbe_hw *hw, u32 vf); }; struct ixgbe_phy_operations { @@ -3126,6 +3724,7 @@ struct ixgbe_phy_operations { s32 (*read_reg_mdi)(struct ixgbe_hw *, u32, u32, u16 *); s32 (*write_reg_mdi)(struct ixgbe_hw *, u32, u32, u16); s32 (*setup_link)(struct ixgbe_hw *); + s32 (*setup_internal_link)(struct ixgbe_hw *); s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool); s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *); s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *); @@ -3135,7 +3734,20 @@ struct ixgbe_phy_operations { s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *); s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8); void (*i2c_bus_clear)(struct ixgbe_hw *); + s32 (*read_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val); + s32 (*write_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val); s32 (*check_overtemp)(struct ixgbe_hw *); + s32 (*set_phy_power)(struct ixgbe_hw *, bool on); + s32 (*enter_lplu)(struct ixgbe_hw *); + s32 (*handle_lasi)(struct ixgbe_hw *hw); + s32 (*read_i2c_combined_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg, + u16 *value); + s32 (*write_i2c_combined_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg, + u16 value); + s32 (*read_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr, + u8 *value); + s32 (*write_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr, + u8 
value); }; struct ixgbe_eeprom_info { @@ -3145,6 +3757,7 @@ struct ixgbe_eeprom_info { u16 word_size; u16 address_bits; u16 word_page_size; + u16 ctrl_word_3; }; #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01 @@ -3169,7 +3782,6 @@ struct ixgbe_mac_info { u32 max_tx_queues; u32 max_rx_queues; u32 orig_autoc; - u32 cached_autoc; u8 san_mac_rar_index; bool get_link_status; u32 orig_autoc2; @@ -3178,6 +3790,8 @@ struct ixgbe_mac_info { bool orig_link_settings_stored; bool autotry_restart; u8 flags; + struct ixgbe_dmac_config dmac_config; + bool set_lben; }; struct ixgbe_phy_info { @@ -3189,12 +3803,15 @@ struct ixgbe_phy_info { bool sfp_setup_needed; u32 revision; enum ixgbe_media_type media_type; + u32 phy_semaphore_mask; bool reset_disable; ixgbe_autoneg_advertised autoneg_advertised; enum ixgbe_smart_speed smart_speed; bool smart_speed_active; bool multispeed_fiber; bool reset_if_overtemp; + bool qsfp_shared_i2c_bus; + u32 nw_mng_if_sel; }; #include "ixgbe_mbx.h" @@ -3229,7 +3846,7 @@ struct ixgbe_mbx_info { }; struct ixgbe_hw { - u8 *hw_addr; + u8 IOMEM *hw_addr; void *back; struct ixgbe_mac_info mac; struct ixgbe_addr_filter_info addr_ctrl; @@ -3238,6 +3855,7 @@ struct ixgbe_hw { struct ixgbe_eeprom_info eeprom; struct ixgbe_bus_info bus; struct ixgbe_mbx_info mbx; + const u32 *mvals; u16 device_id; u16 vendor_id; u16 subsystem_device_id; @@ -3247,7 +3865,6 @@ struct ixgbe_hw { int api_version; bool force_full_reset; bool allow_unsupported_sfp; - bool mng_fw_enabled; bool wol_enabled; }; @@ -3292,7 +3909,86 @@ struct ixgbe_hw { #define IXGBE_ERR_OUT_OF_MEM -34 #define IXGBE_ERR_FEATURE_NOT_SUPPORTED -36 #define IXGBE_ERR_EEPROM_PROTECTED_REGION -37 +#define IXGBE_ERR_FDIR_CMD_INCOMPLETE -38 #define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF + +#define IXGBE_FUSES0_GROUP(_i) (0x11158 + ((_i) * 4)) +#define IXGBE_FUSES0_300MHZ (1 << 5) +#define IXGBE_FUSES0_REV1 (1 << 6) + +#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P == 0) ? (0x4010) : (0x8010)) +#define IXGBE_KRM_LINK_CTRL_1(P) ((P == 0) ? (0x420C) : (0x820C)) +#define IXGBE_KRM_AN_CNTL_1(P) ((P == 0) ? (0x422C) : (0x822C)) +#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P == 0) ? (0x4634) : (0x8634)) +#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P == 0) ? (0x4638) : (0x8638)) +#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P == 0) ? (0x4B00) : (0x8B00)) +#define IXGBE_KRM_PMD_DFX_BURNIN(P) ((P == 0) ? (0x4E00) : (0x8E00)) +#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P == 0) ? (0x5520) : (0x9520)) +#define IXGBE_KRM_RX_ANA_CTL(P) ((P == 0) ? 
(0x5A00) : (0x9A00)) + +#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B (1 << 9) +#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS (1 << 11) + +#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (0x7 << 8) +#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2 << 8) +#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4 << 8) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ (1 << 14) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC (1 << 15) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX (1 << 16) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR (1 << 18) +#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX (1 << 24) +#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR (1 << 26) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE (1 << 29) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART (1 << 31) + +#define IXGBE_KRM_AN_CNTL_1_SYM_PAUSE (1 << 28) +#define IXGBE_KRM_AN_CNTL_1_ASM_PAUSE (1 << 29) + +#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN (1 << 6) +#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN (1 << 15) +#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN (1 << 16) + +#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL (1 << 4) +#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS (1 << 2) + +#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK (0x3 << 16) + +#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN (1 << 1) +#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN (1 << 2) +#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN (1 << 3) +#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN (1 << 31) + +#define IXGBE_KX4_LINK_CNTL_1 0x4C +#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX (1 << 16) +#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 (1 << 17) +#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX (1 << 24) +#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX4 (1 << 25) +#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE (1 << 29) +#define IXGBE_KX4_LINK_CNTL_1_TETH_FORCE_LINK_UP (1 << 30) +#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART (1 << 31) + +#define IXGBE_SB_IOSF_INDIRECT_CTRL 0x00011144 +#define IXGBE_SB_IOSF_INDIRECT_DATA 0x00011148 + +#define IXGBE_SB_IOSF_CTRL_ADDR_SHIFT 0 +#define IXGBE_SB_IOSF_CTRL_ADDR_MASK 0xFF +#define IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT 18 +#define IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK \ + (0x3 << IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT) +#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT 20 +#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK \ + (0xFF << IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT) +#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT 28 +#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_MASK 0x7 +#define IXGBE_SB_IOSF_CTRL_BUSY_SHIFT 31 +#define IXGBE_SB_IOSF_CTRL_BUSY (1 << IXGBE_SB_IOSF_CTRL_BUSY_SHIFT) +#define IXGBE_SB_IOSF_TARGET_KR_PHY 0 +#define IXGBE_SB_IOSF_TARGET_KX4_PHY 1 +#define IXGBE_SB_IOSF_TARGET_KX4_PCS 2 + +#define IXGBE_NW_MNG_IF_SEL 0x00011178 +#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE (1 << 24) + #endif /* _IXGBE_TYPE_H_ */ diff --git a/sys/dev/ixgbe/ixgbe_vf.c b/sys/dev/ixgbe/ixgbe_vf.c index 2774f9b..c010cf4 100644 --- a/sys/dev/ixgbe/ixgbe_vf.c +++ b/sys/dev/ixgbe/ixgbe_vf.c @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2013, Intel Corporation + Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without @@ -89,6 +89,49 @@ s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw) return IXGBE_SUCCESS; } +/* ixgbe_virt_clr_reg - Set register to default (power on) state. 
+ * @hw: pointer to hardware structure + */ +static void ixgbe_virt_clr_reg(struct ixgbe_hw *hw) +{ + int i; + u32 vfsrrctl; + u32 vfdca_rxctrl; + u32 vfdca_txctrl; + + /* VRSRRCTL default values (BSIZEPACKET = 2048, BSIZEHEADER = 256) */ + vfsrrctl = 0x100 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT; + vfsrrctl |= 0x800 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; + + /* DCA_RXCTRL default value */ + vfdca_rxctrl = IXGBE_DCA_RXCTRL_DESC_RRO_EN | + IXGBE_DCA_RXCTRL_DATA_WRO_EN | + IXGBE_DCA_RXCTRL_HEAD_WRO_EN; + + /* DCA_TXCTRL default value */ + vfdca_txctrl = IXGBE_DCA_TXCTRL_DESC_RRO_EN | + IXGBE_DCA_TXCTRL_DESC_WRO_EN | + IXGBE_DCA_TXCTRL_DATA_RRO_EN; + + IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0); + + for (i = 0; i < 7; i++) { + IXGBE_WRITE_REG(hw, IXGBE_VFRDH(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), vfsrrctl); + IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(i), vfdca_rxctrl); + IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), vfdca_txctrl); + } + + IXGBE_WRITE_FLUSH(hw); +} + /** * ixgbe_start_hw_vf - Prepare hardware for Tx/Rx * @hw: pointer to hardware structure @@ -134,7 +177,7 @@ s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw) struct ixgbe_mbx_info *mbx = &hw->mbx; u32 timeout = IXGBE_VF_INIT_TIMEOUT; s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR; - u32 ctrl, msgbuf[IXGBE_VF_PERMADDR_MSG_LEN]; + u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN]; u8 *addr = (u8 *)(&msgbuf[1]); DEBUGFUNC("ixgbevf_reset_hw_vf"); @@ -145,8 +188,7 @@ s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw) DEBUGOUT("Issuing a function level reset to MAC\n"); - ctrl = IXGBE_VFREAD_REG(hw, IXGBE_VFCTRL) | IXGBE_CTRL_RST; - IXGBE_VFWRITE_REG(hw, IXGBE_VFCTRL, ctrl); + IXGBE_VFWRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST); IXGBE_WRITE_FLUSH(hw); msec_delay(50); @@ -160,6 +202,9 @@ s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw) if (!timeout) return IXGBE_ERR_RESET_FAILED; + /* Reset VF registers to initial values */ + ixgbe_virt_clr_reg(hw); + /* mailbox timeout can now become active */ mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT; @@ -224,6 +269,8 @@ s32 ixgbe_stop_adapter_vf(struct ixgbe_hw *hw) reg_val &= ~IXGBE_RXDCTL_ENABLE; IXGBE_VFWRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val); } + /* Clear packet split and pool config */ + IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0); /* flush all queues disables */ IXGBE_WRITE_FLUSH(hw); @@ -512,6 +559,21 @@ s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed, if (!(links_reg & IXGBE_LINKS_UP)) goto out; + /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs + * before the link status is correct + */ + if (mac->type == ixgbe_mac_82599_vf) { + int i; + + for (i = 0; i < 5; i++) { + usec_delay(100); + links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); + + if (!(links_reg & IXGBE_LINKS_UP)) + goto out; + } + } + switch (links_reg & IXGBE_LINKS_SPEED_82599) { case IXGBE_LINKS_SPEED_10G_82599: *speed = IXGBE_LINK_SPEED_10GB_FULL; @@ -607,4 +669,3 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, UNREFERENCED_3PARAMETER(hw, num_tcs, default_tc); return IXGBE_SUCCESS; } - diff --git a/sys/dev/ixgbe/ixgbe_vf.h b/sys/dev/ixgbe/ixgbe_vf.h index 8500768..edc8013 100644 --- a/sys/dev/ixgbe/ixgbe_vf.h +++ b/sys/dev/ixgbe/ixgbe_vf.h @@ -1,6 +1,6 @@ 
/****************************************************************************** - Copyright (c) 2001-2013, Intel Corporation + Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without @@ -84,6 +84,9 @@ #define IXGBE_VFGOTC_LSB 0x02020 #define IXGBE_VFGOTC_MSB 0x02024 #define IXGBE_VFMPRC 0x01034 +#define IXGBE_VFMRQC 0x3000 +#define IXGBE_VFRSSRK(x) (0x3100 + ((x) * 4)) +#define IXGBE_VFRETA(x) (0x3200 + ((x) * 4)) struct ixgbevf_hw_stats { diff --git a/sys/dev/ixgbe/ixgbe_x540.c b/sys/dev/ixgbe/ixgbe_x540.c index 9622f0e..ddf0674 100644 --- a/sys/dev/ixgbe/ixgbe_x540.c +++ b/sys/dev/ixgbe/ixgbe_x540.c @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2013, Intel Corporation + Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without @@ -38,6 +38,13 @@ #include "ixgbe_common.h" #include "ixgbe_phy.h" +#define IXGBE_X540_MAX_TX_QUEUES 128 +#define IXGBE_X540_MAX_RX_QUEUES 128 +#define IXGBE_X540_RAR_ENTRIES 128 +#define IXGBE_X540_MC_TBL_SIZE 128 +#define IXGBE_X540_VFT_TBL_SIZE 128 +#define IXGBE_X540_RX_PB_SIZE 384 + static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw); static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw); static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw); @@ -63,65 +70,67 @@ s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw) /* EEPROM */ - eeprom->ops.init_params = &ixgbe_init_eeprom_params_X540; - eeprom->ops.read = &ixgbe_read_eerd_X540; - eeprom->ops.read_buffer = &ixgbe_read_eerd_buffer_X540; - eeprom->ops.write = &ixgbe_write_eewr_X540; - eeprom->ops.write_buffer = &ixgbe_write_eewr_buffer_X540; - eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_X540; - eeprom->ops.validate_checksum = &ixgbe_validate_eeprom_checksum_X540; - eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_X540; + eeprom->ops.init_params = ixgbe_init_eeprom_params_X540; + eeprom->ops.read = ixgbe_read_eerd_X540; + eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_X540; + eeprom->ops.write = ixgbe_write_eewr_X540; + eeprom->ops.write_buffer = ixgbe_write_eewr_buffer_X540; + eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X540; + eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X540; + eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X540; /* PHY */ - phy->ops.init = &ixgbe_init_phy_ops_generic; + phy->ops.init = ixgbe_init_phy_ops_generic; phy->ops.reset = NULL; + if (!ixgbe_mng_present(hw)) + phy->ops.set_phy_power = ixgbe_set_copper_phy_power; /* MAC */ - mac->ops.reset_hw = &ixgbe_reset_hw_X540; - mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_gen2; - mac->ops.get_media_type = &ixgbe_get_media_type_X540; + mac->ops.reset_hw = ixgbe_reset_hw_X540; + mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_gen2; + mac->ops.get_media_type = ixgbe_get_media_type_X540; mac->ops.get_supported_physical_layer = - &ixgbe_get_supported_physical_layer_X540; + ixgbe_get_supported_physical_layer_X540; mac->ops.read_analog_reg8 = NULL; mac->ops.write_analog_reg8 = NULL; - mac->ops.start_hw = &ixgbe_start_hw_X540; - mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic; - mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic; - mac->ops.get_device_caps = &ixgbe_get_device_caps_generic; - mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic; - mac->ops.get_fcoe_boot_status = 
&ixgbe_get_fcoe_boot_status_generic; - mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540; - mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync_X540; - mac->ops.disable_sec_rx_path = &ixgbe_disable_sec_rx_path_generic; - mac->ops.enable_sec_rx_path = &ixgbe_enable_sec_rx_path_generic; + mac->ops.start_hw = ixgbe_start_hw_X540; + mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic; + mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic; + mac->ops.get_device_caps = ixgbe_get_device_caps_generic; + mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic; + mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic; + mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X540; + mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X540; + mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic; + mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic; /* RAR, Multicast, VLAN */ - mac->ops.set_vmdq = &ixgbe_set_vmdq_generic; - mac->ops.set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic; - mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic; - mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic; + mac->ops.set_vmdq = ixgbe_set_vmdq_generic; + mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic; + mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic; + mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic; mac->rar_highwater = 1; - mac->ops.set_vfta = &ixgbe_set_vfta_generic; - mac->ops.set_vlvf = &ixgbe_set_vlvf_generic; - mac->ops.clear_vfta = &ixgbe_clear_vfta_generic; - mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic; - mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing; - mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing; + mac->ops.set_vfta = ixgbe_set_vfta_generic; + mac->ops.set_vlvf = ixgbe_set_vlvf_generic; + mac->ops.clear_vfta = ixgbe_clear_vfta_generic; + mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic; + mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing; + mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing; /* Link */ mac->ops.get_link_capabilities = - &ixgbe_get_copper_link_capabilities_generic; - mac->ops.setup_link = &ixgbe_setup_mac_link_X540; - mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic; - mac->ops.check_link = &ixgbe_check_mac_link_generic; - - - mac->mcft_size = 128; - mac->vft_size = 128; - mac->num_rar_entries = 128; - mac->rx_pb_size = 384; - mac->max_tx_queues = 128; - mac->max_rx_queues = 128; + ixgbe_get_copper_link_capabilities_generic; + mac->ops.setup_link = ixgbe_setup_mac_link_X540; + mac->ops.setup_rxpba = ixgbe_set_rxpba_generic; + mac->ops.check_link = ixgbe_check_mac_link_generic; + + + mac->mcft_size = IXGBE_X540_MC_TBL_SIZE; + mac->vft_size = IXGBE_X540_VFT_TBL_SIZE; + mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES; + mac->rx_pb_size = IXGBE_X540_RX_PB_SIZE; + mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES; + mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES; mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); /* @@ -139,9 +148,9 @@ s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw) mac->ops.blink_led_stop = ixgbe_blink_led_stop_X540; /* Manageability interface */ - mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic; + mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic; - mac->ops.get_rtrup2tc = &ixgbe_dcb_get_rtrup2tc_generic; + mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic; return ret_val; } @@ -469,18 +478,20 @@ s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, * be used internally by function which utilize 
ixgbe_acquire_swfw_sync_X540. * * @hw: pointer to hardware structure + * + * Returns a negative error code on error, or the 16-bit checksum **/ -u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) +s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) { - u16 i; - u16 j; + u16 i, j; u16 checksum = 0; u16 length = 0; u16 pointer = 0; u16 word = 0; + u16 checksum_last_word = IXGBE_EEPROM_CHECKSUM; + u16 ptr_start = IXGBE_PCIE_ANALOG_PTR; - /* - * Do not use hw->eeprom.ops.read because we do not want to take + /* Do not use hw->eeprom.ops.read because we do not want to take * the synchronization semaphores here. Instead use * ixgbe_read_eerd_generic */ @@ -488,25 +499,25 @@ u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) DEBUGFUNC("ixgbe_calc_eeprom_checksum_X540"); /* Include 0x0-0x3F in the checksum */ - for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { - if (ixgbe_read_eerd_generic(hw, i, &word) != IXGBE_SUCCESS) { + for (i = 0; i <= checksum_last_word; i++) { + if (ixgbe_read_eerd_generic(hw, i, &word)) { DEBUGOUT("EEPROM read failed\n"); - break; + return IXGBE_ERR_EEPROM; } - checksum += word; + if (i != IXGBE_EEPROM_CHECKSUM) + checksum += word; } - /* - * Include all data from pointers 0x3, 0x6-0xE. This excludes the + /* Include all data from pointers 0x3, 0x6-0xE. This excludes the * FW, PHY module, and PCIe Expansion/Option ROM pointers. */ - for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) { + for (i = ptr_start; i < IXGBE_FW_PTR; i++) { if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR) continue; - if (ixgbe_read_eerd_generic(hw, i, &pointer) != IXGBE_SUCCESS) { + if (ixgbe_read_eerd_generic(hw, i, &pointer)) { DEBUGOUT("EEPROM read failed\n"); - break; + return IXGBE_ERR_EEPROM; } /* Skip pointer section if the pointer is invalid. */ @@ -514,10 +525,9 @@ u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) pointer >= hw->eeprom.word_size) continue; - if (ixgbe_read_eerd_generic(hw, pointer, &length) != - IXGBE_SUCCESS) { + if (ixgbe_read_eerd_generic(hw, pointer, &length)) { DEBUGOUT("EEPROM read failed\n"); - break; + return IXGBE_ERR_EEPROM; } /* Skip pointer section if length is invalid. */ @@ -525,11 +535,10 @@ u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) (pointer + length) >= hw->eeprom.word_size) continue; - for (j = pointer+1; j <= pointer+length; j++) { - if (ixgbe_read_eerd_generic(hw, j, &word) != - IXGBE_SUCCESS) { + for (j = pointer + 1; j <= pointer + length; j++) { + if (ixgbe_read_eerd_generic(hw, j, &word)) { DEBUGOUT("EEPROM read failed\n"); - break; + return IXGBE_ERR_EEPROM; } checksum += word; } @@ -537,7 +546,7 @@ u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) checksum = (u16)IXGBE_EEPROM_SUM - checksum; - return checksum; + return (s32)checksum; } /** @@ -557,48 +566,49 @@ s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, DEBUGFUNC("ixgbe_validate_eeprom_checksum_X540"); - /* - * Read the first word from the EEPROM. If this times out or fails, do + /* Read the first word from the EEPROM. 
If this times out or fails, do * not continue or we could be in for a very long wait while every * EEPROM read fails */ status = hw->eeprom.ops.read(hw, 0, &checksum); - - if (status != IXGBE_SUCCESS) { + if (status) { DEBUGOUT("EEPROM read failed\n"); - goto out; + return status; } - if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == - IXGBE_SUCCESS) { - checksum = hw->eeprom.ops.calc_checksum(hw); + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) + return IXGBE_ERR_SWFW_SYNC; - /* - * Do not use hw->eeprom.ops.read because we do not want to take - * the synchronization semaphores twice here. - */ - ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM, - &read_checksum); + status = hw->eeprom.ops.calc_checksum(hw); + if (status < 0) + goto out; - /* - * Verify read checksum from EEPROM is the same as - * calculated checksum - */ - if (read_checksum != checksum) { - status = IXGBE_ERR_EEPROM_CHECKSUM; - ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE, - "Invalid EEPROM checksum"); - } + checksum = (u16)(status & 0xffff); - /* If the user cares, return the calculated checksum */ - if (checksum_val) - *checksum_val = checksum; - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); - } else { - status = IXGBE_ERR_SWFW_SYNC; + /* Do not use hw->eeprom.ops.read because we do not want to take + * the synchronization semaphores twice here. + */ + status = ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM, + &read_checksum); + if (status) + goto out; + + /* Verify read checksum from EEPROM is the same as + * calculated checksum + */ + if (read_checksum != checksum) { + ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE, + "Invalid EEPROM checksum"); + status = IXGBE_ERR_EEPROM_CHECKSUM; } + /* If the user cares, return the calculated checksum */ + if (checksum_val) + *checksum_val = checksum; + out: + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + return status; } @@ -617,33 +627,36 @@ s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw) DEBUGFUNC("ixgbe_update_eeprom_checksum_X540"); - /* - * Read the first word from the EEPROM. If this times out or fails, do + /* Read the first word from the EEPROM. If this times out or fails, do * not continue or we could be in for a very long wait while every * EEPROM read fails */ status = hw->eeprom.ops.read(hw, 0, &checksum); - - if (status != IXGBE_SUCCESS) + if (status) { DEBUGOUT("EEPROM read failed\n"); + return status; + } - if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == - IXGBE_SUCCESS) { - checksum = hw->eeprom.ops.calc_checksum(hw); + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) + return IXGBE_ERR_SWFW_SYNC; - /* - * Do not use hw->eeprom.ops.write because we do not want to - * take the synchronization semaphores twice here. - */ - status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM, - checksum); + status = hw->eeprom.ops.calc_checksum(hw); + if (status < 0) + goto out; - if (status == IXGBE_SUCCESS) - status = ixgbe_update_flash_X540(hw); - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); - } else { - status = IXGBE_ERR_SWFW_SYNC; - } + checksum = (u16)(status & 0xffff); + + /* Do not use hw->eeprom.ops.write because we do not want to + * take the synchronization semaphores twice here. 
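+ * (A sketch of the sequence that follows, for orientation: the freshly
+ * calculated checksum is written back to the IXGBE_EEPROM_CHECKSUM word
+ * while the EEP_SM semaphore taken above is still held, then committed
+ * to flash via ixgbe_update_flash_X540().)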
+ */
+ status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM, checksum);
+ if (status)
+ goto out;
+
+ status = ixgbe_update_flash_X540(hw);
+
+out:
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
 return status;
 }
@@ -658,7 +671,7 @@ s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
 s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
 {
 u32 flup;
- s32 status = IXGBE_ERR_EEPROM;
+ s32 status;
 
 DEBUGFUNC("ixgbe_update_flash_X540");
 
@@ -716,7 +729,7 @@ static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
 status = IXGBE_SUCCESS;
 break;
 }
- usec_delay(5);
+ msec_delay(5);
 }
 
 if (i == IXGBE_FLUDONE_ATTEMPTS)
@@ -734,58 +747,55 @@ static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
 * Acquires the SWFW semaphore through the SW_FW_SYNC register for
 * the specified function (CSR, PHY0, PHY1, NVM, Flash)
 **/
-s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
+s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
 {
- u32 swfw_sync;
- u32 swmask = mask;
- u32 fwmask = mask << 5;
- u32 hwmask = 0;
+ u32 swmask = mask & IXGBE_GSSR_NVM_PHY_MASK;
+ u32 fwmask = swmask << 5;
+ u32 swi2c_mask = mask & IXGBE_GSSR_I2C_MASK;
 u32 timeout = 200;
+ u32 hwmask = 0;
+ u32 swfw_sync;
 u32 i;
- s32 ret_val = IXGBE_SUCCESS;
 
 DEBUGFUNC("ixgbe_acquire_swfw_sync_X540");
 
- if (swmask == IXGBE_GSSR_EEP_SM)
- hwmask = IXGBE_GSSR_FLASH_SM;
+ if (swmask & IXGBE_GSSR_EEP_SM)
+ hwmask |= IXGBE_GSSR_FLASH_SM;
 
 /* SW only mask doesn't have FW bit pair */
- if (swmask == IXGBE_GSSR_SW_MNG_SM)
- fwmask = 0;
+ if (mask & IXGBE_GSSR_SW_MNG_SM)
+ swmask |= IXGBE_GSSR_SW_MNG_SM;
+ swmask |= swi2c_mask;
+ fwmask |= swi2c_mask << 2;
 
 for (i = 0; i < timeout; i++) {
- /*
- * SW NVM semaphore bit is used for access to all
+ /* SW NVM semaphore bit is used for access to all
 * SW_FW_SYNC bits (not just NVM)
 */
- if (ixgbe_get_swfw_sync_semaphore(hw)) {
- ret_val = IXGBE_ERR_SWFW_SYNC;
- goto out;
- }
+ if (ixgbe_get_swfw_sync_semaphore(hw))
+ return IXGBE_ERR_SWFW_SYNC;
 
 swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
 if (!(swfw_sync & (fwmask | swmask | hwmask))) {
 swfw_sync |= swmask;
 IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
 ixgbe_release_swfw_sync_semaphore(hw);
- goto out;
- } else {
- /*
- * Firmware currently using resource (fwmask), hardware
- * currently using resource (hwmask), or other software
- * thread currently using resource (swmask)
- */
- ixgbe_release_swfw_sync_semaphore(hw);
 msec_delay(5);
+ return IXGBE_SUCCESS;
 }
+ /* Firmware currently using resource (fwmask), hardware
+ * currently using resource (hwmask), or other software
+ * thread currently using resource (swmask)
+ */
+ ixgbe_release_swfw_sync_semaphore(hw);
+ msec_delay(5);
 }
 
 /* Failed to get SW only semaphore */
 if (swmask == IXGBE_GSSR_SW_MNG_SM) {
- ret_val = IXGBE_ERR_SWFW_SYNC;
 ERROR_REPORT1(IXGBE_ERROR_POLLING,
 "Failed to get SW only semaphore");
- goto out;
+ return IXGBE_ERR_SWFW_SYNC;
 }
 
 /* If the resource is not released by the FW/HW the SW can assume that
@@ -793,32 +803,34 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
 * of the requested resource(s) while ignoring the corresponding FW/HW
 * bits in the SW_FW_SYNC register.
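 * (The two recovery cases below behave differently: bits held by FW/HW
 * are simply overridden, while bits held by another SW instance cause
 * all SW-owned flags to be cleared and the call to fail for a retry.)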
*/ + if (ixgbe_get_swfw_sync_semaphore(hw)) + return IXGBE_ERR_SWFW_SYNC; swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); if (swfw_sync & (fwmask | hwmask)) { - if (ixgbe_get_swfw_sync_semaphore(hw)) { - ret_val = IXGBE_ERR_SWFW_SYNC; - goto out; - } - swfw_sync |= swmask; IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync); ixgbe_release_swfw_sync_semaphore(hw); msec_delay(5); + return IXGBE_SUCCESS; } /* If the resource is not released by other SW the SW can assume that * the other SW malfunctions. In that case the SW should clear all SW * flags that it does not own and then repeat the whole process once * again. */ - else if (swfw_sync & swmask) { - ixgbe_release_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM | - IXGBE_GSSR_PHY0_SM | IXGBE_GSSR_PHY1_SM | - IXGBE_GSSR_MAC_CSR_SM); - ret_val = IXGBE_ERR_SWFW_SYNC; + if (swfw_sync & swmask) { + u32 rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM | + IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM; + + if (swi2c_mask) + rmask |= IXGBE_GSSR_I2C_MASK; + ixgbe_release_swfw_sync_X540(hw, rmask); + ixgbe_release_swfw_sync_semaphore(hw); + return IXGBE_ERR_SWFW_SYNC; } + ixgbe_release_swfw_sync_semaphore(hw); -out: - return ret_val; + return IXGBE_ERR_SWFW_SYNC; } /** @@ -829,13 +841,15 @@ out: * Releases the SWFW semaphore through the SW_FW_SYNC register * for the specified function (CSR, PHY0, PHY1, EVM, Flash) **/ -void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask) +void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) { + u32 swmask = mask & (IXGBE_GSSR_NVM_PHY_MASK | IXGBE_GSSR_SW_MNG_SM); u32 swfw_sync; - u32 swmask = mask; DEBUGFUNC("ixgbe_release_swfw_sync_X540"); + if (mask & IXGBE_GSSR_I2C_MASK) + swmask |= mask & IXGBE_GSSR_I2C_MASK; ixgbe_get_swfw_sync_semaphore(hw); swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); @@ -843,10 +857,11 @@ void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask) IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync); ixgbe_release_swfw_sync_semaphore(hw); + msec_delay(5); } /** - * ixgbe_get_nvm_semaphore - Get hardware semaphore + * ixgbe_get_swfw_sync_semaphore - Get hardware semaphore * @hw: pointer to hardware structure * * Sets the hardware semaphores so SW/FW can gain control of shared resources @@ -904,7 +919,7 @@ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw) } /** - * ixgbe_release_nvm_semaphore - Release hardware semaphore + * ixgbe_release_swfw_sync_semaphore - Release hardware semaphore * @hw: pointer to hardware structure * * This function clears hardware semaphore bits. @@ -917,14 +932,14 @@ static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw) /* Release both semaphores by writing 0 to the bits REGSMP and SMBI */ - swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); - swsm &= ~IXGBE_SWSM_SMBI; - IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); - swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); swsm &= ~IXGBE_SWFW_REGSMP; IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swsm); + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + swsm &= ~IXGBE_SWSM_SMBI; + IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); + IXGBE_WRITE_FLUSH(hw); } @@ -996,5 +1011,3 @@ s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index) return IXGBE_SUCCESS; } - - diff --git a/sys/dev/ixgbe/ixgbe_x540.h b/sys/dev/ixgbe/ixgbe_x540.h index c86048b..efd0d41 100644 --- a/sys/dev/ixgbe/ixgbe_x540.h +++ b/sys/dev/ixgbe/ixgbe_x540.h @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2013, Intel Corporation + Copyright (c) 2001-2015, Intel Corporation All rights reserved. 
Redistribution and use in source and binary forms, with or without @@ -55,11 +55,11 @@ s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data); s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw); s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, u16 *checksum_val); -u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw); +s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw); s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw); -s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask); -void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask); +s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask); +void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask); s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index); s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index); diff --git a/sys/dev/ixgbe/ixgbe_x550.c b/sys/dev/ixgbe/ixgbe_x550.c new file mode 100644 index 0000000..65daa17 --- /dev/null +++ b/sys/dev/ixgbe/ixgbe_x550.c @@ -0,0 +1,3191 @@ +/****************************************************************************** + + Copyright (c) 2001-2015, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + +******************************************************************************/ +/*$FreeBSD$*/ + +#include "ixgbe_x550.h" +#include "ixgbe_x540.h" +#include "ixgbe_type.h" +#include "ixgbe_api.h" +#include "ixgbe_common.h" +#include "ixgbe_phy.h" + +static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed); + +/** + * ixgbe_init_ops_X550 - Inits func ptrs and MAC type + * @hw: pointer to hardware structure + * + * Initialize the function pointers and assign the MAC type for X550. + * Does not touch the hardware. 
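+ *
+ * As a rough sketch of the layering: ixgbe_init_ops_X540() runs first so
+ * X550 inherits every X540 entry point, and only the ops that differ
+ * (DMA coalescing, EEE, anti-spoofing, MDD, host-interface EEPROM access)
+ * are overridden in the body below.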
+ **/ +s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + s32 ret_val; + + DEBUGFUNC("ixgbe_init_ops_X550"); + + ret_val = ixgbe_init_ops_X540(hw); + mac->ops.dmac_config = ixgbe_dmac_config_X550; + mac->ops.dmac_config_tcs = ixgbe_dmac_config_tcs_X550; + mac->ops.dmac_update_tcs = ixgbe_dmac_update_tcs_X550; + mac->ops.setup_eee = ixgbe_setup_eee_X550; + mac->ops.set_source_address_pruning = + ixgbe_set_source_address_pruning_X550; + mac->ops.set_ethertype_anti_spoofing = + ixgbe_set_ethertype_anti_spoofing_X550; + + mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic; + eeprom->ops.init_params = ixgbe_init_eeprom_params_X550; + eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550; + eeprom->ops.read = ixgbe_read_ee_hostif_X550; + eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550; + eeprom->ops.write = ixgbe_write_ee_hostif_X550; + eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550; + eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550; + eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550; + + mac->ops.disable_mdd = ixgbe_disable_mdd_X550; + mac->ops.enable_mdd = ixgbe_enable_mdd_X550; + mac->ops.mdd_event = ixgbe_mdd_event_X550; + mac->ops.restore_mdd_vf = ixgbe_restore_mdd_vf_X550; + mac->ops.disable_rx = ixgbe_disable_rx_x550; + if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) { + hw->mac.ops.led_on = ixgbe_led_on_t_X550em; + hw->mac.ops.led_off = ixgbe_led_off_t_X550em; + } + return ret_val; +} + +/** + * ixgbe_read_cs4227 - Read CS4227 register + * @hw: pointer to hardware structure + * @reg: register number to write + * @value: pointer to receive value read + * + * Returns status code + **/ +static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value) +{ + return ixgbe_read_i2c_combined_unlocked(hw, IXGBE_CS4227, reg, value); +} + +/** + * ixgbe_write_cs4227 - Write CS4227 register + * @hw: pointer to hardware structure + * @reg: register number to write + * @value: value to write to register + * + * Returns status code + **/ +static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value) +{ + return ixgbe_write_i2c_combined_unlocked(hw, IXGBE_CS4227, reg, value); +} + +/** + * ixgbe_get_cs4227_status - Return CS4227 status + * @hw: pointer to hardware structure + * + * Returns error if CS4227 not successfully initialized + **/ +static s32 ixgbe_get_cs4227_status(struct ixgbe_hw *hw) +{ + s32 status; + u16 value = 0; + u16 reg_slice, reg_val; + u8 retry; + + for (retry = 0; retry < IXGBE_CS4227_RETRIES; ++retry) { + status = ixgbe_read_cs4227(hw, IXGBE_CS4227_GLOBAL_ID_LSB, + &value); + if (status != IXGBE_SUCCESS) + return status; + if (value == IXGBE_CS4227_GLOBAL_ID_VALUE) + break; + msec_delay(IXGBE_CS4227_CHECK_DELAY); + } + if (value != IXGBE_CS4227_GLOBAL_ID_VALUE) + return IXGBE_ERR_PHY; + + status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value); + if (status != IXGBE_SUCCESS) + return status; + + /* If this is the first time after power-on, check the ucode. + * Otherwise, this will disrupt link on all ports. Because we + * can only do this the first time, we must check all ports, + * not just our own. 
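+ * As a sketch of the writes below: each EDC mode is committed by writing
+ * (mode << 1) | 0x1 into the LINE- and HOST-side SPARE24 slices, and the
+ * (1 << 12) offset appears to address the same slices on the other port.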
+ */
+ if (value != IXGBE_CS4227_SCRATCH_VALUE) {
+ reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB;
+ reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
+ status = ixgbe_write_cs4227(hw, reg_slice,
+ reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ reg_slice = IXGBE_CS4227_HOST_SPARE24_LSB;
+ reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
+ status = ixgbe_write_cs4227(hw, reg_slice,
+ reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + (1 << 12);
+ reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
+ status = ixgbe_write_cs4227(hw, reg_slice,
+ reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ reg_slice = IXGBE_CS4227_HOST_SPARE24_LSB + (1 << 12);
+ reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
+ status = ixgbe_write_cs4227(hw, reg_slice,
+ reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ msec_delay(10);
+ }
+
+ /* Verify that the ucode is operational on all ports. */
+ reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB;
+ reg_val = 0xFFFF;
+ status = ixgbe_read_cs4227(hw, reg_slice, &reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ if (reg_val != 0)
+ return IXGBE_ERR_PHY;
+
+ reg_slice = IXGBE_CS4227_HOST_SPARE24_LSB;
+ reg_val = 0xFFFF;
+ status = ixgbe_read_cs4227(hw, reg_slice, &reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ if (reg_val != 0)
+ return IXGBE_ERR_PHY;
+
+ reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + (1 << 12);
+ reg_val = 0xFFFF;
+ status = ixgbe_read_cs4227(hw, reg_slice, &reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ if (reg_val != 0)
+ return IXGBE_ERR_PHY;
+
+ reg_slice = IXGBE_CS4227_HOST_SPARE24_LSB + (1 << 12);
+ reg_val = 0xFFFF;
+ status = ixgbe_read_cs4227(hw, reg_slice, &reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ if (reg_val != 0)
+ return IXGBE_ERR_PHY;
+
+ /* Set scratch for next time. */
+ status = ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
+ IXGBE_CS4227_SCRATCH_VALUE);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ if (value != IXGBE_CS4227_SCRATCH_VALUE)
+ return IXGBE_ERR_PHY;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_read_pe - Read register from port expander
+ * @hw: pointer to hardware structure
+ * @reg: register number to read
+ * @value: pointer to receive read value
+ *
+ * Returns status code
+ **/
+static s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value)
+{
+ s32 status;
+
+ status = ixgbe_read_i2c_byte_unlocked(hw, reg, IXGBE_PE, value);
+ if (status != IXGBE_SUCCESS)
+ ERROR_REPORT2(IXGBE_ERROR_CAUTION,
+ "port expander access failed with %d\n", status);
+ return status;
+}
+
+/**
+ * ixgbe_write_pe - Write register to port expander
+ * @hw: pointer to hardware structure
+ * @reg: register number to write
+ * @value: value to write
+ *
+ * Returns status code
+ **/
+static s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value)
+{
+ s32 status;
+
+ status = ixgbe_write_i2c_byte_unlocked(hw, reg, IXGBE_PE, value);
+ if (status != IXGBE_SUCCESS)
+ ERROR_REPORT2(IXGBE_ERROR_CAUTION,
+ "port expander access failed with %d\n", status);
+ return status;
+}
+
+/**
+ * ixgbe_reset_cs4227 - Reset CS4227 using port expander
+ * @hw: pointer to hardware structure
+ *
+ * Returns error code
+ **/
+static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u8 reg;
+
+ status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ reg |= IXGBE_PE_BIT1;
+ status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ status = ixgbe_read_pe(hw, IXGBE_PE_CONFIG, &reg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ reg &= ~IXGBE_PE_BIT1;
+ status = ixgbe_write_pe(hw, IXGBE_PE_CONFIG, reg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ reg &= ~IXGBE_PE_BIT1;
+ status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ usec_delay(IXGBE_CS4227_RESET_HOLD);
+
+ status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ reg |= IXGBE_PE_BIT1;
+ status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ msec_delay(IXGBE_CS4227_RESET_DELAY);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_check_cs4227 - Check CS4227 and reset as needed
+ * @hw: pointer to hardware structure
+ **/
+static void ixgbe_check_cs4227(struct ixgbe_hw *hw)
+{
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ s32 status;
+ u8 retry;
+
+ for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
+ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (status != IXGBE_SUCCESS) {
+ ERROR_REPORT2(IXGBE_ERROR_CAUTION,
+ "semaphore failed with %d\n", status);
+ return;
+ }
+ status = ixgbe_get_cs4227_status(hw);
+ if (status == IXGBE_SUCCESS) {
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ msec_delay(hw->eeprom.semaphore_delay);
+ return;
+ }
+ ixgbe_reset_cs4227(hw);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ msec_delay(hw->eeprom.semaphore_delay);
+ }
+ ERROR_REPORT2(IXGBE_ERROR_CAUTION,
+ "Unable to initialize CS4227, err=%d\n", status);
+}
+
+/**
+ * ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control
+ * @hw: pointer to hardware structure
+ **/
+static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw) +{ + u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + + if (hw->bus.lan_id) { + esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1); + esdp |= IXGBE_ESDP_SDP1_DIR; + } + esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR); + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); +} + +/** + * ixgbe_identify_phy_x550em - Get PHY type based on device id + * @hw: pointer to hardware structure + * + * Returns error code + */ +static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) +{ + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_X_SFP: + /* set up for CS4227 usage */ + hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; + ixgbe_setup_mux_ctl(hw); + ixgbe_check_cs4227(hw); + + return ixgbe_identify_module_generic(hw); + break; + case IXGBE_DEV_ID_X550EM_X_KX4: + hw->phy.type = ixgbe_phy_x550em_kx4; + break; + case IXGBE_DEV_ID_X550EM_X_KR: + hw->phy.type = ixgbe_phy_x550em_kr; + break; + case IXGBE_DEV_ID_X550EM_X_1G_T: + case IXGBE_DEV_ID_X550EM_X_10G_T: + return ixgbe_identify_phy_generic(hw); + default: + break; + } + return IXGBE_SUCCESS; +} + +static s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data) +{ + UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, *phy_data); + return IXGBE_NOT_IMPLEMENTED; +} + +static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data) +{ + UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, phy_data); + return IXGBE_NOT_IMPLEMENTED; +} + +/** +* ixgbe_init_ops_X550EM - Inits func ptrs and MAC type +* @hw: pointer to hardware structure +* +* Initialize the function pointers and for MAC type X550EM. +* Does not touch the hardware. +**/ +s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + struct ixgbe_phy_info *phy = &hw->phy; + s32 ret_val; + + DEBUGFUNC("ixgbe_init_ops_X550EM"); + + /* Similar to X550 so start there. */ + ret_val = ixgbe_init_ops_X550(hw); + + /* Since this function eventually calls + * ixgbe_init_ops_540 by design, we are setting + * the pointers to NULL explicitly here to overwrite + * the values being set in the x540 function. + */ + + /* FCOE not supported in x550EM */ + mac->ops.get_san_mac_addr = NULL; + mac->ops.set_san_mac_addr = NULL; + mac->ops.get_wwn_prefix = NULL; + mac->ops.get_fcoe_boot_status = NULL; + + /* IPsec not supported in x550EM */ + mac->ops.disable_sec_rx_path = NULL; + mac->ops.enable_sec_rx_path = NULL; + + /* AUTOC register is not present in x550EM. 
*/ + mac->ops.prot_autoc_read = NULL; + mac->ops.prot_autoc_write = NULL; + + /* X550EM bus type is internal*/ + hw->bus.type = ixgbe_bus_type_internal; + mac->ops.get_bus_info = ixgbe_get_bus_info_X550em; + + mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550; + mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550; + mac->ops.get_media_type = ixgbe_get_media_type_X550em; + mac->ops.setup_sfp = ixgbe_setup_sfp_modules_X550em; + mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_X550em; + mac->ops.reset_hw = ixgbe_reset_hw_X550em; + mac->ops.get_supported_physical_layer = + ixgbe_get_supported_physical_layer_X550em; + + if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) + mac->ops.setup_fc = ixgbe_setup_fc_generic; + else + mac->ops.setup_fc = ixgbe_setup_fc_X550em; + + mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550em; + mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550em; + + if (hw->device_id != IXGBE_DEV_ID_X550EM_X_KR) + mac->ops.setup_eee = NULL; + + /* PHY */ + phy->ops.init = ixgbe_init_phy_ops_X550em; + phy->ops.identify = ixgbe_identify_phy_x550em; + if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper) + phy->ops.set_phy_power = NULL; + + + /* EEPROM */ + eeprom->ops.init_params = ixgbe_init_eeprom_params_X540; + eeprom->ops.read = ixgbe_read_ee_hostif_X550; + eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550; + eeprom->ops.write = ixgbe_write_ee_hostif_X550; + eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550; + eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550; + eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550; + eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550; + + return ret_val; +} + +/** + * ixgbe_dmac_config_X550 + * @hw: pointer to hardware structure + * + * Configure DMA coalescing. If enabling dmac, dmac is activated. + * When disabling dmac, dmac enable dmac bit is cleared. + **/ +s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw) +{ + u32 reg, high_pri_tc; + + DEBUGFUNC("ixgbe_dmac_config_X550"); + + /* Disable DMA coalescing before configuring */ + reg = IXGBE_READ_REG(hw, IXGBE_DMACR); + reg &= ~IXGBE_DMACR_DMAC_EN; + IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg); + + /* Disable DMA Coalescing if the watchdog timer is 0 */ + if (!hw->mac.dmac_config.watchdog_timer) + goto out; + + ixgbe_dmac_config_tcs_X550(hw); + + /* Configure DMA Coalescing Control Register */ + reg = IXGBE_READ_REG(hw, IXGBE_DMACR); + + /* Set the watchdog timer in units of 40.96 usec */ + reg &= ~IXGBE_DMACR_DMACWT_MASK; + reg |= (hw->mac.dmac_config.watchdog_timer * 100) / 4096; + + reg &= ~IXGBE_DMACR_HIGH_PRI_TC_MASK; + /* If fcoe is enabled, set high priority traffic class */ + if (hw->mac.dmac_config.fcoe_en) { + high_pri_tc = 1 << hw->mac.dmac_config.fcoe_tc; + reg |= ((high_pri_tc << IXGBE_DMACR_HIGH_PRI_TC_SHIFT) & + IXGBE_DMACR_HIGH_PRI_TC_MASK); + } + reg |= IXGBE_DMACR_EN_MNG_IND; + + /* Enable DMA coalescing after configuration */ + reg |= IXGBE_DMACR_DMAC_EN; + IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg); + +out: + return IXGBE_SUCCESS; +} + +/** + * ixgbe_dmac_config_tcs_X550 + * @hw: pointer to hardware structure + * + * Configure DMA coalescing threshold per TC. The dmac enable bit must + * be cleared before configuring. 
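+ *
+ * Roughly, the threshold programmed below for each enabled TC is
+ *
+ *	DMCTH(tc) = max(RXPBSIZE(tc) in KB - headroom(link_speed),
+ *			MAXFRS in KB)
+ *
+ * with the headroom constant chosen per link speed.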
+ **/ +s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw) +{ + u32 tc, reg, pb_headroom, rx_pb_size, maxframe_size_kb; + + DEBUGFUNC("ixgbe_dmac_config_tcs_X550"); + + /* Configure DMA coalescing enabled */ + switch (hw->mac.dmac_config.link_speed) { + case IXGBE_LINK_SPEED_100_FULL: + pb_headroom = IXGBE_DMACRXT_100M; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + pb_headroom = IXGBE_DMACRXT_1G; + break; + default: + pb_headroom = IXGBE_DMACRXT_10G; + break; + } + + maxframe_size_kb = ((IXGBE_READ_REG(hw, IXGBE_MAXFRS) >> + IXGBE_MHADD_MFS_SHIFT) / 1024); + + /* Set the per Rx packet buffer receive threshold */ + for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) { + reg = IXGBE_READ_REG(hw, IXGBE_DMCTH(tc)); + reg &= ~IXGBE_DMCTH_DMACRXT_MASK; + + if (tc < hw->mac.dmac_config.num_tcs) { + /* Get Rx PB size */ + rx_pb_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc)); + rx_pb_size = (rx_pb_size & IXGBE_RXPBSIZE_MASK) >> + IXGBE_RXPBSIZE_SHIFT; + + /* Calculate receive buffer threshold in kilobytes */ + if (rx_pb_size > pb_headroom) + rx_pb_size = rx_pb_size - pb_headroom; + else + rx_pb_size = 0; + + /* Minimum of MFS shall be set for DMCTH */ + reg |= (rx_pb_size > maxframe_size_kb) ? + rx_pb_size : maxframe_size_kb; + } + IXGBE_WRITE_REG(hw, IXGBE_DMCTH(tc), reg); + } + return IXGBE_SUCCESS; +} + +/** + * ixgbe_dmac_update_tcs_X550 + * @hw: pointer to hardware structure + * + * Disables dmac, updates per TC settings, and then enables dmac. + **/ +s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw) +{ + u32 reg; + + DEBUGFUNC("ixgbe_dmac_update_tcs_X550"); + + /* Disable DMA coalescing before configuring */ + reg = IXGBE_READ_REG(hw, IXGBE_DMACR); + reg &= ~IXGBE_DMACR_DMAC_EN; + IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg); + + ixgbe_dmac_config_tcs_X550(hw); + + /* Enable DMA coalescing after configuration */ + reg = IXGBE_READ_REG(hw, IXGBE_DMACR); + reg |= IXGBE_DMACR_DMAC_EN; + IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_init_eeprom_params_X550 - Initialize EEPROM params + * @hw: pointer to hardware structure + * + * Initializes the EEPROM parameters ixgbe_eeprom_info within the + * ixgbe_hw struct in order to set up EEPROM access. + **/ +s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw) +{ + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + u32 eec; + u16 eeprom_size; + + DEBUGFUNC("ixgbe_init_eeprom_params_X550"); + + if (eeprom->type == ixgbe_eeprom_uninitialized) { + eeprom->semaphore_delay = 10; + eeprom->type = ixgbe_flash; + + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> + IXGBE_EEC_SIZE_SHIFT); + eeprom->word_size = 1 << (eeprom_size + + IXGBE_EEPROM_WORD_SIZE_SHIFT); + + DEBUGOUT2("Eeprom params: type = %d, size = %d\n", + eeprom->type, eeprom->word_size); + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_setup_eee_X550 - Enable/disable EEE support + * @hw: pointer to the HW structure + * @enable_eee: boolean flag to enable EEE + * + * Enable/disable EEE based on enable_eee flag. + * Auto-negotiation must be started after BASE-T EEE bits in PHY register 7.3C + * are modified. 
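+ *
+ * Two paths are taken below: copper X550T parts advertise EEE through the
+ * MDIO auto-negotiation EEE advertisement register, while X550EM KR parts
+ * flip the EEE/FEC capability bits of the KRM link control word over the
+ * IOSF sideband (skipped on pre-rev1 parts, where EEE is not supported).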
+ * + **/ +s32 ixgbe_setup_eee_X550(struct ixgbe_hw *hw, bool enable_eee) +{ + u32 eeer; + u16 autoneg_eee_reg; + u32 link_reg; + s32 status; + u32 fuse; + + DEBUGFUNC("ixgbe_setup_eee_X550"); + + eeer = IXGBE_READ_REG(hw, IXGBE_EEER); + /* Enable or disable EEE per flag */ + if (enable_eee) { + eeer |= (IXGBE_EEER_TX_LPI_EN | IXGBE_EEER_RX_LPI_EN); + + if (hw->device_id == IXGBE_DEV_ID_X550T) { + /* Advertise EEE capability */ + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_eee_reg); + + autoneg_eee_reg |= (IXGBE_AUTO_NEG_10GBASE_EEE_ADVT | + IXGBE_AUTO_NEG_1000BASE_EEE_ADVT | + IXGBE_AUTO_NEG_100BASE_EEE_ADVT); + + hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_eee_reg); + } else if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) { + /* Not supported on first revision. */ + fuse = IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)); + if (!(fuse & IXGBE_FUSES0_REV1)) + return IXGBE_SUCCESS; + + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &link_reg); + if (status != IXGBE_SUCCESS) + return status; + + link_reg |= IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR | + IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX; + + /* Don't advertise FEC capability when EEE enabled. */ + link_reg &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC; + + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, link_reg); + if (status != IXGBE_SUCCESS) + return status; + } + } else { + eeer &= ~(IXGBE_EEER_TX_LPI_EN | IXGBE_EEER_RX_LPI_EN); + + if (hw->device_id == IXGBE_DEV_ID_X550T) { + /* Disable advertised EEE capability */ + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_eee_reg); + + autoneg_eee_reg &= ~(IXGBE_AUTO_NEG_10GBASE_EEE_ADVT | + IXGBE_AUTO_NEG_1000BASE_EEE_ADVT | + IXGBE_AUTO_NEG_100BASE_EEE_ADVT); + + hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_eee_reg); + } else if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) { + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &link_reg); + if (status != IXGBE_SUCCESS) + return status; + + link_reg &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR | + IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX); + + /* Advertise FEC capability when EEE is disabled. 
*/
+ link_reg |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC;
+
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, link_reg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ }
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_EEER, eeer);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_set_source_address_pruning_X550 - Enable/Disable source address pruning
+ * @hw: pointer to hardware structure
+ * @enable: enable or disable source address pruning
+ * @pool: Rx pool to set source address pruning for
+ **/
+void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable,
+ unsigned int pool)
+{
+ u64 pfflp;
+
+ /* max rx pool is 63 */
+ if (pool > 63)
+ return;
+
+ pfflp = (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPL);
+ pfflp |= (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPH) << 32;
+
+ if (enable)
+ pfflp |= (1ULL << pool);
+ else
+ pfflp &= ~(1ULL << pool);
+
+ IXGBE_WRITE_REG(hw, IXGBE_PFFLPL, (u32)pfflp);
+ IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32));
+}
+
+/**
+ * ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype anti-spoofing
+ * @hw: pointer to hardware structure
+ * @enable: enable or disable switch for Ethertype anti-spoofing
+ * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
+ *
+ **/
+void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
+ bool enable, int vf)
+{
+ int vf_target_reg = vf >> 3;
+ int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT;
+ u32 pfvfspoof;
+
+ DEBUGFUNC("ixgbe_set_ethertype_anti_spoofing_X550");
+
+ pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
+ if (enable)
+ pfvfspoof |= (1 << vf_target_shift);
+ else
+ pfvfspoof &= ~(1 << vf_target_shift);
+
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
+}
+
+/**
+ * ixgbe_iosf_wait - Wait for IOSF command completion
+ * @hw: pointer to hardware structure
+ * @ctrl: pointer to location to receive final IOSF control value
+ *
+ * Returns failing status on timeout
+ *
+ * Note: ctrl can be NULL if the IOSF control register value is not needed
+ **/
+static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
+{
+ u32 i, command;
+
+ /* Check every 10 usec to see if the address cycle completed.
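+ * (Callers that only need to wait before issuing a command pass a NULL
+ * ctrl, as the IOSF read/write helpers below do on their first wait;
+ * callers that decode completion status pass &command to capture the
+ * final control word.)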
+ * The SB IOSF BUSY bit will clear when the operation is + * complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL); + if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0) + break; + usec_delay(10); + } + if (ctrl) + *ctrl = command; + if (i == IXGBE_MDIO_COMMAND_TIMEOUT) { + ERROR_REPORT1(IXGBE_ERROR_POLLING, "Wait timed out\n"); + return IXGBE_ERR_PHY; + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register of the IOSF + * device + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 3 bit device type + * @data: Data to write to the register + **/ +s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 data) +{ + u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM; + u32 command, error; + s32 ret; + + ret = ixgbe_acquire_swfw_semaphore(hw, gssr); + if (ret != IXGBE_SUCCESS) + return ret; + + ret = ixgbe_iosf_wait(hw, NULL); + if (ret != IXGBE_SUCCESS) + goto out; + + command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) | + (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT)); + + /* Write IOSF control register */ + IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command); + + /* Write IOSF data register */ + IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data); + + ret = ixgbe_iosf_wait(hw, &command); + + if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) { + error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >> + IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT; + ERROR_REPORT2(IXGBE_ERROR_POLLING, + "Failed to write, error %x\n", error); + ret = IXGBE_ERR_PHY; + } + +out: + ixgbe_release_swfw_semaphore(hw, gssr); + return ret; +} + +/** + * ixgbe_read_iosf_sb_reg_x550 - Writes a value to specified register of the IOSF + * device + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 3 bit device type + * @phy_data: Pointer to read data from the register + **/ +s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 *data) +{ + u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM; + u32 command, error; + s32 ret; + + ret = ixgbe_acquire_swfw_semaphore(hw, gssr); + if (ret != IXGBE_SUCCESS) + return ret; + + ret = ixgbe_iosf_wait(hw, NULL); + if (ret != IXGBE_SUCCESS) + goto out; + + command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) | + (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT)); + + /* Write IOSF control register */ + IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command); + + ret = ixgbe_iosf_wait(hw, &command); + + if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) { + error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >> + IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT; + ERROR_REPORT2(IXGBE_ERROR_POLLING, + "Failed to read, error %x\n", error); + ret = IXGBE_ERR_PHY; + } + + if (ret == IXGBE_SUCCESS) + *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA); + +out: + ixgbe_release_swfw_semaphore(hw, gssr); + return ret; +} + +/** + * ixgbe_disable_mdd_X550 + * @hw: pointer to hardware structure + * + * Disable malicious driver detection + **/ +void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw) +{ + u32 reg; + + DEBUGFUNC("ixgbe_disable_mdd_X550"); + + /* Disable MDD for TX DMA and interrupt */ + reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + reg &= ~(IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN); + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); + + /* Disable MDD for RX and interrupt */ + reg = 
IXGBE_READ_REG(hw, IXGBE_RDRXCTL); + reg &= ~(IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN); + IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg); +} + +/** + * ixgbe_enable_mdd_X550 + * @hw: pointer to hardware structure + * + * Enable malicious driver detection + **/ +void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw) +{ + u32 reg; + + DEBUGFUNC("ixgbe_enable_mdd_X550"); + + /* Enable MDD for TX DMA and interrupt */ + reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + reg |= (IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN); + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); + + /* Enable MDD for RX and interrupt */ + reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); + reg |= (IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN); + IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg); +} + +/** + * ixgbe_restore_mdd_vf_X550 + * @hw: pointer to hardware structure + * @vf: vf index + * + * Restore VF that was disabled during malicious driver detection event + **/ +void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf) +{ + u32 idx, reg, num_qs, start_q, bitmask; + + DEBUGFUNC("ixgbe_restore_mdd_vf_X550"); + + /* Map VF to queues */ + reg = IXGBE_READ_REG(hw, IXGBE_MRQC); + switch (reg & IXGBE_MRQC_MRQE_MASK) { + case IXGBE_MRQC_VMDQRT8TCEN: + num_qs = 8; /* 16 VFs / pools */ + bitmask = 0x000000FF; + break; + case IXGBE_MRQC_VMDQRSS32EN: + case IXGBE_MRQC_VMDQRT4TCEN: + num_qs = 4; /* 32 VFs / pools */ + bitmask = 0x0000000F; + break; + default: /* 64 VFs / pools */ + num_qs = 2; + bitmask = 0x00000003; + break; + } + start_q = vf * num_qs; + + /* Release vf's queues by clearing WQBR_TX and WQBR_RX (RW1C) */ + idx = start_q / 32; + reg = 0; + reg |= (bitmask << (start_q % 32)); + IXGBE_WRITE_REG(hw, IXGBE_WQBR_TX(idx), reg); + IXGBE_WRITE_REG(hw, IXGBE_WQBR_RX(idx), reg); +} + +/** + * ixgbe_mdd_event_X550 + * @hw: pointer to hardware structure + * @vf_bitmap: vf bitmap of malicious vfs + * + * Handle malicious driver detection event. + **/ +void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap) +{ + u32 wqbr; + u32 i, j, reg, q, shift, vf, idx; + + DEBUGFUNC("ixgbe_mdd_event_X550"); + + /* figure out pool size for mapping to vf's */ + reg = IXGBE_READ_REG(hw, IXGBE_MRQC); + switch (reg & IXGBE_MRQC_MRQE_MASK) { + case IXGBE_MRQC_VMDQRT8TCEN: + shift = 3; /* 16 VFs / pools */ + break; + case IXGBE_MRQC_VMDQRSS32EN: + case IXGBE_MRQC_VMDQRT4TCEN: + shift = 2; /* 32 VFs / pools */ + break; + default: + shift = 1; /* 64 VFs / pools */ + break; + } + + /* Read WQBR_TX and WQBR_RX and check for malicious queues */ + for (i = 0; i < 4; i++) { + wqbr = IXGBE_READ_REG(hw, IXGBE_WQBR_TX(i)); + wqbr |= IXGBE_READ_REG(hw, IXGBE_WQBR_RX(i)); + + if (!wqbr) + continue; + + /* Get malicious queue */ + for (j = 0; j < 32 && wqbr; j++) { + + if (!(wqbr & (1 << j))) + continue; + + /* Get queue from bitmask */ + q = j + (i * 32); + + /* Map queue to vf */ + vf = (q >> shift); + + /* Set vf bit in vf_bitmap */ + idx = vf / 32; + vf_bitmap[idx] |= (1 << (vf % 32)); + wqbr &= ~(1 << j); + } + } +} + +/** + * ixgbe_get_media_type_X550em - Get media type + * @hw: pointer to hardware structure + * + * Returns the media type (fiber, copper, backplane) + */ +enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw) +{ + enum ixgbe_media_type media_type; + + DEBUGFUNC("ixgbe_get_media_type_X550em"); + + /* Detect if there is a copper PHY attached. 
*/ + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_X_KR: + case IXGBE_DEV_ID_X550EM_X_KX4: + media_type = ixgbe_media_type_backplane; + break; + case IXGBE_DEV_ID_X550EM_X_SFP: + media_type = ixgbe_media_type_fiber; + break; + case IXGBE_DEV_ID_X550EM_X_1G_T: + case IXGBE_DEV_ID_X550EM_X_10G_T: + media_type = ixgbe_media_type_copper; + break; + default: + media_type = ixgbe_media_type_unknown; + break; + } + return media_type; +} + +/** + * ixgbe_supported_sfp_modules_X550em - Check if SFP module type is supported + * @hw: pointer to hardware structure + * @linear: TRUE if SFP module is linear + */ +static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear) +{ + DEBUGFUNC("ixgbe_supported_sfp_modules_X550em"); + + switch (hw->phy.sfp_type) { + case ixgbe_sfp_type_not_present: + return IXGBE_ERR_SFP_NOT_PRESENT; + case ixgbe_sfp_type_da_cu_core0: + case ixgbe_sfp_type_da_cu_core1: + *linear = TRUE; + break; + case ixgbe_sfp_type_srlr_core0: + case ixgbe_sfp_type_srlr_core1: + case ixgbe_sfp_type_da_act_lmt_core0: + case ixgbe_sfp_type_da_act_lmt_core1: + case ixgbe_sfp_type_1g_sx_core0: + case ixgbe_sfp_type_1g_sx_core1: + case ixgbe_sfp_type_1g_lx_core0: + case ixgbe_sfp_type_1g_lx_core1: + *linear = FALSE; + break; + case ixgbe_sfp_type_unknown: + case ixgbe_sfp_type_1g_cu_core0: + case ixgbe_sfp_type_1g_cu_core1: + default: + return IXGBE_ERR_SFP_NOT_SUPPORTED; + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_identify_sfp_module_X550em - Identifies SFP modules + * @hw: pointer to hardware structure + * + * Searches for and identifies the SFP module and assigns appropriate PHY type. + **/ +s32 ixgbe_identify_sfp_module_X550em(struct ixgbe_hw *hw) +{ + s32 status; + bool linear; + + DEBUGFUNC("ixgbe_identify_sfp_module_X550em"); + + status = ixgbe_identify_module_generic(hw); + + if (status != IXGBE_SUCCESS) + return status; + + /* Check if SFP module is supported */ + status = ixgbe_supported_sfp_modules_X550em(hw, &linear); + + return status; +} + +/** + * ixgbe_setup_sfp_modules_X550em - Setup MAC link ops + * @hw: pointer to hardware structure + */ +s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw) +{ + s32 status; + bool linear; + + DEBUGFUNC("ixgbe_setup_sfp_modules_X550em"); + + /* Check if SFP module is supported */ + status = ixgbe_supported_sfp_modules_X550em(hw, &linear); + + if (status != IXGBE_SUCCESS) + return status; + + ixgbe_init_mac_link_ops_X550em(hw); + hw->phy.ops.reset = NULL; + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_init_mac_link_ops_X550em - init mac link function pointers + * @hw: pointer to hardware structure + */ +void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + + DEBUGFUNC("ixgbe_init_mac_link_ops_X550em"); + + switch (hw->mac.ops.get_media_type(hw)) { + case ixgbe_media_type_fiber: + /* CS4227 does not support autoneg, so disable the laser control + * functions for SFP+ fiber + */ + mac->ops.disable_tx_laser = NULL; + mac->ops.enable_tx_laser = NULL; + mac->ops.flap_tx_laser = NULL; + mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber; + mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_x550em; + mac->ops.set_rate_select_speed = + ixgbe_set_soft_rate_select_speed; + break; + case ixgbe_media_type_copper: + mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em; + mac->ops.check_link = ixgbe_check_link_t_X550em; + break; + default: + break; + } +} + +/** + * ixgbe_get_link_capabilities_x550em - Determines link capabilities + * @hw: pointer to hardware structure + 
* @speed: pointer to link speed
+ * @autoneg: TRUE when autoneg or autotry is enabled
+ */
+s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ DEBUGFUNC("ixgbe_get_link_capabilities_X550em");
+
+ /* SFP */
+ if (hw->phy.media_type == ixgbe_media_type_fiber) {
+
+ /* CS4227 SFP must not enable auto-negotiation */
+ *autoneg = FALSE;
+
+ /* Check if 1G SFP module. */
+ if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1
+ || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) {
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ return IXGBE_SUCCESS;
+ }
+
+ /* Link capabilities are based on SFP */
+ if (hw->phy.multispeed_fiber)
+ *speed = IXGBE_LINK_SPEED_10GB_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ else
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ } else {
+ *speed = IXGBE_LINK_SPEED_10GB_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ *autoneg = TRUE;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_get_lasi_ext_t_x550em - Determine external Base T PHY interrupt cause
+ * @hw: pointer to hardware structure
+ * @lsc: pointer to boolean flag which indicates whether external Base T
+ * PHY interrupt is lsc
+ *
+ * Determine if external Base T PHY interrupt cause is high temperature
+ * failure alarm or link status change.
+ *
+ * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
+ * failure alarm, else return PHY access status.
+ */
+static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
+{
+ u32 status;
+ u16 reg;
+
+ *lsc = FALSE;
+
+ /* Vendor alarm triggered */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
+
+ if (status != IXGBE_SUCCESS ||
+ !(reg & IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN))
+ return status;
+
+ /* Vendor Auto-Neg alarm triggered or Global alarm 1 triggered */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
+
+ if (status != IXGBE_SUCCESS ||
+ !(reg & (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
+ IXGBE_MDIO_GLOBAL_ALARM_1_INT)))
+ return status;
+
+ /* High temperature failure alarm triggered */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* If high temperature failure, then return over temp error and exit */
+ if (reg & IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL) {
+ /* power down the PHY in case the PHY FW didn't already */
+ ixgbe_set_copper_phy_power(hw, FALSE);
+ return IXGBE_ERR_OVERTEMP;
+ }
+
+ /* Vendor alarm 2 triggered */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
+
+ if (status != IXGBE_SUCCESS ||
+ !(reg & IXGBE_MDIO_GLOBAL_STD_ALM2_INT))
+ return status;
+
+ /* link connect/disconnect event occurred */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Indicate LSC */
+ if (reg & IXGBE_MDIO_AUTO_NEG_VEN_LSC)
+ *lsc = TRUE;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_enable_lasi_ext_t_x550em - Enable external Base T PHY interrupts
+ * @hw: pointer to hardware structure
+ *
+ * Enable link status change and temperature failure alarm for the external
+ * Base T PHY
+ *
+ * Returns PHY access status
+ */
+static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
+{
+ u32 status;
+ u16 reg;
+ bool lsc;
+
+ /* Clear interrupt flags */
+ status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
+
+ /* Enable link status change alarm */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN;
+
+ status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Enables high temperature failure alarm */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ reg |= IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN;
+
+ status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Enable vendor Auto-Neg alarm and Global Interrupt Mask 1 alarm */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ reg |= (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
+ IXGBE_MDIO_GLOBAL_ALARM_1_INT);
+
+ status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Enable chip-wide vendor alarm */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ reg |= IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN;
+
+ status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ reg);
+
+ return status;
+}
+
+/**
+ * ixgbe_setup_kr_speed_x550em - Configure the KR PHY for link speed.
+ * @hw: pointer to hardware structure
+ * @speed: link speed
+ *
+ * Configures the integrated KR PHY.
+ **/
+static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed)
+{
+ s32 status;
+ u32 reg_val;
+
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status)
+ return status;
+
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+ reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ |
+ IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC);
+ reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
+ IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
+
+ /* Advertise 10G support. */
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR;
+
+ /* Advertise 1G support. */
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX;
+
+ /* Restart auto-negotiation. */
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ return status;
+}
+
+/**
+ * ixgbe_init_phy_ops_X550em - PHY/SFP specific init
+ * @hw: pointer to hardware structure
+ *
+ * Initialize any function pointers that were not able to be
+ * set during init_shared_code because the PHY/SFP type was
+ * not known. Perform the SFP init if necessary.
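+ *
+ * A sketch of the flow below: fiber parts claim the shared I2C semaphore
+ * and set up the mux first, the KR link is pre-configured when the
+ * manageability block reports the internal PHY in KR mode, and the
+ * identify step then hooks the per-PHY-type setup/read/write pointers.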
+ */ +s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) +{ + struct ixgbe_phy_info *phy = &hw->phy; + ixgbe_link_speed speed; + s32 ret_val; + + DEBUGFUNC("ixgbe_init_phy_ops_X550em"); + + hw->mac.ops.set_lan_id(hw); + + if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) { + phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; + ixgbe_setup_mux_ctl(hw); + + /* Save NW management interface connected on board. This is used + * to determine internal PHY mode. + */ + phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL); + + /* If internal PHY mode is KR, then initialize KR link */ + if (phy->nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE) { + speed = IXGBE_LINK_SPEED_10GB_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + ret_val = ixgbe_setup_kr_speed_x550em(hw, speed); + } + + phy->ops.identify_sfp = ixgbe_identify_sfp_module_X550em; + } + + /* Identify the PHY or SFP module */ + ret_val = phy->ops.identify(hw); + if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED) + return ret_val; + + /* Setup function pointers based on detected hardware */ + ixgbe_init_mac_link_ops_X550em(hw); + if (phy->sfp_type != ixgbe_sfp_type_unknown) + phy->ops.reset = NULL; + + /* Set function pointers based on phy type */ + switch (hw->phy.type) { + case ixgbe_phy_x550em_kx4: + phy->ops.setup_link = ixgbe_setup_kx4_x550em; + phy->ops.read_reg = ixgbe_read_phy_reg_x550em; + phy->ops.write_reg = ixgbe_write_phy_reg_x550em; + break; + case ixgbe_phy_x550em_kr: + phy->ops.setup_link = ixgbe_setup_kr_x550em; + phy->ops.read_reg = ixgbe_read_phy_reg_x550em; + phy->ops.write_reg = ixgbe_write_phy_reg_x550em; + break; + case ixgbe_phy_x550em_ext_t: + /* Save NW management interface connected on board. This is used + * to determine internal PHY mode + */ + phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL); + + /* If internal link mode is XFI, then setup iXFI internal link, + * else setup KR now. + */ + if (!(phy->nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { + phy->ops.setup_internal_link = + ixgbe_setup_internal_phy_t_x550em; + } else { + speed = IXGBE_LINK_SPEED_10GB_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + ret_val = ixgbe_setup_kr_speed_x550em(hw, speed); + } + + phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em; + phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em; + phy->ops.reset = ixgbe_reset_phy_t_X550em; + break; + default: + break; + } + return ret_val; +} + +/** + * ixgbe_reset_hw_X550em - Perform hardware reset + * @hw: pointer to hardware structure + * + * Resets the hardware by resetting the transmit and receive units, masks + * and clears all interrupts, performs a PHY reset, and performs a link (MAC) + * reset. + */ +s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) +{ + ixgbe_link_speed link_speed; + s32 status; + u32 ctrl = 0; + u32 i; + u32 hlreg0; + bool link_up = FALSE; + + DEBUGFUNC("ixgbe_reset_hw_X550em"); + + /* Call adapter stop to disable Tx/Rx and clear interrupts */ + status = hw->mac.ops.stop_adapter(hw); + if (status != IXGBE_SUCCESS) + return status; + + /* flush pending Tx transactions */ + ixgbe_clear_tx_pending(hw); + + /* PHY ops must be identified and initialized prior to reset */ + + /* Identify PHY and related function pointers */ + status = hw->phy.ops.init(hw); + + if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) + return status; + + /* start the external PHY */ + if (hw->phy.type == ixgbe_phy_x550em_ext_t) { + status = ixgbe_init_ext_t_x550em(hw); + if (status) + return status; + } + + /* Setup SFP module if there is one present. 
*/ + if (hw->phy.sfp_setup_needed) { + status = hw->mac.ops.setup_sfp(hw); + hw->phy.sfp_setup_needed = FALSE; + } + + if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) + return status; + + /* Reset PHY */ + if (!hw->phy.reset_disable && hw->phy.ops.reset) + hw->phy.ops.reset(hw); + +mac_reset_top: + /* Issue global reset to the MAC. Needs to be SW reset if link is up. + * If link reset is used when link is up, it might reset the PHY when + * mng is using it. If link is down or the flag to force full link + * reset is set, then perform link reset. + */ + ctrl = IXGBE_CTRL_LNK_RST; + if (!hw->force_full_reset) { + hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE); + if (link_up) + ctrl = IXGBE_CTRL_RST; + } + + ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); + IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); + IXGBE_WRITE_FLUSH(hw); + + /* Poll for reset bit to self-clear meaning reset is complete */ + for (i = 0; i < 10; i++) { + usec_delay(1); + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); + if (!(ctrl & IXGBE_CTRL_RST_MASK)) + break; + } + + if (ctrl & IXGBE_CTRL_RST_MASK) { + status = IXGBE_ERR_RESET_FAILED; + DEBUGOUT("Reset polling failed to complete.\n"); + } + + msec_delay(50); + + /* Double resets are required for recovery from certain error + * conditions. Between resets, it is necessary to stall to + * allow time for any pending HW events to complete. + */ + if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { + hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; + goto mac_reset_top; + } + + /* Store the permanent mac address */ + hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); + + /* Store MAC address from RAR0, clear receive address registers, and + * clear the multicast table. Also reset num_rar_entries to 128, + * since we modify this value when programming the SAN MAC address. + */ + hw->mac.num_rar_entries = 128; + hw->mac.ops.init_rx_addrs(hw); + + if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) { + /* Config MDIO clock speed. */ + hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); + hlreg0 &= ~IXGBE_HLREG0_MDCSPD; + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); + } + + if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) + ixgbe_setup_mux_ctl(hw); + + return status; +} + +/** + * ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY. + * @hw: pointer to hardware structure + */ +s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw) +{ + u32 status; + u16 reg; + + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_TX_VENDOR_ALARMS_3, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &reg); + + if (status != IXGBE_SUCCESS) + return status; + + /* If PHY FW reset completed bit is set then this is the first + * SW instance after a power on so the PHY FW must be un-stalled. + */ + if (reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) { + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_GLOBAL_RES_PR_10, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + &reg); + + if (status != IXGBE_SUCCESS) + return status; + + reg &= ~IXGBE_MDIO_POWER_UP_STALL; + + status = hw->phy.ops.write_reg(hw, + IXGBE_MDIO_GLOBAL_RES_PR_10, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + reg); + + if (status != IXGBE_SUCCESS) + return status; + } + + return status; +} + +/** + * ixgbe_setup_kr_x550em - Configure the KR PHY. + * @hw: pointer to hardware structure + * + * Configures the integrated KR PHY. + **/ +s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw) +{ + return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised); +} + +/** + * ixgbe_setup_kx4_x550em - Configure the KX4 PHY. + * @hw: pointer to hardware structure + * + * Configures the integrated KX4 PHY. 
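+ * + * Like the KR path, this is a read-modify-write over the IOSF sideband; + * the recurring shape throughout this file (editorial sketch only, error + * checks elided) is: + * + * status = ixgbe_read_iosf_sb_reg_x550(hw, reg, target, &val); + * val = (val & ~clear_bits) | set_bits; + * status = ixgbe_write_iosf_sb_reg_x550(hw, reg, target, val);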
+ **/ +s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw) +{ + s32 status; + u32 reg_val; + + status = ixgbe_read_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1, + IXGBE_SB_IOSF_TARGET_KX4_PCS, &reg_val); + if (status) + return status; + + reg_val &= ~(IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 | + IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX); + + reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE; + + /* Advertise 10G support. */ + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) + reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4; + + /* Advertise 1G support. */ + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) + reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX; + + /* Restart auto-negotiation. */ + reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART; + status = ixgbe_write_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1, + IXGBE_SB_IOSF_TARGET_KX4_PCS, reg_val); + + return status; +} + +/** + * ixgbe_setup_mac_link_sfp_x550em - Set up the internal/external PHY for SFP + * @hw: pointer to hardware structure + * @speed: the link speed to use + * @autoneg_wait_to_complete: unused in this operation + * + * Configure the external PHY and the integrated KR PHY for SFP support. + **/ +s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + s32 ret_val; + u16 reg_slice, reg_val; + bool setup_linear = FALSE; + UNREFERENCED_1PARAMETER(autoneg_wait_to_complete); + + /* Check if SFP module is supported and linear */ + ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); + + /* If no SFP module present, then return success. Return success since + * there is no reason to configure CS4227 and SFP not present error is + * not accepted in the setup MAC link flow. + */ + if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT) + return IXGBE_SUCCESS; + + if (ret_val != IXGBE_SUCCESS) + return ret_val; + + /* Configure CS4227 for LINE connection rate then type. */ + reg_slice = IXGBE_CS4227_LINE_SPARE22_MSB + (hw->bus.lan_id << 12); + reg_val = (speed & IXGBE_LINK_SPEED_10GB_FULL) ? 0 : 0x8000; + ret_val = ixgbe_write_i2c_combined(hw, IXGBE_CS4227, reg_slice, + reg_val); + + reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + (hw->bus.lan_id << 12); + if (setup_linear) + reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1; + else + reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1; + ret_val = ixgbe_write_i2c_combined(hw, IXGBE_CS4227, reg_slice, + reg_val); + + /* Configure CS4227 for HOST connection rate then type. */ + reg_slice = IXGBE_CS4227_HOST_SPARE22_MSB + (hw->bus.lan_id << 12); + reg_val = (speed & IXGBE_LINK_SPEED_10GB_FULL) ? 0 : 0x8000; + ret_val = ixgbe_write_i2c_combined(hw, IXGBE_CS4227, reg_slice, + reg_val); + + reg_slice = IXGBE_CS4227_HOST_SPARE24_LSB + (hw->bus.lan_id << 12); + if (setup_linear) + reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1; + else + reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1; + ret_val = ixgbe_write_i2c_combined(hw, IXGBE_CS4227, reg_slice, + reg_val); + + /* If internal link mode is XFI, then setup XFI internal link. */ + if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) + ret_val = ixgbe_setup_ixfi_x550em(hw, &speed); + + return ret_val; +} + +/** + * ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode. + * @hw: pointer to hardware structure + * @speed: the link speed to force + * + * Configures the integrated KR PHY to use iXFI mode. Used to connect an + * internal and external PHY at a specific speed, without autonegotiation. 
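+ * + * Usage sketch (editorial, not part of this commit) -- callers pass the + * speed the external link actually trained to: + * + * ixgbe_link_speed force_speed = IXGBE_LINK_SPEED_10GB_FULL; + * status = ixgbe_setup_ixfi_x550em(hw, &force_speed);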
+ **/ +static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed) +{ + s32 status; + u32 reg_val; + + /* Disable AN and force speed to 10G Serial. */ + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); + if (status != IXGBE_SUCCESS) + return status; + + reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; + reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; + + /* Select forced link speed for internal PHY. */ + switch (*speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; + break; + default: + /* Other link speeds are not supported by internal KR PHY. */ + return IXGBE_ERR_LINK_SETUP; + } + + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status != IXGBE_SUCCESS) + return status; + + /* Disable training protocol FSM. */ + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); + if (status != IXGBE_SUCCESS) + return status; + reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status != IXGBE_SUCCESS) + return status; + + /* Disable Flex from training TXFFE. */ + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); + if (status != IXGBE_SUCCESS) + return status; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status != IXGBE_SUCCESS) + return status; + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); + if (status != IXGBE_SUCCESS) + return status; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status != IXGBE_SUCCESS) + return status; + + /* Enable override for coefficients. */ + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); + if (status != IXGBE_SUCCESS) + return status; + reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN; + reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN; + reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN; + reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status != IXGBE_SUCCESS) + return status; + + /* Toggle port SW reset by AN reset. 
*/ + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); + if (status != IXGBE_SUCCESS) + return status; + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + + return status; +} + +/** + * ixgbe_ext_phy_t_x550em_get_link - Get ext phy link status + * @hw: address of hardware structure + * @link_up: address of boolean to indicate link status + * + * Returns error code if unable to get link status. + */ +static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up) +{ + u32 ret; + u16 autoneg_status; + + *link_up = FALSE; + + /* read this twice back to back to indicate current status */ + ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_status); + if (ret != IXGBE_SUCCESS) + return ret; + + ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_status); + if (ret != IXGBE_SUCCESS) + return ret; + + *link_up = !!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_setup_internal_phy_t_x550em - Configure KR PHY to X557 link + * @hw: pointer to hardware structure + * + * Configures the link between the integrated KR PHY and the external X557 PHY + * The driver will call this function when it gets a link status change + * interrupt from the X557 PHY. This function configures the link speed + * between the PHYs to match the link speed of the BASE-T link. + * + * A return of a non-zero value indicates an error, and the base driver should + * not report link up. + */ +s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw) +{ + ixgbe_link_speed force_speed; + bool link_up; + u32 status; + u16 speed; + + if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) + return IXGBE_ERR_CONFIG; + + /* If link is not up, then there is no setup necessary so return */ + status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); + if (status != IXGBE_SUCCESS) + return status; + + if (!link_up) + return IXGBE_SUCCESS; + + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &speed); + if (status != IXGBE_SUCCESS) + return status; + + /* If link is not still up, then no setup is necessary so return */ + status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); + if (status != IXGBE_SUCCESS) + return status; + if (!link_up) + return IXGBE_SUCCESS; + + /* clear everything but the speed and duplex bits */ + speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK; + + switch (speed) { + case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL: + force_speed = IXGBE_LINK_SPEED_10GB_FULL; + break; + case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL: + force_speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + default: + /* Internal PHY does not support anything else */ + return IXGBE_ERR_INVALID_LINK_SETTINGS; + } + + return ixgbe_setup_ixfi_x550em(hw, &force_speed); +} + +/** + * ixgbe_setup_phy_loopback_x550em - Configure the KR PHY for loopback. + * @hw: pointer to hardware structure + * + * Configures the integrated KR PHY to use internal loopback mode. + **/ +s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw) +{ + s32 status; + u32 reg_val; + + /* Disable AN and force speed to 10G Serial. 
*/ + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); + if (status != IXGBE_SUCCESS) + return status; + reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; + reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status != IXGBE_SUCCESS) + return status; + + /* Set near-end loopback clocks. */ + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); + if (status != IXGBE_SUCCESS) + return status; + reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B; + reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status != IXGBE_SUCCESS) + return status; + + /* Set loopback enable. */ + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); + if (status != IXGBE_SUCCESS) + return status; + reg_val |= IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status != IXGBE_SUCCESS) + return status; + + /* Training bypass. */ + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); + if (status != IXGBE_SUCCESS) + return status; + reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + + return status; +} + +/** + * ixgbe_read_ee_hostif_data_X550 - Read EEPROM word using a host interface command + * assuming that the semaphore is already obtained. + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the hostif. + **/ +s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, + u16 *data) +{ + s32 status; + struct ixgbe_hic_read_shadow_ram buffer; + + DEBUGFUNC("ixgbe_read_ee_hostif_data_X550"); + buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* convert offset from words to bytes */ + buffer.address = IXGBE_CPU_TO_BE32(offset * 2); + /* one word */ + buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16)); + + status = ixgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + IXGBE_HI_COMMAND_TIMEOUT, FALSE); + + if (status) + return status; + + *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, + FW_NVM_DATA_OFFSET); + + return 0; +} + +/** + * ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface command + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the hostif. 
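+ * + * Unlike the _data_ variant above, this wrapper takes and releases the + * IXGBE_GSSR_EEP_SM semaphore itself; caller sketch (editorial, not part + * of this commit): + * + * u16 word; + * status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM, &word);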
+ **/ +s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, + u16 *data) +{ + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_read_ee_hostif_X550"); + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == + IXGBE_SUCCESS) { + status = ixgbe_read_ee_hostif_data_X550(hw, offset, data); + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + } else { + status = IXGBE_ERR_SWFW_SYNC; + } + + return status; +} + +/** + * ixgbe_read_ee_hostif_buffer_X550 - Read EEPROM word(s) using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @words: number of words + * @data: word(s) read from the EEPROM + * + * Reads a 16 bit word(s) from the EEPROM using the hostif. + **/ +s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data) +{ + struct ixgbe_hic_read_shadow_ram buffer; + u32 current_word = 0; + u16 words_to_read; + s32 status; + u32 i; + + DEBUGFUNC("ixgbe_read_ee_hostif_buffer_X550"); + + /* Take semaphore for the entire operation. */ + status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + if (status) { + DEBUGOUT("EEPROM read buffer - semaphore failed\n"); + return status; + } + while (words) { + if (words > FW_MAX_READ_BUFFER_SIZE / 2) + words_to_read = FW_MAX_READ_BUFFER_SIZE / 2; + else + words_to_read = words; + + buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* convert offset from words to bytes */ + buffer.address = IXGBE_CPU_TO_BE32((offset + current_word) * 2); + buffer.length = IXGBE_CPU_TO_BE16(words_to_read * 2); + + status = ixgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + IXGBE_HI_COMMAND_TIMEOUT, + FALSE); + + if (status) { + DEBUGOUT("Host interface command failed\n"); + goto out; + } + + for (i = 0; i < words_to_read; i++) { + u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) + + 2 * i; + u32 value = IXGBE_READ_REG(hw, reg); + + data[current_word] = (u16)(value & 0xffff); + current_word++; + i++; + if (i < words_to_read) { + value >>= 16; + data[current_word] = (u16)(value & 0xffff); + current_word++; + } + } + words -= words_to_read; + } + +out: + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + return status; +} + +/** + * ixgbe_write_ee_hostif_data_X550 - Write EEPROM word using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word to write to the EEPROM + * + * Writes a 16 bit word to the EEPROM using the hostif. + **/ +s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, + u16 data) +{ + s32 status; + struct ixgbe_hic_write_shadow_ram buffer; + + DEBUGFUNC("ixgbe_write_ee_hostif_data_X550"); + + buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* one word */ + buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16)); + buffer.data = data; + buffer.address = IXGBE_CPU_TO_BE32(offset * 2); + + status = ixgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + IXGBE_HI_COMMAND_TIMEOUT, FALSE); + + return status; +} + +/** + * ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word to write to the EEPROM + * + * Writes a 16 bit word to the EEPROM using the hostif. 
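+ * + * A shadow RAM write does not reach the flash by itself; the expected + * follow-up (editorial sketch, see ixgbe_update_eeprom_checksum_X550 + * below) is: + * + * status = ixgbe_write_ee_hostif_X550(hw, offset, data); + * status = ixgbe_update_eeprom_checksum_X550(hw);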
+ **/ +s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, + u16 data) +{ + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_write_ee_hostif_X550"); + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == + IXGBE_SUCCESS) { + status = ixgbe_write_ee_hostif_data_X550(hw, offset, data); + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + } else { + DEBUGOUT("write ee hostif failed to get semaphore"); + status = IXGBE_ERR_SWFW_SYNC; + } + + return status; +} + +/** + * ixgbe_write_ee_hostif_buffer_X550 - Write EEPROM word(s) using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @words: number of words + * @data: word(s) to write to the EEPROM + * + * Writes 16 bit word(s) to the EEPROM using the hostif. + **/ +s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data) +{ + s32 status = IXGBE_SUCCESS; + u32 i = 0; + + DEBUGFUNC("ixgbe_write_ee_hostif_buffer_X550"); + + /* Take semaphore for the entire operation. */ + status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + if (status != IXGBE_SUCCESS) { + DEBUGOUT("EEPROM write buffer - semaphore failed\n"); + goto out; + } + + for (i = 0; i < words; i++) { + status = ixgbe_write_ee_hostif_data_X550(hw, offset + i, + data[i]); + + if (status != IXGBE_SUCCESS) { + DEBUGOUT("Eeprom buffered write failed\n"); + break; + } + } + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); +out: + + return status; +} + +/** + * ixgbe_checksum_ptr_x550 - Checksum one pointer region + * @hw: pointer to hardware structure + * @ptr: pointer offset in eeprom + * @size: size of section pointed by ptr, if 0 first word will be used as size + * @csum: address of checksum to update + * @buffer: pointer to buffer containing part of the EEPROM image, or NULL + * @buffer_size: size of the buffer, in words + * + * Returns error status for any failure + */ +static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr, + u16 size, u16 *csum, u16 *buffer, + u32 buffer_size) +{ + u16 buf[256]; + s32 status; + u16 length, bufsz, i, start; + u16 *local_buffer; + + bufsz = sizeof(buf) / sizeof(buf[0]); + + /* Read a chunk at the pointer location */ + if (!buffer) { + status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf); + if (status) { + DEBUGOUT("Failed to read EEPROM image\n"); + return status; + } + local_buffer = buf; + } else { + if (buffer_size < ptr) + return IXGBE_ERR_PARAM; + local_buffer = &buffer[ptr]; + } + + if (size) { + start = 0; + length = size; + } else { + start = 1; + length = local_buffer[0]; + + /* Skip pointer section if length is invalid. 
*/ + if (length == 0xFFFF || length == 0 || + (ptr + length) >= hw->eeprom.word_size) + return IXGBE_SUCCESS; + } + + if (buffer && ((u32)start + (u32)length > buffer_size)) + return IXGBE_ERR_PARAM; + + for (i = start; length; i++, length--) { + if (i == bufsz && !buffer) { + ptr += bufsz; + i = 0; + if (length < bufsz) + bufsz = length; + + /* Read a chunk at the pointer location */ + status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, + bufsz, buf); + if (status) { + DEBUGOUT("Failed to read EEPROM image\n"); + return status; + } + } + *csum += local_buffer[i]; + } + return IXGBE_SUCCESS; +} + +/** + * ixgbe_calc_checksum_X550 - Calculates and returns the checksum + * @hw: pointer to hardware structure + * @buffer: pointer to buffer containing a copy of the EEPROM image, or NULL + * @buffer_size: size of buffer + * + * Returns a negative error code on error, or the 16-bit checksum + **/ +s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size) +{ + u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1]; + u16 *local_buffer; + s32 status; + u16 checksum = 0; + u16 pointer, i, size; + + DEBUGFUNC("ixgbe_calc_checksum_X550"); + + hw->eeprom.ops.init_params(hw); + + if (!buffer) { + /* Read pointer area */ + status = ixgbe_read_ee_hostif_buffer_X550(hw, 0, + IXGBE_EEPROM_LAST_WORD + 1, + eeprom_ptrs); + if (status) { + DEBUGOUT("Failed to read EEPROM image\n"); + return status; + } + local_buffer = eeprom_ptrs; + } else { + if (buffer_size < IXGBE_EEPROM_LAST_WORD) + return IXGBE_ERR_PARAM; + local_buffer = buffer; + } + + /* + * For X550 hardware include 0x0-0x41 in the checksum, skip the + * checksum word itself + */ + for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++) + if (i != IXGBE_EEPROM_CHECKSUM) + checksum += local_buffer[i]; + + /* + * Include all data from pointers 0x3, 0x6-0xE. This excludes the + * FW, PHY module, and PCIe Expansion/Option ROM pointers. + */ + for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) { + if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR) + continue; + + pointer = local_buffer[i]; + + /* Skip pointer section if the pointer is invalid. */ + if (pointer == 0xFFFF || pointer == 0 || + pointer >= hw->eeprom.word_size) + continue; + + switch (i) { + case IXGBE_PCIE_GENERAL_PTR: + size = IXGBE_IXGBE_PCIE_GENERAL_SIZE; + break; + case IXGBE_PCIE_CONFIG0_PTR: + case IXGBE_PCIE_CONFIG1_PTR: + size = IXGBE_PCIE_CONFIG_SIZE; + break; + default: + size = 0; + break; + } + + status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum, + buffer, buffer_size); + if (status) + return status; + } + + checksum = (u16)IXGBE_EEPROM_SUM - checksum; + + return (s32)checksum; +} + +/** + * ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum + * @hw: pointer to hardware structure + * + * Returns a negative error code on error, or the 16-bit checksum + **/ +s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw) +{ + return ixgbe_calc_checksum_X550(hw, NULL, 0); +} + +/** + * ixgbe_validate_eeprom_checksum_X550 - Validate EEPROM checksum + * @hw: pointer to hardware structure + * @checksum_val: calculated checksum + * + * Performs checksum calculation and validates the EEPROM checksum. If the + * caller does not need checksum_val, the value can be NULL. + **/ +s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val) +{ + s32 status; + u16 checksum; + u16 read_checksum = 0; + + DEBUGFUNC("ixgbe_validate_eeprom_checksum_X550"); + + /* Read the first word from the EEPROM. 
If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = hw->eeprom.ops.read(hw, 0, &checksum); + if (status) { + DEBUGOUT("EEPROM read failed\n"); + return status; + } + + status = hw->eeprom.ops.calc_checksum(hw); + if (status < 0) + return status; + + checksum = (u16)(status & 0xffff); + + status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM, + &read_checksum); + if (status) + return status; + + /* Verify read checksum from EEPROM is the same as + * calculated checksum + */ + if (read_checksum != checksum) { + status = IXGBE_ERR_EEPROM_CHECKSUM; + ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE, + "Invalid EEPROM checksum"); + } + + /* If the user cares, return the calculated checksum */ + if (checksum_val) + *checksum_val = checksum; + + return status; +} + +/** + * ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash + * @hw: pointer to hardware structure + * + * After writing EEPROM to shadow RAM using EEWR register, software calculates + * checksum and updates the EEPROM and instructs the hardware to update + * the flash. + **/ +s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw) +{ + s32 status; + u16 checksum = 0; + + DEBUGFUNC("ixgbe_update_eeprom_checksum_X550"); + + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum); + if (status) { + DEBUGOUT("EEPROM read failed\n"); + return status; + } + + status = ixgbe_calc_eeprom_checksum_X550(hw); + if (status < 0) + return status; + + checksum = (u16)(status & 0xffff); + + status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM, + checksum); + if (status) + return status; + + status = ixgbe_update_flash_X550(hw); + + return status; +} + +/** + * ixgbe_update_flash_X550 - Instruct HW to copy EEPROM to Flash device + * @hw: pointer to hardware structure + * + * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash. + **/ +s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + union ixgbe_hic_hdr2 buffer; + + DEBUGFUNC("ixgbe_update_flash_X550"); + + buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD; + buffer.req.buf_lenh = 0; + buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN; + buffer.req.checksum = FW_DEFAULT_CHECKSUM; + + status = ixgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + IXGBE_HI_COMMAND_TIMEOUT, FALSE); + + return status; +} + +/** + * ixgbe_get_supported_physical_layer_X550em - Returns physical layer type + * @hw: pointer to hardware structure + * + * Determines physical layer capabilities of the current configuration. 
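+ * + * The return value is a bitmask; a caller might decode it as (editorial + * sketch, not part of this commit): + * + * u32 layer = ixgbe_get_supported_physical_layer_X550em(hw); + * if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) + * ... -- e.g. add 10GBASE-T to the supported media list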
+ **/ +u32 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw) +{ + u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + u16 ext_ability = 0; + + DEBUGFUNC("ixgbe_get_supported_physical_layer_X550em"); + + hw->phy.ops.identify(hw); + + switch (hw->phy.type) { + case ixgbe_phy_x550em_kr: + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR | + IXGBE_PHYSICAL_LAYER_1000BASE_KX; + break; + case ixgbe_phy_x550em_kx4: + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4 | + IXGBE_PHYSICAL_LAYER_1000BASE_KX; + break; + case ixgbe_phy_x550em_ext_t: + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &ext_ability); + if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; + if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; + break; + default: + break; + } + + if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) + physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw); + + return physical_layer; +} + +/** + * ixgbe_get_bus_info_X550em - Set PCI bus info + * @hw: pointer to hardware structure + * + * Sets bus link width and speed to unknown because X550em is + * not a PCI device. + **/ +s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw) +{ + + DEBUGFUNC("ixgbe_get_bus_info_X550em"); + + hw->bus.width = ixgbe_bus_width_unknown; + hw->bus.speed = ixgbe_bus_speed_unknown; + + hw->mac.ops.set_lan_id(hw); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_disable_rx_x550 - Disable RX unit + * @hw: pointer to hardware structure + * + * Disables the Rx DMA unit for x550 + **/ +void ixgbe_disable_rx_x550(struct ixgbe_hw *hw) +{ + u32 rxctrl, pfdtxgswc; + s32 status; + struct ixgbe_hic_disable_rxen fw_cmd; + + DEBUGFUNC("ixgbe_disable_rx_x550"); + + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + if (rxctrl & IXGBE_RXCTRL_RXEN) { + pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); + if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) { + pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN; + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); + hw->mac.set_lben = TRUE; + } else { + hw->mac.set_lben = FALSE; + } + + fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD; + fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN; + fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; + fw_cmd.port_number = (u8)hw->bus.lan_id; + + status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, + sizeof(struct ixgbe_hic_disable_rxen), + IXGBE_HI_COMMAND_TIMEOUT, TRUE); + + /* If we fail - disable RX using register write */ + if (status) { + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + if (rxctrl & IXGBE_RXCTRL_RXEN) { + rxctrl &= ~IXGBE_RXCTRL_RXEN; + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl); + } + } + } +} + +/** + * ixgbe_enter_lplu_t_x550em - Transition to low power states + * @hw: pointer to hardware structure + * + * Configures Low Power Link Up on transition to low power states + * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the + * X557 PHY immediately prior to entering LPLU. 
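+ * + * Expected call site (editorial sketch): the driver suspend/shutdown + * path, through the ops pointer bound in ixgbe_init_phy_ops_X550em(): + * + * if (hw->phy.ops.enter_lplu) + * status = hw->phy.ops.enter_lplu(hw);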
+ **/ +s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) +{ + u16 an_10g_cntl_reg, autoneg_reg, speed; + s32 status; + ixgbe_link_speed lcd_speed; + u32 save_autoneg; + bool link_up; + + /* If blocked by MNG FW, then don't restart AN */ + if (ixgbe_check_reset_blocked(hw)) + return IXGBE_SUCCESS; + + status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); + if (status != IXGBE_SUCCESS) + return status; + + status = ixgbe_read_eeprom(hw, NVM_INIT_CTRL_3, &hw->eeprom.ctrl_word_3); + + if (status != IXGBE_SUCCESS) + return status; + + /* If link is down, LPLU disabled in NVM, WoL disabled, or manageability + * disabled, then force link down by entering low power mode. + */ + if (!link_up || !(hw->eeprom.ctrl_word_3 & NVM_INIT_CTRL_3_LPLU) || + !(hw->wol_enabled || ixgbe_mng_present(hw))) + return ixgbe_set_copper_phy_power(hw, FALSE); + + /* Determine LCD */ + status = ixgbe_get_lcd_t_x550em(hw, &lcd_speed); + + if (status != IXGBE_SUCCESS) + return status; + + /* If no valid LCD link speed, then force link down and exit. */ + if (lcd_speed == IXGBE_LINK_SPEED_UNKNOWN) + return ixgbe_set_copper_phy_power(hw, FALSE); + + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &speed); + + if (status != IXGBE_SUCCESS) + return status; + + /* If no link now, speed is invalid so take link down */ + status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); + if (status != IXGBE_SUCCESS) + return ixgbe_set_copper_phy_power(hw, FALSE); + + /* clear everything but the speed bits */ + speed &= IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK; + + /* If current speed is already LCD, then exit. */ + if (((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB) && + (lcd_speed == IXGBE_LINK_SPEED_1GB_FULL)) || + ((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB) && + (lcd_speed == IXGBE_LINK_SPEED_10GB_FULL))) + return status; + + /* Clear AN completed indication */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + + if (status != IXGBE_SUCCESS) + return status; + + status = hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &an_10g_cntl_reg); + + if (status != IXGBE_SUCCESS) + return status; + + status = hw->phy.ops.read_reg(hw, + IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + + if (status != IXGBE_SUCCESS) + return status; + + save_autoneg = hw->phy.autoneg_advertised; + + /* Set up link at lowest common link speed */ + status = hw->mac.ops.setup_link(hw, lcd_speed, FALSE); + + /* restore autoneg from before setting lplu speed */ + hw->phy.autoneg_advertised = save_autoneg; + + return status; +} + +/** + * ixgbe_get_lcd_t_x550em - Determine lowest common denominator + * @hw: pointer to hardware structure + * @lcd_speed: pointer to lowest common link speed + * + * Determine lowest common link speed with link partner. 
+ **/ +s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *lcd_speed) +{ + u16 an_lp_status; + s32 status; + u16 word = hw->eeprom.ctrl_word_3; + + *lcd_speed = IXGBE_LINK_SPEED_UNKNOWN; + + status = hw->phy.ops.read_reg(hw, IXGBE_AUTO_NEG_LP_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &an_lp_status); + + if (status != IXGBE_SUCCESS) + return status; + + /* If link partner advertised 1G, return 1G */ + if (an_lp_status & IXGBE_AUTO_NEG_LP_1000BASE_CAP) { + *lcd_speed = IXGBE_LINK_SPEED_1GB_FULL; + return status; + } + + /* If 10G disabled for LPLU via NVM D10GMP, then return no valid LCD */ + if ((hw->bus.lan_id && (word & NVM_INIT_CTRL_3_D10GMP_PORT1)) || + (word & NVM_INIT_CTRL_3_D10GMP_PORT0)) + return status; + + /* Link partner not capable of lower speeds, return 10G */ + *lcd_speed = IXGBE_LINK_SPEED_10GB_FULL; + return status; +} + +/** + * ixgbe_setup_fc_X550em - Set up flow control + * @hw: pointer to hardware structure + * + * Called at init time to set up flow control. + **/ +s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw) +{ + s32 ret_val = IXGBE_SUCCESS; + u32 pause, asm_dir, reg_val; + + DEBUGFUNC("ixgbe_setup_fc_X550em"); + + /* Validate the requested mode */ + if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { + ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, + "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + + /* 10gig parts do not have a word in the EEPROM to determine the + * default flow control setting, so we explicitly set it to full. + */ + if (hw->fc.requested_mode == ixgbe_fc_default) + hw->fc.requested_mode = ixgbe_fc_full; + + /* Determine PAUSE and ASM_DIR bits. */ + switch (hw->fc.requested_mode) { + case ixgbe_fc_none: + pause = 0; + asm_dir = 0; + break; + case ixgbe_fc_tx_pause: + pause = 0; + asm_dir = 1; + break; + case ixgbe_fc_rx_pause: + /* Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE, as such we fall + * through to the fc_full statement. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + case ixgbe_fc_full: + pause = 1; + asm_dir = 1; + break; + default: + ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, + "Flow control param set incorrectly\n"); + ret_val = IXGBE_ERR_CONFIG; + goto out; + } + + if (hw->phy.media_type == ixgbe_media_type_backplane) { + ret_val = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); + if (ret_val != IXGBE_SUCCESS) + goto out; + reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | + IXGBE_KRM_AN_CNTL_1_ASM_PAUSE); + if (pause) + reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE; + if (asm_dir) + reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; + ret_val = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + + /* Not all devices fully support AN. 
*/ + if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) + hw->fc.disable_fc_autoneg = TRUE; + } + +out: + return ret_val; +} + +/** + * ixgbe_set_mux - Set mux for port 1 access with CS4227 + * @hw: pointer to hardware structure + * @state: set mux if 1, clear if 0 + */ +static void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state) +{ + u32 esdp; + + if (!hw->bus.lan_id) + return; + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + if (state) + esdp |= IXGBE_ESDP_SDP1; + else + esdp &= ~IXGBE_ESDP_SDP1; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); +} + +/** + * ixgbe_acquire_swfw_sync_X550em - Acquire SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to acquire + * + * Acquires the SWFW semaphore and sets the I2C MUX + **/ +s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask) +{ + s32 status; + + DEBUGFUNC("ixgbe_acquire_swfw_sync_X550em"); + + status = ixgbe_acquire_swfw_sync_X540(hw, mask); + if (status) + return status; + + if (mask & IXGBE_GSSR_I2C_MASK) + ixgbe_set_mux(hw, 1); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_release_swfw_sync_X550em - Release SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to release + * + * Releases the SWFW semaphore and sets the I2C MUX + **/ +void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask) +{ + DEBUGFUNC("ixgbe_release_swfw_sync_X550em"); + + if (mask & IXGBE_GSSR_I2C_MASK) + ixgbe_set_mux(hw, 0); + + ixgbe_release_swfw_sync_X540(hw, mask); +} + +/** + * ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt + * @hw: pointer to hardware structure + * + * Handle external Base T PHY interrupt. If high temperature + * failure alarm then return error, else if link status change + * then setup internal/external PHY link + * + * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature + * failure alarm, else return PHY access status. + */ +s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw) +{ + bool lsc; + u32 status; + + status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc); + + if (status != IXGBE_SUCCESS) + return status; + + if (lsc) + return ixgbe_setup_internal_phy(hw); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: TRUE when waiting for completion is needed + * + * Setup internal/external PHY link speed based on link speed, then set + * external PHY auto advertised link speed. + * + * Returns error status for any failure + **/ +s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + s32 status; + ixgbe_link_speed force_speed; + + DEBUGFUNC("ixgbe_setup_mac_link_t_X550em"); + + /* Setup internal/external PHY link speed to iXFI (10G), unless + * only 1G is auto advertised then setup KX link. + */ + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + force_speed = IXGBE_LINK_SPEED_10GB_FULL; + else + force_speed = IXGBE_LINK_SPEED_1GB_FULL; + + /* If internal link mode is XFI, then setup XFI internal link. 
*/ + if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { + status = ixgbe_setup_ixfi_x550em(hw, &force_speed); + + if (status != IXGBE_SUCCESS) + return status; + } + + return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete); +} + +/** + * ixgbe_check_link_t_X550em - Determine link and speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: TRUE when link is up + * @link_up_wait_to_complete: bool used to wait for link up or not + * + * Check that both the MAC and X557 external PHY have link. + **/ +s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up, bool link_up_wait_to_complete) +{ + u32 status; + u16 autoneg_status; + + if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) + return IXGBE_ERR_CONFIG; + + status = ixgbe_check_mac_link_generic(hw, speed, link_up, + link_up_wait_to_complete); + + /* If check link fails or MAC link is not up, then return */ + if (status != IXGBE_SUCCESS || !(*link_up)) + return status; + + /* MAC link is up, so check external PHY link. + * Read this twice back to back to indicate current status. + */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_status); + + if (status != IXGBE_SUCCESS) + return status; + + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_status); + + if (status != IXGBE_SUCCESS) + return status; + + /* If external PHY link is not up, then indicate link not up */ + if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS)) + *link_up = FALSE; + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_reset_phy_t_X550em - Performs X557 PHY reset and enables LASI + * @hw: pointer to hardware structure + **/ +s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw) +{ + s32 status; + + status = ixgbe_reset_phy_generic(hw); + + if (status != IXGBE_SUCCESS) + return status; + + /* Configure Link Status Alarm and Temperature Threshold interrupts */ + return ixgbe_enable_lasi_ext_t_x550em(hw); +} + +/** + * ixgbe_led_on_t_X550em - Turns on the software controllable LEDs. + * @hw: pointer to hardware structure + * @led_idx: led number to turn on + **/ +s32 ixgbe_led_on_t_X550em(struct ixgbe_hw *hw, u32 led_idx) +{ + u16 phy_data; + + DEBUGFUNC("ixgbe_led_on_t_X550em"); + + if (led_idx >= IXGBE_X557_MAX_LED_INDEX) + return IXGBE_ERR_PARAM; + + /* To turn on the LED, set mode to ON. */ + ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data); + phy_data |= IXGBE_X557_LED_MANUAL_SET_MASK; + ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_led_off_t_X550em - Turns off the software controllable LEDs. + * @hw: pointer to hardware structure + * @led_idx: led number to turn off + **/ +s32 ixgbe_led_off_t_X550em(struct ixgbe_hw *hw, u32 led_idx) +{ + u16 phy_data; + + DEBUGFUNC("ixgbe_led_off_t_X550em"); + + if (led_idx >= IXGBE_X557_MAX_LED_INDEX) + return IXGBE_ERR_PARAM; + + /* To turn off the LED, set mode to OFF. 
*/ + ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data); + phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK; + ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data); + + return IXGBE_SUCCESS; +} + diff --git a/sys/dev/ixgbe/ixgbe_x550.h b/sys/dev/ixgbe/ixgbe_x550.h new file mode 100644 index 0000000..8a544ec --- /dev/null +++ b/sys/dev/ixgbe/ixgbe_x550.h @@ -0,0 +1,109 @@ +/****************************************************************************** + + Copyright (c) 2001-2015, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. 
+ +******************************************************************************/ +/*$FreeBSD$*/ + +#ifndef _IXGBE_X550_H_ +#define _IXGBE_X550_H_ + +#include "ixgbe_type.h" + +s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw); +s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw); +s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw); + +s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw); +s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw); +s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw); +s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw); +s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size); +s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val); +s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw); +s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data); +s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, + u16 data); +s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data); +s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, +u16 *data); +s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, + u16 *data); +s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, + u16 data); +s32 ixgbe_set_eee_X550(struct ixgbe_hw *hw, bool enable_eee); +s32 ixgbe_setup_eee_X550(struct ixgbe_hw *hw, bool enable_eee); +void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable, + unsigned int pool); +void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, + bool enable, int vf); +s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 data); +s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 *data); +void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw); +void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw); +void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap); +void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf); +enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw); +s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw); +s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, bool *autoneg); +void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw); +s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw); +s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw); +s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw); +s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw); +s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw); +s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw); +s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw); +u32 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw); +void ixgbe_disable_rx_x550(struct ixgbe_hw *hw); +s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *lcd_speed); +s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw); +s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask); +void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask); +s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw); +s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw); +s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up, bool 
link_up_wait_to_complete); +s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw); +s32 ixgbe_identify_sfp_module_X550em(struct ixgbe_hw *hw); +s32 ixgbe_led_on_t_X550em(struct ixgbe_hw *hw, u32 led_idx); +s32 ixgbe_led_off_t_X550em(struct ixgbe_hw *hw, u32 led_idx); +#endif /* _IXGBE_X550_H_ */ diff --git a/sys/dev/ixgbe/ixv.c b/sys/dev/ixgbe/ixv.c deleted file mode 100644 index f0f7593..0000000 --- a/sys/dev/ixgbe/ixv.c +++ /dev/null @@ -1,4007 +0,0 @@ -/****************************************************************************** - - Copyright (c) 2001-2013, Intel Corporation - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - - 1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - - 3. Neither the name of the Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - POSSIBILITY OF SUCH DAMAGE. 
- -******************************************************************************/ -/*$FreeBSD$*/ - -#include "opt_inet.h" -#include "opt_inet6.h" -#include "ixv.h" - -/********************************************************************* - * Driver version - *********************************************************************/ -char ixv_driver_version[] = "1.1.4"; - -/********************************************************************* - * PCI Device ID Table - * - * Used by probe to select devices to load on - * Last field stores an index into ixv_strings - * Last entry must be all 0s - * - * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } - *********************************************************************/ - -static ixv_vendor_info_t ixv_vendor_info_array[] = -{ - {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0}, - {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0}, - /* required last entry */ - {0, 0, 0, 0, 0} -}; - -/********************************************************************* - * Table of branding strings - *********************************************************************/ - -static char *ixv_strings[] = { - "Intel(R) PRO/10GbE Virtual Function Network Driver" -}; - -/********************************************************************* - * Function prototypes - *********************************************************************/ -static int ixv_probe(device_t); -static int ixv_attach(device_t); -static int ixv_detach(device_t); -static int ixv_shutdown(device_t); -#if __FreeBSD_version < 800000 -static void ixv_start(struct ifnet *); -static void ixv_start_locked(struct tx_ring *, struct ifnet *); -#else -static int ixv_mq_start(struct ifnet *, struct mbuf *); -static int ixv_mq_start_locked(struct ifnet *, - struct tx_ring *, struct mbuf *); -static void ixv_qflush(struct ifnet *); -#endif -static int ixv_ioctl(struct ifnet *, u_long, caddr_t); -static void ixv_init(void *); -static void ixv_init_locked(struct adapter *); -static void ixv_stop(void *); -static void ixv_media_status(struct ifnet *, struct ifmediareq *); -static int ixv_media_change(struct ifnet *); -static void ixv_identify_hardware(struct adapter *); -static int ixv_allocate_pci_resources(struct adapter *); -static int ixv_allocate_msix(struct adapter *); -static int ixv_allocate_queues(struct adapter *); -static int ixv_setup_msix(struct adapter *); -static void ixv_free_pci_resources(struct adapter *); -static void ixv_local_timer(void *); -static void ixv_setup_interface(device_t, struct adapter *); -static void ixv_config_link(struct adapter *); - -static int ixv_allocate_transmit_buffers(struct tx_ring *); -static int ixv_setup_transmit_structures(struct adapter *); -static void ixv_setup_transmit_ring(struct tx_ring *); -static void ixv_initialize_transmit_units(struct adapter *); -static void ixv_free_transmit_structures(struct adapter *); -static void ixv_free_transmit_buffers(struct tx_ring *); - -static int ixv_allocate_receive_buffers(struct rx_ring *); -static int ixv_setup_receive_structures(struct adapter *); -static int ixv_setup_receive_ring(struct rx_ring *); -static void ixv_initialize_receive_units(struct adapter *); -static void ixv_free_receive_structures(struct adapter *); -static void ixv_free_receive_buffers(struct rx_ring *); - -static void ixv_enable_intr(struct adapter *); -static void ixv_disable_intr(struct adapter *); -static bool ixv_txeof(struct tx_ring *); -static bool ixv_rxeof(struct ix_queue *, int); -static void 
ixv_rx_checksum(u32, struct mbuf *, u32); -static void ixv_set_multi(struct adapter *); -static void ixv_update_link_status(struct adapter *); -static void ixv_refresh_mbufs(struct rx_ring *, int); -static int ixv_xmit(struct tx_ring *, struct mbuf **); -static int ixv_sysctl_stats(SYSCTL_HANDLER_ARGS); -static int ixv_sysctl_debug(SYSCTL_HANDLER_ARGS); -static int ixv_set_flowcntl(SYSCTL_HANDLER_ARGS); -static int ixv_dma_malloc(struct adapter *, bus_size_t, - struct ixv_dma_alloc *, int); -static void ixv_dma_free(struct adapter *, struct ixv_dma_alloc *); -static void ixv_add_rx_process_limit(struct adapter *, const char *, - const char *, int *, int); -static bool ixv_tx_ctx_setup(struct tx_ring *, struct mbuf *); -static bool ixv_tso_setup(struct tx_ring *, struct mbuf *, u32 *); -static void ixv_set_ivar(struct adapter *, u8, u8, s8); -static void ixv_configure_ivars(struct adapter *); -static u8 * ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *); - -static void ixv_setup_vlan_support(struct adapter *); -static void ixv_register_vlan(void *, struct ifnet *, u16); -static void ixv_unregister_vlan(void *, struct ifnet *, u16); - -static void ixv_save_stats(struct adapter *); -static void ixv_init_stats(struct adapter *); -static void ixv_update_stats(struct adapter *); - -static __inline void ixv_rx_discard(struct rx_ring *, int); -static __inline void ixv_rx_input(struct rx_ring *, struct ifnet *, - struct mbuf *, u32); - -/* The MSI/X Interrupt handlers */ -static void ixv_msix_que(void *); -static void ixv_msix_mbx(void *); - -/* Deferred interrupt tasklets */ -static void ixv_handle_que(void *, int); -static void ixv_handle_mbx(void *, int); - -/********************************************************************* - * FreeBSD Device Interface Entry Points - *********************************************************************/ - -static device_method_t ixv_methods[] = { - /* Device interface */ - DEVMETHOD(device_probe, ixv_probe), - DEVMETHOD(device_attach, ixv_attach), - DEVMETHOD(device_detach, ixv_detach), - DEVMETHOD(device_shutdown, ixv_shutdown), - DEVMETHOD_END -}; - -static driver_t ixv_driver = { - "ix", ixv_methods, sizeof(struct adapter), -}; - -extern devclass_t ixgbe_devclass; -DRIVER_MODULE(ixv, pci, ixv_driver, ixgbe_devclass, 0, 0); -MODULE_DEPEND(ixv, pci, 1, 1, 1); -MODULE_DEPEND(ixv, ether, 1, 1, 1); - -/* -** TUNEABLE PARAMETERS: -*/ - -/* -** AIM: Adaptive Interrupt Moderation -** which means that the interrupt rate -** is varied over time based on the -** traffic for that interrupt vector -*/ -static int ixv_enable_aim = FALSE; -TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim); - -/* How many packets rxeof tries to clean at a time */ -static int ixv_rx_process_limit = 128; -TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit); - -/* Flow control setting, default to full */ -static int ixv_flow_control = ixgbe_fc_full; -TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control); - -/* - * Header split: this causes the hardware to DMA - * the header into a seperate mbuf from the payload, - * it can be a performance win in some workloads, but - * in others it actually hurts, its off by default. - */ -static int ixv_header_split = FALSE; -TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split); - -/* -** Number of TX descriptors per ring, -** setting higher than RX as this seems -** the better performing choice. 
-*/ -static int ixv_txd = DEFAULT_TXD; -TUNABLE_INT("hw.ixv.txd", &ixv_txd); - -/* Number of RX descriptors per ring */ -static int ixv_rxd = DEFAULT_RXD; -TUNABLE_INT("hw.ixv.rxd", &ixv_rxd); - -/* -** Shadow VFTA table, this is needed because -** the real filter table gets cleared during -** a soft reset and we need to repopulate it. -*/ -static u32 ixv_shadow_vfta[VFTA_SIZE]; - -/********************************************************************* - * Device identification routine - * - * ixv_probe determines if the driver should be loaded on - * adapter based on PCI vendor/device id of the adapter. - * - * return BUS_PROBE_DEFAULT on success, positive on failure - *********************************************************************/ - -static int -ixv_probe(device_t dev) -{ - ixv_vendor_info_t *ent; - - u16 pci_vendor_id = 0; - u16 pci_device_id = 0; - u16 pci_subvendor_id = 0; - u16 pci_subdevice_id = 0; - char adapter_name[256]; - - - pci_vendor_id = pci_get_vendor(dev); - if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID) - return (ENXIO); - - pci_device_id = pci_get_device(dev); - pci_subvendor_id = pci_get_subvendor(dev); - pci_subdevice_id = pci_get_subdevice(dev); - - ent = ixv_vendor_info_array; - while (ent->vendor_id != 0) { - if ((pci_vendor_id == ent->vendor_id) && - (pci_device_id == ent->device_id) && - - ((pci_subvendor_id == ent->subvendor_id) || - (ent->subvendor_id == 0)) && - - ((pci_subdevice_id == ent->subdevice_id) || - (ent->subdevice_id == 0))) { - sprintf(adapter_name, "%s, Version - %s", - ixv_strings[ent->index], - ixv_driver_version); - device_set_desc_copy(dev, adapter_name); - return (BUS_PROBE_DEFAULT); - } - ent++; - } - return (ENXIO); -} - -/********************************************************************* - * Device initialization routine - * - * The attach entry point is called when the driver is being loaded. - * This routine identifies the type of hardware, allocates all resources - * and initializes the hardware. 
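[annotation] The shadow VFTA declared above exists because a VF soft reset wipes the hardware VLAN filter table, so the driver keeps a host-side copy to replay; the repopulation code sits later in the file. The indexing convention is fixed by the hardware: 128 32-bit words cover the 4096 possible VLAN IDs. A runnable model of that split (vfta_set is a hypothetical helper, not a function from this file):

    #include <stdint.h>
    #include <stdio.h>

    #define VFTA_SIZE 128          /* 128 x 32-bit words = 4096 VLAN IDs */

    static uint32_t shadow_vfta[VFTA_SIZE];

    /* High bits of the VLAN ID pick the word, low five bits pick the bit. */
    static void
    vfta_set(uint16_t vlan)
    {
            shadow_vfta[(vlan >> 5) & 0x7F] |= 1u << (vlan & 0x1F);
    }

    int main(void)
    {
            vfta_set(100);         /* word 3, bit 4 */
            printf("vfta[3] = 0x%08x\n", shadow_vfta[3]); /* 0x00000010 */
            return (0);
    }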
- * - * return 0 on success, positive on failure - *********************************************************************/ - -static int -ixv_attach(device_t dev) -{ - struct adapter *adapter; - struct ixgbe_hw *hw; - int error = 0; - - INIT_DEBUGOUT("ixv_attach: begin"); - - /* Allocate, clear, and link in our adapter structure */ - adapter = device_get_softc(dev); - adapter->dev = adapter->osdep.dev = dev; - hw = &adapter->hw; - - /* Core Lock Init*/ - IXV_CORE_LOCK_INIT(adapter, device_get_nameunit(dev)); - - /* SYSCTL APIs */ - SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), - SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), - OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW, - adapter, 0, ixv_sysctl_stats, "I", "Statistics"); - - SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), - SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), - OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW, - adapter, 0, ixv_sysctl_debug, "I", "Debug Info"); - - SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), - SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), - OID_AUTO, "flow_control", CTLTYPE_INT | CTLFLAG_RW, - adapter, 0, ixv_set_flowcntl, "I", "Flow Control"); - - SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), - SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), - OID_AUTO, "enable_aim", CTLFLAG_RW, - &ixv_enable_aim, 1, "Interrupt Moderation"); - - /* Set up the timer callout */ - callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0); - - /* Determine hardware revision */ - ixv_identify_hardware(adapter); - - /* Do base PCI setup - map BAR0 */ - if (ixv_allocate_pci_resources(adapter)) { - device_printf(dev, "Allocation of PCI resources failed\n"); - error = ENXIO; - goto err_out; - } - - /* Do descriptor calc and sanity checks */ - if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 || - ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) { - device_printf(dev, "TXD config issue, using default!\n"); - adapter->num_tx_desc = DEFAULT_TXD; - } else - adapter->num_tx_desc = ixv_txd; - - if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 || - ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) { - device_printf(dev, "RXD config issue, using default!\n"); - adapter->num_rx_desc = DEFAULT_RXD; - } else - adapter->num_rx_desc = ixv_rxd; - - /* Allocate our TX/RX Queues */ - if (ixv_allocate_queues(adapter)) { - error = ENOMEM; - goto err_out; - } - - /* - ** Initialize the shared code: its - ** at this point the mac type is set. 
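[annotation] ixv_attach above falls back to DEFAULT_TXD/DEFAULT_RXD whenever the tunable would produce a descriptor ring that is not a multiple of DBA_ALIGN bytes or is out of range. A runnable miniature of that check; the constants are illustrative stand-ins for the header values (advanced descriptors are 16 bytes and DBA_ALIGN is 128 on this hardware family, which makes valid counts multiples of 8):

    #include <stdio.h>

    #define DESC_SIZE   16         /* sizeof(union ixgbe_adv_tx_desc) */
    #define DBA_ALIGN   128        /* ring base/length alignment */
    #define MIN_TXD     64         /* illustrative bounds */
    #define MAX_TXD     4096
    #define DEFAULT_TXD 1024

    /* Mirror of the attach-time sanity check. */
    static int
    sanitize_txd(int requested)
    {
            if (((requested * DESC_SIZE) % DBA_ALIGN) != 0 ||
                requested < MIN_TXD || requested > MAX_TXD)
                    return (DEFAULT_TXD);
            return (requested);
    }

    int main(void)
    {
            printf("%d\n", sanitize_txd(1010)); /* misaligned -> 1024 */
            printf("%d\n", sanitize_txd(2048)); /* accepted   -> 2048 */
            return (0);
    }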
- */ - error = ixgbe_init_shared_code(hw); - if (error) { - device_printf(dev,"Shared Code Initialization Failure\n"); - error = EIO; - goto err_late; - } - - /* Setup the mailbox */ - ixgbe_init_mbx_params_vf(hw); - - ixgbe_reset_hw(hw); - - /* Get Hardware Flow Control setting */ - hw->fc.requested_mode = ixgbe_fc_full; - hw->fc.pause_time = IXV_FC_PAUSE; - hw->fc.low_water[0] = IXV_FC_LO; - hw->fc.high_water[0] = IXV_FC_HI; - hw->fc.send_xon = TRUE; - - error = ixgbe_init_hw(hw); - if (error) { - device_printf(dev,"Hardware Initialization Failure\n"); - error = EIO; - goto err_late; - } - - error = ixv_allocate_msix(adapter); - if (error) - goto err_late; - - /* Setup OS specific network interface */ - ixv_setup_interface(dev, adapter); - - /* Sysctl for limiting the amount of work done in the taskqueue */ - ixv_add_rx_process_limit(adapter, "rx_processing_limit", - "max number of rx packets to process", &adapter->rx_process_limit, - ixv_rx_process_limit); - - /* Do the stats setup */ - ixv_save_stats(adapter); - ixv_init_stats(adapter); - - /* Register for VLAN events */ - adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, - ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST); - adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, - ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST); - - INIT_DEBUGOUT("ixv_attach: end"); - return (0); - -err_late: - ixv_free_transmit_structures(adapter); - ixv_free_receive_structures(adapter); -err_out: - ixv_free_pci_resources(adapter); - return (error); - -} - -/********************************************************************* - * Device removal routine - * - * The detach entry point is called when the driver is being removed. - * This routine stops the adapter and deallocates all the resources - * that were allocated for driver operation. 
- * - * return 0 on success, positive on failure - *********************************************************************/ - -static int -ixv_detach(device_t dev) -{ - struct adapter *adapter = device_get_softc(dev); - struct ix_queue *que = adapter->queues; - - INIT_DEBUGOUT("ixv_detach: begin"); - - /* Make sure VLANS are not using driver */ - if (adapter->ifp->if_vlantrunk != NULL) { - device_printf(dev,"Vlan in use, detach first\n"); - return (EBUSY); - } - - IXV_CORE_LOCK(adapter); - ixv_stop(adapter); - IXV_CORE_UNLOCK(adapter); - - for (int i = 0; i < adapter->num_queues; i++, que++) { - if (que->tq) { - taskqueue_drain(que->tq, &que->que_task); - taskqueue_free(que->tq); - } - } - - /* Drain the Link queue */ - if (adapter->tq) { - taskqueue_drain(adapter->tq, &adapter->mbx_task); - taskqueue_free(adapter->tq); - } - - /* Unregister VLAN events */ - if (adapter->vlan_attach != NULL) - EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach); - if (adapter->vlan_detach != NULL) - EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach); - - ether_ifdetach(adapter->ifp); - callout_drain(&adapter->timer); - ixv_free_pci_resources(adapter); - bus_generic_detach(dev); - if_free(adapter->ifp); - - ixv_free_transmit_structures(adapter); - ixv_free_receive_structures(adapter); - - IXV_CORE_LOCK_DESTROY(adapter); - return (0); -} - -/********************************************************************* - * - * Shutdown entry point - * - **********************************************************************/ -static int -ixv_shutdown(device_t dev) -{ - struct adapter *adapter = device_get_softc(dev); - IXV_CORE_LOCK(adapter); - ixv_stop(adapter); - IXV_CORE_UNLOCK(adapter); - return (0); -} - -#if __FreeBSD_version < 800000 -/********************************************************************* - * Transmit entry point - * - * ixv_start is called by the stack to initiate a transmit. - * The driver will remain in this routine as long as there are - * packets to transmit and transmit resources are available. - * In case resources are not available stack is notified and - * the packet is requeued. - **********************************************************************/ -static void -ixv_start_locked(struct tx_ring *txr, struct ifnet * ifp) -{ - struct mbuf *m_head; - struct adapter *adapter = txr->adapter; - - IXV_TX_LOCK_ASSERT(txr); - - if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) != - IFF_DRV_RUNNING) - return; - if (!adapter->link_active) - return; - - while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { - - IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); - if (m_head == NULL) - break; - - if (ixv_xmit(txr, &m_head)) { - if (m_head == NULL) - break; - ifp->if_drv_flags |= IFF_DRV_OACTIVE; - IFQ_DRV_PREPEND(&ifp->if_snd, m_head); - break; - } - /* Send a copy of the frame to the BPF listener */ - ETHER_BPF_MTAP(ifp, m_head); - - /* Set watchdog on */ - txr->watchdog_check = TRUE; - txr->watchdog_time = ticks; - - } - return; -} - -/* - * Legacy TX start - called by the stack, this - * always uses the first tx ring, and should - * not be used with multiqueue tx enabled. 
- */ -static void -ixv_start(struct ifnet *ifp) -{ - struct adapter *adapter = ifp->if_softc; - struct tx_ring *txr = adapter->tx_rings; - - if (ifp->if_drv_flags & IFF_DRV_RUNNING) { - IXV_TX_LOCK(txr); - ixv_start_locked(txr, ifp); - IXV_TX_UNLOCK(txr); - } - return; -} - -#else - -/* -** Multiqueue Transmit driver -** -*/ -static int -ixv_mq_start(struct ifnet *ifp, struct mbuf *m) -{ - struct adapter *adapter = ifp->if_softc; - struct ix_queue *que; - struct tx_ring *txr; - int i = 0, err = 0; - - /* Which queue to use */ - if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) - i = m->m_pkthdr.flowid % adapter->num_queues; - - txr = &adapter->tx_rings[i]; - que = &adapter->queues[i]; - - if (IXV_TX_TRYLOCK(txr)) { - err = ixv_mq_start_locked(ifp, txr, m); - IXV_TX_UNLOCK(txr); - } else { - err = drbr_enqueue(ifp, txr->br, m); - taskqueue_enqueue(que->tq, &que->que_task); - } - - return (err); -} - -static int -ixv_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m) -{ - struct adapter *adapter = txr->adapter; - struct mbuf *next; - int enqueued, err = 0; - - if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != - IFF_DRV_RUNNING || adapter->link_active == 0) { - if (m != NULL) - err = drbr_enqueue(ifp, txr->br, m); - return (err); - } - - /* Do a clean if descriptors are low */ - if (txr->tx_avail <= IXV_TX_CLEANUP_THRESHOLD) - ixv_txeof(txr); - - enqueued = 0; - if (m != NULL) { - err = drbr_enqueue(ifp, txr->br, m); - if (err) { - return (err); - } - } - /* Process the queue */ - while ((next = drbr_peek(ifp, txr->br)) != NULL) { - if ((err = ixv_xmit(txr, &next)) != 0) { - if (next == NULL) { - drbr_advance(ifp, txr->br); - } else { - drbr_putback(ifp, txr->br, next); - } - break; - } - drbr_advance(ifp, txr->br); - enqueued++; - ifp->if_obytes += next->m_pkthdr.len; - if (next->m_flags & M_MCAST) - ifp->if_omcasts++; - /* Send a copy of the frame to the BPF listener */ - ETHER_BPF_MTAP(ifp, next); - if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) - break; - if (txr->tx_avail <= IXV_TX_OP_THRESHOLD) { - ifp->if_drv_flags |= IFF_DRV_OACTIVE; - break; - } - } - - if (enqueued > 0) { - /* Set watchdog on */ - txr->watchdog_check = TRUE; - txr->watchdog_time = ticks; - } - - return (err); -} - -/* -** Flush all ring buffers -*/ -static void -ixv_qflush(struct ifnet *ifp) -{ - struct adapter *adapter = ifp->if_softc; - struct tx_ring *txr = adapter->tx_rings; - struct mbuf *m; - - for (int i = 0; i < adapter->num_queues; i++, txr++) { - IXV_TX_LOCK(txr); - while ((m = buf_ring_dequeue_sc(txr->br)) != NULL) - m_freem(m); - IXV_TX_UNLOCK(txr); - } - if_qflush(ifp); -} - -#endif - -/********************************************************************* - * Ioctl entry point - * - * ixv_ioctl is called when the user wants to configure the - * interface. 
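[annotation] ixv_mq_start above picks a transmit ring by reducing the mbuf's RSS flow id modulo the queue count, so a given flow always lands on the same ring and cannot be reordered; unhashed traffic defaults to ring 0. A runnable sketch of just that selection, with stand-in types for the two mbuf fields it consults:

    #include <stdint.h>
    #include <stdio.h>

    struct pkt {
            int      hashed;       /* models M_HASHTYPE_GET(m) != M_HASHTYPE_NONE */
            uint32_t flowid;       /* RSS hash assigned on receive */
    };

    /* Same flow -> same ring; per-flow ordering is preserved. */
    static int
    pick_txq(const struct pkt *p, int nqueues)
    {
            return (p->hashed ? (int)(p->flowid % (uint32_t)nqueues) : 0);
    }

    int main(void)
    {
            struct pkt a = { 1, 0x9e3779b9 }, b = { 0, 0 };
            printf("%d %d\n", pick_txq(&a, 4), pick_txq(&b, 4)); /* 1 0 */
            return (0);
    }

Note that when the ring lock is contended, the function above does not spin: it enqueues into the ring's buf_ring and lets the queue task drain it later.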
- * - * return 0 on success, positive on failure - **********************************************************************/ - -static int -ixv_ioctl(struct ifnet * ifp, u_long command, caddr_t data) -{ - struct adapter *adapter = ifp->if_softc; - struct ifreq *ifr = (struct ifreq *) data; -#if defined(INET) || defined(INET6) - struct ifaddr *ifa = (struct ifaddr *) data; - bool avoid_reset = FALSE; -#endif - int error = 0; - - switch (command) { - - case SIOCSIFADDR: -#ifdef INET - if (ifa->ifa_addr->sa_family == AF_INET) - avoid_reset = TRUE; -#endif -#ifdef INET6 - if (ifa->ifa_addr->sa_family == AF_INET6) - avoid_reset = TRUE; -#endif -#if defined(INET) || defined(INET6) - /* - ** Calling init results in link renegotiation, - ** so we avoid doing it when possible. - */ - if (avoid_reset) { - ifp->if_flags |= IFF_UP; - if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) - ixv_init(adapter); - if (!(ifp->if_flags & IFF_NOARP)) - arp_ifinit(ifp, ifa); - } else - error = ether_ioctl(ifp, command, data); - break; -#endif - case SIOCSIFMTU: - IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)"); - if (ifr->ifr_mtu > IXV_MAX_FRAME_SIZE - ETHER_HDR_LEN) { - error = EINVAL; - } else { - IXV_CORE_LOCK(adapter); - ifp->if_mtu = ifr->ifr_mtu; - adapter->max_frame_size = - ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; - ixv_init_locked(adapter); - IXV_CORE_UNLOCK(adapter); - } - break; - case SIOCSIFFLAGS: - IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)"); - IXV_CORE_LOCK(adapter); - if (ifp->if_flags & IFF_UP) { - if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) - ixv_init_locked(adapter); - } else - if (ifp->if_drv_flags & IFF_DRV_RUNNING) - ixv_stop(adapter); - adapter->if_flags = ifp->if_flags; - IXV_CORE_UNLOCK(adapter); - break; - case SIOCADDMULTI: - case SIOCDELMULTI: - IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI"); - if (ifp->if_drv_flags & IFF_DRV_RUNNING) { - IXV_CORE_LOCK(adapter); - ixv_disable_intr(adapter); - ixv_set_multi(adapter); - ixv_enable_intr(adapter); - IXV_CORE_UNLOCK(adapter); - } - break; - case SIOCSIFMEDIA: - case SIOCGIFMEDIA: - IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)"); - error = ifmedia_ioctl(ifp, ifr, &adapter->media, command); - break; - case SIOCSIFCAP: - { - int mask = ifr->ifr_reqcap ^ ifp->if_capenable; - IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)"); - if (mask & IFCAP_HWCSUM) - ifp->if_capenable ^= IFCAP_HWCSUM; - if (mask & IFCAP_TSO4) - ifp->if_capenable ^= IFCAP_TSO4; - if (mask & IFCAP_LRO) - ifp->if_capenable ^= IFCAP_LRO; - if (mask & IFCAP_VLAN_HWTAGGING) - ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; - if (ifp->if_drv_flags & IFF_DRV_RUNNING) { - IXV_CORE_LOCK(adapter); - ixv_init_locked(adapter); - IXV_CORE_UNLOCK(adapter); - } - VLAN_CAPABILITIES(ifp); - break; - } - - default: - IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command); - error = ether_ioctl(ifp, command, data); - break; - } - - return (error); -} - -/********************************************************************* - * Init entry point - * - * This routine is used in two ways. It is used by the stack as - * init entry point in network interface structure. It is also used - * by the driver as a hw/sw initialization routine to get to a - * consistent state. 
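[annotation] The SIOCSIFCAP case above relies on an XOR idiom: ifr_reqcap ^ if_capenable leaves set bits exactly where the requested and current capability sets differ, and each subsequent ^= flips only those bits. A runnable worked example (the CAP_* values are arbitrary stand-ins for the IFCAP_* flags):

    #include <stdio.h>

    #define CAP_HWCSUM 0x01
    #define CAP_TSO4   0x02
    #define CAP_LRO    0x04

    int main(void)
    {
            unsigned enabled   = CAP_HWCSUM | CAP_TSO4; /* currently on  */
            unsigned requested = CAP_HWCSUM | CAP_LRO;  /* desired state */
            unsigned mask = requested ^ enabled;        /* = TSO4 | LRO  */

            if (mask & CAP_TSO4)
                    enabled ^= CAP_TSO4;                /* TSO4 goes off */
            if (mask & CAP_LRO)
                    enabled ^= CAP_LRO;                 /* LRO goes on   */

            printf("0x%x\n", enabled);  /* 0x5 = HWCSUM | LRO */
            return (0);
    }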
- * - * return 0 on success, positive on failure - **********************************************************************/ -#define IXGBE_MHADD_MFS_SHIFT 16 - -static void -ixv_init_locked(struct adapter *adapter) -{ - struct ifnet *ifp = adapter->ifp; - device_t dev = adapter->dev; - struct ixgbe_hw *hw = &adapter->hw; - u32 mhadd, gpie; - - INIT_DEBUGOUT("ixv_init: begin"); - mtx_assert(&adapter->core_mtx, MA_OWNED); - hw->adapter_stopped = FALSE; - ixgbe_stop_adapter(hw); - callout_stop(&adapter->timer); - - /* reprogram the RAR[0] in case user changed it. */ - ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); - - /* Get the latest mac address, User can use a LAA */ - bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr, - IXGBE_ETH_LENGTH_OF_ADDRESS); - ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1); - hw->addr_ctrl.rar_used_count = 1; - - /* Prepare transmit descriptors and buffers */ - if (ixv_setup_transmit_structures(adapter)) { - device_printf(dev,"Could not setup transmit structures\n"); - ixv_stop(adapter); - return; - } - - ixgbe_reset_hw(hw); - ixv_initialize_transmit_units(adapter); - - /* Setup Multicast table */ - ixv_set_multi(adapter); - - /* - ** Determine the correct mbuf pool - ** for doing jumbo/headersplit - */ - if (ifp->if_mtu > ETHERMTU) - adapter->rx_mbuf_sz = MJUMPAGESIZE; - else - adapter->rx_mbuf_sz = MCLBYTES; - - /* Prepare receive descriptors and buffers */ - if (ixv_setup_receive_structures(adapter)) { - device_printf(dev,"Could not setup receive structures\n"); - ixv_stop(adapter); - return; - } - - /* Configure RX settings */ - ixv_initialize_receive_units(adapter); - - /* Enable Enhanced MSIX mode */ - gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE); - gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME; - gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD; - IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); - - /* Set the various hardware offload abilities */ - ifp->if_hwassist = 0; - if (ifp->if_capenable & IFCAP_TSO4) - ifp->if_hwassist |= CSUM_TSO; - if (ifp->if_capenable & IFCAP_TXCSUM) { - ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP); -#if __FreeBSD_version >= 800000 - ifp->if_hwassist |= CSUM_SCTP; -#endif - } - - /* Set MTU size */ - if (ifp->if_mtu > ETHERMTU) { - mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); - mhadd &= ~IXGBE_MHADD_MFS_MASK; - mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT; - IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); - } - - /* Set up VLAN offload and filter */ - ixv_setup_vlan_support(adapter); - - callout_reset(&adapter->timer, hz, ixv_local_timer, adapter); - - /* Set up MSI/X routing */ - ixv_configure_ivars(adapter); - - /* Set up auto-mask */ - IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE); - - /* Set moderation on the Link interrupt */ - IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->mbxvec), IXV_LINK_ITR); - - /* Stats init */ - ixv_init_stats(adapter); - - /* Config/Enable Link */ - ixv_config_link(adapter); - - /* And now turn on interrupts */ - ixv_enable_intr(adapter); - - /* Now inform the stack we're ready */ - ifp->if_drv_flags |= IFF_DRV_RUNNING; - ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; - - return; -} - -static void -ixv_init(void *arg) -{ - struct adapter *adapter = arg; - - IXV_CORE_LOCK(adapter); - ixv_init_locked(adapter); - IXV_CORE_UNLOCK(adapter); - return; -} - - -/* -** -** MSIX Interrupt Handlers and Tasklets -** -*/ - -static inline void -ixv_enable_queue(struct adapter *adapter, u32 vector) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 queue = 1 << vector; - u32 mask; - - mask = (IXGBE_EIMS_RTX_QUEUE & queue); - 
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); -} - -static inline void -ixv_disable_queue(struct adapter *adapter, u32 vector) -{ - struct ixgbe_hw *hw = &adapter->hw; - u64 queue = (u64)(1 << vector); - u32 mask; - - mask = (IXGBE_EIMS_RTX_QUEUE & queue); - IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask); -} - -static inline void -ixv_rearm_queues(struct adapter *adapter, u64 queues) -{ - u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask); -} - - -static void -ixv_handle_que(void *context, int pending) -{ - struct ix_queue *que = context; - struct adapter *adapter = que->adapter; - struct tx_ring *txr = que->txr; - struct ifnet *ifp = adapter->ifp; - bool more; - - if (ifp->if_drv_flags & IFF_DRV_RUNNING) { - more = ixv_rxeof(que, adapter->rx_process_limit); - IXV_TX_LOCK(txr); - ixv_txeof(txr); -#if __FreeBSD_version >= 800000 - if (!drbr_empty(ifp, txr->br)) - ixv_mq_start_locked(ifp, txr, NULL); -#else - if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) - ixv_start_locked(txr, ifp); -#endif - IXV_TX_UNLOCK(txr); - if (more) { - taskqueue_enqueue(que->tq, &que->que_task); - return; - } - } - - /* Reenable this interrupt */ - ixv_enable_queue(adapter, que->msix); - return; -} - -/********************************************************************* - * - * MSI Queue Interrupt Service routine - * - **********************************************************************/ -void -ixv_msix_que(void *arg) -{ - struct ix_queue *que = arg; - struct adapter *adapter = que->adapter; - struct tx_ring *txr = que->txr; - struct rx_ring *rxr = que->rxr; - bool more_tx, more_rx; - u32 newitr = 0; - - ixv_disable_queue(adapter, que->msix); - ++que->irqs; - - more_rx = ixv_rxeof(que, adapter->rx_process_limit); - - IXV_TX_LOCK(txr); - more_tx = ixv_txeof(txr); - /* - ** Make certain that if the stack - ** has anything queued the task gets - ** scheduled to handle it. - */ -#if __FreeBSD_version < 800000 - if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd)) -#else - if (!drbr_empty(adapter->ifp, txr->br)) -#endif - more_tx = 1; - IXV_TX_UNLOCK(txr); - - more_rx = ixv_rxeof(que, adapter->rx_process_limit); - - /* Do AIM now? */ - - if (ixv_enable_aim == FALSE) - goto no_calc; - /* - ** Do Adaptive Interrupt Moderation: - ** - Write out last calculated setting - ** - Calculate based on average size over - ** the last interval. 
- */ - if (que->eitr_setting) - IXGBE_WRITE_REG(&adapter->hw, - IXGBE_VTEITR(que->msix), - que->eitr_setting); - - que->eitr_setting = 0; - - /* Idle, do nothing */ - if ((txr->bytes == 0) && (rxr->bytes == 0)) - goto no_calc; - - if ((txr->bytes) && (txr->packets)) - newitr = txr->bytes/txr->packets; - if ((rxr->bytes) && (rxr->packets)) - newitr = max(newitr, - (rxr->bytes / rxr->packets)); - newitr += 24; /* account for hardware frame, crc */ - - /* set an upper boundary */ - newitr = min(newitr, 3000); - - /* Be nice to the mid range */ - if ((newitr > 300) && (newitr < 1200)) - newitr = (newitr / 3); - else - newitr = (newitr / 2); - - newitr |= newitr << 16; - - /* save for next interrupt */ - que->eitr_setting = newitr; - - /* Reset state */ - txr->bytes = 0; - txr->packets = 0; - rxr->bytes = 0; - rxr->packets = 0; - -no_calc: - if (more_tx || more_rx) - taskqueue_enqueue(que->tq, &que->que_task); - else /* Reenable this interrupt */ - ixv_enable_queue(adapter, que->msix); - return; -} - -static void -ixv_msix_mbx(void *arg) -{ - struct adapter *adapter = arg; - struct ixgbe_hw *hw = &adapter->hw; - u32 reg; - - ++adapter->mbx_irq; - - /* First get the cause */ - reg = IXGBE_READ_REG(hw, IXGBE_VTEICS); - /* Clear interrupt with write */ - IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg); - - /* Link status change */ - if (reg & IXGBE_EICR_LSC) - taskqueue_enqueue(adapter->tq, &adapter->mbx_task); - - IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER); - return; -} - -/********************************************************************* - * - * Media Ioctl callback - * - * This routine is called whenever the user queries the status of - * the interface using ifconfig. - * - **********************************************************************/ -static void -ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr) -{ - struct adapter *adapter = ifp->if_softc; - - INIT_DEBUGOUT("ixv_media_status: begin"); - IXV_CORE_LOCK(adapter); - ixv_update_link_status(adapter); - - ifmr->ifm_status = IFM_AVALID; - ifmr->ifm_active = IFM_ETHER; - - if (!adapter->link_active) { - IXV_CORE_UNLOCK(adapter); - return; - } - - ifmr->ifm_status |= IFM_ACTIVE; - - switch (adapter->link_speed) { - case IXGBE_LINK_SPEED_1GB_FULL: - ifmr->ifm_active |= IFM_1000_T | IFM_FDX; - break; - case IXGBE_LINK_SPEED_10GB_FULL: - ifmr->ifm_active |= IFM_FDX; - break; - } - - IXV_CORE_UNLOCK(adapter); - - return; -} - -/********************************************************************* - * - * Media Ioctl callback - * - * This routine is called when the user changes speed/duplex using - * media/mediopt option with ifconfig. - * - **********************************************************************/ -static int -ixv_media_change(struct ifnet * ifp) -{ - struct adapter *adapter = ifp->if_softc; - struct ifmedia *ifm = &adapter->media; - - INIT_DEBUGOUT("ixv_media_change: begin"); - - if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) - return (EINVAL); - - switch (IFM_SUBTYPE(ifm->ifm_media)) { - case IFM_AUTO: - break; - default: - device_printf(adapter->dev, "Only auto media type\n"); - return (EINVAL); - } - - return (0); -} - -/********************************************************************* - * - * This routine maps the mbufs to tx descriptors, allowing the - * TX engine to transmit the packets. 
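[annotation] The AIM block in ixv_msix_que above turns the traffic seen since the last interrupt into the next throttle value: take the larger average frame size of the two rings, add 24 bytes for framing and CRC, clamp at 3000, soften the mid range, and mirror the result into the upper 16 bits as the code does before the value is written to VTEITR. A runnable model of that arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t
    aim_newitr(uint32_t txb, uint32_t txp, uint32_t rxb, uint32_t rxp)
    {
            uint32_t newitr = 0;

            if (txb && txp)
                    newitr = txb / txp;            /* avg TX frame size */
            if (rxb && rxp && (rxb / rxp) > newitr)
                    newitr = rxb / rxp;            /* avg RX frame size */
            newitr += 24;                          /* framing + CRC */
            if (newitr > 3000)                     /* upper bound */
                    newitr = 3000;
            if (newitr > 300 && newitr < 1200)     /* be nice to the mid range */
                    newitr /= 3;
            else
                    newitr /= 2;
            return (newitr | (newitr << 16));
    }

    int main(void)
    {
            /* 64 x 1500-byte frames: (1500 + 24) / 2 = 762 = 0x2fa */
            printf("0x%08x\n", aim_newitr(96000, 64, 0, 0)); /* 0x02fa02fa */
            return (0);
    }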
- * - return 0 on success, positive on failure - * - **********************************************************************/ - -static int -ixv_xmit(struct tx_ring *txr, struct mbuf **m_headp) -{ - struct adapter *adapter = txr->adapter; - u32 olinfo_status = 0, cmd_type_len; - u32 paylen = 0; - int i, j, error, nsegs; - int first, last = 0; - struct mbuf *m_head; - bus_dma_segment_t segs[32]; - bus_dmamap_t map; - struct ixv_tx_buf *txbuf, *txbuf_mapped; - union ixgbe_adv_tx_desc *txd = NULL; - - m_head = *m_headp; - - /* Basic descriptor defines */ - cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA | - IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT); - - if (m_head->m_flags & M_VLANTAG) - cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE; - - /* - * Important to capture the first descriptor - * used because it will contain the index of - * the one we tell the hardware to report back - */ - first = txr->next_avail_desc; - txbuf = &txr->tx_buffers[first]; - txbuf_mapped = txbuf; - map = txbuf->map; - - /* - * Map the packet for DMA. - */ - error = bus_dmamap_load_mbuf_sg(txr->txtag, map, - *m_headp, segs, &nsegs, BUS_DMA_NOWAIT); - - if (error == EFBIG) { - struct mbuf *m; - - m = m_defrag(*m_headp, M_NOWAIT); - if (m == NULL) { - adapter->mbuf_defrag_failed++; - m_freem(*m_headp); - *m_headp = NULL; - return (ENOBUFS); - } - *m_headp = m; - - /* Try it again */ - error = bus_dmamap_load_mbuf_sg(txr->txtag, map, - *m_headp, segs, &nsegs, BUS_DMA_NOWAIT); - - if (error == ENOMEM) { - adapter->no_tx_dma_setup++; - return (error); - } else if (error != 0) { - adapter->no_tx_dma_setup++; - m_freem(*m_headp); - *m_headp = NULL; - return (error); - } - } else if (error == ENOMEM) { - adapter->no_tx_dma_setup++; - return (error); - } else if (error != 0) { - adapter->no_tx_dma_setup++; - m_freem(*m_headp); - *m_headp = NULL; - return (error); - } - - /* Make certain there are enough descriptors */ - if (nsegs > txr->tx_avail - 2) { - txr->no_desc_avail++; - error = ENOBUFS; - goto xmit_fail; - } - m_head = *m_headp; - - /* - ** Set up the appropriate offload context - ** this becomes the first descriptor of - ** a packet. 
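[annotation] The mapping logic at the top of ixv_xmit above is the canonical busdma transmit pattern: attempt bus_dmamap_load_mbuf_sg(), and if the chain spans too many segments (EFBIG), collapse it once with m_defrag() and retry; any other error, or a second failure, drops the packet. A toy but runnable model of just that control flow, where a "chain" is reduced to a segment count and "defrag" always collapses it to one segment:

    #include <errno.h>
    #include <stdio.h>

    static int load_chain(int nsegs, int max) { return (nsegs > max ? EFBIG : 0); }
    static int defrag(int nsegs) { (void)nsegs; return (1); }

    /* One defrag retry on EFBIG; give up on anything else. */
    static int
    map_for_tx(int nsegs, int max)
    {
            int error = load_chain(nsegs, max);

            if (error == EFBIG) {
                    nsegs = defrag(nsegs);          /* m_defrag() in the driver */
                    error = load_chain(nsegs, max); /* second and last attempt */
            }
            return (error);
    }

    int main(void)
    {
            printf("%d %d\n", map_for_tx(40, 32), map_for_tx(8, 32)); /* 0 0 */
            return (0);
    }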
- */ - if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { - if (ixv_tso_setup(txr, m_head, &paylen)) { - cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; - olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8; - olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8; - olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT; - ++adapter->tso_tx; - } else - return (ENXIO); - } else if (ixv_tx_ctx_setup(txr, m_head)) - olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8; - - /* Record payload length */ - if (paylen == 0) - olinfo_status |= m_head->m_pkthdr.len << - IXGBE_ADVTXD_PAYLEN_SHIFT; - - i = txr->next_avail_desc; - for (j = 0; j < nsegs; j++) { - bus_size_t seglen; - bus_addr_t segaddr; - - txbuf = &txr->tx_buffers[i]; - txd = &txr->tx_base[i]; - seglen = segs[j].ds_len; - segaddr = htole64(segs[j].ds_addr); - - txd->read.buffer_addr = segaddr; - txd->read.cmd_type_len = htole32(txr->txd_cmd | - cmd_type_len |seglen); - txd->read.olinfo_status = htole32(olinfo_status); - last = i; /* descriptor that will get completion IRQ */ - - if (++i == adapter->num_tx_desc) - i = 0; - - txbuf->m_head = NULL; - txbuf->eop_index = -1; - } - - txd->read.cmd_type_len |= - htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS); - txr->tx_avail -= nsegs; - txr->next_avail_desc = i; - - txbuf->m_head = m_head; - txr->tx_buffers[first].map = txbuf->map; - txbuf->map = map; - bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE); - - /* Set the index of the descriptor that will be marked done */ - txbuf = &txr->tx_buffers[first]; - txbuf->eop_index = last; - - bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, - BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); - /* - * Advance the Transmit Descriptor Tail (Tdt), this tells the - * hardware that this frame is available to transmit. - */ - ++txr->total_packets; - IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(txr->me), i); - - return (0); - -xmit_fail: - bus_dmamap_unload(txr->txtag, txbuf->map); - return (error); - -} - - -/********************************************************************* - * Multicast Update - * - * This routine is called whenever multicast address list is updated. - * - **********************************************************************/ -#define IXGBE_RAR_ENTRIES 16 - -static void -ixv_set_multi(struct adapter *adapter) -{ - u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS]; - u8 *update_ptr; - struct ifmultiaddr *ifma; - int mcnt = 0; - struct ifnet *ifp = adapter->ifp; - - IOCTL_DEBUGOUT("ixv_set_multi: begin"); - -#if __FreeBSD_version < 800000 - IF_ADDR_LOCK(ifp); -#else - if_maddr_rlock(ifp); -#endif - TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { - if (ifma->ifma_addr->sa_family != AF_LINK) - continue; - bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), - &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS], - IXGBE_ETH_LENGTH_OF_ADDRESS); - mcnt++; - } -#if __FreeBSD_version < 800000 - IF_ADDR_UNLOCK(ifp); -#else - if_maddr_runlock(ifp); -#endif - - update_ptr = mta; - - ixgbe_update_mc_addr_list(&adapter->hw, - update_ptr, mcnt, ixv_mc_array_itr, TRUE); - - return; -} - -/* - * This is an iterator function now needed by the multicast - * shared code. It simply feeds the shared code routine the - * addresses in the array of ixv_set_multi() one by one. 
- */ -static u8 * -ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq) -{ - u8 *addr = *update_ptr; - u8 *newptr; - *vmdq = 0; - - newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS; - *update_ptr = newptr; - return addr; -} - -/********************************************************************* - * Timer routine - * - * This routine checks for link status,updates statistics, - * and runs the watchdog check. - * - **********************************************************************/ - -static void -ixv_local_timer(void *arg) -{ - struct adapter *adapter = arg; - device_t dev = adapter->dev; - struct tx_ring *txr = adapter->tx_rings; - int i; - - mtx_assert(&adapter->core_mtx, MA_OWNED); - - ixv_update_link_status(adapter); - - /* Stats Update */ - ixv_update_stats(adapter); - - /* - * If the interface has been paused - * then don't do the watchdog check - */ - if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF) - goto out; - /* - ** Check for time since any descriptor was cleaned - */ - for (i = 0; i < adapter->num_queues; i++, txr++) { - IXV_TX_LOCK(txr); - if (txr->watchdog_check == FALSE) { - IXV_TX_UNLOCK(txr); - continue; - } - if ((ticks - txr->watchdog_time) > IXV_WATCHDOG) - goto hung; - IXV_TX_UNLOCK(txr); - } -out: - ixv_rearm_queues(adapter, adapter->que_mask); - callout_reset(&adapter->timer, hz, ixv_local_timer, adapter); - return; - -hung: - device_printf(adapter->dev, "Watchdog timeout -- resetting\n"); - device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me, - IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDH(i)), - IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDT(i))); - device_printf(dev,"TX(%d) desc avail = %d," - "Next TX to Clean = %d\n", - txr->me, txr->tx_avail, txr->next_to_clean); - adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING; - adapter->watchdog_events++; - IXV_TX_UNLOCK(txr); - ixv_init_locked(adapter); -} - -/* -** Note: this routine updates the OS on the link state -** the real check of the hardware only happens with -** a link interrupt. -*/ -static void -ixv_update_link_status(struct adapter *adapter) -{ - struct ifnet *ifp = adapter->ifp; - struct tx_ring *txr = adapter->tx_rings; - device_t dev = adapter->dev; - - - if (adapter->link_up){ - if (adapter->link_active == FALSE) { - if (bootverbose) - device_printf(dev,"Link is up %d Gbps %s \n", - ((adapter->link_speed == 128)? 10:1), - "Full Duplex"); - adapter->link_active = TRUE; - if_link_state_change(ifp, LINK_STATE_UP); - } - } else { /* Link down */ - if (adapter->link_active == TRUE) { - if (bootverbose) - device_printf(dev,"Link is Down\n"); - if_link_state_change(ifp, LINK_STATE_DOWN); - adapter->link_active = FALSE; - for (int i = 0; i < adapter->num_queues; - i++, txr++) - txr->watchdog_check = FALSE; - } - } - - return; -} - - -/********************************************************************* - * - * This routine disables all traffic on the adapter by issuing a - * global reset on the MAC and deallocates TX/RX buffers. 
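[annotation] The hang test in ixv_local_timer above, (ticks - txr->watchdog_time) > IXV_WATCHDOG, subtracts first and compares the difference, which is what keeps it correct when the tick counter wraps. A runnable model; unsigned arithmetic is used here so the wrap behavior is well defined (the kernel's ticks is an int, where the same idiom is conventional):

    #include <stdio.h>

    static int
    watchdog_expired(unsigned now, unsigned last, unsigned limit)
    {
            return ((now - last) > limit);
    }

    int main(void)
    {
            /* Counter wrapped between samples: only 11 ticks elapsed. */
            printf("%d\n", watchdog_expired(5u, 4294967290u, 20u)); /* 0 */
            printf("%d\n", watchdog_expired(1000u, 0u, 10u));       /* 1 */
            return (0);
    }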
- * - **********************************************************************/ - -static void -ixv_stop(void *arg) -{ - struct ifnet *ifp; - struct adapter *adapter = arg; - struct ixgbe_hw *hw = &adapter->hw; - ifp = adapter->ifp; - - mtx_assert(&adapter->core_mtx, MA_OWNED); - - INIT_DEBUGOUT("ixv_stop: begin\n"); - ixv_disable_intr(adapter); - - /* Tell the stack that the interface is no longer active */ - ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); - - ixgbe_reset_hw(hw); - adapter->hw.adapter_stopped = FALSE; - ixgbe_stop_adapter(hw); - callout_stop(&adapter->timer); - - /* reprogram the RAR[0] in case user changed it. */ - ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); - - return; -} - - -/********************************************************************* - * - * Determine hardware revision. - * - **********************************************************************/ -static void -ixv_identify_hardware(struct adapter *adapter) -{ - device_t dev = adapter->dev; - u16 pci_cmd_word; - - /* - ** Make sure BUSMASTER is set, on a VM under - ** KVM it may not be and will break things. - */ - pci_enable_busmaster(dev); - pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2); - - /* Save off the information about this board */ - adapter->hw.vendor_id = pci_get_vendor(dev); - adapter->hw.device_id = pci_get_device(dev); - adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1); - adapter->hw.subsystem_vendor_id = - pci_read_config(dev, PCIR_SUBVEND_0, 2); - adapter->hw.subsystem_device_id = - pci_read_config(dev, PCIR_SUBDEV_0, 2); - - return; -} - -/********************************************************************* - * - * Setup MSIX Interrupt resources and handlers - * - **********************************************************************/ -static int -ixv_allocate_msix(struct adapter *adapter) -{ - device_t dev = adapter->dev; - struct ix_queue *que = adapter->queues; - int error, rid, vector = 0; - - for (int i = 0; i < adapter->num_queues; i++, vector++, que++) { - rid = vector + 1; - que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, - RF_SHAREABLE | RF_ACTIVE); - if (que->res == NULL) { - device_printf(dev,"Unable to allocate" - " bus resource: que interrupt [%d]\n", vector); - return (ENXIO); - } - /* Set the handler function */ - error = bus_setup_intr(dev, que->res, - INTR_TYPE_NET | INTR_MPSAFE, NULL, - ixv_msix_que, que, &que->tag); - if (error) { - que->res = NULL; - device_printf(dev, "Failed to register QUE handler"); - return (error); - } -#if __FreeBSD_version >= 800504 - bus_describe_intr(dev, que->res, que->tag, "que %d", i); -#endif - que->msix = vector; - adapter->que_mask |= (u64)(1 << que->msix); - /* - ** Bind the msix vector, and thus the - ** ring to the corresponding cpu. 
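[annotation] One nit in the vector loop above: adapter->que_mask |= (u64)(1 << que->msix) shifts a plain int and only then widens, so the shift is undefined once the vector index reaches 31. That is harmless here, since the VF never has more than a couple of queue vectors, but the robust spelling widens first. A runnable illustration of the safe form:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int vec = 31;
            /* (uint64_t)(1 << vec) would overflow the int before the cast. */
            uint64_t mask = (uint64_t)1 << vec;   /* widen, then shift */
            printf("0x%llx\n", (unsigned long long)mask);
            return (0);
    }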
- */ - if (adapter->num_queues > 1) - bus_bind_intr(dev, que->res, i); - - TASK_INIT(&que->que_task, 0, ixv_handle_que, que); - que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT, - taskqueue_thread_enqueue, &que->tq); - taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que", - device_get_nameunit(adapter->dev)); - } - - /* and Mailbox */ - rid = vector + 1; - adapter->res = bus_alloc_resource_any(dev, - SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); - if (!adapter->res) { - device_printf(dev,"Unable to allocate" - " bus resource: MBX interrupt [%d]\n", rid); - return (ENXIO); - } - /* Set the mbx handler function */ - error = bus_setup_intr(dev, adapter->res, - INTR_TYPE_NET | INTR_MPSAFE, NULL, - ixv_msix_mbx, adapter, &adapter->tag); - if (error) { - adapter->res = NULL; - device_printf(dev, "Failed to register LINK handler"); - return (error); - } -#if __FreeBSD_version >= 800504 - bus_describe_intr(dev, adapter->res, adapter->tag, "mbx"); -#endif - adapter->mbxvec = vector; - /* Tasklets for Mailbox */ - TASK_INIT(&adapter->mbx_task, 0, ixv_handle_mbx, adapter); - adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT, - taskqueue_thread_enqueue, &adapter->tq); - taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq", - device_get_nameunit(adapter->dev)); - /* - ** Due to a broken design QEMU will fail to properly - ** enable the guest for MSIX unless the vectors in - ** the table are all set up, so we must rewrite the - ** ENABLE in the MSIX control register again at this - ** point to cause it to successfully initialize us. - */ - if (adapter->hw.mac.type == ixgbe_mac_82599_vf) { - int msix_ctrl; - pci_find_cap(dev, PCIY_MSIX, &rid); - rid += PCIR_MSIX_CTRL; - msix_ctrl = pci_read_config(dev, rid, 2); - msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE; - pci_write_config(dev, rid, msix_ctrl, 2); - } - - return (0); -} - -/* - * Setup MSIX resources, note that the VF - * device MUST use MSIX, there is no fallback. - */ -static int -ixv_setup_msix(struct adapter *adapter) -{ - device_t dev = adapter->dev; - int rid, want; - - - /* First try MSI/X */ - rid = PCIR_BAR(3); - adapter->msix_mem = bus_alloc_resource_any(dev, - SYS_RES_MEMORY, &rid, RF_ACTIVE); - if (adapter->msix_mem == NULL) { - device_printf(adapter->dev, - "Unable to map MSIX table \n"); - goto out; - } - - /* - ** Want two vectors: one for a queue, - ** plus an additional for mailbox. 
- */ - want = 2; - if ((pci_alloc_msix(dev, &want) == 0) && (want == 2)) { - device_printf(adapter->dev, - "Using MSIX interrupts with %d vectors\n", want); - return (want); - } - /* Release in case alloc was insufficient */ - pci_release_msi(dev); -out: - if (adapter->msix_mem != NULL) { - bus_release_resource(dev, SYS_RES_MEMORY, - rid, adapter->msix_mem); - adapter->msix_mem = NULL; - } - device_printf(adapter->dev,"MSIX config error\n"); - return (ENXIO); -} - - -static int -ixv_allocate_pci_resources(struct adapter *adapter) -{ - int rid; - device_t dev = adapter->dev; - - rid = PCIR_BAR(0); - adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, - &rid, RF_ACTIVE); - - if (!(adapter->pci_mem)) { - device_printf(dev,"Unable to allocate bus resource: memory\n"); - return (ENXIO); - } - - adapter->osdep.mem_bus_space_tag = - rman_get_bustag(adapter->pci_mem); - adapter->osdep.mem_bus_space_handle = - rman_get_bushandle(adapter->pci_mem); - adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle; - - adapter->num_queues = 1; - adapter->hw.back = &adapter->osdep; - - /* - ** Now setup MSI/X, should - ** return us the number of - ** configured vectors. - */ - adapter->msix = ixv_setup_msix(adapter); - if (adapter->msix == ENXIO) - return (ENXIO); - else - return (0); -} - -static void -ixv_free_pci_resources(struct adapter * adapter) -{ - struct ix_queue *que = adapter->queues; - device_t dev = adapter->dev; - int rid, memrid; - - memrid = PCIR_BAR(MSIX_BAR); - - /* - ** There is a slight possibility of a failure mode - ** in attach that will result in entering this function - ** before interrupt resources have been initialized, and - ** in that case we do not want to execute the loops below - ** We can detect this reliably by the state of the adapter - ** res pointer. - */ - if (adapter->res == NULL) - goto mem; - - /* - ** Release all msix queue resources: - */ - for (int i = 0; i < adapter->num_queues; i++, que++) { - rid = que->msix + 1; - if (que->tag != NULL) { - bus_teardown_intr(dev, que->res, que->tag); - que->tag = NULL; - } - if (que->res != NULL) - bus_release_resource(dev, SYS_RES_IRQ, rid, que->res); - } - - - /* Clean the Legacy or Link interrupt last */ - if (adapter->mbxvec) /* we are doing MSIX */ - rid = adapter->mbxvec + 1; - else - (adapter->msix != 0) ? (rid = 1):(rid = 0); - - if (adapter->tag != NULL) { - bus_teardown_intr(dev, adapter->res, adapter->tag); - adapter->tag = NULL; - } - if (adapter->res != NULL) - bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res); - -mem: - if (adapter->msix) - pci_release_msi(dev); - - if (adapter->msix_mem != NULL) - bus_release_resource(dev, SYS_RES_MEMORY, - memrid, adapter->msix_mem); - - if (adapter->pci_mem != NULL) - bus_release_resource(dev, SYS_RES_MEMORY, - PCIR_BAR(0), adapter->pci_mem); - - return; -} - -/********************************************************************* - * - * Setup networking device structure and register an interface. 
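[annotation] ixv_allocate_pci_resources above stores the BAR0 bus_space tag and handle in the osdep and points hw.hw_addr at the handle; the shared code never dereferences hw_addr directly, because the register accessors in this driver family are thin macros over bus_space. The sketch below is a reconstruction of that pattern from ixgbe_osdep.h conventions, not code that appears in this diff, so treat the exact spelling as an assumption:

    /* Accessors resolve hw->back to the osdep and go through bus_space. */
    #define IXGBE_READ_REG(hw, reg)                                           \
            bus_space_read_4(((struct ixgbe_osdep *)(hw)->back)->mem_bus_space_tag, \
                ((struct ixgbe_osdep *)(hw)->back)->mem_bus_space_handle, (reg))

    #define IXGBE_WRITE_REG(hw, reg, value)                                   \
            bus_space_write_4(((struct ixgbe_osdep *)(hw)->back)->mem_bus_space_tag, \
                ((struct ixgbe_osdep *)(hw)->back)->mem_bus_space_handle,     \
                (reg), (value))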
- * - **********************************************************************/ -static void -ixv_setup_interface(device_t dev, struct adapter *adapter) -{ - struct ifnet *ifp; - - INIT_DEBUGOUT("ixv_setup_interface: begin"); - - ifp = adapter->ifp = if_alloc(IFT_ETHER); - if (ifp == NULL) - panic("%s: can not if_alloc()\n", device_get_nameunit(dev)); - if_initname(ifp, device_get_name(dev), device_get_unit(dev)); - ifp->if_baudrate = 1000000000; - ifp->if_init = ixv_init; - ifp->if_softc = adapter; - ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; - ifp->if_ioctl = ixv_ioctl; -#if __FreeBSD_version >= 800000 - ifp->if_transmit = ixv_mq_start; - ifp->if_qflush = ixv_qflush; -#else - ifp->if_start = ixv_start; -#endif - ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2; - - ether_ifattach(ifp, adapter->hw.mac.addr); - - adapter->max_frame_size = - ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; - - /* - * Tell the upper layer(s) we support long frames. - */ - ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); - - ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM; - ifp->if_capabilities |= IFCAP_JUMBO_MTU; - ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING - | IFCAP_VLAN_HWTSO - | IFCAP_VLAN_MTU; - ifp->if_capenable = ifp->if_capabilities; - - /* Don't enable LRO by default */ - ifp->if_capabilities |= IFCAP_LRO; - - /* - * Specify the media types supported by this adapter and register - * callbacks to update media and link information - */ - ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change, - ixv_media_status); - ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL); - ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); - ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); - - return; -} - -static void -ixv_config_link(struct adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 autoneg, err = 0; - - if (hw->mac.ops.check_link) - err = hw->mac.ops.check_link(hw, &autoneg, - &adapter->link_up, FALSE); - if (err) - goto out; - - if (hw->mac.ops.setup_link) - err = hw->mac.ops.setup_link(hw, - autoneg, adapter->link_up); -out: - return; -} - -/******************************************************************** - * Manage DMA'able memory. 
- *******************************************************************/ -static void -ixv_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error) -{ - if (error) - return; - *(bus_addr_t *) arg = segs->ds_addr; - return; -} - -static int -ixv_dma_malloc(struct adapter *adapter, bus_size_t size, - struct ixv_dma_alloc *dma, int mapflags) -{ - device_t dev = adapter->dev; - int r; - - r = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */ - DBA_ALIGN, 0, /* alignment, bounds */ - BUS_SPACE_MAXADDR, /* lowaddr */ - BUS_SPACE_MAXADDR, /* highaddr */ - NULL, NULL, /* filter, filterarg */ - size, /* maxsize */ - 1, /* nsegments */ - size, /* maxsegsize */ - BUS_DMA_ALLOCNOW, /* flags */ - NULL, /* lockfunc */ - NULL, /* lockfuncarg */ - &dma->dma_tag); - if (r != 0) { - device_printf(dev,"ixv_dma_malloc: bus_dma_tag_create failed; " - "error %u\n", r); - goto fail_0; - } - r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr, - BUS_DMA_NOWAIT, &dma->dma_map); - if (r != 0) { - device_printf(dev,"ixv_dma_malloc: bus_dmamem_alloc failed; " - "error %u\n", r); - goto fail_1; - } - r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, - size, - ixv_dmamap_cb, - &dma->dma_paddr, - mapflags | BUS_DMA_NOWAIT); - if (r != 0) { - device_printf(dev,"ixv_dma_malloc: bus_dmamap_load failed; " - "error %u\n", r); - goto fail_2; - } - dma->dma_size = size; - return (0); -fail_2: - bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); -fail_1: - bus_dma_tag_destroy(dma->dma_tag); -fail_0: - dma->dma_map = NULL; - dma->dma_tag = NULL; - return (r); -} - -static void -ixv_dma_free(struct adapter *adapter, struct ixv_dma_alloc *dma) -{ - bus_dmamap_sync(dma->dma_tag, dma->dma_map, - BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); - bus_dmamap_unload(dma->dma_tag, dma->dma_map); - bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); - bus_dma_tag_destroy(dma->dma_tag); -} - - -/********************************************************************* - * - * Allocate memory for the transmit and receive rings, and then - * the descriptors associated with each, called only once at attach. 
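[annotation] ixv_dma_malloc above wraps the usual three busdma steps (create a tag, allocate DMA-safe memory, load the map with a callback that captures the bus address) in a single call. A kernel-context usage fragment following the contract of the helpers above; it is not standalone, and the 4 KB size and the ring variable are hypothetical:

    struct ixv_dma_alloc ring;

    if (ixv_dma_malloc(adapter, 4096, &ring, BUS_DMA_NOWAIT) != 0)
            return (ENOMEM);

    /* CPU access goes through dma_vaddr; the device is given dma_paddr,
     * which ixv_dmamap_cb filled in during bus_dmamap_load. */
    bzero(ring.dma_vaddr, 4096);
    IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDBAL(0),
        ring.dma_paddr & 0x00000000ffffffffULL);

    /* Teardown unloads the map, frees the memory, destroys the tag. */
    ixv_dma_free(adapter, &ring);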
- * - **********************************************************************/ -static int -ixv_allocate_queues(struct adapter *adapter) -{ - device_t dev = adapter->dev; - struct ix_queue *que; - struct tx_ring *txr; - struct rx_ring *rxr; - int rsize, tsize, error = 0; - int txconf = 0, rxconf = 0; - - /* First allocate the top level queue structs */ - if (!(adapter->queues = - (struct ix_queue *) malloc(sizeof(struct ix_queue) * - adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { - device_printf(dev, "Unable to allocate queue memory\n"); - error = ENOMEM; - goto fail; - } - - /* First allocate the TX ring struct memory */ - if (!(adapter->tx_rings = - (struct tx_ring *) malloc(sizeof(struct tx_ring) * - adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { - device_printf(dev, "Unable to allocate TX ring memory\n"); - error = ENOMEM; - goto tx_fail; - } - - /* Next allocate the RX */ - if (!(adapter->rx_rings = - (struct rx_ring *) malloc(sizeof(struct rx_ring) * - adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { - device_printf(dev, "Unable to allocate RX ring memory\n"); - error = ENOMEM; - goto rx_fail; - } - - /* For the ring itself */ - tsize = roundup2(adapter->num_tx_desc * - sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN); - - /* - * Now set up the TX queues, txconf is needed to handle the - * possibility that things fail midcourse and we need to - * undo memory gracefully - */ - for (int i = 0; i < adapter->num_queues; i++, txconf++) { - /* Set up some basics */ - txr = &adapter->tx_rings[i]; - txr->adapter = adapter; - txr->me = i; - - /* Initialize the TX side lock */ - snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)", - device_get_nameunit(dev), txr->me); - mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF); - - if (ixv_dma_malloc(adapter, tsize, - &txr->txdma, BUS_DMA_NOWAIT)) { - device_printf(dev, - "Unable to allocate TX Descriptor memory\n"); - error = ENOMEM; - goto err_tx_desc; - } - txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr; - bzero((void *)txr->tx_base, tsize); - - /* Now allocate transmit buffers for the ring */ - if (ixv_allocate_transmit_buffers(txr)) { - device_printf(dev, - "Critical Failure setting up transmit buffers\n"); - error = ENOMEM; - goto err_tx_desc; - } -#if __FreeBSD_version >= 800000 - /* Allocate a buf ring */ - txr->br = buf_ring_alloc(IXV_BR_SIZE, M_DEVBUF, - M_WAITOK, &txr->tx_mtx); - if (txr->br == NULL) { - device_printf(dev, - "Critical Failure setting up buf ring\n"); - error = ENOMEM; - goto err_tx_desc; - } -#endif - } - - /* - * Next the RX queues... 
- */ - rsize = roundup2(adapter->num_rx_desc * - sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN); - for (int i = 0; i < adapter->num_queues; i++, rxconf++) { - rxr = &adapter->rx_rings[i]; - /* Set up some basics */ - rxr->adapter = adapter; - rxr->me = i; - - /* Initialize the RX side lock */ - snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)", - device_get_nameunit(dev), rxr->me); - mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF); - - if (ixv_dma_malloc(adapter, rsize, - &rxr->rxdma, BUS_DMA_NOWAIT)) { - device_printf(dev, - "Unable to allocate RxDescriptor memory\n"); - error = ENOMEM; - goto err_rx_desc; - } - rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr; - bzero((void *)rxr->rx_base, rsize); - - /* Allocate receive buffers for the ring*/ - if (ixv_allocate_receive_buffers(rxr)) { - device_printf(dev, - "Critical Failure setting up receive buffers\n"); - error = ENOMEM; - goto err_rx_desc; - } - } - - /* - ** Finally set up the queue holding structs - */ - for (int i = 0; i < adapter->num_queues; i++) { - que = &adapter->queues[i]; - que->adapter = adapter; - que->txr = &adapter->tx_rings[i]; - que->rxr = &adapter->rx_rings[i]; - } - - return (0); - -err_rx_desc: - for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--) - ixv_dma_free(adapter, &rxr->rxdma); -err_tx_desc: - for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--) - ixv_dma_free(adapter, &txr->txdma); - free(adapter->rx_rings, M_DEVBUF); -rx_fail: - free(adapter->tx_rings, M_DEVBUF); -tx_fail: - free(adapter->queues, M_DEVBUF); -fail: - return (error); -} - - -/********************************************************************* - * - * Allocate memory for tx_buffer structures. The tx_buffer stores all - * the information needed to transmit a packet on the wire. This is - * called only once at attach, setup is done every reset. - * - **********************************************************************/ -static int -ixv_allocate_transmit_buffers(struct tx_ring *txr) -{ - struct adapter *adapter = txr->adapter; - device_t dev = adapter->dev; - struct ixv_tx_buf *txbuf; - int error, i; - - /* - * Setup DMA descriptor areas. - */ - if ((error = bus_dma_tag_create( - bus_get_dma_tag(adapter->dev), /* parent */ - 1, 0, /* alignment, bounds */ - BUS_SPACE_MAXADDR, /* lowaddr */ - BUS_SPACE_MAXADDR, /* highaddr */ - NULL, NULL, /* filter, filterarg */ - IXV_TSO_SIZE, /* maxsize */ - 32, /* nsegments */ - PAGE_SIZE, /* maxsegsize */ - 0, /* flags */ - NULL, /* lockfunc */ - NULL, /* lockfuncarg */ - &txr->txtag))) { - device_printf(dev,"Unable to allocate TX DMA tag\n"); - goto fail; - } - - if (!(txr->tx_buffers = - (struct ixv_tx_buf *) malloc(sizeof(struct ixv_tx_buf) * - adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) { - device_printf(dev, "Unable to allocate tx_buffer memory\n"); - error = ENOMEM; - goto fail; - } - - /* Create the descriptor buffer dma maps */ - txbuf = txr->tx_buffers; - for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) { - error = bus_dmamap_create(txr->txtag, 0, &txbuf->map); - if (error != 0) { - device_printf(dev, "Unable to create TX DMA map\n"); - goto fail; - } - } - - return 0; -fail: - /* We free all, it handles case where we are in the middle */ - ixv_free_transmit_structures(adapter); - return (error); -} - -/********************************************************************* - * - * Initialize a transmit ring. 
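[annotation] The txconf/rxconf counters in ixv_allocate_queues above are high-water marks: each records how many rings were fully set up, so the err_tx_desc/err_rx_desc labels can free exactly that many and no more. A runnable miniature of the same unwind idiom:

    #include <stdio.h>
    #include <stdlib.h>

    #define N 4

    static int
    setup_all(int fail_at)
    {
            void *ring[N];
            int conf = 0;

            for (int i = 0; i < N; i++, conf++) {
                    ring[i] = (i == fail_at) ? NULL : malloc(64);
                    if (ring[i] == NULL)
                            goto err;
            }
            for (int i = 0; i < N; i++)
                    free(ring[i]);
            return (0);
    err:
            while (conf-- > 0)              /* free only what was built */
                    free(ring[conf]);
            return (-1);
    }

    int main(void)
    {
            printf("%d %d\n", setup_all(2), setup_all(-1)); /* -1 0 */
            return (0);
    }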
- * - **********************************************************************/ -static void -ixv_setup_transmit_ring(struct tx_ring *txr) -{ - struct adapter *adapter = txr->adapter; - struct ixv_tx_buf *txbuf; - int i; - - /* Clear the old ring contents */ - IXV_TX_LOCK(txr); - bzero((void *)txr->tx_base, - (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc); - /* Reset indices */ - txr->next_avail_desc = 0; - txr->next_to_clean = 0; - - /* Free any existing tx buffers. */ - txbuf = txr->tx_buffers; - for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) { - if (txbuf->m_head != NULL) { - bus_dmamap_sync(txr->txtag, txbuf->map, - BUS_DMASYNC_POSTWRITE); - bus_dmamap_unload(txr->txtag, txbuf->map); - m_freem(txbuf->m_head); - txbuf->m_head = NULL; - } - /* Clear the EOP index */ - txbuf->eop_index = -1; - } - - /* Set number of descriptors available */ - txr->tx_avail = adapter->num_tx_desc; - - bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, - BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); - IXV_TX_UNLOCK(txr); -} - -/********************************************************************* - * - * Initialize all transmit rings. - * - **********************************************************************/ -static int -ixv_setup_transmit_structures(struct adapter *adapter) -{ - struct tx_ring *txr = adapter->tx_rings; - - for (int i = 0; i < adapter->num_queues; i++, txr++) - ixv_setup_transmit_ring(txr); - - return (0); -} - -/********************************************************************* - * - * Enable transmit unit. - * - **********************************************************************/ -static void -ixv_initialize_transmit_units(struct adapter *adapter) -{ - struct tx_ring *txr = adapter->tx_rings; - struct ixgbe_hw *hw = &adapter->hw; - - - for (int i = 0; i < adapter->num_queues; i++, txr++) { - u64 tdba = txr->txdma.dma_paddr; - u32 txctrl, txdctl; - - /* Set WTHRESH to 8, burst writeback */ - txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i)); - txdctl |= (8 << 16); - IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl); - /* Now enable */ - txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i)); - txdctl |= IXGBE_TXDCTL_ENABLE; - IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl); - - /* Set the HW Tx Head and Tail indices */ - IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0); - - /* Setup Transmit Descriptor Cmd Settings */ - txr->txd_cmd = IXGBE_TXD_CMD_IFCS; - txr->watchdog_check = FALSE; - - /* Set Ring parameters */ - IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i), - (tdba & 0x00000000ffffffffULL)); - IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32)); - IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i), - adapter->num_tx_desc * - sizeof(struct ixgbe_legacy_tx_desc)); - txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i)); - txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; - IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl); - break; - } - - return; -} - -/********************************************************************* - * - * Free all transmit rings. 
- * - **********************************************************************/ -static void -ixv_free_transmit_structures(struct adapter *adapter) -{ - struct tx_ring *txr = adapter->tx_rings; - - for (int i = 0; i < adapter->num_queues; i++, txr++) { - IXV_TX_LOCK(txr); - ixv_free_transmit_buffers(txr); - ixv_dma_free(adapter, &txr->txdma); - IXV_TX_UNLOCK(txr); - IXV_TX_LOCK_DESTROY(txr); - } - free(adapter->tx_rings, M_DEVBUF); -} - -/********************************************************************* - * - * Free transmit ring related data structures. - * - **********************************************************************/ -static void -ixv_free_transmit_buffers(struct tx_ring *txr) -{ - struct adapter *adapter = txr->adapter; - struct ixv_tx_buf *tx_buffer; - int i; - - INIT_DEBUGOUT("free_transmit_ring: begin"); - - if (txr->tx_buffers == NULL) - return; - - tx_buffer = txr->tx_buffers; - for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) { - if (tx_buffer->m_head != NULL) { - bus_dmamap_sync(txr->txtag, tx_buffer->map, - BUS_DMASYNC_POSTWRITE); - bus_dmamap_unload(txr->txtag, - tx_buffer->map); - m_freem(tx_buffer->m_head); - tx_buffer->m_head = NULL; - if (tx_buffer->map != NULL) { - bus_dmamap_destroy(txr->txtag, - tx_buffer->map); - tx_buffer->map = NULL; - } - } else if (tx_buffer->map != NULL) { - bus_dmamap_unload(txr->txtag, - tx_buffer->map); - bus_dmamap_destroy(txr->txtag, - tx_buffer->map); - tx_buffer->map = NULL; - } - } -#if __FreeBSD_version >= 800000 - if (txr->br != NULL) - buf_ring_free(txr->br, M_DEVBUF); -#endif - if (txr->tx_buffers != NULL) { - free(txr->tx_buffers, M_DEVBUF); - txr->tx_buffers = NULL; - } - if (txr->txtag != NULL) { - bus_dma_tag_destroy(txr->txtag); - txr->txtag = NULL; - } - return; -} - -/********************************************************************* - * - * Advanced Context Descriptor setup for VLAN or CSUM - * - **********************************************************************/ - -static bool -ixv_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp) -{ - struct adapter *adapter = txr->adapter; - struct ixgbe_adv_tx_context_desc *TXD; - struct ixv_tx_buf *tx_buffer; - u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0; - struct ether_vlan_header *eh; - struct ip *ip; - struct ip6_hdr *ip6; - int ehdrlen, ip_hlen = 0; - u16 etype; - u8 ipproto = 0; - bool offload = TRUE; - int ctxd = txr->next_avail_desc; - u16 vtag = 0; - - - if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0) - offload = FALSE; - - - tx_buffer = &txr->tx_buffers[ctxd]; - TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd]; - - /* - ** In advanced descriptors the vlan tag must - ** be placed into the descriptor itself. - */ - if (mp->m_flags & M_VLANTAG) { - vtag = htole16(mp->m_pkthdr.ether_vtag); - vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT); - } else if (offload == FALSE) - return FALSE; - - /* - * Determine where frame payload starts. - * Jump over vlan headers if already present, - * helpful for QinQ too. 
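
An illustrative aside on the comment above (standalone sketch, not driver code): the context-descriptor path below computes the Ethernet header length by checking for an 802.1Q encapsulation, which covers single-tagged frames and the outer tag of QinQ frames. The constants carry their standard FreeBSD values (14-byte Ethernet header, 4-byte VLAN encapsulation):

#include <stdio.h>

#define ETHER_HDR_LEN           14
#define ETHER_VLAN_ENCAP_LEN    4
#define ETHERTYPE_VLAN          0x8100

int
main(void)
{
    unsigned int encap_proto = ETHERTYPE_VLAN;  /* hypothetical frame */
    int ehdrlen;

    if (encap_proto == ETHERTYPE_VLAN)
        ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
    else
        ehdrlen = ETHER_HDR_LEN;
    printf("payload starts at byte %d\n", ehdrlen);
    return (0);
}
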
- */ - eh = mtod(mp, struct ether_vlan_header *); - if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { - etype = ntohs(eh->evl_proto); - ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; - } else { - etype = ntohs(eh->evl_encap_proto); - ehdrlen = ETHER_HDR_LEN; - } - - /* Set the ether header length */ - vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT; - - switch (etype) { - case ETHERTYPE_IP: - ip = (struct ip *)(mp->m_data + ehdrlen); - ip_hlen = ip->ip_hl << 2; - if (mp->m_len < ehdrlen + ip_hlen) - return (FALSE); - ipproto = ip->ip_p; - type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; - break; - case ETHERTYPE_IPV6: - ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); - ip_hlen = sizeof(struct ip6_hdr); - if (mp->m_len < ehdrlen + ip_hlen) - return (FALSE); - ipproto = ip6->ip6_nxt; - type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6; - break; - default: - offload = FALSE; - break; - } - - vlan_macip_lens |= ip_hlen; - type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; - - switch (ipproto) { - case IPPROTO_TCP: - if (mp->m_pkthdr.csum_flags & CSUM_TCP) - type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; - break; - - case IPPROTO_UDP: - if (mp->m_pkthdr.csum_flags & CSUM_UDP) - type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP; - break; - -#if __FreeBSD_version >= 800000 - case IPPROTO_SCTP: - if (mp->m_pkthdr.csum_flags & CSUM_SCTP) - type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP; - break; -#endif - default: - offload = FALSE; - break; - } - - /* Now copy bits into descriptor */ - TXD->vlan_macip_lens |= htole32(vlan_macip_lens); - TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl); - TXD->seqnum_seed = htole32(0); - TXD->mss_l4len_idx = htole32(0); - - tx_buffer->m_head = NULL; - tx_buffer->eop_index = -1; - - /* We've consumed the first desc, adjust counters */ - if (++ctxd == adapter->num_tx_desc) - ctxd = 0; - txr->next_avail_desc = ctxd; - --txr->tx_avail; - - return (offload); -} - -/********************************************************************** - * - * Setup work for hardware segmentation offload (TSO) on - * adapters using advanced tx descriptors - * - **********************************************************************/ -static bool -ixv_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen) -{ - struct adapter *adapter = txr->adapter; - struct ixgbe_adv_tx_context_desc *TXD; - struct ixv_tx_buf *tx_buffer; - u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0; - u32 mss_l4len_idx = 0; - u16 vtag = 0; - int ctxd, ehdrlen, hdrlen, ip_hlen, tcp_hlen; - struct ether_vlan_header *eh; - struct ip *ip; - struct tcphdr *th; - - - /* - * Determine where frame payload starts. - * Jump over vlan headers if already present - */ - eh = mtod(mp, struct ether_vlan_header *); - if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) - ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; - else - ehdrlen = ETHER_HDR_LEN; - - /* Ensure we have at least the IP+TCP header in the first mbuf. 
*/ - if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr)) - return FALSE; - - ctxd = txr->next_avail_desc; - tx_buffer = &txr->tx_buffers[ctxd]; - TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd]; - - ip = (struct ip *)(mp->m_data + ehdrlen); - if (ip->ip_p != IPPROTO_TCP) - return FALSE; /* 0 */ - ip->ip_sum = 0; - ip_hlen = ip->ip_hl << 2; - th = (struct tcphdr *)((caddr_t)ip + ip_hlen); - th->th_sum = in_pseudo(ip->ip_src.s_addr, - ip->ip_dst.s_addr, htons(IPPROTO_TCP)); - tcp_hlen = th->th_off << 2; - hdrlen = ehdrlen + ip_hlen + tcp_hlen; - - /* This is used in the transmit desc in encap */ - *paylen = mp->m_pkthdr.len - hdrlen; - - /* VLAN MACLEN IPLEN */ - if (mp->m_flags & M_VLANTAG) { - vtag = htole16(mp->m_pkthdr.ether_vtag); - vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT); - } - - vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT; - vlan_macip_lens |= ip_hlen; - TXD->vlan_macip_lens |= htole32(vlan_macip_lens); - - /* ADV DTYPE TUCMD */ - type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; - type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; - type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; - TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl); - - - /* MSS L4LEN IDX */ - mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT); - mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT); - TXD->mss_l4len_idx = htole32(mss_l4len_idx); - - TXD->seqnum_seed = htole32(0); - tx_buffer->m_head = NULL; - tx_buffer->eop_index = -1; - - if (++ctxd == adapter->num_tx_desc) - ctxd = 0; - - txr->tx_avail--; - txr->next_avail_desc = ctxd; - return TRUE; -} - - -/********************************************************************** - * - * Examine each tx_buffer in the used queue. If the hardware is done - * processing the packet then free associated resources. The - * tx_buffer is put back on the free queue. - * - **********************************************************************/ -static bool -ixv_txeof(struct tx_ring *txr) -{ - struct adapter *adapter = txr->adapter; - struct ifnet *ifp = adapter->ifp; - u32 first, last, done; - struct ixv_tx_buf *tx_buffer; - struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc; - - mtx_assert(&txr->tx_mtx, MA_OWNED); - - if (txr->tx_avail == adapter->num_tx_desc) - return FALSE; - - first = txr->next_to_clean; - tx_buffer = &txr->tx_buffers[first]; - /* For cleanup we just use legacy struct */ - tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first]; - last = tx_buffer->eop_index; - if (last == -1) - return FALSE; - eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last]; - - /* - ** Get the index of the first descriptor - ** BEYOND the EOP and call that 'done'. - ** I do this so the comparison in the - ** inner while loop below can be simple - */ - if (++last == adapter->num_tx_desc) last = 0; - done = last; - - bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, - BUS_DMASYNC_POSTREAD); - /* - ** Only the EOP descriptor of a packet now has the DD - ** bit set, this is what we look for... 
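
As a standalone illustration (not driver code) of the cleanup loop below: 'done' is the slot one past the EOP descriptor, so the inner loop can simply walk 'first' forward, wrapping at the ring size, until it reaches 'done'. A tiny ring makes the wrap visible:

#include <stdio.h>

#define NUM_TX_DESC 8   /* tiny ring so the wrap is visible */

int
main(void)
{
    int first = 6, last = 1;    /* hypothetical: EOP at slot 1 */
    int done = last;

    if (++done == NUM_TX_DESC)
        done = 0;
    while (first != done) {
        printf("clean slot %d\n", first);   /* frees mbuf, dma map, etc. */
        if (++first == NUM_TX_DESC)
            first = 0;
    }
    return (0);
}
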
- */ - while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) { - /* We clean the range of the packet */ - while (first != done) { - tx_desc->upper.data = 0; - tx_desc->lower.data = 0; - tx_desc->buffer_addr = 0; - ++txr->tx_avail; - - if (tx_buffer->m_head) { - bus_dmamap_sync(txr->txtag, - tx_buffer->map, - BUS_DMASYNC_POSTWRITE); - bus_dmamap_unload(txr->txtag, - tx_buffer->map); - m_freem(tx_buffer->m_head); - tx_buffer->m_head = NULL; - tx_buffer->map = NULL; - } - tx_buffer->eop_index = -1; - txr->watchdog_time = ticks; - - if (++first == adapter->num_tx_desc) - first = 0; - - tx_buffer = &txr->tx_buffers[first]; - tx_desc = - (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first]; - } - ++ifp->if_opackets; - /* See if there is more work now */ - last = tx_buffer->eop_index; - if (last != -1) { - eop_desc = - (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last]; - /* Get next done point */ - if (++last == adapter->num_tx_desc) last = 0; - done = last; - } else - break; - } - bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, - BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); - - txr->next_to_clean = first; - - /* - * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that - * it is OK to send packets. If there are no pending descriptors, - * clear the timeout. Otherwise, if some descriptors have been freed, - * restart the timeout. - */ - if (txr->tx_avail > IXV_TX_CLEANUP_THRESHOLD) { - ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; - if (txr->tx_avail == adapter->num_tx_desc) { - txr->watchdog_check = FALSE; - return FALSE; - } - } - - return TRUE; -} - -/********************************************************************* - * - * Refresh mbuf buffers for RX descriptor rings - * - now keeps its own state so discards due to resource - * exhaustion are unnecessary, if an mbuf cannot be obtained - * it just returns, keeping its placeholder, thus it can simply - * be recalled to try again. 
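
A standalone sketch (not driver code) of the two-index scheme ixv_refresh_mbufs() below uses: i is the slot being refreshed and j runs one ahead of it, so the loop stops with next_to_refresh parked one slot short of 'limit':

#include <stdio.h>

#define NUM_RX_DESC 8

int
main(void)
{
    int next_to_refresh = 5, limit = 2; /* hypothetical ring state */
    int i, j;

    i = j = next_to_refresh;
    if (++j == NUM_RX_DESC)
        j = 0;
    while (j != limit) {
        printf("refresh slot %d\n", i); /* reload mbuf and dma map here */
        next_to_refresh = i = j;
        if (++j == NUM_RX_DESC)
            j = 0;
    }
    printf("next_to_refresh now %d\n", next_to_refresh);
    return (0);
}
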
- * - **********************************************************************/ -static void -ixv_refresh_mbufs(struct rx_ring *rxr, int limit) -{ - struct adapter *adapter = rxr->adapter; - bus_dma_segment_t hseg[1]; - bus_dma_segment_t pseg[1]; - struct ixv_rx_buf *rxbuf; - struct mbuf *mh, *mp; - int i, j, nsegs, error; - bool refreshed = FALSE; - - i = j = rxr->next_to_refresh; - /* Get the control variable, one beyond refresh point */ - if (++j == adapter->num_rx_desc) - j = 0; - while (j != limit) { - rxbuf = &rxr->rx_buffers[i]; - if ((rxbuf->m_head == NULL) && (rxr->hdr_split)) { - mh = m_gethdr(M_NOWAIT, MT_DATA); - if (mh == NULL) - goto update; - mh->m_pkthdr.len = mh->m_len = MHLEN; - mh->m_len = MHLEN; - mh->m_flags |= M_PKTHDR; - m_adj(mh, ETHER_ALIGN); - /* Get the memory mapping */ - error = bus_dmamap_load_mbuf_sg(rxr->htag, - rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT); - if (error != 0) { - printf("GET BUF: dmamap load" - " failure - %d\n", error); - m_free(mh); - goto update; - } - rxbuf->m_head = mh; - bus_dmamap_sync(rxr->htag, rxbuf->hmap, - BUS_DMASYNC_PREREAD); - rxr->rx_base[i].read.hdr_addr = - htole64(hseg[0].ds_addr); - } - - if (rxbuf->m_pack == NULL) { - mp = m_getjcl(M_NOWAIT, MT_DATA, - M_PKTHDR, adapter->rx_mbuf_sz); - if (mp == NULL) - goto update; - } else - mp = rxbuf->m_pack; - - mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz; - /* Get the memory mapping */ - error = bus_dmamap_load_mbuf_sg(rxr->ptag, - rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT); - if (error != 0) { - printf("GET BUF: dmamap load" - " failure - %d\n", error); - m_free(mp); - rxbuf->m_pack = NULL; - goto update; - } - rxbuf->m_pack = mp; - bus_dmamap_sync(rxr->ptag, rxbuf->pmap, - BUS_DMASYNC_PREREAD); - rxr->rx_base[i].read.pkt_addr = - htole64(pseg[0].ds_addr); - - refreshed = TRUE; - rxr->next_to_refresh = i = j; - /* Calculate next index */ - if (++j == adapter->num_rx_desc) - j = 0; - } -update: - if (refreshed) /* update tail index */ - IXGBE_WRITE_REG(&adapter->hw, - IXGBE_VFRDT(rxr->me), rxr->next_to_refresh); - return; -} - -/********************************************************************* - * - * Allocate memory for rx_buffer structures. Since we use one - * rx_buffer per received packet, the maximum number of rx_buffer's - * that we'll need is equal to the number of receive descriptors - * that we've allocated. 
- * - **********************************************************************/ -static int -ixv_allocate_receive_buffers(struct rx_ring *rxr) -{ - struct adapter *adapter = rxr->adapter; - device_t dev = adapter->dev; - struct ixv_rx_buf *rxbuf; - int i, bsize, error; - - bsize = sizeof(struct ixv_rx_buf) * adapter->num_rx_desc; - if (!(rxr->rx_buffers = - (struct ixv_rx_buf *) malloc(bsize, - M_DEVBUF, M_NOWAIT | M_ZERO))) { - device_printf(dev, "Unable to allocate rx_buffer memory\n"); - error = ENOMEM; - goto fail; - } - - if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ - 1, 0, /* alignment, bounds */ - BUS_SPACE_MAXADDR, /* lowaddr */ - BUS_SPACE_MAXADDR, /* highaddr */ - NULL, NULL, /* filter, filterarg */ - MSIZE, /* maxsize */ - 1, /* nsegments */ - MSIZE, /* maxsegsize */ - 0, /* flags */ - NULL, /* lockfunc */ - NULL, /* lockfuncarg */ - &rxr->htag))) { - device_printf(dev, "Unable to create RX DMA tag\n"); - goto fail; - } - - if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ - 1, 0, /* alignment, bounds */ - BUS_SPACE_MAXADDR, /* lowaddr */ - BUS_SPACE_MAXADDR, /* highaddr */ - NULL, NULL, /* filter, filterarg */ - MJUMPAGESIZE, /* maxsize */ - 1, /* nsegments */ - MJUMPAGESIZE, /* maxsegsize */ - 0, /* flags */ - NULL, /* lockfunc */ - NULL, /* lockfuncarg */ - &rxr->ptag))) { - device_printf(dev, "Unable to create RX DMA tag\n"); - goto fail; - } - - for (i = 0; i < adapter->num_rx_desc; i++, rxbuf++) { - rxbuf = &rxr->rx_buffers[i]; - error = bus_dmamap_create(rxr->htag, - BUS_DMA_NOWAIT, &rxbuf->hmap); - if (error) { - device_printf(dev, "Unable to create RX head map\n"); - goto fail; - } - error = bus_dmamap_create(rxr->ptag, - BUS_DMA_NOWAIT, &rxbuf->pmap); - if (error) { - device_printf(dev, "Unable to create RX pkt map\n"); - goto fail; - } - } - - return (0); - -fail: - /* Frees all, but can handle partial completion */ - ixv_free_receive_structures(adapter); - return (error); -} - -static void -ixv_free_receive_ring(struct rx_ring *rxr) -{ - struct adapter *adapter; - struct ixv_rx_buf *rxbuf; - int i; - - adapter = rxr->adapter; - for (i = 0; i < adapter->num_rx_desc; i++) { - rxbuf = &rxr->rx_buffers[i]; - if (rxbuf->m_head != NULL) { - bus_dmamap_sync(rxr->htag, rxbuf->hmap, - BUS_DMASYNC_POSTREAD); - bus_dmamap_unload(rxr->htag, rxbuf->hmap); - rxbuf->m_head->m_flags |= M_PKTHDR; - m_freem(rxbuf->m_head); - } - if (rxbuf->m_pack != NULL) { - bus_dmamap_sync(rxr->ptag, rxbuf->pmap, - BUS_DMASYNC_POSTREAD); - bus_dmamap_unload(rxr->ptag, rxbuf->pmap); - rxbuf->m_pack->m_flags |= M_PKTHDR; - m_freem(rxbuf->m_pack); - } - rxbuf->m_head = NULL; - rxbuf->m_pack = NULL; - } -} - - -/********************************************************************* - * - * Initialize a receive ring and its buffers. - * - **********************************************************************/ -static int -ixv_setup_receive_ring(struct rx_ring *rxr) -{ - struct adapter *adapter; - struct ifnet *ifp; - device_t dev; - struct ixv_rx_buf *rxbuf; - bus_dma_segment_t pseg[1], hseg[1]; - struct lro_ctrl *lro = &rxr->lro; - int rsize, nsegs, error = 0; - - adapter = rxr->adapter; - ifp = adapter->ifp; - dev = adapter->dev; - - /* Clear the ring contents */ - IXV_RX_LOCK(rxr); - rsize = roundup2(adapter->num_rx_desc * - sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN); - bzero((void *)rxr->rx_base, rsize); - - /* Free current RX buffer structs and their mbufs */ - ixv_free_receive_ring(rxr); - - /* Configure header split? 
*/ - if (ixv_header_split) - rxr->hdr_split = TRUE; - - /* Now replenish the mbufs */ - for (int j = 0; j != adapter->num_rx_desc; ++j) { - struct mbuf *mh, *mp; - - rxbuf = &rxr->rx_buffers[j]; - /* - ** Dont allocate mbufs if not - ** doing header split, its wasteful - */ - if (rxr->hdr_split == FALSE) - goto skip_head; - - /* First the header */ - rxbuf->m_head = m_gethdr(M_NOWAIT, MT_DATA); - if (rxbuf->m_head == NULL) { - error = ENOBUFS; - goto fail; - } - m_adj(rxbuf->m_head, ETHER_ALIGN); - mh = rxbuf->m_head; - mh->m_len = mh->m_pkthdr.len = MHLEN; - mh->m_flags |= M_PKTHDR; - /* Get the memory mapping */ - error = bus_dmamap_load_mbuf_sg(rxr->htag, - rxbuf->hmap, rxbuf->m_head, hseg, - &nsegs, BUS_DMA_NOWAIT); - if (error != 0) /* Nothing elegant to do here */ - goto fail; - bus_dmamap_sync(rxr->htag, - rxbuf->hmap, BUS_DMASYNC_PREREAD); - /* Update descriptor */ - rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr); - -skip_head: - /* Now the payload cluster */ - rxbuf->m_pack = m_getjcl(M_NOWAIT, MT_DATA, - M_PKTHDR, adapter->rx_mbuf_sz); - if (rxbuf->m_pack == NULL) { - error = ENOBUFS; - goto fail; - } - mp = rxbuf->m_pack; - mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz; - /* Get the memory mapping */ - error = bus_dmamap_load_mbuf_sg(rxr->ptag, - rxbuf->pmap, mp, pseg, - &nsegs, BUS_DMA_NOWAIT); - if (error != 0) - goto fail; - bus_dmamap_sync(rxr->ptag, - rxbuf->pmap, BUS_DMASYNC_PREREAD); - /* Update descriptor */ - rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr); - } - - - /* Setup our descriptor indices */ - rxr->next_to_check = 0; - rxr->next_to_refresh = 0; - rxr->lro_enabled = FALSE; - rxr->rx_split_packets = 0; - rxr->rx_bytes = 0; - rxr->discard = FALSE; - - bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, - BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); - - /* - ** Now set up the LRO interface: - */ - if (ifp->if_capenable & IFCAP_LRO) { - int err = tcp_lro_init(lro); - if (err) { - device_printf(dev, "LRO Initialization failed!\n"); - goto fail; - } - INIT_DEBUGOUT("RX Soft LRO Initialized\n"); - rxr->lro_enabled = TRUE; - lro->ifp = adapter->ifp; - } - - IXV_RX_UNLOCK(rxr); - return (0); - -fail: - ixv_free_receive_ring(rxr); - IXV_RX_UNLOCK(rxr); - return (error); -} - -/********************************************************************* - * - * Initialize all receive rings. - * - **********************************************************************/ -static int -ixv_setup_receive_structures(struct adapter *adapter) -{ - struct rx_ring *rxr = adapter->rx_rings; - int j; - - for (j = 0; j < adapter->num_queues; j++, rxr++) - if (ixv_setup_receive_ring(rxr)) - goto fail; - - return (0); -fail: - /* - * Free RX buffers allocated so far, we will only handle - * the rings that completed, the failing case will have - * cleaned up for itself. 'j' failed, so its the terminus. - */ - for (int i = 0; i < j; ++i) { - rxr = &adapter->rx_rings[i]; - ixv_free_receive_ring(rxr); - } - - return (ENOBUFS); -} - -/********************************************************************* - * - * Setup receive registers and features. 
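
An illustrative aside (not driver code) on the failure path above: when ring j fails in ixv_setup_receive_structures(), that ring has already cleaned up after itself, so the unwind only frees the rings that completed, i.e. the half-open range [0, j):

#include <stdio.h>

int
main(void)
{
    int num_queues = 4, j = 2;  /* hypothetical: ring 2 failed */

    for (int i = 0; i < j; ++i)
        printf("freeing completed ring %d of %d\n", i, num_queues);
    return (0);
}
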
- * - **********************************************************************/ -#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 - -static void -ixv_initialize_receive_units(struct adapter *adapter) -{ - struct rx_ring *rxr = adapter->rx_rings; - struct ixgbe_hw *hw = &adapter->hw; - struct ifnet *ifp = adapter->ifp; - u32 bufsz, fctrl, rxcsum, hlreg; - - - /* Enable broadcasts */ - fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); - fctrl |= IXGBE_FCTRL_BAM; - fctrl |= IXGBE_FCTRL_DPF; - fctrl |= IXGBE_FCTRL_PMCF; - IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); - - /* Set for Jumbo Frames? */ - hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0); - if (ifp->if_mtu > ETHERMTU) { - hlreg |= IXGBE_HLREG0_JUMBOEN; - bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; - } else { - hlreg &= ~IXGBE_HLREG0_JUMBOEN; - bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; - } - IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg); - - for (int i = 0; i < adapter->num_queues; i++, rxr++) { - u64 rdba = rxr->rxdma.dma_paddr; - u32 reg, rxdctl; - - /* Do the queue enabling first */ - rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)); - rxdctl |= IXGBE_RXDCTL_ENABLE; - IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl); - for (int k = 0; k < 10; k++) { - if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) & - IXGBE_RXDCTL_ENABLE) - break; - else - msec_delay(1); - } - wmb(); - - /* Setup the Base and Length of the Rx Descriptor Ring */ - IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i), - (rdba & 0x00000000ffffffffULL)); - IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i), - (rdba >> 32)); - IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i), - adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc)); - - /* Set up the SRRCTL register */ - reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i)); - reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; - reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; - reg |= bufsz; - if (rxr->hdr_split) { - /* Use a standard mbuf for the header */ - reg |= ((IXV_RX_HDR << - IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) - & IXGBE_SRRCTL_BSIZEHDR_MASK); - reg |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; - } else - reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; - IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg); - - /* Setup the HW Rx Head and Tail Descriptor Pointers */ - IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0); - IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), - adapter->num_rx_desc - 1); - } - - rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); - - if (ifp->if_capenable & IFCAP_RXCSUM) - rxcsum |= IXGBE_RXCSUM_PCSD; - - if (!(rxcsum & IXGBE_RXCSUM_PCSD)) - rxcsum |= IXGBE_RXCSUM_IPPCSE; - - IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); - - return; -} - -/********************************************************************* - * - * Free all receive rings. 
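
As a standalone illustration (not driver code) of the buffer-size encoding above: the SRRCTL BSIZEPKT field is expressed in 1 KB units, which is what the right shift by IXGBE_SRRCTL_BSIZEPKT_SHIFT (10 in the shared ixgbe headers) accomplishes:

#include <stdio.h>

#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10  /* field is in 1 KB units */

int
main(void)
{
    printf("jumbo (4096-byte) cluster -> field %u\n",
        4096u >> IXGBE_SRRCTL_BSIZEPKT_SHIFT);
    printf("standard (2048-byte) cluster -> field %u\n",
        2048u >> IXGBE_SRRCTL_BSIZEPKT_SHIFT);
    return (0);
}
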
- * - **********************************************************************/ -static void -ixv_free_receive_structures(struct adapter *adapter) -{ - struct rx_ring *rxr = adapter->rx_rings; - - for (int i = 0; i < adapter->num_queues; i++, rxr++) { - struct lro_ctrl *lro = &rxr->lro; - ixv_free_receive_buffers(rxr); - /* Free LRO memory */ - tcp_lro_free(lro); - /* Free the ring memory as well */ - ixv_dma_free(adapter, &rxr->rxdma); - } - - free(adapter->rx_rings, M_DEVBUF); -} - - -/********************************************************************* - * - * Free receive ring data structures - * - **********************************************************************/ -static void -ixv_free_receive_buffers(struct rx_ring *rxr) -{ - struct adapter *adapter = rxr->adapter; - struct ixv_rx_buf *rxbuf; - - INIT_DEBUGOUT("free_receive_structures: begin"); - - /* Cleanup any existing buffers */ - if (rxr->rx_buffers != NULL) { - for (int i = 0; i < adapter->num_rx_desc; i++) { - rxbuf = &rxr->rx_buffers[i]; - if (rxbuf->m_head != NULL) { - bus_dmamap_sync(rxr->htag, rxbuf->hmap, - BUS_DMASYNC_POSTREAD); - bus_dmamap_unload(rxr->htag, rxbuf->hmap); - rxbuf->m_head->m_flags |= M_PKTHDR; - m_freem(rxbuf->m_head); - } - if (rxbuf->m_pack != NULL) { - bus_dmamap_sync(rxr->ptag, rxbuf->pmap, - BUS_DMASYNC_POSTREAD); - bus_dmamap_unload(rxr->ptag, rxbuf->pmap); - rxbuf->m_pack->m_flags |= M_PKTHDR; - m_freem(rxbuf->m_pack); - } - rxbuf->m_head = NULL; - rxbuf->m_pack = NULL; - if (rxbuf->hmap != NULL) { - bus_dmamap_destroy(rxr->htag, rxbuf->hmap); - rxbuf->hmap = NULL; - } - if (rxbuf->pmap != NULL) { - bus_dmamap_destroy(rxr->ptag, rxbuf->pmap); - rxbuf->pmap = NULL; - } - } - if (rxr->rx_buffers != NULL) { - free(rxr->rx_buffers, M_DEVBUF); - rxr->rx_buffers = NULL; - } - } - - if (rxr->htag != NULL) { - bus_dma_tag_destroy(rxr->htag); - rxr->htag = NULL; - } - if (rxr->ptag != NULL) { - bus_dma_tag_destroy(rxr->ptag); - rxr->ptag = NULL; - } - - return; -} - -static __inline void -ixv_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype) -{ - - /* - * ATM LRO is only for IPv4/TCP packets and TCP checksum of the packet - * should be computed by hardware. Also it should not have VLAN tag in - * ethernet header. - */ - if (rxr->lro_enabled && - (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 && - (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 && - (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) == - (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) && - (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) == - (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) { - /* - * Send to the stack if: - ** - LRO not enabled, or - ** - no LRO resources, or - ** - lro enqueue fails - */ - if (rxr->lro.lro_cnt != 0) - if (tcp_lro_rx(&rxr->lro, m, 0) == 0) - return; - } - IXV_RX_UNLOCK(rxr); - (*ifp->if_input)(ifp, m); - IXV_RX_LOCK(rxr); -} - -static __inline void -ixv_rx_discard(struct rx_ring *rxr, int i) -{ - struct ixv_rx_buf *rbuf; - - rbuf = &rxr->rx_buffers[i]; - - if (rbuf->fmp != NULL) {/* Partial chain ? */ - rbuf->fmp->m_flags |= M_PKTHDR; - m_freem(rbuf->fmp); - rbuf->fmp = NULL; - } - - /* - ** With advanced descriptors the writeback - ** clobbers the buffer addrs, so its easier - ** to just free the existing mbufs and take - ** the normal refresh path to get new buffers - ** and mapping. 
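
A standalone sketch (not driver code) of the packet-type test in ixv_rx_input() above: a frame is an LRO candidate only when the descriptor's packet-type word has both the IPv4 and TCP bits set (bit values as defined in the shared ixgbe_type.h):

#include <stdint.h>
#include <stdio.h>

#define IXGBE_RXDADV_PKTTYPE_IPV4 0x00000010
#define IXGBE_RXDADV_PKTTYPE_TCP  0x00000100

int
main(void)
{
    uint32_t ptype = IXGBE_RXDADV_PKTTYPE_IPV4 |
        IXGBE_RXDADV_PKTTYPE_TCP;   /* hypothetical descriptor word */
    uint32_t want = IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP;

    printf("LRO candidate: %s\n", (ptype & want) == want ? "yes" : "no");
    return (0);
}
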
- */ - if (rbuf->m_head) { - m_free(rbuf->m_head); - rbuf->m_head = NULL; - } - - if (rbuf->m_pack) { - m_free(rbuf->m_pack); - rbuf->m_pack = NULL; - } - - return; -} - - -/********************************************************************* - * - * This routine executes in interrupt context. It replenishes - * the mbufs in the descriptor and sends data which has been - * dma'ed into host memory to upper layer. - * - * We loop at most count times if count is > 0, or until done if - * count < 0. - * - * Return TRUE for more work, FALSE for all clean. - *********************************************************************/ -static bool -ixv_rxeof(struct ix_queue *que, int count) -{ - struct adapter *adapter = que->adapter; - struct rx_ring *rxr = que->rxr; - struct ifnet *ifp = adapter->ifp; - struct lro_ctrl *lro = &rxr->lro; - struct lro_entry *queued; - int i, nextp, processed = 0; - u32 staterr = 0; - union ixgbe_adv_rx_desc *cur; - struct ixv_rx_buf *rbuf, *nbuf; - - IXV_RX_LOCK(rxr); - - for (i = rxr->next_to_check; count != 0;) { - struct mbuf *sendmp, *mh, *mp; - u32 rsc, ptype; - u16 hlen, plen, hdr, vtag; - bool eop; - - /* Sync the ring. */ - bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, - BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); - - cur = &rxr->rx_base[i]; - staterr = le32toh(cur->wb.upper.status_error); - - if ((staterr & IXGBE_RXD_STAT_DD) == 0) - break; - if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) - break; - - count--; - sendmp = NULL; - nbuf = NULL; - rsc = 0; - cur->wb.upper.status_error = 0; - rbuf = &rxr->rx_buffers[i]; - mh = rbuf->m_head; - mp = rbuf->m_pack; - - plen = le16toh(cur->wb.upper.length); - ptype = le32toh(cur->wb.lower.lo_dword.data) & - IXGBE_RXDADV_PKTTYPE_MASK; - hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info); - vtag = le16toh(cur->wb.upper.vlan); - eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0); - - /* Make sure all parts of a bad packet are discarded */ - if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) || - (rxr->discard)) { - ifp->if_ierrors++; - rxr->rx_discarded++; - if (!eop) - rxr->discard = TRUE; - else - rxr->discard = FALSE; - ixv_rx_discard(rxr, i); - goto next_desc; - } - - if (!eop) { - nextp = i + 1; - if (nextp == adapter->num_rx_desc) - nextp = 0; - nbuf = &rxr->rx_buffers[nextp]; - prefetch(nbuf); - } - /* - ** The header mbuf is ONLY used when header - ** split is enabled, otherwise we get normal - ** behavior, ie, both header and payload - ** are DMA'd into the payload buffer. - ** - ** Rather than using the fmp/lmp global pointers - ** we now keep the head of a packet chain in the - ** buffer struct and pass this along from one - ** descriptor to the next, until we get EOP. - */ - if (rxr->hdr_split && (rbuf->fmp == NULL)) { - /* This must be an initial descriptor */ - hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >> - IXGBE_RXDADV_HDRBUFLEN_SHIFT; - if (hlen > IXV_RX_HDR) - hlen = IXV_RX_HDR; - mh->m_len = hlen; - mh->m_flags |= M_PKTHDR; - mh->m_next = NULL; - mh->m_pkthdr.len = mh->m_len; - /* Null buf pointer so it is refreshed */ - rbuf->m_head = NULL; - /* - ** Check the payload length, this - ** could be zero if its a small - ** packet. - */ - if (plen > 0) { - mp->m_len = plen; - mp->m_next = NULL; - mp->m_flags &= ~M_PKTHDR; - mh->m_next = mp; - mh->m_pkthdr.len += mp->m_len; - /* Null buf pointer so it is refreshed */ - rbuf->m_pack = NULL; - rxr->rx_split_packets++; - } - /* - ** Now create the forward - ** chain so when complete - ** we wont have to. 
- */ - if (eop == 0) { - /* stash the chain head */ - nbuf->fmp = mh; - /* Make forward chain */ - if (plen) - mp->m_next = nbuf->m_pack; - else - mh->m_next = nbuf->m_pack; - } else { - /* Singlet, prepare to send */ - sendmp = mh; - if ((adapter->num_vlans) && - (staterr & IXGBE_RXD_STAT_VP)) { - sendmp->m_pkthdr.ether_vtag = vtag; - sendmp->m_flags |= M_VLANTAG; - } - } - } else { - /* - ** Either no header split, or a - ** secondary piece of a fragmented - ** split packet. - */ - mp->m_len = plen; - /* - ** See if there is a stored head - ** that determines what we are - */ - sendmp = rbuf->fmp; - rbuf->m_pack = rbuf->fmp = NULL; - - if (sendmp != NULL) /* secondary frag */ - sendmp->m_pkthdr.len += mp->m_len; - else { - /* first desc of a non-ps chain */ - sendmp = mp; - sendmp->m_flags |= M_PKTHDR; - sendmp->m_pkthdr.len = mp->m_len; - if (staterr & IXGBE_RXD_STAT_VP) { - sendmp->m_pkthdr.ether_vtag = vtag; - sendmp->m_flags |= M_VLANTAG; - } - } - /* Pass the head pointer on */ - if (eop == 0) { - nbuf->fmp = sendmp; - sendmp = NULL; - mp->m_next = nbuf->m_pack; - } - } - ++processed; - /* Sending this frame? */ - if (eop) { - sendmp->m_pkthdr.rcvif = ifp; - ifp->if_ipackets++; - rxr->rx_packets++; - /* capture data for AIM */ - rxr->bytes += sendmp->m_pkthdr.len; - rxr->rx_bytes += sendmp->m_pkthdr.len; - if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) - ixv_rx_checksum(staterr, sendmp, ptype); -#if __FreeBSD_version >= 800000 - sendmp->m_pkthdr.flowid = que->msix; - M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE); -#endif - } -next_desc: - bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, - BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); - - /* Advance our pointers to the next descriptor. */ - if (++i == adapter->num_rx_desc) - i = 0; - - /* Now send to the stack or do LRO */ - if (sendmp != NULL) - ixv_rx_input(rxr, ifp, sendmp, ptype); - - /* Every 8 descriptors we go to refresh mbufs */ - if (processed == 8) { - ixv_refresh_mbufs(rxr, i); - processed = 0; - } - } - - /* Refresh any remaining buf structs */ - if (ixv_rx_unrefreshed(rxr)) - ixv_refresh_mbufs(rxr, i); - - rxr->next_to_check = i; - - /* - * Flush any outstanding LRO work - */ - while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) { - SLIST_REMOVE_HEAD(&lro->lro_active, next); - tcp_lro_flush(lro, queued); - } - - IXV_RX_UNLOCK(rxr); - - /* - ** We still have cleaning to do? - ** Schedule another interrupt if so. - */ - if ((staterr & IXGBE_RXD_STAT_DD) != 0) { - ixv_rearm_queues(adapter, (u64)(1 << que->msix)); - return (TRUE); - } - - return (FALSE); -} - - -/********************************************************************* - * - * Verify that the hardware indicated that the checksum is valid. - * Inform the stack about the status of checksum so that stack - * doesn't spend time verifying the checksum. 
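
An illustrative aside (not driver code): ixv_rx_checksum() below unpacks the descriptor's 32-bit status/error word into a 16-bit status field and the error bits held in the top byte:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    uint32_t staterr = 0x20000003;  /* hypothetical writeback value */
    uint16_t status = (uint16_t)staterr;
    uint8_t errors = (uint8_t)(staterr >> 24);

    printf("status=0x%04x errors=0x%02x\n", status, errors);
    return (0);
}
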
- * - *********************************************************************/ -static void -ixv_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype) -{ - u16 status = (u16) staterr; - u8 errors = (u8) (staterr >> 24); - bool sctp = FALSE; - - if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 && - (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0) - sctp = TRUE; - - if (status & IXGBE_RXD_STAT_IPCS) { - if (!(errors & IXGBE_RXD_ERR_IPE)) { - /* IP Checksum Good */ - mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED; - mp->m_pkthdr.csum_flags |= CSUM_IP_VALID; - - } else - mp->m_pkthdr.csum_flags = 0; - } - if (status & IXGBE_RXD_STAT_L4CS) { - u64 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); -#if __FreeBSD_version >= 800000 - if (sctp) - type = CSUM_SCTP_VALID; -#endif - if (!(errors & IXGBE_RXD_ERR_TCPE)) { - mp->m_pkthdr.csum_flags |= type; - if (!sctp) - mp->m_pkthdr.csum_data = htons(0xffff); - } - } - return; -} - -static void -ixv_setup_vlan_support(struct adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 ctrl, vid, vfta, retry; - - - /* - ** We get here thru init_locked, meaning - ** a soft reset, this has already cleared - ** the VFTA and other state, so if there - ** have been no vlan's registered do nothing. - */ - if (adapter->num_vlans == 0) - return; - - /* Enable the queues */ - for (int i = 0; i < adapter->num_queues; i++) { - ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)); - ctrl |= IXGBE_RXDCTL_VME; - IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl); - } - - /* - ** A soft reset zero's out the VFTA, so - ** we need to repopulate it now. - */ - for (int i = 0; i < VFTA_SIZE; i++) { - if (ixv_shadow_vfta[i] == 0) - continue; - vfta = ixv_shadow_vfta[i]; - /* - ** Reconstruct the vlan id's - ** based on the bits set in each - ** of the array ints. - */ - for ( int j = 0; j < 32; j++) { - retry = 0; - if ((vfta & (1 << j)) == 0) - continue; - vid = (i * 32) + j; - /* Call the shared code mailbox routine */ - while (ixgbe_set_vfta(hw, vid, 0, TRUE)) { - if (++retry > 5) - break; - } - } - } -} - -/* -** This routine is run via an vlan config EVENT, -** it enables us to use the HW Filter table since -** we can get the vlan id. This just creates the -** entry in the soft version of the VFTA, init will -** repopulate the real table. -*/ -static void -ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag) -{ - struct adapter *adapter = ifp->if_softc; - u16 index, bit; - - if (ifp->if_softc != arg) /* Not our event */ - return; - - if ((vtag == 0) || (vtag > 4095)) /* Invalid */ - return; - - IXV_CORE_LOCK(adapter); - index = (vtag >> 5) & 0x7F; - bit = vtag & 0x1F; - ixv_shadow_vfta[index] |= (1 << bit); - ++adapter->num_vlans; - /* Re-init to load the changes */ - ixv_init_locked(adapter); - IXV_CORE_UNLOCK(adapter); -} - -/* -** This routine is run via an vlan -** unconfig EVENT, remove our entry -** in the soft vfta. 
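
A standalone sketch (not driver code) of the shadow-VFTA math used by the VLAN handlers above and below: 4096 possible VLAN ids are packed into 128 32-bit words, so a tag splits into a 7-bit word index and a 5-bit bit position, and i * 32 + j reconstructs the id:

#include <stdint.h>
#include <stdio.h>

#define VFTA_SIZE 128

static uint32_t shadow_vfta[VFTA_SIZE];

int
main(void)
{
    uint16_t vtag = 1234;   /* hypothetical VLAN id */
    uint16_t index = (vtag >> 5) & 0x7F;
    uint16_t bit = vtag & 0x1F;

    shadow_vfta[index] |= (1 << bit);
    printf("vid %u -> word %u, bit %u -> vid %u\n",
        vtag, index, bit, index * 32 + bit);
    return (0);
}
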
-*/ -static void -ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag) -{ - struct adapter *adapter = ifp->if_softc; - u16 index, bit; - - if (ifp->if_softc != arg) - return; - - if ((vtag == 0) || (vtag > 4095)) /* Invalid */ - return; - - IXV_CORE_LOCK(adapter); - index = (vtag >> 5) & 0x7F; - bit = vtag & 0x1F; - ixv_shadow_vfta[index] &= ~(1 << bit); - --adapter->num_vlans; - /* Re-init to load the changes */ - ixv_init_locked(adapter); - IXV_CORE_UNLOCK(adapter); -} - -static void -ixv_enable_intr(struct adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - struct ix_queue *que = adapter->queues; - u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); - - - IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); - - mask = IXGBE_EIMS_ENABLE_MASK; - mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC); - IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask); - - for (int i = 0; i < adapter->num_queues; i++, que++) - ixv_enable_queue(adapter, que->msix); - - IXGBE_WRITE_FLUSH(hw); - - return; -} - -static void -ixv_disable_intr(struct adapter *adapter) -{ - IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0); - IXGBE_WRITE_FLUSH(&adapter->hw); - return; -} - -/* -** Setup the correct IVAR register for a particular MSIX interrupt -** - entry is the register array entry -** - vector is the MSIX vector for this queue -** - type is RX/TX/MISC -*/ -static void -ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 ivar, index; - - vector |= IXGBE_IVAR_ALLOC_VAL; - - if (type == -1) { /* MISC IVAR */ - ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); - ivar &= ~0xFF; - ivar |= vector; - IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar); - } else { /* RX/TX IVARS */ - index = (16 * (entry & 1)) + (8 * type); - ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1)); - ivar &= ~(0xFF << index); - ivar |= (vector << index); - IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar); - } -} - -static void -ixv_configure_ivars(struct adapter *adapter) -{ - struct ix_queue *que = adapter->queues; - - for (int i = 0; i < adapter->num_queues; i++, que++) { - /* First the RX queue entry */ - ixv_set_ivar(adapter, i, que->msix, 0); - /* ... and the TX */ - ixv_set_ivar(adapter, i, que->msix, 1); - /* Set an initial value in EITR */ - IXGBE_WRITE_REG(&adapter->hw, - IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT); - } - - /* For the Link interrupt */ - ixv_set_ivar(adapter, 1, adapter->mbxvec, -1); -} - - -/* -** Tasklet handler for MSIX MBX interrupts -** - do outside interrupt since it might sleep -*/ -static void -ixv_handle_mbx(void *context, int pending) -{ - struct adapter *adapter = context; - - ixgbe_check_link(&adapter->hw, - &adapter->link_speed, &adapter->link_up, 0); - ixv_update_link_status(adapter); -} - -/* -** The VF stats registers never have a truely virgin -** starting point, so this routine tries to make an -** artificial one, marking ground zero on attach as -** it were. 
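
An illustrative aside (not driver code) on the IVAR math in ixv_set_ivar() above: each 32-bit VTIVAR register carries four 8-bit vector entries, two queues per register, so (16 * (entry & 1)) + (8 * type) selects the byte lane; 0x80 is IXGBE_IVAR_ALLOC_VAL from the shared headers:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    uint8_t entry = 1, vector = 2, type = 1;    /* hypothetical: TX, queue 1 */
    uint32_t ivar = 0;                          /* stand-in register value */
    uint32_t index = (16 * (entry & 1)) + (8 * type);

    vector |= 0x80;                 /* IXGBE_IVAR_ALLOC_VAL */
    ivar &= ~(0xFFu << index);
    ivar |= ((uint32_t)vector << index);
    printf("VTIVAR(%u): byte lane %u -> 0x%08x\n",
        entry >> 1, index / 8, ivar);
    return (0);
}
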
-*/ -static void -ixv_save_stats(struct adapter *adapter) -{ - if (adapter->stats.vfgprc || adapter->stats.vfgptc) { - adapter->stats.saved_reset_vfgprc += - adapter->stats.vfgprc - adapter->stats.base_vfgprc; - adapter->stats.saved_reset_vfgptc += - adapter->stats.vfgptc - adapter->stats.base_vfgptc; - adapter->stats.saved_reset_vfgorc += - adapter->stats.vfgorc - adapter->stats.base_vfgorc; - adapter->stats.saved_reset_vfgotc += - adapter->stats.vfgotc - adapter->stats.base_vfgotc; - adapter->stats.saved_reset_vfmprc += - adapter->stats.vfmprc - adapter->stats.base_vfmprc; - } -} - -static void -ixv_init_stats(struct adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - - adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC); - adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB); - adapter->stats.last_vfgorc |= - (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32); - - adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC); - adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB); - adapter->stats.last_vfgotc |= - (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32); - - adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC); - - adapter->stats.base_vfgprc = adapter->stats.last_vfgprc; - adapter->stats.base_vfgorc = adapter->stats.last_vfgorc; - adapter->stats.base_vfgptc = adapter->stats.last_vfgptc; - adapter->stats.base_vfgotc = adapter->stats.last_vfgotc; - adapter->stats.base_vfmprc = adapter->stats.last_vfmprc; -} - -#define UPDATE_STAT_32(reg, last, count) \ -{ \ - u32 current = IXGBE_READ_REG(hw, reg); \ - if (current < last) \ - count += 0x100000000LL; \ - last = current; \ - count &= 0xFFFFFFFF00000000LL; \ - count |= current; \ -} - -#define UPDATE_STAT_36(lsb, msb, last, count) \ -{ \ - u64 cur_lsb = IXGBE_READ_REG(hw, lsb); \ - u64 cur_msb = IXGBE_READ_REG(hw, msb); \ - u64 current = ((cur_msb << 32) | cur_lsb); \ - if (current < last) \ - count += 0x1000000000LL; \ - last = current; \ - count &= 0xFFFFFFF000000000LL; \ - count |= current; \ -} - -/* -** ixv_update_stats - Update the board statistics counters. -*/ -void -ixv_update_stats(struct adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - - UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.last_vfgprc, - adapter->stats.vfgprc); - UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.last_vfgptc, - adapter->stats.vfgptc); - UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, - adapter->stats.last_vfgorc, adapter->stats.vfgorc); - UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, - adapter->stats.last_vfgotc, adapter->stats.vfgotc); - UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.last_vfmprc, - adapter->stats.vfmprc); -} - -/********************************************************************** - * - * This routine is called only when ixgbe_display_debug_stats is enabled. - * This routine provides a way to take a look at important statistics - * maintained by the driver and hardware. 
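
A standalone sketch (not driver code) of the rollover handling in UPDATE_STAT_32 above: the hardware register is only 32 bits wide, so the 64-bit soft counter keeps the carry and splices the fresh low word back in:

#include <stdint.h>
#include <stdio.h>

static void
update_stat_32(uint32_t current, uint32_t *last, uint64_t *count)
{
    if (current < *last)            /* the 32-bit register wrapped */
        *count += 0x100000000ULL;
    *last = current;
    *count &= 0xFFFFFFFF00000000ULL;
    *count |= current;
}

int
main(void)
{
    uint32_t last = 0xFFFFFFF0;     /* hypothetical previous reading */
    uint64_t count = 0xFFFFFFF0;

    update_stat_32(0x00000010, &last, &count);
    printf("count=0x%llx\n", (unsigned long long)count); /* 0x100000010 */
    return (0);
}
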
- * - **********************************************************************/ -static void -ixv_print_hw_stats(struct adapter * adapter) -{ - device_t dev = adapter->dev; - - device_printf(dev,"Std Mbuf Failed = %lu\n", - adapter->mbuf_defrag_failed); - device_printf(dev,"Driver dropped packets = %lu\n", - adapter->dropped_pkts); - device_printf(dev, "watchdog timeouts = %ld\n", - adapter->watchdog_events); - - device_printf(dev,"Good Packets Rcvd = %llu\n", - (long long)adapter->stats.vfgprc); - device_printf(dev,"Good Packets Xmtd = %llu\n", - (long long)adapter->stats.vfgptc); - device_printf(dev,"TSO Transmissions = %lu\n", - adapter->tso_tx); - -} - -/********************************************************************** - * - * This routine is called only when em_display_debug_stats is enabled. - * This routine provides a way to take a look at important statistics - * maintained by the driver and hardware. - * - **********************************************************************/ -static void -ixv_print_debug_info(struct adapter *adapter) -{ - device_t dev = adapter->dev; - struct ixgbe_hw *hw = &adapter->hw; - struct ix_queue *que = adapter->queues; - struct rx_ring *rxr; - struct tx_ring *txr; - struct lro_ctrl *lro; - - device_printf(dev,"Error Byte Count = %u \n", - IXGBE_READ_REG(hw, IXGBE_ERRBC)); - - for (int i = 0; i < adapter->num_queues; i++, que++) { - txr = que->txr; - rxr = que->rxr; - lro = &rxr->lro; - device_printf(dev,"QUE(%d) IRQs Handled: %lu\n", - que->msix, (long)que->irqs); - device_printf(dev,"RX(%d) Packets Received: %lld\n", - rxr->me, (long long)rxr->rx_packets); - device_printf(dev,"RX(%d) Split RX Packets: %lld\n", - rxr->me, (long long)rxr->rx_split_packets); - device_printf(dev,"RX(%d) Bytes Received: %lu\n", - rxr->me, (long)rxr->rx_bytes); - device_printf(dev,"RX(%d) LRO Queued= %d\n", - rxr->me, lro->lro_queued); - device_printf(dev,"RX(%d) LRO Flushed= %d\n", - rxr->me, lro->lro_flushed); - device_printf(dev,"TX(%d) Packets Sent: %lu\n", - txr->me, (long)txr->total_packets); - device_printf(dev,"TX(%d) NO Desc Avail: %lu\n", - txr->me, (long)txr->no_desc_avail); - } - - device_printf(dev,"MBX IRQ Handled: %lu\n", - (long)adapter->mbx_irq); - return; -} - -static int -ixv_sysctl_stats(SYSCTL_HANDLER_ARGS) -{ - int error; - int result; - struct adapter *adapter; - - result = -1; - error = sysctl_handle_int(oidp, &result, 0, req); - - if (error || !req->newptr) - return (error); - - if (result == 1) { - adapter = (struct adapter *) arg1; - ixv_print_hw_stats(adapter); - } - return error; -} - -static int -ixv_sysctl_debug(SYSCTL_HANDLER_ARGS) -{ - int error, result; - struct adapter *adapter; - - result = -1; - error = sysctl_handle_int(oidp, &result, 0, req); - - if (error || !req->newptr) - return (error); - - if (result == 1) { - adapter = (struct adapter *) arg1; - ixv_print_debug_info(adapter); - } - return error; -} - -/* -** Set flow control using sysctl: -** Flow control values: -** 0 - off -** 1 - rx pause -** 2 - tx pause -** 3 - full -*/ -static int -ixv_set_flowcntl(SYSCTL_HANDLER_ARGS) -{ - int error; - struct adapter *adapter; - - error = sysctl_handle_int(oidp, &ixv_flow_control, 0, req); - - if (error) - return (error); - - adapter = (struct adapter *) arg1; - switch (ixv_flow_control) { - case ixgbe_fc_rx_pause: - case ixgbe_fc_tx_pause: - case ixgbe_fc_full: - adapter->hw.fc.requested_mode = ixv_flow_control; - break; - case ixgbe_fc_none: - default: - adapter->hw.fc.requested_mode = ixgbe_fc_none; - } - - 
ixgbe_fc_enable(&adapter->hw); - return error; -} - -static void -ixv_add_rx_process_limit(struct adapter *adapter, const char *name, - const char *description, int *limit, int value) -{ - *limit = value; - SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev), - SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)), - OID_AUTO, name, CTLFLAG_RW, limit, value, description); -} - diff --git a/sys/dev/ixgbe/ixv.h b/sys/dev/ixgbe/ixv.h deleted file mode 100644 index 096819c3..0000000 --- a/sys/dev/ixgbe/ixv.h +++ /dev/null @@ -1,434 +0,0 @@ -/****************************************************************************** - - Copyright (c) 2001-2012, Intel Corporation - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - - 1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - - 3. Neither the name of the Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - POSSIBILITY OF SUCH DAMAGE. 
- -******************************************************************************/ -/*$FreeBSD$*/ - - -#ifndef _IXV_H_ -#define _IXV_H_ - - -#include <sys/param.h> -#include <sys/systm.h> -#include <sys/mbuf.h> -#include <sys/protosw.h> -#include <sys/socket.h> -#include <sys/malloc.h> -#include <sys/kernel.h> -#include <sys/module.h> -#include <sys/sockio.h> - -#include <net/if.h> -#include <net/if_arp.h> -#include <net/bpf.h> -#include <net/ethernet.h> -#include <net/if_dl.h> -#include <net/if_media.h> - -#include <net/bpf.h> -#include <net/if_types.h> -#include <net/if_vlan_var.h> - -#include <netinet/in_systm.h> -#include <netinet/in.h> -#include <netinet/if_ether.h> -#include <netinet/ip.h> -#include <netinet/ip6.h> -#include <netinet/tcp.h> -#include <netinet/tcp_lro.h> -#include <netinet/udp.h> - -#include <machine/in_cksum.h> - -#include <sys/bus.h> -#include <machine/bus.h> -#include <sys/rman.h> -#include <machine/resource.h> -#include <vm/vm.h> -#include <vm/pmap.h> -#include <machine/clock.h> -#include <dev/pci/pcivar.h> -#include <dev/pci/pcireg.h> -#include <sys/proc.h> -#include <sys/sysctl.h> -#include <sys/endian.h> -#include <sys/taskqueue.h> -#include <sys/pcpu.h> -#include <sys/smp.h> -#include <machine/smp.h> - -#include "ixgbe_api.h" -#include "ixgbe_vf.h" - -/* Tunables */ - -/* - * TxDescriptors Valid Range: 64-4096 Default Value: 256 This value is the - * number of transmit descriptors allocated by the driver. Increasing this - * value allows the driver to queue more transmits. Each descriptor is 16 - * bytes. Performance tests have show the 2K value to be optimal for top - * performance. - */ -#define DEFAULT_TXD 1024 -#define PERFORM_TXD 2048 -#define MAX_TXD 4096 -#define MIN_TXD 64 - -/* - * RxDescriptors Valid Range: 64-4096 Default Value: 256 This value is the - * number of receive descriptors allocated for each RX queue. Increasing this - * value allows the driver to buffer more incoming packets. Each descriptor - * is 16 bytes. A receive buffer is also allocated for each descriptor. - * - * Note: with 8 rings and a dual port card, it is possible to bump up - * against the system mbuf pool limit, you can tune nmbclusters - * to adjust for this. - */ -#define DEFAULT_RXD 1024 -#define PERFORM_RXD 2048 -#define MAX_RXD 4096 -#define MIN_RXD 64 - -/* Alignment for rings */ -#define DBA_ALIGN 128 - -/* - * This parameter controls the maximum no of times the driver will loop in - * the isr. Minimum Value = 1 - */ -#define MAX_LOOP 10 - -/* - * This is the max watchdog interval, ie. the time that can - * pass between any two TX clean operations, such only happening - * when the TX hardware is functioning. - */ -#define IXV_WATCHDOG (10 * hz) - -/* - * This parameters control when the driver calls the routine to reclaim - * transmit descriptors. 
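
An illustrative aside (not driver code) on the tunables above: each advanced descriptor is 16 bytes and rings are padded to the 128-byte DBA_ALIGN boundary, so the default ring's footprint works out as below (this roundup2 behaves like the sys/param.h macro for power-of-two alignments):

#include <stdio.h>

#define DESC_SIZE   16      /* one advanced descriptor */
#define DBA_ALIGN   128
#define DEFAULT_RXD 1024

static unsigned long
roundup2(unsigned long x, unsigned long align)
{
    return ((x + align - 1) & ~(align - 1));
}

int
main(void)
{
    printf("default ring: %lu bytes\n",
        roundup2(DEFAULT_RXD * DESC_SIZE, DBA_ALIGN));
    return (0);
}
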
- */ -#define IXV_TX_CLEANUP_THRESHOLD (adapter->num_tx_desc / 8) -#define IXV_TX_OP_THRESHOLD (adapter->num_tx_desc / 32) - -#define IXV_MAX_FRAME_SIZE 0x3F00 - -/* Flow control constants */ -#define IXV_FC_PAUSE 0xFFFF -#define IXV_FC_HI 0x20000 -#define IXV_FC_LO 0x10000 - -/* Defines for printing debug information */ -#define DEBUG_INIT 0 -#define DEBUG_IOCTL 0 -#define DEBUG_HW 0 - -#define INIT_DEBUGOUT(S) if (DEBUG_INIT) printf(S "\n") -#define INIT_DEBUGOUT1(S, A) if (DEBUG_INIT) printf(S "\n", A) -#define INIT_DEBUGOUT2(S, A, B) if (DEBUG_INIT) printf(S "\n", A, B) -#define IOCTL_DEBUGOUT(S) if (DEBUG_IOCTL) printf(S "\n") -#define IOCTL_DEBUGOUT1(S, A) if (DEBUG_IOCTL) printf(S "\n", A) -#define IOCTL_DEBUGOUT2(S, A, B) if (DEBUG_IOCTL) printf(S "\n", A, B) -#define HW_DEBUGOUT(S) if (DEBUG_HW) printf(S "\n") -#define HW_DEBUGOUT1(S, A) if (DEBUG_HW) printf(S "\n", A) -#define HW_DEBUGOUT2(S, A, B) if (DEBUG_HW) printf(S "\n", A, B) - -#define MAX_NUM_MULTICAST_ADDRESSES 128 -#define IXV_EITR_DEFAULT 128 -#define IXV_SCATTER 32 -#define IXV_RX_HDR 128 -#define MSIX_BAR 3 -#define IXV_TSO_SIZE 65535 -#define IXV_BR_SIZE 4096 -#define IXV_LINK_ITR 2000 -#define TX_BUFFER_SIZE ((u32) 1514) -#define VFTA_SIZE 128 - -/* Offload bits in mbuf flag */ -#if __FreeBSD_version >= 800000 -#define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP) -#else -#define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP) -#endif - -/* - ***************************************************************************** - * vendor_info_array - * - * This array contains the list of Subvendor/Subdevice IDs on which the driver - * should load. - * - ***************************************************************************** - */ -typedef struct _ixv_vendor_info_t { - unsigned int vendor_id; - unsigned int device_id; - unsigned int subvendor_id; - unsigned int subdevice_id; - unsigned int index; -} ixv_vendor_info_t; - - -struct ixv_tx_buf { - u32 eop_index; - struct mbuf *m_head; - bus_dmamap_t map; -}; - -struct ixv_rx_buf { - struct mbuf *m_head; - struct mbuf *m_pack; - struct mbuf *fmp; - bus_dmamap_t hmap; - bus_dmamap_t pmap; -}; - -/* - * Bus dma allocation structure used by ixv_dma_malloc and ixv_dma_free. - */ -struct ixv_dma_alloc { - bus_addr_t dma_paddr; - caddr_t dma_vaddr; - bus_dma_tag_t dma_tag; - bus_dmamap_t dma_map; - bus_dma_segment_t dma_seg; - bus_size_t dma_size; - int dma_nseg; -}; - -/* -** Driver queue struct: this is the interrupt container -** for the associated tx and rx ring. 
-*/ -struct ix_queue { - struct adapter *adapter; - u32 msix; /* This queue's MSIX vector */ - u32 eims; /* This queue's EIMS bit */ - u32 eitr_setting; - u32 eitr; /* cached reg */ - struct resource *res; - void *tag; - struct tx_ring *txr; - struct rx_ring *rxr; - struct task que_task; - struct taskqueue *tq; - u64 irqs; -}; - -/* - * The transmit ring, one per queue - */ -struct tx_ring { - struct adapter *adapter; - struct mtx tx_mtx; - u32 me; - bool watchdog_check; - int watchdog_time; - union ixgbe_adv_tx_desc *tx_base; - struct ixv_dma_alloc txdma; - u32 next_avail_desc; - u32 next_to_clean; - struct ixv_tx_buf *tx_buffers; - volatile u16 tx_avail; - u32 txd_cmd; - bus_dma_tag_t txtag; - char mtx_name[16]; - struct buf_ring *br; - /* Soft Stats */ - u32 bytes; - u32 packets; - u64 no_desc_avail; - u64 total_packets; -}; - - -/* - * The Receive ring, one per rx queue - */ -struct rx_ring { - struct adapter *adapter; - struct mtx rx_mtx; - u32 me; - union ixgbe_adv_rx_desc *rx_base; - struct ixv_dma_alloc rxdma; - struct lro_ctrl lro; - bool lro_enabled; - bool hdr_split; - bool discard; - u32 next_to_refresh; - u32 next_to_check; - char mtx_name[16]; - struct ixv_rx_buf *rx_buffers; - bus_dma_tag_t htag; - bus_dma_tag_t ptag; - - u32 bytes; /* Used for AIM calc */ - u32 packets; - - /* Soft stats */ - u64 rx_irq; - u64 rx_split_packets; - u64 rx_packets; - u64 rx_bytes; - u64 rx_discarded; -}; - -/* Our adapter structure */ -struct adapter { - struct ifnet *ifp; - struct ixgbe_hw hw; - - struct ixgbe_osdep osdep; - struct device *dev; - - struct resource *pci_mem; - struct resource *msix_mem; - - /* - * Interrupt resources: this set is - * either used for legacy, or for Link - * when doing MSIX - */ - void *tag; - struct resource *res; - - struct ifmedia media; - struct callout timer; - int msix; - int if_flags; - - struct mtx core_mtx; - - eventhandler_tag vlan_attach; - eventhandler_tag vlan_detach; - - u16 num_vlans; - u16 num_queues; - - /* Info about the board itself */ - bool link_active; - u16 max_frame_size; - u32 link_speed; - bool link_up; - u32 mbxvec; - - /* Mbuf cluster size */ - u32 rx_mbuf_sz; - - /* Support for pluggable optics */ - struct task mbx_task; /* Mailbox tasklet */ - struct taskqueue *tq; - - /* - ** Queues: - ** This is the irq holder, it has - ** and RX/TX pair or rings associated - ** with it. - */ - struct ix_queue *queues; - - /* - * Transmit rings: - * Allocated at run time, an array of rings. - */ - struct tx_ring *tx_rings; - int num_tx_desc; - - /* - * Receive rings: - * Allocated at run time, an array of rings. 
- */ - struct rx_ring *rx_rings; - int num_rx_desc; - u64 que_mask; - u32 rx_process_limit; - - /* Misc stats maintained by the driver */ - unsigned long dropped_pkts; - unsigned long mbuf_defrag_failed; - unsigned long mbuf_header_failed; - unsigned long mbuf_packet_failed; - unsigned long no_tx_map_avail; - unsigned long no_tx_dma_setup; - unsigned long watchdog_events; - unsigned long tso_tx; - unsigned long mbx_irq; - - struct ixgbevf_hw_stats stats; -}; - - -#define IXV_CORE_LOCK_INIT(_sc, _name) \ - mtx_init(&(_sc)->core_mtx, _name, "IXV Core Lock", MTX_DEF) -#define IXV_CORE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->core_mtx) -#define IXV_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->tx_mtx) -#define IXV_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rx_mtx) -#define IXV_CORE_LOCK(_sc) mtx_lock(&(_sc)->core_mtx) -#define IXV_TX_LOCK(_sc) mtx_lock(&(_sc)->tx_mtx) -#define IXV_TX_TRYLOCK(_sc) mtx_trylock(&(_sc)->tx_mtx) -#define IXV_RX_LOCK(_sc) mtx_lock(&(_sc)->rx_mtx) -#define IXV_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->core_mtx) -#define IXV_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->tx_mtx) -#define IXV_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->rx_mtx) -#define IXV_CORE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->core_mtx, MA_OWNED) -#define IXV_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->tx_mtx, MA_OWNED) - -/* Workaround to make 8.0 buildable */ -#if __FreeBSD_version >= 800000 && __FreeBSD_version < 800504 -static __inline int -drbr_needs_enqueue(struct ifnet *ifp, struct buf_ring *br) -{ -#ifdef ALTQ - if (ALTQ_IS_ENABLED(&ifp->if_snd)) - return (1); -#endif - return (!buf_ring_empty(br)); -} -#endif - -/* -** Find the number of unrefreshed RX descriptors -*/ -static inline u16 -ixv_rx_unrefreshed(struct rx_ring *rxr) -{ - struct adapter *adapter = rxr->adapter; - - if (rxr->next_to_check > rxr->next_to_refresh) - return (rxr->next_to_check - rxr->next_to_refresh - 1); - else - return ((adapter->num_rx_desc + rxr->next_to_check) - - rxr->next_to_refresh - 1); -} - -#endif /* _IXV_H_ */ |
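
As a closing illustration (standalone, not driver code): the unrefreshed-descriptor count computed by ixv_rx_unrefreshed() above, exercised on a small ring both with and without a wrap:

#include <stdio.h>

#define NUM_RX_DESC 8

static int
rx_unrefreshed(int next_to_check, int next_to_refresh)
{
    if (next_to_check > next_to_refresh)
        return (next_to_check - next_to_refresh - 1);
    return ((NUM_RX_DESC + next_to_check) - next_to_refresh - 1);
}

int
main(void)
{
    printf("no wrap: %d\n", rx_unrefreshed(5, 2));  /* 2 */
    printf("wrap:    %d\n", rx_unrefreshed(1, 6));  /* 2 */
    return (0);
}
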