path: root/sys/dev/ixl
author    jfv <jfv@FreeBSD.org>	2014-08-25 22:04:29 +0000
committer jfv <jfv@FreeBSD.org>	2014-08-25 22:04:29 +0000
commit    c988cfb907aeea93ac1dc1a10bfbc2221271281d (patch)
tree      6fcf026ea614db21de0f732d9f7bbca7b03de800 /sys/dev/ixl
parent    4e198665c508b72fb221139ec91629bc98646b9c (diff)
MFC of the Intel Base driver for the Intel XL710 Ethernet Controller Family
- It was decided to change the driver name to if_ixl for FreeBSD
- This release adds the VF driver to the tree; it can be built into the kernel or as the if_ixlv module
- The VF driver is independent for the first time; this will be desirable when full SR-IOV capability is added to the OS

Submitted by: jack.vogel@intel.com and eric.joyner@intel.com
Diffstat (limited to 'sys/dev/ixl')
-rw-r--r--  sys/dev/ixl/README                    |   342
-rwxr-xr-x  sys/dev/ixl/i40e_adminq.c             |  1070
-rwxr-xr-x  sys/dev/ixl/i40e_adminq.h             |   123
-rwxr-xr-x  sys/dev/ixl/i40e_adminq_cmd.h         |  2180
-rwxr-xr-x  sys/dev/ixl/i40e_alloc.h              |    66
-rwxr-xr-x  sys/dev/ixl/i40e_common.c             |  4787
-rwxr-xr-x  sys/dev/ixl/i40e_hmc.c                |   376
-rwxr-xr-x  sys/dev/ixl/i40e_hmc.h                |   244
-rwxr-xr-x  sys/dev/ixl/i40e_lan_hmc.c            |  1418
-rwxr-xr-x  sys/dev/ixl/i40e_lan_hmc.h            |   201
-rwxr-xr-x  sys/dev/ixl/i40e_nvm.c                |   481
-rwxr-xr-x  sys/dev/ixl/i40e_osdep.c              |   198
-rwxr-xr-x  sys/dev/ixl/i40e_osdep.h              |   230
-rwxr-xr-x  sys/dev/ixl/i40e_prototype.h          |   427
-rwxr-xr-x  sys/dev/ixl/i40e_register.h           |  3378
-rwxr-xr-x  sys/dev/ixl/i40e_register_x710_int.h  | 10713
-rwxr-xr-x  sys/dev/ixl/i40e_status.h             |   108
-rwxr-xr-x  sys/dev/ixl/i40e_type.h               |  1422
-rwxr-xr-x  sys/dev/ixl/i40e_virtchnl.h           |   375
-rwxr-xr-x  sys/dev/ixl/if_ixl.c                  |  4707
-rw-r--r--  sys/dev/ixl/if_ixlv.c                 |  2742
-rw-r--r--  sys/dev/ixl/ixl.h                     |   559
-rw-r--r--  sys/dev/ixl/ixl_pf.h                  |    96
-rwxr-xr-x  sys/dev/ixl/ixl_txrx.c                |  1696
-rw-r--r--  sys/dev/ixl/ixlv.h                    |   205
-rw-r--r--  sys/dev/ixl/ixlvc.c                   |   976
26 files changed, 39120 insertions, 0 deletions
diff --git a/sys/dev/ixl/README b/sys/dev/ixl/README
new file mode 100644
index 0000000..066e4e4
--- /dev/null
+++ b/sys/dev/ixl/README
@@ -0,0 +1,342 @@
+ixl FreeBSD* Base Driver for the Intel® XL710 Ethernet Controller Family
+
+/*$FreeBSD$*/
+================================================================
+
+July 21, 2014
+
+
+Contents
+========
+
+- Overview
+- Supported Adapters
+- Building and Installation
+- Configuration and Tuning
+- Known Limitations
+- Support
+- License
+
+
+Overview
+========
+
+This file describes the IXL FreeBSD* Base driver for the XL710 Ethernet Family of Adapters. The driver has been developed for use with FreeBSD 10.0 or later, but should be compatible with any supported FreeBSD release.
+
+For questions related to hardware requirements, refer to the documentation supplied with your Intel XL710 adapter. All hardware requirements listed apply for use with FreeBSD.
+
+
+Supported Adapters
+==================
+
+The driver in this release is compatible with XL710 and X710-based Intel Ethernet Network Connections.
+
+
+SFP+ Devices with Pluggable Optics
+----------------------------------
+
+SR Modules
+----------
+ Intel DUAL RATE 1G/10G SFP+ SR (bailed) FTLX8571D3BCV-IT
+ Intel DUAL RATE 1G/10G SFP+ SR (bailed) AFBR-703SDZ-IN2
+
+LR Modules
+----------
+ Intel DUAL RATE 1G/10G SFP+ LR (bailed) FTLX1471D3BCV-IT
+ Intel DUAL RATE 1G/10G SFP+ LR (bailed) AFCT-701SDZ-IN2
+
+QSFP+ Modules
+-------------
+ Intel TRIPLE RATE 1G/10G/40G QSFP+ SR (bailed) E40GQSFPSR
+ Intel TRIPLE RATE 1G/10G/40G QSFP+ LR (bailed) E40GQSFPLR
+ QSFP+ 1G speed is not supported on XL710 based devices.
+
+X710/XL710 Based SFP+ adapters support all passive and active limiting direct attach cables that comply with SFF-8431 v4.1 and SFF-8472 v10.4 specifications.
+
+
+Building and Installation
+=========================
+
+NOTE: You must have kernel sources installed to compile the driver module.
+
+In the instructions below, x.x.x is the driver version
+as indicated in the name of the driver tar file.
+
+1. Move the base driver tar file to the directory of your choice. For example, use /home/username/ixl or /usr/local/src/ixl.
+
+2. Untar/unzip the archive:
+ tar xfz ixl-x.x.x.tar.gz
+
+3. To install man page:
+ cd ixl-x.x.x
+ gzip -c ixl.4 > /usr/share/man/man4/ixl.4.gz
+
+4. To load the driver onto a running system:
+ cd ixl-x.x.x/src
+ make load
+
+5. To assign an IP address to the interface, enter the following:
+ ifconfig ixl<interface_num> <IP_address>
+
+6. Verify that the interface works. Enter the following, where <IP_address> is the IP address for another machine on the same subnet as the interface that is being tested:
+
+ ping <IP_address>
+
+7. If you want the driver to load automatically when the system is booted:
+
+ cd ixl-x.x.x/src
+ make
+ make install
+
+ Edit /boot/loader.conf, and add the following line:
+ if_ixl_load="YES"
+
+ Edit /etc/rc.conf, and create the appropriate
+ ifconfig_ixl<interface_num> entry:
+
+ ifconfig_ixl<interface_num>="<ifconfig_settings>"
+
+ Example usage:
+
+ ifconfig_ixl0="inet 192.168.10.1 netmask 255.255.255.0"
+
+ NOTE: For assistance, see the ifconfig man page.
+
+
+
+Configuration and Tuning
+=========================
+
+The driver supports Transmit/Receive Checksum Offload for IPv4 and IPv6,
+TSO for IPv4 and IPv6, LRO, and Jumbo Frames on all 40 Gigabit adapters.
+
+ Jumbo Frames
+ ------------
+ To enable Jumbo Frames, use the ifconfig utility to increase
+ the MTU beyond 1500 bytes.
+
+ - The Jumbo Frames setting on the switch must be set to at least
+ 22 bytes larger than that of the adapter.
+
+ - The maximum MTU setting for Jumbo Frames is 9706. This value
+ coincides with the maximum jumbo frame size of 9728.
+ To modify the setting, enter the following:
+
+ ifconfig ixl<interface_num> <hostname or IP address> mtu 9000
+
+ - To confirm an interface's MTU value, use the ifconfig command.
+ To confirm the MTU used between two specific devices, use:
+
+ route get <destination_IP_address>
+
+ VLANs
+ -----
+ To create a new VLAN pseudo-interface:
+
+ ifconfig <vlan_name> create
+
+ To associate the VLAN pseudo-interface with a physical interface
+ and assign a VLAN ID, IP address, and netmask:
+
+ ifconfig <vlan_name> <ip_address> netmask <subnet_mask> vlan
+ <vlan_id> vlandev <physical_interface>
+
+ Example:
+
+ ifconfig vlan10 10.0.0.1 netmask 255.255.255.0 vlan 10 vlandev ixl0
+
+ In this example, all packets will be marked on egress with
+ 802.1Q VLAN tags, specifying a VLAN ID of 10.
+
+ To remove a VLAN pseudo-interface:
+
+ ifconfig <vlan_name> destroy
+
+
+ Checksum Offload
+ ----------------
+
+ Checksum offloading supports IPv4 and IPv6 with TCP and UDP packets
+ and is supported for both transmit and receive. Checksum offloading
+ for transmit and receive is enabled by default for both IPv4 and IPv6.
+
+ Checksum offloading can be enabled or disabled using ifconfig.
+ Transmit and receive offloading for IPv4 and IPv6 are enabled
+ and disabled separately.
+
+ NOTE: TSO requires Tx checksum, so when Tx checksum
+ is disabled, TSO will also be disabled.
+
+ To enable Tx checksum offloading for IPv4:
+
+ ifconfig ixl<interface_num> txcsum4
+
+ To disable Tx checksum offloading for IPv4:
+
+ ifconfig ixl<interface_num> -txcsum4
+ (NOTE: This will disable TSO4)
+
+ To enable Rx checksum offloading for IPv6:
+
+ ifconfig ixl<interface_num> rxcsum6
+
+ To disable Rx checksum offloading for IPv6:
+
+ ifconfig ixl<interface_num> -rxcsum6
+ (NOTE: This will disable TSO6)
+
+
+ To confirm the current settings:
+
+ ifconfig ixl<interface_num>
+
+
+ TSO
+ ---
+
+ TSO supports both IPv4 and IPv6 and is enabled by default. TSO can
+ be disabled and enabled using the ifconfig utility.
+
+ NOTE: TSO requires Tx checksum, so when Tx checksum is
+ disabled, TSO will also be disabled.
+
+ To disable TSO IPv4:
+
+ ifconfig ixl<interface_num> -tso4
+
+ To enable TSO IPv4:
+
+ ifconfig ixl<interface_num> tso4
+
+ To disable TSO IPv6:
+
+ ifconfig ixl<interface_num> -tso6
+
+ To enable TSO IPv6:
+
+ ifconfig ixl<interface_num> tso6
+
+ To disable BOTH TSO IPv4 and IPv6:
+
+ ifconfig ixl<interface_num> -tso
+
+ To enable BOTH TSO IPv4 and IPv6:
+
+ ifconfig ixl<interface_num> tso
+
+
+ LRO
+ ---
+
+ Large Receive Offload is enabled by default. It can be enabled
+ or disabled by using the ifconfig utility.
+
+ NOTE: LRO should be disabled when forwarding packets.
+
+ To disable LRO:
+
+ ifconfig ixl<interface_num> -lro
+
+ To enable LRO:
+
+ ifconfig ixl<interface_num> lro
+
+
+Flow Control
+------------
+Flow control is disabled by default. To change flow control settings, use sysctl.
+
+To enable flow control for Rx pause frames:
+
+ sysctl dev.ixl.<interface_num>.fc=1
+
+To enable flow control for Tx pause frames:
+
+ sysctl dev.ixl.<interface_num>.fc=2
+
+To enable flow control for both Rx and Tx pause frames:
+
+ sysctl dev.ixl.<interface_num>.fc=3
+
+To disable flow control:
+
+ sysctl dev.ixl.<interface_num>.fc=0
+
+
+NOTE: You must have a flow control capable link partner.
+
+
+
+ Important system configuration changes:
+ =======================================
+
+
+-Change the file /etc/sysctl.conf, and add the line:
+
+ hw.intr_storm_threshold=0 (the default is 1000)
+
+-Best throughput results are seen with a large MTU; use 9706 if possible.
+
+-The default number of descriptors per ring is 1024; increasing this may improve performance, depending on the use case.
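+
+ As an illustration of the large-MTU recommendation above, the rc.conf entry
+ shown earlier can carry an mtu option (the address is the same placeholder
+ used in the earlier example):
+
+ ifconfig_ixl0="inet 192.168.10.1 netmask 255.255.255.0 mtu 9706"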
+
+
+Known Limitations
+=================
+
+Network Memory Buffer allocation
+--------------------------------
+ FreeBSD may have a low number of network memory buffers (mbufs) by default. If your mbuf value is too low, it may cause the driver to fail to initialize and/or cause the system to become unresponsive. You can check whether the system is mbuf-starved by running 'netstat -m'. Increase the number of mbufs by editing the lines below in /etc/sysctl.conf:
+
+ kern.ipc.nmbclusters
+ kern.ipc.nmbjumbop
+ kern.ipc.nmbjumbo9
+ kern.ipc.nmbjumbo16
+ kern.ipc.nmbufs
+
+The amount of memory that you allocate is system specific, and may require some trial and error.
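+
+As a sketch only, entries in /etc/sysctl.conf use the form name=value; the
+numbers below are placeholders showing the syntax, not tuning recommendations:
+
+ kern.ipc.nmbclusters=262144
+ kern.ipc.nmbjumbo9=65536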
+
+Also, increasing the following values in /etc/sysctl.conf could help improve network performance:
+
+ kern.ipc.maxsockbuf
+ net.inet.tcp.sendspace
+ net.inet.tcp.recvspace
+ net.inet.udp.maxdgram
+ net.inet.udp.recvspace
+
+
+UDP Stress Test Dropped Packet Issue
+------------------------------------
+ Under a small-packet UDP stress test with the ixl driver, the FreeBSD system will drop UDP packets because the socket buffers fill up. You may want to change the driver's flow control setting to the minimum value to control packet reception, as shown below.
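+
+ For example, the flow control sysctl described in the Flow Control section
+ can be set to its minimum value:
+
+ sysctl dev.ixl.<interface_num>.fc=0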
+
+
+Disable LRO when routing/bridging
+---------------------------------
+LRO must be turned off when forwarding traffic.
+
+
+Lower than expected performance
+-------------------------------
+ Some PCIe x8 slots are actually configured as x4 slots. These slots have insufficient bandwidth for full line rate with dual port and quad port devices. In addition, if you put a PCIe Generation 3-capable adapter into a PCIe Generation 2 slot, you cannot get full bandwidth. The driver detects this situation and writes the following message in the system log:
+
+ "PCI-Express bandwidth available for this card is not sufficient for optimal performance. For optimal performance a x8 PCI-Express slot is required."
+
+If this error occurs, moving your adapter to a true PCIe Generation 3 x8 slot will resolve the issue.
+
+
+Support
+=======
+
+For general information and support, go to the Intel support website at:
+
+ http://support.intel.com
+
+If an issue is identified with the released source code on the supported kernel with a supported adapter, email the specific information related to the issue to freebsdnic@mailbox.intel.com.
+
+
+License
+=======
+
+This software program is released under the terms of a license agreement between you ('Licensee') and Intel. Do not use or load this software or any associated materials (collectively, the 'Software') until you have carefully read the full terms and conditions of the LICENSE located in this software package. By loading or using the Software, you agree to the terms of this Agreement. If you do not
+agree with the terms of this Agreement, do not install or use the Software.
+
+* Other names and brands may be claimed as the property of others.
+
+
diff --git a/sys/dev/ixl/i40e_adminq.c b/sys/dev/ixl/i40e_adminq.c
new file mode 100755
index 0000000..e0f8725
--- /dev/null
+++ b/sys/dev/ixl/i40e_adminq.c
@@ -0,0 +1,1070 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2014, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#include "i40e_status.h"
+#include "i40e_type.h"
+#include "i40e_register.h"
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+
+/**
+ * i40e_is_nvm_update_op - return TRUE if this is an NVM update operation
+ * @desc: API request descriptor
+ **/
+static INLINE bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
+{
+ return (desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_erase) ||
+ desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_update));
+}
+
+/**
+ * i40e_adminq_init_regs - Initialize AdminQ registers
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the alloc_asq and alloc_arq functions have already been called
+ **/
+static void i40e_adminq_init_regs(struct i40e_hw *hw)
+{
+ /* set head and tail registers in our local struct */
+ if (i40e_is_vf(hw)) {
+ hw->aq.asq.tail = I40E_VF_ATQT1;
+ hw->aq.asq.head = I40E_VF_ATQH1;
+ hw->aq.asq.len = I40E_VF_ATQLEN1;
+ hw->aq.asq.bal = I40E_VF_ATQBAL1;
+ hw->aq.asq.bah = I40E_VF_ATQBAH1;
+ hw->aq.arq.tail = I40E_VF_ARQT1;
+ hw->aq.arq.head = I40E_VF_ARQH1;
+ hw->aq.arq.len = I40E_VF_ARQLEN1;
+ hw->aq.arq.bal = I40E_VF_ARQBAL1;
+ hw->aq.arq.bah = I40E_VF_ARQBAH1;
+ } else {
+ hw->aq.asq.tail = I40E_PF_ATQT;
+ hw->aq.asq.head = I40E_PF_ATQH;
+ hw->aq.asq.len = I40E_PF_ATQLEN;
+ hw->aq.asq.bal = I40E_PF_ATQBAL;
+ hw->aq.asq.bah = I40E_PF_ATQBAH;
+ hw->aq.arq.tail = I40E_PF_ARQT;
+ hw->aq.arq.head = I40E_PF_ARQH;
+ hw->aq.arq.len = I40E_PF_ARQLEN;
+ hw->aq.arq.bal = I40E_PF_ARQBAL;
+ hw->aq.arq.bah = I40E_PF_ARQBAH;
+ }
+}
+
+/**
+ * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
+ * @hw: pointer to the hardware structure
+ **/
+enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code;
+
+ ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
+ i40e_mem_atq_ring,
+ (hw->aq.num_asq_entries *
+ sizeof(struct i40e_aq_desc)),
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ return ret_code;
+
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
+ (hw->aq.num_asq_entries *
+ sizeof(struct i40e_asq_cmd_details)));
+ if (ret_code) {
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+ return ret_code;
+ }
+
+ return ret_code;
+}
+
+/**
+ * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
+ * @hw: pointer to the hardware structure
+ **/
+enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code;
+
+ ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
+ i40e_mem_arq_ring,
+ (hw->aq.num_arq_entries *
+ sizeof(struct i40e_aq_desc)),
+ I40E_ADMINQ_DESC_ALIGNMENT);
+
+ return ret_code;
+}
+
+/**
+ * i40e_free_adminq_asq - Free Admin Queue send rings
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the posted send buffers have already been cleaned
+ * and de-allocated
+ **/
+void i40e_free_adminq_asq(struct i40e_hw *hw)
+{
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+}
+
+/**
+ * i40e_free_adminq_arq - Free Admin Queue receive rings
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the posted receive buffers have already been cleaned
+ * and de-allocated
+ **/
+void i40e_free_adminq_arq(struct i40e_hw *hw)
+{
+ i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+}
+
+/**
+ * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
+ * @hw: pointer to the hardware structure
+ **/
+static enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code;
+ struct i40e_aq_desc *desc;
+ struct i40e_dma_mem *bi;
+ int i;
+
+ /* We'll be allocating the buffer info memory first, then we can
+ * allocate the mapped buffers for the event processing
+ */
+
+ /* buffer_info structures do not need alignment */
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
+ (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
+ if (ret_code)
+ goto alloc_arq_bufs;
+ hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < hw->aq.num_arq_entries; i++) {
+ bi = &hw->aq.arq.r.arq_bi[i];
+ ret_code = i40e_allocate_dma_mem(hw, bi,
+ i40e_mem_arq_buf,
+ hw->aq.arq_buf_size,
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ goto unwind_alloc_arq_bufs;
+
+ /* now configure the descriptors for use */
+ desc = I40E_ADMINQ_DESC(hw->aq.arq, i);
+
+ desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
+ desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
+ desc->opcode = 0;
+ /* This is in accordance with Admin queue design, there is no
+ * register for buffer size configuration
+ */
+ desc->datalen = CPU_TO_LE16((u16)bi->size);
+ desc->retval = 0;
+ desc->cookie_high = 0;
+ desc->cookie_low = 0;
+ desc->params.external.addr_high =
+ CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
+ desc->params.external.addr_low =
+ CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
+ desc->params.external.param0 = 0;
+ desc->params.external.param1 = 0;
+ }
+
+alloc_arq_bufs:
+ return ret_code;
+
+unwind_alloc_arq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+ i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
+
+ return ret_code;
+}
+
+/**
+ * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
+ * @hw: pointer to the hardware structure
+ **/
+static enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code;
+ struct i40e_dma_mem *bi;
+ int i;
+
+ /* No mapped memory needed yet, just the buffer info structures */
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
+ (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
+ if (ret_code)
+ goto alloc_asq_bufs;
+ hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < hw->aq.num_asq_entries; i++) {
+ bi = &hw->aq.asq.r.asq_bi[i];
+ ret_code = i40e_allocate_dma_mem(hw, bi,
+ i40e_mem_asq_buf,
+ hw->aq.asq_buf_size,
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ goto unwind_alloc_asq_bufs;
+ }
+alloc_asq_bufs:
+ return ret_code;
+
+unwind_alloc_asq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+ i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
+
+ return ret_code;
+}
+
+/**
+ * i40e_free_arq_bufs - Free receive queue buffer info elements
+ * @hw: pointer to the hardware structure
+ **/
+static void i40e_free_arq_bufs(struct i40e_hw *hw)
+{
+ int i;
+
+ /* free descriptors */
+ for (i = 0; i < hw->aq.num_arq_entries; i++)
+ i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+
+ /* free the descriptor memory */
+ i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+
+ /* free the dma header */
+ i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
+}
+
+/**
+ * i40e_free_asq_bufs - Free send queue buffer info elements
+ * @hw: pointer to the hardware structure
+ **/
+static void i40e_free_asq_bufs(struct i40e_hw *hw)
+{
+ int i;
+
+ /* only unmap if the address is non-NULL */
+ for (i = 0; i < hw->aq.num_asq_entries; i++)
+ if (hw->aq.asq.r.asq_bi[i].pa)
+ i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+
+ /* free the buffer info list */
+ i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
+
+ /* free the descriptor memory */
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+
+ /* free the dma header */
+ i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
+}
+
+/**
+ * i40e_config_asq_regs - configure ASQ registers
+ * @hw: pointer to the hardware structure
+ *
+ * Configure base address and length registers for the transmit queue
+ **/
+static enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u32 reg = 0;
+
+ /* Clear Head and Tail */
+ wr32(hw, hw->aq.asq.head, 0);
+ wr32(hw, hw->aq.asq.tail, 0);
+
+ /* set starting point */
+ wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+ I40E_PF_ATQLEN_ATQENABLE_MASK));
+ wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
+ wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));
+
+ /* Check one register to verify that config was applied */
+ reg = rd32(hw, hw->aq.asq.bal);
+ if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+
+ return ret_code;
+}
+
+/**
+ * i40e_config_arq_regs - ARQ register configuration
+ * @hw: pointer to the hardware structure
+ *
+ * Configure base address and length registers for the receive (event queue)
+ **/
+static enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u32 reg = 0;
+
+ /* Clear Head and Tail */
+ wr32(hw, hw->aq.arq.head, 0);
+ wr32(hw, hw->aq.arq.tail, 0);
+
+ /* set starting point */
+ wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+ I40E_PF_ARQLEN_ARQENABLE_MASK));
+ wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
+ wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));
+
+ /* Update tail in the HW to post pre-allocated buffers */
+ wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
+
+ /* Check one register to verify that config was applied */
+ reg = rd32(hw, hw->aq.arq.bal);
+ if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+
+ return ret_code;
+}
+
+/**
+ * i40e_init_asq - main initialization routine for ASQ
+ * @hw: pointer to the hardware structure
+ *
+ * This is the main initialization routine for the Admin Send Queue
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_asq_entries
+ * - hw->aq.arq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this as the memory allocation routines
+ * called are not going to be atomic context safe
+ **/
+enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ if (hw->aq.asq.count > 0) {
+ /* queue already initialized */
+ ret_code = I40E_ERR_NOT_READY;
+ goto init_adminq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_asq_entries == 0) ||
+ (hw->aq.asq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ hw->aq.asq.next_to_use = 0;
+ hw->aq.asq.next_to_clean = 0;
+ hw->aq.asq.count = hw->aq.num_asq_entries;
+
+ /* allocate the ring memory */
+ ret_code = i40e_alloc_adminq_asq_ring(hw);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = i40e_alloc_asq_bufs(hw);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_free_rings;
+
+ /* initialize base registers */
+ ret_code = i40e_config_asq_regs(hw);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_free_rings;
+
+ /* success! */
+ goto init_adminq_exit;
+
+init_adminq_free_rings:
+ i40e_free_adminq_asq(hw);
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_init_arq - initialize ARQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main initialization routine for the Admin Receive (Event) Queue.
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_asq_entries
+ * - hw->aq.arq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this as the memory allocation routines
+ * called are not going to be atomic context safe
+ **/
+enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ if (hw->aq.arq.count > 0) {
+ /* queue already initialized */
+ ret_code = I40E_ERR_NOT_READY;
+ goto init_adminq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_arq_entries == 0) ||
+ (hw->aq.arq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ hw->aq.arq.next_to_use = 0;
+ hw->aq.arq.next_to_clean = 0;
+ hw->aq.arq.count = hw->aq.num_arq_entries;
+
+ /* allocate the ring memory */
+ ret_code = i40e_alloc_adminq_arq_ring(hw);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = i40e_alloc_arq_bufs(hw);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_free_rings;
+
+ /* initialize base registers */
+ ret_code = i40e_config_arq_regs(hw);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_free_rings;
+
+ /* success! */
+ goto init_adminq_exit;
+
+init_adminq_free_rings:
+ i40e_free_adminq_arq(hw);
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_shutdown_asq - shutdown the ASQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main shutdown routine for the Admin Send Queue
+ **/
+enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ if (hw->aq.asq.count == 0)
+ return I40E_ERR_NOT_READY;
+
+ /* Stop firmware AdminQ processing */
+ wr32(hw, hw->aq.asq.head, 0);
+ wr32(hw, hw->aq.asq.tail, 0);
+ wr32(hw, hw->aq.asq.len, 0);
+ wr32(hw, hw->aq.asq.bal, 0);
+ wr32(hw, hw->aq.asq.bah, 0);
+
+ /* make sure spinlock is available */
+ i40e_acquire_spinlock(&hw->aq.asq_spinlock);
+
+ hw->aq.asq.count = 0; /* to indicate uninitialized queue */
+
+ /* free ring buffers */
+ i40e_free_asq_bufs(hw);
+
+ i40e_release_spinlock(&hw->aq.asq_spinlock);
+
+ return ret_code;
+}
+
+/**
+ * i40e_shutdown_arq - shutdown ARQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main shutdown routine for the Admin Receive Queue
+ **/
+enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ if (hw->aq.arq.count == 0)
+ return I40E_ERR_NOT_READY;
+
+ /* Stop firmware AdminQ processing */
+ wr32(hw, hw->aq.arq.head, 0);
+ wr32(hw, hw->aq.arq.tail, 0);
+ wr32(hw, hw->aq.arq.len, 0);
+ wr32(hw, hw->aq.arq.bal, 0);
+ wr32(hw, hw->aq.arq.bah, 0);
+
+ /* make sure spinlock is available */
+ i40e_acquire_spinlock(&hw->aq.arq_spinlock);
+
+ hw->aq.arq.count = 0; /* to indicate uninitialized queue */
+
+ /* free ring buffers */
+ i40e_free_arq_bufs(hw);
+
+ i40e_release_spinlock(&hw->aq.arq_spinlock);
+
+ return ret_code;
+}
+
+/**
+ * i40e_init_adminq - main initialization routine for Admin Queue
+ * @hw: pointer to the hardware structure
+ *
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_asq_entries
+ * - hw->aq.num_arq_entries
+ * - hw->aq.arq_buf_size
+ * - hw->aq.asq_buf_size
+ **/
+enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code;
+ u16 eetrack_lo, eetrack_hi;
+ int retry = 0;
+ /* verify input for valid configuration */
+ if ((hw->aq.num_arq_entries == 0) ||
+ (hw->aq.num_asq_entries == 0) ||
+ (hw->aq.arq_buf_size == 0) ||
+ (hw->aq.asq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ /* initialize spin locks */
+ i40e_init_spinlock(&hw->aq.asq_spinlock);
+ i40e_init_spinlock(&hw->aq.arq_spinlock);
+
+ /* Set up register offsets */
+ i40e_adminq_init_regs(hw);
+
+ /* setup ASQ command write back timeout */
+ hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;
+
+ /* allocate the ASQ */
+ ret_code = i40e_init_asq(hw);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_destroy_spinlocks;
+
+ /* allocate the ARQ */
+ ret_code = i40e_init_arq(hw);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_free_asq;
+
+ if (i40e_is_vf(hw)) /* VF has no need of firmware */
+ goto init_adminq_exit;
+
+/* There are some cases where the firmware may not be quite ready
+ * for AdminQ operations, so we retry the AdminQ setup a few times
+ * if we see timeouts in this first AQ call.
+ */
+ do {
+ ret_code = i40e_aq_get_firmware_version(hw,
+ &hw->aq.fw_maj_ver,
+ &hw->aq.fw_min_ver,
+ &hw->aq.api_maj_ver,
+ &hw->aq.api_min_ver,
+ NULL);
+ if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
+ break;
+ retry++;
+ i40e_msec_delay(100);
+ i40e_resume_aq(hw);
+ } while (retry < 10);
+ if (ret_code != I40E_SUCCESS)
+ goto init_adminq_free_arq;
+
+ /* get the NVM version info */
+ i40e_read_nvm_word(hw, I40E_SR_NVM_IMAGE_VERSION, &hw->nvm.version);
+ i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
+ i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
+ hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
+
+ if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
+ ret_code = I40E_ERR_FIRMWARE_API_VERSION;
+ goto init_adminq_free_arq;
+ }
+
+ /* pre-emptive resource lock release */
+ i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+ hw->aq.nvm_busy = FALSE;
+
+ ret_code = i40e_aq_set_hmc_resource_profile(hw,
+ I40E_HMC_PROFILE_DEFAULT,
+ 0,
+ NULL);
+ ret_code = I40E_SUCCESS;
+
+ /* success! */
+ goto init_adminq_exit;
+
+init_adminq_free_arq:
+ i40e_shutdown_arq(hw);
+init_adminq_free_asq:
+ i40e_shutdown_asq(hw);
+init_adminq_destroy_spinlocks:
+ i40e_destroy_spinlock(&hw->aq.asq_spinlock);
+ i40e_destroy_spinlock(&hw->aq.arq_spinlock);
+
+init_adminq_exit:
+ return ret_code;
+}
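+
+/* Illustrative caller sketch (not part of the shared i40e code): a driver
+ * fills in the queue sizes listed above before calling i40e_init_adminq();
+ * the sizes below are placeholders, not recommended values:
+ *
+ *	hw->aq.num_asq_entries = 256;
+ *	hw->aq.num_arq_entries = 256;
+ *	hw->aq.asq_buf_size = 512;
+ *	hw->aq.arq_buf_size = 512;
+ *	if (i40e_init_adminq(hw) != I40E_SUCCESS)
+ *		(handle initialization failure)
+ */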
+
+/**
+ * i40e_shutdown_adminq - shutdown routine for the Admin Queue
+ * @hw: pointer to the hardware structure
+ **/
+enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ if (i40e_check_asq_alive(hw))
+ i40e_aq_queue_shutdown(hw, TRUE);
+
+ i40e_shutdown_asq(hw);
+ i40e_shutdown_arq(hw);
+
+ /* destroy the spinlocks */
+ i40e_destroy_spinlock(&hw->aq.asq_spinlock);
+ i40e_destroy_spinlock(&hw->aq.arq_spinlock);
+
+ return ret_code;
+}
+
+/**
+ * i40e_clean_asq - cleans Admin send queue
+ * @hw: pointer to the hardware structure
+ *
+ * returns the number of free desc
+ **/
+u16 i40e_clean_asq(struct i40e_hw *hw)
+{
+ struct i40e_adminq_ring *asq = &(hw->aq.asq);
+ struct i40e_asq_cmd_details *details;
+ u16 ntc = asq->next_to_clean;
+ struct i40e_aq_desc desc_cb;
+ struct i40e_aq_desc *desc;
+
+ desc = I40E_ADMINQ_DESC(*asq, ntc);
+ details = I40E_ADMINQ_DETAILS(*asq, ntc);
+ while (rd32(hw, hw->aq.asq.head) != ntc) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "%s: ntc %d head %d.\n", __FUNCTION__, ntc,
+ rd32(hw, hw->aq.asq.head));
+
+ if (details->callback) {
+ I40E_ADMINQ_CALLBACK cb_func =
+ (I40E_ADMINQ_CALLBACK)details->callback;
+ i40e_memcpy(&desc_cb, desc,
+ sizeof(struct i40e_aq_desc), I40E_DMA_TO_DMA);
+ cb_func(hw, &desc_cb);
+ }
+ i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
+ i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
+ ntc++;
+ if (ntc == asq->count)
+ ntc = 0;
+ desc = I40E_ADMINQ_DESC(*asq, ntc);
+ details = I40E_ADMINQ_DETAILS(*asq, ntc);
+ }
+
+ asq->next_to_clean = ntc;
+
+ return I40E_DESC_UNUSED(asq);
+}
+
+/**
+ * i40e_asq_done - check if FW has processed the Admin Send Queue
+ * @hw: pointer to the hw struct
+ *
+ * Returns TRUE if the firmware has processed all descriptors on the
+ * admin send queue. Returns FALSE if there are still requests pending.
+ **/
+bool i40e_asq_done(struct i40e_hw *hw)
+{
+ /* AQ designers suggest use of head for better
+ * timing reliability than DD bit
+ */
+ return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
+
+}
+
+/**
+ * i40e_asq_send_command - send command to Admin Queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor describing the command (non DMA mem)
+ * @buff: buffer to use for indirect commands
+ * @buff_size: size of buffer for indirect commands
+ * @cmd_details: pointer to command details structure
+ *
+ * This is the main send command driver routine for the Admin Queue send
+ * queue. It runs the queue, cleans the queue, etc
+ **/
+enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ struct i40e_dma_mem *dma_buff = NULL;
+ struct i40e_asq_cmd_details *details;
+ struct i40e_aq_desc *desc_on_ring;
+ bool cmd_completed = FALSE;
+ u16 retval = 0;
+ u32 val = 0;
+
+ val = rd32(hw, hw->aq.asq.head);
+ if (val >= hw->aq.num_asq_entries) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: head overrun at %d\n", val);
+ status = I40E_ERR_QUEUE_EMPTY;
+ goto asq_send_command_exit;
+ }
+
+ if (hw->aq.asq.count == 0) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Admin queue not initialized.\n");
+ status = I40E_ERR_QUEUE_EMPTY;
+ goto asq_send_command_exit;
+ }
+
+ if (i40e_is_nvm_update_op(desc) && hw->aq.nvm_busy) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: NVM busy.\n");
+ status = I40E_ERR_NVM;
+ goto asq_send_command_exit;
+ }
+
+ details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
+ if (cmd_details) {
+ i40e_memcpy(details,
+ cmd_details,
+ sizeof(struct i40e_asq_cmd_details),
+ I40E_NONDMA_TO_NONDMA);
+
+ /* If the cmd_details are defined copy the cookie. The
+ * CPU_TO_LE32 is not needed here because the data is ignored
+ * by the FW, only used by the driver
+ */
+ if (details->cookie) {
+ desc->cookie_high =
+ CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
+ desc->cookie_low =
+ CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
+ }
+ } else {
+ i40e_memset(details, 0,
+ sizeof(struct i40e_asq_cmd_details),
+ I40E_NONDMA_MEM);
+ }
+
+ /* clear requested flags and then set additional flags if defined */
+ desc->flags &= ~CPU_TO_LE16(details->flags_dis);
+ desc->flags |= CPU_TO_LE16(details->flags_ena);
+
+ i40e_acquire_spinlock(&hw->aq.asq_spinlock);
+
+ if (buff_size > hw->aq.asq_buf_size) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Invalid buffer size: %d.\n",
+ buff_size);
+ status = I40E_ERR_INVALID_SIZE;
+ goto asq_send_command_error;
+ }
+
+ if (details->postpone && !details->async) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Async flag not set along with postpone flag");
+ status = I40E_ERR_PARAM;
+ goto asq_send_command_error;
+ }
+
+ /* call clean and check queue available function to reclaim the
+ * descriptors that were processed by FW, the function returns the
+ * number of desc available
+ */
+ /* the clean function called here could be called in a separate thread
+ * in case of asynchronous completions
+ */
+ if (i40e_clean_asq(hw) == 0) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Error queue is full.\n");
+ status = I40E_ERR_ADMIN_QUEUE_FULL;
+ goto asq_send_command_error;
+ }
+
+ /* initialize the temp desc pointer with the right desc */
+ desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
+
+ /* if the desc is available copy the temp desc to the right place */
+ i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
+ I40E_NONDMA_TO_DMA);
+
+ /* if buff is not NULL assume indirect command */
+ if (buff != NULL) {
+ dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
+ /* copy the user buff into the respective DMA buff */
+ i40e_memcpy(dma_buff->va, buff, buff_size,
+ I40E_NONDMA_TO_DMA);
+ desc_on_ring->datalen = CPU_TO_LE16(buff_size);
+
+ /* Update the address values in the desc with the pa value
+ * for respective buffer
+ */
+ desc_on_ring->params.external.addr_high =
+ CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
+ desc_on_ring->params.external.addr_low =
+ CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
+ }
+
+ /* bump the tail */
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
+ i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
+ buff, buff_size);
+ (hw->aq.asq.next_to_use)++;
+ if (hw->aq.asq.next_to_use == hw->aq.asq.count)
+ hw->aq.asq.next_to_use = 0;
+ if (!details->postpone)
+ wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
+
+ /* if cmd_details are not defined or async flag is not set,
+ * we need to wait for desc write back
+ */
+ if (!details->async && !details->postpone) {
+ u32 total_delay = 0;
+
+ do {
+ /* AQ designers suggest use of head for better
+ * timing reliability than DD bit
+ */
+ if (i40e_asq_done(hw))
+ break;
+ /* ugh! delay while spin_lock */
+ i40e_msec_delay(1);
+ total_delay++;
+ } while (total_delay < hw->aq.asq_cmd_timeout);
+ }
+
+ /* if ready, copy the desc back to temp */
+ if (i40e_asq_done(hw)) {
+ i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
+ I40E_DMA_TO_NONDMA);
+ if (buff != NULL)
+ i40e_memcpy(buff, dma_buff->va, buff_size,
+ I40E_DMA_TO_NONDMA);
+ retval = LE16_TO_CPU(desc->retval);
+ if (retval != 0) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Command completed with error 0x%X.\n",
+ retval);
+
+ /* strip off FW internal code */
+ retval &= 0xff;
+ }
+ cmd_completed = TRUE;
+ if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
+ status = I40E_SUCCESS;
+ else
+ status = I40E_ERR_ADMIN_QUEUE_ERROR;
+ hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
+ }
+
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: desc and buffer writeback:\n");
+ i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
+
+ /* update the error if time out occurred */
+ if ((!cmd_completed) &&
+ (!details->async && !details->postpone)) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Writeback timeout.\n");
+ status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+ }
+
+ if (!status && i40e_is_nvm_update_op(desc))
+ hw->aq.nvm_busy = TRUE;
+
+asq_send_command_error:
+ i40e_release_spinlock(&hw->aq.asq_spinlock);
+asq_send_command_exit:
+ return status;
+}
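+
+/* Illustrative usage sketch (not part of the shared i40e code): direct
+ * commands are built with i40e_fill_default_direct_cmd_desc() below and
+ * then submitted through i40e_asq_send_command(), e.g. a queue shutdown
+ * request:
+ *
+ *	struct i40e_aq_desc desc;
+ *	enum i40e_status_code status;
+ *
+ *	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown);
+ *	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+ */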
+
+/**
+ * i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
+ * @desc: pointer to the temp descriptor (non DMA mem)
+ * @opcode: the opcode can be used to decide which flags to turn off or on
+ *
+ * Fill the desc with default values
+ **/
+void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+ u16 opcode)
+{
+ /* zero out the desc */
+ i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
+ I40E_NONDMA_MEM);
+ desc->opcode = CPU_TO_LE16(opcode);
+ desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
+}
+
+/**
+ * i40e_clean_arq_element
+ * @hw: pointer to the hw struct
+ * @e: event info from the receive descriptor, includes any buffers
+ * @pending: number of events that could be left to process
+ *
+ * This function cleans one Admin Receive Queue element and returns
+ * the contents through e. It can also return how many events are
+ * left to process through 'pending'
+ **/
+enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
+ struct i40e_arq_event_info *e,
+ u16 *pending)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u16 ntc = hw->aq.arq.next_to_clean;
+ struct i40e_aq_desc *desc;
+ struct i40e_dma_mem *bi;
+ u16 desc_idx;
+ u16 datalen;
+ u16 flags;
+ u16 ntu;
+
+ /* take the lock before we start messing with the ring */
+ i40e_acquire_spinlock(&hw->aq.arq_spinlock);
+
+ /* set next_to_use to head */
+ ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
+ if (ntu == ntc) {
+ /* nothing to do - shouldn't need to update ring's values */
+ ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
+ goto clean_arq_element_out;
+ }
+
+ /* now clean the next descriptor */
+ desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
+ desc_idx = ntc;
+
+ flags = LE16_TO_CPU(desc->flags);
+ if (flags & I40E_AQ_FLAG_ERR) {
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+ hw->aq.arq_last_status =
+ (enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQRX: Event received with error 0x%X.\n",
+ hw->aq.arq_last_status);
+ }
+
+ i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
+ I40E_DMA_TO_NONDMA);
+ datalen = LE16_TO_CPU(desc->datalen);
+ e->msg_len = min(datalen, e->buf_len);
+ if (e->msg_buf != NULL && (e->msg_len != 0))
+ i40e_memcpy(e->msg_buf,
+ hw->aq.arq.r.arq_bi[desc_idx].va,
+ e->msg_len, I40E_DMA_TO_NONDMA);
+
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
+ i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
+ hw->aq.arq_buf_size);
+
+ /* Restore the original datalen and buffer address in the desc,
+ * FW updates datalen to indicate the event message
+ * size
+ */
+ bi = &hw->aq.arq.r.arq_bi[ntc];
+ i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);
+
+ desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
+ desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
+ desc->datalen = CPU_TO_LE16((u16)bi->size);
+ desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
+ desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
+
+ /* set tail = the last cleaned desc index. */
+ wr32(hw, hw->aq.arq.tail, ntc);
+ /* ntc is updated to tail + 1 */
+ ntc++;
+ if (ntc == hw->aq.num_arq_entries)
+ ntc = 0;
+ hw->aq.arq.next_to_clean = ntc;
+ hw->aq.arq.next_to_use = ntu;
+
+clean_arq_element_out:
+ /* Set pending if needed, unlock and return */
+ if (pending != NULL)
+ *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
+ i40e_release_spinlock(&hw->aq.arq_spinlock);
+
+ if (i40e_is_nvm_update_op(&e->desc)) {
+ hw->aq.nvm_busy = FALSE;
+ if (hw->aq.nvm_release_on_done) {
+ i40e_release_nvm(hw);
+ hw->aq.nvm_release_on_done = FALSE;
+ }
+ }
+
+ return ret_code;
+}
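+
+/* Illustrative caller sketch (not part of the shared i40e code): an event
+ * handler typically supplies a driver-allocated buffer and drains the ARQ
+ * until no events are pending:
+ *
+ *	struct i40e_arq_event_info event;
+ *	u16 pending = 0;
+ *
+ *	event.buf_len = hw->aq.arq_buf_size;
+ *	event.msg_buf = buf;		(buffer of at least buf_len bytes)
+ *	do {
+ *		if (i40e_clean_arq_element(hw, &event, &pending))
+ *			break;
+ *		(dispatch on LE16_TO_CPU(event.desc.opcode))
+ *	} while (pending);
+ */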
+
+void i40e_resume_aq(struct i40e_hw *hw)
+{
+ /* Registers are reset after PF reset */
+ hw->aq.asq.next_to_use = 0;
+ hw->aq.asq.next_to_clean = 0;
+
+#if (I40E_VF_ATQLEN_ATQENABLE_MASK != I40E_PF_ATQLEN_ATQENABLE_MASK)
+#error I40E_VF_ATQLEN_ATQENABLE_MASK != I40E_PF_ATQLEN_ATQENABLE_MASK
+#endif
+ i40e_config_asq_regs(hw);
+
+ hw->aq.arq.next_to_use = 0;
+ hw->aq.arq.next_to_clean = 0;
+
+ i40e_config_arq_regs(hw);
+}
diff --git a/sys/dev/ixl/i40e_adminq.h b/sys/dev/ixl/i40e_adminq.h
new file mode 100755
index 0000000..bebbebc
--- /dev/null
+++ b/sys/dev/ixl/i40e_adminq.h
@@ -0,0 +1,123 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2014, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _I40E_ADMINQ_H_
+#define _I40E_ADMINQ_H_
+
+#include "i40e_osdep.h"
+#include "i40e_adminq_cmd.h"
+
+#define I40E_ADMINQ_DESC(R, i) \
+ (&(((struct i40e_aq_desc *)((R).desc_buf.va))[i]))
+
+#define I40E_ADMINQ_DESC_ALIGNMENT 4096
+
+struct i40e_adminq_ring {
+ struct i40e_virt_mem dma_head; /* space for dma structures */
+ struct i40e_dma_mem desc_buf; /* descriptor ring memory */
+ struct i40e_virt_mem cmd_buf; /* command buffer memory */
+
+ union {
+ struct i40e_dma_mem *asq_bi;
+ struct i40e_dma_mem *arq_bi;
+ } r;
+
+ u16 count; /* Number of descriptors */
+ u16 rx_buf_len; /* Admin Receive Queue buffer length */
+
+ /* used for interrupt processing */
+ u16 next_to_use;
+ u16 next_to_clean;
+
+ /* used for queue tracking */
+ u32 head;
+ u32 tail;
+ u32 len;
+ u32 bah;
+ u32 bal;
+};
+
+/* ASQ transaction details */
+struct i40e_asq_cmd_details {
+ void *callback; /* cast from type I40E_ADMINQ_CALLBACK */
+ u64 cookie;
+ u16 flags_ena;
+ u16 flags_dis;
+ bool async;
+ bool postpone;
+};
+
+#define I40E_ADMINQ_DETAILS(R, i) \
+ (&(((struct i40e_asq_cmd_details *)((R).cmd_buf.va))[i]))
+
+/* ARQ event information */
+struct i40e_arq_event_info {
+ struct i40e_aq_desc desc;
+ u16 msg_len;
+ u16 buf_len;
+ u8 *msg_buf;
+};
+
+/* Admin Queue information */
+struct i40e_adminq_info {
+ struct i40e_adminq_ring arq; /* receive queue */
+ struct i40e_adminq_ring asq; /* send queue */
+ u32 asq_cmd_timeout; /* send queue cmd write back timeout*/
+ u16 num_arq_entries; /* receive queue depth */
+ u16 num_asq_entries; /* send queue depth */
+ u16 arq_buf_size; /* receive queue buffer size */
+ u16 asq_buf_size; /* send queue buffer size */
+ u16 fw_maj_ver; /* firmware major version */
+ u16 fw_min_ver; /* firmware minor version */
+ u16 api_maj_ver; /* api major version */
+ u16 api_min_ver; /* api minor version */
+ bool nvm_busy;
+ bool nvm_release_on_done;
+
+ struct i40e_spinlock asq_spinlock; /* Send queue spinlock */
+ struct i40e_spinlock arq_spinlock; /* Receive queue spinlock */
+
+ /* last status values on send and receive queues */
+ enum i40e_admin_queue_err asq_last_status;
+ enum i40e_admin_queue_err arq_last_status;
+};
+
+/* general information */
+#define I40E_AQ_LARGE_BUF 512
+#define I40E_ASQ_CMD_TIMEOUT 100 /* msecs */
+
+void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+ u16 opcode);
+
+#endif /* _I40E_ADMINQ_H_ */
diff --git a/sys/dev/ixl/i40e_adminq_cmd.h b/sys/dev/ixl/i40e_adminq_cmd.h
new file mode 100755
index 0000000..431463d
--- /dev/null
+++ b/sys/dev/ixl/i40e_adminq_cmd.h
@@ -0,0 +1,2180 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2014, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _I40E_ADMINQ_CMD_H_
+#define _I40E_ADMINQ_CMD_H_
+
+/* This header file defines the i40e Admin Queue commands and is shared between
+ * i40e Firmware and Software.
+ *
+ * This file needs to comply with the Linux Kernel coding style.
+ */
+
+#define I40E_FW_API_VERSION_MAJOR 0x0001
+#define I40E_FW_API_VERSION_MINOR 0x0002
+
+struct i40e_aq_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 retval;
+ __le32 cookie_high;
+ __le32 cookie_low;
+ union {
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 param2;
+ __le32 param3;
+ } internal;
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 addr_high;
+ __le32 addr_low;
+ } external;
+ u8 raw[16];
+ } params;
+};
+
+/* Flags sub-structure
+ * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
+ */
+
+/* command flags and offsets*/
+#define I40E_AQ_FLAG_DD_SHIFT 0
+#define I40E_AQ_FLAG_CMP_SHIFT 1
+#define I40E_AQ_FLAG_ERR_SHIFT 2
+#define I40E_AQ_FLAG_VFE_SHIFT 3
+#define I40E_AQ_FLAG_LB_SHIFT 9
+#define I40E_AQ_FLAG_RD_SHIFT 10
+#define I40E_AQ_FLAG_VFC_SHIFT 11
+#define I40E_AQ_FLAG_BUF_SHIFT 12
+#define I40E_AQ_FLAG_SI_SHIFT 13
+#define I40E_AQ_FLAG_EI_SHIFT 14
+#define I40E_AQ_FLAG_FE_SHIFT 15
+
+#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
+#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
+#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
+#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
+#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
+#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
+#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
+#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
+#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
+#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
+
+/* error codes */
+enum i40e_admin_queue_err {
+ I40E_AQ_RC_OK = 0, /* success */
+ I40E_AQ_RC_EPERM = 1, /* Operation not permitted */
+ I40E_AQ_RC_ENOENT = 2, /* No such element */
+ I40E_AQ_RC_ESRCH = 3, /* Bad opcode */
+ I40E_AQ_RC_EINTR = 4, /* operation interrupted */
+ I40E_AQ_RC_EIO = 5, /* I/O error */
+ I40E_AQ_RC_ENXIO = 6, /* No such resource */
+ I40E_AQ_RC_E2BIG = 7, /* Arg too long */
+ I40E_AQ_RC_EAGAIN = 8, /* Try again */
+ I40E_AQ_RC_ENOMEM = 9, /* Out of memory */
+ I40E_AQ_RC_EACCES = 10, /* Permission denied */
+ I40E_AQ_RC_EFAULT = 11, /* Bad address */
+ I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */
+ I40E_AQ_RC_EEXIST = 13, /* object already exists */
+ I40E_AQ_RC_EINVAL = 14, /* Invalid argument */
+ I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */
+ I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
+ I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */
+ I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */
+ I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */
+ I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
+ I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
+ I40E_AQ_RC_EFBIG = 22, /* File too large */
+};
+
+/* Admin Queue command opcodes */
+enum i40e_admin_queue_opc {
+ /* aq commands */
+ i40e_aqc_opc_get_version = 0x0001,
+ i40e_aqc_opc_driver_version = 0x0002,
+ i40e_aqc_opc_queue_shutdown = 0x0003,
+ i40e_aqc_opc_set_pf_context = 0x0004,
+
+ /* resource ownership */
+ i40e_aqc_opc_request_resource = 0x0008,
+ i40e_aqc_opc_release_resource = 0x0009,
+
+ i40e_aqc_opc_list_func_capabilities = 0x000A,
+ i40e_aqc_opc_list_dev_capabilities = 0x000B,
+
+ i40e_aqc_opc_set_cppm_configuration = 0x0103,
+ i40e_aqc_opc_set_arp_proxy_entry = 0x0104,
+ i40e_aqc_opc_set_ns_proxy_entry = 0x0105,
+
+ /* LAA */
+ i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */
+ i40e_aqc_opc_mac_address_read = 0x0107,
+ i40e_aqc_opc_mac_address_write = 0x0108,
+
+ /* PXE */
+ i40e_aqc_opc_clear_pxe_mode = 0x0110,
+
+ /* internal switch commands */
+ i40e_aqc_opc_get_switch_config = 0x0200,
+ i40e_aqc_opc_add_statistics = 0x0201,
+ i40e_aqc_opc_remove_statistics = 0x0202,
+ i40e_aqc_opc_set_port_parameters = 0x0203,
+ i40e_aqc_opc_get_switch_resource_alloc = 0x0204,
+
+ i40e_aqc_opc_add_vsi = 0x0210,
+ i40e_aqc_opc_update_vsi_parameters = 0x0211,
+ i40e_aqc_opc_get_vsi_parameters = 0x0212,
+
+ i40e_aqc_opc_add_pv = 0x0220,
+ i40e_aqc_opc_update_pv_parameters = 0x0221,
+ i40e_aqc_opc_get_pv_parameters = 0x0222,
+
+ i40e_aqc_opc_add_veb = 0x0230,
+ i40e_aqc_opc_update_veb_parameters = 0x0231,
+ i40e_aqc_opc_get_veb_parameters = 0x0232,
+
+ i40e_aqc_opc_delete_element = 0x0243,
+
+ i40e_aqc_opc_add_macvlan = 0x0250,
+ i40e_aqc_opc_remove_macvlan = 0x0251,
+ i40e_aqc_opc_add_vlan = 0x0252,
+ i40e_aqc_opc_remove_vlan = 0x0253,
+ i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254,
+ i40e_aqc_opc_add_tag = 0x0255,
+ i40e_aqc_opc_remove_tag = 0x0256,
+ i40e_aqc_opc_add_multicast_etag = 0x0257,
+ i40e_aqc_opc_remove_multicast_etag = 0x0258,
+ i40e_aqc_opc_update_tag = 0x0259,
+ i40e_aqc_opc_add_control_packet_filter = 0x025A,
+ i40e_aqc_opc_remove_control_packet_filter = 0x025B,
+ i40e_aqc_opc_add_cloud_filters = 0x025C,
+ i40e_aqc_opc_remove_cloud_filters = 0x025D,
+
+ i40e_aqc_opc_add_mirror_rule = 0x0260,
+ i40e_aqc_opc_delete_mirror_rule = 0x0261,
+
+ /* DCB commands */
+ i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
+ i40e_aqc_opc_dcb_updated = 0x0302,
+
+ /* TX scheduler */
+ i40e_aqc_opc_configure_vsi_bw_limit = 0x0400,
+ i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406,
+ i40e_aqc_opc_configure_vsi_tc_bw = 0x0407,
+ i40e_aqc_opc_query_vsi_bw_config = 0x0408,
+ i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A,
+ i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410,
+
+ i40e_aqc_opc_enable_switching_comp_ets = 0x0413,
+ i40e_aqc_opc_modify_switching_comp_ets = 0x0414,
+ i40e_aqc_opc_disable_switching_comp_ets = 0x0415,
+ i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416,
+ i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417,
+ i40e_aqc_opc_query_switching_comp_ets_config = 0x0418,
+ i40e_aqc_opc_query_port_ets_config = 0x0419,
+ i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
+ i40e_aqc_opc_suspend_port_tx = 0x041B,
+ i40e_aqc_opc_resume_port_tx = 0x041C,
+ i40e_aqc_opc_configure_partition_bw = 0x041D,
+
+ /* hmc */
+ i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
+ i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
+
+ /* phy commands*/
+ i40e_aqc_opc_get_phy_abilities = 0x0600,
+ i40e_aqc_opc_set_phy_config = 0x0601,
+ i40e_aqc_opc_set_mac_config = 0x0603,
+ i40e_aqc_opc_set_link_restart_an = 0x0605,
+ i40e_aqc_opc_get_link_status = 0x0607,
+ i40e_aqc_opc_set_phy_int_mask = 0x0613,
+ i40e_aqc_opc_get_local_advt_reg = 0x0614,
+ i40e_aqc_opc_set_local_advt_reg = 0x0615,
+ i40e_aqc_opc_get_partner_advt = 0x0616,
+ i40e_aqc_opc_set_lb_modes = 0x0618,
+ i40e_aqc_opc_get_phy_wol_caps = 0x0621,
+ i40e_aqc_opc_set_phy_debug = 0x0622,
+ i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
+
+ /* NVM commands */
+ i40e_aqc_opc_nvm_read = 0x0701,
+ i40e_aqc_opc_nvm_erase = 0x0702,
+ i40e_aqc_opc_nvm_update = 0x0703,
+ i40e_aqc_opc_nvm_config_read = 0x0704,
+ i40e_aqc_opc_nvm_config_write = 0x0705,
+
+ /* virtualization commands */
+ i40e_aqc_opc_send_msg_to_pf = 0x0801,
+ i40e_aqc_opc_send_msg_to_vf = 0x0802,
+ i40e_aqc_opc_send_msg_to_peer = 0x0803,
+
+ /* alternate structure */
+ i40e_aqc_opc_alternate_write = 0x0900,
+ i40e_aqc_opc_alternate_write_indirect = 0x0901,
+ i40e_aqc_opc_alternate_read = 0x0902,
+ i40e_aqc_opc_alternate_read_indirect = 0x0903,
+ i40e_aqc_opc_alternate_write_done = 0x0904,
+ i40e_aqc_opc_alternate_set_mode = 0x0905,
+ i40e_aqc_opc_alternate_clear_port = 0x0906,
+
+ /* LLDP commands */
+ i40e_aqc_opc_lldp_get_mib = 0x0A00,
+ i40e_aqc_opc_lldp_update_mib = 0x0A01,
+ i40e_aqc_opc_lldp_add_tlv = 0x0A02,
+ i40e_aqc_opc_lldp_update_tlv = 0x0A03,
+ i40e_aqc_opc_lldp_delete_tlv = 0x0A04,
+ i40e_aqc_opc_lldp_stop = 0x0A05,
+ i40e_aqc_opc_lldp_start = 0x0A06,
+
+ /* Tunnel commands */
+ i40e_aqc_opc_add_udp_tunnel = 0x0B00,
+ i40e_aqc_opc_del_udp_tunnel = 0x0B01,
+ i40e_aqc_opc_tunnel_key_structure = 0x0B10,
+
+ /* Async Events */
+ i40e_aqc_opc_event_lan_overflow = 0x1001,
+
+ /* OEM commands */
+ i40e_aqc_opc_oem_parameter_change = 0xFE00,
+ i40e_aqc_opc_oem_device_status_change = 0xFE01,
+
+ /* debug commands */
+ i40e_aqc_opc_debug_get_deviceid = 0xFF00,
+ i40e_aqc_opc_debug_set_mode = 0xFF01,
+ i40e_aqc_opc_debug_read_reg = 0xFF03,
+ i40e_aqc_opc_debug_write_reg = 0xFF04,
+ i40e_aqc_opc_debug_modify_reg = 0xFF07,
+ i40e_aqc_opc_debug_dump_internals = 0xFF08,
+ i40e_aqc_opc_debug_modify_internals = 0xFF09,
+};
+
+/* command structures and indirect data structures */
+
+/* Structure naming conventions:
+ * - no suffix for direct command descriptor structures
+ * - _data for indirect sent data
+ * - _resp for indirect return data (data which is both will use _data)
+ * - _completion for direct return data
+ * - _element_ for repeated elements (may also be _data or _resp)
+ *
+ * Command structures are expected to overlay the params.raw member of the basic
+ * descriptor, and as such cannot exceed 16 bytes in length.
+ */
+
+/* This macro is used to generate a compilation error if a structure
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure is not of the correct size, otherwise it creates an enum that is
+ * never used.
+ */
+#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \
+ { i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
+
+/* This macro is used extensively to ensure that command structures are 16
+ * bytes in length as they have to map to the raw array of that size.
+ */
+#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X)
+
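As an aside, and not part of this commit: the divide-by-zero trick above is the usual pre-C11 static-assert idiom, and the enum it declares is never referenced. A minimal, hypothetical illustration (the example struct names below do not exist in the driver) of how a size mismatch is caught at compile time:

/* exactly 16 bytes: fits the descriptor's raw parameter area, check passes */
struct i40e_aqc_example_ok {
	__le32	param0;
	__le32	param1;
	u8	reserved[8];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_example_ok);

/* 20 bytes: sizeof() != 16, so the macro divides by zero and the build fails */
struct i40e_aqc_example_too_big {
	__le32	param[5];
};
/* I40E_CHECK_CMD_LENGTH(i40e_aqc_example_too_big);  -- would not compile */
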
+/* internal (0x00XX) commands */
+
+/* Get version (direct 0x0001) */
+struct i40e_aqc_get_version {
+ __le32 rom_ver;
+ __le32 fw_build;
+ __le16 fw_major;
+ __le16 fw_minor;
+ __le16 api_major;
+ __le16 api_minor;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version);
+
+/* Send driver version (indirect 0x0002) */
+struct i40e_aqc_driver_version {
+ u8 driver_major_ver;
+ u8 driver_minor_ver;
+ u8 driver_build_ver;
+ u8 driver_subbuild_ver;
+ u8 reserved[4];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version);
+
+/* Queue Shutdown (direct 0x0003) */
+struct i40e_aqc_queue_shutdown {
+ __le32 driver_unloading;
+#define I40E_AQ_DRIVER_UNLOADING 0x1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
+
+/* Set PF context (0x0004, direct) */
+struct i40e_aqc_set_pf_context {
+ u8 pf_id;
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context);
+
+/* Request resource ownership (direct 0x0008)
+ * Release resource ownership (direct 0x0009)
+ */
+#define I40E_AQ_RESOURCE_NVM 1
+#define I40E_AQ_RESOURCE_SDP 2
+#define I40E_AQ_RESOURCE_ACCESS_READ 1
+#define I40E_AQ_RESOURCE_ACCESS_WRITE 2
+#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000
+#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000
+
+struct i40e_aqc_request_resource {
+ __le16 resource_id;
+ __le16 access_type;
+ __le32 timeout;
+ __le32 resource_number;
+ u8 reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource);
+
+/* Get function capabilities (indirect 0x000A)
+ * Get device capabilities (indirect 0x000B)
+ */
+struct i40e_aqc_list_capabilites {
+ u8 command_flags;
+#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1
+ u8 pf_index;
+ u8 reserved[2];
+ __le32 count;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites);
+
+struct i40e_aqc_list_capabilities_element_resp {
+ __le16 id;
+ u8 major_rev;
+ u8 minor_rev;
+ __le32 number;
+ __le32 logical_id;
+ __le32 phys_id;
+ u8 reserved[16];
+};
+
+/* list of caps */
+
+#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001
+#define I40E_AQ_CAP_ID_MNG_MODE 0x0002
+#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003
+#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004
+#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005
+#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006
+#define I40E_AQ_CAP_ID_SRIOV 0x0012
+#define I40E_AQ_CAP_ID_VF 0x0013
+#define I40E_AQ_CAP_ID_VMDQ 0x0014
+#define I40E_AQ_CAP_ID_8021QBG 0x0015
+#define I40E_AQ_CAP_ID_8021QBR 0x0016
+#define I40E_AQ_CAP_ID_VSI 0x0017
+#define I40E_AQ_CAP_ID_DCB 0x0018
+#define I40E_AQ_CAP_ID_FCOE 0x0021
+#define I40E_AQ_CAP_ID_RSS 0x0040
+#define I40E_AQ_CAP_ID_RXQ 0x0041
+#define I40E_AQ_CAP_ID_TXQ 0x0042
+#define I40E_AQ_CAP_ID_MSIX 0x0043
+#define I40E_AQ_CAP_ID_VF_MSIX 0x0044
+#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045
+#define I40E_AQ_CAP_ID_1588 0x0046
+#define I40E_AQ_CAP_ID_IWARP 0x0051
+#define I40E_AQ_CAP_ID_LED 0x0061
+#define I40E_AQ_CAP_ID_SDP 0x0062
+#define I40E_AQ_CAP_ID_MDIO 0x0063
+#define I40E_AQ_CAP_ID_FLEX10 0x00F1
+#define I40E_AQ_CAP_ID_CEM 0x00F2
+
+/* Set CPPM Configuration (direct 0x0103) */
+struct i40e_aqc_cppm_configuration {
+ __le16 command_flags;
+#define I40E_AQ_CPPM_EN_LTRC 0x0800
+#define I40E_AQ_CPPM_EN_DMCTH 0x1000
+#define I40E_AQ_CPPM_EN_DMCTLX 0x2000
+#define I40E_AQ_CPPM_EN_HPTC 0x4000
+#define I40E_AQ_CPPM_EN_DMARC 0x8000
+ __le16 ttlx;
+ __le32 dmacr;
+ __le16 dmcth;
+ u8 hptc;
+ u8 reserved;
+ __le32 pfltrc;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration);
+
+/* Set ARP Proxy command / response (indirect 0x0104) */
+struct i40e_aqc_arp_proxy_data {
+ __le16 command_flags;
+#define I40E_AQ_ARP_INIT_IPV4 0x0008
+#define I40E_AQ_ARP_UNSUP_CTL 0x0010
+#define I40E_AQ_ARP_ENA 0x0020
+#define I40E_AQ_ARP_ADD_IPV4 0x0040
+#define I40E_AQ_ARP_DEL_IPV4 0x0080
+ __le16 table_id;
+ __le32 pfpm_proxyfc;
+ __le32 ip_addr;
+ u8 mac_addr[6];
+};
+
+/* Set NS Proxy Table Entry Command (indirect 0x0105) */
+struct i40e_aqc_ns_proxy_data {
+ __le16 table_idx_mac_addr_0;
+ __le16 table_idx_mac_addr_1;
+ __le16 table_idx_ipv6_0;
+ __le16 table_idx_ipv6_1;
+ __le16 control;
+#define I40E_AQ_NS_PROXY_ADD_0 0x0100
+#define I40E_AQ_NS_PROXY_DEL_0 0x0200
+#define I40E_AQ_NS_PROXY_ADD_1 0x0400
+#define I40E_AQ_NS_PROXY_DEL_1 0x0800
+#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000
+#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000
+#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000
+#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000
+#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001
+#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002
+#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004
+ u8 mac_addr_0[6];
+ u8 mac_addr_1[6];
+ u8 local_mac_addr[6];
+ u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */
+ u8 ipv6_addr_1[16];
+};
+
+/* Manage LAA Command (0x0106) - obsolete */
+struct i40e_aqc_mng_laa {
+ __le16 command_flags;
+#define I40E_AQ_LAA_FLAG_WR 0x8000
+ u8 reserved[2];
+ __le32 sal;
+ __le16 sah;
+ u8 reserved2[6];
+};
+
+/* Manage MAC Address Read Command (indirect 0x0107) */
+struct i40e_aqc_mac_address_read {
+ __le16 command_flags;
+#define I40E_AQC_LAN_ADDR_VALID 0x10
+#define I40E_AQC_SAN_ADDR_VALID 0x20
+#define I40E_AQC_PORT_ADDR_VALID 0x40
+#define I40E_AQC_WOL_ADDR_VALID 0x80
+#define I40E_AQC_ADDR_VALID_MASK 0xf0
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read);
+
+struct i40e_aqc_mac_address_read_data {
+ u8 pf_lan_mac[6];
+ u8 pf_san_mac[6];
+ u8 port_mac[6];
+ u8 pf_wol_mac[6];
+};
+
+I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data);
+
+/* Manage MAC Address Write Command (0x0108) */
+struct i40e_aqc_mac_address_write {
+ __le16 command_flags;
+#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
+#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
+#define I40E_AQC_WRITE_TYPE_PORT 0x8000
+#define I40E_AQC_WRITE_TYPE_MASK 0xc000
+ __le16 mac_sah;
+ __le32 mac_sal;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write);
+
+/* PXE commands (0x011x) */
+
+/* Clear PXE Command and response (direct 0x0110) */
+struct i40e_aqc_clear_pxe {
+ u8 rx_cnt;
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
+
+/* Switch configuration commands (0x02xx) */
+
+/* Used by many indirect commands that only pass an seid and a buffer in the
+ * command
+ */
+struct i40e_aqc_switch_seid {
+ __le16 seid;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid);
+
+/* Get Switch Configuration command (indirect 0x0200)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+struct i40e_aqc_get_switch_config_header_resp {
+ __le16 num_reported;
+ __le16 num_total;
+ u8 reserved[12];
+};
+
+struct i40e_aqc_switch_config_element_resp {
+ u8 element_type;
+#define I40E_AQ_SW_ELEM_TYPE_MAC 1
+#define I40E_AQ_SW_ELEM_TYPE_PF 2
+#define I40E_AQ_SW_ELEM_TYPE_VF 3
+#define I40E_AQ_SW_ELEM_TYPE_EMP 4
+#define I40E_AQ_SW_ELEM_TYPE_BMC 5
+#define I40E_AQ_SW_ELEM_TYPE_PV 16
+#define I40E_AQ_SW_ELEM_TYPE_VEB 17
+#define I40E_AQ_SW_ELEM_TYPE_PA 18
+#define I40E_AQ_SW_ELEM_TYPE_VSI 19
+ u8 revision;
+#define I40E_AQ_SW_ELEM_REV_1 1
+ __le16 seid;
+ __le16 uplink_seid;
+ __le16 downlink_seid;
+ u8 reserved[3];
+ u8 connection_type;
+#define I40E_AQ_CONN_TYPE_REGULAR 0x1
+#define I40E_AQ_CONN_TYPE_DEFAULT 0x2
+#define I40E_AQ_CONN_TYPE_CASCADED 0x3
+ __le16 scheduler_id;
+ __le16 element_info;
+};
+
+/* Get Switch Configuration (indirect 0x0200)
+ * an array of elements is returned in the response buffer;
+ * the first entry in the array is the header, the remainder are elements
+ */
+struct i40e_aqc_get_switch_config_resp {
+ struct i40e_aqc_get_switch_config_header_resp header;
+ struct i40e_aqc_switch_config_element_resp element[1];
+};
+
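To make the response layout concrete, here is a minimal sketch, not taken from the driver, of how a caller might walk the buffer once the admin queue command completes. It assumes only the two structures above plus the driver's integer typedefs, the FreeBSD le16toh() helper and the kernel printf(); element[] is declared with a single entry, but num_reported entries follow the header in the same DMA buffer:

static void
example_dump_switch_config(struct i40e_aqc_get_switch_config_resp *resp)
{
	u16 i, num = le16toh(resp->header.num_reported);

	for (i = 0; i < num; i++) {
		struct i40e_aqc_switch_config_element_resp *el =
		    &resp->element[i];

		printf("element %u: type %u seid %u uplink %u downlink %u\n",
		    i, el->element_type, le16toh(el->seid),
		    le16toh(el->uplink_seid), le16toh(el->downlink_seid));
	}
}
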
+/* Add Statistics (direct 0x0201)
+ * Remove Statistics (direct 0x0202)
+ */
+struct i40e_aqc_add_remove_statistics {
+ __le16 seid;
+ __le16 vlan;
+ __le16 stat_index;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics);
+
+/* Set Port Parameters command (direct 0x0203) */
+struct i40e_aqc_set_port_parameters {
+ __le16 command_flags;
+#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1
+#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
+#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4
+ __le16 bad_frame_vsi;
+ __le16 default_seid; /* reserved for command */
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters);
+
+/* Get Switch Resource Allocation (indirect 0x0204) */
+struct i40e_aqc_get_switch_resource_alloc {
+ u8 num_entries; /* reserved for command */
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc);
+
+/* expect an array of these structs in the response buffer */
+struct i40e_aqc_switch_resource_alloc_element_resp {
+ u8 resource_type;
+#define I40E_AQ_RESOURCE_TYPE_VEB 0x0
+#define I40E_AQ_RESOURCE_TYPE_VSI 0x1
+#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2
+#define I40E_AQ_RESOURCE_TYPE_STAG 0x3
+#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4
+#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5
+#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6
+#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7
+#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8
+#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9
+#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA
+#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB
+#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC
+#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD
+#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF
+#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10
+#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11
+#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12
+#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13
+ u8 reserved1;
+ __le16 guaranteed;
+ __le16 total;
+ __le16 used;
+ __le16 total_unalloced;
+ u8 reserved2[6];
+};
+
+/* Add VSI (indirect 0x0210)
+ * this indirect command uses struct i40e_aqc_vsi_properties_data
+ * as the indirect buffer (128 bytes)
+ *
+ * Update VSI (indirect 0x211)
+ * uses the same data structure as Add VSI
+ *
+ * Get VSI (indirect 0x0212)
+ * uses the same completion and data structure as Add VSI
+ */
+struct i40e_aqc_add_get_update_vsi {
+ __le16 uplink_seid;
+ u8 connection_type;
+#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1
+#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2
+#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3
+ u8 reserved1;
+ u8 vf_id;
+ u8 reserved2;
+ __le16 vsi_flags;
+#define I40E_AQ_VSI_TYPE_SHIFT 0x0
+#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT)
+#define I40E_AQ_VSI_TYPE_VF 0x0
+#define I40E_AQ_VSI_TYPE_VMDQ2 0x1
+#define I40E_AQ_VSI_TYPE_PF 0x2
+#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3
+#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi);
+
+struct i40e_aqc_add_get_update_vsi_completion {
+ __le16 seid;
+ __le16 vsi_number;
+ __le16 vsi_used;
+ __le16 vsi_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion);
+
+struct i40e_aqc_vsi_properties_data {
+	/* first 96 bytes are written by SW */
+ __le16 valid_sections;
+#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001
+#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002
+#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004
+#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008
+#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010
+#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020
+#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040
+#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080
+#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100
+#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200
+ /* switch section */
+ __le16 switch_id; /* 12bit id combined with flags below */
+#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000
+#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
+#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000
+#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000
+#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000
+ u8 sw_reserved[2];
+ /* security section */
+ u8 sec_flags;
+#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04
+ u8 sec_reserved;
+ /* VLAN section */
+ __le16 pvid; /* VLANS include priority bits */
+ __le16 fcoe_pvid;
+ u8 port_vlan_flags;
+#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00
+#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \
+ I40E_AQ_VSI_PVLAN_MODE_SHIFT)
+#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01
+#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02
+#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03
+#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04
+#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03
+#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \
+ I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08
+#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10
+#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
+ u8 pvlan_reserved[3];
+ /* ingress egress up sections */
+ __le32 ingress_table; /* bitmap, 3 bits per up */
+#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0
+#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3
+#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6
+#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9
+#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12
+#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15
+#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18
+#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21
+#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
+ __le32 egress_table; /* same defines as for ingress table */
+ /* cascaded PV section */
+ __le16 cas_pv_tag;
+ u8 cas_pv_flags;
+#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \
+ I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
+#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01
+#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02
+#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10
+#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20
+#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
+ u8 cas_pv_reserved;
+ /* queue mapping section */
+ __le16 mapping_flags;
+#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0
+#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1
+ __le16 queue_mapping[16];
+#define I40E_AQ_VSI_QUEUE_SHIFT 0x0
+#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
+ __le16 tc_mapping[8];
+#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0
+#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \
+ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
+#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9
+#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \
+ I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
+ /* queueing option section */
+ u8 queueing_opt_flags;
+#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
+#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
+ u8 queueing_opt_reserved[3];
+ /* scheduler section */
+ u8 up_enable_bits;
+ u8 sched_reserved;
+ /* outer up section */
+ __le32 outer_up_table; /* same structure and defines as ingress table */
+ u8 cmd_reserved[8];
+ /* last 32 bytes are written by FW */
+ __le16 qs_handle[8];
+#define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF
+ __le16 stat_counter_idx;
+ __le16 sched_id;
+ u8 resp_reserved[12];
+};
+
+I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
+
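The ingress and egress tables above pack one 3-bit traffic class value per user priority (UP 0 in bits 0-2, UP 1 in bits 3-5, and so on, matching the UPn_SHIFT defines). A short illustrative sketch, not driver code, that maps UP 3 to TC 1 and everything else to TC 0, assuming this header plus the FreeBSD htole16()/htole32() helpers:

static void
example_fill_ingress_table(struct i40e_aqc_vsi_properties_data *ctxt)
{
	u32 table = 0;
	u8 up;

	for (up = 0; up < 8; up++) {
		u8 tc = (up == 3) ? 1 : 0;	/* example mapping only */

		table |= (u32)tc << (up * 3);	/* 3 bits per UP */
	}
	ctxt->ingress_table = htole32(table);
	ctxt->valid_sections |= htole16(I40E_AQ_VSI_PROP_INGRESS_UP_VALID);
}
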
+/* Add Port Virtualizer (direct 0x0220)
+ * also used for update PV (direct 0x0221) but only flags are used
+ * (IS_CTRL_PORT only works on add PV)
+ */
+struct i40e_aqc_add_update_pv {
+ __le16 command_flags;
+#define I40E_AQC_PV_FLAG_PV_TYPE 0x1
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4
+#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8
+ __le16 uplink_seid;
+ __le16 connected_seid;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv);
+
+struct i40e_aqc_add_update_pv_completion {
+ /* reserved for update; for add also encodes error if rc == ENOSPC */
+ __le16 pv_seid;
+#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1
+#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2
+#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4
+#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion);
+
+/* Get PV Params (direct 0x0222)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+
+struct i40e_aqc_get_pv_params_completion {
+ __le16 seid;
+ __le16 default_stag;
+ __le16 pv_flags; /* same flags as add_pv */
+#define I40E_AQC_GET_PV_PV_TYPE 0x1
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4
+ u8 reserved[8];
+ __le16 default_port_seid;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion);
+
+/* Add VEB (direct 0x0230) */
+struct i40e_aqc_add_veb {
+ __le16 uplink_seid;
+ __le16 downlink_seid;
+ __le16 veb_flags;
+#define I40E_AQC_ADD_VEB_FLOATING 0x1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \
+ I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT)
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4
+#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8
+ u8 enable_tcs;
+ u8 reserved[9];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb);
+
+struct i40e_aqc_add_veb_completion {
+ u8 reserved[6];
+ __le16 switch_seid;
+ /* also encodes error if rc == ENOSPC; codes are the same as add_pv */
+ __le16 veb_seid;
+#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1
+#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2
+#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4
+#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion);
+
+/* Get VEB Parameters (direct 0x0232)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+struct i40e_aqc_get_veb_parameters_completion {
+ __le16 seid;
+ __le16 switch_id;
+	__le16 veb_flags; /* only the first/last flags from 0x0230 are valid */
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
+ u8 reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
+
+/* Delete Element (direct 0x0243)
+ * uses the generic i40e_aqc_switch_seid
+ */
+
+/* Add MAC-VLAN (indirect 0x0250) */
+
+/* used as the command descriptor for most VLAN commands */
+struct i40e_aqc_macvlan {
+ __le16 num_addresses;
+ __le16 seid[3];
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
+#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan);
+
+/* indirect data for command and response */
+struct i40e_aqc_add_macvlan_element_data {
+ u8 mac_addr[6];
+ __le16 vlan_tag;
+ __le16 flags;
+#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001
+#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002
+#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004
+#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008
+ __le16 queue_number;
+#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0
+#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \
+					I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT)
+ /* response section */
+ u8 match_method;
+#define I40E_AQC_MM_PERFECT_MATCH 0x01
+#define I40E_AQC_MM_HASH_MATCH 0x02
+#define I40E_AQC_MM_ERR_NO_RES 0xFF
+ u8 reserved1[3];
+};
+
+struct i40e_aqc_add_remove_macvlan_completion {
+ __le16 perfect_mac_used;
+ __le16 perfect_mac_free;
+ __le16 unicast_hash_free;
+ __le16 multicast_hash_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion);
+
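For illustration only, and not taken from the driver: filling one element of the indirect buffer for Add MAC-VLAN. The i40e_aqc_macvlan descriptor above would carry the element count and the buffer's DMA address; the sketch assumes the kernel memset()/memcpy() and the FreeBSD htole16() helper:

static void
example_fill_add_macvlan(struct i40e_aqc_add_macvlan_element_data *e,
    const u8 *macaddr, u16 vlan)
{
	memset(e, 0, sizeof(*e));
	memcpy(e->mac_addr, macaddr, sizeof(e->mac_addr));
	e->vlan_tag = htole16(vlan);
	/* perfect-match filter; firmware fills in match_method on response */
	e->flags = htole16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
}
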
+/* Remove MAC-VLAN (indirect 0x0251)
+ * uses i40e_aqc_macvlan for the descriptor
+ * data points to an array of num_addresses of elements
+ */
+
+struct i40e_aqc_remove_macvlan_element_data {
+ u8 mac_addr[6];
+ __le16 vlan_tag;
+ u8 flags;
+#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01
+#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02
+#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08
+#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10
+ u8 reserved[3];
+ /* reply section */
+ u8 error_code;
+#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0
+#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF
+ u8 reply_reserved[3];
+};
+
+/* Add VLAN (indirect 0x0252)
+ * Remove VLAN (indirect 0x0253)
+ * use the generic i40e_aqc_macvlan for the command
+ */
+struct i40e_aqc_add_remove_vlan_element_data {
+ __le16 vlan_tag;
+ u8 vlan_flags;
+/* flags for add VLAN */
+#define I40E_AQC_ADD_VLAN_LOCAL 0x1
+#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1
+#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << I40E_AQC_ADD_PVLAN_TYPE_SHIFT)
+#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0
+#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2
+#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4
+#define I40E_AQC_VLAN_PTYPE_SHIFT 3
+#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT)
+#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0
+#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8
+#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10
+#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18
+/* flags for remove VLAN */
+#define I40E_AQC_REMOVE_VLAN_ALL 0x1
+ u8 reserved;
+ u8 result;
+/* flags for add VLAN */
+#define I40E_AQC_ADD_VLAN_SUCCESS 0x0
+#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE
+#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF
+/* flags for remove VLAN */
+#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0
+#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF
+ u8 reserved1[3];
+};
+
+struct i40e_aqc_add_remove_vlan_completion {
+ u8 reserved[4];
+ __le16 vlans_used;
+ __le16 vlans_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Set VSI Promiscuous Modes (direct 0x0254) */
+struct i40e_aqc_set_vsi_promiscuous_modes {
+ __le16 promiscuous_flags;
+ __le16 valid_flags;
+/* flags used for both fields above */
+#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01
+#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02
+#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04
+#define I40E_AQC_SET_VSI_DEFAULT 0x08
+#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
+ __le16 seid;
+#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
+ __le16 vlan_tag;
+#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
+
+/* Add S/E-tag command (direct 0x0255)
+ * Uses generic i40e_aqc_add_remove_tag_completion for completion
+ */
+struct i40e_aqc_add_tag {
+ __le16 flags;
+#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001
+ __le16 seid;
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 tag;
+ __le16 queue_number;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag);
+
+struct i40e_aqc_add_remove_tag_completion {
+ u8 reserved[12];
+ __le16 tags_used;
+ __le16 tags_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion);
+
+/* Remove S/E-tag command (direct 0x0256)
+ * Uses generic i40e_aqc_add_remove_tag_completion for completion
+ */
+struct i40e_aqc_remove_tag {
+ __le16 seid;
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 tag;
+ u8 reserved[12];
+};
+
+/* Add multicast E-Tag (direct 0x0257)
+ * del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields
+ * and no external data
+ */
+struct i40e_aqc_add_remove_mcast_etag {
+ __le16 pv_seid;
+ __le16 etag;
+ u8 num_unicast_etags;
+ u8 reserved[3];
+ __le32 addr_high; /* address of array of 2-byte s-tags */
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag);
+
+struct i40e_aqc_add_remove_mcast_etag_completion {
+ u8 reserved[4];
+ __le16 mcast_etags_used;
+ __le16 mcast_etags_free;
+ __le32 addr_high;
+ __le32 addr_low;
+
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion);
+
+/* Update S/E-Tag (direct 0x0259) */
+struct i40e_aqc_update_tag {
+ __le16 seid;
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 old_tag;
+ __le16 new_tag;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag);
+
+struct i40e_aqc_update_tag_completion {
+ u8 reserved[12];
+ __le16 tags_used;
+ __le16 tags_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion);
+
+/* Add Control Packet filter (direct 0x025A)
+ * Remove Control Packet filter (direct 0x025B)
+ * uses the i40e_aqc_add_remove_control_packet_filter structure below,
+ * and the i40e_aqc_add_remove_control_packet_filter_completion structure
+ */
+struct i40e_aqc_add_remove_control_packet_filter {
+ u8 mac[6];
+ __le16 etype;
+ __le16 flags;
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000
+ __le16 seid;
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT)
+ __le16 queue;
+ u8 reserved[2];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter);
+
+struct i40e_aqc_add_remove_control_packet_filter_completion {
+ __le16 mac_etype_used;
+ __le16 etype_used;
+ __le16 mac_etype_free;
+ __le16 etype_free;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion);
+
+/* Add Cloud filters (indirect 0x025C)
+ * Remove Cloud filters (indirect 0x025D)
+ * uses the i40e_aqc_add_remove_cloud_filters,
+ * and the generic indirect completion structure
+ */
+struct i40e_aqc_add_remove_cloud_filters {
+ u8 num_filters;
+ u8 reserved;
+ __le16 seid;
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
+ u8 reserved2[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters);
+
+struct i40e_aqc_add_remove_cloud_filters_element_data {
+ u8 outer_mac[6];
+ u8 inner_mac[6];
+ __le16 inner_vlan;
+ union {
+ struct {
+ u8 reserved[12];
+ u8 data[4];
+ } v4;
+ struct {
+ u8 data[16];
+ } v6;
+ } ipaddr;
+ __le16 flags;
+#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
+ I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
+/* 0x0000 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
+/* 0x0002 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004
+/* 0x0005 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006
+/* 0x0007 reserved */
+/* 0x0008 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B
+#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C
+
+#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
+#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6
+#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100
+
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3
+
+ __le32 tenant_id;
+ u8 reserved[4];
+ __le16 queue_number;
+#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \
+ I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
+ u8 reserved2[14];
+ /* response section */
+ u8 allocation_result;
+#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0
+#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF
+ u8 response_reserved[7];
+};
+
+struct i40e_aqc_remove_cloud_filters_completion {
+ __le16 perfect_ovlan_used;
+ __le16 perfect_ovlan_free;
+ __le16 vlan_used;
+ __le16 vlan_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion);
+
+/* Add Mirror Rule (indirect or direct 0x0260)
+ * Delete Mirror Rule (indirect or direct 0x0261)
+ * note: some rule types (4,5) do not use an external buffer.
+ * take care to set the flags correctly.
+ */
+struct i40e_aqc_add_delete_mirror_rule {
+ __le16 seid;
+ __le16 rule_type;
+#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0
+#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \
+ I40E_AQC_MIRROR_RULE_TYPE_SHIFT)
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2
+#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5
+ __le16 num_entries;
+ __le16 destination; /* VSI for add, rule id for delete */
+ __le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule);
+
+struct i40e_aqc_add_delete_mirror_rule_completion {
+ u8 reserved[2];
+ __le16 rule_id; /* only used on add */
+ __le16 mirror_rules_used;
+ __le16 mirror_rules_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
+
+/* DCB 0x03xx */
+
+/* PFC Ignore (direct 0x0301)
+ * the command and response use the same descriptor structure
+ */
+struct i40e_aqc_pfc_ignore {
+ u8 tc_bitmap;
+ u8 command_flags; /* unused on response */
+#define I40E_AQC_PFC_IGNORE_SET 0x80
+#define I40E_AQC_PFC_IGNORE_CLEAR 0x0
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore);
+
+/* DCB Update (direct 0x0302) uses the i40e_aq_desc structure
+ * with no parameters
+ */
+
+/* TX scheduler 0x04xx */
+
+/* Almost all the indirect commands use
+ * this generic struct to pass the SEID in param0
+ */
+struct i40e_aqc_tx_sched_ind {
+ __le16 vsi_seid;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind);
+
+/* Several commands respond with a set of queue set handles */
+struct i40e_aqc_qs_handles_resp {
+ __le16 qs_handles[8];
+};
+
+/* Configure VSI BW limits (direct 0x0400) */
+struct i40e_aqc_configure_vsi_bw_limit {
+ __le16 vsi_seid;
+ u8 reserved[2];
+ __le16 credit;
+ u8 reserved1[2];
+ u8 max_credit; /* 0-3, limit = 2^max */
+ u8 reserved2[7];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit);
+
+/* Configure VSI Bandwidth Limit per Traffic Type (indirect 0x0406)
+ * responds with i40e_aqc_qs_handles_resp
+ */
+struct i40e_aqc_configure_vsi_ets_sla_bw_data {
+ u8 tc_valid_bits;
+ u8 reserved[15];
+	__le16 tc_bw_credits[8]; /* FW writes back QS handles here */
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved1[28];
+};
+
+/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
+ * responds with i40e_aqc_qs_handles_resp
+ */
+struct i40e_aqc_configure_vsi_tc_bw_data {
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ u8 tc_bw_credits[8];
+ u8 reserved1[4];
+ __le16 qs_handles[8];
+};
+
+/* Query vsi bw configuration (indirect 0x0408) */
+struct i40e_aqc_query_vsi_bw_config_resp {
+ u8 tc_valid_bits;
+ u8 tc_suspended_bits;
+ u8 reserved[14];
+ __le16 qs_handles[8];
+ u8 reserved1[4];
+ __le16 port_bw_limit;
+ u8 reserved2[2];
+ u8 max_bw; /* 0-3, limit = 2^max */
+ u8 reserved3[23];
+};
+
+/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
+struct i40e_aqc_query_vsi_ets_sla_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ u8 share_credits[8];
+ __le16 credits[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+};
+
+/* Configure Switching Component Bandwidth Limit (direct 0x0410) */
+struct i40e_aqc_configure_switching_comp_bw_limit {
+ __le16 seid;
+ u8 reserved[2];
+ __le16 credit;
+ u8 reserved1[2];
+ u8 max_bw; /* 0-3, limit = 2^max */
+ u8 reserved2[7];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
+
+/* Enable Physical Port ETS (indirect 0x0413)
+ * Modify Physical Port ETS (indirect 0x0414)
+ * Disable Physical Port ETS (indirect 0x0415)
+ */
+struct i40e_aqc_configure_switching_comp_ets_data {
+ u8 reserved[4];
+ u8 tc_valid_bits;
+ u8 seepage;
+#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1
+ u8 tc_strict_priority_flags;
+ u8 reserved1[17];
+ u8 tc_bw_share_credits[8];
+ u8 reserved2[96];
+};
+
+/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
+struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
+ u8 tc_valid_bits;
+ u8 reserved[15];
+ __le16 tc_bw_credit[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved1[28];
+};
+
+/* Configure Switching Component Bandwidth Allocation per Tc
+ * (indirect 0x0417)
+ */
+struct i40e_aqc_configure_switching_comp_bw_config_data {
+ u8 tc_valid_bits;
+ u8 reserved[2];
+ u8 absolute_credits; /* bool */
+ u8 tc_bw_share_credits[8];
+ u8 reserved1[20];
+};
+
+/* Query Switching Component Configuration (indirect 0x0418) */
+struct i40e_aqc_query_switching_comp_ets_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[35];
+ __le16 port_bw_limit;
+ u8 reserved1[2];
+ u8 tc_bw_max; /* 0-3, limit = 2^max */
+ u8 reserved2[23];
+};
+
+/* Query Physical Port ETS Configuration (indirect 0x0419) */
+struct i40e_aqc_query_port_ets_config_resp {
+ u8 reserved[4];
+ u8 tc_valid_bits;
+ u8 reserved1;
+ u8 tc_strict_priority_bits;
+ u8 reserved2;
+ u8 tc_bw_share_credits[8];
+ __le16 tc_bw_limits[8];
+
+ /* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved3[32];
+};
+
+/* Query Switching Component Bandwidth Allocation per Traffic Type
+ * (indirect 0x041A)
+ */
+struct i40e_aqc_query_switching_comp_bw_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[2];
+ u8 absolute_credits_enable; /* bool */
+ u8 tc_bw_share_credits[8];
+ __le16 tc_bw_limits[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+};
+
+/* Suspend/resume port TX traffic
+ * (direct 0x041B and 0x041C) uses the generic SEID struct
+ */
+
+/* Configure partition BW
+ * (indirect 0x041D)
+ */
+struct i40e_aqc_configure_partition_bw_data {
+ __le16 pf_valid_bits;
+ u8 min_bw[16]; /* guaranteed bandwidth */
+ u8 max_bw[16]; /* bandwidth limit */
+};
+
+/* Get and set the active HMC resource profile and status.
+ * (direct 0x0500) and (direct 0x0501)
+ */
+struct i40e_aq_get_set_hmc_resource_profile {
+ u8 pm_profile;
+ u8 pe_vf_enabled;
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
+
+enum i40e_aq_hmc_profile {
+ /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
+ I40E_HMC_PROFILE_DEFAULT = 1,
+ I40E_HMC_PROFILE_FAVOR_VF = 2,
+ I40E_HMC_PROFILE_EQUAL = 3,
+};
+
+#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF
+#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F
+
+/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
+
+/* set in param0 for get phy abilities to report qualified modules */
+#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001
+#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002
+
+enum i40e_aq_phy_type {
+ I40E_PHY_TYPE_SGMII = 0x0,
+ I40E_PHY_TYPE_1000BASE_KX = 0x1,
+ I40E_PHY_TYPE_10GBASE_KX4 = 0x2,
+ I40E_PHY_TYPE_10GBASE_KR = 0x3,
+ I40E_PHY_TYPE_40GBASE_KR4 = 0x4,
+ I40E_PHY_TYPE_XAUI = 0x5,
+ I40E_PHY_TYPE_XFI = 0x6,
+ I40E_PHY_TYPE_SFI = 0x7,
+ I40E_PHY_TYPE_XLAUI = 0x8,
+ I40E_PHY_TYPE_XLPPI = 0x9,
+ I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA,
+ I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB,
+ I40E_PHY_TYPE_10GBASE_AOC = 0xC,
+ I40E_PHY_TYPE_40GBASE_AOC = 0xD,
+ I40E_PHY_TYPE_100BASE_TX = 0x11,
+ I40E_PHY_TYPE_1000BASE_T = 0x12,
+ I40E_PHY_TYPE_10GBASE_T = 0x13,
+ I40E_PHY_TYPE_10GBASE_SR = 0x14,
+ I40E_PHY_TYPE_10GBASE_LR = 0x15,
+ I40E_PHY_TYPE_10GBASE_SFPP_CU = 0x16,
+ I40E_PHY_TYPE_10GBASE_CR1 = 0x17,
+ I40E_PHY_TYPE_40GBASE_CR4 = 0x18,
+ I40E_PHY_TYPE_40GBASE_SR4 = 0x19,
+ I40E_PHY_TYPE_40GBASE_LR4 = 0x1A,
+ I40E_PHY_TYPE_1000BASE_SX = 0x1B,
+ I40E_PHY_TYPE_1000BASE_LX = 0x1C,
+ I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D,
+ I40E_PHY_TYPE_20GBASE_KR2 = 0x1E,
+ I40E_PHY_TYPE_MAX
+};
+
+#define I40E_LINK_SPEED_100MB_SHIFT 0x1
+#define I40E_LINK_SPEED_1000MB_SHIFT 0x2
+#define I40E_LINK_SPEED_10GB_SHIFT 0x3
+#define I40E_LINK_SPEED_40GB_SHIFT 0x4
+#define I40E_LINK_SPEED_20GB_SHIFT 0x5
+
+enum i40e_aq_link_speed {
+ I40E_LINK_SPEED_UNKNOWN = 0,
+ I40E_LINK_SPEED_100MB = (1 << I40E_LINK_SPEED_100MB_SHIFT),
+ I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT),
+ I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT),
+ I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT),
+ I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT)
+};
+
+struct i40e_aqc_module_desc {
+ u8 oui[3];
+ u8 reserved1;
+ u8 part_number[16];
+ u8 revision[4];
+ u8 reserved2[8];
+};
+
+struct i40e_aq_get_phy_abilities_resp {
+ __le32 phy_type; /* bitmap using the above enum for offsets */
+ u8 link_speed; /* bitmap using the above enum bit patterns */
+ u8 abilities;
+#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
+#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
+#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04
+#define I40E_AQ_PHY_LINK_ENABLED 0x08
+#define I40E_AQ_PHY_AN_ENABLED 0x10
+#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
+ __le16 eee_capability;
+#define I40E_AQ_EEE_100BASE_TX 0x0002
+#define I40E_AQ_EEE_1000BASE_T 0x0004
+#define I40E_AQ_EEE_10GBASE_T 0x0008
+#define I40E_AQ_EEE_1000BASE_KX 0x0010
+#define I40E_AQ_EEE_10GBASE_KX4 0x0020
+#define I40E_AQ_EEE_10GBASE_KR 0x0040
+ __le32 eeer_val;
+ u8 d3_lpan;
+#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
+ u8 reserved[3];
+ u8 phy_id[4];
+ u8 module_type[3];
+ u8 qualified_module_count;
+#define I40E_AQ_PHY_MAX_QMS 16
+ struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS];
+};
+
+/* Set PHY Config (direct 0x0601) */
+struct i40e_aq_set_phy_config { /* same bits as above in all */
+ __le32 phy_type;
+ u8 link_speed;
+ u8 abilities;
+/* bits 0-2 use the values from get_phy_abilities_resp */
+#define I40E_AQ_PHY_ENABLE_LINK 0x08
+#define I40E_AQ_PHY_ENABLE_AN 0x10
+#define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20
+ __le16 eee_capability;
+ __le32 eeer;
+ u8 low_power_ctrl;
+ u8 reserved[3];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
+
+/* Set MAC Config command data structure (direct 0x0603) */
+struct i40e_aq_set_mac_config {
+ __le16 max_frame_size;
+ u8 params;
+#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04
+#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
+#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
+#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
+#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
+#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
+ u8 tx_timer_priority; /* bitmap */
+ __le16 tx_timer_value;
+ __le16 fc_refresh_threshold;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config);
+
+/* Restart Auto-Negotiation (direct 0x605) */
+struct i40e_aqc_set_link_restart_an {
+ u8 command;
+#define I40E_AQ_PHY_RESTART_AN 0x02
+#define I40E_AQ_PHY_LINK_ENABLE 0x04
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an);
+
+/* Get Link Status cmd & response data structure (direct 0x0607) */
+struct i40e_aqc_get_link_status {
+ __le16 command_flags; /* only field set on command */
+#define I40E_AQ_LSE_MASK 0x3
+#define I40E_AQ_LSE_NOP 0x0
+#define I40E_AQ_LSE_DISABLE 0x2
+#define I40E_AQ_LSE_ENABLE 0x3
+/* only response uses this flag */
+#define I40E_AQ_LSE_IS_ENABLED 0x1
+ u8 phy_type; /* i40e_aq_phy_type */
+ u8 link_speed; /* i40e_aq_link_speed */
+ u8 link_info;
+#define I40E_AQ_LINK_UP 0x01
+#define I40E_AQ_LINK_FAULT 0x02
+#define I40E_AQ_LINK_FAULT_TX 0x04
+#define I40E_AQ_LINK_FAULT_RX 0x08
+#define I40E_AQ_LINK_FAULT_REMOTE 0x10
+#define I40E_AQ_MEDIA_AVAILABLE 0x40
+#define I40E_AQ_SIGNAL_DETECT 0x80
+ u8 an_info;
+#define I40E_AQ_AN_COMPLETED 0x01
+#define I40E_AQ_LP_AN_ABILITY 0x02
+#define I40E_AQ_PD_FAULT 0x04
+#define I40E_AQ_FEC_EN 0x08
+#define I40E_AQ_PHY_LOW_POWER 0x10
+#define I40E_AQ_LINK_PAUSE_TX 0x20
+#define I40E_AQ_LINK_PAUSE_RX 0x40
+#define I40E_AQ_QUALIFIED_MODULE 0x80
+ u8 ext_info;
+#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01
+#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02
+#define I40E_AQ_LINK_TX_SHIFT 0x02
+#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT)
+#define I40E_AQ_LINK_TX_ACTIVE 0x00
+#define I40E_AQ_LINK_TX_DRAINED 0x01
+#define I40E_AQ_LINK_TX_FLUSHED 0x03
+#define I40E_AQ_LINK_FORCED_40G 0x10
+ u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
+ __le16 max_frame_size;
+ u8 config;
+#define I40E_AQ_CONFIG_CRC_ENA 0x04
+#define I40E_AQ_CONFIG_PACING_MASK 0x78
+ u8 reserved[5];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
+
+/* Set event mask command (direct 0x613) */
+struct i40e_aqc_set_phy_int_mask {
+ u8 reserved[8];
+ __le16 event_mask;
+#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002
+#define I40E_AQ_EVENT_MEDIA_NA 0x0004
+#define I40E_AQ_EVENT_LINK_FAULT 0x0008
+#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010
+#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020
+#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040
+#define I40E_AQ_EVENT_AN_COMPLETED 0x0080
+#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100
+#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200
+ u8 reserved1[6];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask);
+
+/* Get Local AN advt register (direct 0x0614)
+ * Set Local AN advt register (direct 0x0615)
+ * Get Link Partner AN advt register (direct 0x0616)
+ */
+struct i40e_aqc_an_advt_reg {
+ __le32 local_an_reg0;
+ __le16 local_an_reg1;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg);
+
+/* Set Loopback mode (0x0618) */
+struct i40e_aqc_set_lb_mode {
+ __le16 lb_mode;
+#define I40E_AQ_LB_PHY_LOCAL 0x01
+#define I40E_AQ_LB_PHY_REMOTE 0x02
+#define I40E_AQ_LB_MAC_LOCAL 0x04
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
+
+/* Set PHY Debug command (0x0622) */
+struct i40e_aqc_set_phy_debug {
+ u8 command_flags;
+#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \
+ I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT)
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02
+#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug);
+
+enum i40e_aq_phy_reg_type {
+ I40E_AQC_PHY_REG_INTERNAL = 0x1,
+ I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2,
+ I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3
+};
+
+/* NVM Read command (indirect 0x0701)
+ * NVM Erase commands (direct 0x0702)
+ * NVM Update commands (indirect 0x0703)
+ */
+struct i40e_aqc_nvm_update {
+ u8 command_flags;
+#define I40E_AQ_NVM_LAST_CMD 0x01
+#define I40E_AQ_NVM_FLASH_ONLY 0x80
+ u8 module_pointer;
+ __le16 length;
+ __le32 offset;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
+
+/* NVM Config Read (indirect 0x0704) */
+struct i40e_aqc_nvm_config_read {
+ __le16 cmd_flags;
+#define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
+#define ANVM_READ_SINGLE_FEATURE 0
+#define ANVM_READ_MULTIPLE_FEATURES 1
+ __le16 element_count;
+ __le16 element_id; /* Feature/field ID */
+ u8 reserved[2];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read);
+
+/* NVM Config Write (indirect 0x0705) */
+struct i40e_aqc_nvm_config_write {
+ __le16 cmd_flags;
+ __le16 element_count;
+ u8 reserved[4];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
+
+struct i40e_aqc_nvm_config_data_feature {
+ __le16 feature_id;
+ __le16 instance_id;
+ __le16 feature_options;
+ __le16 feature_selection;
+};
+
+struct i40e_aqc_nvm_config_data_immediate_field {
+#define ANVM_FEATURE_OR_IMMEDIATE_MASK 0x2
+ __le16 field_id;
+ __le16 instance_id;
+ __le16 field_options;
+ __le16 field_value;
+};
+
+/* Send to PF command (indirect 0x0801) id is only used by PF
+ * Send to VF command (indirect 0x0802) id is only used by PF
+ * Send to Peer PF command (indirect 0x0803)
+ */
+struct i40e_aqc_pf_vf_message {
+ __le32 id;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message);
+
+/* Alternate structure */
+
+/* Direct write (direct 0x0900)
+ * Direct read (direct 0x0902)
+ */
+struct i40e_aqc_alternate_write {
+ __le32 address0;
+ __le32 data0;
+ __le32 address1;
+ __le32 data1;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write);
+
+/* Indirect write (indirect 0x0901)
+ * Indirect read (indirect 0x0903)
+ */
+
+struct i40e_aqc_alternate_ind_write {
+ __le32 address;
+ __le32 length;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write);
+
+/* Done alternate write (direct 0x0904)
+ * uses i40e_aq_desc
+ */
+struct i40e_aqc_alternate_write_done {
+ __le16 cmd_flags;
+#define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1
+#define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0
+#define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1
+#define I40E_AQ_ALTERNATE_RESET_NEEDED 2
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done);
+
+/* Set OEM mode (direct 0x0905) */
+struct i40e_aqc_alternate_set_mode {
+ __le32 mode;
+#define I40E_AQ_ALTERNATE_MODE_NONE 0
+#define I40E_AQ_ALTERNATE_MODE_OEM 1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode);
+
+/* Clear port Alternate RAM (direct 0x0906) uses i40e_aq_desc */
+
+/* async events 0x10xx */
+
+/* Lan Queue Overflow Event (direct, 0x1001) */
+struct i40e_aqc_lan_overflow {
+ __le32 prtdcb_rupto;
+ __le32 otx_ctl;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow);
+
+/* Get LLDP MIB (indirect 0x0A00) */
+struct i40e_aqc_lldp_get_mib {
+ u8 type;
+ u8 reserved1;
+#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3
+#define I40E_AQ_LLDP_MIB_LOCAL 0x0
+#define I40E_AQ_LLDP_MIB_REMOTE 0x1
+#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC
+#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1
+#define I40E_AQ_LLDP_TX_SHIFT 0x4
+#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT)
+/* TX pause flags use I40E_AQ_LINK_TX_* above */
+ __le16 local_len;
+ __le16 remote_len;
+ u8 reserved2[2];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib);
+
+/* Configure LLDP MIB Change Event (direct 0x0A01)
+ * also used for the event (with type in the command field)
+ */
+struct i40e_aqc_lldp_update_mib {
+ u8 command;
+#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
+#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib);
+
+/* Add LLDP TLV (indirect 0x0A02)
+ * Delete LLDP TLV (indirect 0x0A04)
+ */
+struct i40e_aqc_lldp_add_tlv {
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved1[1];
+ __le16 len;
+ u8 reserved2[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv);
+
+/* Update LLDP TLV (indirect 0x0A03) */
+struct i40e_aqc_lldp_update_tlv {
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved;
+ __le16 old_len;
+ __le16 new_offset;
+ __le16 new_len;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv);
+
+/* Stop LLDP (direct 0x0A05) */
+struct i40e_aqc_lldp_stop {
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_STOP 0x0
+#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
+
+/* Start LLDP (direct 0x0A06) */
+
+struct i40e_aqc_lldp_start {
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_START 0x1
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
+
+/* Apply MIB changes (0x0A07)
+ * uses the generic struct as it contains no data
+ */
+
+/* Add Udp Tunnel command and completion (direct 0x0B00) */
+struct i40e_aqc_add_udp_tunnel {
+ __le16 udp_port;
+ u8 reserved0[3];
+ u8 protocol_type;
+#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00
+#define I40E_AQC_TUNNEL_TYPE_NGE 0x01
+#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10
+ u8 reserved1[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
+
+struct i40e_aqc_add_udp_tunnel_completion {
+ __le16 udp_port;
+ u8 filter_entry_index;
+ u8 multiple_pfs;
+#define I40E_AQC_SINGLE_PF 0x0
+#define I40E_AQC_MULTIPLE_PFS 0x1
+ u8 total_filters;
+ u8 reserved[11];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion);
+
+/* remove UDP Tunnel command (0x0B01) */
+struct i40e_aqc_remove_udp_tunnel {
+ u8 reserved[2];
+ u8 index; /* 0 to 15 */
+ u8 reserved2[13];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
+
+struct i40e_aqc_del_udp_tunnel_completion {
+ __le16 udp_port;
+ u8 index; /* 0 to 15 */
+ u8 multiple_pfs;
+ u8 total_filters_used;
+ u8 reserved1[11];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
+
+/* tunnel key structure 0x0B10 */
+
+struct i40e_aqc_tunnel_key_structure {
+ u8 key1_off;
+ u8 key2_off;
+ u8 key1_len; /* 0 to 15 */
+ u8 key2_len; /* 0 to 15 */
+ u8 flags;
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
+/* response flags */
+#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
+#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
+ u8 network_key_index;
+#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0
+#define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1
+#define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP 0x2
+#define I40E_AQC_NETWORK_KEY_INDEX_GRE 0x3
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure);
+
+/* OEM mode commands (direct 0xFE0x) */
+struct i40e_aqc_oem_param_change {
+ __le32 param_type;
+#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0
+#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1
+#define I40E_AQ_OEM_PARAM_MAC 2
+ __le32 param_value1;
+ u8 param_value2[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change);
+
+struct i40e_aqc_oem_state_change {
+ __le32 state;
+#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0
+#define I40E_AQ_OEM_STATE_LINK_UP 0x1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
+
+/* debug commands */
+
+/* get device id (0xFF00) uses the generic structure */
+
+/* set test mode (0xFF01, internal) */
+
+struct i40e_acq_set_test_mode {
+ u8 mode;
+#define I40E_AQ_TEST_PARTIAL 0
+#define I40E_AQ_TEST_FULL 1
+#define I40E_AQ_TEST_NVM 2
+ u8 reserved[3];
+ u8 command;
+#define I40E_AQ_TEST_OPEN 0
+#define I40E_AQ_TEST_CLOSE 1
+#define I40E_AQ_TEST_INC 2
+ u8 reserved2[3];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode);
+
+/* Debug Read Register command (0xFF03)
+ * Debug Write Register command (0xFF04)
+ */
+struct i40e_aqc_debug_reg_read_write {
+ __le32 reserved;
+ __le32 address;
+ __le32 value_high;
+ __le32 value_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_reg_read_write);
+
+/* Scatter/gather Reg Read (indirect 0xFF05)
+ * Scatter/gather Reg Write (indirect 0xFF06)
+ */
+
+/* i40e_aq_desc is used for the command */
+struct i40e_aqc_debug_reg_sg_element_data {
+ __le32 address;
+ __le32 value;
+};
+
+/* Debug Modify register (direct 0xFF07) */
+struct i40e_aqc_debug_modify_reg {
+ __le32 address;
+ __le32 value;
+ __le32 clear_mask;
+ __le32 set_mask;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg);
+
+/* dump internal data (0xFF08, indirect) */
+
+#define I40E_AQ_CLUSTER_ID_AUX 0
+#define I40E_AQ_CLUSTER_ID_SWITCH_FLU 1
+#define I40E_AQ_CLUSTER_ID_TXSCHED 2
+#define I40E_AQ_CLUSTER_ID_HMC 3
+#define I40E_AQ_CLUSTER_ID_MAC0 4
+#define I40E_AQ_CLUSTER_ID_MAC1 5
+#define I40E_AQ_CLUSTER_ID_MAC2 6
+#define I40E_AQ_CLUSTER_ID_MAC3 7
+#define I40E_AQ_CLUSTER_ID_DCB 8
+#define I40E_AQ_CLUSTER_ID_EMP_MEM 9
+#define I40E_AQ_CLUSTER_ID_PKT_BUF 10
+#define I40E_AQ_CLUSTER_ID_ALTRAM 11
+
+struct i40e_aqc_debug_dump_internals {
+ u8 cluster_id;
+ u8 table_id;
+ __le16 data_size;
+ __le32 idx;
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals);
+
+struct i40e_aqc_debug_modify_internals {
+ u8 cluster_id;
+ u8 cluster_specific_params[7];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals);
+
+#endif
diff --git a/sys/dev/ixl/i40e_alloc.h b/sys/dev/ixl/i40e_alloc.h
new file mode 100755
index 0000000..dc6fadd
--- /dev/null
+++ b/sys/dev/ixl/i40e_alloc.h
@@ -0,0 +1,66 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2014, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _I40E_ALLOC_H_
+#define _I40E_ALLOC_H_
+
+struct i40e_hw;
+
+/* Memory allocation types */
+enum i40e_memory_type {
+ i40e_mem_arq_buf = 0, /* ARQ indirect command buffer */
+ i40e_mem_asq_buf = 1,
+ i40e_mem_atq_buf = 2, /* ATQ indirect command buffer */
+ i40e_mem_arq_ring = 3, /* ARQ descriptor ring */
+ i40e_mem_atq_ring = 4, /* ATQ descriptor ring */
+ i40e_mem_pd = 5, /* Page Descriptor */
+ i40e_mem_bp = 6, /* Backing Page - 4KB */
+ i40e_mem_bp_jumbo = 7, /* Backing Page - > 4KB */
+ i40e_mem_reserved
+};
+
+/* prototype for functions used for dynamic memory allocation */
+enum i40e_status_code i40e_allocate_dma_mem(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem,
+ enum i40e_memory_type type,
+ u64 size, u32 alignment);
+enum i40e_status_code i40e_free_dma_mem(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem);
+enum i40e_status_code i40e_allocate_virt_mem(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem,
+ u32 size);
+enum i40e_status_code i40e_free_virt_mem(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem);
+
+#endif /* _I40E_ALLOC_H_ */
diff --git a/sys/dev/ixl/i40e_common.c b/sys/dev/ixl/i40e_common.c
new file mode 100755
index 0000000..ad1f945
--- /dev/null
+++ b/sys/dev/ixl/i40e_common.c
@@ -0,0 +1,4787 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2014, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#include "i40e_type.h"
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+#include "i40e_virtchnl.h"
+
+/**
+ * i40e_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the mac type of the adapter based on the
+ * vendor ID and device ID stored in the hw structure.
+ **/
+enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+
+	DEBUGFUNC("i40e_set_mac_type");
+
+ if (hw->vendor_id == I40E_INTEL_VENDOR_ID) {
+ switch (hw->device_id) {
+ case I40E_DEV_ID_SFP_XL710:
+ case I40E_DEV_ID_QEMU:
+ case I40E_DEV_ID_KX_A:
+ case I40E_DEV_ID_KX_B:
+ case I40E_DEV_ID_KX_C:
+ case I40E_DEV_ID_QSFP_A:
+ case I40E_DEV_ID_QSFP_B:
+ case I40E_DEV_ID_QSFP_C:
+ case I40E_DEV_ID_10G_BASE_T:
+ hw->mac.type = I40E_MAC_XL710;
+ break;
+ case I40E_DEV_ID_VF:
+ case I40E_DEV_ID_VF_HV:
+ hw->mac.type = I40E_MAC_VF;
+ break;
+ default:
+ hw->mac.type = I40E_MAC_GENERIC;
+ break;
+ }
+ } else {
+ status = I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ DEBUGOUT2("i40e_set_mac_type found mac: %d, returns: %d\n",
+ hw->mac.type, status);
+ return status;
+}
+
+/**
+ * i40e_debug_aq
+ * @hw: pointer to the hw struct
+ * @mask: debug mask
+ * @desc: pointer to admin queue descriptor
+ * @buffer: pointer to command buffer
+ * @buf_len: max length of buffer
+ *
+ * Dumps debug log about adminq command with descriptor contents.
+ **/
+void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
+ void *buffer, u16 buf_len)
+{
+ struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
+ u16 len = LE16_TO_CPU(aq_desc->datalen);
+ u8 *aq_buffer = (u8 *)buffer;
+ u32 data[4];
+ u32 i = 0;
+
+ if ((!(mask & hw->debug_mask)) || (desc == NULL))
+ return;
+
+ i40e_debug(hw, mask,
+ "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
+ aq_desc->opcode, aq_desc->flags, aq_desc->datalen,
+ aq_desc->retval);
+ i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
+ aq_desc->cookie_high, aq_desc->cookie_low);
+ i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
+ aq_desc->params.internal.param0,
+ aq_desc->params.internal.param1);
+ i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
+ aq_desc->params.external.addr_high,
+ aq_desc->params.external.addr_low);
+
+ if ((buffer != NULL) && (aq_desc->datalen != 0)) {
+ i40e_memset(data, 0, sizeof(data), I40E_NONDMA_MEM);
+ i40e_debug(hw, mask, "AQ CMD Buffer:\n");
+ if (buf_len < len)
+ len = buf_len;
+ for (i = 0; i < len; i++) {
+ data[((i % 16) / 4)] |=
+ ((u32)aq_buffer[i]) << (8 * (i % 4));
+ if ((i % 16) == 15) {
+ i40e_debug(hw, mask,
+ "\t0x%04X %08X %08X %08X %08X\n",
+ i - 15, data[0], data[1], data[2],
+ data[3]);
+ i40e_memset(data, 0, sizeof(data),
+ I40E_NONDMA_MEM);
+ }
+ }
+ if ((i % 16) != 0)
+ i40e_debug(hw, mask, "\t0x%04X %08X %08X %08X %08X\n",
+ i - (i % 16), data[0], data[1], data[2],
+ data[3]);
+ }
+}
+
+/**
+ * i40e_check_asq_alive
+ * @hw: pointer to the hw struct
+ *
+ * Returns TRUE if Queue is enabled else FALSE.
+ **/
+bool i40e_check_asq_alive(struct i40e_hw *hw)
+{
+ if (hw->aq.asq.len)
+ return !!(rd32(hw, hw->aq.asq.len) & I40E_PF_ATQLEN_ATQENABLE_MASK);
+ else
+ return FALSE;
+}
+
+/**
+ * i40e_aq_queue_shutdown
+ * @hw: pointer to the hw struct
+ * @unloading: is the driver unloading itself
+ *
+ * Tell the Firmware that we're shutting down the AdminQ and whether
+ * or not the driver is unloading as well.
+ **/
+enum i40e_status_code i40e_aq_queue_shutdown(struct i40e_hw *hw,
+ bool unloading)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_queue_shutdown *cmd =
+ (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_queue_shutdown);
+
+ if (unloading)
+ cmd->driver_unloading = CPU_TO_LE32(I40E_AQ_DRIVER_UNLOADING);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ return status;
+}
+
+/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
+ * hardware to a bit-field that can be used by SW to more easily determine the
+ * packet type.
+ *
+ * Macros are used to shorten the table lines and make this table human
+ * readable.
+ *
+ * We store the PTYPE in the top byte of the bit field - this is just so that
+ * we can check that the table doesn't have a row missing, as the index into
+ * the table should be the PTYPE.
+ *
+ * Typical work flow:
+ *
+ * IF NOT i40e_ptype_lookup[ptype].known
+ * THEN
+ * Packet is unknown
+ * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
+ * Use the rest of the fields to look at the tunnels, inner protocols, etc
+ * ELSE
+ * Use the enum i40e_rx_l2_ptype to decode the packet type
+ * ENDIF
+ */
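+
+/* A minimal usage sketch (illustrative only; this file defines the table
+ * but does not walk it):
+ *
+ *	struct i40e_rx_ptype_decoded decoded = i40e_ptype_lookup[ptype];
+ *
+ *	if (!decoded.known)
+ *		treat the packet as an unknown type
+ *	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP)
+ *		examine the tunnel and inner protocol fields
+ */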
+
+/* macro to make the table lines short */
+#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
+ { PTYPE, \
+ 1, \
+ I40E_RX_PTYPE_OUTER_##OUTER_IP, \
+ I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
+ I40E_RX_PTYPE_##OUTER_FRAG, \
+ I40E_RX_PTYPE_TUNNEL_##T, \
+ I40E_RX_PTYPE_TUNNEL_END_##TE, \
+ I40E_RX_PTYPE_##TEF, \
+ I40E_RX_PTYPE_INNER_PROT_##I, \
+ I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
+
+#define I40E_PTT_UNUSED_ENTRY(PTYPE) \
+ { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+
+/* shorter macros makes the table fit but are terse */
+#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG
+#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG
+#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC
+
+/* Lookup table mapping the HW PTYPE to the bit field for decoding */
+struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
+ /* L2 Packet types */
+ I40E_PTT_UNUSED_ENTRY(0),
+ I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
+ I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT_UNUSED_ENTRY(4),
+ I40E_PTT_UNUSED_ENTRY(5),
+ I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT_UNUSED_ENTRY(8),
+ I40E_PTT_UNUSED_ENTRY(9),
+ I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+
+ /* Non Tunneled IPv4 */
+ I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(25),
+ I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
+ I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv4 */
+ I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(32),
+ I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv6 */
+ I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(39),
+ I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT */
+ I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> IPv4 */
+ I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(47),
+ I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> IPv6 */
+ I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(54),
+ I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC */
+ I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
+ I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(62),
+ I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
+ I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(69),
+ I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC/VLAN */
+ I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
+ I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(77),
+ I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
+ I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(84),
+ I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* Non Tunneled IPv6 */
+ I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3),
+ I40E_PTT_UNUSED_ENTRY(91),
+ I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
+ I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv4 */
+ I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(98),
+ I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv6 */
+ I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(105),
+ I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT */
+ I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> IPv4 */
+ I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(113),
+ I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> IPv6 */
+ I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(120),
+ I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC */
+ I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
+ I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(128),
+ I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
+ I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(135),
+ I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN */
+ I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
+ I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(143),
+ I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
+ I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(150),
+ I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* unused entries */
+ I40E_PTT_UNUSED_ENTRY(154),
+ I40E_PTT_UNUSED_ENTRY(155),
+ I40E_PTT_UNUSED_ENTRY(156),
+ I40E_PTT_UNUSED_ENTRY(157),
+ I40E_PTT_UNUSED_ENTRY(158),
+ I40E_PTT_UNUSED_ENTRY(159),
+
+ I40E_PTT_UNUSED_ENTRY(160),
+ I40E_PTT_UNUSED_ENTRY(161),
+ I40E_PTT_UNUSED_ENTRY(162),
+ I40E_PTT_UNUSED_ENTRY(163),
+ I40E_PTT_UNUSED_ENTRY(164),
+ I40E_PTT_UNUSED_ENTRY(165),
+ I40E_PTT_UNUSED_ENTRY(166),
+ I40E_PTT_UNUSED_ENTRY(167),
+ I40E_PTT_UNUSED_ENTRY(168),
+ I40E_PTT_UNUSED_ENTRY(169),
+
+ I40E_PTT_UNUSED_ENTRY(170),
+ I40E_PTT_UNUSED_ENTRY(171),
+ I40E_PTT_UNUSED_ENTRY(172),
+ I40E_PTT_UNUSED_ENTRY(173),
+ I40E_PTT_UNUSED_ENTRY(174),
+ I40E_PTT_UNUSED_ENTRY(175),
+ I40E_PTT_UNUSED_ENTRY(176),
+ I40E_PTT_UNUSED_ENTRY(177),
+ I40E_PTT_UNUSED_ENTRY(178),
+ I40E_PTT_UNUSED_ENTRY(179),
+
+ I40E_PTT_UNUSED_ENTRY(180),
+ I40E_PTT_UNUSED_ENTRY(181),
+ I40E_PTT_UNUSED_ENTRY(182),
+ I40E_PTT_UNUSED_ENTRY(183),
+ I40E_PTT_UNUSED_ENTRY(184),
+ I40E_PTT_UNUSED_ENTRY(185),
+ I40E_PTT_UNUSED_ENTRY(186),
+ I40E_PTT_UNUSED_ENTRY(187),
+ I40E_PTT_UNUSED_ENTRY(188),
+ I40E_PTT_UNUSED_ENTRY(189),
+
+ I40E_PTT_UNUSED_ENTRY(190),
+ I40E_PTT_UNUSED_ENTRY(191),
+ I40E_PTT_UNUSED_ENTRY(192),
+ I40E_PTT_UNUSED_ENTRY(193),
+ I40E_PTT_UNUSED_ENTRY(194),
+ I40E_PTT_UNUSED_ENTRY(195),
+ I40E_PTT_UNUSED_ENTRY(196),
+ I40E_PTT_UNUSED_ENTRY(197),
+ I40E_PTT_UNUSED_ENTRY(198),
+ I40E_PTT_UNUSED_ENTRY(199),
+
+ I40E_PTT_UNUSED_ENTRY(200),
+ I40E_PTT_UNUSED_ENTRY(201),
+ I40E_PTT_UNUSED_ENTRY(202),
+ I40E_PTT_UNUSED_ENTRY(203),
+ I40E_PTT_UNUSED_ENTRY(204),
+ I40E_PTT_UNUSED_ENTRY(205),
+ I40E_PTT_UNUSED_ENTRY(206),
+ I40E_PTT_UNUSED_ENTRY(207),
+ I40E_PTT_UNUSED_ENTRY(208),
+ I40E_PTT_UNUSED_ENTRY(209),
+
+ I40E_PTT_UNUSED_ENTRY(210),
+ I40E_PTT_UNUSED_ENTRY(211),
+ I40E_PTT_UNUSED_ENTRY(212),
+ I40E_PTT_UNUSED_ENTRY(213),
+ I40E_PTT_UNUSED_ENTRY(214),
+ I40E_PTT_UNUSED_ENTRY(215),
+ I40E_PTT_UNUSED_ENTRY(216),
+ I40E_PTT_UNUSED_ENTRY(217),
+ I40E_PTT_UNUSED_ENTRY(218),
+ I40E_PTT_UNUSED_ENTRY(219),
+
+ I40E_PTT_UNUSED_ENTRY(220),
+ I40E_PTT_UNUSED_ENTRY(221),
+ I40E_PTT_UNUSED_ENTRY(222),
+ I40E_PTT_UNUSED_ENTRY(223),
+ I40E_PTT_UNUSED_ENTRY(224),
+ I40E_PTT_UNUSED_ENTRY(225),
+ I40E_PTT_UNUSED_ENTRY(226),
+ I40E_PTT_UNUSED_ENTRY(227),
+ I40E_PTT_UNUSED_ENTRY(228),
+ I40E_PTT_UNUSED_ENTRY(229),
+
+ I40E_PTT_UNUSED_ENTRY(230),
+ I40E_PTT_UNUSED_ENTRY(231),
+ I40E_PTT_UNUSED_ENTRY(232),
+ I40E_PTT_UNUSED_ENTRY(233),
+ I40E_PTT_UNUSED_ENTRY(234),
+ I40E_PTT_UNUSED_ENTRY(235),
+ I40E_PTT_UNUSED_ENTRY(236),
+ I40E_PTT_UNUSED_ENTRY(237),
+ I40E_PTT_UNUSED_ENTRY(238),
+ I40E_PTT_UNUSED_ENTRY(239),
+
+ I40E_PTT_UNUSED_ENTRY(240),
+ I40E_PTT_UNUSED_ENTRY(241),
+ I40E_PTT_UNUSED_ENTRY(242),
+ I40E_PTT_UNUSED_ENTRY(243),
+ I40E_PTT_UNUSED_ENTRY(244),
+ I40E_PTT_UNUSED_ENTRY(245),
+ I40E_PTT_UNUSED_ENTRY(246),
+ I40E_PTT_UNUSED_ENTRY(247),
+ I40E_PTT_UNUSED_ENTRY(248),
+ I40E_PTT_UNUSED_ENTRY(249),
+
+ I40E_PTT_UNUSED_ENTRY(250),
+ I40E_PTT_UNUSED_ENTRY(251),
+ I40E_PTT_UNUSED_ENTRY(252),
+ I40E_PTT_UNUSED_ENTRY(253),
+ I40E_PTT_UNUSED_ENTRY(254),
+ I40E_PTT_UNUSED_ENTRY(255)
+};
+
+
+/**
+ * i40e_init_shared_code - Initialize the shared code
+ * @hw: pointer to hardware structure
+ *
+ * This assigns the MAC type and PHY code and inits the NVM.
+ * Does not touch the hardware. This function must be called prior to any
+ * other function in the shared code. The i40e_hw structure should be
+ * memset to 0 prior to calling this function. The following fields in
+ * hw structure should be filled in prior to calling this function:
+ * hw_addr, back, device_id, vendor_id, subsystem_device_id,
+ * subsystem_vendor_id, and revision_id
+ **/
+enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ u32 reg;
+
+ DEBUGFUNC("i40e_init_shared_code");
+
+ i40e_set_mac_type(hw);
+
+ switch (hw->mac.type) {
+ case I40E_MAC_XL710:
+ break;
+ default:
+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ hw->phy.get_link_info = TRUE;
+
+ /* Determine port number */
+ reg = rd32(hw, I40E_PFGEN_PORTNUM);
+ reg = ((reg & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) >>
+ I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT);
+ hw->port = (u8)reg;
+
+ /* Determine the PF number based on the PCI fn */
+ reg = rd32(hw, I40E_GLPCI_CAPSUP);
+ if (reg & I40E_GLPCI_CAPSUP_ARI_EN_MASK)
+ hw->pf_id = (u8)((hw->bus.device << 3) | hw->bus.func);
+ else
+ hw->pf_id = (u8)hw->bus.func;
+
+ status = i40e_init_nvm(hw);
+ return status;
+}
+
+/**
+ * i40e_aq_mac_address_read - Retrieve the MAC addresses
+ * @hw: pointer to the hw struct
+ * @flags: a return indicator of what addresses were added to the addr store
+ * @addrs: the requestor's mac addr store
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+static enum i40e_status_code i40e_aq_mac_address_read(struct i40e_hw *hw,
+ u16 *flags,
+ struct i40e_aqc_mac_address_read_data *addrs,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_mac_address_read *cmd_data =
+ (struct i40e_aqc_mac_address_read *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
+ desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF);
+
+ status = i40e_asq_send_command(hw, &desc, addrs,
+ sizeof(*addrs), cmd_details);
+ *flags = LE16_TO_CPU(cmd_data->command_flags);
+
+ return status;
+}
+
+/**
+ * i40e_aq_mac_address_write - Change the MAC addresses
+ * @hw: pointer to the hw struct
+ * @flags: indicates which MAC to be written
+ * @mac_addr: address to write
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_mac_address_write(struct i40e_hw *hw,
+ u16 flags, u8 *mac_addr,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_mac_address_write *cmd_data =
+ (struct i40e_aqc_mac_address_write *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_mac_address_write);
+ cmd_data->command_flags = CPU_TO_LE16(flags);
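+	/* mac_sah carries the two high-order address bytes, mac_sal the
+	 * remaining four.
+	 */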
+ cmd_data->mac_sah = CPU_TO_LE16((u16)mac_addr[0] << 8 | mac_addr[1]);
+ cmd_data->mac_sal = CPU_TO_LE32(((u32)mac_addr[2] << 24) |
+ ((u32)mac_addr[3] << 16) |
+ ((u32)mac_addr[4] << 8) |
+ mac_addr[5]);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_get_mac_addr - get MAC address
+ * @hw: pointer to the HW structure
+ * @mac_addr: pointer to MAC address
+ *
+ * Reads the adapter's MAC address from register
+ **/
+enum i40e_status_code i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
+{
+ struct i40e_aqc_mac_address_read_data addrs;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
+
+ if (flags & I40E_AQC_LAN_ADDR_VALID)
+ memcpy(mac_addr, &addrs.pf_lan_mac, sizeof(addrs.pf_lan_mac));
+
+ return status;
+}
+
+/**
+ * i40e_get_port_mac_addr - get Port MAC address
+ * @hw: pointer to the HW structure
+ * @mac_addr: pointer to Port MAC address
+ *
+ * Reads the adapter's Port MAC address
+ **/
+enum i40e_status_code i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
+{
+ struct i40e_aqc_mac_address_read_data addrs;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
+ if (status)
+ return status;
+
+ if (flags & I40E_AQC_PORT_ADDR_VALID)
+ memcpy(mac_addr, &addrs.port_mac, sizeof(addrs.port_mac));
+ else
+ status = I40E_ERR_INVALID_MAC_ADDR;
+
+ return status;
+}
+
+/**
+ * i40e_pre_tx_queue_cfg - pre tx queue configure
+ * @hw: pointer to the HW structure
+ * @queue: target pf queue index
+ * @enable: state change request
+ *
+ * Handles hw requirement to indicate intention to enable
+ * or disable target queue.
+ **/
+void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
+{
+ u32 abs_queue_idx = hw->func_caps.base_queue + queue;
+ u32 reg_block = 0;
+ u32 reg_val;
+
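+	/* Each GLLAN_TXPRE_QDIS register covers a block of 128 queues;
+	 * split the absolute queue index into a register block and an
+	 * offset within it.
+	 */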
+ if (abs_queue_idx >= 128) {
+ reg_block = abs_queue_idx / 128;
+ abs_queue_idx %= 128;
+ }
+
+ reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
+ reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
+ reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
+
+ if (enable)
+ reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK;
+ else
+ reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
+
+ wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
+}
+
+/**
+ * i40e_validate_mac_addr - Validate unicast MAC address
+ * @mac_addr: pointer to MAC address
+ *
+ * Tests a MAC address to ensure it is a valid Individual Address
+ **/
+enum i40e_status_code i40e_validate_mac_addr(u8 *mac_addr)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ DEBUGFUNC("i40e_validate_mac_addr");
+
+ /* Broadcast addresses ARE multicast addresses
+ * Make sure it is not a multicast address
+ * Reject the zero address
+ */
+ if (I40E_IS_MULTICAST(mac_addr) ||
+ (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
+ mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0))
+ status = I40E_ERR_INVALID_MAC_ADDR;
+
+ return status;
+}
+
+/**
+ * i40e_get_media_type - Gets media type
+ * @hw: pointer to the hardware structure
+ **/
+static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
+{
+ enum i40e_media_type media;
+
+ switch (hw->phy.link_info.phy_type) {
+ case I40E_PHY_TYPE_10GBASE_SR:
+ case I40E_PHY_TYPE_10GBASE_LR:
+ case I40E_PHY_TYPE_1000BASE_SX:
+ case I40E_PHY_TYPE_1000BASE_LX:
+ case I40E_PHY_TYPE_40GBASE_SR4:
+ case I40E_PHY_TYPE_40GBASE_LR4:
+ media = I40E_MEDIA_TYPE_FIBER;
+ break;
+ case I40E_PHY_TYPE_100BASE_TX:
+ case I40E_PHY_TYPE_1000BASE_T:
+ case I40E_PHY_TYPE_10GBASE_T:
+ media = I40E_MEDIA_TYPE_BASET;
+ break;
+ case I40E_PHY_TYPE_10GBASE_CR1_CU:
+ case I40E_PHY_TYPE_40GBASE_CR4_CU:
+ case I40E_PHY_TYPE_10GBASE_CR1:
+ case I40E_PHY_TYPE_40GBASE_CR4:
+ case I40E_PHY_TYPE_10GBASE_SFPP_CU:
+ media = I40E_MEDIA_TYPE_DA;
+ break;
+ case I40E_PHY_TYPE_1000BASE_KX:
+ case I40E_PHY_TYPE_10GBASE_KX4:
+ case I40E_PHY_TYPE_10GBASE_KR:
+ case I40E_PHY_TYPE_40GBASE_KR4:
+ media = I40E_MEDIA_TYPE_BACKPLANE;
+ break;
+ case I40E_PHY_TYPE_SGMII:
+ case I40E_PHY_TYPE_XAUI:
+ case I40E_PHY_TYPE_XFI:
+ case I40E_PHY_TYPE_XLAUI:
+ case I40E_PHY_TYPE_XLPPI:
+ default:
+ media = I40E_MEDIA_TYPE_UNKNOWN;
+ break;
+ }
+
+ return media;
+}
+
+#define I40E_PF_RESET_WAIT_COUNT 100
+/**
+ * i40e_pf_reset - Reset the PF
+ * @hw: pointer to the hardware structure
+ *
+ * Assuming someone else has triggered a global reset,
+ * assure the global reset is complete and then reset the PF
+ **/
+enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw)
+{
+ u32 cnt = 0;
+ u32 cnt1 = 0;
+ u32 reg = 0;
+ u32 grst_del;
+
+ /* Poll for Global Reset steady state in case of recent GRST.
+ * The grst delay value is in 100ms units, and we'll wait a
+ * couple counts longer to be sure we don't just miss the end.
+ */
+	grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
+		    I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
+		    I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
+ for (cnt = 0; cnt < grst_del + 2; cnt++) {
+ reg = rd32(hw, I40E_GLGEN_RSTAT);
+ if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
+ break;
+ i40e_msec_delay(100);
+ }
+ if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
+ DEBUGOUT("Global reset polling failed to complete.\n");
+ return I40E_ERR_RESET_FAILED;
+ }
+
+ /* Now Wait for the FW to be ready */
+ for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
+ reg = rd32(hw, I40E_GLNVM_ULD);
+ reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
+ I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
+ if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
+ I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) {
+ DEBUGOUT1("Core and Global modules ready %d\n", cnt1);
+ break;
+ }
+ i40e_msec_delay(10);
+ }
+ if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
+ I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
+ DEBUGOUT("wait for FW Reset complete timedout\n");
+ DEBUGOUT1("I40E_GLNVM_ULD = 0x%x\n", reg);
+ return I40E_ERR_RESET_FAILED;
+ }
+
+ /* If there was a Global Reset in progress when we got here,
+ * we don't need to do the PF Reset
+ */
+ if (!cnt) {
+ reg = rd32(hw, I40E_PFGEN_CTRL);
+ wr32(hw, I40E_PFGEN_CTRL,
+ (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
+ for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) {
+ reg = rd32(hw, I40E_PFGEN_CTRL);
+ if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
+ break;
+ i40e_msec_delay(1);
+ }
+ if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
+ DEBUGOUT("PF reset polling failed to complete.\n");
+ return I40E_ERR_RESET_FAILED;
+ }
+ }
+
+ i40e_clear_pxe_mode(hw);
+
+
+ return I40E_SUCCESS;
+}
+
+/**
+ * i40e_clear_hw - clear out any left over hw state
+ * @hw: pointer to the hw struct
+ *
+ * Clear queues and interrupts, typically called at init time,
+ * but after the capabilities have been found so we know how many
+ * queues and msix vectors have been allocated.
+ **/
+void i40e_clear_hw(struct i40e_hw *hw)
+{
+ u32 num_queues, base_queue;
+ u32 num_pf_int;
+ u32 num_vf_int;
+ u32 num_vfs;
+ u32 i, j;
+ u32 val;
+ u32 eol = 0x7ff;
+
+ /* get number of interrupts, queues, and vfs */
+ val = rd32(hw, I40E_GLPCI_CNF2);
+ num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
+ I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
+ num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
+ I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
+
+ val = rd32(hw, I40E_PFLAN_QALLOC);
+ base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
+ I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
+ j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
+ I40E_PFLAN_QALLOC_LASTQ_SHIFT;
+ if (val & I40E_PFLAN_QALLOC_VALID_MASK)
+ num_queues = (j - base_queue) + 1;
+ else
+ num_queues = 0;
+
+ val = rd32(hw, I40E_PF_VT_PFALLOC);
+ i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
+ I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
+ j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
+ I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
+ if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
+ num_vfs = (j - i) + 1;
+ else
+ num_vfs = 0;
+
+ /* stop all the interrupts */
+ wr32(hw, I40E_PFINT_ICR0_ENA, 0);
+ val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
+ for (i = 0; i < num_pf_int - 2; i++)
+ wr32(hw, I40E_PFINT_DYN_CTLN(i), val);
+
+ /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
+ val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
+ wr32(hw, I40E_PFINT_LNKLST0, val);
+ for (i = 0; i < num_pf_int - 2; i++)
+ wr32(hw, I40E_PFINT_LNKLSTN(i), val);
+ val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
+ for (i = 0; i < num_vfs; i++)
+ wr32(hw, I40E_VPINT_LNKLST0(i), val);
+ for (i = 0; i < num_vf_int - 2; i++)
+ wr32(hw, I40E_VPINT_LNKLSTN(i), val);
+
+ /* warn the HW of the coming Tx disables */
+ for (i = 0; i < num_queues; i++) {
+ u32 abs_queue_idx = base_queue + i;
+ u32 reg_block = 0;
+
+ if (abs_queue_idx >= 128) {
+ reg_block = abs_queue_idx / 128;
+ abs_queue_idx %= 128;
+ }
+
+ val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
+ val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
+ val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
+ val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
+
+ wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
+ }
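+	/* give the hardware a moment to latch the queue-disable requests */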
+ i40e_usec_delay(400);
+
+ /* stop all the queues */
+ for (i = 0; i < num_queues; i++) {
+ wr32(hw, I40E_QINT_TQCTL(i), 0);
+ wr32(hw, I40E_QTX_ENA(i), 0);
+ wr32(hw, I40E_QINT_RQCTL(i), 0);
+ wr32(hw, I40E_QRX_ENA(i), 0);
+ }
+
+ /* short wait for all queue disables to settle */
+ i40e_usec_delay(50);
+}
+
+/**
+ * i40e_clear_pxe_mode - clear pxe operations mode
+ * @hw: pointer to the hw struct
+ *
+ * Make sure all PXE mode settings are cleared, including things
+ * like descriptor fetch/write-back mode.
+ **/
+void i40e_clear_pxe_mode(struct i40e_hw *hw)
+{
+ if (i40e_check_asq_alive(hw))
+ i40e_aq_clear_pxe_mode(hw, NULL);
+}
+
+/**
+ * i40e_led_is_mine - helper to find matching led
+ * @hw: pointer to the hw struct
+ * @idx: index into GPIO registers
+ *
+ * returns: 0 if no match, otherwise the value of the GPIO_CTL register
+ **/
+static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
+{
+ u32 gpio_val = 0;
+ u32 port;
+
+ if (!hw->func_caps.led[idx])
+ return 0;
+
+ gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
+ port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
+ I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
+
+ /* if PRT_NUM_NA is 1 then this LED is not port specific, OR
+ * if it is not our port then ignore
+ */
+ if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) ||
+ (port != hw->port))
+ return 0;
+
+ return gpio_val;
+}
+
+#define I40E_LED0 22
+#define I40E_LINK_ACTIVITY 0xC
+
+/**
+ * i40e_led_get - return current on/off mode
+ * @hw: pointer to the hw struct
+ *
+ * The value returned is the 'mode' field as defined in the
+ * GPIO register definitions: 0x0 = off, 0xf = on, and other
+ * values are variations of possible behaviors relating to
+ * blink, link, and wire.
+ **/
+u32 i40e_led_get(struct i40e_hw *hw)
+{
+ u32 mode = 0;
+ int i;
+
+ /* as per the documentation GPIO 22-29 are the LED
+ * GPIO pins named LED0..LED7
+ */
+ for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
+ u32 gpio_val = i40e_led_is_mine(hw, i);
+
+ if (!gpio_val)
+ continue;
+
+ mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
+ I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
+ break;
+ }
+
+ return mode;
+}
+
+/**
+ * i40e_led_set - set new on/off mode
+ * @hw: pointer to the hw struct
+ * @mode: 0=off, 0xf=on (else see manual for mode details)
+ * @blink: TRUE if the LED should blink when on, FALSE if steady
+ *
+ * If this function is used to turn on the blink, it should also be
+ * used to turn the blink off when restoring the original state.
+ **/
+void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
+{
+ int i;
+
+ if (mode & 0xfffffff0)
+ DEBUGOUT1("invalid mode passed in %X\n", mode);
+
+ /* as per the documentation GPIO 22-29 are the LED
+ * GPIO pins named LED0..LED7
+ */
+ for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
+ u32 gpio_val = i40e_led_is_mine(hw, i);
+
+ if (!gpio_val)
+ continue;
+
+ gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
+ /* this & is a bit of paranoia, but serves as a range check */
+ gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
+ I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
+
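+		/* link/activity mode manages the LED itself, so don't blink */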
+ if (mode == I40E_LINK_ACTIVITY)
+ blink = FALSE;
+
+ gpio_val |= (blink ? 1 : 0) <<
+ I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT;
+
+ wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
+ break;
+ }
+}
+
+/* Admin command wrappers */
+
+/**
+ * i40e_aq_get_phy_capabilities
+ * @hw: pointer to the hw struct
+ * @abilities: structure for PHY capabilities to be filled
+ * @qualified_modules: report Qualified Modules
+ * @report_init: report init capabilities (active are default)
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Returns the various PHY abilities supported on the Port.
+ **/
+enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
+ bool qualified_modules, bool report_init,
+ struct i40e_aq_get_phy_abilities_resp *abilities,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+ u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
+
+ if (!abilities)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_phy_abilities);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (abilities_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ if (qualified_modules)
+ desc.params.external.param0 |=
+ CPU_TO_LE32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);
+
+ if (report_init)
+ desc.params.external.param0 |=
+ CPU_TO_LE32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);
+
+ status = i40e_asq_send_command(hw, &desc, abilities, abilities_size,
+ cmd_details);
+
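+	/* an EIO from firmware here means the PHY could not be identified */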
+ if (hw->aq.asq_last_status == I40E_AQ_RC_EIO)
+ status = I40E_ERR_UNKNOWN_PHY;
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_phy_config
+ * @hw: pointer to the hw struct
+ * @config: structure with PHY configuration to be set
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set the various PHY configuration parameters
+ * supported on the Port. One or more of the Set PHY config parameters may be
+ * ignored in an MFP mode as the PF may not have the privilege to set some
+ * of the PHY Config parameters. This status will be indicated by the
+ * command response.
+ **/
+enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
+ struct i40e_aq_set_phy_config *config,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aq_set_phy_config *cmd =
+ (struct i40e_aq_set_phy_config *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (!config)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_phy_config);
+
+ *cmd = *config;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_set_fc
+ * @hw: pointer to the hw struct
+ * @aq_failures: bitmask reporting which AQ call failed (get/set/update)
+ * @atomic_restart: request an atomic link restart so the settings take effect
+ *
+ * Set the requested flow control mode using set_phy_config.
+ **/
+enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
+ bool atomic_restart)
+{
+ enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ struct i40e_aq_set_phy_config config;
+ enum i40e_status_code status;
+ u8 pause_mask = 0x0;
+
+ *aq_failures = 0x0;
+
+ switch (fc_mode) {
+ case I40E_FC_FULL:
+ pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
+ pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
+ break;
+ case I40E_FC_RX_PAUSE:
+ pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
+ break;
+ case I40E_FC_TX_PAUSE:
+ pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
+ break;
+ default:
+ break;
+ }
+
+ /* Get the current phy config */
+	status = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities,
+ NULL);
+ if (status) {
+ *aq_failures |= I40E_SET_FC_AQ_FAIL_GET;
+ return status;
+ }
+
+ memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
+ /* clear the old pause settings */
+ config.abilities = abilities.abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
+ ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
+ /* set the new abilities */
+ config.abilities |= pause_mask;
+ /* If the abilities have changed, then set the new config */
+ if (config.abilities != abilities.abilities) {
+ /* Auto restart link so settings take effect */
+ if (atomic_restart)
+ config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+ /* Copy over all the old settings */
+ config.phy_type = abilities.phy_type;
+ config.link_speed = abilities.link_speed;
+ config.eee_capability = abilities.eee_capability;
+ config.eeer = abilities.eeer_val;
+ config.low_power_ctrl = abilities.d3_lpan;
+ status = i40e_aq_set_phy_config(hw, &config, NULL);
+
+ if (status)
+ *aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
+ }
+ /* Update the link info */
+ status = i40e_update_link_info(hw, TRUE);
+ if (status) {
+ /* Wait a little bit (on 40G cards it sometimes takes a really
+ * long time for link to come back from the atomic reset)
+ * and try once more
+ */
+ i40e_msec_delay(1000);
+ status = i40e_update_link_info(hw, TRUE);
+ }
+ if (status)
+ *aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_mac_config
+ * @hw: pointer to the hw struct
+ * @max_frame_size: Maximum Frame Size to be supported by the port
+ * @crc_en: Tell HW to append a CRC to outgoing frames
+ * @pacing: Pacing configurations
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Configure MAC settings for frame size, jumbo frame support and the
+ * addition of a CRC by the hardware.
+ **/
+enum i40e_status_code i40e_aq_set_mac_config(struct i40e_hw *hw,
+ u16 max_frame_size,
+ bool crc_en, u16 pacing,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aq_set_mac_config *cmd =
+ (struct i40e_aq_set_mac_config *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (max_frame_size == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_mac_config);
+
+ cmd->max_frame_size = CPU_TO_LE16(max_frame_size);
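+	/* the pacing value occupies bits 6:3 of the params byte */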
+ cmd->params = ((u8)pacing & 0x0F) << 3;
+ if (crc_en)
+ cmd->params |= I40E_AQ_SET_MAC_CONFIG_CRC_EN;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_clear_pxe_mode
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Tell the firmware that the driver is taking over from PXE
+ **/
+enum i40e_status_code i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ enum i40e_status_code status;
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_clear_pxe *cmd =
+ (struct i40e_aqc_clear_pxe *)&desc.params.raw;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_clear_pxe_mode);
+
+ cmd->rx_cnt = 0x2;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ wr32(hw, I40E_GLLAN_RCTL_0, 0x1);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_link_restart_an
+ * @hw: pointer to the hw struct
+ * @enable_link: if TRUE: enable link, if FALSE: disable link
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Sets up the link and restarts the Auto-Negotiation over the link.
+ **/
+enum i40e_status_code i40e_aq_set_link_restart_an(struct i40e_hw *hw,
+ bool enable_link, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_link_restart_an *cmd =
+ (struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_link_restart_an);
+
+ cmd->command = I40E_AQ_PHY_RESTART_AN;
+ if (enable_link)
+ cmd->command |= I40E_AQ_PHY_LINK_ENABLE;
+ else
+ cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_link_info
+ * @hw: pointer to the hw struct
+ * @enable_lse: enable/disable LinkStatusEvent reporting
+ * @link: pointer to link status structure - optional
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Returns the link status of the adapter.
+ **/
+enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
+ bool enable_lse, struct i40e_link_status *link,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_link_status *resp =
+ (struct i40e_aqc_get_link_status *)&desc.params.raw;
+ struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+ enum i40e_status_code status;
+ bool tx_pause, rx_pause;
+ u16 command_flags;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
+
+ if (enable_lse)
+ command_flags = I40E_AQ_LSE_ENABLE;
+ else
+ command_flags = I40E_AQ_LSE_DISABLE;
+ resp->command_flags = CPU_TO_LE16(command_flags);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (status != I40E_SUCCESS)
+ goto aq_get_link_info_exit;
+
+ /* save off old link status information */
+ i40e_memcpy(&hw->phy.link_info_old, hw_link_info,
+ sizeof(struct i40e_link_status), I40E_NONDMA_TO_NONDMA);
+
+ /* update link status */
+ hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type;
+ hw->phy.media_type = i40e_get_media_type(hw);
+ hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
+ hw_link_info->link_info = resp->link_info;
+ hw_link_info->an_info = resp->an_info;
+ hw_link_info->ext_info = resp->ext_info;
+ hw_link_info->loopback = resp->loopback;
+ hw_link_info->max_frame_size = LE16_TO_CPU(resp->max_frame_size);
+ hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;
+
+ /* update fc info */
+ tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX);
+ rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX);
+	if (tx_pause && rx_pause)
+ hw->fc.current_mode = I40E_FC_FULL;
+ else if (tx_pause)
+ hw->fc.current_mode = I40E_FC_TX_PAUSE;
+ else if (rx_pause)
+ hw->fc.current_mode = I40E_FC_RX_PAUSE;
+ else
+ hw->fc.current_mode = I40E_FC_NONE;
+
+ if (resp->config & I40E_AQ_CONFIG_CRC_ENA)
+ hw_link_info->crc_enable = TRUE;
+ else
+ hw_link_info->crc_enable = FALSE;
+
+ if (resp->command_flags & CPU_TO_LE16(I40E_AQ_LSE_ENABLE))
+ hw_link_info->lse_enable = TRUE;
+ else
+ hw_link_info->lse_enable = FALSE;
+
+ /* save link status information */
+ if (link)
+ i40e_memcpy(link, hw_link_info, sizeof(struct i40e_link_status),
+ I40E_NONDMA_TO_NONDMA);
+
+ /* flag cleared so helper functions don't call AQ again */
+ hw->phy.get_link_info = FALSE;
+
+aq_get_link_info_exit:
+ return status;
+}
+
+/**
+ * i40e_update_link_info
+ * @hw: pointer to the hw struct
+ * @enable_lse: enable/disable LinkStatusEvent reporting
+ *
+ * Returns the link status of the adapter
+ **/
+enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw,
+ bool enable_lse)
+{
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ enum i40e_status_code status;
+
+ status = i40e_aq_get_link_info(hw, enable_lse, NULL, NULL);
+ if (status)
+ return status;
+
+	status = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE,
+ &abilities, NULL);
+ if (status)
+ return status;
+
+ if (abilities.abilities & I40E_AQ_PHY_AN_ENABLED)
+ hw->phy.link_info.an_enabled = TRUE;
+ else
+ hw->phy.link_info.an_enabled = FALSE;
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_phy_int_mask
+ * @hw: pointer to the hw struct
+ * @mask: interrupt mask to be set
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set link interrupt mask.
+ **/
+enum i40e_status_code i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
+ u16 mask,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_phy_int_mask *cmd =
+ (struct i40e_aqc_set_phy_int_mask *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_phy_int_mask);
+
+ cmd->event_mask = CPU_TO_LE16(mask);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_local_advt_reg
+ * @hw: pointer to the hw struct
+ * @advt_reg: local AN advertisement register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get the Local AN advertisement register value.
+ **/
+enum i40e_status_code i40e_aq_get_local_advt_reg(struct i40e_hw *hw,
+ u64 *advt_reg,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_an_advt_reg *resp =
+ (struct i40e_aqc_an_advt_reg *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_local_advt_reg);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (status != I40E_SUCCESS)
+ goto aq_get_local_advt_reg_exit;
+
+ *advt_reg = (u64)(LE16_TO_CPU(resp->local_an_reg1)) << 32;
+ *advt_reg |= LE32_TO_CPU(resp->local_an_reg0);
+
+aq_get_local_advt_reg_exit:
+ return status;
+}
+
+/**
+ * i40e_aq_set_local_advt_reg
+ * @hw: pointer to the hw struct
+ * @advt_reg: local AN advertisement register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set the Local AN advertisement register value.
+ **/
+enum i40e_status_code i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
+ u64 advt_reg,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_an_advt_reg *cmd =
+ (struct i40e_aqc_an_advt_reg *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_local_advt_reg);
+
+ cmd->local_an_reg0 = CPU_TO_LE32(I40E_LO_DWORD(advt_reg));
+ cmd->local_an_reg1 = CPU_TO_LE16(I40E_HI_DWORD(advt_reg));
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_partner_advt
+ * @hw: pointer to the hw struct
+ * @advt_reg: AN partner advertisement register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get the link partner AN advertisement register value.
+ **/
+enum i40e_status_code i40e_aq_get_partner_advt(struct i40e_hw *hw,
+ u64 *advt_reg,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_an_advt_reg *resp =
+ (struct i40e_aqc_an_advt_reg *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_partner_advt);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (status != I40E_SUCCESS)
+ goto aq_get_partner_advt_exit;
+
+ *advt_reg = (u64)(LE16_TO_CPU(resp->local_an_reg1)) << 32;
+ *advt_reg |= LE32_TO_CPU(resp->local_an_reg0);
+
+aq_get_partner_advt_exit:
+ return status;
+}
+
+/**
+ * i40e_aq_set_lb_modes
+ * @hw: pointer to the hw struct
+ * @lb_modes: loopback mode to be set
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Sets loopback modes.
+ **/
+enum i40e_status_code i40e_aq_set_lb_modes(struct i40e_hw *hw,
+ u16 lb_modes,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_lb_mode *cmd =
+ (struct i40e_aqc_set_lb_mode *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_lb_modes);
+
+ cmd->lb_mode = CPU_TO_LE16(lb_modes);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_phy_debug
+ * @hw: pointer to the hw struct
+ * @cmd_flags: debug command flags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set the PHY debug command flags, e.g. to request a reset of the external PHY.
+ **/
+enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_phy_debug *cmd =
+ (struct i40e_aqc_set_phy_debug *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_phy_debug);
+
+ cmd->command_flags = cmd_flags;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_vsi
+ * @hw: pointer to the hw struct
+ * @vsi_ctx: pointer to a vsi context struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Add a VSI context to the hardware.
+ **/
+enum i40e_status_code i40e_aq_add_vsi(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_get_update_vsi *cmd =
+ (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi_completion *resp =
+ (struct i40e_aqc_add_get_update_vsi_completion *)
+ &desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_add_vsi);
+
+ cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->uplink_seid);
+ cmd->connection_type = vsi_ctx->connection_type;
+ cmd->vf_id = vsi_ctx->vf_num;
+ cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
+
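+	/* indirect command: the buffer holds the VSI info for firmware to read */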
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+
+ status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
+ sizeof(vsi_ctx->info), cmd_details);
+
+ if (status != I40E_SUCCESS)
+ goto aq_add_vsi_exit;
+
+ vsi_ctx->seid = LE16_TO_CPU(resp->seid);
+ vsi_ctx->vsi_number = LE16_TO_CPU(resp->vsi_number);
+ vsi_ctx->vsis_allocated = LE16_TO_CPU(resp->vsi_used);
+ vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
+
+aq_add_vsi_exit:
+ return status;
+}
+
+/**
+ * i40e_aq_set_default_vsi
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_default_vsi(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)
+ &desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ cmd->promiscuous_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT);
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT);
+ cmd->seid = CPU_TO_LE16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_vsi_unicast_promiscuous
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @set: set unicast promiscuous enable/disable
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
+ u16 seid, bool set,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (set)
+ flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
+
+ cmd->promiscuous_flags = CPU_TO_LE16(flags);
+
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+
+ cmd->seid = CPU_TO_LE16(seid);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_vsi_multicast_promiscuous
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @set: set multicast promiscuous enable/disable
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
+ u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (set)
+ flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
+
+ cmd->promiscuous_flags = CPU_TO_LE16(flags);
+
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
+
+ cmd->seid = CPU_TO_LE16(seid);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
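+/*
+ * Illustrative sketch (editor's note, not in the upstream sources): the two
+ * promiscuous helpers above are usually toggled together when an interface
+ * enters or leaves promiscuous mode.  `hw' and `seid' are assumed to refer
+ * to an already configured VSI.
+ *
+ *	enum i40e_status_code err;
+ *
+ *	err = i40e_aq_set_vsi_unicast_promiscuous(hw, seid, TRUE, NULL);
+ *	if (err == I40E_SUCCESS)
+ *		err = i40e_aq_set_vsi_multicast_promiscuous(hw, seid,
+ *		    TRUE, NULL);
+ */
+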
+/**
+ * i40e_aq_set_vsi_broadcast
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @set_filter: TRUE to set filter, FALSE to clear filter
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set or clear the broadcast promiscuous flag (filter) for a given VSI.
+ **/
+enum i40e_status_code i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
+ u16 seid, bool set_filter,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (set_filter)
+ cmd->promiscuous_flags
+ |= CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+ else
+ cmd->promiscuous_flags
+ &= CPU_TO_LE16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+ cmd->seid = CPU_TO_LE16(seid);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_vsi_params - get VSI configuration info
+ * @hw: pointer to the hw struct
+ * @vsi_ctx: pointer to a vsi context struct
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_get_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_get_update_vsi *cmd =
+ (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi_completion *resp =
+ (struct i40e_aqc_add_get_update_vsi_completion *)
+ &desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_vsi_parameters);
+
+ cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->seid);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+
+ status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
+ sizeof(vsi_ctx->info), NULL);
+
+ if (status != I40E_SUCCESS)
+ goto aq_get_vsi_params_exit;
+
+ vsi_ctx->seid = LE16_TO_CPU(resp->seid);
+ vsi_ctx->vsi_number = LE16_TO_CPU(resp->vsi_number);
+ vsi_ctx->vsis_allocated = LE16_TO_CPU(resp->vsi_used);
+ vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
+
+aq_get_vsi_params_exit:
+ return status;
+}
+
+/**
+ * i40e_aq_update_vsi_params
+ * @hw: pointer to the hw struct
+ * @vsi_ctx: pointer to a vsi context struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Update a VSI context.
+ **/
+enum i40e_status_code i40e_aq_update_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_get_update_vsi *cmd =
+ (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_update_vsi_parameters);
+ cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->seid);
+
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+
+ status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
+ sizeof(vsi_ctx->info), cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_switch_config
+ * @hw: pointer to the hardware structure
+ * @buf: pointer to the result buffer
+ * @buf_size: length of input buffer
+ * @start_seid: seid to start for the report, 0 == beginning
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Fill the buf with switch configuration returned from AdminQ command
+ **/
+enum i40e_status_code i40e_aq_get_switch_config(struct i40e_hw *hw,
+ struct i40e_aqc_get_switch_config_resp *buf,
+ u16 buf_size, u16 *start_seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_switch_seid *scfg =
+ (struct i40e_aqc_switch_seid *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_switch_config);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ scfg->seid = CPU_TO_LE16(*start_seid);
+
+ status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details);
+ *start_seid = LE16_TO_CPU(scfg->seid);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_firmware_version
+ * @hw: pointer to the hw struct
+ * @fw_major_version: firmware major version
+ * @fw_minor_version: firmware minor version
+ * @api_major_version: admin queue API major version
+ * @api_minor_version: admin queue API minor version
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get the firmware version from the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_get_firmware_version(struct i40e_hw *hw,
+ u16 *fw_major_version, u16 *fw_minor_version,
+ u16 *api_major_version, u16 *api_minor_version,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_version *resp =
+ (struct i40e_aqc_get_version *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (status == I40E_SUCCESS) {
+ if (fw_major_version != NULL)
+ *fw_major_version = LE16_TO_CPU(resp->fw_major);
+ if (fw_minor_version != NULL)
+ *fw_minor_version = LE16_TO_CPU(resp->fw_minor);
+ if (api_major_version != NULL)
+ *api_major_version = LE16_TO_CPU(resp->api_major);
+ if (api_minor_version != NULL)
+ *api_minor_version = LE16_TO_CPU(resp->api_minor);
+
+ /* A workaround to fix the API version in SW */
+ if (api_major_version && api_minor_version &&
+ fw_major_version && fw_minor_version &&
+ ((*api_major_version == 1) && (*api_minor_version == 1)) &&
+ (((*fw_major_version == 4) && (*fw_minor_version >= 2)) ||
+ (*fw_major_version > 4)))
+ *api_minor_version = 2;
+ }
+
+ return status;
+}
+
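+/*
+ * Illustrative sketch (editor's note): querying the firmware and AdminQ API
+ * versions, e.g. for a probe-time banner.  Any of the four output pointers
+ * may be NULL when that field is not needed.
+ *
+ *	u16 fw_maj, fw_min, api_maj, api_min;
+ *
+ *	if (i40e_aq_get_firmware_version(hw, &fw_maj, &fw_min,
+ *	    &api_maj, &api_min, NULL) == I40E_SUCCESS)
+ *		printf("fw %d.%d api %d.%d\n", fw_maj, fw_min,
+ *		    api_maj, api_min);
+ */
+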
+/**
+ * i40e_aq_send_driver_version
+ * @hw: pointer to the hw struct
+ * @dv: driver version info (major, minor, build, subbuild and driver string)
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Send the driver version to the firmware
+ **/
+enum i40e_status_code i40e_aq_send_driver_version(struct i40e_hw *hw,
+ struct i40e_driver_version *dv,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_driver_version *cmd =
+ (struct i40e_aqc_driver_version *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 len;
+
+ if (dv == NULL)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version);
+
+ desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_SI);
+ cmd->driver_major_ver = dv->major_version;
+ cmd->driver_minor_ver = dv->minor_version;
+ cmd->driver_build_ver = dv->build_version;
+ cmd->driver_subbuild_ver = dv->subbuild_version;
+
+ len = 0;
+ while (len < sizeof(dv->driver_string) &&
+ (dv->driver_string[len] < 0x80) &&
+ dv->driver_string[len])
+ len++;
+ status = i40e_asq_send_command(hw, &desc, dv->driver_string,
+ len, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_get_link_status - get status of the HW network link
+ * @hw: pointer to the hw struct
+ *
+ * Returns TRUE if link is up, FALSE if link is down.
+ *
+ * Side effect: LinkStatusEvent reporting becomes enabled
+ **/
+bool i40e_get_link_status(struct i40e_hw *hw)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ bool link_status = FALSE;
+
+ if (hw->phy.get_link_info) {
+ status = i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
+
+ if (status != I40E_SUCCESS)
+ goto i40e_get_link_status_exit;
+ }
+
+ link_status = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
+
+i40e_get_link_status_exit:
+ return link_status;
+}
+
+/**
+ * i40e_get_link_speed
+ * @hw: pointer to the hw struct
+ *
+ * Returns the link speed of the adapter.
+ **/
+enum i40e_aq_link_speed i40e_get_link_speed(struct i40e_hw *hw)
+{
+ enum i40e_aq_link_speed speed = I40E_LINK_SPEED_UNKNOWN;
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ if (hw->phy.get_link_info) {
+ status = i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
+
+ if (status != I40E_SUCCESS)
+ goto i40e_link_speed_exit;
+ }
+
+ speed = hw->phy.link_info.link_speed;
+
+i40e_link_speed_exit:
+ return speed;
+}
+
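+/*
+ * Illustrative sketch (editor's note): the two link helpers above are simple
+ * polled queries; a caller wanting both state and speed could do:
+ *
+ *	if (i40e_get_link_status(hw))
+ *		printf("link up, speed code 0x%x\n",
+ *		    i40e_get_link_speed(hw));
+ */
+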
+/**
+ * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC
+ * @hw: pointer to the hw struct
+ * @uplink_seid: the MAC or other gizmo SEID
+ * @downlink_seid: the VSI SEID
+ * @enabled_tc: bitmap of TCs to be enabled
+ * @default_port: TRUE for default port VSI, FALSE for control port
+ * @enable_l2_filtering: TRUE to add L2 filter table rules to regular forwarding rules for cloud support
+ * @veb_seid: pointer to where to put the resulting VEB SEID
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This asks the FW to add a VEB between the uplink and downlink
+ * elements. If the uplink SEID is 0, this will be a floating VEB.
+ **/
+enum i40e_status_code i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
+ u16 downlink_seid, u8 enabled_tc,
+ bool default_port, bool enable_l2_filtering,
+ u16 *veb_seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_veb *cmd =
+ (struct i40e_aqc_add_veb *)&desc.params.raw;
+ struct i40e_aqc_add_veb_completion *resp =
+ (struct i40e_aqc_add_veb_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 veb_flags = 0;
+
+ /* SEIDs need to either both be set or both be 0 for floating VEB */
+ if (!!uplink_seid != !!downlink_seid)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb);
+
+ cmd->uplink_seid = CPU_TO_LE16(uplink_seid);
+ cmd->downlink_seid = CPU_TO_LE16(downlink_seid);
+ cmd->enable_tcs = enabled_tc;
+ if (!uplink_seid)
+ veb_flags |= I40E_AQC_ADD_VEB_FLOATING;
+ if (default_port)
+ veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT;
+ else
+ veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA;
+
+ if (enable_l2_filtering)
+ veb_flags |= I40E_AQC_ADD_VEB_ENABLE_L2_FILTER;
+
+ cmd->veb_flags = CPU_TO_LE16(veb_flags);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status && veb_seid)
+ *veb_seid = LE16_TO_CPU(resp->veb_seid);
+
+ return status;
+}
+
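+/*
+ * Illustrative sketch (editor's note): inserting a non-floating VEB between
+ * the MAC and a VSI, TC 0 only, as a default port with no extra L2
+ * filtering.  `mac_seid' and `vsi_seid' are assumed to come from an earlier
+ * switch configuration query.
+ *
+ *	u16 veb_seid = 0;
+ *
+ *	(void) i40e_aq_add_veb(hw, mac_seid, vsi_seid, 0x1,
+ *	    TRUE, FALSE, &veb_seid, NULL);
+ */
+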
+/**
+ * i40e_aq_get_veb_parameters - Retrieve VEB parameters
+ * @hw: pointer to the hw struct
+ * @veb_seid: the SEID of the VEB to query
+ * @switch_id: the uplink switch id
+ * @floating: set to TRUE if the VEB is floating
+ * @statistic_index: index of the stats counter block for this VEB
+ * @vebs_used: number of VEBs used by function
+ * @vebs_free: total VEBs not reserved by any function
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This retrieves the parameters for a particular VEB, specified by
+ * uplink_seid, and returns them to the caller.
+ **/
+enum i40e_status_code i40e_aq_get_veb_parameters(struct i40e_hw *hw,
+ u16 veb_seid, u16 *switch_id,
+ bool *floating, u16 *statistic_index,
+ u16 *vebs_used, u16 *vebs_free,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_veb_parameters_completion *cmd_resp =
+ (struct i40e_aqc_get_veb_parameters_completion *)
+ &desc.params.raw;
+ enum i40e_status_code status;
+
+ if (veb_seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_veb_parameters);
+ cmd_resp->seid = CPU_TO_LE16(veb_seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ if (status)
+ goto get_veb_exit;
+
+ if (switch_id)
+ *switch_id = LE16_TO_CPU(cmd_resp->switch_id);
+ if (statistic_index)
+ *statistic_index = LE16_TO_CPU(cmd_resp->statistic_index);
+ if (vebs_used)
+ *vebs_used = LE16_TO_CPU(cmd_resp->vebs_used);
+ if (vebs_free)
+ *vebs_free = LE16_TO_CPU(cmd_resp->vebs_free);
+ if (floating) {
+ u16 flags = LE16_TO_CPU(cmd_resp->veb_flags);
+ if (flags & I40E_AQC_ADD_VEB_FLOATING)
+ *floating = TRUE;
+ else
+ *floating = FALSE;
+ }
+
+get_veb_exit:
+ return status;
+}
+
+/**
+ * i40e_aq_add_macvlan
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the mac address
+ * @mv_list: list of macvlans to be added
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Add MAC/VLAN addresses to the HW filtering
+ **/
+enum i40e_status_code i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_add_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_macvlan *cmd =
+ (struct i40e_aqc_macvlan *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 buf_size;
+
+ if (count == 0 || !mv_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = count * sizeof(struct i40e_aqc_add_macvlan_element_data);
+
+ /* prep the rest of the request */
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan);
+ cmd->num_addresses = CPU_TO_LE16(count);
+ cmd->seid[0] = CPU_TO_LE16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
+ cmd->seid[1] = 0;
+ cmd->seid[2] = 0;
+
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
+ cmd_details);
+
+ return status;
+}
+
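+/*
+ * Illustrative sketch (editor's note): adding a single perfect-match MAC
+ * filter to a VSI.  The element layout (mac_addr, flags) and the
+ * I40E_AQC_MACVLAN_ADD_PERFECT_MATCH flag are assumed to match the
+ * definitions in i40e_adminq_cmd.h; `addr' is the caller's 6-byte MAC.
+ *
+ *	struct i40e_aqc_add_macvlan_element_data e;
+ *
+ *	memset(&e, 0, sizeof(e));
+ *	memcpy(e.mac_addr, addr, 6);
+ *	e.flags = CPU_TO_LE16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
+ *	(void) i40e_aq_add_macvlan(hw, vsi_seid, &e, 1, NULL);
+ */
+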
+/**
+ * i40e_aq_remove_macvlan
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the mac address
+ * @mv_list: list of macvlans to be removed
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Remove MAC/VLAN addresses from the HW filtering
+ **/
+enum i40e_status_code i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_remove_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_macvlan *cmd =
+ (struct i40e_aqc_macvlan *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 buf_size;
+
+ if (count == 0 || !mv_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = count * sizeof(struct i40e_aqc_remove_macvlan_element_data);
+
+ /* prep the rest of the request */
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
+ cmd->num_addresses = CPU_TO_LE16(count);
+ cmd->seid[0] = CPU_TO_LE16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
+ cmd->seid[1] = 0;
+ cmd->seid[2] = 0;
+
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_vlan - Add VLAN ids to the HW filtering
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the vlan filters
+ * @v_list: list of vlan filters to be added
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_add_vlan(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_add_remove_vlan_element_data *v_list,
+ u8 count, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_macvlan *cmd =
+ (struct i40e_aqc_macvlan *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 buf_size;
+
+ if (count == 0 || !v_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = count * sizeof(struct i40e_aqc_add_remove_vlan_element_data);
+
+ /* prep the rest of the request */
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_vlan);
+ cmd->num_addresses = CPU_TO_LE16(count);
+ cmd->seid[0] = CPU_TO_LE16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID);
+ cmd->seid[1] = 0;
+ cmd->seid[2] = 0;
+
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, v_list, buf_size,
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_remove_vlan - Remove VLANs from the HW filtering
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the vlan filters
+ * @v_list: list of VLAN filters to be removed
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_remove_vlan(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_add_remove_vlan_element_data *v_list,
+ u8 count, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_macvlan *cmd =
+ (struct i40e_aqc_macvlan *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 buf_size;
+
+ if (count == 0 || !v_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = count * sizeof(struct i40e_aqc_add_remove_vlan_element_data);
+
+ /* prep the rest of the request */
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_vlan);
+ cmd->num_addresses = CPU_TO_LE16(count);
+ cmd->seid[0] = CPU_TO_LE16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID);
+ cmd->seid[1] = 0;
+ cmd->seid[2] = 0;
+
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, v_list, buf_size,
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_send_msg_to_vf
+ * @hw: pointer to the hardware structure
+ * @vfid: id of the VF to send the message to
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cmd_details: pointer to command details
+ *
+ * Send a message to the specified VF through the admin queue.
+ **/
+enum i40e_status_code i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
+ u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_pf_vf_message *cmd =
+ (struct i40e_aqc_pf_vf_message *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf);
+ cmd->id = CPU_TO_LE32(vfid);
+ desc.cookie_high = CPU_TO_LE32(v_opcode);
+ desc.cookie_low = CPU_TO_LE32(v_retval);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_SI);
+ if (msglen) {
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF |
+ I40E_AQ_FLAG_RD));
+ if (msglen > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(msglen);
+ }
+ status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_debug_write_register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Write to a register using the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_debug_write_register(struct i40e_hw *hw,
+ u32 reg_addr, u64 reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_debug_reg_read_write *cmd =
+ (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg);
+
+ cmd->address = CPU_TO_LE32(reg_addr);
+ cmd->value_high = CPU_TO_LE32((u32)(reg_val >> 32));
+ cmd->value_low = CPU_TO_LE32((u32)(reg_val & 0xFFFFFFFF));
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_hmc_resource_profile
+ * @hw: pointer to the hw struct
+ * @profile: pointer to return the current HMC profile type
+ * @pe_vf_enabled_count: pointer to return the number of PE-enabled VFs
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * query the HMC profile of the device.
+ **/
+enum i40e_status_code i40e_aq_get_hmc_resource_profile(struct i40e_hw *hw,
+ enum i40e_aq_hmc_profile *profile,
+ u8 *pe_vf_enabled_count,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aq_get_set_hmc_resource_profile *resp =
+ (struct i40e_aq_get_set_hmc_resource_profile *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_query_hmc_resource_profile);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	if (status != I40E_SUCCESS)
+		return status;
+
+ *profile = (enum i40e_aq_hmc_profile)(resp->pm_profile &
+ I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK);
+ *pe_vf_enabled_count = resp->pe_vf_enabled &
+ I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK;
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_hmc_resource_profile
+ * @hw: pointer to the hw struct
+ * @profile: type of profile the HMC is to be set as
+ * @pe_vf_enabled_count: the number of PE enabled VFs the system has
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * set the HMC profile of the device.
+ **/
+enum i40e_status_code i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
+ enum i40e_aq_hmc_profile profile,
+ u8 pe_vf_enabled_count,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aq_get_set_hmc_resource_profile *cmd =
+ (struct i40e_aq_get_set_hmc_resource_profile *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_hmc_resource_profile);
+
+ cmd->pm_profile = (u8)profile;
+ cmd->pe_vf_enabled = pe_vf_enabled_count;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_request_resource
+ * @hw: pointer to the hw struct
+ * @resource: resource id
+ * @access: access type
+ * @sdp_number: resource number
+ * @timeout: the maximum time in ms that the driver may hold the resource
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Requests ownership of a common resource using the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_request_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ enum i40e_aq_resource_access_type access,
+ u8 sdp_number, u64 *timeout,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_request_resource *cmd_resp =
+ (struct i40e_aqc_request_resource *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ DEBUGFUNC("i40e_aq_request_resource");
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource);
+
+ cmd_resp->resource_id = CPU_TO_LE16(resource);
+ cmd_resp->access_type = CPU_TO_LE16(access);
+ cmd_resp->resource_number = CPU_TO_LE32(sdp_number);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ /* The completion specifies the maximum time in ms that the driver
+ * may hold the resource in the Timeout field.
+ * If the resource is held by someone else, the command completes with
+ * busy return value and the timeout field indicates the maximum time
+ * the current owner of the resource has to free it.
+ */
+ if (status == I40E_SUCCESS || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY)
+ *timeout = LE32_TO_CPU(cmd_resp->timeout);
+
+ return status;
+}
+
+/**
+ * i40e_aq_release_resource
+ * @hw: pointer to the hw struct
+ * @resource: resource id
+ * @sdp_number: resource number
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Releases a common resource using the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_release_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ u8 sdp_number,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_request_resource *cmd =
+ (struct i40e_aqc_request_resource *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ DEBUGFUNC("i40e_aq_release_resource");
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource);
+
+ cmd->resource_id = CPU_TO_LE16(resource);
+ cmd->resource_number = CPU_TO_LE32(sdp_number);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_read_nvm
+ * @hw: pointer to the hw struct
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be read (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Read the NVM using the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_nvm_update *cmd =
+ (struct i40e_aqc_nvm_update *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ DEBUGFUNC("i40e_aq_read_nvm");
+
+	/* The highest byte of the offset must be zero. */
+ if (offset & 0xFF000000) {
+ status = I40E_ERR_PARAM;
+ goto i40e_aq_read_nvm_exit;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read);
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
+ cmd->module_pointer = module_pointer;
+ cmd->offset = CPU_TO_LE32(offset);
+ cmd->length = CPU_TO_LE16(length);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (length > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
+
+i40e_aq_read_nvm_exit:
+ return status;
+}
+
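+/*
+ * Illustrative sketch (editor's note): NVM reads are normally bracketed by
+ * the resource request/release helpers earlier in this file so that only
+ * one function touches the flash at a time.  I40E_NVM_RESOURCE_ID and
+ * I40E_RESOURCE_READ are assumed to be the identifiers from i40e_type.h;
+ * `offset', `len' and `buf' are supplied by the caller.
+ *
+ *	u64 timeout = 0;
+ *	enum i40e_status_code err;
+ *
+ *	err = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID,
+ *	    I40E_RESOURCE_READ, 0, &timeout, NULL);
+ *	if (err == I40E_SUCCESS) {
+ *		err = i40e_aq_read_nvm(hw, 0, offset, len, buf, TRUE, NULL);
+ *		i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+ *	}
+ */
+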
+/**
+ * i40e_aq_erase_nvm
+ * @hw: pointer to the hw struct
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset in the module (expressed in 4 KB from module's beginning)
+ * @length: length of the section to be erased (expressed in 4 KB)
+ * @last_command: tells if this is the last command in a series
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Erase the NVM sector using the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, bool last_command,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_nvm_update *cmd =
+ (struct i40e_aqc_nvm_update *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ DEBUGFUNC("i40e_aq_erase_nvm");
+
+	/* The highest byte of the offset must be zero. */
+ if (offset & 0xFF000000) {
+ status = I40E_ERR_PARAM;
+ goto i40e_aq_erase_nvm_exit;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase);
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
+ cmd->module_pointer = module_pointer;
+ cmd->offset = CPU_TO_LE32(offset);
+ cmd->length = CPU_TO_LE16(length);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+i40e_aq_erase_nvm_exit:
+ return status;
+}
+
+#define I40E_DEV_FUNC_CAP_SWITCH_MODE 0x01
+#define I40E_DEV_FUNC_CAP_MGMT_MODE 0x02
+#define I40E_DEV_FUNC_CAP_NPAR 0x03
+#define I40E_DEV_FUNC_CAP_OS2BMC 0x04
+#define I40E_DEV_FUNC_CAP_VALID_FUNC 0x05
+#define I40E_DEV_FUNC_CAP_SRIOV_1_1 0x12
+#define I40E_DEV_FUNC_CAP_VF 0x13
+#define I40E_DEV_FUNC_CAP_VMDQ 0x14
+#define I40E_DEV_FUNC_CAP_802_1_QBG 0x15
+#define I40E_DEV_FUNC_CAP_802_1_QBH 0x16
+#define I40E_DEV_FUNC_CAP_VSI 0x17
+#define I40E_DEV_FUNC_CAP_DCB 0x18
+#define I40E_DEV_FUNC_CAP_FCOE 0x21
+#define I40E_DEV_FUNC_CAP_RSS 0x40
+#define I40E_DEV_FUNC_CAP_RX_QUEUES 0x41
+#define I40E_DEV_FUNC_CAP_TX_QUEUES 0x42
+#define I40E_DEV_FUNC_CAP_MSIX 0x43
+#define I40E_DEV_FUNC_CAP_MSIX_VF 0x44
+#define I40E_DEV_FUNC_CAP_FLOW_DIRECTOR 0x45
+#define I40E_DEV_FUNC_CAP_IEEE_1588 0x46
+#define I40E_DEV_FUNC_CAP_MFP_MODE_1 0xF1
+#define I40E_DEV_FUNC_CAP_CEM 0xF2
+#define I40E_DEV_FUNC_CAP_IWARP 0x51
+#define I40E_DEV_FUNC_CAP_LED 0x61
+#define I40E_DEV_FUNC_CAP_SDP 0x62
+#define I40E_DEV_FUNC_CAP_MDIO 0x63
+
+/**
+ * i40e_parse_discover_capabilities
+ * @hw: pointer to the hw struct
+ * @buff: pointer to a buffer containing device/function capability records
+ * @cap_count: number of capability records in the list
+ * @list_type_opc: type of capabilities list to parse
+ *
+ * Parse the device/function capabilities list.
+ **/
+static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
+ u32 cap_count,
+ enum i40e_admin_queue_opc list_type_opc)
+{
+ struct i40e_aqc_list_capabilities_element_resp *cap;
+ u32 number, logical_id, phys_id;
+ struct i40e_hw_capabilities *p;
+ u32 i = 0;
+ u16 id;
+
+ cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
+
+ if (list_type_opc == i40e_aqc_opc_list_dev_capabilities)
+ p = (struct i40e_hw_capabilities *)&hw->dev_caps;
+ else if (list_type_opc == i40e_aqc_opc_list_func_capabilities)
+ p = (struct i40e_hw_capabilities *)&hw->func_caps;
+ else
+ return;
+
+ for (i = 0; i < cap_count; i++, cap++) {
+ id = LE16_TO_CPU(cap->id);
+ number = LE32_TO_CPU(cap->number);
+ logical_id = LE32_TO_CPU(cap->logical_id);
+ phys_id = LE32_TO_CPU(cap->phys_id);
+
+ switch (id) {
+ case I40E_DEV_FUNC_CAP_SWITCH_MODE:
+ p->switch_mode = number;
+ break;
+ case I40E_DEV_FUNC_CAP_MGMT_MODE:
+ p->management_mode = number;
+ break;
+ case I40E_DEV_FUNC_CAP_NPAR:
+ p->npar_enable = number;
+ break;
+ case I40E_DEV_FUNC_CAP_OS2BMC:
+ p->os2bmc = number;
+ break;
+ case I40E_DEV_FUNC_CAP_VALID_FUNC:
+ p->valid_functions = number;
+ break;
+ case I40E_DEV_FUNC_CAP_SRIOV_1_1:
+ if (number == 1)
+ p->sr_iov_1_1 = TRUE;
+ break;
+ case I40E_DEV_FUNC_CAP_VF:
+ p->num_vfs = number;
+ p->vf_base_id = logical_id;
+ break;
+ case I40E_DEV_FUNC_CAP_VMDQ:
+ if (number == 1)
+ p->vmdq = TRUE;
+ break;
+ case I40E_DEV_FUNC_CAP_802_1_QBG:
+ if (number == 1)
+ p->evb_802_1_qbg = TRUE;
+ break;
+ case I40E_DEV_FUNC_CAP_802_1_QBH:
+ if (number == 1)
+ p->evb_802_1_qbh = TRUE;
+ break;
+ case I40E_DEV_FUNC_CAP_VSI:
+ p->num_vsis = number;
+ break;
+ case I40E_DEV_FUNC_CAP_DCB:
+ if (number == 1) {
+ p->dcb = TRUE;
+ p->enabled_tcmap = logical_id;
+ p->maxtc = phys_id;
+ }
+ break;
+ case I40E_DEV_FUNC_CAP_FCOE:
+ if (number == 1)
+ p->fcoe = TRUE;
+ break;
+ case I40E_DEV_FUNC_CAP_RSS:
+ p->rss = TRUE;
+ p->rss_table_size = number;
+ p->rss_table_entry_width = logical_id;
+ break;
+ case I40E_DEV_FUNC_CAP_RX_QUEUES:
+ p->num_rx_qp = number;
+ p->base_queue = phys_id;
+ break;
+ case I40E_DEV_FUNC_CAP_TX_QUEUES:
+ p->num_tx_qp = number;
+ p->base_queue = phys_id;
+ break;
+ case I40E_DEV_FUNC_CAP_MSIX:
+ p->num_msix_vectors = number;
+ break;
+ case I40E_DEV_FUNC_CAP_MSIX_VF:
+ p->num_msix_vectors_vf = number;
+ break;
+ case I40E_DEV_FUNC_CAP_MFP_MODE_1:
+ if (number == 1)
+ p->mfp_mode_1 = TRUE;
+ break;
+ case I40E_DEV_FUNC_CAP_CEM:
+ if (number == 1)
+ p->mgmt_cem = TRUE;
+ break;
+ case I40E_DEV_FUNC_CAP_IWARP:
+ if (number == 1)
+ p->iwarp = TRUE;
+ break;
+ case I40E_DEV_FUNC_CAP_LED:
+ if (phys_id < I40E_HW_CAP_MAX_GPIO)
+ p->led[phys_id] = TRUE;
+ break;
+ case I40E_DEV_FUNC_CAP_SDP:
+ if (phys_id < I40E_HW_CAP_MAX_GPIO)
+ p->sdp[phys_id] = TRUE;
+ break;
+ case I40E_DEV_FUNC_CAP_MDIO:
+ if (number == 1) {
+ p->mdio_port_num = phys_id;
+ p->mdio_port_mode = logical_id;
+ }
+ break;
+ case I40E_DEV_FUNC_CAP_IEEE_1588:
+ if (number == 1)
+ p->ieee_1588 = TRUE;
+ break;
+ case I40E_DEV_FUNC_CAP_FLOW_DIRECTOR:
+ p->fd = TRUE;
+ p->fd_filters_guaranteed = number;
+ p->fd_filters_best_effort = logical_id;
+ break;
+ default:
+ break;
+ }
+ }
+
+	/* Software override ensuring FCoE is disabled when NPAR or MFP
+	 * mode is enabled, because it is not supported in those modes.
+	 */
+ if (p->npar_enable || p->mfp_mode_1)
+ p->fcoe = FALSE;
+
+ /* additional HW specific goodies that might
+ * someday be HW version specific
+ */
+ p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS;
+}
+
+/**
+ * i40e_aq_discover_capabilities
+ * @hw: pointer to the hw struct
+ * @buff: a virtual buffer to hold the capabilities
+ * @buff_size: Size of the virtual buffer
+ * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM
+ * @list_type_opc: capabilities type to discover - pass in the command opcode
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get the device capabilities descriptions from the firmware
+ **/
+enum i40e_status_code i40e_aq_discover_capabilities(struct i40e_hw *hw,
+ void *buff, u16 buff_size, u16 *data_size,
+ enum i40e_admin_queue_opc list_type_opc,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aqc_list_capabilites *cmd;
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;
+
+ if (list_type_opc != i40e_aqc_opc_list_func_capabilities &&
+ list_type_opc != i40e_aqc_opc_list_dev_capabilities) {
+ status = I40E_ERR_PARAM;
+ goto exit;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc, list_type_opc);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ *data_size = LE16_TO_CPU(desc.datalen);
+
+ if (status)
+ goto exit;
+
+ i40e_parse_discover_capabilities(hw, buff, LE32_TO_CPU(cmd->count),
+ list_type_opc);
+
+exit:
+ return status;
+}
+
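+/*
+ * Illustrative sketch (editor's note): the capability query is commonly
+ * issued twice; when the first buffer is too small the AQ reports ENOMEM
+ * and data_size tells the caller how large the buffer needs to be.
+ * I40E_AQ_RC_ENOMEM is assumed to be the AQ error code from i40e_adminq.h;
+ * `buf' and `buf_size' are supplied by the caller.
+ *
+ *	u16 needed = 0;
+ *	enum i40e_status_code err;
+ *
+ *	err = i40e_aq_discover_capabilities(hw, buf, buf_size, &needed,
+ *	    i40e_aqc_opc_list_func_capabilities, NULL);
+ *	if (err && hw->aq.asq_last_status == I40E_AQ_RC_ENOMEM)
+ *		(grow buf to at least `needed' bytes and retry the call)
+ */
+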
+/**
+ * i40e_aq_update_nvm
+ * @hw: pointer to the hw struct
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be written (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Update the NVM using the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_nvm_update *cmd =
+ (struct i40e_aqc_nvm_update *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ DEBUGFUNC("i40e_aq_update_nvm");
+
+	/* The highest byte of the offset must be zero. */
+ if (offset & 0xFF000000) {
+ status = I40E_ERR_PARAM;
+ goto i40e_aq_update_nvm_exit;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
+ cmd->module_pointer = module_pointer;
+ cmd->offset = CPU_TO_LE32(offset);
+ cmd->length = CPU_TO_LE16(length);
+
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (length > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
+
+i40e_aq_update_nvm_exit:
+ return status;
+}
+
+/**
+ * i40e_aq_get_lldp_mib
+ * @hw: pointer to the hw struct
+ * @bridge_type: type of bridge requested
+ * @mib_type: Local, Remote or both Local and Remote MIBs
+ * @buff: pointer to a user supplied buffer to store the MIB block
+ * @buff_size: size of the buffer (in bytes)
+ * @local_len: length of the returned Local LLDP MIB
+ * @remote_len: length of the returned Remote LLDP MIB
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Requests the complete LLDP MIB (entire packet).
+ **/
+enum i40e_status_code i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
+ u8 mib_type, void *buff, u16 buff_size,
+ u16 *local_len, u16 *remote_len,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_get_mib *cmd =
+ (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
+ struct i40e_aqc_lldp_get_mib *resp =
+ (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (buff_size == 0 || !buff)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib);
+ /* Indirect Command */
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+
+ cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK;
+ cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
+ I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+
+ desc.datalen = CPU_TO_LE16(buff_size);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ if (!status) {
+ if (local_len != NULL)
+ *local_len = LE16_TO_CPU(resp->local_len);
+ if (remote_len != NULL)
+ *remote_len = LE16_TO_CPU(resp->remote_len);
+ }
+
+ return status;
+}
+
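+/*
+ * Illustrative sketch (editor's note): fetching the local MIB into a caller
+ * supplied buffer.  I40E_AQ_LLDP_MIB_LOCAL and bridge type 0 (nearest
+ * bridge) are assumed to be the encodings from i40e_adminq_cmd.h.
+ *
+ *	u8 mib[1024];
+ *	u16 llen = 0, rlen = 0;
+ *
+ *	(void) i40e_aq_get_lldp_mib(hw, 0, I40E_AQ_LLDP_MIB_LOCAL,
+ *	    mib, sizeof(mib), &llen, &rlen, NULL);
+ */
+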
+/**
+ * i40e_aq_cfg_lldp_mib_change_event
+ * @hw: pointer to the hw struct
+ * @enable_update: Enable or Disable event posting
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Enable or Disable posting of an event on ARQ when LLDP MIB
+ * associated with the interface changes
+ **/
+enum i40e_status_code i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
+ bool enable_update,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_update_mib *cmd =
+ (struct i40e_aqc_lldp_update_mib *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib);
+
+ if (!enable_update)
+ cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_lldp_tlv
+ * @hw: pointer to the hw struct
+ * @bridge_type: type of bridge
+ * @buff: buffer with TLV to add
+ * @buff_size: length of the buffer
+ * @tlv_len: length of the TLV to be added
+ * @mib_len: length of the LLDP MIB returned in response
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Add the specified TLV to the LLDP Local MIB for the given bridge type.
+ * It is the responsibility of the caller to make sure that the TLV is not
+ * already present in the LLDPDU.
+ * In return, firmware will write the complete LLDP MIB with the newly
+ * added TLV into the response buffer.
+ **/
+enum i40e_status_code i40e_aq_add_lldp_tlv(struct i40e_hw *hw, u8 bridge_type,
+ void *buff, u16 buff_size, u16 tlv_len,
+ u16 *mib_len,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_add_tlv *cmd =
+ (struct i40e_aqc_lldp_add_tlv *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (buff_size == 0 || !buff || tlv_len == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_add_tlv);
+
+ /* Indirect Command */
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(buff_size);
+
+ cmd->type = ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
+ I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+ cmd->len = CPU_TO_LE16(tlv_len);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ if (!status) {
+ if (mib_len != NULL)
+ *mib_len = LE16_TO_CPU(desc.datalen);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_update_lldp_tlv
+ * @hw: pointer to the hw struct
+ * @bridge_type: type of bridge
+ * @buff: buffer with TLV to update
+ * @buff_size: size of the buffer holding original and updated TLVs
+ * @old_len: Length of the Original TLV
+ * @new_len: Length of the Updated TLV
+ * @offset: offset of the updated TLV in the buff
+ * @mib_len: length of the returned LLDP MIB
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Update the specified TLV to the LLDP Local MIB for the given bridge type.
+ * Firmware will place the complete LLDP MIB in response buffer with the
+ * updated TLV.
+ **/
+enum i40e_status_code i40e_aq_update_lldp_tlv(struct i40e_hw *hw,
+ u8 bridge_type, void *buff, u16 buff_size,
+ u16 old_len, u16 new_len, u16 offset,
+ u16 *mib_len,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_update_tlv *cmd =
+ (struct i40e_aqc_lldp_update_tlv *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (buff_size == 0 || !buff || offset == 0 ||
+ old_len == 0 || new_len == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_tlv);
+
+ /* Indirect Command */
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(buff_size);
+
+ cmd->type = ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
+ I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+ cmd->old_len = CPU_TO_LE16(old_len);
+ cmd->new_offset = CPU_TO_LE16(offset);
+ cmd->new_len = CPU_TO_LE16(new_len);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ if (!status) {
+ if (mib_len != NULL)
+ *mib_len = LE16_TO_CPU(desc.datalen);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_delete_lldp_tlv
+ * @hw: pointer to the hw struct
+ * @bridge_type: type of bridge
+ * @buff: pointer to a user supplied buffer that has the TLV
+ * @buff_size: length of the buffer
+ * @tlv_len: length of the TLV to be deleted
+ * @mib_len: length of the returned LLDP MIB
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Delete the specified TLV from LLDP Local MIB for the given bridge type.
+ * The firmware places the entire LLDP MIB in the response buffer.
+ **/
+enum i40e_status_code i40e_aq_delete_lldp_tlv(struct i40e_hw *hw,
+ u8 bridge_type, void *buff, u16 buff_size,
+ u16 tlv_len, u16 *mib_len,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_add_tlv *cmd =
+ (struct i40e_aqc_lldp_add_tlv *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (buff_size == 0 || !buff)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_delete_tlv);
+
+ /* Indirect Command */
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(buff_size);
+ cmd->len = CPU_TO_LE16(tlv_len);
+ cmd->type = ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
+ I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ if (!status) {
+ if (mib_len != NULL)
+ *mib_len = LE16_TO_CPU(desc.datalen);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_stop_lldp
+ * @hw: pointer to the hw struct
+ * @shutdown_agent: True if LLDP Agent needs to be Shutdown
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Stop or Shutdown the embedded LLDP Agent
+ **/
+enum i40e_status_code i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_stop *cmd =
+ (struct i40e_aqc_lldp_stop *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop);
+
+ if (shutdown_agent)
+ cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_start_lldp
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Start the embedded LLDP Agent on all ports.
+ **/
+enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_start *cmd =
+ (struct i40e_aqc_lldp_start *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
+
+ cmd->command = I40E_AQ_LLDP_AGENT_START;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_udp_tunnel
+ * @hw: pointer to the hw struct
+ * @udp_port: the UDP port to add
+ * @protocol_index: protocol index type
+ * @filter_index: pointer to filter index
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
+ u16 udp_port, u8 protocol_index,
+ u8 *filter_index,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_udp_tunnel *cmd =
+ (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw;
+ struct i40e_aqc_del_udp_tunnel_completion *resp =
+ (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel);
+
+ cmd->udp_port = CPU_TO_LE16(udp_port);
+ cmd->protocol_type = protocol_index;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	if (!status && filter_index != NULL)
+		*filter_index = resp->index;
+
+ return status;
+}
+
+/**
+ * i40e_aq_del_udp_tunnel
+ * @hw: pointer to the hw struct
+ * @index: filter index
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_remove_udp_tunnel *cmd =
+ (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel);
+
+ cmd->index = index;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
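+/*
+ * Illustrative sketch (editor's note): registering and later removing a
+ * VXLAN receive port.  4789 is the IANA default VXLAN port and
+ * I40E_AQC_TUNNEL_TYPE_VXLAN is assumed to be the protocol index defined
+ * in i40e_adminq_cmd.h.
+ *
+ *	u8 filter_idx;
+ *	enum i40e_status_code err;
+ *
+ *	err = i40e_aq_add_udp_tunnel(hw, 4789, I40E_AQC_TUNNEL_TYPE_VXLAN,
+ *	    &filter_idx, NULL);
+ *	if (err == I40E_SUCCESS)
+ *		i40e_aq_del_udp_tunnel(hw, filter_idx, NULL);
+ */
+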
+/**
+ * i40e_aq_get_switch_resource_alloc (0x0204)
+ * @hw: pointer to the hw struct
+ * @num_entries: pointer to u8 to store the number of resource entries returned
+ * @buf: pointer to a user supplied buffer. This buffer must be large enough
+ * to store the resource information for all resource types. Each
+ * resource type is an i40e_aqc_switch_resource_alloc_element_resp structure.
+ * @count: number of resource entries the buffer can hold
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Query the resources allocated to a function.
+ **/
+enum i40e_status_code i40e_aq_get_switch_resource_alloc(struct i40e_hw *hw,
+ u8 *num_entries,
+ struct i40e_aqc_switch_resource_alloc_element_resp *buf,
+ u16 count,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_switch_resource_alloc *cmd_resp =
+ (struct i40e_aqc_get_switch_resource_alloc *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 length = count
+ * sizeof(struct i40e_aqc_switch_resource_alloc_element_resp);
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_switch_resource_alloc);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (length > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, buf, length, cmd_details);
+
+ if (!status)
+ *num_entries = cmd_resp->num_entries;
+
+ return status;
+}
+
+/**
+ * i40e_aq_delete_element - Delete switch element
+ * @hw: pointer to the hw struct
+ * @seid: the SEID to delete from the switch
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This deletes a switch element from the switch.
+ **/
+enum i40e_status_code i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_switch_seid *cmd =
+ (struct i40e_aqc_switch_seid *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element);
+
+ cmd->seid = CPU_TO_LE16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_pvirt - Instantiate a Port Virtualizer on a port
+ * @hw: pointer to the hw struct
+ * @flags: component flags
+ * @mac_seid: uplink seid (MAC SEID)
+ * @vsi_seid: connected vsi seid
+ * @ret_seid: seid of the created PV component
+ *
+ * This instantiates an i40e port virtualizer with specified flags.
+ * Depending on specified flags the port virtualizer can act as a
+ * 802.1Qbr port virtualizer or a 802.1Qbg S-component.
+ **/
+enum i40e_status_code i40e_aq_add_pvirt(struct i40e_hw *hw, u16 flags,
+ u16 mac_seid, u16 vsi_seid,
+ u16 *ret_seid)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_update_pv *cmd =
+ (struct i40e_aqc_add_update_pv *)&desc.params.raw;
+ struct i40e_aqc_add_update_pv_completion *resp =
+ (struct i40e_aqc_add_update_pv_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (vsi_seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_pv);
+ cmd->command_flags = CPU_TO_LE16(flags);
+ cmd->uplink_seid = CPU_TO_LE16(mac_seid);
+ cmd->connected_seid = CPU_TO_LE16(vsi_seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+ if (!status && ret_seid)
+ *ret_seid = LE16_TO_CPU(resp->pv_seid);
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_tag - Add an S/E-tag
+ * @hw: pointer to the hw struct
+ * @direct_to_queue: TRUE if the S-tag should direct flow to a specific queue
+ * @vsi_seid: VSI SEID to use this tag
+ * @tag: value of the tag
+ * @queue_num: queue number, only valid if direct_to_queue is TRUE
+ * @tags_used: return value, number of tags in use by this PF
+ * @tags_free: return value, number of unallocated tags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This associates an S- or E-tag with a VSI in the switch complex. It returns
+ * the number of tags allocated by the PF, and the number of unallocated
+ * tags available.
+ **/
+enum i40e_status_code i40e_aq_add_tag(struct i40e_hw *hw, bool direct_to_queue,
+ u16 vsi_seid, u16 tag, u16 queue_num,
+ u16 *tags_used, u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_tag *cmd =
+ (struct i40e_aqc_add_tag *)&desc.params.raw;
+ struct i40e_aqc_add_remove_tag_completion *resp =
+ (struct i40e_aqc_add_remove_tag_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (vsi_seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_tag);
+
+ cmd->seid = CPU_TO_LE16(vsi_seid);
+ cmd->tag = CPU_TO_LE16(tag);
+ if (direct_to_queue) {
+ cmd->flags = CPU_TO_LE16(I40E_AQC_ADD_TAG_FLAG_TO_QUEUE);
+ cmd->queue_number = CPU_TO_LE16(queue_num);
+ }
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status) {
+ if (tags_used != NULL)
+ *tags_used = LE16_TO_CPU(resp->tags_used);
+ if (tags_free != NULL)
+ *tags_free = LE16_TO_CPU(resp->tags_free);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_remove_tag - Remove an S- or E-tag
+ * @hw: pointer to the hw struct
+ * @vsi_seid: VSI SEID this tag is associated with
+ * @tag: value of the S-tag to delete
+ * @tags_used: return value, number of tags in use by this PF
+ * @tags_free: return value, number of unallocated tags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This deletes an S- or E-tag from a VSI in the switch complex. It returns
+ * the number of tags allocated by the PF, and the number of unallocated
+ * tags available.
+ **/
+enum i40e_status_code i40e_aq_remove_tag(struct i40e_hw *hw, u16 vsi_seid,
+ u16 tag, u16 *tags_used, u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_remove_tag *cmd =
+ (struct i40e_aqc_remove_tag *)&desc.params.raw;
+ struct i40e_aqc_add_remove_tag_completion *resp =
+ (struct i40e_aqc_add_remove_tag_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (vsi_seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_tag);
+
+ cmd->seid = CPU_TO_LE16(vsi_seid);
+ cmd->tag = CPU_TO_LE16(tag);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status) {
+ if (tags_used != NULL)
+ *tags_used = LE16_TO_CPU(resp->tags_used);
+ if (tags_free != NULL)
+ *tags_free = LE16_TO_CPU(resp->tags_free);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_mcast_etag - Add a multicast E-tag
+ * @hw: pointer to the hw struct
+ * @pv_seid: Port Virtualizer of this SEID to associate E-tag with
+ * @etag: value of E-tag to add
+ * @num_tags_in_buf: number of unicast E-tags in indirect buffer
+ * @buf: address of indirect buffer
+ * @tags_used: return value, number of E-tags in use by this port
+ * @tags_free: return value, number of unallocated M-tags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This associates a multicast E-tag with a port virtualizer. It will return
+ * the number of tags allocated by the PF, and the number of unallocated
+ * tags available.
+ *
+ * The indirect buffer pointed to by buf is a list of 2-byte E-tags,
+ * num_tags_in_buf long.
+ **/
+enum i40e_status_code i40e_aq_add_mcast_etag(struct i40e_hw *hw, u16 pv_seid,
+ u16 etag, u8 num_tags_in_buf, void *buf,
+ u16 *tags_used, u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_mcast_etag *cmd =
+ (struct i40e_aqc_add_remove_mcast_etag *)&desc.params.raw;
+ struct i40e_aqc_add_remove_mcast_etag_completion *resp =
+ (struct i40e_aqc_add_remove_mcast_etag_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 length = sizeof(u16) * num_tags_in_buf;
+
+ if ((pv_seid == 0) || (buf == NULL) || (num_tags_in_buf == 0))
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_add_multicast_etag);
+
+ cmd->pv_seid = CPU_TO_LE16(pv_seid);
+ cmd->etag = CPU_TO_LE16(etag);
+ cmd->num_unicast_etags = num_tags_in_buf;
+
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (length > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, buf, length, cmd_details);
+
+ if (!status) {
+ if (tags_used != NULL)
+ *tags_used = LE16_TO_CPU(resp->mcast_etags_used);
+ if (tags_free != NULL)
+ *tags_free = LE16_TO_CPU(resp->mcast_etags_free);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_remove_mcast_etag - Remove a multicast E-tag
+ * @hw: pointer to the hw struct
+ * @pv_seid: Port Virtualizer SEID this M-tag is associated with
+ * @etag: value of the E-tag to remove
+ * @tags_used: return value, number of tags in use by this port
+ * @tags_free: return value, number of unallocated tags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This deletes an E-tag from the port virtualizer. It will return
+ * the number of tags allocated by the port, and the number of unallocated
+ * tags available.
+ **/
+enum i40e_status_code i40e_aq_remove_mcast_etag(struct i40e_hw *hw, u16 pv_seid,
+ u16 etag, u16 *tags_used, u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_mcast_etag *cmd =
+ (struct i40e_aqc_add_remove_mcast_etag *)&desc.params.raw;
+ struct i40e_aqc_add_remove_mcast_etag_completion *resp =
+ (struct i40e_aqc_add_remove_mcast_etag_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+
+
+ if (pv_seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_remove_multicast_etag);
+
+ cmd->pv_seid = CPU_TO_LE16(pv_seid);
+ cmd->etag = CPU_TO_LE16(etag);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status) {
+ if (tags_used != NULL)
+ *tags_used = LE16_TO_CPU(resp->mcast_etags_used);
+ if (tags_free != NULL)
+ *tags_free = LE16_TO_CPU(resp->mcast_etags_free);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_update_tag - Update an S/E-tag
+ * @hw: pointer to the hw struct
+ * @vsi_seid: VSI SEID using this S-tag
+ * @old_tag: old tag value
+ * @new_tag: new tag value
+ * @tags_used: return value, number of tags in use by this PF
+ * @tags_free: return value, number of unallocated tags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This updates the value of the tag currently attached to this VSI
+ * in the switch complex. It will return the number of tags allocated
+ * by the PF, and the number of unallocated tags available.
+ **/
+enum i40e_status_code i40e_aq_update_tag(struct i40e_hw *hw, u16 vsi_seid,
+ u16 old_tag, u16 new_tag, u16 *tags_used,
+ u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_update_tag *cmd =
+ (struct i40e_aqc_update_tag *)&desc.params.raw;
+ struct i40e_aqc_update_tag_completion *resp =
+ (struct i40e_aqc_update_tag_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (vsi_seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_update_tag);
+
+ cmd->seid = CPU_TO_LE16(vsi_seid);
+ cmd->old_tag = CPU_TO_LE16(old_tag);
+ cmd->new_tag = CPU_TO_LE16(new_tag);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status) {
+ if (tags_used != NULL)
+ *tags_used = LE16_TO_CPU(resp->tags_used);
+ if (tags_free != NULL)
+ *tags_free = LE16_TO_CPU(resp->tags_free);
+ }
+
+ return status;
+}
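
As an editor's illustration only (not part of this commit), a caller that wants to swap the S-tag bound to a VSI passes the old and new values and, on success, reads back the PF's tag accounting; the SEID and tag values below are hypothetical.

/* Hedged sketch; vsi_seid and the tag values are made-up examples. */
enum i40e_status_code
example_swap_stag(struct i40e_hw *hw, u16 vsi_seid)
{
	u16 tags_used = 0, tags_free = 0;
	enum i40e_status_code status;

	status = i40e_aq_update_tag(hw, vsi_seid,
	    0x0064 /* old tag */, 0x00C8 /* new tag */,
	    &tags_used, &tags_free, NULL);
	/* On success, tags_used/tags_free describe the PF's tag pool. */
	return status;
}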
+
+/**
+ * i40e_aq_dcb_ignore_pfc - Ignore PFC for given TCs
+ * @hw: pointer to the hw struct
+ * @tcmap: TC map for request/release any ignore PFC condition
+ * @request: request or release ignore PFC condition
+ * @tcmap_ret: return TCs for which PFC is currently ignored
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This sends a request to set or release the ignore-PFC condition for the
+ * given TCs. It returns the TCs for which PFC is currently ignored.
+ **/
+enum i40e_status_code i40e_aq_dcb_ignore_pfc(struct i40e_hw *hw, u8 tcmap,
+ bool request, u8 *tcmap_ret,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_pfc_ignore *cmd_resp =
+ (struct i40e_aqc_pfc_ignore *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_ignore_pfc);
+
+ if (request)
+ cmd_resp->command_flags = I40E_AQC_PFC_IGNORE_SET;
+
+ cmd_resp->tc_bitmap = tcmap;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status) {
+ if (tcmap_ret != NULL)
+ *tcmap_ret = cmd_resp->tc_bitmap;
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_dcb_updated - DCB Updated Command
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * When LLDP is handled in the PF, this command is used by the PF
+ * to notify the EMP that a DCB setting has been modified.
+ * When LLDP is handled in the EMP, this command is used by the PF
+ * to notify the EMP whenever one of the following parameters is
+ * modified:
+ * - PFCLinkDelayAllowance in PRTDCB_GENC.PFCLDA
+ * - PCIRTT in PRTDCB_GENC.PCIRTT
+ * - Maximum Frame Size for non-FCoE TCs set by PRTDCB_TDPUC.MAX_TXFRAME.
+ * The EMP returns when the shared RPB settings have been
+ * recomputed and modified. The retval field in the descriptor
+ * is set to 0 when the RPB is modified.
+ **/
+enum i40e_status_code i40e_aq_dcb_updated(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_statistics - Add a statistics block to a VLAN in a switch.
+ * @hw: pointer to the hw struct
+ * @seid: defines the SEID of the switch for which the stats are requested
+ * @vlan_id: the VLAN ID for which the statistics are requested
+ * @stat_index: index of the statistics counters block assigned to this VLAN
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * XL710 supports 128 smonVlanStats counters. This command is used to
+ * allocate a set of smonVlanStats counters to a specific VLAN in a specific
+ * switch.
+ **/
+enum i40e_status_code i40e_aq_add_statistics(struct i40e_hw *hw, u16 seid,
+ u16 vlan_id, u16 *stat_index,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_statistics *cmd_resp =
+ (struct i40e_aqc_add_remove_statistics *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if ((seid == 0) || (stat_index == NULL))
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_statistics);
+
+ cmd_resp->seid = CPU_TO_LE16(seid);
+ cmd_resp->vlan = CPU_TO_LE16(vlan_id);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status)
+ *stat_index = LE16_TO_CPU(cmd_resp->stat_index);
+
+ return status;
+}
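
A hedged usage sketch (editor's addition, not from the commit): allocate a smonVlanStats block for a VLAN, keep the returned index, and release it later with the matching remove call; the switch SEID and VLAN ID are invented for the example.

/* Illustration only; seid and vlan_id are hypothetical. */
enum i40e_status_code
example_vlan_stats(struct i40e_hw *hw)
{
	u16 seid = 0x0010, vlan_id = 100, stat_index = 0;
	enum i40e_status_code status;

	status = i40e_aq_add_statistics(hw, seid, vlan_id, &stat_index, NULL);
	if (status != I40E_SUCCESS)
		return status;

	/* ... consume the counters behind stat_index ... */

	return i40e_aq_remove_statistics(hw, seid, vlan_id, stat_index, NULL);
}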
+
+/**
+ * i40e_aq_remove_statistics - Remove a statistics block from a VLAN in a switch.
+ * @hw: pointer to the hw struct
+ * @seid: defines the SEID of the switch for which the stats are requested
+ * @vlan_id: the VLAN ID for which the statistics are requested
+ * @stat_index: index of the statistics counters block assigned to this VLAN
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * XL710 supports 128 smonVlanStats counters. This command is used to
+ * deallocate a set of smonVlanStats counters from a specific VLAN in a
+ * specific switch.
+ **/
+enum i40e_status_code i40e_aq_remove_statistics(struct i40e_hw *hw, u16 seid,
+ u16 vlan_id, u16 stat_index,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_statistics *cmd =
+ (struct i40e_aqc_add_remove_statistics *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_remove_statistics);
+
+ cmd->seid = CPU_TO_LE16(seid);
+ cmd->vlan = CPU_TO_LE16(vlan_id);
+ cmd->stat_index = CPU_TO_LE16(stat_index);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_port_parameters - set physical port parameters.
+ * @hw: pointer to the hw struct
+ * @bad_frame_vsi: defines the VSI to which bad frames are forwarded
+ * @save_bad_pac: if set, packets with errors are forwarded to the bad frames VSI
+ * @pad_short_pac: if set, transmit packets smaller than 60 bytes are padded
+ * @double_vlan: if set, double VLAN is enabled
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_port_parameters(struct i40e_hw *hw,
+ u16 bad_frame_vsi, bool save_bad_pac,
+ bool pad_short_pac, bool double_vlan,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aqc_set_port_parameters *cmd;
+ enum i40e_status_code status;
+ struct i40e_aq_desc desc;
+ u16 command_flags = 0;
+
+ cmd = (struct i40e_aqc_set_port_parameters *)&desc.params.raw;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_port_parameters);
+
+ cmd->bad_frame_vsi = CPU_TO_LE16(bad_frame_vsi);
+ if (save_bad_pac)
+ command_flags |= I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS;
+ if (pad_short_pac)
+ command_flags |= I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS;
+ if (double_vlan)
+ command_flags |= I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA;
+ cmd->command_flags = CPU_TO_LE16(command_flags);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler
+ * @hw: pointer to the hw struct
+ * @seid: seid for the physical port/switching component/vsi
+ * @buff: Indirect buffer to hold data parameters and response
+ * @buff_size: Indirect buffer size
+ * @opcode: Tx scheduler AQ command opcode
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Generic command handler for Tx scheduler AQ commands
+ **/
+static enum i40e_status_code i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
+ void *buff, u16 buff_size,
+ enum i40e_admin_queue_opc opcode,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_tx_sched_ind *cmd =
+ (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
+ enum i40e_status_code status;
+ bool cmd_param_flag = FALSE;
+
+ switch (opcode) {
+ case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit:
+ case i40e_aqc_opc_configure_vsi_tc_bw:
+ case i40e_aqc_opc_enable_switching_comp_ets:
+ case i40e_aqc_opc_modify_switching_comp_ets:
+ case i40e_aqc_opc_disable_switching_comp_ets:
+ case i40e_aqc_opc_configure_switching_comp_ets_bw_limit:
+ case i40e_aqc_opc_configure_switching_comp_bw_config:
+ cmd_param_flag = TRUE;
+ break;
+ case i40e_aqc_opc_query_vsi_bw_config:
+ case i40e_aqc_opc_query_vsi_ets_sla_config:
+ case i40e_aqc_opc_query_switching_comp_ets_config:
+ case i40e_aqc_opc_query_port_ets_config:
+ case i40e_aqc_opc_query_switching_comp_bw_config:
+ cmd_param_flag = FALSE;
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc, opcode);
+
+ /* Indirect command */
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (cmd_param_flag)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ desc.datalen = CPU_TO_LE16(buff_size);
+
+ cmd->vsi_seid = CPU_TO_LE16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid
+ * @credit: BW limit credits (0 = disabled)
+ * @max_credit: Max BW limit credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+ u16 seid, u16 credit, u8 max_credit,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_configure_vsi_bw_limit *cmd =
+ (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_configure_vsi_bw_limit);
+
+ cmd->vsi_seid = CPU_TO_LE16(seid);
+ cmd->credit = CPU_TO_LE16(credit);
+ cmd->max_credit = max_credit;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_config_switch_comp_bw_limit - Configure Switching component BW Limit
+ * @hw: pointer to the hw struct
+ * @seid: switching component seid
+ * @credit: BW limit credits (0 = disabled)
+ * @max_bw: Max BW limit credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
+ u16 seid, u16 credit, u8 max_bw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_configure_switching_comp_bw_limit *cmd =
+ (struct i40e_aqc_configure_switching_comp_bw_limit *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_configure_switching_comp_bw_limit);
+
+ cmd->seid = CPU_TO_LE16(seid);
+ cmd->credit = CPU_TO_LE16(credit);
+ cmd->max_bw = max_bw;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_config_vsi_ets_sla_bw_limit - Config VSI BW Limit per TC
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid
+ * @bw_data: Buffer holding enabled TCs, per TC BW limit/credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_config_vsi_ets_sla_bw_limit(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_vsi_ets_sla_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_configure_vsi_ets_sla_bw_limit,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid
+ * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_configure_vsi_tc_bw,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_config_switch_comp_ets_bw_limit - Config Switch comp BW Limit per TC
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component
+ * @bw_data: Buffer holding enabled TCs, per TC BW limit/credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_config_switch_comp_ets_bw_limit(
+ struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_configure_switching_comp_ets_bw_limit_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_configure_switching_comp_ets_bw_limit,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_query_vsi_bw_config - Query VSI BW configuration
+ * @hw: pointer to the hw struct
+ * @seid: seid of the VSI
+ * @bw_data: Buffer to hold VSI BW configuration
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_vsi_bw_config,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC
+ * @hw: pointer to the hw struct
+ * @seid: seid of the VSI
+ * @bw_data: Buffer to hold VSI BW configuration per TC
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_vsi_ets_sla_config,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component
+ * @bw_data: Buffer to hold switching component's per TC BW config
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_switching_comp_ets_config,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration
+ * @hw: pointer to the hw struct
+ * @seid: seid of the VSI or switching component connected to Physical Port
+ * @bw_data: Buffer to hold current ETS configuration for the Physical Port
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_query_port_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_port_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_port_ets_config,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component
+ * @bw_data: Buffer to hold switching component's BW configuration
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_switching_comp_bw_config,
+ cmd_details);
+}
+
+/**
+ * i40e_validate_filter_settings
+ * @hw: pointer to the hardware structure
+ * @settings: Filter control settings
+ *
+ * Check and validate the filter control settings passed.
+ * The function checks for the valid filter/context sizes being
+ * passed for FCoE and PE.
+ *
+ * Returns I40E_SUCCESS if the values passed are valid and within
+ * range else returns an error.
+ **/
+static enum i40e_status_code i40e_validate_filter_settings(struct i40e_hw *hw,
+ struct i40e_filter_control_settings *settings)
+{
+ u32 fcoe_cntx_size, fcoe_filt_size;
+ u32 pe_cntx_size, pe_filt_size;
+ u32 fcoe_fmax;
+
+ u32 val;
+
+ /* Validate FCoE settings passed */
+ switch (settings->fcoe_filt_num) {
+ case I40E_HASH_FILTER_SIZE_1K:
+ case I40E_HASH_FILTER_SIZE_2K:
+ case I40E_HASH_FILTER_SIZE_4K:
+ case I40E_HASH_FILTER_SIZE_8K:
+ case I40E_HASH_FILTER_SIZE_16K:
+ case I40E_HASH_FILTER_SIZE_32K:
+ fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
+ fcoe_filt_size <<= (u32)settings->fcoe_filt_num;
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ switch (settings->fcoe_cntx_num) {
+ case I40E_DMA_CNTX_SIZE_512:
+ case I40E_DMA_CNTX_SIZE_1K:
+ case I40E_DMA_CNTX_SIZE_2K:
+ case I40E_DMA_CNTX_SIZE_4K:
+ fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
+ fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num;
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ /* Validate PE settings passed */
+ switch (settings->pe_filt_num) {
+ case I40E_HASH_FILTER_SIZE_1K:
+ case I40E_HASH_FILTER_SIZE_2K:
+ case I40E_HASH_FILTER_SIZE_4K:
+ case I40E_HASH_FILTER_SIZE_8K:
+ case I40E_HASH_FILTER_SIZE_16K:
+ case I40E_HASH_FILTER_SIZE_32K:
+ case I40E_HASH_FILTER_SIZE_64K:
+ case I40E_HASH_FILTER_SIZE_128K:
+ case I40E_HASH_FILTER_SIZE_256K:
+ case I40E_HASH_FILTER_SIZE_512K:
+ case I40E_HASH_FILTER_SIZE_1M:
+ pe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
+ pe_filt_size <<= (u32)settings->pe_filt_num;
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ switch (settings->pe_cntx_num) {
+ case I40E_DMA_CNTX_SIZE_512:
+ case I40E_DMA_CNTX_SIZE_1K:
+ case I40E_DMA_CNTX_SIZE_2K:
+ case I40E_DMA_CNTX_SIZE_4K:
+ case I40E_DMA_CNTX_SIZE_8K:
+ case I40E_DMA_CNTX_SIZE_16K:
+ case I40E_DMA_CNTX_SIZE_32K:
+ case I40E_DMA_CNTX_SIZE_64K:
+ case I40E_DMA_CNTX_SIZE_128K:
+ case I40E_DMA_CNTX_SIZE_256K:
+ pe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
+ pe_cntx_size <<= (u32)settings->pe_cntx_num;
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */
+ val = rd32(hw, I40E_GLHMC_FCOEFMAX);
+ fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK)
+ >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT;
+ if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax)
+ return I40E_ERR_INVALID_SIZE;
+
+ return I40E_SUCCESS;
+}
+
+/**
+ * i40e_set_filter_control
+ * @hw: pointer to the hardware structure
+ * @settings: Filter control settings
+ *
+ * Set the Queue Filters for PE/FCoE and enable filters required
+ * for a single PF. It is expected that these settings are programmed
+ * at the driver initialization time.
+ **/
+enum i40e_status_code i40e_set_filter_control(struct i40e_hw *hw,
+ struct i40e_filter_control_settings *settings)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+ u32 hash_lut_size = 0;
+ u32 val;
+
+ if (!settings)
+ return I40E_ERR_PARAM;
+
+ /* Validate the input settings */
+ ret = i40e_validate_filter_settings(hw, settings);
+ if (ret)
+ return ret;
+
+ /* Read the PF Queue Filter control register */
+ val = rd32(hw, I40E_PFQF_CTL_0);
+
+ /* Program required PE hash buckets for the PF */
+ val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK;
+ val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_PEHSIZE_MASK;
+ /* Program required PE contexts for the PF */
+ val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK;
+ val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_PEDSIZE_MASK;
+
+ /* Program required FCoE hash buckets for the PF */
+ val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
+ val |= ((u32)settings->fcoe_filt_num <<
+ I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
+ /* Program required FCoE DDP contexts for the PF */
+ val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
+ val |= ((u32)settings->fcoe_cntx_num <<
+ I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
+
+ /* Program Hash LUT size for the PF */
+ val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
+ if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512)
+ hash_lut_size = 1;
+ val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
+
+ /* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */
+ if (settings->enable_fdir)
+ val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
+ if (settings->enable_ethtype)
+ val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK;
+ if (settings->enable_macvlan)
+ val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK;
+
+ wr32(hw, I40E_PFQF_CTL_0, val);
+
+ return I40E_SUCCESS;
+}
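
To make the intended init-time flow concrete, here is a hedged sketch (editor's addition) of how a PF driver might fill in i40e_filter_control_settings before calling i40e_set_filter_control(); the sizes chosen are arbitrary examples, and only fields referenced by the function above are touched.

/* Editor's sketch; the chosen filter/context sizes are arbitrary. */
enum i40e_status_code
example_filter_init(struct i40e_hw *hw)
{
	struct i40e_filter_control_settings settings;

	i40e_memset(&settings, 0, sizeof(settings), I40E_NONDMA_MEM);
	settings.pe_filt_num = I40E_HASH_FILTER_SIZE_1K;
	settings.pe_cntx_num = I40E_DMA_CNTX_SIZE_512;
	settings.fcoe_filt_num = I40E_HASH_FILTER_SIZE_1K;
	settings.fcoe_cntx_num = I40E_DMA_CNTX_SIZE_512;
	settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
	settings.enable_fdir = TRUE;
	settings.enable_ethtype = TRUE;
	settings.enable_macvlan = TRUE;

	return i40e_set_filter_control(hw, &settings);
}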
+
+/**
+ * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter
+ * @hw: pointer to the hw struct
+ * @mac_addr: MAC address to use in the filter
+ * @ethtype: Ethertype to use in the filter
+ * @flags: Flags that needs to be applied to the filter
+ * @vsi_seid: seid of the control VSI
+ * @queue: VSI queue number to send the packet to
+ * @is_add: add the control packet filter if TRUE, otherwise remove it
+ * @stats: Structure to hold information on control filter counts
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This command adds or removes a control packet filter for a control VSI.
+ * On completion it updates the perfect filter counts in the stats
+ * structure.
+ **/
+enum i40e_status_code i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+ u8 *mac_addr, u16 ethtype, u16 flags,
+ u16 vsi_seid, u16 queue, bool is_add,
+ struct i40e_control_filter_stats *stats,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_control_packet_filter *cmd =
+ (struct i40e_aqc_add_remove_control_packet_filter *)
+ &desc.params.raw;
+ struct i40e_aqc_add_remove_control_packet_filter_completion *resp =
+ (struct i40e_aqc_add_remove_control_packet_filter_completion *)
+ &desc.params.raw;
+ enum i40e_status_code status;
+
+ if (vsi_seid == 0)
+ return I40E_ERR_PARAM;
+
+ if (is_add) {
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_add_control_packet_filter);
+ cmd->queue = CPU_TO_LE16(queue);
+ } else {
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_remove_control_packet_filter);
+ }
+
+ if (mac_addr)
+ i40e_memcpy(cmd->mac, mac_addr, I40E_ETH_LENGTH_OF_ADDRESS,
+ I40E_NONDMA_TO_NONDMA);
+
+ cmd->etype = CPU_TO_LE16(ethtype);
+ cmd->flags = CPU_TO_LE16(flags);
+ cmd->seid = CPU_TO_LE16(vsi_seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status && stats) {
+ stats->mac_etype_used = LE16_TO_CPU(resp->mac_etype_used);
+ stats->etype_used = LE16_TO_CPU(resp->etype_used);
+ stats->mac_etype_free = LE16_TO_CPU(resp->mac_etype_free);
+ stats->etype_free = LE16_TO_CPU(resp->etype_free);
+ }
+
+ return status;
+}
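
As a hedged illustration of the add path (not part of the commit): register a filter that steers one ethertype, here LLDP (0x88CC), to queue 0 of a control VSI. The SEID is a placeholder and the flags argument is left to the caller, since the flag bit definitions live in i40e_adminq_cmd.h and are not reproduced here.

/* Illustration only; ctrl_vsi_seid and flags come from the caller. */
enum i40e_status_code
example_add_lldp_filter(struct i40e_hw *hw, u16 ctrl_vsi_seid, u16 flags)
{
	struct i40e_control_filter_stats stats;

	i40e_memset(&stats, 0, sizeof(stats), I40E_NONDMA_MEM);
	return i40e_aq_add_rem_control_packet_filter(hw,
	    NULL /* match on ethertype only */, 0x88CC /* LLDP */,
	    flags, ctrl_vsi_seid, 0 /* queue */, TRUE /* is_add */,
	    &stats, NULL);
}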
+
+/**
+ * i40e_aq_add_cloud_filters
+ * @hw: pointer to the hardware structure
+ * @seid: VSI seid to add cloud filters from
+ * @filters: Buffer which contains the filters to be added
+ * @filter_count: number of filters contained in the buffer
+ *
+ * Set the cloud filters for a given VSI. The contents of the
+ * i40e_aqc_add_remove_cloud_filters_element_data are filled
+ * in by the caller of the function.
+ *
+ **/
+enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
+ u8 filter_count)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_cloud_filters *cmd =
+ (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+ u16 buff_len;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_add_cloud_filters);
+
+ buff_len = sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data) *
+ filter_count;
+ desc.datalen = CPU_TO_LE16(buff_len);
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ cmd->num_filters = filter_count;
+ cmd->seid = CPU_TO_LE16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_remove_cloud_filters
+ * @hw: pointer to the hardware structure
+ * @seid: VSI seid to remove cloud filters from
+ * @filters: Buffer which contains the filters to be removed
+ * @filter_count: number of filters contained in the buffer
+ *
+ * Remove the cloud filters for a given VSI. The contents of the
+ * i40e_aqc_add_remove_cloud_filters_element_data are filled
+ * in by the caller of the function.
+ *
+ **/
+enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
+ u8 filter_count)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_cloud_filters *cmd =
+ (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 buff_len;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_remove_cloud_filters);
+
+ buff_len = sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data) *
+ filter_count;
+ desc.datalen = CPU_TO_LE16(buff_len);
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ cmd->num_filters = filter_count;
+ cmd->seid = CPU_TO_LE16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_alternate_write
+ * @hw: pointer to the hardware structure
+ * @reg_addr0: address of the first dword to be written
+ * @reg_val0: value to be written to 'reg_addr0'
+ * @reg_addr1: address of the second dword to be written
+ * @reg_val1: value to be written to 'reg_addr1'
+ *
+ * Write one or two dwords to alternate structure. Fields are indicated
+ * by 'reg_addr0' and 'reg_addr1' register numbers.
+ *
+ **/
+enum i40e_status_code i40e_aq_alternate_write(struct i40e_hw *hw,
+ u32 reg_addr0, u32 reg_val0,
+ u32 reg_addr1, u32 reg_val1)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_alternate_write *cmd_resp =
+ (struct i40e_aqc_alternate_write *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_write);
+ cmd_resp->address0 = CPU_TO_LE32(reg_addr0);
+ cmd_resp->address1 = CPU_TO_LE32(reg_addr1);
+ cmd_resp->data0 = CPU_TO_LE32(reg_val0);
+ cmd_resp->data1 = CPU_TO_LE32(reg_val1);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_alternate_write_indirect
+ * @hw: pointer to the hardware structure
+ * @addr: address of a first register to be modified
+ * @dw_count: number of alternate structure fields to write
+ * @buffer: pointer to the command buffer
+ *
+ * Write 'dw_count' dwords from 'buffer' to alternate structure
+ * starting at 'addr'.
+ *
+ **/
+enum i40e_status_code i40e_aq_alternate_write_indirect(struct i40e_hw *hw,
+ u32 addr, u32 dw_count, void *buffer)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_alternate_ind_write *cmd_resp =
+ (struct i40e_aqc_alternate_ind_write *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (buffer == NULL)
+ return I40E_ERR_PARAM;
+
+ /* Indirect command */
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_alternate_write_indirect);
+
+ desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_RD);
+ desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF);
+ if (dw_count > (I40E_AQ_LARGE_BUF/4))
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ cmd_resp->address = CPU_TO_LE32(addr);
+ cmd_resp->length = CPU_TO_LE32(dw_count);
+ cmd_resp->addr_high = CPU_TO_LE32(I40E_HI_WORD((u64)buffer));
+ cmd_resp->addr_low = CPU_TO_LE32(I40E_LO_DWORD((u64)buffer));
+
+ status = i40e_asq_send_command(hw, &desc, buffer,
+ I40E_LO_DWORD(4*dw_count), NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_alternate_read
+ * @hw: pointer to the hardware structure
+ * @reg_addr0: address of first dword to be read
+ * @reg_val0: pointer for data read from 'reg_addr0'
+ * @reg_addr1: address of second dword to be read
+ * @reg_val1: pointer for data read from 'reg_addr1'
+ *
+ * Read one or two dwords from alternate structure. Fields are indicated
+ * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer
+ * is not passed then only register at 'reg_addr0' is read.
+ *
+ **/
+enum i40e_status_code i40e_aq_alternate_read(struct i40e_hw *hw,
+ u32 reg_addr0, u32 *reg_val0,
+ u32 reg_addr1, u32 *reg_val1)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_alternate_write *cmd_resp =
+ (struct i40e_aqc_alternate_write *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (reg_val0 == NULL)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read);
+ cmd_resp->address0 = CPU_TO_LE32(reg_addr0);
+ cmd_resp->address1 = CPU_TO_LE32(reg_addr1);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ if (status == I40E_SUCCESS) {
+ *reg_val0 = LE32_TO_CPU(cmd_resp->data0);
+
+ if (reg_val1 != NULL)
+ *reg_val1 = LE32_TO_CPU(cmd_resp->data1);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_alternate_read_indirect
+ * @hw: pointer to the hardware structure
+ * @addr: address of the alternate structure field
+ * @dw_count: number of alternate structure fields to read
+ * @buffer: pointer to the command buffer
+ *
+ * Read 'dw_count' dwords from alternate structure starting at 'addr' and
+ * place them in 'buffer'. The buffer should be allocated by the caller.
+ *
+ **/
+enum i40e_status_code i40e_aq_alternate_read_indirect(struct i40e_hw *hw,
+ u32 addr, u32 dw_count, void *buffer)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_alternate_ind_write *cmd_resp =
+ (struct i40e_aqc_alternate_ind_write *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (buffer == NULL)
+ return I40E_ERR_PARAM;
+
+ /* Indirect command */
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_alternate_read_indirect);
+
+ desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_RD);
+ desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF);
+ if (dw_count > (I40E_AQ_LARGE_BUF/4))
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ cmd_resp->address = CPU_TO_LE32(addr);
+ cmd_resp->length = CPU_TO_LE32(dw_count);
+ cmd_resp->addr_high = CPU_TO_LE32(I40E_HI_DWORD((u64)buffer));
+ cmd_resp->addr_low = CPU_TO_LE32(I40E_LO_DWORD((u64)buffer));
+
+ status = i40e_asq_send_command(hw, &desc, buffer,
+ I40E_LO_DWORD(4*dw_count), NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_alternate_clear
+ * @hw: pointer to the HW structure.
+ *
+ * Clear the alternate structures of the port from which the function
+ * is called.
+ *
+ **/
+enum i40e_status_code i40e_aq_alternate_clear(struct i40e_hw *hw)
+{
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_alternate_clear_port);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_alternate_write_done
+ * @hw: pointer to the HW structure.
+ * @bios_mode: indicates whether the command is executed by UEFI or legacy BIOS
+ * @reset_needed: indicates the SW should trigger GLOBAL reset
+ *
+ * Indicates to the FW that alternate structures have been changed.
+ *
+ **/
+enum i40e_status_code i40e_aq_alternate_write_done(struct i40e_hw *hw,
+ u8 bios_mode, bool *reset_needed)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_alternate_write_done *cmd =
+ (struct i40e_aqc_alternate_write_done *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (reset_needed == NULL)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_alternate_write_done);
+
+ cmd->cmd_flags = CPU_TO_LE16(bios_mode);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+ if (!status)
+ *reset_needed = ((LE16_TO_CPU(cmd->cmd_flags) &
+ I40E_AQ_ALTERNATE_RESET_NEEDED) != 0);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_oem_mode
+ * @hw: pointer to the HW structure.
+ * @oem_mode: the OEM mode to be used
+ *
+ * Sets the device to a specific operating mode. Currently the only supported
+ * mode is no_clp, which causes FW to refrain from using Alternate RAM.
+ *
+ **/
+enum i40e_status_code i40e_aq_set_oem_mode(struct i40e_hw *hw,
+ u8 oem_mode)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_alternate_write_done *cmd =
+ (struct i40e_aqc_alternate_write_done *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_alternate_set_mode);
+
+ cmd->cmd_flags = CPU_TO_LE16(oem_mode);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_resume_port_tx
+ * @hw: pointer to the hardware structure
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Resume port's Tx traffic
+ **/
+enum i40e_status_code i40e_aq_resume_port_tx(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_set_pci_config_data - store PCI bus info
+ * @hw: pointer to hardware structure
+ * @link_status: the link status word from PCI config space
+ *
+ * Stores the PCI bus info (speed, width, type) within the i40e_hw structure
+ **/
+void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
+{
+ hw->bus.type = i40e_bus_type_pci_express;
+
+ switch (link_status & I40E_PCI_LINK_WIDTH) {
+ case I40E_PCI_LINK_WIDTH_1:
+ hw->bus.width = i40e_bus_width_pcie_x1;
+ break;
+ case I40E_PCI_LINK_WIDTH_2:
+ hw->bus.width = i40e_bus_width_pcie_x2;
+ break;
+ case I40E_PCI_LINK_WIDTH_4:
+ hw->bus.width = i40e_bus_width_pcie_x4;
+ break;
+ case I40E_PCI_LINK_WIDTH_8:
+ hw->bus.width = i40e_bus_width_pcie_x8;
+ break;
+ default:
+ hw->bus.width = i40e_bus_width_unknown;
+ break;
+ }
+
+ switch (link_status & I40E_PCI_LINK_SPEED) {
+ case I40E_PCI_LINK_SPEED_2500:
+ hw->bus.speed = i40e_bus_speed_2500;
+ break;
+ case I40E_PCI_LINK_SPEED_5000:
+ hw->bus.speed = i40e_bus_speed_5000;
+ break;
+ case I40E_PCI_LINK_SPEED_8000:
+ hw->bus.speed = i40e_bus_speed_8000;
+ break;
+ default:
+ hw->bus.speed = i40e_bus_speed_unknown;
+ break;
+ }
+}
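
For context, a hedged sketch (editor's addition) of how a FreeBSD attach routine could obtain the link status word and hand it to this helper. It assumes the standard pci(9) accessors and the PCIER_LINK_STA offset from <dev/pci/pcireg.h>; error handling is minimal.

/* Editor's sketch, assuming the stock FreeBSD pci(9) helpers. */
static void
example_store_bus_info(device_t dev, struct i40e_hw *hw)
{
	int pcie_off;
	u16 link_status;

	if (pci_find_cap(dev, PCIY_EXPRESS, &pcie_off) != 0)
		return;		/* no PCI Express capability found */

	link_status = pci_read_config(dev, pcie_off + PCIER_LINK_STA, 2);
	i40e_set_pci_config_data(hw, link_status);
}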
+
+/**
+ * i40e_read_bw_from_alt_ram
+ * @hw: pointer to the hardware structure
+ * @max_bw: pointer for max_bw read
+ * @min_bw: pointer for min_bw read
+ * @min_valid: pointer for bool that is TRUE if min_bw is a valid value
+ * @max_valid: pointer for bool that is TRUE if max_bw is a valid value
+ *
+ * Read the BW values from the alternate RAM for the given PF.
+ **/
+enum i40e_status_code i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
+ u32 *max_bw, u32 *min_bw,
+ bool *min_valid, bool *max_valid)
+{
+ enum i40e_status_code status;
+ u32 max_bw_addr, min_bw_addr;
+
+ /* Calculate the address of the min/max bw registers */
+ max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
+ I40E_ALT_STRUCT_MAX_BW_OFFSET +
+ (I40E_ALT_STRUCT_DWORDS_PER_PF*hw->pf_id);
+ min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
+ I40E_ALT_STRUCT_MIN_BW_OFFSET +
+ (I40E_ALT_STRUCT_DWORDS_PER_PF*hw->pf_id);
+
+ /* Read the bandwidths from alt ram */
+ status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw,
+ min_bw_addr, min_bw);
+
+ if (*min_bw & I40E_ALT_BW_VALID_MASK)
+ *min_valid = TRUE;
+ else
+ *min_valid = FALSE;
+
+ if (*max_bw & I40E_ALT_BW_VALID_MASK)
+ *max_valid = TRUE;
+ else
+ *max_valid = FALSE;
+
+ return status;
+}
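
A short usage sketch (editor's illustration): fetch the PF's minimum and maximum bandwidth from alternate RAM and trust only the values whose valid bits were set.

/* Illustration only. */
enum i40e_status_code
example_read_pf_bw(struct i40e_hw *hw)
{
	u32 max_bw = 0, min_bw = 0;
	bool min_valid = FALSE, max_valid = FALSE;
	enum i40e_status_code status;

	status = i40e_read_bw_from_alt_ram(hw, &max_bw, &min_bw,
	    &min_valid, &max_valid);
	if (status != I40E_SUCCESS)
		return status;
	/* Use max_bw only when max_valid is TRUE, and min_bw only when
	 * min_valid is TRUE. */
	return I40E_SUCCESS;
}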
+
+/**
+ * i40e_aq_configure_partition_bw
+ * @hw: pointer to the hardware structure
+ * @bw_data: Buffer holding valid pfs and bw limits
+ * @cmd_details: pointer to command details
+ *
+ * Configure partitions guaranteed/max bw
+ **/
+enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
+ struct i40e_aqc_configure_partition_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ enum i40e_status_code status;
+ struct i40e_aq_desc desc;
+ u16 bwd_size = sizeof(struct i40e_aqc_configure_partition_bw_data);
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_configure_partition_bw);
+
+ /* Indirect command */
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+
+ if (bwd_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ desc.datalen = CPU_TO_LE16(bwd_size);
+
+ status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_send_msg_to_pf
+ * @hw: pointer to the hardware structure
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cmd_details: pointer to command details
+ *
+ * Send message to PF driver using admin queue. By default, this message
+ * is sent asynchronously, i.e. i40e_asq_send_command() does not wait for
+ * completion before returning.
+ **/
+enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
+ enum i40e_virtchnl_ops v_opcode,
+ enum i40e_status_code v_retval,
+ u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_asq_cmd_details details;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_SI);
+ desc.cookie_high = CPU_TO_LE32(v_opcode);
+ desc.cookie_low = CPU_TO_LE32(v_retval);
+ if (msglen) {
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF
+ | I40E_AQ_FLAG_RD));
+ if (msglen > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(msglen);
+ }
+ if (!cmd_details) {
+ i40e_memset(&details, 0, sizeof(details), I40E_NONDMA_MEM);
+ details.async = TRUE;
+ cmd_details = &details;
+ }
+ status = i40e_asq_send_command(hw, (struct i40e_aq_desc *)&desc, msg,
+ msglen, cmd_details);
+ return status;
+}
+
+/**
+ * i40e_vf_parse_hw_config
+ * @hw: pointer to the hardware structure
+ * @msg: pointer to the virtual channel VF resource structure
+ *
+ * Given a VF resource message from the PF, populate the hw struct
+ * with appropriate information.
+ **/
+void i40e_vf_parse_hw_config(struct i40e_hw *hw,
+ struct i40e_virtchnl_vf_resource *msg)
+{
+ struct i40e_virtchnl_vsi_resource *vsi_res;
+ int i;
+
+ vsi_res = &msg->vsi_res[0];
+
+ hw->dev_caps.num_vsis = msg->num_vsis;
+ hw->dev_caps.num_rx_qp = msg->num_queue_pairs;
+ hw->dev_caps.num_tx_qp = msg->num_queue_pairs;
+ hw->dev_caps.num_msix_vectors_vf = msg->max_vectors;
+ hw->dev_caps.dcb = msg->vf_offload_flags &
+ I40E_VIRTCHNL_VF_OFFLOAD_L2;
+ hw->dev_caps.fcoe = (msg->vf_offload_flags &
+ I40E_VIRTCHNL_VF_OFFLOAD_FCOE) ? 1 : 0;
+ hw->dev_caps.iwarp = (msg->vf_offload_flags &
+ I40E_VIRTCHNL_VF_OFFLOAD_IWARP) ? 1 : 0;
+ for (i = 0; i < msg->num_vsis; i++) {
+ if (vsi_res->vsi_type == I40E_VSI_SRIOV) {
+ i40e_memcpy(hw->mac.perm_addr,
+ vsi_res->default_mac_addr,
+ I40E_ETH_LENGTH_OF_ADDRESS,
+ I40E_NONDMA_TO_NONDMA);
+ i40e_memcpy(hw->mac.addr, vsi_res->default_mac_addr,
+ I40E_ETH_LENGTH_OF_ADDRESS,
+ I40E_NONDMA_TO_NONDMA);
+ }
+ vsi_res++;
+ }
+}
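
A hedged sketch (editor's addition) of where this parser fits: once the PF has answered the VF's resource request over the virtual channel, the VF driver hands the reply buffer to i40e_vf_parse_hw_config(). The buffer handling shown is hypothetical.

/* Illustration only; msg_buf is assumed to hold the PF's reply. */
void
example_handle_vf_resources(struct i40e_hw *hw, u8 *msg_buf)
{
	struct i40e_virtchnl_vf_resource *res =
	    (struct i40e_virtchnl_vf_resource *)msg_buf;

	i40e_vf_parse_hw_config(hw, res);
	/* hw->dev_caps and hw->mac.addr/perm_addr are now populated. */
}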
+
+/**
+ * i40e_vf_reset
+ * @hw: pointer to the hardware structure
+ *
+ * Send a VF_RESET message to the PF. Does not wait for response from PF
+ * as none will be forthcoming. Immediately after calling this function,
+ * the admin queue should be shut down and (optionally) reinitialized.
+ **/
+enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw)
+{
+ return i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_RESET_VF,
+ I40E_SUCCESS, NULL, 0, NULL);
+}
diff --git a/sys/dev/ixl/i40e_hmc.c b/sys/dev/ixl/i40e_hmc.c
new file mode 100755
index 0000000..81cb2ae
--- /dev/null
+++ b/sys/dev/ixl/i40e_hmc.c
@@ -0,0 +1,376 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2014, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#include "i40e_osdep.h"
+#include "i40e_register.h"
+#include "i40e_status.h"
+#include "i40e_alloc.h"
+#include "i40e_hmc.h"
+#ifndef I40E_NO_TYPE_HEADER
+#include "i40e_type.h"
+#endif
+
+/**
+ * i40e_add_sd_table_entry - Adds a segment descriptor to the table
+ * @hw: pointer to our hw struct
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @sd_index: segment descriptor index to manipulate
+ * @type: what type of segment descriptor we're manipulating
+ * @direct_mode_sz: size to alloc in direct mode
+ **/
+enum i40e_status_code i40e_add_sd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 sd_index,
+ enum i40e_sd_entry_type type,
+ u64 direct_mode_sz)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ struct i40e_hmc_sd_entry *sd_entry;
+ enum i40e_memory_type mem_type;
+ bool dma_mem_alloc_done = FALSE;
+ struct i40e_dma_mem mem;
+ u64 alloc_len;
+
+ if (NULL == hmc_info->sd_table.sd_entry) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_add_sd_table_entry: bad sd_entry\n");
+ goto exit;
+ }
+
+ if (sd_index >= hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_SD_INDEX;
+ DEBUGOUT("i40e_add_sd_table_entry: bad sd_index\n");
+ goto exit;
+ }
+
+ sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
+ if (!sd_entry->valid) {
+ if (I40E_SD_TYPE_PAGED == type) {
+ mem_type = i40e_mem_pd;
+ alloc_len = I40E_HMC_PAGED_BP_SIZE;
+ } else {
+ mem_type = i40e_mem_bp_jumbo;
+ alloc_len = direct_mode_sz;
+ }
+
+ /* allocate a 4K pd page or 2M backing page */
+ ret_code = i40e_allocate_dma_mem(hw, &mem, mem_type, alloc_len,
+ I40E_HMC_PD_BP_BUF_ALIGNMENT);
+ if (ret_code)
+ goto exit;
+ dma_mem_alloc_done = TRUE;
+ if (I40E_SD_TYPE_PAGED == type) {
+ ret_code = i40e_allocate_virt_mem(hw,
+ &sd_entry->u.pd_table.pd_entry_virt_mem,
+ sizeof(struct i40e_hmc_pd_entry) * 512);
+ if (ret_code)
+ goto exit;
+ sd_entry->u.pd_table.pd_entry =
+ (struct i40e_hmc_pd_entry *)
+ sd_entry->u.pd_table.pd_entry_virt_mem.va;
+ i40e_memcpy(&sd_entry->u.pd_table.pd_page_addr,
+ &mem, sizeof(struct i40e_dma_mem),
+ I40E_NONDMA_TO_NONDMA);
+ } else {
+ i40e_memcpy(&sd_entry->u.bp.addr,
+ &mem, sizeof(struct i40e_dma_mem),
+ I40E_NONDMA_TO_NONDMA);
+ sd_entry->u.bp.sd_pd_index = sd_index;
+ }
+ /* initialize the sd entry */
+ hmc_info->sd_table.sd_entry[sd_index].entry_type = type;
+
+ /* increment the ref count */
+ I40E_INC_SD_REFCNT(&hmc_info->sd_table);
+ }
+ /* Increment backing page reference count */
+ if (I40E_SD_TYPE_DIRECT == sd_entry->entry_type)
+ I40E_INC_BP_REFCNT(&sd_entry->u.bp);
+exit:
+ if (I40E_SUCCESS != ret_code)
+ if (dma_mem_alloc_done)
+ i40e_free_dma_mem(hw, &mem);
+
+ return ret_code;
+}
+
+/**
+ * i40e_add_pd_table_entry - Adds page descriptor to the specified table
+ * @hw: pointer to our HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @pd_index: which page descriptor index to manipulate
+ *
+ * This function:
+ * 1. Initializes the pd entry
+ * 2. Adds the pd_entry to the pd_table
+ * 3. Marks the entry valid in the i40e_hmc_pd_entry structure
+ * 4. Initializes the pd_entry's ref count to 1
+ * assumptions:
+ * 1. The memory for the pd should be pinned down, physically contiguous,
+ *    aligned on a 4K boundary, and zeroed.
+ * 2. It should be 4K in size.
+ **/
+enum i40e_status_code i40e_add_pd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 pd_index)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ struct i40e_hmc_pd_table *pd_table;
+ struct i40e_hmc_pd_entry *pd_entry;
+ struct i40e_dma_mem mem;
+ u32 sd_idx, rel_pd_idx;
+ u64 *pd_addr;
+ u64 page_desc;
+
+ if (pd_index / I40E_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
+ DEBUGOUT("i40e_add_pd_table_entry: bad pd_index\n");
+ goto exit;
+ }
+
+ /* find corresponding sd */
+ sd_idx = (pd_index / I40E_HMC_PD_CNT_IN_SD);
+ if (I40E_SD_TYPE_PAGED !=
+ hmc_info->sd_table.sd_entry[sd_idx].entry_type)
+ goto exit;
+
+ rel_pd_idx = (pd_index % I40E_HMC_PD_CNT_IN_SD);
+ pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+ pd_entry = &pd_table->pd_entry[rel_pd_idx];
+ if (!pd_entry->valid) {
+ /* allocate a 4K backing page */
+ ret_code = i40e_allocate_dma_mem(hw, &mem, i40e_mem_bp,
+ I40E_HMC_PAGED_BP_SIZE,
+ I40E_HMC_PD_BP_BUF_ALIGNMENT);
+ if (ret_code)
+ goto exit;
+
+ i40e_memcpy(&pd_entry->bp.addr, &mem,
+ sizeof(struct i40e_dma_mem), I40E_NONDMA_TO_NONDMA);
+ pd_entry->bp.sd_pd_index = pd_index;
+ pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;
+ /* Set page address and valid bit */
+ page_desc = mem.pa | 0x1;
+
+ pd_addr = (u64 *)pd_table->pd_page_addr.va;
+ pd_addr += rel_pd_idx;
+
+ /* Add the backing page physical address in the pd entry */
+ i40e_memcpy(pd_addr, &page_desc, sizeof(u64),
+ I40E_NONDMA_TO_DMA);
+
+ pd_entry->sd_index = sd_idx;
+ pd_entry->valid = TRUE;
+ I40E_INC_PD_REFCNT(pd_table);
+ }
+ I40E_INC_BP_REFCNT(&pd_entry->bp);
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_remove_pd_bp - remove a backing page from a page descriptor
+ * @hw: pointer to our HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ *
+ * This function:
+ * 1. Marks the entry in the pd table (for paged address mode) or in the
+ *    sd table (for direct address mode) invalid.
+ * 2. Writes to register PMPDINV to invalidate the backing page in the FV cache
+ * 3. Decrements the ref count for the pd_entry
+ * assumptions:
+ * 1. Caller can deallocate the memory used by backing storage after this
+ * function returns.
+ **/
+enum i40e_status_code i40e_remove_pd_bp(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ struct i40e_hmc_pd_entry *pd_entry;
+ struct i40e_hmc_pd_table *pd_table;
+ struct i40e_hmc_sd_entry *sd_entry;
+ u32 sd_idx, rel_pd_idx;
+ u64 *pd_addr;
+
+ /* calculate index */
+ sd_idx = idx / I40E_HMC_PD_CNT_IN_SD;
+ rel_pd_idx = idx % I40E_HMC_PD_CNT_IN_SD;
+ if (sd_idx >= hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
+ DEBUGOUT("i40e_remove_pd_bp: bad idx\n");
+ goto exit;
+ }
+ sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
+ if (I40E_SD_TYPE_PAGED != sd_entry->entry_type) {
+ ret_code = I40E_ERR_INVALID_SD_TYPE;
+ DEBUGOUT("i40e_remove_pd_bp: wrong sd_entry type\n");
+ goto exit;
+ }
+ /* get the entry and decrease its ref counter */
+ pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+ pd_entry = &pd_table->pd_entry[rel_pd_idx];
+ I40E_DEC_BP_REFCNT(&pd_entry->bp);
+ if (pd_entry->bp.ref_cnt)
+ goto exit;
+
+ /* mark the entry invalid */
+ pd_entry->valid = FALSE;
+ I40E_DEC_PD_REFCNT(pd_table);
+ pd_addr = (u64 *)pd_table->pd_page_addr.va;
+ pd_addr += rel_pd_idx;
+ i40e_memset(pd_addr, 0, sizeof(u64), I40E_DMA_MEM);
+ I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
+
+ /* free memory here */
+ ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
+ if (I40E_SUCCESS != ret_code)
+ goto exit;
+ if (!pd_table->ref_cnt)
+ i40e_free_virt_mem(hw, &pd_table->pd_entry_virt_mem);
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ **/
+enum i40e_status_code i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
+ u32 idx)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ struct i40e_hmc_sd_entry *sd_entry;
+
+ /* get the entry and decrease its ref counter */
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+ I40E_DEC_BP_REFCNT(&sd_entry->u.bp);
+ if (sd_entry->u.bp.ref_cnt) {
+ ret_code = I40E_ERR_NOT_READY;
+ goto exit;
+ }
+ I40E_DEC_SD_REFCNT(&hmc_info->sd_table);
+
+ /* mark the entry invalid */
+ sd_entry->valid = FALSE;
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_remove_sd_bp_new - Removes a backing page from a segment descriptor
+ * @hw: pointer to our hw struct
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ * @is_pf: used to distinguish between VF and PF
+ **/
+enum i40e_status_code i40e_remove_sd_bp_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf)
+{
+ struct i40e_hmc_sd_entry *sd_entry;
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ /* get the entry and decrease its ref counter */
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+ if (is_pf) {
+ I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
+ } else {
+ ret_code = I40E_NOT_SUPPORTED;
+ goto exit;
+ }
+ ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr));
+ if (I40E_SUCCESS != ret_code)
+ goto exit;
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_prep_remove_pd_page - Prepares to remove a PD page from sd entry.
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: segment descriptor index to find the relevant page descriptor
+ **/
+enum i40e_status_code i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
+ u32 idx)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ struct i40e_hmc_sd_entry *sd_entry;
+
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+
+ if (sd_entry->u.pd_table.ref_cnt) {
+ ret_code = I40E_ERR_NOT_READY;
+ goto exit;
+ }
+
+ /* mark the entry invalid */
+ sd_entry->valid = FALSE;
+
+ I40E_DEC_SD_REFCNT(&hmc_info->sd_table);
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_remove_pd_page_new - Removes a PD page from sd entry.
+ * @hw: pointer to our hw struct
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: segment descriptor index to find the relevant page descriptor
+ * @is_pf: used to distinguish between VF and PF
+ **/
+enum i40e_status_code i40e_remove_pd_page_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ struct i40e_hmc_sd_entry *sd_entry;
+
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+ if (is_pf) {
+ I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
+ } else {
+ ret_code = I40E_NOT_SUPPORTED;
+ goto exit;
+ }
+ /* free memory here */
+ ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr));
+ if (I40E_SUCCESS != ret_code)
+ goto exit;
+exit:
+ return ret_code;
+}
diff --git a/sys/dev/ixl/i40e_hmc.h b/sys/dev/ixl/i40e_hmc.h
new file mode 100755
index 0000000..cba325f
--- /dev/null
+++ b/sys/dev/ixl/i40e_hmc.h
@@ -0,0 +1,244 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2014, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _I40E_HMC_H_
+#define _I40E_HMC_H_
+
+#define I40E_HMC_MAX_BP_COUNT 512
+
+/* forward-declare the HW struct for the compiler */
+struct i40e_hw;
+
+#define I40E_HMC_INFO_SIGNATURE 0x484D5347 /* HMSG */
+#define I40E_HMC_PD_CNT_IN_SD 512
+#define I40E_HMC_DIRECT_BP_SIZE 0x200000 /* 2M */
+#define I40E_HMC_PAGED_BP_SIZE 4096
+#define I40E_HMC_PD_BP_BUF_ALIGNMENT 4096
+#define I40E_FIRST_VF_FPM_ID 16
+
+struct i40e_hmc_obj_info {
+ u64 base; /* base addr in FPM */
+ u32 max_cnt; /* max count available for this hmc func */
+ u32 cnt; /* count of objects driver actually wants to create */
+ u64 size; /* size in bytes of one object */
+};
+
+enum i40e_sd_entry_type {
+ I40E_SD_TYPE_INVALID = 0,
+ I40E_SD_TYPE_PAGED = 1,
+ I40E_SD_TYPE_DIRECT = 2
+};
+
+struct i40e_hmc_bp {
+ enum i40e_sd_entry_type entry_type;
+ struct i40e_dma_mem addr; /* populate to be used by hw */
+ u32 sd_pd_index;
+ u32 ref_cnt;
+};
+
+struct i40e_hmc_pd_entry {
+ struct i40e_hmc_bp bp;
+ u32 sd_index;
+ bool valid;
+};
+
+struct i40e_hmc_pd_table {
+ struct i40e_dma_mem pd_page_addr; /* populate to be used by hw */
+ struct i40e_hmc_pd_entry *pd_entry; /* [512] for sw book keeping */
+ struct i40e_virt_mem pd_entry_virt_mem; /* virt mem for pd_entry */
+
+ u32 ref_cnt;
+ u32 sd_index;
+};
+
+struct i40e_hmc_sd_entry {
+ enum i40e_sd_entry_type entry_type;
+ bool valid;
+
+ union {
+ struct i40e_hmc_pd_table pd_table;
+ struct i40e_hmc_bp bp;
+ } u;
+};
+
+struct i40e_hmc_sd_table {
+ struct i40e_virt_mem addr; /* used to track sd_entry allocations */
+ u32 sd_cnt;
+ u32 ref_cnt;
+ struct i40e_hmc_sd_entry *sd_entry; /* (sd_cnt*512) entries max */
+};
+
+struct i40e_hmc_info {
+ u32 signature;
+ /* equals to pci func num for PF and dynamically allocated for VFs */
+ u8 hmc_fn_id;
+ u16 first_sd_index; /* index of the first available SD */
+
+ /* hmc objects */
+ struct i40e_hmc_obj_info *hmc_obj;
+ struct i40e_virt_mem hmc_obj_virt_mem;
+ struct i40e_hmc_sd_table sd_table;
+};
+
+#define I40E_INC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt++)
+#define I40E_INC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt++)
+#define I40E_INC_BP_REFCNT(bp) ((bp)->ref_cnt++)
+
+#define I40E_DEC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt--)
+#define I40E_DEC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt--)
+#define I40E_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--)
+
+/**
+ * I40E_SET_PF_SD_ENTRY - marks the sd entry as valid in the hardware
+ * @hw: pointer to our hw struct
+ * @pa: pointer to physical address
+ * @sd_index: segment descriptor index
+ * @type: if sd entry is direct or paged
+ **/
+#define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type) \
+{ \
+ u32 val1, val2, val3; \
+ val1 = (u32)(I40E_HI_DWORD(pa)); \
+ val2 = (u32)(pa) | (I40E_HMC_MAX_BP_COUNT << \
+ I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
+ ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
+ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \
+ (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
+ val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \
+ wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
+ wr32((hw), I40E_PFHMC_SDCMD, val3); \
+}
+
+/**
+ * I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_index: segment descriptor index
+ * @type: if sd entry is direct or paged
+ **/
+#define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type) \
+{ \
+ u32 val2, val3; \
+ val2 = (I40E_HMC_MAX_BP_COUNT << \
+ I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
+ ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
+ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \
+ val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \
+ wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
+ wr32((hw), I40E_PFHMC_SDCMD, val3); \
+}
+
+/**
+ * I40E_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_idx: segment descriptor index
+ * @pd_idx: page descriptor index
+ **/
+#define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \
+ wr32((hw), I40E_PFHMC_PDINV, \
+ (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
+ ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
+
+/**
+ * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @type: type of HMC resources we're searching
+ * @index: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @sd_idx: pointer to return index of the segment descriptor in question
+ * @sd_limit: pointer to return the maximum number of segment descriptors
+ *
+ * This function calculates the segment descriptor index and index limit
+ * for the resource defined by i40e_hmc_rsrc_type.
+ **/
+#define I40E_FIND_SD_INDEX_LIMIT(hmc_info, type, index, cnt, sd_idx, sd_limit)\
+{ \
+ u64 fpm_addr, fpm_limit; \
+ fpm_addr = (hmc_info)->hmc_obj[(type)].base + \
+ (hmc_info)->hmc_obj[(type)].size * (index); \
+ fpm_limit = fpm_addr + (hmc_info)->hmc_obj[(type)].size * (cnt);\
+ *(sd_idx) = (u32)(fpm_addr / I40E_HMC_DIRECT_BP_SIZE); \
+ *(sd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_DIRECT_BP_SIZE); \
+ /* add one more to the limit to correct our range */ \
+ *(sd_limit) += 1; \
+}
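+
+/*
+ * Worked example of the math above: with hmc_obj[type].base = 0, a 128-byte
+ * object size and 1536 objects starting at index 0, fpm_addr = 0 and
+ * fpm_limit = 196608 (0x30000), so *sd_idx = 0 and
+ * *sd_limit = (196607 / 0x200000) + 1 = 1, i.e. one 2MB segment descriptor
+ * covers the whole range.
+ */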
+
+/**
+ * I40E_FIND_PD_INDEX_LIMIT - finds page descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @type: HMC resource type we're examining
+ * @idx: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @pd_index: pointer to return page descriptor index
+ * @pd_limit: pointer to return page descriptor index limit
+ *
+ * Calculates the page descriptor index and index limit for the resource
+ * defined by i40e_hmc_rsrc_type.
+ **/
+#define I40E_FIND_PD_INDEX_LIMIT(hmc_info, type, idx, cnt, pd_index, pd_limit)\
+{ \
+ u64 fpm_adr, fpm_limit; \
+ fpm_adr = (hmc_info)->hmc_obj[(type)].base + \
+ (hmc_info)->hmc_obj[(type)].size * (idx); \
+ fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt); \
+ *(pd_index) = (u32)(fpm_adr / I40E_HMC_PAGED_BP_SIZE); \
+ *(pd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_PAGED_BP_SIZE); \
+ /* add one more to the limit to correct our range */ \
+ *(pd_limit) += 1; \
+}
+
+enum i40e_status_code i40e_add_sd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 sd_index,
+ enum i40e_sd_entry_type type,
+ u64 direct_mode_sz);
+
+enum i40e_status_code i40e_add_pd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 pd_index);
+enum i40e_status_code i40e_remove_pd_bp(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx);
+enum i40e_status_code i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
+ u32 idx);
+enum i40e_status_code i40e_remove_sd_bp_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
+enum i40e_status_code i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
+ u32 idx);
+enum i40e_status_code i40e_remove_pd_page_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
+
+#endif /* _I40E_HMC_H_ */
diff --git a/sys/dev/ixl/i40e_lan_hmc.c b/sys/dev/ixl/i40e_lan_hmc.c
new file mode 100755
index 0000000..91c20e8
--- /dev/null
+++ b/sys/dev/ixl/i40e_lan_hmc.c
@@ -0,0 +1,1418 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2014, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#include "i40e_osdep.h"
+#include "i40e_register.h"
+#include "i40e_type.h"
+#include "i40e_hmc.h"
+#include "i40e_lan_hmc.h"
+#include "i40e_prototype.h"
+
+/* lan specific interface functions */
+
+/**
+ * i40e_align_l2obj_base - aligns the base object offset to a 512-byte boundary
+ * @offset: base address offset needing alignment
+ *
+ * Aligns the layer 2 function private memory so it's 512-byte aligned.
+ **/
+static u64 i40e_align_l2obj_base(u64 offset)
+{
+ u64 aligned_offset = offset;
+
+ if ((offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT) > 0)
+ aligned_offset += (I40E_HMC_L2OBJ_BASE_ALIGNMENT -
+ (offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT));
+
+ return aligned_offset;
+}
+
+/**
+ * i40e_calculate_l2fpm_size - calculates layer 2 FPM memory size
+ * @txq_num: number of Tx queues needing backing context
+ * @rxq_num: number of Rx queues needing backing context
+ * @fcoe_cntx_num: number of FCoE stateful contexts needing backing context
+ * @fcoe_filt_num: number of FCoE filters needing backing context
+ *
+ * Calculates the maximum amount of memory for the function required, based
+ * on the number of resources it must provide context for.
+ **/
+u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
+ u32 fcoe_cntx_num, u32 fcoe_filt_num)
+{
+ u64 fpm_size = 0;
+
+ fpm_size = txq_num * I40E_HMC_OBJ_SIZE_TXQ;
+ fpm_size = i40e_align_l2obj_base(fpm_size);
+
+ fpm_size += (rxq_num * I40E_HMC_OBJ_SIZE_RXQ);
+ fpm_size = i40e_align_l2obj_base(fpm_size);
+
+ fpm_size += (fcoe_cntx_num * I40E_HMC_OBJ_SIZE_FCOE_CNTX);
+ fpm_size = i40e_align_l2obj_base(fpm_size);
+
+ fpm_size += (fcoe_filt_num * I40E_HMC_OBJ_SIZE_FCOE_FILT);
+ fpm_size = i40e_align_l2obj_base(fpm_size);
+
+ return fpm_size;
+}
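+
+/*
+ * For example, with 1536 Tx queues (128 bytes each), 1536 Rx queues
+ * (32 bytes each), 128 FCoE contexts and 128 FCoE filters (64 bytes each),
+ * every intermediate sum is already 512-byte aligned, so the result is
+ * 196608 + 49152 + 8192 + 8192 = 262144 bytes (256KB), which fits in a
+ * single 2MB direct-mode segment descriptor.
+ */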
+
+/**
+ * i40e_init_lan_hmc - initialize i40e_hmc_info struct
+ * @hw: pointer to the HW structure
+ * @txq_num: number of Tx queues needing backing context
+ * @rxq_num: number of Rx queues needing backing context
+ * @fcoe_cntx_num: number of FCoE stateful contexts needing backing context
+ * @fcoe_filt_num: number of FCoE filters needing backing context
+ *
+ * This function will be called once per physical function initialization.
+ * It will fill out the i40e_hmc_obj_info structure for LAN objects based on
+ * the driver's provided input, as well as information from the HMC itself
+ * loaded from NVRAM.
+ *
+ * Assumptions:
+ * - HMC Resource Profile has been selected before calling this function.
+ **/
+enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
+ u32 rxq_num, u32 fcoe_cntx_num,
+ u32 fcoe_filt_num)
+{
+ struct i40e_hmc_obj_info *obj, *full_obj;
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u64 l2fpm_size;
+ u32 size_exp;
+
+ hw->hmc.signature = I40E_HMC_INFO_SIGNATURE;
+ hw->hmc.hmc_fn_id = hw->pf_id;
+
+ /* allocate memory for hmc_obj */
+ ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem,
+ sizeof(struct i40e_hmc_obj_info) * I40E_HMC_LAN_MAX);
+ if (ret_code)
+ goto init_lan_hmc_out;
+ hw->hmc.hmc_obj = (struct i40e_hmc_obj_info *)
+ hw->hmc.hmc_obj_virt_mem.va;
+
+ /* The full object will be used to create the LAN HMC SD */
+ full_obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_FULL];
+ full_obj->max_cnt = 0;
+ full_obj->cnt = 0;
+ full_obj->base = 0;
+ full_obj->size = 0;
+
+ /* Tx queue context information */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
+ obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
+ obj->cnt = txq_num;
+ obj->base = 0;
+ size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ);
+ obj->size = (u64)1 << size_exp;
+
+ /* validate values requested by driver don't exceed HMC capacity */
+ if (txq_num > obj->max_cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ DEBUGOUT3("i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+ txq_num, obj->max_cnt, ret_code);
+ goto init_lan_hmc_out;
+ }
+
+ /* aggregate values into the full LAN object for later */
+ full_obj->max_cnt += obj->max_cnt;
+ full_obj->cnt += obj->cnt;
+
+ /* Rx queue context information */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
+ obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
+ obj->cnt = rxq_num;
+ obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_TX].base +
+ (hw->hmc.hmc_obj[I40E_HMC_LAN_TX].cnt *
+ hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size);
+ obj->base = i40e_align_l2obj_base(obj->base);
+ size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ);
+ obj->size = (u64)1 << size_exp;
+
+ /* validate values requested by driver don't exceed HMC capacity */
+ if (rxq_num > obj->max_cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ DEBUGOUT3("i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+ rxq_num, obj->max_cnt, ret_code);
+ goto init_lan_hmc_out;
+ }
+
+ /* aggregate values into the full LAN object for later */
+ full_obj->max_cnt += obj->max_cnt;
+ full_obj->cnt += obj->cnt;
+
+ /* FCoE context information */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
+ obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEMAX);
+ obj->cnt = fcoe_cntx_num;
+ obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_RX].base +
+ (hw->hmc.hmc_obj[I40E_HMC_LAN_RX].cnt *
+ hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size);
+ obj->base = i40e_align_l2obj_base(obj->base);
+ size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ);
+ obj->size = (u64)1 << size_exp;
+
+ /* validate values requested by driver don't exceed HMC capacity */
+ if (fcoe_cntx_num > obj->max_cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ DEBUGOUT3("i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+ fcoe_cntx_num, obj->max_cnt, ret_code);
+ goto init_lan_hmc_out;
+ }
+
+ /* aggregate values into the full LAN object for later */
+ full_obj->max_cnt += obj->max_cnt;
+ full_obj->cnt += obj->cnt;
+
+ /* FCoE filter information */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
+ obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEFMAX);
+ obj->cnt = fcoe_filt_num;
+ obj->base = hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].base +
+ (hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].cnt *
+ hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size);
+ obj->base = i40e_align_l2obj_base(obj->base);
+ size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ);
+ obj->size = (u64)1 << size_exp;
+
+ /* validate values requested by driver don't exceed HMC capacity */
+ if (fcoe_filt_num > obj->max_cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ DEBUGOUT3("i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+ fcoe_filt_num, obj->max_cnt, ret_code);
+ goto init_lan_hmc_out;
+ }
+
+ /* aggregate values into the full LAN object for later */
+ full_obj->max_cnt += obj->max_cnt;
+ full_obj->cnt += obj->cnt;
+
+ hw->hmc.first_sd_index = 0;
+ hw->hmc.sd_table.ref_cnt = 0;
+ l2fpm_size = i40e_calculate_l2fpm_size(txq_num, rxq_num, fcoe_cntx_num,
+ fcoe_filt_num);
+ if (NULL == hw->hmc.sd_table.sd_entry) {
+ hw->hmc.sd_table.sd_cnt = (u32)
+ (l2fpm_size + I40E_HMC_DIRECT_BP_SIZE - 1) /
+ I40E_HMC_DIRECT_BP_SIZE;
+
+ /* allocate the sd_entry members in the sd_table */
+ ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.sd_table.addr,
+ (sizeof(struct i40e_hmc_sd_entry) *
+ hw->hmc.sd_table.sd_cnt));
+ if (ret_code)
+ goto init_lan_hmc_out;
+ hw->hmc.sd_table.sd_entry =
+ (struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va;
+ }
+ /* store in the LAN full object for later */
+ full_obj->size = l2fpm_size;
+
+init_lan_hmc_out:
+ return ret_code;
+}
+
+/**
+ * i40e_remove_pd_page - Remove a page from the page descriptor table
+ * @hw: pointer to the HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: segment descriptor index to find the relevant page descriptor
+ *
+ * This function:
+ * 1. Marks the entry in the pd table (for paged address mode) invalid
+ * 2. Writes to the PMPDINV register to invalidate the backing page in the
+ *    FV cache
+ * 3. Decrements the ref count for the pd_entry
+ * Assumptions:
+ * 1. The caller can deallocate the memory used by the pd after this
+ *    function returns.
+ **/
+static enum i40e_status_code i40e_remove_pd_page(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ if (i40e_prep_remove_pd_page(hmc_info, idx) == I40E_SUCCESS)
+ ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, TRUE);
+
+ return ret_code;
+}
+
+/**
+ * i40e_remove_sd_bp - remove a backing page from a segment descriptor
+ * @hw: pointer to our HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ *
+ * This function:
+ * 1. Marks the entry in the sd table (for direct address mode) invalid
+ * 2. Writes PMSDCMD, PMSDDATALOW (with PMSDDATALOW.PMSDVALID cleared) and
+ *    PMSDDATAHIGH to invalidate the sd page
+ * 3. Decrements the ref count for the sd_entry
+ * Assumptions:
+ * 1. The caller can deallocate the memory used by the backing storage after
+ *    this function returns.
+ **/
+static enum i40e_status_code i40e_remove_sd_bp(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ if (i40e_prep_remove_sd_bp(hmc_info, idx) == I40E_SUCCESS)
+ ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, TRUE);
+
+ return ret_code;
+}
+
+/**
+ * i40e_create_lan_hmc_object - allocate backing store for hmc objects
+ * @hw: pointer to the HW structure
+ * @info: pointer to i40e_hmc_lan_create_obj_info struct
+ *
+ * This will allocate memory for PDs and backing pages and populate
+ * the sd and pd entries.
+ **/
+enum i40e_status_code i40e_create_lan_hmc_object(struct i40e_hw *hw,
+ struct i40e_hmc_lan_create_obj_info *info)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ struct i40e_hmc_sd_entry *sd_entry;
+ u32 pd_idx1 = 0, pd_lmt1 = 0;
+ u32 pd_idx = 0, pd_lmt = 0;
+ bool pd_error = FALSE;
+ u32 sd_idx, sd_lmt;
+ u64 sd_size;
+ u32 i, j;
+
+ if (NULL == info) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_create_lan_hmc_object: bad info ptr\n");
+ goto exit;
+ }
+ if (NULL == info->hmc_info) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_create_lan_hmc_object: bad hmc_info ptr\n");
+ goto exit;
+ }
+ if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_create_lan_hmc_object: bad signature\n");
+ goto exit;
+ }
+
+ if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
+ DEBUGOUT1("i40e_create_lan_hmc_object: returns error %d\n",
+ ret_code);
+ goto exit;
+ }
+ if ((info->start_idx + info->count) >
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ DEBUGOUT1("i40e_create_lan_hmc_object: returns error %d\n",
+ ret_code);
+ goto exit;
+ }
+
+ /* find sd index and limit */
+ I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count,
+ &sd_idx, &sd_lmt);
+ if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
+ sd_lmt > info->hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_SD_INDEX;
+ goto exit;
+ }
+ /* find pd index */
+ I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count, &pd_idx,
+ &pd_lmt);
+
+ /* This covers cases where an SD smaller than the full 2M of memory is
+ * wanted.  If no size is specified, the SD size defaults to 2M.
+ */
+ if (info->direct_mode_sz == 0)
+ sd_size = I40E_HMC_DIRECT_BP_SIZE;
+ else
+ sd_size = info->direct_mode_sz;
+
+ /* check if all the sds are valid. If not, allocate a page and
+ * initialize it.
+ */
+ for (j = sd_idx; j < sd_lmt; j++) {
+ /* update the sd table entry */
+ ret_code = i40e_add_sd_table_entry(hw, info->hmc_info, j,
+ info->entry_type,
+ sd_size);
+ if (I40E_SUCCESS != ret_code)
+ goto exit_sd_error;
+ sd_entry = &info->hmc_info->sd_table.sd_entry[j];
+ if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
+ /* check if all the pds in this sd are valid. If not,
+ * allocate a page and initialize it.
+ */
+
+ /* find pd_idx and pd_lmt in this sd */
+ pd_idx1 = max(pd_idx, (j * I40E_HMC_MAX_BP_COUNT));
+ pd_lmt1 = min(pd_lmt,
+ ((j + 1) * I40E_HMC_MAX_BP_COUNT));
+ for (i = pd_idx1; i < pd_lmt1; i++) {
+ /* update the pd table entry */
+ ret_code = i40e_add_pd_table_entry(hw,
+ info->hmc_info,
+ i);
+ if (I40E_SUCCESS != ret_code) {
+ pd_error = TRUE;
+ break;
+ }
+ }
+ if (pd_error) {
+ /* remove the backing pages from pd_idx1 to i */
+ while (i && (i > pd_idx1)) {
+ i40e_remove_pd_bp(hw, info->hmc_info,
+ (i - 1));
+ i--;
+ }
+ }
+ }
+ if (!sd_entry->valid) {
+ sd_entry->valid = TRUE;
+ switch (sd_entry->entry_type) {
+ case I40E_SD_TYPE_PAGED:
+ I40E_SET_PF_SD_ENTRY(hw,
+ sd_entry->u.pd_table.pd_page_addr.pa,
+ j, sd_entry->entry_type);
+ break;
+ case I40E_SD_TYPE_DIRECT:
+ I40E_SET_PF_SD_ENTRY(hw, sd_entry->u.bp.addr.pa,
+ j, sd_entry->entry_type);
+ break;
+ default:
+ ret_code = I40E_ERR_INVALID_SD_TYPE;
+ goto exit;
+ }
+ }
+ }
+ goto exit;
+
+exit_sd_error:
+ /* cleanup for sd entries from j to sd_idx */
+ while (j && (j > sd_idx)) {
+ sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
+ switch (sd_entry->entry_type) {
+ case I40E_SD_TYPE_PAGED:
+ pd_idx1 = max(pd_idx,
+ ((j - 1) * I40E_HMC_MAX_BP_COUNT));
+ pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
+ for (i = pd_idx1; i < pd_lmt1; i++) {
+ i40e_remove_pd_bp(hw, info->hmc_info, i);
+ }
+ i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
+ break;
+ case I40E_SD_TYPE_DIRECT:
+ i40e_remove_sd_bp(hw, info->hmc_info, (j - 1));
+ break;
+ default:
+ ret_code = I40E_ERR_INVALID_SD_TYPE;
+ break;
+ }
+ j--;
+ }
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_configure_lan_hmc - prepare the HMC backing store
+ * @hw: pointer to the hw structure
+ * @model: the model for the layout of the SD/PD tables
+ *
+ * - This function will be called once per physical function initialization.
+ * - This function will be called after i40e_init_lan_hmc() and before
+ * any LAN/FCoE HMC objects can be created.
+ **/
+enum i40e_status_code i40e_configure_lan_hmc(struct i40e_hw *hw,
+ enum i40e_hmc_model model)
+{
+ struct i40e_hmc_lan_create_obj_info info;
+ u8 hmc_fn_id = hw->hmc.hmc_fn_id;
+ struct i40e_hmc_obj_info *obj;
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ /* Initialize part of the create object info struct */
+ info.hmc_info = &hw->hmc;
+ info.rsrc_type = I40E_HMC_LAN_FULL;
+ info.start_idx = 0;
+ info.direct_mode_sz = hw->hmc.hmc_obj[I40E_HMC_LAN_FULL].size;
+
+ /* Build the SD entry for the LAN objects */
+ switch (model) {
+ case I40E_HMC_MODEL_DIRECT_PREFERRED:
+ case I40E_HMC_MODEL_DIRECT_ONLY:
+ info.entry_type = I40E_SD_TYPE_DIRECT;
+ /* Make one big object, a single SD */
+ info.count = 1;
+ ret_code = i40e_create_lan_hmc_object(hw, &info);
+ if ((ret_code != I40E_SUCCESS) && (model == I40E_HMC_MODEL_DIRECT_PREFERRED))
+ goto try_type_paged;
+ else if (ret_code != I40E_SUCCESS)
+ goto configure_lan_hmc_out;
+ /* else: success, fall through to the break */
+ break;
+ case I40E_HMC_MODEL_PAGED_ONLY:
+try_type_paged:
+ info.entry_type = I40E_SD_TYPE_PAGED;
+ /* Make one big object in the PD table */
+ info.count = 1;
+ ret_code = i40e_create_lan_hmc_object(hw, &info);
+ if (ret_code != I40E_SUCCESS)
+ goto configure_lan_hmc_out;
+ break;
+ default:
+ /* unsupported type */
+ ret_code = I40E_ERR_INVALID_SD_TYPE;
+ DEBUGOUT1("i40e_configure_lan_hmc: Unknown SD type: %d\n",
+ ret_code);
+ goto configure_lan_hmc_out;
+ }
+
+ /* Configure and program the FPM registers so objects can be created */
+
+ /* Tx contexts */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
+ wr32(hw, I40E_GLHMC_LANTXBASE(hmc_fn_id),
+ (u32)((obj->base & I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK) / 512));
+ wr32(hw, I40E_GLHMC_LANTXCNT(hmc_fn_id), obj->cnt);
+
+ /* Rx contexts */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
+ wr32(hw, I40E_GLHMC_LANRXBASE(hmc_fn_id),
+ (u32)((obj->base & I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK) / 512));
+ wr32(hw, I40E_GLHMC_LANRXCNT(hmc_fn_id), obj->cnt);
+
+ /* FCoE contexts */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
+ wr32(hw, I40E_GLHMC_FCOEDDPBASE(hmc_fn_id),
+ (u32)((obj->base & I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK) / 512));
+ wr32(hw, I40E_GLHMC_FCOEDDPCNT(hmc_fn_id), obj->cnt);
+
+ /* FCoE filters */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
+ wr32(hw, I40E_GLHMC_FCOEFBASE(hmc_fn_id),
+ (u32)((obj->base & I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK) / 512));
+ wr32(hw, I40E_GLHMC_FCOEFCNT(hmc_fn_id), obj->cnt);
+
+configure_lan_hmc_out:
+ return ret_code;
+}
+
+/**
+ * i40e_delete_lan_hmc_object - remove hmc objects
+ * @hw: pointer to the HW structure
+ * @info: pointer to i40e_hmc_lan_delete_obj_info struct
+ *
+ * This will de-populate the SDs and PDs.  It frees the memory for the PDs
+ * and backing storage.  After this function returns, the caller should
+ * deallocate the memory previously allocated for book-keeping information
+ * about the PDs and backing storage.
+ **/
+enum i40e_status_code i40e_delete_lan_hmc_object(struct i40e_hw *hw,
+ struct i40e_hmc_lan_delete_obj_info *info)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ struct i40e_hmc_pd_table *pd_table;
+ u32 pd_idx, pd_lmt, rel_pd_idx;
+ u32 sd_idx, sd_lmt;
+ u32 i, j;
+
+ if (NULL == info) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_delete_hmc_object: bad info ptr\n");
+ goto exit;
+ }
+ if (NULL == info->hmc_info) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_delete_hmc_object: bad info->hmc_info ptr\n");
+ goto exit;
+ }
+ if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_delete_hmc_object: bad hmc_info->signature\n");
+ goto exit;
+ }
+
+ if (NULL == info->hmc_info->sd_table.sd_entry) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_delete_hmc_object: bad sd_entry\n");
+ goto exit;
+ }
+
+ if (NULL == info->hmc_info->hmc_obj) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_delete_hmc_object: bad hmc_info->hmc_obj\n");
+ goto exit;
+ }
+ if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
+ DEBUGOUT1("i40e_delete_hmc_object: returns error %d\n",
+ ret_code);
+ goto exit;
+ }
+
+ if ((info->start_idx + info->count) >
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ DEBUGOUT1("i40e_delete_hmc_object: returns error %d\n",
+ ret_code);
+ goto exit;
+ }
+
+ I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count, &pd_idx,
+ &pd_lmt);
+
+ for (j = pd_idx; j < pd_lmt; j++) {
+ sd_idx = j / I40E_HMC_PD_CNT_IN_SD;
+
+ if (I40E_SD_TYPE_PAGED !=
+ info->hmc_info->sd_table.sd_entry[sd_idx].entry_type)
+ continue;
+
+ rel_pd_idx = j % I40E_HMC_PD_CNT_IN_SD;
+
+ pd_table =
+ &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+ if (pd_table->pd_entry[rel_pd_idx].valid) {
+ ret_code = i40e_remove_pd_bp(hw, info->hmc_info, j);
+ if (I40E_SUCCESS != ret_code)
+ goto exit;
+ }
+ }
+
+ /* find sd index and limit */
+ I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count,
+ &sd_idx, &sd_lmt);
+ if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
+ sd_lmt > info->hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_SD_INDEX;
+ goto exit;
+ }
+
+ for (i = sd_idx; i < sd_lmt; i++) {
+ if (!info->hmc_info->sd_table.sd_entry[i].valid)
+ continue;
+ switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
+ case I40E_SD_TYPE_DIRECT:
+ ret_code = i40e_remove_sd_bp(hw, info->hmc_info, i);
+ if (I40E_SUCCESS != ret_code)
+ goto exit;
+ break;
+ case I40E_SD_TYPE_PAGED:
+ ret_code = i40e_remove_pd_page(hw, info->hmc_info, i);
+ if (I40E_SUCCESS != ret_code)
+ goto exit;
+ break;
+ default:
+ break;
+ }
+ }
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_shutdown_lan_hmc - Remove HMC backing store, free allocated memory
+ * @hw: pointer to the hw structure
+ *
+ * This must be called by drivers as they are shutting down and being
+ * removed from the OS.
+ **/
+enum i40e_status_code i40e_shutdown_lan_hmc(struct i40e_hw *hw)
+{
+ struct i40e_hmc_lan_delete_obj_info info;
+ enum i40e_status_code ret_code;
+
+ info.hmc_info = &hw->hmc;
+ info.rsrc_type = I40E_HMC_LAN_FULL;
+ info.start_idx = 0;
+ info.count = 1;
+
+ /* delete the object */
+ ret_code = i40e_delete_lan_hmc_object(hw, &info);
+
+ /* free the SD table entry for LAN */
+ i40e_free_virt_mem(hw, &hw->hmc.sd_table.addr);
+ hw->hmc.sd_table.sd_cnt = 0;
+ hw->hmc.sd_table.sd_entry = NULL;
+
+ /* free memory used for hmc_obj */
+ i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem);
+ hw->hmc.hmc_obj = NULL;
+
+ return ret_code;
+}
+
+#define I40E_HMC_STORE(_struct, _ele) \
+ offsetof(struct _struct, _ele), \
+ FIELD_SIZEOF(struct _struct, _ele)
+
+struct i40e_context_ele {
+ u16 offset;
+ u16 size_of;
+ u16 width;
+ u16 lsb;
+};
+
+/* LAN Tx Queue Context */
+static struct i40e_context_ele i40e_hmc_txq_ce_info[] = {
+ /* Field Width LSB */
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, head), 13, 0 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, new_context), 1, 30 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, base), 57, 32 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, fc_ena), 1, 89 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, timesync_ena), 1, 90 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, fd_ena), 1, 91 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, alt_vlan_ena), 1, 92 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, cpuid), 8, 96 },
+/* line 1 */
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, thead_wb), 13, 0 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_ena), 1, 32 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, qlen), 13, 33 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, tphrdesc_ena), 1, 46 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, tphrpacket_ena), 1, 47 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, tphwdesc_ena), 1, 48 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_addr), 64, 64 + 128 },
+/* line 7 */
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, crc), 32, 0 + (7 * 128) },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist), 10, 84 + (7 * 128) },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist_act), 1, 94 + (7 * 128) },
+ { 0 }
+};
+
+/* LAN Rx Queue Context */
+static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = {
+ /* Field Width LSB */
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, head), 13, 0 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, cpuid), 8, 13 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, base), 57, 32 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, qlen), 13, 89 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, dbuff), 7, 102 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, hbuff), 5, 109 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, dtype), 2, 114 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, dsize), 1, 116 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, crcstrip), 1, 117 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, fc_ena), 1, 118 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, l2tsel), 1, 119 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_0), 4, 120 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_1), 2, 124 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, showiv), 1, 127 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, rxmax), 14, 174 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphrdesc_ena), 1, 193 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphwdesc_ena), 1, 194 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena), 1, 195 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena), 1, 196 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh), 3, 198 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, prefena), 1, 201 },
+ { 0 }
+};
+
+/**
+ * i40e_write_byte - replace HMC context byte
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be read from
+ * @src: the struct to be read from
+ **/
+static void i40e_write_byte(u8 *hmc_bits,
+ struct i40e_context_ele *ce_info,
+ u8 *src)
+{
+ u8 src_byte, dest_byte, mask;
+ u8 *from, *dest;
+ u16 shift_width;
+
+ /* copy from the next struct field */
+ from = src + ce_info->offset;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+ mask = ((u8)1 << ce_info->width) - 1;
+
+ src_byte = *from;
+ src_byte &= mask;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+ src_byte <<= shift_width;
+
+ /* get the current bits from the target bit string */
+ dest = hmc_bits + (ce_info->lsb / 8);
+
+ i40e_memcpy(&dest_byte, dest, sizeof(dest_byte), I40E_DMA_TO_NONDMA);
+
+ dest_byte &= ~mask; /* get the bits not changing */
+ dest_byte |= src_byte; /* add in the new bits */
+
+ /* put it all back */
+ i40e_memcpy(dest, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
+}
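+
+/*
+ * For example, the Tx queue context's fc_ena field (width 1, LSB 89 in the
+ * table above) lives in byte 89 / 8 = 11 of the context with
+ * shift_width = 89 % 8 = 1, so the mask becomes 0x02 and only bit 1 of that
+ * byte is replaced; the other seven bits are preserved.
+ */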
+
+/**
+ * i40e_write_word - replace HMC context word
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be read from
+ * @src: the struct to be read from
+ **/
+static void i40e_write_word(u8 *hmc_bits,
+ struct i40e_context_ele *ce_info,
+ u8 *src)
+{
+ u16 src_word, mask;
+ u8 *from, *dest;
+ u16 shift_width;
+ __le16 dest_word;
+
+ /* copy from the next struct field */
+ from = src + ce_info->offset;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+ mask = ((u16)1 << ce_info->width) - 1;
+
+ /* don't swizzle the bits until after the mask because the mask bits
+ * will be in a different bit position on big endian machines
+ */
+ src_word = *(u16 *)from;
+ src_word &= mask;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+ src_word <<= shift_width;
+
+ /* get the current bits from the target bit string */
+ dest = hmc_bits + (ce_info->lsb / 8);
+
+ i40e_memcpy(&dest_word, dest, sizeof(dest_word), I40E_DMA_TO_NONDMA);
+
+ dest_word &= ~(CPU_TO_LE16(mask)); /* get the bits not changing */
+ dest_word |= CPU_TO_LE16(src_word); /* add in the new bits */
+
+ /* put it all back */
+ i40e_memcpy(dest, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_write_dword - replace HMC context dword
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be read from
+ * @src: the struct to be read from
+ **/
+static void i40e_write_dword(u8 *hmc_bits,
+ struct i40e_context_ele *ce_info,
+ u8 *src)
+{
+ u32 src_dword, mask;
+ u8 *from, *dest;
+ u16 shift_width;
+ __le32 dest_dword;
+
+ /* copy from the next struct field */
+ from = src + ce_info->offset;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+
+ /* if the field width is exactly 32 on an x86 machine, then the shift
+ * operation will not work because the SHL instructions count is masked
+ * to 5 bits so the shift will do nothing
+ */
+ if (ce_info->width < 32)
+ mask = ((u32)1 << ce_info->width) - 1;
+ else
+ mask = 0xFFFFFFFF;
+
+ /* don't swizzle the bits until after the mask because the mask bits
+ * will be in a different bit position on big endian machines
+ */
+ src_dword = *(u32 *)from;
+ src_dword &= mask;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+ src_dword <<= shift_width;
+
+ /* get the current bits from the target bit string */
+ dest = hmc_bits + (ce_info->lsb / 8);
+
+ i40e_memcpy(&dest_dword, dest, sizeof(dest_dword), I40E_DMA_TO_NONDMA);
+
+ dest_dword &= ~(CPU_TO_LE32(mask)); /* get the bits not changing */
+ dest_dword |= CPU_TO_LE32(src_dword); /* add in the new bits */
+
+ /* put it all back */
+ i40e_memcpy(dest, &dest_dword, sizeof(dest_dword), I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_write_qword - replace HMC context qword
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be read from
+ * @src: the struct to be read from
+ **/
+static void i40e_write_qword(u8 *hmc_bits,
+ struct i40e_context_ele *ce_info,
+ u8 *src)
+{
+ u64 src_qword, mask;
+ u8 *from, *dest;
+ u16 shift_width;
+ __le64 dest_qword;
+
+ /* copy from the next struct field */
+ from = src + ce_info->offset;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+
+ /* if the field width is exactly 64 on an x86 machine, then the shift
+ * operation will not work because the SHL instructions count is masked
+ * to 6 bits so the shift will do nothing
+ */
+ if (ce_info->width < 64)
+ mask = ((u64)1 << ce_info->width) - 1;
+ else
+ mask = 0xFFFFFFFFFFFFFFFFUL;
+
+ /* don't swizzle the bits until after the mask because the mask bits
+ * will be in a different bit position on big endian machines
+ */
+ src_qword = *(u64 *)from;
+ src_qword &= mask;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+ src_qword <<= shift_width;
+
+ /* get the current bits from the target bit string */
+ dest = hmc_bits + (ce_info->lsb / 8);
+
+ i40e_memcpy(&dest_qword, dest, sizeof(dest_qword), I40E_DMA_TO_NONDMA);
+
+ dest_qword &= ~(CPU_TO_LE64(mask)); /* get the bits not changing */
+ dest_qword |= CPU_TO_LE64(src_qword); /* add in the new bits */
+
+ /* put it all back */
+ i40e_memcpy(dest, &dest_qword, sizeof(dest_qword), I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_read_byte - read HMC context byte into struct
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be filled
+ * @dest: the struct to be filled
+ **/
+static void i40e_read_byte(u8 *hmc_bits,
+ struct i40e_context_ele *ce_info,
+ u8 *dest)
+{
+ u8 dest_byte, mask;
+ u8 *src, *target;
+ u16 shift_width;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+ mask = ((u8)1 << ce_info->width) - 1;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+
+ /* get the current bits from the src bit string */
+ src = hmc_bits + (ce_info->lsb / 8);
+
+ i40e_memcpy(&dest_byte, src, sizeof(dest_byte), I40E_DMA_TO_NONDMA);
+
+ dest_byte &= ~(mask);
+
+ dest_byte >>= shift_width;
+
+ /* get the address from the struct field */
+ target = dest + ce_info->offset;
+
+ /* put it back in the struct */
+ i40e_memcpy(target, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_read_word - read HMC context word into struct
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be filled
+ * @dest: the struct to be filled
+ **/
+static void i40e_read_word(u8 *hmc_bits,
+ struct i40e_context_ele *ce_info,
+ u8 *dest)
+{
+ u16 dest_word, mask;
+ u8 *src, *target;
+ u16 shift_width;
+ __le16 src_word;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+ mask = ((u16)1 << ce_info->width) - 1;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+
+ /* get the current bits from the src bit string */
+ src = hmc_bits + (ce_info->lsb / 8);
+
+ i40e_memcpy(&src_word, src, sizeof(src_word), I40E_DMA_TO_NONDMA);
+
+ /* the data in the memory is stored as little endian so mask it
+ * correctly
+ */
+ src_word &= ~(CPU_TO_LE16(mask));
+
+ /* get the data back into host order before shifting */
+ dest_word = LE16_TO_CPU(src_word);
+
+ dest_word >>= shift_width;
+
+ /* get the address from the struct field */
+ target = dest + ce_info->offset;
+
+ /* put it back in the struct */
+ i40e_memcpy(target, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_read_dword - read HMC context dword into struct
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be filled
+ * @dest: the struct to be filled
+ **/
+static void i40e_read_dword(u8 *hmc_bits,
+ struct i40e_context_ele *ce_info,
+ u8 *dest)
+{
+ u32 dest_dword, mask;
+ u8 *src, *target;
+ u16 shift_width;
+ __le32 src_dword;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+
+ /* if the field width is exactly 32 on an x86 machine, then the shift
+ * operation will not work because the SHL instructions count is masked
+ * to 5 bits so the shift will do nothing
+ */
+ if (ce_info->width < 32)
+ mask = ((u32)1 << ce_info->width) - 1;
+ else
+ mask = 0xFFFFFFFF;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+
+ /* get the current bits from the src bit string */
+ src = hmc_bits + (ce_info->lsb / 8);
+
+ i40e_memcpy(&src_dword, src, sizeof(src_dword), I40E_DMA_TO_NONDMA);
+
+ /* the data in the memory is stored as little endian so mask it
+ * correctly
+ */
+ src_dword &= ~(CPU_TO_LE32(mask));
+
+ /* get the data back into host order before shifting */
+ dest_dword = LE32_TO_CPU(src_dword);
+
+ dest_dword >>= shift_width;
+
+ /* get the address from the struct field */
+ target = dest + ce_info->offset;
+
+ /* put it back in the struct */
+ i40e_memcpy(target, &dest_dword, sizeof(dest_dword),
+ I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_read_qword - read HMC context qword into struct
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be filled
+ * @dest: the struct to be filled
+ **/
+static void i40e_read_qword(u8 *hmc_bits,
+ struct i40e_context_ele *ce_info,
+ u8 *dest)
+{
+ u64 dest_qword, mask;
+ u8 *src, *target;
+ u16 shift_width;
+ __le64 src_qword;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+
+ /* if the field width is exactly 64 on an x86 machine, then the shift
+ * operation will not work because the SHL instructions count is masked
+ * to 6 bits so the shift will do nothing
+ */
+ if (ce_info->width < 64)
+ mask = ((u64)1 << ce_info->width) - 1;
+ else
+ mask = 0xFFFFFFFFFFFFFFFFUL;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+
+ /* get the current bits from the src bit string */
+ src = hmc_bits + (ce_info->lsb / 8);
+
+ i40e_memcpy(&src_qword, src, sizeof(src_qword), I40E_DMA_TO_NONDMA);
+
+ /* the data in the memory is stored as little endian so mask it
+ * correctly
+ */
+ src_qword &= ~(CPU_TO_LE64(mask));
+
+ /* get the data back into host order before shifting */
+ dest_qword = LE64_TO_CPU(src_qword);
+
+ dest_qword >>= shift_width;
+
+ /* get the address from the struct field */
+ target = dest + ce_info->offset;
+
+ /* put it back in the struct */
+ i40e_memcpy(target, &dest_qword, sizeof(dest_qword),
+ I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_get_hmc_context - extract HMC context bits
+ * @context_bytes: pointer to the context bit array
+ * @ce_info: a description of the struct to be filled
+ * @dest: the struct to be filled
+ **/
+static enum i40e_status_code i40e_get_hmc_context(u8 *context_bytes,
+ struct i40e_context_ele *ce_info,
+ u8 *dest)
+{
+ int f;
+
+ for (f = 0; ce_info[f].width != 0; f++) {
+ switch (ce_info[f].size_of) {
+ case 1:
+ i40e_read_byte(context_bytes, &ce_info[f], dest);
+ break;
+ case 2:
+ i40e_read_word(context_bytes, &ce_info[f], dest);
+ break;
+ case 4:
+ i40e_read_dword(context_bytes, &ce_info[f], dest);
+ break;
+ case 8:
+ i40e_read_qword(context_bytes, &ce_info[f], dest);
+ break;
+ default:
+ /* nothing to do, just keep going */
+ break;
+ }
+ }
+
+ return I40E_SUCCESS;
+}
+
+/**
+ * i40e_clear_hmc_context - zero out the HMC context bits
+ * @hw: the hardware struct
+ * @context_bytes: pointer to the context bit array (DMA memory)
+ * @hmc_type: the type of HMC resource
+ **/
+static enum i40e_status_code i40e_clear_hmc_context(struct i40e_hw *hw,
+ u8 *context_bytes,
+ enum i40e_hmc_lan_rsrc_type hmc_type)
+{
+ /* clean the bit array */
+ i40e_memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size,
+ I40E_DMA_MEM);
+
+ return I40E_SUCCESS;
+}
+
+/**
+ * i40e_set_hmc_context - replace HMC context bits
+ * @context_bytes: pointer to the context bit array
+ * @ce_info: a description of the struct to be read from
+ * @dest: the struct holding the values to write
+ **/
+static enum i40e_status_code i40e_set_hmc_context(u8 *context_bytes,
+ struct i40e_context_ele *ce_info,
+ u8 *dest)
+{
+ int f;
+
+ for (f = 0; ce_info[f].width != 0; f++) {
+
+ /* we have to deal with each element of the HMC using the
+ * correct size so that we are correct regardless of the
+ * endianness of the machine
+ */
+ switch (ce_info[f].size_of) {
+ case 1:
+ i40e_write_byte(context_bytes, &ce_info[f], dest);
+ break;
+ case 2:
+ i40e_write_word(context_bytes, &ce_info[f], dest);
+ break;
+ case 4:
+ i40e_write_dword(context_bytes, &ce_info[f], dest);
+ break;
+ case 8:
+ i40e_write_qword(context_bytes, &ce_info[f], dest);
+ break;
+ }
+ }
+
+ return I40E_SUCCESS;
+}
+
+/**
+ * i40e_hmc_get_object_va - retrieves an object's virtual address
+ * @hmc_info: pointer to i40e_hmc_info struct
+ * @object_base: pointer used to return the object's virtual address
+ * @rsrc_type: the hmc resource type
+ * @obj_idx: hmc object index
+ *
+ * This function retrieves the object's virtual address from the object
+ * base pointer. This function is used for LAN Queue contexts.
+ **/
+static
+enum i40e_status_code i40e_hmc_get_object_va(struct i40e_hmc_info *hmc_info,
+ u8 **object_base,
+ enum i40e_hmc_lan_rsrc_type rsrc_type,
+ u32 obj_idx)
+{
+ u32 obj_offset_in_sd, obj_offset_in_pd;
+ struct i40e_hmc_sd_entry *sd_entry;
+ struct i40e_hmc_pd_entry *pd_entry;
+ u32 pd_idx, pd_lmt, rel_pd_idx;
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u64 obj_offset_in_fpm;
+ u32 sd_idx, sd_lmt;
+
+ if (NULL == hmc_info) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info ptr\n");
+ goto exit;
+ }
+ if (NULL == hmc_info->hmc_obj) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n");
+ goto exit;
+ }
+ if (NULL == object_base) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_hmc_get_object_va: bad object_base ptr\n");
+ goto exit;
+ }
+ if (I40E_HMC_INFO_SIGNATURE != hmc_info->signature) {
+ ret_code = I40E_ERR_BAD_PTR;
+ DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->signature\n");
+ goto exit;
+ }
+ if (obj_idx >= hmc_info->hmc_obj[rsrc_type].cnt) {
+ DEBUGOUT1("i40e_hmc_get_object_va: returns error %d\n",
+ ret_code);
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
+ goto exit;
+ }
+ /* find sd index and limit */
+ I40E_FIND_SD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
+ &sd_idx, &sd_lmt);
+
+ sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
+ obj_offset_in_fpm = hmc_info->hmc_obj[rsrc_type].base +
+ hmc_info->hmc_obj[rsrc_type].size * obj_idx;
+
+ if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
+ I40E_FIND_PD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
+ &pd_idx, &pd_lmt);
+ rel_pd_idx = pd_idx % I40E_HMC_PD_CNT_IN_SD;
+ pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx];
+ obj_offset_in_pd = (u32)(obj_offset_in_fpm %
+ I40E_HMC_PAGED_BP_SIZE);
+ *object_base = (u8 *)pd_entry->bp.addr.va + obj_offset_in_pd;
+ } else {
+ obj_offset_in_sd = (u32)(obj_offset_in_fpm %
+ I40E_HMC_DIRECT_BP_SIZE);
+ *object_base = (u8 *)sd_entry->u.bp.addr.va + obj_offset_in_sd;
+ }
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_get_lan_tx_queue_context - return the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ * @s: the struct to be filled
+ **/
+enum i40e_status_code i40e_get_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_txq *s)
+{
+ enum i40e_status_code err;
+ u8 *context_bytes;
+
+ err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
+ I40E_HMC_LAN_TX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_get_hmc_context(context_bytes,
+ i40e_hmc_txq_ce_info, (u8 *)s);
+}
+
+/**
+ * i40e_clear_lan_tx_queue_context - clear the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ **/
+enum i40e_status_code i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue)
+{
+ enum i40e_status_code err;
+ u8 *context_bytes;
+
+ err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
+ I40E_HMC_LAN_TX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_TX);
+}
+
+/**
+ * i40e_set_lan_tx_queue_context - set the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ * @s: the struct holding the context values to write
+ **/
+enum i40e_status_code i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_txq *s)
+{
+ enum i40e_status_code err;
+ u8 *context_bytes;
+
+ err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
+ I40E_HMC_LAN_TX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_set_hmc_context(context_bytes,
+ i40e_hmc_txq_ce_info, (u8 *)s);
+}
+
+/**
+ * i40e_get_lan_rx_queue_context - return the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ * @s: the struct to be filled
+ **/
+enum i40e_status_code i40e_get_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_rxq *s)
+{
+ enum i40e_status_code err;
+ u8 *context_bytes;
+
+ err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
+ I40E_HMC_LAN_RX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_get_hmc_context(context_bytes,
+ i40e_hmc_rxq_ce_info, (u8 *)s);
+}
+
+/**
+ * i40e_clear_lan_rx_queue_context - clear the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ **/
+enum i40e_status_code i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue)
+{
+ enum i40e_status_code err;
+ u8 *context_bytes;
+
+ err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
+ I40E_HMC_LAN_RX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_RX);
+}
+
+/**
+ * i40e_set_lan_rx_queue_context - set the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ * @s: the struct holding the context values to write
+ **/
+enum i40e_status_code i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_rxq *s)
+{
+ enum i40e_status_code err;
+ u8 *context_bytes;
+
+ err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
+ I40E_HMC_LAN_RX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_set_hmc_context(context_bytes,
+ i40e_hmc_rxq_ce_info, (u8 *)s);
+}
diff --git a/sys/dev/ixl/i40e_lan_hmc.h b/sys/dev/ixl/i40e_lan_hmc.h
new file mode 100755
index 0000000..8b73570
--- /dev/null
+++ b/sys/dev/ixl/i40e_lan_hmc.h
@@ -0,0 +1,201 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2014, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _I40E_LAN_HMC_H_
+#define _I40E_LAN_HMC_H_
+
+/* forward-declare the HW struct for the compiler */
+struct i40e_hw;
+
+/* HMC element context information */
+
+/* Rx queue context data
+ *
+ * Some members are sized larger than the HMC field they hold because fields
+ * can cross byte boundaries.  If a member were sized exactly to the field
+ * width, bits could be shifted off the top when the field starts partway
+ * through a byte and spills into the next one.
+ */
+struct i40e_hmc_obj_rxq {
+ u16 head;
+ u16 cpuid; /* bigger than needed, see above for reason */
+ u64 base;
+ u16 qlen;
+#define I40E_RXQ_CTX_DBUFF_SHIFT 7
+ u16 dbuff; /* bigger than needed, see above for reason */
+#define I40E_RXQ_CTX_HBUFF_SHIFT 6
+ u16 hbuff; /* bigger than needed, see above for reason */
+ u8 dtype;
+ u8 dsize;
+ u8 crcstrip;
+ u8 fc_ena;
+ u8 l2tsel;
+ u8 hsplit_0;
+ u8 hsplit_1;
+ u8 showiv;
+ u32 rxmax; /* bigger than needed, see above for reason */
+ u8 tphrdesc_ena;
+ u8 tphwdesc_ena;
+ u8 tphdata_ena;
+ u8 tphhead_ena;
+ u16 lrxqthresh; /* bigger than needed, see above for reason */
+ u8 prefena; /* NOTE: normally must be set to 1 at init */
+};
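+
+/*
+ * The shifts above imply the units of dbuff and hbuff: dbuff is programmed
+ * in 128-byte units and hbuff in 64-byte units, so a 2048-byte receive
+ * buffer, for example, is written as dbuff = 2048 >> I40E_RXQ_CTX_DBUFF_SHIFT
+ * = 16.
+ */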
+
+/* Tx queue context data
+*
+ *
+ * Some members are sized larger than the HMC field they hold because fields
+ * can cross byte boundaries.  If a member were sized exactly to the field
+ * width, bits could be shifted off the top when the field starts partway
+ * through a byte and spills into the next one.
+ */
+ u16 head;
+ u8 new_context;
+ u64 base;
+ u8 fc_ena;
+ u8 timesync_ena;
+ u8 fd_ena;
+ u8 alt_vlan_ena;
+ u16 thead_wb;
+ u8 cpuid;
+ u8 head_wb_ena;
+ u16 qlen;
+ u8 tphrdesc_ena;
+ u8 tphrpacket_ena;
+ u8 tphwdesc_ena;
+ u64 head_wb_addr;
+ u32 crc;
+ u16 rdylist;
+ u8 rdylist_act;
+};
+
+/* for hsplit_0 field of Rx HMC context */
+enum i40e_hmc_obj_rx_hsplit_0 {
+ I40E_HMC_OBJ_RX_HSPLIT_0_NO_SPLIT = 0,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 = 1,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP = 2,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP = 4,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP = 8,
+};
+
+/* fcoe_cntx and fcoe_filt are for debugging purposes only */
+struct i40e_hmc_obj_fcoe_cntx {
+ u32 rsv[32];
+};
+
+struct i40e_hmc_obj_fcoe_filt {
+ u32 rsv[8];
+};
+
+/* Context sizes for LAN objects */
+enum i40e_hmc_lan_object_size {
+ I40E_HMC_LAN_OBJ_SZ_8 = 0x3,
+ I40E_HMC_LAN_OBJ_SZ_16 = 0x4,
+ I40E_HMC_LAN_OBJ_SZ_32 = 0x5,
+ I40E_HMC_LAN_OBJ_SZ_64 = 0x6,
+ I40E_HMC_LAN_OBJ_SZ_128 = 0x7,
+ I40E_HMC_LAN_OBJ_SZ_256 = 0x8,
+ I40E_HMC_LAN_OBJ_SZ_512 = 0x9,
+};
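+
+/*
+ * The enumerators above encode log2 of the object size in bytes (e.g.
+ * I40E_HMC_LAN_OBJ_SZ_128 == 0x7 because 1 << 7 == 128), matching the size
+ * exponents read from the GLHMC_*OBJSZ registers in i40e_init_lan_hmc().
+ */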
+
+#define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512
+#define I40E_HMC_OBJ_SIZE_TXQ 128
+#define I40E_HMC_OBJ_SIZE_RXQ 32
+#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 64
+#define I40E_HMC_OBJ_SIZE_FCOE_FILT 64
+
+enum i40e_hmc_lan_rsrc_type {
+ I40E_HMC_LAN_FULL = 0,
+ I40E_HMC_LAN_TX = 1,
+ I40E_HMC_LAN_RX = 2,
+ I40E_HMC_FCOE_CTX = 3,
+ I40E_HMC_FCOE_FILT = 4,
+ I40E_HMC_LAN_MAX = 5
+};
+
+enum i40e_hmc_model {
+ I40E_HMC_MODEL_DIRECT_PREFERRED = 0,
+ I40E_HMC_MODEL_DIRECT_ONLY = 1,
+ I40E_HMC_MODEL_PAGED_ONLY = 2,
+ I40E_HMC_MODEL_UNKNOWN,
+};
+
+struct i40e_hmc_lan_create_obj_info {
+ struct i40e_hmc_info *hmc_info;
+ u32 rsrc_type;
+ u32 start_idx;
+ u32 count;
+ enum i40e_sd_entry_type entry_type;
+ u64 direct_mode_sz;
+};
+
+struct i40e_hmc_lan_delete_obj_info {
+ struct i40e_hmc_info *hmc_info;
+ u32 rsrc_type;
+ u32 start_idx;
+ u32 count;
+};
+
+enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
+ u32 rxq_num, u32 fcoe_cntx_num,
+ u32 fcoe_filt_num);
+enum i40e_status_code i40e_configure_lan_hmc(struct i40e_hw *hw,
+ enum i40e_hmc_model model);
+enum i40e_status_code i40e_shutdown_lan_hmc(struct i40e_hw *hw);
+
+u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
+ u32 fcoe_cntx_num, u32 fcoe_filt_num);
+enum i40e_status_code i40e_get_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_txq *s);
+enum i40e_status_code i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue);
+enum i40e_status_code i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_txq *s);
+enum i40e_status_code i40e_get_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_rxq *s);
+enum i40e_status_code i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue);
+enum i40e_status_code i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_rxq *s);
+enum i40e_status_code i40e_create_lan_hmc_object(struct i40e_hw *hw,
+ struct i40e_hmc_lan_create_obj_info *info);
+enum i40e_status_code i40e_delete_lan_hmc_object(struct i40e_hw *hw,
+ struct i40e_hmc_lan_delete_obj_info *info);
+
+#endif /* _I40E_LAN_HMC_H_ */
diff --git a/sys/dev/ixl/i40e_nvm.c b/sys/dev/ixl/i40e_nvm.c
new file mode 100755
index 0000000..52d8602
--- /dev/null
+++ b/sys/dev/ixl/i40e_nvm.c
@@ -0,0 +1,481 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2014, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#include "i40e_prototype.h"
+
+/**
+ * i40e_init_nvm - Initialize NVM function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Sets up the function pointers and the NVM info structure.  Should be called
+ * once per NVM initialization, e.g. inside i40e_init_shared_code().
+ * Note that the term NVM is used here (and in all functions covered in this
+ * file) as an equivalent of the flash part mapped into the Shadow RAM (SR);
+ * the flash is always accessed through the Shadow RAM.
+ **/
+enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw)
+{
+ struct i40e_nvm_info *nvm = &hw->nvm;
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u32 fla, gens;
+ u8 sr_size;
+
+ DEBUGFUNC("i40e_init_nvm");
+
+ /* The SR size is stored regardless of the NVM programming mode,
+ * as blank mode may be used on the factory line.
+ */
+ gens = rd32(hw, I40E_GLNVM_GENS);
+ sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
+ I40E_GLNVM_GENS_SR_SIZE_SHIFT);
+ /* Switching to words (sr_size contains power of 2KB) */
+ nvm->sr_size = (1 << sr_size) * I40E_SR_WORDS_IN_1KB;
+
+ /* Check if we are in the normal or blank NVM programming mode */
+ fla = rd32(hw, I40E_GLNVM_FLA);
+ if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
+ /* Max NVM timeout */
+ nvm->timeout = I40E_MAX_NVM_TIMEOUT;
+ nvm->blank_nvm_mode = FALSE;
+ } else { /* Blank programming mode */
+ nvm->blank_nvm_mode = TRUE;
+ ret_code = I40E_ERR_NVM_BLANK_MODE;
+ DEBUGOUT("NVM init error: unsupported blank mode.\n");
+ }
+
+ return ret_code;
+}
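+
+/*
+ * Example: an SR_SIZE field of 4 in GLNVM_GENS describes a 2^4 = 16KB Shadow
+ * RAM, which the code above stores as 16 * I40E_SR_WORDS_IN_1KB 16-bit words
+ * in nvm->sr_size.
+ */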
+
+/**
+ * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
+ * @hw: pointer to the HW structure
+ * @access: NVM access type (read or write)
+ *
+ * This function requests NVM ownership for the given access type
+ * (read or write) via the relevant Admin Command.
+ **/
+enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
+ enum i40e_aq_resource_access_type access)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u64 gtime, timeout;
+ u64 time = 0;
+
+ DEBUGFUNC("i40e_acquire_nvm");
+
+ if (hw->nvm.blank_nvm_mode)
+ goto i40e_acquire_nvm_exit;
+
+ ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
+ 0, &time, NULL);
+ /* Reading the Global Device Timer */
+ gtime = rd32(hw, I40E_GLVFGEN_TIMER);
+
+ /* Store the timeout */
+ hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time) + gtime;
+
+ if (ret_code != I40E_SUCCESS) {
+ /* Set the polling timeout */
+ if (time > I40E_MAX_NVM_TIMEOUT)
+ timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT)
+ + gtime;
+ else
+ timeout = hw->nvm.hw_semaphore_timeout;
+ /* Poll until the current NVM owner times out */
+ while (gtime < timeout) {
+ i40e_msec_delay(10);
+ ret_code = i40e_aq_request_resource(hw,
+ I40E_NVM_RESOURCE_ID,
+ access, 0, &time,
+ NULL);
+ if (ret_code == I40E_SUCCESS) {
+ hw->nvm.hw_semaphore_timeout =
+ I40E_MS_TO_GTIME(time) + gtime;
+ break;
+ }
+ gtime = rd32(hw, I40E_GLVFGEN_TIMER);
+ }
+ if (ret_code != I40E_SUCCESS) {
+ hw->nvm.hw_semaphore_timeout = 0;
+ hw->nvm.hw_semaphore_wait =
+ I40E_MS_TO_GTIME(time) + gtime;
+ DEBUGOUT1("NVM acquire timed out, wait %llu ms before trying again.\n",
+ time);
+ }
+ }
+
+i40e_acquire_nvm_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_release_nvm - Generic request for releasing the NVM ownership
+ * @hw: pointer to the HW structure
+ *
+ * This function will release NVM resource via the proper Admin Command.
+ **/
+void i40e_release_nvm(struct i40e_hw *hw)
+{
+ DEBUGFUNC("i40e_release_nvm");
+
+ if (!hw->nvm.blank_nvm_mode)
+ i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+}
+
+/**
+ * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
+ * @hw: pointer to the HW structure
+ *
+ * Polls the SRCTL Shadow RAM register done bit.
+ **/
+static enum i40e_status_code i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
+ u32 srctl, wait_cnt;
+
+ DEBUGFUNC("i40e_poll_sr_srctl_done_bit");
+
+ /* Poll the I40E_GLNVM_SRCTL until the done bit is set */
+ for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
+ srctl = rd32(hw, I40E_GLNVM_SRCTL);
+ if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
+ ret_code = I40E_SUCCESS;
+ break;
+ }
+ i40e_usec_delay(5);
+ }
+ if (ret_code == I40E_ERR_TIMEOUT)
+ DEBUGOUT("Done bit in GLNVM_SRCTL not set");
+ return ret_code;
+}
+
+/**
+ * i40e_read_nvm_word - Reads Shadow RAM
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
+ **/
+enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+ u16 *data)
+{
+ enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
+ u32 sr_reg;
+
+ DEBUGFUNC("i40e_read_nvm_srctl");
+
+ if (offset >= hw->nvm.sr_size) {
+ DEBUGOUT("NVM read error: Offset beyond Shadow RAM limit.\n");
+ ret_code = I40E_ERR_PARAM;
+ goto read_nvm_exit;
+ }
+
+ /* Poll the done bit first */
+ ret_code = i40e_poll_sr_srctl_done_bit(hw);
+ if (ret_code == I40E_SUCCESS) {
+ /* Write the address and start reading */
+ sr_reg = (u32)(offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
+ (1 << I40E_GLNVM_SRCTL_START_SHIFT);
+ wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
+
+ /* Poll I40E_GLNVM_SRCTL until the done bit is set */
+ ret_code = i40e_poll_sr_srctl_done_bit(hw);
+ if (ret_code == I40E_SUCCESS) {
+ sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
+ *data = (u16)((sr_reg &
+ I40E_GLNVM_SRDATA_RDDATA_MASK)
+ >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
+ }
+ }
+ if (ret_code != I40E_SUCCESS)
+ DEBUGOUT1("NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
+ offset);
+
+read_nvm_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_read_nvm_buffer - Reads Shadow RAM buffer
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the Shadow RAM, one word at a time,
+ * using i40e_read_nvm_word(). The caller is expected to take NVM ownership
+ * before the buffer read and to release it afterwards.
+ **/
+enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u16 index, word;
+
+ DEBUGFUNC("i40e_read_nvm_buffer");
+
+ /* Loop thru the selected region */
+ for (word = 0; word < *words; word++) {
+ index = offset + word;
+ ret_code = i40e_read_nvm_word(hw, index, &data[word]);
+ if (ret_code != I40E_SUCCESS)
+ break;
+ }
+
+ /* Update the number of words read from the Shadow RAM */
+ *words = word;
+
+ return ret_code;
+}
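+
+/**
+ * Illustrative sketch only: a typical caller sequence for the routines in
+ * this file -- take NVM ownership, read a block of Shadow RAM words, then
+ * release ownership. The I40E_RESOURCE_READ access type (from i40e_type.h)
+ * and the helper name are assumptions for the example, not part of the
+ * shared code API.
+ **/
+#if 0
+static enum i40e_status_code
+example_read_sr_block(struct i40e_hw *hw, u16 offset, u16 *buf, u16 nwords)
+{
+ enum i40e_status_code status;
+ u16 words = nwords;
+
+ status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (status != I40E_SUCCESS)
+ return status;
+
+ /* words is updated with the number of words actually read */
+ status = i40e_read_nvm_buffer(hw, offset, &words, buf);
+
+ i40e_release_nvm(hw);
+ return status;
+}
+#endif
+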
+/**
+ * i40e_write_nvm_aq - Writes Shadow RAM
+ * @hw: pointer to the HW structure
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset in words from module start
+ * @words: number of words to write
+ * @data: buffer with words to write to the Shadow RAM
+ * @last_command: tells the AdminQ that this is the last command
+ *
+ * Writes a buffer of 16 bit words to the Shadow RAM using an admin command.
+ **/
+enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 words, void *data,
+ bool last_command)
+{
+ enum i40e_status_code ret_code = I40E_ERR_NVM;
+
+ DEBUGFUNC("i40e_write_nvm_aq");
+
+ /* Here we are checking the SR limit only for the flat memory model.
+ * We cannot do it for the module-based model, as we did not acquire
+ * the NVM resource yet (we cannot get the module pointer value).
+ * Firmware will check the module-based model.
+ */
+ if ((offset + words) > hw->nvm.sr_size)
+ DEBUGOUT("NVM write error: offset beyond Shadow RAM limit.\n");
+ else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
+ /* We can write only up to 4KB (one sector), in one AQ write */
+ DEBUGOUT("NVM write fail error: cannot write more than 4KB in a single write.\n");
+ else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
+ != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
+ /* A single write cannot spread over two sectors */
+ DEBUGOUT("NVM write error: cannot spread over two sectors in a single write.\n");
+ else
+ ret_code = i40e_aq_update_nvm(hw, module_pointer,
+ 2 * offset, /*bytes*/
+ 2 * words, /*bytes*/
+ data, last_command, NULL);
+
+ return ret_code;
+}
+
+/**
+ * i40e_write_nvm_word - Writes Shadow RAM word
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to write
+ * @data: word to write to the Shadow RAM
+ *
+ * Writes a 16 bit word to the Shadow RAM using i40e_write_nvm_aq().
+ * NVM ownership has to be acquired and released (on ARQ completion event
+ * reception) by the caller. To commit the Shadow RAM to NVM, the update
+ * checksum function should be called afterwards.
+ **/
+enum i40e_status_code i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
+ void *data)
+{
+ DEBUGFUNC("i40e_write_nvm_word");
+
+ /* Value 0x00 below means that we treat SR as a flat mem */
+ return i40e_write_nvm_aq(hw, 0x00, offset, 1, data, FALSE);
+}
+
+/**
+ * i40e_write_nvm_buffer - Writes Shadow RAM buffer
+ * @hw: pointer to the HW structure
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset of the Shadow RAM buffer to write
+ * @words: number of words to write
+ * @data: words to write to the Shadow RAM
+ *
+ * Writes a buffer of 16 bit words to the Shadow RAM using an admin command.
+ * NVM ownership must be acquired before calling this function and released
+ * by the caller on ARQ completion event reception. To commit the Shadow RAM
+ * to NVM, the update checksum function should be called afterwards.
+ **/
+enum i40e_status_code i40e_write_nvm_buffer(struct i40e_hw *hw,
+ u8 module_pointer, u32 offset,
+ u16 words, void *data)
+{
+ DEBUGFUNC("i40e_write_nvm_buffer");
+
+ /* Here we will only write one buffer as the size of the modules
+ * mirrored in the Shadow RAM is always less than 4K.
+ */
+ return i40e_write_nvm_aq(hw, module_pointer, offset, words,
+ data, FALSE);
+}
+
+/**
+ * i40e_calc_nvm_checksum - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ * @checksum: pointer to the checksum
+ *
+ * This function calculates a SW checksum that covers the whole 64kB shadow
+ * RAM except the VPD and PCIe ALT Auto-load modules. The structure and size
+ * of the VPD area are customer specific and unknown, so this function skips
+ * the maximum possible VPD size (1kB).
+ **/
+enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u16 pcie_alt_module = 0;
+ u16 checksum_local = 0;
+ u16 vpd_module = 0;
+ u16 word = 0;
+ u32 i = 0;
+
+ DEBUGFUNC("i40e_calc_nvm_checksum");
+
+ /* read pointer to VPD area */
+ ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
+ if (ret_code != I40E_SUCCESS) {
+ ret_code = I40E_ERR_NVM_CHECKSUM;
+ goto i40e_calc_nvm_checksum_exit;
+ }
+
+ /* read pointer to PCIe Alt Auto-load module */
+ ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
+ &pcie_alt_module);
+ if (ret_code != I40E_SUCCESS) {
+ ret_code = I40E_ERR_NVM_CHECKSUM;
+ goto i40e_calc_nvm_checksum_exit;
+ }
+
+ /* Calculate SW checksum that covers the whole 64kB shadow RAM
+ * except the VPD and PCIe ALT Auto-load modules
+ */
+ for (i = 0; i < hw->nvm.sr_size; i++) {
+ /* Skip Checksum word */
+ if (i == I40E_SR_SW_CHECKSUM_WORD)
+ i++;
+ /* Skip VPD module (convert byte size to word count) */
+ if (i == (u32)vpd_module) {
+ i += (I40E_SR_VPD_MODULE_MAX_SIZE / 2);
+ if (i >= hw->nvm.sr_size)
+ break;
+ }
+ /* Skip PCIe ALT module (convert byte size to word count) */
+ if (i == (u32)pcie_alt_module) {
+ i += (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2);
+ if (i >= hw->nvm.sr_size)
+ break;
+ }
+
+ ret_code = i40e_read_nvm_word(hw, (u16)i, &word);
+ if (ret_code != I40E_SUCCESS) {
+ ret_code = I40E_ERR_NVM_CHECKSUM;
+ goto i40e_calc_nvm_checksum_exit;
+ }
+ checksum_local += word;
+ }
+
+ *checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;
+
+i40e_calc_nvm_checksum_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_update_nvm_checksum - Updates the NVM checksum
+ * @hw: pointer to hardware structure
+ *
+ * NVM ownership must be acquired before calling this function and released
+ * by the caller on ARQ completion event reception.
+ * This function will commit the Shadow RAM to NVM.
+ **/
+enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u16 checksum;
+
+ DEBUGFUNC("i40e_update_nvm_checksum");
+
+ ret_code = i40e_calc_nvm_checksum(hw, &checksum);
+ if (ret_code == I40E_SUCCESS)
+ ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
+ 1, &checksum, TRUE);
+
+ return ret_code;
+}
+
+/**
+ * i40e_validate_nvm_checksum - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum: calculated checksum
+ *
+ * Performs the checksum calculation and validates the NVM SW checksum. If
+ * the caller does not need the checksum value, @checksum may be NULL.
+ **/
+enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
+ u16 *checksum)
+{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u16 checksum_sr = 0;
+ u16 checksum_local = 0;
+
+ DEBUGFUNC("i40e_validate_nvm_checksum");
+
+ ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
+ if (ret_code != I40E_SUCCESS)
+ goto i40e_validate_nvm_checksum_exit;
+
+ /* Do not use i40e_read_nvm_word() because we do not want to take
+ * the synchronization semaphores twice here.
+ */
+ i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
+
+ /* Verify read checksum from EEPROM is the same as
+ * calculated checksum
+ */
+ if (checksum_local != checksum_sr)
+ ret_code = I40E_ERR_NVM_CHECKSUM;
+
+ /* If the user cares, return the calculated checksum */
+ if (checksum)
+ *checksum = checksum_local;
+
+i40e_validate_nvm_checksum_exit:
+ return ret_code;
+}
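+
+/**
+ * Illustrative sketch only: how an init path might use the routine above to
+ * validate the NVM checksum and report a mismatch. The wrapper function is
+ * hypothetical and not part of the shared code.
+ **/
+#if 0
+static void
+example_check_nvm(struct i40e_hw *hw)
+{
+ enum i40e_status_code status;
+ u16 checksum = 0;
+
+ status = i40e_validate_nvm_checksum(hw, &checksum);
+ if (status == I40E_ERR_NVM_CHECKSUM)
+ DEBUGOUT1("NVM checksum mismatch, computed 0x%04x\n", checksum);
+ else if (status != I40E_SUCCESS)
+ DEBUGOUT("NVM checksum validation could not complete\n");
+}
+#endif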
diff --git a/sys/dev/ixl/i40e_osdep.c b/sys/dev/ixl/i40e_osdep.c
new file mode 100755
index 0000000..30e2e57
--- /dev/null
+++ b/sys/dev/ixl/i40e_osdep.c
@@ -0,0 +1,198 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2014, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#include <machine/stdarg.h>
+
+#include "ixl.h"
+
+/********************************************************************
+ * Manage DMA'able memory.
+ *******************************************************************/
+static void
+i40e_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
+{
+ if (error)
+ return;
+ *(bus_addr_t *) arg = segs->ds_addr;
+ return;
+}
+
+i40e_status
+i40e_allocate_virt(struct i40e_hw *hw, struct i40e_virt_mem *m, u32 size)
+{
+ m->va = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
+ return(m->va == NULL);
+}
+
+i40e_status
+i40e_free_virt(struct i40e_hw *hw, struct i40e_virt_mem *m)
+{
+ free(m->va, M_DEVBUF);
+ return(0);
+}
+
+i40e_status
+i40e_allocate_dma(struct i40e_hw *hw, struct i40e_dma_mem *dma,
+ bus_size_t size, u32 alignment)
+{
+ device_t dev = ((struct i40e_osdep *)hw->back)->dev;
+ int err;
+
+
+ err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
+ alignment, 0, /* alignment, bounds */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ size, /* maxsize */
+ 1, /* nsegments */
+ size, /* maxsegsize */
+ BUS_DMA_ALLOCNOW, /* flags */
+ NULL, /* lockfunc */
+ NULL, /* lockfuncarg */
+ &dma->tag);
+ if (err != 0) {
+ device_printf(dev,
+ "i40e_allocate_dma: bus_dma_tag_create failed, "
+ "error %u\n", err);
+ goto fail_0;
+ }
+ err = bus_dmamem_alloc(dma->tag, (void **)&dma->va,
+ BUS_DMA_NOWAIT | BUS_DMA_ZERO, &dma->map);
+ if (err != 0) {
+ device_printf(dev,
+ "i40e_allocate_dma: bus_dmamem_alloc failed, "
+ "error %u\n", err);
+ goto fail_1;
+ }
+ err = bus_dmamap_load(dma->tag, dma->map, dma->va,
+ size,
+ i40e_dmamap_cb,
+ &dma->pa,
+ BUS_DMA_NOWAIT);
+ if (err != 0) {
+ device_printf(dev,
+ "i40e_allocate_dma: bus_dmamap_load failed, "
+ "error %u\n", err);
+ goto fail_2;
+ }
+ dma->size = size;
+ bus_dmamap_sync(dma->tag, dma->map,
+ BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+ return (0);
+fail_2:
+ bus_dmamem_free(dma->tag, dma->va, dma->map);
+fail_1:
+ bus_dma_tag_destroy(dma->tag);
+fail_0:
+ dma->map = NULL;
+ dma->tag = NULL;
+ return (err);
+}
+
+i40e_status
+i40e_free_dma(struct i40e_hw *hw, struct i40e_dma_mem *dma)
+{
+ bus_dmamap_sync(dma->tag, dma->map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(dma->tag, dma->map);
+ bus_dmamem_free(dma->tag, dma->va, dma->map);
+ bus_dma_tag_destroy(dma->tag);
+ return (0);
+}
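+
+/*
+ * Illustrative sketch only: how the shared code's i40e_allocate_dma_mem()/
+ * i40e_free_dma_mem() wrappers (see i40e_osdep.h) end up exercising the two
+ * helpers above, e.g. for a descriptor ring. The wrapper function, size and
+ * alignment are example values, not taken from the driver.
+ */
+#if 0
+static i40e_status
+example_ring_alloc(struct i40e_hw *hw, struct i40e_dma_mem *mem)
+{
+ i40e_status status;
+
+ /* one 4KB, 4KB-aligned buffer; mem->va and mem->pa are set on success */
+ status = i40e_allocate_dma(hw, mem, 4096, 4096);
+ if (status != 0)
+ return (status);
+ /* ... hand mem->pa to the hardware, use mem->va from the host ... */
+ i40e_free_dma(hw, mem);
+ return (0);
+}
+#endif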
+
+void
+i40e_init_spinlock(struct i40e_spinlock *lock)
+{
+ mtx_init(&lock->mutex, "mutex",
+ MTX_NETWORK_LOCK, MTX_DEF | MTX_DUPOK);
+}
+
+void
+i40e_acquire_spinlock(struct i40e_spinlock *lock)
+{
+ mtx_lock(&lock->mutex);
+}
+
+void
+i40e_release_spinlock(struct i40e_spinlock *lock)
+{
+ mtx_unlock(&lock->mutex);
+}
+
+void
+i40e_destroy_spinlock(struct i40e_spinlock *lock)
+{
+ mtx_destroy(&lock->mutex);
+}
+
+/*
+** i40e_debug_d - OS dependent version of shared code debug printing
+*/
+void i40e_debug_d(void *hw, u32 mask, char *fmt, ...)
+{
+ char buf[512];
+ va_list args;
+
+ if (!(mask & ((struct i40e_hw *)hw)->debug_mask))
+ return;
+
+ va_start(args, fmt);
+ vsnprintf(buf, sizeof(buf), fmt, args);
+ va_end(args);
+
+ /* the debug string is already formatted with a newline */
+ printf("%s", buf);
+}
+
+u16
+i40e_read_pci_cfg(struct i40e_hw *hw, u32 reg)
+{
+ u16 value;
+
+ value = pci_read_config(((struct i40e_osdep *)hw->back)->dev,
+ reg, 2);
+
+ return (value);
+}
+
+void
+i40e_write_pci_cfg(struct i40e_hw *hw, u32 reg, u16 value)
+{
+ pci_write_config(((struct i40e_osdep *)hw->back)->dev,
+ reg, value, 2);
+
+ return;
+}
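+
+/*
+ * Illustrative sketch only: reading and updating the PCI command register
+ * through the two accessors above. PCI_COMMAND_REGISTER and
+ * CMD_MEM_WRT_INVALIDATE come from i40e_osdep.h; whether the shared code
+ * actually toggles this bit is not implied here.
+ */
+#if 0
+static void
+example_enable_mwi(struct i40e_hw *hw)
+{
+ u16 cmd;
+
+ cmd = i40e_read_pci_cfg(hw, PCI_COMMAND_REGISTER);
+ if ((cmd & CMD_MEM_WRT_INVALIDATE) == 0)
+ i40e_write_pci_cfg(hw, PCI_COMMAND_REGISTER,
+ cmd | CMD_MEM_WRT_INVALIDATE);
+}
+#endif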
+
diff --git a/sys/dev/ixl/i40e_osdep.h b/sys/dev/ixl/i40e_osdep.h
new file mode 100755
index 0000000..5479dd2
--- /dev/null
+++ b/sys/dev/ixl/i40e_osdep.h
@@ -0,0 +1,230 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2014, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _I40E_OSDEP_H_
+#define _I40E_OSDEP_H_
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/endian.h>
+#include <sys/mbuf.h>
+#include <sys/protosw.h>
+#include <sys/socket.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <machine/bus.h>
+#include <sys/rman.h>
+#include <machine/resource.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <machine/clock.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcireg.h>
+
+#define ASSERT(x) if (!(x)) panic("IXL: assertion failed: %s", #x)
+
+#define i40e_usec_delay(x) DELAY(x)
+#define i40e_msec_delay(x) DELAY(1000*(x))
+
+#define DBG 0
+#define MSGOUT(S, A, B) printf(S "\n", A, B)
+#define DEBUGFUNC(F) DEBUGOUT(F);
+#if DBG
+ #define DEBUGOUT(S) printf(S "\n")
+ #define DEBUGOUT1(S,A) printf(S "\n",A)
+ #define DEBUGOUT2(S,A,B) printf(S "\n",A,B)
+ #define DEBUGOUT3(S,A,B,C) printf(S "\n",A,B,C)
+ #define DEBUGOUT6(S,A,B,C,D,E,F) printf(S "\n",A,B,C,D,E,F)
+ #define DEBUGOUT7(S,A,B,C,D,E,F,G) printf(S "\n",A,B,C,D,E,F,G)
+#else
+ #define DEBUGOUT(S)
+ #define DEBUGOUT1(S,A)
+ #define DEBUGOUT2(S,A,B)
+ #define DEBUGOUT3(S,A,B,C)
+ #define DEBUGOUT6(S,A,B,C,D,E,F)
+ #define DEBUGOUT7(S,A,B,C,D,E,F,G)
+#endif
+
+#define UNREFERENCED_XPARAMETER
+#define UNREFERENCED_PARAMETER(_p)
+#define UNREFERENCED_1PARAMETER(_p)
+#define UNREFERENCED_2PARAMETER(_p, _q)
+#define UNREFERENCED_3PARAMETER(_p, _q, _r)
+#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s)
+
+#define STATIC static
+#define INLINE inline
+
+#define FALSE 0
+#define false 0 /* shared code requires this */
+#define TRUE 1
+#define true 1
+#define CMD_MEM_WRT_INVALIDATE 0x0010 /* BIT_4 */
+#define PCI_COMMAND_REGISTER PCIR_COMMAND
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+#define i40e_memset(a, b, c, d) memset((a), (b), (c))
+#define i40e_memcpy(a, b, c, d) memcpy((a), (b), (c))
+
+#define CPU_TO_LE16(o) htole16(o)
+#define CPU_TO_LE32(s) htole32(s)
+#define CPU_TO_LE64(h) htole64(h)
+#define LE16_TO_CPU(a) le16toh(a)
+#define LE32_TO_CPU(c) le32toh(c)
+#define LE64_TO_CPU(k) le64toh(k)
+
+#define I40E_NTOHS(a) ntohs(a)
+#define I40E_NTOHL(a) ntohl(a)
+#define I40E_HTONS(a) htons(a)
+#define I40E_HTONL(a) htonl(a)
+
+#define FIELD_SIZEOF(x, y) (sizeof(((x*)0)->y))
+
+typedef uint8_t u8;
+typedef int8_t s8;
+typedef uint16_t u16;
+typedef int16_t s16;
+typedef uint32_t u32;
+typedef int32_t s32;
+typedef uint64_t u64;
+
+/* long string relief */
+typedef enum i40e_status_code i40e_status;
+
+#define __le16 u16
+#define __le32 u32
+#define __le64 u64
+#define __be16 u16
+#define __be32 u32
+#define __be64 u64
+
+/* SW spinlock */
+struct i40e_spinlock {
+ struct mtx mutex;
+};
+
+#define le16_to_cpu
+
+static __inline
+void prefetch(void *x)
+{
+ __asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
+}
+
+struct i40e_osdep
+{
+ bus_space_tag_t mem_bus_space_tag;
+ bus_space_handle_t mem_bus_space_handle;
+ bus_size_t mem_bus_space_size;
+ struct device *dev;
+};
+
+struct i40e_dma_mem {
+ void *va;
+ u64 pa;
+ bus_dma_tag_t tag;
+ bus_dmamap_t map;
+ bus_dma_segment_t seg;
+ bus_size_t size;
+ int nseg;
+ int flags;
+};
+
+struct i40e_hw; /* forward decl */
+u16 i40e_read_pci_cfg(struct i40e_hw *, u32);
+void i40e_write_pci_cfg(struct i40e_hw *, u32, u16);
+
+#define i40e_allocate_dma_mem(h, m, unused, s, a) i40e_allocate_dma(h, m, s, a)
+#define i40e_free_dma_mem(h, m) i40e_free_dma(h, m)
+
+#define i40e_debug(h, m, s, ...) i40e_debug_d(h, m, s, ##__VA_ARGS__)
+extern void i40e_debug_d(void *hw, u32 mask, char *fmt_str, ...);
+
+struct i40e_virt_mem {
+ void *va;
+ u32 size;
+};
+#define i40e_allocate_virt_mem(h, m, s) i40e_allocate_virt(h, m, s)
+#define i40e_free_virt_mem(h, m) i40e_free_virt(h, m)
+
+/*
+** This hardware supports either 16 or 32 byte rx descriptors;
+** we default here to the larger size.
+*/
+#define i40e_rx_desc i40e_32byte_rx_desc
+
+static __inline uint32_t
+rd32_osdep(struct i40e_osdep *osdep, uint32_t reg)
+{
+
+ KASSERT(reg < osdep->mem_bus_space_size,
+ ("ixl: register offset %#jx too large (max is %#jx",
+ (uintmax_t)a, (uintmax_t)osdep->mem_bus_space_size));
+
+ return (bus_space_read_4(osdep->mem_bus_space_tag,
+ osdep->mem_bus_space_handle, reg));
+}
+
+static __inline void
+wr32_osdep(struct i40e_osdep *osdep, uint32_t reg, uint32_t value)
+{
+
+ KASSERT(reg < osdep->mem_bus_space_size,
+ ("ixl: register offset %#jx too large (max is %#jx",
+ (uintmax_t)a, (uintmax_t)osdep->mem_bus_space_size));
+
+ bus_space_write_4(osdep->mem_bus_space_tag,
+ osdep->mem_bus_space_handle, reg, value);
+}
+
+#define rd32(a, reg) rd32_osdep((a)->back, (reg))
+#define wr32(a, reg, value) wr32_osdep((a)->back, (reg), (value))
+
+#define rd64(a, reg) (\
+ bus_space_read_8( ((struct i40e_osdep *)(a)->back)->mem_bus_space_tag, \
+ ((struct i40e_osdep *)(a)->back)->mem_bus_space_handle, \
+ reg))
+
+#define wr64(a, reg, value) (\
+ bus_space_write_8( ((struct i40e_osdep *)(a)->back)->mem_bus_space_tag, \
+ ((struct i40e_osdep *)(a)->back)->mem_bus_space_handle, \
+ reg, value))
+
+#define ixl_flush(a) (\
+ bus_space_read_4( ((struct i40e_osdep *)(a)->back)->mem_bus_space_tag, \
+ ((struct i40e_osdep *)(a)->back)->mem_bus_space_handle, \
+ I40E_GLGEN_STAT))
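+
+/*
+** Illustrative sketch only: how the accessors above are typically combined,
+** e.g. posting a new tail value through the hw handle and forcing the
+** posted write out with a read-back via ixl_flush(). The register argument
+** is a placeholder; no specific register or call site is implied.
+*/
+#if 0
+static __inline void
+example_bump_tail(struct i40e_hw *hw, u32 tail_reg, u32 val)
+{
+ wr32(hw, tail_reg, val); /* write the new tail */
+ ixl_flush(hw); /* read I40E_GLGEN_STAT to flush the posted write */
+}
+#endif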
+
+#endif /* _I40E_OSDEP_H_ */
diff --git a/sys/dev/ixl/i40e_prototype.h b/sys/dev/ixl/i40e_prototype.h
new file mode 100755
index 0000000..db9e3cc
--- /dev/null
+++ b/sys/dev/ixl/i40e_prototype.h
@@ -0,0 +1,427 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2014, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _I40E_PROTOTYPE_H_
+#define _I40E_PROTOTYPE_H_
+
+#include "i40e_type.h"
+#include "i40e_alloc.h"
+#include "i40e_virtchnl.h"
+
+/* Prototypes for shared code functions that are not in
+ * the standard function pointer structures. These exist
+ * mostly because they are needed even before the init
+ * has happened, and they assist in the early SW and FW
+ * setup.
+ */
+
+/* adminq functions */
+enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw);
+enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw);
+enum i40e_status_code i40e_init_asq(struct i40e_hw *hw);
+enum i40e_status_code i40e_init_arq(struct i40e_hw *hw);
+enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw);
+enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw);
+enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw);
+enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw);
+u16 i40e_clean_asq(struct i40e_hw *hw);
+void i40e_free_adminq_asq(struct i40e_hw *hw);
+void i40e_free_adminq_arq(struct i40e_hw *hw);
+void i40e_adminq_init_ring_data(struct i40e_hw *hw);
+enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
+ struct i40e_arq_event_info *e,
+ u16 *events_pending);
+enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details);
+bool i40e_asq_done(struct i40e_hw *hw);
+
+/* debug function for adminq */
+void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
+ void *desc, void *buffer, u16 buf_len);
+
+void i40e_idle_aq(struct i40e_hw *hw);
+void i40e_resume_aq(struct i40e_hw *hw);
+bool i40e_check_asq_alive(struct i40e_hw *hw);
+enum i40e_status_code i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
+
+
+u32 i40e_led_get(struct i40e_hw *hw);
+void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
+
+/* admin send queue commands */
+
+enum i40e_status_code i40e_aq_get_firmware_version(struct i40e_hw *hw,
+ u16 *fw_major_version, u16 *fw_minor_version,
+ u16 *api_major_version, u16 *api_minor_version,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_debug_write_register(struct i40e_hw *hw,
+ u32 reg_addr, u64 reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
+ bool qualified_modules, bool report_init,
+ struct i40e_aq_get_phy_abilities_resp *abilities,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
+ struct i40e_aq_set_phy_config *config,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
+ bool atomic_reset);
+enum i40e_status_code i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_mac_config(struct i40e_hw *hw,
+ u16 max_frame_size, bool crc_en, u16 pacing,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_local_advt_reg(struct i40e_hw *hw,
+ u64 *advt_reg,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_partner_advt(struct i40e_hw *hw,
+ u64 *advt_reg,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_lb_modes(struct i40e_hw *hw, u16 lb_modes,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_link_restart_an(struct i40e_hw *hw,
+ bool enable_link, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
+ bool enable_lse, struct i40e_link_status *link,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw,
+ bool enable_lse);
+enum i40e_status_code i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
+ u64 advt_reg,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_send_driver_version(struct i40e_hw *hw,
+ struct i40e_driver_version *dv,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_vsi(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
+ u16 vsi_id, bool set_filter,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
+ u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
+ u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_update_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
+ u16 downlink_seid, u8 enabled_tc,
+ bool default_port, bool enable_l2_filtering,
+ u16 *pveb_seid,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_veb_parameters(struct i40e_hw *hw,
+ u16 veb_seid, u16 *switch_id, bool *floating,
+ u16 *statistic_index, u16 *vebs_used,
+ u16 *vebs_free,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_aqc_add_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_aqc_remove_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_vlan(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_aqc_add_remove_vlan_element_data *v_list,
+ u8 count, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_remove_vlan(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_aqc_add_remove_vlan_element_data *v_list,
+ u8 count, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
+ u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_switch_config(struct i40e_hw *hw,
+ struct i40e_aqc_get_switch_config_resp *buf,
+ u16 buf_size, u16 *start_seid,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_request_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ enum i40e_aq_resource_access_type access,
+ u8 sdp_number, u64 *timeout,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_release_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ u8 sdp_number,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, bool last_command,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_discover_capabilities(struct i40e_hw *hw,
+ void *buff, u16 buff_size, u16 *data_size,
+ enum i40e_admin_queue_opc list_type_opc,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
+ u8 mib_type, void *buff, u16 buff_size,
+ u16 *local_len, u16 *remote_len,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
+ bool enable_update,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_lldp_tlv(struct i40e_hw *hw, u8 bridge_type,
+ void *buff, u16 buff_size, u16 tlv_len,
+ u16 *mib_len,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_update_lldp_tlv(struct i40e_hw *hw,
+ u8 bridge_type, void *buff, u16 buff_size,
+ u16 old_len, u16 new_len, u16 offset,
+ u16 *mib_len,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_delete_lldp_tlv(struct i40e_hw *hw,
+ u8 bridge_type, void *buff, u16 buff_size,
+ u16 tlv_len, u16 *mib_len,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
+ u16 udp_port, u8 protocol_index,
+ u8 *filter_index,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_switch_resource_alloc(struct i40e_hw *hw,
+ u8 *num_entries,
+ struct i40e_aqc_switch_resource_alloc_element_resp *buf,
+ u16 count,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_pvirt(struct i40e_hw *hw, u16 flags,
+ u16 mac_seid, u16 vsi_seid,
+ u16 *ret_seid);
+enum i40e_status_code i40e_aq_add_tag(struct i40e_hw *hw, bool direct_to_queue,
+ u16 vsi_seid, u16 tag, u16 queue_num,
+ u16 *tags_used, u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_remove_tag(struct i40e_hw *hw, u16 vsi_seid,
+ u16 tag, u16 *tags_used, u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_mcast_etag(struct i40e_hw *hw, u16 pe_seid,
+ u16 etag, u8 num_tags_in_buf, void *buf,
+ u16 *tags_used, u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_remove_mcast_etag(struct i40e_hw *hw, u16 pe_seid,
+ u16 etag, u16 *tags_used, u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_update_tag(struct i40e_hw *hw, u16 vsi_seid,
+ u16 old_tag, u16 new_tag, u16 *tags_used,
+ u16 *tags_free,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_statistics(struct i40e_hw *hw, u16 seid,
+ u16 vlan_id, u16 *stat_index,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_remove_statistics(struct i40e_hw *hw, u16 seid,
+ u16 vlan_id, u16 stat_index,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_port_parameters(struct i40e_hw *hw,
+ u16 bad_frame_vsi, bool save_bad_pac,
+ bool pad_short_pac, bool double_vlan,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_mac_address_write(struct i40e_hw *hw,
+ u16 flags, u8 *mac_addr,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+ u16 seid, u16 credit, u8 max_credit,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_dcb_ignore_pfc(struct i40e_hw *hw,
+ u8 tcmap, bool request, u8 *tcmap_ret,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_hmc_resource_profile(struct i40e_hw *hw,
+ enum i40e_aq_hmc_profile *profile,
+ u8 *pe_vf_enabled_count,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_config_switch_comp_ets_bw_limit(
+ struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_configure_switching_comp_ets_bw_limit_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_config_vsi_ets_sla_bw_limit(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_vsi_ets_sla_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_dcb_updated(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
+ enum i40e_aq_hmc_profile profile,
+ u8 pe_vf_enabled_count,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
+ u16 seid, u16 credit, u8 max_bw,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_query_port_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_port_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_resume_port_tx(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw,
+ u16 vsi,
+ struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
+ u8 filter_count);
+
+enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
+ u16 vsi,
+ struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
+ u8 filter_count);
+
+enum i40e_status_code i40e_aq_alternate_read(struct i40e_hw *hw,
+ u32 reg_addr0, u32 *reg_val0,
+ u32 reg_addr1, u32 *reg_val1);
+enum i40e_status_code i40e_aq_alternate_read_indirect(struct i40e_hw *hw,
+ u32 addr, u32 dw_count, void *buffer);
+enum i40e_status_code i40e_aq_alternate_write(struct i40e_hw *hw,
+ u32 reg_addr0, u32 reg_val0,
+ u32 reg_addr1, u32 reg_val1);
+enum i40e_status_code i40e_aq_alternate_write_indirect(struct i40e_hw *hw,
+ u32 addr, u32 dw_count, void *buffer);
+enum i40e_status_code i40e_aq_alternate_clear(struct i40e_hw *hw);
+enum i40e_status_code i40e_aq_alternate_write_done(struct i40e_hw *hw,
+ u8 bios_mode, bool *reset_needed);
+enum i40e_status_code i40e_aq_set_oem_mode(struct i40e_hw *hw,
+ u8 oem_mode);
+
+/* i40e_common */
+enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw);
+enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw);
+void i40e_clear_hw(struct i40e_hw *hw);
+void i40e_clear_pxe_mode(struct i40e_hw *hw);
+bool i40e_get_link_status(struct i40e_hw *hw);
+enum i40e_status_code i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
+enum i40e_status_code i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
+ u32 *max_bw, u32 *min_bw, bool *min_valid, bool *max_valid);
+enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
+ struct i40e_aqc_configure_partition_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
+void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
+enum i40e_status_code i40e_validate_mac_addr(u8 *mac_addr);
+enum i40e_aq_link_speed i40e_get_link_speed(struct i40e_hw *hw);
+/* prototype for functions used for NVM access */
+enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw);
+enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
+ enum i40e_aq_resource_access_type access);
+void i40e_release_nvm(struct i40e_hw *hw);
+enum i40e_status_code i40e_read_nvm_srrd(struct i40e_hw *hw, u16 offset,
+ u16 *data);
+enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+ u16 *data);
+enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data);
+enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module,
+ u32 offset, u16 words, void *data,
+ bool last_command);
+enum i40e_status_code i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
+ void *data);
+enum i40e_status_code i40e_write_nvm_buffer(struct i40e_hw *hw, u8 module,
+ u32 offset, u16 words, void *data);
+enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum);
+enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw);
+enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
+ u16 *checksum);
+enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *);
+void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
+
+enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw);
+
+extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
+
+static INLINE struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
+{
+ return i40e_ptype_lookup[ptype];
+}
+
+/* prototype for functions used for SW spinlocks */
+void i40e_init_spinlock(struct i40e_spinlock *sp);
+void i40e_acquire_spinlock(struct i40e_spinlock *sp);
+void i40e_release_spinlock(struct i40e_spinlock *sp);
+void i40e_destroy_spinlock(struct i40e_spinlock *sp);
+
+/* i40e_common for VF drivers */
+void i40e_vf_parse_hw_config(struct i40e_hw *hw,
+ struct i40e_virtchnl_vf_resource *msg);
+enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw);
+enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
+ enum i40e_virtchnl_ops v_opcode,
+ enum i40e_status_code v_retval,
+ u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_set_filter_control(struct i40e_hw *hw,
+ struct i40e_filter_control_settings *settings);
+enum i40e_status_code i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+ u8 *mac_addr, u16 ethtype, u16 flags,
+ u16 vsi_seid, u16 queue, bool is_add,
+ struct i40e_control_filter_stats *stats,
+ struct i40e_asq_cmd_details *cmd_details);
+#endif /* _I40E_PROTOTYPE_H_ */
diff --git a/sys/dev/ixl/i40e_register.h b/sys/dev/ixl/i40e_register.h
new file mode 100755
index 0000000..b6364a0
--- /dev/null
+++ b/sys/dev/ixl/i40e_register.h
@@ -0,0 +1,3378 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2014, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _I40E_REGISTER_H_
+#define _I40E_REGISTER_H_
+
+
+#define I40E_GL_ARQBAH 0x000801C0 /* Reset: EMPR */
+#define I40E_GL_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_GL_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAH_ARQBAH_SHIFT)
+#define I40E_GL_ARQBAL 0x000800C0 /* Reset: EMPR */
+#define I40E_GL_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_GL_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAL_ARQBAL_SHIFT)
+#define I40E_GL_ARQH 0x000803C0 /* Reset: EMPR */
+#define I40E_GL_ARQH_ARQH_SHIFT 0
+#define I40E_GL_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_GL_ARQH_ARQH_SHIFT)
+#define I40E_GL_ARQT 0x000804C0 /* Reset: EMPR */
+#define I40E_GL_ARQT_ARQT_SHIFT 0
+#define I40E_GL_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_GL_ARQT_ARQT_SHIFT)
+#define I40E_GL_ATQBAH 0x00080140 /* Reset: EMPR */
+#define I40E_GL_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_GL_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAH_ATQBAH_SHIFT)
+#define I40E_GL_ATQBAL 0x00080040 /* Reset: EMPR */
+#define I40E_GL_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_GL_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAL_ATQBAL_SHIFT)
+#define I40E_GL_ATQH 0x00080340 /* Reset: EMPR */
+#define I40E_GL_ATQH_ATQH_SHIFT 0
+#define I40E_GL_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_GL_ATQH_ATQH_SHIFT)
+#define I40E_GL_ATQLEN 0x00080240 /* Reset: EMPR */
+#define I40E_GL_ATQLEN_ATQLEN_SHIFT 0
+#define I40E_GL_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_GL_ATQLEN_ATQLEN_SHIFT)
+#define I40E_GL_ATQLEN_ATQVFE_SHIFT 28
+#define I40E_GL_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQVFE_SHIFT)
+#define I40E_GL_ATQLEN_ATQOVFL_SHIFT 29
+#define I40E_GL_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_GL_ATQLEN_ATQCRIT_SHIFT 30
+#define I40E_GL_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_GL_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_GL_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_GL_ATQT 0x00080440 /* Reset: EMPR */
+#define I40E_GL_ATQT_ATQT_SHIFT 0
+#define I40E_GL_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_GL_ATQT_ATQT_SHIFT)
+#define I40E_PF_ARQBAH 0x00080180 /* Reset: EMPR */
+#define I40E_PF_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_PF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAH_ARQBAH_SHIFT)
+#define I40E_PF_ARQBAL 0x00080080 /* Reset: EMPR */
+#define I40E_PF_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_PF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAL_ARQBAL_SHIFT)
+#define I40E_PF_ARQH 0x00080380 /* Reset: EMPR */
+#define I40E_PF_ARQH_ARQH_SHIFT 0
+#define I40E_PF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_PF_ARQH_ARQH_SHIFT)
+#define I40E_PF_ARQLEN 0x00080280 /* Reset: EMPR */
+#define I40E_PF_ARQLEN_ARQLEN_SHIFT 0
+#define I40E_PF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ARQLEN_ARQLEN_SHIFT)
+#define I40E_PF_ARQLEN_ARQVFE_SHIFT 28
+#define I40E_PF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQVFE_SHIFT)
+#define I40E_PF_ARQLEN_ARQOVFL_SHIFT 29
+#define I40E_PF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQOVFL_SHIFT)
+#define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30
+#define I40E_PF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT)
+#define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31
+#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */
+#define I40E_PF_ARQT_ARQT_SHIFT 0
+#define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT)
+#define I40E_PF_ATQBAH 0x00080100 /* Reset: EMPR */
+#define I40E_PF_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_PF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAH_ATQBAH_SHIFT)
+#define I40E_PF_ATQBAL 0x00080000 /* Reset: EMPR */
+#define I40E_PF_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_PF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAL_ATQBAL_SHIFT)
+#define I40E_PF_ATQH 0x00080300 /* Reset: EMPR */
+#define I40E_PF_ATQH_ATQH_SHIFT 0
+#define I40E_PF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_PF_ATQH_ATQH_SHIFT)
+#define I40E_PF_ATQLEN 0x00080200 /* Reset: EMPR */
+#define I40E_PF_ATQLEN_ATQLEN_SHIFT 0
+#define I40E_PF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ATQLEN_ATQLEN_SHIFT)
+#define I40E_PF_ATQLEN_ATQVFE_SHIFT 28
+#define I40E_PF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQVFE_SHIFT)
+#define I40E_PF_ATQLEN_ATQOVFL_SHIFT 29
+#define I40E_PF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30
+#define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */
+#define I40E_PF_ATQT_ATQT_SHIFT 0
+#define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT)
+#define I40E_VF_ARQBAH(_VF) (0x00081400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ARQBAH_MAX_INDEX 127
+#define I40E_VF_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_VF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH_ARQBAH_SHIFT)
+#define I40E_VF_ARQBAL(_VF) (0x00080C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ARQBAL_MAX_INDEX 127
+#define I40E_VF_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_VF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL_ARQBAL_SHIFT)
+#define I40E_VF_ARQH(_VF) (0x00082400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ARQH_MAX_INDEX 127
+#define I40E_VF_ARQH_ARQH_SHIFT 0
+#define I40E_VF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH_ARQH_SHIFT)
+#define I40E_VF_ARQLEN(_VF) (0x00081C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ARQLEN_MAX_INDEX 127
+#define I40E_VF_ARQLEN_ARQLEN_SHIFT 0
+#define I40E_VF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN_ARQLEN_SHIFT)
+#define I40E_VF_ARQLEN_ARQVFE_SHIFT 28
+#define I40E_VF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQVFE_SHIFT)
+#define I40E_VF_ARQLEN_ARQOVFL_SHIFT 29
+#define I40E_VF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQOVFL_SHIFT)
+#define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30
+#define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT)
+#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31
+#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ARQT_MAX_INDEX 127
+#define I40E_VF_ARQT_ARQT_SHIFT 0
+#define I40E_VF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT_ARQT_SHIFT)
+#define I40E_VF_ATQBAH(_VF) (0x00081000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ATQBAH_MAX_INDEX 127
+#define I40E_VF_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_VF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH_ATQBAH_SHIFT)
+#define I40E_VF_ATQBAL(_VF) (0x00080800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ATQBAL_MAX_INDEX 127
+#define I40E_VF_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_VF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL_ATQBAL_SHIFT)
+#define I40E_VF_ATQH(_VF) (0x00082000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ATQH_MAX_INDEX 127
+#define I40E_VF_ATQH_ATQH_SHIFT 0
+#define I40E_VF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH_ATQH_SHIFT)
+#define I40E_VF_ATQLEN(_VF) (0x00081800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ATQLEN_MAX_INDEX 127
+#define I40E_VF_ATQLEN_ATQLEN_SHIFT 0
+#define I40E_VF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN_ATQLEN_SHIFT)
+#define I40E_VF_ATQLEN_ATQVFE_SHIFT 28
+#define I40E_VF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQVFE_SHIFT)
+#define I40E_VF_ATQLEN_ATQOVFL_SHIFT 29
+#define I40E_VF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30
+#define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ATQT_MAX_INDEX 127
+#define I40E_VF_ATQT_ATQT_SHIFT 0
+#define I40E_VF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT_ATQT_SHIFT)
+#define I40E_PRT_L2TAGSEN 0x001C0B20 /* Reset: CORER */
+#define I40E_PRT_L2TAGSEN_ENABLE_SHIFT 0
+#define I40E_PRT_L2TAGSEN_ENABLE_MASK I40E_MASK(0xFF, I40E_PRT_L2TAGSEN_ENABLE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA 0x0010C080 /* Reset: PFR */
+#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_PFCM_LAN_ERRDATA_Q_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO 0x0010C000 /* Reset: PFR */
+#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LANCTXCTL 0x0010C300 /* Reset: CORER */
+#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT 0
+#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT)
+#define I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT 12
+#define I40E_PFCM_LANCTXCTL_SUB_LINE_MASK I40E_MASK(0x7, I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT)
+#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT 15
+#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT)
+#define I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT 17
+#define I40E_PFCM_LANCTXCTL_OP_CODE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT)
+#define I40E_PFCM_LANCTXDATA(_i) (0x0010C100 + ((_i) * 128)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_PFCM_LANCTXDATA_MAX_INDEX 3
+#define I40E_PFCM_LANCTXDATA_DATA_SHIFT 0
+#define I40E_PFCM_LANCTXDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFCM_LANCTXDATA_DATA_SHIFT)
+#define I40E_PFCM_LANCTXSTAT 0x0010C380 /* Reset: CORER */
+#define I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT 0
+#define I40E_PFCM_LANCTXSTAT_CTX_DONE_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT)
+#define I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT 1
+#define I40E_PFCM_LANCTXSTAT_CTX_MISS_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1(_VF) (0x00138800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFCM_PE_ERRDATA1_MAX_INDEX 127
+#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT 0
+#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT 4
+#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT 8
+#define I40E_VFCM_PE_ERRDATA1_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1(_VF) (0x00138400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFCM_PE_ERRINFO1_MAX_INDEX 127
+#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT 0
+#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT 4
+#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT 8
+#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT 16
+#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT 24
+#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT)
+#define I40E_GLDCB_GENC 0x00083044 /* Reset: CORER */
+#define I40E_GLDCB_GENC_PCIRTT_SHIFT 0
+#define I40E_GLDCB_GENC_PCIRTT_MASK I40E_MASK(0xFFFF, I40E_GLDCB_GENC_PCIRTT_SHIFT)
+#define I40E_GLDCB_RUPTI 0x00122618 /* Reset: CORER */
+#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT 0
+#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT)
+#define I40E_PRTDCB_FCCFG 0x001E4640 /* Reset: GLOBR */
+#define I40E_PRTDCB_FCCFG_TFCE_SHIFT 3
+#define I40E_PRTDCB_FCCFG_TFCE_MASK I40E_MASK(0x3, I40E_PRTDCB_FCCFG_TFCE_SHIFT)
+#define I40E_PRTDCB_FCRTV 0x001E4600 /* Reset: GLOBR */
+#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT 0
+#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT)
+#define I40E_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: GLOBR */
+#define I40E_PRTDCB_FCTTVN_MAX_INDEX 3
+#define I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT 0
+#define I40E_PRTDCB_FCTTVN_TTV_2N_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT)
+#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT 16
+#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT)
+#define I40E_PRTDCB_GENC 0x00083000 /* Reset: CORER */
+#define I40E_PRTDCB_GENC_RESERVED_1_SHIFT 0
+#define I40E_PRTDCB_GENC_RESERVED_1_MASK I40E_MASK(0x3, I40E_PRTDCB_GENC_RESERVED_1_SHIFT)
+#define I40E_PRTDCB_GENC_NUMTC_SHIFT 2
+#define I40E_PRTDCB_GENC_NUMTC_MASK I40E_MASK(0xF, I40E_PRTDCB_GENC_NUMTC_SHIFT)
+#define I40E_PRTDCB_GENC_FCOEUP_SHIFT 6
+#define I40E_PRTDCB_GENC_FCOEUP_MASK I40E_MASK(0x7, I40E_PRTDCB_GENC_FCOEUP_SHIFT)
+#define I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT 9
+#define I40E_PRTDCB_GENC_FCOEUP_VALID_MASK I40E_MASK(0x1, I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT)
+#define I40E_PRTDCB_GENC_PFCLDA_SHIFT 16
+#define I40E_PRTDCB_GENC_PFCLDA_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_GENC_PFCLDA_SHIFT)
+#define I40E_PRTDCB_GENS 0x00083020 /* Reset: CORER */
+#define I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT 0
+#define I40E_PRTDCB_GENS_DCBX_STATUS_MASK I40E_MASK(0x7, I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT)
+#define I40E_PRTDCB_MFLCN 0x001E2400 /* Reset: GLOBR */
+#define I40E_PRTDCB_MFLCN_PMCF_SHIFT 0
+#define I40E_PRTDCB_MFLCN_PMCF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_PMCF_SHIFT)
+#define I40E_PRTDCB_MFLCN_DPF_SHIFT 1
+#define I40E_PRTDCB_MFLCN_DPF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_DPF_SHIFT)
+#define I40E_PRTDCB_MFLCN_RPFCM_SHIFT 2
+#define I40E_PRTDCB_MFLCN_RPFCM_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RPFCM_SHIFT)
+#define I40E_PRTDCB_MFLCN_RFCE_SHIFT 3
+#define I40E_PRTDCB_MFLCN_RFCE_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RFCE_SHIFT)
+#define I40E_PRTDCB_MFLCN_RPFCE_SHIFT 4
+#define I40E_PRTDCB_MFLCN_RPFCE_MASK I40E_MASK(0xFF, I40E_PRTDCB_MFLCN_RPFCE_SHIFT)
+#define I40E_PRTDCB_RETSC 0x001223E0 /* Reset: CORER */
+#define I40E_PRTDCB_RETSC_ETS_MODE_SHIFT 0
+#define I40E_PRTDCB_RETSC_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT 1
+#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT 2
+#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK I40E_MASK(0xF, I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT)
+#define I40E_PRTDCB_RETSC_LLTC_SHIFT 8
+#define I40E_PRTDCB_RETSC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RETSC_LLTC_SHIFT)
+#define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_RETSTCC_MAX_INDEX 7
+#define I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT 0
+#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK I40E_MASK(0x7F, I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
+#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31
+#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
+#define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */
+#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0
+#define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT 8
+#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT 16
+#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT)
+#define I40E_PRTDCB_RUP 0x001C0B00 /* Reset: CORER */
+#define I40E_PRTDCB_RUP_NOVLANUP_SHIFT 0
+#define I40E_PRTDCB_RUP_NOVLANUP_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP_NOVLANUP_SHIFT)
+#define I40E_PRTDCB_RUP2TC 0x001C09A0 /* Reset: CORER */
+#define I40E_PRTDCB_RUP2TC_UP0TC_SHIFT 0
+#define I40E_PRTDCB_RUP2TC_UP0TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP0TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP1TC_SHIFT 3
+#define I40E_PRTDCB_RUP2TC_UP1TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP1TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP2TC_SHIFT 6
+#define I40E_PRTDCB_RUP2TC_UP2TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP2TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP3TC_SHIFT 9
+#define I40E_PRTDCB_RUP2TC_UP3TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP3TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP4TC_SHIFT 12
+#define I40E_PRTDCB_RUP2TC_UP4TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP4TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP5TC_SHIFT 15
+#define I40E_PRTDCB_RUP2TC_UP5TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP5TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP6TC_SHIFT 18
+#define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21
+#define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
+#define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */
+#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0
+#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
+#define I40E_PRTDCB_TCMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_TCMSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TCMSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TCPMC 0x000A21A0 /* Reset: CORER */
+#define I40E_PRTDCB_TCPMC_CPM_SHIFT 0
+#define I40E_PRTDCB_TCPMC_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_CPM_SHIFT)
+#define I40E_PRTDCB_TCPMC_LLTC_SHIFT 13
+#define I40E_PRTDCB_TCPMC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_LLTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_TCWSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TCWSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCWSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCWSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TDPMC 0x000A0180 /* Reset: CORER */
+#define I40E_PRTDCB_TDPMC_DPM_SHIFT 0
+#define I40E_PRTDCB_TDPMC_DPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_TDPMC_DPM_SHIFT)
+#define I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB 0x000AE060 /* Reset: CORER */
+#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT 0
+#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT 8
+#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB 0x00098060 /* Reset: CORER */
+#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT 0
+#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT 8
+#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT)
+#define I40E_PRTDCB_TFCS 0x001E4560 /* Reset: GLOBR */
+#define I40E_PRTDCB_TFCS_TXOFF_SHIFT 0
+#define I40E_PRTDCB_TFCS_TXOFF_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF0_SHIFT 8
+#define I40E_PRTDCB_TFCS_TXOFF0_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF0_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF1_SHIFT 9
+#define I40E_PRTDCB_TFCS_TXOFF1_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF1_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF2_SHIFT 10
+#define I40E_PRTDCB_TFCS_TXOFF2_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF2_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF3_SHIFT 11
+#define I40E_PRTDCB_TFCS_TXOFF3_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF3_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF4_SHIFT 12
+#define I40E_PRTDCB_TFCS_TXOFF4_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF4_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF5_SHIFT 13
+#define I40E_PRTDCB_TFCS_TXOFF5_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF5_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF6_SHIFT 14
+#define I40E_PRTDCB_TFCS_TXOFF6_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF6_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF7_SHIFT 15
+#define I40E_PRTDCB_TFCS_TXOFF7_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF7_SHIFT)
+#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */ /* Reset: GLOBR */
+#define I40E_PRTDCB_TPFCTS_MAX_INDEX 7
+#define I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT 0
+#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT)
+#define I40E_GLFCOE_RCTL 0x00269B94 /* Reset: CORER */
+#define I40E_GLFCOE_RCTL_FCOEVER_SHIFT 0
+#define I40E_GLFCOE_RCTL_FCOEVER_MASK I40E_MASK(0xF, I40E_GLFCOE_RCTL_FCOEVER_SHIFT)
+#define I40E_GLFCOE_RCTL_SAVBAD_SHIFT 4
+#define I40E_GLFCOE_RCTL_SAVBAD_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_SAVBAD_SHIFT)
+#define I40E_GLFCOE_RCTL_ICRC_SHIFT 5
+#define I40E_GLFCOE_RCTL_ICRC_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_ICRC_SHIFT)
+#define I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT 16
+#define I40E_GLFCOE_RCTL_MAX_SIZE_MASK I40E_MASK(0x3FFF, I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT)
+#define I40E_GL_FWSTS 0x00083048 /* Reset: POR */
+#define I40E_GL_FWSTS_FWS0B_SHIFT 0
+#define I40E_GL_FWSTS_FWS0B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS0B_SHIFT)
+#define I40E_GL_FWSTS_FWRI_SHIFT 9
+#define I40E_GL_FWSTS_FWRI_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWRI_SHIFT)
+#define I40E_GL_FWSTS_FWS1B_SHIFT 16
+#define I40E_GL_FWSTS_FWS1B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_GLGEN_CLKSTAT 0x000B8184 /* Reset: POR */
+#define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0
+#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK I40E_MASK(0x1, I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT)
+#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT 4
+#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_MASK I40E_MASK(0x3, I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT 8
+#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT 12
+#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT 16
+#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT 20
+#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_GPIO_CTL(_i) (0x00088100 + ((_i) * 4)) /* _i=0...29 */ /* Reset: POR */
+#define I40E_GLGEN_GPIO_CTL_MAX_INDEX 29
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT 0
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT 3
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT 4
+#define I40E_GLGEN_GPIO_CTL_PIN_DIR_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT 5
+#define I40E_GLGEN_GPIO_CTL_TRI_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT 6
+#define I40E_GLGEN_GPIO_CTL_OUT_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT 7
+#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK I40E_MASK(0x7, I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT 10
+#define I40E_GLGEN_GPIO_CTL_LED_INVRT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT 11
+#define I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT 12
+#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT 17
+#define I40E_GLGEN_GPIO_CTL_INT_MODE_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT 19
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20
+#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
+#define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */
+#define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0
+#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT)
+#define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT 5
+#define I40E_GLGEN_GPIO_SET_SDP_DATA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT)
+#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6
+#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT)
+#define I40E_GLGEN_GPIO_STAT 0x0008817C /* Reset: POR */
+#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT 0
+#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT)
+#define I40E_GLGEN_GPIO_TRANSIT 0x00088180 /* Reset: POR */
+#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT 0
+#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT)
+#define I40E_GLGEN_I2CCMD(_i) (0x000881E0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_I2CCMD_MAX_INDEX 3
+#define I40E_GLGEN_I2CCMD_DATA_SHIFT 0
+#define I40E_GLGEN_I2CCMD_DATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_I2CCMD_DATA_SHIFT)
+#define I40E_GLGEN_I2CCMD_REGADD_SHIFT 16
+#define I40E_GLGEN_I2CCMD_REGADD_MASK I40E_MASK(0xFF, I40E_GLGEN_I2CCMD_REGADD_SHIFT)
+#define I40E_GLGEN_I2CCMD_PHYADD_SHIFT 24
+#define I40E_GLGEN_I2CCMD_PHYADD_MASK I40E_MASK(0x7, I40E_GLGEN_I2CCMD_PHYADD_SHIFT)
+#define I40E_GLGEN_I2CCMD_OP_SHIFT 27
+#define I40E_GLGEN_I2CCMD_OP_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_OP_SHIFT)
+#define I40E_GLGEN_I2CCMD_RESET_SHIFT 28
+#define I40E_GLGEN_I2CCMD_RESET_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_RESET_SHIFT)
+#define I40E_GLGEN_I2CCMD_R_SHIFT 29
+#define I40E_GLGEN_I2CCMD_R_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_R_SHIFT)
+#define I40E_GLGEN_I2CCMD_E_SHIFT 31
+#define I40E_GLGEN_I2CCMD_E_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_E_SHIFT)
+#define I40E_GLGEN_I2CPARAMS(_i) (0x000881AC + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_I2CPARAMS_MAX_INDEX 3
+#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT 0
+#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_MASK I40E_MASK(0x1F, I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT 5
+#define I40E_GLGEN_I2CPARAMS_READ_TIME_MASK I40E_MASK(0x7, I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT 8
+#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_SHIFT 9
+#define I40E_GLGEN_I2CPARAMS_CLK_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT 10
+#define I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT 11
+#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT 12
+#define I40E_GLGEN_I2CPARAMS_DATA_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT 13
+#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT 14
+#define I40E_GLGEN_I2CPARAMS_CLK_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT 15
+#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT 31
+#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT)
+#define I40E_GLGEN_LED_CTL 0x00088178 /* Reset: POR */
+#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT 0
+#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL(_i) (0x000881D0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_MDIO_CTRL_MAX_INDEX 3
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT 0
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_MASK I40E_MASK(0x1FFFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17
+#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x3FFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT 1
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT 5
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT 10
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT 15
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT 20
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT 25
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT 31
+#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT)
+#define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_MSCA_MAX_INDEX 3
+#define I40E_GLGEN_MSCA_MDIADD_SHIFT 0
+#define I40E_GLGEN_MSCA_MDIADD_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSCA_MDIADD_SHIFT)
+#define I40E_GLGEN_MSCA_DEVADD_SHIFT 16
+#define I40E_GLGEN_MSCA_DEVADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_DEVADD_SHIFT)
+#define I40E_GLGEN_MSCA_PHYADD_SHIFT 21
+#define I40E_GLGEN_MSCA_PHYADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_PHYADD_SHIFT)
+#define I40E_GLGEN_MSCA_OPCODE_SHIFT 26
+#define I40E_GLGEN_MSCA_OPCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_GLGEN_MSCA_STCODE_SHIFT 28
+#define I40E_GLGEN_MSCA_STCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_STCODE_SHIFT)
+#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
+#define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT)
+#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
+#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
+#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_MSRWD_MAX_INDEX 3
+#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
+#define I40E_GLGEN_MSRWD_MDIWRDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT)
+#define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16
+#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4 /* Reset: PCIR */
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK I40E_MASK(0x1F, I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK I40E_MASK(0xFF, I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT)
+#define I40E_GLGEN_RSTAT 0x000B8188 /* Reset: POR */
+#define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0
+#define I40E_GLGEN_RSTAT_DEVSTATE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_DEVSTATE_SHIFT)
+#define I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT 2
+#define I40E_GLGEN_RSTAT_RESET_TYPE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT)
+#define I40E_GLGEN_RSTAT_CORERCNT_SHIFT 4
+#define I40E_GLGEN_RSTAT_CORERCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_CORERCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT 6
+#define I40E_GLGEN_RSTAT_GLOBRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_EMPRCNT_SHIFT 8
+#define I40E_GLGEN_RSTAT_EMPRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_EMPRCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT 10
+#define I40E_GLGEN_RSTAT_TIME_TO_RST_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT)
+#define I40E_GLGEN_RSTCTL 0x000B8180 /* Reset: POR */
+#define I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT 0
+#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT)
+#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8
+#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT)
+#define I40E_GLGEN_RSTENA_EMP 0x000B818C /* Reset: POR */
+#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT 0
+#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT)
+#define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */
+#define I40E_GLGEN_RTRIG_CORER_SHIFT 0
+#define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT)
+#define I40E_GLGEN_RTRIG_GLOBR_SHIFT 1
+#define I40E_GLGEN_RTRIG_GLOBR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_GLOBR_SHIFT)
+#define I40E_GLGEN_RTRIG_EMPFWR_SHIFT 2
+#define I40E_GLGEN_RTRIG_EMPFWR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_EMPFWR_SHIFT)
+#define I40E_GLGEN_STAT 0x000B612C /* Reset: POR */
+#define I40E_GLGEN_STAT_HWRSVD0_SHIFT 0
+#define I40E_GLGEN_STAT_HWRSVD0_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD0_SHIFT)
+#define I40E_GLGEN_STAT_DCBEN_SHIFT 2
+#define I40E_GLGEN_STAT_DCBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_DCBEN_SHIFT)
+#define I40E_GLGEN_STAT_VTEN_SHIFT 3
+#define I40E_GLGEN_STAT_VTEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_VTEN_SHIFT)
+#define I40E_GLGEN_STAT_FCOEN_SHIFT 4
+#define I40E_GLGEN_STAT_FCOEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_FCOEN_SHIFT)
+#define I40E_GLGEN_STAT_EVBEN_SHIFT 5
+#define I40E_GLGEN_STAT_EVBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_EVBEN_SHIFT)
+#define I40E_GLGEN_STAT_HWRSVD1_SHIFT 6
+#define I40E_GLGEN_STAT_HWRSVD1_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD1_SHIFT)
+#define I40E_GLGEN_VFLRSTAT(_i) (0x00092600 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLGEN_VFLRSTAT_MAX_INDEX 3
+#define I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT 0
+#define I40E_GLGEN_VFLRSTAT_VFLRE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT)
+#define I40E_GLVFGEN_TIMER 0x000881BC /* Reset: CORER */
+#define I40E_GLVFGEN_TIMER_GTIME_SHIFT 0
+#define I40E_GLVFGEN_TIMER_GTIME_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVFGEN_TIMER_GTIME_SHIFT)
+#define I40E_PFGEN_CTRL 0x00092400 /* Reset: PFR */
+#define I40E_PFGEN_CTRL_PFSWR_SHIFT 0
+#define I40E_PFGEN_CTRL_PFSWR_MASK I40E_MASK(0x1, I40E_PFGEN_CTRL_PFSWR_SHIFT)
+#define I40E_PFGEN_DRUN 0x00092500 /* Reset: CORER */
+#define I40E_PFGEN_DRUN_DRVUNLD_SHIFT 0
+#define I40E_PFGEN_DRUN_DRVUNLD_MASK I40E_MASK(0x1, I40E_PFGEN_DRUN_DRVUNLD_SHIFT)
+#define I40E_PFGEN_PORTNUM 0x001C0480 /* Reset: CORER */
+#define I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT 0
+#define I40E_PFGEN_PORTNUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT)
+#define I40E_PFGEN_STATE 0x00088000 /* Reset: CORER */
+#define I40E_PFGEN_STATE_RESERVED_0_SHIFT 0
+#define I40E_PFGEN_STATE_RESERVED_0_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_RESERVED_0_SHIFT)
+#define I40E_PFGEN_STATE_PFFCEN_SHIFT 1
+#define I40E_PFGEN_STATE_PFFCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFFCEN_SHIFT)
+#define I40E_PFGEN_STATE_PFLINKEN_SHIFT 2
+#define I40E_PFGEN_STATE_PFLINKEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFLINKEN_SHIFT)
+#define I40E_PFGEN_STATE_PFSCEN_SHIFT 3
+#define I40E_PFGEN_STATE_PFSCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFSCEN_SHIFT)
+#define I40E_PRTGEN_CNF 0x000B8120 /* Reset: POR */
+#define I40E_PRTGEN_CNF_PORT_DIS_SHIFT 0
+#define I40E_PRTGEN_CNF_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT 1
+#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT 2
+#define I40E_PRTGEN_CNF_EMP_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF2 0x000B8160 /* Reset: POR */
+#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT 0
+#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT)
+#define I40E_PRTGEN_STATUS 0x000B8100 /* Reset: POR */
+#define I40E_PRTGEN_STATUS_PORT_VALID_SHIFT 0
+#define I40E_PRTGEN_STATUS_PORT_VALID_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_VALID_SHIFT)
+#define I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT 1
+#define I40E_PRTGEN_STATUS_PORT_ACTIVE_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT)
+#define I40E_VFGEN_RSTAT1(_VF) (0x00074400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFGEN_RSTAT1_MAX_INDEX 127
+#define I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT 0
+#define I40E_VFGEN_RSTAT1_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT)
+#define I40E_VPGEN_VFRSTAT(_VF) (0x00091C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_VPGEN_VFRSTAT_MAX_INDEX 127
+#define I40E_VPGEN_VFRSTAT_VFRD_SHIFT 0
+#define I40E_VPGEN_VFRSTAT_VFRD_MASK I40E_MASK(0x1, I40E_VPGEN_VFRSTAT_VFRD_SHIFT)
+#define I40E_VPGEN_VFRTRIG(_VF) (0x00091800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_VPGEN_VFRTRIG_MAX_INDEX 127
+#define I40E_VPGEN_VFRTRIG_VFSWR_SHIFT 0
+#define I40E_VPGEN_VFRTRIG_VFSWR_MASK I40E_MASK(0x1, I40E_VPGEN_VFRTRIG_VFSWR_SHIFT)
+#define I40E_VSIGEN_RSTAT(_VSI) (0x00090800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_VSIGEN_RSTAT_MAX_INDEX 383
+#define I40E_VSIGEN_RSTAT_VMRD_SHIFT 0
+#define I40E_VSIGEN_RSTAT_VMRD_MASK I40E_MASK(0x1, I40E_VSIGEN_RSTAT_VMRD_SHIFT)
+#define I40E_VSIGEN_RTRIG(_VSI) (0x00090000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_VSIGEN_RTRIG_MAX_INDEX 383
+#define I40E_VSIGEN_RTRIG_VMSWR_SHIFT 0
+#define I40E_VSIGEN_RTRIG_VMSWR_MASK I40E_MASK(0x1, I40E_VSIGEN_RTRIG_VMSWR_SHIFT)
+#define I40E_GLHMC_FCOEDDPBASE(_i) (0x000C6600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FCOEDDPBASE_MAX_INDEX 15
+#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT 0
+#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT)
+#define I40E_GLHMC_FCOEDDPCNT(_i) (0x000C6700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FCOEDDPCNT_MAX_INDEX 15
+#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT 0
+#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_MASK I40E_MASK(0xFFFFF, I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT)
+#define I40E_GLHMC_FCOEDDPOBJSZ 0x000C2010 /* Reset: CORER */
+#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT 0
+#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT)
+#define I40E_GLHMC_FCOEFBASE(_i) (0x000C6800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FCOEFBASE_MAX_INDEX 15
+#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT 0
+#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT)
+#define I40E_GLHMC_FCOEFCNT(_i) (0x000C6900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FCOEFCNT_MAX_INDEX 15
+#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT 0
+#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT)
+#define I40E_GLHMC_FCOEFMAX 0x000C20D0 /* Reset: CORER */
+#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT 0
+#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT)
+#define I40E_GLHMC_FCOEFOBJSZ 0x000C2018 /* Reset: CORER */
+#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT 0
+#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT)
+#define I40E_GLHMC_FCOEMAX 0x000C2014 /* Reset: CORER */
+#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT 0
+#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_MASK I40E_MASK(0x1FFF, I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT)
+#define I40E_GLHMC_FSIAVBASE(_i) (0x000C5600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FSIAVBASE_MAX_INDEX 15
+#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT 0
+#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_FSIAVCNT(_i) (0x000C5700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FSIAVCNT_MAX_INDEX 15
+#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT 0
+#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_FSIAVCNT_RSVD_SHIFT 29
+#define I40E_GLHMC_FSIAVCNT_RSVD_MASK I40E_MASK(0x7, I40E_GLHMC_FSIAVCNT_RSVD_SHIFT)
+#define I40E_GLHMC_FSIAVMAX 0x000C2068 /* Reset: CORER */
+#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT 0
+#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT)
+#define I40E_GLHMC_FSIAVOBJSZ 0x000C2064 /* Reset: CORER */
+#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT 0
+#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT)
+#define I40E_GLHMC_FSIMCBASE(_i) (0x000C6000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FSIMCBASE_MAX_INDEX 15
+#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT 0
+#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT)
+#define I40E_GLHMC_FSIMCCNT(_i) (0x000C6100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FSIMCCNT_MAX_INDEX 15
+#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT 0
+#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT)
+#define I40E_GLHMC_FSIMCMAX 0x000C2060 /* Reset: CORER */
+#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT 0
+#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_MASK I40E_MASK(0x3FFF, I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT)
+#define I40E_GLHMC_FSIMCOBJSZ 0x000C205c /* Reset: CORER */
+#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT 0
+#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT)
+#define I40E_GLHMC_LANQMAX 0x000C2008 /* Reset: CORER */
+#define I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT 0
+#define I40E_GLHMC_LANQMAX_PMLANQMAX_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT)
+#define I40E_GLHMC_LANRXBASE(_i) (0x000C6400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_LANRXBASE_MAX_INDEX 15
+#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT 0
+#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT)
+#define I40E_GLHMC_LANRXCNT(_i) (0x000C6500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_LANRXCNT_MAX_INDEX 15
+#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT 0
+#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT)
+#define I40E_GLHMC_LANRXOBJSZ 0x000C200c /* Reset: CORER */
+#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT 0
+#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT)
+#define I40E_GLHMC_LANTXBASE(_i) (0x000C6200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_LANTXBASE_MAX_INDEX 15
+#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT 0
+#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT)
+#define I40E_GLHMC_LANTXBASE_RSVD_SHIFT 24
+#define I40E_GLHMC_LANTXBASE_RSVD_MASK I40E_MASK(0xFF, I40E_GLHMC_LANTXBASE_RSVD_SHIFT)
+#define I40E_GLHMC_LANTXCNT(_i) (0x000C6300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_LANTXCNT_MAX_INDEX 15
+#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT 0
+#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT)
+#define I40E_GLHMC_LANTXOBJSZ 0x000C2004 /* Reset: CORER */
+#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT 0
+#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT)
+#define I40E_GLHMC_PFASSIGN(_i) (0x000C0c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PFASSIGN_MAX_INDEX 15
+#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT 0
+#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_MASK I40E_MASK(0xF, I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT)
+#define I40E_GLHMC_SDPART(_i) (0x000C0800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_SDPART_MAX_INDEX 15
+#define I40E_GLHMC_SDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_SDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_SDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_SDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_SDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_SDPART_PMSDSIZE_SHIFT)
+#define I40E_PFHMC_ERRORDATA 0x000C0500 /* Reset: PFR */
+#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT 0
+#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_MASK I40E_MASK(0x3FFFFFFF, I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT)
+#define I40E_PFHMC_ERRORINFO 0x000C0400 /* Reset: PFR */
+#define I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT 0
+#define I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT)
+#define I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT 7
+#define I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT)
+#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT 8
+#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK I40E_MASK(0xF, I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT)
+#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT 16
+#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT)
+#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT 31
+#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT)
+#define I40E_PFHMC_PDINV 0x000C0300 /* Reset: PFR */
+#define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0
+#define I40E_PFHMC_PDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_PDINV_PMSDIDX_SHIFT)
+#define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16
+#define I40E_PFHMC_PDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_PFHMC_PDINV_PMPDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD 0x000C0000 /* Reset: PFR */
+#define I40E_PFHMC_SDCMD_PMSDIDX_SHIFT 0
+#define I40E_PFHMC_SDCMD_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_SDCMD_PMSDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31
+#define I40E_PFHMC_SDCMD_PMSDWR_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDWR_SHIFT)
+#define I40E_PFHMC_SDDATAHIGH 0x000C0200 /* Reset: PFR */
+#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT 0
+#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT)
+#define I40E_PFHMC_SDDATALOW 0x000C0100 /* Reset: PFR */
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK I40E_MASK(0x3FF, I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT 12
+#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK I40E_MASK(0xFFFFF, I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT)
+#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */ /* Reset: POR */
+#define I40E_GL_GP_FUSE_MAX_INDEX 28
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT)
+#define I40E_GL_UFUSE 0x00094008 /* Reset: POR */
+#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT 1
+#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_MASK I40E_MASK(0x1, I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT)
+#define I40E_GL_UFUSE_NIC_ID_SHIFT 2
+#define I40E_GL_UFUSE_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_NIC_ID_SHIFT)
+#define I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT 10
+#define I40E_GL_UFUSE_ULT_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT)
+#define I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT 11
+#define I40E_GL_UFUSE_CLS_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT)
+#define I40E_EMPINT_GPIO_ENA 0x00088188 /* Reset: POR */
+#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
+#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
+#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
+#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
+#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
+#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
+#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
+#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
+#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
+#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
+#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
+#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
+#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
+#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
+#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
+#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
+#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
+#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
+#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
+#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
+#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
+#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
+#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
+#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
+#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
+#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
+#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
+#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
+#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
+#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
+#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT)
+#define I40E_PFGEN_PORTMDIO_NUM 0x0003F100 /* Reset: CORER */
+#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT 0
+#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT)
+#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT 4
+#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK I40E_MASK(0x1, I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT)
+#define I40E_PFINT_AEQCTL 0x00038700 /* Reset: CORER */
+#define I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_PFINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_ITR_INDX_SHIFT 11
+#define I40E_PFINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_AEQCTL_ITR_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_PFINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_PFINT_AEQCTL_INTEVENT_SHIFT 31
+#define I40E_PFINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_INTEVENT_SHIFT)
+#define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: CORER */
+#define I40E_PFINT_CEQCTL_MAX_INDEX 511
+#define I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_PFINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_ITR_INDX_SHIFT 11
+#define I40E_PFINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_ITR_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31
+#define I40E_PFINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_PFINT_DYN_CTL0 0x00038480 /* Reset: PFR */
+#define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0
+#define I40E_PFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT 1
+#define I40E_PFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
+#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
+#define I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT 3
+#define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT 5
+#define I40E_PFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
+#define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT)
+#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
+#define I40E_PFINT_DYN_CTLN_MAX_INDEX 511
+#define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0
+#define I40E_PFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
+#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5
+#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
+#define I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT)
+#define I40E_PFINT_GPIO_ENA 0x00088080 /* Reset: CORER */
+#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
+#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
+#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
+#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
+#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
+#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
+#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
+#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
+#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
+#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
+#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
+#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
+#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
+#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
+#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
+#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
+#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
+#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
+#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
+#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
+#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
+#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
+#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
+#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
+#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
+#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
+#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
+#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
+#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
+#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
+#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT)
+#define I40E_PFINT_ICR0 0x00038780 /* Reset: CORER */
+#define I40E_PFINT_ICR0_INTEVENT_SHIFT 0
+#define I40E_PFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_INTEVENT_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_0_SHIFT 1
+#define I40E_PFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_0_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_1_SHIFT 2
+#define I40E_PFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_1_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_2_SHIFT 3
+#define I40E_PFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_2_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_3_SHIFT 4
+#define I40E_PFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_3_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_4_SHIFT 5
+#define I40E_PFINT_ICR0_QUEUE_4_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_4_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_5_SHIFT 6
+#define I40E_PFINT_ICR0_QUEUE_5_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_5_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_6_SHIFT 7
+#define I40E_PFINT_ICR0_QUEUE_6_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_6_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_7_SHIFT 8
+#define I40E_PFINT_ICR0_QUEUE_7_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_7_SHIFT)
+#define I40E_PFINT_ICR0_ECC_ERR_SHIFT 16
+#define I40E_PFINT_ICR0_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ECC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_MAL_DETECT_SHIFT 19
+#define I40E_PFINT_ICR0_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_MAL_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_GRST_SHIFT 20
+#define I40E_PFINT_ICR0_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GRST_SHIFT)
+#define I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT 21
+#define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT)
+#define I40E_PFINT_ICR0_GPIO_SHIFT 22
+#define I40E_PFINT_ICR0_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GPIO_SHIFT)
+#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
+#define I40E_PFINT_ICR0_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24
+#define I40E_PFINT_ICR0_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_STORM_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
+#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
+#define I40E_PFINT_ICR0_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_HMC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_PE_CRITERR_SHIFT 28
+#define I40E_PFINT_ICR0_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PE_CRITERR_SHIFT)
+#define I40E_PFINT_ICR0_VFLR_SHIFT 29
+#define I40E_PFINT_ICR0_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_VFLR_SHIFT)
+#define I40E_PFINT_ICR0_ADMINQ_SHIFT 30
+#define I40E_PFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_PFINT_ICR0_SWINT_SHIFT 31
+#define I40E_PFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_SWINT_SHIFT)
+#define I40E_PFINT_ICR0_ENA 0x00038800 /* Reset: CORER */
+#define I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT 16
+#define I40E_PFINT_ICR0_ENA_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT 19
+#define I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_ENA_GRST_SHIFT 20
+#define I40E_PFINT_ICR0_ENA_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GRST_SHIFT)
+#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT 21
+#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT)
+#define I40E_PFINT_ICR0_ENA_GPIO_SHIFT 22
+#define I40E_PFINT_ICR0_ENA_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
+#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
+#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24
+#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
+#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
+#define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT 28
+#define I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_VFLR_SHIFT 29
+#define I40E_PFINT_ICR0_ENA_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_VFLR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT 30
+#define I40E_PFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT)
+#define I40E_PFINT_ICR0_ENA_RSVD_SHIFT 31
+#define I40E_PFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_RSVD_SHIFT)
+#define I40E_PFINT_ITR0(_i) (0x00038000 + ((_i) * 128)) /* _i=0...2 */ /* Reset: PFR */
+#define I40E_PFINT_ITR0_MAX_INDEX 2
+#define I40E_PFINT_ITR0_INTERVAL_SHIFT 0
+#define I40E_PFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITR0_INTERVAL_SHIFT)
+#define I40E_PFINT_ITRN(_i, _INTPF) (0x00030000 + ((_i) * 2048 + (_INTPF) * 4)) /* _i=0...2, _INTPF=0...511 */ /* Reset: PFR */
+#define I40E_PFINT_ITRN_MAX_INDEX 2
+#define I40E_PFINT_ITRN_INTERVAL_SHIFT 0
+#define I40E_PFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITRN_INTERVAL_SHIFT)
+#define I40E_PFINT_LNKLST0 0x00038500 /* Reset: PFR */
+#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
+#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT)
+#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
+#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
+#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
+#define I40E_PFINT_LNKLSTN_MAX_INDEX 511
+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
+#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+#define I40E_PFINT_RATE0 0x00038580 /* Reset: PFR */
+#define I40E_PFINT_RATE0_INTERVAL_SHIFT 0
+#define I40E_PFINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATE0_INTERVAL_SHIFT)
+#define I40E_PFINT_RATE0_INTRL_ENA_SHIFT 6
+#define I40E_PFINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATE0_INTRL_ENA_SHIFT)
+#define I40E_PFINT_RATEN(_INTPF) (0x00035800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
+#define I40E_PFINT_RATEN_MAX_INDEX 511
+#define I40E_PFINT_RATEN_INTERVAL_SHIFT 0
+#define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT)
+#define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6
+#define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT)
+#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: PFR */
+#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
+#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
+#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
+#define I40E_QINT_RQCTL_MAX_INDEX 1535
+#define I40E_QINT_RQCTL_MSIX_INDX_SHIFT 0
+#define I40E_QINT_RQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_RQCTL_MSIX_INDX_SHIFT)
+#define I40E_QINT_RQCTL_ITR_INDX_SHIFT 11
+#define I40E_QINT_RQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_ITR_INDX_SHIFT)
+#define I40E_QINT_RQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_QINT_RQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_RQCTL_MSIX0_INDX_SHIFT)
+#define I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_QINT_RQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_QINT_RQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_QINT_RQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_QINT_RQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_CAUSE_ENA_SHIFT)
+#define I40E_QINT_RQCTL_INTEVENT_SHIFT 31
+#define I40E_QINT_RQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_INTEVENT_SHIFT)
+#define I40E_QINT_TQCTL(_Q) (0x0003C000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
+#define I40E_QINT_TQCTL_MAX_INDEX 1535
+#define I40E_QINT_TQCTL_MSIX_INDX_SHIFT 0
+#define I40E_QINT_TQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_TQCTL_MSIX_INDX_SHIFT)
+#define I40E_QINT_TQCTL_ITR_INDX_SHIFT 11
+#define I40E_QINT_TQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_ITR_INDX_SHIFT)
+#define I40E_QINT_TQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_QINT_TQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_TQCTL_MSIX0_INDX_SHIFT)
+#define I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_QINT_TQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_QINT_TQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_QINT_TQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_QINT_TQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_CAUSE_ENA_SHIFT)
+#define I40E_QINT_TQCTL_INTEVENT_SHIFT 31
+#define I40E_QINT_TQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_INTEVENT_SHIFT)
+#define I40E_VFINT_DYN_CTL0(_VF) (0x0002A400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFINT_DYN_CTL0_MAX_INDEX 127
+#define I40E_VFINT_DYN_CTL0_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT)
+#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
+#define I40E_VFINT_DYN_CTLN_MAX_INDEX 511
+#define I40E_VFINT_DYN_CTLN_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT)
+#define I40E_VFINT_ICR0(_VF) (0x0002BC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_VFINT_ICR0_MAX_INDEX 127
+#define I40E_VFINT_ICR0_INTEVENT_SHIFT 0
+#define I40E_VFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_INTEVENT_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_0_SHIFT 1
+#define I40E_VFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_0_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_1_SHIFT 2
+#define I40E_VFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_1_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_2_SHIFT 3
+#define I40E_VFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_2_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_3_SHIFT 4
+#define I40E_VFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_3_SHIFT)
+#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_SWINT_SHIFT 31
+#define I40E_VFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_SWINT_SHIFT)
+#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_VFINT_ICR0_ENA_MAX_INDEX 127
+#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA_RSVD_SHIFT 31
+#define I40E_VFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_RSVD_SHIFT)
+#define I40E_VFINT_ITR0(_i, _VF) (0x00028000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFINT_ITR0_MAX_INDEX 2
+#define I40E_VFINT_ITR0_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR0_INTERVAL_SHIFT)
+#define I40E_VFINT_ITRN(_i, _INTVF) (0x00020000 + ((_i) * 2048 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...511 */ /* Reset: VFR */
+#define I40E_VFINT_ITRN_MAX_INDEX 2
+#define I40E_VFINT_ITRN_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT)
+#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFINT_STAT_CTL0_MAX_INDEX 127
+#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
+#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_VPINT_AEQCTL_MAX_INDEX 127
+#define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_VPINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11
+#define I40E_VPINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_AEQCTL_ITR_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_VPINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_VPINT_AEQCTL_INTEVENT_SHIFT 31
+#define I40E_VPINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_INTEVENT_SHIFT)
+#define I40E_VPINT_CEQCTL(_INTVF) (0x00026800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: CORER */
+#define I40E_VPINT_CEQCTL_MAX_INDEX 511
+#define I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_VPINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_ITR_INDX_SHIFT 11
+#define I40E_VPINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_ITR_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_VPINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_VPINT_CEQCTL_INTEVENT_SHIFT 31
+#define I40E_VPINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_VPINT_LNKLST0(_VF) (0x0002A800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VPINT_LNKLST0_MAX_INDEX 127
+#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
+#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT)
+#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
+#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
+#define I40E_VPINT_LNKLSTN(_INTVF) (0x00025000 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
+#define I40E_VPINT_LNKLSTN_MAX_INDEX 511
+#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
+#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
+#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+#define I40E_VPINT_RATE0(_VF) (0x0002AC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VPINT_RATE0_MAX_INDEX 127
+#define I40E_VPINT_RATE0_INTERVAL_SHIFT 0
+#define I40E_VPINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATE0_INTERVAL_SHIFT)
+#define I40E_VPINT_RATE0_INTRL_ENA_SHIFT 6
+#define I40E_VPINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATE0_INTRL_ENA_SHIFT)
+#define I40E_VPINT_RATEN(_INTVF) (0x00025800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
+#define I40E_VPINT_RATEN_MAX_INDEX 511
+#define I40E_VPINT_RATEN_INTERVAL_SHIFT 0
+#define I40E_VPINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATEN_INTERVAL_SHIFT)
+#define I40E_VPINT_RATEN_INTRL_ENA_SHIFT 6
+#define I40E_VPINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATEN_INTRL_ENA_SHIFT)
+#define I40E_GL_RDPU_CNTRL 0x00051060 /* Reset: CORER */
+#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT 0
+#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_MASK I40E_MASK(0x1, I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT)
+#define I40E_GL_RDPU_CNTRL_ECO_SHIFT 1
+#define I40E_GL_RDPU_CNTRL_ECO_MASK I40E_MASK(0x7FFFFFFF, I40E_GL_RDPU_CNTRL_ECO_SHIFT)
+#define I40E_GLLAN_RCTL_0 0x0012A500 /* Reset: CORER */
+#define I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT 0
+#define I40E_GLLAN_RCTL_0_PXE_MODE_MASK I40E_MASK(0x1, I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT)
+#define I40E_GLLAN_TSOMSK_F 0x000442D8 /* Reset: CORER */
+#define I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT 0
+#define I40E_GLLAN_TSOMSK_F_TCPMSKF_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT)
+#define I40E_GLLAN_TSOMSK_L 0x000442E0 /* Reset: CORER */
+#define I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT 0
+#define I40E_GLLAN_TSOMSK_L_TCPMSKL_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT)
+#define I40E_GLLAN_TSOMSK_M 0x000442DC /* Reset: CORER */
+#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0
+#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS(_i) (0x000e6500 + ((_i) * 4)) /* _i=0...11 */ /* Reset: CORER */
+#define I40E_GLLAN_TXPRE_QDIS_MAX_INDEX 11
+#define I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT 0
+#define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK I40E_MASK(0x7FF, I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT 16
+#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30
+#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
+#define I40E_PFLAN_QALLOC 0x001C0400 /* Reset: CORER */
+#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
+#define I40E_PFLAN_QALLOC_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16
+#define I40E_PFLAN_QALLOC_LASTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_LASTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_VALID_SHIFT 31
+#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_VALID_SHIFT)
+#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
+#define I40E_QRX_ENA_MAX_INDEX 1535
+#define I40E_QRX_ENA_QENA_REQ_SHIFT 0
+#define I40E_QRX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_REQ_SHIFT)
+#define I40E_QRX_ENA_FAST_QDIS_SHIFT 1
+#define I40E_QRX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QRX_ENA_FAST_QDIS_SHIFT)
+#define I40E_QRX_ENA_QENA_STAT_SHIFT 2
+#define I40E_QRX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_STAT_SHIFT)
+#define I40E_QRX_TAIL(_Q) (0x00128000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
+#define I40E_QRX_TAIL_MAX_INDEX 1535
+#define I40E_QRX_TAIL_TAIL_SHIFT 0
+#define I40E_QRX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL_TAIL_SHIFT)
+#define I40E_QTX_CTL(_Q) (0x00104000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
+#define I40E_QTX_CTL_MAX_INDEX 1535
+#define I40E_QTX_CTL_PFVF_Q_SHIFT 0
+#define I40E_QTX_CTL_PFVF_Q_MASK I40E_MASK(0x3, I40E_QTX_CTL_PFVF_Q_SHIFT)
+#define I40E_QTX_CTL_PF_INDX_SHIFT 2
+#define I40E_QTX_CTL_PF_INDX_MASK I40E_MASK(0xF, I40E_QTX_CTL_PF_INDX_SHIFT)
+#define I40E_QTX_CTL_VFVM_INDX_SHIFT 7
+#define I40E_QTX_CTL_VFVM_INDX_MASK I40E_MASK(0x1FF, I40E_QTX_CTL_VFVM_INDX_SHIFT)
+#define I40E_QTX_ENA(_Q) (0x00100000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
+#define I40E_QTX_ENA_MAX_INDEX 1535
+#define I40E_QTX_ENA_QENA_REQ_SHIFT 0
+#define I40E_QTX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_REQ_SHIFT)
+#define I40E_QTX_ENA_FAST_QDIS_SHIFT 1
+#define I40E_QTX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QTX_ENA_FAST_QDIS_SHIFT)
+#define I40E_QTX_ENA_QENA_STAT_SHIFT 2
+#define I40E_QTX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_STAT_SHIFT)
+#define I40E_QTX_HEAD(_Q) (0x000E4000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
+#define I40E_QTX_HEAD_MAX_INDEX 1535
+#define I40E_QTX_HEAD_HEAD_SHIFT 0
+#define I40E_QTX_HEAD_HEAD_MASK I40E_MASK(0x1FFF, I40E_QTX_HEAD_HEAD_SHIFT)
+#define I40E_QTX_HEAD_RS_PENDING_SHIFT 16
+#define I40E_QTX_HEAD_RS_PENDING_MASK I40E_MASK(0x1, I40E_QTX_HEAD_RS_PENDING_SHIFT)
+#define I40E_QTX_TAIL(_Q) (0x00108000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
+#define I40E_QTX_TAIL_MAX_INDEX 1535
+#define I40E_QTX_TAIL_TAIL_SHIFT 0
+#define I40E_QTX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL_TAIL_SHIFT)
+#define I40E_VPLAN_MAPENA(_VF) (0x00074000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VPLAN_MAPENA_MAX_INDEX 127
+#define I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT 0
+#define I40E_VPLAN_MAPENA_TXRX_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT)
+#define I40E_VPLAN_QTABLE(_i, _VF) (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: VFR */
+#define I40E_VPLAN_QTABLE_MAX_INDEX 15
+#define I40E_VPLAN_QTABLE_QINDEX_SHIFT 0
+#define I40E_VPLAN_QTABLE_QINDEX_MASK I40E_MASK(0x7FF, I40E_VPLAN_QTABLE_QINDEX_SHIFT)
+#define I40E_VSILAN_QBASE(_VSI) (0x0020C800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */
+#define I40E_VSILAN_QBASE_MAX_INDEX 383
+#define I40E_VSILAN_QBASE_VSIBASE_SHIFT 0
+#define I40E_VSILAN_QBASE_VSIBASE_MASK I40E_MASK(0x7FF, I40E_VSILAN_QBASE_VSIBASE_SHIFT)
+#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11
+#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT)
+#define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...7, _VSI=0...383 */ /* Reset: PFR */
+#define I40E_VSILAN_QTABLE_MAX_INDEX 7
+#define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0
+#define I40E_VSILAN_QTABLE_QINDEX_0_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_0_SHIFT)
+#define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16
+#define I40E_VSILAN_QTABLE_QINDEX_1_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_1_SHIFT)
+#define I40E_PRTGL_SAH 0x001E2140 /* Reset: GLOBR */
+#define I40E_PRTGL_SAH_FC_SAH_SHIFT 0
+#define I40E_PRTGL_SAH_FC_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_FC_SAH_SHIFT)
+#define I40E_PRTGL_SAH_MFS_SHIFT 16
+#define I40E_PRTGL_SAH_MFS_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_MFS_SHIFT)
+#define I40E_PRTGL_SAL 0x001E2120 /* Reset: GLOBR */
+#define I40E_PRTGL_SAL_FC_SAL_SHIFT 0
+#define I40E_PRTGL_SAL_FC_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTGL_SAL_FC_SAL_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E30E0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E3360 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3110 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3120 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3140 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E3150 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E3370 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E34B0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E34C0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A 0x0008C480 /* Reset: GLOBR */
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT 0
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT 2
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT 4
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT 6
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT 8
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT 10
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT 12
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT 14
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B 0x0008C484 /* Reset: GLOBR */
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT 0
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT 2
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT 4
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT 6
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT 8
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT 10
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT 12
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT)
+#define I40E_GL_FWRESETCNT 0x00083100 /* Reset: POR */
+#define I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT 0
+#define I40E_GL_FWRESETCNT_FWRESETCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT)
+#define I40E_GL_MNG_FWSM 0x000B6134 /* Reset: POR */
+#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 0
+#define I40E_GL_MNG_FWSM_FW_MODES_MASK I40E_MASK(0x3, I40E_GL_MNG_FWSM_FW_MODES_SHIFT)
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 10
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT)
+#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11
+#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK I40E_MASK(0xF, I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT)
+#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15
+#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT)
+#define I40E_GL_MNG_FWSM_RESET_CNT_SHIFT 16
+#define I40E_GL_MNG_FWSM_RESET_CNT_MASK I40E_MASK(0x7, I40E_GL_MNG_FWSM_RESET_CNT_SHIFT)
+#define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19
+#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK I40E_MASK(0x3F, I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26
+#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27
+#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT 28
+#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT 29
+#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_HWARB_CTRL 0x000B6130 /* Reset: POR */
+#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT 0
+#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_MASK I40E_MASK(0x1, I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT)
+#define I40E_PRT_MNG_FTFT_DATA(_i) (0x000852A0 + ((_i) * 32)) /* _i=0...31 */ /* Reset: POR */
+#define I40E_PRT_MNG_FTFT_DATA_MAX_INDEX 31
+#define I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT 0
+#define I40E_PRT_MNG_FTFT_DATA_DWORD_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT)
+#define I40E_PRT_MNG_FTFT_LENGTH 0x00085260 /* Reset: POR */
+#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT 0
+#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT)
+#define I40E_PRT_MNG_FTFT_MASK(_i) (0x00085160 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
+#define I40E_PRT_MNG_FTFT_MASK_MAX_INDEX 7
+#define I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT 0
+#define I40E_PRT_MNG_FTFT_MASK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT)
+#define I40E_PRT_MNG_MANC 0x00256A20 /* Reset: POR */
+#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT 0
+#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT)
+#define I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT 1
+#define I40E_PRT_MNG_MANC_NCSI_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT)
+#define I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT 17
+#define I40E_PRT_MNG_MANC_RCV_TCO_EN_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT)
+#define I40E_PRT_MNG_MANC_RCV_ALL_SHIFT 19
+#define I40E_PRT_MNG_MANC_RCV_ALL_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_ALL_SHIFT)
+#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT 25
+#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT)
+#define I40E_PRT_MNG_MANC_NET_TYPE_SHIFT 26
+#define I40E_PRT_MNG_MANC_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NET_TYPE_SHIFT)
+#define I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT 28
+#define I40E_PRT_MNG_MANC_EN_BMC2OS_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT)
+#define I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT 29
+#define I40E_PRT_MNG_MANC_EN_BMC2NET_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT)
+#define I40E_PRT_MNG_MAVTV(_i) (0x00255900 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
+#define I40E_PRT_MNG_MAVTV_MAX_INDEX 7
+#define I40E_PRT_MNG_MAVTV_VID_SHIFT 0
+#define I40E_PRT_MNG_MAVTV_VID_MASK I40E_MASK(0xFFF, I40E_PRT_MNG_MAVTV_VID_SHIFT)
+#define I40E_PRT_MNG_MDEF(_i) (0x00255D00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
+#define I40E_PRT_MNG_MDEF_MAX_INDEX 7
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT 0
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT 4
+#define I40E_PRT_MNG_MDEF_BROADCAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT 5
+#define I40E_PRT_MNG_MDEF_VLAN_AND_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT 13
+#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT 17
+#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT 21
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT 25
+#define I40E_PRT_MNG_MDEF_BROADCAST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT 26
+#define I40E_PRT_MNG_MDEF_MULTICAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT 27
+#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT 28
+#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT 29
+#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT 30
+#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT 31
+#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT(_i) (0x00255F00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
+#define I40E_PRT_MNG_MDEF_EXT_MAX_INDEX 7
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT 0
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT 4
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT 8
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT 24
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT 25
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT 26
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT 27
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT 28
+#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT 29
+#define I40E_PRT_MNG_MDEF_EXT_MLD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT 30
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT 31
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT)
+#define I40E_PRT_MNG_MDEFVSI(_i) (0x00256580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_PRT_MNG_MDEFVSI_MAX_INDEX 3
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT 0
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT)
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT 16
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT)
+#define I40E_PRT_MNG_METF(_i) (0x00256780 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_PRT_MNG_METF_MAX_INDEX 3
+#define I40E_PRT_MNG_METF_ETYPE_SHIFT 0
+#define I40E_PRT_MNG_METF_ETYPE_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_METF_ETYPE_SHIFT)
+#define I40E_PRT_MNG_METF_POLARITY_SHIFT 30
+#define I40E_PRT_MNG_METF_POLARITY_MASK I40E_MASK(0x1, I40E_PRT_MNG_METF_POLARITY_SHIFT)
+#define I40E_PRT_MNG_MFUTP(_i) (0x00254E00 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */
+#define I40E_PRT_MNG_MFUTP_MAX_INDEX 15
+#define I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT 0
+#define I40E_PRT_MNG_MFUTP_MFUTP_N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT)
+#define I40E_PRT_MNG_MFUTP_UDP_SHIFT 16
+#define I40E_PRT_MNG_MFUTP_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_UDP_SHIFT)
+#define I40E_PRT_MNG_MFUTP_TCP_SHIFT 17
+#define I40E_PRT_MNG_MFUTP_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_TCP_SHIFT)
+#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT 18
+#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT)
+#define I40E_PRT_MNG_MIPAF4(_i) (0x00256280 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_PRT_MNG_MIPAF4_MAX_INDEX 3
+#define I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT 0
+#define I40E_PRT_MNG_MIPAF4_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT)
+#define I40E_PRT_MNG_MIPAF6(_i) (0x00254200 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */
+#define I40E_PRT_MNG_MIPAF6_MAX_INDEX 15
+#define I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT 0
+#define I40E_PRT_MNG_MIPAF6_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT)
+#define I40E_PRT_MNG_MMAH(_i) (0x00256380 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_PRT_MNG_MMAH_MAX_INDEX 3
+#define I40E_PRT_MNG_MMAH_MMAH_SHIFT 0
+#define I40E_PRT_MNG_MMAH_MMAH_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MMAH_MMAH_SHIFT)
+#define I40E_PRT_MNG_MMAL(_i) (0x00256480 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_PRT_MNG_MMAL_MAX_INDEX 3
+#define I40E_PRT_MNG_MMAL_MMAL_SHIFT 0
+#define I40E_PRT_MNG_MMAL_MMAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MMAL_MMAL_SHIFT)
+#define I40E_PRT_MNG_MNGONLY 0x00256A60 /* Reset: POR */
+#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT 0
+#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT)
+#define I40E_PRT_MNG_MSFM 0x00256AA0 /* Reset: POR */
+#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT 0
+#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT 1
+#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT 2
+#define I40E_PRT_MNG_MSFM_PORT_298_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT 3
+#define I40E_PRT_MNG_MSFM_PORT_298_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT 4
+#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT 5
+#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT 6
+#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT 7
+#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT)
+#define I40E_MSIX_PBA(_i) (0x00001000 + ((_i) * 4)) /* _i=0...5 */ /* Reset: FLR */
+#define I40E_MSIX_PBA_MAX_INDEX 5
+#define I40E_MSIX_PBA_PENBIT_SHIFT 0
+#define I40E_MSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_PBA_PENBIT_SHIFT)
+#define I40E_MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
+#define I40E_MSIX_TADD_MAX_INDEX 128
+#define I40E_MSIX_TADD_MSIXTADD10_SHIFT 0
+#define I40E_MSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_MSIX_TADD_MSIXTADD10_SHIFT)
+#define I40E_MSIX_TADD_MSIXTADD_SHIFT 2
+#define I40E_MSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_MSIX_TADD_MSIXTADD_SHIFT)
+#define I40E_MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
+#define I40E_MSIX_TMSG_MAX_INDEX 128
+#define I40E_MSIX_TMSG_MSIXTMSG_SHIFT 0
+#define I40E_MSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TMSG_MSIXTMSG_SHIFT)
+#define I40E_MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
+#define I40E_MSIX_TUADD_MAX_INDEX 128
+#define I40E_MSIX_TUADD_MSIXTUADD_SHIFT 0
+#define I40E_MSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TUADD_MSIXTUADD_SHIFT)
+#define I40E_MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
+#define I40E_MSIX_TVCTRL_MAX_INDEX 128
+#define I40E_MSIX_TVCTRL_MASK_SHIFT 0
+#define I40E_MSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_MSIX_TVCTRL_MASK_SHIFT)
+#define I40E_VFMSIX_PBA1(_i) (0x00002000 + ((_i) * 4)) /* _i=0...19 */ /* Reset: VFLR */
+#define I40E_VFMSIX_PBA1_MAX_INDEX 19
+#define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0
+#define I40E_VFMSIX_PBA1_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA1_PENBIT_SHIFT)
+#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TADD1_MAX_INDEX 639
+#define I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT 0
+#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT)
+#define I40E_VFMSIX_TADD1_MSIXTADD_SHIFT 2
+#define I40E_VFMSIX_TADD1_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD1_MSIXTADD_SHIFT)
+#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TMSG1_MAX_INDEX 639
+#define I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT 0
+#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT)
+#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TUADD1_MAX_INDEX 639
+#define I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT 0
+#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT)
+#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639
+#define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0
+#define I40E_VFMSIX_TVCTRL1_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL1_MASK_SHIFT)
+#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */
+#define I40E_GLNVM_FLA_FL_SCK_SHIFT 0
+#define I40E_GLNVM_FLA_FL_SCK_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SCK_SHIFT)
+#define I40E_GLNVM_FLA_FL_CE_SHIFT 1
+#define I40E_GLNVM_FLA_FL_CE_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_CE_SHIFT)
+#define I40E_GLNVM_FLA_FL_SI_SHIFT 2
+#define I40E_GLNVM_FLA_FL_SI_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SI_SHIFT)
+#define I40E_GLNVM_FLA_FL_SO_SHIFT 3
+#define I40E_GLNVM_FLA_FL_SO_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SO_SHIFT)
+#define I40E_GLNVM_FLA_FL_REQ_SHIFT 4
+#define I40E_GLNVM_FLA_FL_REQ_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_REQ_SHIFT)
+#define I40E_GLNVM_FLA_FL_GNT_SHIFT 5
+#define I40E_GLNVM_FLA_FL_GNT_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_GNT_SHIFT)
+#define I40E_GLNVM_FLA_LOCKED_SHIFT 6
+#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT)
+#define I40E_GLNVM_FLA_FL_SADDR_SHIFT 18
+#define I40E_GLNVM_FLA_FL_SADDR_MASK I40E_MASK(0x7FF, I40E_GLNVM_FLA_FL_SADDR_SHIFT)
+#define I40E_GLNVM_FLA_FL_BUSY_SHIFT 30
+#define I40E_GLNVM_FLA_FL_BUSY_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_BUSY_SHIFT)
+#define I40E_GLNVM_FLA_FL_DER_SHIFT 31
+#define I40E_GLNVM_FLA_FL_DER_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_DER_SHIFT)
+#define I40E_GLNVM_FLASHID 0x000B6104 /* Reset: POR */
+#define I40E_GLNVM_FLASHID_FLASHID_SHIFT 0
+#define I40E_GLNVM_FLASHID_FLASHID_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_FLASHID_FLASHID_SHIFT)
+#define I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT 31
+#define I40E_GLNVM_FLASHID_FLEEP_PERF_MASK I40E_MASK(0x1, I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT)
+#define I40E_GLNVM_GENS 0x000B6100 /* Reset: POR */
+#define I40E_GLNVM_GENS_NVM_PRES_SHIFT 0
+#define I40E_GLNVM_GENS_NVM_PRES_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_NVM_PRES_SHIFT)
+#define I40E_GLNVM_GENS_SR_SIZE_SHIFT 5
+#define I40E_GLNVM_GENS_SR_SIZE_MASK I40E_MASK(0x7, I40E_GLNVM_GENS_SR_SIZE_SHIFT)
+#define I40E_GLNVM_GENS_BANK1VAL_SHIFT 8
+#define I40E_GLNVM_GENS_BANK1VAL_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_BANK1VAL_SHIFT)
+#define I40E_GLNVM_GENS_ALT_PRST_SHIFT 23
+#define I40E_GLNVM_GENS_ALT_PRST_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_ALT_PRST_SHIFT)
+#define I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT 25
+#define I40E_GLNVM_GENS_FL_AUTO_RD_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT)
+#define I40E_GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */ /* Reset: POR */
+#define I40E_GLNVM_PROTCSR_MAX_INDEX 59
+#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT 0
+#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT)
+#define I40E_GLNVM_SRCTL 0x000B6110 /* Reset: POR */
+#define I40E_GLNVM_SRCTL_SRBUSY_SHIFT 0
+#define I40E_GLNVM_SRCTL_SRBUSY_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_SRBUSY_SHIFT)
+#define I40E_GLNVM_SRCTL_ADDR_SHIFT 14
+#define I40E_GLNVM_SRCTL_ADDR_MASK I40E_MASK(0x7FFF, I40E_GLNVM_SRCTL_ADDR_SHIFT)
+#define I40E_GLNVM_SRCTL_WRITE_SHIFT 29
+#define I40E_GLNVM_SRCTL_WRITE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_WRITE_SHIFT)
+#define I40E_GLNVM_SRCTL_START_SHIFT 30
+#define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT)
+#define I40E_GLNVM_SRCTL_DONE_SHIFT 31
+#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_DONE_SHIFT)
+#define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */
+#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0
+#define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT)
+#define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16
+#define I40E_GLNVM_SRDATA_RDDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_RDDATA_SHIFT)
+#define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */
+#define I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT 0
+#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT 1
+#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT 2
+#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT 3
+#define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT 4
+#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT 5
+#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT 6
+#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT 7
+#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT 8
+#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT 9
+#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT)
+#define I40E_GLPCI_BYTCTH 0x0009C484 /* Reset: PCIR */
+#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0
+#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT)
+#define I40E_GLPCI_BYTCTL 0x0009C488 /* Reset: PCIR */
+#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT 0
+#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT)
+#define I40E_GLPCI_CAPCTRL 0x000BE4A4 /* Reset: PCIR */
+#define I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT 0
+#define I40E_GLPCI_CAPCTRL_VPD_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP 0x000BE4A8 /* Reset: PCIR */
+#define I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT 0
+#define I40E_GLPCI_CAPSUP_PCIE_VER_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT)
+#define I40E_GLPCI_CAPSUP_LTR_EN_SHIFT 2
+#define I40E_GLPCI_CAPSUP_LTR_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LTR_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_TPH_EN_SHIFT 3
+#define I40E_GLPCI_CAPSUP_TPH_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_TPH_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ARI_EN_SHIFT 4
+#define I40E_GLPCI_CAPSUP_ARI_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ARI_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_IOV_EN_SHIFT 5
+#define I40E_GLPCI_CAPSUP_IOV_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IOV_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ACS_EN_SHIFT 6
+#define I40E_GLPCI_CAPSUP_ACS_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ACS_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_SEC_EN_SHIFT 7
+#define I40E_GLPCI_CAPSUP_SEC_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_SEC_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT 16
+#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT 17
+#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_IDO_EN_SHIFT 18
+#define I40E_GLPCI_CAPSUP_IDO_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IDO_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT 19
+#define I40E_GLPCI_CAPSUP_MSI_MASK_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT)
+#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT 20
+#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT 30
+#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT)
+#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT 31
+#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT)
+#define I40E_GLPCI_CNF 0x000BE4C0 /* Reset: POR */
+#define I40E_GLPCI_CNF_FLEX10_SHIFT 1
+#define I40E_GLPCI_CNF_FLEX10_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_FLEX10_SHIFT)
+#define I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT 2
+#define I40E_GLPCI_CNF_WAKE_PIN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT)
+#define I40E_GLPCI_CNF2 0x000BE494 /* Reset: PCIR */
+#define I40E_GLPCI_CNF2_RO_DIS_SHIFT 0
+#define I40E_GLPCI_CNF2_RO_DIS_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_RO_DIS_SHIFT)
+#define I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT 1
+#define I40E_GLPCI_CNF2_CACHELINE_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT)
+#define I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT 2
+#define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT)
+#define I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT 13
+#define I40E_GLPCI_CNF2_MSI_X_VF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT)
+#define I40E_GLPCI_DREVID 0x0009C480 /* Reset: PCIR */
+#define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0
+#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT)
+#define I40E_GLPCI_GSCL_1 0x0009C48C /* Reset: PCIR */
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT 0
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT 1
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT 2
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT 3
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT 4
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT 5
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT 6
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT 7
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT 8
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT 9
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT 14
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT 15
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT 28
+#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT 29
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT 30
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT 31
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT)
+#define I40E_GLPCI_GSCL_2 0x0009C490 /* Reset: PCIR */
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT 0
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT 8
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT 16
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT 24
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT)
+#define I40E_GLPCI_GSCL_5_8(_i) (0x0009C494 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */
+#define I40E_GLPCI_GSCL_5_8_MAX_INDEX 3
+#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT 0
+#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT)
+#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT 16
+#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT)
+#define I40E_GLPCI_GSCN_0_3(_i) (0x0009C4A4 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */
+#define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3
+#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0
+#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT)
+#define I40E_GLPCI_LATCT 0x0009C4B4 /* Reset: PCIR */
+#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT 0
+#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT)
+#define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */
+#define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0
+#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT)
+#define I40E_GLPCI_LBARCTRL_BAR32_SHIFT 1
+#define I40E_GLPCI_LBARCTRL_BAR32_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_BAR32_SHIFT)
+#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT 3
+#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT 4
+#define I40E_GLPCI_LBARCTRL_RSVD_4_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT)
+#define I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT 6
+#define I40E_GLPCI_LBARCTRL_FL_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT 10
+#define I40E_GLPCI_LBARCTRL_RSVD_10_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT)
+#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT 11
+#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT)
+#define I40E_GLPCI_LINKCAP 0x000BE4AC /* Reset: PCIR */
+#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT 0
+#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_MASK I40E_MASK(0x3F, I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT)
+#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT 6
+#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_MASK I40E_MASK(0x7, I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT)
+#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT 9
+#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK I40E_MASK(0xF, I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT)
+#define I40E_GLPCI_PCIERR 0x000BE4FC /* Reset: PCIR */
+#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0
+#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT)
+#define I40E_GLPCI_PKTCT 0x0009C4BC /* Reset: PCIR */
+#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0
+#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT)
+#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4 /* Reset: PCIR */
+#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0
+#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT 16
+#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_PFB 0x0009C4F0 /* Reset: PCIR */
+#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT 0
+#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16
+#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PMSUP 0x000BE4B0 /* Reset: PCIR */
+#define I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT 0
+#define I40E_GLPCI_PMSUP_ASPM_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT)
+#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT 2
+#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT 5
+#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT 8
+#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT 11
+#define I40E_GLPCI_PMSUP_L1_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT 14
+#define I40E_GLPCI_PMSUP_SLOT_CLK_MASK I40E_MASK(0x1, I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT)
+#define I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT 15
+#define I40E_GLPCI_PMSUP_OBFF_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC /* Reset: PCIR */
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT)
+#define I40E_GLPCI_PWRDATA 0x000BE490 /* Reset: PCIR */
+#define I40E_GLPCI_PWRDATA_D0_POWER_SHIFT 0
+#define I40E_GLPCI_PWRDATA_D0_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D0_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT 8
+#define I40E_GLPCI_PWRDATA_COMM_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_D3_POWER_SHIFT 16
+#define I40E_GLPCI_PWRDATA_D3_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D3_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT 24
+#define I40E_GLPCI_PWRDATA_DATA_SCALE_MASK I40E_MASK(0x3, I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT)
+#define I40E_GLPCI_REVID 0x000BE4B4 /* Reset: PCIR */
+#define I40E_GLPCI_REVID_NVM_REVID_SHIFT 0
+#define I40E_GLPCI_REVID_NVM_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_REVID_NVM_REVID_SHIFT)
+#define I40E_GLPCI_SERH 0x000BE49C /* Reset: PCIR */
+#define I40E_GLPCI_SERH_SER_NUM_H_SHIFT 0
+#define I40E_GLPCI_SERH_SER_NUM_H_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SERH_SER_NUM_H_SHIFT)
+#define I40E_GLPCI_SERL 0x000BE498 /* Reset: PCIR */
+#define I40E_GLPCI_SERL_SER_NUM_L_SHIFT 0
+#define I40E_GLPCI_SERL_SER_NUM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SERL_SER_NUM_L_SHIFT)
+#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8 /* Reset: PCIR */
+#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0
+#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT)
+#define I40E_GLPCI_SPARE_BITS_1 0x0009C4FC /* Reset: PCIR */
+#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT 0
+#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT)
+#define I40E_GLPCI_SUBVENID 0x000BE48C /* Reset: PCIR */
+#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT 0
+#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT)
+#define I40E_GLPCI_UPADD 0x000BE4F8 /* Reset: PCIR */
+#define I40E_GLPCI_UPADD_ADDRESS_SHIFT 1
+#define I40E_GLPCI_UPADD_ADDRESS_MASK I40E_MASK(0x7FFFFFFF, I40E_GLPCI_UPADD_ADDRESS_SHIFT)
+#define I40E_GLPCI_VENDORID 0x000BE518 /* Reset: PCIR */
+#define I40E_GLPCI_VENDORID_VENDORID_SHIFT 0
+#define I40E_GLPCI_VENDORID_VENDORID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_VENDORID_VENDORID_SHIFT)
+#define I40E_GLPCI_VFSUP 0x000BE4B8 /* Reset: PCIR */
+#define I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT 0
+#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT)
+#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1
+#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT)
+#define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */
+#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0
+#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT)
+#define I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT 3
+#define I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK I40E_MASK(0x1F, I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT)
+#define I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT 8
+#define I40E_PF_FUNC_RID_BUS_NUMBER_MASK I40E_MASK(0xFF, I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT)
+#define I40E_PF_PCI_CIAA 0x0009C080 /* Reset: FLR */
+#define I40E_PF_PCI_CIAA_ADDRESS_SHIFT 0
+#define I40E_PF_PCI_CIAA_ADDRESS_MASK I40E_MASK(0xFFF, I40E_PF_PCI_CIAA_ADDRESS_SHIFT)
+#define I40E_PF_PCI_CIAA_VF_NUM_SHIFT 12
+#define I40E_PF_PCI_CIAA_VF_NUM_MASK I40E_MASK(0x7F, I40E_PF_PCI_CIAA_VF_NUM_SHIFT)
+#define I40E_PF_PCI_CIAD 0x0009C100 /* Reset: FLR */
+#define I40E_PF_PCI_CIAD_DATA_SHIFT 0
+#define I40E_PF_PCI_CIAD_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_PCI_CIAD_DATA_SHIFT)
+#define I40E_PFPCI_CLASS 0x000BE400 /* Reset: PCIR */
+#define I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT 0
+#define I40E_PFPCI_CLASS_STORAGE_CLASS_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT)
+#define I40E_PFPCI_CLASS_RESERVED_1_SHIFT 1
+#define I40E_PFPCI_CLASS_RESERVED_1_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_RESERVED_1_SHIFT)
+#define I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT 2
+#define I40E_PFPCI_CLASS_PF_IS_LAN_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT)
+#define I40E_PFPCI_CNF 0x000BE000 /* Reset: PCIR */
+#define I40E_PFPCI_CNF_MSI_EN_SHIFT 2
+#define I40E_PFPCI_CNF_MSI_EN_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_MSI_EN_SHIFT)
+#define I40E_PFPCI_CNF_EXROM_DIS_SHIFT 3
+#define I40E_PFPCI_CNF_EXROM_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_EXROM_DIS_SHIFT)
+#define I40E_PFPCI_CNF_IO_BAR_SHIFT 4
+#define I40E_PFPCI_CNF_IO_BAR_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_IO_BAR_SHIFT)
+#define I40E_PFPCI_CNF_INT_PIN_SHIFT 5
+#define I40E_PFPCI_CNF_INT_PIN_MASK I40E_MASK(0x3, I40E_PFPCI_CNF_INT_PIN_SHIFT)
+#define I40E_PFPCI_DEVID 0x000BE080 /* Reset: PCIR */
+#define I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT 0
+#define I40E_PFPCI_DEVID_PF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT)
+#define I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT 16
+#define I40E_PFPCI_DEVID_VF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT)
+#define I40E_PFPCI_FACTPS 0x0009C180 /* Reset: FLR */
+#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT 0
+#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_MASK I40E_MASK(0x3, I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT)
+#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT 3
+#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_MASK I40E_MASK(0x1, I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT)
+#define I40E_PFPCI_FUNC 0x000BE200 /* Reset: POR */
+#define I40E_PFPCI_FUNC_FUNC_DIS_SHIFT 0
+#define I40E_PFPCI_FUNC_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT 1
+#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT 2
+#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT)
+#define I40E_PFPCI_FUNC2 0x000BE180 /* Reset: PCIR */
+#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT 0
+#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_ICAUSE 0x0009C200 /* Reset: PFR */
+#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT 0
+#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT)
+#define I40E_PFPCI_IENA 0x0009C280 /* Reset: PFR */
+#define I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT 0
+#define I40E_PFPCI_IENA_PCIE_ERR_EN_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT)
+#define I40E_PFPCI_PF_FLUSH_DONE 0x0009C800 /* Reset: PCIR */
+#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_PM 0x000BE300 /* Reset: POR */
+#define I40E_PFPCI_PM_PME_EN_SHIFT 0
+#define I40E_PFPCI_PM_PME_EN_MASK I40E_MASK(0x1, I40E_PFPCI_PM_PME_EN_SHIFT)
+#define I40E_PFPCI_STATUS1 0x000BE280 /* Reset: POR */
+#define I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT 0
+#define I40E_PFPCI_STATUS1_FUNC_VALID_MASK I40E_MASK(0x1, I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT)
+#define I40E_PFPCI_SUBSYSID 0x000BE100 /* Reset: PCIR */
+#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT 0
+#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT)
+#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT 16
+#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT)
+#define I40E_PFPCI_VF_FLUSH_DONE 0x0000E400 /* Reset: PCIR */
+#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: PCIR */
+#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880 /* Reset: PCIR */
+#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VMINDEX 0x0009C300 /* Reset: PCIR */
+#define I40E_PFPCI_VMINDEX_VMINDEX_SHIFT 0
+#define I40E_PFPCI_VMINDEX_VMINDEX_MASK I40E_MASK(0x1FF, I40E_PFPCI_VMINDEX_VMINDEX_SHIFT)
+#define I40E_PFPCI_VMPEND 0x0009C380 /* Reset: PCIR */
+#define I40E_PFPCI_VMPEND_PENDING_SHIFT 0
+#define I40E_PFPCI_VMPEND_PENDING_MASK I40E_MASK(0x1, I40E_PFPCI_VMPEND_PENDING_SHIFT)
+#define I40E_PRTPM_EEE_STAT 0x001E4320 /* Reset: GLOBR */
+#define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29
+#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT)
+#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT 30
+#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT)
+#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31
+#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT)
+#define I40E_PRTPM_EEEC 0x001E4380 /* Reset: GLOBR */
+#define I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT 16
+#define I40E_PRTPM_EEEC_TW_WAKE_MIN_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT)
+#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT 24
+#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_MASK I40E_MASK(0x3, I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT)
+#define I40E_PRTPM_EEEC_TEEE_DLY_SHIFT 26
+#define I40E_PRTPM_EEEC_TEEE_DLY_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TEEE_DLY_SHIFT)
+#define I40E_PRTPM_EEEFWD 0x001E4400 /* Reset: GLOBR */
+#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT 31
+#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_MASK I40E_MASK(0x1, I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT)
+#define I40E_PRTPM_EEER 0x001E4360 /* Reset: GLOBR */
+#define I40E_PRTPM_EEER_TW_SYSTEM_SHIFT 0
+#define I40E_PRTPM_EEER_TW_SYSTEM_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEER_TW_SYSTEM_SHIFT)
+#define I40E_PRTPM_EEER_TX_LPI_EN_SHIFT 16
+#define I40E_PRTPM_EEER_TX_LPI_EN_MASK I40E_MASK(0x1, I40E_PRTPM_EEER_TX_LPI_EN_SHIFT)
+#define I40E_PRTPM_EEETXC 0x001E43E0 /* Reset: GLOBR */
+#define I40E_PRTPM_EEETXC_TW_PHY_SHIFT 0
+#define I40E_PRTPM_EEETXC_TW_PHY_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEETXC_TW_PHY_SHIFT)
+#define I40E_PRTPM_GC 0x000B8140 /* Reset: POR */
+#define I40E_PRTPM_GC_EMP_LINK_ON_SHIFT 0
+#define I40E_PRTPM_GC_EMP_LINK_ON_MASK I40E_MASK(0x1, I40E_PRTPM_GC_EMP_LINK_ON_SHIFT)
+#define I40E_PRTPM_GC_MNG_VETO_SHIFT 1
+#define I40E_PRTPM_GC_MNG_VETO_MASK I40E_MASK(0x1, I40E_PRTPM_GC_MNG_VETO_SHIFT)
+#define I40E_PRTPM_GC_RATD_SHIFT 2
+#define I40E_PRTPM_GC_RATD_MASK I40E_MASK(0x1, I40E_PRTPM_GC_RATD_SHIFT)
+#define I40E_PRTPM_GC_LCDMP_SHIFT 3
+#define I40E_PRTPM_GC_LCDMP_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LCDMP_SHIFT)
+#define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31
+#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT)
+#define I40E_PRTPM_RLPIC 0x001E43A0 /* Reset: GLOBR */
+#define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0
+#define I40E_PRTPM_RLPIC_ERLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_RLPIC_ERLPIC_SHIFT)
+#define I40E_PRTPM_TLPIC 0x001E43C0 /* Reset: GLOBR */
+#define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0
+#define I40E_PRTPM_TLPIC_ETLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_TLPIC_ETLPIC_SHIFT)
+#define I40E_GLRPB_DPSS 0x000AC828 /* Reset: CORER */
+#define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0
+#define I40E_GLRPB_DPSS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_DPSS_DPS_TCN_SHIFT)
+#define I40E_GLRPB_GHW 0x000AC830 /* Reset: CORER */
+#define I40E_GLRPB_GHW_GHW_SHIFT 0
+#define I40E_GLRPB_GHW_GHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GHW_GHW_SHIFT)
+#define I40E_GLRPB_GLW 0x000AC834 /* Reset: CORER */
+#define I40E_GLRPB_GLW_GLW_SHIFT 0
+#define I40E_GLRPB_GLW_GLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GLW_GLW_SHIFT)
+#define I40E_GLRPB_PHW 0x000AC844 /* Reset: CORER */
+#define I40E_GLRPB_PHW_PHW_SHIFT 0
+#define I40E_GLRPB_PHW_PHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PHW_PHW_SHIFT)
+#define I40E_GLRPB_PLW 0x000AC848 /* Reset: CORER */
+#define I40E_GLRPB_PLW_PLW_SHIFT 0
+#define I40E_GLRPB_PLW_PLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PLW_PLW_SHIFT)
+#define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_DHW_MAX_INDEX 7
+#define I40E_PRTRPB_DHW_DHW_TCN_SHIFT 0
+#define I40E_PRTRPB_DHW_DHW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DHW_DHW_TCN_SHIFT)
+#define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_DLW_MAX_INDEX 7
+#define I40E_PRTRPB_DLW_DLW_TCN_SHIFT 0
+#define I40E_PRTRPB_DLW_DLW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DLW_DLW_TCN_SHIFT)
+#define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_DPS_MAX_INDEX 7
+#define I40E_PRTRPB_DPS_DPS_TCN_SHIFT 0
+#define I40E_PRTRPB_DPS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DPS_DPS_TCN_SHIFT)
+#define I40E_PRTRPB_SHT(_i) (0x000AC480 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_SHT_MAX_INDEX 7
+#define I40E_PRTRPB_SHT_SHT_TCN_SHIFT 0
+#define I40E_PRTRPB_SHT_SHT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHT_SHT_TCN_SHIFT)
+#define I40E_PRTRPB_SHW 0x000AC580 /* Reset: CORER */
+#define I40E_PRTRPB_SHW_SHW_SHIFT 0
+#define I40E_PRTRPB_SHW_SHW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHW_SHW_SHIFT)
+#define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_SLT_MAX_INDEX 7
+#define I40E_PRTRPB_SLT_SLT_TCN_SHIFT 0
+#define I40E_PRTRPB_SLT_SLT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLT_SLT_TCN_SHIFT)
+#define I40E_PRTRPB_SLW 0x000AC6A0 /* Reset: CORER */
+#define I40E_PRTRPB_SLW_SLW_SHIFT 0
+#define I40E_PRTRPB_SLW_SLW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLW_SLW_SHIFT)
+#define I40E_PRTRPB_SPS 0x000AC7C0 /* Reset: CORER */
+#define I40E_PRTRPB_SPS_SPS_SHIFT 0
+#define I40E_PRTRPB_SPS_SPS_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SPS_SPS_SHIFT)
+#define I40E_GLQF_CTL 0x00269BA4 /* Reset: CORER */
+#define I40E_GLQF_CTL_HTOEP_SHIFT 1
+#define I40E_GLQF_CTL_HTOEP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_SHIFT)
+#define I40E_GLQF_CTL_HTOEP_FCOE_SHIFT 2
+#define I40E_GLQF_CTL_HTOEP_FCOE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_FCOE_SHIFT)
+#define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3
+#define I40E_GLQF_CTL_PCNT_ALLOC_MASK I40E_MASK(0x7, I40E_GLQF_CTL_PCNT_ALLOC_SHIFT)
+#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT 6
+#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT)
+#define I40E_GLQF_CTL_RSVD_SHIFT 7
+#define I40E_GLQF_CTL_RSVD_MASK I40E_MASK(0x1, I40E_GLQF_CTL_RSVD_SHIFT)
+#define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8
+#define I40E_GLQF_CTL_MAXPEBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXPEBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11
+#define I40E_GLQF_CTL_MAXFCBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFCBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXFDBLEN_SHIFT 14
+#define I40E_GLQF_CTL_MAXFDBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFDBLEN_SHIFT)
+#define I40E_GLQF_CTL_FDBEST_SHIFT 17
+#define I40E_GLQF_CTL_FDBEST_MASK I40E_MASK(0xFF, I40E_GLQF_CTL_FDBEST_SHIFT)
+#define I40E_GLQF_CTL_PROGPRIO_SHIFT 25
+#define I40E_GLQF_CTL_PROGPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_PROGPRIO_SHIFT)
+#define I40E_GLQF_CTL_INVALPRIO_SHIFT 26
+#define I40E_GLQF_CTL_INVALPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_INVALPRIO_SHIFT)
+#define I40E_GLQF_CTL_IGNORE_IP_SHIFT 27
+#define I40E_GLQF_CTL_IGNORE_IP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_IGNORE_IP_SHIFT)
+#define I40E_GLQF_FDCNT_0 0x00269BAC /* Reset: CORER */
+#define I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT 0
+#define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT)
+#define I40E_GLQF_FDCNT_0_BESTCNT_SHIFT 13
+#define I40E_GLQF_FDCNT_0_BESTCNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_BESTCNT_SHIFT)
+#define I40E_GLQF_HKEY(_i) (0x00270140 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
+#define I40E_GLQF_HKEY_MAX_INDEX 12
+#define I40E_GLQF_HKEY_KEY_0_SHIFT 0
+#define I40E_GLQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_0_SHIFT)
+#define I40E_GLQF_HKEY_KEY_1_SHIFT 8
+#define I40E_GLQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_1_SHIFT)
+#define I40E_GLQF_HKEY_KEY_2_SHIFT 16
+#define I40E_GLQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_2_SHIFT)
+#define I40E_GLQF_HKEY_KEY_3_SHIFT 24
+#define I40E_GLQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_3_SHIFT)
+#define I40E_GLQF_HSYM(_i) (0x00269D00 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_HSYM_MAX_INDEX 63
+#define I40E_GLQF_HSYM_SYMH_ENA_SHIFT 0
+#define I40E_GLQF_HSYM_SYMH_ENA_MASK I40E_MASK(0x1, I40E_GLQF_HSYM_SYMH_ENA_SHIFT)
+#define I40E_GLQF_PCNT(_i) (0x00266800 + ((_i) * 4)) /* _i=0...511 */ /* Reset: CORER */
+#define I40E_GLQF_PCNT_MAX_INDEX 511
+#define I40E_GLQF_PCNT_PCNT_SHIFT 0
+#define I40E_GLQF_PCNT_PCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_PCNT_PCNT_SHIFT)
+#define I40E_GLQF_SWAP(_i, _j) (0x00267E00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_SWAP_MAX_INDEX 1
+#define I40E_GLQF_SWAP_OFF0_SRC0_SHIFT 0
+#define I40E_GLQF_SWAP_OFF0_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC0_SHIFT)
+#define I40E_GLQF_SWAP_OFF0_SRC1_SHIFT 6
+#define I40E_GLQF_SWAP_OFF0_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC1_SHIFT)
+#define I40E_GLQF_SWAP_FLEN0_SHIFT 12
+#define I40E_GLQF_SWAP_FLEN0_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN0_SHIFT)
+#define I40E_GLQF_SWAP_OFF1_SRC0_SHIFT 16
+#define I40E_GLQF_SWAP_OFF1_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC0_SHIFT)
+#define I40E_GLQF_SWAP_OFF1_SRC1_SHIFT 22
+#define I40E_GLQF_SWAP_OFF1_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC1_SHIFT)
+#define I40E_GLQF_SWAP_FLEN1_SHIFT 28
+#define I40E_GLQF_SWAP_FLEN1_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN1_SHIFT)
+#define I40E_PFQF_CTL_0 0x001C0AC0 /* Reset: CORER */
+#define I40E_PFQF_CTL_0_PEHSIZE_SHIFT 0
+#define I40E_PFQF_CTL_0_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PEDSIZE_SHIFT 5
+#define I40E_PFQF_CTL_0_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT 10
+#define I40E_PFQF_CTL_0_PFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT 14
+#define I40E_PFQF_CTL_0_PFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT 16
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_FD_ENA_SHIFT 17
+#define I40E_PFQF_CTL_0_FD_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_FD_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT 18
+#define I40E_PFQF_CTL_0_ETYPE_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT 19
+#define I40E_PFQF_CTL_0_MACVLAN_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT 20
+#define I40E_PFQF_CTL_0_VFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT 24
+#define I40E_PFQF_CTL_0_VFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_1 0x00245D80 /* Reset: CORER */
+#define I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT 0
+#define I40E_PFQF_CTL_1_CLEARFDTABLE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT)
+#define I40E_PFQF_FDALLOC 0x00246280 /* Reset: CORER */
+#define I40E_PFQF_FDALLOC_FDALLOC_SHIFT 0
+#define I40E_PFQF_FDALLOC_FDALLOC_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDALLOC_SHIFT)
+#define I40E_PFQF_FDALLOC_FDBEST_SHIFT 8
+#define I40E_PFQF_FDALLOC_FDBEST_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDBEST_SHIFT)
+#define I40E_PFQF_FDSTAT 0x00246380 /* Reset: CORER */
+#define I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT 0
+#define I40E_PFQF_FDSTAT_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT)
+#define I40E_PFQF_FDSTAT_BEST_CNT_SHIFT 16
+#define I40E_PFQF_FDSTAT_BEST_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_BEST_CNT_SHIFT)
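+/*
+ * Illustrative sketch only (not part of the generated register map): the
+ * *_SHIFT/*_MASK pairs above are normally consumed by masking a 32-bit
+ * register value and shifting the field down.  This assumes the usual
+ * I40E_MASK(mask, shift) definition of ((mask) << (shift)) and the u32
+ * typedef from the OS-dependent shared code; the helper name below is
+ * hypothetical.
+ */
+static inline u32
+ixl_fdstat_guaranteed_cnt(u32 fdstat)
+{
+	/* Extract GUARANT_CNT (bits 12:0) from a PFQF_FDSTAT read. */
+	return ((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
+	    I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
+}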
+#define I40E_PFQF_HENA(_i) (0x00245900 + ((_i) * 128)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_PFQF_HENA_MAX_INDEX 1
+#define I40E_PFQF_HENA_PTYPE_ENA_SHIFT 0
+#define I40E_PFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFQF_HENA_PTYPE_ENA_SHIFT)
+#define I40E_PFQF_HKEY(_i) (0x00244800 + ((_i) * 128)) /* _i=0...12 */ /* Reset: CORER */
+#define I40E_PFQF_HKEY_MAX_INDEX 12
+#define I40E_PFQF_HKEY_KEY_0_SHIFT 0
+#define I40E_PFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_0_SHIFT)
+#define I40E_PFQF_HKEY_KEY_1_SHIFT 8
+#define I40E_PFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_1_SHIFT)
+#define I40E_PFQF_HKEY_KEY_2_SHIFT 16
+#define I40E_PFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_2_SHIFT)
+#define I40E_PFQF_HKEY_KEY_3_SHIFT 24
+#define I40E_PFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_3_SHIFT)
+#define I40E_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_PFQF_HLUT_MAX_INDEX 127
+#define I40E_PFQF_HLUT_LUT0_SHIFT 0
+#define I40E_PFQF_HLUT_LUT0_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT0_SHIFT)
+#define I40E_PFQF_HLUT_LUT1_SHIFT 8
+#define I40E_PFQF_HLUT_LUT1_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT1_SHIFT)
+#define I40E_PFQF_HLUT_LUT2_SHIFT 16
+#define I40E_PFQF_HLUT_LUT2_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT2_SHIFT)
+#define I40E_PFQF_HLUT_LUT3_SHIFT 24
+#define I40E_PFQF_HLUT_LUT3_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT3_SHIFT)
+#define I40E_PRTQF_CTL_0 0x00256E60 /* Reset: CORER */
+#define I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT 0
+#define I40E_PRTQF_CTL_0_HSYM_ENA_MASK I40E_MASK(0x1, I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT)
+#define I40E_PRTQF_FD_FLXINSET(_i) (0x00253800 + ((_i) * 32)) /* _i=0...63 */ /* Reset: CORER */
+#define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63
+#define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0
+#define I40E_PRTQF_FD_FLXINSET_INSET_MASK I40E_MASK(0xFF, I40E_PRTQF_FD_FLXINSET_INSET_SHIFT)
+#define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */
+#define I40E_PRTQF_FD_MSK_MAX_INDEX 63
+#define I40E_PRTQF_FD_MSK_MASK_SHIFT 0
+#define I40E_PRTQF_FD_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRTQF_FD_MSK_MASK_SHIFT)
+#define I40E_PRTQF_FD_MSK_OFFSET_SHIFT 16
+#define I40E_PRTQF_FD_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_PRTQF_FD_MSK_OFFSET_SHIFT)
+#define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */ /* Reset: CORER */
+#define I40E_PRTQF_FLX_PIT_MAX_INDEX 8
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
+#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 5
+#define I40E_PRTQF_FLX_PIT_FSIZE_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
+#define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10
+#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
+#define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...1, _VF=0...127 */ /* Reset: CORER */
+#define I40E_VFQF_HENA1_MAX_INDEX 1
+#define I40E_VFQF_HENA1_PTYPE_ENA_SHIFT 0
+#define I40E_VFQF_HENA1_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA1_PTYPE_ENA_SHIFT)
+#define I40E_VFQF_HKEY1(_i, _VF) (0x00228000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...12, _VF=0...127 */ /* Reset: CORER */
+#define I40E_VFQF_HKEY1_MAX_INDEX 12
+#define I40E_VFQF_HKEY1_KEY_0_SHIFT 0
+#define I40E_VFQF_HKEY1_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_0_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_1_SHIFT 8
+#define I40E_VFQF_HKEY1_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_1_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_2_SHIFT 16
+#define I40E_VFQF_HKEY1_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_2_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_3_SHIFT 24
+#define I40E_VFQF_HKEY1_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_3_SHIFT)
+#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: CORER */
+#define I40E_VFQF_HLUT1_MAX_INDEX 15
+#define I40E_VFQF_HLUT1_LUT0_SHIFT 0
+#define I40E_VFQF_HLUT1_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT0_SHIFT)
+#define I40E_VFQF_HLUT1_LUT1_SHIFT 8
+#define I40E_VFQF_HLUT1_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT1_SHIFT)
+#define I40E_VFQF_HLUT1_LUT2_SHIFT 16
+#define I40E_VFQF_HLUT1_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT2_SHIFT)
+#define I40E_VFQF_HLUT1_LUT3_SHIFT 24
+#define I40E_VFQF_HLUT1_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT3_SHIFT)
+#define I40E_VFQF_HREGION1(_i, _VF) (0x0022E000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...7, _VF=0...127 */ /* Reset: CORER */
+#define I40E_VFQF_HREGION1_MAX_INDEX 7
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_0_SHIFT 1
+#define I40E_VFQF_HREGION1_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_0_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_1_SHIFT 5
+#define I40E_VFQF_HREGION1_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_1_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_2_SHIFT 9
+#define I40E_VFQF_HREGION1_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_2_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_3_SHIFT 13
+#define I40E_VFQF_HREGION1_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_3_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_4_SHIFT 17
+#define I40E_VFQF_HREGION1_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_4_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_5_SHIFT 21
+#define I40E_VFQF_HREGION1_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_5_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_6_SHIFT 25
+#define I40E_VFQF_HREGION1_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_6_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_7_SHIFT 29
+#define I40E_VFQF_HREGION1_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_7_SHIFT)
+#define I40E_VPQF_CTL(_VF) (0x001C0000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VPQF_CTL_MAX_INDEX 127
+#define I40E_VPQF_CTL_PEHSIZE_SHIFT 0
+#define I40E_VPQF_CTL_PEHSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEHSIZE_SHIFT)
+#define I40E_VPQF_CTL_PEDSIZE_SHIFT 5
+#define I40E_VPQF_CTL_PEDSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEDSIZE_SHIFT)
+#define I40E_VPQF_CTL_FCHSIZE_SHIFT 10
+#define I40E_VPQF_CTL_FCHSIZE_MASK I40E_MASK(0xF, I40E_VPQF_CTL_FCHSIZE_SHIFT)
+#define I40E_VPQF_CTL_FCDSIZE_SHIFT 14
+#define I40E_VPQF_CTL_FCDSIZE_MASK I40E_MASK(0x3, I40E_VPQF_CTL_FCDSIZE_SHIFT)
+#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */
+#define I40E_VSIQF_CTL_MAX_INDEX 383
+#define I40E_VSIQF_CTL_FCOE_ENA_SHIFT 0
+#define I40E_VSIQF_CTL_FCOE_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_FCOE_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PETCP_ENA_SHIFT 1
+#define I40E_VSIQF_CTL_PETCP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PETCP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT 2
+#define I40E_VSIQF_CTL_PEUUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT 3
+#define I40E_VSIQF_CTL_PEMUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT 4
+#define I40E_VSIQF_CTL_PEUFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5
+#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT)
+#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...3, _VSI=0...383 */ /* Reset: PFR */
+#define I40E_VSIQF_TCREGION_MAX_INDEX 3
+#define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0
+#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9
+#define I40E_VSIQF_TCREGION_TC_SIZE_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT 16
+#define I40E_VSIQF_TCREGION_TC_OFFSET2_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT 25
+#define I40E_VSIQF_TCREGION_TC_SIZE2_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT)
+#define I40E_GL_FCOECRC(_i) (0x00314d80 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOECRC_MAX_INDEX 143
+#define I40E_GL_FCOECRC_FCOECRC_SHIFT 0
+#define I40E_GL_FCOECRC_FCOECRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOECRC_FCOECRC_SHIFT)
+#define I40E_GL_FCOEDDPC(_i) (0x00314480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDDPC_MAX_INDEX 143
+#define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0
+#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT)
+#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDIFEC_MAX_INDEX 143
+#define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0
+#define I40E_GL_FCOEDIFEC_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT)
+#define I40E_GL_FCOEDIFTCL(_i) (0x00354000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDIFTCL_MAX_INDEX 143
+#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT 0
+#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT)
+#define I40E_GL_FCOEDIXEC(_i) (0x0034c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDIXEC_MAX_INDEX 143
+#define I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT 0
+#define I40E_GL_FCOEDIXEC_FCOEDIXEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT)
+#define I40E_GL_FCOEDIXVC(_i) (0x00350000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDIXVC_MAX_INDEX 143
+#define I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT 0
+#define I40E_GL_FCOEDIXVC_FCOEDIXVC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT)
+#define I40E_GL_FCOEDWRCH(_i) (0x00320004 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDWRCH_MAX_INDEX 143
+#define I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT 0
+#define I40E_GL_FCOEDWRCH_FCOEDWRCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT)
+#define I40E_GL_FCOEDWRCL(_i) (0x00320000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDWRCL_MAX_INDEX 143
+#define I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT 0
+#define I40E_GL_FCOEDWRCL_FCOEDWRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT)
+#define I40E_GL_FCOEDWTCH(_i) (0x00348084 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDWTCH_MAX_INDEX 143
+#define I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT 0
+#define I40E_GL_FCOEDWTCH_FCOEDWTCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT)
+#define I40E_GL_FCOEDWTCL(_i) (0x00348080 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDWTCL_MAX_INDEX 143
+#define I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT 0
+#define I40E_GL_FCOEDWTCL_FCOEDWTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT)
+#define I40E_GL_FCOELAST(_i) (0x00314000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOELAST_MAX_INDEX 143
+#define I40E_GL_FCOELAST_FCOELAST_SHIFT 0
+#define I40E_GL_FCOELAST_FCOELAST_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOELAST_FCOELAST_SHIFT)
+#define I40E_GL_FCOEPRC(_i) (0x00315200 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEPRC_MAX_INDEX 143
+#define I40E_GL_FCOEPRC_FCOEPRC_SHIFT 0
+#define I40E_GL_FCOEPRC_FCOEPRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPRC_FCOEPRC_SHIFT)
+#define I40E_GL_FCOEPTC(_i) (0x00344C00 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEPTC_MAX_INDEX 143
+#define I40E_GL_FCOEPTC_FCOEPTC_SHIFT 0
+#define I40E_GL_FCOEPTC_FCOEPTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPTC_FCOEPTC_SHIFT)
+#define I40E_GL_FCOERPDC(_i) (0x00324000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOERPDC_MAX_INDEX 143
+#define I40E_GL_FCOERPDC_FCOERPDC_SHIFT 0
+#define I40E_GL_FCOERPDC_FCOERPDC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOERPDC_FCOERPDC_SHIFT)
+#define I40E_GL_RXERR1_L(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_RXERR1_L_MAX_INDEX 143
+#define I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT 0
+#define I40E_GL_RXERR1_L_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT)
+#define I40E_GL_RXERR2_L(_i) (0x0031c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_RXERR2_L_MAX_INDEX 143
+#define I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT 0
+#define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT)
+#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_BPRCH_MAX_INDEX 3
+#define I40E_GLPRT_BPRCH_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_BPRCL_MAX_INDEX 3
+#define I40E_GLPRT_BPRCL_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPRCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_BPTCH_MAX_INDEX 3
+#define I40E_GLPRT_BPTCH_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPTCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_BPTCL_MAX_INDEX 3
+#define I40E_GLPRT_BPTCL_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPTCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_UPRCH_SHIFT)
+#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_CRCERRS_MAX_INDEX 3
+#define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0
+#define I40E_GLPRT_CRCERRS_CRCERRS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_CRCERRS_CRCERRS_SHIFT)
+#define I40E_GLPRT_GORCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_GORCH_MAX_INDEX 3
+#define I40E_GLPRT_GORCH_GORCH_SHIFT 0
+#define I40E_GLPRT_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GORCH_GORCH_SHIFT)
+#define I40E_GLPRT_GORCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_GORCL_MAX_INDEX 3
+#define I40E_GLPRT_GORCL_GORCL_SHIFT 0
+#define I40E_GLPRT_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GORCL_GORCL_SHIFT)
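+/*
+ * Illustrative sketch only: the per-port statistic registers come in L/H
+ * pairs that together hold a 48-bit counter (32-bit low word, 16-bit high
+ * word), as the GORCL/GORCH definitions above suggest.  The helper name
+ * and the rd32() register accessor are assumptions here, not definitions
+ * from this header.
+ */
+static inline u64
+ixl_read_gorc48_example(struct i40e_hw *hw, int port)
+{
+	u64 lo = rd32(hw, I40E_GLPRT_GORCL(port));
+	u64 hi = rd32(hw, I40E_GLPRT_GORCH(port)) &
+	    I40E_GLPRT_GORCH_GORCH_MASK;
+
+	/* Combine the halves into the 48-bit good-octets-received count. */
+	return ((hi << 32) | lo);
+}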
+#define I40E_GLPRT_GOTCH(_i) (0x00300684 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_GOTCH_MAX_INDEX 3
+#define I40E_GLPRT_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLPRT_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GOTCH_GOTCH_SHIFT)
+#define I40E_GLPRT_GOTCL(_i) (0x00300680 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_GOTCL_MAX_INDEX 3
+#define I40E_GLPRT_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLPRT_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GOTCL_GOTCL_SHIFT)
+#define I40E_GLPRT_ILLERRC(_i) (0x003000E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_ILLERRC_MAX_INDEX 3
+#define I40E_GLPRT_ILLERRC_ILLERRC_SHIFT 0
+#define I40E_GLPRT_ILLERRC_ILLERRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ILLERRC_ILLERRC_SHIFT)
+#define I40E_GLPRT_LDPC(_i) (0x00300620 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_LDPC_MAX_INDEX 3
+#define I40E_GLPRT_LDPC_LDPC_SHIFT 0
+#define I40E_GLPRT_LDPC_LDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LDPC_LDPC_SHIFT)
+#define I40E_GLPRT_LXOFFRXC(_i) (0x00300160 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_LXOFFRXC_MAX_INDEX 3
+#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT 0
+#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT)
+#define I40E_GLPRT_LXOFFTXC(_i) (0x003009A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_LXOFFTXC_MAX_INDEX 3
+#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT 0
+#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT)
+#define I40E_GLPRT_LXONRXC(_i) (0x00300140 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_LXONRXC_MAX_INDEX 3
+#define I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT 0
+#define I40E_GLPRT_LXONRXC_LXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT)
+#define I40E_GLPRT_LXONTXC(_i) (0x00300980 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_LXONTXC_MAX_INDEX 3
+#define I40E_GLPRT_LXONTXC_LXONTXC_SHIFT 0
+#define I40E_GLPRT_LXONTXC_LXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONTXC_LXONTXC_SHIFT)
+#define I40E_GLPRT_MLFC(_i) (0x00300020 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MLFC_MAX_INDEX 3
+#define I40E_GLPRT_MLFC_MLFC_SHIFT 0
+#define I40E_GLPRT_MLFC_MLFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MLFC_MLFC_SHIFT)
+#define I40E_GLPRT_MPRCH(_i) (0x003005C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MPRCH_MAX_INDEX 3
+#define I40E_GLPRT_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLPRT_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPRCH_MPRCH_SHIFT)
+#define I40E_GLPRT_MPRCL(_i) (0x003005C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MPRCL_MAX_INDEX 3
+#define I40E_GLPRT_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLPRT_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPRCL_MPRCL_SHIFT)
+#define I40E_GLPRT_MPTCH(_i) (0x003009E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MPTCH_MAX_INDEX 3
+#define I40E_GLPRT_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLPRT_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPTCH_MPTCH_SHIFT)
+#define I40E_GLPRT_MPTCL(_i) (0x003009E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MPTCL_MAX_INDEX 3
+#define I40E_GLPRT_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLPRT_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPTCL_MPTCL_SHIFT)
+#define I40E_GLPRT_MRFC(_i) (0x00300040 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MRFC_MAX_INDEX 3
+#define I40E_GLPRT_MRFC_MRFC_SHIFT 0
+#define I40E_GLPRT_MRFC_MRFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MRFC_MRFC_SHIFT)
+#define I40E_GLPRT_PRC1023H(_i) (0x00300504 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC1023H_MAX_INDEX 3
+#define I40E_GLPRT_PRC1023H_PRC1023H_SHIFT 0
+#define I40E_GLPRT_PRC1023H_PRC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1023H_PRC1023H_SHIFT)
+#define I40E_GLPRT_PRC1023L(_i) (0x00300500 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC1023L_MAX_INDEX 3
+#define I40E_GLPRT_PRC1023L_PRC1023L_SHIFT 0
+#define I40E_GLPRT_PRC1023L_PRC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1023L_PRC1023L_SHIFT)
+#define I40E_GLPRT_PRC127H(_i) (0x003004A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC127H_MAX_INDEX 3
+#define I40E_GLPRT_PRC127H_PRC127H_SHIFT 0
+#define I40E_GLPRT_PRC127H_PRC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC127H_PRC127H_SHIFT)
+#define I40E_GLPRT_PRC127L(_i) (0x003004A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC127L_MAX_INDEX 3
+#define I40E_GLPRT_PRC127L_PRC127L_SHIFT 0
+#define I40E_GLPRT_PRC127L_PRC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC127L_PRC127L_SHIFT)
+#define I40E_GLPRT_PRC1522H(_i) (0x00300524 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC1522H_MAX_INDEX 3
+#define I40E_GLPRT_PRC1522H_PRC1522H_SHIFT 0
+#define I40E_GLPRT_PRC1522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1522H_PRC1522H_SHIFT)
+#define I40E_GLPRT_PRC1522L(_i) (0x00300520 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC1522L_MAX_INDEX 3
+#define I40E_GLPRT_PRC1522L_PRC1522L_SHIFT 0
+#define I40E_GLPRT_PRC1522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1522L_PRC1522L_SHIFT)
+#define I40E_GLPRT_PRC255H(_i) (0x003004C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC255H_MAX_INDEX 3
+#define I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT 0
+#define I40E_GLPRT_PRC255H_PRTPRC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT)
+#define I40E_GLPRT_PRC255L(_i) (0x003004C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC255L_MAX_INDEX 3
+#define I40E_GLPRT_PRC255L_PRC255L_SHIFT 0
+#define I40E_GLPRT_PRC255L_PRC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC255L_PRC255L_SHIFT)
+#define I40E_GLPRT_PRC511H(_i) (0x003004E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC511H_MAX_INDEX 3
+#define I40E_GLPRT_PRC511H_PRC511H_SHIFT 0
+#define I40E_GLPRT_PRC511H_PRC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC511H_PRC511H_SHIFT)
+#define I40E_GLPRT_PRC511L(_i) (0x003004E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC511L_MAX_INDEX 3
+#define I40E_GLPRT_PRC511L_PRC511L_SHIFT 0
+#define I40E_GLPRT_PRC511L_PRC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC511L_PRC511L_SHIFT)
+#define I40E_GLPRT_PRC64H(_i) (0x00300484 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC64H_MAX_INDEX 3
+#define I40E_GLPRT_PRC64H_PRC64H_SHIFT 0
+#define I40E_GLPRT_PRC64H_PRC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC64H_PRC64H_SHIFT)
+#define I40E_GLPRT_PRC64L(_i) (0x00300480 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC64L_MAX_INDEX 3
+#define I40E_GLPRT_PRC64L_PRC64L_SHIFT 0
+#define I40E_GLPRT_PRC64L_PRC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC64L_PRC64L_SHIFT)
+#define I40E_GLPRT_PRC9522H(_i) (0x00300544 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC9522H_MAX_INDEX 3
+#define I40E_GLPRT_PRC9522H_PRC1522H_SHIFT 0
+#define I40E_GLPRT_PRC9522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC9522H_PRC1522H_SHIFT)
+#define I40E_GLPRT_PRC9522L(_i) (0x00300540 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC9522L_MAX_INDEX 3
+#define I40E_GLPRT_PRC9522L_PRC1522L_SHIFT 0
+#define I40E_GLPRT_PRC9522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC9522L_PRC1522L_SHIFT)
+#define I40E_GLPRT_PTC1023H(_i) (0x00300724 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC1023H_MAX_INDEX 3
+#define I40E_GLPRT_PTC1023H_PTC1023H_SHIFT 0
+#define I40E_GLPRT_PTC1023H_PTC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1023H_PTC1023H_SHIFT)
+#define I40E_GLPRT_PTC1023L(_i) (0x00300720 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC1023L_MAX_INDEX 3
+#define I40E_GLPRT_PTC1023L_PTC1023L_SHIFT 0
+#define I40E_GLPRT_PTC1023L_PTC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1023L_PTC1023L_SHIFT)
+#define I40E_GLPRT_PTC127H(_i) (0x003006C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC127H_MAX_INDEX 3
+#define I40E_GLPRT_PTC127H_PTC127H_SHIFT 0
+#define I40E_GLPRT_PTC127H_PTC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC127H_PTC127H_SHIFT)
+#define I40E_GLPRT_PTC127L(_i) (0x003006C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC127L_MAX_INDEX 3
+#define I40E_GLPRT_PTC127L_PTC127L_SHIFT 0
+#define I40E_GLPRT_PTC127L_PTC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC127L_PTC127L_SHIFT)
+#define I40E_GLPRT_PTC1522H(_i) (0x00300744 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC1522H_MAX_INDEX 3
+#define I40E_GLPRT_PTC1522H_PTC1522H_SHIFT 0
+#define I40E_GLPRT_PTC1522H_PTC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1522H_PTC1522H_SHIFT)
+#define I40E_GLPRT_PTC1522L(_i) (0x00300740 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC1522L_MAX_INDEX 3
+#define I40E_GLPRT_PTC1522L_PTC1522L_SHIFT 0
+#define I40E_GLPRT_PTC1522L_PTC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1522L_PTC1522L_SHIFT)
+#define I40E_GLPRT_PTC255H(_i) (0x003006E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC255H_MAX_INDEX 3
+#define I40E_GLPRT_PTC255H_PTC255H_SHIFT 0
+#define I40E_GLPRT_PTC255H_PTC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC255H_PTC255H_SHIFT)
+#define I40E_GLPRT_PTC255L(_i) (0x003006E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC255L_MAX_INDEX 3
+#define I40E_GLPRT_PTC255L_PTC255L_SHIFT 0
+#define I40E_GLPRT_PTC255L_PTC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC255L_PTC255L_SHIFT)
+#define I40E_GLPRT_PTC511H(_i) (0x00300704 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC511H_MAX_INDEX 3
+#define I40E_GLPRT_PTC511H_PTC511H_SHIFT 0
+#define I40E_GLPRT_PTC511H_PTC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC511H_PTC511H_SHIFT)
+#define I40E_GLPRT_PTC511L(_i) (0x00300700 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC511L_MAX_INDEX 3
+#define I40E_GLPRT_PTC511L_PTC511L_SHIFT 0
+#define I40E_GLPRT_PTC511L_PTC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC511L_PTC511L_SHIFT)
+#define I40E_GLPRT_PTC64H(_i) (0x003006A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC64H_MAX_INDEX 3
+#define I40E_GLPRT_PTC64H_PTC64H_SHIFT 0
+#define I40E_GLPRT_PTC64H_PTC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC64H_PTC64H_SHIFT)
+#define I40E_GLPRT_PTC64L(_i) (0x003006A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC64L_MAX_INDEX 3
+#define I40E_GLPRT_PTC64L_PTC64L_SHIFT 0
+#define I40E_GLPRT_PTC64L_PTC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC64L_PTC64L_SHIFT)
+#define I40E_GLPRT_PTC9522H(_i) (0x00300764 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC9522H_MAX_INDEX 3
+#define I40E_GLPRT_PTC9522H_PTC9522H_SHIFT 0
+#define I40E_GLPRT_PTC9522H_PTC9522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC9522H_PTC9522H_SHIFT)
+#define I40E_GLPRT_PTC9522L(_i) (0x00300760 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC9522L_MAX_INDEX 3
+#define I40E_GLPRT_PTC9522L_PTC9522L_SHIFT 0
+#define I40E_GLPRT_PTC9522L_PTC9522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC9522L_PTC9522L_SHIFT)
+#define I40E_GLPRT_PXOFFRXC(_i, _j) (0x00300280 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
+#define I40E_GLPRT_PXOFFRXC_MAX_INDEX 3
+#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT 0
+#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT)
+#define I40E_GLPRT_PXOFFTXC(_i, _j) (0x00300880 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
+#define I40E_GLPRT_PXOFFTXC_MAX_INDEX 3
+#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT 0
+#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT)
+#define I40E_GLPRT_PXONRXC(_i, _j) (0x00300180 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
+#define I40E_GLPRT_PXONRXC_MAX_INDEX 3
+#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT 0
+#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT)
+#define I40E_GLPRT_PXONTXC(_i, _j) (0x00300780 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
+#define I40E_GLPRT_PXONTXC_MAX_INDEX 3
+#define I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT 0
+#define I40E_GLPRT_PXONTXC_PRPXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT)
+#define I40E_GLPRT_RDPC(_i) (0x00300600 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RDPC_MAX_INDEX 3
+#define I40E_GLPRT_RDPC_RDPC_SHIFT 0
+#define I40E_GLPRT_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RDPC_RDPC_SHIFT)
+#define I40E_GLPRT_RFC(_i) (0x00300560 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RFC_MAX_INDEX 3
+#define I40E_GLPRT_RFC_RFC_SHIFT 0
+#define I40E_GLPRT_RFC_RFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RFC_RFC_SHIFT)
+#define I40E_GLPRT_RJC(_i) (0x00300580 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RJC_MAX_INDEX 3
+#define I40E_GLPRT_RJC_RJC_SHIFT 0
+#define I40E_GLPRT_RJC_RJC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RJC_RJC_SHIFT)
+#define I40E_GLPRT_RLEC(_i) (0x003000A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RLEC_MAX_INDEX 3
+#define I40E_GLPRT_RLEC_RLEC_SHIFT 0
+#define I40E_GLPRT_RLEC_RLEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RLEC_RLEC_SHIFT)
+#define I40E_GLPRT_ROC(_i) (0x00300120 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_ROC_MAX_INDEX 3
+#define I40E_GLPRT_ROC_ROC_SHIFT 0
+#define I40E_GLPRT_ROC_ROC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ROC_ROC_SHIFT)
+#define I40E_GLPRT_RUC(_i) (0x00300100 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RUC_MAX_INDEX 3
+#define I40E_GLPRT_RUC_RUC_SHIFT 0
+#define I40E_GLPRT_RUC_RUC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUC_RUC_SHIFT)
+#define I40E_GLPRT_RUPP(_i) (0x00300660 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RUPP_MAX_INDEX 3
+#define I40E_GLPRT_RUPP_RUPP_SHIFT 0
+#define I40E_GLPRT_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUPP_RUPP_SHIFT)
+#define I40E_GLPRT_RXON2OFFCNT(_i, _j) (0x00300380 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
+#define I40E_GLPRT_RXON2OFFCNT_MAX_INDEX 3
+#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT 0
+#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT)
+#define I40E_GLPRT_TDOLD(_i) (0x00300A20 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_TDOLD_MAX_INDEX 3
+#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0
+#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT)
+#define I40E_GLPRT_TDPC(_i) (0x00375400 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_TDPC_MAX_INDEX 3
+#define I40E_GLPRT_TDPC_TDPC_SHIFT 0
+#define I40E_GLPRT_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDPC_TDPC_SHIFT)
+#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_UPRCH_MAX_INDEX 3
+#define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLPRT_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPRCH_UPRCH_SHIFT)
+#define I40E_GLPRT_UPRCL(_i) (0x003005A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_UPRCL_MAX_INDEX 3
+#define I40E_GLPRT_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLPRT_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPRCL_UPRCL_SHIFT)
+#define I40E_GLPRT_UPTCH(_i) (0x003009C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_UPTCH_MAX_INDEX 3
+#define I40E_GLPRT_UPTCH_UPTCH_SHIFT 0
+#define I40E_GLPRT_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPTCH_UPTCH_SHIFT)
+#define I40E_GLPRT_UPTCL(_i) (0x003009C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_UPTCL_MAX_INDEX 3
+#define I40E_GLPRT_UPTCL_VUPTCH_SHIFT 0
+#define I40E_GLPRT_UPTCL_VUPTCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPTCL_VUPTCH_SHIFT)
+#define I40E_GLSW_BPRCH(_i) (0x00370104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_BPRCH_MAX_INDEX 15
+#define I40E_GLSW_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLSW_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPRCH_BPRCH_SHIFT)
+#define I40E_GLSW_BPRCL(_i) (0x00370100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_BPRCL_MAX_INDEX 15
+#define I40E_GLSW_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLSW_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPRCL_BPRCL_SHIFT)
+#define I40E_GLSW_BPTCH(_i) (0x00340104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_BPTCH_MAX_INDEX 15
+#define I40E_GLSW_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLSW_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPTCH_BPTCH_SHIFT)
+#define I40E_GLSW_BPTCL(_i) (0x00340100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_BPTCL_MAX_INDEX 15
+#define I40E_GLSW_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLSW_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPTCL_BPTCL_SHIFT)
+#define I40E_GLSW_GORCH(_i) (0x0035C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_GORCH_MAX_INDEX 15
+#define I40E_GLSW_GORCH_GORCH_SHIFT 0
+#define I40E_GLSW_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GORCH_GORCH_SHIFT)
+#define I40E_GLSW_GORCL(_i) (0x0035c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_GORCL_MAX_INDEX 15
+#define I40E_GLSW_GORCL_GORCL_SHIFT 0
+#define I40E_GLSW_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GORCL_GORCL_SHIFT)
+#define I40E_GLSW_GOTCH(_i) (0x0032C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_GOTCH_MAX_INDEX 15
+#define I40E_GLSW_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLSW_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GOTCH_GOTCH_SHIFT)
+#define I40E_GLSW_GOTCL(_i) (0x0032c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_GOTCL_MAX_INDEX 15
+#define I40E_GLSW_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLSW_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GOTCL_GOTCL_SHIFT)
+#define I40E_GLSW_MPRCH(_i) (0x00370084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_MPRCH_MAX_INDEX 15
+#define I40E_GLSW_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLSW_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPRCH_MPRCH_SHIFT)
+#define I40E_GLSW_MPRCL(_i) (0x00370080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_MPRCL_MAX_INDEX 15
+#define I40E_GLSW_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLSW_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPRCL_MPRCL_SHIFT)
+#define I40E_GLSW_MPTCH(_i) (0x00340084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_MPTCH_MAX_INDEX 15
+#define I40E_GLSW_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLSW_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPTCH_MPTCH_SHIFT)
+#define I40E_GLSW_MPTCL(_i) (0x00340080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_MPTCL_MAX_INDEX 15
+#define I40E_GLSW_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLSW_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPTCL_MPTCL_SHIFT)
+#define I40E_GLSW_RUPP(_i) (0x00370180 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_RUPP_MAX_INDEX 15
+#define I40E_GLSW_RUPP_RUPP_SHIFT 0
+#define I40E_GLSW_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_RUPP_RUPP_SHIFT)
+#define I40E_GLSW_TDPC(_i) (0x00348000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_TDPC_MAX_INDEX 15
+#define I40E_GLSW_TDPC_TDPC_SHIFT 0
+#define I40E_GLSW_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_TDPC_TDPC_SHIFT)
+#define I40E_GLSW_UPRCH(_i) (0x00370004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_UPRCH_MAX_INDEX 15
+#define I40E_GLSW_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLSW_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPRCH_UPRCH_SHIFT)
+#define I40E_GLSW_UPRCL(_i) (0x00370000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_UPRCL_MAX_INDEX 15
+#define I40E_GLSW_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLSW_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPRCL_UPRCL_SHIFT)
+#define I40E_GLSW_UPTCH(_i) (0x00340004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_UPTCH_MAX_INDEX 15
+#define I40E_GLSW_UPTCH_UPTCH_SHIFT 0
+#define I40E_GLSW_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPTCH_UPTCH_SHIFT)
+#define I40E_GLSW_UPTCL(_i) (0x00340000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_UPTCL_MAX_INDEX 15
+#define I40E_GLSW_UPTCL_UPTCL_SHIFT 0
+#define I40E_GLSW_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPTCL_UPTCL_SHIFT)
+#define I40E_GLV_BPRCH(_i) (0x0036D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_BPRCH_MAX_INDEX 383
+#define I40E_GLV_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLV_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPRCH_BPRCH_SHIFT)
+#define I40E_GLV_BPRCL(_i) (0x0036d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_BPRCL_MAX_INDEX 383
+#define I40E_GLV_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLV_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPRCL_BPRCL_SHIFT)
+#define I40E_GLV_BPTCH(_i) (0x0033D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_BPTCH_MAX_INDEX 383
+#define I40E_GLV_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLV_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPTCH_BPTCH_SHIFT)
+#define I40E_GLV_BPTCL(_i) (0x0033d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_BPTCL_MAX_INDEX 383
+#define I40E_GLV_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLV_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPTCL_BPTCL_SHIFT)
+#define I40E_GLV_GORCH(_i) (0x00358004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_GORCH_MAX_INDEX 383
+#define I40E_GLV_GORCH_GORCH_SHIFT 0
+#define I40E_GLV_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GORCH_GORCH_SHIFT)
+#define I40E_GLV_GORCL(_i) (0x00358000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_GORCL_MAX_INDEX 383
+#define I40E_GLV_GORCL_GORCL_SHIFT 0
+#define I40E_GLV_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GORCL_GORCL_SHIFT)
+#define I40E_GLV_GOTCH(_i) (0x00328004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_GOTCH_MAX_INDEX 383
+#define I40E_GLV_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLV_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GOTCH_GOTCH_SHIFT)
+#define I40E_GLV_GOTCL(_i) (0x00328000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_GOTCL_MAX_INDEX 383
+#define I40E_GLV_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLV_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GOTCL_GOTCL_SHIFT)
+#define I40E_GLV_MPRCH(_i) (0x0036CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_MPRCH_MAX_INDEX 383
+#define I40E_GLV_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLV_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPRCH_MPRCH_SHIFT)
+#define I40E_GLV_MPRCL(_i) (0x0036cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_MPRCL_MAX_INDEX 383
+#define I40E_GLV_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLV_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPRCL_MPRCL_SHIFT)
+#define I40E_GLV_MPTCH(_i) (0x0033CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_MPTCH_MAX_INDEX 383
+#define I40E_GLV_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLV_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPTCH_MPTCH_SHIFT)
+#define I40E_GLV_MPTCL(_i) (0x0033cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_MPTCL_MAX_INDEX 383
+#define I40E_GLV_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLV_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPTCL_MPTCL_SHIFT)
+#define I40E_GLV_RDPC(_i) (0x00310000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_RDPC_MAX_INDEX 383
+#define I40E_GLV_RDPC_RDPC_SHIFT 0
+#define I40E_GLV_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RDPC_RDPC_SHIFT)
+#define I40E_GLV_RUPP(_i) (0x0036E400 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_RUPP_MAX_INDEX 383
+#define I40E_GLV_RUPP_RUPP_SHIFT 0
+#define I40E_GLV_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RUPP_RUPP_SHIFT)
+#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 4)) /* _VSI=0...383 */ /* Reset: CORER */
+#define I40E_GLV_TEPC_MAX_INDEX 383
+#define I40E_GLV_TEPC_TEPC_SHIFT 0
+#define I40E_GLV_TEPC_TEPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_TEPC_TEPC_SHIFT)
+#define I40E_GLV_UPRCH(_i) (0x0036C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_UPRCH_MAX_INDEX 383
+#define I40E_GLV_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLV_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPRCH_UPRCH_SHIFT)
+#define I40E_GLV_UPRCL(_i) (0x0036c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_UPRCL_MAX_INDEX 383
+#define I40E_GLV_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLV_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPRCL_UPRCL_SHIFT)
+#define I40E_GLV_UPTCH(_i) (0x0033C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_UPTCH_MAX_INDEX 383
+#define I40E_GLV_UPTCH_GLVUPTCH_SHIFT 0
+#define I40E_GLV_UPTCH_GLVUPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPTCH_GLVUPTCH_SHIFT)
+#define I40E_GLV_UPTCL(_i) (0x0033c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_UPTCL_MAX_INDEX 383
+#define I40E_GLV_UPTCL_UPTCL_SHIFT 0
+#define I40E_GLV_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPTCL_UPTCL_SHIFT)
+#define I40E_GLVEBTC_RBCH(_i, _j) (0x00364004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_RBCH_MAX_INDEX 7
+#define I40E_GLVEBTC_RBCH_TCBCH_SHIFT 0
+#define I40E_GLVEBTC_RBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RBCH_TCBCH_SHIFT)
+#define I40E_GLVEBTC_RBCL(_i, _j) (0x00364000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_RBCL_MAX_INDEX 7
+#define I40E_GLVEBTC_RBCL_TCBCL_SHIFT 0
+#define I40E_GLVEBTC_RBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RBCL_TCBCL_SHIFT)
+#define I40E_GLVEBTC_RPCH(_i, _j) (0x00368004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_RPCH_MAX_INDEX 7
+#define I40E_GLVEBTC_RPCH_TCPCH_SHIFT 0
+#define I40E_GLVEBTC_RPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RPCH_TCPCH_SHIFT)
+#define I40E_GLVEBTC_RPCL(_i, _j) (0x00368000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_RPCL_MAX_INDEX 7
+#define I40E_GLVEBTC_RPCL_TCPCL_SHIFT 0
+#define I40E_GLVEBTC_RPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RPCL_TCPCL_SHIFT)
+#define I40E_GLVEBTC_TBCH(_i, _j) (0x00334004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_TBCH_MAX_INDEX 7
+#define I40E_GLVEBTC_TBCH_TCBCH_SHIFT 0
+#define I40E_GLVEBTC_TBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TBCH_TCBCH_SHIFT)
+#define I40E_GLVEBTC_TBCL(_i, _j) (0x00334000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_TBCL_MAX_INDEX 7
+#define I40E_GLVEBTC_TBCL_TCBCL_SHIFT 0
+#define I40E_GLVEBTC_TBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TBCL_TCBCL_SHIFT)
+#define I40E_GLVEBTC_TPCH(_i, _j) (0x00338004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_TPCH_MAX_INDEX 7
+#define I40E_GLVEBTC_TPCH_TCPCH_SHIFT 0
+#define I40E_GLVEBTC_TPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TPCH_TCPCH_SHIFT)
+#define I40E_GLVEBTC_TPCL(_i, _j) (0x00338000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_TPCL_MAX_INDEX 7
+#define I40E_GLVEBTC_TPCL_TCPCL_SHIFT 0
+#define I40E_GLVEBTC_TPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TPCL_TCPCL_SHIFT)
+#define I40E_GLVEBVL_BPCH(_i) (0x00374804 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_BPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_BPCH_VLBPCH_SHIFT 0
+#define I40E_GLVEBVL_BPCH_VLBPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_BPCH_VLBPCH_SHIFT)
+#define I40E_GLVEBVL_BPCL(_i) (0x00374800 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_BPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_BPCL_VLBPCL_SHIFT 0
+#define I40E_GLVEBVL_BPCL_VLBPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_BPCL_VLBPCL_SHIFT)
+#define I40E_GLVEBVL_GORCH(_i) (0x00360004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_GORCH_MAX_INDEX 127
+#define I40E_GLVEBVL_GORCH_VLBCH_SHIFT 0
+#define I40E_GLVEBVL_GORCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GORCH_VLBCH_SHIFT)
+#define I40E_GLVEBVL_GORCL(_i) (0x00360000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_GORCL_MAX_INDEX 127
+#define I40E_GLVEBVL_GORCL_VLBCL_SHIFT 0
+#define I40E_GLVEBVL_GORCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GORCL_VLBCL_SHIFT)
+#define I40E_GLVEBVL_GOTCH(_i) (0x00330004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_GOTCH_MAX_INDEX 127
+#define I40E_GLVEBVL_GOTCH_VLBCH_SHIFT 0
+#define I40E_GLVEBVL_GOTCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GOTCH_VLBCH_SHIFT)
+#define I40E_GLVEBVL_GOTCL(_i) (0x00330000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_GOTCL_MAX_INDEX 127
+#define I40E_GLVEBVL_GOTCL_VLBCL_SHIFT 0
+#define I40E_GLVEBVL_GOTCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GOTCL_VLBCL_SHIFT)
+#define I40E_GLVEBVL_MPCH(_i) (0x00374404 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_MPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_MPCH_VLMPCH_SHIFT 0
+#define I40E_GLVEBVL_MPCH_VLMPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_MPCH_VLMPCH_SHIFT)
+#define I40E_GLVEBVL_MPCL(_i) (0x00374400 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_MPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_MPCL_VLMPCL_SHIFT 0
+#define I40E_GLVEBVL_MPCL_VLMPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_MPCL_VLMPCL_SHIFT)
+#define I40E_GLVEBVL_UPCH(_i) (0x00374004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_UPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_UPCH_VLUPCH_SHIFT 0
+#define I40E_GLVEBVL_UPCH_VLUPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_UPCH_VLUPCH_SHIFT)
+#define I40E_GLVEBVL_UPCL(_i) (0x00374000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_UPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_UPCL_VLUPCL_SHIFT 0
+#define I40E_GLVEBVL_UPCL_VLUPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_UPCL_VLUPCL_SHIFT)
+#define I40E_GL_MTG_FLU_MSK_H 0x00269F4C /* Reset: CORER */
+#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT 0
+#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_MASK I40E_MASK(0xFFFF, I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT)
+#define I40E_GL_SWR_DEF_ACT(_i) (0x00270200 + ((_i) * 4)) /* _i=0...35 */ /* Reset: CORER */
+#define I40E_GL_SWR_DEF_ACT_MAX_INDEX 35
+#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT 0
+#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT)
+#define I40E_GL_SWR_DEF_ACT_EN(_i) (0x0026CFB8 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_GL_SWR_DEF_ACT_EN_MAX_INDEX 1
+#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT 0
+#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT)
+#define I40E_PRTTSYN_ADJ 0x001E4280 /* Reset: GLOBR */
+#define I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT 0
+#define I40E_PRTTSYN_ADJ_TSYNADJ_MASK I40E_MASK(0x7FFFFFFF, I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT)
+#define I40E_PRTTSYN_ADJ_SIGN_SHIFT 31
+#define I40E_PRTTSYN_ADJ_SIGN_MASK I40E_MASK(0x1, I40E_PRTTSYN_ADJ_SIGN_SHIFT)
+#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_AUX_0_MAX_INDEX 1
+#define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0
+#define I40E_PRTTSYN_AUX_0_OUT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT 1
+#define I40E_PRTTSYN_AUX_0_OUTMOD_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT 3
+#define I40E_PRTTSYN_AUX_0_OUTLVL_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT)
+#define I40E_PRTTSYN_AUX_0_PULSEW_SHIFT 8
+#define I40E_PRTTSYN_AUX_0_PULSEW_MASK I40E_MASK(0xF, I40E_PRTTSYN_AUX_0_PULSEW_SHIFT)
+#define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16
+#define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT)
+#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_AUX_1_MAX_INDEX 1
+#define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0
+#define I40E_PRTTSYN_AUX_1_INSTNT_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_INSTNT_SHIFT)
+#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT 1
+#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT)
+#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_CLKO_MAX_INDEX 1
+#define I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT 0
+#define I40E_PRTTSYN_CLKO_TSYNCLKO_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT)
+#define I40E_PRTTSYN_CTL0 0x001E4200 /* Reset: GLOBR */
+#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT 0
+#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT)
+#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT 1
+#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT 2
+#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT 3
+#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8
+#define I40E_PRTTSYN_CTL0_PF_ID_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL0_PF_ID_SHIFT)
+#define I40E_PRTTSYN_CTL0_TSYNACT_SHIFT 12
+#define I40E_PRTTSYN_CTL0_TSYNACT_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL0_TSYNACT_SHIFT)
+#define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT 31
+#define I40E_PRTTSYN_CTL0_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TSYNENA_SHIFT)
+#define I40E_PRTTSYN_CTL1 0x00085020 /* Reset: CORER */
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT 0
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT)
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT 8
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT)
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT 16
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT)
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT 20
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT 24
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT 26
+#define I40E_PRTTSYN_CTL1_UDP_ENA_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNENA_SHIFT 31
+#define I40E_PRTTSYN_CTL1_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL1_TSYNENA_SHIFT)
+#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_EVNT_H_MAX_INDEX 1
+#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT 0
+#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT)
+#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_EVNT_L_MAX_INDEX 1
+#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT 0
+#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT)
+#define I40E_PRTTSYN_INC_H 0x001E4060 /* Reset: GLOBR */
+#define I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT 0
+#define I40E_PRTTSYN_INC_H_TSYNINC_H_MASK I40E_MASK(0x3F, I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT)
+#define I40E_PRTTSYN_INC_L 0x001E4040 /* Reset: GLOBR */
+#define I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT 0
+#define I40E_PRTTSYN_INC_L_TSYNINC_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT)
+#define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_PRTTSYN_RXTIME_H_MAX_INDEX 3
+#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT 0
+#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT)
+#define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_PRTTSYN_RXTIME_L_MAX_INDEX 3
+#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT 0
+#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT)
+#define I40E_PRTTSYN_STAT_0 0x001E4220 /* Reset: GLOBR */
+#define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0
+#define I40E_PRTTSYN_STAT_0_EVENT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT0_SHIFT)
+#define I40E_PRTTSYN_STAT_0_EVENT1_SHIFT 1
+#define I40E_PRTTSYN_STAT_0_EVENT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT1_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TGT0_SHIFT 2
+#define I40E_PRTTSYN_STAT_0_TGT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT0_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TGT1_SHIFT 3
+#define I40E_PRTTSYN_STAT_0_TGT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT1_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4
+#define I40E_PRTTSYN_STAT_0_TXTIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TXTIME_SHIFT)
+#define I40E_PRTTSYN_STAT_1 0x00085140 /* Reset: CORER */
+#define I40E_PRTTSYN_STAT_1_RXT0_SHIFT 0
+#define I40E_PRTTSYN_STAT_1_RXT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT0_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT1_SHIFT 1
+#define I40E_PRTTSYN_STAT_1_RXT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT1_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT2_SHIFT 2
+#define I40E_PRTTSYN_STAT_1_RXT2_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT2_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT3_SHIFT 3
+#define I40E_PRTTSYN_STAT_1_RXT3_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT3_SHIFT)
+#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_TGT_H_MAX_INDEX 1
+#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT 0
+#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT)
+#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_TGT_L_MAX_INDEX 1
+#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT 0
+#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT)
+#define I40E_PRTTSYN_TIME_H 0x001E4120 /* Reset: GLOBR */
+#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT 0
+#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT)
+#define I40E_PRTTSYN_TIME_L 0x001E4100 /* Reset: GLOBR */
+#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT 0
+#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT)
+#define I40E_PRTTSYN_TXTIME_H 0x001E41E0 /* Reset: GLOBR */
+#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT 0
+#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT)
+#define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */
+#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0
+#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT)
+#define I40E_GLSCD_QUANTA 0x000B2080 /* Reset: CORER */
+#define I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT 0
+#define I40E_GLSCD_QUANTA_TSCDQUANTA_MASK I40E_MASK(0x7, I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT)
+#define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */
+#define I40E_GL_MDET_RX_FUNCTION_SHIFT 0
+#define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT)
+#define I40E_GL_MDET_RX_EVENT_SHIFT 8
+#define I40E_GL_MDET_RX_EVENT_MASK I40E_MASK(0x1FF, I40E_GL_MDET_RX_EVENT_SHIFT)
+#define I40E_GL_MDET_RX_QUEUE_SHIFT 17
+#define I40E_GL_MDET_RX_QUEUE_MASK I40E_MASK(0x3FFF, I40E_GL_MDET_RX_QUEUE_SHIFT)
+#define I40E_GL_MDET_RX_VALID_SHIFT 31
+#define I40E_GL_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_RX_VALID_SHIFT)
+#define I40E_GL_MDET_TX 0x000E6480 /* Reset: CORER */
+#define I40E_GL_MDET_TX_QUEUE_SHIFT 0
+#define I40E_GL_MDET_TX_QUEUE_MASK I40E_MASK(0xFFF, I40E_GL_MDET_TX_QUEUE_SHIFT)
+#define I40E_GL_MDET_TX_VF_NUM_SHIFT 12
+#define I40E_GL_MDET_TX_VF_NUM_MASK I40E_MASK(0x1FF, I40E_GL_MDET_TX_VF_NUM_SHIFT)
+#define I40E_GL_MDET_TX_PF_NUM_SHIFT 21
+#define I40E_GL_MDET_TX_PF_NUM_MASK I40E_MASK(0xF, I40E_GL_MDET_TX_PF_NUM_SHIFT)
+#define I40E_GL_MDET_TX_EVENT_SHIFT 25
+#define I40E_GL_MDET_TX_EVENT_MASK I40E_MASK(0x1F, I40E_GL_MDET_TX_EVENT_SHIFT)
+#define I40E_GL_MDET_TX_VALID_SHIFT 31
+#define I40E_GL_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_TX_VALID_SHIFT)
+#define I40E_PF_MDET_RX 0x0012A400 /* Reset: CORER */
+#define I40E_PF_MDET_RX_VALID_SHIFT 0
+#define I40E_PF_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_RX_VALID_SHIFT)
+#define I40E_PF_MDET_TX 0x000E6400 /* Reset: CORER */
+#define I40E_PF_MDET_TX_VALID_SHIFT 0
+#define I40E_PF_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_TX_VALID_SHIFT)
+#define I40E_PF_VT_PFALLOC 0x001C0500 /* Reset: CORER */
+#define I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT 0
+#define I40E_PF_VT_PFALLOC_FIRSTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8
+#define I40E_PF_VT_PFALLOC_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_VALID_SHIFT 31
+#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_VALID_SHIFT)
+#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: CORER */
+#define I40E_VP_MDET_RX_MAX_INDEX 127
+#define I40E_VP_MDET_RX_VALID_SHIFT 0
+#define I40E_VP_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_RX_VALID_SHIFT)
+#define I40E_VP_MDET_TX(_VF) (0x000E6000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: CORER */
+#define I40E_VP_MDET_TX_MAX_INDEX 127
+#define I40E_VP_MDET_TX_VALID_SHIFT 0
+#define I40E_VP_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_TX_VALID_SHIFT)
+#define I40E_GLPM_WUMC 0x0006C800 /* Reset: POR */
+#define I40E_GLPM_WUMC_NOTCO_SHIFT 0
+#define I40E_GLPM_WUMC_NOTCO_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_NOTCO_SHIFT)
+#define I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT 1
+#define I40E_GLPM_WUMC_SRST_PIN_VAL_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT)
+#define I40E_GLPM_WUMC_ROL_MODE_SHIFT 2
+#define I40E_GLPM_WUMC_ROL_MODE_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_ROL_MODE_SHIFT)
+#define I40E_GLPM_WUMC_RESERVED_4_SHIFT 3
+#define I40E_GLPM_WUMC_RESERVED_4_MASK I40E_MASK(0x1FFF, I40E_GLPM_WUMC_RESERVED_4_SHIFT)
+#define I40E_GLPM_WUMC_MNG_WU_PF_SHIFT 16
+#define I40E_GLPM_WUMC_MNG_WU_PF_MASK I40E_MASK(0xFFFF, I40E_GLPM_WUMC_MNG_WU_PF_SHIFT)
+#define I40E_PFPM_APM 0x000B8080 /* Reset: POR */
+#define I40E_PFPM_APM_APME_SHIFT 0
+#define I40E_PFPM_APM_APME_MASK I40E_MASK(0x1, I40E_PFPM_APM_APME_SHIFT)
+#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */ /* Reset: POR */
+#define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7
+#define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0
+#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT)
+#define I40E_PFPM_WUC 0x0006B200 /* Reset: POR */
+#define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5
+#define I40E_PFPM_WUC_EN_APM_D0_MASK I40E_MASK(0x1, I40E_PFPM_WUC_EN_APM_D0_SHIFT)
+#define I40E_PFPM_WUFC 0x0006B400 /* Reset: POR */
+#define I40E_PFPM_WUFC_LNKC_SHIFT 0
+#define I40E_PFPM_WUFC_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_LNKC_SHIFT)
+#define I40E_PFPM_WUFC_MAG_SHIFT 1
+#define I40E_PFPM_WUFC_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MAG_SHIFT)
+#define I40E_PFPM_WUFC_MNG_SHIFT 3
+#define I40E_PFPM_WUFC_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MNG_SHIFT)
+#define I40E_PFPM_WUFC_FLX0_ACT_SHIFT 4
+#define I40E_PFPM_WUFC_FLX0_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX1_ACT_SHIFT 5
+#define I40E_PFPM_WUFC_FLX1_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX2_ACT_SHIFT 6
+#define I40E_PFPM_WUFC_FLX2_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX3_ACT_SHIFT 7
+#define I40E_PFPM_WUFC_FLX3_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX4_ACT_SHIFT 8
+#define I40E_PFPM_WUFC_FLX4_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX5_ACT_SHIFT 9
+#define I40E_PFPM_WUFC_FLX5_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX6_ACT_SHIFT 10
+#define I40E_PFPM_WUFC_FLX6_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX7_ACT_SHIFT 11
+#define I40E_PFPM_WUFC_FLX7_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX0_SHIFT 16
+#define I40E_PFPM_WUFC_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_SHIFT)
+#define I40E_PFPM_WUFC_FLX1_SHIFT 17
+#define I40E_PFPM_WUFC_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_SHIFT)
+#define I40E_PFPM_WUFC_FLX2_SHIFT 18
+#define I40E_PFPM_WUFC_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_SHIFT)
+#define I40E_PFPM_WUFC_FLX3_SHIFT 19
+#define I40E_PFPM_WUFC_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_SHIFT)
+#define I40E_PFPM_WUFC_FLX4_SHIFT 20
+#define I40E_PFPM_WUFC_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_SHIFT)
+#define I40E_PFPM_WUFC_FLX5_SHIFT 21
+#define I40E_PFPM_WUFC_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_SHIFT)
+#define I40E_PFPM_WUFC_FLX6_SHIFT 22
+#define I40E_PFPM_WUFC_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_SHIFT)
+#define I40E_PFPM_WUFC_FLX7_SHIFT 23
+#define I40E_PFPM_WUFC_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_SHIFT)
+#define I40E_PFPM_WUFC_FW_RST_WK_SHIFT 31
+#define I40E_PFPM_WUFC_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FW_RST_WK_SHIFT)
+#define I40E_PFPM_WUS 0x0006B600 /* Reset: POR */
+#define I40E_PFPM_WUS_LNKC_SHIFT 0
+#define I40E_PFPM_WUS_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUS_LNKC_SHIFT)
+#define I40E_PFPM_WUS_MAG_SHIFT 1
+#define I40E_PFPM_WUS_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MAG_SHIFT)
+#define I40E_PFPM_WUS_PME_STATUS_SHIFT 2
+#define I40E_PFPM_WUS_PME_STATUS_MASK I40E_MASK(0x1, I40E_PFPM_WUS_PME_STATUS_SHIFT)
+#define I40E_PFPM_WUS_MNG_SHIFT 3
+#define I40E_PFPM_WUS_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MNG_SHIFT)
+#define I40E_PFPM_WUS_FLX0_SHIFT 16
+#define I40E_PFPM_WUS_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX0_SHIFT)
+#define I40E_PFPM_WUS_FLX1_SHIFT 17
+#define I40E_PFPM_WUS_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX1_SHIFT)
+#define I40E_PFPM_WUS_FLX2_SHIFT 18
+#define I40E_PFPM_WUS_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX2_SHIFT)
+#define I40E_PFPM_WUS_FLX3_SHIFT 19
+#define I40E_PFPM_WUS_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX3_SHIFT)
+#define I40E_PFPM_WUS_FLX4_SHIFT 20
+#define I40E_PFPM_WUS_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX4_SHIFT)
+#define I40E_PFPM_WUS_FLX5_SHIFT 21
+#define I40E_PFPM_WUS_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX5_SHIFT)
+#define I40E_PFPM_WUS_FLX6_SHIFT 22
+#define I40E_PFPM_WUS_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX6_SHIFT)
+#define I40E_PFPM_WUS_FLX7_SHIFT 23
+#define I40E_PFPM_WUS_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX7_SHIFT)
+#define I40E_PFPM_WUS_FW_RST_WK_SHIFT 31
+#define I40E_PFPM_WUS_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FW_RST_WK_SHIFT)
+#define I40E_PRTPM_FHFHR 0x0006C000 /* Reset: POR */
+#define I40E_PRTPM_FHFHR_UNICAST_SHIFT 0
+#define I40E_PRTPM_FHFHR_UNICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_UNICAST_SHIFT)
+#define I40E_PRTPM_FHFHR_MULTICAST_SHIFT 1
+#define I40E_PRTPM_FHFHR_MULTICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_MULTICAST_SHIFT)
+#define I40E_PRTPM_SAH(_i) (0x001E44C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */
+#define I40E_PRTPM_SAH_MAX_INDEX 3
+#define I40E_PRTPM_SAH_PFPM_SAH_SHIFT 0
+#define I40E_PRTPM_SAH_PFPM_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTPM_SAH_PFPM_SAH_SHIFT)
+#define I40E_PRTPM_SAH_PF_NUM_SHIFT 26
+#define I40E_PRTPM_SAH_PF_NUM_MASK I40E_MASK(0xF, I40E_PRTPM_SAH_PF_NUM_SHIFT)
+#define I40E_PRTPM_SAH_MC_MAG_EN_SHIFT 30
+#define I40E_PRTPM_SAH_MC_MAG_EN_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_MC_MAG_EN_SHIFT)
+#define I40E_PRTPM_SAH_AV_SHIFT 31
+#define I40E_PRTPM_SAH_AV_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_AV_SHIFT)
+#define I40E_PRTPM_SAL(_i) (0x001E4440 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */
+#define I40E_PRTPM_SAL_MAX_INDEX 3
+#define I40E_PRTPM_SAL_PFPM_SAL_SHIFT 0
+#define I40E_PRTPM_SAL_PFPM_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_SAL_PFPM_SAL_SHIFT)
+#define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */
+#define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0
+#define I40E_VF_ARQBAH1_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH1_ARQBAH_SHIFT)
+#define I40E_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */
+#define I40E_VF_ARQBAL1_ARQBAL_SHIFT 0
+#define I40E_VF_ARQBAL1_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL1_ARQBAL_SHIFT)
+#define I40E_VF_ARQH1 0x00007400 /* Reset: EMPR */
+#define I40E_VF_ARQH1_ARQH_SHIFT 0
+#define I40E_VF_ARQH1_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH1_ARQH_SHIFT)
+#define I40E_VF_ARQLEN1 0x00008000 /* Reset: EMPR */
+#define I40E_VF_ARQLEN1_ARQLEN_SHIFT 0
+#define I40E_VF_ARQLEN1_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN1_ARQLEN_SHIFT)
+#define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28
+#define I40E_VF_ARQLEN1_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQVFE_SHIFT)
+#define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29
+#define I40E_VF_ARQLEN1_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQOVFL_SHIFT)
+#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30
+#define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
+#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31
+#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
+#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */
+#define I40E_VF_ARQT1_ARQT_SHIFT 0
+#define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT)
+#define I40E_VF_ATQBAH1 0x00007800 /* Reset: EMPR */
+#define I40E_VF_ATQBAH1_ATQBAH_SHIFT 0
+#define I40E_VF_ATQBAH1_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH1_ATQBAH_SHIFT)
+#define I40E_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */
+#define I40E_VF_ATQBAL1_ATQBAL_SHIFT 0
+#define I40E_VF_ATQBAL1_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL1_ATQBAL_SHIFT)
+#define I40E_VF_ATQH1 0x00006400 /* Reset: EMPR */
+#define I40E_VF_ATQH1_ATQH_SHIFT 0
+#define I40E_VF_ATQH1_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH1_ATQH_SHIFT)
+#define I40E_VF_ATQLEN1 0x00006800 /* Reset: EMPR */
+#define I40E_VF_ATQLEN1_ATQLEN_SHIFT 0
+#define I40E_VF_ATQLEN1_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN1_ATQLEN_SHIFT)
+#define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28
+#define I40E_VF_ATQLEN1_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQVFE_SHIFT)
+#define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29
+#define I40E_VF_ATQLEN1_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQOVFL_SHIFT)
+#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30
+#define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
+#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31
+#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
+#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */
+#define I40E_VF_ATQT1_ATQT_SHIFT 0
+#define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT)
+#define I40E_VFGEN_RSTAT 0x00008800 /* Reset: VFR */
+#define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0
+#define I40E_VFGEN_RSTAT_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT_VFR_STATE_SHIFT)
+#define I40E_VFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */
+#define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTL01_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTL01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT)
+#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _INTVF=0...15 */ /* Reset: VFR */
+#define I40E_VFINT_DYN_CTLN1_MAX_INDEX 15
+#define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTLN1_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT)
+#define I40E_VFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */
+#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31
+#define I40E_VFINT_ICR0_ENA1_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_RSVD_SHIFT)
+#define I40E_VFINT_ICR01 0x00004800 /* Reset: CORER */
+#define I40E_VFINT_ICR01_INTEVENT_SHIFT 0
+#define I40E_VFINT_ICR01_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_INTEVENT_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_0_SHIFT 1
+#define I40E_VFINT_ICR01_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_0_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_1_SHIFT 2
+#define I40E_VFINT_ICR01_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_1_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_2_SHIFT 3
+#define I40E_VFINT_ICR01_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_2_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_3_SHIFT 4
+#define I40E_VFINT_ICR01_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_3_SHIFT)
+#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR01_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR01_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR01_SWINT_SHIFT 31
+#define I40E_VFINT_ICR01_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_SWINT_SHIFT)
+#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */ /* Reset: VFR */
+#define I40E_VFINT_ITR01_MAX_INDEX 2
+#define I40E_VFINT_ITR01_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITR01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR01_INTERVAL_SHIFT)
+#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */
+#define I40E_VFINT_ITRN1_MAX_INDEX 2
+#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT)
+#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: VFR */
+#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2
+#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT)
+#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _Q=0...15 */ /* Reset: CORER */
+#define I40E_QRX_TAIL1_MAX_INDEX 15
+#define I40E_QRX_TAIL1_TAIL_SHIFT 0
+#define I40E_QRX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL1_TAIL_SHIFT)
+#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _Q=0...15 */ /* Reset: PFR */
+#define I40E_QTX_TAIL1_MAX_INDEX 15
+#define I40E_QTX_TAIL1_TAIL_SHIFT 0
+#define I40E_QTX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL1_TAIL_SHIFT)
+#define I40E_VFMSIX_PBA 0x00002000 /* Reset: VFLR */
+#define I40E_VFMSIX_PBA_PENBIT_SHIFT 0
+#define I40E_VFMSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA_PENBIT_SHIFT)
+#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TADD_MAX_INDEX 16
+#define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0
+#define I40E_VFMSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD_MSIXTADD10_SHIFT)
+#define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2
+#define I40E_VFMSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD_MSIXTADD_SHIFT)
+#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TMSG_MAX_INDEX 16
+#define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0
+#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT)
+#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TUADD_MAX_INDEX 16
+#define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0
+#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT)
+#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TVCTRL_MAX_INDEX 16
+#define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0
+#define I40E_VFMSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL_MASK_SHIFT)
+#define I40E_VFCM_PE_ERRDATA 0x0000DC00 /* Reset: VFR */
+#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_VFCM_PE_ERRINFO 0x0000D800 /* Reset: VFR */
+#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_VFQF_HENA_MAX_INDEX 1
+#define I40E_VFQF_HENA_PTYPE_ENA_SHIFT 0
+#define I40E_VFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA_PTYPE_ENA_SHIFT)
+#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
+#define I40E_VFQF_HKEY_MAX_INDEX 12
+#define I40E_VFQF_HKEY_KEY_0_SHIFT 0
+#define I40E_VFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_0_SHIFT)
+#define I40E_VFQF_HKEY_KEY_1_SHIFT 8
+#define I40E_VFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_1_SHIFT)
+#define I40E_VFQF_HKEY_KEY_2_SHIFT 16
+#define I40E_VFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_2_SHIFT)
+#define I40E_VFQF_HKEY_KEY_3_SHIFT 24
+#define I40E_VFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_3_SHIFT)
+#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_VFQF_HLUT_MAX_INDEX 15
+#define I40E_VFQF_HLUT_LUT0_SHIFT 0
+#define I40E_VFQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT0_SHIFT)
+#define I40E_VFQF_HLUT_LUT1_SHIFT 8
+#define I40E_VFQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT1_SHIFT)
+#define I40E_VFQF_HLUT_LUT2_SHIFT 16
+#define I40E_VFQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT2_SHIFT)
+#define I40E_VFQF_HLUT_LUT3_SHIFT 24
+#define I40E_VFQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT3_SHIFT)
+#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_VFQF_HREGION_MAX_INDEX 7
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_VFQF_HREGION_REGION_0_SHIFT 1
+#define I40E_VFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_0_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_VFQF_HREGION_REGION_1_SHIFT 5
+#define I40E_VFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_1_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_VFQF_HREGION_REGION_2_SHIFT 9
+#define I40E_VFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_2_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_VFQF_HREGION_REGION_3_SHIFT 13
+#define I40E_VFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_3_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_VFQF_HREGION_REGION_4_SHIFT 17
+#define I40E_VFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_4_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_VFQF_HREGION_REGION_5_SHIFT 21
+#define I40E_VFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_5_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_VFQF_HREGION_REGION_6_SHIFT 25
+#define I40E_VFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_6_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_VFQF_HREGION_REGION_7_SHIFT 29
+#define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT)
+#endif
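
The *_SHIFT/*_MASK pairs above are consumed by masking a register value and
shifting the field of interest down to bit 0. A minimal sketch of that
pattern, reading the PF number out of I40E_GL_MDET_TX, follows; the rd32()
helper, the u32 typedef and struct i40e_hw are assumed here for illustration
only and come from the osdep/type headers rather than this register header.

    #include "i40e_osdep.h"	/* assumed to provide u32, rd32(), struct i40e_hw */
    #include "i40e_register.h"	/* the SHIFT/MASK definitions shown above */

    /*
     * Illustrative sketch only: isolate the PF_NUM field of the Tx
     * malicious-driver-detection register.  Mask first, then shift the
     * field down so the caller sees a plain 0..15 PF index.
     */
    static inline u32
    ixl_mdet_tx_pf_num(struct i40e_hw *hw)
    {
    	u32 reg = rd32(hw, I40E_GL_MDET_TX);	/* full 32-bit register */

    	return ((reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
    	    I40E_GL_MDET_TX_PF_NUM_SHIFT);
    }

The same mask-then-shift idiom applies to every field in these headers; only
the register offset and the field's SHIFT/MASK pair change.
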
diff --git a/sys/dev/ixl/i40e_register_x710_int.h b/sys/dev/ixl/i40e_register_x710_int.h
new file mode 100755
index 0000000..2f970cd
--- /dev/null
+++ b/sys/dev/ixl/i40e_register_x710_int.h
@@ -0,0 +1,10713 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2014, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _I40E_REGISTER_X710_INT_H_
+#define _I40E_REGISTER_X710_INT_H_
+
+/* PF - Admin Queue */
+
+#define I40E_GL_ARQLEN 0x000802C0 /* Reset: EMPR */
+#define I40E_GL_ARQLEN_ARQLEN_SHIFT 0
+#define I40E_GL_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_GL_ARQLEN_ARQLEN_SHIFT)
+#define I40E_GL_ARQLEN_ARQVFE_SHIFT 28
+#define I40E_GL_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_GL_ARQLEN_ARQVFE_SHIFT)
+#define I40E_GL_ARQLEN_ARQOVFL_SHIFT 29
+#define I40E_GL_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_GL_ARQLEN_ARQOVFL_SHIFT)
+#define I40E_GL_ARQLEN_ARQCRIT_SHIFT 30
+#define I40E_GL_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_GL_ARQLEN_ARQCRIT_SHIFT)
+#define I40E_GL_ARQLEN_ARQENABLE_SHIFT 31
+#define I40E_GL_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_GL_ARQLEN_ARQENABLE_SHIFT)
+
+/* PF - Analyzer Registers */
+
+#define I40E_GL_RCU_PRS_L2TAG(_i) (0x0026CFC0 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_GL_RCU_PRS_L2TAG_MAX_INDEX 7
+#define I40E_GL_RCU_PRS_L2TAG_LENGTH_SHIFT 0
+#define I40E_GL_RCU_PRS_L2TAG_LENGTH_MASK I40E_MASK(0x7F, I40E_GL_RCU_PRS_L2TAG_LENGTH_SHIFT)
+#define I40E_GL_RCU_PRS_L2TAG_HAS_UP_SHIFT 7
+#define I40E_GL_RCU_PRS_L2TAG_HAS_UP_MASK I40E_MASK(0x1, I40E_GL_RCU_PRS_L2TAG_HAS_UP_SHIFT)
+#define I40E_GL_RCU_PRS_L2TAG_ISVLAN_SHIFT 9
+#define I40E_GL_RCU_PRS_L2TAG_ISVLAN_MASK I40E_MASK(0x1, I40E_GL_RCU_PRS_L2TAG_ISVLAN_SHIFT)
+#define I40E_GL_RCU_PRS_L2TAG_INNERUP_SHIFT 10
+#define I40E_GL_RCU_PRS_L2TAG_INNERUP_MASK I40E_MASK(0x1, I40E_GL_RCU_PRS_L2TAG_INNERUP_SHIFT)
+#define I40E_GL_RCU_PRS_L2TAG_OUTERUP_SHIFT 11
+#define I40E_GL_RCU_PRS_L2TAG_OUTERUP_MASK I40E_MASK(0x1, I40E_GL_RCU_PRS_L2TAG_OUTERUP_SHIFT)
+#define I40E_GL_RCU_PRS_L2TAG_LONG_SHIFT 12
+#define I40E_GL_RCU_PRS_L2TAG_LONG_MASK I40E_MASK(0x1, I40E_GL_RCU_PRS_L2TAG_LONG_SHIFT)
+#define I40E_GL_RCU_PRS_L2TAG_ISSIA_SHIFT 13
+#define I40E_GL_RCU_PRS_L2TAG_ISSIA_MASK I40E_MASK(0x1, I40E_GL_RCU_PRS_L2TAG_ISSIA_SHIFT)
+#define I40E_GL_RCU_PRS_L2TAG_ETHERTYPE_SHIFT 16
+#define I40E_GL_RCU_PRS_L2TAG_ETHERTYPE_MASK I40E_MASK(0xFFFF, I40E_GL_RCU_PRS_L2TAG_ETHERTYPE_SHIFT)
+
+#define I40E_GL_SWT_L2TAG0(_i) (0x00044278 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_GL_SWT_L2TAG0_MAX_INDEX 7
+#define I40E_GL_SWT_L2TAG0_DATA_SHIFT 0
+#define I40E_GL_SWT_L2TAG0_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWT_L2TAG0_DATA_SHIFT)
+
+#define I40E_GL_SWT_L2TAG1(_i) (0x00044298 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_GL_SWT_L2TAG1_MAX_INDEX 7
+#define I40E_GL_SWT_L2TAG1_DATA_SHIFT 0
+#define I40E_GL_SWT_L2TAG1_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWT_L2TAG1_DATA_SHIFT)
+
+#define I40E_GL_SWT_L2TAGCTRL(_i) (0x001C0A70 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_GL_SWT_L2TAGCTRL_MAX_INDEX 7
+#define I40E_GL_SWT_L2TAGCTRL_LENGTH_SHIFT 0
+#define I40E_GL_SWT_L2TAGCTRL_LENGTH_MASK I40E_MASK(0x7F, I40E_GL_SWT_L2TAGCTRL_LENGTH_SHIFT)
+#define I40E_GL_SWT_L2TAGCTRL_HAS_UP_SHIFT 7
+#define I40E_GL_SWT_L2TAGCTRL_HAS_UP_MASK I40E_MASK(0x1, I40E_GL_SWT_L2TAGCTRL_HAS_UP_SHIFT)
+#define I40E_GL_SWT_L2TAGCTRL_ISVLAN_SHIFT 9
+#define I40E_GL_SWT_L2TAGCTRL_ISVLAN_MASK I40E_MASK(0x1, I40E_GL_SWT_L2TAGCTRL_ISVLAN_SHIFT)
+#define I40E_GL_SWT_L2TAGCTRL_INNERUP_SHIFT 10
+#define I40E_GL_SWT_L2TAGCTRL_INNERUP_MASK I40E_MASK(0x1, I40E_GL_SWT_L2TAGCTRL_INNERUP_SHIFT)
+#define I40E_GL_SWT_L2TAGCTRL_OUTERUP_SHIFT 11
+#define I40E_GL_SWT_L2TAGCTRL_OUTERUP_MASK I40E_MASK(0x1, I40E_GL_SWT_L2TAGCTRL_OUTERUP_SHIFT)
+#define I40E_GL_SWT_L2TAGCTRL_LONG_SHIFT 12
+#define I40E_GL_SWT_L2TAGCTRL_LONG_MASK I40E_MASK(0x1, I40E_GL_SWT_L2TAGCTRL_LONG_SHIFT)
+#define I40E_GL_SWT_L2TAGCTRL_ISSIA_SHIFT 13
+#define I40E_GL_SWT_L2TAGCTRL_ISSIA_MASK I40E_MASK(0x1, I40E_GL_SWT_L2TAGCTRL_ISSIA_SHIFT)
+#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT 16
+#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK I40E_MASK(0xFFFF, I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT)
+
+#define I40E_GL_SWT_L2TAGRXEB(_i) (0x00051000 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_GL_SWT_L2TAGRXEB_MAX_INDEX 7
+#define I40E_GL_SWT_L2TAGRXEB_OFFSET_SHIFT 0
+#define I40E_GL_SWT_L2TAGRXEB_OFFSET_MASK I40E_MASK(0xFF, I40E_GL_SWT_L2TAGRXEB_OFFSET_SHIFT)
+#define I40E_GL_SWT_L2TAGRXEB_LENGTH_SHIFT 8
+#define I40E_GL_SWT_L2TAGRXEB_LENGTH_MASK I40E_MASK(0x3, I40E_GL_SWT_L2TAGRXEB_LENGTH_SHIFT)
+
+#define I40E_GL_SWT_L2TAGTXIB(_i) (0x000442B8 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_GL_SWT_L2TAGTXIB_MAX_INDEX 7
+#define I40E_GL_SWT_L2TAGTXIB_OFFSET_SHIFT 0
+#define I40E_GL_SWT_L2TAGTXIB_OFFSET_MASK I40E_MASK(0xFF, I40E_GL_SWT_L2TAGTXIB_OFFSET_SHIFT)
+#define I40E_GL_SWT_L2TAGTXIB_LENGTH_SHIFT 8
+#define I40E_GL_SWT_L2TAGTXIB_LENGTH_MASK I40E_MASK(0x3, I40E_GL_SWT_L2TAGTXIB_LENGTH_SHIFT)
+
+#define I40E_GLANL_L2ULP(_i) (0x001C0A2C + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLANL_L2ULP_MAX_INDEX 15
+#define I40E_GLANL_L2ULP_ETHERTYPE_SHIFT 0
+#define I40E_GLANL_L2ULP_ETHERTYPE_MASK I40E_MASK(0xFFFF, I40E_GLANL_L2ULP_ETHERTYPE_SHIFT)
+#define I40E_GLANL_L2ULP_ENABLE_SHIFT 31
+#define I40E_GLANL_L2ULP_ENABLE_MASK I40E_MASK(0x1, I40E_GLANL_L2ULP_ENABLE_SHIFT)
+
+#define I40E_GLANL_PRE_LY2 0x001C0A20 /* Reset: CORER */
+#define I40E_GLANL_PRE_LY2_PRE_LY2_L2_SHIFT 0
+#define I40E_GLANL_PRE_LY2_PRE_LY2_L2_MASK I40E_MASK(0xFFFF, I40E_GLANL_PRE_LY2_PRE_LY2_L2_SHIFT)
+
+#define I40E_GLPPRS_INDIRECT_ADDRESS 0x001C0A90 /* Reset: CORER */
+#define I40E_GLPPRS_INDIRECT_ADDRESS_ADDR_SHIFT 0
+#define I40E_GLPPRS_INDIRECT_ADDRESS_ADDR_MASK I40E_MASK(0xFFFF, I40E_GLPPRS_INDIRECT_ADDRESS_ADDR_SHIFT)
+
+#define I40E_GLPPRS_INDIRECT_DATA(_i) (0x001C0A94 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPPRS_INDIRECT_DATA_MAX_INDEX 3
+#define I40E_GLPPRS_INDIRECT_DATA_DATA_SHIFT 0
+#define I40E_GLPPRS_INDIRECT_DATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPPRS_INDIRECT_DATA_DATA_SHIFT)
+
+#define I40E_GLRDPU_L2TAGCTRL(_i) (0x00051020 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_GLRDPU_L2TAGCTRL_MAX_INDEX 7
+#define I40E_GLRDPU_L2TAGCTRL_LENGTH_SHIFT 0
+#define I40E_GLRDPU_L2TAGCTRL_LENGTH_MASK I40E_MASK(0x7F, I40E_GLRDPU_L2TAGCTRL_LENGTH_SHIFT)
+#define I40E_GLRDPU_L2TAGCTRL_HAS_UP_SHIFT 7
+#define I40E_GLRDPU_L2TAGCTRL_HAS_UP_MASK I40E_MASK(0x1, I40E_GLRDPU_L2TAGCTRL_HAS_UP_SHIFT)
+#define I40E_GLRDPU_L2TAGCTRL_ISVLAN_SHIFT 9
+#define I40E_GLRDPU_L2TAGCTRL_ISVLAN_MASK I40E_MASK(0x1, I40E_GLRDPU_L2TAGCTRL_ISVLAN_SHIFT)
+#define I40E_GLRDPU_L2TAGCTRL_INNERUP_SHIFT 10
+#define I40E_GLRDPU_L2TAGCTRL_INNERUP_MASK I40E_MASK(0x1, I40E_GLRDPU_L2TAGCTRL_INNERUP_SHIFT)
+#define I40E_GLRDPU_L2TAGCTRL_OUTERUP_SHIFT 11
+#define I40E_GLRDPU_L2TAGCTRL_OUTERUP_MASK I40E_MASK(0x1, I40E_GLRDPU_L2TAGCTRL_OUTERUP_SHIFT)
+#define I40E_GLRDPU_L2TAGCTRL_LONG_SHIFT 12
+#define I40E_GLRDPU_L2TAGCTRL_LONG_MASK I40E_MASK(0x1, I40E_GLRDPU_L2TAGCTRL_LONG_SHIFT)
+#define I40E_GLRDPU_L2TAGCTRL_ISSIA_SHIFT 13
+#define I40E_GLRDPU_L2TAGCTRL_ISSIA_MASK I40E_MASK(0x1, I40E_GLRDPU_L2TAGCTRL_ISSIA_SHIFT)
+#define I40E_GLRDPU_L2TAGCTRL_ETHERTYPE_SHIFT 16
+#define I40E_GLRDPU_L2TAGCTRL_ETHERTYPE_MASK I40E_MASK(0xFFFF, I40E_GLRDPU_L2TAGCTRL_ETHERTYPE_SHIFT)
+
+#define I40E_GLTDPU_L2TAGCTRL(_i) (0x00044204 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_GLTDPU_L2TAGCTRL_MAX_INDEX 7
+#define I40E_GLTDPU_L2TAGCTRL_LENGTH_SHIFT 0
+#define I40E_GLTDPU_L2TAGCTRL_LENGTH_MASK I40E_MASK(0x7F, I40E_GLTDPU_L2TAGCTRL_LENGTH_SHIFT)
+#define I40E_GLTDPU_L2TAGCTRL_HAS_UP_SHIFT 7
+#define I40E_GLTDPU_L2TAGCTRL_HAS_UP_MASK I40E_MASK(0x1, I40E_GLTDPU_L2TAGCTRL_HAS_UP_SHIFT)
+#define I40E_GLTDPU_L2TAGCTRL_ISVLAN_SHIFT 9
+#define I40E_GLTDPU_L2TAGCTRL_ISVLAN_MASK I40E_MASK(0x1, I40E_GLTDPU_L2TAGCTRL_ISVLAN_SHIFT)
+#define I40E_GLTDPU_L2TAGCTRL_INNERUP_SHIFT 10
+#define I40E_GLTDPU_L2TAGCTRL_INNERUP_MASK I40E_MASK(0x1, I40E_GLTDPU_L2TAGCTRL_INNERUP_SHIFT)
+#define I40E_GLTDPU_L2TAGCTRL_OUTERUP_SHIFT 11
+#define I40E_GLTDPU_L2TAGCTRL_OUTERUP_MASK I40E_MASK(0x1, I40E_GLTDPU_L2TAGCTRL_OUTERUP_SHIFT)
+#define I40E_GLTDPU_L2TAGCTRL_LONG_SHIFT 12
+#define I40E_GLTDPU_L2TAGCTRL_LONG_MASK I40E_MASK(0x1, I40E_GLTDPU_L2TAGCTRL_LONG_SHIFT)
+#define I40E_GLTDPU_L2TAGCTRL_ISSIA_SHIFT 13
+#define I40E_GLTDPU_L2TAGCTRL_ISSIA_MASK I40E_MASK(0x1, I40E_GLTDPU_L2TAGCTRL_ISSIA_SHIFT)
+#define I40E_GLTDPU_L2TAGCTRL_ETHERTYPE_SHIFT 16
+#define I40E_GLTDPU_L2TAGCTRL_ETHERTYPE_MASK I40E_MASK(0xFFFF, I40E_GLTDPU_L2TAGCTRL_ETHERTYPE_SHIFT)
+
+#define I40E_GLTDPU_L2ULP(_i) (0x00044224 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLTDPU_L2ULP_MAX_INDEX 15
+#define I40E_GLTDPU_L2ULP_ETHERTYPE_SHIFT 0
+#define I40E_GLTDPU_L2ULP_ETHERTYPE_MASK I40E_MASK(0xFFFF, I40E_GLTDPU_L2ULP_ETHERTYPE_SHIFT)
+#define I40E_GLTDPU_L2ULP_ENABLE_SHIFT 31
+#define I40E_GLTDPU_L2ULP_ENABLE_MASK I40E_MASK(0x1, I40E_GLTDPU_L2ULP_ENABLE_SHIFT)
+
+#define I40E_GLTDPU_PRE_LY2 0x00044200 /* Reset: CORER */
+#define I40E_GLTDPU_PRE_LY2_PRE_LY2_L2_SHIFT 0
+#define I40E_GLTDPU_PRE_LY2_PRE_LY2_L2_MASK I40E_MASK(0xFFFF, I40E_GLTDPU_PRE_LY2_PRE_LY2_L2_SHIFT)
+
+#define I40E_PRT_PPRSL2TAGSEN 0x00087080 /* Reset: CORER */
+#define I40E_PRT_PPRSL2TAGSEN_ENABLE_SHIFT 0
+#define I40E_PRT_PPRSL2TAGSEN_ENABLE_MASK I40E_MASK(0xFF, I40E_PRT_PPRSL2TAGSEN_ENABLE_SHIFT)
+
+#define I40E_PRT_TDPUL2TAGSEN 0x00044140 /* Reset: CORER */
+#define I40E_PRT_TDPUL2TAGSEN_ENABLE_SHIFT 0
+#define I40E_PRT_TDPUL2TAGSEN_ENABLE_MASK I40E_MASK(0xFF, I40E_PRT_TDPUL2TAGSEN_ENABLE_SHIFT)
+
+#define I40E_PRTPPRS_INDIRECT_ADDRESS 0x00084320 /* Reset: CORER */
+#define I40E_PRTPPRS_INDIRECT_ADDRESS_ADDR_SHIFT 0
+#define I40E_PRTPPRS_INDIRECT_ADDRESS_ADDR_MASK I40E_MASK(0xFFFF, I40E_PRTPPRS_INDIRECT_ADDRESS_ADDR_SHIFT)
+
+#define I40E_PRTPPRS_INDIRECT_DATA(_i) (0x00084340 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_PRTPPRS_INDIRECT_DATA_MAX_INDEX 3
+#define I40E_PRTPPRS_INDIRECT_DATA_DATA_SHIFT 0
+#define I40E_PRTPPRS_INDIRECT_DATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPPRS_INDIRECT_DATA_DATA_SHIFT)
+
+#define I40E_PRTPPRS_L2TAGCTRL(_i) (0x00084020 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTPPRS_L2TAGCTRL_MAX_INDEX 7
+#define I40E_PRTPPRS_L2TAGCTRL_LENGTH_SHIFT 0
+#define I40E_PRTPPRS_L2TAGCTRL_LENGTH_MASK I40E_MASK(0x7F, I40E_PRTPPRS_L2TAGCTRL_LENGTH_SHIFT)
+#define I40E_PRTPPRS_L2TAGCTRL_HAS_UP_SHIFT 7
+#define I40E_PRTPPRS_L2TAGCTRL_HAS_UP_MASK I40E_MASK(0x1, I40E_PRTPPRS_L2TAGCTRL_HAS_UP_SHIFT)
+#define I40E_PRTPPRS_L2TAGCTRL_ISVLAN_SHIFT 9
+#define I40E_PRTPPRS_L2TAGCTRL_ISVLAN_MASK I40E_MASK(0x1, I40E_PRTPPRS_L2TAGCTRL_ISVLAN_SHIFT)
+#define I40E_PRTPPRS_L2TAGCTRL_INNERUP_SHIFT 10
+#define I40E_PRTPPRS_L2TAGCTRL_INNERUP_MASK I40E_MASK(0x1, I40E_PRTPPRS_L2TAGCTRL_INNERUP_SHIFT)
+#define I40E_PRTPPRS_L2TAGCTRL_OUTERUP_SHIFT 11
+#define I40E_PRTPPRS_L2TAGCTRL_OUTERUP_MASK I40E_MASK(0x1, I40E_PRTPPRS_L2TAGCTRL_OUTERUP_SHIFT)
+#define I40E_PRTPPRS_L2TAGCTRL_LONG_SHIFT 12
+#define I40E_PRTPPRS_L2TAGCTRL_LONG_MASK I40E_MASK(0x1, I40E_PRTPPRS_L2TAGCTRL_LONG_SHIFT)
+#define I40E_PRTPPRS_L2TAGCTRL_ISSIA_SHIFT 13
+#define I40E_PRTPPRS_L2TAGCTRL_ISSIA_MASK I40E_MASK(0x1, I40E_PRTPPRS_L2TAGCTRL_ISSIA_SHIFT)
+#define I40E_PRTPPRS_L2TAGCTRL_ETHERTYPE_SHIFT 16
+#define I40E_PRTPPRS_L2TAGCTRL_ETHERTYPE_MASK I40E_MASK(0xFFFF, I40E_PRTPPRS_L2TAGCTRL_ETHERTYPE_SHIFT)
+
+#define I40E_PRTPPRS_L2ULP(_i) (0x00084120 + ((_i) * 32)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_PRTPPRS_L2ULP_MAX_INDEX 15
+#define I40E_PRTPPRS_L2ULP_ETHERTYPE_SHIFT 0
+#define I40E_PRTPPRS_L2ULP_ETHERTYPE_MASK I40E_MASK(0xFFFF, I40E_PRTPPRS_L2ULP_ETHERTYPE_SHIFT)
+#define I40E_PRTPPRS_L2ULP_ENABLE_SHIFT 31
+#define I40E_PRTPPRS_L2ULP_ENABLE_MASK I40E_MASK(0x1, I40E_PRTPPRS_L2ULP_ENABLE_SHIFT)
+
+#define I40E_PRTPPRS_PRE_LY2 0x00084000 /* Reset: CORER */
+#define I40E_PRTPPRS_PRE_LY2_PRE_LY2_L2_SHIFT 0
+#define I40E_PRTPPRS_PRE_LY2_PRE_LY2_L2_MASK I40E_MASK(0xFFFF, I40E_PRTPPRS_PRE_LY2_PRE_LY2_L2_SHIFT)
+
+#define I40E_PRTPPRS_SIATH(_i) (0x00085900 + ((_i) * 32)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_PRTPPRS_SIATH_MAX_INDEX 15
+#define I40E_PRTPPRS_SIATH_ETHERTYPE_SHIFT 0
+#define I40E_PRTPPRS_SIATH_ETHERTYPE_MASK I40E_MASK(0xFFFF, I40E_PRTPPRS_SIATH_ETHERTYPE_SHIFT)
+#define I40E_PRTPPRS_SIATH_VLAN_ID_SHIFT 16
+#define I40E_PRTPPRS_SIATH_VLAN_ID_MASK I40E_MASK(0xFFF, I40E_PRTPPRS_SIATH_VLAN_ID_SHIFT)
+#define I40E_PRTPPRS_SIATH_VALID_SHIFT 31
+#define I40E_PRTPPRS_SIATH_VALID_MASK I40E_MASK(0x1, I40E_PRTPPRS_SIATH_VALID_SHIFT)
+
+#define I40E_PRTPPRS_SIATL(_i) (0x00085700 + ((_i) * 32)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_PRTPPRS_SIATL_MAX_INDEX 15
+#define I40E_PRTPPRS_SIATL_GRE_PROTOCOL_SHIFT 0
+#define I40E_PRTPPRS_SIATL_GRE_PROTOCOL_MASK I40E_MASK(0xFFFF, I40E_PRTPPRS_SIATL_GRE_PROTOCOL_SHIFT)
+#define I40E_PRTPPRS_SIATL_GRE_FLAG_SHIFT 16
+#define I40E_PRTPPRS_SIATL_GRE_FLAG_MASK I40E_MASK(0x1, I40E_PRTPPRS_SIATL_GRE_FLAG_SHIFT)
+#define I40E_PRTPPRS_SIATL_NIBBLE_FLAG_SHIFT 17
+#define I40E_PRTPPRS_SIATL_NIBBLE_FLAG_MASK I40E_MASK(0x1, I40E_PRTPPRS_SIATL_NIBBLE_FLAG_SHIFT)
+#define I40E_PRTPPRS_SIATL_SKIP_OFFSET_SHIFT 18
+#define I40E_PRTPPRS_SIATL_SKIP_OFFSET_MASK I40E_MASK(0x3F, I40E_PRTPPRS_SIATL_SKIP_OFFSET_SHIFT)
+
+/* PF - CM Registers */
+
+#define I40E_GLCM_LANCLSADDR 0x0010C444 /* Reset: CORER */
+#define I40E_GLCM_LANCLSADDR_CLS_ADDR_SHIFT 0
+#define I40E_GLCM_LANCLSADDR_CLS_ADDR_MASK I40E_MASK(0x1FF, I40E_GLCM_LANCLSADDR_CLS_ADDR_SHIFT)
+
+#define I40E_GLCM_LANCLSDATAHI 0x0010C44C /* Reset: CORER */
+#define I40E_GLCM_LANCLSDATAHI_CLS_DATA_HI_SHIFT 0
+#define I40E_GLCM_LANCLSDATAHI_CLS_DATA_HI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLCM_LANCLSDATAHI_CLS_DATA_HI_SHIFT)
+
+#define I40E_GLCM_LANCLSDATALO 0x0010C448 /* Reset: CORER */
+#define I40E_GLCM_LANCLSDATALO_CLS_DATA_LO_SHIFT 0
+#define I40E_GLCM_LANCLSDATALO_CLS_DATA_LO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLCM_LANCLSDATALO_CLS_DATA_LO_SHIFT)
+
+#define I40E_GLCM_LANCONFIG 0x0010C430 /* Reset: CORER */
+#define I40E_GLCM_LANCONFIG_GLOBAL_LOCK_MODE_SHIFT 1
+#define I40E_GLCM_LANCONFIG_GLOBAL_LOCK_MODE_MASK I40E_MASK(0x1, I40E_GLCM_LANCONFIG_GLOBAL_LOCK_MODE_SHIFT)
+#define I40E_GLCM_LANCONFIG_DISABLE_PACKET_COUNT_SHIFT 2
+#define I40E_GLCM_LANCONFIG_DISABLE_PACKET_COUNT_MASK I40E_MASK(0x1, I40E_GLCM_LANCONFIG_DISABLE_PACKET_COUNT_SHIFT)
+#define I40E_GLCM_LANCONFIG_DISABLE_RESCHEDULE_SHIFT 3
+#define I40E_GLCM_LANCONFIG_DISABLE_RESCHEDULE_MASK I40E_MASK(0x1, I40E_GLCM_LANCONFIG_DISABLE_RESCHEDULE_SHIFT)
+#define I40E_GLCM_LANCONFIG_ENABLE_CRC_SHIFT 4
+#define I40E_GLCM_LANCONFIG_ENABLE_CRC_MASK I40E_MASK(0x1, I40E_GLCM_LANCONFIG_ENABLE_CRC_SHIFT)
+#define I40E_GLCM_LANCONFIG_CACHE_DEPTH_SHIFT 5
+#define I40E_GLCM_LANCONFIG_CACHE_DEPTH_MASK I40E_MASK(0x7, I40E_GLCM_LANCONFIG_CACHE_DEPTH_SHIFT)
+#define I40E_GLCM_LANCONFIG_MAXFCOE_SHIFT 8
+#define I40E_GLCM_LANCONFIG_MAXFCOE_MASK I40E_MASK(0x3, I40E_GLCM_LANCONFIG_MAXFCOE_SHIFT)
+#define I40E_GLCM_LANCONFIG_DBG_DPSEL_SHIFT 12
+#define I40E_GLCM_LANCONFIG_DBG_DPSEL_MASK I40E_MASK(0x3, I40E_GLCM_LANCONFIG_DBG_DPSEL_SHIFT)
+#define I40E_GLCM_LANCONFIG_DBG_DWSEL_SHIFT 14
+#define I40E_GLCM_LANCONFIG_DBG_DWSEL_MASK I40E_MASK(0x3, I40E_GLCM_LANCONFIG_DBG_DWSEL_SHIFT)
+#define I40E_GLCM_LANCONFIG_DBG_WRSEL_SHIFT 16
+#define I40E_GLCM_LANCONFIG_DBG_WRSEL_MASK I40E_MASK(0x1, I40E_GLCM_LANCONFIG_DBG_WRSEL_SHIFT)
+#define I40E_GLCM_LANCONFIG_DBGMUX_SEL_LO_SHIFT 20
+#define I40E_GLCM_LANCONFIG_DBGMUX_SEL_LO_MASK I40E_MASK(0xF, I40E_GLCM_LANCONFIG_DBGMUX_SEL_LO_SHIFT)
+#define I40E_GLCM_LANCONFIG_DBGMUX_SEL_HI_SHIFT 24
+#define I40E_GLCM_LANCONFIG_DBGMUX_SEL_HI_MASK I40E_MASK(0xF, I40E_GLCM_LANCONFIG_DBGMUX_SEL_HI_SHIFT)
+#define I40E_GLCM_LANCONFIG_DBGMUX_EN_SHIFT 28
+#define I40E_GLCM_LANCONFIG_DBGMUX_EN_MASK I40E_MASK(0x1, I40E_GLCM_LANCONFIG_DBGMUX_EN_SHIFT)
+
+#define I40E_GLCM_LANCRDTHR 0x0010C41C /* Reset: CORER */
+#define I40E_GLCM_LANCRDTHR_CMLANCRDTHR_SHIFT 0
+#define I40E_GLCM_LANCRDTHR_CMLANCRDTHR_MASK I40E_MASK(0x3FFF, I40E_GLCM_LANCRDTHR_CMLANCRDTHR_SHIFT)
+#define I40E_GLCM_LANCRDTHR_CMLANTCBTHR_SHIFT 16
+#define I40E_GLCM_LANCRDTHR_CMLANTCBTHR_MASK I40E_MASK(0x7F, I40E_GLCM_LANCRDTHR_CMLANTCBTHR_SHIFT)
+
+#define I40E_GLCM_LANCTXDGCTL 0x0010C410 /* Reset: CORER */
+#define I40E_GLCM_LANCTXDGCTL_QUEUE_NUM_SHIFT 0
+#define I40E_GLCM_LANCTXDGCTL_QUEUE_NUM_MASK I40E_MASK(0xFFF, I40E_GLCM_LANCTXDGCTL_QUEUE_NUM_SHIFT)
+#define I40E_GLCM_LANCTXDGCTL_SUB_LINE_SHIFT 12
+#define I40E_GLCM_LANCTXDGCTL_SUB_LINE_MASK I40E_MASK(0x7, I40E_GLCM_LANCTXDGCTL_SUB_LINE_SHIFT)
+#define I40E_GLCM_LANCTXDGCTL_QUEUE_TYPE_SHIFT 15
+#define I40E_GLCM_LANCTXDGCTL_QUEUE_TYPE_MASK I40E_MASK(0x3, I40E_GLCM_LANCTXDGCTL_QUEUE_TYPE_SHIFT)
+#define I40E_GLCM_LANCTXDGCTL_OP_CODE_SHIFT 17
+#define I40E_GLCM_LANCTXDGCTL_OP_CODE_MASK I40E_MASK(0x3, I40E_GLCM_LANCTXDGCTL_OP_CODE_SHIFT)
+#define I40E_GLCM_LANCTXDGCTL_PKTCNT_SHIFT 19
+#define I40E_GLCM_LANCTXDGCTL_PKTCNT_MASK I40E_MASK(0x3, I40E_GLCM_LANCTXDGCTL_PKTCNT_SHIFT)
+#define I40E_GLCM_LANCTXDGCTL_INVALIDATE_SHIFT 21
+#define I40E_GLCM_LANCTXDGCTL_INVALIDATE_MASK I40E_MASK(0x1, I40E_GLCM_LANCTXDGCTL_INVALIDATE_SHIFT)
+#define I40E_GLCM_LANCTXDGCTL_WRITEBACK_SHIFT 22
+#define I40E_GLCM_LANCTXDGCTL_WRITEBACK_MASK I40E_MASK(0x1, I40E_GLCM_LANCTXDGCTL_WRITEBACK_SHIFT)
+#define I40E_GLCM_LANCTXDGCTL_ALLOCATE_SHIFT 23
+#define I40E_GLCM_LANCTXDGCTL_ALLOCATE_MASK I40E_MASK(0x1, I40E_GLCM_LANCTXDGCTL_ALLOCATE_SHIFT)
+
+#define I40E_GLCM_LANCTXDGDATA(_i) (0x0010C400 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLCM_LANCTXDGDATA_MAX_INDEX 3
+#define I40E_GLCM_LANCTXDGDATA_DATA_SHIFT 0
+#define I40E_GLCM_LANCTXDGDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLCM_LANCTXDGDATA_DATA_SHIFT)
+
+#define I40E_GLCM_LANCTXDGFN 0x0010C418 /* Reset: CORER */
+#define I40E_GLCM_LANCTXDGFN_PF_NUM_SHIFT 0
+#define I40E_GLCM_LANCTXDGFN_PF_NUM_MASK I40E_MASK(0xF, I40E_GLCM_LANCTXDGFN_PF_NUM_SHIFT)
+#define I40E_GLCM_LANCTXDGFN_VM_VF_NUM_SHIFT 4
+#define I40E_GLCM_LANCTXDGFN_VM_VF_NUM_MASK I40E_MASK(0x1FF, I40E_GLCM_LANCTXDGFN_VM_VF_NUM_SHIFT)
+#define I40E_GLCM_LANCTXDGFN_VM_VF_TYPE_SHIFT 16
+#define I40E_GLCM_LANCTXDGFN_VM_VF_TYPE_MASK I40E_MASK(0x3, I40E_GLCM_LANCTXDGFN_VM_VF_TYPE_SHIFT)
+
+#define I40E_GLCM_LANCTXDGSTAT 0x0010C414 /* Reset: CORER */
+#define I40E_GLCM_LANCTXDGSTAT_CTX_DONE_SHIFT 0
+#define I40E_GLCM_LANCTXDGSTAT_CTX_DONE_MASK I40E_MASK(0x1, I40E_GLCM_LANCTXDGSTAT_CTX_DONE_SHIFT)
+#define I40E_GLCM_LANCTXDGSTAT_CTX_MISS_SHIFT 1
+#define I40E_GLCM_LANCTXDGSTAT_CTX_MISS_MASK I40E_MASK(0x1, I40E_GLCM_LANCTXDGSTAT_CTX_MISS_SHIFT)
+
+#define I40E_GLCM_LANDATAREQHI 0x0010C478 /* Reset: CORER */
+#define I40E_GLCM_LANDATAREQHI_CMLANDATAREQHI_SHIFT 0
+#define I40E_GLCM_LANDATAREQHI_CMLANDATAREQHI_MASK I40E_MASK(0xFFFFFF, I40E_GLCM_LANDATAREQHI_CMLANDATAREQHI_SHIFT)
+
+#define I40E_GLCM_LANDATAREQLOW 0x0010C474 /* Reset: CORER */
+#define I40E_GLCM_LANDATAREQLOW_CMLANDATAREQLOW_SHIFT 0
+#define I40E_GLCM_LANDATAREQLOW_CMLANDATAREQLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_GLCM_LANDATAREQLOW_CMLANDATAREQLOW_SHIFT)
+
+#define I40E_GLCM_LANDATASTALLHI 0x0010C480 /* Reset: CORER */
+#define I40E_GLCM_LANDATASTALLHI_CMLANDATASTALLHI_SHIFT 0
+#define I40E_GLCM_LANDATASTALLHI_CMLANDATASTALLHI_MASK I40E_MASK(0xFFFFFF, I40E_GLCM_LANDATASTALLHI_CMLANDATASTALLHI_SHIFT)
+
+#define I40E_GLCM_LANDATASTALLLO 0x0010C47C /* Reset: CORER */
+#define I40E_GLCM_LANDATASTALLLO_CMLANDATASTALLLOW_SHIFT 0
+#define I40E_GLCM_LANDATASTALLLO_CMLANDATASTALLLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_GLCM_LANDATASTALLLO_CMLANDATASTALLLOW_SHIFT)
+
+#define I40E_GLCM_LANLOCKTBLADDR 0x0010C458 /* Reset: CORER */
+#define I40E_GLCM_LANLOCKTBLADDR_LOCKTBL_ADDR_SHIFT 0
+#define I40E_GLCM_LANLOCKTBLADDR_LOCKTBL_ADDR_MASK I40E_MASK(0xF, I40E_GLCM_LANLOCKTBLADDR_LOCKTBL_ADDR_SHIFT)
+
+#define I40E_GLCM_LANLOCKTBLDATAHI 0x0010C460 /* Reset: CORER */
+#define I40E_GLCM_LANLOCKTBLDATAHI_LOCKSEL_SHIFT 0
+#define I40E_GLCM_LANLOCKTBLDATAHI_LOCKSEL_MASK I40E_MASK(0xFF, I40E_GLCM_LANLOCKTBLDATAHI_LOCKSEL_SHIFT)
+#define I40E_GLCM_LANLOCKTBLDATAHI_GPLOCKSEL_SHIFT 8
+#define I40E_GLCM_LANLOCKTBLDATAHI_GPLOCKSEL_MASK I40E_MASK(0xF, I40E_GLCM_LANLOCKTBLDATAHI_GPLOCKSEL_SHIFT)
+
+#define I40E_GLCM_LANLOCKTBLDATALO 0x0010C45C /* Reset: CORER */
+#define I40E_GLCM_LANLOCKTBLDATALO_QNUM_SHIFT 0
+#define I40E_GLCM_LANLOCKTBLDATALO_QNUM_MASK I40E_MASK(0xFFF, I40E_GLCM_LANLOCKTBLDATALO_QNUM_SHIFT)
+#define I40E_GLCM_LANLOCKTBLDATALO_PF_NUM_SHIFT 12
+#define I40E_GLCM_LANLOCKTBLDATALO_PF_NUM_MASK I40E_MASK(0xF, I40E_GLCM_LANLOCKTBLDATALO_PF_NUM_SHIFT)
+#define I40E_GLCM_LANLOCKTBLDATALO_VM_VF_NUM_SHIFT 16
+#define I40E_GLCM_LANLOCKTBLDATALO_VM_VF_NUM_MASK I40E_MASK(0x1FF, I40E_GLCM_LANLOCKTBLDATALO_VM_VF_NUM_SHIFT)
+#define I40E_GLCM_LANLOCKTBLDATALO_VM_VF_TYPE_SHIFT 25
+#define I40E_GLCM_LANLOCKTBLDATALO_VM_VF_TYPE_MASK I40E_MASK(0x3, I40E_GLCM_LANLOCKTBLDATALO_VM_VF_TYPE_SHIFT)
+
+#define I40E_GLCM_LANMISSREQHI 0x0010C488 /* Reset: CORER */
+#define I40E_GLCM_LANMISSREQHI_CMLANMISSREQHI_SHIFT 0
+#define I40E_GLCM_LANMISSREQHI_CMLANMISSREQHI_MASK I40E_MASK(0xFFFFFF, I40E_GLCM_LANMISSREQHI_CMLANMISSREQHI_SHIFT)
+
+#define I40E_GLCM_LANMISSREQLO 0x0010C484 /* Reset: CORER */
+#define I40E_GLCM_LANMISSREQLO_CMLANMISSREQLOW_SHIFT 0
+#define I40E_GLCM_LANMISSREQLO_CMLANMISSREQLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_GLCM_LANMISSREQLO_CMLANMISSREQLOW_SHIFT)
+
+#define I40E_GLCM_LANPKTCNTADDR 0x0010C450 /* Reset: CORER */
+#define I40E_GLCM_LANPKTCNTADDR_PKTCNT_ADDR_SHIFT 0
+#define I40E_GLCM_LANPKTCNTADDR_PKTCNT_ADDR_MASK I40E_MASK(0x1FF, I40E_GLCM_LANPKTCNTADDR_PKTCNT_ADDR_SHIFT)
+
+#define I40E_GLCM_LANPKTCNTDATA 0x0010C454 /* Reset: CORER */
+#define I40E_GLCM_LANPKTCNTDATA_DONE_SHIFT 0
+#define I40E_GLCM_LANPKTCNTDATA_DONE_MASK I40E_MASK(0x1, I40E_GLCM_LANPKTCNTDATA_DONE_SHIFT)
+#define I40E_GLCM_LANPKTCNTDATA_PKTCNT_SHIFT 1
+#define I40E_GLCM_LANPKTCNTDATA_PKTCNT_MASK I40E_MASK(0x7FF, I40E_GLCM_LANPKTCNTDATA_PKTCNT_SHIFT)
+#define I40E_GLCM_LANPKTCNTDATA_RLSTATE_SHIFT 12
+#define I40E_GLCM_LANPKTCNTDATA_RLSTATE_MASK I40E_MASK(0x3, I40E_GLCM_LANPKTCNTDATA_RLSTATE_SHIFT)
+
+#define I40E_GLCM_LANRLADDR 0x0010C43C /* Reset: CORER */
+#define I40E_GLCM_LANRLADDR_RL_ADDR_SHIFT 0
+#define I40E_GLCM_LANRLADDR_RL_ADDR_MASK I40E_MASK(0xFFF, I40E_GLCM_LANRLADDR_RL_ADDR_SHIFT)
+
+#define I40E_GLCM_LANRLDATA 0x0010C440 /* Reset: CORER */
+#define I40E_GLCM_LANRLDATA_RL_DATA_SHIFT 0
+#define I40E_GLCM_LANRLDATA_RL_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLCM_LANRLDATA_RL_DATA_SHIFT)
+
+#define I40E_GLCM_LANRLQUERY(_i) (0x0010C420 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_GLCM_LANRLQUERY_MAX_INDEX 1
+#define I40E_GLCM_LANRLQUERY_RLINDEX_SHIFT 0
+#define I40E_GLCM_LANRLQUERY_RLINDEX_MASK I40E_MASK(0x3FF, I40E_GLCM_LANRLQUERY_RLINDEX_SHIFT)
+
+#define I40E_GLCM_LANRLSTAT(_i) (0x0010C428 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_GLCM_LANRLSTAT_MAX_INDEX 1
+#define I40E_GLCM_LANRLSTAT_QUERY_DONE_SHIFT 0
+#define I40E_GLCM_LANRLSTAT_QUERY_DONE_MASK I40E_MASK(0x1, I40E_GLCM_LANRLSTAT_QUERY_DONE_SHIFT)
+#define I40E_GLCM_LANRLSTAT_RL_EMPTY_SHIFT 1
+#define I40E_GLCM_LANRLSTAT_RL_EMPTY_MASK I40E_MASK(0x1, I40E_GLCM_LANRLSTAT_RL_EMPTY_SHIFT)
+
+#define I40E_GLCM_LANSNOOPREQHI 0x0010C468 /* Reset: CORER */
+#define I40E_GLCM_LANSNOOPREQHI_CMLANSNOOPREQHI_SHIFT 0
+#define I40E_GLCM_LANSNOOPREQHI_CMLANSNOOPREQHI_MASK I40E_MASK(0xFFFFFF, I40E_GLCM_LANSNOOPREQHI_CMLANSNOOPREQHI_SHIFT)
+
+#define I40E_GLCM_LANSNOOPREQLO 0x0010C464 /* Reset: CORER */
+#define I40E_GLCM_LANSNOOPREQLO_CMLANSNOOPREQLOW_SHIFT 0
+#define I40E_GLCM_LANSNOOPREQLO_CMLANSNOOPREQLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_GLCM_LANSNOOPREQLO_CMLANSNOOPREQLOW_SHIFT)
+
+#define I40E_GLCM_LANSNOOPSTALLHI 0x0010C470 /* Reset: CORER */
+#define I40E_GLCM_LANSNOOPSTALLHI_CMLANSNOOPSTALLHI_SHIFT 0
+#define I40E_GLCM_LANSNOOPSTALLHI_CMLANSNOOPSTALLHI_MASK I40E_MASK(0xFFFFFF, I40E_GLCM_LANSNOOPSTALLHI_CMLANSNOOPSTALLHI_SHIFT)
+
+#define I40E_GLCM_LANSNOOPSTALLLO 0x0010C46C /* Reset: CORER */
+#define I40E_GLCM_LANSNOOPSTALLLO_CMLANSNOOPSTALLLOW_SHIFT 0
+#define I40E_GLCM_LANSNOOPSTALLLO_CMLANSNOOPSTALLLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_GLCM_LANSNOOPSTALLLO_CMLANSNOOPSTALLLOW_SHIFT)
+
+/* PF - DCB Registers */
+
+#define I40E_GLDCB_PACER 0x000A2210 /* Reset: CORER */
+#define I40E_GLDCB_PACER_PACER_VAL_SHIFT 0
+#define I40E_GLDCB_PACER_PACER_VAL_MASK I40E_MASK(0x3FFFFFF, I40E_GLDCB_PACER_PACER_VAL_SHIFT)
+#define I40E_GLDCB_PACER_PACER_EN_SHIFT 31
+#define I40E_GLDCB_PACER_PACER_EN_MASK I40E_MASK(0x1, I40E_GLDCB_PACER_PACER_EN_SHIFT)
+
+#define I40E_GLDCB_PCI_DATA 0x000A0150 /* Reset: CORER */
+#define I40E_GLDCB_PCI_DATA_PCI_DATA_BC_SHIFT 0
+#define I40E_GLDCB_PCI_DATA_PCI_DATA_BC_MASK I40E_MASK(0xFFFFF, I40E_GLDCB_PCI_DATA_PCI_DATA_BC_SHIFT)
+
+#define I40E_GLDCB_RLLPC 0x0005105C /* Reset: CORER */
+#define I40E_GLDCB_RLLPC_LLMAXPCNT_SHIFT 0
+#define I40E_GLDCB_RLLPC_LLMAXPCNT_MASK I40E_MASK(0xFFFF, I40E_GLDCB_RLLPC_LLMAXPCNT_SHIFT)
+#define I40E_GLDCB_RLLPC_BMAXPCNT_SHIFT 16
+#define I40E_GLDCB_RLLPC_BMAXPCNT_MASK I40E_MASK(0xFFFF, I40E_GLDCB_RLLPC_BMAXPCNT_SHIFT)
+
+#define I40E_GLDCB_RLLPSB 0x00051054 /* Reset: CORER */
+#define I40E_GLDCB_RLLPSB_BPCNT_SHIFT 0
+#define I40E_GLDCB_RLLPSB_BPCNT_MASK I40E_MASK(0x3FFFFFF, I40E_GLDCB_RLLPSB_BPCNT_SHIFT)
+
+#define I40E_GLDCB_RLLPSLL 0x00051058 /* Reset: CORER */
+#define I40E_GLDCB_RLLPSLL_LLPCNT_SHIFT 0
+#define I40E_GLDCB_RLLPSLL_LLPCNT_MASK I40E_MASK(0x3FFFFFF, I40E_GLDCB_RLLPSLL_LLPCNT_SHIFT)
+
+#define I40E_GLDCB_RMPMC 0x00122610 /* Reset: CORER */
+#define I40E_GLDCB_RMPMC_RSPM_SHIFT 0
+#define I40E_GLDCB_RMPMC_RSPM_MASK I40E_MASK(0x3F, I40E_GLDCB_RMPMC_RSPM_SHIFT)
+#define I40E_GLDCB_RMPMC_MIQ_NODROP_MODE_SHIFT 6
+#define I40E_GLDCB_RMPMC_MIQ_NODROP_MODE_MASK I40E_MASK(0x1F, I40E_GLDCB_RMPMC_MIQ_NODROP_MODE_SHIFT)
+#define I40E_GLDCB_RMPMC_RPM_DIS_SHIFT 31
+#define I40E_GLDCB_RMPMC_RPM_DIS_MASK I40E_MASK(0x1, I40E_GLDCB_RMPMC_RPM_DIS_SHIFT)
+
+#define I40E_GLDCB_RMPMS 0x00122614 /* Reset: CORER */
+#define I40E_GLDCB_RMPMS_RMPM_SHIFT 0
+#define I40E_GLDCB_RMPMS_RMPM_MASK I40E_MASK(0xFFFF, I40E_GLDCB_RMPMS_RMPM_SHIFT)
+
+#define I40E_GLDCB_RPRRD0 0x00122608 /* Reset: CORER */
+#define I40E_GLDCB_RPRRD0_BWSHARE_40G_SHIFT 0
+#define I40E_GLDCB_RPRRD0_BWSHARE_40G_MASK I40E_MASK(0x3FF, I40E_GLDCB_RPRRD0_BWSHARE_40G_SHIFT)
+#define I40E_GLDCB_RPRRD0_BWSHARE_10G_SHIFT 16
+#define I40E_GLDCB_RPRRD0_BWSHARE_10G_MASK I40E_MASK(0x3FF, I40E_GLDCB_RPRRD0_BWSHARE_10G_SHIFT)
+
+#define I40E_GLDCB_RPRRD1 0x0012260C /* Reset: CORER */
+#define I40E_GLDCB_RPRRD1_BWSHARE_1G_SHIFT 0
+#define I40E_GLDCB_RPRRD1_BWSHARE_1G_MASK I40E_MASK(0x3FF, I40E_GLDCB_RPRRD1_BWSHARE_1G_SHIFT)
+#define I40E_GLDCB_RPRRD1_BWSHARE_100M_SHIFT 16
+#define I40E_GLDCB_RPRRD1_BWSHARE_100M_MASK I40E_MASK(0x3FF, I40E_GLDCB_RPRRD1_BWSHARE_100M_SHIFT)
+
+#define I40E_GLDCB_RSPMC 0x00122604 /* Reset: CORER */
+#define I40E_GLDCB_RSPMC_RSPM_SHIFT 0
+#define I40E_GLDCB_RSPMC_RSPM_MASK I40E_MASK(0xFF, I40E_GLDCB_RSPMC_RSPM_SHIFT)
+#define I40E_GLDCB_RSPMC_RPM_MODE_SHIFT 8
+#define I40E_GLDCB_RSPMC_RPM_MODE_MASK I40E_MASK(0x3, I40E_GLDCB_RSPMC_RPM_MODE_SHIFT)
+#define I40E_GLDCB_RSPMC_PRR_MAX_EXP_SHIFT 10
+#define I40E_GLDCB_RSPMC_PRR_MAX_EXP_MASK I40E_MASK(0xF, I40E_GLDCB_RSPMC_PRR_MAX_EXP_SHIFT)
+#define I40E_GLDCB_RSPMC_PFCTIMER_SHIFT 14
+#define I40E_GLDCB_RSPMC_PFCTIMER_MASK I40E_MASK(0x3FFF, I40E_GLDCB_RSPMC_PFCTIMER_SHIFT)
+#define I40E_GLDCB_RSPMC_RPM_DIS_SHIFT 31
+#define I40E_GLDCB_RSPMC_RPM_DIS_MASK I40E_MASK(0x1, I40E_GLDCB_RSPMC_RPM_DIS_SHIFT)
+
+#define I40E_GLDCB_RSPMS 0x00122600 /* Reset: CORER */
+#define I40E_GLDCB_RSPMS_RSPM_SHIFT 0
+#define I40E_GLDCB_RSPMS_RSPM_MASK I40E_MASK(0x3FFFF, I40E_GLDCB_RSPMS_RSPM_SHIFT)
+
+#define I40E_GLDCB_TFPFCI 0x00098080 /* Reset: CORER */
+#define I40E_GLDCB_TFPFCI_IGNORE_FC_SHIFT 0
+#define I40E_GLDCB_TFPFCI_IGNORE_FC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLDCB_TFPFCI_IGNORE_FC_SHIFT)
+
+#define I40E_GLDCB_TGENC_TLPM 0x000A01C0 /* Reset: CORER */
+#define I40E_GLDCB_TGENC_TLPM_ALLOFFTH_SHIFT 0
+#define I40E_GLDCB_TGENC_TLPM_ALLOFFTH_MASK I40E_MASK(0xFF, I40E_GLDCB_TGENC_TLPM_ALLOFFTH_SHIFT)
+#define I40E_GLDCB_TGENC_TLPM_SHARED_TDATATH_SHIFT 8
+#define I40E_GLDCB_TGENC_TLPM_SHARED_TDATATH_MASK I40E_MASK(0xFF, I40E_GLDCB_TGENC_TLPM_SHARED_TDATATH_SHIFT)
+#define I40E_GLDCB_TGENC_TLPM_SHARED_TDATATH_EN_SHIFT 29
+#define I40E_GLDCB_TGENC_TLPM_SHARED_TDATATH_EN_MASK I40E_MASK(0x1, I40E_GLDCB_TGENC_TLPM_SHARED_TDATATH_EN_SHIFT)
+#define I40E_GLDCB_TGENC_TLPM_TFPM_DIS_SHIFT 30
+#define I40E_GLDCB_TGENC_TLPM_TFPM_DIS_MASK I40E_MASK(0x1, I40E_GLDCB_TGENC_TLPM_TFPM_DIS_SHIFT)
+#define I40E_GLDCB_TGENC_TLPM_FWLB_MODE_SHIFT 31
+#define I40E_GLDCB_TGENC_TLPM_FWLB_MODE_MASK I40E_MASK(0x1, I40E_GLDCB_TGENC_TLPM_FWLB_MODE_SHIFT)
+
+#define I40E_GLDCB_TGENC_TUPM 0x000A2200 /* Reset: CORER */
+#define I40E_GLDCB_TGENC_TUPM_ALLOFFTH_SHIFT 0
+#define I40E_GLDCB_TGENC_TUPM_ALLOFFTH_MASK I40E_MASK(0x1FFF, I40E_GLDCB_TGENC_TUPM_ALLOFFTH_SHIFT)
+#define I40E_GLDCB_TGENC_TUPM_TCPM_DIS_SHIFT 30
+#define I40E_GLDCB_TGENC_TUPM_TCPM_DIS_MASK I40E_MASK(0x1, I40E_GLDCB_TGENC_TUPM_TCPM_DIS_SHIFT)
+#define I40E_GLDCB_TGENC_TUPM_CWLB_MODE_SHIFT 31
+#define I40E_GLDCB_TGENC_TUPM_CWLB_MODE_MASK I40E_MASK(0x1, I40E_GLDCB_TGENC_TUPM_CWLB_MODE_SHIFT)
+
+#define I40E_PRTDCB_FCAH 0x001E24C0 /* Reset: GLOBR */
+#define I40E_PRTDCB_FCAH_PFCAH_SHIFT 0
+#define I40E_PRTDCB_FCAH_PFCAH_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCAH_PFCAH_SHIFT)
+
+#define I40E_PRTDCB_FCAL 0x001E24A0 /* Reset: GLOBR */
+#define I40E_PRTDCB_FCAL_PFCAL_SHIFT 0
+#define I40E_PRTDCB_FCAL_PFCAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTDCB_FCAL_PFCAL_SHIFT)
+
+#define I40E_PRTDCB_RETSTCS(_i) (0x001222A0 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_RETSTCS_MAX_INDEX 7
+#define I40E_PRTDCB_RETSTCS_CREDITS_SHIFT 0
+#define I40E_PRTDCB_RETSTCS_CREDITS_MASK I40E_MASK(0x1FFFFFFF, I40E_PRTDCB_RETSTCS_CREDITS_SHIFT)
+
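Registers declared with an (_i) parameter, such as I40E_PRTDCB_RETSTCS above, are register arrays: the macro expands to a base offset plus a fixed per-element stride (32 bytes here), and the companion _MAX_INDEX constant bounds the legal index. A minimal sketch of computing one element's offset, assuming the defines above are in scope; the helper name is illustrative only and not taken from the driver:

	#include <assert.h>
	#include <stdint.h>

	/* CSR offset of the ETS credit register for one traffic class. */
	static inline uint32_t
	retstcs_offset(unsigned int tc)
	{
		assert(tc <= I40E_PRTDCB_RETSTCS_MAX_INDEX);	/* _i=0...7 */
		return (I40E_PRTDCB_RETSTCS(tc));
	}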
+#define I40E_PRTDCB_RLANPMS 0x001223C0 /* Reset: CORER */
+#define I40E_PRTDCB_RLANPMS_LANRPPM_SHIFT 0
+#define I40E_PRTDCB_RLANPMS_LANRPPM_MASK I40E_MASK(0x3FFFF, I40E_PRTDCB_RLANPMS_LANRPPM_SHIFT)
+
+#define I40E_PRTDCB_RPFCTOP 0x001E2480 /* Reset: GLOBR */
+#define I40E_PRTDCB_RPFCTOP_PFCTYPE_SHIFT 0
+#define I40E_PRTDCB_RPFCTOP_PFCTYPE_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_RPFCTOP_PFCTYPE_SHIFT)
+#define I40E_PRTDCB_RPFCTOP_PFCOPCODE_SHIFT 16
+#define I40E_PRTDCB_RPFCTOP_PFCOPCODE_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_RPFCTOP_PFCOPCODE_SHIFT)
+
+#define I40E_PRTDCB_RPRRC 0x00122100 /* Reset: CORER */
+#define I40E_PRTDCB_RPRRC_BWSHARE_SHIFT 0
+#define I40E_PRTDCB_RPRRC_BWSHARE_MASK I40E_MASK(0x3FF, I40E_PRTDCB_RPRRC_BWSHARE_SHIFT)
+
+#define I40E_PRTDCB_RPRRS 0x00122120 /* Reset: CORER */
+#define I40E_PRTDCB_RPRRS_CREDITS_SHIFT 0
+#define I40E_PRTDCB_RPRRS_CREDITS_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTDCB_RPRRS_CREDITS_SHIFT)
+
+#define I40E_PRTDCB_RRDMAPMS 0x00122160 /* Reset: CORER */
+#define I40E_PRTDCB_RRDMAPMS_RDMARPPM_SHIFT 0
+#define I40E_PRTDCB_RRDMAPMS_RDMARPPM_MASK I40E_MASK(0x3FFFF, I40E_PRTDCB_RRDMAPMS_RDMARPPM_SHIFT)
+
+#define I40E_PRTDCB_RUP_PPRS 0x000844C0 /* Reset: CORER */
+#define I40E_PRTDCB_RUP_PPRS_NOVLANUP_SHIFT 0
+#define I40E_PRTDCB_RUP_PPRS_NOVLANUP_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP_PPRS_NOVLANUP_SHIFT)
+
+#define I40E_PRTDCB_RUP_TDPU 0x00044120 /* Reset: CORER */
+#define I40E_PRTDCB_RUP_TDPU_NOVLANUP_SHIFT 0
+#define I40E_PRTDCB_RUP_TDPU_NOVLANUP_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP_TDPU_NOVLANUP_SHIFT)
+
+#define I40E_PRTDCB_RUP2TC_RCB 0x00122280 /* Reset: CORER */
+#define I40E_PRTDCB_RUP2TC_RCB_UP0TC_SHIFT 0
+#define I40E_PRTDCB_RUP2TC_RCB_UP0TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_RCB_UP0TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_RCB_UP1TC_SHIFT 3
+#define I40E_PRTDCB_RUP2TC_RCB_UP1TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_RCB_UP1TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_RCB_UP2TC_SHIFT 6
+#define I40E_PRTDCB_RUP2TC_RCB_UP2TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_RCB_UP2TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_RCB_UP3TC_SHIFT 9
+#define I40E_PRTDCB_RUP2TC_RCB_UP3TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_RCB_UP3TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_RCB_UP4TC_SHIFT 12
+#define I40E_PRTDCB_RUP2TC_RCB_UP4TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_RCB_UP4TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_RCB_UP5TC_SHIFT 15
+#define I40E_PRTDCB_RUP2TC_RCB_UP5TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_RCB_UP5TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_RCB_UP6TC_SHIFT 18
+#define I40E_PRTDCB_RUP2TC_RCB_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_RCB_UP6TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_RCB_UP7TC_SHIFT 21
+#define I40E_PRTDCB_RUP2TC_RCB_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_RCB_UP7TC_SHIFT)
+
+#define I40E_PRTDCB_RUPTQ(_i) (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_RUPTQ_MAX_INDEX 7
+#define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0
+#define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT)
+
+#define I40E_PRTDCB_RUPTS(_i) (0x00122500 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_RUPTS_MAX_INDEX 7
+#define I40E_PRTDCB_RUPTS_PFCTIMER_SHIFT 0
+#define I40E_PRTDCB_RUPTS_PFCTIMER_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTS_PFCTIMER_SHIFT)
+
+#define I40E_PRTDCB_TC2PFC_RCB 0x00122140 /* Reset: CORER */
+#define I40E_PRTDCB_TC2PFC_RCB_TC2PFC_SHIFT 0
+#define I40E_PRTDCB_TC2PFC_RCB_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_RCB_TC2PFC_SHIFT)
+
+#define I40E_PRTDCB_TCLLPC 0x000AE000 /* Reset: CORER */
+#define I40E_PRTDCB_TCLLPC_LLMAXPCNT_SHIFT 0
+#define I40E_PRTDCB_TCLLPC_LLMAXPCNT_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_TCLLPC_LLMAXPCNT_SHIFT)
+#define I40E_PRTDCB_TCLLPC_BMAXPCNT_SHIFT 16
+#define I40E_PRTDCB_TCLLPC_BMAXPCNT_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_TCLLPC_BMAXPCNT_SHIFT)
+
+#define I40E_PRTDCB_TCLLPSB 0x000AE020 /* Reset: CORER */
+#define I40E_PRTDCB_TCLLPSB_BPCNT_SHIFT 0
+#define I40E_PRTDCB_TCLLPSB_BPCNT_MASK I40E_MASK(0x3FFFFFF, I40E_PRTDCB_TCLLPSB_BPCNT_SHIFT)
+
+#define I40E_PRTDCB_TCLLPSLL 0x000AE040 /* Reset: CORER */
+#define I40E_PRTDCB_TCLLPSLL_LLPCNT_SHIFT 0
+#define I40E_PRTDCB_TCLLPSLL_LLPCNT_MASK I40E_MASK(0x3FFFFFF, I40E_PRTDCB_TCLLPSLL_LLPCNT_SHIFT)
+
+#define I40E_PRTDCB_TCPFCPC 0x000A21C0 /* Reset: CORER */
+#define I40E_PRTDCB_TCPFCPC_PORTOFFTH_SHIFT 0
+#define I40E_PRTDCB_TCPFCPC_PORTOFFTH_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPFCPC_PORTOFFTH_SHIFT)
+
+#define I40E_PRTDCB_TCPFCTCC 0x000A21E0 /* Reset: CORER */
+#define I40E_PRTDCB_TCPFCTCC_TCOFFTH_SHIFT 0
+#define I40E_PRTDCB_TCPFCTCC_TCOFFTH_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPFCTCC_TCOFFTH_SHIFT)
+#define I40E_PRTDCB_TCPFCTCC_LL_PRI_TRESH_SHIFT 13
+#define I40E_PRTDCB_TCPFCTCC_LL_PRI_TRESH_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPFCTCC_LL_PRI_TRESH_SHIFT)
+#define I40E_PRTDCB_TCPFCTCC_LL_PRI_EN_SHIFT 31
+#define I40E_PRTDCB_TCPFCTCC_LL_PRI_EN_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPFCTCC_LL_PRI_EN_SHIFT)
+
+#define I40E_PRTDCB_TCWSP 0x000A2160 /* Reset: CORER */
+#define I40E_PRTDCB_TCWSP_WSPORT_SHIFT 0
+#define I40E_PRTDCB_TCWSP_WSPORT_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCWSP_WSPORT_SHIFT)
+
+#define I40E_PRTDCB_TDPMS 0x000A0000 /* Reset: CORER */
+#define I40E_PRTDCB_TDPMS_DPM_SHIFT 0
+#define I40E_PRTDCB_TDPMS_DPM_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TDPMS_DPM_SHIFT)
+
+#define I40E_PRTDCB_TDPUC 0x00044100 /* Reset: CORER */
+#define I40E_PRTDCB_TDPUC_MAX_TXFRAME_SHIFT 0
+#define I40E_PRTDCB_TDPUC_MAX_TXFRAME_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_TDPUC_MAX_TXFRAME_SHIFT)
+#define I40E_PRTDCB_TDPUC_MAL_LENGTH_SHIFT 16
+#define I40E_PRTDCB_TDPUC_MAL_LENGTH_MASK I40E_MASK(0x1, I40E_PRTDCB_TDPUC_MAL_LENGTH_SHIFT)
+#define I40E_PRTDCB_TDPUC_MAL_CMD_SHIFT 17
+#define I40E_PRTDCB_TDPUC_MAL_CMD_MASK I40E_MASK(0x1, I40E_PRTDCB_TDPUC_MAL_CMD_SHIFT)
+#define I40E_PRTDCB_TDPUC_TTL_DROP_SHIFT 18
+#define I40E_PRTDCB_TDPUC_TTL_DROP_MASK I40E_MASK(0x1, I40E_PRTDCB_TDPUC_TTL_DROP_SHIFT)
+#define I40E_PRTDCB_TDPUC_UR_DROP_SHIFT 19
+#define I40E_PRTDCB_TDPUC_UR_DROP_MASK I40E_MASK(0x1, I40E_PRTDCB_TDPUC_UR_DROP_SHIFT)
+#define I40E_PRTDCB_TDPUC_CLEAR_DROP_SHIFT 31
+#define I40E_PRTDCB_TDPUC_CLEAR_DROP_MASK I40E_MASK(0x1, I40E_PRTDCB_TDPUC_CLEAR_DROP_SHIFT)
+
+#define I40E_PRTDCB_TFLLPC 0x00098000 /* Reset: CORER */
+#define I40E_PRTDCB_TFLLPC_LLMAXPCNT_SHIFT 0
+#define I40E_PRTDCB_TFLLPC_LLMAXPCNT_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_TFLLPC_LLMAXPCNT_SHIFT)
+#define I40E_PRTDCB_TFLLPC_BMAXPCNT_SHIFT 16
+#define I40E_PRTDCB_TFLLPC_BMAXPCNT_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_TFLLPC_BMAXPCNT_SHIFT)
+
+#define I40E_PRTDCB_TFLLPSB 0x00098020 /* Reset: CORER */
+#define I40E_PRTDCB_TFLLPSB_BPCNT_SHIFT 0
+#define I40E_PRTDCB_TFLLPSB_BPCNT_MASK I40E_MASK(0x3FFFFFF, I40E_PRTDCB_TFLLPSB_BPCNT_SHIFT)
+
+#define I40E_PRTDCB_TFLLPSLL 0x00098040 /* Reset: CORER */
+#define I40E_PRTDCB_TFLLPSLL_LLPCNT_SHIFT 0
+#define I40E_PRTDCB_TFLLPSLL_LLPCNT_MASK I40E_MASK(0x3FFFFFF, I40E_PRTDCB_TFLLPSLL_LLPCNT_SHIFT)
+
+#define I40E_PRTDCB_TFPFCC 0x000A01A0 /* Reset: CORER */
+#define I40E_PRTDCB_TFPFCC_PORTOFFTH_SHIFT 0
+#define I40E_PRTDCB_TFPFCC_PORTOFFTH_MASK I40E_MASK(0xFF, I40E_PRTDCB_TFPFCC_PORTOFFTH_SHIFT)
+#define I40E_PRTDCB_TFPFCC_TCOFFTH_SHIFT 8
+#define I40E_PRTDCB_TFPFCC_TCOFFTH_MASK I40E_MASK(0xFF, I40E_PRTDCB_TFPFCC_TCOFFTH_SHIFT)
+
+#define I40E_PRTDCB_TFWSP 0x000A0140 /* Reset: CORER */
+#define I40E_PRTDCB_TFWSP_WSPORT_SHIFT 0
+#define I40E_PRTDCB_TFWSP_WSPORT_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TFWSP_WSPORT_SHIFT)
+
+#define I40E_PRTDCB_TLANCPMS 0x000A2020 /* Reset: CORER */
+#define I40E_PRTDCB_TLANCPMS_LANCPM_SHIFT 0
+#define I40E_PRTDCB_TLANCPMS_LANCPM_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TLANCPMS_LANCPM_SHIFT)
+
+#define I40E_PRTDCB_TLPMC 0x000A0160 /* Reset: CORER */
+#define I40E_PRTDCB_TLPMC_TC2PFC_SHIFT 0
+#define I40E_PRTDCB_TLPMC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TLPMC_TC2PFC_SHIFT)
+
+#define I40E_PRTDCB_TPFCTOP 0x001E4540 /* Reset: GLOBR */
+#define I40E_PRTDCB_TPFCTOP_PFCTYPE_SHIFT 0
+#define I40E_PRTDCB_TPFCTOP_PFCTYPE_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_TPFCTOP_PFCTYPE_SHIFT)
+#define I40E_PRTDCB_TPFCTOP_PFCOPCODE_SHIFT 16
+#define I40E_PRTDCB_TPFCTOP_PFCOPCODE_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_TPFCTOP_PFCOPCODE_SHIFT)
+
+#define I40E_PRTDCB_TRDMACPMS 0x000A2000 /* Reset: CORER */
+#define I40E_PRTDCB_TRDMACPMS_RDMACPM_SHIFT 0
+#define I40E_PRTDCB_TRDMACPMS_RDMACPM_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TRDMACPMS_RDMACPM_SHIFT)
+
+#define I40E_PRTDCB_TUP2TC 0x001E4620 /* Reset: GLOBR */
+#define I40E_PRTDCB_TUP2TC_UP0TC_SHIFT 0
+#define I40E_PRTDCB_TUP2TC_UP0TC_MASK I40E_MASK(0x7, I40E_PRTDCB_TUP2TC_UP0TC_SHIFT)
+#define I40E_PRTDCB_TUP2TC_UP1TC_SHIFT 3
+#define I40E_PRTDCB_TUP2TC_UP1TC_MASK I40E_MASK(0x7, I40E_PRTDCB_TUP2TC_UP1TC_SHIFT)
+#define I40E_PRTDCB_TUP2TC_UP2TC_SHIFT 6
+#define I40E_PRTDCB_TUP2TC_UP2TC_MASK I40E_MASK(0x7, I40E_PRTDCB_TUP2TC_UP2TC_SHIFT)
+#define I40E_PRTDCB_TUP2TC_UP3TC_SHIFT 9
+#define I40E_PRTDCB_TUP2TC_UP3TC_MASK I40E_MASK(0x7, I40E_PRTDCB_TUP2TC_UP3TC_SHIFT)
+#define I40E_PRTDCB_TUP2TC_UP4TC_SHIFT 12
+#define I40E_PRTDCB_TUP2TC_UP4TC_MASK I40E_MASK(0x7, I40E_PRTDCB_TUP2TC_UP4TC_SHIFT)
+#define I40E_PRTDCB_TUP2TC_UP5TC_SHIFT 15
+#define I40E_PRTDCB_TUP2TC_UP5TC_MASK I40E_MASK(0x7, I40E_PRTDCB_TUP2TC_UP5TC_SHIFT)
+#define I40E_PRTDCB_TUP2TC_UP6TC_SHIFT 18
+#define I40E_PRTDCB_TUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_TUP2TC_UP6TC_SHIFT)
+#define I40E_PRTDCB_TUP2TC_UP7TC_SHIFT 21
+#define I40E_PRTDCB_TUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_TUP2TC_UP7TC_SHIFT)
+
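Each multi-bit field in these registers is described by a *_SHIFT constant plus a *_MASK built with I40E_MASK(), so updating a field is the usual clear-then-OR read-modify-write on the 32-bit register word. A minimal sketch on a TUP2TC register image, assuming the defines above are in scope; the function name is hypothetical and the MMIO write-back is left to whatever accessor the driver provides:

	#include <stdint.h>

	/* Set the UP3 -> TC mapping inside a TUP2TC register image. */
	static inline uint32_t
	tup2tc_set_up3(uint32_t regval, uint32_t tc)
	{
		regval &= ~(uint32_t)I40E_PRTDCB_TUP2TC_UP3TC_MASK;
		regval |= (tc << I40E_PRTDCB_TUP2TC_UP3TC_SHIFT) &
		    I40E_PRTDCB_TUP2TC_UP3TC_MASK;
		return (regval);
	}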
+#define I40E_PRTDCB_TUPMC 0x000A2140 /* Reset: CORER */
+#define I40E_PRTDCB_TUPMC_TC2PFC_SHIFT 0
+#define I40E_PRTDCB_TUPMC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TUPMC_TC2PFC_SHIFT)
+
+/* PF - FCoE Registers */
+
+#define I40E_GLFCOE_ENA 0x001C0A28 /* Reset: CORER */
+#define I40E_GLFCOE_ENA_FCOE_ENA_SHIFT 0
+#define I40E_GLFCOE_ENA_FCOE_ENA_MASK I40E_MASK(0x1, I40E_GLFCOE_ENA_FCOE_ENA_SHIFT)
+
+#define I40E_GLFCOE_ENA_TDPU 0x000442E4 /* Reset: CORER */
+#define I40E_GLFCOE_ENA_TDPU_FCOE_ENA_SHIFT 0
+#define I40E_GLFCOE_ENA_TDPU_FCOE_ENA_MASK I40E_MASK(0x1, I40E_GLFCOE_ENA_TDPU_FCOE_ENA_SHIFT)
+
+#define I40E_GLFCOE_ENA_TLAN 0x000E6484 /* Reset: CORER */
+#define I40E_GLFCOE_ENA_TLAN_FCOE_ENA_SHIFT 0
+#define I40E_GLFCOE_ENA_TLAN_FCOE_ENA_MASK I40E_MASK(0x1, I40E_GLFCOE_ENA_TLAN_FCOE_ENA_SHIFT)
+
+#define I40E_GLFCOE_RLANCTL 0x0012A508 /* Reset: CORER */
+#define I40E_GLFCOE_RLANCTL_FRSTDDPH_SHIFT 1
+#define I40E_GLFCOE_RLANCTL_FRSTDDPH_MASK I40E_MASK(0x1, I40E_GLFCOE_RLANCTL_FRSTDDPH_SHIFT)
+#define I40E_GLFCOE_RLANCTL_ALLH_SHIFT 3
+#define I40E_GLFCOE_RLANCTL_ALLH_MASK I40E_MASK(0x1, I40E_GLFCOE_RLANCTL_ALLH_SHIFT)
+
+#define I40E_GLFCOE_RSOF 0x00269B9C /* Reset: CORER */
+#define I40E_GLFCOE_RSOF_SOF_I2_SHIFT 0
+#define I40E_GLFCOE_RSOF_SOF_I2_MASK I40E_MASK(0xFF, I40E_GLFCOE_RSOF_SOF_I2_SHIFT)
+#define I40E_GLFCOE_RSOF_SOF_I3_SHIFT 8
+#define I40E_GLFCOE_RSOF_SOF_I3_MASK I40E_MASK(0xFF, I40E_GLFCOE_RSOF_SOF_I3_SHIFT)
+#define I40E_GLFCOE_RSOF_SOF_N2_SHIFT 16
+#define I40E_GLFCOE_RSOF_SOF_N2_MASK I40E_MASK(0xFF, I40E_GLFCOE_RSOF_SOF_N2_SHIFT)
+#define I40E_GLFCOE_RSOF_SOF_N3_SHIFT 24
+#define I40E_GLFCOE_RSOF_SOF_N3_MASK I40E_MASK(0xFF, I40E_GLFCOE_RSOF_SOF_N3_SHIFT)
+
+#define I40E_GLFCOE_TEOF 0x000442EC /* Reset: CORER */
+#define I40E_GLFCOE_TEOF_EOF_N_SHIFT 0
+#define I40E_GLFCOE_TEOF_EOF_N_MASK I40E_MASK(0xFF, I40E_GLFCOE_TEOF_EOF_N_SHIFT)
+#define I40E_GLFCOE_TEOF_EOF_T_SHIFT 8
+#define I40E_GLFCOE_TEOF_EOF_T_MASK I40E_MASK(0xFF, I40E_GLFCOE_TEOF_EOF_T_SHIFT)
+#define I40E_GLFCOE_TEOF_EOF_NI_SHIFT 16
+#define I40E_GLFCOE_TEOF_EOF_NI_MASK I40E_MASK(0xFF, I40E_GLFCOE_TEOF_EOF_NI_SHIFT)
+#define I40E_GLFCOE_TEOF_EOF_A_SHIFT 24
+#define I40E_GLFCOE_TEOF_EOF_A_MASK I40E_MASK(0xFF, I40E_GLFCOE_TEOF_EOF_A_SHIFT)
+
+#define I40E_GLFCOE_TSOF 0x000442E8 /* Reset: CORER */
+#define I40E_GLFCOE_TSOF_SOF_I2_SHIFT 0
+#define I40E_GLFCOE_TSOF_SOF_I2_MASK I40E_MASK(0xFF, I40E_GLFCOE_TSOF_SOF_I2_SHIFT)
+#define I40E_GLFCOE_TSOF_SOF_I3_SHIFT 8
+#define I40E_GLFCOE_TSOF_SOF_I3_MASK I40E_MASK(0xFF, I40E_GLFCOE_TSOF_SOF_I3_SHIFT)
+#define I40E_GLFCOE_TSOF_SOF_N2_SHIFT 16
+#define I40E_GLFCOE_TSOF_SOF_N2_MASK I40E_MASK(0xFF, I40E_GLFCOE_TSOF_SOF_N2_SHIFT)
+#define I40E_GLFCOE_TSOF_SOF_N3_SHIFT 24
+#define I40E_GLFCOE_TSOF_SOF_N3_MASK I40E_MASK(0xFF, I40E_GLFCOE_TSOF_SOF_N3_SHIFT)
+
+#define I40E_PRTFCOE_REOF 0x000856A0 /* Reset: CORER */
+#define I40E_PRTFCOE_REOF_EOF_N_SHIFT 0
+#define I40E_PRTFCOE_REOF_EOF_N_MASK I40E_MASK(0xFF, I40E_PRTFCOE_REOF_EOF_N_SHIFT)
+#define I40E_PRTFCOE_REOF_EOF_T_SHIFT 8
+#define I40E_PRTFCOE_REOF_EOF_T_MASK I40E_MASK(0xFF, I40E_PRTFCOE_REOF_EOF_T_SHIFT)
+#define I40E_PRTFCOE_REOF_EOF_NI_SHIFT 16
+#define I40E_PRTFCOE_REOF_EOF_NI_MASK I40E_MASK(0xFF, I40E_PRTFCOE_REOF_EOF_NI_SHIFT)
+#define I40E_PRTFCOE_REOF_EOF_A_SHIFT 24
+#define I40E_PRTFCOE_REOF_EOF_A_MASK I40E_MASK(0xFF, I40E_PRTFCOE_REOF_EOF_A_SHIFT)
+
+/* PF - General Registers */
+
+#define I40E_ECC_ENA 0x00092630 /* Reset: CORER */
+#define I40E_ECC_ENA_ECC_ENA_SHIFT 0
+#define I40E_ECC_ENA_ECC_ENA_MASK I40E_MASK(0x1, I40E_ECC_ENA_ECC_ENA_SHIFT)
+
+#define I40E_GLGEN_CSR_DEBUG_C 0x00078E8C /* Reset: POR */
+#define I40E_GLGEN_CSR_DEBUG_C_CSR_ACCESS_EN_SHIFT 0
+#define I40E_GLGEN_CSR_DEBUG_C_CSR_ACCESS_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CSR_DEBUG_C_CSR_ACCESS_EN_SHIFT)
+#define I40E_GLGEN_CSR_DEBUG_C_CSR_ADDR_PROT_SHIFT 1
+#define I40E_GLGEN_CSR_DEBUG_C_CSR_ADDR_PROT_MASK I40E_MASK(0x1, I40E_GLGEN_CSR_DEBUG_C_CSR_ADDR_PROT_SHIFT)
+
+#define I40E_GLGEN_CSR_DEBUG_F 0x000B6138 /* Reset: POR */
+#define I40E_GLGEN_CSR_DEBUG_F_CSR_PROT_EN_SHIFT 0
+#define I40E_GLGEN_CSR_DEBUG_F_CSR_PROT_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CSR_DEBUG_F_CSR_PROT_EN_SHIFT)
+
+#define I40E_GLGEN_DUAL40 0x001C0A6C /* Reset: CORER */
+#define I40E_GLGEN_DUAL40_DUAL_40G_MODE_SHIFT 0
+#define I40E_GLGEN_DUAL40_DUAL_40G_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_DUAL40_DUAL_40G_MODE_SHIFT)
+
+#define I40E_GLGEN_DUAL40_RPB 0x000AC7E0 /* Reset: CORER */
+#define I40E_GLGEN_DUAL40_RPB_DUAL_40G_MODE_SHIFT 0
+#define I40E_GLGEN_DUAL40_RPB_DUAL_40G_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_DUAL40_RPB_DUAL_40G_MODE_SHIFT)
+
+#define I40E_GLGEN_DUAL40_TLPM 0x000A01C4 /* Reset: CORER */
+#define I40E_GLGEN_DUAL40_TLPM_DUAL_40G_MODE_SHIFT 0
+#define I40E_GLGEN_DUAL40_TLPM_DUAL_40G_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_DUAL40_TLPM_DUAL_40G_MODE_SHIFT)
+
+#define I40E_GLGEN_DUAL40_TUPM 0x000A2204 /* Reset: CORER */
+#define I40E_GLGEN_DUAL40_TUPM_DUAL_40G_MODE_SHIFT 0
+#define I40E_GLGEN_DUAL40_TUPM_DUAL_40G_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_DUAL40_TUPM_DUAL_40G_MODE_SHIFT)
+
+#define I40E_GLGEN_FWHWRCTRL 0x00092610 /* Reset: CORER */
+#define I40E_GLGEN_FWHWRCTRL_PF_ENA_RST_DONE_SHIFT 0
+#define I40E_GLGEN_FWHWRCTRL_PF_ENA_RST_DONE_MASK I40E_MASK(0x1, I40E_GLGEN_FWHWRCTRL_PF_ENA_RST_DONE_SHIFT)
+#define I40E_GLGEN_FWHWRCTRL_VF_ENA_RST_DONE_SHIFT 1
+#define I40E_GLGEN_FWHWRCTRL_VF_ENA_RST_DONE_MASK I40E_MASK(0x1, I40E_GLGEN_FWHWRCTRL_VF_ENA_RST_DONE_SHIFT)
+#define I40E_GLGEN_FWHWRCTRL_VM_ENA_RST_DONE_SHIFT 2
+#define I40E_GLGEN_FWHWRCTRL_VM_ENA_RST_DONE_MASK I40E_MASK(0x1, I40E_GLGEN_FWHWRCTRL_VM_ENA_RST_DONE_SHIFT)
+#define I40E_GLGEN_FWHWRCTRL_PE_CPL_EN_SHIFT 31
+#define I40E_GLGEN_FWHWRCTRL_PE_CPL_EN_MASK I40E_MASK(0x1, I40E_GLGEN_FWHWRCTRL_PE_CPL_EN_SHIFT)
+
+#define I40E_GLGEN_IMRTRIG 0x000B8194 /* Reset: CORER */
+#define I40E_GLGEN_IMRTRIG_CORER_SHIFT 0
+#define I40E_GLGEN_IMRTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_IMRTRIG_CORER_SHIFT)
+#define I40E_GLGEN_IMRTRIG_GLOBR_SHIFT 1
+#define I40E_GLGEN_IMRTRIG_GLOBR_MASK I40E_MASK(0x1, I40E_GLGEN_IMRTRIG_GLOBR_SHIFT)
+#define I40E_GLGEN_IMRTRIG_EMPFWR_SHIFT 2
+#define I40E_GLGEN_IMRTRIG_EMPFWR_MASK I40E_MASK(0x1, I40E_GLGEN_IMRTRIG_EMPFWR_SHIFT)
+
+#define I40E_GLGEN_MISC_CONFIG 0x000B81A4 /* Reset: POR */
+#define I40E_GLGEN_MISC_CONFIG_SINGLE_10G_PORT_SELECT_SHIFT 0
+#define I40E_GLGEN_MISC_CONFIG_SINGLE_10G_PORT_SELECT_MASK I40E_MASK(0x1, I40E_GLGEN_MISC_CONFIG_SINGLE_10G_PORT_SELECT_SHIFT)
+
+#define I40E_GLGEN_PCIFCNCNT_CSR 0x00078E84 /* Reset: PCIR */
+#define I40E_GLGEN_PCIFCNCNT_CSR_PCIPFCNT_SHIFT 0
+#define I40E_GLGEN_PCIFCNCNT_CSR_PCIPFCNT_MASK I40E_MASK(0x1F, I40E_GLGEN_PCIFCNCNT_CSR_PCIPFCNT_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT_CSR_PCIVFCNT_SHIFT 16
+#define I40E_GLGEN_PCIFCNCNT_CSR_PCIVFCNT_MASK I40E_MASK(0xFF, I40E_GLGEN_PCIFCNCNT_CSR_PCIVFCNT_SHIFT)
+
+#define I40E_GLGEN_PCIFCNCNT_INT 0x0003F840 /* Reset: CORER */
+#define I40E_GLGEN_PCIFCNCNT_INT_PCIPFCNT_SHIFT 0
+#define I40E_GLGEN_PCIFCNCNT_INT_PCIPFCNT_MASK I40E_MASK(0x1F, I40E_GLGEN_PCIFCNCNT_INT_PCIPFCNT_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT_INT_PCIVFCNT_SHIFT 16
+#define I40E_GLGEN_PCIFCNCNT_INT_PCIVFCNT_MASK I40E_MASK(0xFF, I40E_GLGEN_PCIFCNCNT_INT_PCIVFCNT_SHIFT)
+
+#define I40E_GLGEN_PE_ENA 0x000B81A0 /* Reset: POR */
+#define I40E_GLGEN_PE_ENA_PE_ENA_SHIFT 0
+#define I40E_GLGEN_PE_ENA_PE_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_PE_ENA_PE_ENA_SHIFT)
+#define I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_SHIFT 1
+#define I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_MASK I40E_MASK(0x3, I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_SHIFT)
+
+#define I40E_GLGEN_PF_ACC_TO 0x00078E88 /* Reset: POR */
+#define I40E_GLGEN_PF_ACC_TO_PF_ACC_TO_SHIFT 0
+#define I40E_GLGEN_PF_ACC_TO_PF_ACC_TO_MASK I40E_MASK(0xFFFF, I40E_GLGEN_PF_ACC_TO_PF_ACC_TO_SHIFT)
+
+#define I40E_GLGEN_RSTSTAT_REQ 0x00092620 /* Reset: CORER */
+#define I40E_GLGEN_RSTSTAT_REQ_RST_INDEX_SHIFT 0
+#define I40E_GLGEN_RSTSTAT_REQ_RST_INDEX_MASK I40E_MASK(0x1FF, I40E_GLGEN_RSTSTAT_REQ_RST_INDEX_SHIFT)
+#define I40E_GLGEN_RSTSTAT_REQ_RST_TYPE_SHIFT 16
+#define I40E_GLGEN_RSTSTAT_REQ_RST_TYPE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTSTAT_REQ_RST_TYPE_SHIFT)
+
+#define I40E_GLGEN_RSTSTATUS 0x00092624 /* Reset: CORER */
+#define I40E_GLGEN_RSTSTATUS_TDPU_CNT_SHIFT 0
+#define I40E_GLGEN_RSTSTATUS_TDPU_CNT_MASK I40E_MASK(0x1F, I40E_GLGEN_RSTSTATUS_TDPU_CNT_SHIFT)
+#define I40E_GLGEN_RSTSTATUS_RDPU_CNT_SHIFT 8
+#define I40E_GLGEN_RSTSTATUS_RDPU_CNT_MASK I40E_MASK(0x1F, I40E_GLGEN_RSTSTATUS_RDPU_CNT_SHIFT)
+#define I40E_GLGEN_RSTSTATUS_TLAN_CNT_SHIFT 16
+#define I40E_GLGEN_RSTSTATUS_TLAN_CNT_MASK I40E_MASK(0xF, I40E_GLGEN_RSTSTATUS_TLAN_CNT_SHIFT)
+#define I40E_GLGEN_RSTSTATUS_RCU_DONE_SHIFT 20
+#define I40E_GLGEN_RSTSTATUS_RCU_DONE_MASK I40E_MASK(0x1, I40E_GLGEN_RSTSTATUS_RCU_DONE_SHIFT)
+#define I40E_GLGEN_RSTSTATUS_PMAT_DONE_SHIFT 21
+#define I40E_GLGEN_RSTSTATUS_PMAT_DONE_MASK I40E_MASK(0x1, I40E_GLGEN_RSTSTATUS_PMAT_DONE_SHIFT)
+#define I40E_GLGEN_RSTSTATUS_PE_DONE_SHIFT 22
+#define I40E_GLGEN_RSTSTATUS_PE_DONE_MASK I40E_MASK(0x1, I40E_GLGEN_RSTSTATUS_PE_DONE_SHIFT)
+#define I40E_GLGEN_RSTSTATUS_CM_PE_DONE_SHIFT 23
+#define I40E_GLGEN_RSTSTATUS_CM_PE_DONE_MASK I40E_MASK(0x1, I40E_GLGEN_RSTSTATUS_CM_PE_DONE_SHIFT)
+#define I40E_GLGEN_RSTSTATUS_INT_DONE_SHIFT 24
+#define I40E_GLGEN_RSTSTATUS_INT_DONE_MASK I40E_MASK(0x1, I40E_GLGEN_RSTSTATUS_INT_DONE_SHIFT)
+#define I40E_GLGEN_RSTSTATUS_PEOC_DONE_SHIFT 25
+#define I40E_GLGEN_RSTSTATUS_PEOC_DONE_MASK I40E_MASK(0x1, I40E_GLGEN_RSTSTATUS_PEOC_DONE_SHIFT)
+#define I40E_GLGEN_RSTSTATUS_PBLOC_DONE_SHIFT 26
+#define I40E_GLGEN_RSTSTATUS_PBLOC_DONE_MASK I40E_MASK(0x1, I40E_GLGEN_RSTSTATUS_PBLOC_DONE_SHIFT)
+#define I40E_GLGEN_RSTSTATUS_FOC_DONE_SHIFT 27
+#define I40E_GLGEN_RSTSTATUS_FOC_DONE_MASK I40E_MASK(0x1, I40E_GLGEN_RSTSTATUS_FOC_DONE_SHIFT)
+#define I40E_GLGEN_RSTSTATUS_CM_LAN_DONE_SHIFT 28
+#define I40E_GLGEN_RSTSTATUS_CM_LAN_DONE_MASK I40E_MASK(0x1, I40E_GLGEN_RSTSTATUS_CM_LAN_DONE_SHIFT)
+#define I40E_GLGEN_RSTSTATUS_FW_DONE_SHIFT 29
+#define I40E_GLGEN_RSTSTATUS_FW_DONE_MASK I40E_MASK(0x1, I40E_GLGEN_RSTSTATUS_FW_DONE_SHIFT)
+#define I40E_GLGEN_RSTSTATUS_PE_ENA_SHIFT 30
+#define I40E_GLGEN_RSTSTATUS_PE_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTSTATUS_PE_ENA_SHIFT)
+#define I40E_GLGEN_RSTSTATUS_HW_DONE_SHIFT 31
+#define I40E_GLGEN_RSTSTATUS_HW_DONE_MASK I40E_MASK(0x1, I40E_GLGEN_RSTSTATUS_HW_DONE_SHIFT)
+
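Reading a field is the mirror image: mask the register value and shift it down by the field's *_SHIFT constant. A minimal sketch that decodes the TLAN reset counter from a GLGEN_RSTSTATUS value, assuming the defines above are in scope; the helper name is illustrative, not part of this header:

	#include <stdint.h>

	/* Extract the TLAN reset counter field from a GLGEN_RSTSTATUS value. */
	static inline uint32_t
	rststatus_tlan_cnt(uint32_t rststatus)
	{
		return ((rststatus & I40E_GLGEN_RSTSTATUS_TLAN_CNT_MASK) >>
		    I40E_GLGEN_RSTSTATUS_TLAN_CNT_SHIFT);
	}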
+#define I40E_GLMNG_WD_ENA 0x000B8198 /* Reset: POR */
+#define I40E_GLMNG_WD_ENA_FW_RST_WD_ENA_SHIFT 0
+#define I40E_GLMNG_WD_ENA_FW_RST_WD_ENA_MASK I40E_MASK(0x1, I40E_GLMNG_WD_ENA_FW_RST_WD_ENA_SHIFT)
+#define I40E_GLMNG_WD_ENA_ECC_RST_ENA_SHIFT 1
+#define I40E_GLMNG_WD_ENA_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLMNG_WD_ENA_ECC_RST_ENA_SHIFT)
+
+#define I40E_GLPHY_ANA_ADD 0x000BA008 /* Reset: POR */
+#define I40E_GLPHY_ANA_ADD_ADDRESS_SHIFT 0
+#define I40E_GLPHY_ANA_ADD_ADDRESS_MASK I40E_MASK(0xFFFF, I40E_GLPHY_ANA_ADD_ADDRESS_SHIFT)
+#define I40E_GLPHY_ANA_ADD_BYTE_EN_SHIFT 28
+#define I40E_GLPHY_ANA_ADD_BYTE_EN_MASK I40E_MASK(0xF, I40E_GLPHY_ANA_ADD_BYTE_EN_SHIFT)
+
+#define I40E_GLPHY_ANA_DATA 0x000BA00C /* Reset: POR */
+#define I40E_GLPHY_ANA_DATA_DATA_SHIFT 0
+#define I40E_GLPHY_ANA_DATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPHY_ANA_DATA_DATA_SHIFT)
+
+#define I40E_PFGEN_FWHWRSTAT 0x00092480 /* Reset: CORER */
+#define I40E_PFGEN_FWHWRSTAT_FW_RST_DONE_SHIFT 0
+#define I40E_PFGEN_FWHWRSTAT_FW_RST_DONE_MASK I40E_MASK(0x1, I40E_PFGEN_FWHWRSTAT_FW_RST_DONE_SHIFT)
+#define I40E_PFGEN_FWHWRSTAT_HW_ONLY_RST_DONE_SHIFT 31
+#define I40E_PFGEN_FWHWRSTAT_HW_ONLY_RST_DONE_MASK I40E_MASK(0x1, I40E_PFGEN_FWHWRSTAT_HW_ONLY_RST_DONE_SHIFT)
+
+#define I40E_PFGEN_PORTNUM_CAR 0x000B8000 /* Reset: POR */
+#define I40E_PFGEN_PORTNUM_CAR_PORT_NUM_SHIFT 0
+#define I40E_PFGEN_PORTNUM_CAR_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTNUM_CAR_PORT_NUM_SHIFT)
+
+#define I40E_PFGEN_PORTNUM_CSR 0x00078D00 /* Reset: CORER */
+#define I40E_PFGEN_PORTNUM_CSR_PORT_NUM_SHIFT 0
+#define I40E_PFGEN_PORTNUM_CSR_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTNUM_CSR_PORT_NUM_SHIFT)
+
+#define I40E_PFGEN_PORTNUM_PM 0x0006B800 /* Reset: CORER */
+#define I40E_PFGEN_PORTNUM_PM_PORT_NUM_SHIFT 0
+#define I40E_PFGEN_PORTNUM_PM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTNUM_PM_PORT_NUM_SHIFT)
+
+#define I40E_PFGEN_PORTNUM_RCB 0x00122000 /* Reset: CORER */
+#define I40E_PFGEN_PORTNUM_RCB_PORT_NUM_SHIFT 0
+#define I40E_PFGEN_PORTNUM_RCB_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTNUM_RCB_PORT_NUM_SHIFT)
+
+#define I40E_PFGEN_PORTNUM_TSCD 0x000B2240 /* Reset: CORER */
+#define I40E_PFGEN_PORTNUM_TSCD_PORT_NUM_SHIFT 0
+#define I40E_PFGEN_PORTNUM_TSCD_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTNUM_TSCD_PORT_NUM_SHIFT)
+
+#define I40E_VPGEN_FWHWRSTAT(_VF) (0x00092000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: CORER */
+#define I40E_VPGEN_FWHWRSTAT_MAX_INDEX 127
+#define I40E_VPGEN_FWHWRSTAT_FW_RST_DONE_SHIFT 0
+#define I40E_VPGEN_FWHWRSTAT_FW_RST_DONE_MASK I40E_MASK(0x1, I40E_VPGEN_FWHWRSTAT_FW_RST_DONE_SHIFT)
+#define I40E_VPGEN_FWHWRSTAT_HW_ONLY_RST_DONE_SHIFT 31
+#define I40E_VPGEN_FWHWRSTAT_HW_ONLY_RST_DONE_MASK I40E_MASK(0x1, I40E_VPGEN_FWHWRSTAT_HW_ONLY_RST_DONE_SHIFT)
+
+#define I40E_VSIGEN_FWHWRSTAT(_VSI) (0x00091000 + ((_VSI) * 4)) /* _VSI=0...383 */ /* Reset: CORER */
+#define I40E_VSIGEN_FWHWRSTAT_MAX_INDEX 383
+#define I40E_VSIGEN_FWHWRSTAT_FW_RST_DONE_SHIFT 0
+#define I40E_VSIGEN_FWHWRSTAT_FW_RST_DONE_MASK I40E_MASK(0x1, I40E_VSIGEN_FWHWRSTAT_FW_RST_DONE_SHIFT)
+#define I40E_VSIGEN_FWHWRSTAT_HW_ONLY_RST_DONE_SHIFT 31
+#define I40E_VSIGEN_FWHWRSTAT_HW_ONLY_RST_DONE_MASK I40E_MASK(0x1, I40E_VSIGEN_FWHWRSTAT_HW_ONLY_RST_DONE_SHIFT)
+
+/* PF - HMC Registers */
+
+#define I40E_GLFOC_CECC_ERR 0x000AA0D4 /* Reset: POR */
+#define I40E_GLFOC_CECC_ERR_UNCOR_ECC_ERR_CNT_SHIFT 0
+#define I40E_GLFOC_CECC_ERR_UNCOR_ECC_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_GLFOC_CECC_ERR_UNCOR_ECC_ERR_CNT_SHIFT)
+#define I40E_GLFOC_CECC_ERR_COR_ECC_ERR_CNT_SHIFT 16
+#define I40E_GLFOC_CECC_ERR_COR_ECC_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_GLFOC_CECC_ERR_COR_ECC_ERR_CNT_SHIFT)
+
+#define I40E_GLFOC_ECC_CTL 0x000AA0CC /* Reset: POR */
+#define I40E_GLFOC_ECC_CTL_HOST_ECC_EN_SHIFT 0
+#define I40E_GLFOC_ECC_CTL_HOST_ECC_EN_MASK I40E_MASK(0x1, I40E_GLFOC_ECC_CTL_HOST_ECC_EN_SHIFT)
+#define I40E_GLFOC_ECC_CTL_HOST_ECC_MASK_INT_SHIFT 1
+#define I40E_GLFOC_ECC_CTL_HOST_ECC_MASK_INT_MASK I40E_MASK(0x1, I40E_GLFOC_ECC_CTL_HOST_ECC_MASK_INT_SHIFT)
+#define I40E_GLFOC_ECC_CTL_HOST_ECC_INVERT1_SHIFT 2
+#define I40E_GLFOC_ECC_CTL_HOST_ECC_INVERT1_MASK I40E_MASK(0x1, I40E_GLFOC_ECC_CTL_HOST_ECC_INVERT1_SHIFT)
+#define I40E_GLFOC_ECC_CTL_HOST_ECC_INVERT2_SHIFT 3
+#define I40E_GLFOC_ECC_CTL_HOST_ECC_INVERT2_MASK I40E_MASK(0x1, I40E_GLFOC_ECC_CTL_HOST_ECC_INVERT2_SHIFT)
+#define I40E_GLFOC_ECC_CTL_CLIENT_ECC_EN_SHIFT 4
+#define I40E_GLFOC_ECC_CTL_CLIENT_ECC_EN_MASK I40E_MASK(0x1, I40E_GLFOC_ECC_CTL_CLIENT_ECC_EN_SHIFT)
+#define I40E_GLFOC_ECC_CTL_CLIENT_ECC_MASK_INT_SHIFT 5
+#define I40E_GLFOC_ECC_CTL_CLIENT_ECC_MASK_INT_MASK I40E_MASK(0x1, I40E_GLFOC_ECC_CTL_CLIENT_ECC_MASK_INT_SHIFT)
+#define I40E_GLFOC_ECC_CTL_CLIENT_ECC_INVERT1_SHIFT 6
+#define I40E_GLFOC_ECC_CTL_CLIENT_ECC_INVERT1_MASK I40E_MASK(0x1, I40E_GLFOC_ECC_CTL_CLIENT_ECC_INVERT1_SHIFT)
+#define I40E_GLFOC_ECC_CTL_CLIENT_ECC_INVERT2_SHIFT 7
+#define I40E_GLFOC_ECC_CTL_CLIENT_ECC_INVERT2_MASK I40E_MASK(0x1, I40E_GLFOC_ECC_CTL_CLIENT_ECC_INVERT2_SHIFT)
+
+#define I40E_GLFOC_ERRDATA0 0x000AA0C0 /* Reset: POR */
+#define I40E_GLFOC_ERRDATA0_ERROR_CODE_SHIFT 0
+#define I40E_GLFOC_ERRDATA0_ERROR_CODE_MASK I40E_MASK(0x3F, I40E_GLFOC_ERRDATA0_ERROR_CODE_SHIFT)
+#define I40E_GLFOC_ERRDATA0_OBJ_TYPE_SHIFT 8
+#define I40E_GLFOC_ERRDATA0_OBJ_TYPE_MASK I40E_MASK(0x1F, I40E_GLFOC_ERRDATA0_OBJ_TYPE_SHIFT)
+#define I40E_GLFOC_ERRDATA0_VM_VF_TYPE_SHIFT 13
+#define I40E_GLFOC_ERRDATA0_VM_VF_TYPE_MASK I40E_MASK(0x3, I40E_GLFOC_ERRDATA0_VM_VF_TYPE_SHIFT)
+#define I40E_GLFOC_ERRDATA0_VM_VF_NUM_SHIFT 15
+#define I40E_GLFOC_ERRDATA0_VM_VF_NUM_MASK I40E_MASK(0x1FF, I40E_GLFOC_ERRDATA0_VM_VF_NUM_SHIFT)
+#define I40E_GLFOC_ERRDATA0_PF_NUM_SHIFT 24
+#define I40E_GLFOC_ERRDATA0_PF_NUM_MASK I40E_MASK(0xF, I40E_GLFOC_ERRDATA0_PF_NUM_SHIFT)
+
+#define I40E_GLFOC_ERRDATA1 0x000AA0C4 /* Reset: POR */
+#define I40E_GLFOC_ERRDATA1_OBJ_INDEX_SHIFT 0
+#define I40E_GLFOC_ERRDATA1_OBJ_INDEX_MASK I40E_MASK(0xFFFFFFF, I40E_GLFOC_ERRDATA1_OBJ_INDEX_SHIFT)
+
+#define I40E_GLFOC_ERRDATA2 0x000AA0C8 /* Reset: POR */
+#define I40E_GLFOC_ERRDATA2_LENGTH_SHIFT 0
+#define I40E_GLFOC_ERRDATA2_LENGTH_MASK I40E_MASK(0x7F, I40E_GLFOC_ERRDATA2_LENGTH_SHIFT)
+#define I40E_GLFOC_ERRDATA2_OFFSET_SHIFT 7
+#define I40E_GLFOC_ERRDATA2_OFFSET_MASK I40E_MASK(0x1FFF, I40E_GLFOC_ERRDATA2_OFFSET_SHIFT)
+#define I40E_GLFOC_ERRDATA2_OPTYPE_SHIFT 20
+#define I40E_GLFOC_ERRDATA2_OPTYPE_MASK I40E_MASK(0x7, I40E_GLFOC_ERRDATA2_OPTYPE_SHIFT)
+#define I40E_GLFOC_ERRDATA2_TAG_SHIFT 23
+#define I40E_GLFOC_ERRDATA2_TAG_MASK I40E_MASK(0x1FF, I40E_GLFOC_ERRDATA2_TAG_SHIFT)
+
+#define I40E_GLFOC_ERRINFO 0x000AA0BC /* Reset: POR */
+#define I40E_GLFOC_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_GLFOC_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_GLFOC_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_GLFOC_ERRINFO_ERROR_CNT_SHIFT 8
+#define I40E_GLFOC_ERRINFO_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_GLFOC_ERRINFO_ERROR_CNT_SHIFT)
+
+#define I40E_GLFOC_FCOEHTE_OBJOFST 0x000AA050 /* Reset: CORER */
+#define I40E_GLFOC_FCOEHTE_OBJOFST_OBJ_TYPE_OFFSET_SHIFT 0
+#define I40E_GLFOC_FCOEHTE_OBJOFST_OBJ_TYPE_OFFSET_MASK I40E_MASK(0x3FF, I40E_GLFOC_FCOEHTE_OBJOFST_OBJ_TYPE_OFFSET_SHIFT)
+
+#define I40E_GLFOC_HECC_ERR 0x000AA0D0 /* Reset: POR */
+#define I40E_GLFOC_HECC_ERR_UNCOR_ECC_ERR_CNT_SHIFT 0
+#define I40E_GLFOC_HECC_ERR_UNCOR_ECC_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_GLFOC_HECC_ERR_UNCOR_ECC_ERR_CNT_SHIFT)
+#define I40E_GLFOC_HECC_ERR_COR_ECC_ERR_CNT_SHIFT 16
+#define I40E_GLFOC_HECC_ERR_COR_ECC_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_GLFOC_HECC_ERR_COR_ECC_ERR_CNT_SHIFT)
+
+#define I40E_GLFOC_LAN32BRSV_OBJOFST 0x000AA058 /* Reset: CORER */
+#define I40E_GLFOC_LAN32BRSV_OBJOFST_OBJ_TYPE_OFFSET_SHIFT 0
+#define I40E_GLFOC_LAN32BRSV_OBJOFST_OBJ_TYPE_OFFSET_MASK I40E_MASK(0x3FF, I40E_GLFOC_LAN32BRSV_OBJOFST_OBJ_TYPE_OFFSET_SHIFT)
+
+#define I40E_GLFOC_LAN64BRSV0_OBJOFST 0x000AA05C /* Reset: CORER */
+#define I40E_GLFOC_LAN64BRSV0_OBJOFST_OBJ_TYPE_OFFSET_SHIFT 0
+#define I40E_GLFOC_LAN64BRSV0_OBJOFST_OBJ_TYPE_OFFSET_MASK I40E_MASK(0x3FF, I40E_GLFOC_LAN64BRSV0_OBJOFST_OBJ_TYPE_OFFSET_SHIFT)
+
+#define I40E_GLFOC_LAN64BRSV1_OBJOFST 0x000AA060 /* Reset: CORER */
+#define I40E_GLFOC_LAN64BRSV1_OBJOFST_OBJ_TYPE_OFFSET_SHIFT 0
+#define I40E_GLFOC_LAN64BRSV1_OBJOFST_OBJ_TYPE_OFFSET_MASK I40E_MASK(0x3FF, I40E_GLFOC_LAN64BRSV1_OBJOFST_OBJ_TYPE_OFFSET_SHIFT)
+
+#define I40E_GLFOC_QUADHTE_OBJOFST 0x000AA054 /* Reset: CORER */
+#define I40E_GLFOC_QUADHTE_OBJOFST_OBJ_TYPE_OFFSET_SHIFT 0
+#define I40E_GLFOC_QUADHTE_OBJOFST_OBJ_TYPE_OFFSET_MASK I40E_MASK(0x3FF, I40E_GLFOC_QUADHTE_OBJOFST_OBJ_TYPE_OFFSET_SHIFT)
+
+#define I40E_GLFOC_STAT_CTL 0x000AA008 /* Reset: CORER */
+#define I40E_GLFOC_STAT_CTL_OBJECT_TYPE_SHIFT 0
+#define I40E_GLFOC_STAT_CTL_OBJECT_TYPE_MASK I40E_MASK(0x1F, I40E_GLFOC_STAT_CTL_OBJECT_TYPE_SHIFT)
+
+#define I40E_GLFOC_STAT_OBJ_CNT 0x000AA00C /* Reset: CORER */
+#define I40E_GLFOC_STAT_OBJ_CNT_OBJECT_COUNT_SHIFT 0
+#define I40E_GLFOC_STAT_OBJ_CNT_OBJECT_COUNT_MASK I40E_MASK(0x3FFF, I40E_GLFOC_STAT_OBJ_CNT_OBJECT_COUNT_SHIFT)
+
+#define I40E_GLFOC_STAT_RD_DATA_IDLE_HI 0x000AA034 /* Reset: CORER */
+#define I40E_GLFOC_STAT_RD_DATA_IDLE_HI_CNT_HI_SHIFT 0
+#define I40E_GLFOC_STAT_RD_DATA_IDLE_HI_CNT_HI_MASK I40E_MASK(0xFFFFFF, I40E_GLFOC_STAT_RD_DATA_IDLE_HI_CNT_HI_SHIFT)
+
+#define I40E_GLFOC_STAT_RD_DATA_IDLE_LO 0x000AA030 /* Reset: CORER */
+#define I40E_GLFOC_STAT_RD_DATA_IDLE_LO_CNT_LO_SHIFT 0
+#define I40E_GLFOC_STAT_RD_DATA_IDLE_LO_CNT_LO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLFOC_STAT_RD_DATA_IDLE_LO_CNT_LO_SHIFT)
+
+#define I40E_GLFOC_STAT_RD_DATA_XFER_HI 0x000AA03C /* Reset: CORER */
+#define I40E_GLFOC_STAT_RD_DATA_XFER_HI_CNT_HI_SHIFT 0
+#define I40E_GLFOC_STAT_RD_DATA_XFER_HI_CNT_HI_MASK I40E_MASK(0xFFFFFF, I40E_GLFOC_STAT_RD_DATA_XFER_HI_CNT_HI_SHIFT)
+
+#define I40E_GLFOC_STAT_RD_DATA_XFER_LO 0x000AA038 /* Reset: CORER */
+#define I40E_GLFOC_STAT_RD_DATA_XFER_LO_CNT_LO_SHIFT 0
+#define I40E_GLFOC_STAT_RD_DATA_XFER_LO_CNT_LO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLFOC_STAT_RD_DATA_XFER_LO_CNT_LO_SHIFT)
+
+#define I40E_GLFOC_STAT_RD_HIT_HI 0x000AA014 /* Reset: CORER */
+#define I40E_GLFOC_STAT_RD_HIT_HI_CNT_HI_SHIFT 0
+#define I40E_GLFOC_STAT_RD_HIT_HI_CNT_HI_MASK I40E_MASK(0xFFFFFF, I40E_GLFOC_STAT_RD_HIT_HI_CNT_HI_SHIFT)
+
+#define I40E_GLFOC_STAT_RD_HIT_LO 0x000AA010 /* Reset: CORER */
+#define I40E_GLFOC_STAT_RD_HIT_LO_CNT_LO_SHIFT 0
+#define I40E_GLFOC_STAT_RD_HIT_LO_CNT_LO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLFOC_STAT_RD_HIT_LO_CNT_LO_SHIFT)
+
+#define I40E_GLFOC_STAT_RD_MISS_HI 0x000AA01C /* Reset: CORER */
+#define I40E_GLFOC_STAT_RD_MISS_HI_CNT_HI_SHIFT 0
+#define I40E_GLFOC_STAT_RD_MISS_HI_CNT_HI_MASK I40E_MASK(0xFFFFFF, I40E_GLFOC_STAT_RD_MISS_HI_CNT_HI_SHIFT)
+
+#define I40E_GLFOC_STAT_RD_MISS_LO 0x000AA018 /* Reset: CORER */
+#define I40E_GLFOC_STAT_RD_MISS_LO_CNT_LO_SHIFT 0
+#define I40E_GLFOC_STAT_RD_MISS_LO_CNT_LO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLFOC_STAT_RD_MISS_LO_CNT_LO_SHIFT)
+
+#define I40E_GLFOC_STAT_WR_DATA_IDLE_HI 0x000AA044 /* Reset: CORER */
+#define I40E_GLFOC_STAT_WR_DATA_IDLE_HI_CNT_HI_SHIFT 0
+#define I40E_GLFOC_STAT_WR_DATA_IDLE_HI_CNT_HI_MASK I40E_MASK(0xFFFFFF, I40E_GLFOC_STAT_WR_DATA_IDLE_HI_CNT_HI_SHIFT)
+
+#define I40E_GLFOC_STAT_WR_DATA_IDLE_LO 0x000AA040 /* Reset: CORER */
+#define I40E_GLFOC_STAT_WR_DATA_IDLE_LO_CNT_LO_SHIFT 0
+#define I40E_GLFOC_STAT_WR_DATA_IDLE_LO_CNT_LO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLFOC_STAT_WR_DATA_IDLE_LO_CNT_LO_SHIFT)
+
+#define I40E_GLFOC_STAT_WR_DATA_XFER_HI 0x000AA04C /* Reset: CORER */
+#define I40E_GLFOC_STAT_WR_DATA_XFER_HI_CNT_HI_SHIFT 0
+#define I40E_GLFOC_STAT_WR_DATA_XFER_HI_CNT_HI_MASK I40E_MASK(0xFFFFFF, I40E_GLFOC_STAT_WR_DATA_XFER_HI_CNT_HI_SHIFT)
+
+#define I40E_GLFOC_STAT_WR_DATA_XFER_LO 0x000AA048 /* Reset: CORER */
+#define I40E_GLFOC_STAT_WR_DATA_XFER_LO_CNT_LO_SHIFT 0
+#define I40E_GLFOC_STAT_WR_DATA_XFER_LO_CNT_LO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLFOC_STAT_WR_DATA_XFER_LO_CNT_LO_SHIFT)
+
+#define I40E_GLFOC_STAT_WR_HIT_HI 0x000AA024 /* Reset: CORER */
+#define I40E_GLFOC_STAT_WR_HIT_HI_CNT_HI_SHIFT 0
+#define I40E_GLFOC_STAT_WR_HIT_HI_CNT_HI_MASK I40E_MASK(0xFFFFFF, I40E_GLFOC_STAT_WR_HIT_HI_CNT_HI_SHIFT)
+
+#define I40E_GLFOC_STAT_WR_HIT_LO 0x000AA020 /* Reset: CORER */
+#define I40E_GLFOC_STAT_WR_HIT_LO_CNT_LO_SHIFT 0
+#define I40E_GLFOC_STAT_WR_HIT_LO_CNT_LO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLFOC_STAT_WR_HIT_LO_CNT_LO_SHIFT)
+
+#define I40E_GLFOC_STAT_WR_MISS_HI 0x000AA02C /* Reset: CORER */
+#define I40E_GLFOC_STAT_WR_MISS_HI_CNT_HI_SHIFT 0
+#define I40E_GLFOC_STAT_WR_MISS_HI_CNT_HI_MASK I40E_MASK(0xFFFFFF, I40E_GLFOC_STAT_WR_MISS_HI_CNT_HI_SHIFT)
+
+#define I40E_GLFOC_STAT_WR_MISS_LO 0x000AA028 /* Reset: CORER */
+#define I40E_GLFOC_STAT_WR_MISS_LO_CNT_LO_SHIFT 0
+#define I40E_GLFOC_STAT_WR_MISS_LO_CNT_LO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLFOC_STAT_WR_MISS_LO_CNT_LO_SHIFT)
+
+#define I40E_GLHMC_EMPOBJCACHECTL0 0x000C20dc /* Reset: CORER */
+#define I40E_GLHMC_EMPOBJCACHECTL0_OBJ_PF_NUM_SHIFT 0
+#define I40E_GLHMC_EMPOBJCACHECTL0_OBJ_PF_NUM_MASK I40E_MASK(0xF, I40E_GLHMC_EMPOBJCACHECTL0_OBJ_PF_NUM_SHIFT)
+#define I40E_GLHMC_EMPOBJCACHECTL0_OBJ_TYPE_SHIFT 8
+#define I40E_GLHMC_EMPOBJCACHECTL0_OBJ_TYPE_MASK I40E_MASK(0x1F, I40E_GLHMC_EMPOBJCACHECTL0_OBJ_TYPE_SHIFT)
+#define I40E_GLHMC_EMPOBJCACHECTL0_CMD_SHIFT 13
+#define I40E_GLHMC_EMPOBJCACHECTL0_CMD_MASK I40E_MASK(0x7, I40E_GLHMC_EMPOBJCACHECTL0_CMD_SHIFT)
+#define I40E_GLHMC_EMPOBJCACHECTL0_OBJ_VM_VF_NUM_SHIFT 16
+#define I40E_GLHMC_EMPOBJCACHECTL0_OBJ_VM_VF_NUM_MASK I40E_MASK(0x1FF, I40E_GLHMC_EMPOBJCACHECTL0_OBJ_VM_VF_NUM_SHIFT)
+#define I40E_GLHMC_EMPOBJCACHECTL0_OBJ_VM_VF_TYPE_SHIFT 27
+#define I40E_GLHMC_EMPOBJCACHECTL0_OBJ_VM_VF_TYPE_MASK I40E_MASK(0x3, I40E_GLHMC_EMPOBJCACHECTL0_OBJ_VM_VF_TYPE_SHIFT)
+#define I40E_GLHMC_EMPOBJCACHECTL0_CMD_FAILED_SHIFT 30
+#define I40E_GLHMC_EMPOBJCACHECTL0_CMD_FAILED_MASK I40E_MASK(0x1, I40E_GLHMC_EMPOBJCACHECTL0_CMD_FAILED_SHIFT)
+#define I40E_GLHMC_EMPOBJCACHECTL0_CMD_DONE_SHIFT 31
+#define I40E_GLHMC_EMPOBJCACHECTL0_CMD_DONE_MASK I40E_MASK(0x1, I40E_GLHMC_EMPOBJCACHECTL0_CMD_DONE_SHIFT)
+
+#define I40E_GLHMC_EMPOBJCACHECTL1 0x000C20e0 /* Reset: CORER */
+#define I40E_GLHMC_EMPOBJCACHECTL1_OBJ_INDEX_SHIFT 0
+#define I40E_GLHMC_EMPOBJCACHECTL1_OBJ_INDEX_MASK I40E_MASK(0xFFFFFFF, I40E_GLHMC_EMPOBJCACHECTL1_OBJ_INDEX_SHIFT)
+
+#define I40E_GLHMC_FWPDINV 0x000C207c /* Reset: CORER */
+#define I40E_GLHMC_FWPDINV_PMSDIDX_SHIFT 0
+#define I40E_GLHMC_FWPDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_GLHMC_FWPDINV_PMSDIDX_SHIFT)
+#define I40E_GLHMC_FWPDINV_PMPDIDX_SHIFT 16
+#define I40E_GLHMC_FWPDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_GLHMC_FWPDINV_PMPDIDX_SHIFT)
+
+#define I40E_GLHMC_FWSDCMD 0x000C2070 /* Reset: CORER */
+#define I40E_GLHMC_FWSDCMD_PMSDIDX_SHIFT 0
+#define I40E_GLHMC_FWSDCMD_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_GLHMC_FWSDCMD_PMSDIDX_SHIFT)
+#define I40E_GLHMC_FWSDCMD_PF_SHIFT 16
+#define I40E_GLHMC_FWSDCMD_PF_MASK I40E_MASK(0xF, I40E_GLHMC_FWSDCMD_PF_SHIFT)
+#define I40E_GLHMC_FWSDCMD_VF_SHIFT 20
+#define I40E_GLHMC_FWSDCMD_VF_MASK I40E_MASK(0x1FF, I40E_GLHMC_FWSDCMD_VF_SHIFT)
+#define I40E_GLHMC_FWSDCMD_PMF_TYPE_SHIFT 29
+#define I40E_GLHMC_FWSDCMD_PMF_TYPE_MASK I40E_MASK(0x3, I40E_GLHMC_FWSDCMD_PMF_TYPE_SHIFT)
+#define I40E_GLHMC_FWSDCMD_PMSDWR_SHIFT 31
+#define I40E_GLHMC_FWSDCMD_PMSDWR_MASK I40E_MASK(0x1, I40E_GLHMC_FWSDCMD_PMSDWR_SHIFT)
+
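+/*
+ * Usage note: each field above is described by a _SHIFT/_MASK pair, where the
+ * mask produced by I40E_MASK() is already shifted into register position.  A
+ * minimal extraction sketch for I40E_GLHMC_FWSDCMD, assuming the rd32()
+ * accessor from i40e_osdep.h (variable names are illustrative only):
+ *
+ *	u32 reg = rd32(hw, I40E_GLHMC_FWSDCMD);
+ *	u32 pf  = (reg & I40E_GLHMC_FWSDCMD_PF_MASK) >>
+ *	    I40E_GLHMC_FWSDCMD_PF_SHIFT;
+ */
+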
+#define I40E_GLHMC_FWSDDATAHIGH 0x000C2078 /* Reset: CORER */
+#define I40E_GLHMC_FWSDDATAHIGH_PMSDDATAHIGH_SHIFT 0
+#define I40E_GLHMC_FWSDDATAHIGH_PMSDDATAHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLHMC_FWSDDATAHIGH_PMSDDATAHIGH_SHIFT)
+
+#define I40E_GLHMC_FWSDDATALOW 0x000C2074 /* Reset: CORER */
+#define I40E_GLHMC_FWSDDATALOW_PMSDVALID_SHIFT 0
+#define I40E_GLHMC_FWSDDATALOW_PMSDVALID_MASK I40E_MASK(0x1, I40E_GLHMC_FWSDDATALOW_PMSDVALID_SHIFT)
+#define I40E_GLHMC_FWSDDATALOW_PMSDTYPE_SHIFT 1
+#define I40E_GLHMC_FWSDDATALOW_PMSDTYPE_MASK I40E_MASK(0x1, I40E_GLHMC_FWSDDATALOW_PMSDTYPE_SHIFT)
+#define I40E_GLHMC_FWSDDATALOW_PMSDBPCOUNT_SHIFT 2
+#define I40E_GLHMC_FWSDDATALOW_PMSDBPCOUNT_MASK I40E_MASK(0x3FF, I40E_GLHMC_FWSDDATALOW_PMSDBPCOUNT_SHIFT)
+#define I40E_GLHMC_FWSDDATALOW_PMSDDATALOW_SHIFT 12
+#define I40E_GLHMC_FWSDDATALOW_PMSDDATALOW_MASK I40E_MASK(0xFFFFF, I40E_GLHMC_FWSDDATALOW_PMSDDATALOW_SHIFT)
+
+#define I40E_GLHMC_LAN32BRSVDBASE(_i) (0x000C6a00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_LAN32BRSVDBASE_MAX_INDEX 15
+#define I40E_GLHMC_LAN32BRSVDBASE_FPMLAN32BRSVDBASE_SHIFT 0
+#define I40E_GLHMC_LAN32BRSVDBASE_FPMLAN32BRSVDBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LAN32BRSVDBASE_FPMLAN32BRSVDBASE_SHIFT)
+
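+/*
+ * Usage note: indexed registers such as I40E_GLHMC_LAN32BRSVDBASE(_i) take an
+ * instance argument, and the matching _MAX_INDEX macro gives the last valid
+ * index.  A hypothetical sketch, again assuming rd32() from i40e_osdep.h:
+ *
+ *	for (int i = 0; i <= I40E_GLHMC_LAN32BRSVDBASE_MAX_INDEX; i++)
+ *		(void)rd32(hw, I40E_GLHMC_LAN32BRSVDBASE(i));
+ */
+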
+#define I40E_GLHMC_LAN32BRSVDCNT(_i) (0x000C6b00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_LAN32BRSVDCNT_MAX_INDEX 15
+#define I40E_GLHMC_LAN32BRSVDCNT_FPMLAN32BRSVDCNT_SHIFT 0
+#define I40E_GLHMC_LAN32BRSVDCNT_FPMLAN32BRSVDCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_LAN32BRSVDCNT_FPMLAN32BRSVDCNT_SHIFT)
+
+#define I40E_GLHMC_LAN32BRSVDMAX 0x000C209C /* Reset: CORER */
+#define I40E_GLHMC_LAN32BRSVDMAX_PMLAN32BRSVDMAX_SHIFT 0
+#define I40E_GLHMC_LAN32BRSVDMAX_PMLAN32BRSVDMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_LAN32BRSVDMAX_PMLAN32BRSVDMAX_SHIFT)
+
+#define I40E_GLHMC_LAN32BRSVDOBJSZ 0x000C2098 /* Reset: CORER */
+#define I40E_GLHMC_LAN32BRSVDOBJSZ_PMLAN32BRSVDOBJSZ_SHIFT 0
+#define I40E_GLHMC_LAN32BRSVDOBJSZ_PMLAN32BRSVDOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LAN32BRSVDOBJSZ_PMLAN32BRSVDOBJSZ_SHIFT)
+
+#define I40E_GLHMC_LAN64BRSVD0BASE(_i) (0x000C6c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_LAN64BRSVD0BASE_MAX_INDEX 15
+#define I40E_GLHMC_LAN64BRSVD0BASE_FPMLAN64BRSVD0BASE_SHIFT 0
+#define I40E_GLHMC_LAN64BRSVD0BASE_FPMLAN64BRSVD0BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LAN64BRSVD0BASE_FPMLAN64BRSVD0BASE_SHIFT)
+
+#define I40E_GLHMC_LAN64BRSVD0CNT(_i) (0x000C6d00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_LAN64BRSVD0CNT_MAX_INDEX 15
+#define I40E_GLHMC_LAN64BRSVD0CNT_FPMLAN64BRSVD0CNT_SHIFT 0
+#define I40E_GLHMC_LAN64BRSVD0CNT_FPMLAN64BRSVD0CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_LAN64BRSVD0CNT_FPMLAN64BRSVD0CNT_SHIFT)
+
+#define I40E_GLHMC_LAN64BRSVD0MAX 0x000C20a4 /* Reset: CORER */
+#define I40E_GLHMC_LAN64BRSVD0MAX_PMLAN64BRSVD0MAX_SHIFT 0
+#define I40E_GLHMC_LAN64BRSVD0MAX_PMLAN64BRSVD0MAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_LAN64BRSVD0MAX_PMLAN64BRSVD0MAX_SHIFT)
+
+#define I40E_GLHMC_LAN64BRSVD0OBJSZ 0x000C20a0 /* Reset: CORER */
+#define I40E_GLHMC_LAN64BRSVD0OBJSZ_PMLAN64BRSVD0OBJSZ_SHIFT 0
+#define I40E_GLHMC_LAN64BRSVD0OBJSZ_PMLAN64BRSVD0OBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LAN64BRSVD0OBJSZ_PMLAN64BRSVD0OBJSZ_SHIFT)
+
+#define I40E_GLHMC_LAN64BRSVD1BASE(_i) (0x000C6e00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_LAN64BRSVD1BASE_MAX_INDEX 15
+#define I40E_GLHMC_LAN64BRSVD1BASE_FPMLAN64BRSVD1BASE_SHIFT 0
+#define I40E_GLHMC_LAN64BRSVD1BASE_FPMLAN64BRSVD1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LAN64BRSVD1BASE_FPMLAN64BRSVD1BASE_SHIFT)
+
+#define I40E_GLHMC_LAN64BRSVD1CNT(_i) (0x000C6f00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_LAN64BRSVD1CNT_MAX_INDEX 15
+#define I40E_GLHMC_LAN64BRSVD1CNT_FPMLAN64BRSVD1CNT_SHIFT 0
+#define I40E_GLHMC_LAN64BRSVD1CNT_FPMLAN64BRSVD1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_LAN64BRSVD1CNT_FPMLAN64BRSVD1CNT_SHIFT)
+
+#define I40E_GLHMC_LAN64BRSVD1MAX 0x000C20ac /* Reset: CORER */
+#define I40E_GLHMC_LAN64BRSVD1MAX_PMLAN64BRSVD1MAX_SHIFT 0
+#define I40E_GLHMC_LAN64BRSVD1MAX_PMLAN64BRSVD1MAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_LAN64BRSVD1MAX_PMLAN64BRSVD1MAX_SHIFT)
+
+#define I40E_GLHMC_LAN64BRSVD1OBJSZ 0x000C20a8 /* Reset: CORER */
+#define I40E_GLHMC_LAN64BRSVD1OBJSZ_PMLAN64BRSVD1OBJSZ_SHIFT 0
+#define I40E_GLHMC_LAN64BRSVD1OBJSZ_PMLAN64BRSVD1OBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LAN64BRSVD1OBJSZ_PMLAN64BRSVD1OBJSZ_SHIFT)
+
+#define I40E_GLHMC_OBJECTCACHECTL0(_i) (0x000C0900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_OBJECTCACHECTL0_MAX_INDEX 15
+#define I40E_GLHMC_OBJECTCACHECTL0_OBJ_PF_NUM_SHIFT 0
+#define I40E_GLHMC_OBJECTCACHECTL0_OBJ_PF_NUM_MASK I40E_MASK(0xF, I40E_GLHMC_OBJECTCACHECTL0_OBJ_PF_NUM_SHIFT)
+#define I40E_GLHMC_OBJECTCACHECTL0_OBJ_TYPE_SHIFT 8
+#define I40E_GLHMC_OBJECTCACHECTL0_OBJ_TYPE_MASK I40E_MASK(0x1F, I40E_GLHMC_OBJECTCACHECTL0_OBJ_TYPE_SHIFT)
+#define I40E_GLHMC_OBJECTCACHECTL0_CMD_SHIFT 13
+#define I40E_GLHMC_OBJECTCACHECTL0_CMD_MASK I40E_MASK(0x7, I40E_GLHMC_OBJECTCACHECTL0_CMD_SHIFT)
+#define I40E_GLHMC_OBJECTCACHECTL0_OBJ_VM_VF_NUM_SHIFT 16
+#define I40E_GLHMC_OBJECTCACHECTL0_OBJ_VM_VF_NUM_MASK I40E_MASK(0x1FF, I40E_GLHMC_OBJECTCACHECTL0_OBJ_VM_VF_NUM_SHIFT)
+#define I40E_GLHMC_OBJECTCACHECTL0_OBJ_VM_VF_TYPE_SHIFT 27
+#define I40E_GLHMC_OBJECTCACHECTL0_OBJ_VM_VF_TYPE_MASK I40E_MASK(0x3, I40E_GLHMC_OBJECTCACHECTL0_OBJ_VM_VF_TYPE_SHIFT)
+#define I40E_GLHMC_OBJECTCACHECTL0_CMD_FAILED_SHIFT 30
+#define I40E_GLHMC_OBJECTCACHECTL0_CMD_FAILED_MASK I40E_MASK(0x1, I40E_GLHMC_OBJECTCACHECTL0_CMD_FAILED_SHIFT)
+#define I40E_GLHMC_OBJECTCACHECTL0_CMD_DONE_SHIFT 31
+#define I40E_GLHMC_OBJECTCACHECTL0_CMD_DONE_MASK I40E_MASK(0x1, I40E_GLHMC_OBJECTCACHECTL0_CMD_DONE_SHIFT)
+
+#define I40E_GLHMC_OBJECTCACHECTL1(_i) (0x000C0a00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_OBJECTCACHECTL1_MAX_INDEX 15
+#define I40E_GLHMC_OBJECTCACHECTL1_OBJ_INDEX_SHIFT 0
+#define I40E_GLHMC_OBJECTCACHECTL1_OBJ_INDEX_MASK I40E_MASK(0xFFFFFFF, I40E_GLHMC_OBJECTCACHECTL1_OBJ_INDEX_SHIFT)
+
+#define I40E_GLHMC_PMATCFG 0x000C2000 /* Reset: CORER */
+#define I40E_GLHMC_PMATCFG_CM_PE_WEIGHT_SHIFT 0
+#define I40E_GLHMC_PMATCFG_CM_PE_WEIGHT_MASK I40E_MASK(0x1, I40E_GLHMC_PMATCFG_CM_PE_WEIGHT_SHIFT)
+#define I40E_GLHMC_PMATCFG_CM_LAN_WEIGHT_SHIFT 1
+#define I40E_GLHMC_PMATCFG_CM_LAN_WEIGHT_MASK I40E_MASK(0x1, I40E_GLHMC_PMATCFG_CM_LAN_WEIGHT_SHIFT)
+
+#define I40E_GLHMC_PMFTABLE(_i) (0x000C0b00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PMFTABLE_MAX_INDEX 15
+#define I40E_GLHMC_PMFTABLE_PM_FCN_TBL_ENTRY_VLD_SHIFT 31
+#define I40E_GLHMC_PMFTABLE_PM_FCN_TBL_ENTRY_VLD_MASK I40E_MASK(0x1, I40E_GLHMC_PMFTABLE_PM_FCN_TBL_ENTRY_VLD_SHIFT)
+
+#define I40E_GLPBLOC_CACHE_CTRL 0x000A8000 /* Reset: CORER */
+#define I40E_GLPBLOC_CACHE_CTRL_SCALE_FACTOR_SHIFT 0
+#define I40E_GLPBLOC_CACHE_CTRL_SCALE_FACTOR_MASK I40E_MASK(0x3, I40E_GLPBLOC_CACHE_CTRL_SCALE_FACTOR_SHIFT)
+#define I40E_GLPBLOC_CACHE_CTRL_DBGMUX_EN_SHIFT 4
+#define I40E_GLPBLOC_CACHE_CTRL_DBGMUX_EN_MASK I40E_MASK(0x1, I40E_GLPBLOC_CACHE_CTRL_DBGMUX_EN_SHIFT)
+#define I40E_GLPBLOC_CACHE_CTRL_DBGMUX_SEL_LO_SHIFT 8
+#define I40E_GLPBLOC_CACHE_CTRL_DBGMUX_SEL_LO_MASK I40E_MASK(0x1F, I40E_GLPBLOC_CACHE_CTRL_DBGMUX_SEL_LO_SHIFT)
+#define I40E_GLPBLOC_CACHE_CTRL_DBGMUX_SEL_HI_SHIFT 16
+#define I40E_GLPBLOC_CACHE_CTRL_DBGMUX_SEL_HI_MASK I40E_MASK(0x1F, I40E_GLPBLOC_CACHE_CTRL_DBGMUX_SEL_HI_SHIFT)
+
+#define I40E_GLPBLOC_CECC_ERR 0x000A80B4 /* Reset: POR */
+#define I40E_GLPBLOC_CECC_ERR_UNCOR_ECC_ERR_CNT_SHIFT 0
+#define I40E_GLPBLOC_CECC_ERR_UNCOR_ECC_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_GLPBLOC_CECC_ERR_UNCOR_ECC_ERR_CNT_SHIFT)
+#define I40E_GLPBLOC_CECC_ERR_COR_ECC_ERR_CNT_SHIFT 16
+#define I40E_GLPBLOC_CECC_ERR_COR_ECC_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_GLPBLOC_CECC_ERR_COR_ECC_ERR_CNT_SHIFT)
+
+#define I40E_GLPBLOC_ECC_CTL 0x000A80AC /* Reset: POR */
+#define I40E_GLPBLOC_ECC_CTL_HOST_ECC_EN_SHIFT 0
+#define I40E_GLPBLOC_ECC_CTL_HOST_ECC_EN_MASK I40E_MASK(0x1, I40E_GLPBLOC_ECC_CTL_HOST_ECC_EN_SHIFT)
+#define I40E_GLPBLOC_ECC_CTL_HOST_ECC_MASK_INT_SHIFT 1
+#define I40E_GLPBLOC_ECC_CTL_HOST_ECC_MASK_INT_MASK I40E_MASK(0x1, I40E_GLPBLOC_ECC_CTL_HOST_ECC_MASK_INT_SHIFT)
+#define I40E_GLPBLOC_ECC_CTL_HOST_ECC_INVERT1_SHIFT 2
+#define I40E_GLPBLOC_ECC_CTL_HOST_ECC_INVERT1_MASK I40E_MASK(0x1, I40E_GLPBLOC_ECC_CTL_HOST_ECC_INVERT1_SHIFT)
+#define I40E_GLPBLOC_ECC_CTL_HOST_ECC_INVERT2_SHIFT 3
+#define I40E_GLPBLOC_ECC_CTL_HOST_ECC_INVERT2_MASK I40E_MASK(0x1, I40E_GLPBLOC_ECC_CTL_HOST_ECC_INVERT2_SHIFT)
+#define I40E_GLPBLOC_ECC_CTL_CLIENT_ECC_EN_SHIFT 4
+#define I40E_GLPBLOC_ECC_CTL_CLIENT_ECC_EN_MASK I40E_MASK(0x1, I40E_GLPBLOC_ECC_CTL_CLIENT_ECC_EN_SHIFT)
+#define I40E_GLPBLOC_ECC_CTL_CLIENT_ECC_MASK_INT_SHIFT 5
+#define I40E_GLPBLOC_ECC_CTL_CLIENT_ECC_MASK_INT_MASK I40E_MASK(0x1, I40E_GLPBLOC_ECC_CTL_CLIENT_ECC_MASK_INT_SHIFT)
+#define I40E_GLPBLOC_ECC_CTL_CLIENT_ECC_INVERT1_SHIFT 6
+#define I40E_GLPBLOC_ECC_CTL_CLIENT_ECC_INVERT1_MASK I40E_MASK(0x1, I40E_GLPBLOC_ECC_CTL_CLIENT_ECC_INVERT1_SHIFT)
+#define I40E_GLPBLOC_ECC_CTL_CLIENT_ECC_INVERT2_SHIFT 7
+#define I40E_GLPBLOC_ECC_CTL_CLIENT_ECC_INVERT2_MASK I40E_MASK(0x1, I40E_GLPBLOC_ECC_CTL_CLIENT_ECC_INVERT2_SHIFT)
+
+#define I40E_GLPBLOC_ERRDATA0 0x000A80A0 /* Reset: POR */
+#define I40E_GLPBLOC_ERRDATA0_ERROR_CODE_SHIFT 0
+#define I40E_GLPBLOC_ERRDATA0_ERROR_CODE_MASK I40E_MASK(0x3F, I40E_GLPBLOC_ERRDATA0_ERROR_CODE_SHIFT)
+#define I40E_GLPBLOC_ERRDATA0_OBJ_TYPE_SHIFT 8
+#define I40E_GLPBLOC_ERRDATA0_OBJ_TYPE_MASK I40E_MASK(0x1F, I40E_GLPBLOC_ERRDATA0_OBJ_TYPE_SHIFT)
+#define I40E_GLPBLOC_ERRDATA0_VM_VF_TYPE_SHIFT 13
+#define I40E_GLPBLOC_ERRDATA0_VM_VF_TYPE_MASK I40E_MASK(0x3, I40E_GLPBLOC_ERRDATA0_VM_VF_TYPE_SHIFT)
+#define I40E_GLPBLOC_ERRDATA0_VM_VF_NUM_SHIFT 15
+#define I40E_GLPBLOC_ERRDATA0_VM_VF_NUM_MASK I40E_MASK(0x1FF, I40E_GLPBLOC_ERRDATA0_VM_VF_NUM_SHIFT)
+#define I40E_GLPBLOC_ERRDATA0_PF_NUM_SHIFT 24
+#define I40E_GLPBLOC_ERRDATA0_PF_NUM_MASK I40E_MASK(0xF, I40E_GLPBLOC_ERRDATA0_PF_NUM_SHIFT)
+
+#define I40E_GLPBLOC_ERRDATA1 0x000A80A4 /* Reset: POR */
+#define I40E_GLPBLOC_ERRDATA1_OBJ_INDEX_SHIFT 0
+#define I40E_GLPBLOC_ERRDATA1_OBJ_INDEX_MASK I40E_MASK(0xFFFFFFF, I40E_GLPBLOC_ERRDATA1_OBJ_INDEX_SHIFT)
+
+#define I40E_GLPBLOC_ERRDATA2 0x000A80A8 /* Reset: POR */
+#define I40E_GLPBLOC_ERRDATA2_LENGTH_SHIFT 0
+#define I40E_GLPBLOC_ERRDATA2_LENGTH_MASK I40E_MASK(0x7F, I40E_GLPBLOC_ERRDATA2_LENGTH_SHIFT)
+#define I40E_GLPBLOC_ERRDATA2_OFFSET_SHIFT 7
+#define I40E_GLPBLOC_ERRDATA2_OFFSET_MASK I40E_MASK(0x1FFF, I40E_GLPBLOC_ERRDATA2_OFFSET_SHIFT)
+#define I40E_GLPBLOC_ERRDATA2_OPTYPE_SHIFT 20
+#define I40E_GLPBLOC_ERRDATA2_OPTYPE_MASK I40E_MASK(0x7, I40E_GLPBLOC_ERRDATA2_OPTYPE_SHIFT)
+#define I40E_GLPBLOC_ERRDATA2_TAG_SHIFT 23
+#define I40E_GLPBLOC_ERRDATA2_TAG_MASK I40E_MASK(0x1FF, I40E_GLPBLOC_ERRDATA2_TAG_SHIFT)
+
+#define I40E_GLPBLOC_ERRINFO 0x000A809C /* Reset: POR */
+#define I40E_GLPBLOC_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_GLPBLOC_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_GLPBLOC_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_GLPBLOC_ERRINFO_ERROR_CNT_SHIFT 8
+#define I40E_GLPBLOC_ERRINFO_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_GLPBLOC_ERRINFO_ERROR_CNT_SHIFT)
+
+#define I40E_GLPBLOC_HECC_ERR 0x000A80B0 /* Reset: POR */
+#define I40E_GLPBLOC_HECC_ERR_UNCOR_ECC_ERR_CNT_SHIFT 0
+#define I40E_GLPBLOC_HECC_ERR_UNCOR_ECC_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_GLPBLOC_HECC_ERR_UNCOR_ECC_ERR_CNT_SHIFT)
+#define I40E_GLPBLOC_HECC_ERR_COR_ECC_ERR_CNT_SHIFT 16
+#define I40E_GLPBLOC_HECC_ERR_COR_ECC_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_GLPBLOC_HECC_ERR_COR_ECC_ERR_CNT_SHIFT)
+
+#define I40E_GLPBLOC_MRTE_OBJOFST 0x000A8050 /* Reset: CORER */
+#define I40E_GLPBLOC_MRTE_OBJOFST_OBJ_TYPE_OFFSET_SHIFT 0
+#define I40E_GLPBLOC_MRTE_OBJOFST_OBJ_TYPE_OFFSET_MASK I40E_MASK(0x3FF, I40E_GLPBLOC_MRTE_OBJOFST_OBJ_TYPE_OFFSET_SHIFT)
+
+#define I40E_GLPBLOC_PBLE_OBJOFST 0x000A804C /* Reset: CORER */
+#define I40E_GLPBLOC_PBLE_OBJOFST_OBJ_TYPE_OFFSET_SHIFT 0
+#define I40E_GLPBLOC_PBLE_OBJOFST_OBJ_TYPE_OFFSET_MASK I40E_MASK(0x3FF, I40E_GLPBLOC_PBLE_OBJOFST_OBJ_TYPE_OFFSET_SHIFT)
+
+#define I40E_GLPBLOC_STAT_CTL 0x000A8004 /* Reset: CORER */
+#define I40E_GLPBLOC_STAT_CTL_OBJECT_TYPE_SHIFT 0
+#define I40E_GLPBLOC_STAT_CTL_OBJECT_TYPE_MASK I40E_MASK(0x1F, I40E_GLPBLOC_STAT_CTL_OBJECT_TYPE_SHIFT)
+
+#define I40E_GLPBLOC_STAT_OBJ_CNT 0x000A8008 /* Reset: CORER */
+#define I40E_GLPBLOC_STAT_OBJ_CNT_OBJECT_COUNT_SHIFT 0
+#define I40E_GLPBLOC_STAT_OBJ_CNT_OBJECT_COUNT_MASK I40E_MASK(0x3FFF, I40E_GLPBLOC_STAT_OBJ_CNT_OBJECT_COUNT_SHIFT)
+
+#define I40E_GLPBLOC_STAT_RD_DATA_IDLE_HI 0x000A8030 /* Reset: CORER */
+#define I40E_GLPBLOC_STAT_RD_DATA_IDLE_HI_CNT_HI_SHIFT 0
+#define I40E_GLPBLOC_STAT_RD_DATA_IDLE_HI_CNT_HI_MASK I40E_MASK(0xFFFFFF, I40E_GLPBLOC_STAT_RD_DATA_IDLE_HI_CNT_HI_SHIFT)
+
+#define I40E_GLPBLOC_STAT_RD_DATA_IDLE_LO 0x000A802C /* Reset: CORER */
+#define I40E_GLPBLOC_STAT_RD_DATA_IDLE_LO_CNT_LO_SHIFT 0
+#define I40E_GLPBLOC_STAT_RD_DATA_IDLE_LO_CNT_LO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPBLOC_STAT_RD_DATA_IDLE_LO_CNT_LO_SHIFT)
+
+#define I40E_GLPBLOC_STAT_RD_DATA_XFER_HI 0x000A8038 /* Reset: CORER */
+#define I40E_GLPBLOC_STAT_RD_DATA_XFER_HI_CNT_HI_SHIFT 0
+#define I40E_GLPBLOC_STAT_RD_DATA_XFER_HI_CNT_HI_MASK I40E_MASK(0xFFFFFF, I40E_GLPBLOC_STAT_RD_DATA_XFER_HI_CNT_HI_SHIFT)
+
+#define I40E_GLPBLOC_STAT_RD_DATA_XFER_LO 0x000A8034 /* Reset: CORER */
+#define I40E_GLPBLOC_STAT_RD_DATA_XFER_LO_CNT_LO_SHIFT 0
+#define I40E_GLPBLOC_STAT_RD_DATA_XFER_LO_CNT_LO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPBLOC_STAT_RD_DATA_XFER_LO_CNT_LO_SHIFT)
+
+#define I40E_GLPBLOC_STAT_RD_HIT_HI 0x000A8010 /* Reset: CORER */
+#define I40E_GLPBLOC_STAT_RD_HIT_HI_CNT_HI_SHIFT 0
+#define I40E_GLPBLOC_STAT_RD_HIT_HI_CNT_HI_MASK I40E_MASK(0xFFFFFF, I40E_GLPBLOC_STAT_RD_HIT_HI_CNT_HI_SHIFT)
+
+#define I40E_GLPBLOC_STAT_RD_HIT_LO 0x000A800C /* Reset: CORER */
+#define I40E_GLPBLOC_STAT_RD_HIT_LO_CNT_LO_SHIFT 0
+#define I40E_GLPBLOC_STAT_RD_HIT_LO_CNT_LO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPBLOC_STAT_RD_HIT_LO_CNT_LO_SHIFT)
+
+#define I40E_GLPBLOC_STAT_RD_MISS_HI 0x000A8018 /* Reset: CORER */
+#define I40E_GLPBLOC_STAT_RD_MISS_HI_CNT_HI_SHIFT 0
+#define I40E_GLPBLOC_STAT_RD_MISS_HI_CNT_HI_MASK I40E_MASK(0xFFFFFF, I40E_GLPBLOC_STAT_RD_MISS_HI_CNT_HI_SHIFT)
+
+#define I40E_GLPBLOC_STAT_RD_MISS_LO 0x000A8014 /* Reset: CORER */
+#define I40E_GLPBLOC_STAT_RD_MISS_LO_CNT_LO_SHIFT 0
+#define I40E_GLPBLOC_STAT_RD_MISS_LO_CNT_LO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPBLOC_STAT_RD_MISS_LO_CNT_LO_SHIFT)
+
+#define I40E_GLPBLOC_STAT_WR_DATA_IDLE_HI 0x000A8040 /* Reset: CORER */
+#define I40E_GLPBLOC_STAT_WR_DATA_IDLE_HI_CNT_HI_SHIFT 0
+#define I40E_GLPBLOC_STAT_WR_DATA_IDLE_HI_CNT_HI_MASK I40E_MASK(0xFFFFFF, I40E_GLPBLOC_STAT_WR_DATA_IDLE_HI_CNT_HI_SHIFT)
+
+#define I40E_GLPBLOC_STAT_WR_DATA_IDLE_LO 0x000A803C /* Reset: CORER */
+#define I40E_GLPBLOC_STAT_WR_DATA_IDLE_LO_CNT_LO_SHIFT 0
+#define I40E_GLPBLOC_STAT_WR_DATA_IDLE_LO_CNT_LO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPBLOC_STAT_WR_DATA_IDLE_LO_CNT_LO_SHIFT)
+
+#define I40E_GLPBLOC_STAT_WR_DATA_XFER_HI 0x000A8048 /* Reset: CORER */
+#define I40E_GLPBLOC_STAT_WR_DATA_XFER_HI_CNT_HI_SHIFT 0
+#define I40E_GLPBLOC_STAT_WR_DATA_XFER_HI_CNT_HI_MASK I40E_MASK(0xFFFFFF, I40E_GLPBLOC_STAT_WR_DATA_XFER_HI_CNT_HI_SHIFT)
+
+#define I40E_GLPBLOC_STAT_WR_DATA_XFER_LO 0x000A8044 /* Reset: CORER */
+#define I40E_GLPBLOC_STAT_WR_DATA_XFER_LO_CNT_LO_SHIFT 0
+#define I40E_GLPBLOC_STAT_WR_DATA_XFER_LO_CNT_LO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPBLOC_STAT_WR_DATA_XFER_LO_CNT_LO_SHIFT)
+
+#define I40E_GLPBLOC_STAT_WR_HIT_HI 0x000A8020 /* Reset: CORER */
+#define I40E_GLPBLOC_STAT_WR_HIT_HI_CNT_HI_SHIFT 0
+#define I40E_GLPBLOC_STAT_WR_HIT_HI_CNT_HI_MASK I40E_MASK(0xFFFFFF, I40E_GLPBLOC_STAT_WR_HIT_HI_CNT_HI_SHIFT)
+
+#define I40E_GLPBLOC_STAT_WR_HIT_LO 0x000A801C /* Reset: CORER */
+#define I40E_GLPBLOC_STAT_WR_HIT_LO_CNT_LO_SHIFT 0
+#define I40E_GLPBLOC_STAT_WR_HIT_LO_CNT_LO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPBLOC_STAT_WR_HIT_LO_CNT_LO_SHIFT)
+
+#define I40E_GLPBLOC_STAT_WR_MISS_HI 0x000A8028 /* Reset: CORER */
+#define I40E_GLPBLOC_STAT_WR_MISS_HI_CNT_HI_SHIFT 0
+#define I40E_GLPBLOC_STAT_WR_MISS_HI_CNT_HI_MASK I40E_MASK(0xFFFFFF, I40E_GLPBLOC_STAT_WR_MISS_HI_CNT_HI_SHIFT)
+
+#define I40E_GLPBLOC_STAT_WR_MISS_LO 0x000A8024 /* Reset: CORER */
+#define I40E_GLPBLOC_STAT_WR_MISS_LO_CNT_LO_SHIFT 0
+#define I40E_GLPBLOC_STAT_WR_MISS_LO_CNT_LO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPBLOC_STAT_WR_MISS_LO_CNT_LO_SHIFT)
+
+#define I40E_GLPDOC_CACHE_CTRL 0x000D0000 /* Reset: CORER */
+#define I40E_GLPDOC_CACHE_CTRL_SCALE_FACTOR_SHIFT 0
+#define I40E_GLPDOC_CACHE_CTRL_SCALE_FACTOR_MASK I40E_MASK(0x3, I40E_GLPDOC_CACHE_CTRL_SCALE_FACTOR_SHIFT)
+#define I40E_GLPDOC_CACHE_CTRL_DBGMUX_EN_SHIFT 4
+#define I40E_GLPDOC_CACHE_CTRL_DBGMUX_EN_MASK I40E_MASK(0x1, I40E_GLPDOC_CACHE_CTRL_DBGMUX_EN_SHIFT)
+#define I40E_GLPDOC_CACHE_CTRL_DBGMUX_SEL_LO_SHIFT 8
+#define I40E_GLPDOC_CACHE_CTRL_DBGMUX_SEL_LO_MASK I40E_MASK(0x1F, I40E_GLPDOC_CACHE_CTRL_DBGMUX_SEL_LO_SHIFT)
+#define I40E_GLPDOC_CACHE_CTRL_DBGMUX_SEL_HI_SHIFT 16
+#define I40E_GLPDOC_CACHE_CTRL_DBGMUX_SEL_HI_MASK I40E_MASK(0x1F, I40E_GLPDOC_CACHE_CTRL_DBGMUX_SEL_HI_SHIFT)
+
+#define I40E_GLPDOC_CECC_ERR 0x000D0080 /* Reset: POR */
+#define I40E_GLPDOC_CECC_ERR_UNCOR_ECC_ERR_CNT_SHIFT 0
+#define I40E_GLPDOC_CECC_ERR_UNCOR_ECC_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_GLPDOC_CECC_ERR_UNCOR_ECC_ERR_CNT_SHIFT)
+#define I40E_GLPDOC_CECC_ERR_COR_ECC_ERR_CNT_SHIFT 16
+#define I40E_GLPDOC_CECC_ERR_COR_ECC_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_GLPDOC_CECC_ERR_COR_ECC_ERR_CNT_SHIFT)
+
+#define I40E_GLPDOC_ECC_CTL 0x000D007c /* Reset: POR */
+#define I40E_GLPDOC_ECC_CTL_HOST_ECC_EN_SHIFT 0
+#define I40E_GLPDOC_ECC_CTL_HOST_ECC_EN_MASK I40E_MASK(0x1, I40E_GLPDOC_ECC_CTL_HOST_ECC_EN_SHIFT)
+#define I40E_GLPDOC_ECC_CTL_HOST_ECC_MASK_INT_SHIFT 1
+#define I40E_GLPDOC_ECC_CTL_HOST_ECC_MASK_INT_MASK I40E_MASK(0x1, I40E_GLPDOC_ECC_CTL_HOST_ECC_MASK_INT_SHIFT)
+#define I40E_GLPDOC_ECC_CTL_HOST_ECC_INVERT1_SHIFT 2
+#define I40E_GLPDOC_ECC_CTL_HOST_ECC_INVERT1_MASK I40E_MASK(0x1, I40E_GLPDOC_ECC_CTL_HOST_ECC_INVERT1_SHIFT)
+#define I40E_GLPDOC_ECC_CTL_HOST_ECC_INVERT2_SHIFT 3
+#define I40E_GLPDOC_ECC_CTL_HOST_ECC_INVERT2_MASK I40E_MASK(0x1, I40E_GLPDOC_ECC_CTL_HOST_ECC_INVERT2_SHIFT)
+#define I40E_GLPDOC_ECC_CTL_CLIENT_ECC_EN_SHIFT 4
+#define I40E_GLPDOC_ECC_CTL_CLIENT_ECC_EN_MASK I40E_MASK(0x1, I40E_GLPDOC_ECC_CTL_CLIENT_ECC_EN_SHIFT)
+#define I40E_GLPDOC_ECC_CTL_CLIENT_ECC_MASK_INT_SHIFT 5
+#define I40E_GLPDOC_ECC_CTL_CLIENT_ECC_MASK_INT_MASK I40E_MASK(0x1, I40E_GLPDOC_ECC_CTL_CLIENT_ECC_MASK_INT_SHIFT)
+#define I40E_GLPDOC_ECC_CTL_CLIENT_ECC_INVERT1_SHIFT 6
+#define I40E_GLPDOC_ECC_CTL_CLIENT_ECC_INVERT1_MASK I40E_MASK(0x1, I40E_GLPDOC_ECC_CTL_CLIENT_ECC_INVERT1_SHIFT)
+#define I40E_GLPDOC_ECC_CTL_CLIENT_ECC_INVERT2_SHIFT 7
+#define I40E_GLPDOC_ECC_CTL_CLIENT_ECC_INVERT2_MASK I40E_MASK(0x1, I40E_GLPDOC_ECC_CTL_CLIENT_ECC_INVERT2_SHIFT)
+
+#define I40E_GLPDOC_ERRDATA0 0x000D0070 /* Reset: POR */
+#define I40E_GLPDOC_ERRDATA0_ERROR_CODE_SHIFT 0
+#define I40E_GLPDOC_ERRDATA0_ERROR_CODE_MASK I40E_MASK(0x3F, I40E_GLPDOC_ERRDATA0_ERROR_CODE_SHIFT)
+#define I40E_GLPDOC_ERRDATA0_OBJ_TYPE_SHIFT 8
+#define I40E_GLPDOC_ERRDATA0_OBJ_TYPE_MASK I40E_MASK(0x1F, I40E_GLPDOC_ERRDATA0_OBJ_TYPE_SHIFT)
+#define I40E_GLPDOC_ERRDATA0_VM_VF_TYPE_SHIFT 13
+#define I40E_GLPDOC_ERRDATA0_VM_VF_TYPE_MASK I40E_MASK(0x3, I40E_GLPDOC_ERRDATA0_VM_VF_TYPE_SHIFT)
+#define I40E_GLPDOC_ERRDATA0_VM_VF_NUM_SHIFT 15
+#define I40E_GLPDOC_ERRDATA0_VM_VF_NUM_MASK I40E_MASK(0x1FF, I40E_GLPDOC_ERRDATA0_VM_VF_NUM_SHIFT)
+#define I40E_GLPDOC_ERRDATA0_PF_NUM_SHIFT 24
+#define I40E_GLPDOC_ERRDATA0_PF_NUM_MASK I40E_MASK(0xF, I40E_GLPDOC_ERRDATA0_PF_NUM_SHIFT)
+
+#define I40E_GLPDOC_ERRDATA1 0x000D0074 /* Reset: POR */
+#define I40E_GLPDOC_ERRDATA1_OBJ_INDEX_SHIFT 0
+#define I40E_GLPDOC_ERRDATA1_OBJ_INDEX_MASK I40E_MASK(0xFFFFFFF, I40E_GLPDOC_ERRDATA1_OBJ_INDEX_SHIFT)
+
+#define I40E_GLPDOC_ERRDATA2 0x000D0078 /* Reset: POR */
+#define I40E_GLPDOC_ERRDATA2_LENGTH_SHIFT 0
+#define I40E_GLPDOC_ERRDATA2_LENGTH_MASK I40E_MASK(0x7F, I40E_GLPDOC_ERRDATA2_LENGTH_SHIFT)
+#define I40E_GLPDOC_ERRDATA2_OFFSET_SHIFT 7
+#define I40E_GLPDOC_ERRDATA2_OFFSET_MASK I40E_MASK(0x1FFF, I40E_GLPDOC_ERRDATA2_OFFSET_SHIFT)
+#define I40E_GLPDOC_ERRDATA2_OPTYPE_SHIFT 20
+#define I40E_GLPDOC_ERRDATA2_OPTYPE_MASK I40E_MASK(0x7, I40E_GLPDOC_ERRDATA2_OPTYPE_SHIFT)
+#define I40E_GLPDOC_ERRDATA2_TAG_SHIFT 23
+#define I40E_GLPDOC_ERRDATA2_TAG_MASK I40E_MASK(0x1FF, I40E_GLPDOC_ERRDATA2_TAG_SHIFT)
+
+#define I40E_GLPDOC_ERRINFO 0x000D006C /* Reset: POR */
+#define I40E_GLPDOC_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_GLPDOC_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_GLPDOC_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_GLPDOC_ERRINFO_ERROR_CNT_SHIFT 8
+#define I40E_GLPDOC_ERRINFO_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_GLPDOC_ERRINFO_ERROR_CNT_SHIFT)
+
+#define I40E_GLPDOC_STAT_CTL 0x000D0004 /* Reset: CORER */
+#define I40E_GLPDOC_STAT_CTL_OBJECT_TYPE_SHIFT 0
+#define I40E_GLPDOC_STAT_CTL_OBJECT_TYPE_MASK I40E_MASK(0x1F, I40E_GLPDOC_STAT_CTL_OBJECT_TYPE_SHIFT)
+
+#define I40E_GLPDOC_STAT_OBJ_CNT 0x000D0008 /* Reset: CORER */
+#define I40E_GLPDOC_STAT_OBJ_CNT_OBJECT_COUNT_SHIFT 0
+#define I40E_GLPDOC_STAT_OBJ_CNT_OBJECT_COUNT_MASK I40E_MASK(0x3FFF, I40E_GLPDOC_STAT_OBJ_CNT_OBJECT_COUNT_SHIFT)
+
+#define I40E_GLPDOC_STAT_RD_DATA_IDLE_HI 0x000D0020 /* Reset: CORER */
+#define I40E_GLPDOC_STAT_RD_DATA_IDLE_HI_CNT_HI_SHIFT 0
+#define I40E_GLPDOC_STAT_RD_DATA_IDLE_HI_CNT_HI_MASK I40E_MASK(0xFFFFFF, I40E_GLPDOC_STAT_RD_DATA_IDLE_HI_CNT_HI_SHIFT)
+
+#define I40E_GLPDOC_STAT_RD_DATA_IDLE_LO 0x000D001C /* Reset: CORER */
+#define I40E_GLPDOC_STAT_RD_DATA_IDLE_LO_CNT_LO_SHIFT 0
+#define I40E_GLPDOC_STAT_RD_DATA_IDLE_LO_CNT_LO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPDOC_STAT_RD_DATA_IDLE_LO_CNT_LO_SHIFT)
+
+#define I40E_GLPDOC_STAT_RD_DATA_XFER_HI 0x000D0028 /* Reset: CORER */
+#define I40E_GLPDOC_STAT_RD_DATA_XFER_HI_CNT_HI_SHIFT 0
+#define I40E_GLPDOC_STAT_RD_DATA_XFER_HI_CNT_HI_MASK I40E_MASK(0xFFFFFF, I40E_GLPDOC_STAT_RD_DATA_XFER_HI_CNT_HI_SHIFT)
+
+#define I40E_GLPDOC_STAT_RD_DATA_XFER_LO 0x000D0024 /* Reset: CORER */
+#define I40E_GLPDOC_STAT_RD_DATA_XFER_LO_CNT_LO_SHIFT 0
+#define I40E_GLPDOC_STAT_RD_DATA_XFER_LO_CNT_LO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPDOC_STAT_RD_DATA_XFER_LO_CNT_LO_SHIFT)
+
+#define I40E_GLPDOC_STAT_RD_HIT_HI 0x000D0010 /* Reset: CORER */
+#define I40E_GLPDOC_STAT_RD_HIT_HI_CNT_HI_SHIFT 0
+#define I40E_GLPDOC_STAT_RD_HIT_HI_CNT_HI_MASK I40E_MASK(0xFFFFFF, I40E_GLPDOC_STAT_RD_HIT_HI_CNT_HI_SHIFT)
+
+#define I40E_GLPDOC_STAT_RD_HIT_LO 0x000D000C /* Reset: CORER */
+#define I40E_GLPDOC_STAT_RD_HIT_LO_CNT_LO_SHIFT 0
+#define I40E_GLPDOC_STAT_RD_HIT_LO_CNT_LO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPDOC_STAT_RD_HIT_LO_CNT_LO_SHIFT)
+
+#define I40E_GLPDOC_STAT_RD_MISS_HI 0x000D0018 /* Reset: CORER */
+#define I40E_GLPDOC_STAT_RD_MISS_HI_CNT_HI_SHIFT 0
+#define I40E_GLPDOC_STAT_RD_MISS_HI_CNT_HI_MASK I40E_MASK(0xFFFFFF, I40E_GLPDOC_STAT_RD_MISS_HI_CNT_HI_SHIFT)
+
+#define I40E_GLPDOC_STAT_RD_MISS_LO 0x000D0014 /* Reset: CORER */
+#define I40E_GLPDOC_STAT_RD_MISS_LO_CNT_LO_SHIFT 0
+#define I40E_GLPDOC_STAT_RD_MISS_LO_CNT_LO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPDOC_STAT_RD_MISS_LO_CNT_LO_SHIFT)
+
+/* PF - Intel Internal Registers */
+
+#define I40E_DPU_IMEM_CFG 0x00051064 /* Reset: POR */
+#define I40E_DPU_IMEM_CFG_ECC_EN_SHIFT 0
+#define I40E_DPU_IMEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_DPU_IMEM_CFG_ECC_EN_SHIFT)
+#define I40E_DPU_IMEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_DPU_IMEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_DPU_IMEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_DPU_IMEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_DPU_IMEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_DPU_IMEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_DPU_IMEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_DPU_IMEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_DPU_IMEM_CFG_LS_FORCE_SHIFT)
+#define I40E_DPU_IMEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_DPU_IMEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_DPU_IMEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_DPU_IMEM_CFG_MASK_INT_SHIFT 5
+#define I40E_DPU_IMEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_DPU_IMEM_CFG_MASK_INT_SHIFT)
+#define I40E_DPU_IMEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_DPU_IMEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_DPU_IMEM_CFG_FIX_CNT_SHIFT)
+#define I40E_DPU_IMEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_DPU_IMEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_DPU_IMEM_CFG_ERR_CNT_SHIFT)
+#define I40E_DPU_IMEM_CFG_RME_SHIFT 12
+#define I40E_DPU_IMEM_CFG_RME_MASK I40E_MASK(0x1, I40E_DPU_IMEM_CFG_RME_SHIFT)
+#define I40E_DPU_IMEM_CFG_RM_SHIFT 16
+#define I40E_DPU_IMEM_CFG_RM_MASK I40E_MASK(0xF, I40E_DPU_IMEM_CFG_RM_SHIFT)
+
+#define I40E_DPU_IMEM_STATUS 0x00051068 /* Reset: POR */
+#define I40E_DPU_IMEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_DPU_IMEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_DPU_IMEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_DPU_IMEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_DPU_IMEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_DPU_IMEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_DPU_IMEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_DPU_IMEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_DPU_IMEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_DPU_IMEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_DPU_IMEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_DPU_IMEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_DPU_RECIPE_ADDR_CFG 0x0005106C /* Reset: POR */
+#define I40E_DPU_RECIPE_ADDR_CFG_ECC_EN_SHIFT 0
+#define I40E_DPU_RECIPE_ADDR_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_DPU_RECIPE_ADDR_CFG_ECC_EN_SHIFT)
+#define I40E_DPU_RECIPE_ADDR_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_DPU_RECIPE_ADDR_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_DPU_RECIPE_ADDR_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_DPU_RECIPE_ADDR_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_DPU_RECIPE_ADDR_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_DPU_RECIPE_ADDR_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_DPU_RECIPE_ADDR_CFG_LS_FORCE_SHIFT 3
+#define I40E_DPU_RECIPE_ADDR_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_DPU_RECIPE_ADDR_CFG_LS_FORCE_SHIFT)
+#define I40E_DPU_RECIPE_ADDR_CFG_LS_BYPASS_SHIFT 4
+#define I40E_DPU_RECIPE_ADDR_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_DPU_RECIPE_ADDR_CFG_LS_BYPASS_SHIFT)
+#define I40E_DPU_RECIPE_ADDR_CFG_MASK_INT_SHIFT 5
+#define I40E_DPU_RECIPE_ADDR_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_DPU_RECIPE_ADDR_CFG_MASK_INT_SHIFT)
+#define I40E_DPU_RECIPE_ADDR_CFG_FIX_CNT_SHIFT 8
+#define I40E_DPU_RECIPE_ADDR_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_DPU_RECIPE_ADDR_CFG_FIX_CNT_SHIFT)
+#define I40E_DPU_RECIPE_ADDR_CFG_ERR_CNT_SHIFT 9
+#define I40E_DPU_RECIPE_ADDR_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_DPU_RECIPE_ADDR_CFG_ERR_CNT_SHIFT)
+#define I40E_DPU_RECIPE_ADDR_CFG_RME_SHIFT 12
+#define I40E_DPU_RECIPE_ADDR_CFG_RME_MASK I40E_MASK(0x1, I40E_DPU_RECIPE_ADDR_CFG_RME_SHIFT)
+#define I40E_DPU_RECIPE_ADDR_CFG_RM_SHIFT 16
+#define I40E_DPU_RECIPE_ADDR_CFG_RM_MASK I40E_MASK(0xF, I40E_DPU_RECIPE_ADDR_CFG_RM_SHIFT)
+
+#define I40E_DPU_RECIPE_ADDR_STATUS 0x00051070 /* Reset: POR */
+#define I40E_DPU_RECIPE_ADDR_STATUS_ECC_ERR_SHIFT 0
+#define I40E_DPU_RECIPE_ADDR_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_DPU_RECIPE_ADDR_STATUS_ECC_ERR_SHIFT)
+#define I40E_DPU_RECIPE_ADDR_STATUS_ECC_FIX_SHIFT 1
+#define I40E_DPU_RECIPE_ADDR_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_DPU_RECIPE_ADDR_STATUS_ECC_FIX_SHIFT)
+#define I40E_DPU_RECIPE_ADDR_STATUS_INIT_DONE_SHIFT 2
+#define I40E_DPU_RECIPE_ADDR_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_DPU_RECIPE_ADDR_STATUS_INIT_DONE_SHIFT)
+#define I40E_DPU_RECIPE_ADDR_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_DPU_RECIPE_ADDR_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_DPU_RECIPE_ADDR_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_FLEEP_ECC_COR_ERR 0x000B6150 /* Reset: POR */
+#define I40E_FLEEP_ECC_COR_ERR_CNT_SHIFT 0
+#define I40E_FLEEP_ECC_COR_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_FLEEP_ECC_COR_ERR_CNT_SHIFT)
+
+#define I40E_FLEEP_ECC_UNCOR_ERR 0x000B614C /* Reset: POR */
+#define I40E_FLEEP_ECC_UNCOR_ERR_CNT_SHIFT 0
+#define I40E_FLEEP_ECC_UNCOR_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_FLEEP_ECC_UNCOR_ERR_CNT_SHIFT)
+
+#define I40E_FLEEP_MEM_CFG 0x000B6144 /* Reset: POR */
+#define I40E_FLEEP_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_FLEEP_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_FLEEP_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_FLEEP_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_FLEEP_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_FLEEP_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_FLEEP_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_FLEEP_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_FLEEP_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_FLEEP_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_FLEEP_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_FLEEP_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_FLEEP_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_FLEEP_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_FLEEP_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_FLEEP_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_FLEEP_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_FLEEP_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_FLEEP_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_FLEEP_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_FLEEP_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_FLEEP_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_FLEEP_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_FLEEP_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_FLEEP_MEM_CFG_RME_SHIFT 12
+#define I40E_FLEEP_MEM_CFG_RME_MASK I40E_MASK(0x1, I40E_FLEEP_MEM_CFG_RME_SHIFT)
+#define I40E_FLEEP_MEM_CFG_RM_SHIFT 16
+#define I40E_FLEEP_MEM_CFG_RM_MASK I40E_MASK(0xF, I40E_FLEEP_MEM_CFG_RM_SHIFT)
+
+#define I40E_FLEEP_MEM_STATUS 0x000B6148 /* Reset: POR */
+#define I40E_FLEEP_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_FLEEP_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_FLEEP_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_FLEEP_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_FLEEP_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_FLEEP_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_FLEEP_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_FLEEP_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_FLEEP_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_FLEEP_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_FLEEP_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_FLEEP_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_FOC_CACHE_DBG_CTL 0x000AA0A4 /* Reset: CORER */
+#define I40E_FOC_CACHE_DBG_CTL_ADR_SHIFT 0
+#define I40E_FOC_CACHE_DBG_CTL_ADR_MASK I40E_MASK(0x3FFFF, I40E_FOC_CACHE_DBG_CTL_ADR_SHIFT)
+#define I40E_FOC_CACHE_DBG_CTL_DW_SEL_SHIFT 18
+#define I40E_FOC_CACHE_DBG_CTL_DW_SEL_MASK I40E_MASK(0xFF, I40E_FOC_CACHE_DBG_CTL_DW_SEL_SHIFT)
+#define I40E_FOC_CACHE_DBG_CTL_RD_EN_SHIFT 30
+#define I40E_FOC_CACHE_DBG_CTL_RD_EN_MASK I40E_MASK(0x1, I40E_FOC_CACHE_DBG_CTL_RD_EN_SHIFT)
+#define I40E_FOC_CACHE_DBG_CTL_DONE_SHIFT 31
+#define I40E_FOC_CACHE_DBG_CTL_DONE_MASK I40E_MASK(0x1, I40E_FOC_CACHE_DBG_CTL_DONE_SHIFT)
+
+#define I40E_FOC_CACHE_DBG_DATA 0x000AA0A8 /* Reset: CORER */
+#define I40E_FOC_CACHE_DBG_DATA_RD_DW_SHIFT 0
+#define I40E_FOC_CACHE_DBG_DATA_RD_DW_MASK I40E_MASK(0xFFFFFFFF, I40E_FOC_CACHE_DBG_DATA_RD_DW_SHIFT)
+
+#define I40E_FOC_CACHE_MEM_CFG 0x000AA064 /* Reset: POR */
+#define I40E_FOC_CACHE_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_FOC_CACHE_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_FOC_CACHE_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_FOC_CACHE_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_FOC_CACHE_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_FOC_CACHE_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_FOC_CACHE_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_FOC_CACHE_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_FOC_CACHE_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_FOC_CACHE_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_FOC_CACHE_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_FOC_CACHE_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_FOC_CACHE_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_FOC_CACHE_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_FOC_CACHE_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_FOC_CACHE_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_FOC_CACHE_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_FOC_CACHE_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_FOC_CACHE_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_FOC_CACHE_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_FOC_CACHE_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_FOC_CACHE_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_FOC_CACHE_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_FOC_CACHE_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_FOC_CACHE_MEM_CFG_RME_SHIFT 12
+#define I40E_FOC_CACHE_MEM_CFG_RME_MASK I40E_MASK(0x1, I40E_FOC_CACHE_MEM_CFG_RME_SHIFT)
+#define I40E_FOC_CACHE_MEM_CFG_RM_SHIFT 16
+#define I40E_FOC_CACHE_MEM_CFG_RM_MASK I40E_MASK(0xF, I40E_FOC_CACHE_MEM_CFG_RM_SHIFT)
+
+#define I40E_FOC_CAHCE_MEM_STATUS 0x000AA068 /* Reset: POR */
+#define I40E_FOC_CAHCE_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_FOC_CAHCE_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_FOC_CAHCE_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_FOC_CAHCE_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_FOC_CAHCE_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_FOC_CAHCE_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_FOC_CAHCE_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_FOC_CAHCE_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_FOC_CAHCE_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_FOC_CAHCE_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_FOC_CAHCE_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_FOC_CAHCE_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_FOC_ECC_COR_ERR 0x000AA098 /* Reset: POR */
+#define I40E_FOC_ECC_COR_ERR_CNT_SHIFT 0
+#define I40E_FOC_ECC_COR_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_FOC_ECC_COR_ERR_CNT_SHIFT)
+
+#define I40E_FOC_ECC_UNCOR_ERR 0x000AA094 /* Reset: POR */
+#define I40E_FOC_ECC_UNCOR_ERR_CNT_SHIFT 0
+#define I40E_FOC_ECC_UNCOR_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_FOC_ECC_UNCOR_ERR_CNT_SHIFT)
+
+#define I40E_FOC_EVICT_MEM_CFG 0x000AA084 /* Reset: POR */
+#define I40E_FOC_EVICT_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_FOC_EVICT_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_FOC_EVICT_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_FOC_EVICT_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_FOC_EVICT_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_FOC_EVICT_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_FOC_EVICT_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_FOC_EVICT_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_FOC_EVICT_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_FOC_EVICT_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_FOC_EVICT_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_FOC_EVICT_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_FOC_EVICT_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_FOC_EVICT_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_FOC_EVICT_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_FOC_EVICT_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_FOC_EVICT_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_FOC_EVICT_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_FOC_EVICT_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_FOC_EVICT_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_FOC_EVICT_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_FOC_EVICT_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_FOC_EVICT_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_FOC_EVICT_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_FOC_EVICT_MEM_CFG_RME_SHIFT 12
+#define I40E_FOC_EVICT_MEM_CFG_RME_MASK I40E_MASK(0x1, I40E_FOC_EVICT_MEM_CFG_RME_SHIFT)
+#define I40E_FOC_EVICT_MEM_CFG_RM_SHIFT 16
+#define I40E_FOC_EVICT_MEM_CFG_RM_MASK I40E_MASK(0xF, I40E_FOC_EVICT_MEM_CFG_RM_SHIFT)
+
+#define I40E_FOC_EVICT_MEM_STATUS 0x000AA088 /* Reset: POR */
+#define I40E_FOC_EVICT_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_FOC_EVICT_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_FOC_EVICT_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_FOC_EVICT_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_FOC_EVICT_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_FOC_EVICT_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_FOC_EVICT_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_FOC_EVICT_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_FOC_EVICT_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_FOC_EVICT_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_FOC_EVICT_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_FOC_EVICT_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_FOC_FD_DBG_CTL 0x000AA0B4 /* Reset: CORER */
+#define I40E_FOC_FD_DBG_CTL_ADR_SHIFT 0
+#define I40E_FOC_FD_DBG_CTL_ADR_MASK I40E_MASK(0x3FFFF, I40E_FOC_FD_DBG_CTL_ADR_SHIFT)
+#define I40E_FOC_FD_DBG_CTL_DW_SEL_SHIFT 18
+#define I40E_FOC_FD_DBG_CTL_DW_SEL_MASK I40E_MASK(0xFF, I40E_FOC_FD_DBG_CTL_DW_SEL_SHIFT)
+#define I40E_FOC_FD_DBG_CTL_RD_EN_SHIFT 30
+#define I40E_FOC_FD_DBG_CTL_RD_EN_MASK I40E_MASK(0x1, I40E_FOC_FD_DBG_CTL_RD_EN_SHIFT)
+#define I40E_FOC_FD_DBG_CTL_DONE_SHIFT 31
+#define I40E_FOC_FD_DBG_CTL_DONE_MASK I40E_MASK(0x1, I40E_FOC_FD_DBG_CTL_DONE_SHIFT)
+
+#define I40E_FOC_FD_DBG_DATA 0x000AA0B8 /* Reset: CORER */
+#define I40E_FOC_FD_DBG_DATA_RD_DW_SHIFT 0
+#define I40E_FOC_FD_DBG_DATA_RD_DW_MASK I40E_MASK(0xFFFFFFFF, I40E_FOC_FD_DBG_DATA_RD_DW_SHIFT)
+
+#define I40E_FOC_FD_MEM_CFG 0x000AA08C /* Reset: POR */
+#define I40E_FOC_FD_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_FOC_FD_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_FOC_FD_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_FOC_FD_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_FOC_FD_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_FOC_FD_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_FOC_FD_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_FOC_FD_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_FOC_FD_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_FOC_FD_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_FOC_FD_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_FOC_FD_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_FOC_FD_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_FOC_FD_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_FOC_FD_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_FOC_FD_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_FOC_FD_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_FOC_FD_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_FOC_FD_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_FOC_FD_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_FOC_FD_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_FOC_FD_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_FOC_FD_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_FOC_FD_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_FOC_FD_MEM_CFG_RME_SHIFT 12
+#define I40E_FOC_FD_MEM_CFG_RME_MASK I40E_MASK(0x1, I40E_FOC_FD_MEM_CFG_RME_SHIFT)
+#define I40E_FOC_FD_MEM_CFG_RM_SHIFT 16
+#define I40E_FOC_FD_MEM_CFG_RM_MASK I40E_MASK(0xF, I40E_FOC_FD_MEM_CFG_RM_SHIFT)
+
+#define I40E_FOC_FD_MEM_STATUS 0x000AA090 /* Reset: POR */
+#define I40E_FOC_FD_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_FOC_FD_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_FOC_FD_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_FOC_FD_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_FOC_FD_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_FOC_FD_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_FOC_FD_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_FOC_FD_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_FOC_FD_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_FOC_FD_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_FOC_FD_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_FOC_FD_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_FOC_FILL_MEM_CFG 0x000AA074 /* Reset: POR */
+#define I40E_FOC_FILL_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_FOC_FILL_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_FOC_FILL_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_FOC_FILL_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_FOC_FILL_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_FOC_FILL_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_FOC_FILL_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_FOC_FILL_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_FOC_FILL_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_FOC_FILL_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_FOC_FILL_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_FOC_FILL_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_FOC_FILL_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_FOC_FILL_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_FOC_FILL_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_FOC_FILL_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_FOC_FILL_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_FOC_FILL_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_FOC_FILL_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_FOC_FILL_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_FOC_FILL_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_FOC_FILL_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_FOC_FILL_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_FOC_FILL_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_FOC_FILL_MEM_CFG_RME_SHIFT 12
+#define I40E_FOC_FILL_MEM_CFG_RME_MASK I40E_MASK(0x1, I40E_FOC_FILL_MEM_CFG_RME_SHIFT)
+#define I40E_FOC_FILL_MEM_CFG_RM_SHIFT 16
+#define I40E_FOC_FILL_MEM_CFG_RM_MASK I40E_MASK(0xF, I40E_FOC_FILL_MEM_CFG_RM_SHIFT)
+
+#define I40E_FOC_FILL_MEM_STATUS 0x000AA078 /* Reset: POR */
+#define I40E_FOC_FILL_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_FOC_FILL_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_FOC_FILL_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_FOC_FILL_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_FOC_FILL_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_FOC_FILL_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_FOC_FILL_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_FOC_FILL_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_FOC_FILL_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_FOC_FILL_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_FOC_FILL_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_FOC_FILL_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_FOC_TAG_DBG_CTL 0x000AA09C /* Reset: CORER */
+#define I40E_FOC_TAG_DBG_CTL_ADR_SHIFT 0
+#define I40E_FOC_TAG_DBG_CTL_ADR_MASK I40E_MASK(0x3FFFF, I40E_FOC_TAG_DBG_CTL_ADR_SHIFT)
+#define I40E_FOC_TAG_DBG_CTL_DW_SEL_SHIFT 18
+#define I40E_FOC_TAG_DBG_CTL_DW_SEL_MASK I40E_MASK(0xFF, I40E_FOC_TAG_DBG_CTL_DW_SEL_SHIFT)
+#define I40E_FOC_TAG_DBG_CTL_RD_EN_SHIFT 30
+#define I40E_FOC_TAG_DBG_CTL_RD_EN_MASK I40E_MASK(0x1, I40E_FOC_TAG_DBG_CTL_RD_EN_SHIFT)
+#define I40E_FOC_TAG_DBG_CTL_DONE_SHIFT 31
+#define I40E_FOC_TAG_DBG_CTL_DONE_MASK I40E_MASK(0x1, I40E_FOC_TAG_DBG_CTL_DONE_SHIFT)
+
+#define I40E_FOC_TAG_DBG_DATA 0x000AA0A0 /* Reset: CORER */
+#define I40E_FOC_TAG_DBG_DATA_RD_DW_SHIFT 0
+#define I40E_FOC_TAG_DBG_DATA_RD_DW_MASK I40E_MASK(0xFFFFFFFF, I40E_FOC_TAG_DBG_DATA_RD_DW_SHIFT)
+
+#define I40E_FOC_TAG_MEM_CFG 0x000AA06C /* Reset: POR */
+#define I40E_FOC_TAG_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_FOC_TAG_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_FOC_TAG_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_FOC_TAG_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_FOC_TAG_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_FOC_TAG_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_FOC_TAG_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_FOC_TAG_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_FOC_TAG_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_FOC_TAG_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_FOC_TAG_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_FOC_TAG_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_FOC_TAG_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_FOC_TAG_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_FOC_TAG_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_FOC_TAG_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_FOC_TAG_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_FOC_TAG_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_FOC_TAG_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_FOC_TAG_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_FOC_TAG_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_FOC_TAG_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_FOC_TAG_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_FOC_TAG_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_FOC_TAG_MEM_CFG_RME_SHIFT 12
+#define I40E_FOC_TAG_MEM_CFG_RME_MASK I40E_MASK(0x1, I40E_FOC_TAG_MEM_CFG_RME_SHIFT)
+#define I40E_FOC_TAG_MEM_CFG_RM_SHIFT 16
+#define I40E_FOC_TAG_MEM_CFG_RM_MASK I40E_MASK(0xF, I40E_FOC_TAG_MEM_CFG_RM_SHIFT)
+
+#define I40E_FOC_TAG_MEM_STATUS 0x000AA070 /* Reset: POR */
+#define I40E_FOC_TAG_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_FOC_TAG_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_FOC_TAG_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_FOC_TAG_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_FOC_TAG_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_FOC_TAG_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_FOC_TAG_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_FOC_TAG_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_FOC_TAG_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_FOC_TAG_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_FOC_TAG_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_FOC_TAG_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_FVL_STAT_ECC_COR_ERR 0x003800F4 /* Reset: POR */
+#define I40E_FVL_STAT_ECC_COR_ERR_CNT_SHIFT 0
+#define I40E_FVL_STAT_ECC_COR_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_FVL_STAT_ECC_COR_ERR_CNT_SHIFT)
+
+#define I40E_FVL_STAT_ECC_UNCOR_ERR 0x003800F0 /* Reset: POR */
+#define I40E_FVL_STAT_ECC_UNCOR_ERR_CNT_SHIFT 0
+#define I40E_FVL_STAT_ECC_UNCOR_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_FVL_STAT_ECC_UNCOR_ERR_CNT_SHIFT)
+
+#define I40E_FVL_STAT_MEM_CFG(_i) (0x00380000 + ((_i) * 4)) /* _i=0...29 */ /* Reset: POR */
+#define I40E_FVL_STAT_MEM_CFG_MAX_INDEX 29
+#define I40E_FVL_STAT_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_FVL_STAT_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_FVL_STAT_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_FVL_STAT_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_FVL_STAT_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_FVL_STAT_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_FVL_STAT_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_FVL_STAT_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_FVL_STAT_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_FVL_STAT_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_FVL_STAT_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_FVL_STAT_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_FVL_STAT_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_FVL_STAT_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_FVL_STAT_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_FVL_STAT_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_FVL_STAT_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_FVL_STAT_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_FVL_STAT_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_FVL_STAT_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_FVL_STAT_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_FVL_STAT_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_FVL_STAT_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_FVL_STAT_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_FVL_STAT_MEM_CFG_RME_SHIFT 12
+#define I40E_FVL_STAT_MEM_CFG_RME_MASK I40E_MASK(0x1, I40E_FVL_STAT_MEM_CFG_RME_SHIFT)
+#define I40E_FVL_STAT_MEM_CFG_RM_SHIFT 16
+#define I40E_FVL_STAT_MEM_CFG_RM_MASK I40E_MASK(0xF, I40E_FVL_STAT_MEM_CFG_RM_SHIFT)
+
+#define I40E_FVL_STAT_MEM_STATUS(_i) (0x00380078 + ((_i) * 4)) /* _i=0...29 */ /* Reset: POR */
+#define I40E_FVL_STAT_MEM_STATUS_MAX_INDEX 29
+#define I40E_FVL_STAT_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_FVL_STAT_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_FVL_STAT_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_FVL_STAT_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_FVL_STAT_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_FVL_STAT_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_FVL_STAT_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_FVL_STAT_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_FVL_STAT_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_FVL_STAT_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_FVL_STAT_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_FVL_STAT_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_GL_CRITERRMODMASK0 0x000B4020 /* Reset: CORER */
+#define I40E_GL_CRITERRMODMASK0_MODULE_MASK0_SHIFT 1
+#define I40E_GL_CRITERRMODMASK0_MODULE_MASK0_MASK I40E_MASK(0x7FFFFFFF, I40E_GL_CRITERRMODMASK0_MODULE_MASK0_SHIFT)
+
+#define I40E_GL_CRITERRMODMASK1 0x000B4024 /* Reset: CORER */
+#define I40E_GL_CRITERRMODMASK1_MODULE_MASK1_SHIFT 1
+#define I40E_GL_CRITERRMODMASK1_MODULE_MASK1_MASK I40E_MASK(0x7FFFFFFF, I40E_GL_CRITERRMODMASK1_MODULE_MASK1_SHIFT)
+
+#define I40E_GL_CRITERRMODMASK2 0x000B4028 /* Reset: CORER */
+#define I40E_GL_CRITERRMODMASK2_MODULE_MASK2_SHIFT 1
+#define I40E_GL_CRITERRMODMASK2_MODULE_MASK2_MASK I40E_MASK(0x7FFFFFFF, I40E_GL_CRITERRMODMASK2_MODULE_MASK2_SHIFT)
+
+#define I40E_GL_CRITERRMODMASK3 0x000B402C /* Reset: CORER */
+#define I40E_GL_CRITERRMODMASK3_MODULE_MASK3_SHIFT 1
+#define I40E_GL_CRITERRMODMASK3_MODULE_MASK3_MASK I40E_MASK(0x7FFFFFFF, I40E_GL_CRITERRMODMASK3_MODULE_MASK3_SHIFT)
+
+#define I40E_GL_CRITERRTRGTMASK0 0x000B4040 /* Reset: CORER */
+#define I40E_GL_CRITERRTRGTMASK0_TRGT_MATCH_0_INST_SHIFT 0
+#define I40E_GL_CRITERRTRGTMASK0_TRGT_MATCH_0_INST_MASK I40E_MASK(0x3F, I40E_GL_CRITERRTRGTMASK0_TRGT_MATCH_0_INST_SHIFT)
+#define I40E_GL_CRITERRTRGTMASK0_TRGT_MATCH_0_TYPE_SHIFT 6
+#define I40E_GL_CRITERRTRGTMASK0_TRGT_MATCH_0_TYPE_MASK I40E_MASK(0x3, I40E_GL_CRITERRTRGTMASK0_TRGT_MATCH_0_TYPE_SHIFT)
+#define I40E_GL_CRITERRTRGTMASK0_TRGT_MATCH_0_MODULE_SHIFT 8
+#define I40E_GL_CRITERRTRGTMASK0_TRGT_MATCH_0_MODULE_MASK I40E_MASK(0xFF, I40E_GL_CRITERRTRGTMASK0_TRGT_MATCH_0_MODULE_SHIFT)
+#define I40E_GL_CRITERRTRGTMASK0_TRGT_MATCH_1_INST_SHIFT 16
+#define I40E_GL_CRITERRTRGTMASK0_TRGT_MATCH_1_INST_MASK I40E_MASK(0x3F, I40E_GL_CRITERRTRGTMASK0_TRGT_MATCH_1_INST_SHIFT)
+#define I40E_GL_CRITERRTRGTMASK0_TRGT_MATCH_1_TYPE_SHIFT 22
+#define I40E_GL_CRITERRTRGTMASK0_TRGT_MATCH_1_TYPE_MASK I40E_MASK(0x3, I40E_GL_CRITERRTRGTMASK0_TRGT_MATCH_1_TYPE_SHIFT)
+#define I40E_GL_CRITERRTRGTMASK0_TRGT_MATCH_1_MODULE_SHIFT 24
+#define I40E_GL_CRITERRTRGTMASK0_TRGT_MATCH_1_MODULE_MASK I40E_MASK(0xFF, I40E_GL_CRITERRTRGTMASK0_TRGT_MATCH_1_MODULE_SHIFT)
+
+#define I40E_GL_CRITERRTRGTMASK1 0x000B4044 /* Reset: CORER */
+#define I40E_GL_CRITERRTRGTMASK1_TRGT_MATCH_2_INST_SHIFT 0
+#define I40E_GL_CRITERRTRGTMASK1_TRGT_MATCH_2_INST_MASK I40E_MASK(0x3F, I40E_GL_CRITERRTRGTMASK1_TRGT_MATCH_2_INST_SHIFT)
+#define I40E_GL_CRITERRTRGTMASK1_TRGT_MATCH_2_TYPE_SHIFT 6
+#define I40E_GL_CRITERRTRGTMASK1_TRGT_MATCH_2_TYPE_MASK I40E_MASK(0x3, I40E_GL_CRITERRTRGTMASK1_TRGT_MATCH_2_TYPE_SHIFT)
+#define I40E_GL_CRITERRTRGTMASK1_TRGT_MATCH_2_MODULE_SHIFT 8
+#define I40E_GL_CRITERRTRGTMASK1_TRGT_MATCH_2_MODULE_MASK I40E_MASK(0xFF, I40E_GL_CRITERRTRGTMASK1_TRGT_MATCH_2_MODULE_SHIFT)
+#define I40E_GL_CRITERRTRGTMASK1_TRGT_MATCH_3_INST_SHIFT 16
+#define I40E_GL_CRITERRTRGTMASK1_TRGT_MATCH_3_INST_MASK I40E_MASK(0x3F, I40E_GL_CRITERRTRGTMASK1_TRGT_MATCH_3_INST_SHIFT)
+#define I40E_GL_CRITERRTRGTMASK1_TRGT_MATCH_3_TYPE_SHIFT 22
+#define I40E_GL_CRITERRTRGTMASK1_TRGT_MATCH_3_TYPE_MASK I40E_MASK(0x3, I40E_GL_CRITERRTRGTMASK1_TRGT_MATCH_3_TYPE_SHIFT)
+#define I40E_GL_CRITERRTRGTMASK1_TRGT_MATCH_3_MODULE_SHIFT 24
+#define I40E_GL_CRITERRTRGTMASK1_TRGT_MATCH_3_MODULE_MASK I40E_MASK(0xFF, I40E_GL_CRITERRTRGTMASK1_TRGT_MATCH_3_MODULE_SHIFT)
+
+#define I40E_GL_CRITERRTRGTMASK2 0x000B4048 /* Reset: CORER */
+#define I40E_GL_CRITERRTRGTMASK2_TRGT_MATCH_4_INST_SHIFT 0
+#define I40E_GL_CRITERRTRGTMASK2_TRGT_MATCH_4_INST_MASK I40E_MASK(0x3F, I40E_GL_CRITERRTRGTMASK2_TRGT_MATCH_4_INST_SHIFT)
+#define I40E_GL_CRITERRTRGTMASK2_TRGT_MATCH_4_TYPE_SHIFT 6
+#define I40E_GL_CRITERRTRGTMASK2_TRGT_MATCH_4_TYPE_MASK I40E_MASK(0x3, I40E_GL_CRITERRTRGTMASK2_TRGT_MATCH_4_TYPE_SHIFT)
+#define I40E_GL_CRITERRTRGTMASK2_TRGT_MATCH_4_MODULE_SHIFT 8
+#define I40E_GL_CRITERRTRGTMASK2_TRGT_MATCH_4_MODULE_MASK I40E_MASK(0xFF, I40E_GL_CRITERRTRGTMASK2_TRGT_MATCH_4_MODULE_SHIFT)
+#define I40E_GL_CRITERRTRGTMASK2_TRGT_MATCH_5_INST_SHIFT 16
+#define I40E_GL_CRITERRTRGTMASK2_TRGT_MATCH_5_INST_MASK I40E_MASK(0x3F, I40E_GL_CRITERRTRGTMASK2_TRGT_MATCH_5_INST_SHIFT)
+#define I40E_GL_CRITERRTRGTMASK2_TRGT_MATCH_5_TYPE_SHIFT 22
+#define I40E_GL_CRITERRTRGTMASK2_TRGT_MATCH_5_TYPE_MASK I40E_MASK(0x3, I40E_GL_CRITERRTRGTMASK2_TRGT_MATCH_5_TYPE_SHIFT)
+#define I40E_GL_CRITERRTRGTMASK2_TRGT_MATCH_5_MODULE_SHIFT 24
+#define I40E_GL_CRITERRTRGTMASK2_TRGT_MATCH_5_MODULE_MASK I40E_MASK(0xFF, I40E_GL_CRITERRTRGTMASK2_TRGT_MATCH_5_MODULE_SHIFT)
+
+#define I40E_GL_CRITERRTRGTMASK3 0x000B404C /* Reset: CORER */
+#define I40E_GL_CRITERRTRGTMASK3_TRGT_MATCH_6_INST_SHIFT 0
+#define I40E_GL_CRITERRTRGTMASK3_TRGT_MATCH_6_INST_MASK I40E_MASK(0x3F, I40E_GL_CRITERRTRGTMASK3_TRGT_MATCH_6_INST_SHIFT)
+#define I40E_GL_CRITERRTRGTMASK3_TRGT_MATCH_6_TYPE_SHIFT 6
+#define I40E_GL_CRITERRTRGTMASK3_TRGT_MATCH_6_TYPE_MASK I40E_MASK(0x3, I40E_GL_CRITERRTRGTMASK3_TRGT_MATCH_6_TYPE_SHIFT)
+#define I40E_GL_CRITERRTRGTMASK3_TRGT_MATCH_6_MODULE_SHIFT 8
+#define I40E_GL_CRITERRTRGTMASK3_TRGT_MATCH_6_MODULE_MASK I40E_MASK(0xFF, I40E_GL_CRITERRTRGTMASK3_TRGT_MATCH_6_MODULE_SHIFT)
+#define I40E_GL_CRITERRTRGTMASK3_TRGT_MATCH_7_INST_SHIFT 16
+#define I40E_GL_CRITERRTRGTMASK3_TRGT_MATCH_7_INST_MASK I40E_MASK(0x3F, I40E_GL_CRITERRTRGTMASK3_TRGT_MATCH_7_INST_SHIFT)
+#define I40E_GL_CRITERRTRGTMASK3_TRGT_MATCH_7_TYPE_SHIFT 22
+#define I40E_GL_CRITERRTRGTMASK3_TRGT_MATCH_7_TYPE_MASK I40E_MASK(0x3, I40E_GL_CRITERRTRGTMASK3_TRGT_MATCH_7_TYPE_SHIFT)
+#define I40E_GL_CRITERRTRGTMASK3_TRGT_MATCH_7_MODULE_SHIFT 24
+#define I40E_GL_CRITERRTRGTMASK3_TRGT_MATCH_7_MODULE_MASK I40E_MASK(0xFF, I40E_GL_CRITERRTRGTMASK3_TRGT_MATCH_7_MODULE_SHIFT)
+
+#define I40E_GL_CRITERRTRGTMASK4 0x000B4050 /* Reset: CORER */
+#define I40E_GL_CRITERRTRGTMASK4_TRGT_MATCH_8_INST_SHIFT 0
+#define I40E_GL_CRITERRTRGTMASK4_TRGT_MATCH_8_INST_MASK I40E_MASK(0x3F, I40E_GL_CRITERRTRGTMASK4_TRGT_MATCH_8_INST_SHIFT)
+#define I40E_GL_CRITERRTRGTMASK4_TRGT_MATCH_8_TYPE_SHIFT 6
+#define I40E_GL_CRITERRTRGTMASK4_TRGT_MATCH_8_TYPE_MASK I40E_MASK(0x3, I40E_GL_CRITERRTRGTMASK4_TRGT_MATCH_8_TYPE_SHIFT)
+#define I40E_GL_CRITERRTRGTMASK4_TRGT_MATCH_8_MODULE_SHIFT 8
+#define I40E_GL_CRITERRTRGTMASK4_TRGT_MATCH_8_MODULE_MASK I40E_MASK(0xFF, I40E_GL_CRITERRTRGTMASK4_TRGT_MATCH_8_MODULE_SHIFT)
+#define I40E_GL_CRITERRTRGTMASK4_TRGT_MATCH_9_INST_SHIFT 16
+#define I40E_GL_CRITERRTRGTMASK4_TRGT_MATCH_9_INST_MASK I40E_MASK(0x3F, I40E_GL_CRITERRTRGTMASK4_TRGT_MATCH_9_INST_SHIFT)
+#define I40E_GL_CRITERRTRGTMASK4_TRGT_MATCH_9_TYPE_SHIFT 22
+#define I40E_GL_CRITERRTRGTMASK4_TRGT_MATCH_9_TYPE_MASK I40E_MASK(0x3, I40E_GL_CRITERRTRGTMASK4_TRGT_MATCH_9_TYPE_SHIFT)
+#define I40E_GL_CRITERRTRGTMASK4_TRGT_MATCH_9_MODULE_SHIFT 24
+#define I40E_GL_CRITERRTRGTMASK4_TRGT_MATCH_9_MODULE_MASK I40E_MASK(0xFF, I40E_GL_CRITERRTRGTMASK4_TRGT_MATCH_9_MODULE_SHIFT)
+
+#define I40E_GL_CRITERRTRGTMASK5 0x000B4054 /* Reset: CORER */
+#define I40E_GL_CRITERRTRGTMASK5_TRGT_MATCH_10_INST_SHIFT 0
+#define I40E_GL_CRITERRTRGTMASK5_TRGT_MATCH_10_INST_MASK I40E_MASK(0x3F, I40E_GL_CRITERRTRGTMASK5_TRGT_MATCH_10_INST_SHIFT)
+#define I40E_GL_CRITERRTRGTMASK5_TRGT_MATCH_10_TYPE_SHIFT 6
+#define I40E_GL_CRITERRTRGTMASK5_TRGT_MATCH_10_TYPE_MASK I40E_MASK(0x3, I40E_GL_CRITERRTRGTMASK5_TRGT_MATCH_10_TYPE_SHIFT)
+#define I40E_GL_CRITERRTRGTMASK5_TRGT_MATCH_10_MODULE_SHIFT 8
+#define I40E_GL_CRITERRTRGTMASK5_TRGT_MATCH_10_MODULE_MASK I40E_MASK(0xFF, I40E_GL_CRITERRTRGTMASK5_TRGT_MATCH_10_MODULE_SHIFT)
+#define I40E_GL_CRITERRTRGTMASK5_TRGT_MATCH_11_INST_SHIFT 16
+#define I40E_GL_CRITERRTRGTMASK5_TRGT_MATCH_11_INST_MASK I40E_MASK(0x3F, I40E_GL_CRITERRTRGTMASK5_TRGT_MATCH_11_INST_SHIFT)
+#define I40E_GL_CRITERRTRGTMASK5_TRGT_MATCH_11_TYPE_SHIFT 22
+#define I40E_GL_CRITERRTRGTMASK5_TRGT_MATCH_11_TYPE_MASK I40E_MASK(0x3, I40E_GL_CRITERRTRGTMASK5_TRGT_MATCH_11_TYPE_SHIFT)
+#define I40E_GL_CRITERRTRGTMASK5_TRGT_MATCH_11_MODULE_SHIFT 24
+#define I40E_GL_CRITERRTRGTMASK5_TRGT_MATCH_11_MODULE_MASK I40E_MASK(0xFF, I40E_GL_CRITERRTRGTMASK5_TRGT_MATCH_11_MODULE_SHIFT)
+
+#define I40E_GL_CRITERRTRGTMASK6 0x000B4058 /* Reset: CORER */
+#define I40E_GL_CRITERRTRGTMASK6_TRGT_MATCH_12_INST_SHIFT 0
+#define I40E_GL_CRITERRTRGTMASK6_TRGT_MATCH_12_INST_MASK I40E_MASK(0x3F, I40E_GL_CRITERRTRGTMASK6_TRGT_MATCH_12_INST_SHIFT)
+#define I40E_GL_CRITERRTRGTMASK6_TRGT_MATCH_12_TYPE_SHIFT 6
+#define I40E_GL_CRITERRTRGTMASK6_TRGT_MATCH_12_TYPE_MASK I40E_MASK(0x3, I40E_GL_CRITERRTRGTMASK6_TRGT_MATCH_12_TYPE_SHIFT)
+#define I40E_GL_CRITERRTRGTMASK6_TRGT_MATCH_12_MODULE_SHIFT 8
+#define I40E_GL_CRITERRTRGTMASK6_TRGT_MATCH_12_MODULE_MASK I40E_MASK(0xFF, I40E_GL_CRITERRTRGTMASK6_TRGT_MATCH_12_MODULE_SHIFT)
+#define I40E_GL_CRITERRTRGTMASK6_TRGT_MATCH_13_INST_SHIFT 16
+#define I40E_GL_CRITERRTRGTMASK6_TRGT_MATCH_13_INST_MASK I40E_MASK(0x3F, I40E_GL_CRITERRTRGTMASK6_TRGT_MATCH_13_INST_SHIFT)
+#define I40E_GL_CRITERRTRGTMASK6_TRGT_MATCH_13_TYPE_SHIFT 22
+#define I40E_GL_CRITERRTRGTMASK6_TRGT_MATCH_13_TYPE_MASK I40E_MASK(0x3, I40E_GL_CRITERRTRGTMASK6_TRGT_MATCH_13_TYPE_SHIFT)
+#define I40E_GL_CRITERRTRGTMASK6_TRGT_MATCH_13_MODULE_SHIFT 24
+#define I40E_GL_CRITERRTRGTMASK6_TRGT_MATCH_13_MODULE_MASK I40E_MASK(0xFF, I40E_GL_CRITERRTRGTMASK6_TRGT_MATCH_13_MODULE_SHIFT)
+
+#define I40E_GL_CRITERRTRGTMASK7 0x000B405C /* Reset: CORER */
+#define I40E_GL_CRITERRTRGTMASK7_TRGT_MATCH_14_INST_SHIFT 0
+#define I40E_GL_CRITERRTRGTMASK7_TRGT_MATCH_14_INST_MASK I40E_MASK(0x3F, I40E_GL_CRITERRTRGTMASK7_TRGT_MATCH_14_INST_SHIFT)
+#define I40E_GL_CRITERRTRGTMASK7_TRGT_MATCH_14_TYPE_SHIFT 6
+#define I40E_GL_CRITERRTRGTMASK7_TRGT_MATCH_14_TYPE_MASK I40E_MASK(0x3, I40E_GL_CRITERRTRGTMASK7_TRGT_MATCH_14_TYPE_SHIFT)
+#define I40E_GL_CRITERRTRGTMASK7_TRGT_MATCH_14_MODULE_SHIFT 8
+#define I40E_GL_CRITERRTRGTMASK7_TRGT_MATCH_14_MODULE_MASK I40E_MASK(0xFF, I40E_GL_CRITERRTRGTMASK7_TRGT_MATCH_14_MODULE_SHIFT)
+#define I40E_GL_CRITERRTRGTMASK7_TRGT_MATCH_15_INST_SHIFT 16
+#define I40E_GL_CRITERRTRGTMASK7_TRGT_MATCH_15_INST_MASK I40E_MASK(0x3F, I40E_GL_CRITERRTRGTMASK7_TRGT_MATCH_15_INST_SHIFT)
+#define I40E_GL_CRITERRTRGTMASK7_TRGT_MATCH_15_TYPE_SHIFT 22
+#define I40E_GL_CRITERRTRGTMASK7_TRGT_MATCH_15_TYPE_MASK I40E_MASK(0x3, I40E_GL_CRITERRTRGTMASK7_TRGT_MATCH_15_TYPE_SHIFT)
+#define I40E_GL_CRITERRTRGTMASK7_TRGT_MATCH_15_MODULE_SHIFT 24
+#define I40E_GL_CRITERRTRGTMASK7_TRGT_MATCH_15_MODULE_MASK I40E_MASK(0xFF, I40E_GL_CRITERRTRGTMASK7_TRGT_MATCH_15_MODULE_SHIFT)
+
+#define I40E_GL_DBG_DATA 0x0026998C /* Reset: CORER */
+#define I40E_GL_DBG_DATA_GL_DBG_DATA_SHIFT 0
+#define I40E_GL_DBG_DATA_GL_DBG_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_DBG_DATA_GL_DBG_DATA_SHIFT)
+
+#define I40E_GL_DBGEMPR 0x00083108 /* Reset: EMPR */
+#define I40E_GL_DBGEMPR_RSV_DATA_SHIFT 0
+#define I40E_GL_DBGEMPR_RSV_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_DBGEMPR_RSV_DATA_SHIFT)
+
+#define I40E_GL_DBGPOR 0x00083104 /* Reset: POR */
+#define I40E_GL_DBGPOR_ROM_EMPR_TRIGGER_SHIFT 0
+#define I40E_GL_DBGPOR_ROM_EMPR_TRIGGER_MASK I40E_MASK(0x1, I40E_GL_DBGPOR_ROM_EMPR_TRIGGER_SHIFT)
+#define I40E_GL_DBGPOR_RSV_DATA_SHIFT 1
+#define I40E_GL_DBGPOR_RSV_DATA_MASK I40E_MASK(0x7FFFFFFF, I40E_GL_DBGPOR_RSV_DATA_SHIFT)
+
+#define I40E_GL_DBGRST 0x0008310C /* Reset: POR */
+#define I40E_GL_DBGRST_PRST_RSV_DATA_SHIFT 0
+#define I40E_GL_DBGRST_PRST_RSV_DATA_MASK I40E_MASK(0xFF, I40E_GL_DBGRST_PRST_RSV_DATA_SHIFT)
+#define I40E_GL_DBGRST_IBR_RSV_DATA_SHIFT 8
+#define I40E_GL_DBGRST_IBR_RSV_DATA_MASK I40E_MASK(0xFF, I40E_GL_DBGRST_IBR_RSV_DATA_SHIFT)
+#define I40E_GL_DBGRST_GLBR_RSV_DATA_SHIFT 16
+#define I40E_GL_DBGRST_GLBR_RSV_DATA_MASK I40E_MASK(0xFF, I40E_GL_DBGRST_GLBR_RSV_DATA_SHIFT)
+#define I40E_GL_DBGRST_CORER_RSV_DATA_SHIFT 24
+#define I40E_GL_DBGRST_CORER_RSV_DATA_MASK I40E_MASK(0xFF, I40E_GL_DBGRST_CORER_RSV_DATA_SHIFT)
+
+#define I40E_GL_MDEF_TR_CFG 0x00269A5C /* Reset: CORER */
+#define I40E_GL_MDEF_TR_CFG_TCP_TR_IDX_SHIFT 0
+#define I40E_GL_MDEF_TR_CFG_TCP_TR_IDX_MASK I40E_MASK(0x3F, I40E_GL_MDEF_TR_CFG_TCP_TR_IDX_SHIFT)
+#define I40E_GL_MDEF_TR_CFG_UDP_TR_IDX_SHIFT 6
+#define I40E_GL_MDEF_TR_CFG_UDP_TR_IDX_MASK I40E_MASK(0x3F, I40E_GL_MDEF_TR_CFG_UDP_TR_IDX_SHIFT)
+#define I40E_GL_MDEF_TR_CFG_IPV4_TR_IDX_SHIFT 12
+#define I40E_GL_MDEF_TR_CFG_IPV4_TR_IDX_MASK I40E_MASK(0x3F, I40E_GL_MDEF_TR_CFG_IPV4_TR_IDX_SHIFT)
+#define I40E_GL_MDEF_TR_CFG_IPV6_TR_IDX_SHIFT 18
+#define I40E_GL_MDEF_TR_CFG_IPV6_TR_IDX_MASK I40E_MASK(0x3F, I40E_GL_MDEF_TR_CFG_IPV6_TR_IDX_SHIFT)
+#define I40E_GL_MDEF_TR_CFG_VLAN_TR_IDX_SHIFT 24
+#define I40E_GL_MDEF_TR_CFG_VLAN_TR_IDX_MASK I40E_MASK(0x3F, I40E_GL_MDEF_TR_CFG_VLAN_TR_IDX_SHIFT)
+
+#define I40E_GL_MDEF_TR_EXT_CFG 0x00269A64 /* Reset: CORER */
+#define I40E_GL_MDEF_TR_EXT_CFG_FCTRL_TR_IDX_SHIFT 0
+#define I40E_GL_MDEF_TR_EXT_CFG_FCTRL_TR_IDX_MASK I40E_MASK(0x3F, I40E_GL_MDEF_TR_EXT_CFG_FCTRL_TR_IDX_SHIFT)
+
+#define I40E_GL_MTG_HSH_CTL 0x00269984 /* Reset: CORER */
+#define I40E_GL_MTG_HSH_CTL_HASH_MODE_SHIFT 0
+#define I40E_GL_MTG_HSH_CTL_HASH_MODE_MASK I40E_MASK(0x3, I40E_GL_MTG_HSH_CTL_HASH_MODE_SHIFT)
+
+#define I40E_GL_MTG_MAP 0x0026994C /* Reset: CORER */
+#define I40E_GL_MTG_MAP_ETAG_FV_IDX_SHIFT 0
+#define I40E_GL_MTG_MAP_ETAG_FV_IDX_MASK I40E_MASK(0x3F, I40E_GL_MTG_MAP_ETAG_FV_IDX_SHIFT)
+#define I40E_GL_MTG_MAP_ETAG_TR_IDX_SHIFT 6
+#define I40E_GL_MTG_MAP_ETAG_TR_IDX_MASK I40E_MASK(0x3F, I40E_GL_MTG_MAP_ETAG_TR_IDX_SHIFT)
+#define I40E_GL_MTG_MAP_SRC_TAG_FV_IDX_SHIFT 12
+#define I40E_GL_MTG_MAP_SRC_TAG_FV_IDX_MASK I40E_MASK(0x3F, I40E_GL_MTG_MAP_SRC_TAG_FV_IDX_SHIFT)
+#define I40E_GL_MTG_MAP_STAG_FV_IDX_SHIFT 18
+#define I40E_GL_MTG_MAP_STAG_FV_IDX_MASK I40E_MASK(0x3F, I40E_GL_MTG_MAP_STAG_FV_IDX_SHIFT)
+#define I40E_GL_MTG_MAP_STAG_TR_IDX_SHIFT 24
+#define I40E_GL_MTG_MAP_STAG_TR_IDX_MASK I40E_MASK(0x3F, I40E_GL_MTG_MAP_STAG_TR_IDX_SHIFT)
+
+#define I40E_GL_MTG_MAP_EXT 0x00269954 /* Reset: CORER */
+#define I40E_GL_MTG_MAP_EXT_O_VLAN_FV_IDX_SHIFT 0
+#define I40E_GL_MTG_MAP_EXT_O_VLAN_FV_IDX_MASK I40E_MASK(0x3F, I40E_GL_MTG_MAP_EXT_O_VLAN_FV_IDX_SHIFT)
+#define I40E_GL_MTG_MAP_EXT_O_VLAN_TR_IDX_SHIFT 6
+#define I40E_GL_MTG_MAP_EXT_O_VLAN_TR_IDX_MASK I40E_MASK(0x3F, I40E_GL_MTG_MAP_EXT_O_VLAN_TR_IDX_SHIFT)
+
+#define I40E_GL_MTG_REP_FLU_CTL 0x00269964 /* Reset: CORER */
+#define I40E_GL_MTG_REP_FLU_CTL_FLU_MODE_SHIFT 0
+#define I40E_GL_MTG_REP_FLU_CTL_FLU_MODE_MASK I40E_MASK(0xF, I40E_GL_MTG_REP_FLU_CTL_FLU_MODE_SHIFT)
+#define I40E_GL_MTG_REP_FLU_CTL_FLU_OVTH_SHIFT 8
+#define I40E_GL_MTG_REP_FLU_CTL_FLU_OVTH_MASK I40E_MASK(0xFF, I40E_GL_MTG_REP_FLU_CTL_FLU_OVTH_SHIFT)
+
+#define I40E_GL_MTG_REP_MFIFO_CTL 0x0026999C /* Reset: CORER */
+#define I40E_GL_MTG_REP_MFIFO_CTL_UP_STRICT_PR_SHIFT 0
+#define I40E_GL_MTG_REP_MFIFO_CTL_UP_STRICT_PR_MASK I40E_MASK(0xF, I40E_GL_MTG_REP_MFIFO_CTL_UP_STRICT_PR_SHIFT)
+#define I40E_GL_MTG_REP_MFIFO_CTL_PRT_STRICT_PR_SHIFT 4
+#define I40E_GL_MTG_REP_MFIFO_CTL_PRT_STRICT_PR_MASK I40E_MASK(0x1, I40E_GL_MTG_REP_MFIFO_CTL_PRT_STRICT_PR_SHIFT)
+
+#define I40E_GL_MTG_TBL_CTL 0x0026997C /* Reset: CORER */
+#define I40E_GL_MTG_TBL_CTL_FLU_MODE_SHIFT 0
+#define I40E_GL_MTG_TBL_CTL_FLU_MODE_MASK I40E_MASK(0xF, I40E_GL_MTG_TBL_CTL_FLU_MODE_SHIFT)
+#define I40E_GL_MTG_TBL_CTL_FLU_OVTH_SHIFT 8
+#define I40E_GL_MTG_TBL_CTL_FLU_OVTH_MASK I40E_MASK(0xFF, I40E_GL_MTG_TBL_CTL_FLU_OVTH_SHIFT)
+
+#define I40E_GL_PRE_FLU_CTL(_i) (0x00269240 + ((_i) * 4)) /* _i=0...9 */ /* Reset: CORER */
+#define I40E_GL_PRE_FLU_CTL_MAX_INDEX 9
+#define I40E_GL_PRE_FLU_CTL_FLU_MODE_SHIFT 0
+#define I40E_GL_PRE_FLU_CTL_FLU_MODE_MASK I40E_MASK(0xF, I40E_GL_PRE_FLU_CTL_FLU_MODE_SHIFT)
+#define I40E_GL_PRE_FLU_CTL_FLU_OVTH_SHIFT 8
+#define I40E_GL_PRE_FLU_CTL_FLU_OVTH_MASK I40E_MASK(0xFF, I40E_GL_PRE_FLU_CTL_FLU_OVTH_SHIFT)
+
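+/*
+ * Illustrative usage sketch (not part of the auto-generated register list):
+ * array-style registers such as I40E_GL_PRE_FLU_CTL(_i) expand to the base
+ * address plus a 4-byte stride per instance, with the matching _MAX_INDEX
+ * define giving the highest valid _i.  A minimal sketch of walking such a
+ * register, assuming the driver's rd32() accessor and an i40e_hw pointer hw:
+ *
+ *	u32 i, val;
+ *	for (i = 0; i <= I40E_GL_PRE_FLU_CTL_MAX_INDEX; i++)
+ *		val = rd32(hw, I40E_GL_PRE_FLU_CTL(i));
+ */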
+#define I40E_GL_PRE_HSH_KEY_D0 0x00269810 /* Reset: CORER */
+#define I40E_GL_PRE_HSH_KEY_D0_HASH_KEY_SHIFT 0
+#define I40E_GL_PRE_HSH_KEY_D0_HASH_KEY_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_PRE_HSH_KEY_D0_HASH_KEY_SHIFT)
+
+#define I40E_GL_PRE_HSH_KEY_D1 0x00269814 /* Reset: CORER */
+#define I40E_GL_PRE_HSH_KEY_D1_HASH_KEY_SHIFT 0
+#define I40E_GL_PRE_HSH_KEY_D1_HASH_KEY_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_PRE_HSH_KEY_D1_HASH_KEY_SHIFT)
+
+#define I40E_GL_PRE_HSH_KEY_D2 0x00269818 /* Reset: CORER */
+#define I40E_GL_PRE_HSH_KEY_D2_HASH_KEY_SHIFT 0
+#define I40E_GL_PRE_HSH_KEY_D2_HASH_KEY_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_PRE_HSH_KEY_D2_HASH_KEY_SHIFT)
+
+#define I40E_GL_PRE_HSH_KEY_D3 0x0026981C /* Reset: CORER */
+#define I40E_GL_PRE_HSH_KEY_D3_HASH_KEY_SHIFT 0
+#define I40E_GL_PRE_HSH_KEY_D3_HASH_KEY_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_PRE_HSH_KEY_D3_HASH_KEY_SHIFT)
+
+#define I40E_GL_PRE_MNG_ARP_FLD_CFG 0x00269A94 /* Reset: CORER */
+#define I40E_GL_PRE_MNG_ARP_FLD_CFG_FV_IDX_SHIFT 0
+#define I40E_GL_PRE_MNG_ARP_FLD_CFG_FV_IDX_MASK I40E_MASK(0x3F, I40E_GL_PRE_MNG_ARP_FLD_CFG_FV_IDX_SHIFT)
+#define I40E_GL_PRE_MNG_ARP_FLD_CFG_TR_IDX_SHIFT 13
+#define I40E_GL_PRE_MNG_ARP_FLD_CFG_TR_IDX_MASK I40E_MASK(0x3F, I40E_GL_PRE_MNG_ARP_FLD_CFG_TR_IDX_SHIFT)
+#define I40E_GL_PRE_MNG_ARP_FLD_CFG_ARP_TAR_IP_FV_IDX_SHIFT 24
+#define I40E_GL_PRE_MNG_ARP_FLD_CFG_ARP_TAR_IP_FV_IDX_MASK I40E_MASK(0x3F, I40E_GL_PRE_MNG_ARP_FLD_CFG_ARP_TAR_IP_FV_IDX_SHIFT)
+
+#define I40E_GL_PRE_MNG_ETH_FLD_CFG 0x00269ABC /* Reset: CORER */
+#define I40E_GL_PRE_MNG_ETH_FLD_CFG_FV_IDX_SHIFT 0
+#define I40E_GL_PRE_MNG_ETH_FLD_CFG_FV_IDX_MASK I40E_MASK(0x3F, I40E_GL_PRE_MNG_ETH_FLD_CFG_FV_IDX_SHIFT)
+
+#define I40E_GL_PRE_MNG_ICMP_FLD_CFG 0x00269A9C /* Reset: CORER */
+#define I40E_GL_PRE_MNG_ICMP_FLD_CFG_FV_IDX_SHIFT 0
+#define I40E_GL_PRE_MNG_ICMP_FLD_CFG_FV_IDX_MASK I40E_MASK(0x3F, I40E_GL_PRE_MNG_ICMP_FLD_CFG_FV_IDX_SHIFT)
+#define I40E_GL_PRE_MNG_ICMP_FLD_CFG_TR_IDX_SHIFT 13
+#define I40E_GL_PRE_MNG_ICMP_FLD_CFG_TR_IDX_MASK I40E_MASK(0x3F, I40E_GL_PRE_MNG_ICMP_FLD_CFG_TR_IDX_SHIFT)
+
+#define I40E_GL_PRE_MNG_IP4_FLD_CFG 0x00269AB4 /* Reset: CORER */
+#define I40E_GL_PRE_MNG_IP4_FLD_CFG_FV_IDX_SHIFT 0
+#define I40E_GL_PRE_MNG_IP4_FLD_CFG_FV_IDX_MASK I40E_MASK(0x3F, I40E_GL_PRE_MNG_IP4_FLD_CFG_FV_IDX_SHIFT)
+#define I40E_GL_PRE_MNG_IP4_FLD_CFG_TR_IDX_SHIFT 13
+#define I40E_GL_PRE_MNG_IP4_FLD_CFG_TR_IDX_MASK I40E_MASK(0x3F, I40E_GL_PRE_MNG_IP4_FLD_CFG_TR_IDX_SHIFT)
+
+#define I40E_GL_PRE_MNG_IP6_FLD_CFG 0x00269A7C /* Reset: CORER */
+#define I40E_GL_PRE_MNG_IP6_FLD_CFG_FV_IDX_SHIFT 0
+#define I40E_GL_PRE_MNG_IP6_FLD_CFG_FV_IDX_MASK I40E_MASK(0x3F, I40E_GL_PRE_MNG_IP6_FLD_CFG_FV_IDX_SHIFT)
+#define I40E_GL_PRE_MNG_IP6_FLD_CFG_TR_IDX_SHIFT 13
+#define I40E_GL_PRE_MNG_IP6_FLD_CFG_TR_IDX_MASK I40E_MASK(0x3F, I40E_GL_PRE_MNG_IP6_FLD_CFG_TR_IDX_SHIFT)
+
+#define I40E_GL_PRE_MNG_MAC_FLD_CFG 0x00269A8C /* Reset: CORER */
+#define I40E_GL_PRE_MNG_MAC_FLD_CFG_FV_IDX_SHIFT 0
+#define I40E_GL_PRE_MNG_MAC_FLD_CFG_FV_IDX_MASK I40E_MASK(0x3F, I40E_GL_PRE_MNG_MAC_FLD_CFG_FV_IDX_SHIFT)
+#define I40E_GL_PRE_MNG_MAC_FLD_CFG_TR_IDX_SHIFT 13
+#define I40E_GL_PRE_MNG_MAC_FLD_CFG_TR_IDX_MASK I40E_MASK(0x3F, I40E_GL_PRE_MNG_MAC_FLD_CFG_TR_IDX_SHIFT)
+
+#define I40E_GL_PRE_MNG_MLD_FLD_CFG 0x00269A6C /* Reset: CORER */
+#define I40E_GL_PRE_MNG_MLD_FLD_CFG_FV_IDX_SHIFT 0
+#define I40E_GL_PRE_MNG_MLD_FLD_CFG_FV_IDX_MASK I40E_MASK(0x3F, I40E_GL_PRE_MNG_MLD_FLD_CFG_FV_IDX_SHIFT)
+#define I40E_GL_PRE_MNG_MLD_FLD_CFG_TR_IDX_SHIFT 13
+#define I40E_GL_PRE_MNG_MLD_FLD_CFG_TR_IDX_MASK I40E_MASK(0x3F, I40E_GL_PRE_MNG_MLD_FLD_CFG_TR_IDX_SHIFT)
+
+#define I40E_GL_PRE_MNG_TCPDP_FLD_CFG 0x00269A74 /* Reset: CORER */
+#define I40E_GL_PRE_MNG_TCPDP_FLD_CFG_FV_IDX_SHIFT 0
+#define I40E_GL_PRE_MNG_TCPDP_FLD_CFG_FV_IDX_MASK I40E_MASK(0x3F, I40E_GL_PRE_MNG_TCPDP_FLD_CFG_FV_IDX_SHIFT)
+#define I40E_GL_PRE_MNG_TCPDP_FLD_CFG_TR_IDX_SHIFT 13
+#define I40E_GL_PRE_MNG_TCPDP_FLD_CFG_TR_IDX_MASK I40E_MASK(0x3F, I40E_GL_PRE_MNG_TCPDP_FLD_CFG_TR_IDX_SHIFT)
+
+#define I40E_GL_PRE_MNG_TCPSP_FLD_CFG 0x00269AA4 /* Reset: CORER */
+#define I40E_GL_PRE_MNG_TCPSP_FLD_CFG_FV_IDX_SHIFT 0
+#define I40E_GL_PRE_MNG_TCPSP_FLD_CFG_FV_IDX_MASK I40E_MASK(0x3F, I40E_GL_PRE_MNG_TCPSP_FLD_CFG_FV_IDX_SHIFT)
+#define I40E_GL_PRE_MNG_TCPSP_FLD_CFG_TR_IDX_SHIFT 13
+#define I40E_GL_PRE_MNG_TCPSP_FLD_CFG_TR_IDX_MASK I40E_MASK(0x3F, I40E_GL_PRE_MNG_TCPSP_FLD_CFG_TR_IDX_SHIFT)
+
+#define I40E_GL_PRE_MNG_UDPDP_FLD_CFG 0x00269AAC /* Reset: CORER */
+#define I40E_GL_PRE_MNG_UDPDP_FLD_CFG_FV_IDX_SHIFT 0
+#define I40E_GL_PRE_MNG_UDPDP_FLD_CFG_FV_IDX_MASK I40E_MASK(0x3F, I40E_GL_PRE_MNG_UDPDP_FLD_CFG_FV_IDX_SHIFT)
+#define I40E_GL_PRE_MNG_UDPDP_FLD_CFG_TR_IDX_SHIFT 13
+#define I40E_GL_PRE_MNG_UDPDP_FLD_CFG_TR_IDX_MASK I40E_MASK(0x3F, I40E_GL_PRE_MNG_UDPDP_FLD_CFG_TR_IDX_SHIFT)
+
+#define I40E_GL_PRE_MNG_UDPSP_FLD_CFG 0x00269AC4 /* Reset: CORER */
+#define I40E_GL_PRE_MNG_UDPSP_FLD_CFG_FV_IDX_SHIFT 0
+#define I40E_GL_PRE_MNG_UDPSP_FLD_CFG_FV_IDX_MASK I40E_MASK(0x3F, I40E_GL_PRE_MNG_UDPSP_FLD_CFG_FV_IDX_SHIFT)
+#define I40E_GL_PRE_MNG_UDPSP_FLD_CFG_TR_IDX_SHIFT 13
+#define I40E_GL_PRE_MNG_UDPSP_FLD_CFG_TR_IDX_MASK I40E_MASK(0x3F, I40E_GL_PRE_MNG_UDPSP_FLD_CFG_TR_IDX_SHIFT)
+
+#define I40E_GL_PRE_MNG_VLAN_FLD_CFG 0x00269A84 /* Reset: CORER */
+#define I40E_GL_PRE_MNG_VLAN_FLD_CFG_FV_IDX_SHIFT 0
+#define I40E_GL_PRE_MNG_VLAN_FLD_CFG_FV_IDX_MASK I40E_MASK(0x3F, I40E_GL_PRE_MNG_VLAN_FLD_CFG_FV_IDX_SHIFT)
+#define I40E_GL_PRE_MNG_VLAN_FLD_CFG_TR_IDX_SHIFT 13
+#define I40E_GL_PRE_MNG_VLAN_FLD_CFG_TR_IDX_MASK I40E_MASK(0x3F, I40E_GL_PRE_MNG_VLAN_FLD_CFG_TR_IDX_SHIFT)
+
+#define I40E_GL_PRE_PRX_BIG_ENT_D2 0x002699EC /* Reset: CORER */
+#define I40E_GL_PRE_PRX_BIG_ENT_D2_USE_PORT_SHIFT 7
+#define I40E_GL_PRE_PRX_BIG_ENT_D2_USE_PORT_MASK I40E_MASK(0x1, I40E_GL_PRE_PRX_BIG_ENT_D2_USE_PORT_SHIFT)
+#define I40E_GL_PRE_PRX_BIG_ENT_D2_TR_EN_SHIFT 8
+#define I40E_GL_PRE_PRX_BIG_ENT_D2_TR_EN_MASK I40E_MASK(0x3F, I40E_GL_PRE_PRX_BIG_ENT_D2_TR_EN_SHIFT)
+#define I40E_GL_PRE_PRX_BIG_ENT_D2_BYTE_MSK0_SHIFT 16
+#define I40E_GL_PRE_PRX_BIG_ENT_D2_BYTE_MSK0_MASK I40E_MASK(0xF, I40E_GL_PRE_PRX_BIG_ENT_D2_BYTE_MSK0_SHIFT)
+#define I40E_GL_PRE_PRX_BIG_ENT_D2_BYTE_MSK1_SHIFT 20
+#define I40E_GL_PRE_PRX_BIG_ENT_D2_BYTE_MSK1_MASK I40E_MASK(0xF, I40E_GL_PRE_PRX_BIG_ENT_D2_BYTE_MSK1_SHIFT)
+#define I40E_GL_PRE_PRX_BIG_ENT_D2_BIT_MSK0_SHIFT 24
+#define I40E_GL_PRE_PRX_BIG_ENT_D2_BIT_MSK0_MASK I40E_MASK(0xFF, I40E_GL_PRE_PRX_BIG_ENT_D2_BIT_MSK0_SHIFT)
+
+#define I40E_GL_PRE_PRX_BIG_HSH_KEY_D0 0x00269A1C /* Reset: CORER */
+#define I40E_GL_PRE_PRX_BIG_HSH_KEY_D0_H0_SHIFT 0
+#define I40E_GL_PRE_PRX_BIG_HSH_KEY_D0_H0_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_PRE_PRX_BIG_HSH_KEY_D0_H0_SHIFT)
+
+#define I40E_GL_PRE_PRX_BIG_HSH_KEY_D2 0x00269A3C /* Reset: CORER */
+#define I40E_GL_PRE_PRX_BIG_HSH_KEY_D2_H2_SHIFT 0
+#define I40E_GL_PRE_PRX_BIG_HSH_KEY_D2_H2_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_PRE_PRX_BIG_HSH_KEY_D2_H2_SHIFT)
+
+#define I40E_GL_PRE_PRX_FLU_CTL 0x00269974 /* Reset: CORER */
+#define I40E_GL_PRE_PRX_FLU_CTL_FLU_MODE_SHIFT 0
+#define I40E_GL_PRE_PRX_FLU_CTL_FLU_MODE_MASK I40E_MASK(0xF, I40E_GL_PRE_PRX_FLU_CTL_FLU_MODE_SHIFT)
+#define I40E_GL_PRE_PRX_FLU_CTL_FLU_OVTH_SHIFT 8
+#define I40E_GL_PRE_PRX_FLU_CTL_FLU_OVTH_MASK I40E_MASK(0xFF, I40E_GL_PRE_PRX_FLU_CTL_FLU_OVTH_SHIFT)
+
+#define I40E_GL_PRE_PRX_GEN_CFG 0x002699AC /* Reset: CORER */
+#define I40E_GL_PRE_PRX_GEN_CFG_FILTER_ENABLE_SHIFT 0
+#define I40E_GL_PRE_PRX_GEN_CFG_FILTER_ENABLE_MASK I40E_MASK(0x1, I40E_GL_PRE_PRX_GEN_CFG_FILTER_ENABLE_SHIFT)
+#define I40E_GL_PRE_PRX_GEN_CFG_HASH_MODE_SHIFT 6
+#define I40E_GL_PRE_PRX_GEN_CFG_HASH_MODE_MASK I40E_MASK(0x3, I40E_GL_PRE_PRX_GEN_CFG_HASH_MODE_SHIFT)
+
+#define I40E_GL_PRE_PRX_HSH_KEY_D1 0x00269A2C /* Reset: CORER */
+#define I40E_GL_PRE_PRX_HSH_KEY_D1_H1_SHIFT 0
+#define I40E_GL_PRE_PRX_HSH_KEY_D1_H1_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_PRE_PRX_HSH_KEY_D1_H1_SHIFT)
+
+#define I40E_GL_PRE_PRX_HSH_KEY_D2 0x00269A44 /* Reset: CORER */
+#define I40E_GL_PRE_PRX_HSH_KEY_D2_H2_SHIFT 0
+#define I40E_GL_PRE_PRX_HSH_KEY_D2_H2_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_PRE_PRX_HSH_KEY_D2_H2_SHIFT)
+
+#define I40E_GL_PRE_PRX_HSH_KEY_D3 0x00269A4C /* Reset: CORER */
+#define I40E_GL_PRE_PRX_HSH_KEY_D3_H3_SHIFT 0
+#define I40E_GL_PRE_PRX_HSH_KEY_D3_H3_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_PRE_PRX_HSH_KEY_D3_H3_SHIFT)
+
+#define I40E_GL_PRE_RDMABM_FLD_CFG 0x002699B4 /* Reset: CORER */
+#define I40E_GL_PRE_RDMABM_FLD_CFG_TCP_DP_FV_IDX_SHIFT 0
+#define I40E_GL_PRE_RDMABM_FLD_CFG_TCP_DP_FV_IDX_MASK I40E_MASK(0x3F, I40E_GL_PRE_RDMABM_FLD_CFG_TCP_DP_FV_IDX_SHIFT)
+#define I40E_GL_PRE_RDMABM_FLD_CFG_TCP_DP_TR_IDX_SHIFT 6
+#define I40E_GL_PRE_RDMABM_FLD_CFG_TCP_DP_TR_IDX_MASK I40E_MASK(0x3F, I40E_GL_PRE_RDMABM_FLD_CFG_TCP_DP_TR_IDX_SHIFT)
+#define I40E_GL_PRE_RDMABM_FLD_CFG_UDP_DP_FV_IDX_SHIFT 16
+#define I40E_GL_PRE_RDMABM_FLD_CFG_UDP_DP_FV_IDX_MASK I40E_MASK(0x3F, I40E_GL_PRE_RDMABM_FLD_CFG_UDP_DP_FV_IDX_SHIFT)
+#define I40E_GL_PRE_RDMABM_FLD_CFG_UDP_DP_TR_IDX_SHIFT 22
+#define I40E_GL_PRE_RDMABM_FLD_CFG_UDP_DP_TR_IDX_MASK I40E_MASK(0x3F, I40E_GL_PRE_RDMABM_FLD_CFG_UDP_DP_TR_IDX_SHIFT)
+
+#define I40E_GL_PRE_TR_MAN(_i) (0x00269F80 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_GL_PRE_TR_MAN_MAX_INDEX 7
+#define I40E_GL_PRE_TR_MAN_SRC_TR_IDX0_SHIFT 0
+#define I40E_GL_PRE_TR_MAN_SRC_TR_IDX0_MASK I40E_MASK(0x3F, I40E_GL_PRE_TR_MAN_SRC_TR_IDX0_SHIFT)
+#define I40E_GL_PRE_TR_MAN_SRC_TR_IDX1_SHIFT 8
+#define I40E_GL_PRE_TR_MAN_SRC_TR_IDX1_MASK I40E_MASK(0x3F, I40E_GL_PRE_TR_MAN_SRC_TR_IDX1_SHIFT)
+#define I40E_GL_PRE_TR_MAN_TR_MAN_OP_SHIFT 16
+#define I40E_GL_PRE_TR_MAN_TR_MAN_OP_MASK I40E_MASK(0x3, I40E_GL_PRE_TR_MAN_TR_MAN_OP_SHIFT)
+#define I40E_GL_PRE_TR_MAN_TR_MAN_NEG_SHIFT 18
+#define I40E_GL_PRE_TR_MAN_TR_MAN_NEG_MASK I40E_MASK(0x1, I40E_GL_PRE_TR_MAN_TR_MAN_NEG_SHIFT)
+
+#define I40E_GL_PRS_FRT(_i) (0x00269750 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GL_PRS_FRT_MAX_INDEX 3
+#define I40E_GL_PRS_FRT_FV_IDX_0_SHIFT 0
+#define I40E_GL_PRS_FRT_FV_IDX_0_MASK I40E_MASK(0x3FF, I40E_GL_PRS_FRT_FV_IDX_0_SHIFT)
+#define I40E_GL_PRS_FRT_FV_IDX_1_SHIFT 16
+#define I40E_GL_PRS_FRT_FV_IDX_1_MASK I40E_MASK(0x3FF, I40E_GL_PRS_FRT_FV_IDX_1_SHIFT)
+
+#define I40E_GL_PRS_L2LEN 0x0026996C /* Reset: CORER */
+#define I40E_GL_PRS_L2LEN_MAC_LEN_SHIFT 8
+#define I40E_GL_PRS_L2LEN_MAC_LEN_MASK I40E_MASK(0xFF, I40E_GL_PRS_L2LEN_MAC_LEN_SHIFT)
+
+#define I40E_GL_PRS_PL_THR 0x00269FE4 /* Reset: CORER */
+#define I40E_GL_PRS_PL_THR_PIPE_LIMIT_P0_SHIFT 0
+#define I40E_GL_PRS_PL_THR_PIPE_LIMIT_P0_MASK I40E_MASK(0xFF, I40E_GL_PRS_PL_THR_PIPE_LIMIT_P0_SHIFT)
+#define I40E_GL_PRS_PL_THR_PIPE_LIMIT_P1_SHIFT 8
+#define I40E_GL_PRS_PL_THR_PIPE_LIMIT_P1_MASK I40E_MASK(0xFF, I40E_GL_PRS_PL_THR_PIPE_LIMIT_P1_SHIFT)
+#define I40E_GL_PRS_PL_THR_PIPE_LIMIT_P2_SHIFT 16
+#define I40E_GL_PRS_PL_THR_PIPE_LIMIT_P2_MASK I40E_MASK(0xFF, I40E_GL_PRS_PL_THR_PIPE_LIMIT_P2_SHIFT)
+#define I40E_GL_PRS_PL_THR_PIPE_LIMIT_P3_SHIFT 24
+#define I40E_GL_PRS_PL_THR_PIPE_LIMIT_P3_MASK I40E_MASK(0xFF, I40E_GL_PRS_PL_THR_PIPE_LIMIT_P3_SHIFT)
+
+#define I40E_GL_PRS_PM_PORT_THR 0x00269FC4 /* Reset: CORER */
+#define I40E_GL_PRS_PM_PORT_THR_THR_PORT_0_SHIFT 0
+#define I40E_GL_PRS_PM_PORT_THR_THR_PORT_0_MASK I40E_MASK(0xFF, I40E_GL_PRS_PM_PORT_THR_THR_PORT_0_SHIFT)
+#define I40E_GL_PRS_PM_PORT_THR_THR_PORT_1_SHIFT 8
+#define I40E_GL_PRS_PM_PORT_THR_THR_PORT_1_MASK I40E_MASK(0xFF, I40E_GL_PRS_PM_PORT_THR_THR_PORT_1_SHIFT)
+#define I40E_GL_PRS_PM_PORT_THR_THR_PORT_2_SHIFT 16
+#define I40E_GL_PRS_PM_PORT_THR_THR_PORT_2_MASK I40E_MASK(0xFF, I40E_GL_PRS_PM_PORT_THR_THR_PORT_2_SHIFT)
+#define I40E_GL_PRS_PM_PORT_THR_THR_PORT_3_SHIFT 24
+#define I40E_GL_PRS_PM_PORT_THR_THR_PORT_3_MASK I40E_MASK(0xFF, I40E_GL_PRS_PM_PORT_THR_THR_PORT_3_SHIFT)
+
+#define I40E_GL_PRS_PM_UP_THR 0x00269FCC /* Reset: CORER */
+#define I40E_GL_PRS_PM_UP_THR_UP_PORT_0_SHIFT 0
+#define I40E_GL_PRS_PM_UP_THR_UP_PORT_0_MASK I40E_MASK(0xFF, I40E_GL_PRS_PM_UP_THR_UP_PORT_0_SHIFT)
+#define I40E_GL_PRS_PM_UP_THR_UP_PORT_1_SHIFT 8
+#define I40E_GL_PRS_PM_UP_THR_UP_PORT_1_MASK I40E_MASK(0xFF, I40E_GL_PRS_PM_UP_THR_UP_PORT_1_SHIFT)
+#define I40E_GL_PRS_PM_UP_THR_UP_PORT_2_SHIFT 16
+#define I40E_GL_PRS_PM_UP_THR_UP_PORT_2_MASK I40E_MASK(0xFF, I40E_GL_PRS_PM_UP_THR_UP_PORT_2_SHIFT)
+#define I40E_GL_PRS_PM_UP_THR_UP_PORT_3_SHIFT 24
+#define I40E_GL_PRS_PM_UP_THR_UP_PORT_3_MASK I40E_MASK(0xFF, I40E_GL_PRS_PM_UP_THR_UP_PORT_3_SHIFT)
+
+#define I40E_GL_RXA_CFG 0x00269944 /* Reset: CORER */
+#define I40E_GL_RXA_CFG_UP_STRICT_PR_SHIFT 0
+#define I40E_GL_RXA_CFG_UP_STRICT_PR_MASK I40E_MASK(0xF, I40E_GL_RXA_CFG_UP_STRICT_PR_SHIFT)
+#define I40E_GL_RXA_CFG_PRT_STRICT_PR_SHIFT 4
+#define I40E_GL_RXA_CFG_PRT_STRICT_PR_MASK I40E_MASK(0x1, I40E_GL_RXA_CFG_PRT_STRICT_PR_SHIFT)
+#define I40E_GL_RXA_CFG_MIN_HDR_LEN_SHIFT 8
+#define I40E_GL_RXA_CFG_MIN_HDR_LEN_MASK I40E_MASK(0x7F, I40E_GL_RXA_CFG_MIN_HDR_LEN_SHIFT)
+#define I40E_GL_RXA_CFG_MAX_HDR_LEN_SHIFT 15
+#define I40E_GL_RXA_CFG_MAX_HDR_LEN_MASK I40E_MASK(0x7F, I40E_GL_RXA_CFG_MAX_HDR_LEN_SHIFT)
+
+#define I40E_GL_SWR_DP 0x00269998 /* Reset: CORER */
+#define I40E_GL_SWR_DP_DUAL_PORT_SHIFT 0
+#define I40E_GL_SWR_DP_DUAL_PORT_MASK I40E_MASK(0x1, I40E_GL_SWR_DP_DUAL_PORT_SHIFT)
+
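+/*
+ * Illustrative usage sketch (not part of the auto-generated register list):
+ * each field is described by a _SHIFT/_MASK pair, and the mask produced by
+ * I40E_MASK() is already shifted into position, so extracting a field means
+ * masking first and then shifting down.  A minimal sketch of reading one
+ * field, assuming the driver's rd32() accessor and an i40e_hw pointer hw:
+ *
+ *	u32 reg = rd32(hw, I40E_GL_SWR_DP);
+ *	u32 dual_port = (reg & I40E_GL_SWR_DP_DUAL_PORT_MASK) >>
+ *	    I40E_GL_SWR_DP_DUAL_PORT_SHIFT;
+ */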
+#define I40E_GL_SWR_MAC_AS_FLU_ID(_i) (0x00269BE8 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_GL_SWR_MAC_AS_FLU_ID_MAX_INDEX 1
+#define I40E_GL_SWR_MAC_AS_FLU_ID_FLU_INDEXES_SHIFT 0
+#define I40E_GL_SWR_MAC_AS_FLU_ID_FLU_INDEXES_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_MAC_AS_FLU_ID_FLU_INDEXES_SHIFT)
+
+#define I40E_GL_SWR_MIM_DBG_CTL 0x00269FE8 /* Reset: CORER */
+#define I40E_GL_SWR_MIM_DBG_CTL_ADDR_SHIFT 0
+#define I40E_GL_SWR_MIM_DBG_CTL_ADDR_MASK I40E_MASK(0x1FF, I40E_GL_SWR_MIM_DBG_CTL_ADDR_SHIFT)
+#define I40E_GL_SWR_MIM_DBG_CTL_DW_SEL_SHIFT 16
+#define I40E_GL_SWR_MIM_DBG_CTL_DW_SEL_MASK I40E_MASK(0x3F, I40E_GL_SWR_MIM_DBG_CTL_DW_SEL_SHIFT)
+#define I40E_GL_SWR_MIM_DBG_CTL_TARGET_SEL_SHIFT 24
+#define I40E_GL_SWR_MIM_DBG_CTL_TARGET_SEL_MASK I40E_MASK(0x7, I40E_GL_SWR_MIM_DBG_CTL_TARGET_SEL_SHIFT)
+#define I40E_GL_SWR_MIM_DBG_CTL_BLOCK_PRSR_SHIFT 31
+#define I40E_GL_SWR_MIM_DBG_CTL_BLOCK_PRSR_MASK I40E_MASK(0x1, I40E_GL_SWR_MIM_DBG_CTL_BLOCK_PRSR_SHIFT)
+
+#define I40E_GL_SWR_MIM_DBG_STS 0x00269FEC /* Reset: CORER */
+#define I40E_GL_SWR_MIM_DBG_STS_GL_SWR_MIM_DBG_STS_SHIFT 0
+#define I40E_GL_SWR_MIM_DBG_STS_GL_SWR_MIM_DBG_STS_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_MIM_DBG_STS_GL_SWR_MIM_DBG_STS_SHIFT)
+
+#define I40E_GL_SWR_PM_PORT_THR 0x00269FB4 /* Reset: CORER */
+#define I40E_GL_SWR_PM_PORT_THR_THR_PORT_0_SHIFT 0
+#define I40E_GL_SWR_PM_PORT_THR_THR_PORT_0_MASK I40E_MASK(0xFF, I40E_GL_SWR_PM_PORT_THR_THR_PORT_0_SHIFT)
+#define I40E_GL_SWR_PM_PORT_THR_THR_PORT_1_SHIFT 8
+#define I40E_GL_SWR_PM_PORT_THR_THR_PORT_1_MASK I40E_MASK(0xFF, I40E_GL_SWR_PM_PORT_THR_THR_PORT_1_SHIFT)
+#define I40E_GL_SWR_PM_PORT_THR_THR_PORT_2_SHIFT 16
+#define I40E_GL_SWR_PM_PORT_THR_THR_PORT_2_MASK I40E_MASK(0xFF, I40E_GL_SWR_PM_PORT_THR_THR_PORT_2_SHIFT)
+#define I40E_GL_SWR_PM_PORT_THR_THR_PORT_3_SHIFT 24
+#define I40E_GL_SWR_PM_PORT_THR_THR_PORT_3_MASK I40E_MASK(0xFF, I40E_GL_SWR_PM_PORT_THR_THR_PORT_3_SHIFT)
+
+#define I40E_GL_SWR_REP_FLU_CTL 0x0026995C /* Reset: CORER */
+#define I40E_GL_SWR_REP_FLU_CTL_FLU_MODE_SHIFT 0
+#define I40E_GL_SWR_REP_FLU_CTL_FLU_MODE_MASK I40E_MASK(0xF, I40E_GL_SWR_REP_FLU_CTL_FLU_MODE_SHIFT)
+#define I40E_GL_SWR_REP_FLU_CTL_FLU_OVTH_SHIFT 8
+#define I40E_GL_SWR_REP_FLU_CTL_FLU_OVTH_MASK I40E_MASK(0xFF, I40E_GL_SWR_REP_FLU_CTL_FLU_OVTH_SHIFT)
+
+#define I40E_GL_SWR_REP_MFIFO_CTL 0x00269994 /* Reset: CORER */
+#define I40E_GL_SWR_REP_MFIFO_CTL_UP_STRICT_PR_SHIFT 0
+#define I40E_GL_SWR_REP_MFIFO_CTL_UP_STRICT_PR_MASK I40E_MASK(0xF, I40E_GL_SWR_REP_MFIFO_CTL_UP_STRICT_PR_SHIFT)
+#define I40E_GL_SWR_REP_MFIFO_CTL_PRT_STRICT_PR_SHIFT 4
+#define I40E_GL_SWR_REP_MFIFO_CTL_PRT_STRICT_PR_MASK I40E_MASK(0x1, I40E_GL_SWR_REP_MFIFO_CTL_PRT_STRICT_PR_SHIFT)
+#define I40E_GL_SWR_REP_MFIFO_CTL_SPARE27B_SHIFT 5
+#define I40E_GL_SWR_REP_MFIFO_CTL_SPARE27B_MASK I40E_MASK(0x7FFFFFF, I40E_GL_SWR_REP_MFIFO_CTL_SPARE27B_SHIFT)
+
+#define I40E_GLCM_LAN_CACHE0_MEM_CFG 0x0010C48C /* Reset: POR */
+#define I40E_GLCM_LAN_CACHE0_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_GLCM_LAN_CACHE0_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_GLCM_LAN_CACHE0_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_GLCM_LAN_CACHE0_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_GLCM_LAN_CACHE0_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_GLCM_LAN_CACHE0_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_GLCM_LAN_CACHE0_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_GLCM_LAN_CACHE0_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_GLCM_LAN_CACHE0_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_GLCM_LAN_CACHE0_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_GLCM_LAN_CACHE0_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_GLCM_LAN_CACHE0_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_GLCM_LAN_CACHE0_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_GLCM_LAN_CACHE0_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_GLCM_LAN_CACHE0_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_GLCM_LAN_CACHE0_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_GLCM_LAN_CACHE0_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_GLCM_LAN_CACHE0_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_GLCM_LAN_CACHE0_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_GLCM_LAN_CACHE0_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_GLCM_LAN_CACHE0_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_GLCM_LAN_CACHE0_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_GLCM_LAN_CACHE0_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_GLCM_LAN_CACHE0_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_GLCM_LAN_CACHE0_MEM_CFG_RME_A_SHIFT 12
+#define I40E_GLCM_LAN_CACHE0_MEM_CFG_RME_A_MASK I40E_MASK(0x1, I40E_GLCM_LAN_CACHE0_MEM_CFG_RME_A_SHIFT)
+#define I40E_GLCM_LAN_CACHE0_MEM_CFG_RME_B_SHIFT 13
+#define I40E_GLCM_LAN_CACHE0_MEM_CFG_RME_B_MASK I40E_MASK(0x1, I40E_GLCM_LAN_CACHE0_MEM_CFG_RME_B_SHIFT)
+#define I40E_GLCM_LAN_CACHE0_MEM_CFG_RM_A_SHIFT 16
+#define I40E_GLCM_LAN_CACHE0_MEM_CFG_RM_A_MASK I40E_MASK(0xF, I40E_GLCM_LAN_CACHE0_MEM_CFG_RM_A_SHIFT)
+#define I40E_GLCM_LAN_CACHE0_MEM_CFG_RM_B_SHIFT 20
+#define I40E_GLCM_LAN_CACHE0_MEM_CFG_RM_B_MASK I40E_MASK(0xF, I40E_GLCM_LAN_CACHE0_MEM_CFG_RM_B_SHIFT)
+
+#define I40E_GLCM_LAN_CACHE0_MEM_STATUS 0x0010C490 /* Reset: POR */
+#define I40E_GLCM_LAN_CACHE0_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_GLCM_LAN_CACHE0_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_GLCM_LAN_CACHE0_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_GLCM_LAN_CACHE0_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_GLCM_LAN_CACHE0_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_GLCM_LAN_CACHE0_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_GLCM_LAN_CACHE0_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_GLCM_LAN_CACHE0_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_GLCM_LAN_CACHE0_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_GLCM_LAN_CACHE0_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_GLCM_LAN_CACHE0_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_GLCM_LAN_CACHE0_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_GLCM_LAN_CACHE1_MEM_CFG 0x0010C494 /* Reset: POR */
+#define I40E_GLCM_LAN_CACHE1_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_GLCM_LAN_CACHE1_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_GLCM_LAN_CACHE1_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_GLCM_LAN_CACHE1_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_GLCM_LAN_CACHE1_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_GLCM_LAN_CACHE1_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_GLCM_LAN_CACHE1_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_GLCM_LAN_CACHE1_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_GLCM_LAN_CACHE1_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_GLCM_LAN_CACHE1_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_GLCM_LAN_CACHE1_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_GLCM_LAN_CACHE1_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_GLCM_LAN_CACHE1_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_GLCM_LAN_CACHE1_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_GLCM_LAN_CACHE1_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_GLCM_LAN_CACHE1_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_GLCM_LAN_CACHE1_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_GLCM_LAN_CACHE1_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_GLCM_LAN_CACHE1_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_GLCM_LAN_CACHE1_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_GLCM_LAN_CACHE1_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_GLCM_LAN_CACHE1_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_GLCM_LAN_CACHE1_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_GLCM_LAN_CACHE1_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_GLCM_LAN_CACHE1_MEM_CFG_RME_A_SHIFT 12
+#define I40E_GLCM_LAN_CACHE1_MEM_CFG_RME_A_MASK I40E_MASK(0x1, I40E_GLCM_LAN_CACHE1_MEM_CFG_RME_A_SHIFT)
+#define I40E_GLCM_LAN_CACHE1_MEM_CFG_RME_B_SHIFT 13
+#define I40E_GLCM_LAN_CACHE1_MEM_CFG_RME_B_MASK I40E_MASK(0x1, I40E_GLCM_LAN_CACHE1_MEM_CFG_RME_B_SHIFT)
+#define I40E_GLCM_LAN_CACHE1_MEM_CFG_RM_A_SHIFT 16
+#define I40E_GLCM_LAN_CACHE1_MEM_CFG_RM_A_MASK I40E_MASK(0xF, I40E_GLCM_LAN_CACHE1_MEM_CFG_RM_A_SHIFT)
+#define I40E_GLCM_LAN_CACHE1_MEM_CFG_RM_B_SHIFT 20
+#define I40E_GLCM_LAN_CACHE1_MEM_CFG_RM_B_MASK I40E_MASK(0xF, I40E_GLCM_LAN_CACHE1_MEM_CFG_RM_B_SHIFT)
+
+#define I40E_GLCM_LAN_CACHE1_MEM_STATUS 0x0010C498 /* Reset: POR */
+#define I40E_GLCM_LAN_CACHE1_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_GLCM_LAN_CACHE1_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_GLCM_LAN_CACHE1_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_GLCM_LAN_CACHE1_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_GLCM_LAN_CACHE1_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_GLCM_LAN_CACHE1_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_GLCM_LAN_CACHE1_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_GLCM_LAN_CACHE1_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_GLCM_LAN_CACHE1_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_GLCM_LAN_CACHE1_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_GLCM_LAN_CACHE1_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_GLCM_LAN_CACHE1_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_GLCM_LAN_DBELL_MEM_CFG 0x0010C49C /* Reset: POR */
+#define I40E_GLCM_LAN_DBELL_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_GLCM_LAN_DBELL_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_GLCM_LAN_DBELL_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_GLCM_LAN_DBELL_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_GLCM_LAN_DBELL_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_GLCM_LAN_DBELL_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_GLCM_LAN_DBELL_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_GLCM_LAN_DBELL_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_GLCM_LAN_DBELL_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_GLCM_LAN_DBELL_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_GLCM_LAN_DBELL_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_GLCM_LAN_DBELL_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_GLCM_LAN_DBELL_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_GLCM_LAN_DBELL_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_GLCM_LAN_DBELL_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_GLCM_LAN_DBELL_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_GLCM_LAN_DBELL_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_GLCM_LAN_DBELL_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_GLCM_LAN_DBELL_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_GLCM_LAN_DBELL_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_GLCM_LAN_DBELL_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_GLCM_LAN_DBELL_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_GLCM_LAN_DBELL_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_GLCM_LAN_DBELL_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_GLCM_LAN_DBELL_MEM_CFG_RME_A_SHIFT 12
+#define I40E_GLCM_LAN_DBELL_MEM_CFG_RME_A_MASK I40E_MASK(0x1, I40E_GLCM_LAN_DBELL_MEM_CFG_RME_A_SHIFT)
+#define I40E_GLCM_LAN_DBELL_MEM_CFG_RME_B_SHIFT 13
+#define I40E_GLCM_LAN_DBELL_MEM_CFG_RME_B_MASK I40E_MASK(0x1, I40E_GLCM_LAN_DBELL_MEM_CFG_RME_B_SHIFT)
+#define I40E_GLCM_LAN_DBELL_MEM_CFG_RM_A_SHIFT 16
+#define I40E_GLCM_LAN_DBELL_MEM_CFG_RM_A_MASK I40E_MASK(0xF, I40E_GLCM_LAN_DBELL_MEM_CFG_RM_A_SHIFT)
+#define I40E_GLCM_LAN_DBELL_MEM_CFG_RM_B_SHIFT 20
+#define I40E_GLCM_LAN_DBELL_MEM_CFG_RM_B_MASK I40E_MASK(0xF, I40E_GLCM_LAN_DBELL_MEM_CFG_RM_B_SHIFT)
+
+#define I40E_GLCM_LAN_DBELL_MEM_STATUS 0x0010C4A0 /* Reset: POR */
+#define I40E_GLCM_LAN_DBELL_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_GLCM_LAN_DBELL_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_GLCM_LAN_DBELL_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_GLCM_LAN_DBELL_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_GLCM_LAN_DBELL_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_GLCM_LAN_DBELL_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_GLCM_LAN_DBELL_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_GLCM_LAN_DBELL_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_GLCM_LAN_DBELL_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_GLCM_LAN_DBELL_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_GLCM_LAN_DBELL_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_GLCM_LAN_DBELL_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_GLCM_LAN_ECC_COR_ERR 0x0010C4D0 /* Reset: POR */
+#define I40E_GLCM_LAN_ECC_COR_ERR_CNT_SHIFT 0
+#define I40E_GLCM_LAN_ECC_COR_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_GLCM_LAN_ECC_COR_ERR_CNT_SHIFT)
+
+#define I40E_GLCM_LAN_ECC_UNCOR_ERR 0x0010C4CC /* Reset: POR */
+#define I40E_GLCM_LAN_ECC_UNCOR_ERR_CNT_SHIFT 0
+#define I40E_GLCM_LAN_ECC_UNCOR_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_GLCM_LAN_ECC_UNCOR_ERR_CNT_SHIFT)
+
+#define I40E_GLCM_LAN_EVICTBUF_MEM_CFG 0x0010C4A4 /* Reset: POR */
+#define I40E_GLCM_LAN_EVICTBUF_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_GLCM_LAN_EVICTBUF_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_GLCM_LAN_EVICTBUF_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_GLCM_LAN_EVICTBUF_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_GLCM_LAN_EVICTBUF_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_GLCM_LAN_EVICTBUF_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_GLCM_LAN_EVICTBUF_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_GLCM_LAN_EVICTBUF_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_GLCM_LAN_EVICTBUF_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_GLCM_LAN_EVICTBUF_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_GLCM_LAN_EVICTBUF_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_GLCM_LAN_EVICTBUF_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_GLCM_LAN_EVICTBUF_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_GLCM_LAN_EVICTBUF_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_GLCM_LAN_EVICTBUF_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_GLCM_LAN_EVICTBUF_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_GLCM_LAN_EVICTBUF_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_GLCM_LAN_EVICTBUF_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_GLCM_LAN_EVICTBUF_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_GLCM_LAN_EVICTBUF_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_GLCM_LAN_EVICTBUF_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_GLCM_LAN_EVICTBUF_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_GLCM_LAN_EVICTBUF_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_GLCM_LAN_EVICTBUF_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_GLCM_LAN_EVICTBUF_MEM_CFG_RME_SHIFT 12
+#define I40E_GLCM_LAN_EVICTBUF_MEM_CFG_RME_MASK I40E_MASK(0x1, I40E_GLCM_LAN_EVICTBUF_MEM_CFG_RME_SHIFT)
+#define I40E_GLCM_LAN_EVICTBUF_MEM_CFG_RM_SHIFT 16
+#define I40E_GLCM_LAN_EVICTBUF_MEM_CFG_RM_MASK I40E_MASK(0xF, I40E_GLCM_LAN_EVICTBUF_MEM_CFG_RM_SHIFT)
+
+#define I40E_GLCM_LAN_EVICTBUF_MEM_STATUS 0x0010C4A8 /* Reset: POR */
+#define I40E_GLCM_LAN_EVICTBUF_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_GLCM_LAN_EVICTBUF_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_GLCM_LAN_EVICTBUF_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_GLCM_LAN_EVICTBUF_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_GLCM_LAN_EVICTBUF_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_GLCM_LAN_EVICTBUF_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_GLCM_LAN_EVICTBUF_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_GLCM_LAN_EVICTBUF_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_GLCM_LAN_EVICTBUF_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_GLCM_LAN_EVICTBUF_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_GLCM_LAN_EVICTBUF_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_GLCM_LAN_EVICTBUF_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_GLCM_LAN_FILLBUF_MEM_CFG 0x0010C4AC /* Reset: POR */
+#define I40E_GLCM_LAN_FILLBUF_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_GLCM_LAN_FILLBUF_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_GLCM_LAN_FILLBUF_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_GLCM_LAN_FILLBUF_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_GLCM_LAN_FILLBUF_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_GLCM_LAN_FILLBUF_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_GLCM_LAN_FILLBUF_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_GLCM_LAN_FILLBUF_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_GLCM_LAN_FILLBUF_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_GLCM_LAN_FILLBUF_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_GLCM_LAN_FILLBUF_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_GLCM_LAN_FILLBUF_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_GLCM_LAN_FILLBUF_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_GLCM_LAN_FILLBUF_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_GLCM_LAN_FILLBUF_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_GLCM_LAN_FILLBUF_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_GLCM_LAN_FILLBUF_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_GLCM_LAN_FILLBUF_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_GLCM_LAN_FILLBUF_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_GLCM_LAN_FILLBUF_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_GLCM_LAN_FILLBUF_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_GLCM_LAN_FILLBUF_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_GLCM_LAN_FILLBUF_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_GLCM_LAN_FILLBUF_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_GLCM_LAN_FILLBUF_MEM_CFG_RME_SHIFT 12
+#define I40E_GLCM_LAN_FILLBUF_MEM_CFG_RME_MASK I40E_MASK(0x1, I40E_GLCM_LAN_FILLBUF_MEM_CFG_RME_SHIFT)
+#define I40E_GLCM_LAN_FILLBUF_MEM_CFG_RM_SHIFT 16
+#define I40E_GLCM_LAN_FILLBUF_MEM_CFG_RM_MASK I40E_MASK(0xF, I40E_GLCM_LAN_FILLBUF_MEM_CFG_RM_SHIFT)
+
+#define I40E_GLCM_LAN_FILLBUF_MEM_STATUS 0x0010C4B0 /* Reset: POR */
+#define I40E_GLCM_LAN_FILLBUF_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_GLCM_LAN_FILLBUF_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_GLCM_LAN_FILLBUF_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_GLCM_LAN_FILLBUF_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_GLCM_LAN_FILLBUF_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_GLCM_LAN_FILLBUF_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_GLCM_LAN_FILLBUF_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_GLCM_LAN_FILLBUF_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_GLCM_LAN_FILLBUF_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_GLCM_LAN_FILLBUF_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_GLCM_LAN_FILLBUF_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_GLCM_LAN_FILLBUF_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_GLCM_LAN_QTXCTL_MEM_CFG 0x0010C4BC /* Reset: POR */
+#define I40E_GLCM_LAN_QTXCTL_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_GLCM_LAN_QTXCTL_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_GLCM_LAN_QTXCTL_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_GLCM_LAN_QTXCTL_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_GLCM_LAN_QTXCTL_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_GLCM_LAN_QTXCTL_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_GLCM_LAN_QTXCTL_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_GLCM_LAN_QTXCTL_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_GLCM_LAN_QTXCTL_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_GLCM_LAN_QTXCTL_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_GLCM_LAN_QTXCTL_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_GLCM_LAN_QTXCTL_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_GLCM_LAN_QTXCTL_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_GLCM_LAN_QTXCTL_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_GLCM_LAN_QTXCTL_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_GLCM_LAN_QTXCTL_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_GLCM_LAN_QTXCTL_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_GLCM_LAN_QTXCTL_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_GLCM_LAN_QTXCTL_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_GLCM_LAN_QTXCTL_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_GLCM_LAN_QTXCTL_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_GLCM_LAN_QTXCTL_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_GLCM_LAN_QTXCTL_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_GLCM_LAN_QTXCTL_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_GLCM_LAN_QTXCTL_MEM_CFG_RME_SHIFT 12
+#define I40E_GLCM_LAN_QTXCTL_MEM_CFG_RME_MASK I40E_MASK(0x1, I40E_GLCM_LAN_QTXCTL_MEM_CFG_RME_SHIFT)
+#define I40E_GLCM_LAN_QTXCTL_MEM_CFG_RM_SHIFT 16
+#define I40E_GLCM_LAN_QTXCTL_MEM_CFG_RM_MASK I40E_MASK(0xF, I40E_GLCM_LAN_QTXCTL_MEM_CFG_RM_SHIFT)
+
+#define I40E_GLCM_LAN_QTXCTL_MEM_STATUS 0x0010C4C0 /* Reset: POR */
+#define I40E_GLCM_LAN_QTXCTL_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_GLCM_LAN_QTXCTL_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_GLCM_LAN_QTXCTL_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_GLCM_LAN_QTXCTL_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_GLCM_LAN_QTXCTL_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_GLCM_LAN_QTXCTL_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_GLCM_LAN_QTXCTL_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_GLCM_LAN_QTXCTL_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_GLCM_LAN_QTXCTL_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_GLCM_LAN_QTXCTL_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_GLCM_LAN_QTXCTL_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_GLCM_LAN_QTXCTL_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_GLCM_LAN_RDYLIST_MEM_CFG 0x0010C4B4 /* Reset: POR */
+#define I40E_GLCM_LAN_RDYLIST_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_GLCM_LAN_RDYLIST_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_GLCM_LAN_RDYLIST_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_GLCM_LAN_RDYLIST_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_GLCM_LAN_RDYLIST_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_GLCM_LAN_RDYLIST_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_GLCM_LAN_RDYLIST_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_GLCM_LAN_RDYLIST_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_GLCM_LAN_RDYLIST_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_GLCM_LAN_RDYLIST_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_GLCM_LAN_RDYLIST_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_GLCM_LAN_RDYLIST_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_GLCM_LAN_RDYLIST_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_GLCM_LAN_RDYLIST_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_GLCM_LAN_RDYLIST_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_GLCM_LAN_RDYLIST_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_GLCM_LAN_RDYLIST_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_GLCM_LAN_RDYLIST_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_GLCM_LAN_RDYLIST_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_GLCM_LAN_RDYLIST_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_GLCM_LAN_RDYLIST_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_GLCM_LAN_RDYLIST_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_GLCM_LAN_RDYLIST_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_GLCM_LAN_RDYLIST_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_GLCM_LAN_RDYLIST_MEM_CFG_RME_SHIFT 12
+#define I40E_GLCM_LAN_RDYLIST_MEM_CFG_RME_MASK I40E_MASK(0x1, I40E_GLCM_LAN_RDYLIST_MEM_CFG_RME_SHIFT)
+#define I40E_GLCM_LAN_RDYLIST_MEM_CFG_RM_SHIFT 16
+#define I40E_GLCM_LAN_RDYLIST_MEM_CFG_RM_MASK I40E_MASK(0xF, I40E_GLCM_LAN_RDYLIST_MEM_CFG_RM_SHIFT)
+
+#define I40E_GLCM_LAN_RDYLIST_MEM_STATUS 0x0010C4B8 /* Reset: POR */
+#define I40E_GLCM_LAN_RDYLIST_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_GLCM_LAN_RDYLIST_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_GLCM_LAN_RDYLIST_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_GLCM_LAN_RDYLIST_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_GLCM_LAN_RDYLIST_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_GLCM_LAN_RDYLIST_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_GLCM_LAN_RDYLIST_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_GLCM_LAN_RDYLIST_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_GLCM_LAN_RDYLIST_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_GLCM_LAN_RDYLIST_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_GLCM_LAN_RDYLIST_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_GLCM_LAN_RDYLIST_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_GLCM_LAN_TAILPTR_MEM_CFG 0x0010C4C4 /* Reset: POR */
+#define I40E_GLCM_LAN_TAILPTR_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_GLCM_LAN_TAILPTR_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_GLCM_LAN_TAILPTR_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_GLCM_LAN_TAILPTR_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_GLCM_LAN_TAILPTR_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_GLCM_LAN_TAILPTR_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_GLCM_LAN_TAILPTR_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_GLCM_LAN_TAILPTR_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_GLCM_LAN_TAILPTR_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_GLCM_LAN_TAILPTR_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_GLCM_LAN_TAILPTR_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_GLCM_LAN_TAILPTR_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_GLCM_LAN_TAILPTR_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_GLCM_LAN_TAILPTR_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_GLCM_LAN_TAILPTR_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_GLCM_LAN_TAILPTR_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_GLCM_LAN_TAILPTR_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_GLCM_LAN_TAILPTR_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_GLCM_LAN_TAILPTR_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_GLCM_LAN_TAILPTR_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_GLCM_LAN_TAILPTR_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_GLCM_LAN_TAILPTR_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_GLCM_LAN_TAILPTR_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_GLCM_LAN_TAILPTR_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_GLCM_LAN_TAILPTR_MEM_CFG_RME_A_SHIFT 12
+#define I40E_GLCM_LAN_TAILPTR_MEM_CFG_RME_A_MASK I40E_MASK(0x1, I40E_GLCM_LAN_TAILPTR_MEM_CFG_RME_A_SHIFT)
+#define I40E_GLCM_LAN_TAILPTR_MEM_CFG_RME_B_SHIFT 13
+#define I40E_GLCM_LAN_TAILPTR_MEM_CFG_RME_B_MASK I40E_MASK(0x1, I40E_GLCM_LAN_TAILPTR_MEM_CFG_RME_B_SHIFT)
+#define I40E_GLCM_LAN_TAILPTR_MEM_CFG_RM_A_SHIFT 16
+#define I40E_GLCM_LAN_TAILPTR_MEM_CFG_RM_A_MASK I40E_MASK(0xF, I40E_GLCM_LAN_TAILPTR_MEM_CFG_RM_A_SHIFT)
+#define I40E_GLCM_LAN_TAILPTR_MEM_CFG_RM_B_SHIFT 20
+#define I40E_GLCM_LAN_TAILPTR_MEM_CFG_RM_B_MASK I40E_MASK(0xF, I40E_GLCM_LAN_TAILPTR_MEM_CFG_RM_B_SHIFT)
+
+#define I40E_GLCM_LAN_TAILPTR_MEM_STATUS 0x0010C4C8 /* Reset: POR */
+#define I40E_GLCM_LAN_TAILPTR_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_GLCM_LAN_TAILPTR_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_GLCM_LAN_TAILPTR_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_GLCM_LAN_TAILPTR_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_GLCM_LAN_TAILPTR_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_GLCM_LAN_TAILPTR_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_GLCM_LAN_TAILPTR_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_GLCM_LAN_TAILPTR_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_GLCM_LAN_TAILPTR_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_GLCM_LAN_TAILPTR_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_GLCM_LAN_TAILPTR_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_GLCM_LAN_TAILPTR_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_GLDFT_NCSI_PADS_CFG 0x0009408C /* Reset: POR */
+#define I40E_GLDFT_NCSI_PADS_CFG_GLNCSI_PADS_CFG_SHIFT 0
+#define I40E_GLDFT_NCSI_PADS_CFG_GLNCSI_PADS_CFG_MASK I40E_MASK(0x1, I40E_GLDFT_NCSI_PADS_CFG_GLNCSI_PADS_CFG_SHIFT)
+
+#define I40E_GLDFT_TS_STAT 0x00094080 /* Reset: POR */
+#define I40E_GLDFT_TS_STAT_SBL_THERM_IND_SHIFT 0
+#define I40E_GLDFT_TS_STAT_SBL_THERM_IND_MASK I40E_MASK(0x7, I40E_GLDFT_TS_STAT_SBL_THERM_IND_SHIFT)
+#define I40E_GLDFT_TS_STAT_SBT_THERM_VAL_SHIFT 3
+#define I40E_GLDFT_TS_STAT_SBT_THERM_VAL_MASK I40E_MASK(0x1FF, I40E_GLDFT_TS_STAT_SBT_THERM_VAL_SHIFT)
+#define I40E_GLDFT_TS_STAT_SBT_THERM_VALID_SHIFT 31
+#define I40E_GLDFT_TS_STAT_SBT_THERM_VALID_MASK I40E_MASK(0x1, I40E_GLDFT_TS_STAT_SBT_THERM_VALID_SHIFT)
+
+#define I40E_GLDFT_VISA_CTRL 0x00094084 /* Reset: POR */
+#define I40E_GLDFT_VISA_CTRL_VISA_INDEX_SHIFT 0
+#define I40E_GLDFT_VISA_CTRL_VISA_INDEX_MASK I40E_MASK(0x1F, I40E_GLDFT_VISA_CTRL_VISA_INDEX_SHIFT)
+#define I40E_GLDFT_VISA_CTRL_VISA_UNIT_ID_SHIFT 5
+#define I40E_GLDFT_VISA_CTRL_VISA_UNIT_ID_MASK I40E_MASK(0x1FF, I40E_GLDFT_VISA_CTRL_VISA_UNIT_ID_SHIFT)
+#define I40E_GLDFT_VISA_CTRL_VISA_OPCODE_SHIFT 31
+#define I40E_GLDFT_VISA_CTRL_VISA_OPCODE_MASK I40E_MASK(0x1, I40E_GLDFT_VISA_CTRL_VISA_OPCODE_SHIFT)
+
+#define I40E_GLDFT_VISA_DATA 0x00094088 /* Reset: POR */
+#define I40E_GLDFT_VISA_DATA_GLDFT_VISA_DATA_SHIFT 0
+#define I40E_GLDFT_VISA_DATA_GLDFT_VISA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLDFT_VISA_DATA_GLDFT_VISA_DATA_SHIFT)
+
+#define I40E_GLDFT_VISA_DISABLE 0x00094098 /* Reset: POR */
+#define I40E_GLDFT_VISA_DISABLE_VISA_CUSTOMER_DISABLE_SHIFT 0
+#define I40E_GLDFT_VISA_DISABLE_VISA_CUSTOMER_DISABLE_MASK I40E_MASK(0x1, I40E_GLDFT_VISA_DISABLE_VISA_CUSTOMER_DISABLE_SHIFT)
+#define I40E_GLDFT_VISA_DISABLE_VISA_ALL_DISABLE_SHIFT 1
+#define I40E_GLDFT_VISA_DISABLE_VISA_ALL_DISABLE_MASK I40E_MASK(0x1, I40E_GLDFT_VISA_DISABLE_VISA_ALL_DISABLE_SHIFT)
+
+#define I40E_GLDFT_VISA_LANE_LSB 0x00094090 /* Reset: POR */
+#define I40E_GLDFT_VISA_LANE_LSB_GLDFT_VISA_LANE_LSB_SHIFT 0
+#define I40E_GLDFT_VISA_LANE_LSB_GLDFT_VISA_LANE_LSB_MASK I40E_MASK(0xFFFFFFFF, I40E_GLDFT_VISA_LANE_LSB_GLDFT_VISA_LANE_LSB_SHIFT)
+
+#define I40E_GLDFT_VISA_LANE_MSB 0x00094094 /* Reset: POR */
+#define I40E_GLDFT_VISA_LANE_MSB_GLDFT_VISA_LANE_MSB_SHIFT 0
+#define I40E_GLDFT_VISA_LANE_MSB_GLDFT_VISA_LANE_MSB_MASK I40E_MASK(0xFFFFFFFF, I40E_GLDFT_VISA_LANE_MSB_GLDFT_VISA_LANE_MSB_SHIFT)
+
+#define I40E_GLLAN_TCB_STAT 0x000AE0D0 /* Reset: CORER */
+#define I40E_GLLAN_TCB_STAT_LL_STAT_SHIFT 0
+#define I40E_GLLAN_TCB_STAT_LL_STAT_MASK I40E_MASK(0xFFFF, I40E_GLLAN_TCB_STAT_LL_STAT_SHIFT)
+#define I40E_GLLAN_TCB_STAT_RSV_SHIFT 16
+#define I40E_GLLAN_TCB_STAT_RSV_MASK I40E_MASK(0xFFFF, I40E_GLLAN_TCB_STAT_RSV_SHIFT)
+
+#define I40E_GLPCI_CLKCTL 0x000B819C /* Reset: POR */
+#define I40E_GLPCI_CLKCTL_PCI_CLK_DYN_SHIFT 0
+#define I40E_GLPCI_CLKCTL_PCI_CLK_DYN_MASK I40E_MASK(0x1, I40E_GLPCI_CLKCTL_PCI_CLK_DYN_SHIFT)
+#define I40E_GLPCI_CLKCTL_PCI_CLK_STABLE_SHIFT 1
+#define I40E_GLPCI_CLKCTL_PCI_CLK_STABLE_MASK I40E_MASK(0x1, I40E_GLPCI_CLKCTL_PCI_CLK_STABLE_SHIFT)
+
+#define I40E_GLPCI_MCTP_CREDIT 0x000BE4EC /* Reset: PCIR */
+#define I40E_GLPCI_MCTP_CREDIT_HEADER_SHIFT 0
+#define I40E_GLPCI_MCTP_CREDIT_HEADER_MASK I40E_MASK(0xFF, I40E_GLPCI_MCTP_CREDIT_HEADER_SHIFT)
+#define I40E_GLPCI_MCTP_CREDIT_DATA_SHIFT 8
+#define I40E_GLPCI_MCTP_CREDIT_DATA_MASK I40E_MASK(0xFFF, I40E_GLPCI_MCTP_CREDIT_DATA_SHIFT)
+
+#define I40E_GLPCI_MCTP_MASK_0 0x000BE4C4 /* Reset: PCIR */
+#define I40E_GLPCI_MCTP_MASK_0_GLPCI_MCTP_MASK_0_SHIFT 0
+#define I40E_GLPCI_MCTP_MASK_0_GLPCI_MCTP_MASK_0_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_MCTP_MASK_0_GLPCI_MCTP_MASK_0_SHIFT)
+
+#define I40E_GLPCI_MCTP_MASK_1 0x000BE4C8 /* Reset: PCIR */
+#define I40E_GLPCI_MCTP_MASK_1_GLPCI_MCTP_MASK_1_SHIFT 0
+#define I40E_GLPCI_MCTP_MASK_1_GLPCI_MCTP_MASK_1_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_MCTP_MASK_1_GLPCI_MCTP_MASK_1_SHIFT)
+
+#define I40E_GLPCI_MCTP_MASK_2 0x000BE4CC /* Reset: PCIR */
+#define I40E_GLPCI_MCTP_MASK_2_GLPCI_MCTP_MASK_2_SHIFT 0
+#define I40E_GLPCI_MCTP_MASK_2_GLPCI_MCTP_MASK_2_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_MCTP_MASK_2_GLPCI_MCTP_MASK_2_SHIFT)
+
+#define I40E_GLPCI_MCTP_MASK_3 0x000BE4D0 /* Reset: PCIR */
+#define I40E_GLPCI_MCTP_MASK_3_GLPCI_MCTP_MASK_3_SHIFT 0
+#define I40E_GLPCI_MCTP_MASK_3_GLPCI_MCTP_MASK_3_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_MCTP_MASK_3_GLPCI_MCTP_MASK_3_SHIFT)
+
+#define I40E_GLPCI_MCTP_MAX_PAY 0x000BE4E8 /* Reset: PCIR */
+#define I40E_GLPCI_MCTP_MAX_PAY_GLPCI_MCTP_MAX_PAY_SHIFT 0
+#define I40E_GLPCI_MCTP_MAX_PAY_GLPCI_MCTP_MAX_PAY_MASK I40E_MASK(0x7FF, I40E_GLPCI_MCTP_MAX_PAY_GLPCI_MCTP_MAX_PAY_SHIFT)
+
+#define I40E_GLPCI_MCTP_VAL_0 0x000BE4D4 /* Reset: PCIR */
+#define I40E_GLPCI_MCTP_VAL_0_GLPCI_MCTP_VAL_0_SHIFT 0
+#define I40E_GLPCI_MCTP_VAL_0_GLPCI_MCTP_VAL_0_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_MCTP_VAL_0_GLPCI_MCTP_VAL_0_SHIFT)
+
+#define I40E_GLPCI_MCTP_VAL_1 0x000BE4D8 /* Reset: PCIR */
+#define I40E_GLPCI_MCTP_VAL_1_GLPCI_MCTP_VAL_1_SHIFT 0
+#define I40E_GLPCI_MCTP_VAL_1_GLPCI_MCTP_VAL_1_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_MCTP_VAL_1_GLPCI_MCTP_VAL_1_SHIFT)
+
+#define I40E_GLPCI_MCTP_VAL_2 0x000BE4DC /* Reset: PCIR */
+#define I40E_GLPCI_MCTP_VAL_2_GLPCI_MCTP_VAL_2_SHIFT 0
+#define I40E_GLPCI_MCTP_VAL_2_GLPCI_MCTP_VAL_2_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_MCTP_VAL_2_GLPCI_MCTP_VAL_2_SHIFT)
+
+#define I40E_GLPCI_MCTP_VAL_3 0x000BE4E0 /* Reset: PCIR */
+#define I40E_GLPCI_MCTP_VAL_3_GLPCI_MCTP_VAL_3_SHIFT 0
+#define I40E_GLPCI_MCTP_VAL_3_GLPCI_MCTP_VAL_3_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_MCTP_VAL_3_GLPCI_MCTP_VAL_3_SHIFT)
+
+#define I40E_GLPCI_OSR_LIMIT 0x000BE504 /* Reset: PCIR */
+#define I40E_GLPCI_OSR_LIMIT_OSR_LIMIT_SHIFT 0
+#define I40E_GLPCI_OSR_LIMIT_OSR_LIMIT_MASK I40E_MASK(0xFF, I40E_GLPCI_OSR_LIMIT_OSR_LIMIT_SHIFT)
+
+#define I40E_GLPCI_PHY_SPARE_IN 0x000BE508 /* Reset: POR */
+#define I40E_GLPCI_PHY_SPARE_IN_DIG_IN_SPARE_SHIFT 0
+#define I40E_GLPCI_PHY_SPARE_IN_DIG_IN_SPARE_MASK I40E_MASK(0x3FF, I40E_GLPCI_PHY_SPARE_IN_DIG_IN_SPARE_SHIFT)
+
+#define I40E_GLPCI_PHY_SPARE_OUT 0x000BE50C /* Reset: POR */
+#define I40E_GLPCI_PHY_SPARE_OUT_TAMAR_DIG_OUT_SPARE_SHIFT 0
+#define I40E_GLPCI_PHY_SPARE_OUT_TAMAR_DIG_OUT_SPARE_MASK I40E_MASK(0x3FF, I40E_GLPCI_PHY_SPARE_OUT_TAMAR_DIG_OUT_SPARE_SHIFT)
+
+#define I40E_GLPCI_SHUTDOWN_DIS 0x000BE4F0 /* Reset: PCIR */
+#define I40E_GLPCI_SHUTDOWN_DIS_SHUTDOWN_DIS_SHIFT 0
+#define I40E_GLPCI_SHUTDOWN_DIS_SHUTDOWN_DIS_MASK I40E_MASK(0x1, I40E_GLPCI_SHUTDOWN_DIS_SHUTDOWN_DIS_SHIFT)
+
+#define I40E_GLPCI_SPARE1 0x000BE510 /* Reset: POR */
+#define I40E_GLPCI_SPARE1_WU_COMPLIANT_CB_SHIFT 0
+#define I40E_GLPCI_SPARE1_WU_COMPLIANT_CB_MASK I40E_MASK(0x1, I40E_GLPCI_SPARE1_WU_COMPLIANT_CB_SHIFT)
+#define I40E_GLPCI_SPARE1_BYPASS_SIDEBAND_SHIFT 1
+#define I40E_GLPCI_SPARE1_BYPASS_SIDEBAND_MASK I40E_MASK(0x1, I40E_GLPCI_SPARE1_BYPASS_SIDEBAND_SHIFT)
+#define I40E_GLPCI_SPARE1_PFR_EN_SHIFT 2
+#define I40E_GLPCI_SPARE1_PFR_EN_MASK I40E_MASK(0x1, I40E_GLPCI_SPARE1_PFR_EN_SHIFT)
+#define I40E_GLPCI_SPARE1_DISABLE_DUMMY_COMP_0ING_SHIFT 3
+#define I40E_GLPCI_SPARE1_DISABLE_DUMMY_COMP_0ING_MASK I40E_MASK(0x1, I40E_GLPCI_SPARE1_DISABLE_DUMMY_COMP_0ING_SHIFT)
+#define I40E_GLPCI_SPARE1_ROM_EMPR_TRIGGER_SHIFT 4
+#define I40E_GLPCI_SPARE1_ROM_EMPR_TRIGGER_MASK I40E_MASK(0x1, I40E_GLPCI_SPARE1_ROM_EMPR_TRIGGER_SHIFT)
+#define I40E_GLPCI_SPARE1_DISABLE_PFR_ON_BME_SHIFT 5
+#define I40E_GLPCI_SPARE1_DISABLE_PFR_ON_BME_MASK I40E_MASK(0x1, I40E_GLPCI_SPARE1_DISABLE_PFR_ON_BME_SHIFT)
+#define I40E_GLPCI_SPARE1_TAG_RELEASE_ON_ARRIVE_SHIFT 6
+#define I40E_GLPCI_SPARE1_TAG_RELEASE_ON_ARRIVE_MASK I40E_MASK(0x1, I40E_GLPCI_SPARE1_TAG_RELEASE_ON_ARRIVE_SHIFT)
+#define I40E_GLPCI_SPARE1_IOSF_ARB_PIPEM_MODE_SHIFT 7
+#define I40E_GLPCI_SPARE1_IOSF_ARB_PIPEM_MODE_MASK I40E_MASK(0x1, I40E_GLPCI_SPARE1_IOSF_ARB_PIPEM_MODE_SHIFT)
+#define I40E_GLPCI_SPARE1_PCIE_MAX_OS_DATA_SHIFT 8
+#define I40E_GLPCI_SPARE1_PCIE_MAX_OS_DATA_MASK I40E_MASK(0x1FFFF, I40E_GLPCI_SPARE1_PCIE_MAX_OS_DATA_SHIFT)
+#define I40E_GLPCI_SPARE1_SPARE_SHIFT 25
+#define I40E_GLPCI_SPARE1_SPARE_MASK I40E_MASK(0x7F, I40E_GLPCI_SPARE1_SPARE_SHIFT)
+
+#define I40E_GLPCI_SPARE2 0x000BE514 /* Reset: POR */
+#define I40E_GLPCI_SPARE2_SPARE_SHIFT 0
+#define I40E_GLPCI_SPARE2_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE2_SPARE_SHIFT)
+
+#define I40E_GLQF_ABORT_MASK(_i) (0x0026CCC8 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_GLQF_ABORT_MASK_MAX_INDEX 1
+#define I40E_GLQF_ABORT_MASK_GLQF_ABORT_MASK_SHIFT 0
+#define I40E_GLQF_ABORT_MASK_GLQF_ABORT_MASK_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_ABORT_MASK_GLQF_ABORT_MASK_SHIFT)
+
+#define I40E_GLQF_L2_MAP(_i) (0x0026CBF8 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_GLQF_L2_MAP_MAX_INDEX 1
+#define I40E_GLQF_L2_MAP_GLQF_L2_MAP_SHIFT 0
+#define I40E_GLQF_L2_MAP_GLQF_L2_MAP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_L2_MAP_GLQF_L2_MAP_SHIFT)
+
+#define I40E_GLQF_L3_MAP(_i) (0x0026C700 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_L3_MAP_MAX_INDEX 63
+#define I40E_GLQF_L3_MAP_TR_IDX_CODE_SHIFT 0
+#define I40E_GLQF_L3_MAP_TR_IDX_CODE_MASK I40E_MASK(0x3F, I40E_GLQF_L3_MAP_TR_IDX_CODE_SHIFT)
+#define I40E_GLQF_L3_MAP_TR_OPCODE_SHIFT 6
+#define I40E_GLQF_L3_MAP_TR_OPCODE_MASK I40E_MASK(0x3, I40E_GLQF_L3_MAP_TR_OPCODE_SHIFT)
+#define I40E_GLQF_L3_MAP_MIN_SKIP_GAP_SHIFT 8
+#define I40E_GLQF_L3_MAP_MIN_SKIP_GAP_MASK I40E_MASK(0x7F, I40E_GLQF_L3_MAP_MIN_SKIP_GAP_SHIFT)
+#define I40E_GLQF_L3_MAP_MIN_SKIP_ENA_SHIFT 15
+#define I40E_GLQF_L3_MAP_MIN_SKIP_ENA_MASK I40E_MASK(0x1, I40E_GLQF_L3_MAP_MIN_SKIP_ENA_SHIFT)
+
+#define I40E_GLQF_OPT_MAP 0x0026CBDC /* Reset: CORER */
+#define I40E_GLQF_OPT_MAP_FRAG_IDX_SHIFT 0
+#define I40E_GLQF_OPT_MAP_FRAG_IDX_MASK I40E_MASK(0x3F, I40E_GLQF_OPT_MAP_FRAG_IDX_SHIFT)
+#define I40E_GLQF_OPT_MAP_IP_OPT_IDX_SHIFT 12
+#define I40E_GLQF_OPT_MAP_IP_OPT_IDX_MASK I40E_MASK(0x3F, I40E_GLQF_OPT_MAP_IP_OPT_IDX_SHIFT)
+#define I40E_GLQF_OPT_MAP_TCP_OPT_IDX_SHIFT 18
+#define I40E_GLQF_OPT_MAP_TCP_OPT_IDX_MASK I40E_MASK(0x3F, I40E_GLQF_OPT_MAP_TCP_OPT_IDX_SHIFT)
+
+#define I40E_GLRCB_DBG_CTL 0x00122620 /* Reset: CORER */
+#define I40E_GLRCB_DBG_CTL_MEM_ADDR_SHIFT 0
+#define I40E_GLRCB_DBG_CTL_MEM_ADDR_MASK I40E_MASK(0xFFFF, I40E_GLRCB_DBG_CTL_MEM_ADDR_SHIFT)
+#define I40E_GLRCB_DBG_CTL_MEM_SEL_SHIFT 16
+#define I40E_GLRCB_DBG_CTL_MEM_SEL_MASK I40E_MASK(0x1F, I40E_GLRCB_DBG_CTL_MEM_SEL_SHIFT)
+
+#define I40E_GLRCB_DBG_DATA0 0x00122628 /* Reset: CORER */
+#define I40E_GLRCB_DBG_DATA0_DBG_DATA_SHIFT 0
+#define I40E_GLRCB_DBG_DATA0_DBG_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLRCB_DBG_DATA0_DBG_DATA_SHIFT)
+
+#define I40E_GLRCB_DBG_DATA1 0x0012262C /* Reset: CORER */
+#define I40E_GLRCB_DBG_DATA1_DBG_DATA_SHIFT 0
+#define I40E_GLRCB_DBG_DATA1_DBG_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLRCB_DBG_DATA1_DBG_DATA_SHIFT)
+
+#define I40E_GLRCB_DBG_DATA2 0x00122630 /* Reset: CORER */
+#define I40E_GLRCB_DBG_DATA2_DBG_DATA_SHIFT 0
+#define I40E_GLRCB_DBG_DATA2_DBG_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLRCB_DBG_DATA2_DBG_DATA_SHIFT)
+
+#define I40E_GLRCB_DBG_DATA3 0x00122634 /* Reset: CORER */
+#define I40E_GLRCB_DBG_DATA3_DBG_DATA_SHIFT 0
+#define I40E_GLRCB_DBG_DATA3_DBG_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLRCB_DBG_DATA3_DBG_DATA_SHIFT)
+
+#define I40E_GLRCB_DBG_DATA4 0x00122638 /* Reset: CORER */
+#define I40E_GLRCB_DBG_DATA4_DBG_DATA_SHIFT 0
+#define I40E_GLRCB_DBG_DATA4_DBG_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLRCB_DBG_DATA4_DBG_DATA_SHIFT)
+
+#define I40E_GLRCB_DBG_DATA5 0x0012263C /* Reset: CORER */
+#define I40E_GLRCB_DBG_DATA5_DBG_DATA_SHIFT 0
+#define I40E_GLRCB_DBG_DATA5_DBG_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLRCB_DBG_DATA5_DBG_DATA_SHIFT)
+
+#define I40E_GLRCB_DBG_FEAT 0x0012266C /* Reset: CORER */
+#define I40E_GLRCB_DBG_FEAT_SET_DROP_SHIFT 0
+#define I40E_GLRCB_DBG_FEAT_SET_DROP_MASK I40E_MASK(0xF, I40E_GLRCB_DBG_FEAT_SET_DROP_SHIFT)
+
+#define I40E_GLRCB_DBG_RD_STOP 0x00122640 /* Reset: CORER */
+#define I40E_GLRCB_DBG_RD_STOP_ENA_SHIFT 0
+#define I40E_GLRCB_DBG_RD_STOP_ENA_MASK I40E_MASK(0x1, I40E_GLRCB_DBG_RD_STOP_ENA_SHIFT)
+
+#define I40E_GLRCB_LL_BP_CFG 0x0012261C /* Reset: CORER */
+#define I40E_GLRCB_LL_BP_CFG_MIN_THRS_SHIFT 0
+#define I40E_GLRCB_LL_BP_CFG_MIN_THRS_MASK I40E_MASK(0xFFFF, I40E_GLRCB_LL_BP_CFG_MIN_THRS_SHIFT)
+#define I40E_GLRCB_LL_BP_CFG_MAX_THRS_SHIFT 16
+#define I40E_GLRCB_LL_BP_CFG_MAX_THRS_MASK I40E_MASK(0xFFFF, I40E_GLRCB_LL_BP_CFG_MAX_THRS_SHIFT)
+
+#define I40E_GLRCB_TO_1MS_TICK_CFG 0x00122624 /* Reset: CORER */
+#define I40E_GLRCB_TO_1MS_TICK_CFG_UC_DIV_RATIO_SHIFT 0
+#define I40E_GLRCB_TO_1MS_TICK_CFG_UC_DIV_RATIO_MASK I40E_MASK(0xFFFFF, I40E_GLRCB_TO_1MS_TICK_CFG_UC_DIV_RATIO_SHIFT)
+
+#define I40E_GLRLAN_COMPLETION_FIFO_CTL 0x0012A574 /* Reset: CORER */
+#define I40E_GLRLAN_COMPLETION_FIFO_CTL_BP_THRSHLD_SHIFT 0
+#define I40E_GLRLAN_COMPLETION_FIFO_CTL_BP_THRSHLD_MASK I40E_MASK(0x3FF, I40E_GLRLAN_COMPLETION_FIFO_CTL_BP_THRSHLD_SHIFT)
+
+#define I40E_GLRLAN_DATA_FLUSH_REQ_FIFO_CTL 0x0012A58C /* Reset: CORER */
+#define I40E_GLRLAN_DATA_FLUSH_REQ_FIFO_CTL_BP_THRSHLD_SHIFT 0
+#define I40E_GLRLAN_DATA_FLUSH_REQ_FIFO_CTL_BP_THRSHLD_MASK I40E_MASK(0x3FF, I40E_GLRLAN_DATA_FLUSH_REQ_FIFO_CTL_BP_THRSHLD_SHIFT)
+
+#define I40E_GLRLAN_DBG_CTL 0x0012A594 /* Reset: CORER */
+#define I40E_GLRLAN_DBG_CTL_MEM_ADDR_SHIFT 0
+#define I40E_GLRLAN_DBG_CTL_MEM_ADDR_MASK I40E_MASK(0xFFFF, I40E_GLRLAN_DBG_CTL_MEM_ADDR_SHIFT)
+#define I40E_GLRLAN_DBG_CTL_MEM_SEL_SHIFT 16
+#define I40E_GLRLAN_DBG_CTL_MEM_SEL_MASK I40E_MASK(0x1F, I40E_GLRLAN_DBG_CTL_MEM_SEL_SHIFT)
+
+#define I40E_GLRLAN_DBG_DATA0 0x0012A598 /* Reset: CORER */
+#define I40E_GLRLAN_DBG_DATA0_DBG_DATA_SHIFT 0
+#define I40E_GLRLAN_DBG_DATA0_DBG_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLRLAN_DBG_DATA0_DBG_DATA_SHIFT)
+
+#define I40E_GLRLAN_DBG_DATA1 0x0012A59C /* Reset: CORER */
+#define I40E_GLRLAN_DBG_DATA1_DBG_DATA_SHIFT 0
+#define I40E_GLRLAN_DBG_DATA1_DBG_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLRLAN_DBG_DATA1_DBG_DATA_SHIFT)
+
+#define I40E_GLRLAN_DBG_DATA2 0x0012A5A0 /* Reset: CORER */
+#define I40E_GLRLAN_DBG_DATA2_DBG_DATA_SHIFT 0
+#define I40E_GLRLAN_DBG_DATA2_DBG_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLRLAN_DBG_DATA2_DBG_DATA_SHIFT)
+
+#define I40E_GLRLAN_DBG_DATA3 0x0012A5A4 /* Reset: CORER */
+#define I40E_GLRLAN_DBG_DATA3_DBG_DATA_SHIFT 0
+#define I40E_GLRLAN_DBG_DATA3_DBG_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLRLAN_DBG_DATA3_DBG_DATA_SHIFT)
+
+#define I40E_GLRLAN_DBG_DATA4 0x0012A5A8 /* Reset: CORER */
+#define I40E_GLRLAN_DBG_DATA4_DBG_DATA_SHIFT 0
+#define I40E_GLRLAN_DBG_DATA4_DBG_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLRLAN_DBG_DATA4_DBG_DATA_SHIFT)
+
+#define I40E_GLRLAN_DBG_DATA5 0x0012A5AC /* Reset: CORER */
+#define I40E_GLRLAN_DBG_DATA5_DBG_DATA_SHIFT 0
+#define I40E_GLRLAN_DBG_DATA5_DBG_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLRLAN_DBG_DATA5_DBG_DATA_SHIFT)
+
+#define I40E_GLRLAN_DBG_DATA6 0x0012A5B0 /* Reset: CORER */
+#define I40E_GLRLAN_DBG_DATA6_DBG_DATA_SHIFT 0
+#define I40E_GLRLAN_DBG_DATA6_DBG_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLRLAN_DBG_DATA6_DBG_DATA_SHIFT)
+
+#define I40E_GLRLAN_DBG_DATA7 0x0012A5B4 /* Reset: CORER */
+#define I40E_GLRLAN_DBG_DATA7_DBG_DATA_SHIFT 0
+#define I40E_GLRLAN_DBG_DATA7_DBG_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLRLAN_DBG_DATA7_DBG_DATA_SHIFT)
+
+#define I40E_GLRLAN_DIX_WB_FIFO_CTL 0x0012A590 /* Reset: CORER */
+#define I40E_GLRLAN_DIX_WB_FIFO_CTL_BP_THRSHLD_SHIFT 0
+#define I40E_GLRLAN_DIX_WB_FIFO_CTL_BP_THRSHLD_MASK I40E_MASK(0x3FF, I40E_GLRLAN_DIX_WB_FIFO_CTL_BP_THRSHLD_SHIFT)
+
+#define I40E_GLRLAN_DSCR_FETCH_FIFO_CTL 0x0012A584 /* Reset: CORER */
+#define I40E_GLRLAN_DSCR_FETCH_FIFO_CTL_BP_THRSHLD_SHIFT 0
+#define I40E_GLRLAN_DSCR_FETCH_FIFO_CTL_BP_THRSHLD_MASK I40E_MASK(0x3FF, I40E_GLRLAN_DSCR_FETCH_FIFO_CTL_BP_THRSHLD_SHIFT)
+
+#define I40E_GLRLAN_DSCR_REQ_FIFO_CTL 0x0012A554 /* Reset: CORER */
+#define I40E_GLRLAN_DSCR_REQ_FIFO_CTL_BP_THRSHLD_SHIFT 0
+#define I40E_GLRLAN_DSCR_REQ_FIFO_CTL_BP_THRSHLD_MASK I40E_MASK(0x3FF, I40E_GLRLAN_DSCR_REQ_FIFO_CTL_BP_THRSHLD_SHIFT)
+
+#define I40E_GLRLAN_DSCR_WR_REQ_FIFO_CTL 0x0012A57C /* Reset: CORER */
+#define I40E_GLRLAN_DSCR_WR_REQ_FIFO_CTL_BP_THRSHLD_SHIFT 0
+#define I40E_GLRLAN_DSCR_WR_REQ_FIFO_CTL_BP_THRSHLD_MASK I40E_MASK(0x3FF, I40E_GLRLAN_DSCR_WR_REQ_FIFO_CTL_BP_THRSHLD_SHIFT)
+
+#define I40E_GLRLAN_DUMMY_CNTX_0(_i) (0x0012A5BC + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLRLAN_DUMMY_CNTX_0_MAX_INDEX 3
+#define I40E_GLRLAN_DUMMY_CNTX_0_DUMMY_CNTX_SHIFT 0
+#define I40E_GLRLAN_DUMMY_CNTX_0_DUMMY_CNTX_MASK I40E_MASK(0xFFFFFFFF, I40E_GLRLAN_DUMMY_CNTX_0_DUMMY_CNTX_SHIFT)
+
+#define I40E_GLRLAN_DUMMY_CNTX_1(_i) (0x0012A5CC + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLRLAN_DUMMY_CNTX_1_MAX_INDEX 3
+#define I40E_GLRLAN_DUMMY_CNTX_1_DUMMY_CNTX_SHIFT 0
+#define I40E_GLRLAN_DUMMY_CNTX_1_DUMMY_CNTX_MASK I40E_MASK(0xFFFFFFFF, I40E_GLRLAN_DUMMY_CNTX_1_DUMMY_CNTX_SHIFT)
+
+#define I40E_GLRLAN_DUMMY_CNTX_ENA 0x0012A5DC /* Reset: CORER */
+#define I40E_GLRLAN_DUMMY_CNTX_ENA_DUMMY_CNTX_ENA_SHIFT 0
+#define I40E_GLRLAN_DUMMY_CNTX_ENA_DUMMY_CNTX_ENA_MASK I40E_MASK(0x1, I40E_GLRLAN_DUMMY_CNTX_ENA_DUMMY_CNTX_ENA_SHIFT)
+
+#define I40E_GLRLAN_ITR_NOTIFICATION_FIFO_CTL 0x0012A578 /* Reset: CORER */
+#define I40E_GLRLAN_ITR_NOTIFICATION_FIFO_CTL_BP_THRSHLD_SHIFT 0
+#define I40E_GLRLAN_ITR_NOTIFICATION_FIFO_CTL_BP_THRSHLD_MASK I40E_MASK(0x3FF, I40E_GLRLAN_ITR_NOTIFICATION_FIFO_CTL_BP_THRSHLD_SHIFT)
+
+#define I40E_GLRLAN_ITR_WR_DONE_FIFO_CTL 0x0012A580 /* Reset: CORER */
+#define I40E_GLRLAN_ITR_WR_DONE_FIFO_CTL_BP_THRSHLD_SHIFT 0
+#define I40E_GLRLAN_ITR_WR_DONE_FIFO_CTL_BP_THRSHLD_MASK I40E_MASK(0x3FF, I40E_GLRLAN_ITR_WR_DONE_FIFO_CTL_BP_THRSHLD_SHIFT)
+
+#define I40E_GLRLAN_PIM_REQ_FIFO_CTL 0x0012A570 /* Reset: CORER */
+#define I40E_GLRLAN_PIM_REQ_FIFO_CTL_BP_THRSHLD_SHIFT 0
+#define I40E_GLRLAN_PIM_REQ_FIFO_CTL_BP_THRSHLD_MASK I40E_MASK(0x3FF, I40E_GLRLAN_PIM_REQ_FIFO_CTL_BP_THRSHLD_SHIFT)
+
+#define I40E_GLRLAN_QCNTX_DATA_WB_FIFO_CTL 0x0012A568 /* Reset: CORER */
+#define I40E_GLRLAN_QCNTX_DATA_WB_FIFO_CTL_BP_THRSHLD_SHIFT 0
+#define I40E_GLRLAN_QCNTX_DATA_WB_FIFO_CTL_BP_THRSHLD_MASK I40E_MASK(0x3FF, I40E_GLRLAN_QCNTX_DATA_WB_FIFO_CTL_BP_THRSHLD_SHIFT)
+
+#define I40E_GLRLAN_QCNTX_MT2L_WB_FIFO_CTL 0x0012A56C /* Reset: CORER */
+#define I40E_GLRLAN_QCNTX_MT2L_WB_FIFO_CTL_BP_THRSHLD_SHIFT 0
+#define I40E_GLRLAN_QCNTX_MT2L_WB_FIFO_CTL_BP_THRSHLD_MASK I40E_MASK(0x3FF, I40E_GLRLAN_QCNTX_MT2L_WB_FIFO_CTL_BP_THRSHLD_SHIFT)
+
+#define I40E_GLRLAN_QCNTX_NUM_WB_FIFO_CTL 0x0012A564 /* Reset: CORER */
+#define I40E_GLRLAN_QCNTX_NUM_WB_FIFO_CTL_BP_THRSHLD_SHIFT 0
+#define I40E_GLRLAN_QCNTX_NUM_WB_FIFO_CTL_BP_THRSHLD_MASK I40E_MASK(0x3FF, I40E_GLRLAN_QCNTX_NUM_WB_FIFO_CTL_BP_THRSHLD_SHIFT)
+
+#define I40E_GLRLAN_RDPU_ATTR_FIFO_CTL 0x0012A55C /* Reset: CORER */
+#define I40E_GLRLAN_RDPU_ATTR_FIFO_CTL_BP_THRSHLD_SHIFT 0
+#define I40E_GLRLAN_RDPU_ATTR_FIFO_CTL_BP_THRSHLD_MASK I40E_MASK(0x3FF, I40E_GLRLAN_RDPU_ATTR_FIFO_CTL_BP_THRSHLD_SHIFT)
+
+#define I40E_GLRLAN_RDPU_CMD_FIFO_CTL 0x0012A558 /* Reset: CORER */
+#define I40E_GLRLAN_RDPU_CMD_FIFO_CTL_BP_THRSHLD_SHIFT 0
+#define I40E_GLRLAN_RDPU_CMD_FIFO_CTL_BP_THRSHLD_MASK I40E_MASK(0x3FF, I40E_GLRLAN_RDPU_CMD_FIFO_CTL_BP_THRSHLD_SHIFT)
+
+#define I40E_GLRLAN_RDPU_WB_FIFO_CTL 0x0012A560 /* Reset: CORER */
+#define I40E_GLRLAN_RDPU_WB_FIFO_CTL_BP_THRSHLD_SHIFT 0
+#define I40E_GLRLAN_RDPU_WB_FIFO_CTL_BP_THRSHLD_MASK I40E_MASK(0x3FF, I40E_GLRLAN_RDPU_WB_FIFO_CTL_BP_THRSHLD_SHIFT)
+
+#define I40E_GLRLAN_REQ_INFO_FIFO_CTL 0x0012A588 /* Reset: CORER */
+#define I40E_GLRLAN_REQ_INFO_FIFO_CTL_BP_THRSHLD_SHIFT 0
+#define I40E_GLRLAN_REQ_INFO_FIFO_CTL_BP_THRSHLD_MASK I40E_MASK(0x3FF, I40E_GLRLAN_REQ_INFO_FIFO_CTL_BP_THRSHLD_SHIFT)
+
+#define I40E_GLRLAN_SPARE 0x0012A5B8 /* Reset: CORER */
+#define I40E_GLRLAN_SPARE_SPARE_BITS_SHIFT 0
+#define I40E_GLRLAN_SPARE_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLRLAN_SPARE_SPARE_BITS_SHIFT)
+
+#define I40E_GLTLAN_MAX_TCBCMD 0x000E64D4 /* Reset: CORER */
+#define I40E_GLTLAN_MAX_TCBCMD_MAX_TCBCMD_SHIFT 0
+#define I40E_GLTLAN_MAX_TCBCMD_MAX_TCBCMD_MASK I40E_MASK(0xF, I40E_GLTLAN_MAX_TCBCMD_MAX_TCBCMD_SHIFT)
+#define I40E_GLTLAN_MAX_TCBCMD_RSVD1_SHIFT 8
+#define I40E_GLTLAN_MAX_TCBCMD_RSVD1_MASK I40E_MASK(0x3, I40E_GLTLAN_MAX_TCBCMD_RSVD1_SHIFT)
+#define I40E_GLTLAN_MAX_TCBCMD_MULTPL_REQ_DIS_SHIFT 31
+#define I40E_GLTLAN_MAX_TCBCMD_MULTPL_REQ_DIS_MASK I40E_MASK(0x1, I40E_GLTLAN_MAX_TCBCMD_MULTPL_REQ_DIS_SHIFT)
+
+#define I40E_ITR_CAUSE_MEM_0_CFG 0x0003FC00 /* Reset: POR */
+#define I40E_ITR_CAUSE_MEM_0_CFG_ECC_EN_SHIFT 0
+#define I40E_ITR_CAUSE_MEM_0_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_ITR_CAUSE_MEM_0_CFG_ECC_EN_SHIFT)
+#define I40E_ITR_CAUSE_MEM_0_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_ITR_CAUSE_MEM_0_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_ITR_CAUSE_MEM_0_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_ITR_CAUSE_MEM_0_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_ITR_CAUSE_MEM_0_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_ITR_CAUSE_MEM_0_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_ITR_CAUSE_MEM_0_CFG_LS_FORCE_SHIFT 3
+#define I40E_ITR_CAUSE_MEM_0_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_ITR_CAUSE_MEM_0_CFG_LS_FORCE_SHIFT)
+#define I40E_ITR_CAUSE_MEM_0_CFG_LS_BYPASS_SHIFT 4
+#define I40E_ITR_CAUSE_MEM_0_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_ITR_CAUSE_MEM_0_CFG_LS_BYPASS_SHIFT)
+#define I40E_ITR_CAUSE_MEM_0_CFG_MASK_INT_SHIFT 5
+#define I40E_ITR_CAUSE_MEM_0_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_ITR_CAUSE_MEM_0_CFG_MASK_INT_SHIFT)
+#define I40E_ITR_CAUSE_MEM_0_CFG_FIX_CNT_SHIFT 8
+#define I40E_ITR_CAUSE_MEM_0_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_ITR_CAUSE_MEM_0_CFG_FIX_CNT_SHIFT)
+#define I40E_ITR_CAUSE_MEM_0_CFG_ERR_CNT_SHIFT 9
+#define I40E_ITR_CAUSE_MEM_0_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_ITR_CAUSE_MEM_0_CFG_ERR_CNT_SHIFT)
+#define I40E_ITR_CAUSE_MEM_0_CFG_RME_SHIFT 12
+#define I40E_ITR_CAUSE_MEM_0_CFG_RME_MASK I40E_MASK(0x1, I40E_ITR_CAUSE_MEM_0_CFG_RME_SHIFT)
+#define I40E_ITR_CAUSE_MEM_0_CFG_RM_SHIFT 16
+#define I40E_ITR_CAUSE_MEM_0_CFG_RM_MASK I40E_MASK(0xF, I40E_ITR_CAUSE_MEM_0_CFG_RM_SHIFT)
+
+#define I40E_ITR_CAUSE_MEM_0_STATUS 0x0003FC04 /* Reset: POR */
+#define I40E_ITR_CAUSE_MEM_0_STATUS_ECC_ERR_SHIFT 0
+#define I40E_ITR_CAUSE_MEM_0_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_ITR_CAUSE_MEM_0_STATUS_ECC_ERR_SHIFT)
+#define I40E_ITR_CAUSE_MEM_0_STATUS_ECC_FIX_SHIFT 1
+#define I40E_ITR_CAUSE_MEM_0_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_ITR_CAUSE_MEM_0_STATUS_ECC_FIX_SHIFT)
+#define I40E_ITR_CAUSE_MEM_0_STATUS_INIT_DONE_SHIFT 2
+#define I40E_ITR_CAUSE_MEM_0_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_ITR_CAUSE_MEM_0_STATUS_INIT_DONE_SHIFT)
+#define I40E_ITR_CAUSE_MEM_0_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_ITR_CAUSE_MEM_0_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_ITR_CAUSE_MEM_0_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_ITR_CAUSE_MEM_1_CFG 0x0003FC08 /* Reset: POR */
+#define I40E_ITR_CAUSE_MEM_1_CFG_ECC_EN_SHIFT 0
+#define I40E_ITR_CAUSE_MEM_1_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_ITR_CAUSE_MEM_1_CFG_ECC_EN_SHIFT)
+#define I40E_ITR_CAUSE_MEM_1_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_ITR_CAUSE_MEM_1_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_ITR_CAUSE_MEM_1_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_ITR_CAUSE_MEM_1_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_ITR_CAUSE_MEM_1_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_ITR_CAUSE_MEM_1_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_ITR_CAUSE_MEM_1_CFG_LS_FORCE_SHIFT 3
+#define I40E_ITR_CAUSE_MEM_1_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_ITR_CAUSE_MEM_1_CFG_LS_FORCE_SHIFT)
+#define I40E_ITR_CAUSE_MEM_1_CFG_LS_BYPASS_SHIFT 4
+#define I40E_ITR_CAUSE_MEM_1_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_ITR_CAUSE_MEM_1_CFG_LS_BYPASS_SHIFT)
+#define I40E_ITR_CAUSE_MEM_1_CFG_MASK_INT_SHIFT 5
+#define I40E_ITR_CAUSE_MEM_1_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_ITR_CAUSE_MEM_1_CFG_MASK_INT_SHIFT)
+#define I40E_ITR_CAUSE_MEM_1_CFG_FIX_CNT_SHIFT 8
+#define I40E_ITR_CAUSE_MEM_1_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_ITR_CAUSE_MEM_1_CFG_FIX_CNT_SHIFT)
+#define I40E_ITR_CAUSE_MEM_1_CFG_ERR_CNT_SHIFT 9
+#define I40E_ITR_CAUSE_MEM_1_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_ITR_CAUSE_MEM_1_CFG_ERR_CNT_SHIFT)
+#define I40E_ITR_CAUSE_MEM_1_CFG_RME_SHIFT 12
+#define I40E_ITR_CAUSE_MEM_1_CFG_RME_MASK I40E_MASK(0x1, I40E_ITR_CAUSE_MEM_1_CFG_RME_SHIFT)
+#define I40E_ITR_CAUSE_MEM_1_CFG_RM_SHIFT 16
+#define I40E_ITR_CAUSE_MEM_1_CFG_RM_MASK I40E_MASK(0xF, I40E_ITR_CAUSE_MEM_1_CFG_RM_SHIFT)
+
+#define I40E_ITR_CAUSE_MEM_1_STATUS 0x0003FC0C /* Reset: POR */
+#define I40E_ITR_CAUSE_MEM_1_STATUS_ECC_ERR_SHIFT 0
+#define I40E_ITR_CAUSE_MEM_1_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_ITR_CAUSE_MEM_1_STATUS_ECC_ERR_SHIFT)
+#define I40E_ITR_CAUSE_MEM_1_STATUS_ECC_FIX_SHIFT 1
+#define I40E_ITR_CAUSE_MEM_1_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_ITR_CAUSE_MEM_1_STATUS_ECC_FIX_SHIFT)
+#define I40E_ITR_CAUSE_MEM_1_STATUS_INIT_DONE_SHIFT 2
+#define I40E_ITR_CAUSE_MEM_1_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_ITR_CAUSE_MEM_1_STATUS_INIT_DONE_SHIFT)
+#define I40E_ITR_CAUSE_MEM_1_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_ITR_CAUSE_MEM_1_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_ITR_CAUSE_MEM_1_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_ITR_ECC_COR_ERR 0x0003FC24 /* Reset: POR */
+#define I40E_ITR_ECC_COR_ERR_CNT_SHIFT 0
+#define I40E_ITR_ECC_COR_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_ITR_ECC_COR_ERR_CNT_SHIFT)
+
+#define I40E_ITR_ECC_UNCOR_ERR 0x0003FC20 /* Reset: POR */
+#define I40E_ITR_ECC_UNCOR_ERR_CNT_SHIFT 0
+#define I40E_ITR_ECC_UNCOR_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_ITR_ECC_UNCOR_ERR_CNT_SHIFT)
+
+#define I40E_ITR_MSIX_MEM_0_CFG 0x0003FC10 /* Reset: POR */
+#define I40E_ITR_MSIX_MEM_0_CFG_ECC_EN_SHIFT 0
+#define I40E_ITR_MSIX_MEM_0_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_ITR_MSIX_MEM_0_CFG_ECC_EN_SHIFT)
+#define I40E_ITR_MSIX_MEM_0_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_ITR_MSIX_MEM_0_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_ITR_MSIX_MEM_0_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_ITR_MSIX_MEM_0_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_ITR_MSIX_MEM_0_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_ITR_MSIX_MEM_0_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_ITR_MSIX_MEM_0_CFG_LS_FORCE_SHIFT 3
+#define I40E_ITR_MSIX_MEM_0_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_ITR_MSIX_MEM_0_CFG_LS_FORCE_SHIFT)
+#define I40E_ITR_MSIX_MEM_0_CFG_LS_BYPASS_SHIFT 4
+#define I40E_ITR_MSIX_MEM_0_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_ITR_MSIX_MEM_0_CFG_LS_BYPASS_SHIFT)
+#define I40E_ITR_MSIX_MEM_0_CFG_MASK_INT_SHIFT 5
+#define I40E_ITR_MSIX_MEM_0_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_ITR_MSIX_MEM_0_CFG_MASK_INT_SHIFT)
+#define I40E_ITR_MSIX_MEM_0_CFG_FIX_CNT_SHIFT 8
+#define I40E_ITR_MSIX_MEM_0_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_ITR_MSIX_MEM_0_CFG_FIX_CNT_SHIFT)
+#define I40E_ITR_MSIX_MEM_0_CFG_ERR_CNT_SHIFT 9
+#define I40E_ITR_MSIX_MEM_0_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_ITR_MSIX_MEM_0_CFG_ERR_CNT_SHIFT)
+#define I40E_ITR_MSIX_MEM_0_CFG_RME_SHIFT 12
+#define I40E_ITR_MSIX_MEM_0_CFG_RME_MASK I40E_MASK(0x1, I40E_ITR_MSIX_MEM_0_CFG_RME_SHIFT)
+#define I40E_ITR_MSIX_MEM_0_CFG_RM_SHIFT 16
+#define I40E_ITR_MSIX_MEM_0_CFG_RM_MASK I40E_MASK(0xF, I40E_ITR_MSIX_MEM_0_CFG_RM_SHIFT)
+
+#define I40E_ITR_MSIX_MEM_0_STATUS 0x0003FC14 /* Reset: POR */
+#define I40E_ITR_MSIX_MEM_0_STATUS_ECC_ERR_SHIFT 0
+#define I40E_ITR_MSIX_MEM_0_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_ITR_MSIX_MEM_0_STATUS_ECC_ERR_SHIFT)
+#define I40E_ITR_MSIX_MEM_0_STATUS_ECC_FIX_SHIFT 1
+#define I40E_ITR_MSIX_MEM_0_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_ITR_MSIX_MEM_0_STATUS_ECC_FIX_SHIFT)
+#define I40E_ITR_MSIX_MEM_0_STATUS_INIT_DONE_SHIFT 2
+#define I40E_ITR_MSIX_MEM_0_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_ITR_MSIX_MEM_0_STATUS_INIT_DONE_SHIFT)
+#define I40E_ITR_MSIX_MEM_0_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_ITR_MSIX_MEM_0_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_ITR_MSIX_MEM_0_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_ITR_MSIX_MEM_1_CFG 0x0003FC18 /* Reset: POR */
+#define I40E_ITR_MSIX_MEM_1_CFG_ECC_EN_SHIFT 0
+#define I40E_ITR_MSIX_MEM_1_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_ITR_MSIX_MEM_1_CFG_ECC_EN_SHIFT)
+#define I40E_ITR_MSIX_MEM_1_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_ITR_MSIX_MEM_1_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_ITR_MSIX_MEM_1_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_ITR_MSIX_MEM_1_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_ITR_MSIX_MEM_1_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_ITR_MSIX_MEM_1_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_ITR_MSIX_MEM_1_CFG_LS_FORCE_SHIFT 3
+#define I40E_ITR_MSIX_MEM_1_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_ITR_MSIX_MEM_1_CFG_LS_FORCE_SHIFT)
+#define I40E_ITR_MSIX_MEM_1_CFG_LS_BYPASS_SHIFT 4
+#define I40E_ITR_MSIX_MEM_1_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_ITR_MSIX_MEM_1_CFG_LS_BYPASS_SHIFT)
+#define I40E_ITR_MSIX_MEM_1_CFG_MASK_INT_SHIFT 5
+#define I40E_ITR_MSIX_MEM_1_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_ITR_MSIX_MEM_1_CFG_MASK_INT_SHIFT)
+#define I40E_ITR_MSIX_MEM_1_CFG_FIX_CNT_SHIFT 8
+#define I40E_ITR_MSIX_MEM_1_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_ITR_MSIX_MEM_1_CFG_FIX_CNT_SHIFT)
+#define I40E_ITR_MSIX_MEM_1_CFG_ERR_CNT_SHIFT 9
+#define I40E_ITR_MSIX_MEM_1_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_ITR_MSIX_MEM_1_CFG_ERR_CNT_SHIFT)
+#define I40E_ITR_MSIX_MEM_1_CFG_RME_SHIFT 12
+#define I40E_ITR_MSIX_MEM_1_CFG_RME_MASK I40E_MASK(0x1, I40E_ITR_MSIX_MEM_1_CFG_RME_SHIFT)
+#define I40E_ITR_MSIX_MEM_1_CFG_RM_SHIFT 16
+#define I40E_ITR_MSIX_MEM_1_CFG_RM_MASK I40E_MASK(0xF, I40E_ITR_MSIX_MEM_1_CFG_RM_SHIFT)
+
+#define I40E_ITR_MSIX_MEM_1_STATUS 0x0003FC1C /* Reset: POR */
+#define I40E_ITR_MSIX_MEM_1_STATUS_ECC_ERR_SHIFT 0
+#define I40E_ITR_MSIX_MEM_1_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_ITR_MSIX_MEM_1_STATUS_ECC_ERR_SHIFT)
+#define I40E_ITR_MSIX_MEM_1_STATUS_ECC_FIX_SHIFT 1
+#define I40E_ITR_MSIX_MEM_1_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_ITR_MSIX_MEM_1_STATUS_ECC_FIX_SHIFT)
+#define I40E_ITR_MSIX_MEM_1_STATUS_INIT_DONE_SHIFT 2
+#define I40E_ITR_MSIX_MEM_1_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_ITR_MSIX_MEM_1_STATUS_INIT_DONE_SHIFT)
+#define I40E_ITR_MSIX_MEM_1_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_ITR_MSIX_MEM_1_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_ITR_MSIX_MEM_1_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_MNG_ADMIN_Q_CFG 0x0008304C /* Reset: POR */
+#define I40E_MNG_ADMIN_Q_CFG_ECC_EN_SHIFT 0
+#define I40E_MNG_ADMIN_Q_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_MNG_ADMIN_Q_CFG_ECC_EN_SHIFT)
+#define I40E_MNG_ADMIN_Q_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_MNG_ADMIN_Q_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_MNG_ADMIN_Q_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_MNG_ADMIN_Q_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_MNG_ADMIN_Q_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_MNG_ADMIN_Q_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_MNG_ADMIN_Q_CFG_LS_FORCE_SHIFT 3
+#define I40E_MNG_ADMIN_Q_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_MNG_ADMIN_Q_CFG_LS_FORCE_SHIFT)
+#define I40E_MNG_ADMIN_Q_CFG_LS_BYPASS_SHIFT 4
+#define I40E_MNG_ADMIN_Q_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_MNG_ADMIN_Q_CFG_LS_BYPASS_SHIFT)
+#define I40E_MNG_ADMIN_Q_CFG_MASK_INT_SHIFT 5
+#define I40E_MNG_ADMIN_Q_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_MNG_ADMIN_Q_CFG_MASK_INT_SHIFT)
+#define I40E_MNG_ADMIN_Q_CFG_FIX_CNT_SHIFT 8
+#define I40E_MNG_ADMIN_Q_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_MNG_ADMIN_Q_CFG_FIX_CNT_SHIFT)
+#define I40E_MNG_ADMIN_Q_CFG_ERR_CNT_SHIFT 9
+#define I40E_MNG_ADMIN_Q_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_MNG_ADMIN_Q_CFG_ERR_CNT_SHIFT)
+#define I40E_MNG_ADMIN_Q_CFG_RME_SHIFT 12
+#define I40E_MNG_ADMIN_Q_CFG_RME_MASK I40E_MASK(0x1, I40E_MNG_ADMIN_Q_CFG_RME_SHIFT)
+#define I40E_MNG_ADMIN_Q_CFG_RM_SHIFT 16
+#define I40E_MNG_ADMIN_Q_CFG_RM_MASK I40E_MASK(0xF, I40E_MNG_ADMIN_Q_CFG_RM_SHIFT)
+
+#define I40E_MNG_ADMIN_Q_STATUS 0x00083050 /* Reset: POR */
+#define I40E_MNG_ADMIN_Q_STATUS_ECC_ERR_SHIFT 0
+#define I40E_MNG_ADMIN_Q_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_MNG_ADMIN_Q_STATUS_ECC_ERR_SHIFT)
+#define I40E_MNG_ADMIN_Q_STATUS_ECC_FIX_SHIFT 1
+#define I40E_MNG_ADMIN_Q_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_MNG_ADMIN_Q_STATUS_ECC_FIX_SHIFT)
+#define I40E_MNG_ADMIN_Q_STATUS_INIT_DONE_SHIFT 2
+#define I40E_MNG_ADMIN_Q_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_MNG_ADMIN_Q_STATUS_INIT_DONE_SHIFT)
+#define I40E_MNG_ADMIN_Q_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_MNG_ADMIN_Q_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_MNG_ADMIN_Q_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_MNG_ALTERNATE_CFG 0x000830A4 /* Reset: POR */
+#define I40E_MNG_ALTERNATE_CFG_ECC_EN_SHIFT 0
+#define I40E_MNG_ALTERNATE_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_MNG_ALTERNATE_CFG_ECC_EN_SHIFT)
+#define I40E_MNG_ALTERNATE_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_MNG_ALTERNATE_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_MNG_ALTERNATE_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_MNG_ALTERNATE_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_MNG_ALTERNATE_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_MNG_ALTERNATE_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_MNG_ALTERNATE_CFG_LS_FORCE_SHIFT 3
+#define I40E_MNG_ALTERNATE_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_MNG_ALTERNATE_CFG_LS_FORCE_SHIFT)
+#define I40E_MNG_ALTERNATE_CFG_LS_BYPASS_SHIFT 4
+#define I40E_MNG_ALTERNATE_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_MNG_ALTERNATE_CFG_LS_BYPASS_SHIFT)
+#define I40E_MNG_ALTERNATE_CFG_MASK_INT_SHIFT 5
+#define I40E_MNG_ALTERNATE_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_MNG_ALTERNATE_CFG_MASK_INT_SHIFT)
+#define I40E_MNG_ALTERNATE_CFG_FIX_CNT_SHIFT 8
+#define I40E_MNG_ALTERNATE_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_MNG_ALTERNATE_CFG_FIX_CNT_SHIFT)
+#define I40E_MNG_ALTERNATE_CFG_ERR_CNT_SHIFT 9
+#define I40E_MNG_ALTERNATE_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_MNG_ALTERNATE_CFG_ERR_CNT_SHIFT)
+#define I40E_MNG_ALTERNATE_CFG_RME_SHIFT 12
+#define I40E_MNG_ALTERNATE_CFG_RME_MASK I40E_MASK(0x1, I40E_MNG_ALTERNATE_CFG_RME_SHIFT)
+#define I40E_MNG_ALTERNATE_CFG_RM_SHIFT 16
+#define I40E_MNG_ALTERNATE_CFG_RM_MASK I40E_MASK(0xF, I40E_MNG_ALTERNATE_CFG_RM_SHIFT)
+
+#define I40E_MNG_ALTERNATE_STATUS 0x000830A8 /* Reset: POR */
+#define I40E_MNG_ALTERNATE_STATUS_ECC_ERR_SHIFT 0
+#define I40E_MNG_ALTERNATE_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_MNG_ALTERNATE_STATUS_ECC_ERR_SHIFT)
+#define I40E_MNG_ALTERNATE_STATUS_ECC_FIX_SHIFT 1
+#define I40E_MNG_ALTERNATE_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_MNG_ALTERNATE_STATUS_ECC_FIX_SHIFT)
+#define I40E_MNG_ALTERNATE_STATUS_INIT_DONE_SHIFT 2
+#define I40E_MNG_ALTERNATE_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_MNG_ALTERNATE_STATUS_INIT_DONE_SHIFT)
+#define I40E_MNG_ALTERNATE_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_MNG_ALTERNATE_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_MNG_ALTERNATE_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_MNG_CODE_BANK_CFG 0x00083054 /* Reset: POR */
+#define I40E_MNG_CODE_BANK_CFG_ECC_EN_SHIFT 0
+#define I40E_MNG_CODE_BANK_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_MNG_CODE_BANK_CFG_ECC_EN_SHIFT)
+#define I40E_MNG_CODE_BANK_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_MNG_CODE_BANK_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_MNG_CODE_BANK_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_MNG_CODE_BANK_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_MNG_CODE_BANK_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_MNG_CODE_BANK_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_MNG_CODE_BANK_CFG_LS_FORCE_SHIFT 3
+#define I40E_MNG_CODE_BANK_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_MNG_CODE_BANK_CFG_LS_FORCE_SHIFT)
+#define I40E_MNG_CODE_BANK_CFG_LS_BYPASS_SHIFT 4
+#define I40E_MNG_CODE_BANK_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_MNG_CODE_BANK_CFG_LS_BYPASS_SHIFT)
+#define I40E_MNG_CODE_BANK_CFG_MASK_INT_SHIFT 5
+#define I40E_MNG_CODE_BANK_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_MNG_CODE_BANK_CFG_MASK_INT_SHIFT)
+#define I40E_MNG_CODE_BANK_CFG_FIX_CNT_SHIFT 8
+#define I40E_MNG_CODE_BANK_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_MNG_CODE_BANK_CFG_FIX_CNT_SHIFT)
+#define I40E_MNG_CODE_BANK_CFG_ERR_CNT_SHIFT 9
+#define I40E_MNG_CODE_BANK_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_MNG_CODE_BANK_CFG_ERR_CNT_SHIFT)
+#define I40E_MNG_CODE_BANK_CFG_RME_SHIFT 12
+#define I40E_MNG_CODE_BANK_CFG_RME_MASK I40E_MASK(0x1, I40E_MNG_CODE_BANK_CFG_RME_SHIFT)
+#define I40E_MNG_CODE_BANK_CFG_RM_SHIFT 16
+#define I40E_MNG_CODE_BANK_CFG_RM_MASK I40E_MASK(0xF, I40E_MNG_CODE_BANK_CFG_RM_SHIFT)
+
+#define I40E_MNG_CODE_BANK_STATUS 0x00083058 /* Reset: POR */
+#define I40E_MNG_CODE_BANK_STATUS_ECC_ERR_SHIFT 0
+#define I40E_MNG_CODE_BANK_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_MNG_CODE_BANK_STATUS_ECC_ERR_SHIFT)
+#define I40E_MNG_CODE_BANK_STATUS_ECC_FIX_SHIFT 1
+#define I40E_MNG_CODE_BANK_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_MNG_CODE_BANK_STATUS_ECC_FIX_SHIFT)
+#define I40E_MNG_CODE_BANK_STATUS_INIT_DONE_SHIFT 2
+#define I40E_MNG_CODE_BANK_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_MNG_CODE_BANK_STATUS_INIT_DONE_SHIFT)
+#define I40E_MNG_CODE_BANK_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_MNG_CODE_BANK_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_MNG_CODE_BANK_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_MNG_ECC_COR_ERR 0x000830B8 /* Reset: POR */
+#define I40E_MNG_ECC_COR_ERR_CNT_SHIFT 0
+#define I40E_MNG_ECC_COR_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_MNG_ECC_COR_ERR_CNT_SHIFT)
+
+#define I40E_MNG_ECC_UNCOR_ERR 0x000830B4 /* Reset: POR */
+#define I40E_MNG_ECC_UNCOR_ERR_CNT_SHIFT 0
+#define I40E_MNG_ECC_UNCOR_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_MNG_ECC_UNCOR_ERR_CNT_SHIFT)
+
+#define I40E_MNG_POPULATED_DATA_CFG 0x00083064 /* Reset: POR */
+#define I40E_MNG_POPULATED_DATA_CFG_ECC_EN_SHIFT 0
+#define I40E_MNG_POPULATED_DATA_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_MNG_POPULATED_DATA_CFG_ECC_EN_SHIFT)
+#define I40E_MNG_POPULATED_DATA_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_MNG_POPULATED_DATA_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_MNG_POPULATED_DATA_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_MNG_POPULATED_DATA_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_MNG_POPULATED_DATA_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_MNG_POPULATED_DATA_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_MNG_POPULATED_DATA_CFG_LS_FORCE_SHIFT 3
+#define I40E_MNG_POPULATED_DATA_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_MNG_POPULATED_DATA_CFG_LS_FORCE_SHIFT)
+#define I40E_MNG_POPULATED_DATA_CFG_LS_BYPASS_SHIFT 4
+#define I40E_MNG_POPULATED_DATA_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_MNG_POPULATED_DATA_CFG_LS_BYPASS_SHIFT)
+#define I40E_MNG_POPULATED_DATA_CFG_MASK_INT_SHIFT 5
+#define I40E_MNG_POPULATED_DATA_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_MNG_POPULATED_DATA_CFG_MASK_INT_SHIFT)
+#define I40E_MNG_POPULATED_DATA_CFG_FIX_CNT_SHIFT 8
+#define I40E_MNG_POPULATED_DATA_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_MNG_POPULATED_DATA_CFG_FIX_CNT_SHIFT)
+#define I40E_MNG_POPULATED_DATA_CFG_ERR_CNT_SHIFT 9
+#define I40E_MNG_POPULATED_DATA_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_MNG_POPULATED_DATA_CFG_ERR_CNT_SHIFT)
+#define I40E_MNG_POPULATED_DATA_CFG_RME_SHIFT 12
+#define I40E_MNG_POPULATED_DATA_CFG_RME_MASK I40E_MASK(0x1, I40E_MNG_POPULATED_DATA_CFG_RME_SHIFT)
+#define I40E_MNG_POPULATED_DATA_CFG_RM_SHIFT 16
+#define I40E_MNG_POPULATED_DATA_CFG_RM_MASK I40E_MASK(0xF, I40E_MNG_POPULATED_DATA_CFG_RM_SHIFT)
+
+#define I40E_MNG_POPULATED_DATA_STATUS 0x00083068 /* Reset: POR */
+#define I40E_MNG_POPULATED_DATA_STATUS_ECC_ERR_SHIFT 0
+#define I40E_MNG_POPULATED_DATA_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_MNG_POPULATED_DATA_STATUS_ECC_ERR_SHIFT)
+#define I40E_MNG_POPULATED_DATA_STATUS_ECC_FIX_SHIFT 1
+#define I40E_MNG_POPULATED_DATA_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_MNG_POPULATED_DATA_STATUS_ECC_FIX_SHIFT)
+#define I40E_MNG_POPULATED_DATA_STATUS_INIT_DONE_SHIFT 2
+#define I40E_MNG_POPULATED_DATA_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_MNG_POPULATED_DATA_STATUS_INIT_DONE_SHIFT)
+#define I40E_MNG_POPULATED_DATA_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_MNG_POPULATED_DATA_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_MNG_POPULATED_DATA_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_MNG_POPULATED_DATA0_CFG 0x0008305C /* Reset: POR */
+#define I40E_MNG_POPULATED_DATA0_CFG_ECC_EN_SHIFT 0
+#define I40E_MNG_POPULATED_DATA0_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_MNG_POPULATED_DATA0_CFG_ECC_EN_SHIFT)
+#define I40E_MNG_POPULATED_DATA0_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_MNG_POPULATED_DATA0_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_MNG_POPULATED_DATA0_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_MNG_POPULATED_DATA0_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_MNG_POPULATED_DATA0_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_MNG_POPULATED_DATA0_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_MNG_POPULATED_DATA0_CFG_LS_FORCE_SHIFT 3
+#define I40E_MNG_POPULATED_DATA0_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_MNG_POPULATED_DATA0_CFG_LS_FORCE_SHIFT)
+#define I40E_MNG_POPULATED_DATA0_CFG_LS_BYPASS_SHIFT 4
+#define I40E_MNG_POPULATED_DATA0_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_MNG_POPULATED_DATA0_CFG_LS_BYPASS_SHIFT)
+#define I40E_MNG_POPULATED_DATA0_CFG_MASK_INT_SHIFT 5
+#define I40E_MNG_POPULATED_DATA0_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_MNG_POPULATED_DATA0_CFG_MASK_INT_SHIFT)
+#define I40E_MNG_POPULATED_DATA0_CFG_FIX_CNT_SHIFT 8
+#define I40E_MNG_POPULATED_DATA0_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_MNG_POPULATED_DATA0_CFG_FIX_CNT_SHIFT)
+#define I40E_MNG_POPULATED_DATA0_CFG_ERR_CNT_SHIFT 9
+#define I40E_MNG_POPULATED_DATA0_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_MNG_POPULATED_DATA0_CFG_ERR_CNT_SHIFT)
+#define I40E_MNG_POPULATED_DATA0_CFG_RME_SHIFT 12
+#define I40E_MNG_POPULATED_DATA0_CFG_RME_MASK I40E_MASK(0x1, I40E_MNG_POPULATED_DATA0_CFG_RME_SHIFT)
+#define I40E_MNG_POPULATED_DATA0_CFG_RM_SHIFT 16
+#define I40E_MNG_POPULATED_DATA0_CFG_RM_MASK I40E_MASK(0xF, I40E_MNG_POPULATED_DATA0_CFG_RM_SHIFT)
+
+#define I40E_MNG_POPULATED_DATA0_STATUS 0x00083060 /* Reset: POR */
+#define I40E_MNG_POPULATED_DATA0_STATUS_ECC_ERR_SHIFT 0
+#define I40E_MNG_POPULATED_DATA0_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_MNG_POPULATED_DATA0_STATUS_ECC_ERR_SHIFT)
+#define I40E_MNG_POPULATED_DATA0_STATUS_ECC_FIX_SHIFT 1
+#define I40E_MNG_POPULATED_DATA0_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_MNG_POPULATED_DATA0_STATUS_ECC_FIX_SHIFT)
+#define I40E_MNG_POPULATED_DATA0_STATUS_INIT_DONE_SHIFT 2
+#define I40E_MNG_POPULATED_DATA0_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_MNG_POPULATED_DATA0_STATUS_INIT_DONE_SHIFT)
+#define I40E_MNG_POPULATED_DATA0_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_MNG_POPULATED_DATA0_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_MNG_POPULATED_DATA0_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_MNG_ROM_CFG 0x000830AC /* Reset: POR */
+#define I40E_MNG_ROM_CFG_ECC_EN_SHIFT 0
+#define I40E_MNG_ROM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_MNG_ROM_CFG_ECC_EN_SHIFT)
+#define I40E_MNG_ROM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_MNG_ROM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_MNG_ROM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_MNG_ROM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_MNG_ROM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_MNG_ROM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_MNG_ROM_CFG_LS_FORCE_SHIFT 3
+#define I40E_MNG_ROM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_MNG_ROM_CFG_LS_FORCE_SHIFT)
+#define I40E_MNG_ROM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_MNG_ROM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_MNG_ROM_CFG_LS_BYPASS_SHIFT)
+#define I40E_MNG_ROM_CFG_MASK_INT_SHIFT 5
+#define I40E_MNG_ROM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_MNG_ROM_CFG_MASK_INT_SHIFT)
+#define I40E_MNG_ROM_CFG_FIX_CNT_SHIFT 8
+#define I40E_MNG_ROM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_MNG_ROM_CFG_FIX_CNT_SHIFT)
+#define I40E_MNG_ROM_CFG_ERR_CNT_SHIFT 9
+#define I40E_MNG_ROM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_MNG_ROM_CFG_ERR_CNT_SHIFT)
+#define I40E_MNG_ROM_CFG_RME_SHIFT 12
+#define I40E_MNG_ROM_CFG_RME_MASK I40E_MASK(0x1, I40E_MNG_ROM_CFG_RME_SHIFT)
+#define I40E_MNG_ROM_CFG_RM_SHIFT 16
+#define I40E_MNG_ROM_CFG_RM_MASK I40E_MASK(0xF, I40E_MNG_ROM_CFG_RM_SHIFT)
+
+#define I40E_MNG_ROM_STATUS 0x000830B0 /* Reset: POR */
+#define I40E_MNG_ROM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_MNG_ROM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_MNG_ROM_STATUS_ECC_ERR_SHIFT)
+#define I40E_MNG_ROM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_MNG_ROM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_MNG_ROM_STATUS_ECC_FIX_SHIFT)
+#define I40E_MNG_ROM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_MNG_ROM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_MNG_ROM_STATUS_INIT_DONE_SHIFT)
+#define I40E_MNG_ROM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_MNG_ROM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_MNG_ROM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_MNG_RX_BANK_CFG 0x0008306C /* Reset: POR */
+#define I40E_MNG_RX_BANK_CFG_ECC_EN_SHIFT 0
+#define I40E_MNG_RX_BANK_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_MNG_RX_BANK_CFG_ECC_EN_SHIFT)
+#define I40E_MNG_RX_BANK_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_MNG_RX_BANK_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_MNG_RX_BANK_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_MNG_RX_BANK_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_MNG_RX_BANK_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_MNG_RX_BANK_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_MNG_RX_BANK_CFG_LS_FORCE_SHIFT 3
+#define I40E_MNG_RX_BANK_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_MNG_RX_BANK_CFG_LS_FORCE_SHIFT)
+#define I40E_MNG_RX_BANK_CFG_LS_BYPASS_SHIFT 4
+#define I40E_MNG_RX_BANK_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_MNG_RX_BANK_CFG_LS_BYPASS_SHIFT)
+#define I40E_MNG_RX_BANK_CFG_MASK_INT_SHIFT 5
+#define I40E_MNG_RX_BANK_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_MNG_RX_BANK_CFG_MASK_INT_SHIFT)
+#define I40E_MNG_RX_BANK_CFG_FIX_CNT_SHIFT 8
+#define I40E_MNG_RX_BANK_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_MNG_RX_BANK_CFG_FIX_CNT_SHIFT)
+#define I40E_MNG_RX_BANK_CFG_ERR_CNT_SHIFT 9
+#define I40E_MNG_RX_BANK_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_MNG_RX_BANK_CFG_ERR_CNT_SHIFT)
+#define I40E_MNG_RX_BANK_CFG_RME_SHIFT 12
+#define I40E_MNG_RX_BANK_CFG_RME_MASK I40E_MASK(0x1, I40E_MNG_RX_BANK_CFG_RME_SHIFT)
+#define I40E_MNG_RX_BANK_CFG_RM_SHIFT 16
+#define I40E_MNG_RX_BANK_CFG_RM_MASK I40E_MASK(0xF, I40E_MNG_RX_BANK_CFG_RM_SHIFT)
+
+#define I40E_MNG_RX_BANK_STATUS 0x00083070 /* Reset: POR */
+#define I40E_MNG_RX_BANK_STATUS_ECC_ERR_SHIFT 0
+#define I40E_MNG_RX_BANK_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_MNG_RX_BANK_STATUS_ECC_ERR_SHIFT)
+#define I40E_MNG_RX_BANK_STATUS_ECC_FIX_SHIFT 1
+#define I40E_MNG_RX_BANK_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_MNG_RX_BANK_STATUS_ECC_FIX_SHIFT)
+#define I40E_MNG_RX_BANK_STATUS_INIT_DONE_SHIFT 2
+#define I40E_MNG_RX_BANK_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_MNG_RX_BANK_STATUS_INIT_DONE_SHIFT)
+#define I40E_MNG_RX_BANK_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_MNG_RX_BANK_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_MNG_RX_BANK_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_MNG_RXF_CFG 0x00083074 /* Reset: POR */
+#define I40E_MNG_RXF_CFG_ECC_EN_SHIFT 0
+#define I40E_MNG_RXF_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_MNG_RXF_CFG_ECC_EN_SHIFT)
+#define I40E_MNG_RXF_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_MNG_RXF_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_MNG_RXF_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_MNG_RXF_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_MNG_RXF_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_MNG_RXF_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_MNG_RXF_CFG_LS_FORCE_SHIFT 3
+#define I40E_MNG_RXF_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_MNG_RXF_CFG_LS_FORCE_SHIFT)
+#define I40E_MNG_RXF_CFG_LS_BYPASS_SHIFT 4
+#define I40E_MNG_RXF_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_MNG_RXF_CFG_LS_BYPASS_SHIFT)
+#define I40E_MNG_RXF_CFG_MASK_INT_SHIFT 5
+#define I40E_MNG_RXF_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_MNG_RXF_CFG_MASK_INT_SHIFT)
+#define I40E_MNG_RXF_CFG_FIX_CNT_SHIFT 8
+#define I40E_MNG_RXF_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_MNG_RXF_CFG_FIX_CNT_SHIFT)
+#define I40E_MNG_RXF_CFG_ERR_CNT_SHIFT 9
+#define I40E_MNG_RXF_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_MNG_RXF_CFG_ERR_CNT_SHIFT)
+#define I40E_MNG_RXF_CFG_RME_A_SHIFT 12
+#define I40E_MNG_RXF_CFG_RME_A_MASK I40E_MASK(0x1, I40E_MNG_RXF_CFG_RME_A_SHIFT)
+#define I40E_MNG_RXF_CFG_RME_B_SHIFT 13
+#define I40E_MNG_RXF_CFG_RME_B_MASK I40E_MASK(0x1, I40E_MNG_RXF_CFG_RME_B_SHIFT)
+#define I40E_MNG_RXF_CFG_RM_A_SHIFT 16
+#define I40E_MNG_RXF_CFG_RM_A_MASK I40E_MASK(0xF, I40E_MNG_RXF_CFG_RM_A_SHIFT)
+#define I40E_MNG_RXF_CFG_RM_B_SHIFT 20
+#define I40E_MNG_RXF_CFG_RM_B_MASK I40E_MASK(0xF, I40E_MNG_RXF_CFG_RM_B_SHIFT)
+
+#define I40E_MNG_RXF_STATUS 0x00083078 /* Reset: POR */
+#define I40E_MNG_RXF_STATUS_ECC_ERR_SHIFT 0
+#define I40E_MNG_RXF_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_MNG_RXF_STATUS_ECC_ERR_SHIFT)
+#define I40E_MNG_RXF_STATUS_ECC_FIX_SHIFT 1
+#define I40E_MNG_RXF_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_MNG_RXF_STATUS_ECC_FIX_SHIFT)
+#define I40E_MNG_RXF_STATUS_INIT_DONE_SHIFT 2
+#define I40E_MNG_RXF_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_MNG_RXF_STATUS_INIT_DONE_SHIFT)
+#define I40E_MNG_RXF_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_MNG_RXF_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_MNG_RXF_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_MNG_TX0_GLUE_CFG 0x0008307C /* Reset: POR */
+#define I40E_MNG_TX0_GLUE_CFG_ECC_EN_SHIFT 0
+#define I40E_MNG_TX0_GLUE_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_MNG_TX0_GLUE_CFG_ECC_EN_SHIFT)
+#define I40E_MNG_TX0_GLUE_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_MNG_TX0_GLUE_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_MNG_TX0_GLUE_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_MNG_TX0_GLUE_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_MNG_TX0_GLUE_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_MNG_TX0_GLUE_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_MNG_TX0_GLUE_CFG_LS_FORCE_SHIFT 3
+#define I40E_MNG_TX0_GLUE_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_MNG_TX0_GLUE_CFG_LS_FORCE_SHIFT)
+#define I40E_MNG_TX0_GLUE_CFG_LS_BYPASS_SHIFT 4
+#define I40E_MNG_TX0_GLUE_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_MNG_TX0_GLUE_CFG_LS_BYPASS_SHIFT)
+#define I40E_MNG_TX0_GLUE_CFG_MASK_INT_SHIFT 5
+#define I40E_MNG_TX0_GLUE_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_MNG_TX0_GLUE_CFG_MASK_INT_SHIFT)
+#define I40E_MNG_TX0_GLUE_CFG_FIX_CNT_SHIFT 8
+#define I40E_MNG_TX0_GLUE_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_MNG_TX0_GLUE_CFG_FIX_CNT_SHIFT)
+#define I40E_MNG_TX0_GLUE_CFG_ERR_CNT_SHIFT 9
+#define I40E_MNG_TX0_GLUE_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_MNG_TX0_GLUE_CFG_ERR_CNT_SHIFT)
+#define I40E_MNG_TX0_GLUE_CFG_RME_A_SHIFT 12
+#define I40E_MNG_TX0_GLUE_CFG_RME_A_MASK I40E_MASK(0x1, I40E_MNG_TX0_GLUE_CFG_RME_A_SHIFT)
+#define I40E_MNG_TX0_GLUE_CFG_RME_B_SHIFT 13
+#define I40E_MNG_TX0_GLUE_CFG_RME_B_MASK I40E_MASK(0x1, I40E_MNG_TX0_GLUE_CFG_RME_B_SHIFT)
+#define I40E_MNG_TX0_GLUE_CFG_RM_A_SHIFT 16
+#define I40E_MNG_TX0_GLUE_CFG_RM_A_MASK I40E_MASK(0xF, I40E_MNG_TX0_GLUE_CFG_RM_A_SHIFT)
+#define I40E_MNG_TX0_GLUE_CFG_RM_B_SHIFT 20
+#define I40E_MNG_TX0_GLUE_CFG_RM_B_MASK I40E_MASK(0xF, I40E_MNG_TX0_GLUE_CFG_RM_B_SHIFT)
+
+#define I40E_MNG_TX0_GLUE_STATUS 0x00083080 /* Reset: POR */
+#define I40E_MNG_TX0_GLUE_STATUS_ECC_ERR_SHIFT 0
+#define I40E_MNG_TX0_GLUE_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_MNG_TX0_GLUE_STATUS_ECC_ERR_SHIFT)
+#define I40E_MNG_TX0_GLUE_STATUS_ECC_FIX_SHIFT 1
+#define I40E_MNG_TX0_GLUE_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_MNG_TX0_GLUE_STATUS_ECC_FIX_SHIFT)
+#define I40E_MNG_TX0_GLUE_STATUS_INIT_DONE_SHIFT 2
+#define I40E_MNG_TX0_GLUE_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_MNG_TX0_GLUE_STATUS_INIT_DONE_SHIFT)
+#define I40E_MNG_TX0_GLUE_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_MNG_TX0_GLUE_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_MNG_TX0_GLUE_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_MNG_TX1_GLUE_CFG 0x00083084 /* Reset: POR */
+#define I40E_MNG_TX1_GLUE_CFG_ECC_EN_SHIFT 0
+#define I40E_MNG_TX1_GLUE_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_MNG_TX1_GLUE_CFG_ECC_EN_SHIFT)
+#define I40E_MNG_TX1_GLUE_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_MNG_TX1_GLUE_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_MNG_TX1_GLUE_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_MNG_TX1_GLUE_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_MNG_TX1_GLUE_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_MNG_TX1_GLUE_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_MNG_TX1_GLUE_CFG_LS_FORCE_SHIFT 3
+#define I40E_MNG_TX1_GLUE_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_MNG_TX1_GLUE_CFG_LS_FORCE_SHIFT)
+#define I40E_MNG_TX1_GLUE_CFG_LS_BYPASS_SHIFT 4
+#define I40E_MNG_TX1_GLUE_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_MNG_TX1_GLUE_CFG_LS_BYPASS_SHIFT)
+#define I40E_MNG_TX1_GLUE_CFG_MASK_INT_SHIFT 5
+#define I40E_MNG_TX1_GLUE_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_MNG_TX1_GLUE_CFG_MASK_INT_SHIFT)
+#define I40E_MNG_TX1_GLUE_CFG_FIX_CNT_SHIFT 8
+#define I40E_MNG_TX1_GLUE_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_MNG_TX1_GLUE_CFG_FIX_CNT_SHIFT)
+#define I40E_MNG_TX1_GLUE_CFG_ERR_CNT_SHIFT 9
+#define I40E_MNG_TX1_GLUE_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_MNG_TX1_GLUE_CFG_ERR_CNT_SHIFT)
+#define I40E_MNG_TX1_GLUE_CFG_RME_A_SHIFT 12
+#define I40E_MNG_TX1_GLUE_CFG_RME_A_MASK I40E_MASK(0x1, I40E_MNG_TX1_GLUE_CFG_RME_A_SHIFT)
+#define I40E_MNG_TX1_GLUE_CFG_RME_B_SHIFT 13
+#define I40E_MNG_TX1_GLUE_CFG_RME_B_MASK I40E_MASK(0x1, I40E_MNG_TX1_GLUE_CFG_RME_B_SHIFT)
+#define I40E_MNG_TX1_GLUE_CFG_RM_A_SHIFT 16
+#define I40E_MNG_TX1_GLUE_CFG_RM_A_MASK I40E_MASK(0xF, I40E_MNG_TX1_GLUE_CFG_RM_A_SHIFT)
+#define I40E_MNG_TX1_GLUE_CFG_RM_B_SHIFT 20
+#define I40E_MNG_TX1_GLUE_CFG_RM_B_MASK I40E_MASK(0xF, I40E_MNG_TX1_GLUE_CFG_RM_B_SHIFT)
+
+#define I40E_MNG_TX1_GLUE_STATUS 0x00083088 /* Reset: POR */
+#define I40E_MNG_TX1_GLUE_STATUS_ECC_ERR_SHIFT 0
+#define I40E_MNG_TX1_GLUE_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_MNG_TX1_GLUE_STATUS_ECC_ERR_SHIFT)
+#define I40E_MNG_TX1_GLUE_STATUS_ECC_FIX_SHIFT 1
+#define I40E_MNG_TX1_GLUE_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_MNG_TX1_GLUE_STATUS_ECC_FIX_SHIFT)
+#define I40E_MNG_TX1_GLUE_STATUS_INIT_DONE_SHIFT 2
+#define I40E_MNG_TX1_GLUE_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_MNG_TX1_GLUE_STATUS_INIT_DONE_SHIFT)
+#define I40E_MNG_TX1_GLUE_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_MNG_TX1_GLUE_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_MNG_TX1_GLUE_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_MNG_TX2_GLUE_CFG 0x0008308C /* Reset: POR */
+#define I40E_MNG_TX2_GLUE_CFG_ECC_EN_SHIFT 0
+#define I40E_MNG_TX2_GLUE_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_MNG_TX2_GLUE_CFG_ECC_EN_SHIFT)
+#define I40E_MNG_TX2_GLUE_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_MNG_TX2_GLUE_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_MNG_TX2_GLUE_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_MNG_TX2_GLUE_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_MNG_TX2_GLUE_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_MNG_TX2_GLUE_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_MNG_TX2_GLUE_CFG_LS_FORCE_SHIFT 3
+#define I40E_MNG_TX2_GLUE_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_MNG_TX2_GLUE_CFG_LS_FORCE_SHIFT)
+#define I40E_MNG_TX2_GLUE_CFG_LS_BYPASS_SHIFT 4
+#define I40E_MNG_TX2_GLUE_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_MNG_TX2_GLUE_CFG_LS_BYPASS_SHIFT)
+#define I40E_MNG_TX2_GLUE_CFG_MASK_INT_SHIFT 5
+#define I40E_MNG_TX2_GLUE_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_MNG_TX2_GLUE_CFG_MASK_INT_SHIFT)
+#define I40E_MNG_TX2_GLUE_CFG_FIX_CNT_SHIFT 8
+#define I40E_MNG_TX2_GLUE_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_MNG_TX2_GLUE_CFG_FIX_CNT_SHIFT)
+#define I40E_MNG_TX2_GLUE_CFG_ERR_CNT_SHIFT 9
+#define I40E_MNG_TX2_GLUE_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_MNG_TX2_GLUE_CFG_ERR_CNT_SHIFT)
+#define I40E_MNG_TX2_GLUE_CFG_RME_A_SHIFT 12
+#define I40E_MNG_TX2_GLUE_CFG_RME_A_MASK I40E_MASK(0x1, I40E_MNG_TX2_GLUE_CFG_RME_A_SHIFT)
+#define I40E_MNG_TX2_GLUE_CFG_RME_B_SHIFT 13
+#define I40E_MNG_TX2_GLUE_CFG_RME_B_MASK I40E_MASK(0x1, I40E_MNG_TX2_GLUE_CFG_RME_B_SHIFT)
+#define I40E_MNG_TX2_GLUE_CFG_RM_A_SHIFT 16
+#define I40E_MNG_TX2_GLUE_CFG_RM_A_MASK I40E_MASK(0xF, I40E_MNG_TX2_GLUE_CFG_RM_A_SHIFT)
+#define I40E_MNG_TX2_GLUE_CFG_RM_B_SHIFT 20
+#define I40E_MNG_TX2_GLUE_CFG_RM_B_MASK I40E_MASK(0xF, I40E_MNG_TX2_GLUE_CFG_RM_B_SHIFT)
+
+#define I40E_MNG_TX2_GLUE_STATUS 0x00083090 /* Reset: POR */
+#define I40E_MNG_TX2_GLUE_STATUS_ECC_ERR_SHIFT 0
+#define I40E_MNG_TX2_GLUE_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_MNG_TX2_GLUE_STATUS_ECC_ERR_SHIFT)
+#define I40E_MNG_TX2_GLUE_STATUS_ECC_FIX_SHIFT 1
+#define I40E_MNG_TX2_GLUE_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_MNG_TX2_GLUE_STATUS_ECC_FIX_SHIFT)
+#define I40E_MNG_TX2_GLUE_STATUS_INIT_DONE_SHIFT 2
+#define I40E_MNG_TX2_GLUE_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_MNG_TX2_GLUE_STATUS_INIT_DONE_SHIFT)
+#define I40E_MNG_TX2_GLUE_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_MNG_TX2_GLUE_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_MNG_TX2_GLUE_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_MNG_TX3_GLUE_CFG 0x00083094 /* Reset: POR */
+#define I40E_MNG_TX3_GLUE_CFG_ECC_EN_SHIFT 0
+#define I40E_MNG_TX3_GLUE_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_MNG_TX3_GLUE_CFG_ECC_EN_SHIFT)
+#define I40E_MNG_TX3_GLUE_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_MNG_TX3_GLUE_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_MNG_TX3_GLUE_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_MNG_TX3_GLUE_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_MNG_TX3_GLUE_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_MNG_TX3_GLUE_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_MNG_TX3_GLUE_CFG_LS_FORCE_SHIFT 3
+#define I40E_MNG_TX3_GLUE_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_MNG_TX3_GLUE_CFG_LS_FORCE_SHIFT)
+#define I40E_MNG_TX3_GLUE_CFG_LS_BYPASS_SHIFT 4
+#define I40E_MNG_TX3_GLUE_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_MNG_TX3_GLUE_CFG_LS_BYPASS_SHIFT)
+#define I40E_MNG_TX3_GLUE_CFG_MASK_INT_SHIFT 5
+#define I40E_MNG_TX3_GLUE_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_MNG_TX3_GLUE_CFG_MASK_INT_SHIFT)
+#define I40E_MNG_TX3_GLUE_CFG_FIX_CNT_SHIFT 8
+#define I40E_MNG_TX3_GLUE_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_MNG_TX3_GLUE_CFG_FIX_CNT_SHIFT)
+#define I40E_MNG_TX3_GLUE_CFG_ERR_CNT_SHIFT 9
+#define I40E_MNG_TX3_GLUE_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_MNG_TX3_GLUE_CFG_ERR_CNT_SHIFT)
+#define I40E_MNG_TX3_GLUE_CFG_RME_A_SHIFT 12
+#define I40E_MNG_TX3_GLUE_CFG_RME_A_MASK I40E_MASK(0x1, I40E_MNG_TX3_GLUE_CFG_RME_A_SHIFT)
+#define I40E_MNG_TX3_GLUE_CFG_RME_B_SHIFT 13
+#define I40E_MNG_TX3_GLUE_CFG_RME_B_MASK I40E_MASK(0x1, I40E_MNG_TX3_GLUE_CFG_RME_B_SHIFT)
+#define I40E_MNG_TX3_GLUE_CFG_RM_A_SHIFT 16
+#define I40E_MNG_TX3_GLUE_CFG_RM_A_MASK I40E_MASK(0xF, I40E_MNG_TX3_GLUE_CFG_RM_A_SHIFT)
+#define I40E_MNG_TX3_GLUE_CFG_RM_B_SHIFT 20
+#define I40E_MNG_TX3_GLUE_CFG_RM_B_MASK I40E_MASK(0xF, I40E_MNG_TX3_GLUE_CFG_RM_B_SHIFT)
+
+#define I40E_MNG_TX3_GLUE_STATUS 0x00083098 /* Reset: POR */
+#define I40E_MNG_TX3_GLUE_STATUS_ECC_ERR_SHIFT 0
+#define I40E_MNG_TX3_GLUE_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_MNG_TX3_GLUE_STATUS_ECC_ERR_SHIFT)
+#define I40E_MNG_TX3_GLUE_STATUS_ECC_FIX_SHIFT 1
+#define I40E_MNG_TX3_GLUE_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_MNG_TX3_GLUE_STATUS_ECC_FIX_SHIFT)
+#define I40E_MNG_TX3_GLUE_STATUS_INIT_DONE_SHIFT 2
+#define I40E_MNG_TX3_GLUE_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_MNG_TX3_GLUE_STATUS_INIT_DONE_SHIFT)
+#define I40E_MNG_TX3_GLUE_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_MNG_TX3_GLUE_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_MNG_TX3_GLUE_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_MNG_TX4_GLUE_CFG 0x0008309C /* Reset: POR */
+#define I40E_MNG_TX4_GLUE_CFG_ECC_EN_SHIFT 0
+#define I40E_MNG_TX4_GLUE_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_MNG_TX4_GLUE_CFG_ECC_EN_SHIFT)
+#define I40E_MNG_TX4_GLUE_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_MNG_TX4_GLUE_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_MNG_TX4_GLUE_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_MNG_TX4_GLUE_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_MNG_TX4_GLUE_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_MNG_TX4_GLUE_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_MNG_TX4_GLUE_CFG_LS_FORCE_SHIFT 3
+#define I40E_MNG_TX4_GLUE_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_MNG_TX4_GLUE_CFG_LS_FORCE_SHIFT)
+#define I40E_MNG_TX4_GLUE_CFG_LS_BYPASS_SHIFT 4
+#define I40E_MNG_TX4_GLUE_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_MNG_TX4_GLUE_CFG_LS_BYPASS_SHIFT)
+#define I40E_MNG_TX4_GLUE_CFG_MASK_INT_SHIFT 5
+#define I40E_MNG_TX4_GLUE_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_MNG_TX4_GLUE_CFG_MASK_INT_SHIFT)
+#define I40E_MNG_TX4_GLUE_CFG_FIX_CNT_SHIFT 8
+#define I40E_MNG_TX4_GLUE_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_MNG_TX4_GLUE_CFG_FIX_CNT_SHIFT)
+#define I40E_MNG_TX4_GLUE_CFG_ERR_CNT_SHIFT 9
+#define I40E_MNG_TX4_GLUE_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_MNG_TX4_GLUE_CFG_ERR_CNT_SHIFT)
+#define I40E_MNG_TX4_GLUE_CFG_RME_A_SHIFT 12
+#define I40E_MNG_TX4_GLUE_CFG_RME_A_MASK I40E_MASK(0x1, I40E_MNG_TX4_GLUE_CFG_RME_A_SHIFT)
+#define I40E_MNG_TX4_GLUE_CFG_RME_B_SHIFT 13
+#define I40E_MNG_TX4_GLUE_CFG_RME_B_MASK I40E_MASK(0x1, I40E_MNG_TX4_GLUE_CFG_RME_B_SHIFT)
+#define I40E_MNG_TX4_GLUE_CFG_RM_A_SHIFT 16
+#define I40E_MNG_TX4_GLUE_CFG_RM_A_MASK I40E_MASK(0xF, I40E_MNG_TX4_GLUE_CFG_RM_A_SHIFT)
+#define I40E_MNG_TX4_GLUE_CFG_RM_B_SHIFT 20
+#define I40E_MNG_TX4_GLUE_CFG_RM_B_MASK I40E_MASK(0xF, I40E_MNG_TX4_GLUE_CFG_RM_B_SHIFT)
+
+#define I40E_MNG_TX4_GLUE_STATUS 0x000830A0 /* Reset: POR */
+#define I40E_MNG_TX4_GLUE_STATUS_ECC_ERR_SHIFT 0
+#define I40E_MNG_TX4_GLUE_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_MNG_TX4_GLUE_STATUS_ECC_ERR_SHIFT)
+#define I40E_MNG_TX4_GLUE_STATUS_ECC_FIX_SHIFT 1
+#define I40E_MNG_TX4_GLUE_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_MNG_TX4_GLUE_STATUS_ECC_FIX_SHIFT)
+#define I40E_MNG_TX4_GLUE_STATUS_INIT_DONE_SHIFT 2
+#define I40E_MNG_TX4_GLUE_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_MNG_TX4_GLUE_STATUS_INIT_DONE_SHIFT)
+#define I40E_MNG_TX4_GLUE_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_MNG_TX4_GLUE_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_MNG_TX4_GLUE_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_PBLOC_CACHE_DBG_CTL 0x000A808C /* Reset: CORER */
+#define I40E_PBLOC_CACHE_DBG_CTL_ADR_SHIFT 0
+#define I40E_PBLOC_CACHE_DBG_CTL_ADR_MASK I40E_MASK(0x3FFFF, I40E_PBLOC_CACHE_DBG_CTL_ADR_SHIFT)
+#define I40E_PBLOC_CACHE_DBG_CTL_DW_SEL_SHIFT 18
+#define I40E_PBLOC_CACHE_DBG_CTL_DW_SEL_MASK I40E_MASK(0xFF, I40E_PBLOC_CACHE_DBG_CTL_DW_SEL_SHIFT)
+#define I40E_PBLOC_CACHE_DBG_CTL_RD_EN_SHIFT 30
+#define I40E_PBLOC_CACHE_DBG_CTL_RD_EN_MASK I40E_MASK(0x1, I40E_PBLOC_CACHE_DBG_CTL_RD_EN_SHIFT)
+#define I40E_PBLOC_CACHE_DBG_CTL_DONE_SHIFT 31
+#define I40E_PBLOC_CACHE_DBG_CTL_DONE_MASK I40E_MASK(0x1, I40E_PBLOC_CACHE_DBG_CTL_DONE_SHIFT)
+
+#define I40E_PBLOC_CACHE_DBG_DATA 0x000A8090 /* Reset: CORER */
+#define I40E_PBLOC_CACHE_DBG_DATA_RD_DW_SHIFT 0
+#define I40E_PBLOC_CACHE_DBG_DATA_RD_DW_MASK I40E_MASK(0xFFFFFFFF, I40E_PBLOC_CACHE_DBG_DATA_RD_DW_SHIFT)
+
+#define I40E_PBLOC_CACHE_MEM_CFG 0x000A8054 /* Reset: POR */
+#define I40E_PBLOC_CACHE_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_PBLOC_CACHE_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_PBLOC_CACHE_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_PBLOC_CACHE_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_PBLOC_CACHE_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_PBLOC_CACHE_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_PBLOC_CACHE_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_PBLOC_CACHE_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_PBLOC_CACHE_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_PBLOC_CACHE_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_PBLOC_CACHE_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_PBLOC_CACHE_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_PBLOC_CACHE_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_PBLOC_CACHE_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_PBLOC_CACHE_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_PBLOC_CACHE_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_PBLOC_CACHE_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_PBLOC_CACHE_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_PBLOC_CACHE_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_PBLOC_CACHE_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_PBLOC_CACHE_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_PBLOC_CACHE_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_PBLOC_CACHE_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_PBLOC_CACHE_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_PBLOC_CACHE_MEM_CFG_RME_SHIFT 12
+#define I40E_PBLOC_CACHE_MEM_CFG_RME_MASK I40E_MASK(0x1, I40E_PBLOC_CACHE_MEM_CFG_RME_SHIFT)
+#define I40E_PBLOC_CACHE_MEM_CFG_RM_SHIFT 16
+#define I40E_PBLOC_CACHE_MEM_CFG_RM_MASK I40E_MASK(0xF, I40E_PBLOC_CACHE_MEM_CFG_RM_SHIFT)
+
+#define I40E_PBLOC_CACHE_MEM_STATUS 0x000A8058 /* Reset: POR */
+#define I40E_PBLOC_CACHE_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_PBLOC_CACHE_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_PBLOC_CACHE_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_PBLOC_CACHE_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_PBLOC_CACHE_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_PBLOC_CACHE_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_PBLOC_CACHE_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_PBLOC_CACHE_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_PBLOC_CACHE_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_PBLOC_CACHE_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_PBLOC_CACHE_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_PBLOC_CACHE_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_PBLOC_ECC_COR_ERR 0x000A8080 /* Reset: POR */
+#define I40E_PBLOC_ECC_COR_ERR_CNT_SHIFT 0
+#define I40E_PBLOC_ECC_COR_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_PBLOC_ECC_COR_ERR_CNT_SHIFT)
+
+#define I40E_PBLOC_ECC_UNCOR_ERR 0x000A807C /* Reset: POR */
+#define I40E_PBLOC_ECC_UNCOR_ERR_CNT_SHIFT 0
+#define I40E_PBLOC_ECC_UNCOR_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_PBLOC_ECC_UNCOR_ERR_CNT_SHIFT)
+
+#define I40E_PBLOC_EVICT_MEM_CFG 0x000A8074 /* Reset: POR */
+#define I40E_PBLOC_EVICT_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_PBLOC_EVICT_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_PBLOC_EVICT_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_PBLOC_EVICT_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_PBLOC_EVICT_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_PBLOC_EVICT_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_PBLOC_EVICT_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_PBLOC_EVICT_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_PBLOC_EVICT_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_PBLOC_EVICT_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_PBLOC_EVICT_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_PBLOC_EVICT_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_PBLOC_EVICT_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_PBLOC_EVICT_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_PBLOC_EVICT_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_PBLOC_EVICT_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_PBLOC_EVICT_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_PBLOC_EVICT_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_PBLOC_EVICT_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_PBLOC_EVICT_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_PBLOC_EVICT_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_PBLOC_EVICT_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_PBLOC_EVICT_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_PBLOC_EVICT_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_PBLOC_EVICT_MEM_CFG_RME_A_SHIFT 12
+#define I40E_PBLOC_EVICT_MEM_CFG_RME_A_MASK I40E_MASK(0x1, I40E_PBLOC_EVICT_MEM_CFG_RME_A_SHIFT)
+#define I40E_PBLOC_EVICT_MEM_CFG_RME_B_SHIFT 13
+#define I40E_PBLOC_EVICT_MEM_CFG_RME_B_MASK I40E_MASK(0x1, I40E_PBLOC_EVICT_MEM_CFG_RME_B_SHIFT)
+#define I40E_PBLOC_EVICT_MEM_CFG_RM_A_SHIFT 16
+#define I40E_PBLOC_EVICT_MEM_CFG_RM_A_MASK I40E_MASK(0xF, I40E_PBLOC_EVICT_MEM_CFG_RM_A_SHIFT)
+#define I40E_PBLOC_EVICT_MEM_CFG_RM_B_SHIFT 20
+#define I40E_PBLOC_EVICT_MEM_CFG_RM_B_MASK I40E_MASK(0xF, I40E_PBLOC_EVICT_MEM_CFG_RM_B_SHIFT)
+
+#define I40E_PBLOC_EVICT_MEM_STATUS 0x000A8078 /* Reset: POR */
+#define I40E_PBLOC_EVICT_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_PBLOC_EVICT_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_PBLOC_EVICT_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_PBLOC_EVICT_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_PBLOC_EVICT_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_PBLOC_EVICT_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_PBLOC_EVICT_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_PBLOC_EVICT_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_PBLOC_EVICT_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_PBLOC_EVICT_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_PBLOC_EVICT_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_PBLOC_EVICT_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_PBLOC_FILL_MEM_CFG 0x000A8064 /* Reset: POR */
+#define I40E_PBLOC_FILL_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_PBLOC_FILL_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_PBLOC_FILL_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_PBLOC_FILL_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_PBLOC_FILL_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_PBLOC_FILL_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_PBLOC_FILL_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_PBLOC_FILL_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_PBLOC_FILL_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_PBLOC_FILL_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_PBLOC_FILL_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_PBLOC_FILL_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_PBLOC_FILL_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_PBLOC_FILL_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_PBLOC_FILL_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_PBLOC_FILL_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_PBLOC_FILL_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_PBLOC_FILL_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_PBLOC_FILL_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_PBLOC_FILL_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_PBLOC_FILL_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_PBLOC_FILL_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_PBLOC_FILL_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_PBLOC_FILL_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_PBLOC_FILL_MEM_CFG_RME_A_SHIFT 12
+#define I40E_PBLOC_FILL_MEM_CFG_RME_A_MASK I40E_MASK(0x1, I40E_PBLOC_FILL_MEM_CFG_RME_A_SHIFT)
+#define I40E_PBLOC_FILL_MEM_CFG_RME_B_SHIFT 13
+#define I40E_PBLOC_FILL_MEM_CFG_RME_B_MASK I40E_MASK(0x1, I40E_PBLOC_FILL_MEM_CFG_RME_B_SHIFT)
+#define I40E_PBLOC_FILL_MEM_CFG_RM_A_SHIFT 16
+#define I40E_PBLOC_FILL_MEM_CFG_RM_A_MASK I40E_MASK(0xF, I40E_PBLOC_FILL_MEM_CFG_RM_A_SHIFT)
+#define I40E_PBLOC_FILL_MEM_CFG_RM_B_SHIFT 20
+#define I40E_PBLOC_FILL_MEM_CFG_RM_B_MASK I40E_MASK(0xF, I40E_PBLOC_FILL_MEM_CFG_RM_B_SHIFT)
+
+#define I40E_PBLOC_FILL_MEM_STATUS 0x000A8068 /* Reset: POR */
+#define I40E_PBLOC_FILL_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_PBLOC_FILL_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_PBLOC_FILL_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_PBLOC_FILL_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_PBLOC_FILL_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_PBLOC_FILL_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_PBLOC_FILL_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_PBLOC_FILL_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_PBLOC_FILL_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_PBLOC_FILL_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_PBLOC_FILL_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_PBLOC_FILL_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_PBLOC_PLIST_DBG_CTL 0x000A8094 /* Reset: CORER */
+#define I40E_PBLOC_PLIST_DBG_CTL_ADR_SHIFT 0
+#define I40E_PBLOC_PLIST_DBG_CTL_ADR_MASK I40E_MASK(0x3FFFF, I40E_PBLOC_PLIST_DBG_CTL_ADR_SHIFT)
+#define I40E_PBLOC_PLIST_DBG_CTL_DW_SEL_SHIFT 18
+#define I40E_PBLOC_PLIST_DBG_CTL_DW_SEL_MASK I40E_MASK(0xFF, I40E_PBLOC_PLIST_DBG_CTL_DW_SEL_SHIFT)
+#define I40E_PBLOC_PLIST_DBG_CTL_RD_EN_SHIFT 30
+#define I40E_PBLOC_PLIST_DBG_CTL_RD_EN_MASK I40E_MASK(0x1, I40E_PBLOC_PLIST_DBG_CTL_RD_EN_SHIFT)
+#define I40E_PBLOC_PLIST_DBG_CTL_DONE_SHIFT 31
+#define I40E_PBLOC_PLIST_DBG_CTL_DONE_MASK I40E_MASK(0x1, I40E_PBLOC_PLIST_DBG_CTL_DONE_SHIFT)
+
+#define I40E_PBLOC_PLIST_DBG_DATA 0x000A8098 /* Reset: CORER */
+#define I40E_PBLOC_PLIST_DBG_DATA_RD_DW_SHIFT 0
+#define I40E_PBLOC_PLIST_DBG_DATA_RD_DW_MASK I40E_MASK(0xFFFFFFFF, I40E_PBLOC_PLIST_DBG_DATA_RD_DW_SHIFT)
+
+#define I40E_PBLOC_PLIST_MEM_CFG 0x000A806C /* Reset: POR */
+#define I40E_PBLOC_PLIST_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_PBLOC_PLIST_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_PBLOC_PLIST_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_PBLOC_PLIST_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_PBLOC_PLIST_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_PBLOC_PLIST_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_PBLOC_PLIST_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_PBLOC_PLIST_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_PBLOC_PLIST_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_PBLOC_PLIST_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_PBLOC_PLIST_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_PBLOC_PLIST_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_PBLOC_PLIST_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_PBLOC_PLIST_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_PBLOC_PLIST_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_PBLOC_PLIST_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_PBLOC_PLIST_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_PBLOC_PLIST_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_PBLOC_PLIST_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_PBLOC_PLIST_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_PBLOC_PLIST_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_PBLOC_PLIST_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_PBLOC_PLIST_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_PBLOC_PLIST_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_PBLOC_PLIST_MEM_CFG_RME_SHIFT 12
+#define I40E_PBLOC_PLIST_MEM_CFG_RME_MASK I40E_MASK(0x1, I40E_PBLOC_PLIST_MEM_CFG_RME_SHIFT)
+#define I40E_PBLOC_PLIST_MEM_CFG_RM_SHIFT 16
+#define I40E_PBLOC_PLIST_MEM_CFG_RM_MASK I40E_MASK(0xF, I40E_PBLOC_PLIST_MEM_CFG_RM_SHIFT)
+
+#define I40E_PBLOC_PLIST_MEM_STATUS 0x000A8070 /* Reset: POR */
+#define I40E_PBLOC_PLIST_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_PBLOC_PLIST_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_PBLOC_PLIST_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_PBLOC_PLIST_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_PBLOC_PLIST_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_PBLOC_PLIST_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_PBLOC_PLIST_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_PBLOC_PLIST_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_PBLOC_PLIST_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_PBLOC_PLIST_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_PBLOC_PLIST_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_PBLOC_PLIST_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_PBLOC_TAG_DBG_CTL 0x000A8084 /* Reset: CORER */
+#define I40E_PBLOC_TAG_DBG_CTL_ADR_SHIFT 0
+#define I40E_PBLOC_TAG_DBG_CTL_ADR_MASK I40E_MASK(0x3FFFF, I40E_PBLOC_TAG_DBG_CTL_ADR_SHIFT)
+#define I40E_PBLOC_TAG_DBG_CTL_DW_SEL_SHIFT 18
+#define I40E_PBLOC_TAG_DBG_CTL_DW_SEL_MASK I40E_MASK(0xFF, I40E_PBLOC_TAG_DBG_CTL_DW_SEL_SHIFT)
+#define I40E_PBLOC_TAG_DBG_CTL_RD_EN_SHIFT 30
+#define I40E_PBLOC_TAG_DBG_CTL_RD_EN_MASK I40E_MASK(0x1, I40E_PBLOC_TAG_DBG_CTL_RD_EN_SHIFT)
+#define I40E_PBLOC_TAG_DBG_CTL_DONE_SHIFT 31
+#define I40E_PBLOC_TAG_DBG_CTL_DONE_MASK I40E_MASK(0x1, I40E_PBLOC_TAG_DBG_CTL_DONE_SHIFT)
+
+#define I40E_PBLOC_TAG_DBG_DATA 0x000A8088 /* Reset: CORER */
+#define I40E_PBLOC_TAG_DBG_DATA_RD_DW_SHIFT 0
+#define I40E_PBLOC_TAG_DBG_DATA_RD_DW_MASK I40E_MASK(0xFFFFFFFF, I40E_PBLOC_TAG_DBG_DATA_RD_DW_SHIFT)
+
+#define I40E_PBLOC_TAG_MEM_CFG 0x000A805C /* Reset: POR */
+#define I40E_PBLOC_TAG_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_PBLOC_TAG_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_PBLOC_TAG_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_PBLOC_TAG_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_PBLOC_TAG_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_PBLOC_TAG_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_PBLOC_TAG_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_PBLOC_TAG_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_PBLOC_TAG_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_PBLOC_TAG_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_PBLOC_TAG_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_PBLOC_TAG_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_PBLOC_TAG_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_PBLOC_TAG_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_PBLOC_TAG_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_PBLOC_TAG_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_PBLOC_TAG_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_PBLOC_TAG_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_PBLOC_TAG_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_PBLOC_TAG_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_PBLOC_TAG_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_PBLOC_TAG_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_PBLOC_TAG_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_PBLOC_TAG_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_PBLOC_TAG_MEM_CFG_RME_SHIFT 12
+#define I40E_PBLOC_TAG_MEM_CFG_RME_MASK I40E_MASK(0x1, I40E_PBLOC_TAG_MEM_CFG_RME_SHIFT)
+#define I40E_PBLOC_TAG_MEM_CFG_RM_SHIFT 16
+#define I40E_PBLOC_TAG_MEM_CFG_RM_MASK I40E_MASK(0xF, I40E_PBLOC_TAG_MEM_CFG_RM_SHIFT)
+
+#define I40E_PBLOC_TAG_MEM_STATUS 0x000A8060 /* Reset: POR */
+#define I40E_PBLOC_TAG_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_PBLOC_TAG_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_PBLOC_TAG_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_PBLOC_TAG_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_PBLOC_TAG_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_PBLOC_TAG_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_PBLOC_TAG_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_PBLOC_TAG_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_PBLOC_TAG_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_PBLOC_TAG_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_PBLOC_TAG_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_PBLOC_TAG_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_PCIE_ECC_COR_ERR 0x0009D080 /* Reset: POR */
+#define I40E_PCIE_ECC_COR_ERR_CNT_SHIFT 0
+#define I40E_PCIE_ECC_COR_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_PCIE_ECC_COR_ERR_CNT_SHIFT)
+
+#define I40E_PCIE_ECC_UNCOR_ERR 0x0009D07C /* Reset: POR */
+#define I40E_PCIE_ECC_UNCOR_ERR_CNT_SHIFT 0
+#define I40E_PCIE_ECC_UNCOR_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_PCIE_ECC_UNCOR_ERR_CNT_SHIFT)
+
+#define I40E_PCIE_IOSF_RX_DATA_CFG 0x0009D010 /* Reset: POR */
+#define I40E_PCIE_IOSF_RX_DATA_CFG_ECC_EN_SHIFT 0
+#define I40E_PCIE_IOSF_RX_DATA_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_RX_DATA_CFG_ECC_EN_SHIFT)
+#define I40E_PCIE_IOSF_RX_DATA_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_PCIE_IOSF_RX_DATA_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_RX_DATA_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_PCIE_IOSF_RX_DATA_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_PCIE_IOSF_RX_DATA_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_RX_DATA_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_PCIE_IOSF_RX_DATA_CFG_LS_FORCE_SHIFT 3
+#define I40E_PCIE_IOSF_RX_DATA_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_RX_DATA_CFG_LS_FORCE_SHIFT)
+#define I40E_PCIE_IOSF_RX_DATA_CFG_LS_BYPASS_SHIFT 4
+#define I40E_PCIE_IOSF_RX_DATA_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_RX_DATA_CFG_LS_BYPASS_SHIFT)
+#define I40E_PCIE_IOSF_RX_DATA_CFG_MASK_INT_SHIFT 5
+#define I40E_PCIE_IOSF_RX_DATA_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_RX_DATA_CFG_MASK_INT_SHIFT)
+#define I40E_PCIE_IOSF_RX_DATA_CFG_FIX_CNT_SHIFT 8
+#define I40E_PCIE_IOSF_RX_DATA_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_RX_DATA_CFG_FIX_CNT_SHIFT)
+#define I40E_PCIE_IOSF_RX_DATA_CFG_ERR_CNT_SHIFT 9
+#define I40E_PCIE_IOSF_RX_DATA_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_RX_DATA_CFG_ERR_CNT_SHIFT)
+#define I40E_PCIE_IOSF_RX_DATA_CFG_RME_A_SHIFT 12
+#define I40E_PCIE_IOSF_RX_DATA_CFG_RME_A_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_RX_DATA_CFG_RME_A_SHIFT)
+#define I40E_PCIE_IOSF_RX_DATA_CFG_RME_B_SHIFT 13
+#define I40E_PCIE_IOSF_RX_DATA_CFG_RME_B_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_RX_DATA_CFG_RME_B_SHIFT)
+#define I40E_PCIE_IOSF_RX_DATA_CFG_RM_A_SHIFT 16
+#define I40E_PCIE_IOSF_RX_DATA_CFG_RM_A_MASK I40E_MASK(0xF, I40E_PCIE_IOSF_RX_DATA_CFG_RM_A_SHIFT)
+#define I40E_PCIE_IOSF_RX_DATA_CFG_RM_B_SHIFT 20
+#define I40E_PCIE_IOSF_RX_DATA_CFG_RM_B_MASK I40E_MASK(0xF, I40E_PCIE_IOSF_RX_DATA_CFG_RM_B_SHIFT)
+
+#define I40E_PCIE_IOSF_RX_DATA_STATUS 0x0009D068 /* Reset: POR */
+#define I40E_PCIE_IOSF_RX_DATA_STATUS_ECC_ERR_SHIFT 0
+#define I40E_PCIE_IOSF_RX_DATA_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_RX_DATA_STATUS_ECC_ERR_SHIFT)
+#define I40E_PCIE_IOSF_RX_DATA_STATUS_ECC_FIX_SHIFT 1
+#define I40E_PCIE_IOSF_RX_DATA_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_RX_DATA_STATUS_ECC_FIX_SHIFT)
+#define I40E_PCIE_IOSF_RX_DATA_STATUS_INIT_DONE_SHIFT 2
+#define I40E_PCIE_IOSF_RX_DATA_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_RX_DATA_STATUS_INIT_DONE_SHIFT)
+#define I40E_PCIE_IOSF_RX_DATA_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_PCIE_IOSF_RX_DATA_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_RX_DATA_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_PCIE_IOSF_RX_HDR_CFG 0x0009D028 /* Reset: POR */
+#define I40E_PCIE_IOSF_RX_HDR_CFG_ECC_EN_SHIFT 0
+#define I40E_PCIE_IOSF_RX_HDR_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_RX_HDR_CFG_ECC_EN_SHIFT)
+#define I40E_PCIE_IOSF_RX_HDR_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_PCIE_IOSF_RX_HDR_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_RX_HDR_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_PCIE_IOSF_RX_HDR_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_PCIE_IOSF_RX_HDR_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_RX_HDR_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_PCIE_IOSF_RX_HDR_CFG_LS_FORCE_SHIFT 3
+#define I40E_PCIE_IOSF_RX_HDR_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_RX_HDR_CFG_LS_FORCE_SHIFT)
+#define I40E_PCIE_IOSF_RX_HDR_CFG_LS_BYPASS_SHIFT 4
+#define I40E_PCIE_IOSF_RX_HDR_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_RX_HDR_CFG_LS_BYPASS_SHIFT)
+#define I40E_PCIE_IOSF_RX_HDR_CFG_MASK_INT_SHIFT 5
+#define I40E_PCIE_IOSF_RX_HDR_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_RX_HDR_CFG_MASK_INT_SHIFT)
+#define I40E_PCIE_IOSF_RX_HDR_CFG_FIX_CNT_SHIFT 8
+#define I40E_PCIE_IOSF_RX_HDR_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_RX_HDR_CFG_FIX_CNT_SHIFT)
+#define I40E_PCIE_IOSF_RX_HDR_CFG_ERR_CNT_SHIFT 9
+#define I40E_PCIE_IOSF_RX_HDR_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_RX_HDR_CFG_ERR_CNT_SHIFT)
+#define I40E_PCIE_IOSF_RX_HDR_CFG_RME_A_SHIFT 12
+#define I40E_PCIE_IOSF_RX_HDR_CFG_RME_A_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_RX_HDR_CFG_RME_A_SHIFT)
+#define I40E_PCIE_IOSF_RX_HDR_CFG_RME_B_SHIFT 13
+#define I40E_PCIE_IOSF_RX_HDR_CFG_RME_B_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_RX_HDR_CFG_RME_B_SHIFT)
+#define I40E_PCIE_IOSF_RX_HDR_CFG_RM_A_SHIFT 16
+#define I40E_PCIE_IOSF_RX_HDR_CFG_RM_A_MASK I40E_MASK(0xF, I40E_PCIE_IOSF_RX_HDR_CFG_RM_A_SHIFT)
+#define I40E_PCIE_IOSF_RX_HDR_CFG_RM_B_SHIFT 20
+#define I40E_PCIE_IOSF_RX_HDR_CFG_RM_B_MASK I40E_MASK(0xF, I40E_PCIE_IOSF_RX_HDR_CFG_RM_B_SHIFT)
+
+#define I40E_PCIE_IOSF_RX_HDR_STATUS 0x0009D05C /* Reset: POR */
+#define I40E_PCIE_IOSF_RX_HDR_STATUS_ECC_ERR_SHIFT 0
+#define I40E_PCIE_IOSF_RX_HDR_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_RX_HDR_STATUS_ECC_ERR_SHIFT)
+#define I40E_PCIE_IOSF_RX_HDR_STATUS_ECC_FIX_SHIFT 1
+#define I40E_PCIE_IOSF_RX_HDR_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_RX_HDR_STATUS_ECC_FIX_SHIFT)
+#define I40E_PCIE_IOSF_RX_HDR_STATUS_INIT_DONE_SHIFT 2
+#define I40E_PCIE_IOSF_RX_HDR_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_RX_HDR_STATUS_INIT_DONE_SHIFT)
+#define I40E_PCIE_IOSF_RX_HDR_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_PCIE_IOSF_RX_HDR_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_RX_HDR_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_PCIE_IOSF_TX_DATA_CFG 0x0009D020 /* Reset: POR */
+#define I40E_PCIE_IOSF_TX_DATA_CFG_ECC_EN_SHIFT 0
+#define I40E_PCIE_IOSF_TX_DATA_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_TX_DATA_CFG_ECC_EN_SHIFT)
+#define I40E_PCIE_IOSF_TX_DATA_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_PCIE_IOSF_TX_DATA_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_TX_DATA_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_PCIE_IOSF_TX_DATA_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_PCIE_IOSF_TX_DATA_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_TX_DATA_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_PCIE_IOSF_TX_DATA_CFG_LS_FORCE_SHIFT 3
+#define I40E_PCIE_IOSF_TX_DATA_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_TX_DATA_CFG_LS_FORCE_SHIFT)
+#define I40E_PCIE_IOSF_TX_DATA_CFG_LS_BYPASS_SHIFT 4
+#define I40E_PCIE_IOSF_TX_DATA_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_TX_DATA_CFG_LS_BYPASS_SHIFT)
+#define I40E_PCIE_IOSF_TX_DATA_CFG_MASK_INT_SHIFT 5
+#define I40E_PCIE_IOSF_TX_DATA_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_TX_DATA_CFG_MASK_INT_SHIFT)
+#define I40E_PCIE_IOSF_TX_DATA_CFG_FIX_CNT_SHIFT 8
+#define I40E_PCIE_IOSF_TX_DATA_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_TX_DATA_CFG_FIX_CNT_SHIFT)
+#define I40E_PCIE_IOSF_TX_DATA_CFG_ERR_CNT_SHIFT 9
+#define I40E_PCIE_IOSF_TX_DATA_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_TX_DATA_CFG_ERR_CNT_SHIFT)
+#define I40E_PCIE_IOSF_TX_DATA_CFG_RME_A_SHIFT 12
+#define I40E_PCIE_IOSF_TX_DATA_CFG_RME_A_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_TX_DATA_CFG_RME_A_SHIFT)
+#define I40E_PCIE_IOSF_TX_DATA_CFG_RME_B_SHIFT 13
+#define I40E_PCIE_IOSF_TX_DATA_CFG_RME_B_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_TX_DATA_CFG_RME_B_SHIFT)
+#define I40E_PCIE_IOSF_TX_DATA_CFG_RM_A_SHIFT 16
+#define I40E_PCIE_IOSF_TX_DATA_CFG_RM_A_MASK I40E_MASK(0xF, I40E_PCIE_IOSF_TX_DATA_CFG_RM_A_SHIFT)
+#define I40E_PCIE_IOSF_TX_DATA_CFG_RM_B_SHIFT 20
+#define I40E_PCIE_IOSF_TX_DATA_CFG_RM_B_MASK I40E_MASK(0xF, I40E_PCIE_IOSF_TX_DATA_CFG_RM_B_SHIFT)
+
+#define I40E_PCIE_IOSF_TX_DATA_STATUS 0x0009D050 /* Reset: POR */
+#define I40E_PCIE_IOSF_TX_DATA_STATUS_ECC_ERR_SHIFT 0
+#define I40E_PCIE_IOSF_TX_DATA_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_TX_DATA_STATUS_ECC_ERR_SHIFT)
+#define I40E_PCIE_IOSF_TX_DATA_STATUS_ECC_FIX_SHIFT 1
+#define I40E_PCIE_IOSF_TX_DATA_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_TX_DATA_STATUS_ECC_FIX_SHIFT)
+#define I40E_PCIE_IOSF_TX_DATA_STATUS_INIT_DONE_SHIFT 2
+#define I40E_PCIE_IOSF_TX_DATA_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_TX_DATA_STATUS_INIT_DONE_SHIFT)
+#define I40E_PCIE_IOSF_TX_DATA_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_PCIE_IOSF_TX_DATA_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_TX_DATA_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_PCIE_IOSF_TX_HDR_CFG 0x0009D034 /* Reset: POR */
+#define I40E_PCIE_IOSF_TX_HDR_CFG_ECC_EN_SHIFT 0
+#define I40E_PCIE_IOSF_TX_HDR_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_TX_HDR_CFG_ECC_EN_SHIFT)
+#define I40E_PCIE_IOSF_TX_HDR_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_PCIE_IOSF_TX_HDR_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_TX_HDR_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_PCIE_IOSF_TX_HDR_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_PCIE_IOSF_TX_HDR_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_TX_HDR_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_PCIE_IOSF_TX_HDR_CFG_LS_FORCE_SHIFT 3
+#define I40E_PCIE_IOSF_TX_HDR_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_TX_HDR_CFG_LS_FORCE_SHIFT)
+#define I40E_PCIE_IOSF_TX_HDR_CFG_LS_BYPASS_SHIFT 4
+#define I40E_PCIE_IOSF_TX_HDR_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_TX_HDR_CFG_LS_BYPASS_SHIFT)
+#define I40E_PCIE_IOSF_TX_HDR_CFG_MASK_INT_SHIFT 5
+#define I40E_PCIE_IOSF_TX_HDR_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_TX_HDR_CFG_MASK_INT_SHIFT)
+#define I40E_PCIE_IOSF_TX_HDR_CFG_FIX_CNT_SHIFT 8
+#define I40E_PCIE_IOSF_TX_HDR_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_TX_HDR_CFG_FIX_CNT_SHIFT)
+#define I40E_PCIE_IOSF_TX_HDR_CFG_ERR_CNT_SHIFT 9
+#define I40E_PCIE_IOSF_TX_HDR_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_TX_HDR_CFG_ERR_CNT_SHIFT)
+#define I40E_PCIE_IOSF_TX_HDR_CFG_RME_A_SHIFT 12
+#define I40E_PCIE_IOSF_TX_HDR_CFG_RME_A_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_TX_HDR_CFG_RME_A_SHIFT)
+#define I40E_PCIE_IOSF_TX_HDR_CFG_RME_B_SHIFT 13
+#define I40E_PCIE_IOSF_TX_HDR_CFG_RME_B_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_TX_HDR_CFG_RME_B_SHIFT)
+#define I40E_PCIE_IOSF_TX_HDR_CFG_RM_A_SHIFT 16
+#define I40E_PCIE_IOSF_TX_HDR_CFG_RM_A_MASK I40E_MASK(0xF, I40E_PCIE_IOSF_TX_HDR_CFG_RM_A_SHIFT)
+#define I40E_PCIE_IOSF_TX_HDR_CFG_RM_B_SHIFT 20
+#define I40E_PCIE_IOSF_TX_HDR_CFG_RM_B_MASK I40E_MASK(0xF, I40E_PCIE_IOSF_TX_HDR_CFG_RM_B_SHIFT)
+
+#define I40E_PCIE_IOSF_TX_HDR_STATUS 0x0009D03C /* Reset: POR */
+#define I40E_PCIE_IOSF_TX_HDR_STATUS_ECC_ERR_SHIFT 0
+#define I40E_PCIE_IOSF_TX_HDR_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_TX_HDR_STATUS_ECC_ERR_SHIFT)
+#define I40E_PCIE_IOSF_TX_HDR_STATUS_ECC_FIX_SHIFT 1
+#define I40E_PCIE_IOSF_TX_HDR_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_TX_HDR_STATUS_ECC_FIX_SHIFT)
+#define I40E_PCIE_IOSF_TX_HDR_STATUS_INIT_DONE_SHIFT 2
+#define I40E_PCIE_IOSF_TX_HDR_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_TX_HDR_STATUS_INIT_DONE_SHIFT)
+#define I40E_PCIE_IOSF_TX_HDR_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_PCIE_IOSF_TX_HDR_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_PCIE_IOSF_TX_HDR_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_PCIE_MCTP_DATA_CFG 0x0009D01C /* Reset: POR */
+#define I40E_PCIE_MCTP_DATA_CFG_ECC_EN_SHIFT 0
+#define I40E_PCIE_MCTP_DATA_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_PCIE_MCTP_DATA_CFG_ECC_EN_SHIFT)
+#define I40E_PCIE_MCTP_DATA_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_PCIE_MCTP_DATA_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_PCIE_MCTP_DATA_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_PCIE_MCTP_DATA_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_PCIE_MCTP_DATA_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_PCIE_MCTP_DATA_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_PCIE_MCTP_DATA_CFG_LS_FORCE_SHIFT 3
+#define I40E_PCIE_MCTP_DATA_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_PCIE_MCTP_DATA_CFG_LS_FORCE_SHIFT)
+#define I40E_PCIE_MCTP_DATA_CFG_LS_BYPASS_SHIFT 4
+#define I40E_PCIE_MCTP_DATA_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_PCIE_MCTP_DATA_CFG_LS_BYPASS_SHIFT)
+#define I40E_PCIE_MCTP_DATA_CFG_MASK_INT_SHIFT 5
+#define I40E_PCIE_MCTP_DATA_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_PCIE_MCTP_DATA_CFG_MASK_INT_SHIFT)
+#define I40E_PCIE_MCTP_DATA_CFG_FIX_CNT_SHIFT 8
+#define I40E_PCIE_MCTP_DATA_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_PCIE_MCTP_DATA_CFG_FIX_CNT_SHIFT)
+#define I40E_PCIE_MCTP_DATA_CFG_ERR_CNT_SHIFT 9
+#define I40E_PCIE_MCTP_DATA_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_PCIE_MCTP_DATA_CFG_ERR_CNT_SHIFT)
+#define I40E_PCIE_MCTP_DATA_CFG_RME_SHIFT 12
+#define I40E_PCIE_MCTP_DATA_CFG_RME_MASK I40E_MASK(0x1, I40E_PCIE_MCTP_DATA_CFG_RME_SHIFT)
+#define I40E_PCIE_MCTP_DATA_CFG_RM_SHIFT 16
+#define I40E_PCIE_MCTP_DATA_CFG_RM_MASK I40E_MASK(0xF, I40E_PCIE_MCTP_DATA_CFG_RM_SHIFT)
+
+#define I40E_PCIE_MCTP_DATA_STATUS 0x0009D04C /* Reset: POR */
+#define I40E_PCIE_MCTP_DATA_STATUS_ECC_ERR_SHIFT 0
+#define I40E_PCIE_MCTP_DATA_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_PCIE_MCTP_DATA_STATUS_ECC_ERR_SHIFT)
+#define I40E_PCIE_MCTP_DATA_STATUS_ECC_FIX_SHIFT 1
+#define I40E_PCIE_MCTP_DATA_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_PCIE_MCTP_DATA_STATUS_ECC_FIX_SHIFT)
+#define I40E_PCIE_MCTP_DATA_STATUS_INIT_DONE_SHIFT 2
+#define I40E_PCIE_MCTP_DATA_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_PCIE_MCTP_DATA_STATUS_INIT_DONE_SHIFT)
+#define I40E_PCIE_MCTP_DATA_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_PCIE_MCTP_DATA_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_PCIE_MCTP_DATA_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_PCIE_MCTP_HDR_CFG 0x0009D004 /* Reset: POR */
+#define I40E_PCIE_MCTP_HDR_CFG_ECC_EN_SHIFT 0
+#define I40E_PCIE_MCTP_HDR_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_PCIE_MCTP_HDR_CFG_ECC_EN_SHIFT)
+#define I40E_PCIE_MCTP_HDR_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_PCIE_MCTP_HDR_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_PCIE_MCTP_HDR_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_PCIE_MCTP_HDR_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_PCIE_MCTP_HDR_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_PCIE_MCTP_HDR_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_PCIE_MCTP_HDR_CFG_LS_FORCE_SHIFT 3
+#define I40E_PCIE_MCTP_HDR_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_PCIE_MCTP_HDR_CFG_LS_FORCE_SHIFT)
+#define I40E_PCIE_MCTP_HDR_CFG_LS_BYPASS_SHIFT 4
+#define I40E_PCIE_MCTP_HDR_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_PCIE_MCTP_HDR_CFG_LS_BYPASS_SHIFT)
+#define I40E_PCIE_MCTP_HDR_CFG_MASK_INT_SHIFT 5
+#define I40E_PCIE_MCTP_HDR_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_PCIE_MCTP_HDR_CFG_MASK_INT_SHIFT)
+#define I40E_PCIE_MCTP_HDR_CFG_FIX_CNT_SHIFT 8
+#define I40E_PCIE_MCTP_HDR_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_PCIE_MCTP_HDR_CFG_FIX_CNT_SHIFT)
+#define I40E_PCIE_MCTP_HDR_CFG_ERR_CNT_SHIFT 9
+#define I40E_PCIE_MCTP_HDR_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_PCIE_MCTP_HDR_CFG_ERR_CNT_SHIFT)
+#define I40E_PCIE_MCTP_HDR_CFG_RME_SHIFT 12
+#define I40E_PCIE_MCTP_HDR_CFG_RME_MASK I40E_MASK(0x1, I40E_PCIE_MCTP_HDR_CFG_RME_SHIFT)
+#define I40E_PCIE_MCTP_HDR_CFG_RM_SHIFT 16
+#define I40E_PCIE_MCTP_HDR_CFG_RM_MASK I40E_MASK(0xF, I40E_PCIE_MCTP_HDR_CFG_RM_SHIFT)
+
+#define I40E_PCIE_MCTP_HDR_STATUS 0x0009D040 /* Reset: POR */
+#define I40E_PCIE_MCTP_HDR_STATUS_ECC_ERR_SHIFT 0
+#define I40E_PCIE_MCTP_HDR_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_PCIE_MCTP_HDR_STATUS_ECC_ERR_SHIFT)
+#define I40E_PCIE_MCTP_HDR_STATUS_ECC_FIX_SHIFT 1
+#define I40E_PCIE_MCTP_HDR_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_PCIE_MCTP_HDR_STATUS_ECC_FIX_SHIFT)
+#define I40E_PCIE_MCTP_HDR_STATUS_INIT_DONE_SHIFT 2
+#define I40E_PCIE_MCTP_HDR_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_PCIE_MCTP_HDR_STATUS_INIT_DONE_SHIFT)
+#define I40E_PCIE_MCTP_HDR_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_PCIE_MCTP_HDR_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_PCIE_MCTP_HDR_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_PCIE_MSIX_VEC_CFG 0x0009D030 /* Reset: POR */
+#define I40E_PCIE_MSIX_VEC_CFG_ECC_EN_SHIFT 0
+#define I40E_PCIE_MSIX_VEC_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_PCIE_MSIX_VEC_CFG_ECC_EN_SHIFT)
+#define I40E_PCIE_MSIX_VEC_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_PCIE_MSIX_VEC_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_PCIE_MSIX_VEC_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_PCIE_MSIX_VEC_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_PCIE_MSIX_VEC_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_PCIE_MSIX_VEC_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_PCIE_MSIX_VEC_CFG_LS_FORCE_SHIFT 3
+#define I40E_PCIE_MSIX_VEC_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_PCIE_MSIX_VEC_CFG_LS_FORCE_SHIFT)
+#define I40E_PCIE_MSIX_VEC_CFG_LS_BYPASS_SHIFT 4
+#define I40E_PCIE_MSIX_VEC_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_PCIE_MSIX_VEC_CFG_LS_BYPASS_SHIFT)
+#define I40E_PCIE_MSIX_VEC_CFG_MASK_INT_SHIFT 5
+#define I40E_PCIE_MSIX_VEC_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_PCIE_MSIX_VEC_CFG_MASK_INT_SHIFT)
+#define I40E_PCIE_MSIX_VEC_CFG_FIX_CNT_SHIFT 8
+#define I40E_PCIE_MSIX_VEC_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_PCIE_MSIX_VEC_CFG_FIX_CNT_SHIFT)
+#define I40E_PCIE_MSIX_VEC_CFG_ERR_CNT_SHIFT 9
+#define I40E_PCIE_MSIX_VEC_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_PCIE_MSIX_VEC_CFG_ERR_CNT_SHIFT)
+#define I40E_PCIE_MSIX_VEC_CFG_RME_SHIFT 12
+#define I40E_PCIE_MSIX_VEC_CFG_RME_MASK I40E_MASK(0x1, I40E_PCIE_MSIX_VEC_CFG_RME_SHIFT)
+#define I40E_PCIE_MSIX_VEC_CFG_RM_SHIFT 16
+#define I40E_PCIE_MSIX_VEC_CFG_RM_MASK I40E_MASK(0xF, I40E_PCIE_MSIX_VEC_CFG_RM_SHIFT)
+
+#define I40E_PCIE_MSIX_VEC_STATUS 0x0009D060 /* Reset: POR */
+#define I40E_PCIE_MSIX_VEC_STATUS_ECC_ERR_SHIFT 0
+#define I40E_PCIE_MSIX_VEC_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_PCIE_MSIX_VEC_STATUS_ECC_ERR_SHIFT)
+#define I40E_PCIE_MSIX_VEC_STATUS_ECC_FIX_SHIFT 1
+#define I40E_PCIE_MSIX_VEC_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_PCIE_MSIX_VEC_STATUS_ECC_FIX_SHIFT)
+#define I40E_PCIE_MSIX_VEC_STATUS_INIT_DONE_SHIFT 2
+#define I40E_PCIE_MSIX_VEC_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_PCIE_MSIX_VEC_STATUS_INIT_DONE_SHIFT)
+#define I40E_PCIE_MSIX_VEC_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_PCIE_MSIX_VEC_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_PCIE_MSIX_VEC_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_PCIE_NPQ_CPL_LAN_DESC_CFG 0x0009D06C /* Reset: POR */
+#define I40E_PCIE_NPQ_CPL_LAN_DESC_CFG_ECC_EN_SHIFT 0
+#define I40E_PCIE_NPQ_CPL_LAN_DESC_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_LAN_DESC_CFG_ECC_EN_SHIFT)
+#define I40E_PCIE_NPQ_CPL_LAN_DESC_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_PCIE_NPQ_CPL_LAN_DESC_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_LAN_DESC_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_PCIE_NPQ_CPL_LAN_DESC_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_PCIE_NPQ_CPL_LAN_DESC_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_LAN_DESC_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_PCIE_NPQ_CPL_LAN_DESC_CFG_LS_FORCE_SHIFT 3
+#define I40E_PCIE_NPQ_CPL_LAN_DESC_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_LAN_DESC_CFG_LS_FORCE_SHIFT)
+#define I40E_PCIE_NPQ_CPL_LAN_DESC_CFG_LS_BYPASS_SHIFT 4
+#define I40E_PCIE_NPQ_CPL_LAN_DESC_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_LAN_DESC_CFG_LS_BYPASS_SHIFT)
+#define I40E_PCIE_NPQ_CPL_LAN_DESC_CFG_MASK_INT_SHIFT 5
+#define I40E_PCIE_NPQ_CPL_LAN_DESC_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_LAN_DESC_CFG_MASK_INT_SHIFT)
+#define I40E_PCIE_NPQ_CPL_LAN_DESC_CFG_FIX_CNT_SHIFT 8
+#define I40E_PCIE_NPQ_CPL_LAN_DESC_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_LAN_DESC_CFG_FIX_CNT_SHIFT)
+#define I40E_PCIE_NPQ_CPL_LAN_DESC_CFG_ERR_CNT_SHIFT 9
+#define I40E_PCIE_NPQ_CPL_LAN_DESC_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_LAN_DESC_CFG_ERR_CNT_SHIFT)
+#define I40E_PCIE_NPQ_CPL_LAN_DESC_CFG_RME_SHIFT 12
+#define I40E_PCIE_NPQ_CPL_LAN_DESC_CFG_RME_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_LAN_DESC_CFG_RME_SHIFT)
+#define I40E_PCIE_NPQ_CPL_LAN_DESC_CFG_RM_SHIFT 16
+#define I40E_PCIE_NPQ_CPL_LAN_DESC_CFG_RM_MASK I40E_MASK(0xF, I40E_PCIE_NPQ_CPL_LAN_DESC_CFG_RM_SHIFT)
+
+#define I40E_PCIE_NPQ_CPL_LAN_DESC_STATUS 0x0009D074 /* Reset: POR */
+#define I40E_PCIE_NPQ_CPL_LAN_DESC_STATUS_ECC_ERR_SHIFT 0
+#define I40E_PCIE_NPQ_CPL_LAN_DESC_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_LAN_DESC_STATUS_ECC_ERR_SHIFT)
+#define I40E_PCIE_NPQ_CPL_LAN_DESC_STATUS_ECC_FIX_SHIFT 1
+#define I40E_PCIE_NPQ_CPL_LAN_DESC_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_LAN_DESC_STATUS_ECC_FIX_SHIFT)
+#define I40E_PCIE_NPQ_CPL_LAN_DESC_STATUS_INIT_DONE_SHIFT 2
+#define I40E_PCIE_NPQ_CPL_LAN_DESC_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_LAN_DESC_STATUS_INIT_DONE_SHIFT)
+#define I40E_PCIE_NPQ_CPL_LAN_DESC_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_PCIE_NPQ_CPL_LAN_DESC_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_LAN_DESC_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_PCIE_NPQ_CPL_MNG_CFG 0x0009D008 /* Reset: POR */
+#define I40E_PCIE_NPQ_CPL_MNG_CFG_ECC_EN_SHIFT 0
+#define I40E_PCIE_NPQ_CPL_MNG_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_MNG_CFG_ECC_EN_SHIFT)
+#define I40E_PCIE_NPQ_CPL_MNG_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_PCIE_NPQ_CPL_MNG_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_MNG_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_PCIE_NPQ_CPL_MNG_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_PCIE_NPQ_CPL_MNG_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_MNG_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_PCIE_NPQ_CPL_MNG_CFG_LS_FORCE_SHIFT 3
+#define I40E_PCIE_NPQ_CPL_MNG_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_MNG_CFG_LS_FORCE_SHIFT)
+#define I40E_PCIE_NPQ_CPL_MNG_CFG_LS_BYPASS_SHIFT 4
+#define I40E_PCIE_NPQ_CPL_MNG_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_MNG_CFG_LS_BYPASS_SHIFT)
+#define I40E_PCIE_NPQ_CPL_MNG_CFG_MASK_INT_SHIFT 5
+#define I40E_PCIE_NPQ_CPL_MNG_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_MNG_CFG_MASK_INT_SHIFT)
+#define I40E_PCIE_NPQ_CPL_MNG_CFG_FIX_CNT_SHIFT 8
+#define I40E_PCIE_NPQ_CPL_MNG_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_MNG_CFG_FIX_CNT_SHIFT)
+#define I40E_PCIE_NPQ_CPL_MNG_CFG_ERR_CNT_SHIFT 9
+#define I40E_PCIE_NPQ_CPL_MNG_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_MNG_CFG_ERR_CNT_SHIFT)
+#define I40E_PCIE_NPQ_CPL_MNG_CFG_RME_SHIFT 12
+#define I40E_PCIE_NPQ_CPL_MNG_CFG_RME_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_MNG_CFG_RME_SHIFT)
+#define I40E_PCIE_NPQ_CPL_MNG_CFG_RM_SHIFT 16
+#define I40E_PCIE_NPQ_CPL_MNG_CFG_RM_MASK I40E_MASK(0xF, I40E_PCIE_NPQ_CPL_MNG_CFG_RM_SHIFT)
+
+#define I40E_PCIE_NPQ_CPL_MNG_STATUS 0x0009D054 /* Reset: POR */
+#define I40E_PCIE_NPQ_CPL_MNG_STATUS_ECC_ERR_SHIFT 0
+#define I40E_PCIE_NPQ_CPL_MNG_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_MNG_STATUS_ECC_ERR_SHIFT)
+#define I40E_PCIE_NPQ_CPL_MNG_STATUS_ECC_FIX_SHIFT 1
+#define I40E_PCIE_NPQ_CPL_MNG_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_MNG_STATUS_ECC_FIX_SHIFT)
+#define I40E_PCIE_NPQ_CPL_MNG_STATUS_INIT_DONE_SHIFT 2
+#define I40E_PCIE_NPQ_CPL_MNG_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_MNG_STATUS_INIT_DONE_SHIFT)
+#define I40E_PCIE_NPQ_CPL_MNG_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_PCIE_NPQ_CPL_MNG_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_MNG_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_PCIE_NPQ_CPL_PE_DESC_CFG 0x0009D078 /* Reset: POR */
+#define I40E_PCIE_NPQ_CPL_PE_DESC_CFG_ECC_EN_SHIFT 0
+#define I40E_PCIE_NPQ_CPL_PE_DESC_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_PE_DESC_CFG_ECC_EN_SHIFT)
+#define I40E_PCIE_NPQ_CPL_PE_DESC_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_PCIE_NPQ_CPL_PE_DESC_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_PE_DESC_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_PCIE_NPQ_CPL_PE_DESC_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_PCIE_NPQ_CPL_PE_DESC_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_PE_DESC_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_PCIE_NPQ_CPL_PE_DESC_CFG_LS_FORCE_SHIFT 3
+#define I40E_PCIE_NPQ_CPL_PE_DESC_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_PE_DESC_CFG_LS_FORCE_SHIFT)
+#define I40E_PCIE_NPQ_CPL_PE_DESC_CFG_LS_BYPASS_SHIFT 4
+#define I40E_PCIE_NPQ_CPL_PE_DESC_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_PE_DESC_CFG_LS_BYPASS_SHIFT)
+#define I40E_PCIE_NPQ_CPL_PE_DESC_CFG_MASK_INT_SHIFT 5
+#define I40E_PCIE_NPQ_CPL_PE_DESC_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_PE_DESC_CFG_MASK_INT_SHIFT)
+#define I40E_PCIE_NPQ_CPL_PE_DESC_CFG_FIX_CNT_SHIFT 8
+#define I40E_PCIE_NPQ_CPL_PE_DESC_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_PE_DESC_CFG_FIX_CNT_SHIFT)
+#define I40E_PCIE_NPQ_CPL_PE_DESC_CFG_ERR_CNT_SHIFT 9
+#define I40E_PCIE_NPQ_CPL_PE_DESC_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_PE_DESC_CFG_ERR_CNT_SHIFT)
+#define I40E_PCIE_NPQ_CPL_PE_DESC_CFG_RME_SHIFT 12
+#define I40E_PCIE_NPQ_CPL_PE_DESC_CFG_RME_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_PE_DESC_CFG_RME_SHIFT)
+#define I40E_PCIE_NPQ_CPL_PE_DESC_CFG_RM_SHIFT 16
+#define I40E_PCIE_NPQ_CPL_PE_DESC_CFG_RM_MASK I40E_MASK(0xF, I40E_PCIE_NPQ_CPL_PE_DESC_CFG_RM_SHIFT)
+
+#define I40E_PCIE_NPQ_CPL_PE_DESC_STATUS 0x0009D070 /* Reset: POR */
+#define I40E_PCIE_NPQ_CPL_PE_DESC_STATUS_ECC_ERR_SHIFT 0
+#define I40E_PCIE_NPQ_CPL_PE_DESC_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_PE_DESC_STATUS_ECC_ERR_SHIFT)
+#define I40E_PCIE_NPQ_CPL_PE_DESC_STATUS_ECC_FIX_SHIFT 1
+#define I40E_PCIE_NPQ_CPL_PE_DESC_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_PE_DESC_STATUS_ECC_FIX_SHIFT)
+#define I40E_PCIE_NPQ_CPL_PE_DESC_STATUS_INIT_DONE_SHIFT 2
+#define I40E_PCIE_NPQ_CPL_PE_DESC_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_PE_DESC_STATUS_INIT_DONE_SHIFT)
+#define I40E_PCIE_NPQ_CPL_PE_DESC_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_PCIE_NPQ_CPL_PE_DESC_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_PE_DESC_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_PCIE_NPQ_CPL_PMAT_CFG 0x0009D000 /* Reset: POR */
+#define I40E_PCIE_NPQ_CPL_PMAT_CFG_ECC_EN_SHIFT 0
+#define I40E_PCIE_NPQ_CPL_PMAT_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_PMAT_CFG_ECC_EN_SHIFT)
+#define I40E_PCIE_NPQ_CPL_PMAT_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_PCIE_NPQ_CPL_PMAT_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_PMAT_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_PCIE_NPQ_CPL_PMAT_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_PCIE_NPQ_CPL_PMAT_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_PMAT_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_PCIE_NPQ_CPL_PMAT_CFG_LS_FORCE_SHIFT 3
+#define I40E_PCIE_NPQ_CPL_PMAT_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_PMAT_CFG_LS_FORCE_SHIFT)
+#define I40E_PCIE_NPQ_CPL_PMAT_CFG_LS_BYPASS_SHIFT 4
+#define I40E_PCIE_NPQ_CPL_PMAT_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_PMAT_CFG_LS_BYPASS_SHIFT)
+#define I40E_PCIE_NPQ_CPL_PMAT_CFG_MASK_INT_SHIFT 5
+#define I40E_PCIE_NPQ_CPL_PMAT_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_PMAT_CFG_MASK_INT_SHIFT)
+#define I40E_PCIE_NPQ_CPL_PMAT_CFG_FIX_CNT_SHIFT 8
+#define I40E_PCIE_NPQ_CPL_PMAT_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_PMAT_CFG_FIX_CNT_SHIFT)
+#define I40E_PCIE_NPQ_CPL_PMAT_CFG_ERR_CNT_SHIFT 9
+#define I40E_PCIE_NPQ_CPL_PMAT_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_PMAT_CFG_ERR_CNT_SHIFT)
+#define I40E_PCIE_NPQ_CPL_PMAT_CFG_RME_SHIFT 12
+#define I40E_PCIE_NPQ_CPL_PMAT_CFG_RME_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_PMAT_CFG_RME_SHIFT)
+#define I40E_PCIE_NPQ_CPL_PMAT_CFG_RM_SHIFT 16
+#define I40E_PCIE_NPQ_CPL_PMAT_CFG_RM_MASK I40E_MASK(0xF, I40E_PCIE_NPQ_CPL_PMAT_CFG_RM_SHIFT)
+
+#define I40E_PCIE_NPQ_CPL_PMAT_STATUS 0x0009D048 /* Reset: POR */
+#define I40E_PCIE_NPQ_CPL_PMAT_STATUS_ECC_ERR_SHIFT 0
+#define I40E_PCIE_NPQ_CPL_PMAT_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_PMAT_STATUS_ECC_ERR_SHIFT)
+#define I40E_PCIE_NPQ_CPL_PMAT_STATUS_ECC_FIX_SHIFT 1
+#define I40E_PCIE_NPQ_CPL_PMAT_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_PMAT_STATUS_ECC_FIX_SHIFT)
+#define I40E_PCIE_NPQ_CPL_PMAT_STATUS_INIT_DONE_SHIFT 2
+#define I40E_PCIE_NPQ_CPL_PMAT_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_PMAT_STATUS_INIT_DONE_SHIFT)
+#define I40E_PCIE_NPQ_CPL_PMAT_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_PCIE_NPQ_CPL_PMAT_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_PMAT_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_PCIE_NPQ_CPL_TDPU_CFG 0x0009D014 /* Reset: POR */
+#define I40E_PCIE_NPQ_CPL_TDPU_CFG_ECC_EN_SHIFT 0
+#define I40E_PCIE_NPQ_CPL_TDPU_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_TDPU_CFG_ECC_EN_SHIFT)
+#define I40E_PCIE_NPQ_CPL_TDPU_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_PCIE_NPQ_CPL_TDPU_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_TDPU_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_PCIE_NPQ_CPL_TDPU_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_PCIE_NPQ_CPL_TDPU_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_TDPU_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_PCIE_NPQ_CPL_TDPU_CFG_LS_FORCE_SHIFT 3
+#define I40E_PCIE_NPQ_CPL_TDPU_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_TDPU_CFG_LS_FORCE_SHIFT)
+#define I40E_PCIE_NPQ_CPL_TDPU_CFG_LS_BYPASS_SHIFT 4
+#define I40E_PCIE_NPQ_CPL_TDPU_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_TDPU_CFG_LS_BYPASS_SHIFT)
+#define I40E_PCIE_NPQ_CPL_TDPU_CFG_MASK_INT_SHIFT 5
+#define I40E_PCIE_NPQ_CPL_TDPU_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_TDPU_CFG_MASK_INT_SHIFT)
+#define I40E_PCIE_NPQ_CPL_TDPU_CFG_FIX_CNT_SHIFT 8
+#define I40E_PCIE_NPQ_CPL_TDPU_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_TDPU_CFG_FIX_CNT_SHIFT)
+#define I40E_PCIE_NPQ_CPL_TDPU_CFG_ERR_CNT_SHIFT 9
+#define I40E_PCIE_NPQ_CPL_TDPU_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_TDPU_CFG_ERR_CNT_SHIFT)
+#define I40E_PCIE_NPQ_CPL_TDPU_CFG_RME_SHIFT 12
+#define I40E_PCIE_NPQ_CPL_TDPU_CFG_RME_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_TDPU_CFG_RME_SHIFT)
+#define I40E_PCIE_NPQ_CPL_TDPU_CFG_RM_SHIFT 16
+#define I40E_PCIE_NPQ_CPL_TDPU_CFG_RM_MASK I40E_MASK(0xF, I40E_PCIE_NPQ_CPL_TDPU_CFG_RM_SHIFT)
+
+#define I40E_PCIE_NPQ_CPL_TDPU_STATUS 0x0009D064 /* Reset: POR */
+#define I40E_PCIE_NPQ_CPL_TDPU_STATUS_ECC_ERR_SHIFT 0
+#define I40E_PCIE_NPQ_CPL_TDPU_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_TDPU_STATUS_ECC_ERR_SHIFT)
+#define I40E_PCIE_NPQ_CPL_TDPU_STATUS_ECC_FIX_SHIFT 1
+#define I40E_PCIE_NPQ_CPL_TDPU_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_TDPU_STATUS_ECC_FIX_SHIFT)
+#define I40E_PCIE_NPQ_CPL_TDPU_STATUS_INIT_DONE_SHIFT 2
+#define I40E_PCIE_NPQ_CPL_TDPU_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_TDPU_STATUS_INIT_DONE_SHIFT)
+#define I40E_PCIE_NPQ_CPL_TDPU_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_PCIE_NPQ_CPL_TDPU_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_PCIE_NPQ_CPL_TDPU_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_PCIE_PQ_C125_CFG 0x0009D018 /* Reset: POR */
+#define I40E_PCIE_PQ_C125_CFG_ECC_EN_SHIFT 0
+#define I40E_PCIE_PQ_C125_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_PCIE_PQ_C125_CFG_ECC_EN_SHIFT)
+#define I40E_PCIE_PQ_C125_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_PCIE_PQ_C125_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_PCIE_PQ_C125_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_PCIE_PQ_C125_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_PCIE_PQ_C125_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_PCIE_PQ_C125_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_PCIE_PQ_C125_CFG_LS_FORCE_SHIFT 3
+#define I40E_PCIE_PQ_C125_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_PCIE_PQ_C125_CFG_LS_FORCE_SHIFT)
+#define I40E_PCIE_PQ_C125_CFG_LS_BYPASS_SHIFT 4
+#define I40E_PCIE_PQ_C125_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_PCIE_PQ_C125_CFG_LS_BYPASS_SHIFT)
+#define I40E_PCIE_PQ_C125_CFG_MASK_INT_SHIFT 5
+#define I40E_PCIE_PQ_C125_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_PCIE_PQ_C125_CFG_MASK_INT_SHIFT)
+#define I40E_PCIE_PQ_C125_CFG_FIX_CNT_SHIFT 8
+#define I40E_PCIE_PQ_C125_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_PCIE_PQ_C125_CFG_FIX_CNT_SHIFT)
+#define I40E_PCIE_PQ_C125_CFG_ERR_CNT_SHIFT 9
+#define I40E_PCIE_PQ_C125_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_PCIE_PQ_C125_CFG_ERR_CNT_SHIFT)
+#define I40E_PCIE_PQ_C125_CFG_RME_SHIFT 12
+#define I40E_PCIE_PQ_C125_CFG_RME_MASK I40E_MASK(0x1, I40E_PCIE_PQ_C125_CFG_RME_SHIFT)
+#define I40E_PCIE_PQ_C125_CFG_RM_SHIFT 16
+#define I40E_PCIE_PQ_C125_CFG_RM_MASK I40E_MASK(0xF, I40E_PCIE_PQ_C125_CFG_RM_SHIFT)
+
+#define I40E_PCIE_PQ_C125_STATUS 0x0009D038 /* Reset: POR */
+#define I40E_PCIE_PQ_C125_STATUS_ECC_ERR_SHIFT 0
+#define I40E_PCIE_PQ_C125_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_PCIE_PQ_C125_STATUS_ECC_ERR_SHIFT)
+#define I40E_PCIE_PQ_C125_STATUS_ECC_FIX_SHIFT 1
+#define I40E_PCIE_PQ_C125_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_PCIE_PQ_C125_STATUS_ECC_FIX_SHIFT)
+#define I40E_PCIE_PQ_C125_STATUS_INIT_DONE_SHIFT 2
+#define I40E_PCIE_PQ_C125_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_PCIE_PQ_C125_STATUS_INIT_DONE_SHIFT)
+#define I40E_PCIE_PQ_C125_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_PCIE_PQ_C125_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_PCIE_PQ_C125_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_PCIE_PQ_C400_CFG 0x0009D024 /* Reset: POR */
+#define I40E_PCIE_PQ_C400_CFG_ECC_EN_SHIFT 0
+#define I40E_PCIE_PQ_C400_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_PCIE_PQ_C400_CFG_ECC_EN_SHIFT)
+#define I40E_PCIE_PQ_C400_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_PCIE_PQ_C400_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_PCIE_PQ_C400_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_PCIE_PQ_C400_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_PCIE_PQ_C400_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_PCIE_PQ_C400_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_PCIE_PQ_C400_CFG_LS_FORCE_SHIFT 3
+#define I40E_PCIE_PQ_C400_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_PCIE_PQ_C400_CFG_LS_FORCE_SHIFT)
+#define I40E_PCIE_PQ_C400_CFG_LS_BYPASS_SHIFT 4
+#define I40E_PCIE_PQ_C400_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_PCIE_PQ_C400_CFG_LS_BYPASS_SHIFT)
+#define I40E_PCIE_PQ_C400_CFG_MASK_INT_SHIFT 5
+#define I40E_PCIE_PQ_C400_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_PCIE_PQ_C400_CFG_MASK_INT_SHIFT)
+#define I40E_PCIE_PQ_C400_CFG_FIX_CNT_SHIFT 8
+#define I40E_PCIE_PQ_C400_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_PCIE_PQ_C400_CFG_FIX_CNT_SHIFT)
+#define I40E_PCIE_PQ_C400_CFG_ERR_CNT_SHIFT 9
+#define I40E_PCIE_PQ_C400_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_PCIE_PQ_C400_CFG_ERR_CNT_SHIFT)
+#define I40E_PCIE_PQ_C400_CFG_RME_SHIFT 12
+#define I40E_PCIE_PQ_C400_CFG_RME_MASK I40E_MASK(0x1, I40E_PCIE_PQ_C400_CFG_RME_SHIFT)
+#define I40E_PCIE_PQ_C400_CFG_RM_SHIFT 16
+#define I40E_PCIE_PQ_C400_CFG_RM_MASK I40E_MASK(0xF, I40E_PCIE_PQ_C400_CFG_RM_SHIFT)
+
+#define I40E_PCIE_PQ_C400_STATUS 0x0009D044 /* Reset: POR */
+#define I40E_PCIE_PQ_C400_STATUS_ECC_ERR_SHIFT 0
+#define I40E_PCIE_PQ_C400_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_PCIE_PQ_C400_STATUS_ECC_ERR_SHIFT)
+#define I40E_PCIE_PQ_C400_STATUS_ECC_FIX_SHIFT 1
+#define I40E_PCIE_PQ_C400_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_PCIE_PQ_C400_STATUS_ECC_FIX_SHIFT)
+#define I40E_PCIE_PQ_C400_STATUS_INIT_DONE_SHIFT 2
+#define I40E_PCIE_PQ_C400_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_PCIE_PQ_C400_STATUS_INIT_DONE_SHIFT)
+#define I40E_PCIE_PQ_C400_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_PCIE_PQ_C400_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_PCIE_PQ_C400_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_PCIE_RETRY_BUF_CFG 0x0009D02C /* Reset: POR */
+#define I40E_PCIE_RETRY_BUF_CFG_ECC_EN_SHIFT 0
+#define I40E_PCIE_RETRY_BUF_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_PCIE_RETRY_BUF_CFG_ECC_EN_SHIFT)
+#define I40E_PCIE_RETRY_BUF_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_PCIE_RETRY_BUF_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_PCIE_RETRY_BUF_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_PCIE_RETRY_BUF_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_PCIE_RETRY_BUF_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_PCIE_RETRY_BUF_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_PCIE_RETRY_BUF_CFG_LS_FORCE_SHIFT 3
+#define I40E_PCIE_RETRY_BUF_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_PCIE_RETRY_BUF_CFG_LS_FORCE_SHIFT)
+#define I40E_PCIE_RETRY_BUF_CFG_LS_BYPASS_SHIFT 4
+#define I40E_PCIE_RETRY_BUF_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_PCIE_RETRY_BUF_CFG_LS_BYPASS_SHIFT)
+#define I40E_PCIE_RETRY_BUF_CFG_MASK_INT_SHIFT 5
+#define I40E_PCIE_RETRY_BUF_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_PCIE_RETRY_BUF_CFG_MASK_INT_SHIFT)
+#define I40E_PCIE_RETRY_BUF_CFG_FIX_CNT_SHIFT 8
+#define I40E_PCIE_RETRY_BUF_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_PCIE_RETRY_BUF_CFG_FIX_CNT_SHIFT)
+#define I40E_PCIE_RETRY_BUF_CFG_ERR_CNT_SHIFT 9
+#define I40E_PCIE_RETRY_BUF_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_PCIE_RETRY_BUF_CFG_ERR_CNT_SHIFT)
+#define I40E_PCIE_RETRY_BUF_CFG_RME_SHIFT 12
+#define I40E_PCIE_RETRY_BUF_CFG_RME_MASK I40E_MASK(0x1, I40E_PCIE_RETRY_BUF_CFG_RME_SHIFT)
+#define I40E_PCIE_RETRY_BUF_CFG_RM_SHIFT 16
+#define I40E_PCIE_RETRY_BUF_CFG_RM_MASK I40E_MASK(0xF, I40E_PCIE_RETRY_BUF_CFG_RM_SHIFT)
+
+#define I40E_PCIE_RETRY_BUF_STATUS 0x0009D058 /* Reset: POR */
+#define I40E_PCIE_RETRY_BUF_STATUS_ECC_ERR_SHIFT 0
+#define I40E_PCIE_RETRY_BUF_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_PCIE_RETRY_BUF_STATUS_ECC_ERR_SHIFT)
+#define I40E_PCIE_RETRY_BUF_STATUS_ECC_FIX_SHIFT 1
+#define I40E_PCIE_RETRY_BUF_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_PCIE_RETRY_BUF_STATUS_ECC_FIX_SHIFT)
+#define I40E_PCIE_RETRY_BUF_STATUS_INIT_DONE_SHIFT 2
+#define I40E_PCIE_RETRY_BUF_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_PCIE_RETRY_BUF_STATUS_INIT_DONE_SHIFT)
+#define I40E_PCIE_RETRY_BUF_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_PCIE_RETRY_BUF_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_PCIE_RETRY_BUF_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_PDOC_CACHE_DBG_CTL 0x000D005C /* Reset: CORER */
+#define I40E_PDOC_CACHE_DBG_CTL_ADR_SHIFT 0
+#define I40E_PDOC_CACHE_DBG_CTL_ADR_MASK I40E_MASK(0x3FFFF, I40E_PDOC_CACHE_DBG_CTL_ADR_SHIFT)
+#define I40E_PDOC_CACHE_DBG_CTL_DW_SEL_SHIFT 18
+#define I40E_PDOC_CACHE_DBG_CTL_DW_SEL_MASK I40E_MASK(0xFF, I40E_PDOC_CACHE_DBG_CTL_DW_SEL_SHIFT)
+#define I40E_PDOC_CACHE_DBG_CTL_RD_EN_SHIFT 30
+#define I40E_PDOC_CACHE_DBG_CTL_RD_EN_MASK I40E_MASK(0x1, I40E_PDOC_CACHE_DBG_CTL_RD_EN_SHIFT)
+#define I40E_PDOC_CACHE_DBG_CTL_DONE_SHIFT 31
+#define I40E_PDOC_CACHE_DBG_CTL_DONE_MASK I40E_MASK(0x1, I40E_PDOC_CACHE_DBG_CTL_DONE_SHIFT)
+
+#define I40E_PDOC_CACHE_DBG_DATA 0x000D0060 /* Reset: CORER */
+#define I40E_PDOC_CACHE_DBG_DATA_RD_DW_SHIFT 0
+#define I40E_PDOC_CACHE_DBG_DATA_RD_DW_MASK I40E_MASK(0xFFFFFFFF, I40E_PDOC_CACHE_DBG_DATA_RD_DW_SHIFT)
+
+#define I40E_PDOC_CACHE_MEM_CFG 0x000D002C /* Reset: POR */
+#define I40E_PDOC_CACHE_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_PDOC_CACHE_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_PDOC_CACHE_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_PDOC_CACHE_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_PDOC_CACHE_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_PDOC_CACHE_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_PDOC_CACHE_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_PDOC_CACHE_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_PDOC_CACHE_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_PDOC_CACHE_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_PDOC_CACHE_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_PDOC_CACHE_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_PDOC_CACHE_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_PDOC_CACHE_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_PDOC_CACHE_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_PDOC_CACHE_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_PDOC_CACHE_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_PDOC_CACHE_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_PDOC_CACHE_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_PDOC_CACHE_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_PDOC_CACHE_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_PDOC_CACHE_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_PDOC_CACHE_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_PDOC_CACHE_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_PDOC_CACHE_MEM_CFG_RME_SHIFT 12
+#define I40E_PDOC_CACHE_MEM_CFG_RME_MASK I40E_MASK(0x1, I40E_PDOC_CACHE_MEM_CFG_RME_SHIFT)
+#define I40E_PDOC_CACHE_MEM_CFG_RM_SHIFT 16
+#define I40E_PDOC_CACHE_MEM_CFG_RM_MASK I40E_MASK(0xF, I40E_PDOC_CACHE_MEM_CFG_RM_SHIFT)
+
+#define I40E_PDOC_CACHE_MEM_STATUS 0x000D0030 /* Reset: POR */
+#define I40E_PDOC_CACHE_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_PDOC_CACHE_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_PDOC_CACHE_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_PDOC_CACHE_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_PDOC_CACHE_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_PDOC_CACHE_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_PDOC_CACHE_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_PDOC_CACHE_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_PDOC_CACHE_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_PDOC_CACHE_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_PDOC_CACHE_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_PDOC_CACHE_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_PDOC_ECC_COR_ERR_CNT 0x000D0050 /* Reset: POR */
+#define I40E_PDOC_ECC_COR_ERR_CNT_CNT_SHIFT 0
+#define I40E_PDOC_ECC_COR_ERR_CNT_CNT_MASK I40E_MASK(0xFFF, I40E_PDOC_ECC_COR_ERR_CNT_CNT_SHIFT)
+
+#define I40E_PDOC_ECC_UNCOR_ERR_CNT 0x000D004C /* Reset: POR */
+#define I40E_PDOC_ECC_UNCOR_ERR_CNT_CNT_SHIFT 0
+#define I40E_PDOC_ECC_UNCOR_ERR_CNT_CNT_MASK I40E_MASK(0xFFF, I40E_PDOC_ECC_UNCOR_ERR_CNT_CNT_SHIFT)
+
+#define I40E_PDOC_FILL_MEM_CFG 0x000D003C /* Reset: POR */
+#define I40E_PDOC_FILL_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_PDOC_FILL_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_PDOC_FILL_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_PDOC_FILL_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_PDOC_FILL_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_PDOC_FILL_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_PDOC_FILL_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_PDOC_FILL_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_PDOC_FILL_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_PDOC_FILL_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_PDOC_FILL_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_PDOC_FILL_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_PDOC_FILL_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_PDOC_FILL_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_PDOC_FILL_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_PDOC_FILL_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_PDOC_FILL_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_PDOC_FILL_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_PDOC_FILL_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_PDOC_FILL_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_PDOC_FILL_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_PDOC_FILL_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_PDOC_FILL_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_PDOC_FILL_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_PDOC_FILL_MEM_CFG_RME_SHIFT 12
+#define I40E_PDOC_FILL_MEM_CFG_RME_MASK I40E_MASK(0x1, I40E_PDOC_FILL_MEM_CFG_RME_SHIFT)
+#define I40E_PDOC_FILL_MEM_CFG_RM_SHIFT 16
+#define I40E_PDOC_FILL_MEM_CFG_RM_MASK I40E_MASK(0xF, I40E_PDOC_FILL_MEM_CFG_RM_SHIFT)
+
+#define I40E_PDOC_FILL_MEM_STATUS 0x000D0040 /* Reset: POR */
+#define I40E_PDOC_FILL_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_PDOC_FILL_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_PDOC_FILL_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_PDOC_FILL_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_PDOC_FILL_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_PDOC_FILL_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_PDOC_FILL_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_PDOC_FILL_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_PDOC_FILL_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_PDOC_FILL_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_PDOC_FILL_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_PDOC_FILL_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_PDOC_PLIST_DBG_CTL 0x000D0064 /* Reset: CORER */
+#define I40E_PDOC_PLIST_DBG_CTL_ADR_SHIFT 0
+#define I40E_PDOC_PLIST_DBG_CTL_ADR_MASK I40E_MASK(0x3FFFF, I40E_PDOC_PLIST_DBG_CTL_ADR_SHIFT)
+#define I40E_PDOC_PLIST_DBG_CTL_DW_SEL_SHIFT 18
+#define I40E_PDOC_PLIST_DBG_CTL_DW_SEL_MASK I40E_MASK(0xFF, I40E_PDOC_PLIST_DBG_CTL_DW_SEL_SHIFT)
+#define I40E_PDOC_PLIST_DBG_CTL_RD_EN_SHIFT 30
+#define I40E_PDOC_PLIST_DBG_CTL_RD_EN_MASK I40E_MASK(0x1, I40E_PDOC_PLIST_DBG_CTL_RD_EN_SHIFT)
+#define I40E_PDOC_PLIST_DBG_CTL_DONE_SHIFT 31
+#define I40E_PDOC_PLIST_DBG_CTL_DONE_MASK I40E_MASK(0x1, I40E_PDOC_PLIST_DBG_CTL_DONE_SHIFT)
+
+#define I40E_PDOC_PLIST_DBG_DATA 0x000D0068 /* Reset: CORER */
+#define I40E_PDOC_PLIST_DBG_DATA_RD_DW_SHIFT 0
+#define I40E_PDOC_PLIST_DBG_DATA_RD_DW_MASK I40E_MASK(0xFFFFFFFF, I40E_PDOC_PLIST_DBG_DATA_RD_DW_SHIFT)
+
+#define I40E_PDOC_PLIST_MEM_CFG 0x000D0044 /* Reset: POR */
+#define I40E_PDOC_PLIST_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_PDOC_PLIST_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_PDOC_PLIST_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_PDOC_PLIST_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_PDOC_PLIST_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_PDOC_PLIST_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_PDOC_PLIST_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_PDOC_PLIST_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_PDOC_PLIST_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_PDOC_PLIST_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_PDOC_PLIST_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_PDOC_PLIST_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_PDOC_PLIST_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_PDOC_PLIST_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_PDOC_PLIST_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_PDOC_PLIST_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_PDOC_PLIST_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_PDOC_PLIST_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_PDOC_PLIST_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_PDOC_PLIST_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_PDOC_PLIST_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_PDOC_PLIST_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_PDOC_PLIST_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_PDOC_PLIST_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_PDOC_PLIST_MEM_CFG_RME_SHIFT 12
+#define I40E_PDOC_PLIST_MEM_CFG_RME_MASK I40E_MASK(0x1, I40E_PDOC_PLIST_MEM_CFG_RME_SHIFT)
+#define I40E_PDOC_PLIST_MEM_CFG_RM_SHIFT 16
+#define I40E_PDOC_PLIST_MEM_CFG_RM_MASK I40E_MASK(0xF, I40E_PDOC_PLIST_MEM_CFG_RM_SHIFT)
+
+#define I40E_PDOC_PLIST_MEM_STATUS 0x000D0048 /* Reset: POR */
+#define I40E_PDOC_PLIST_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_PDOC_PLIST_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_PDOC_PLIST_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_PDOC_PLIST_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_PDOC_PLIST_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_PDOC_PLIST_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_PDOC_PLIST_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_PDOC_PLIST_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_PDOC_PLIST_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_PDOC_PLIST_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_PDOC_PLIST_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_PDOC_PLIST_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_PDOC_TAG_DBG_CTL 0x000D0054 /* Reset: CORER */
+#define I40E_PDOC_TAG_DBG_CTL_ADR_SHIFT 0
+#define I40E_PDOC_TAG_DBG_CTL_ADR_MASK I40E_MASK(0x3FFFF, I40E_PDOC_TAG_DBG_CTL_ADR_SHIFT)
+#define I40E_PDOC_TAG_DBG_CTL_DW_SEL_SHIFT 18
+#define I40E_PDOC_TAG_DBG_CTL_DW_SEL_MASK I40E_MASK(0xFF, I40E_PDOC_TAG_DBG_CTL_DW_SEL_SHIFT)
+#define I40E_PDOC_TAG_DBG_CTL_RD_EN_SHIFT 30
+#define I40E_PDOC_TAG_DBG_CTL_RD_EN_MASK I40E_MASK(0x1, I40E_PDOC_TAG_DBG_CTL_RD_EN_SHIFT)
+#define I40E_PDOC_TAG_DBG_CTL_DONE_SHIFT 31
+#define I40E_PDOC_TAG_DBG_CTL_DONE_MASK I40E_MASK(0x1, I40E_PDOC_TAG_DBG_CTL_DONE_SHIFT)
+
+#define I40E_PDOC_TAG_DBG_DATA 0x000D0058 /* Reset: CORER */
+#define I40E_PDOC_TAG_DBG_DATA_RD_DW_SHIFT 0
+#define I40E_PDOC_TAG_DBG_DATA_RD_DW_MASK I40E_MASK(0xFFFFFFFF, I40E_PDOC_TAG_DBG_DATA_RD_DW_SHIFT)
+
+#define I40E_PDOC_TAG_MEM_CFG 0x000D0038 /* Reset: POR */
+#define I40E_PDOC_TAG_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_PDOC_TAG_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_PDOC_TAG_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_PDOC_TAG_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_PDOC_TAG_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_PDOC_TAG_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_PDOC_TAG_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_PDOC_TAG_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_PDOC_TAG_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_PDOC_TAG_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_PDOC_TAG_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_PDOC_TAG_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_PDOC_TAG_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_PDOC_TAG_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_PDOC_TAG_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_PDOC_TAG_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_PDOC_TAG_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_PDOC_TAG_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_PDOC_TAG_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_PDOC_TAG_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_PDOC_TAG_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_PDOC_TAG_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_PDOC_TAG_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_PDOC_TAG_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_PDOC_TAG_MEM_CFG_RME_SHIFT 12
+#define I40E_PDOC_TAG_MEM_CFG_RME_MASK I40E_MASK(0x1, I40E_PDOC_TAG_MEM_CFG_RME_SHIFT)
+#define I40E_PDOC_TAG_MEM_CFG_RM_SHIFT 16
+#define I40E_PDOC_TAG_MEM_CFG_RM_MASK I40E_MASK(0xF, I40E_PDOC_TAG_MEM_CFG_RM_SHIFT)
+
+#define I40E_PDOC_TAG_MEM_STATUS 0x000D0034 /* Reset: POR */
+#define I40E_PDOC_TAG_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_PDOC_TAG_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_PDOC_TAG_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_PDOC_TAG_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_PDOC_TAG_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_PDOC_TAG_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_PDOC_TAG_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_PDOC_TAG_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_PDOC_TAG_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_PDOC_TAG_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_PDOC_TAG_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_PDOC_TAG_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_PKT_INDICATIONS(_i) (0x000AC920 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PKT_INDICATIONS_MAX_INDEX 7
+#define I40E_PKT_INDICATIONS_START_CNT_SHIFT 0
+#define I40E_PKT_INDICATIONS_START_CNT_MASK I40E_MASK(0xFF, I40E_PKT_INDICATIONS_START_CNT_SHIFT)
+#define I40E_PKT_INDICATIONS_END_CNT_SHIFT 8
+#define I40E_PKT_INDICATIONS_END_CNT_MASK I40E_MASK(0xFF, I40E_PKT_INDICATIONS_END_CNT_SHIFT)
+#define I40E_PKT_INDICATIONS_STATUS_CNT_SHIFT 16
+#define I40E_PKT_INDICATIONS_STATUS_CNT_MASK I40E_MASK(0xFF, I40E_PKT_INDICATIONS_STATUS_CNT_SHIFT)
+#define I40E_PKT_INDICATIONS_DROP_CNT_SHIFT 24
+#define I40E_PKT_INDICATIONS_DROP_CNT_MASK I40E_MASK(0xFF, I40E_PKT_INDICATIONS_DROP_CNT_SHIFT)
+
+#define I40E_PMAT_ECC_COR_ERR 0x000C20CC /* Reset: POR */
+#define I40E_PMAT_ECC_COR_ERR_CNT_SHIFT 0
+#define I40E_PMAT_ECC_COR_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_PMAT_ECC_COR_ERR_CNT_SHIFT)
+
+#define I40E_PMAT_ECC_UNCOR_ERR_CNT 0x000C20C8 /* Reset: POR */
+#define I40E_PMAT_ECC_UNCOR_ERR_CNT_CNT_SHIFT 0
+#define I40E_PMAT_ECC_UNCOR_ERR_CNT_CNT_MASK I40E_MASK(0xFFF, I40E_PMAT_ECC_UNCOR_ERR_CNT_CNT_SHIFT)
+
+#define I40E_PMAT_OBJ_BASE_RAM_CFG 0x000C20B8 /* Reset: POR */
+#define I40E_PMAT_OBJ_BASE_RAM_CFG_ECC_EN_SHIFT 0
+#define I40E_PMAT_OBJ_BASE_RAM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_PMAT_OBJ_BASE_RAM_CFG_ECC_EN_SHIFT)
+#define I40E_PMAT_OBJ_BASE_RAM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_PMAT_OBJ_BASE_RAM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_PMAT_OBJ_BASE_RAM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_PMAT_OBJ_BASE_RAM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_PMAT_OBJ_BASE_RAM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_PMAT_OBJ_BASE_RAM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_PMAT_OBJ_BASE_RAM_CFG_LS_FORCE_SHIFT 3
+#define I40E_PMAT_OBJ_BASE_RAM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_PMAT_OBJ_BASE_RAM_CFG_LS_FORCE_SHIFT)
+#define I40E_PMAT_OBJ_BASE_RAM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_PMAT_OBJ_BASE_RAM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_PMAT_OBJ_BASE_RAM_CFG_LS_BYPASS_SHIFT)
+#define I40E_PMAT_OBJ_BASE_RAM_CFG_MASK_INT_SHIFT 5
+#define I40E_PMAT_OBJ_BASE_RAM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_PMAT_OBJ_BASE_RAM_CFG_MASK_INT_SHIFT)
+#define I40E_PMAT_OBJ_BASE_RAM_CFG_FIX_CNT_SHIFT 8
+#define I40E_PMAT_OBJ_BASE_RAM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_PMAT_OBJ_BASE_RAM_CFG_FIX_CNT_SHIFT)
+#define I40E_PMAT_OBJ_BASE_RAM_CFG_ERR_CNT_SHIFT 9
+#define I40E_PMAT_OBJ_BASE_RAM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_PMAT_OBJ_BASE_RAM_CFG_ERR_CNT_SHIFT)
+#define I40E_PMAT_OBJ_BASE_RAM_CFG_RME_SHIFT 12
+#define I40E_PMAT_OBJ_BASE_RAM_CFG_RME_MASK I40E_MASK(0x1, I40E_PMAT_OBJ_BASE_RAM_CFG_RME_SHIFT)
+#define I40E_PMAT_OBJ_BASE_RAM_CFG_RM_SHIFT 16
+#define I40E_PMAT_OBJ_BASE_RAM_CFG_RM_MASK I40E_MASK(0xF, I40E_PMAT_OBJ_BASE_RAM_CFG_RM_SHIFT)
+
+#define I40E_PMAT_OBJ_BASE_RAM_STATUS 0x000C20BC /* Reset: POR */
+#define I40E_PMAT_OBJ_BASE_RAM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_PMAT_OBJ_BASE_RAM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_PMAT_OBJ_BASE_RAM_STATUS_ECC_ERR_SHIFT)
+#define I40E_PMAT_OBJ_BASE_RAM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_PMAT_OBJ_BASE_RAM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_PMAT_OBJ_BASE_RAM_STATUS_ECC_FIX_SHIFT)
+#define I40E_PMAT_OBJ_BASE_RAM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_PMAT_OBJ_BASE_RAM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_PMAT_OBJ_BASE_RAM_STATUS_INIT_DONE_SHIFT)
+#define I40E_PMAT_OBJ_BASE_RAM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_PMAT_OBJ_BASE_RAM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_PMAT_OBJ_BASE_RAM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_PMAT_OBJ_BNDS_RAM_CFG 0x000C20C0 /* Reset: POR */
+#define I40E_PMAT_OBJ_BNDS_RAM_CFG_ECC_EN_SHIFT 0
+#define I40E_PMAT_OBJ_BNDS_RAM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_PMAT_OBJ_BNDS_RAM_CFG_ECC_EN_SHIFT)
+#define I40E_PMAT_OBJ_BNDS_RAM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_PMAT_OBJ_BNDS_RAM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_PMAT_OBJ_BNDS_RAM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_PMAT_OBJ_BNDS_RAM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_PMAT_OBJ_BNDS_RAM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_PMAT_OBJ_BNDS_RAM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_PMAT_OBJ_BNDS_RAM_CFG_LS_FORCE_SHIFT 3
+#define I40E_PMAT_OBJ_BNDS_RAM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_PMAT_OBJ_BNDS_RAM_CFG_LS_FORCE_SHIFT)
+#define I40E_PMAT_OBJ_BNDS_RAM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_PMAT_OBJ_BNDS_RAM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_PMAT_OBJ_BNDS_RAM_CFG_LS_BYPASS_SHIFT)
+#define I40E_PMAT_OBJ_BNDS_RAM_CFG_MASK_INT_SHIFT 5
+#define I40E_PMAT_OBJ_BNDS_RAM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_PMAT_OBJ_BNDS_RAM_CFG_MASK_INT_SHIFT)
+#define I40E_PMAT_OBJ_BNDS_RAM_CFG_FIX_CNT_SHIFT 8
+#define I40E_PMAT_OBJ_BNDS_RAM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_PMAT_OBJ_BNDS_RAM_CFG_FIX_CNT_SHIFT)
+#define I40E_PMAT_OBJ_BNDS_RAM_CFG_ERR_CNT_SHIFT 9
+#define I40E_PMAT_OBJ_BNDS_RAM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_PMAT_OBJ_BNDS_RAM_CFG_ERR_CNT_SHIFT)
+#define I40E_PMAT_OBJ_BNDS_RAM_CFG_RME_SHIFT 12
+#define I40E_PMAT_OBJ_BNDS_RAM_CFG_RME_MASK I40E_MASK(0x1, I40E_PMAT_OBJ_BNDS_RAM_CFG_RME_SHIFT)
+#define I40E_PMAT_OBJ_BNDS_RAM_CFG_RM_SHIFT 16
+#define I40E_PMAT_OBJ_BNDS_RAM_CFG_RM_MASK I40E_MASK(0xF, I40E_PMAT_OBJ_BNDS_RAM_CFG_RM_SHIFT)
+
+#define I40E_PMAT_OBJ_BNDS_RAM_STATUS 0x000C20C4 /* Reset: POR */
+#define I40E_PMAT_OBJ_BNDS_RAM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_PMAT_OBJ_BNDS_RAM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_PMAT_OBJ_BNDS_RAM_STATUS_ECC_ERR_SHIFT)
+#define I40E_PMAT_OBJ_BNDS_RAM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_PMAT_OBJ_BNDS_RAM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_PMAT_OBJ_BNDS_RAM_STATUS_ECC_FIX_SHIFT)
+#define I40E_PMAT_OBJ_BNDS_RAM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_PMAT_OBJ_BNDS_RAM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_PMAT_OBJ_BNDS_RAM_STATUS_INIT_DONE_SHIFT)
+#define I40E_PMAT_OBJ_BNDS_RAM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_PMAT_OBJ_BNDS_RAM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_PMAT_OBJ_BNDS_RAM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_PMAT_ST_RAM_CFG 0x000C20B0 /* Reset: POR */
+#define I40E_PMAT_ST_RAM_CFG_ECC_EN_SHIFT 0
+#define I40E_PMAT_ST_RAM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_PMAT_ST_RAM_CFG_ECC_EN_SHIFT)
+#define I40E_PMAT_ST_RAM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_PMAT_ST_RAM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_PMAT_ST_RAM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_PMAT_ST_RAM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_PMAT_ST_RAM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_PMAT_ST_RAM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_PMAT_ST_RAM_CFG_LS_FORCE_SHIFT 3
+#define I40E_PMAT_ST_RAM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_PMAT_ST_RAM_CFG_LS_FORCE_SHIFT)
+#define I40E_PMAT_ST_RAM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_PMAT_ST_RAM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_PMAT_ST_RAM_CFG_LS_BYPASS_SHIFT)
+#define I40E_PMAT_ST_RAM_CFG_MASK_INT_SHIFT 5
+#define I40E_PMAT_ST_RAM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_PMAT_ST_RAM_CFG_MASK_INT_SHIFT)
+#define I40E_PMAT_ST_RAM_CFG_FIX_CNT_SHIFT 8
+#define I40E_PMAT_ST_RAM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_PMAT_ST_RAM_CFG_FIX_CNT_SHIFT)
+#define I40E_PMAT_ST_RAM_CFG_ERR_CNT_SHIFT 9
+#define I40E_PMAT_ST_RAM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_PMAT_ST_RAM_CFG_ERR_CNT_SHIFT)
+#define I40E_PMAT_ST_RAM_CFG_RME_SHIFT 12
+#define I40E_PMAT_ST_RAM_CFG_RME_MASK I40E_MASK(0x1, I40E_PMAT_ST_RAM_CFG_RME_SHIFT)
+#define I40E_PMAT_ST_RAM_CFG_RM_SHIFT 16
+#define I40E_PMAT_ST_RAM_CFG_RM_MASK I40E_MASK(0xF, I40E_PMAT_ST_RAM_CFG_RM_SHIFT)
+
+#define I40E_PMAT_ST_RAM_STATUS 0x000C20B4 /* Reset: POR */
+#define I40E_PMAT_ST_RAM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_PMAT_ST_RAM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_PMAT_ST_RAM_STATUS_ECC_ERR_SHIFT)
+#define I40E_PMAT_ST_RAM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_PMAT_ST_RAM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_PMAT_ST_RAM_STATUS_ECC_FIX_SHIFT)
+#define I40E_PMAT_ST_RAM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_PMAT_ST_RAM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_PMAT_ST_RAM_STATUS_INIT_DONE_SHIFT)
+#define I40E_PMAT_ST_RAM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_PMAT_ST_RAM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_PMAT_ST_RAM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_PORT_CMD_BUF_MEM_CFG 0x000AE094 /* Reset: POR */
+#define I40E_PORT_CMD_BUF_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_PORT_CMD_BUF_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_PORT_CMD_BUF_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_PORT_CMD_BUF_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_PORT_CMD_BUF_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_PORT_CMD_BUF_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_PORT_CMD_BUF_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_PORT_CMD_BUF_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_PORT_CMD_BUF_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_PORT_CMD_BUF_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_PORT_CMD_BUF_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_PORT_CMD_BUF_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_PORT_CMD_BUF_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_PORT_CMD_BUF_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_PORT_CMD_BUF_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_PORT_CMD_BUF_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_PORT_CMD_BUF_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_PORT_CMD_BUF_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_PORT_CMD_BUF_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_PORT_CMD_BUF_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_PORT_CMD_BUF_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_PORT_CMD_BUF_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_PORT_CMD_BUF_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_PORT_CMD_BUF_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_PORT_CMD_BUF_MEM_CFG_RME_SHIFT 12
+#define I40E_PORT_CMD_BUF_MEM_CFG_RME_MASK I40E_MASK(0x1, I40E_PORT_CMD_BUF_MEM_CFG_RME_SHIFT)
+#define I40E_PORT_CMD_BUF_MEM_CFG_RM_SHIFT 16
+#define I40E_PORT_CMD_BUF_MEM_CFG_RM_MASK I40E_MASK(0xF, I40E_PORT_CMD_BUF_MEM_CFG_RM_SHIFT)
+
+#define I40E_PORT_CMD_BUF_MEM_STATUS 0x000AE098 /* Reset: POR */
+#define I40E_PORT_CMD_BUF_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_PORT_CMD_BUF_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_PORT_CMD_BUF_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_PORT_CMD_BUF_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_PORT_CMD_BUF_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_PORT_CMD_BUF_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_PORT_CMD_BUF_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_PORT_CMD_BUF_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_PORT_CMD_BUF_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_PORT_CMD_BUF_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_PORT_CMD_BUF_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_PORT_CMD_BUF_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_PORT_CMD_MNG_MEM_CFG 0x000AE09C /* Reset: POR */
+#define I40E_PORT_CMD_MNG_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_PORT_CMD_MNG_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_PORT_CMD_MNG_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_PORT_CMD_MNG_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_PORT_CMD_MNG_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_PORT_CMD_MNG_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_PORT_CMD_MNG_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_PORT_CMD_MNG_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_PORT_CMD_MNG_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_PORT_CMD_MNG_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_PORT_CMD_MNG_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_PORT_CMD_MNG_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_PORT_CMD_MNG_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_PORT_CMD_MNG_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_PORT_CMD_MNG_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_PORT_CMD_MNG_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_PORT_CMD_MNG_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_PORT_CMD_MNG_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_PORT_CMD_MNG_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_PORT_CMD_MNG_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_PORT_CMD_MNG_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_PORT_CMD_MNG_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_PORT_CMD_MNG_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_PORT_CMD_MNG_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_PORT_CMD_MNG_MEM_CFG_RME_SHIFT 12
+#define I40E_PORT_CMD_MNG_MEM_CFG_RME_MASK I40E_MASK(0x1, I40E_PORT_CMD_MNG_MEM_CFG_RME_SHIFT)
+#define I40E_PORT_CMD_MNG_MEM_CFG_RM_SHIFT 16
+#define I40E_PORT_CMD_MNG_MEM_CFG_RM_MASK I40E_MASK(0xF, I40E_PORT_CMD_MNG_MEM_CFG_RM_SHIFT)
+
+#define I40E_PORT_CMD_MNG_MEM_STATUS 0x000AE0A0 /* Reset: POR */
+#define I40E_PORT_CMD_MNG_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_PORT_CMD_MNG_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_PORT_CMD_MNG_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_PORT_CMD_MNG_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_PORT_CMD_MNG_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_PORT_CMD_MNG_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_PORT_CMD_MNG_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_PORT_CMD_MNG_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_PORT_CMD_MNG_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_PORT_CMD_MNG_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_PORT_CMD_MNG_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_PORT_CMD_MNG_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_PPRS_ECC_COR_ERR 0x00085BA0 /* Reset: POR */
+#define I40E_PPRS_ECC_COR_ERR_CNT_SHIFT 0
+#define I40E_PPRS_ECC_COR_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_PPRS_ECC_COR_ERR_CNT_SHIFT)
+
+#define I40E_PPRS_ECC_UNCOR_ERR 0x00085B80 /* Reset: POR */
+#define I40E_PPRS_ECC_UNCOR_ERR_CNT_SHIFT 0
+#define I40E_PPRS_ECC_UNCOR_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_PPRS_ECC_UNCOR_ERR_CNT_SHIFT)
+
+#define I40E_PPRS_PCKT_CFG 0x00085B00 /* Reset: POR */
+#define I40E_PPRS_PCKT_CFG_ECC_EN_SHIFT 0
+#define I40E_PPRS_PCKT_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_PPRS_PCKT_CFG_ECC_EN_SHIFT)
+#define I40E_PPRS_PCKT_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_PPRS_PCKT_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_PPRS_PCKT_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_PPRS_PCKT_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_PPRS_PCKT_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_PPRS_PCKT_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_PPRS_PCKT_CFG_LS_FORCE_SHIFT 3
+#define I40E_PPRS_PCKT_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_PPRS_PCKT_CFG_LS_FORCE_SHIFT)
+#define I40E_PPRS_PCKT_CFG_LS_BYPASS_SHIFT 4
+#define I40E_PPRS_PCKT_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_PPRS_PCKT_CFG_LS_BYPASS_SHIFT)
+#define I40E_PPRS_PCKT_CFG_MASK_INT_SHIFT 5
+#define I40E_PPRS_PCKT_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_PPRS_PCKT_CFG_MASK_INT_SHIFT)
+#define I40E_PPRS_PCKT_CFG_FIX_CNT_SHIFT 8
+#define I40E_PPRS_PCKT_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_PPRS_PCKT_CFG_FIX_CNT_SHIFT)
+#define I40E_PPRS_PCKT_CFG_ERR_CNT_SHIFT 9
+#define I40E_PPRS_PCKT_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_PPRS_PCKT_CFG_ERR_CNT_SHIFT)
+#define I40E_PPRS_PCKT_CFG_RME_SHIFT 12
+#define I40E_PPRS_PCKT_CFG_RME_MASK I40E_MASK(0x1, I40E_PPRS_PCKT_CFG_RME_SHIFT)
+#define I40E_PPRS_PCKT_CFG_RM_SHIFT 16
+#define I40E_PPRS_PCKT_CFG_RM_MASK I40E_MASK(0xF, I40E_PPRS_PCKT_CFG_RM_SHIFT)
+
+#define I40E_PPRS_PCKT_STATUS 0x00085B20 /* Reset: POR */
+#define I40E_PPRS_PCKT_STATUS_ECC_ERR_SHIFT 0
+#define I40E_PPRS_PCKT_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_PPRS_PCKT_STATUS_ECC_ERR_SHIFT)
+#define I40E_PPRS_PCKT_STATUS_ECC_FIX_SHIFT 1
+#define I40E_PPRS_PCKT_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_PPRS_PCKT_STATUS_ECC_FIX_SHIFT)
+#define I40E_PPRS_PCKT_STATUS_INIT_DONE_SHIFT 2
+#define I40E_PPRS_PCKT_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_PPRS_PCKT_STATUS_INIT_DONE_SHIFT)
+#define I40E_PPRS_PCKT_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_PPRS_PCKT_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_PPRS_PCKT_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_PPRS_RECIPE_CFG 0x00085B40 /* Reset: POR */
+#define I40E_PPRS_RECIPE_CFG_ECC_EN_SHIFT 0
+#define I40E_PPRS_RECIPE_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_PPRS_RECIPE_CFG_ECC_EN_SHIFT)
+#define I40E_PPRS_RECIPE_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_PPRS_RECIPE_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_PPRS_RECIPE_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_PPRS_RECIPE_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_PPRS_RECIPE_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_PPRS_RECIPE_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_PPRS_RECIPE_CFG_LS_FORCE_SHIFT 3
+#define I40E_PPRS_RECIPE_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_PPRS_RECIPE_CFG_LS_FORCE_SHIFT)
+#define I40E_PPRS_RECIPE_CFG_LS_BYPASS_SHIFT 4
+#define I40E_PPRS_RECIPE_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_PPRS_RECIPE_CFG_LS_BYPASS_SHIFT)
+#define I40E_PPRS_RECIPE_CFG_MASK_INT_SHIFT 5
+#define I40E_PPRS_RECIPE_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_PPRS_RECIPE_CFG_MASK_INT_SHIFT)
+#define I40E_PPRS_RECIPE_CFG_FIX_CNT_SHIFT 8
+#define I40E_PPRS_RECIPE_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_PPRS_RECIPE_CFG_FIX_CNT_SHIFT)
+#define I40E_PPRS_RECIPE_CFG_ERR_CNT_SHIFT 9
+#define I40E_PPRS_RECIPE_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_PPRS_RECIPE_CFG_ERR_CNT_SHIFT)
+#define I40E_PPRS_RECIPE_CFG_RME_SHIFT 12
+#define I40E_PPRS_RECIPE_CFG_RME_MASK I40E_MASK(0x1, I40E_PPRS_RECIPE_CFG_RME_SHIFT)
+#define I40E_PPRS_RECIPE_CFG_RM_SHIFT 16
+#define I40E_PPRS_RECIPE_CFG_RM_MASK I40E_MASK(0xF, I40E_PPRS_RECIPE_CFG_RM_SHIFT)
+
+#define I40E_PPRS_RECIPE_STATUS 0x00085B60 /* Reset: POR */
+#define I40E_PPRS_RECIPE_STATUS_ECC_ERR_SHIFT 0
+#define I40E_PPRS_RECIPE_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_PPRS_RECIPE_STATUS_ECC_ERR_SHIFT)
+#define I40E_PPRS_RECIPE_STATUS_ECC_FIX_SHIFT 1
+#define I40E_PPRS_RECIPE_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_PPRS_RECIPE_STATUS_ECC_FIX_SHIFT)
+#define I40E_PPRS_RECIPE_STATUS_INIT_DONE_SHIFT 2
+#define I40E_PPRS_RECIPE_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_PPRS_RECIPE_STATUS_INIT_DONE_SHIFT)
+#define I40E_PPRS_RECIPE_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_PPRS_RECIPE_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_PPRS_RECIPE_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_PRT_PPRS_CTRL 0x00086000 /* Reset: CORER */
+#define I40E_PRT_PPRS_CTRL_HDR_VLD_SEL_SHIFT 0
+#define I40E_PRT_PPRS_CTRL_HDR_VLD_SEL_MASK I40E_MASK(0x3, I40E_PRT_PPRS_CTRL_HDR_VLD_SEL_SHIFT)
+#define I40E_PRT_PPRS_CTRL_STOP_ANA_DIS_SHIFT 2
+#define I40E_PRT_PPRS_CTRL_STOP_ANA_DIS_MASK I40E_MASK(0x1, I40E_PRT_PPRS_CTRL_STOP_ANA_DIS_SHIFT)
+#define I40E_PRT_PPRS_CTRL_PRE_LY2_L2_EN_SHIFT 3
+#define I40E_PRT_PPRS_CTRL_PRE_LY2_L2_EN_MASK I40E_MASK(0x1, I40E_PRT_PPRS_CTRL_PRE_LY2_L2_EN_SHIFT)
+#define I40E_PRT_PPRS_CTRL_PRE_LY2_L3_EN_SHIFT 4
+#define I40E_PRT_PPRS_CTRL_PRE_LY2_L3_EN_MASK I40E_MASK(0x1, I40E_PRT_PPRS_CTRL_PRE_LY2_L3_EN_SHIFT)
+#define I40E_PRT_PPRS_CTRL_SPARE_27B_SHIFT 5
+#define I40E_PRT_PPRS_CTRL_SPARE_27B_MASK I40E_MASK(0x7FFFFFF, I40E_PRT_PPRS_CTRL_SPARE_27B_SHIFT)
+
+#define I40E_PRT_PPRS_DEFUALT_RECIPE_PTR 0x00086040 /* Reset: CORER */
+#define I40E_PRT_PPRS_DEFUALT_RECIPE_PTR_DEFUALT_RECIPE_PTR_SHIFT 0
+#define I40E_PRT_PPRS_DEFUALT_RECIPE_PTR_DEFUALT_RECIPE_PTR_MASK I40E_MASK(0x3FFFFF, I40E_PRT_PPRS_DEFUALT_RECIPE_PTR_DEFUALT_RECIPE_PTR_SHIFT)
+
+#define I40E_PRT_PPRS_DONE_CNT 0x00087020 /* Reset: CORER */
+#define I40E_PRT_PPRS_DONE_CNT_LY3_DONE_CNT_SHIFT 0
+#define I40E_PRT_PPRS_DONE_CNT_LY3_DONE_CNT_MASK I40E_MASK(0xFFFF, I40E_PRT_PPRS_DONE_CNT_LY3_DONE_CNT_SHIFT)
+#define I40E_PRT_PPRS_DONE_CNT_LY2_DONE_CNT_SHIFT 16
+#define I40E_PRT_PPRS_DONE_CNT_LY2_DONE_CNT_MASK I40E_MASK(0xFFFF, I40E_PRT_PPRS_DONE_CNT_LY2_DONE_CNT_SHIFT)
+
+#define I40E_PRT_PPRS_DROP_CNT 0x00087000 /* Reset: CORER */
+#define I40E_PRT_PPRS_DROP_CNT_PRT_PPRS_DROP_CNT_SHIFT 0
+#define I40E_PRT_PPRS_DROP_CNT_PRT_PPRS_DROP_CNT_MASK I40E_MASK(0xFFFF, I40E_PRT_PPRS_DROP_CNT_PRT_PPRS_DROP_CNT_SHIFT)
+
+#define I40E_PRT_PPRS_HDR_VLD_PCTYPE_EN 0x00086060 /* Reset: CORER */
+#define I40E_PRT_PPRS_HDR_VLD_PCTYPE_EN_HDR_VLD_PCTYPE_EN_SHIFT 0
+#define I40E_PRT_PPRS_HDR_VLD_PCTYPE_EN_HDR_VLD_PCTYPE_EN_MASK I40E_MASK(0xFFFF, I40E_PRT_PPRS_HDR_VLD_PCTYPE_EN_HDR_VLD_PCTYPE_EN_SHIFT)
+
+#define I40E_PRT_PPRS_NOT_PARSE_CNT 0x00087040 /* Reset: CORER */
+#define I40E_PRT_PPRS_NOT_PARSE_CNT_STOP_ANA_CNT_SHIFT 0
+#define I40E_PRT_PPRS_NOT_PARSE_CNT_STOP_ANA_CNT_MASK I40E_MASK(0xFFFF, I40E_PRT_PPRS_NOT_PARSE_CNT_STOP_ANA_CNT_SHIFT)
+#define I40E_PRT_PPRS_NOT_PARSE_CNT_ABORT_CNT_SHIFT 16
+#define I40E_PRT_PPRS_NOT_PARSE_CNT_ABORT_CNT_MASK I40E_MASK(0xFFFF, I40E_PRT_PPRS_NOT_PARSE_CNT_ABORT_CNT_SHIFT)
+
+#define I40E_PRT_PPRS_PERF_BUF 0x00086020 /* Reset: CORER */
+#define I40E_PRT_PPRS_PERF_BUF_HI_TRESH_SHIFT 0
+#define I40E_PRT_PPRS_PERF_BUF_HI_TRESH_MASK I40E_MASK(0x3F, I40E_PRT_PPRS_PERF_BUF_HI_TRESH_SHIFT)
+#define I40E_PRT_PPRS_PERF_BUF_LOW_TRESH_SHIFT 16
+#define I40E_PRT_PPRS_PERF_BUF_LOW_TRESH_MASK I40E_MASK(0x3F, I40E_PRT_PPRS_PERF_BUF_LOW_TRESH_SHIFT)
+
+#define I40E_PRT_PPRS_PKTS_CNT 0x00087060 /* Reset: CORER */
+#define I40E_PRT_PPRS_PKTS_CNT_RPB_IF_CNT_SHIFT 0
+#define I40E_PRT_PPRS_PKTS_CNT_RPB_IF_CNT_MASK I40E_MASK(0xFFFF, I40E_PRT_PPRS_PKTS_CNT_RPB_IF_CNT_SHIFT)
+#define I40E_PRT_PPRS_PKTS_CNT_MAC_IF_CNT_SHIFT 16
+#define I40E_PRT_PPRS_PKTS_CNT_MAC_IF_CNT_MASK I40E_MASK(0xFFFF, I40E_PRT_PPRS_PKTS_CNT_MAC_IF_CNT_SHIFT)
+
+#define I40E_PRT_SWR_PM_THR 0x0026CD00 /* Reset: CORER */
+#define I40E_PRT_SWR_PM_THR_THRESHOLD_SHIFT 0
+#define I40E_PRT_SWR_PM_THR_THRESHOLD_MASK I40E_MASK(0xFF, I40E_PRT_SWR_PM_THR_THRESHOLD_SHIFT)
+
+#define I40E_RCB_CHUNK_DATA_CFG 0x00122644 /* Reset: POR */
+#define I40E_RCB_CHUNK_DATA_CFG_ECC_EN_SHIFT 0
+#define I40E_RCB_CHUNK_DATA_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_RCB_CHUNK_DATA_CFG_ECC_EN_SHIFT)
+#define I40E_RCB_CHUNK_DATA_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_RCB_CHUNK_DATA_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_RCB_CHUNK_DATA_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_RCB_CHUNK_DATA_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_RCB_CHUNK_DATA_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_RCB_CHUNK_DATA_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_RCB_CHUNK_DATA_CFG_LS_FORCE_SHIFT 3
+#define I40E_RCB_CHUNK_DATA_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_RCB_CHUNK_DATA_CFG_LS_FORCE_SHIFT)
+#define I40E_RCB_CHUNK_DATA_CFG_LS_BYPASS_SHIFT 4
+#define I40E_RCB_CHUNK_DATA_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_RCB_CHUNK_DATA_CFG_LS_BYPASS_SHIFT)
+#define I40E_RCB_CHUNK_DATA_CFG_MASK_INT_SHIFT 5
+#define I40E_RCB_CHUNK_DATA_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_RCB_CHUNK_DATA_CFG_MASK_INT_SHIFT)
+#define I40E_RCB_CHUNK_DATA_CFG_FIX_CNT_SHIFT 8
+#define I40E_RCB_CHUNK_DATA_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_RCB_CHUNK_DATA_CFG_FIX_CNT_SHIFT)
+#define I40E_RCB_CHUNK_DATA_CFG_ERR_CNT_SHIFT 9
+#define I40E_RCB_CHUNK_DATA_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_RCB_CHUNK_DATA_CFG_ERR_CNT_SHIFT)
+#define I40E_RCB_CHUNK_DATA_CFG_RME_SHIFT 12
+#define I40E_RCB_CHUNK_DATA_CFG_RME_MASK I40E_MASK(0x1, I40E_RCB_CHUNK_DATA_CFG_RME_SHIFT)
+#define I40E_RCB_CHUNK_DATA_CFG_RM_SHIFT 16
+#define I40E_RCB_CHUNK_DATA_CFG_RM_MASK I40E_MASK(0xF, I40E_RCB_CHUNK_DATA_CFG_RM_SHIFT)
+
+#define I40E_RCB_CHUNK_DATA_STATUS 0x00122648 /* Reset: POR */
+#define I40E_RCB_CHUNK_DATA_STATUS_ECC_ERR_SHIFT 0
+#define I40E_RCB_CHUNK_DATA_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_RCB_CHUNK_DATA_STATUS_ECC_ERR_SHIFT)
+#define I40E_RCB_CHUNK_DATA_STATUS_ECC_FIX_SHIFT 1
+#define I40E_RCB_CHUNK_DATA_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_RCB_CHUNK_DATA_STATUS_ECC_FIX_SHIFT)
+#define I40E_RCB_CHUNK_DATA_STATUS_INIT_DONE_SHIFT 2
+#define I40E_RCB_CHUNK_DATA_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCB_CHUNK_DATA_STATUS_INIT_DONE_SHIFT)
+#define I40E_RCB_CHUNK_DATA_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_RCB_CHUNK_DATA_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCB_CHUNK_DATA_STATUS_GLOBAL_INIT_DONE_SHIFT)
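+
+/*
+ * Single-bit status fields can be tested directly against their mask; a
+ * minimal sketch, assuming a struct i40e_hw *hw and the rd32() helper from
+ * i40e_osdep.h:
+ *
+ *    u32 status = rd32(hw, I40E_RCB_CHUNK_DATA_STATUS);
+ *    bool ecc_err = (status & I40E_RCB_CHUNK_DATA_STATUS_ECC_ERR_MASK) != 0;
+ *    bool init_done = (status & I40E_RCB_CHUNK_DATA_STATUS_INIT_DONE_MASK) != 0;
+ */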
+
+#define I40E_RCB_ECC_COR_ERR 0x00122668 /* Reset: POR */
+#define I40E_RCB_ECC_COR_ERR_CNT_SHIFT 0
+#define I40E_RCB_ECC_COR_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_RCB_ECC_COR_ERR_CNT_SHIFT)
+
+#define I40E_RCB_ECC_UNCOR_ERR 0x00122664 /* Reset: POR */
+#define I40E_RCB_ECC_UNCOR_ERR_CNT_SHIFT 0
+#define I40E_RCB_ECC_UNCOR_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_RCB_ECC_UNCOR_ERR_CNT_SHIFT)
+
+#define I40E_RCB_HEAD_CACHE_CFG 0x0012264C /* Reset: POR */
+#define I40E_RCB_HEAD_CACHE_CFG_ECC_EN_SHIFT 0
+#define I40E_RCB_HEAD_CACHE_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_RCB_HEAD_CACHE_CFG_ECC_EN_SHIFT)
+#define I40E_RCB_HEAD_CACHE_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_RCB_HEAD_CACHE_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_RCB_HEAD_CACHE_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_RCB_HEAD_CACHE_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_RCB_HEAD_CACHE_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_RCB_HEAD_CACHE_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_RCB_HEAD_CACHE_CFG_LS_FORCE_SHIFT 3
+#define I40E_RCB_HEAD_CACHE_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_RCB_HEAD_CACHE_CFG_LS_FORCE_SHIFT)
+#define I40E_RCB_HEAD_CACHE_CFG_LS_BYPASS_SHIFT 4
+#define I40E_RCB_HEAD_CACHE_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_RCB_HEAD_CACHE_CFG_LS_BYPASS_SHIFT)
+#define I40E_RCB_HEAD_CACHE_CFG_MASK_INT_SHIFT 5
+#define I40E_RCB_HEAD_CACHE_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_RCB_HEAD_CACHE_CFG_MASK_INT_SHIFT)
+#define I40E_RCB_HEAD_CACHE_CFG_FIX_CNT_SHIFT 8
+#define I40E_RCB_HEAD_CACHE_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_RCB_HEAD_CACHE_CFG_FIX_CNT_SHIFT)
+#define I40E_RCB_HEAD_CACHE_CFG_ERR_CNT_SHIFT 9
+#define I40E_RCB_HEAD_CACHE_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_RCB_HEAD_CACHE_CFG_ERR_CNT_SHIFT)
+#define I40E_RCB_HEAD_CACHE_CFG_RME_SHIFT 12
+#define I40E_RCB_HEAD_CACHE_CFG_RME_MASK I40E_MASK(0x1, I40E_RCB_HEAD_CACHE_CFG_RME_SHIFT)
+#define I40E_RCB_HEAD_CACHE_CFG_RM_SHIFT 16
+#define I40E_RCB_HEAD_CACHE_CFG_RM_MASK I40E_MASK(0xF, I40E_RCB_HEAD_CACHE_CFG_RM_SHIFT)
+
+#define I40E_RCB_HEAD_CACHE_STATUS 0x00122650 /* Reset: POR */
+#define I40E_RCB_HEAD_CACHE_STATUS_ECC_ERR_SHIFT 0
+#define I40E_RCB_HEAD_CACHE_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_RCB_HEAD_CACHE_STATUS_ECC_ERR_SHIFT)
+#define I40E_RCB_HEAD_CACHE_STATUS_ECC_FIX_SHIFT 1
+#define I40E_RCB_HEAD_CACHE_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_RCB_HEAD_CACHE_STATUS_ECC_FIX_SHIFT)
+#define I40E_RCB_HEAD_CACHE_STATUS_INIT_DONE_SHIFT 2
+#define I40E_RCB_HEAD_CACHE_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCB_HEAD_CACHE_STATUS_INIT_DONE_SHIFT)
+#define I40E_RCB_HEAD_CACHE_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_RCB_HEAD_CACHE_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCB_HEAD_CACHE_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_RCB_INPUT_FIFO_CFG 0x0012265C /* Reset: POR */
+#define I40E_RCB_INPUT_FIFO_CFG_ECC_EN_SHIFT 0
+#define I40E_RCB_INPUT_FIFO_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_RCB_INPUT_FIFO_CFG_ECC_EN_SHIFT)
+#define I40E_RCB_INPUT_FIFO_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_RCB_INPUT_FIFO_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_RCB_INPUT_FIFO_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_RCB_INPUT_FIFO_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_RCB_INPUT_FIFO_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_RCB_INPUT_FIFO_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_RCB_INPUT_FIFO_CFG_LS_FORCE_SHIFT 3
+#define I40E_RCB_INPUT_FIFO_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_RCB_INPUT_FIFO_CFG_LS_FORCE_SHIFT)
+#define I40E_RCB_INPUT_FIFO_CFG_LS_BYPASS_SHIFT 4
+#define I40E_RCB_INPUT_FIFO_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_RCB_INPUT_FIFO_CFG_LS_BYPASS_SHIFT)
+#define I40E_RCB_INPUT_FIFO_CFG_MASK_INT_SHIFT 5
+#define I40E_RCB_INPUT_FIFO_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_RCB_INPUT_FIFO_CFG_MASK_INT_SHIFT)
+#define I40E_RCB_INPUT_FIFO_CFG_FIX_CNT_SHIFT 8
+#define I40E_RCB_INPUT_FIFO_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_RCB_INPUT_FIFO_CFG_FIX_CNT_SHIFT)
+#define I40E_RCB_INPUT_FIFO_CFG_ERR_CNT_SHIFT 9
+#define I40E_RCB_INPUT_FIFO_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_RCB_INPUT_FIFO_CFG_ERR_CNT_SHIFT)
+#define I40E_RCB_INPUT_FIFO_CFG_RME_SHIFT 12
+#define I40E_RCB_INPUT_FIFO_CFG_RME_MASK I40E_MASK(0x1, I40E_RCB_INPUT_FIFO_CFG_RME_SHIFT)
+#define I40E_RCB_INPUT_FIFO_CFG_RM_SHIFT 16
+#define I40E_RCB_INPUT_FIFO_CFG_RM_MASK I40E_MASK(0xF, I40E_RCB_INPUT_FIFO_CFG_RM_SHIFT)
+
+#define I40E_RCB_INPUT_FIFO_STATUS 0x00122660 /* Reset: POR */
+#define I40E_RCB_INPUT_FIFO_STATUS_ECC_ERR_SHIFT 0
+#define I40E_RCB_INPUT_FIFO_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_RCB_INPUT_FIFO_STATUS_ECC_ERR_SHIFT)
+#define I40E_RCB_INPUT_FIFO_STATUS_ECC_FIX_SHIFT 1
+#define I40E_RCB_INPUT_FIFO_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_RCB_INPUT_FIFO_STATUS_ECC_FIX_SHIFT)
+#define I40E_RCB_INPUT_FIFO_STATUS_INIT_DONE_SHIFT 2
+#define I40E_RCB_INPUT_FIFO_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCB_INPUT_FIFO_STATUS_INIT_DONE_SHIFT)
+#define I40E_RCB_INPUT_FIFO_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_RCB_INPUT_FIFO_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCB_INPUT_FIFO_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_RCB_LL_CFG 0x00122654 /* Reset: POR */
+#define I40E_RCB_LL_CFG_ECC_EN_SHIFT 0
+#define I40E_RCB_LL_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_RCB_LL_CFG_ECC_EN_SHIFT)
+#define I40E_RCB_LL_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_RCB_LL_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_RCB_LL_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_RCB_LL_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_RCB_LL_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_RCB_LL_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_RCB_LL_CFG_LS_FORCE_SHIFT 3
+#define I40E_RCB_LL_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_RCB_LL_CFG_LS_FORCE_SHIFT)
+#define I40E_RCB_LL_CFG_LS_BYPASS_SHIFT 4
+#define I40E_RCB_LL_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_RCB_LL_CFG_LS_BYPASS_SHIFT)
+#define I40E_RCB_LL_CFG_MASK_INT_SHIFT 5
+#define I40E_RCB_LL_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_RCB_LL_CFG_MASK_INT_SHIFT)
+#define I40E_RCB_LL_CFG_FIX_CNT_SHIFT 8
+#define I40E_RCB_LL_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_RCB_LL_CFG_FIX_CNT_SHIFT)
+#define I40E_RCB_LL_CFG_ERR_CNT_SHIFT 9
+#define I40E_RCB_LL_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_RCB_LL_CFG_ERR_CNT_SHIFT)
+#define I40E_RCB_LL_CFG_RME_SHIFT 12
+#define I40E_RCB_LL_CFG_RME_MASK I40E_MASK(0x1, I40E_RCB_LL_CFG_RME_SHIFT)
+#define I40E_RCB_LL_CFG_RM_SHIFT 16
+#define I40E_RCB_LL_CFG_RM_MASK I40E_MASK(0xF, I40E_RCB_LL_CFG_RM_SHIFT)
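+
+/*
+ * Multi-bit fields in the *_CFG registers are typically updated with a
+ * read-modify-write so that neighbouring bits are preserved.  A minimal
+ * sketch (the 0x2 value is purely illustrative), assuming a struct i40e_hw
+ * *hw and the rd32()/wr32() helpers from i40e_osdep.h:
+ *
+ *    u32 cfg = rd32(hw, I40E_RCB_LL_CFG);
+ *
+ *    cfg &= ~I40E_RCB_LL_CFG_RM_MASK;
+ *    cfg |= (0x2 << I40E_RCB_LL_CFG_RM_SHIFT) & I40E_RCB_LL_CFG_RM_MASK;
+ *    wr32(hw, I40E_RCB_LL_CFG, cfg);
+ */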
+
+#define I40E_RCB_LL_STATUS 0x00122658 /* Reset: POR */
+#define I40E_RCB_LL_STATUS_ECC_ERR_SHIFT 0
+#define I40E_RCB_LL_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_RCB_LL_STATUS_ECC_ERR_SHIFT)
+#define I40E_RCB_LL_STATUS_ECC_FIX_SHIFT 1
+#define I40E_RCB_LL_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_RCB_LL_STATUS_ECC_FIX_SHIFT)
+#define I40E_RCB_LL_STATUS_INIT_DONE_SHIFT 2
+#define I40E_RCB_LL_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCB_LL_STATUS_INIT_DONE_SHIFT)
+#define I40E_RCB_LL_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_RCB_LL_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCB_LL_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_RCU_DP_MTG_MFIFO_CFG 0x00269B34 /* Reset: POR */
+#define I40E_RCU_DP_MTG_MFIFO_CFG_ECC_EN_SHIFT 0
+#define I40E_RCU_DP_MTG_MFIFO_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_RCU_DP_MTG_MFIFO_CFG_ECC_EN_SHIFT)
+#define I40E_RCU_DP_MTG_MFIFO_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_RCU_DP_MTG_MFIFO_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_RCU_DP_MTG_MFIFO_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_RCU_DP_MTG_MFIFO_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_RCU_DP_MTG_MFIFO_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_RCU_DP_MTG_MFIFO_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_RCU_DP_MTG_MFIFO_CFG_LS_FORCE_SHIFT 3
+#define I40E_RCU_DP_MTG_MFIFO_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_RCU_DP_MTG_MFIFO_CFG_LS_FORCE_SHIFT)
+#define I40E_RCU_DP_MTG_MFIFO_CFG_LS_BYPASS_SHIFT 4
+#define I40E_RCU_DP_MTG_MFIFO_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_RCU_DP_MTG_MFIFO_CFG_LS_BYPASS_SHIFT)
+#define I40E_RCU_DP_MTG_MFIFO_CFG_MASK_INT_SHIFT 5
+#define I40E_RCU_DP_MTG_MFIFO_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_RCU_DP_MTG_MFIFO_CFG_MASK_INT_SHIFT)
+#define I40E_RCU_DP_MTG_MFIFO_CFG_FIX_CNT_SHIFT 8
+#define I40E_RCU_DP_MTG_MFIFO_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_RCU_DP_MTG_MFIFO_CFG_FIX_CNT_SHIFT)
+#define I40E_RCU_DP_MTG_MFIFO_CFG_ERR_CNT_SHIFT 9
+#define I40E_RCU_DP_MTG_MFIFO_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_RCU_DP_MTG_MFIFO_CFG_ERR_CNT_SHIFT)
+#define I40E_RCU_DP_MTG_MFIFO_CFG_RME_SHIFT 12
+#define I40E_RCU_DP_MTG_MFIFO_CFG_RME_MASK I40E_MASK(0x1, I40E_RCU_DP_MTG_MFIFO_CFG_RME_SHIFT)
+#define I40E_RCU_DP_MTG_MFIFO_CFG_RM_SHIFT 16
+#define I40E_RCU_DP_MTG_MFIFO_CFG_RM_MASK I40E_MASK(0xF, I40E_RCU_DP_MTG_MFIFO_CFG_RM_SHIFT)
+
+#define I40E_RCU_DP_MTG_MFIFO_STATUS 0x00269B3C /* Reset: POR */
+#define I40E_RCU_DP_MTG_MFIFO_STATUS_ECC_ERR_SHIFT 0
+#define I40E_RCU_DP_MTG_MFIFO_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_RCU_DP_MTG_MFIFO_STATUS_ECC_ERR_SHIFT)
+#define I40E_RCU_DP_MTG_MFIFO_STATUS_ECC_FIX_SHIFT 1
+#define I40E_RCU_DP_MTG_MFIFO_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_RCU_DP_MTG_MFIFO_STATUS_ECC_FIX_SHIFT)
+#define I40E_RCU_DP_MTG_MFIFO_STATUS_INIT_DONE_SHIFT 2
+#define I40E_RCU_DP_MTG_MFIFO_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCU_DP_MTG_MFIFO_STATUS_INIT_DONE_SHIFT)
+#define I40E_RCU_DP_MTG_MFIFO_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_RCU_DP_MTG_MFIFO_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCU_DP_MTG_MFIFO_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_RCU_DP_SWR_REP_MFIFO_CFG 0x00269B44 /* Reset: POR */
+#define I40E_RCU_DP_SWR_REP_MFIFO_CFG_ECC_EN_SHIFT 0
+#define I40E_RCU_DP_SWR_REP_MFIFO_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_RCU_DP_SWR_REP_MFIFO_CFG_ECC_EN_SHIFT)
+#define I40E_RCU_DP_SWR_REP_MFIFO_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_RCU_DP_SWR_REP_MFIFO_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_RCU_DP_SWR_REP_MFIFO_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_RCU_DP_SWR_REP_MFIFO_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_RCU_DP_SWR_REP_MFIFO_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_RCU_DP_SWR_REP_MFIFO_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_RCU_DP_SWR_REP_MFIFO_CFG_LS_FORCE_SHIFT 3
+#define I40E_RCU_DP_SWR_REP_MFIFO_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_RCU_DP_SWR_REP_MFIFO_CFG_LS_FORCE_SHIFT)
+#define I40E_RCU_DP_SWR_REP_MFIFO_CFG_LS_BYPASS_SHIFT 4
+#define I40E_RCU_DP_SWR_REP_MFIFO_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_RCU_DP_SWR_REP_MFIFO_CFG_LS_BYPASS_SHIFT)
+#define I40E_RCU_DP_SWR_REP_MFIFO_CFG_MASK_INT_SHIFT 5
+#define I40E_RCU_DP_SWR_REP_MFIFO_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_RCU_DP_SWR_REP_MFIFO_CFG_MASK_INT_SHIFT)
+#define I40E_RCU_DP_SWR_REP_MFIFO_CFG_FIX_CNT_SHIFT 8
+#define I40E_RCU_DP_SWR_REP_MFIFO_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_RCU_DP_SWR_REP_MFIFO_CFG_FIX_CNT_SHIFT)
+#define I40E_RCU_DP_SWR_REP_MFIFO_CFG_ERR_CNT_SHIFT 9
+#define I40E_RCU_DP_SWR_REP_MFIFO_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_RCU_DP_SWR_REP_MFIFO_CFG_ERR_CNT_SHIFT)
+#define I40E_RCU_DP_SWR_REP_MFIFO_CFG_RME_SHIFT 12
+#define I40E_RCU_DP_SWR_REP_MFIFO_CFG_RME_MASK I40E_MASK(0x1, I40E_RCU_DP_SWR_REP_MFIFO_CFG_RME_SHIFT)
+#define I40E_RCU_DP_SWR_REP_MFIFO_CFG_RM_SHIFT 16
+#define I40E_RCU_DP_SWR_REP_MFIFO_CFG_RM_MASK I40E_MASK(0xF, I40E_RCU_DP_SWR_REP_MFIFO_CFG_RM_SHIFT)
+
+#define I40E_RCU_DP_SWR_REP_MFIFO_STATUS 0x00269B4C /* Reset: POR */
+#define I40E_RCU_DP_SWR_REP_MFIFO_STATUS_ECC_ERR_SHIFT 0
+#define I40E_RCU_DP_SWR_REP_MFIFO_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_RCU_DP_SWR_REP_MFIFO_STATUS_ECC_ERR_SHIFT)
+#define I40E_RCU_DP_SWR_REP_MFIFO_STATUS_ECC_FIX_SHIFT 1
+#define I40E_RCU_DP_SWR_REP_MFIFO_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_RCU_DP_SWR_REP_MFIFO_STATUS_ECC_FIX_SHIFT)
+#define I40E_RCU_DP_SWR_REP_MFIFO_STATUS_INIT_DONE_SHIFT 2
+#define I40E_RCU_DP_SWR_REP_MFIFO_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCU_DP_SWR_REP_MFIFO_STATUS_INIT_DONE_SHIFT)
+#define I40E_RCU_DP_SWR_REP_MFIFO_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_RCU_DP_SWR_REP_MFIFO_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCU_DP_SWR_REP_MFIFO_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_RCU_DP_SWR_UP_STATUS_CFG 0x00269AFC /* Reset: POR */
+#define I40E_RCU_DP_SWR_UP_STATUS_CFG_ECC_EN_SHIFT 0
+#define I40E_RCU_DP_SWR_UP_STATUS_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_RCU_DP_SWR_UP_STATUS_CFG_ECC_EN_SHIFT)
+#define I40E_RCU_DP_SWR_UP_STATUS_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_RCU_DP_SWR_UP_STATUS_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_RCU_DP_SWR_UP_STATUS_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_RCU_DP_SWR_UP_STATUS_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_RCU_DP_SWR_UP_STATUS_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_RCU_DP_SWR_UP_STATUS_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_RCU_DP_SWR_UP_STATUS_CFG_LS_FORCE_SHIFT 3
+#define I40E_RCU_DP_SWR_UP_STATUS_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_RCU_DP_SWR_UP_STATUS_CFG_LS_FORCE_SHIFT)
+#define I40E_RCU_DP_SWR_UP_STATUS_CFG_LS_BYPASS_SHIFT 4
+#define I40E_RCU_DP_SWR_UP_STATUS_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_RCU_DP_SWR_UP_STATUS_CFG_LS_BYPASS_SHIFT)
+#define I40E_RCU_DP_SWR_UP_STATUS_CFG_MASK_INT_SHIFT 5
+#define I40E_RCU_DP_SWR_UP_STATUS_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_RCU_DP_SWR_UP_STATUS_CFG_MASK_INT_SHIFT)
+#define I40E_RCU_DP_SWR_UP_STATUS_CFG_FIX_CNT_SHIFT 8
+#define I40E_RCU_DP_SWR_UP_STATUS_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_RCU_DP_SWR_UP_STATUS_CFG_FIX_CNT_SHIFT)
+#define I40E_RCU_DP_SWR_UP_STATUS_CFG_ERR_CNT_SHIFT 9
+#define I40E_RCU_DP_SWR_UP_STATUS_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_RCU_DP_SWR_UP_STATUS_CFG_ERR_CNT_SHIFT)
+#define I40E_RCU_DP_SWR_UP_STATUS_CFG_RME_SHIFT 12
+#define I40E_RCU_DP_SWR_UP_STATUS_CFG_RME_MASK I40E_MASK(0x1, I40E_RCU_DP_SWR_UP_STATUS_CFG_RME_SHIFT)
+#define I40E_RCU_DP_SWR_UP_STATUS_CFG_RM_SHIFT 16
+#define I40E_RCU_DP_SWR_UP_STATUS_CFG_RM_MASK I40E_MASK(0xF, I40E_RCU_DP_SWR_UP_STATUS_CFG_RM_SHIFT)
+
+#define I40E_RCU_DP_SWR_UP_STATUS_STATUS 0x00269B0C /* Reset: POR */
+#define I40E_RCU_DP_SWR_UP_STATUS_STATUS_ECC_ERR_SHIFT 0
+#define I40E_RCU_DP_SWR_UP_STATUS_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_RCU_DP_SWR_UP_STATUS_STATUS_ECC_ERR_SHIFT)
+#define I40E_RCU_DP_SWR_UP_STATUS_STATUS_ECC_FIX_SHIFT 1
+#define I40E_RCU_DP_SWR_UP_STATUS_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_RCU_DP_SWR_UP_STATUS_STATUS_ECC_FIX_SHIFT)
+#define I40E_RCU_DP_SWR_UP_STATUS_STATUS_INIT_DONE_SHIFT 2
+#define I40E_RCU_DP_SWR_UP_STATUS_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCU_DP_SWR_UP_STATUS_STATUS_INIT_DONE_SHIFT)
+#define I40E_RCU_DP_SWR_UP_STATUS_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_RCU_DP_SWR_UP_STATUS_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCU_DP_SWR_UP_STATUS_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_RCU_DP_TFIFO_CFG 0x0026CBD4 /* Reset: POR */
+#define I40E_RCU_DP_TFIFO_CFG_ECC_EN_SHIFT 0
+#define I40E_RCU_DP_TFIFO_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_RCU_DP_TFIFO_CFG_ECC_EN_SHIFT)
+#define I40E_RCU_DP_TFIFO_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_RCU_DP_TFIFO_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_RCU_DP_TFIFO_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_RCU_DP_TFIFO_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_RCU_DP_TFIFO_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_RCU_DP_TFIFO_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_RCU_DP_TFIFO_CFG_LS_FORCE_SHIFT 3
+#define I40E_RCU_DP_TFIFO_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_RCU_DP_TFIFO_CFG_LS_FORCE_SHIFT)
+#define I40E_RCU_DP_TFIFO_CFG_LS_BYPASS_SHIFT 4
+#define I40E_RCU_DP_TFIFO_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_RCU_DP_TFIFO_CFG_LS_BYPASS_SHIFT)
+#define I40E_RCU_DP_TFIFO_CFG_MASK_INT_SHIFT 5
+#define I40E_RCU_DP_TFIFO_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_RCU_DP_TFIFO_CFG_MASK_INT_SHIFT)
+#define I40E_RCU_DP_TFIFO_CFG_FIX_CNT_SHIFT 8
+#define I40E_RCU_DP_TFIFO_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_RCU_DP_TFIFO_CFG_FIX_CNT_SHIFT)
+#define I40E_RCU_DP_TFIFO_CFG_ERR_CNT_SHIFT 9
+#define I40E_RCU_DP_TFIFO_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_RCU_DP_TFIFO_CFG_ERR_CNT_SHIFT)
+#define I40E_RCU_DP_TFIFO_CFG_RME_SHIFT 12
+#define I40E_RCU_DP_TFIFO_CFG_RME_MASK I40E_MASK(0x1, I40E_RCU_DP_TFIFO_CFG_RME_SHIFT)
+#define I40E_RCU_DP_TFIFO_CFG_RM_SHIFT 16
+#define I40E_RCU_DP_TFIFO_CFG_RM_MASK I40E_MASK(0xF, I40E_RCU_DP_TFIFO_CFG_RM_SHIFT)
+
+#define I40E_RCU_DP_TFIFO_STATUS 0x0026CBE4 /* Reset: POR */
+#define I40E_RCU_DP_TFIFO_STATUS_ECC_ERR_SHIFT 0
+#define I40E_RCU_DP_TFIFO_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_RCU_DP_TFIFO_STATUS_ECC_ERR_SHIFT)
+#define I40E_RCU_DP_TFIFO_STATUS_ECC_FIX_SHIFT 1
+#define I40E_RCU_DP_TFIFO_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_RCU_DP_TFIFO_STATUS_ECC_FIX_SHIFT)
+#define I40E_RCU_DP_TFIFO_STATUS_INIT_DONE_SHIFT 2
+#define I40E_RCU_DP_TFIFO_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCU_DP_TFIFO_STATUS_INIT_DONE_SHIFT)
+#define I40E_RCU_DP_TFIFO_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_RCU_DP_TFIFO_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCU_DP_TFIFO_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_RCU_FCOE_PCTYPE_OVR_CTL 0x0026CC28 /* Reset: CORER */
+#define I40E_RCU_FCOE_PCTYPE_OVR_CTL_OVERRIDE_METHOD_SHIFT 0
+#define I40E_RCU_FCOE_PCTYPE_OVR_CTL_OVERRIDE_METHOD_MASK I40E_MASK(0xF, I40E_RCU_FCOE_PCTYPE_OVR_CTL_OVERRIDE_METHOD_SHIFT)
+#define I40E_RCU_FCOE_PCTYPE_OVR_CTL_ACTIVE_EXCHANGE_CONTEXT_IDX_SHIFT 4
+#define I40E_RCU_FCOE_PCTYPE_OVR_CTL_ACTIVE_EXCHANGE_CONTEXT_IDX_MASK I40E_MASK(0x7, I40E_RCU_FCOE_PCTYPE_OVR_CTL_ACTIVE_EXCHANGE_CONTEXT_IDX_SHIFT)
+#define I40E_RCU_FCOE_PCTYPE_OVR_CTL_INACTIVE_EXCHANGE_CONTEXT_IDX_SHIFT 8
+#define I40E_RCU_FCOE_PCTYPE_OVR_CTL_INACTIVE_EXCHANGE_CONTEXT_IDX_MASK I40E_MASK(0x7, I40E_RCU_FCOE_PCTYPE_OVR_CTL_INACTIVE_EXCHANGE_CONTEXT_IDX_SHIFT)
+#define I40E_RCU_FCOE_PCTYPE_OVR_CTL_ACTIVE_SEQUENCE_CONTEXT_IDX_SHIFT 12
+#define I40E_RCU_FCOE_PCTYPE_OVR_CTL_ACTIVE_SEQUENCE_CONTEXT_IDX_MASK I40E_MASK(0x7, I40E_RCU_FCOE_PCTYPE_OVR_CTL_ACTIVE_SEQUENCE_CONTEXT_IDX_SHIFT)
+#define I40E_RCU_FCOE_PCTYPE_OVR_CTL_INACTIVE_SEQUENCE_CONTEXT_IDX_SHIFT 16
+#define I40E_RCU_FCOE_PCTYPE_OVR_CTL_INACTIVE_SEQUENCE_CONTEXT_IDX_MASK I40E_MASK(0x7, I40E_RCU_FCOE_PCTYPE_OVR_CTL_INACTIVE_SEQUENCE_CONTEXT_IDX_SHIFT)
+#define I40E_RCU_FCOE_PCTYPE_OVR_CTL_MANUAL_IDX_SHIFT 20
+#define I40E_RCU_FCOE_PCTYPE_OVR_CTL_MANUAL_IDX_MASK I40E_MASK(0x7, I40E_RCU_FCOE_PCTYPE_OVR_CTL_MANUAL_IDX_SHIFT)
+
+#define I40E_RCU_FD_CNT_CFG 0x0026CB04 /* Reset: POR */
+#define I40E_RCU_FD_CNT_CFG_ECC_EN_SHIFT 0
+#define I40E_RCU_FD_CNT_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_RCU_FD_CNT_CFG_ECC_EN_SHIFT)
+#define I40E_RCU_FD_CNT_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_RCU_FD_CNT_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_RCU_FD_CNT_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_RCU_FD_CNT_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_RCU_FD_CNT_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_RCU_FD_CNT_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_RCU_FD_CNT_CFG_LS_FORCE_SHIFT 3
+#define I40E_RCU_FD_CNT_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_RCU_FD_CNT_CFG_LS_FORCE_SHIFT)
+#define I40E_RCU_FD_CNT_CFG_LS_BYPASS_SHIFT 4
+#define I40E_RCU_FD_CNT_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_RCU_FD_CNT_CFG_LS_BYPASS_SHIFT)
+#define I40E_RCU_FD_CNT_CFG_MASK_INT_SHIFT 5
+#define I40E_RCU_FD_CNT_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_RCU_FD_CNT_CFG_MASK_INT_SHIFT)
+#define I40E_RCU_FD_CNT_CFG_FIX_CNT_SHIFT 8
+#define I40E_RCU_FD_CNT_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_RCU_FD_CNT_CFG_FIX_CNT_SHIFT)
+#define I40E_RCU_FD_CNT_CFG_ERR_CNT_SHIFT 9
+#define I40E_RCU_FD_CNT_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_RCU_FD_CNT_CFG_ERR_CNT_SHIFT)
+#define I40E_RCU_FD_CNT_CFG_RME_SHIFT 12
+#define I40E_RCU_FD_CNT_CFG_RME_MASK I40E_MASK(0x1, I40E_RCU_FD_CNT_CFG_RME_SHIFT)
+#define I40E_RCU_FD_CNT_CFG_RM_SHIFT 16
+#define I40E_RCU_FD_CNT_CFG_RM_MASK I40E_MASK(0xF, I40E_RCU_FD_CNT_CFG_RM_SHIFT)
+
+#define I40E_RCU_FD_CNT_STATUS 0x0026CB0C /* Reset: POR */
+#define I40E_RCU_FD_CNT_STATUS_ECC_ERR_SHIFT 0
+#define I40E_RCU_FD_CNT_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_RCU_FD_CNT_STATUS_ECC_ERR_SHIFT)
+#define I40E_RCU_FD_CNT_STATUS_ECC_FIX_SHIFT 1
+#define I40E_RCU_FD_CNT_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_RCU_FD_CNT_STATUS_ECC_FIX_SHIFT)
+#define I40E_RCU_FD_CNT_STATUS_INIT_DONE_SHIFT 2
+#define I40E_RCU_FD_CNT_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCU_FD_CNT_STATUS_INIT_DONE_SHIFT)
+#define I40E_RCU_FD_CNT_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_RCU_FD_CNT_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCU_FD_CNT_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_RCU_FD_FLU_LUT_CFG 0x0026CB14 /* Reset: POR */
+#define I40E_RCU_FD_FLU_LUT_CFG_ECC_EN_SHIFT 0
+#define I40E_RCU_FD_FLU_LUT_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_RCU_FD_FLU_LUT_CFG_ECC_EN_SHIFT)
+#define I40E_RCU_FD_FLU_LUT_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_RCU_FD_FLU_LUT_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_RCU_FD_FLU_LUT_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_RCU_FD_FLU_LUT_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_RCU_FD_FLU_LUT_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_RCU_FD_FLU_LUT_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_RCU_FD_FLU_LUT_CFG_LS_FORCE_SHIFT 3
+#define I40E_RCU_FD_FLU_LUT_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_RCU_FD_FLU_LUT_CFG_LS_FORCE_SHIFT)
+#define I40E_RCU_FD_FLU_LUT_CFG_LS_BYPASS_SHIFT 4
+#define I40E_RCU_FD_FLU_LUT_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_RCU_FD_FLU_LUT_CFG_LS_BYPASS_SHIFT)
+#define I40E_RCU_FD_FLU_LUT_CFG_MASK_INT_SHIFT 5
+#define I40E_RCU_FD_FLU_LUT_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_RCU_FD_FLU_LUT_CFG_MASK_INT_SHIFT)
+#define I40E_RCU_FD_FLU_LUT_CFG_FIX_CNT_SHIFT 8
+#define I40E_RCU_FD_FLU_LUT_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_RCU_FD_FLU_LUT_CFG_FIX_CNT_SHIFT)
+#define I40E_RCU_FD_FLU_LUT_CFG_ERR_CNT_SHIFT 9
+#define I40E_RCU_FD_FLU_LUT_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_RCU_FD_FLU_LUT_CFG_ERR_CNT_SHIFT)
+#define I40E_RCU_FD_FLU_LUT_CFG_RME_SHIFT 12
+#define I40E_RCU_FD_FLU_LUT_CFG_RME_MASK I40E_MASK(0x1, I40E_RCU_FD_FLU_LUT_CFG_RME_SHIFT)
+#define I40E_RCU_FD_FLU_LUT_CFG_RM_SHIFT 16
+#define I40E_RCU_FD_FLU_LUT_CFG_RM_MASK I40E_MASK(0xF, I40E_RCU_FD_FLU_LUT_CFG_RM_SHIFT)
+
+#define I40E_RCU_FD_FLU_LUT_STATUS 0x0026CB1C /* Reset: POR */
+#define I40E_RCU_FD_FLU_LUT_STATUS_ECC_ERR_SHIFT 0
+#define I40E_RCU_FD_FLU_LUT_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_RCU_FD_FLU_LUT_STATUS_ECC_ERR_SHIFT)
+#define I40E_RCU_FD_FLU_LUT_STATUS_ECC_FIX_SHIFT 1
+#define I40E_RCU_FD_FLU_LUT_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_RCU_FD_FLU_LUT_STATUS_ECC_FIX_SHIFT)
+#define I40E_RCU_FD_FLU_LUT_STATUS_INIT_DONE_SHIFT 2
+#define I40E_RCU_FD_FLU_LUT_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCU_FD_FLU_LUT_STATUS_INIT_DONE_SHIFT)
+#define I40E_RCU_FD_FLU_LUT_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_RCU_FD_FLU_LUT_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCU_FD_FLU_LUT_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_RCU_FOC_TAILS_CFG 0x00269ADC /* Reset: POR */
+#define I40E_RCU_FOC_TAILS_CFG_ECC_EN_SHIFT 0
+#define I40E_RCU_FOC_TAILS_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_RCU_FOC_TAILS_CFG_ECC_EN_SHIFT)
+#define I40E_RCU_FOC_TAILS_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_RCU_FOC_TAILS_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_RCU_FOC_TAILS_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_RCU_FOC_TAILS_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_RCU_FOC_TAILS_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_RCU_FOC_TAILS_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_RCU_FOC_TAILS_CFG_LS_FORCE_SHIFT 3
+#define I40E_RCU_FOC_TAILS_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_RCU_FOC_TAILS_CFG_LS_FORCE_SHIFT)
+#define I40E_RCU_FOC_TAILS_CFG_LS_BYPASS_SHIFT 4
+#define I40E_RCU_FOC_TAILS_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_RCU_FOC_TAILS_CFG_LS_BYPASS_SHIFT)
+#define I40E_RCU_FOC_TAILS_CFG_MASK_INT_SHIFT 5
+#define I40E_RCU_FOC_TAILS_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_RCU_FOC_TAILS_CFG_MASK_INT_SHIFT)
+#define I40E_RCU_FOC_TAILS_CFG_FIX_CNT_SHIFT 8
+#define I40E_RCU_FOC_TAILS_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_RCU_FOC_TAILS_CFG_FIX_CNT_SHIFT)
+#define I40E_RCU_FOC_TAILS_CFG_ERR_CNT_SHIFT 9
+#define I40E_RCU_FOC_TAILS_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_RCU_FOC_TAILS_CFG_ERR_CNT_SHIFT)
+#define I40E_RCU_FOC_TAILS_CFG_RME_SHIFT 12
+#define I40E_RCU_FOC_TAILS_CFG_RME_MASK I40E_MASK(0x1, I40E_RCU_FOC_TAILS_CFG_RME_SHIFT)
+#define I40E_RCU_FOC_TAILS_CFG_RM_SHIFT 16
+#define I40E_RCU_FOC_TAILS_CFG_RM_MASK I40E_MASK(0xF, I40E_RCU_FOC_TAILS_CFG_RM_SHIFT)
+
+#define I40E_RCU_FOC_TAILS_STATUS 0x00269B2C /* Reset: POR */
+#define I40E_RCU_FOC_TAILS_STATUS_ECC_ERR_SHIFT 0
+#define I40E_RCU_FOC_TAILS_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_RCU_FOC_TAILS_STATUS_ECC_ERR_SHIFT)
+#define I40E_RCU_FOC_TAILS_STATUS_ECC_FIX_SHIFT 1
+#define I40E_RCU_FOC_TAILS_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_RCU_FOC_TAILS_STATUS_ECC_FIX_SHIFT)
+#define I40E_RCU_FOC_TAILS_STATUS_INIT_DONE_SHIFT 2
+#define I40E_RCU_FOC_TAILS_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCU_FOC_TAILS_STATUS_INIT_DONE_SHIFT)
+#define I40E_RCU_FOC_TAILS_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_RCU_FOC_TAILS_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCU_FOC_TAILS_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_RCU_PST_DBG_CTL 0x0026CC24 /* Reset: CORER */
+#define I40E_RCU_PST_DBG_CTL_IGNORE_FLR_SHIFT 0
+#define I40E_RCU_PST_DBG_CTL_IGNORE_FLR_MASK I40E_MASK(0x1, I40E_RCU_PST_DBG_CTL_IGNORE_FLR_SHIFT)
+#define I40E_RCU_PST_DBG_CTL_IGNORE_VFLR_SHIFT 1
+#define I40E_RCU_PST_DBG_CTL_IGNORE_VFLR_MASK I40E_MASK(0x1, I40E_RCU_PST_DBG_CTL_IGNORE_VFLR_SHIFT)
+#define I40E_RCU_PST_DBG_CTL_IGNORE_VMLR_SHIFT 2
+#define I40E_RCU_PST_DBG_CTL_IGNORE_VMLR_MASK I40E_MASK(0x1, I40E_RCU_PST_DBG_CTL_IGNORE_VMLR_SHIFT)
+#define I40E_RCU_PST_DBG_CTL_USE_PCTYPE_FCOE_SHIFT 3
+#define I40E_RCU_PST_DBG_CTL_USE_PCTYPE_FCOE_MASK I40E_MASK(0x1, I40E_RCU_PST_DBG_CTL_USE_PCTYPE_FCOE_SHIFT)
+#define I40E_RCU_PST_DBG_CTL_IGNORE_ETH_HIT_SHIFT 4
+#define I40E_RCU_PST_DBG_CTL_IGNORE_ETH_HIT_MASK I40E_MASK(0x1, I40E_RCU_PST_DBG_CTL_IGNORE_ETH_HIT_SHIFT)
+#define I40E_RCU_PST_DBG_CTL_IGNORE_MAC_VLAN_HIT_SHIFT 5
+#define I40E_RCU_PST_DBG_CTL_IGNORE_MAC_VLAN_HIT_MASK I40E_MASK(0x1, I40E_RCU_PST_DBG_CTL_IGNORE_MAC_VLAN_HIT_SHIFT)
+#define I40E_RCU_PST_DBG_CTL_IGNORE_SWR_DROP_SHIFT 6
+#define I40E_RCU_PST_DBG_CTL_IGNORE_SWR_DROP_MASK I40E_MASK(0x1, I40E_RCU_PST_DBG_CTL_IGNORE_SWR_DROP_SHIFT)
+#define I40E_RCU_PST_DBG_CTL_HOLD_FLU_JOBS_SHIFT 7
+#define I40E_RCU_PST_DBG_CTL_HOLD_FLU_JOBS_MASK I40E_MASK(0x1, I40E_RCU_PST_DBG_CTL_HOLD_FLU_JOBS_SHIFT)
+#define I40E_RCU_PST_DBG_CTL_FC_HASH_BASE_SHIFT 8
+#define I40E_RCU_PST_DBG_CTL_FC_HASH_BASE_MASK I40E_MASK(0xF, I40E_RCU_PST_DBG_CTL_FC_HASH_BASE_SHIFT)
+#define I40E_RCU_PST_DBG_CTL_PE_HASH_BASE_SHIFT 12
+#define I40E_RCU_PST_DBG_CTL_PE_HASH_BASE_MASK I40E_MASK(0xF, I40E_RCU_PST_DBG_CTL_PE_HASH_BASE_SHIFT)
+#define I40E_RCU_PST_DBG_CTL_FD_HASH_BASE_SHIFT 16
+#define I40E_RCU_PST_DBG_CTL_FD_HASH_BASE_MASK I40E_MASK(0xF, I40E_RCU_PST_DBG_CTL_FD_HASH_BASE_SHIFT)
+#define I40E_RCU_PST_DBG_CTL_FOC_CNTX_LIMIT_BASE_SHIFT 20
+#define I40E_RCU_PST_DBG_CTL_FOC_CNTX_LIMIT_BASE_MASK I40E_MASK(0xF, I40E_RCU_PST_DBG_CTL_FOC_CNTX_LIMIT_BASE_SHIFT)
+#define I40E_RCU_PST_DBG_CTL_ERR_COMP_DIS_SHIFT 24
+#define I40E_RCU_PST_DBG_CTL_ERR_COMP_DIS_MASK I40E_MASK(0x1, I40E_RCU_PST_DBG_CTL_ERR_COMP_DIS_SHIFT)
+#define I40E_RCU_PST_DBG_CTL_REM_COMP_DIS_SHIFT 25
+#define I40E_RCU_PST_DBG_CTL_REM_COMP_DIS_MASK I40E_MASK(0x1, I40E_RCU_PST_DBG_CTL_REM_COMP_DIS_SHIFT)
+#define I40E_RCU_PST_DBG_CTL_HOLD_PST_INPUT_SHIFT 28
+#define I40E_RCU_PST_DBG_CTL_HOLD_PST_INPUT_MASK I40E_MASK(0x1, I40E_RCU_PST_DBG_CTL_HOLD_PST_INPUT_SHIFT)
+#define I40E_RCU_PST_DBG_CTL_INC_INPUT_CMD_SHIFT 29
+#define I40E_RCU_PST_DBG_CTL_INC_INPUT_CMD_MASK I40E_MASK(0x1, I40E_RCU_PST_DBG_CTL_INC_INPUT_CMD_SHIFT)
+
+#define I40E_RCU_PST_DBG_DROP_CNT 0x0026CBEC /* Reset: CORER */
+#define I40E_RCU_PST_DBG_DROP_CNT_FD_DROP_CNT_SHIFT 0
+#define I40E_RCU_PST_DBG_DROP_CNT_FD_DROP_CNT_MASK I40E_MASK(0xFF, I40E_RCU_PST_DBG_DROP_CNT_FD_DROP_CNT_SHIFT)
+#define I40E_RCU_PST_DBG_DROP_CNT_FLR_DROP_CNT_SHIFT 8
+#define I40E_RCU_PST_DBG_DROP_CNT_FLR_DROP_CNT_MASK I40E_MASK(0xFF, I40E_RCU_PST_DBG_DROP_CNT_FLR_DROP_CNT_SHIFT)
+#define I40E_RCU_PST_DBG_DROP_CNT_PF_BOUND_DROP_CNT_SHIFT 16
+#define I40E_RCU_PST_DBG_DROP_CNT_PF_BOUND_DROP_CNT_MASK I40E_MASK(0xFF, I40E_RCU_PST_DBG_DROP_CNT_PF_BOUND_DROP_CNT_SHIFT)
+#define I40E_RCU_PST_DBG_DROP_CNT_SWR_DROP_CNT_SHIFT 24
+#define I40E_RCU_PST_DBG_DROP_CNT_SWR_DROP_CNT_MASK I40E_MASK(0xFF, I40E_RCU_PST_DBG_DROP_CNT_SWR_DROP_CNT_SHIFT)
+
+#define I40E_RCU_PST_DBG_FLU_STATE(_i) (0x0026CB80 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_RCU_PST_DBG_FLU_STATE_MAX_INDEX 15
+#define I40E_RCU_PST_DBG_FLU_STATE_FLU_STATE_SHIFT 0
+#define I40E_RCU_PST_DBG_FLU_STATE_FLU_STATE_MASK I40E_MASK(0x1F, I40E_RCU_PST_DBG_FLU_STATE_FLU_STATE_SHIFT)
+#define I40E_RCU_PST_DBG_FLU_STATE_FLU_HASH_SHIFT 5
+#define I40E_RCU_PST_DBG_FLU_STATE_FLU_HASH_MASK I40E_MASK(0xFFFFF, I40E_RCU_PST_DBG_FLU_STATE_FLU_HASH_SHIFT)
+#define I40E_RCU_PST_DBG_FLU_STATE_FLU_OBJ_SHIFT 25
+#define I40E_RCU_PST_DBG_FLU_STATE_FLU_OBJ_MASK I40E_MASK(0x7, I40E_RCU_PST_DBG_FLU_STATE_FLU_OBJ_SHIFT)
+#define I40E_RCU_PST_DBG_FLU_STATE_FLU_CMD_SHIFT 28
+#define I40E_RCU_PST_DBG_FLU_STATE_FLU_CMD_MASK I40E_MASK(0xF, I40E_RCU_PST_DBG_FLU_STATE_FLU_CMD_SHIFT)
+
+#define I40E_RCU_PST_DBG_Q_SRC_CNT_0 0x0026CC14 /* Reset: CORER */
+#define I40E_RCU_PST_DBG_Q_SRC_CNT_0_CONF_FAIL_CNT_SHIFT 0
+#define I40E_RCU_PST_DBG_Q_SRC_CNT_0_CONF_FAIL_CNT_MASK I40E_MASK(0xFF, I40E_RCU_PST_DBG_Q_SRC_CNT_0_CONF_FAIL_CNT_SHIFT)
+#define I40E_RCU_PST_DBG_Q_SRC_CNT_0_QUAD_HIT_CNT_SHIFT 8
+#define I40E_RCU_PST_DBG_Q_SRC_CNT_0_QUAD_HIT_CNT_MASK I40E_MASK(0xFF, I40E_RCU_PST_DBG_Q_SRC_CNT_0_QUAD_HIT_CNT_SHIFT)
+#define I40E_RCU_PST_DBG_Q_SRC_CNT_0_ETH_HIT_CNT_SHIFT 16
+#define I40E_RCU_PST_DBG_Q_SRC_CNT_0_ETH_HIT_CNT_MASK I40E_MASK(0xFF, I40E_RCU_PST_DBG_Q_SRC_CNT_0_ETH_HIT_CNT_SHIFT)
+#define I40E_RCU_PST_DBG_Q_SRC_CNT_0_FCOE_CNT_SHIFT 24
+#define I40E_RCU_PST_DBG_Q_SRC_CNT_0_FCOE_CNT_MASK I40E_MASK(0xFF, I40E_RCU_PST_DBG_Q_SRC_CNT_0_FCOE_CNT_SHIFT)
+
+#define I40E_RCU_PST_DBG_Q_SRC_CNT_1 0x0026CC1C /* Reset: CORER */
+#define I40E_RCU_PST_DBG_Q_SRC_CNT_1_FD_HIT_CNT_SHIFT 0
+#define I40E_RCU_PST_DBG_Q_SRC_CNT_1_FD_HIT_CNT_MASK I40E_MASK(0xFF, I40E_RCU_PST_DBG_Q_SRC_CNT_1_FD_HIT_CNT_SHIFT)
+#define I40E_RCU_PST_DBG_Q_SRC_CNT_1_MAC_VLAN_CNT_SHIFT 8
+#define I40E_RCU_PST_DBG_Q_SRC_CNT_1_MAC_VLAN_CNT_MASK I40E_MASK(0xFF, I40E_RCU_PST_DBG_Q_SRC_CNT_1_MAC_VLAN_CNT_SHIFT)
+#define I40E_RCU_PST_DBG_Q_SRC_CNT_1_RSS_CNT_SHIFT 16
+#define I40E_RCU_PST_DBG_Q_SRC_CNT_1_RSS_CNT_MASK I40E_MASK(0xFF, I40E_RCU_PST_DBG_Q_SRC_CNT_1_RSS_CNT_SHIFT)
+#define I40E_RCU_PST_DBG_Q_SRC_CNT_1_DEFAULT_CNT_SHIFT 24
+#define I40E_RCU_PST_DBG_Q_SRC_CNT_1_DEFAULT_CNT_MASK I40E_MASK(0xFF, I40E_RCU_PST_DBG_Q_SRC_CNT_1_DEFAULT_CNT_SHIFT)
+
+#define I40E_RCU_PST_DBG_STATUS_0 0x0026CC04 /* Reset: CORER */
+#define I40E_RCU_PST_DBG_STATUS_0_PST_FLR_STAT_SHIFT 0
+#define I40E_RCU_PST_DBG_STATUS_0_PST_FLR_STAT_MASK I40E_MASK(0xFFFF, I40E_RCU_PST_DBG_STATUS_0_PST_FLR_STAT_SHIFT)
+#define I40E_RCU_PST_DBG_STATUS_0_INPUT_FIFO_OCC_SHIFT 16
+#define I40E_RCU_PST_DBG_STATUS_0_INPUT_FIFO_OCC_MASK I40E_MASK(0x3, I40E_RCU_PST_DBG_STATUS_0_INPUT_FIFO_OCC_SHIFT)
+
+#define I40E_RCU_PST_DBG_STATUS_1 0x0026CC0C /* Reset: CORER */
+#define I40E_RCU_PST_DBG_STATUS_1_FLR_FLOW_START_SHIFT 0
+#define I40E_RCU_PST_DBG_STATUS_1_FLR_FLOW_START_MASK I40E_MASK(0xFFFF, I40E_RCU_PST_DBG_STATUS_1_FLR_FLOW_START_SHIFT)
+#define I40E_RCU_PST_DBG_STATUS_1_FLR_FLOW_DONE_SHIFT 16
+#define I40E_RCU_PST_DBG_STATUS_1_FLR_FLOW_DONE_MASK I40E_MASK(0xFFFF, I40E_RCU_PST_DBG_STATUS_1_FLR_FLOW_DONE_SHIFT)
+
+#define I40E_RCU_PST_ECC_COR_ERR 0x0026CBC4 /* Reset: POR */
+#define I40E_RCU_PST_ECC_COR_ERR_CNT_SHIFT 0
+#define I40E_RCU_PST_ECC_COR_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_RCU_PST_ECC_COR_ERR_CNT_SHIFT)
+
+#define I40E_RCU_PST_ECC_UNCOR_ERR 0x0026CBCC /* Reset: POR */
+#define I40E_RCU_PST_ECC_UNCOR_ERR_CNT_SHIFT 0
+#define I40E_RCU_PST_ECC_UNCOR_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_RCU_PST_ECC_UNCOR_ERR_CNT_SHIFT)
+
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN 0x0026CC08 /* Reset: CORER */
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_GEN_FILTER_EN_SHIFT 0
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_GEN_FILTER_EN_MASK I40E_MASK(0x1, I40E_RCU_PST_FCOE_CNTX_CHK_EN_GEN_FILTER_EN_SHIFT)
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_GEN_SOF2_CLASS_SHIFT 1
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_GEN_SOF2_CLASS_MASK I40E_MASK(0x1, I40E_RCU_PST_FCOE_CNTX_CHK_EN_GEN_SOF2_CLASS_SHIFT)
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_GEN_SOF3_CLASS_SHIFT 2
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_GEN_SOF3_CLASS_MASK I40E_MASK(0x1, I40E_RCU_PST_FCOE_CNTX_CHK_EN_GEN_SOF3_CLASS_SHIFT)
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_GEN_EOFA_EOFI_SHIFT 3
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_GEN_EOFA_EOFI_MASK I40E_MASK(0x1, I40E_RCU_PST_FCOE_CNTX_CHK_EN_GEN_EOFA_EOFI_SHIFT)
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_GEN_FIRST_NO_SOFI_SHIFT 4
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_GEN_FIRST_NO_SOFI_MASK I40E_MASK(0x1, I40E_RCU_PST_FCOE_CNTX_CHK_EN_GEN_FIRST_NO_SOFI_SHIFT)
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_GEN_MID_SOFI_SHIFT 5
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_GEN_MID_SOFI_MASK I40E_MASK(0x1, I40E_RCU_PST_FCOE_CNTX_CHK_EN_GEN_MID_SOFI_SHIFT)
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_GEN_NOT_NEW_SEQ_ID_SHIFT 6
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_GEN_NOT_NEW_SEQ_ID_MASK I40E_MASK(0x1, I40E_RCU_PST_FCOE_CNTX_CHK_EN_GEN_NOT_NEW_SEQ_ID_SHIFT)
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_GEN_NEW_SEQ_ID_SHIFT 7
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_GEN_NEW_SEQ_ID_MASK I40E_MASK(0x1, I40E_RCU_PST_FCOE_CNTX_CHK_EN_GEN_NEW_SEQ_ID_SHIFT)
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_GEN_NEW_SEQ_CNT_SHIFT 8
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_GEN_NEW_SEQ_CNT_MASK I40E_MASK(0x1, I40E_RCU_PST_FCOE_CNTX_CHK_EN_GEN_NEW_SEQ_CNT_SHIFT)
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_GEN_DIFF_SEQ_CNT_SHIFT 9
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_GEN_DIFF_SEQ_CNT_MASK I40E_MASK(0x1, I40E_RCU_PST_FCOE_CNTX_CHK_EN_GEN_DIFF_SEQ_CNT_SHIFT)
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_RSP_SOF_CLASS_SHIFT 11
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_RSP_SOF_CLASS_MASK I40E_MASK(0x1, I40E_RCU_PST_FCOE_CNTX_CHK_EN_RSP_SOF_CLASS_SHIFT)
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_RSP_INITIATOR_SHIFT 12
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_RSP_INITIATOR_MASK I40E_MASK(0x1, I40E_RCU_PST_FCOE_CNTX_CHK_EN_RSP_INITIATOR_SHIFT)
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_RSP_LAST_PKT_SHIFT 13
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_RSP_LAST_PKT_MASK I40E_MASK(0x1, I40E_RCU_PST_FCOE_CNTX_CHK_EN_RSP_LAST_PKT_SHIFT)
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_RSP_EOFT_SHIFT 14
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_RSP_EOFT_MASK I40E_MASK(0x1, I40E_RCU_PST_FCOE_CNTX_CHK_EN_RSP_EOFT_SHIFT)
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_RSP_SEQ_ID_SHIFT 15
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_RSP_SEQ_ID_MASK I40E_MASK(0x1, I40E_RCU_PST_FCOE_CNTX_CHK_EN_RSP_SEQ_ID_SHIFT)
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_RSP_EX_CNTX_SHIFT 16
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_RSP_EX_CNTX_MASK I40E_MASK(0x1, I40E_RCU_PST_FCOE_CNTX_CHK_EN_RSP_EX_CNTX_SHIFT)
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_RSP_SEQ_CNTX_SHIFT 17
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_RSP_SEQ_CNTX_MASK I40E_MASK(0x1, I40E_RCU_PST_FCOE_CNTX_CHK_EN_RSP_SEQ_CNTX_SHIFT)
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_RSP_SEQ_INITIATIVE_SHIFT 18
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_RSP_SEQ_INITIATIVE_MASK I40E_MASK(0x1, I40E_RCU_PST_FCOE_CNTX_CHK_EN_RSP_SEQ_INITIATIVE_SHIFT)
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_RSP_RLT_OFFSET_SHIFT 19
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_RSP_RLT_OFFSET_MASK I40E_MASK(0x1, I40E_RCU_PST_FCOE_CNTX_CHK_EN_RSP_RLT_OFFSET_SHIFT)
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_RSP_FIRST_SEQ_SHIFT 20
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_RSP_FIRST_SEQ_MASK I40E_MASK(0x1, I40E_RCU_PST_FCOE_CNTX_CHK_EN_RSP_FIRST_SEQ_SHIFT)
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_RSP_ABORT_SEQ_SHIFT 21
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_RSP_ABORT_SEQ_MASK I40E_MASK(0x1, I40E_RCU_PST_FCOE_CNTX_CHK_EN_RSP_ABORT_SEQ_SHIFT)
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_RSP_LAST_SEQ_SHIFT 22
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_RSP_LAST_SEQ_MASK I40E_MASK(0x1, I40E_RCU_PST_FCOE_CNTX_CHK_EN_RSP_LAST_SEQ_SHIFT)
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_DATA_NEW_SEQ_ID_SHIFT 23
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_DATA_NEW_SEQ_ID_MASK I40E_MASK(0x1, I40E_RCU_PST_FCOE_CNTX_CHK_EN_DATA_NEW_SEQ_ID_SHIFT)
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_DATA_DIFF_SEQ_ID_SHIFT 24
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_DATA_DIFF_SEQ_ID_MASK I40E_MASK(0x1, I40E_RCU_PST_FCOE_CNTX_CHK_EN_DATA_DIFF_SEQ_ID_SHIFT)
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_DATA_EOFT_SHIFT 25
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_DATA_EOFT_MASK I40E_MASK(0x1, I40E_RCU_PST_FCOE_CNTX_CHK_EN_DATA_EOFT_SHIFT)
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_DATA_EOFN_SHIFT 26
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_DATA_EOFN_MASK I40E_MASK(0x1, I40E_RCU_PST_FCOE_CNTX_CHK_EN_DATA_EOFN_SHIFT)
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_DATA_EX_CNTX_SHIFT 27
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_DATA_EX_CNTX_MASK I40E_MASK(0x1, I40E_RCU_PST_FCOE_CNTX_CHK_EN_DATA_EX_CNTX_SHIFT)
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_DATA_SEQ_CNTX_SHIFT 28
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_DATA_SEQ_CNTX_MASK I40E_MASK(0x1, I40E_RCU_PST_FCOE_CNTX_CHK_EN_DATA_SEQ_CNTX_SHIFT)
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_DATA_PARAM_SHIFT 29
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_DATA_PARAM_MASK I40E_MASK(0x1, I40E_RCU_PST_FCOE_CNTX_CHK_EN_DATA_PARAM_SHIFT)
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_DATA_FIRST_SEQ_SHIFT 30
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_DATA_FIRST_SEQ_MASK I40E_MASK(0x1, I40E_RCU_PST_FCOE_CNTX_CHK_EN_DATA_FIRST_SEQ_SHIFT)
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_DATA_ABORT_SEQ_SHIFT 31
+#define I40E_RCU_PST_FCOE_CNTX_CHK_EN_DATA_ABORT_SEQ_MASK I40E_MASK(0x1, I40E_RCU_PST_FCOE_CNTX_CHK_EN_DATA_ABORT_SEQ_SHIFT)
+
+#define I40E_RCU_PST_FCOE_PROT_CHK_EN 0x0026CC10 /* Reset: CORER */
+#define I40E_RCU_PST_FCOE_PROT_CHK_EN_FCOE_VER_SHIFT 0
+#define I40E_RCU_PST_FCOE_PROT_CHK_EN_FCOE_VER_MASK I40E_MASK(0x1, I40E_RCU_PST_FCOE_PROT_CHK_EN_FCOE_VER_SHIFT)
+#define I40E_RCU_PST_FCOE_PROT_CHK_EN_SOF_VALUE_SHIFT 1
+#define I40E_RCU_PST_FCOE_PROT_CHK_EN_SOF_VALUE_MASK I40E_MASK(0x1, I40E_RCU_PST_FCOE_PROT_CHK_EN_SOF_VALUE_SHIFT)
+#define I40E_RCU_PST_FCOE_PROT_CHK_EN_EOF_VALUE_SHIFT 2
+#define I40E_RCU_PST_FCOE_PROT_CHK_EN_EOF_VALUE_MASK I40E_MASK(0x1, I40E_RCU_PST_FCOE_PROT_CHK_EN_EOF_VALUE_SHIFT)
+#define I40E_RCU_PST_FCOE_PROT_CHK_EN_END_SEQ_EOFT_SHIFT 3
+#define I40E_RCU_PST_FCOE_PROT_CHK_EN_END_SEQ_EOFT_MASK I40E_MASK(0x1, I40E_RCU_PST_FCOE_PROT_CHK_EN_END_SEQ_EOFT_SHIFT)
+#define I40E_RCU_PST_FCOE_PROT_CHK_EN_NO_END_SEQ_NO_EOFT_SHIFT 4
+#define I40E_RCU_PST_FCOE_PROT_CHK_EN_NO_END_SEQ_NO_EOFT_MASK I40E_MASK(0x1, I40E_RCU_PST_FCOE_PROT_CHK_EN_NO_END_SEQ_NO_EOFT_SHIFT)
+#define I40E_RCU_PST_FCOE_PROT_CHK_EN_OBSOLETE_FLAGS_SHIFT 5
+#define I40E_RCU_PST_FCOE_PROT_CHK_EN_OBSOLETE_FLAGS_MASK I40E_MASK(0x1, I40E_RCU_PST_FCOE_PROT_CHK_EN_OBSOLETE_FLAGS_SHIFT)
+#define I40E_RCU_PST_FCOE_PROT_CHK_EN_NOT_FCP_SHIFT 6
+#define I40E_RCU_PST_FCOE_PROT_CHK_EN_NOT_FCP_MASK I40E_MASK(0x1, I40E_RCU_PST_FCOE_PROT_CHK_EN_NOT_FCP_SHIFT)
+#define I40E_RCU_PST_FCOE_PROT_CHK_EN_CRC_ERROR_SHIFT 7
+#define I40E_RCU_PST_FCOE_PROT_CHK_EN_CRC_ERROR_MASK I40E_MASK(0x1, I40E_RCU_PST_FCOE_PROT_CHK_EN_CRC_ERROR_SHIFT)
+#define I40E_RCU_PST_FCOE_PROT_CHK_EN_PKT_SIZE_SHIFT 8
+#define I40E_RCU_PST_FCOE_PROT_CHK_EN_PKT_SIZE_MASK I40E_MASK(0x1, I40E_RCU_PST_FCOE_PROT_CHK_EN_PKT_SIZE_SHIFT)
+#define I40E_RCU_PST_FCOE_PROT_CHK_EN_SEQ_INIT_LAST_SHIFT 9
+#define I40E_RCU_PST_FCOE_PROT_CHK_EN_SEQ_INIT_LAST_MASK I40E_MASK(0x1, I40E_RCU_PST_FCOE_PROT_CHK_EN_SEQ_INIT_LAST_SHIFT)
+#define I40E_RCU_PST_FCOE_PROT_CHK_EN_OPTIONAL_HEADERS_SHIFT 10
+#define I40E_RCU_PST_FCOE_PROT_CHK_EN_OPTIONAL_HEADERS_MASK I40E_MASK(0x1, I40E_RCU_PST_FCOE_PROT_CHK_EN_OPTIONAL_HEADERS_SHIFT)
+
+#define I40E_RCU_PST_FOC_ACCESS_STATUS 0x00270110 /* Reset: CORER */
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_SHIFT 0
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_MASK I40E_MASK(0xFF, I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_SHIFT 8
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_MASK I40E_MASK(0xFF, I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_SHIFT 16
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_MASK I40E_MASK(0xFF, I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_SHIFT 24
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_MASK I40E_MASK(0x7, I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_SHIFT)
+
+#define I40E_RCU_PST_INPUT_ACL_STATUS(_i) (0x00270100 + ((_i) * 4)) /* _i=0...2 */ /* Reset: CORER */
+#define I40E_RCU_PST_INPUT_ACL_STATUS_MAX_INDEX 2
+#define I40E_RCU_PST_INPUT_ACL_STATUS_RCU_PST_INPUT_ACL_STATUS_SHIFT 0
+#define I40E_RCU_PST_INPUT_ACL_STATUS_RCU_PST_INPUT_ACL_STATUS_MASK I40E_MASK(0xFFFFFFFF, I40E_RCU_PST_INPUT_ACL_STATUS_RCU_PST_INPUT_ACL_STATUS_SHIFT)
+
+#define I40E_RCU_PST_INPUT_MTG_FIELDS(_i) (0x00270080 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_RCU_PST_INPUT_MTG_FIELDS_MAX_INDEX 31
+#define I40E_RCU_PST_INPUT_MTG_FIELDS_RCU_PST_INPUT_MTG_FIELDS_SHIFT 0
+#define I40E_RCU_PST_INPUT_MTG_FIELDS_RCU_PST_INPUT_MTG_FIELDS_MASK I40E_MASK(0xFFFFFFFF, I40E_RCU_PST_INPUT_MTG_FIELDS_RCU_PST_INPUT_MTG_FIELDS_SHIFT)
+
+#define I40E_RCU_PST_INPUT_MTG_STATUS(_i) (0x00270060 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_RCU_PST_INPUT_MTG_STATUS_MAX_INDEX 7
+#define I40E_RCU_PST_INPUT_MTG_STATUS_RCU_PST_INPUT_MTG_STATUS_SHIFT 0
+#define I40E_RCU_PST_INPUT_MTG_STATUS_RCU_PST_INPUT_MTG_STATUS_MASK I40E_MASK(0xFFFFFFFF, I40E_RCU_PST_INPUT_MTG_STATUS_RCU_PST_INPUT_MTG_STATUS_SHIFT)
+
+#define I40E_RCU_PST_OUTFIFO_OCC(_i) (0x0026CFE0 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_RCU_PST_OUTFIFO_OCC_MAX_INDEX 7
+#define I40E_RCU_PST_OUTFIFO_OCC_UP_1_OCC_SHIFT 0
+#define I40E_RCU_PST_OUTFIFO_OCC_UP_1_OCC_MASK I40E_MASK(0xFF, I40E_RCU_PST_OUTFIFO_OCC_UP_1_OCC_SHIFT)
+#define I40E_RCU_PST_OUTFIFO_OCC_UP_2_OCC_SHIFT 8
+#define I40E_RCU_PST_OUTFIFO_OCC_UP_2_OCC_MASK I40E_MASK(0xFF, I40E_RCU_PST_OUTFIFO_OCC_UP_2_OCC_SHIFT)
+#define I40E_RCU_PST_OUTFIFO_OCC_UP_3_OCC_SHIFT 16
+#define I40E_RCU_PST_OUTFIFO_OCC_UP_3_OCC_MASK I40E_MASK(0xFF, I40E_RCU_PST_OUTFIFO_OCC_UP_3_OCC_SHIFT)
+#define I40E_RCU_PST_OUTFIFO_OCC_UP_4_OCC_SHIFT 24
+#define I40E_RCU_PST_OUTFIFO_OCC_UP_4_OCC_MASK I40E_MASK(0xFF, I40E_RCU_PST_OUTFIFO_OCC_UP_4_OCC_SHIFT)
+
+#define I40E_RCU_PST_RCB_ACL_STATUS(_i) (0x00270030 + ((_i) * 4)) /* _i=0...2 */ /* Reset: CORER */
+#define I40E_RCU_PST_RCB_ACL_STATUS_MAX_INDEX 2
+#define I40E_RCU_PST_RCB_ACL_STATUS_RCU_PST_RCB_ACL_STATUS_SHIFT 0
+#define I40E_RCU_PST_RCB_ACL_STATUS_RCU_PST_RCB_ACL_STATUS_MASK I40E_MASK(0xFFFFFFFF, I40E_RCU_PST_RCB_ACL_STATUS_RCU_PST_RCB_ACL_STATUS_SHIFT)
+
+#define I40E_RCU_PST_RCB_FIFO_FIELDS(_i) (0x00270000 + ((_i) * 4)) /* _i=0...5 */ /* Reset: CORER */
+#define I40E_RCU_PST_RCB_FIFO_FIELDS_MAX_INDEX 5
+#define I40E_RCU_PST_RCB_FIFO_FIELDS_RCU_PST_RCB_FIFO_FIELDS_SHIFT 0
+#define I40E_RCU_PST_RCB_FIFO_FIELDS_RCU_PST_RCB_FIFO_FIELDS_MASK I40E_MASK(0xFFFFFFFF, I40E_RCU_PST_RCB_FIFO_FIELDS_RCU_PST_RCB_FIFO_FIELDS_SHIFT)
+
+#define I40E_RCU_PST_RCB_FIFO_Q_STATUS(_i) (0x00270020 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_RCU_PST_RCB_FIFO_Q_STATUS_MAX_INDEX 3
+#define I40E_RCU_PST_RCB_FIFO_Q_STATUS_RCU_PST_RCB_FIFO_Q_STATUS_SHIFT 0
+#define I40E_RCU_PST_RCB_FIFO_Q_STATUS_RCU_PST_RCB_FIFO_Q_STATUS_MASK I40E_MASK(0xFFFFFFFF, I40E_RCU_PST_RCB_FIFO_Q_STATUS_RCU_PST_RCB_FIFO_Q_STATUS_SHIFT)
+
+#define I40E_RCU_PST_RCB_MTG_STATUS(_i) (0x00270040 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_RCU_PST_RCB_MTG_STATUS_MAX_INDEX 7
+#define I40E_RCU_PST_RCB_MTG_STATUS_RCU_PST_RCB_MTG_STATUS_SHIFT 0
+#define I40E_RCU_PST_RCB_MTG_STATUS_RCU_PST_RCB_MTG_STATUS_MASK I40E_MASK(0xFFFFFFFF, I40E_RCU_PST_RCB_MTG_STATUS_RCU_PST_RCB_MTG_STATUS_SHIFT)
+
+#define I40E_RCU_PST_RCB_OUT_CTL 0x0026CDA8 /* Reset: CORER */
+#define I40E_RCU_PST_RCB_OUT_CTL_BLOCK_RCB_OUT_SHIFT 0
+#define I40E_RCU_PST_RCB_OUT_CTL_BLOCK_RCB_OUT_MASK I40E_MASK(0x1, I40E_RCU_PST_RCB_OUT_CTL_BLOCK_RCB_OUT_SHIFT)
+#define I40E_RCU_PST_RCB_OUT_CTL_STEP_ONE_CMD_SHIFT 1
+#define I40E_RCU_PST_RCB_OUT_CTL_STEP_ONE_CMD_MASK I40E_MASK(0x1, I40E_RCU_PST_RCB_OUT_CTL_STEP_ONE_CMD_SHIFT)
+
+#define I40E_RCU_PST_RCB_OUT_STAT 0x0026CC18 /* Reset: CORER */
+#define I40E_RCU_PST_RCB_OUT_STAT_RCB_FIFO_OCC_SHIFT 0
+#define I40E_RCU_PST_RCB_OUT_STAT_RCB_FIFO_OCC_MASK I40E_MASK(0xF, I40E_RCU_PST_RCB_OUT_STAT_RCB_FIFO_OCC_SHIFT)
+#define I40E_RCU_PST_RCB_OUT_STAT_NEXT_IPLEN_SHIFT 8
+#define I40E_RCU_PST_RCB_OUT_STAT_NEXT_IPLEN_MASK I40E_MASK(0xFFFF, I40E_RCU_PST_RCB_OUT_STAT_NEXT_IPLEN_SHIFT)
+#define I40E_RCU_PST_RCB_OUT_STAT_NEXT_TYPE_SHIFT 24
+#define I40E_RCU_PST_RCB_OUT_STAT_NEXT_TYPE_MASK I40E_MASK(0x7, I40E_RCU_PST_RCB_OUT_STAT_NEXT_TYPE_SHIFT)
+#define I40E_RCU_PST_RCB_OUT_STAT_NEXT_CFG_ERR_SHIFT 28
+#define I40E_RCU_PST_RCB_OUT_STAT_NEXT_CFG_ERR_MASK I40E_MASK(0x1, I40E_RCU_PST_RCB_OUT_STAT_NEXT_CFG_ERR_SHIFT)
+#define I40E_RCU_PST_RCB_OUT_STAT_RSV3_SHIFT 29
+#define I40E_RCU_PST_RCB_OUT_STAT_RSV3_MASK I40E_MASK(0x7, I40E_RCU_PST_RCB_OUT_STAT_RSV3_SHIFT)
+
+#define I40E_RCU_PST_TFIFO_CFG 0x00269B54 /* Reset: POR */
+#define I40E_RCU_PST_TFIFO_CFG_ECC_EN_SHIFT 0
+#define I40E_RCU_PST_TFIFO_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_RCU_PST_TFIFO_CFG_ECC_EN_SHIFT)
+#define I40E_RCU_PST_TFIFO_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_RCU_PST_TFIFO_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_RCU_PST_TFIFO_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_RCU_PST_TFIFO_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_RCU_PST_TFIFO_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_RCU_PST_TFIFO_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_RCU_PST_TFIFO_CFG_LS_FORCE_SHIFT 3
+#define I40E_RCU_PST_TFIFO_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_RCU_PST_TFIFO_CFG_LS_FORCE_SHIFT)
+#define I40E_RCU_PST_TFIFO_CFG_LS_BYPASS_SHIFT 4
+#define I40E_RCU_PST_TFIFO_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_RCU_PST_TFIFO_CFG_LS_BYPASS_SHIFT)
+#define I40E_RCU_PST_TFIFO_CFG_MASK_INT_SHIFT 5
+#define I40E_RCU_PST_TFIFO_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_RCU_PST_TFIFO_CFG_MASK_INT_SHIFT)
+#define I40E_RCU_PST_TFIFO_CFG_FIX_CNT_SHIFT 8
+#define I40E_RCU_PST_TFIFO_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_RCU_PST_TFIFO_CFG_FIX_CNT_SHIFT)
+#define I40E_RCU_PST_TFIFO_CFG_ERR_CNT_SHIFT 9
+#define I40E_RCU_PST_TFIFO_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_RCU_PST_TFIFO_CFG_ERR_CNT_SHIFT)
+#define I40E_RCU_PST_TFIFO_CFG_RME_SHIFT 12
+#define I40E_RCU_PST_TFIFO_CFG_RME_MASK I40E_MASK(0x1, I40E_RCU_PST_TFIFO_CFG_RME_SHIFT)
+#define I40E_RCU_PST_TFIFO_CFG_RM_SHIFT 16
+#define I40E_RCU_PST_TFIFO_CFG_RM_MASK I40E_MASK(0xF, I40E_RCU_PST_TFIFO_CFG_RM_SHIFT)
+
+#define I40E_RCU_PST_TFIFO_STATUS 0x00269B5C /* Reset: POR */
+#define I40E_RCU_PST_TFIFO_STATUS_ECC_ERR_SHIFT 0
+#define I40E_RCU_PST_TFIFO_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_RCU_PST_TFIFO_STATUS_ECC_ERR_SHIFT)
+#define I40E_RCU_PST_TFIFO_STATUS_ECC_FIX_SHIFT 1
+#define I40E_RCU_PST_TFIFO_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_RCU_PST_TFIFO_STATUS_ECC_FIX_SHIFT)
+#define I40E_RCU_PST_TFIFO_STATUS_INIT_DONE_SHIFT 2
+#define I40E_RCU_PST_TFIFO_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCU_PST_TFIFO_STATUS_INIT_DONE_SHIFT)
+#define I40E_RCU_PST_TFIFO_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_RCU_PST_TFIFO_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCU_PST_TFIFO_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_RCU_SP_BIG_FLU_CFG 0x0026CDA4 /* Reset: POR */
+#define I40E_RCU_SP_BIG_FLU_CFG_ECC_EN_SHIFT 0
+#define I40E_RCU_SP_BIG_FLU_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_RCU_SP_BIG_FLU_CFG_ECC_EN_SHIFT)
+#define I40E_RCU_SP_BIG_FLU_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_RCU_SP_BIG_FLU_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_RCU_SP_BIG_FLU_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_RCU_SP_BIG_FLU_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_RCU_SP_BIG_FLU_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_RCU_SP_BIG_FLU_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_RCU_SP_BIG_FLU_CFG_LS_FORCE_SHIFT 3
+#define I40E_RCU_SP_BIG_FLU_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_RCU_SP_BIG_FLU_CFG_LS_FORCE_SHIFT)
+#define I40E_RCU_SP_BIG_FLU_CFG_LS_BYPASS_SHIFT 4
+#define I40E_RCU_SP_BIG_FLU_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_RCU_SP_BIG_FLU_CFG_LS_BYPASS_SHIFT)
+#define I40E_RCU_SP_BIG_FLU_CFG_MASK_INT_SHIFT 5
+#define I40E_RCU_SP_BIG_FLU_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_RCU_SP_BIG_FLU_CFG_MASK_INT_SHIFT)
+#define I40E_RCU_SP_BIG_FLU_CFG_FIX_CNT_SHIFT 8
+#define I40E_RCU_SP_BIG_FLU_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_RCU_SP_BIG_FLU_CFG_FIX_CNT_SHIFT)
+#define I40E_RCU_SP_BIG_FLU_CFG_ERR_CNT_SHIFT 9
+#define I40E_RCU_SP_BIG_FLU_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_RCU_SP_BIG_FLU_CFG_ERR_CNT_SHIFT)
+#define I40E_RCU_SP_BIG_FLU_CFG_RME_SHIFT 12
+#define I40E_RCU_SP_BIG_FLU_CFG_RME_MASK I40E_MASK(0x1, I40E_RCU_SP_BIG_FLU_CFG_RME_SHIFT)
+#define I40E_RCU_SP_BIG_FLU_CFG_RM_SHIFT 16
+#define I40E_RCU_SP_BIG_FLU_CFG_RM_MASK I40E_MASK(0xF, I40E_RCU_SP_BIG_FLU_CFG_RM_SHIFT)
+
+#define I40E_RCU_SP_BIG_FLU_STATUS 0x0026CDAC /* Reset: POR */
+#define I40E_RCU_SP_BIG_FLU_STATUS_ECC_ERR_SHIFT 0
+#define I40E_RCU_SP_BIG_FLU_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_RCU_SP_BIG_FLU_STATUS_ECC_ERR_SHIFT)
+#define I40E_RCU_SP_BIG_FLU_STATUS_ECC_FIX_SHIFT 1
+#define I40E_RCU_SP_BIG_FLU_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_RCU_SP_BIG_FLU_STATUS_ECC_FIX_SHIFT)
+#define I40E_RCU_SP_BIG_FLU_STATUS_INIT_DONE_SHIFT 2
+#define I40E_RCU_SP_BIG_FLU_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCU_SP_BIG_FLU_STATUS_INIT_DONE_SHIFT)
+#define I40E_RCU_SP_BIG_FLU_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_RCU_SP_BIG_FLU_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCU_SP_BIG_FLU_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_RCU_SP_MTG_VSI_CNTXT_CFG 0x002698B4 /* Reset: POR */
+#define I40E_RCU_SP_MTG_VSI_CNTXT_CFG_ECC_EN_SHIFT 0
+#define I40E_RCU_SP_MTG_VSI_CNTXT_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_RCU_SP_MTG_VSI_CNTXT_CFG_ECC_EN_SHIFT)
+#define I40E_RCU_SP_MTG_VSI_CNTXT_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_RCU_SP_MTG_VSI_CNTXT_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_RCU_SP_MTG_VSI_CNTXT_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_RCU_SP_MTG_VSI_CNTXT_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_RCU_SP_MTG_VSI_CNTXT_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_RCU_SP_MTG_VSI_CNTXT_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_RCU_SP_MTG_VSI_CNTXT_CFG_LS_FORCE_SHIFT 3
+#define I40E_RCU_SP_MTG_VSI_CNTXT_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_RCU_SP_MTG_VSI_CNTXT_CFG_LS_FORCE_SHIFT)
+#define I40E_RCU_SP_MTG_VSI_CNTXT_CFG_LS_BYPASS_SHIFT 4
+#define I40E_RCU_SP_MTG_VSI_CNTXT_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_RCU_SP_MTG_VSI_CNTXT_CFG_LS_BYPASS_SHIFT)
+#define I40E_RCU_SP_MTG_VSI_CNTXT_CFG_MASK_INT_SHIFT 5
+#define I40E_RCU_SP_MTG_VSI_CNTXT_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_RCU_SP_MTG_VSI_CNTXT_CFG_MASK_INT_SHIFT)
+#define I40E_RCU_SP_MTG_VSI_CNTXT_CFG_FIX_CNT_SHIFT 8
+#define I40E_RCU_SP_MTG_VSI_CNTXT_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_RCU_SP_MTG_VSI_CNTXT_CFG_FIX_CNT_SHIFT)
+#define I40E_RCU_SP_MTG_VSI_CNTXT_CFG_ERR_CNT_SHIFT 9
+#define I40E_RCU_SP_MTG_VSI_CNTXT_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_RCU_SP_MTG_VSI_CNTXT_CFG_ERR_CNT_SHIFT)
+#define I40E_RCU_SP_MTG_VSI_CNTXT_CFG_RME_SHIFT 12
+#define I40E_RCU_SP_MTG_VSI_CNTXT_CFG_RME_MASK I40E_MASK(0x1, I40E_RCU_SP_MTG_VSI_CNTXT_CFG_RME_SHIFT)
+#define I40E_RCU_SP_MTG_VSI_CNTXT_CFG_RM_SHIFT 16
+#define I40E_RCU_SP_MTG_VSI_CNTXT_CFG_RM_MASK I40E_MASK(0xF, I40E_RCU_SP_MTG_VSI_CNTXT_CFG_RM_SHIFT)
+
+#define I40E_RCU_SP_MTG_VSI_CNTXT_STATUS 0x002698BC /* Reset: POR */
+#define I40E_RCU_SP_MTG_VSI_CNTXT_STATUS_ECC_ERR_SHIFT 0
+#define I40E_RCU_SP_MTG_VSI_CNTXT_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_RCU_SP_MTG_VSI_CNTXT_STATUS_ECC_ERR_SHIFT)
+#define I40E_RCU_SP_MTG_VSI_CNTXT_STATUS_ECC_FIX_SHIFT 1
+#define I40E_RCU_SP_MTG_VSI_CNTXT_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_RCU_SP_MTG_VSI_CNTXT_STATUS_ECC_FIX_SHIFT)
+#define I40E_RCU_SP_MTG_VSI_CNTXT_STATUS_INIT_DONE_SHIFT 2
+#define I40E_RCU_SP_MTG_VSI_CNTXT_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCU_SP_MTG_VSI_CNTXT_STATUS_INIT_DONE_SHIFT)
+#define I40E_RCU_SP_MTG_VSI_CNTXT_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_RCU_SP_MTG_VSI_CNTXT_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCU_SP_MTG_VSI_CNTXT_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_RCU_SP_PST_CONFIG_CFG 0x00269AD4 /* Reset: POR */
+#define I40E_RCU_SP_PST_CONFIG_CFG_ECC_EN_SHIFT 0
+#define I40E_RCU_SP_PST_CONFIG_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_RCU_SP_PST_CONFIG_CFG_ECC_EN_SHIFT)
+#define I40E_RCU_SP_PST_CONFIG_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_RCU_SP_PST_CONFIG_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_RCU_SP_PST_CONFIG_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_RCU_SP_PST_CONFIG_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_RCU_SP_PST_CONFIG_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_RCU_SP_PST_CONFIG_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_RCU_SP_PST_CONFIG_CFG_LS_FORCE_SHIFT 3
+#define I40E_RCU_SP_PST_CONFIG_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_RCU_SP_PST_CONFIG_CFG_LS_FORCE_SHIFT)
+#define I40E_RCU_SP_PST_CONFIG_CFG_LS_BYPASS_SHIFT 4
+#define I40E_RCU_SP_PST_CONFIG_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_RCU_SP_PST_CONFIG_CFG_LS_BYPASS_SHIFT)
+#define I40E_RCU_SP_PST_CONFIG_CFG_MASK_INT_SHIFT 5
+#define I40E_RCU_SP_PST_CONFIG_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_RCU_SP_PST_CONFIG_CFG_MASK_INT_SHIFT)
+#define I40E_RCU_SP_PST_CONFIG_CFG_FIX_CNT_SHIFT 8
+#define I40E_RCU_SP_PST_CONFIG_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_RCU_SP_PST_CONFIG_CFG_FIX_CNT_SHIFT)
+#define I40E_RCU_SP_PST_CONFIG_CFG_ERR_CNT_SHIFT 9
+#define I40E_RCU_SP_PST_CONFIG_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_RCU_SP_PST_CONFIG_CFG_ERR_CNT_SHIFT)
+#define I40E_RCU_SP_PST_CONFIG_CFG_RME_SHIFT 12
+#define I40E_RCU_SP_PST_CONFIG_CFG_RME_MASK I40E_MASK(0x1, I40E_RCU_SP_PST_CONFIG_CFG_RME_SHIFT)
+#define I40E_RCU_SP_PST_CONFIG_CFG_RM_SHIFT 16
+#define I40E_RCU_SP_PST_CONFIG_CFG_RM_MASK I40E_MASK(0xF, I40E_RCU_SP_PST_CONFIG_CFG_RM_SHIFT)
+
+#define I40E_RCU_SP_PST_CONFIG_STATUS 0x00269B04 /* Reset: POR */
+#define I40E_RCU_SP_PST_CONFIG_STATUS_ECC_ERR_SHIFT 0
+#define I40E_RCU_SP_PST_CONFIG_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_RCU_SP_PST_CONFIG_STATUS_ECC_ERR_SHIFT)
+#define I40E_RCU_SP_PST_CONFIG_STATUS_ECC_FIX_SHIFT 1
+#define I40E_RCU_SP_PST_CONFIG_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_RCU_SP_PST_CONFIG_STATUS_ECC_FIX_SHIFT)
+#define I40E_RCU_SP_PST_CONFIG_STATUS_INIT_DONE_SHIFT 2
+#define I40E_RCU_SP_PST_CONFIG_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCU_SP_PST_CONFIG_STATUS_INIT_DONE_SHIFT)
+#define I40E_RCU_SP_PST_CONFIG_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_RCU_SP_PST_CONFIG_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCU_SP_PST_CONFIG_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_RCU_SP_PST_RSC_HASH_CFG 0x00269AEC /* Reset: POR */
+#define I40E_RCU_SP_PST_RSC_HASH_CFG_ECC_EN_SHIFT 0
+#define I40E_RCU_SP_PST_RSC_HASH_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_RCU_SP_PST_RSC_HASH_CFG_ECC_EN_SHIFT)
+#define I40E_RCU_SP_PST_RSC_HASH_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_RCU_SP_PST_RSC_HASH_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_RCU_SP_PST_RSC_HASH_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_RCU_SP_PST_RSC_HASH_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_RCU_SP_PST_RSC_HASH_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_RCU_SP_PST_RSC_HASH_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_RCU_SP_PST_RSC_HASH_CFG_LS_FORCE_SHIFT 3
+#define I40E_RCU_SP_PST_RSC_HASH_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_RCU_SP_PST_RSC_HASH_CFG_LS_FORCE_SHIFT)
+#define I40E_RCU_SP_PST_RSC_HASH_CFG_LS_BYPASS_SHIFT 4
+#define I40E_RCU_SP_PST_RSC_HASH_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_RCU_SP_PST_RSC_HASH_CFG_LS_BYPASS_SHIFT)
+#define I40E_RCU_SP_PST_RSC_HASH_CFG_MASK_INT_SHIFT 5
+#define I40E_RCU_SP_PST_RSC_HASH_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_RCU_SP_PST_RSC_HASH_CFG_MASK_INT_SHIFT)
+#define I40E_RCU_SP_PST_RSC_HASH_CFG_FIX_CNT_SHIFT 8
+#define I40E_RCU_SP_PST_RSC_HASH_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_RCU_SP_PST_RSC_HASH_CFG_FIX_CNT_SHIFT)
+#define I40E_RCU_SP_PST_RSC_HASH_CFG_ERR_CNT_SHIFT 9
+#define I40E_RCU_SP_PST_RSC_HASH_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_RCU_SP_PST_RSC_HASH_CFG_ERR_CNT_SHIFT)
+#define I40E_RCU_SP_PST_RSC_HASH_CFG_RME_SHIFT 12
+#define I40E_RCU_SP_PST_RSC_HASH_CFG_RME_MASK I40E_MASK(0x1, I40E_RCU_SP_PST_RSC_HASH_CFG_RME_SHIFT)
+#define I40E_RCU_SP_PST_RSC_HASH_CFG_RM_SHIFT 16
+#define I40E_RCU_SP_PST_RSC_HASH_CFG_RM_MASK I40E_MASK(0xF, I40E_RCU_SP_PST_RSC_HASH_CFG_RM_SHIFT)
+
+#define I40E_RCU_SP_PST_RSC_HASH_STATUS 0x00269B14 /* Reset: POR */
+#define I40E_RCU_SP_PST_RSC_HASH_STATUS_ECC_ERR_SHIFT 0
+#define I40E_RCU_SP_PST_RSC_HASH_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_RCU_SP_PST_RSC_HASH_STATUS_ECC_ERR_SHIFT)
+#define I40E_RCU_SP_PST_RSC_HASH_STATUS_ECC_FIX_SHIFT 1
+#define I40E_RCU_SP_PST_RSC_HASH_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_RCU_SP_PST_RSC_HASH_STATUS_ECC_FIX_SHIFT)
+#define I40E_RCU_SP_PST_RSC_HASH_STATUS_INIT_DONE_SHIFT 2
+#define I40E_RCU_SP_PST_RSC_HASH_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCU_SP_PST_RSC_HASH_STATUS_INIT_DONE_SHIFT)
+#define I40E_RCU_SP_PST_RSC_HASH_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_RCU_SP_PST_RSC_HASH_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCU_SP_PST_RSC_HASH_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_RCU_SP_SWR_VSI_CNTXT_CFG 0x002698C4 /* Reset: POR */
+#define I40E_RCU_SP_SWR_VSI_CNTXT_CFG_ECC_EN_SHIFT 0
+#define I40E_RCU_SP_SWR_VSI_CNTXT_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_RCU_SP_SWR_VSI_CNTXT_CFG_ECC_EN_SHIFT)
+#define I40E_RCU_SP_SWR_VSI_CNTXT_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_RCU_SP_SWR_VSI_CNTXT_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_RCU_SP_SWR_VSI_CNTXT_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_RCU_SP_SWR_VSI_CNTXT_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_RCU_SP_SWR_VSI_CNTXT_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_RCU_SP_SWR_VSI_CNTXT_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_RCU_SP_SWR_VSI_CNTXT_CFG_LS_FORCE_SHIFT 3
+#define I40E_RCU_SP_SWR_VSI_CNTXT_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_RCU_SP_SWR_VSI_CNTXT_CFG_LS_FORCE_SHIFT)
+#define I40E_RCU_SP_SWR_VSI_CNTXT_CFG_LS_BYPASS_SHIFT 4
+#define I40E_RCU_SP_SWR_VSI_CNTXT_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_RCU_SP_SWR_VSI_CNTXT_CFG_LS_BYPASS_SHIFT)
+#define I40E_RCU_SP_SWR_VSI_CNTXT_CFG_MASK_INT_SHIFT 5
+#define I40E_RCU_SP_SWR_VSI_CNTXT_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_RCU_SP_SWR_VSI_CNTXT_CFG_MASK_INT_SHIFT)
+#define I40E_RCU_SP_SWR_VSI_CNTXT_CFG_FIX_CNT_SHIFT 8
+#define I40E_RCU_SP_SWR_VSI_CNTXT_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_RCU_SP_SWR_VSI_CNTXT_CFG_FIX_CNT_SHIFT)
+#define I40E_RCU_SP_SWR_VSI_CNTXT_CFG_ERR_CNT_SHIFT 9
+#define I40E_RCU_SP_SWR_VSI_CNTXT_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_RCU_SP_SWR_VSI_CNTXT_CFG_ERR_CNT_SHIFT)
+#define I40E_RCU_SP_SWR_VSI_CNTXT_CFG_RME_SHIFT 12
+#define I40E_RCU_SP_SWR_VSI_CNTXT_CFG_RME_MASK I40E_MASK(0x1, I40E_RCU_SP_SWR_VSI_CNTXT_CFG_RME_SHIFT)
+#define I40E_RCU_SP_SWR_VSI_CNTXT_CFG_RM_SHIFT 16
+#define I40E_RCU_SP_SWR_VSI_CNTXT_CFG_RM_MASK I40E_MASK(0xF, I40E_RCU_SP_SWR_VSI_CNTXT_CFG_RM_SHIFT)
+
+#define I40E_RCU_SP_SWR_VSI_CNTXT_STATUS 0x002698CC /* Reset: POR */
+#define I40E_RCU_SP_SWR_VSI_CNTXT_STATUS_ECC_ERR_SHIFT 0
+#define I40E_RCU_SP_SWR_VSI_CNTXT_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_RCU_SP_SWR_VSI_CNTXT_STATUS_ECC_ERR_SHIFT)
+#define I40E_RCU_SP_SWR_VSI_CNTXT_STATUS_ECC_FIX_SHIFT 1
+#define I40E_RCU_SP_SWR_VSI_CNTXT_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_RCU_SP_SWR_VSI_CNTXT_STATUS_ECC_FIX_SHIFT)
+#define I40E_RCU_SP_SWR_VSI_CNTXT_STATUS_INIT_DONE_SHIFT 2
+#define I40E_RCU_SP_SWR_VSI_CNTXT_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCU_SP_SWR_VSI_CNTXT_STATUS_INIT_DONE_SHIFT)
+#define I40E_RCU_SP_SWR_VSI_CNTXT_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_RCU_SP_SWR_VSI_CNTXT_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCU_SP_SWR_VSI_CNTXT_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_RCU_SP16KB_CFG 0x002698D4 /* Reset: POR */
+#define I40E_RCU_SP16KB_CFG_ECC_EN_SHIFT 0
+#define I40E_RCU_SP16KB_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_RCU_SP16KB_CFG_ECC_EN_SHIFT)
+#define I40E_RCU_SP16KB_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_RCU_SP16KB_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_RCU_SP16KB_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_RCU_SP16KB_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_RCU_SP16KB_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_RCU_SP16KB_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_RCU_SP16KB_CFG_LS_FORCE_SHIFT 3
+#define I40E_RCU_SP16KB_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_RCU_SP16KB_CFG_LS_FORCE_SHIFT)
+#define I40E_RCU_SP16KB_CFG_LS_BYPASS_SHIFT 4
+#define I40E_RCU_SP16KB_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_RCU_SP16KB_CFG_LS_BYPASS_SHIFT)
+#define I40E_RCU_SP16KB_CFG_MASK_INT_SHIFT 5
+#define I40E_RCU_SP16KB_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_RCU_SP16KB_CFG_MASK_INT_SHIFT)
+#define I40E_RCU_SP16KB_CFG_FIX_CNT_SHIFT 8
+#define I40E_RCU_SP16KB_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_RCU_SP16KB_CFG_FIX_CNT_SHIFT)
+#define I40E_RCU_SP16KB_CFG_ERR_CNT_SHIFT 9
+#define I40E_RCU_SP16KB_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_RCU_SP16KB_CFG_ERR_CNT_SHIFT)
+#define I40E_RCU_SP16KB_CFG_RME_SHIFT 12
+#define I40E_RCU_SP16KB_CFG_RME_MASK I40E_MASK(0x1, I40E_RCU_SP16KB_CFG_RME_SHIFT)
+#define I40E_RCU_SP16KB_CFG_RM_SHIFT 16
+#define I40E_RCU_SP16KB_CFG_RM_MASK I40E_MASK(0xF, I40E_RCU_SP16KB_CFG_RM_SHIFT)
+
+#define I40E_RCU_SP16KB_REP_CFG 0x00269AF4 /* Reset: POR */
+#define I40E_RCU_SP16KB_REP_CFG_ECC_EN_SHIFT 0
+#define I40E_RCU_SP16KB_REP_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_RCU_SP16KB_REP_CFG_ECC_EN_SHIFT)
+#define I40E_RCU_SP16KB_REP_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_RCU_SP16KB_REP_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_RCU_SP16KB_REP_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_RCU_SP16KB_REP_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_RCU_SP16KB_REP_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_RCU_SP16KB_REP_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_RCU_SP16KB_REP_CFG_LS_FORCE_SHIFT 3
+#define I40E_RCU_SP16KB_REP_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_RCU_SP16KB_REP_CFG_LS_FORCE_SHIFT)
+#define I40E_RCU_SP16KB_REP_CFG_LS_BYPASS_SHIFT 4
+#define I40E_RCU_SP16KB_REP_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_RCU_SP16KB_REP_CFG_LS_BYPASS_SHIFT)
+#define I40E_RCU_SP16KB_REP_CFG_MASK_INT_SHIFT 5
+#define I40E_RCU_SP16KB_REP_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_RCU_SP16KB_REP_CFG_MASK_INT_SHIFT)
+#define I40E_RCU_SP16KB_REP_CFG_FIX_CNT_SHIFT 8
+#define I40E_RCU_SP16KB_REP_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_RCU_SP16KB_REP_CFG_FIX_CNT_SHIFT)
+#define I40E_RCU_SP16KB_REP_CFG_ERR_CNT_SHIFT 9
+#define I40E_RCU_SP16KB_REP_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_RCU_SP16KB_REP_CFG_ERR_CNT_SHIFT)
+#define I40E_RCU_SP16KB_REP_CFG_RME_SHIFT 12
+#define I40E_RCU_SP16KB_REP_CFG_RME_MASK I40E_MASK(0x1, I40E_RCU_SP16KB_REP_CFG_RME_SHIFT)
+#define I40E_RCU_SP16KB_REP_CFG_RM_SHIFT 16
+#define I40E_RCU_SP16KB_REP_CFG_RM_MASK I40E_MASK(0xF, I40E_RCU_SP16KB_REP_CFG_RM_SHIFT)
+
+#define I40E_RCU_SP16KB_REP_STATUS 0x00269B24 /* Reset: POR */
+#define I40E_RCU_SP16KB_REP_STATUS_ECC_ERR_SHIFT 0
+#define I40E_RCU_SP16KB_REP_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_RCU_SP16KB_REP_STATUS_ECC_ERR_SHIFT)
+#define I40E_RCU_SP16KB_REP_STATUS_ECC_FIX_SHIFT 1
+#define I40E_RCU_SP16KB_REP_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_RCU_SP16KB_REP_STATUS_ECC_FIX_SHIFT)
+#define I40E_RCU_SP16KB_REP_STATUS_INIT_DONE_SHIFT 2
+#define I40E_RCU_SP16KB_REP_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCU_SP16KB_REP_STATUS_INIT_DONE_SHIFT)
+#define I40E_RCU_SP16KB_REP_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_RCU_SP16KB_REP_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCU_SP16KB_REP_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_RCU_SP16KB_STATUS 0x002698DC /* Reset: POR */
+#define I40E_RCU_SP16KB_STATUS_ECC_ERR_SHIFT 0
+#define I40E_RCU_SP16KB_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_RCU_SP16KB_STATUS_ECC_ERR_SHIFT)
+#define I40E_RCU_SP16KB_STATUS_ECC_FIX_SHIFT 1
+#define I40E_RCU_SP16KB_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_RCU_SP16KB_STATUS_ECC_FIX_SHIFT)
+#define I40E_RCU_SP16KB_STATUS_INIT_DONE_SHIFT 2
+#define I40E_RCU_SP16KB_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCU_SP16KB_STATUS_INIT_DONE_SHIFT)
+#define I40E_RCU_SP16KB_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_RCU_SP16KB_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCU_SP16KB_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_RCU_SP1KB_CFG 0x002698E4 /* Reset: POR */
+#define I40E_RCU_SP1KB_CFG_ECC_EN_SHIFT 0
+#define I40E_RCU_SP1KB_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_RCU_SP1KB_CFG_ECC_EN_SHIFT)
+#define I40E_RCU_SP1KB_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_RCU_SP1KB_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_RCU_SP1KB_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_RCU_SP1KB_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_RCU_SP1KB_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_RCU_SP1KB_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_RCU_SP1KB_CFG_LS_FORCE_SHIFT 3
+#define I40E_RCU_SP1KB_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_RCU_SP1KB_CFG_LS_FORCE_SHIFT)
+#define I40E_RCU_SP1KB_CFG_LS_BYPASS_SHIFT 4
+#define I40E_RCU_SP1KB_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_RCU_SP1KB_CFG_LS_BYPASS_SHIFT)
+#define I40E_RCU_SP1KB_CFG_MASK_INT_SHIFT 5
+#define I40E_RCU_SP1KB_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_RCU_SP1KB_CFG_MASK_INT_SHIFT)
+#define I40E_RCU_SP1KB_CFG_FIX_CNT_SHIFT 8
+#define I40E_RCU_SP1KB_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_RCU_SP1KB_CFG_FIX_CNT_SHIFT)
+#define I40E_RCU_SP1KB_CFG_ERR_CNT_SHIFT 9
+#define I40E_RCU_SP1KB_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_RCU_SP1KB_CFG_ERR_CNT_SHIFT)
+#define I40E_RCU_SP1KB_CFG_RME_SHIFT 12
+#define I40E_RCU_SP1KB_CFG_RME_MASK I40E_MASK(0x1, I40E_RCU_SP1KB_CFG_RME_SHIFT)
+#define I40E_RCU_SP1KB_CFG_RM_SHIFT 16
+#define I40E_RCU_SP1KB_CFG_RM_MASK I40E_MASK(0xF, I40E_RCU_SP1KB_CFG_RM_SHIFT)
+
+#define I40E_RCU_SP1KB_STATUS 0x002698EC /* Reset: POR */
+#define I40E_RCU_SP1KB_STATUS_ECC_ERR_SHIFT 0
+#define I40E_RCU_SP1KB_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_RCU_SP1KB_STATUS_ECC_ERR_SHIFT)
+#define I40E_RCU_SP1KB_STATUS_ECC_FIX_SHIFT 1
+#define I40E_RCU_SP1KB_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_RCU_SP1KB_STATUS_ECC_FIX_SHIFT)
+#define I40E_RCU_SP1KB_STATUS_INIT_DONE_SHIFT 2
+#define I40E_RCU_SP1KB_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCU_SP1KB_STATUS_INIT_DONE_SHIFT)
+#define I40E_RCU_SP1KB_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_RCU_SP1KB_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCU_SP1KB_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_RCU_SP256B_CFG 0x002698F4 /* Reset: POR */
+#define I40E_RCU_SP256B_CFG_ECC_EN_SHIFT 0
+#define I40E_RCU_SP256B_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_RCU_SP256B_CFG_ECC_EN_SHIFT)
+#define I40E_RCU_SP256B_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_RCU_SP256B_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_RCU_SP256B_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_RCU_SP256B_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_RCU_SP256B_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_RCU_SP256B_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_RCU_SP256B_CFG_LS_FORCE_SHIFT 3
+#define I40E_RCU_SP256B_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_RCU_SP256B_CFG_LS_FORCE_SHIFT)
+#define I40E_RCU_SP256B_CFG_LS_BYPASS_SHIFT 4
+#define I40E_RCU_SP256B_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_RCU_SP256B_CFG_LS_BYPASS_SHIFT)
+#define I40E_RCU_SP256B_CFG_MASK_INT_SHIFT 5
+#define I40E_RCU_SP256B_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_RCU_SP256B_CFG_MASK_INT_SHIFT)
+#define I40E_RCU_SP256B_CFG_FIX_CNT_SHIFT 8
+#define I40E_RCU_SP256B_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_RCU_SP256B_CFG_FIX_CNT_SHIFT)
+#define I40E_RCU_SP256B_CFG_ERR_CNT_SHIFT 9
+#define I40E_RCU_SP256B_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_RCU_SP256B_CFG_ERR_CNT_SHIFT)
+#define I40E_RCU_SP256B_CFG_RME_SHIFT 12
+#define I40E_RCU_SP256B_CFG_RME_MASK I40E_MASK(0x1, I40E_RCU_SP256B_CFG_RME_SHIFT)
+#define I40E_RCU_SP256B_CFG_RM_SHIFT 16
+#define I40E_RCU_SP256B_CFG_RM_MASK I40E_MASK(0xF, I40E_RCU_SP256B_CFG_RM_SHIFT)
+
+#define I40E_RCU_SP256B_STATUS 0x002698FC /* Reset: POR */
+#define I40E_RCU_SP256B_STATUS_ECC_ERR_SHIFT 0
+#define I40E_RCU_SP256B_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_RCU_SP256B_STATUS_ECC_ERR_SHIFT)
+#define I40E_RCU_SP256B_STATUS_ECC_FIX_SHIFT 1
+#define I40E_RCU_SP256B_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_RCU_SP256B_STATUS_ECC_FIX_SHIFT)
+#define I40E_RCU_SP256B_STATUS_INIT_DONE_SHIFT 2
+#define I40E_RCU_SP256B_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCU_SP256B_STATUS_INIT_DONE_SHIFT)
+#define I40E_RCU_SP256B_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_RCU_SP256B_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCU_SP256B_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_RCU_SP2KB_CFG 0x00269904 /* Reset: POR */
+#define I40E_RCU_SP2KB_CFG_ECC_EN_SHIFT 0
+#define I40E_RCU_SP2KB_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_RCU_SP2KB_CFG_ECC_EN_SHIFT)
+#define I40E_RCU_SP2KB_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_RCU_SP2KB_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_RCU_SP2KB_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_RCU_SP2KB_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_RCU_SP2KB_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_RCU_SP2KB_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_RCU_SP2KB_CFG_LS_FORCE_SHIFT 3
+#define I40E_RCU_SP2KB_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_RCU_SP2KB_CFG_LS_FORCE_SHIFT)
+#define I40E_RCU_SP2KB_CFG_LS_BYPASS_SHIFT 4
+#define I40E_RCU_SP2KB_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_RCU_SP2KB_CFG_LS_BYPASS_SHIFT)
+#define I40E_RCU_SP2KB_CFG_MASK_INT_SHIFT 5
+#define I40E_RCU_SP2KB_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_RCU_SP2KB_CFG_MASK_INT_SHIFT)
+#define I40E_RCU_SP2KB_CFG_FIX_CNT_SHIFT 8
+#define I40E_RCU_SP2KB_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_RCU_SP2KB_CFG_FIX_CNT_SHIFT)
+#define I40E_RCU_SP2KB_CFG_ERR_CNT_SHIFT 9
+#define I40E_RCU_SP2KB_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_RCU_SP2KB_CFG_ERR_CNT_SHIFT)
+#define I40E_RCU_SP2KB_CFG_RME_SHIFT 12
+#define I40E_RCU_SP2KB_CFG_RME_MASK I40E_MASK(0x1, I40E_RCU_SP2KB_CFG_RME_SHIFT)
+#define I40E_RCU_SP2KB_CFG_RM_SHIFT 16
+#define I40E_RCU_SP2KB_CFG_RM_MASK I40E_MASK(0xF, I40E_RCU_SP2KB_CFG_RM_SHIFT)
+
+#define I40E_RCU_SP2KB_STATUS 0x0026990C /* Reset: POR */
+#define I40E_RCU_SP2KB_STATUS_ECC_ERR_SHIFT 0
+#define I40E_RCU_SP2KB_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_RCU_SP2KB_STATUS_ECC_ERR_SHIFT)
+#define I40E_RCU_SP2KB_STATUS_ECC_FIX_SHIFT 1
+#define I40E_RCU_SP2KB_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_RCU_SP2KB_STATUS_ECC_FIX_SHIFT)
+#define I40E_RCU_SP2KB_STATUS_INIT_DONE_SHIFT 2
+#define I40E_RCU_SP2KB_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCU_SP2KB_STATUS_INIT_DONE_SHIFT)
+#define I40E_RCU_SP2KB_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_RCU_SP2KB_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCU_SP2KB_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_RCU_SP4KB_CFG 0x00269914 /* Reset: POR */
+#define I40E_RCU_SP4KB_CFG_ECC_EN_SHIFT 0
+#define I40E_RCU_SP4KB_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_RCU_SP4KB_CFG_ECC_EN_SHIFT)
+#define I40E_RCU_SP4KB_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_RCU_SP4KB_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_RCU_SP4KB_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_RCU_SP4KB_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_RCU_SP4KB_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_RCU_SP4KB_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_RCU_SP4KB_CFG_LS_FORCE_SHIFT 3
+#define I40E_RCU_SP4KB_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_RCU_SP4KB_CFG_LS_FORCE_SHIFT)
+#define I40E_RCU_SP4KB_CFG_LS_BYPASS_SHIFT 4
+#define I40E_RCU_SP4KB_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_RCU_SP4KB_CFG_LS_BYPASS_SHIFT)
+#define I40E_RCU_SP4KB_CFG_MASK_INT_SHIFT 5
+#define I40E_RCU_SP4KB_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_RCU_SP4KB_CFG_MASK_INT_SHIFT)
+#define I40E_RCU_SP4KB_CFG_FIX_CNT_SHIFT 8
+#define I40E_RCU_SP4KB_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_RCU_SP4KB_CFG_FIX_CNT_SHIFT)
+#define I40E_RCU_SP4KB_CFG_ERR_CNT_SHIFT 9
+#define I40E_RCU_SP4KB_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_RCU_SP4KB_CFG_ERR_CNT_SHIFT)
+#define I40E_RCU_SP4KB_CFG_RME_SHIFT 12
+#define I40E_RCU_SP4KB_CFG_RME_MASK I40E_MASK(0x1, I40E_RCU_SP4KB_CFG_RME_SHIFT)
+#define I40E_RCU_SP4KB_CFG_RM_SHIFT 16
+#define I40E_RCU_SP4KB_CFG_RM_MASK I40E_MASK(0xF, I40E_RCU_SP4KB_CFG_RM_SHIFT)
+
+#define I40E_RCU_SP4KB_STATUS 0x0026991C /* Reset: POR */
+#define I40E_RCU_SP4KB_STATUS_ECC_ERR_SHIFT 0
+#define I40E_RCU_SP4KB_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_RCU_SP4KB_STATUS_ECC_ERR_SHIFT)
+#define I40E_RCU_SP4KB_STATUS_ECC_FIX_SHIFT 1
+#define I40E_RCU_SP4KB_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_RCU_SP4KB_STATUS_ECC_FIX_SHIFT)
+#define I40E_RCU_SP4KB_STATUS_INIT_DONE_SHIFT 2
+#define I40E_RCU_SP4KB_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCU_SP4KB_STATUS_INIT_DONE_SHIFT)
+#define I40E_RCU_SP4KB_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_RCU_SP4KB_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCU_SP4KB_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_RCU_SP8KB_CFG 0x00269924 /* Reset: POR */
+#define I40E_RCU_SP8KB_CFG_ECC_EN_SHIFT 0
+#define I40E_RCU_SP8KB_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_RCU_SP8KB_CFG_ECC_EN_SHIFT)
+#define I40E_RCU_SP8KB_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_RCU_SP8KB_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_RCU_SP8KB_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_RCU_SP8KB_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_RCU_SP8KB_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_RCU_SP8KB_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_RCU_SP8KB_CFG_LS_FORCE_SHIFT 3
+#define I40E_RCU_SP8KB_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_RCU_SP8KB_CFG_LS_FORCE_SHIFT)
+#define I40E_RCU_SP8KB_CFG_LS_BYPASS_SHIFT 4
+#define I40E_RCU_SP8KB_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_RCU_SP8KB_CFG_LS_BYPASS_SHIFT)
+#define I40E_RCU_SP8KB_CFG_MASK_INT_SHIFT 5
+#define I40E_RCU_SP8KB_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_RCU_SP8KB_CFG_MASK_INT_SHIFT)
+#define I40E_RCU_SP8KB_CFG_FIX_CNT_SHIFT 8
+#define I40E_RCU_SP8KB_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_RCU_SP8KB_CFG_FIX_CNT_SHIFT)
+#define I40E_RCU_SP8KB_CFG_ERR_CNT_SHIFT 9
+#define I40E_RCU_SP8KB_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_RCU_SP8KB_CFG_ERR_CNT_SHIFT)
+#define I40E_RCU_SP8KB_CFG_RME_SHIFT 12
+#define I40E_RCU_SP8KB_CFG_RME_MASK I40E_MASK(0x1, I40E_RCU_SP8KB_CFG_RME_SHIFT)
+#define I40E_RCU_SP8KB_CFG_RM_SHIFT 16
+#define I40E_RCU_SP8KB_CFG_RM_MASK I40E_MASK(0xF, I40E_RCU_SP8KB_CFG_RM_SHIFT)
+
+#define I40E_RCU_SP8KB_STATUS 0x0026992C /* Reset: POR */
+#define I40E_RCU_SP8KB_STATUS_ECC_ERR_SHIFT 0
+#define I40E_RCU_SP8KB_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_RCU_SP8KB_STATUS_ECC_ERR_SHIFT)
+#define I40E_RCU_SP8KB_STATUS_ECC_FIX_SHIFT 1
+#define I40E_RCU_SP8KB_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_RCU_SP8KB_STATUS_ECC_FIX_SHIFT)
+#define I40E_RCU_SP8KB_STATUS_INIT_DONE_SHIFT 2
+#define I40E_RCU_SP8KB_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCU_SP8KB_STATUS_INIT_DONE_SHIFT)
+#define I40E_RCU_SP8KB_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_RCU_SP8KB_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_RCU_SP8KB_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_RCU_SWR_ECC_COR_ERR 0x00269934 /* Reset: POR */
+#define I40E_RCU_SWR_ECC_COR_ERR_CNT_SHIFT 0
+#define I40E_RCU_SWR_ECC_COR_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_RCU_SWR_ECC_COR_ERR_CNT_SHIFT)
+
+#define I40E_RCU_SWR_ECC_UNCOR_ERR 0x0026993C /* Reset: POR */
+#define I40E_RCU_SWR_ECC_UNCOR_ERR_CNT_SHIFT 0
+#define I40E_RCU_SWR_ECC_UNCOR_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_RCU_SWR_ECC_UNCOR_ERR_CNT_SHIFT)
+
+#define I40E_RDPU_ECC_COR_ERR 0x00051080 /* Reset: POR */
+#define I40E_RDPU_ECC_COR_ERR_CNT_SHIFT 0
+#define I40E_RDPU_ECC_COR_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_RDPU_ECC_COR_ERR_CNT_SHIFT)
+
+#define I40E_RDPU_ECC_UNCOR_ERR 0x0005107C /* Reset: POR */
+#define I40E_RDPU_ECC_UNCOR_ERR_CNT_SHIFT 0
+#define I40E_RDPU_ECC_UNCOR_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_RDPU_ECC_UNCOR_ERR_CNT_SHIFT)
+
+#define I40E_RDPU_VSI_LY2_STRIP_CFG 0x00051074 /* Reset: POR */
+#define I40E_RDPU_VSI_LY2_STRIP_CFG_ECC_EN_SHIFT 0
+#define I40E_RDPU_VSI_LY2_STRIP_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_RDPU_VSI_LY2_STRIP_CFG_ECC_EN_SHIFT)
+#define I40E_RDPU_VSI_LY2_STRIP_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_RDPU_VSI_LY2_STRIP_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_RDPU_VSI_LY2_STRIP_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_RDPU_VSI_LY2_STRIP_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_RDPU_VSI_LY2_STRIP_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_RDPU_VSI_LY2_STRIP_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_RDPU_VSI_LY2_STRIP_CFG_LS_FORCE_SHIFT 3
+#define I40E_RDPU_VSI_LY2_STRIP_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_RDPU_VSI_LY2_STRIP_CFG_LS_FORCE_SHIFT)
+#define I40E_RDPU_VSI_LY2_STRIP_CFG_LS_BYPASS_SHIFT 4
+#define I40E_RDPU_VSI_LY2_STRIP_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_RDPU_VSI_LY2_STRIP_CFG_LS_BYPASS_SHIFT)
+#define I40E_RDPU_VSI_LY2_STRIP_CFG_MASK_INT_SHIFT 5
+#define I40E_RDPU_VSI_LY2_STRIP_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_RDPU_VSI_LY2_STRIP_CFG_MASK_INT_SHIFT)
+#define I40E_RDPU_VSI_LY2_STRIP_CFG_FIX_CNT_SHIFT 8
+#define I40E_RDPU_VSI_LY2_STRIP_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_RDPU_VSI_LY2_STRIP_CFG_FIX_CNT_SHIFT)
+#define I40E_RDPU_VSI_LY2_STRIP_CFG_ERR_CNT_SHIFT 9
+#define I40E_RDPU_VSI_LY2_STRIP_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_RDPU_VSI_LY2_STRIP_CFG_ERR_CNT_SHIFT)
+#define I40E_RDPU_VSI_LY2_STRIP_CFG_RME_SHIFT 12
+#define I40E_RDPU_VSI_LY2_STRIP_CFG_RME_MASK I40E_MASK(0x1, I40E_RDPU_VSI_LY2_STRIP_CFG_RME_SHIFT)
+#define I40E_RDPU_VSI_LY2_STRIP_CFG_RM_SHIFT 16
+#define I40E_RDPU_VSI_LY2_STRIP_CFG_RM_MASK I40E_MASK(0xF, I40E_RDPU_VSI_LY2_STRIP_CFG_RM_SHIFT)
+
+#define I40E_RDPU_VSI_LY2_STRIP_STATUS 0x00051078 /* Reset: POR */
+#define I40E_RDPU_VSI_LY2_STRIP_STATUS_ECC_ERR_SHIFT 0
+#define I40E_RDPU_VSI_LY2_STRIP_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_RDPU_VSI_LY2_STRIP_STATUS_ECC_ERR_SHIFT)
+#define I40E_RDPU_VSI_LY2_STRIP_STATUS_ECC_FIX_SHIFT 1
+#define I40E_RDPU_VSI_LY2_STRIP_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_RDPU_VSI_LY2_STRIP_STATUS_ECC_FIX_SHIFT)
+#define I40E_RDPU_VSI_LY2_STRIP_STATUS_INIT_DONE_SHIFT 2
+#define I40E_RDPU_VSI_LY2_STRIP_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_RDPU_VSI_LY2_STRIP_STATUS_INIT_DONE_SHIFT)
+#define I40E_RDPU_VSI_LY2_STRIP_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_RDPU_VSI_LY2_STRIP_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_RDPU_VSI_LY2_STRIP_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_RLAN_ATTR_FIFO_CFG 0x0012A52C /* Reset: POR */
+#define I40E_RLAN_ATTR_FIFO_CFG_ECC_EN_SHIFT 0
+#define I40E_RLAN_ATTR_FIFO_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_RLAN_ATTR_FIFO_CFG_ECC_EN_SHIFT)
+#define I40E_RLAN_ATTR_FIFO_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_RLAN_ATTR_FIFO_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_RLAN_ATTR_FIFO_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_RLAN_ATTR_FIFO_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_RLAN_ATTR_FIFO_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_RLAN_ATTR_FIFO_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_RLAN_ATTR_FIFO_CFG_LS_FORCE_SHIFT 3
+#define I40E_RLAN_ATTR_FIFO_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_RLAN_ATTR_FIFO_CFG_LS_FORCE_SHIFT)
+#define I40E_RLAN_ATTR_FIFO_CFG_LS_BYPASS_SHIFT 4
+#define I40E_RLAN_ATTR_FIFO_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_RLAN_ATTR_FIFO_CFG_LS_BYPASS_SHIFT)
+#define I40E_RLAN_ATTR_FIFO_CFG_MASK_INT_SHIFT 5
+#define I40E_RLAN_ATTR_FIFO_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_RLAN_ATTR_FIFO_CFG_MASK_INT_SHIFT)
+#define I40E_RLAN_ATTR_FIFO_CFG_FIX_CNT_SHIFT 8
+#define I40E_RLAN_ATTR_FIFO_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_RLAN_ATTR_FIFO_CFG_FIX_CNT_SHIFT)
+#define I40E_RLAN_ATTR_FIFO_CFG_ERR_CNT_SHIFT 9
+#define I40E_RLAN_ATTR_FIFO_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_RLAN_ATTR_FIFO_CFG_ERR_CNT_SHIFT)
+#define I40E_RLAN_ATTR_FIFO_CFG_RME_SHIFT 12
+#define I40E_RLAN_ATTR_FIFO_CFG_RME_MASK I40E_MASK(0x1, I40E_RLAN_ATTR_FIFO_CFG_RME_SHIFT)
+#define I40E_RLAN_ATTR_FIFO_CFG_RM_SHIFT 16
+#define I40E_RLAN_ATTR_FIFO_CFG_RM_MASK I40E_MASK(0xF, I40E_RLAN_ATTR_FIFO_CFG_RM_SHIFT)
+
+#define I40E_RLAN_ATTR_FIFO_STATUS 0x0012A530 /* Reset: POR */
+#define I40E_RLAN_ATTR_FIFO_STATUS_ECC_ERR_SHIFT 0
+#define I40E_RLAN_ATTR_FIFO_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_RLAN_ATTR_FIFO_STATUS_ECC_ERR_SHIFT)
+#define I40E_RLAN_ATTR_FIFO_STATUS_ECC_FIX_SHIFT 1
+#define I40E_RLAN_ATTR_FIFO_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_RLAN_ATTR_FIFO_STATUS_ECC_FIX_SHIFT)
+#define I40E_RLAN_ATTR_FIFO_STATUS_INIT_DONE_SHIFT 2
+#define I40E_RLAN_ATTR_FIFO_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_RLAN_ATTR_FIFO_STATUS_INIT_DONE_SHIFT)
+#define I40E_RLAN_ATTR_FIFO_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_RLAN_ATTR_FIFO_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_RLAN_ATTR_FIFO_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_RLAN_CCH_CFG 0x0012A514 /* Reset: POR */
+#define I40E_RLAN_CCH_CFG_ECC_EN_SHIFT 0
+#define I40E_RLAN_CCH_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_RLAN_CCH_CFG_ECC_EN_SHIFT)
+#define I40E_RLAN_CCH_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_RLAN_CCH_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_RLAN_CCH_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_RLAN_CCH_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_RLAN_CCH_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_RLAN_CCH_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_RLAN_CCH_CFG_LS_FORCE_SHIFT 3
+#define I40E_RLAN_CCH_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_RLAN_CCH_CFG_LS_FORCE_SHIFT)
+#define I40E_RLAN_CCH_CFG_LS_BYPASS_SHIFT 4
+#define I40E_RLAN_CCH_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_RLAN_CCH_CFG_LS_BYPASS_SHIFT)
+#define I40E_RLAN_CCH_CFG_MASK_INT_SHIFT 5
+#define I40E_RLAN_CCH_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_RLAN_CCH_CFG_MASK_INT_SHIFT)
+#define I40E_RLAN_CCH_CFG_FIX_CNT_SHIFT 8
+#define I40E_RLAN_CCH_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_RLAN_CCH_CFG_FIX_CNT_SHIFT)
+#define I40E_RLAN_CCH_CFG_ERR_CNT_SHIFT 9
+#define I40E_RLAN_CCH_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_RLAN_CCH_CFG_ERR_CNT_SHIFT)
+#define I40E_RLAN_CCH_CFG_RME_SHIFT 12
+#define I40E_RLAN_CCH_CFG_RME_MASK I40E_MASK(0x1, I40E_RLAN_CCH_CFG_RME_SHIFT)
+#define I40E_RLAN_CCH_CFG_RM_SHIFT 16
+#define I40E_RLAN_CCH_CFG_RM_MASK I40E_MASK(0xF, I40E_RLAN_CCH_CFG_RM_SHIFT)
+
+#define I40E_RLAN_CCH_STATUS 0x0012A518 /* Reset: POR */
+#define I40E_RLAN_CCH_STATUS_ECC_ERR_SHIFT 0
+#define I40E_RLAN_CCH_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_RLAN_CCH_STATUS_ECC_ERR_SHIFT)
+#define I40E_RLAN_CCH_STATUS_ECC_FIX_SHIFT 1
+#define I40E_RLAN_CCH_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_RLAN_CCH_STATUS_ECC_FIX_SHIFT)
+#define I40E_RLAN_CCH_STATUS_INIT_DONE_SHIFT 2
+#define I40E_RLAN_CCH_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_RLAN_CCH_STATUS_INIT_DONE_SHIFT)
+#define I40E_RLAN_CCH_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_RLAN_CCH_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_RLAN_CCH_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_RLAN_CMD_FIFO_CFG 0x0012A534 /* Reset: POR */
+#define I40E_RLAN_CMD_FIFO_CFG_ECC_EN_SHIFT 0
+#define I40E_RLAN_CMD_FIFO_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_RLAN_CMD_FIFO_CFG_ECC_EN_SHIFT)
+#define I40E_RLAN_CMD_FIFO_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_RLAN_CMD_FIFO_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_RLAN_CMD_FIFO_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_RLAN_CMD_FIFO_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_RLAN_CMD_FIFO_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_RLAN_CMD_FIFO_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_RLAN_CMD_FIFO_CFG_LS_FORCE_SHIFT 3
+#define I40E_RLAN_CMD_FIFO_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_RLAN_CMD_FIFO_CFG_LS_FORCE_SHIFT)
+#define I40E_RLAN_CMD_FIFO_CFG_LS_BYPASS_SHIFT 4
+#define I40E_RLAN_CMD_FIFO_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_RLAN_CMD_FIFO_CFG_LS_BYPASS_SHIFT)
+#define I40E_RLAN_CMD_FIFO_CFG_MASK_INT_SHIFT 5
+#define I40E_RLAN_CMD_FIFO_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_RLAN_CMD_FIFO_CFG_MASK_INT_SHIFT)
+#define I40E_RLAN_CMD_FIFO_CFG_FIX_CNT_SHIFT 8
+#define I40E_RLAN_CMD_FIFO_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_RLAN_CMD_FIFO_CFG_FIX_CNT_SHIFT)
+#define I40E_RLAN_CMD_FIFO_CFG_ERR_CNT_SHIFT 9
+#define I40E_RLAN_CMD_FIFO_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_RLAN_CMD_FIFO_CFG_ERR_CNT_SHIFT)
+#define I40E_RLAN_CMD_FIFO_CFG_RME_SHIFT 12
+#define I40E_RLAN_CMD_FIFO_CFG_RME_MASK I40E_MASK(0x1, I40E_RLAN_CMD_FIFO_CFG_RME_SHIFT)
+#define I40E_RLAN_CMD_FIFO_CFG_RM_SHIFT 16
+#define I40E_RLAN_CMD_FIFO_CFG_RM_MASK I40E_MASK(0xF, I40E_RLAN_CMD_FIFO_CFG_RM_SHIFT)
+
+#define I40E_RLAN_CMD_FIFO_STATUS 0x0012A538 /* Reset: POR */
+#define I40E_RLAN_CMD_FIFO_STATUS_ECC_ERR_SHIFT 0
+#define I40E_RLAN_CMD_FIFO_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_RLAN_CMD_FIFO_STATUS_ECC_ERR_SHIFT)
+#define I40E_RLAN_CMD_FIFO_STATUS_ECC_FIX_SHIFT 1
+#define I40E_RLAN_CMD_FIFO_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_RLAN_CMD_FIFO_STATUS_ECC_FIX_SHIFT)
+#define I40E_RLAN_CMD_FIFO_STATUS_INIT_DONE_SHIFT 2
+#define I40E_RLAN_CMD_FIFO_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_RLAN_CMD_FIFO_STATUS_INIT_DONE_SHIFT)
+#define I40E_RLAN_CMD_FIFO_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_RLAN_CMD_FIFO_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_RLAN_CMD_FIFO_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_RLAN_DCH_LINE_ATTR_CFG 0x0012A51C /* Reset: POR */
+#define I40E_RLAN_DCH_LINE_ATTR_CFG_ECC_EN_SHIFT 0
+#define I40E_RLAN_DCH_LINE_ATTR_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_RLAN_DCH_LINE_ATTR_CFG_ECC_EN_SHIFT)
+#define I40E_RLAN_DCH_LINE_ATTR_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_RLAN_DCH_LINE_ATTR_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_RLAN_DCH_LINE_ATTR_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_RLAN_DCH_LINE_ATTR_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_RLAN_DCH_LINE_ATTR_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_RLAN_DCH_LINE_ATTR_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_RLAN_DCH_LINE_ATTR_CFG_LS_FORCE_SHIFT 3
+#define I40E_RLAN_DCH_LINE_ATTR_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_RLAN_DCH_LINE_ATTR_CFG_LS_FORCE_SHIFT)
+#define I40E_RLAN_DCH_LINE_ATTR_CFG_LS_BYPASS_SHIFT 4
+#define I40E_RLAN_DCH_LINE_ATTR_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_RLAN_DCH_LINE_ATTR_CFG_LS_BYPASS_SHIFT)
+#define I40E_RLAN_DCH_LINE_ATTR_CFG_MASK_INT_SHIFT 5
+#define I40E_RLAN_DCH_LINE_ATTR_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_RLAN_DCH_LINE_ATTR_CFG_MASK_INT_SHIFT)
+#define I40E_RLAN_DCH_LINE_ATTR_CFG_FIX_CNT_SHIFT 8
+#define I40E_RLAN_DCH_LINE_ATTR_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_RLAN_DCH_LINE_ATTR_CFG_FIX_CNT_SHIFT)
+#define I40E_RLAN_DCH_LINE_ATTR_CFG_ERR_CNT_SHIFT 9
+#define I40E_RLAN_DCH_LINE_ATTR_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_RLAN_DCH_LINE_ATTR_CFG_ERR_CNT_SHIFT)
+#define I40E_RLAN_DCH_LINE_ATTR_CFG_RME_SHIFT 12
+#define I40E_RLAN_DCH_LINE_ATTR_CFG_RME_MASK I40E_MASK(0x1, I40E_RLAN_DCH_LINE_ATTR_CFG_RME_SHIFT)
+#define I40E_RLAN_DCH_LINE_ATTR_CFG_RM_SHIFT 16
+#define I40E_RLAN_DCH_LINE_ATTR_CFG_RM_MASK I40E_MASK(0xF, I40E_RLAN_DCH_LINE_ATTR_CFG_RM_SHIFT)
+
+#define I40E_RLAN_DCH_LINE_ATTR_STATUS 0x0012A520 /* Reset: POR */
+#define I40E_RLAN_DCH_LINE_ATTR_STATUS_ECC_ERR_SHIFT 0
+#define I40E_RLAN_DCH_LINE_ATTR_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_RLAN_DCH_LINE_ATTR_STATUS_ECC_ERR_SHIFT)
+#define I40E_RLAN_DCH_LINE_ATTR_STATUS_ECC_FIX_SHIFT 1
+#define I40E_RLAN_DCH_LINE_ATTR_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_RLAN_DCH_LINE_ATTR_STATUS_ECC_FIX_SHIFT)
+#define I40E_RLAN_DCH_LINE_ATTR_STATUS_INIT_DONE_SHIFT 2
+#define I40E_RLAN_DCH_LINE_ATTR_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_RLAN_DCH_LINE_ATTR_STATUS_INIT_DONE_SHIFT)
+#define I40E_RLAN_DCH_LINE_ATTR_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_RLAN_DCH_LINE_ATTR_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_RLAN_DCH_LINE_ATTR_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_RLAN_DSCR_CH_BNK_CFG 0x0012A544 /* Reset: POR */
+#define I40E_RLAN_DSCR_CH_BNK_CFG_ECC_EN_SHIFT 0
+#define I40E_RLAN_DSCR_CH_BNK_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_RLAN_DSCR_CH_BNK_CFG_ECC_EN_SHIFT)
+#define I40E_RLAN_DSCR_CH_BNK_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_RLAN_DSCR_CH_BNK_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_RLAN_DSCR_CH_BNK_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_RLAN_DSCR_CH_BNK_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_RLAN_DSCR_CH_BNK_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_RLAN_DSCR_CH_BNK_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_RLAN_DSCR_CH_BNK_CFG_LS_FORCE_SHIFT 3
+#define I40E_RLAN_DSCR_CH_BNK_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_RLAN_DSCR_CH_BNK_CFG_LS_FORCE_SHIFT)
+#define I40E_RLAN_DSCR_CH_BNK_CFG_LS_BYPASS_SHIFT 4
+#define I40E_RLAN_DSCR_CH_BNK_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_RLAN_DSCR_CH_BNK_CFG_LS_BYPASS_SHIFT)
+#define I40E_RLAN_DSCR_CH_BNK_CFG_MASK_INT_SHIFT 5
+#define I40E_RLAN_DSCR_CH_BNK_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_RLAN_DSCR_CH_BNK_CFG_MASK_INT_SHIFT)
+#define I40E_RLAN_DSCR_CH_BNK_CFG_FIX_CNT_SHIFT 8
+#define I40E_RLAN_DSCR_CH_BNK_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_RLAN_DSCR_CH_BNK_CFG_FIX_CNT_SHIFT)
+#define I40E_RLAN_DSCR_CH_BNK_CFG_ERR_CNT_SHIFT 9
+#define I40E_RLAN_DSCR_CH_BNK_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_RLAN_DSCR_CH_BNK_CFG_ERR_CNT_SHIFT)
+#define I40E_RLAN_DSCR_CH_BNK_CFG_RME_SHIFT 12
+#define I40E_RLAN_DSCR_CH_BNK_CFG_RME_MASK I40E_MASK(0x1, I40E_RLAN_DSCR_CH_BNK_CFG_RME_SHIFT)
+#define I40E_RLAN_DSCR_CH_BNK_CFG_RM_SHIFT 16
+#define I40E_RLAN_DSCR_CH_BNK_CFG_RM_MASK I40E_MASK(0xF, I40E_RLAN_DSCR_CH_BNK_CFG_RM_SHIFT)
+
+#define I40E_RLAN_DSCR_CH_BNK_STATUS 0x0012A548 /* Reset: POR */
+#define I40E_RLAN_DSCR_CH_BNK_STATUS_ECC_ERR_SHIFT 0
+#define I40E_RLAN_DSCR_CH_BNK_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_RLAN_DSCR_CH_BNK_STATUS_ECC_ERR_SHIFT)
+#define I40E_RLAN_DSCR_CH_BNK_STATUS_ECC_FIX_SHIFT 1
+#define I40E_RLAN_DSCR_CH_BNK_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_RLAN_DSCR_CH_BNK_STATUS_ECC_FIX_SHIFT)
+#define I40E_RLAN_DSCR_CH_BNK_STATUS_INIT_DONE_SHIFT 2
+#define I40E_RLAN_DSCR_CH_BNK_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_RLAN_DSCR_CH_BNK_STATUS_INIT_DONE_SHIFT)
+#define I40E_RLAN_DSCR_CH_BNK_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_RLAN_DSCR_CH_BNK_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_RLAN_DSCR_CH_BNK_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_RLAN_DSCR_REQ_FIFO_CFG 0x0012A524 /* Reset: POR */
+#define I40E_RLAN_DSCR_REQ_FIFO_CFG_ECC_EN_SHIFT 0
+#define I40E_RLAN_DSCR_REQ_FIFO_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_RLAN_DSCR_REQ_FIFO_CFG_ECC_EN_SHIFT)
+#define I40E_RLAN_DSCR_REQ_FIFO_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_RLAN_DSCR_REQ_FIFO_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_RLAN_DSCR_REQ_FIFO_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_RLAN_DSCR_REQ_FIFO_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_RLAN_DSCR_REQ_FIFO_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_RLAN_DSCR_REQ_FIFO_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_RLAN_DSCR_REQ_FIFO_CFG_LS_FORCE_SHIFT 3
+#define I40E_RLAN_DSCR_REQ_FIFO_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_RLAN_DSCR_REQ_FIFO_CFG_LS_FORCE_SHIFT)
+#define I40E_RLAN_DSCR_REQ_FIFO_CFG_LS_BYPASS_SHIFT 4
+#define I40E_RLAN_DSCR_REQ_FIFO_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_RLAN_DSCR_REQ_FIFO_CFG_LS_BYPASS_SHIFT)
+#define I40E_RLAN_DSCR_REQ_FIFO_CFG_MASK_INT_SHIFT 5
+#define I40E_RLAN_DSCR_REQ_FIFO_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_RLAN_DSCR_REQ_FIFO_CFG_MASK_INT_SHIFT)
+#define I40E_RLAN_DSCR_REQ_FIFO_CFG_FIX_CNT_SHIFT 8
+#define I40E_RLAN_DSCR_REQ_FIFO_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_RLAN_DSCR_REQ_FIFO_CFG_FIX_CNT_SHIFT)
+#define I40E_RLAN_DSCR_REQ_FIFO_CFG_ERR_CNT_SHIFT 9
+#define I40E_RLAN_DSCR_REQ_FIFO_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_RLAN_DSCR_REQ_FIFO_CFG_ERR_CNT_SHIFT)
+#define I40E_RLAN_DSCR_REQ_FIFO_CFG_RME_SHIFT 12
+#define I40E_RLAN_DSCR_REQ_FIFO_CFG_RME_MASK I40E_MASK(0x1, I40E_RLAN_DSCR_REQ_FIFO_CFG_RME_SHIFT)
+#define I40E_RLAN_DSCR_REQ_FIFO_CFG_RM_SHIFT 16
+#define I40E_RLAN_DSCR_REQ_FIFO_CFG_RM_MASK I40E_MASK(0xF, I40E_RLAN_DSCR_REQ_FIFO_CFG_RM_SHIFT)
+
+#define I40E_RLAN_DSCR_REQ_FIFO_STATUS 0x0012A528 /* Reset: POR */
+#define I40E_RLAN_DSCR_REQ_FIFO_STATUS_ECC_ERR_SHIFT 0
+#define I40E_RLAN_DSCR_REQ_FIFO_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_RLAN_DSCR_REQ_FIFO_STATUS_ECC_ERR_SHIFT)
+#define I40E_RLAN_DSCR_REQ_FIFO_STATUS_ECC_FIX_SHIFT 1
+#define I40E_RLAN_DSCR_REQ_FIFO_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_RLAN_DSCR_REQ_FIFO_STATUS_ECC_FIX_SHIFT)
+#define I40E_RLAN_DSCR_REQ_FIFO_STATUS_INIT_DONE_SHIFT 2
+#define I40E_RLAN_DSCR_REQ_FIFO_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_RLAN_DSCR_REQ_FIFO_STATUS_INIT_DONE_SHIFT)
+#define I40E_RLAN_DSCR_REQ_FIFO_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_RLAN_DSCR_REQ_FIFO_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_RLAN_DSCR_REQ_FIFO_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_RLAN_ECC_COR_ERR 0x0012A550 /* Reset: POR */
+#define I40E_RLAN_ECC_COR_ERR_CNT_SHIFT 0
+#define I40E_RLAN_ECC_COR_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_RLAN_ECC_COR_ERR_CNT_SHIFT)
+
+#define I40E_RLAN_ECC_UNCOR_ERR 0x0012A54C /* Reset: POR */
+#define I40E_RLAN_ECC_UNCOR_ERR_CNT_SHIFT 0
+#define I40E_RLAN_ECC_UNCOR_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_RLAN_ECC_UNCOR_ERR_CNT_SHIFT)
+
+#define I40E_RLAN_TAILS_CFG 0x0012A53C /* Reset: POR */
+#define I40E_RLAN_TAILS_CFG_ECC_EN_SHIFT 0
+#define I40E_RLAN_TAILS_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_RLAN_TAILS_CFG_ECC_EN_SHIFT)
+#define I40E_RLAN_TAILS_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_RLAN_TAILS_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_RLAN_TAILS_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_RLAN_TAILS_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_RLAN_TAILS_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_RLAN_TAILS_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_RLAN_TAILS_CFG_LS_FORCE_SHIFT 3
+#define I40E_RLAN_TAILS_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_RLAN_TAILS_CFG_LS_FORCE_SHIFT)
+#define I40E_RLAN_TAILS_CFG_LS_BYPASS_SHIFT 4
+#define I40E_RLAN_TAILS_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_RLAN_TAILS_CFG_LS_BYPASS_SHIFT)
+#define I40E_RLAN_TAILS_CFG_MASK_INT_SHIFT 5
+#define I40E_RLAN_TAILS_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_RLAN_TAILS_CFG_MASK_INT_SHIFT)
+#define I40E_RLAN_TAILS_CFG_FIX_CNT_SHIFT 8
+#define I40E_RLAN_TAILS_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_RLAN_TAILS_CFG_FIX_CNT_SHIFT)
+#define I40E_RLAN_TAILS_CFG_ERR_CNT_SHIFT 9
+#define I40E_RLAN_TAILS_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_RLAN_TAILS_CFG_ERR_CNT_SHIFT)
+#define I40E_RLAN_TAILS_CFG_RME_SHIFT 12
+#define I40E_RLAN_TAILS_CFG_RME_MASK I40E_MASK(0x1, I40E_RLAN_TAILS_CFG_RME_SHIFT)
+#define I40E_RLAN_TAILS_CFG_RM_SHIFT 16
+#define I40E_RLAN_TAILS_CFG_RM_MASK I40E_MASK(0xF, I40E_RLAN_TAILS_CFG_RM_SHIFT)
+
+#define I40E_RLAN_TAILS_STATUS 0x0012A540 /* Reset: POR */
+#define I40E_RLAN_TAILS_STATUS_ECC_ERR_SHIFT 0
+#define I40E_RLAN_TAILS_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_RLAN_TAILS_STATUS_ECC_ERR_SHIFT)
+#define I40E_RLAN_TAILS_STATUS_ECC_FIX_SHIFT 1
+#define I40E_RLAN_TAILS_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_RLAN_TAILS_STATUS_ECC_FIX_SHIFT)
+#define I40E_RLAN_TAILS_STATUS_INIT_DONE_SHIFT 2
+#define I40E_RLAN_TAILS_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_RLAN_TAILS_STATUS_INIT_DONE_SHIFT)
+#define I40E_RLAN_TAILS_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_RLAN_TAILS_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_RLAN_TAILS_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_RPB_BACK_PRS_STAT 0x000AC948 /* Reset: CORER */
+#define I40E_RPB_BACK_PRS_STAT_PPRS_0_BP_SHIFT 0
+#define I40E_RPB_BACK_PRS_STAT_PPRS_0_BP_MASK I40E_MASK(0x1, I40E_RPB_BACK_PRS_STAT_PPRS_0_BP_SHIFT)
+#define I40E_RPB_BACK_PRS_STAT_PPRS_1_BP_SHIFT 1
+#define I40E_RPB_BACK_PRS_STAT_PPRS_1_BP_MASK I40E_MASK(0x1, I40E_RPB_BACK_PRS_STAT_PPRS_1_BP_SHIFT)
+#define I40E_RPB_BACK_PRS_STAT_PPRS_2_BP_SHIFT 2
+#define I40E_RPB_BACK_PRS_STAT_PPRS_2_BP_MASK I40E_MASK(0x1, I40E_RPB_BACK_PRS_STAT_PPRS_2_BP_SHIFT)
+#define I40E_RPB_BACK_PRS_STAT_PPRS_3_BP_SHIFT 3
+#define I40E_RPB_BACK_PRS_STAT_PPRS_3_BP_MASK I40E_MASK(0x1, I40E_RPB_BACK_PRS_STAT_PPRS_3_BP_SHIFT)
+#define I40E_RPB_BACK_PRS_STAT_STATUS_BP_SHIFT 4
+#define I40E_RPB_BACK_PRS_STAT_STATUS_BP_MASK I40E_MASK(0x1, I40E_RPB_BACK_PRS_STAT_STATUS_BP_SHIFT)
+#define I40E_RPB_BACK_PRS_STAT_RCU_BP_SHIFT 8
+#define I40E_RPB_BACK_PRS_STAT_RCU_BP_MASK I40E_MASK(0x1, I40E_RPB_BACK_PRS_STAT_RCU_BP_SHIFT)
+#define I40E_RPB_BACK_PRS_STAT_PE0_BP_SHIFT 9
+#define I40E_RPB_BACK_PRS_STAT_PE0_BP_MASK I40E_MASK(0x1, I40E_RPB_BACK_PRS_STAT_PE0_BP_SHIFT)
+#define I40E_RPB_BACK_PRS_STAT_PE1_BP_SHIFT 10
+#define I40E_RPB_BACK_PRS_STAT_PE1_BP_MASK I40E_MASK(0x1, I40E_RPB_BACK_PRS_STAT_PE1_BP_SHIFT)
+#define I40E_RPB_BACK_PRS_STAT_RDPU_BP_SHIFT 11
+#define I40E_RPB_BACK_PRS_STAT_RDPU_BP_MASK I40E_MASK(0x1, I40E_RPB_BACK_PRS_STAT_RDPU_BP_SHIFT)
+#define I40E_RPB_BACK_PRS_STAT_PORT_0_FC_SHIFT 12
+#define I40E_RPB_BACK_PRS_STAT_PORT_0_FC_MASK I40E_MASK(0x1, I40E_RPB_BACK_PRS_STAT_PORT_0_FC_SHIFT)
+#define I40E_RPB_BACK_PRS_STAT_PORT_1_FC_SHIFT 13
+#define I40E_RPB_BACK_PRS_STAT_PORT_1_FC_MASK I40E_MASK(0x1, I40E_RPB_BACK_PRS_STAT_PORT_1_FC_SHIFT)
+#define I40E_RPB_BACK_PRS_STAT_PORT_2_FC_SHIFT 14
+#define I40E_RPB_BACK_PRS_STAT_PORT_2_FC_MASK I40E_MASK(0x1, I40E_RPB_BACK_PRS_STAT_PORT_2_FC_SHIFT)
+#define I40E_RPB_BACK_PRS_STAT_PORT_3_FC_SHIFT 15
+#define I40E_RPB_BACK_PRS_STAT_PORT_3_FC_MASK I40E_MASK(0x1, I40E_RPB_BACK_PRS_STAT_PORT_3_FC_SHIFT)
+
+#define I40E_RPB_CC_CNT_MEM_CFG 0x000AC860 /* Reset: POR */
+#define I40E_RPB_CC_CNT_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_RPB_CC_CNT_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_RPB_CC_CNT_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_RPB_CC_CNT_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_RPB_CC_CNT_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_RPB_CC_CNT_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_RPB_CC_CNT_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_RPB_CC_CNT_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_RPB_CC_CNT_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_RPB_CC_CNT_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_RPB_CC_CNT_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_RPB_CC_CNT_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_RPB_CC_CNT_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_RPB_CC_CNT_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_RPB_CC_CNT_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_RPB_CC_CNT_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_RPB_CC_CNT_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_RPB_CC_CNT_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_RPB_CC_CNT_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_RPB_CC_CNT_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_RPB_CC_CNT_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_RPB_CC_CNT_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_RPB_CC_CNT_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_RPB_CC_CNT_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_RPB_CC_CNT_MEM_CFG_RME_SHIFT 12
+#define I40E_RPB_CC_CNT_MEM_CFG_RME_MASK I40E_MASK(0x1, I40E_RPB_CC_CNT_MEM_CFG_RME_SHIFT)
+#define I40E_RPB_CC_CNT_MEM_CFG_RM_SHIFT 16
+#define I40E_RPB_CC_CNT_MEM_CFG_RM_MASK I40E_MASK(0xF, I40E_RPB_CC_CNT_MEM_CFG_RM_SHIFT)
+
+#define I40E_RPB_CC_CNT_MEM_STATUS 0x000AC864 /* Reset: POR */
+#define I40E_RPB_CC_CNT_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_RPB_CC_CNT_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_RPB_CC_CNT_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_RPB_CC_CNT_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_RPB_CC_CNT_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_RPB_CC_CNT_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_RPB_CC_CNT_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_RPB_CC_CNT_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_RPB_CC_CNT_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_RPB_CC_CNT_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_RPB_CC_CNT_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_RPB_CC_CNT_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_RPB_CC_MEM_CFG 0x000AC890 /* Reset: POR */
+#define I40E_RPB_CC_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_RPB_CC_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_RPB_CC_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_RPB_CC_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_RPB_CC_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_RPB_CC_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_RPB_CC_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_RPB_CC_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_RPB_CC_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_RPB_CC_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_RPB_CC_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_RPB_CC_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_RPB_CC_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_RPB_CC_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_RPB_CC_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_RPB_CC_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_RPB_CC_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_RPB_CC_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_RPB_CC_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_RPB_CC_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_RPB_CC_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_RPB_CC_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_RPB_CC_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_RPB_CC_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_RPB_CC_MEM_CFG_RME_SHIFT 12
+#define I40E_RPB_CC_MEM_CFG_RME_MASK I40E_MASK(0x1, I40E_RPB_CC_MEM_CFG_RME_SHIFT)
+#define I40E_RPB_CC_MEM_CFG_RM_SHIFT 16
+#define I40E_RPB_CC_MEM_CFG_RM_MASK I40E_MASK(0xF, I40E_RPB_CC_MEM_CFG_RM_SHIFT)
+
+#define I40E_RPB_CC_MEM_STATUS 0x000AC894 /* Reset: POR */
+#define I40E_RPB_CC_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_RPB_CC_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_RPB_CC_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_RPB_CC_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_RPB_CC_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_RPB_CC_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_RPB_CC_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_RPB_CC_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_RPB_CC_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_RPB_CC_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_RPB_CC_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_RPB_CC_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_RPB_CLID_MEM_CFG 0x000AC870 /* Reset: POR */
+#define I40E_RPB_CLID_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_RPB_CLID_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_RPB_CLID_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_RPB_CLID_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_RPB_CLID_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_RPB_CLID_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_RPB_CLID_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_RPB_CLID_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_RPB_CLID_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_RPB_CLID_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_RPB_CLID_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_RPB_CLID_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_RPB_CLID_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_RPB_CLID_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_RPB_CLID_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_RPB_CLID_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_RPB_CLID_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_RPB_CLID_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_RPB_CLID_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_RPB_CLID_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_RPB_CLID_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_RPB_CLID_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_RPB_CLID_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_RPB_CLID_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_RPB_CLID_MEM_CFG_RME_SHIFT 12
+#define I40E_RPB_CLID_MEM_CFG_RME_MASK I40E_MASK(0x1, I40E_RPB_CLID_MEM_CFG_RME_SHIFT)
+#define I40E_RPB_CLID_MEM_CFG_RM_SHIFT 16
+#define I40E_RPB_CLID_MEM_CFG_RM_MASK I40E_MASK(0xF, I40E_RPB_CLID_MEM_CFG_RM_SHIFT)
+
+#define I40E_RPB_CLID_MEM_STATUS 0x000AC874 /* Reset: POR */
+#define I40E_RPB_CLID_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_RPB_CLID_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_RPB_CLID_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_RPB_CLID_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_RPB_CLID_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_RPB_CLID_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_RPB_CLID_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_RPB_CLID_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_RPB_CLID_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_RPB_CLID_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_RPB_CLID_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_RPB_CLID_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_RPB_DATA_PIPE_MEM_CFG(_i) (0x000AC898 + ((_i) * 4)) /* _i=0...7 */ /* Reset: POR */
+#define I40E_RPB_DATA_PIPE_MEM_CFG_MAX_INDEX 7
+#define I40E_RPB_DATA_PIPE_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_RPB_DATA_PIPE_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_RPB_DATA_PIPE_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_RPB_DATA_PIPE_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_RPB_DATA_PIPE_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_RPB_DATA_PIPE_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_RPB_DATA_PIPE_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_RPB_DATA_PIPE_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_RPB_DATA_PIPE_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_RPB_DATA_PIPE_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_RPB_DATA_PIPE_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_RPB_DATA_PIPE_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_RPB_DATA_PIPE_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_RPB_DATA_PIPE_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_RPB_DATA_PIPE_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_RPB_DATA_PIPE_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_RPB_DATA_PIPE_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_RPB_DATA_PIPE_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_RPB_DATA_PIPE_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_RPB_DATA_PIPE_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_RPB_DATA_PIPE_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_RPB_DATA_PIPE_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_RPB_DATA_PIPE_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_RPB_DATA_PIPE_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_RPB_DATA_PIPE_MEM_CFG_RME_SHIFT 12
+#define I40E_RPB_DATA_PIPE_MEM_CFG_RME_MASK I40E_MASK(0x1, I40E_RPB_DATA_PIPE_MEM_CFG_RME_SHIFT)
+#define I40E_RPB_DATA_PIPE_MEM_CFG_RM_SHIFT 16
+#define I40E_RPB_DATA_PIPE_MEM_CFG_RM_MASK I40E_MASK(0xF, I40E_RPB_DATA_PIPE_MEM_CFG_RM_SHIFT)
+
+#define I40E_RPB_DATA_PIPE_MEM_STATUS(_i) (0x000AC8B8 + ((_i) * 4)) /* _i=0...7 */ /* Reset: POR */
+#define I40E_RPB_DATA_PIPE_MEM_STATUS_MAX_INDEX 7
+#define I40E_RPB_DATA_PIPE_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_RPB_DATA_PIPE_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_RPB_DATA_PIPE_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_RPB_DATA_PIPE_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_RPB_DATA_PIPE_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_RPB_DATA_PIPE_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_RPB_DATA_PIPE_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_RPB_DATA_PIPE_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_RPB_DATA_PIPE_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_RPB_DATA_PIPE_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_RPB_DATA_PIPE_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_RPB_DATA_PIPE_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_RPB_DBG_ACC_CNT 0x000AC8E0 /* Reset: CORER */
+#define I40E_RPB_DBG_ACC_CNT_RPB_DBG_ACC_CNT_SHIFT 0
+#define I40E_RPB_DBG_ACC_CNT_RPB_DBG_ACC_CNT_MASK I40E_MASK(0xFFFF, I40E_RPB_DBG_ACC_CNT_RPB_DBG_ACC_CNT_SHIFT)
+
+#define I40E_RPB_DBG_ACC_CTL 0x000AC8E4 /* Reset: CORER */
+#define I40E_RPB_DBG_ACC_CTL_ADDR_SHIFT 0
+#define I40E_RPB_DBG_ACC_CTL_ADDR_MASK I40E_MASK(0xFFFF, I40E_RPB_DBG_ACC_CTL_ADDR_SHIFT)
+#define I40E_RPB_DBG_ACC_CTL_MEM_SEL_SHIFT 16
+#define I40E_RPB_DBG_ACC_CTL_MEM_SEL_MASK I40E_MASK(0xF, I40E_RPB_DBG_ACC_CTL_MEM_SEL_SHIFT)
+#define I40E_RPB_DBG_ACC_CTL_EXECUTE_SHIFT 20
+#define I40E_RPB_DBG_ACC_CTL_EXECUTE_MASK I40E_MASK(0x1, I40E_RPB_DBG_ACC_CTL_EXECUTE_SHIFT)
+
+#define I40E_RPB_DBG_ACC_DATA(_i) (0x000AC8EC + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_RPB_DBG_ACC_DATA_MAX_INDEX 7
+#define I40E_RPB_DBG_ACC_DATA_RPB_DBG_READ_DATA_SHIFT 0
+#define I40E_RPB_DBG_ACC_DATA_RPB_DBG_READ_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_RPB_DBG_ACC_DATA_RPB_DBG_READ_DATA_SHIFT)
+
+#define I40E_RPB_DBG_ACC_STAT 0x000AC8E8 /* Reset: CORER */
+#define I40E_RPB_DBG_ACC_STAT_READY_SHIFT 0
+#define I40E_RPB_DBG_ACC_STAT_READY_MASK I40E_MASK(0x1, I40E_RPB_DBG_ACC_STAT_READY_SHIFT)
+#define I40E_RPB_DBG_ACC_STAT_BUSY_SHIFT 1
+#define I40E_RPB_DBG_ACC_STAT_BUSY_MASK I40E_MASK(0x1, I40E_RPB_DBG_ACC_STAT_BUSY_SHIFT)
+#define I40E_RPB_DBG_ACC_STAT_ADDR_ERR_SHIFT 4
+#define I40E_RPB_DBG_ACC_STAT_ADDR_ERR_MASK I40E_MASK(0x1, I40E_RPB_DBG_ACC_STAT_ADDR_ERR_SHIFT)
+#define I40E_RPB_DBG_ACC_STAT_SEL_ERR_SHIFT 5
+#define I40E_RPB_DBG_ACC_STAT_SEL_ERR_MASK I40E_MASK(0x1, I40E_RPB_DBG_ACC_STAT_SEL_ERR_SHIFT)
+#define I40E_RPB_DBG_ACC_STAT_WD_ERR_SHIFT 6
+#define I40E_RPB_DBG_ACC_STAT_WD_ERR_MASK I40E_MASK(0x1, I40E_RPB_DBG_ACC_STAT_WD_ERR_SHIFT)
+
+#define I40E_RPB_DBG_FEAT 0x000AC940 /* Reset: CORER */
+#define I40E_RPB_DBG_FEAT_DISABLE_REPORTS_SHIFT 0
+#define I40E_RPB_DBG_FEAT_DISABLE_REPORTS_MASK I40E_MASK(0x1, I40E_RPB_DBG_FEAT_DISABLE_REPORTS_SHIFT)
+#define I40E_RPB_DBG_FEAT_DISABLE_RELEASE_SHIFT 1
+#define I40E_RPB_DBG_FEAT_DISABLE_RELEASE_MASK I40E_MASK(0x1, I40E_RPB_DBG_FEAT_DISABLE_RELEASE_SHIFT)
+#define I40E_RPB_DBG_FEAT_DISABLE_CC_SHIFT 2
+#define I40E_RPB_DBG_FEAT_DISABLE_CC_MASK I40E_MASK(0x1, I40E_RPB_DBG_FEAT_DISABLE_CC_SHIFT)
+#define I40E_RPB_DBG_FEAT_DISABLE_SHR_MODE_SHIFT 3
+#define I40E_RPB_DBG_FEAT_DISABLE_SHR_MODE_MASK I40E_MASK(0x1, I40E_RPB_DBG_FEAT_DISABLE_SHR_MODE_SHIFT)
+#define I40E_RPB_DBG_FEAT_DISABLE_RCU_EGR_SHIFT 4
+#define I40E_RPB_DBG_FEAT_DISABLE_RCU_EGR_MASK I40E_MASK(0x1, I40E_RPB_DBG_FEAT_DISABLE_RCU_EGR_SHIFT)
+#define I40E_RPB_DBG_FEAT_DISABLE_PE0_EGR_SHIFT 5
+#define I40E_RPB_DBG_FEAT_DISABLE_PE0_EGR_MASK I40E_MASK(0x1, I40E_RPB_DBG_FEAT_DISABLE_PE0_EGR_SHIFT)
+#define I40E_RPB_DBG_FEAT_DISABLE_PE1_EGR_SHIFT 6
+#define I40E_RPB_DBG_FEAT_DISABLE_PE1_EGR_MASK I40E_MASK(0x1, I40E_RPB_DBG_FEAT_DISABLE_PE1_EGR_SHIFT)
+#define I40E_RPB_DBG_FEAT_DISABLE_RDPU_EGR_SHIFT 7
+#define I40E_RPB_DBG_FEAT_DISABLE_RDPU_EGR_MASK I40E_MASK(0x1, I40E_RPB_DBG_FEAT_DISABLE_RDPU_EGR_SHIFT)
+#define I40E_RPB_DBG_FEAT_FORCE_FC_PORT_SHIFT 8
+#define I40E_RPB_DBG_FEAT_FORCE_FC_PORT_MASK I40E_MASK(0xF, I40E_RPB_DBG_FEAT_FORCE_FC_PORT_SHIFT)
+#define I40E_RPB_DBG_FEAT_FORCE_TPB_FC_PORT_SHIFT 12
+#define I40E_RPB_DBG_FEAT_FORCE_TPB_FC_PORT_MASK I40E_MASK(0xF, I40E_RPB_DBG_FEAT_FORCE_TPB_FC_PORT_SHIFT)
+#define I40E_RPB_DBG_FEAT_FORCE_SHR_MODE_SHIFT 16
+#define I40E_RPB_DBG_FEAT_FORCE_SHR_MODE_MASK I40E_MASK(0x1, I40E_RPB_DBG_FEAT_FORCE_SHR_MODE_SHIFT)
+#define I40E_RPB_DBG_FEAT_DISABLE_ECB_SYNC_SHIFT 17
+#define I40E_RPB_DBG_FEAT_DISABLE_ECB_SYNC_MASK I40E_MASK(0x1, I40E_RPB_DBG_FEAT_DISABLE_ECB_SYNC_SHIFT)
+#define I40E_RPB_DBG_FEAT_LTR_CLK_GEN_VAL_SHIFT 20
+#define I40E_RPB_DBG_FEAT_LTR_CLK_GEN_VAL_MASK I40E_MASK(0xFFF, I40E_RPB_DBG_FEAT_LTR_CLK_GEN_VAL_SHIFT)
+
+#define I40E_RPB_ECC_COR_ERR 0x000AC8DC /* Reset: POR */
+#define I40E_RPB_ECC_COR_ERR_CNT_SHIFT 0
+#define I40E_RPB_ECC_COR_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_RPB_ECC_COR_ERR_CNT_SHIFT)
+
+#define I40E_RPB_ECC_UNCOR_ERR 0x000AC8D8 /* Reset: POR */
+#define I40E_RPB_ECC_UNCOR_ERR_CNT_SHIFT 0
+#define I40E_RPB_ECC_UNCOR_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_RPB_ECC_UNCOR_ERR_CNT_SHIFT)
+
+#define I40E_RPB_EGR_CNT 0x000AC94C /* Reset: CORER */
+#define I40E_RPB_EGR_CNT_RCU_REQ_SHIFT 0
+#define I40E_RPB_EGR_CNT_RCU_REQ_MASK I40E_MASK(0xFF, I40E_RPB_EGR_CNT_RCU_REQ_SHIFT)
+#define I40E_RPB_EGR_CNT_PE_0_REQ_SHIFT 8
+#define I40E_RPB_EGR_CNT_PE_0_REQ_MASK I40E_MASK(0xFF, I40E_RPB_EGR_CNT_PE_0_REQ_SHIFT)
+#define I40E_RPB_EGR_CNT_PE_1_REQ_SHIFT 16
+#define I40E_RPB_EGR_CNT_PE_1_REQ_MASK I40E_MASK(0xFF, I40E_RPB_EGR_CNT_PE_1_REQ_SHIFT)
+#define I40E_RPB_EGR_CNT_RDPU_REQ_SHIFT 24
+#define I40E_RPB_EGR_CNT_RDPU_REQ_MASK I40E_MASK(0xFF, I40E_RPB_EGR_CNT_RDPU_REQ_SHIFT)
+
+#define I40E_RPB_GEN_DBG_CNT 0x000AC944 /* Reset: CORER */
+#define I40E_RPB_GEN_DBG_CNT_FREE_CC_SHIFT 0
+#define I40E_RPB_GEN_DBG_CNT_FREE_CC_MASK I40E_MASK(0x1FF, I40E_RPB_GEN_DBG_CNT_FREE_CC_SHIFT)
+#define I40E_RPB_GEN_DBG_CNT_FREE_CLIDS_SHIFT 16
+#define I40E_RPB_GEN_DBG_CNT_FREE_CLIDS_MASK I40E_MASK(0x3FFF, I40E_RPB_GEN_DBG_CNT_FREE_CLIDS_SHIFT)
+
+#define I40E_RPB_PKT_MEM_CFG 0x000AC868 /* Reset: POR */
+#define I40E_RPB_PKT_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_RPB_PKT_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_RPB_PKT_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_RPB_PKT_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_RPB_PKT_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_RPB_PKT_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_RPB_PKT_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_RPB_PKT_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_RPB_PKT_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_RPB_PKT_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_RPB_PKT_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_RPB_PKT_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_RPB_PKT_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_RPB_PKT_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_RPB_PKT_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_RPB_PKT_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_RPB_PKT_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_RPB_PKT_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_RPB_PKT_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_RPB_PKT_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_RPB_PKT_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_RPB_PKT_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_RPB_PKT_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_RPB_PKT_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_RPB_PKT_MEM_CFG_RME_SHIFT 12
+#define I40E_RPB_PKT_MEM_CFG_RME_MASK I40E_MASK(0x1, I40E_RPB_PKT_MEM_CFG_RME_SHIFT)
+#define I40E_RPB_PKT_MEM_CFG_RM_SHIFT 16
+#define I40E_RPB_PKT_MEM_CFG_RM_MASK I40E_MASK(0xF, I40E_RPB_PKT_MEM_CFG_RM_SHIFT)
+
+#define I40E_RPB_PKT_MEM_STATUS 0x000AC86C /* Reset: POR */
+#define I40E_RPB_PKT_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_RPB_PKT_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_RPB_PKT_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_RPB_PKT_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_RPB_PKT_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_RPB_PKT_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_RPB_PKT_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_RPB_PKT_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_RPB_PKT_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_RPB_PKT_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_RPB_PKT_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_RPB_PKT_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_RPB_PPDB_MEM_CFG 0x000AC878 /* Reset: POR */
+#define I40E_RPB_PPDB_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_RPB_PPDB_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_RPB_PPDB_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_RPB_PPDB_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_RPB_PPDB_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_RPB_PPDB_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_RPB_PPDB_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_RPB_PPDB_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_RPB_PPDB_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_RPB_PPDB_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_RPB_PPDB_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_RPB_PPDB_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_RPB_PPDB_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_RPB_PPDB_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_RPB_PPDB_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_RPB_PPDB_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_RPB_PPDB_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_RPB_PPDB_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_RPB_PPDB_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_RPB_PPDB_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_RPB_PPDB_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_RPB_PPDB_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_RPB_PPDB_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_RPB_PPDB_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_RPB_PPDB_MEM_CFG_RME_A_SHIFT 12
+#define I40E_RPB_PPDB_MEM_CFG_RME_A_MASK I40E_MASK(0x1, I40E_RPB_PPDB_MEM_CFG_RME_A_SHIFT)
+#define I40E_RPB_PPDB_MEM_CFG_RME_B_SHIFT 13
+#define I40E_RPB_PPDB_MEM_CFG_RME_B_MASK I40E_MASK(0x1, I40E_RPB_PPDB_MEM_CFG_RME_B_SHIFT)
+#define I40E_RPB_PPDB_MEM_CFG_RM_A_SHIFT 16
+#define I40E_RPB_PPDB_MEM_CFG_RM_A_MASK I40E_MASK(0xF, I40E_RPB_PPDB_MEM_CFG_RM_A_SHIFT)
+#define I40E_RPB_PPDB_MEM_CFG_RM_B_SHIFT 20
+#define I40E_RPB_PPDB_MEM_CFG_RM_B_MASK I40E_MASK(0xF, I40E_RPB_PPDB_MEM_CFG_RM_B_SHIFT)
+
+#define I40E_RPB_PPDB_MEM_STATUS 0x000AC87C /* Reset: POR */
+#define I40E_RPB_PPDB_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_RPB_PPDB_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_RPB_PPDB_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_RPB_PPDB_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_RPB_PPDB_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_RPB_PPDB_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_RPB_PPDB_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_RPB_PPDB_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_RPB_PPDB_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_RPB_PPDB_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_RPB_PPDB_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_RPB_PPDB_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_RPB_PPRS_ERR_CNT(_i) (0x000AC910 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_RPB_PPRS_ERR_CNT_MAX_INDEX 3
+#define I40E_RPB_PPRS_ERR_CNT_PKT_SIZE_ERR_SHIFT 0
+#define I40E_RPB_PPRS_ERR_CNT_PKT_SIZE_ERR_MASK I40E_MASK(0xFF, I40E_RPB_PPRS_ERR_CNT_PKT_SIZE_ERR_SHIFT)
+#define I40E_RPB_PPRS_ERR_CNT_VALID_BTW_PKT_SHIFT 8
+#define I40E_RPB_PPRS_ERR_CNT_VALID_BTW_PKT_MASK I40E_MASK(0xFF, I40E_RPB_PPRS_ERR_CNT_VALID_BTW_PKT_SHIFT)
+
+#define I40E_RPB_REPORT_LL_MEM_CFG 0x000AC880 /* Reset: POR */
+#define I40E_RPB_REPORT_LL_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_RPB_REPORT_LL_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_RPB_REPORT_LL_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_RPB_REPORT_LL_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_RPB_REPORT_LL_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_RPB_REPORT_LL_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_RPB_REPORT_LL_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_RPB_REPORT_LL_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_RPB_REPORT_LL_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_RPB_REPORT_LL_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_RPB_REPORT_LL_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_RPB_REPORT_LL_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_RPB_REPORT_LL_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_RPB_REPORT_LL_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_RPB_REPORT_LL_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_RPB_REPORT_LL_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_RPB_REPORT_LL_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_RPB_REPORT_LL_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_RPB_REPORT_LL_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_RPB_REPORT_LL_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_RPB_REPORT_LL_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_RPB_REPORT_LL_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_RPB_REPORT_LL_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_RPB_REPORT_LL_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_RPB_REPORT_LL_MEM_CFG_RME_SHIFT 12
+#define I40E_RPB_REPORT_LL_MEM_CFG_RME_MASK I40E_MASK(0x1, I40E_RPB_REPORT_LL_MEM_CFG_RME_SHIFT)
+#define I40E_RPB_REPORT_LL_MEM_CFG_RM_SHIFT 16
+#define I40E_RPB_REPORT_LL_MEM_CFG_RM_MASK I40E_MASK(0xF, I40E_RPB_REPORT_LL_MEM_CFG_RM_SHIFT)
+
+#define I40E_RPB_REPORT_LL_MEM_STATUS 0x000AC884 /* Reset: POR */
+#define I40E_RPB_REPORT_LL_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_RPB_REPORT_LL_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_RPB_REPORT_LL_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_RPB_REPORT_LL_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_RPB_REPORT_LL_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_RPB_REPORT_LL_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_RPB_REPORT_LL_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_RPB_REPORT_LL_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_RPB_REPORT_LL_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_RPB_REPORT_LL_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_RPB_REPORT_LL_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_RPB_REPORT_LL_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_RPB_REPORT_MEM_CFG 0x000AC888 /* Reset: POR */
+#define I40E_RPB_REPORT_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_RPB_REPORT_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_RPB_REPORT_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_RPB_REPORT_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_RPB_REPORT_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_RPB_REPORT_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_RPB_REPORT_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_RPB_REPORT_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_RPB_REPORT_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_RPB_REPORT_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_RPB_REPORT_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_RPB_REPORT_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_RPB_REPORT_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_RPB_REPORT_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_RPB_REPORT_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_RPB_REPORT_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_RPB_REPORT_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_RPB_REPORT_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_RPB_REPORT_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_RPB_REPORT_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_RPB_REPORT_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_RPB_REPORT_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_RPB_REPORT_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_RPB_REPORT_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_RPB_REPORT_MEM_CFG_RME_SHIFT 12
+#define I40E_RPB_REPORT_MEM_CFG_RME_MASK I40E_MASK(0x1, I40E_RPB_REPORT_MEM_CFG_RME_SHIFT)
+#define I40E_RPB_REPORT_MEM_CFG_RM_SHIFT 16
+#define I40E_RPB_REPORT_MEM_CFG_RM_MASK I40E_MASK(0xF, I40E_RPB_REPORT_MEM_CFG_RM_SHIFT)
+
+#define I40E_RPB_REPORT_MEM_STATUS 0x000AC88C /* Reset: POR */
+#define I40E_RPB_REPORT_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_RPB_REPORT_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_RPB_REPORT_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_RPB_REPORT_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_RPB_REPORT_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_RPB_REPORT_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_RPB_REPORT_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_RPB_REPORT_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_RPB_REPORT_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_RPB_REPORT_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_RPB_REPORT_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_RPB_REPORT_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_RPB_RPT_CNT 0x000AC950 /* Reset: CORER */
+#define I40E_RPB_RPT_CNT_RPB_RPT_CNT_SHIFT 0
+#define I40E_RPB_RPT_CNT_RPB_RPT_CNT_MASK I40E_MASK(0xFFFF, I40E_RPB_RPT_CNT_RPB_RPT_CNT_SHIFT)
+
+#define I40E_RPB_RPT_STAT 0x000AC954 /* Reset: CORER */
+#define I40E_RPB_RPT_STAT_RPB_RPT_STAT_SHIFT 0
+#define I40E_RPB_RPT_STAT_RPB_RPT_STAT_MASK I40E_MASK(0xFFFFFFFF, I40E_RPB_RPT_STAT_RPB_RPT_STAT_SHIFT)
+
+#define I40E_RPB_SHR_MOD_CNT 0x000AC90C /* Reset: CORER */
+#define I40E_RPB_SHR_MOD_CNT_RPB_SHR_MOD_CNT_SHIFT 0
+#define I40E_RPB_SHR_MOD_CNT_RPB_SHR_MOD_CNT_MASK I40E_MASK(0xFFFFFFFF, I40E_RPB_SHR_MOD_CNT_RPB_SHR_MOD_CNT_SHIFT)
+
+#define I40E_TCB_ECC_COR_ERR 0x000AE0A8 /* Reset: POR */
+#define I40E_TCB_ECC_COR_ERR_CNT_SHIFT 0
+#define I40E_TCB_ECC_COR_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_TCB_ECC_COR_ERR_CNT_SHIFT)
+
+#define I40E_TCB_ECC_UNCOR_ERR 0x000AE0A4 /* Reset: POR */
+#define I40E_TCB_ECC_UNCOR_ERR_CNT_SHIFT 0
+#define I40E_TCB_ECC_UNCOR_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_TCB_ECC_UNCOR_ERR_CNT_SHIFT)
+
+#define I40E_TCB_PORT_CMD_BUF_DBG_CTL 0x000AE0B4 /* Reset: CORER */
+#define I40E_TCB_PORT_CMD_BUF_DBG_CTL_ADR_SHIFT 0
+#define I40E_TCB_PORT_CMD_BUF_DBG_CTL_ADR_MASK I40E_MASK(0x3FFFF, I40E_TCB_PORT_CMD_BUF_DBG_CTL_ADR_SHIFT)
+#define I40E_TCB_PORT_CMD_BUF_DBG_CTL_DW_SEL_SHIFT 18
+#define I40E_TCB_PORT_CMD_BUF_DBG_CTL_DW_SEL_MASK I40E_MASK(0xFF, I40E_TCB_PORT_CMD_BUF_DBG_CTL_DW_SEL_SHIFT)
+#define I40E_TCB_PORT_CMD_BUF_DBG_CTL_RD_EN_SHIFT 30
+#define I40E_TCB_PORT_CMD_BUF_DBG_CTL_RD_EN_MASK I40E_MASK(0x1, I40E_TCB_PORT_CMD_BUF_DBG_CTL_RD_EN_SHIFT)
+#define I40E_TCB_PORT_CMD_BUF_DBG_CTL_DONE_SHIFT 31
+#define I40E_TCB_PORT_CMD_BUF_DBG_CTL_DONE_MASK I40E_MASK(0x1, I40E_TCB_PORT_CMD_BUF_DBG_CTL_DONE_SHIFT)
+
+#define I40E_TCB_PORT_CMD_BUF_DBG_DATA 0x000AE0CC /* Reset: CORER */
+#define I40E_TCB_PORT_CMD_BUF_DBG_DATA_RD_DW_SHIFT 0
+#define I40E_TCB_PORT_CMD_BUF_DBG_DATA_RD_DW_MASK I40E_MASK(0xFFFFFFFF, I40E_TCB_PORT_CMD_BUF_DBG_DATA_RD_DW_SHIFT)
+
+#define I40E_TCB_PORT_CMD_MNG_DBG_CTL 0x000AE0B8 /* Reset: CORER */
+#define I40E_TCB_PORT_CMD_MNG_DBG_CTL_ADR_SHIFT 0
+#define I40E_TCB_PORT_CMD_MNG_DBG_CTL_ADR_MASK I40E_MASK(0x3FFFF, I40E_TCB_PORT_CMD_MNG_DBG_CTL_ADR_SHIFT)
+#define I40E_TCB_PORT_CMD_MNG_DBG_CTL_DW_SEL_SHIFT 18
+#define I40E_TCB_PORT_CMD_MNG_DBG_CTL_DW_SEL_MASK I40E_MASK(0xFF, I40E_TCB_PORT_CMD_MNG_DBG_CTL_DW_SEL_SHIFT)
+#define I40E_TCB_PORT_CMD_MNG_DBG_CTL_RD_EN_SHIFT 30
+#define I40E_TCB_PORT_CMD_MNG_DBG_CTL_RD_EN_MASK I40E_MASK(0x1, I40E_TCB_PORT_CMD_MNG_DBG_CTL_RD_EN_SHIFT)
+#define I40E_TCB_PORT_CMD_MNG_DBG_CTL_DONE_SHIFT 31
+#define I40E_TCB_PORT_CMD_MNG_DBG_CTL_DONE_MASK I40E_MASK(0x1, I40E_TCB_PORT_CMD_MNG_DBG_CTL_DONE_SHIFT)
+
+#define I40E_TCB_PORT_CMD_MNG_DBG_DATA 0x000AE0C0 /* Reset: CORER */
+#define I40E_TCB_PORT_CMD_MNG_DBG_DATA_RD_DW_SHIFT 0
+#define I40E_TCB_PORT_CMD_MNG_DBG_DATA_RD_DW_MASK I40E_MASK(0xFFFFFFFF, I40E_TCB_PORT_CMD_MNG_DBG_DATA_RD_DW_SHIFT)
+
+#define I40E_TCB_WAIT_CMD_BUF_DBG_CTL 0x000AE0BC /* Reset: CORER */
+#define I40E_TCB_WAIT_CMD_BUF_DBG_CTL_ADR_SHIFT 0
+#define I40E_TCB_WAIT_CMD_BUF_DBG_CTL_ADR_MASK I40E_MASK(0x3FFFF, I40E_TCB_WAIT_CMD_BUF_DBG_CTL_ADR_SHIFT)
+#define I40E_TCB_WAIT_CMD_BUF_DBG_CTL_DW_SEL_SHIFT 18
+#define I40E_TCB_WAIT_CMD_BUF_DBG_CTL_DW_SEL_MASK I40E_MASK(0xFF, I40E_TCB_WAIT_CMD_BUF_DBG_CTL_DW_SEL_SHIFT)
+#define I40E_TCB_WAIT_CMD_BUF_DBG_CTL_RD_EN_SHIFT 30
+#define I40E_TCB_WAIT_CMD_BUF_DBG_CTL_RD_EN_MASK I40E_MASK(0x1, I40E_TCB_WAIT_CMD_BUF_DBG_CTL_RD_EN_SHIFT)
+#define I40E_TCB_WAIT_CMD_BUF_DBG_CTL_DONE_SHIFT 31
+#define I40E_TCB_WAIT_CMD_BUF_DBG_CTL_DONE_MASK I40E_MASK(0x1, I40E_TCB_WAIT_CMD_BUF_DBG_CTL_DONE_SHIFT)
+
+#define I40E_TCB_WAIT_CMD_BUF_DBG_DATA 0x000AE0C4 /* Reset: CORER */
+#define I40E_TCB_WAIT_CMD_BUF_DBG_DATA_RD_DW_SHIFT 0
+#define I40E_TCB_WAIT_CMD_BUF_DBG_DATA_RD_DW_MASK I40E_MASK(0xFFFFFFFF, I40E_TCB_WAIT_CMD_BUF_DBG_DATA_RD_DW_SHIFT)
+
+#define I40E_TCB_WAIT_CMD_MNG_DBG_CTL 0x000AE0B0 /* Reset: CORER */
+#define I40E_TCB_WAIT_CMD_MNG_DBG_CTL_ADR_SHIFT 0
+#define I40E_TCB_WAIT_CMD_MNG_DBG_CTL_ADR_MASK I40E_MASK(0x3FFFF, I40E_TCB_WAIT_CMD_MNG_DBG_CTL_ADR_SHIFT)
+#define I40E_TCB_WAIT_CMD_MNG_DBG_CTL_DW_SEL_SHIFT 18
+#define I40E_TCB_WAIT_CMD_MNG_DBG_CTL_DW_SEL_MASK I40E_MASK(0xFF, I40E_TCB_WAIT_CMD_MNG_DBG_CTL_DW_SEL_SHIFT)
+#define I40E_TCB_WAIT_CMD_MNG_DBG_CTL_RD_EN_SHIFT 30
+#define I40E_TCB_WAIT_CMD_MNG_DBG_CTL_RD_EN_MASK I40E_MASK(0x1, I40E_TCB_WAIT_CMD_MNG_DBG_CTL_RD_EN_SHIFT)
+#define I40E_TCB_WAIT_CMD_MNG_DBG_CTL_DONE_SHIFT 31
+#define I40E_TCB_WAIT_CMD_MNG_DBG_CTL_DONE_MASK I40E_MASK(0x1, I40E_TCB_WAIT_CMD_MNG_DBG_CTL_DONE_SHIFT)
+
+#define I40E_TCB_WAIT_CMD_MNG_DBG_DATA 0x000AE0C8 /* Reset: CORER */
+#define I40E_TCB_WAIT_CMD_MNG_DBG_DATA_RD_DW_SHIFT 0
+#define I40E_TCB_WAIT_CMD_MNG_DBG_DATA_RD_DW_MASK I40E_MASK(0xFFFFFFFF, I40E_TCB_WAIT_CMD_MNG_DBG_DATA_RD_DW_SHIFT)
+
+#define I40E_TDPU_CMD_MUX_MEM_CFG 0x00044304 /* Reset: POR */
+#define I40E_TDPU_CMD_MUX_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_TDPU_CMD_MUX_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_TDPU_CMD_MUX_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_TDPU_CMD_MUX_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_TDPU_CMD_MUX_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_TDPU_CMD_MUX_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_TDPU_CMD_MUX_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_TDPU_CMD_MUX_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_TDPU_CMD_MUX_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_TDPU_CMD_MUX_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_TDPU_CMD_MUX_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_TDPU_CMD_MUX_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_TDPU_CMD_MUX_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_TDPU_CMD_MUX_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_TDPU_CMD_MUX_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_TDPU_CMD_MUX_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_TDPU_CMD_MUX_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_TDPU_CMD_MUX_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_TDPU_CMD_MUX_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_TDPU_CMD_MUX_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_TDPU_CMD_MUX_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_TDPU_CMD_MUX_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_TDPU_CMD_MUX_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_TDPU_CMD_MUX_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_TDPU_CMD_MUX_MEM_CFG_RME_SHIFT 12
+#define I40E_TDPU_CMD_MUX_MEM_CFG_RME_MASK I40E_MASK(0x1, I40E_TDPU_CMD_MUX_MEM_CFG_RME_SHIFT)
+#define I40E_TDPU_CMD_MUX_MEM_CFG_RM_SHIFT 16
+#define I40E_TDPU_CMD_MUX_MEM_CFG_RM_MASK I40E_MASK(0xF, I40E_TDPU_CMD_MUX_MEM_CFG_RM_SHIFT)
+
+#define I40E_TDPU_CMD_MUX_MEM_STATUS 0x00044330 /* Reset: POR */
+#define I40E_TDPU_CMD_MUX_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_TDPU_CMD_MUX_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_TDPU_CMD_MUX_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_TDPU_CMD_MUX_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_TDPU_CMD_MUX_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_TDPU_CMD_MUX_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_TDPU_CMD_MUX_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_TDPU_CMD_MUX_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_TDPU_CMD_MUX_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_TDPU_CMD_MUX_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_TDPU_CMD_MUX_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_TDPU_CMD_MUX_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_TDPU_DAC_MEM_CFG 0x00044310 /* Reset: POR */
+#define I40E_TDPU_DAC_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_TDPU_DAC_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_TDPU_DAC_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_TDPU_DAC_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_TDPU_DAC_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_TDPU_DAC_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_TDPU_DAC_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_TDPU_DAC_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_TDPU_DAC_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_TDPU_DAC_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_TDPU_DAC_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_TDPU_DAC_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_TDPU_DAC_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_TDPU_DAC_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_TDPU_DAC_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_TDPU_DAC_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_TDPU_DAC_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_TDPU_DAC_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_TDPU_DAC_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_TDPU_DAC_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_TDPU_DAC_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_TDPU_DAC_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_TDPU_DAC_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_TDPU_DAC_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_TDPU_DAC_MEM_CFG_RME_SHIFT 12
+#define I40E_TDPU_DAC_MEM_CFG_RME_MASK I40E_MASK(0x1, I40E_TDPU_DAC_MEM_CFG_RME_SHIFT)
+#define I40E_TDPU_DAC_MEM_CFG_RM_SHIFT 16
+#define I40E_TDPU_DAC_MEM_CFG_RM_MASK I40E_MASK(0xF, I40E_TDPU_DAC_MEM_CFG_RM_SHIFT)
+
+#define I40E_TDPU_DAC_MEM_STATUS 0x00044328 /* Reset: POR */
+#define I40E_TDPU_DAC_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_TDPU_DAC_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_TDPU_DAC_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_TDPU_DAC_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_TDPU_DAC_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_TDPU_DAC_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_TDPU_DAC_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_TDPU_DAC_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_TDPU_DAC_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_TDPU_DAC_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_TDPU_DAC_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_TDPU_DAC_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_TDPU_DAC_MNG_MEM_CFG 0x0004430C /* Reset: POR */
+#define I40E_TDPU_DAC_MNG_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_TDPU_DAC_MNG_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_TDPU_DAC_MNG_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_TDPU_DAC_MNG_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_TDPU_DAC_MNG_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_TDPU_DAC_MNG_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_TDPU_DAC_MNG_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_TDPU_DAC_MNG_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_TDPU_DAC_MNG_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_TDPU_DAC_MNG_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_TDPU_DAC_MNG_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_TDPU_DAC_MNG_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_TDPU_DAC_MNG_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_TDPU_DAC_MNG_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_TDPU_DAC_MNG_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_TDPU_DAC_MNG_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_TDPU_DAC_MNG_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_TDPU_DAC_MNG_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_TDPU_DAC_MNG_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_TDPU_DAC_MNG_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_TDPU_DAC_MNG_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_TDPU_DAC_MNG_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_TDPU_DAC_MNG_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_TDPU_DAC_MNG_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_TDPU_DAC_MNG_MEM_CFG_RME_SHIFT 12
+#define I40E_TDPU_DAC_MNG_MEM_CFG_RME_MASK I40E_MASK(0x1, I40E_TDPU_DAC_MNG_MEM_CFG_RME_SHIFT)
+#define I40E_TDPU_DAC_MNG_MEM_CFG_RM_SHIFT 16
+#define I40E_TDPU_DAC_MNG_MEM_CFG_RM_MASK I40E_MASK(0xF, I40E_TDPU_DAC_MNG_MEM_CFG_RM_SHIFT)
+
+#define I40E_TDPU_DAC_MNG_MEM_STATUS 0x0004432C /* Reset: POR */
+#define I40E_TDPU_DAC_MNG_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_TDPU_DAC_MNG_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_TDPU_DAC_MNG_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_TDPU_DAC_MNG_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_TDPU_DAC_MNG_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_TDPU_DAC_MNG_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_TDPU_DAC_MNG_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_TDPU_DAC_MNG_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_TDPU_DAC_MNG_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_TDPU_DAC_MNG_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_TDPU_DAC_MNG_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_TDPU_DAC_MNG_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_TDPU_ECC_COR_ERR 0x0004433C /* Reset: POR */
+#define I40E_TDPU_ECC_COR_ERR_CNT_SHIFT 0
+#define I40E_TDPU_ECC_COR_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_TDPU_ECC_COR_ERR_CNT_SHIFT)
+
+#define I40E_TDPU_ECC_UNCOR_ERR 0x00044338 /* Reset: POR */
+#define I40E_TDPU_ECC_UNCOR_ERR_CNT_SHIFT 0
+#define I40E_TDPU_ECC_UNCOR_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_TDPU_ECC_UNCOR_ERR_CNT_SHIFT)
+
+#define I40E_TDPU_IMEM_CFG 0x000442F8 /* Reset: POR */
+#define I40E_TDPU_IMEM_CFG_ECC_EN_SHIFT 0
+#define I40E_TDPU_IMEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_TDPU_IMEM_CFG_ECC_EN_SHIFT)
+#define I40E_TDPU_IMEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_TDPU_IMEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_TDPU_IMEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_TDPU_IMEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_TDPU_IMEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_TDPU_IMEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_TDPU_IMEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_TDPU_IMEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_TDPU_IMEM_CFG_LS_FORCE_SHIFT)
+#define I40E_TDPU_IMEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_TDPU_IMEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_TDPU_IMEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_TDPU_IMEM_CFG_MASK_INT_SHIFT 5
+#define I40E_TDPU_IMEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_TDPU_IMEM_CFG_MASK_INT_SHIFT)
+#define I40E_TDPU_IMEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_TDPU_IMEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_TDPU_IMEM_CFG_FIX_CNT_SHIFT)
+#define I40E_TDPU_IMEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_TDPU_IMEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_TDPU_IMEM_CFG_ERR_CNT_SHIFT)
+#define I40E_TDPU_IMEM_CFG_RME_SHIFT 12
+#define I40E_TDPU_IMEM_CFG_RME_MASK I40E_MASK(0x1, I40E_TDPU_IMEM_CFG_RME_SHIFT)
+#define I40E_TDPU_IMEM_CFG_RM_SHIFT 16
+#define I40E_TDPU_IMEM_CFG_RM_MASK I40E_MASK(0xF, I40E_TDPU_IMEM_CFG_RM_SHIFT)
+
+#define I40E_TDPU_IMEM_STATUS 0x00044318 /* Reset: POR */
+#define I40E_TDPU_IMEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_TDPU_IMEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_TDPU_IMEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_TDPU_IMEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_TDPU_IMEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_TDPU_IMEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_TDPU_IMEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_TDPU_IMEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_TDPU_IMEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_TDPU_IMEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_TDPU_IMEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_TDPU_IMEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_TDPU_RECIPE_ADDR_CFG 0x000442FC /* Reset: POR */
+#define I40E_TDPU_RECIPE_ADDR_CFG_ECC_EN_SHIFT 0
+#define I40E_TDPU_RECIPE_ADDR_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_TDPU_RECIPE_ADDR_CFG_ECC_EN_SHIFT)
+#define I40E_TDPU_RECIPE_ADDR_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_TDPU_RECIPE_ADDR_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_TDPU_RECIPE_ADDR_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_TDPU_RECIPE_ADDR_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_TDPU_RECIPE_ADDR_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_TDPU_RECIPE_ADDR_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_TDPU_RECIPE_ADDR_CFG_LS_FORCE_SHIFT 3
+#define I40E_TDPU_RECIPE_ADDR_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_TDPU_RECIPE_ADDR_CFG_LS_FORCE_SHIFT)
+#define I40E_TDPU_RECIPE_ADDR_CFG_LS_BYPASS_SHIFT 4
+#define I40E_TDPU_RECIPE_ADDR_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_TDPU_RECIPE_ADDR_CFG_LS_BYPASS_SHIFT)
+#define I40E_TDPU_RECIPE_ADDR_CFG_MASK_INT_SHIFT 5
+#define I40E_TDPU_RECIPE_ADDR_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_TDPU_RECIPE_ADDR_CFG_MASK_INT_SHIFT)
+#define I40E_TDPU_RECIPE_ADDR_CFG_FIX_CNT_SHIFT 8
+#define I40E_TDPU_RECIPE_ADDR_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_TDPU_RECIPE_ADDR_CFG_FIX_CNT_SHIFT)
+#define I40E_TDPU_RECIPE_ADDR_CFG_ERR_CNT_SHIFT 9
+#define I40E_TDPU_RECIPE_ADDR_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_TDPU_RECIPE_ADDR_CFG_ERR_CNT_SHIFT)
+#define I40E_TDPU_RECIPE_ADDR_CFG_RME_SHIFT 12
+#define I40E_TDPU_RECIPE_ADDR_CFG_RME_MASK I40E_MASK(0x1, I40E_TDPU_RECIPE_ADDR_CFG_RME_SHIFT)
+#define I40E_TDPU_RECIPE_ADDR_CFG_RM_SHIFT 16
+#define I40E_TDPU_RECIPE_ADDR_CFG_RM_MASK I40E_MASK(0xF, I40E_TDPU_RECIPE_ADDR_CFG_RM_SHIFT)
+
+#define I40E_TDPU_RECIPE_ADDR_STATUS 0x0004431C /* Reset: POR */
+#define I40E_TDPU_RECIPE_ADDR_STATUS_ECC_ERR_SHIFT 0
+#define I40E_TDPU_RECIPE_ADDR_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_TDPU_RECIPE_ADDR_STATUS_ECC_ERR_SHIFT)
+#define I40E_TDPU_RECIPE_ADDR_STATUS_ECC_FIX_SHIFT 1
+#define I40E_TDPU_RECIPE_ADDR_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_TDPU_RECIPE_ADDR_STATUS_ECC_FIX_SHIFT)
+#define I40E_TDPU_RECIPE_ADDR_STATUS_INIT_DONE_SHIFT 2
+#define I40E_TDPU_RECIPE_ADDR_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_TDPU_RECIPE_ADDR_STATUS_INIT_DONE_SHIFT)
+#define I40E_TDPU_RECIPE_ADDR_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_TDPU_RECIPE_ADDR_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_TDPU_RECIPE_ADDR_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_TDPU_TDRD_MEM_CFG 0x00044314 /* Reset: POR */
+#define I40E_TDPU_TDRD_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_TDPU_TDRD_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_TDPU_TDRD_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_TDPU_TDRD_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_TDPU_TDRD_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_TDPU_TDRD_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_TDPU_TDRD_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_TDPU_TDRD_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_TDPU_TDRD_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_TDPU_TDRD_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_TDPU_TDRD_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_TDPU_TDRD_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_TDPU_TDRD_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_TDPU_TDRD_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_TDPU_TDRD_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_TDPU_TDRD_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_TDPU_TDRD_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_TDPU_TDRD_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_TDPU_TDRD_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_TDPU_TDRD_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_TDPU_TDRD_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_TDPU_TDRD_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_TDPU_TDRD_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_TDPU_TDRD_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_TDPU_TDRD_MEM_CFG_RME_SHIFT 12
+#define I40E_TDPU_TDRD_MEM_CFG_RME_MASK I40E_MASK(0x1, I40E_TDPU_TDRD_MEM_CFG_RME_SHIFT)
+#define I40E_TDPU_TDRD_MEM_CFG_RM_SHIFT 16
+#define I40E_TDPU_TDRD_MEM_CFG_RM_MASK I40E_MASK(0xF, I40E_TDPU_TDRD_MEM_CFG_RM_SHIFT)
+
+#define I40E_TDPU_TDRD_MEM_STATUS 0x00044324 /* Reset: POR */
+#define I40E_TDPU_TDRD_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_TDPU_TDRD_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_TDPU_TDRD_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_TDPU_TDRD_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_TDPU_TDRD_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_TDPU_TDRD_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_TDPU_TDRD_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_TDPU_TDRD_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_TDPU_TDRD_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_TDPU_TDRD_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_TDPU_TDRD_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_TDPU_TDRD_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_TDPU_TDWR_MEM_CFG 0x00044308 /* Reset: POR */
+#define I40E_TDPU_TDWR_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_TDPU_TDWR_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_TDPU_TDWR_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_TDPU_TDWR_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_TDPU_TDWR_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_TDPU_TDWR_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_TDPU_TDWR_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_TDPU_TDWR_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_TDPU_TDWR_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_TDPU_TDWR_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_TDPU_TDWR_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_TDPU_TDWR_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_TDPU_TDWR_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_TDPU_TDWR_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_TDPU_TDWR_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_TDPU_TDWR_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_TDPU_TDWR_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_TDPU_TDWR_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_TDPU_TDWR_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_TDPU_TDWR_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_TDPU_TDWR_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_TDPU_TDWR_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_TDPU_TDWR_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_TDPU_TDWR_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_TDPU_TDWR_MEM_CFG_RME_SHIFT 12
+#define I40E_TDPU_TDWR_MEM_CFG_RME_MASK I40E_MASK(0x1, I40E_TDPU_TDWR_MEM_CFG_RME_SHIFT)
+#define I40E_TDPU_TDWR_MEM_CFG_RM_SHIFT 16
+#define I40E_TDPU_TDWR_MEM_CFG_RM_MASK I40E_MASK(0xF, I40E_TDPU_TDWR_MEM_CFG_RM_SHIFT)
+
+#define I40E_TDPU_TDWR_MEM_STATUS 0x00044334 /* Reset: POR */
+#define I40E_TDPU_TDWR_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_TDPU_TDWR_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_TDPU_TDWR_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_TDPU_TDWR_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_TDPU_TDWR_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_TDPU_TDWR_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_TDPU_TDWR_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_TDPU_TDWR_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_TDPU_TDWR_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_TDPU_TDWR_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_TDPU_TDWR_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_TDPU_TDWR_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_TDPU_VSI_LY2_INSERT_MEM_CFG 0x00044300 /* Reset: POR */
+#define I40E_TDPU_VSI_LY2_INSERT_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_TDPU_VSI_LY2_INSERT_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_TDPU_VSI_LY2_INSERT_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_TDPU_VSI_LY2_INSERT_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_TDPU_VSI_LY2_INSERT_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_TDPU_VSI_LY2_INSERT_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_TDPU_VSI_LY2_INSERT_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_TDPU_VSI_LY2_INSERT_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_TDPU_VSI_LY2_INSERT_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_TDPU_VSI_LY2_INSERT_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_TDPU_VSI_LY2_INSERT_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_TDPU_VSI_LY2_INSERT_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_TDPU_VSI_LY2_INSERT_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_TDPU_VSI_LY2_INSERT_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_TDPU_VSI_LY2_INSERT_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_TDPU_VSI_LY2_INSERT_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_TDPU_VSI_LY2_INSERT_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_TDPU_VSI_LY2_INSERT_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_TDPU_VSI_LY2_INSERT_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_TDPU_VSI_LY2_INSERT_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_TDPU_VSI_LY2_INSERT_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_TDPU_VSI_LY2_INSERT_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_TDPU_VSI_LY2_INSERT_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_TDPU_VSI_LY2_INSERT_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_TDPU_VSI_LY2_INSERT_MEM_CFG_RME_SHIFT 12
+#define I40E_TDPU_VSI_LY2_INSERT_MEM_CFG_RME_MASK I40E_MASK(0x1, I40E_TDPU_VSI_LY2_INSERT_MEM_CFG_RME_SHIFT)
+#define I40E_TDPU_VSI_LY2_INSERT_MEM_CFG_RM_SHIFT 16
+#define I40E_TDPU_VSI_LY2_INSERT_MEM_CFG_RM_MASK I40E_MASK(0xF, I40E_TDPU_VSI_LY2_INSERT_MEM_CFG_RM_SHIFT)
+
+#define I40E_TDPU_VSI_LY2_INSERT_MEM_STATUS 0x00044320 /* Reset: POR */
+#define I40E_TDPU_VSI_LY2_INSERT_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_TDPU_VSI_LY2_INSERT_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_TDPU_VSI_LY2_INSERT_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_TDPU_VSI_LY2_INSERT_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_TDPU_VSI_LY2_INSERT_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_TDPU_VSI_LY2_INSERT_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_TDPU_VSI_LY2_INSERT_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_TDPU_VSI_LY2_INSERT_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_TDPU_VSI_LY2_INSERT_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_TDPU_VSI_LY2_INSERT_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_TDPU_VSI_LY2_INSERT_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_TDPU_VSI_LY2_INSERT_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_TLAN_DEC_MEM_CFG 0x000E6490 /* Reset: POR */
+#define I40E_TLAN_DEC_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_TLAN_DEC_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_TLAN_DEC_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_TLAN_DEC_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_TLAN_DEC_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_TLAN_DEC_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_TLAN_DEC_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_TLAN_DEC_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_TLAN_DEC_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_TLAN_DEC_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_TLAN_DEC_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_TLAN_DEC_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_TLAN_DEC_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_TLAN_DEC_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_TLAN_DEC_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_TLAN_DEC_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_TLAN_DEC_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_TLAN_DEC_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_TLAN_DEC_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_TLAN_DEC_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_TLAN_DEC_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_TLAN_DEC_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_TLAN_DEC_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_TLAN_DEC_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_TLAN_DEC_MEM_CFG_RME_SHIFT 12
+#define I40E_TLAN_DEC_MEM_CFG_RME_MASK I40E_MASK(0x1, I40E_TLAN_DEC_MEM_CFG_RME_SHIFT)
+#define I40E_TLAN_DEC_MEM_CFG_RM_SHIFT 16
+#define I40E_TLAN_DEC_MEM_CFG_RM_MASK I40E_MASK(0xF, I40E_TLAN_DEC_MEM_CFG_RM_SHIFT)
+
+#define I40E_TLAN_DEC_MEM_STATUS 0x000E6494 /* Reset: POR */
+#define I40E_TLAN_DEC_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_TLAN_DEC_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_TLAN_DEC_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_TLAN_DEC_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_TLAN_DEC_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_TLAN_DEC_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_TLAN_DEC_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_TLAN_DEC_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_TLAN_DEC_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_TLAN_DEC_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_TLAN_DEC_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_TLAN_DEC_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_TLAN_DEC_MNG_MEM_CFG 0x000E64A0 /* Reset: POR */
+#define I40E_TLAN_DEC_MNG_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_TLAN_DEC_MNG_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_TLAN_DEC_MNG_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_TLAN_DEC_MNG_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_TLAN_DEC_MNG_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_TLAN_DEC_MNG_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_TLAN_DEC_MNG_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_TLAN_DEC_MNG_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_TLAN_DEC_MNG_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_TLAN_DEC_MNG_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_TLAN_DEC_MNG_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_TLAN_DEC_MNG_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_TLAN_DEC_MNG_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_TLAN_DEC_MNG_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_TLAN_DEC_MNG_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_TLAN_DEC_MNG_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_TLAN_DEC_MNG_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_TLAN_DEC_MNG_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_TLAN_DEC_MNG_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_TLAN_DEC_MNG_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_TLAN_DEC_MNG_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_TLAN_DEC_MNG_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_TLAN_DEC_MNG_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_TLAN_DEC_MNG_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_TLAN_DEC_MNG_MEM_CFG_RME_SHIFT 12
+#define I40E_TLAN_DEC_MNG_MEM_CFG_RME_MASK I40E_MASK(0x1, I40E_TLAN_DEC_MNG_MEM_CFG_RME_SHIFT)
+#define I40E_TLAN_DEC_MNG_MEM_CFG_RM_SHIFT 16
+#define I40E_TLAN_DEC_MNG_MEM_CFG_RM_MASK I40E_MASK(0xF, I40E_TLAN_DEC_MNG_MEM_CFG_RM_SHIFT)
+
+#define I40E_TLAN_DEC_MNG_MEM_STATUS 0x000E64A4 /* Reset: POR */
+#define I40E_TLAN_DEC_MNG_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_TLAN_DEC_MNG_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_TLAN_DEC_MNG_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_TLAN_DEC_MNG_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_TLAN_DEC_MNG_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_TLAN_DEC_MNG_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_TLAN_DEC_MNG_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_TLAN_DEC_MNG_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_TLAN_DEC_MNG_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_TLAN_DEC_MNG_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_TLAN_DEC_MNG_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_TLAN_DEC_MNG_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_TLAN_DEC_PTRS_MEM_CFG 0x000E6498 /* Reset: POR */
+#define I40E_TLAN_DEC_PTRS_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_TLAN_DEC_PTRS_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_TLAN_DEC_PTRS_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_TLAN_DEC_PTRS_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_TLAN_DEC_PTRS_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_TLAN_DEC_PTRS_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_TLAN_DEC_PTRS_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_TLAN_DEC_PTRS_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_TLAN_DEC_PTRS_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_TLAN_DEC_PTRS_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_TLAN_DEC_PTRS_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_TLAN_DEC_PTRS_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_TLAN_DEC_PTRS_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_TLAN_DEC_PTRS_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_TLAN_DEC_PTRS_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_TLAN_DEC_PTRS_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_TLAN_DEC_PTRS_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_TLAN_DEC_PTRS_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_TLAN_DEC_PTRS_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_TLAN_DEC_PTRS_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_TLAN_DEC_PTRS_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_TLAN_DEC_PTRS_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_TLAN_DEC_PTRS_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_TLAN_DEC_PTRS_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_TLAN_DEC_PTRS_MEM_CFG_RME_SHIFT 12
+#define I40E_TLAN_DEC_PTRS_MEM_CFG_RME_MASK I40E_MASK(0x1, I40E_TLAN_DEC_PTRS_MEM_CFG_RME_SHIFT)
+#define I40E_TLAN_DEC_PTRS_MEM_CFG_RM_SHIFT 16
+#define I40E_TLAN_DEC_PTRS_MEM_CFG_RM_MASK I40E_MASK(0xF, I40E_TLAN_DEC_PTRS_MEM_CFG_RM_SHIFT)
+
+#define I40E_TLAN_DEC_PTRS_MEM_STATUS 0x000E649C /* Reset: POR */
+#define I40E_TLAN_DEC_PTRS_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_TLAN_DEC_PTRS_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_TLAN_DEC_PTRS_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_TLAN_DEC_PTRS_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_TLAN_DEC_PTRS_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_TLAN_DEC_PTRS_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_TLAN_DEC_PTRS_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_TLAN_DEC_PTRS_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_TLAN_DEC_PTRS_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_TLAN_DEC_PTRS_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_TLAN_DEC_PTRS_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_TLAN_DEC_PTRS_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_TLAN_ECC_COR_ERR 0x000E64B4 /* Reset: POR */
+#define I40E_TLAN_ECC_COR_ERR_CNT_SHIFT 0
+#define I40E_TLAN_ECC_COR_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_TLAN_ECC_COR_ERR_CNT_SHIFT)
+
+#define I40E_TLAN_ECC_UNCOR_ERR 0x000E64B0 /* Reset: POR */
+#define I40E_TLAN_ECC_UNCOR_ERR_CNT_SHIFT 0
+#define I40E_TLAN_ECC_UNCOR_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_TLAN_ECC_UNCOR_ERR_CNT_SHIFT)
+
+#define I40E_TLAN_HEAD_WB_CFG 0x000E64A8 /* Reset: POR */
+#define I40E_TLAN_HEAD_WB_CFG_ECC_EN_SHIFT 0
+#define I40E_TLAN_HEAD_WB_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_TLAN_HEAD_WB_CFG_ECC_EN_SHIFT)
+#define I40E_TLAN_HEAD_WB_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_TLAN_HEAD_WB_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_TLAN_HEAD_WB_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_TLAN_HEAD_WB_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_TLAN_HEAD_WB_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_TLAN_HEAD_WB_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_TLAN_HEAD_WB_CFG_LS_FORCE_SHIFT 3
+#define I40E_TLAN_HEAD_WB_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_TLAN_HEAD_WB_CFG_LS_FORCE_SHIFT)
+#define I40E_TLAN_HEAD_WB_CFG_LS_BYPASS_SHIFT 4
+#define I40E_TLAN_HEAD_WB_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_TLAN_HEAD_WB_CFG_LS_BYPASS_SHIFT)
+#define I40E_TLAN_HEAD_WB_CFG_MASK_INT_SHIFT 5
+#define I40E_TLAN_HEAD_WB_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_TLAN_HEAD_WB_CFG_MASK_INT_SHIFT)
+#define I40E_TLAN_HEAD_WB_CFG_FIX_CNT_SHIFT 8
+#define I40E_TLAN_HEAD_WB_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_TLAN_HEAD_WB_CFG_FIX_CNT_SHIFT)
+#define I40E_TLAN_HEAD_WB_CFG_ERR_CNT_SHIFT 9
+#define I40E_TLAN_HEAD_WB_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_TLAN_HEAD_WB_CFG_ERR_CNT_SHIFT)
+#define I40E_TLAN_HEAD_WB_CFG_RME_SHIFT 12
+#define I40E_TLAN_HEAD_WB_CFG_RME_MASK I40E_MASK(0x1, I40E_TLAN_HEAD_WB_CFG_RME_SHIFT)
+#define I40E_TLAN_HEAD_WB_CFG_RM_SHIFT 16
+#define I40E_TLAN_HEAD_WB_CFG_RM_MASK I40E_MASK(0xF, I40E_TLAN_HEAD_WB_CFG_RM_SHIFT)
+
+#define I40E_TLAN_HEAD_WB_STATUS 0x000E64AC /* Reset: POR */
+#define I40E_TLAN_HEAD_WB_STATUS_ECC_ERR_SHIFT 0
+#define I40E_TLAN_HEAD_WB_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_TLAN_HEAD_WB_STATUS_ECC_ERR_SHIFT)
+#define I40E_TLAN_HEAD_WB_STATUS_ECC_FIX_SHIFT 1
+#define I40E_TLAN_HEAD_WB_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_TLAN_HEAD_WB_STATUS_ECC_FIX_SHIFT)
+#define I40E_TLAN_HEAD_WB_STATUS_INIT_DONE_SHIFT 2
+#define I40E_TLAN_HEAD_WB_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_TLAN_HEAD_WB_STATUS_INIT_DONE_SHIFT)
+#define I40E_TLAN_HEAD_WB_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_TLAN_HEAD_WB_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_TLAN_HEAD_WB_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_TPB_CLID_MEM_CFG 0x0009808C /* Reset: POR */
+#define I40E_TPB_CLID_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_TPB_CLID_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_TPB_CLID_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_TPB_CLID_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_TPB_CLID_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_TPB_CLID_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_TPB_CLID_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_TPB_CLID_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_TPB_CLID_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_TPB_CLID_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_TPB_CLID_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_TPB_CLID_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_TPB_CLID_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_TPB_CLID_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_TPB_CLID_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_TPB_CLID_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_TPB_CLID_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_TPB_CLID_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_TPB_CLID_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_TPB_CLID_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_TPB_CLID_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_TPB_CLID_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_TPB_CLID_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_TPB_CLID_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_TPB_CLID_MEM_CFG_RME_SHIFT 12
+#define I40E_TPB_CLID_MEM_CFG_RME_MASK I40E_MASK(0x1, I40E_TPB_CLID_MEM_CFG_RME_SHIFT)
+#define I40E_TPB_CLID_MEM_CFG_RM_SHIFT 16
+#define I40E_TPB_CLID_MEM_CFG_RM_MASK I40E_MASK(0xF, I40E_TPB_CLID_MEM_CFG_RM_SHIFT)
+
+#define I40E_TPB_CLID_MEM_DBG_CTL 0x000980C8 /* Reset: CORER */
+#define I40E_TPB_CLID_MEM_DBG_CTL_ADR_SHIFT 0
+#define I40E_TPB_CLID_MEM_DBG_CTL_ADR_MASK I40E_MASK(0x3FFFF, I40E_TPB_CLID_MEM_DBG_CTL_ADR_SHIFT)
+#define I40E_TPB_CLID_MEM_DBG_CTL_DW_SEL_SHIFT 18
+#define I40E_TPB_CLID_MEM_DBG_CTL_DW_SEL_MASK I40E_MASK(0xFF, I40E_TPB_CLID_MEM_DBG_CTL_DW_SEL_SHIFT)
+#define I40E_TPB_CLID_MEM_DBG_CTL_RD_EN_SHIFT 30
+#define I40E_TPB_CLID_MEM_DBG_CTL_RD_EN_MASK I40E_MASK(0x1, I40E_TPB_CLID_MEM_DBG_CTL_RD_EN_SHIFT)
+#define I40E_TPB_CLID_MEM_DBG_CTL_DONE_SHIFT 31
+#define I40E_TPB_CLID_MEM_DBG_CTL_DONE_MASK I40E_MASK(0x1, I40E_TPB_CLID_MEM_DBG_CTL_DONE_SHIFT)
+
+#define I40E_TPB_CLID_MEM_DBG_DATA 0x000980D4 /* Reset: CORER */
+#define I40E_TPB_CLID_MEM_DBG_DATA_RD_DW_SHIFT 0
+#define I40E_TPB_CLID_MEM_DBG_DATA_RD_DW_MASK I40E_MASK(0xFFFFFFFF, I40E_TPB_CLID_MEM_DBG_DATA_RD_DW_SHIFT)
+
+#define I40E_TPB_CLID_MEM_STATUS 0x00098090 /* Reset: POR */
+#define I40E_TPB_CLID_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_TPB_CLID_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_TPB_CLID_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_TPB_CLID_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_TPB_CLID_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_TPB_CLID_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_TPB_CLID_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_TPB_CLID_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_TPB_CLID_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_TPB_CLID_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_TPB_CLID_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_TPB_CLID_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_TPB_DBG_FEAT 0x00098084 /* Reset: CORER */
+#define I40E_TPB_DBG_FEAT_DIS_MIB_SHIFT 0
+#define I40E_TPB_DBG_FEAT_DIS_MIB_MASK I40E_MASK(0xF, I40E_TPB_DBG_FEAT_DIS_MIB_SHIFT)
+#define I40E_TPB_DBG_FEAT_FORCE_FC_IND_SHIFT 4
+#define I40E_TPB_DBG_FEAT_FORCE_FC_IND_MASK I40E_MASK(0xF, I40E_TPB_DBG_FEAT_FORCE_FC_IND_SHIFT)
+#define I40E_TPB_DBG_FEAT_OBEY_FC_OVR_SHIFT 8
+#define I40E_TPB_DBG_FEAT_OBEY_FC_OVR_MASK I40E_MASK(0xF, I40E_TPB_DBG_FEAT_OBEY_FC_OVR_SHIFT)
+#define I40E_TPB_DBG_FEAT_DIS_BURST_CTL_SHIFT 12
+#define I40E_TPB_DBG_FEAT_DIS_BURST_CTL_MASK I40E_MASK(0xF, I40E_TPB_DBG_FEAT_DIS_BURST_CTL_SHIFT)
+
+#define I40E_TPB_ECC_COR_ERR 0x000980B8 /* Reset: POR */
+#define I40E_TPB_ECC_COR_ERR_CNT_SHIFT 0
+#define I40E_TPB_ECC_COR_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_TPB_ECC_COR_ERR_CNT_SHIFT)
+
+#define I40E_TPB_ECC_UNCOR_ERR 0x000980B4 /* Reset: POR */
+#define I40E_TPB_ECC_UNCOR_ERR_CNT_SHIFT 0
+#define I40E_TPB_ECC_UNCOR_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_TPB_ECC_UNCOR_ERR_CNT_SHIFT)
+
+#define I40E_TPB_FC_OVR 0x00098088 /* Reset: CORER */
+#define I40E_TPB_FC_OVR_TPB_FC_OVR_SHIFT 0
+#define I40E_TPB_FC_OVR_TPB_FC_OVR_MASK I40E_MASK(0xFFFFFFFF, I40E_TPB_FC_OVR_TPB_FC_OVR_SHIFT)
+
+#define I40E_TPB_PKT_MEM_CFG 0x00098094 /* Reset: POR */
+#define I40E_TPB_PKT_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_TPB_PKT_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_TPB_PKT_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_TPB_PKT_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_TPB_PKT_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_TPB_PKT_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_TPB_PKT_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_TPB_PKT_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_TPB_PKT_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_TPB_PKT_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_TPB_PKT_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_TPB_PKT_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_TPB_PKT_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_TPB_PKT_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_TPB_PKT_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_TPB_PKT_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_TPB_PKT_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_TPB_PKT_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_TPB_PKT_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_TPB_PKT_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_TPB_PKT_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_TPB_PKT_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_TPB_PKT_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_TPB_PKT_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_TPB_PKT_MEM_CFG_RME_SHIFT 12
+#define I40E_TPB_PKT_MEM_CFG_RME_MASK I40E_MASK(0x1, I40E_TPB_PKT_MEM_CFG_RME_SHIFT)
+#define I40E_TPB_PKT_MEM_CFG_RM_SHIFT 16
+#define I40E_TPB_PKT_MEM_CFG_RM_MASK I40E_MASK(0xF, I40E_TPB_PKT_MEM_CFG_RM_SHIFT)
+
+#define I40E_TPB_PKT_MEM_DBG_CTL 0x000980CC /* Reset: CORER */
+#define I40E_TPB_PKT_MEM_DBG_CTL_ADR_SHIFT 0
+#define I40E_TPB_PKT_MEM_DBG_CTL_ADR_MASK I40E_MASK(0x3FFFF, I40E_TPB_PKT_MEM_DBG_CTL_ADR_SHIFT)
+#define I40E_TPB_PKT_MEM_DBG_CTL_DW_SEL_SHIFT 18
+#define I40E_TPB_PKT_MEM_DBG_CTL_DW_SEL_MASK I40E_MASK(0xFF, I40E_TPB_PKT_MEM_DBG_CTL_DW_SEL_SHIFT)
+#define I40E_TPB_PKT_MEM_DBG_CTL_RD_EN_SHIFT 30
+#define I40E_TPB_PKT_MEM_DBG_CTL_RD_EN_MASK I40E_MASK(0x1, I40E_TPB_PKT_MEM_DBG_CTL_RD_EN_SHIFT)
+#define I40E_TPB_PKT_MEM_DBG_CTL_DONE_SHIFT 31
+#define I40E_TPB_PKT_MEM_DBG_CTL_DONE_MASK I40E_MASK(0x1, I40E_TPB_PKT_MEM_DBG_CTL_DONE_SHIFT)
+
+#define I40E_TPB_PKT_MEM_DBG_DATA 0x000980E0 /* Reset: CORER */
+#define I40E_TPB_PKT_MEM_DBG_DATA_RD_DW_SHIFT 0
+#define I40E_TPB_PKT_MEM_DBG_DATA_RD_DW_MASK I40E_MASK(0xFFFFFFFF, I40E_TPB_PKT_MEM_DBG_DATA_RD_DW_SHIFT)
+
+#define I40E_TPB_PKT_MEM_STATUS 0x00098098 /* Reset: POR */
+#define I40E_TPB_PKT_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_TPB_PKT_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_TPB_PKT_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_TPB_PKT_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_TPB_PKT_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_TPB_PKT_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_TPB_PKT_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_TPB_PKT_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_TPB_PKT_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_TPB_PKT_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_TPB_PKT_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_TPB_PKT_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_TPB_REPORT_LL_MEM_CFG 0x0009809C /* Reset: POR */
+#define I40E_TPB_REPORT_LL_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_TPB_REPORT_LL_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_TPB_REPORT_LL_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_TPB_REPORT_LL_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_TPB_REPORT_LL_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_TPB_REPORT_LL_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_TPB_REPORT_LL_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_TPB_REPORT_LL_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_TPB_REPORT_LL_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_TPB_REPORT_LL_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_TPB_REPORT_LL_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_TPB_REPORT_LL_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_TPB_REPORT_LL_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_TPB_REPORT_LL_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_TPB_REPORT_LL_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_TPB_REPORT_LL_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_TPB_REPORT_LL_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_TPB_REPORT_LL_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_TPB_REPORT_LL_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_TPB_REPORT_LL_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_TPB_REPORT_LL_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_TPB_REPORT_LL_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_TPB_REPORT_LL_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_TPB_REPORT_LL_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_TPB_REPORT_LL_MEM_CFG_RME_SHIFT 12
+#define I40E_TPB_REPORT_LL_MEM_CFG_RME_MASK I40E_MASK(0x1, I40E_TPB_REPORT_LL_MEM_CFG_RME_SHIFT)
+#define I40E_TPB_REPORT_LL_MEM_CFG_RM_SHIFT 16
+#define I40E_TPB_REPORT_LL_MEM_CFG_RM_MASK I40E_MASK(0xF, I40E_TPB_REPORT_LL_MEM_CFG_RM_SHIFT)
+
+#define I40E_TPB_REPORT_LL_MEM_DBG_CTL 0x000980C0 /* Reset: CORER */
+#define I40E_TPB_REPORT_LL_MEM_DBG_CTL_ADR_SHIFT 0
+#define I40E_TPB_REPORT_LL_MEM_DBG_CTL_ADR_MASK I40E_MASK(0x3FFFF, I40E_TPB_REPORT_LL_MEM_DBG_CTL_ADR_SHIFT)
+#define I40E_TPB_REPORT_LL_MEM_DBG_CTL_DW_SEL_SHIFT 18
+#define I40E_TPB_REPORT_LL_MEM_DBG_CTL_DW_SEL_MASK I40E_MASK(0xFF, I40E_TPB_REPORT_LL_MEM_DBG_CTL_DW_SEL_SHIFT)
+#define I40E_TPB_REPORT_LL_MEM_DBG_CTL_RD_EN_SHIFT 30
+#define I40E_TPB_REPORT_LL_MEM_DBG_CTL_RD_EN_MASK I40E_MASK(0x1, I40E_TPB_REPORT_LL_MEM_DBG_CTL_RD_EN_SHIFT)
+#define I40E_TPB_REPORT_LL_MEM_DBG_CTL_DONE_SHIFT 31
+#define I40E_TPB_REPORT_LL_MEM_DBG_CTL_DONE_MASK I40E_MASK(0x1, I40E_TPB_REPORT_LL_MEM_DBG_CTL_DONE_SHIFT)
+
+#define I40E_TPB_REPORT_LL_MEM_DBG_DATA 0x000980D8 /* Reset: CORER */
+#define I40E_TPB_REPORT_LL_MEM_DBG_DATA_RD_DW_SHIFT 0
+#define I40E_TPB_REPORT_LL_MEM_DBG_DATA_RD_DW_MASK I40E_MASK(0xFFFFFFFF, I40E_TPB_REPORT_LL_MEM_DBG_DATA_RD_DW_SHIFT)
+
+#define I40E_TPB_REPORT_LL_MEM_STATUS 0x000980A0 /* Reset: POR */
+#define I40E_TPB_REPORT_LL_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_TPB_REPORT_LL_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_TPB_REPORT_LL_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_TPB_REPORT_LL_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_TPB_REPORT_LL_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_TPB_REPORT_LL_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_TPB_REPORT_LL_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_TPB_REPORT_LL_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_TPB_REPORT_LL_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_TPB_REPORT_LL_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_TPB_REPORT_LL_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_TPB_REPORT_LL_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_TPB_REPORT_MEM_CFG 0x000980A4 /* Reset: POR */
+#define I40E_TPB_REPORT_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_TPB_REPORT_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_TPB_REPORT_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_TPB_REPORT_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_TPB_REPORT_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_TPB_REPORT_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_TPB_REPORT_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_TPB_REPORT_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_TPB_REPORT_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_TPB_REPORT_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_TPB_REPORT_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_TPB_REPORT_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_TPB_REPORT_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_TPB_REPORT_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_TPB_REPORT_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_TPB_REPORT_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_TPB_REPORT_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_TPB_REPORT_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_TPB_REPORT_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_TPB_REPORT_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_TPB_REPORT_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_TPB_REPORT_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_TPB_REPORT_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_TPB_REPORT_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_TPB_REPORT_MEM_CFG_RME_SHIFT 12
+#define I40E_TPB_REPORT_MEM_CFG_RME_MASK I40E_MASK(0x1, I40E_TPB_REPORT_MEM_CFG_RME_SHIFT)
+#define I40E_TPB_REPORT_MEM_CFG_RM_SHIFT 16
+#define I40E_TPB_REPORT_MEM_CFG_RM_MASK I40E_MASK(0xF, I40E_TPB_REPORT_MEM_CFG_RM_SHIFT)
+
+#define I40E_TPB_REPORT_MEM_DBG_CTL 0x000980C4 /* Reset: CORER */
+#define I40E_TPB_REPORT_MEM_DBG_CTL_ADR_SHIFT 0
+#define I40E_TPB_REPORT_MEM_DBG_CTL_ADR_MASK I40E_MASK(0x3FFFF, I40E_TPB_REPORT_MEM_DBG_CTL_ADR_SHIFT)
+#define I40E_TPB_REPORT_MEM_DBG_CTL_DW_SEL_SHIFT 18
+#define I40E_TPB_REPORT_MEM_DBG_CTL_DW_SEL_MASK I40E_MASK(0xFF, I40E_TPB_REPORT_MEM_DBG_CTL_DW_SEL_SHIFT)
+#define I40E_TPB_REPORT_MEM_DBG_CTL_RD_EN_SHIFT 30
+#define I40E_TPB_REPORT_MEM_DBG_CTL_RD_EN_MASK I40E_MASK(0x1, I40E_TPB_REPORT_MEM_DBG_CTL_RD_EN_SHIFT)
+#define I40E_TPB_REPORT_MEM_DBG_CTL_DONE_SHIFT 31
+#define I40E_TPB_REPORT_MEM_DBG_CTL_DONE_MASK I40E_MASK(0x1, I40E_TPB_REPORT_MEM_DBG_CTL_DONE_SHIFT)
+
+#define I40E_TPB_REPORT_MEM_DBG_DATA 0x000980DC /* Reset: CORER */
+#define I40E_TPB_REPORT_MEM_DBG_DATA_RD_DW_SHIFT 0
+#define I40E_TPB_REPORT_MEM_DBG_DATA_RD_DW_MASK I40E_MASK(0xFFFFFFFF, I40E_TPB_REPORT_MEM_DBG_DATA_RD_DW_SHIFT)
+
+#define I40E_TPB_REPORT_MEM_STATUS 0x000980A8 /* Reset: POR */
+#define I40E_TPB_REPORT_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_TPB_REPORT_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_TPB_REPORT_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_TPB_REPORT_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_TPB_REPORT_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_TPB_REPORT_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_TPB_REPORT_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_TPB_REPORT_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_TPB_REPORT_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_TPB_REPORT_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_TPB_REPORT_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_TPB_REPORT_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_TPB_RPB_BUFF_MEM_CFG 0x000980AC /* Reset: POR */
+#define I40E_TPB_RPB_BUFF_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_TPB_RPB_BUFF_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_TPB_RPB_BUFF_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_TPB_RPB_BUFF_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_TPB_RPB_BUFF_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_TPB_RPB_BUFF_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_TPB_RPB_BUFF_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_TPB_RPB_BUFF_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_TPB_RPB_BUFF_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_TPB_RPB_BUFF_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_TPB_RPB_BUFF_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_TPB_RPB_BUFF_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_TPB_RPB_BUFF_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_TPB_RPB_BUFF_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_TPB_RPB_BUFF_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_TPB_RPB_BUFF_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_TPB_RPB_BUFF_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_TPB_RPB_BUFF_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_TPB_RPB_BUFF_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_TPB_RPB_BUFF_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_TPB_RPB_BUFF_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_TPB_RPB_BUFF_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_TPB_RPB_BUFF_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_TPB_RPB_BUFF_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_TPB_RPB_BUFF_MEM_CFG_RME_SHIFT 12
+#define I40E_TPB_RPB_BUFF_MEM_CFG_RME_MASK I40E_MASK(0x1, I40E_TPB_RPB_BUFF_MEM_CFG_RME_SHIFT)
+#define I40E_TPB_RPB_BUFF_MEM_CFG_RM_SHIFT 16
+#define I40E_TPB_RPB_BUFF_MEM_CFG_RM_MASK I40E_MASK(0xF, I40E_TPB_RPB_BUFF_MEM_CFG_RM_SHIFT)
+
+#define I40E_TPB_RPB_BUFF_MEM_DBG_CTL 0x000980BC /* Reset: CORER */
+#define I40E_TPB_RPB_BUFF_MEM_DBG_CTL_ADR_SHIFT 0
+#define I40E_TPB_RPB_BUFF_MEM_DBG_CTL_ADR_MASK I40E_MASK(0x3FFFF, I40E_TPB_RPB_BUFF_MEM_DBG_CTL_ADR_SHIFT)
+#define I40E_TPB_RPB_BUFF_MEM_DBG_CTL_DW_SEL_SHIFT 18
+#define I40E_TPB_RPB_BUFF_MEM_DBG_CTL_DW_SEL_MASK I40E_MASK(0xFF, I40E_TPB_RPB_BUFF_MEM_DBG_CTL_DW_SEL_SHIFT)
+#define I40E_TPB_RPB_BUFF_MEM_DBG_CTL_RD_EN_SHIFT 30
+#define I40E_TPB_RPB_BUFF_MEM_DBG_CTL_RD_EN_MASK I40E_MASK(0x1, I40E_TPB_RPB_BUFF_MEM_DBG_CTL_RD_EN_SHIFT)
+#define I40E_TPB_RPB_BUFF_MEM_DBG_CTL_DONE_SHIFT 31
+#define I40E_TPB_RPB_BUFF_MEM_DBG_CTL_DONE_MASK I40E_MASK(0x1, I40E_TPB_RPB_BUFF_MEM_DBG_CTL_DONE_SHIFT)
+
+#define I40E_TPB_RPB_BUFF_MEM_DBG_DATA 0x000980D0 /* Reset: CORER */
+#define I40E_TPB_RPB_BUFF_MEM_DBG_DATA_RD_DW_SHIFT 0
+#define I40E_TPB_RPB_BUFF_MEM_DBG_DATA_RD_DW_MASK I40E_MASK(0xFFFFFFFF, I40E_TPB_RPB_BUFF_MEM_DBG_DATA_RD_DW_SHIFT)
+
+#define I40E_TPB_RPB_BUFF_MEM_STATUS 0x000980B0 /* Reset: POR */
+#define I40E_TPB_RPB_BUFF_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_TPB_RPB_BUFF_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_TPB_RPB_BUFF_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_TPB_RPB_BUFF_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_TPB_RPB_BUFF_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_TPB_RPB_BUFF_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_TPB_RPB_BUFF_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_TPB_RPB_BUFF_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_TPB_RPB_BUFF_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_TPB_RPB_BUFF_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_TPB_RPB_BUFF_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_TPB_RPB_BUFF_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_TSCD_BRANCH_TABLE_CFG 0x000B2218 /* Reset: POR */
+#define I40E_TSCD_BRANCH_TABLE_CFG_ECC_EN_SHIFT 0
+#define I40E_TSCD_BRANCH_TABLE_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_TSCD_BRANCH_TABLE_CFG_ECC_EN_SHIFT)
+#define I40E_TSCD_BRANCH_TABLE_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_TSCD_BRANCH_TABLE_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_TSCD_BRANCH_TABLE_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_TSCD_BRANCH_TABLE_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_TSCD_BRANCH_TABLE_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_TSCD_BRANCH_TABLE_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_TSCD_BRANCH_TABLE_CFG_LS_FORCE_SHIFT 3
+#define I40E_TSCD_BRANCH_TABLE_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_TSCD_BRANCH_TABLE_CFG_LS_FORCE_SHIFT)
+#define I40E_TSCD_BRANCH_TABLE_CFG_LS_BYPASS_SHIFT 4
+#define I40E_TSCD_BRANCH_TABLE_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_TSCD_BRANCH_TABLE_CFG_LS_BYPASS_SHIFT)
+#define I40E_TSCD_BRANCH_TABLE_CFG_MASK_INT_SHIFT 5
+#define I40E_TSCD_BRANCH_TABLE_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_TSCD_BRANCH_TABLE_CFG_MASK_INT_SHIFT)
+#define I40E_TSCD_BRANCH_TABLE_CFG_FIX_CNT_SHIFT 8
+#define I40E_TSCD_BRANCH_TABLE_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_TSCD_BRANCH_TABLE_CFG_FIX_CNT_SHIFT)
+#define I40E_TSCD_BRANCH_TABLE_CFG_ERR_CNT_SHIFT 9
+#define I40E_TSCD_BRANCH_TABLE_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_TSCD_BRANCH_TABLE_CFG_ERR_CNT_SHIFT)
+#define I40E_TSCD_BRANCH_TABLE_CFG_RME_SHIFT 12
+#define I40E_TSCD_BRANCH_TABLE_CFG_RME_MASK I40E_MASK(0x1, I40E_TSCD_BRANCH_TABLE_CFG_RME_SHIFT)
+#define I40E_TSCD_BRANCH_TABLE_CFG_RM_SHIFT 16
+#define I40E_TSCD_BRANCH_TABLE_CFG_RM_MASK I40E_MASK(0xF, I40E_TSCD_BRANCH_TABLE_CFG_RM_SHIFT)
+
+#define I40E_TSCD_BRANCH_TABLE_STATUS 0x000B2230 /* Reset: POR */
+#define I40E_TSCD_BRANCH_TABLE_STATUS_ECC_ERR_SHIFT 0
+#define I40E_TSCD_BRANCH_TABLE_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_TSCD_BRANCH_TABLE_STATUS_ECC_ERR_SHIFT)
+#define I40E_TSCD_BRANCH_TABLE_STATUS_ECC_FIX_SHIFT 1
+#define I40E_TSCD_BRANCH_TABLE_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_TSCD_BRANCH_TABLE_STATUS_ECC_FIX_SHIFT)
+#define I40E_TSCD_BRANCH_TABLE_STATUS_INIT_DONE_SHIFT 2
+#define I40E_TSCD_BRANCH_TABLE_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_TSCD_BRANCH_TABLE_STATUS_INIT_DONE_SHIFT)
+#define I40E_TSCD_BRANCH_TABLE_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_TSCD_BRANCH_TABLE_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_TSCD_BRANCH_TABLE_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_TSCD_BW_LIMIT_TABLE_CFG 0x000B2204 /* Reset: POR */
+#define I40E_TSCD_BW_LIMIT_TABLE_CFG_ECC_EN_SHIFT 0
+#define I40E_TSCD_BW_LIMIT_TABLE_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_TSCD_BW_LIMIT_TABLE_CFG_ECC_EN_SHIFT)
+#define I40E_TSCD_BW_LIMIT_TABLE_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_TSCD_BW_LIMIT_TABLE_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_TSCD_BW_LIMIT_TABLE_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_TSCD_BW_LIMIT_TABLE_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_TSCD_BW_LIMIT_TABLE_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_TSCD_BW_LIMIT_TABLE_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_TSCD_BW_LIMIT_TABLE_CFG_LS_FORCE_SHIFT 3
+#define I40E_TSCD_BW_LIMIT_TABLE_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_TSCD_BW_LIMIT_TABLE_CFG_LS_FORCE_SHIFT)
+#define I40E_TSCD_BW_LIMIT_TABLE_CFG_LS_BYPASS_SHIFT 4
+#define I40E_TSCD_BW_LIMIT_TABLE_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_TSCD_BW_LIMIT_TABLE_CFG_LS_BYPASS_SHIFT)
+#define I40E_TSCD_BW_LIMIT_TABLE_CFG_MASK_INT_SHIFT 5
+#define I40E_TSCD_BW_LIMIT_TABLE_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_TSCD_BW_LIMIT_TABLE_CFG_MASK_INT_SHIFT)
+#define I40E_TSCD_BW_LIMIT_TABLE_CFG_FIX_CNT_SHIFT 8
+#define I40E_TSCD_BW_LIMIT_TABLE_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_TSCD_BW_LIMIT_TABLE_CFG_FIX_CNT_SHIFT)
+#define I40E_TSCD_BW_LIMIT_TABLE_CFG_ERR_CNT_SHIFT 9
+#define I40E_TSCD_BW_LIMIT_TABLE_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_TSCD_BW_LIMIT_TABLE_CFG_ERR_CNT_SHIFT)
+#define I40E_TSCD_BW_LIMIT_TABLE_CFG_RME_A_SHIFT 12
+#define I40E_TSCD_BW_LIMIT_TABLE_CFG_RME_A_MASK I40E_MASK(0x1, I40E_TSCD_BW_LIMIT_TABLE_CFG_RME_A_SHIFT)
+#define I40E_TSCD_BW_LIMIT_TABLE_CFG_RME_B_SHIFT 13
+#define I40E_TSCD_BW_LIMIT_TABLE_CFG_RME_B_MASK I40E_MASK(0x1, I40E_TSCD_BW_LIMIT_TABLE_CFG_RME_B_SHIFT)
+#define I40E_TSCD_BW_LIMIT_TABLE_CFG_RM_A_SHIFT 16
+#define I40E_TSCD_BW_LIMIT_TABLE_CFG_RM_A_MASK I40E_MASK(0xF, I40E_TSCD_BW_LIMIT_TABLE_CFG_RM_A_SHIFT)
+#define I40E_TSCD_BW_LIMIT_TABLE_CFG_RM_B_SHIFT 20
+#define I40E_TSCD_BW_LIMIT_TABLE_CFG_RM_B_MASK I40E_MASK(0xF, I40E_TSCD_BW_LIMIT_TABLE_CFG_RM_B_SHIFT)
+
+#define I40E_TSCD_BW_LIMIT_TABLE_STATUS 0x000B2228 /* Reset: POR */
+#define I40E_TSCD_BW_LIMIT_TABLE_STATUS_ECC_ERR_SHIFT 0
+#define I40E_TSCD_BW_LIMIT_TABLE_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_TSCD_BW_LIMIT_TABLE_STATUS_ECC_ERR_SHIFT)
+#define I40E_TSCD_BW_LIMIT_TABLE_STATUS_ECC_FIX_SHIFT 1
+#define I40E_TSCD_BW_LIMIT_TABLE_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_TSCD_BW_LIMIT_TABLE_STATUS_ECC_FIX_SHIFT)
+#define I40E_TSCD_BW_LIMIT_TABLE_STATUS_INIT_DONE_SHIFT 2
+#define I40E_TSCD_BW_LIMIT_TABLE_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_TSCD_BW_LIMIT_TABLE_STATUS_INIT_DONE_SHIFT)
+#define I40E_TSCD_BW_LIMIT_TABLE_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_TSCD_BW_LIMIT_TABLE_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_TSCD_BW_LIMIT_TABLE_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_TSCD_ECC_COR_ERR 0x000B223C /* Reset: POR */
+#define I40E_TSCD_ECC_COR_ERR_CNT_SHIFT 0
+#define I40E_TSCD_ECC_COR_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_TSCD_ECC_COR_ERR_CNT_SHIFT)
+
+#define I40E_TSCD_ECC_UNCOR_ERR 0x000B2238 /* Reset: POR */
+#define I40E_TSCD_ECC_UNCOR_ERR_CNT_SHIFT 0
+#define I40E_TSCD_ECC_UNCOR_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_TSCD_ECC_UNCOR_ERR_CNT_SHIFT)
+
+#define I40E_TSCD_NEXT_NODE_TABLE_CFG 0x000B220C /* Reset: POR */
+#define I40E_TSCD_NEXT_NODE_TABLE_CFG_ECC_EN_SHIFT 0
+#define I40E_TSCD_NEXT_NODE_TABLE_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_TSCD_NEXT_NODE_TABLE_CFG_ECC_EN_SHIFT)
+#define I40E_TSCD_NEXT_NODE_TABLE_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_TSCD_NEXT_NODE_TABLE_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_TSCD_NEXT_NODE_TABLE_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_TSCD_NEXT_NODE_TABLE_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_TSCD_NEXT_NODE_TABLE_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_TSCD_NEXT_NODE_TABLE_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_TSCD_NEXT_NODE_TABLE_CFG_LS_FORCE_SHIFT 3
+#define I40E_TSCD_NEXT_NODE_TABLE_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_TSCD_NEXT_NODE_TABLE_CFG_LS_FORCE_SHIFT)
+#define I40E_TSCD_NEXT_NODE_TABLE_CFG_LS_BYPASS_SHIFT 4
+#define I40E_TSCD_NEXT_NODE_TABLE_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_TSCD_NEXT_NODE_TABLE_CFG_LS_BYPASS_SHIFT)
+#define I40E_TSCD_NEXT_NODE_TABLE_CFG_MASK_INT_SHIFT 5
+#define I40E_TSCD_NEXT_NODE_TABLE_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_TSCD_NEXT_NODE_TABLE_CFG_MASK_INT_SHIFT)
+#define I40E_TSCD_NEXT_NODE_TABLE_CFG_FIX_CNT_SHIFT 8
+#define I40E_TSCD_NEXT_NODE_TABLE_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_TSCD_NEXT_NODE_TABLE_CFG_FIX_CNT_SHIFT)
+#define I40E_TSCD_NEXT_NODE_TABLE_CFG_ERR_CNT_SHIFT 9
+#define I40E_TSCD_NEXT_NODE_TABLE_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_TSCD_NEXT_NODE_TABLE_CFG_ERR_CNT_SHIFT)
+#define I40E_TSCD_NEXT_NODE_TABLE_CFG_RME_SHIFT 12
+#define I40E_TSCD_NEXT_NODE_TABLE_CFG_RME_MASK I40E_MASK(0x1, I40E_TSCD_NEXT_NODE_TABLE_CFG_RME_SHIFT)
+#define I40E_TSCD_NEXT_NODE_TABLE_CFG_RM_SHIFT 16
+#define I40E_TSCD_NEXT_NODE_TABLE_CFG_RM_MASK I40E_MASK(0xF, I40E_TSCD_NEXT_NODE_TABLE_CFG_RM_SHIFT)
+
+#define I40E_TSCD_NEXT_NODE_TABLE_STATUS 0x000B222C /* Reset: POR */
+#define I40E_TSCD_NEXT_NODE_TABLE_STATUS_ECC_ERR_SHIFT 0
+#define I40E_TSCD_NEXT_NODE_TABLE_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_TSCD_NEXT_NODE_TABLE_STATUS_ECC_ERR_SHIFT)
+#define I40E_TSCD_NEXT_NODE_TABLE_STATUS_ECC_FIX_SHIFT 1
+#define I40E_TSCD_NEXT_NODE_TABLE_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_TSCD_NEXT_NODE_TABLE_STATUS_ECC_FIX_SHIFT)
+#define I40E_TSCD_NEXT_NODE_TABLE_STATUS_INIT_DONE_SHIFT 2
+#define I40E_TSCD_NEXT_NODE_TABLE_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_TSCD_NEXT_NODE_TABLE_STATUS_INIT_DONE_SHIFT)
+#define I40E_TSCD_NEXT_NODE_TABLE_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_TSCD_NEXT_NODE_TABLE_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_TSCD_NEXT_NODE_TABLE_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_TSCD_NODE_TABLE_CFG 0x000B2210 /* Reset: POR */
+#define I40E_TSCD_NODE_TABLE_CFG_ECC_EN_SHIFT 0
+#define I40E_TSCD_NODE_TABLE_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_TSCD_NODE_TABLE_CFG_ECC_EN_SHIFT)
+#define I40E_TSCD_NODE_TABLE_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_TSCD_NODE_TABLE_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_TSCD_NODE_TABLE_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_TSCD_NODE_TABLE_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_TSCD_NODE_TABLE_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_TSCD_NODE_TABLE_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_TSCD_NODE_TABLE_CFG_LS_FORCE_SHIFT 3
+#define I40E_TSCD_NODE_TABLE_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_TSCD_NODE_TABLE_CFG_LS_FORCE_SHIFT)
+#define I40E_TSCD_NODE_TABLE_CFG_LS_BYPASS_SHIFT 4
+#define I40E_TSCD_NODE_TABLE_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_TSCD_NODE_TABLE_CFG_LS_BYPASS_SHIFT)
+#define I40E_TSCD_NODE_TABLE_CFG_MASK_INT_SHIFT 5
+#define I40E_TSCD_NODE_TABLE_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_TSCD_NODE_TABLE_CFG_MASK_INT_SHIFT)
+#define I40E_TSCD_NODE_TABLE_CFG_FIX_CNT_SHIFT 8
+#define I40E_TSCD_NODE_TABLE_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_TSCD_NODE_TABLE_CFG_FIX_CNT_SHIFT)
+#define I40E_TSCD_NODE_TABLE_CFG_ERR_CNT_SHIFT 9
+#define I40E_TSCD_NODE_TABLE_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_TSCD_NODE_TABLE_CFG_ERR_CNT_SHIFT)
+#define I40E_TSCD_NODE_TABLE_CFG_RME_SHIFT 12
+#define I40E_TSCD_NODE_TABLE_CFG_RME_MASK I40E_MASK(0x1, I40E_TSCD_NODE_TABLE_CFG_RME_SHIFT)
+#define I40E_TSCD_NODE_TABLE_CFG_RM_SHIFT 16
+#define I40E_TSCD_NODE_TABLE_CFG_RM_MASK I40E_MASK(0xF, I40E_TSCD_NODE_TABLE_CFG_RM_SHIFT)
+
+#define I40E_TSCD_NODE_TABLE_STATUS 0x000B2220 /* Reset: POR */
+#define I40E_TSCD_NODE_TABLE_STATUS_ECC_ERR_SHIFT 0
+#define I40E_TSCD_NODE_TABLE_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_TSCD_NODE_TABLE_STATUS_ECC_ERR_SHIFT)
+#define I40E_TSCD_NODE_TABLE_STATUS_ECC_FIX_SHIFT 1
+#define I40E_TSCD_NODE_TABLE_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_TSCD_NODE_TABLE_STATUS_ECC_FIX_SHIFT)
+#define I40E_TSCD_NODE_TABLE_STATUS_INIT_DONE_SHIFT 2
+#define I40E_TSCD_NODE_TABLE_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_TSCD_NODE_TABLE_STATUS_INIT_DONE_SHIFT)
+#define I40E_TSCD_NODE_TABLE_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_TSCD_NODE_TABLE_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_TSCD_NODE_TABLE_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_TSCD_RL_MAP_TABLE_CFG 0x000B2214 /* Reset: POR */
+#define I40E_TSCD_RL_MAP_TABLE_CFG_ECC_EN_SHIFT 0
+#define I40E_TSCD_RL_MAP_TABLE_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_TSCD_RL_MAP_TABLE_CFG_ECC_EN_SHIFT)
+#define I40E_TSCD_RL_MAP_TABLE_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_TSCD_RL_MAP_TABLE_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_TSCD_RL_MAP_TABLE_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_TSCD_RL_MAP_TABLE_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_TSCD_RL_MAP_TABLE_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_TSCD_RL_MAP_TABLE_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_TSCD_RL_MAP_TABLE_CFG_LS_FORCE_SHIFT 3
+#define I40E_TSCD_RL_MAP_TABLE_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_TSCD_RL_MAP_TABLE_CFG_LS_FORCE_SHIFT)
+#define I40E_TSCD_RL_MAP_TABLE_CFG_LS_BYPASS_SHIFT 4
+#define I40E_TSCD_RL_MAP_TABLE_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_TSCD_RL_MAP_TABLE_CFG_LS_BYPASS_SHIFT)
+#define I40E_TSCD_RL_MAP_TABLE_CFG_MASK_INT_SHIFT 5
+#define I40E_TSCD_RL_MAP_TABLE_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_TSCD_RL_MAP_TABLE_CFG_MASK_INT_SHIFT)
+#define I40E_TSCD_RL_MAP_TABLE_CFG_FIX_CNT_SHIFT 8
+#define I40E_TSCD_RL_MAP_TABLE_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_TSCD_RL_MAP_TABLE_CFG_FIX_CNT_SHIFT)
+#define I40E_TSCD_RL_MAP_TABLE_CFG_ERR_CNT_SHIFT 9
+#define I40E_TSCD_RL_MAP_TABLE_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_TSCD_RL_MAP_TABLE_CFG_ERR_CNT_SHIFT)
+#define I40E_TSCD_RL_MAP_TABLE_CFG_RME_SHIFT 12
+#define I40E_TSCD_RL_MAP_TABLE_CFG_RME_MASK I40E_MASK(0x1, I40E_TSCD_RL_MAP_TABLE_CFG_RME_SHIFT)
+#define I40E_TSCD_RL_MAP_TABLE_CFG_RM_SHIFT 16
+#define I40E_TSCD_RL_MAP_TABLE_CFG_RM_MASK I40E_MASK(0xF, I40E_TSCD_RL_MAP_TABLE_CFG_RM_SHIFT)
+
+#define I40E_TSCD_RL_MAP_TABLE_STATUS 0x000B2224 /* Reset: POR */
+#define I40E_TSCD_RL_MAP_TABLE_STATUS_ECC_ERR_SHIFT 0
+#define I40E_TSCD_RL_MAP_TABLE_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_TSCD_RL_MAP_TABLE_STATUS_ECC_ERR_SHIFT)
+#define I40E_TSCD_RL_MAP_TABLE_STATUS_ECC_FIX_SHIFT 1
+#define I40E_TSCD_RL_MAP_TABLE_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_TSCD_RL_MAP_TABLE_STATUS_ECC_FIX_SHIFT)
+#define I40E_TSCD_RL_MAP_TABLE_STATUS_INIT_DONE_SHIFT 2
+#define I40E_TSCD_RL_MAP_TABLE_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_TSCD_RL_MAP_TABLE_STATUS_INIT_DONE_SHIFT)
+#define I40E_TSCD_RL_MAP_TABLE_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_TSCD_RL_MAP_TABLE_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_TSCD_RL_MAP_TABLE_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_TSCD_SHARED_BW_LIMIT_TABLE_CFG 0x000B2200 /* Reset: POR */
+#define I40E_TSCD_SHARED_BW_LIMIT_TABLE_CFG_ECC_EN_SHIFT 0
+#define I40E_TSCD_SHARED_BW_LIMIT_TABLE_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_TSCD_SHARED_BW_LIMIT_TABLE_CFG_ECC_EN_SHIFT)
+#define I40E_TSCD_SHARED_BW_LIMIT_TABLE_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_TSCD_SHARED_BW_LIMIT_TABLE_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_TSCD_SHARED_BW_LIMIT_TABLE_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_TSCD_SHARED_BW_LIMIT_TABLE_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_TSCD_SHARED_BW_LIMIT_TABLE_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_TSCD_SHARED_BW_LIMIT_TABLE_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_TSCD_SHARED_BW_LIMIT_TABLE_CFG_LS_FORCE_SHIFT 3
+#define I40E_TSCD_SHARED_BW_LIMIT_TABLE_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_TSCD_SHARED_BW_LIMIT_TABLE_CFG_LS_FORCE_SHIFT)
+#define I40E_TSCD_SHARED_BW_LIMIT_TABLE_CFG_LS_BYPASS_SHIFT 4
+#define I40E_TSCD_SHARED_BW_LIMIT_TABLE_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_TSCD_SHARED_BW_LIMIT_TABLE_CFG_LS_BYPASS_SHIFT)
+#define I40E_TSCD_SHARED_BW_LIMIT_TABLE_CFG_MASK_INT_SHIFT 5
+#define I40E_TSCD_SHARED_BW_LIMIT_TABLE_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_TSCD_SHARED_BW_LIMIT_TABLE_CFG_MASK_INT_SHIFT)
+#define I40E_TSCD_SHARED_BW_LIMIT_TABLE_CFG_FIX_CNT_SHIFT 8
+#define I40E_TSCD_SHARED_BW_LIMIT_TABLE_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_TSCD_SHARED_BW_LIMIT_TABLE_CFG_FIX_CNT_SHIFT)
+#define I40E_TSCD_SHARED_BW_LIMIT_TABLE_CFG_ERR_CNT_SHIFT 9
+#define I40E_TSCD_SHARED_BW_LIMIT_TABLE_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_TSCD_SHARED_BW_LIMIT_TABLE_CFG_ERR_CNT_SHIFT)
+#define I40E_TSCD_SHARED_BW_LIMIT_TABLE_CFG_RME_SHIFT 12
+#define I40E_TSCD_SHARED_BW_LIMIT_TABLE_CFG_RME_MASK I40E_MASK(0x1, I40E_TSCD_SHARED_BW_LIMIT_TABLE_CFG_RME_SHIFT)
+#define I40E_TSCD_SHARED_BW_LIMIT_TABLE_CFG_RM_SHIFT 16
+#define I40E_TSCD_SHARED_BW_LIMIT_TABLE_CFG_RM_MASK I40E_MASK(0xF, I40E_TSCD_SHARED_BW_LIMIT_TABLE_CFG_RM_SHIFT)
+
+#define I40E_TSCD_SHARED_BW_LIMIT_TABLE_STATUS 0x000B2234 /* Reset: POR */
+#define I40E_TSCD_SHARED_BW_LIMIT_TABLE_STATUS_ECC_ERR_SHIFT 0
+#define I40E_TSCD_SHARED_BW_LIMIT_TABLE_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_TSCD_SHARED_BW_LIMIT_TABLE_STATUS_ECC_ERR_SHIFT)
+#define I40E_TSCD_SHARED_BW_LIMIT_TABLE_STATUS_ECC_FIX_SHIFT 1
+#define I40E_TSCD_SHARED_BW_LIMIT_TABLE_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_TSCD_SHARED_BW_LIMIT_TABLE_STATUS_ECC_FIX_SHIFT)
+#define I40E_TSCD_SHARED_BW_LIMIT_TABLE_STATUS_INIT_DONE_SHIFT 2
+#define I40E_TSCD_SHARED_BW_LIMIT_TABLE_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_TSCD_SHARED_BW_LIMIT_TABLE_STATUS_INIT_DONE_SHIFT)
+#define I40E_TSCD_SHARED_BW_LIMIT_TABLE_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_TSCD_SHARED_BW_LIMIT_TABLE_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_TSCD_SHARED_BW_LIMIT_TABLE_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_TXDBG_GL_CNTRL 0x000BC000 /* Reset: CORER */
+#define I40E_TXDBG_GL_CNTRL_TXDBG_MODE_SHIFT 0
+#define I40E_TXDBG_GL_CNTRL_TXDBG_MODE_MASK I40E_MASK(0x7, I40E_TXDBG_GL_CNTRL_TXDBG_MODE_SHIFT)
+
+#define I40E_TXDBG_RD_ENTITY 0x000BC004 /* Reset: CORER */
+#define I40E_TXDBG_RD_ENTITY_RD_LOG_SHIFT 0
+#define I40E_TXDBG_RD_ENTITY_RD_LOG_MASK I40E_MASK(0xFFFFFFFF, I40E_TXDBG_RD_ENTITY_RD_LOG_SHIFT)
+
+#define I40E_TXDBG_RD_ENTITY_CNTRL 0x000BC008 /* Reset: CORER */
+#define I40E_TXDBG_RD_ENTITY_CNTRL_RD_ENTITY_NUM_SHIFT 0
+#define I40E_TXDBG_RD_ENTITY_CNTRL_RD_ENTITY_NUM_MASK I40E_MASK(0xFFF, I40E_TXDBG_RD_ENTITY_CNTRL_RD_ENTITY_NUM_SHIFT)
+
+#define I40E_TXUPDBG_ITR_CAUSE_CTL 0x000E0018 /* Reset: CORER */
+#define I40E_TXUPDBG_ITR_CAUSE_CTL_FILTER_FLOW_EN_SHIFT 0
+#define I40E_TXUPDBG_ITR_CAUSE_CTL_FILTER_FLOW_EN_MASK I40E_MASK(0x1, I40E_TXUPDBG_ITR_CAUSE_CTL_FILTER_FLOW_EN_SHIFT)
+#define I40E_TXUPDBG_ITR_CAUSE_CTL_FLOW_ID_SHIFT 1
+#define I40E_TXUPDBG_ITR_CAUSE_CTL_FLOW_ID_MASK I40E_MASK(0xFFF, I40E_TXUPDBG_ITR_CAUSE_CTL_FLOW_ID_SHIFT)
+#define I40E_TXUPDBG_ITR_CAUSE_CTL_EVENT_ID_SHIFT 13
+#define I40E_TXUPDBG_ITR_CAUSE_CTL_EVENT_ID_MASK I40E_MASK(0x7, I40E_TXUPDBG_ITR_CAUSE_CTL_EVENT_ID_SHIFT)
+
+#define I40E_TXUPDBG_ITR_DONE_CTL 0x000E0020 /* Reset: CORER */
+#define I40E_TXUPDBG_ITR_DONE_CTL_FILTER_FLOW_EN_SHIFT 0
+#define I40E_TXUPDBG_ITR_DONE_CTL_FILTER_FLOW_EN_MASK I40E_MASK(0x1, I40E_TXUPDBG_ITR_DONE_CTL_FILTER_FLOW_EN_SHIFT)
+#define I40E_TXUPDBG_ITR_DONE_CTL_FLOW_ID_SHIFT 1
+#define I40E_TXUPDBG_ITR_DONE_CTL_FLOW_ID_MASK I40E_MASK(0xFFF, I40E_TXUPDBG_ITR_DONE_CTL_FLOW_ID_SHIFT)
+#define I40E_TXUPDBG_ITR_DONE_CTL_EVENT_ID_SHIFT 13
+#define I40E_TXUPDBG_ITR_DONE_CTL_EVENT_ID_MASK I40E_MASK(0x7, I40E_TXUPDBG_ITR_DONE_CTL_EVENT_ID_SHIFT)
+
+#define I40E_TXUPDBG_ITR_EXP_CTL 0x000E001C /* Reset: CORER */
+#define I40E_TXUPDBG_ITR_EXP_CTL_FILTER_FLOW_EN_SHIFT 0
+#define I40E_TXUPDBG_ITR_EXP_CTL_FILTER_FLOW_EN_MASK I40E_MASK(0x1, I40E_TXUPDBG_ITR_EXP_CTL_FILTER_FLOW_EN_SHIFT)
+#define I40E_TXUPDBG_ITR_EXP_CTL_FLOW_ID_SHIFT 1
+#define I40E_TXUPDBG_ITR_EXP_CTL_FLOW_ID_MASK I40E_MASK(0xFFF, I40E_TXUPDBG_ITR_EXP_CTL_FLOW_ID_SHIFT)
+#define I40E_TXUPDBG_ITR_EXP_CTL_EVENT_ID_SHIFT 13
+#define I40E_TXUPDBG_ITR_EXP_CTL_EVENT_ID_MASK I40E_MASK(0x7, I40E_TXUPDBG_ITR_EXP_CTL_EVENT_ID_SHIFT)
+
+#define I40E_TXUPDBG_MAC0IN_CTL 0x000E2008 /* Reset: CORER */
+#define I40E_TXUPDBG_MAC0IN_CTL_FILTER_FLOW_EN_SHIFT 0
+#define I40E_TXUPDBG_MAC0IN_CTL_FILTER_FLOW_EN_MASK I40E_MASK(0x1, I40E_TXUPDBG_MAC0IN_CTL_FILTER_FLOW_EN_SHIFT)
+#define I40E_TXUPDBG_MAC0IN_CTL_FLOW_ID_SHIFT 1
+#define I40E_TXUPDBG_MAC0IN_CTL_FLOW_ID_MASK I40E_MASK(0xFFF, I40E_TXUPDBG_MAC0IN_CTL_FLOW_ID_SHIFT)
+#define I40E_TXUPDBG_MAC0IN_CTL_EVENT_ID_SHIFT 13
+#define I40E_TXUPDBG_MAC0IN_CTL_EVENT_ID_MASK I40E_MASK(0x7, I40E_TXUPDBG_MAC0IN_CTL_EVENT_ID_SHIFT)
+
+#define I40E_TXUPDBG_MAC1IN_CTL 0x000E200C /* Reset: CORER */
+#define I40E_TXUPDBG_MAC1IN_CTL_FILTER_FLOW_EN_SHIFT 0
+#define I40E_TXUPDBG_MAC1IN_CTL_FILTER_FLOW_EN_MASK I40E_MASK(0x1, I40E_TXUPDBG_MAC1IN_CTL_FILTER_FLOW_EN_SHIFT)
+#define I40E_TXUPDBG_MAC1IN_CTL_FLOW_ID_SHIFT 1
+#define I40E_TXUPDBG_MAC1IN_CTL_FLOW_ID_MASK I40E_MASK(0xFFF, I40E_TXUPDBG_MAC1IN_CTL_FLOW_ID_SHIFT)
+#define I40E_TXUPDBG_MAC1IN_CTL_EVENT_ID_SHIFT 13
+#define I40E_TXUPDBG_MAC1IN_CTL_EVENT_ID_MASK I40E_MASK(0x7, I40E_TXUPDBG_MAC1IN_CTL_EVENT_ID_SHIFT)
+
+#define I40E_TXUPDBG_MAC2IN_CTL 0x000E2010 /* Reset: CORER */
+#define I40E_TXUPDBG_MAC2IN_CTL_FILTER_FLOW_EN_SHIFT 0
+#define I40E_TXUPDBG_MAC2IN_CTL_FILTER_FLOW_EN_MASK I40E_MASK(0x1, I40E_TXUPDBG_MAC2IN_CTL_FILTER_FLOW_EN_SHIFT)
+#define I40E_TXUPDBG_MAC2IN_CTL_FLOW_ID_SHIFT 1
+#define I40E_TXUPDBG_MAC2IN_CTL_FLOW_ID_MASK I40E_MASK(0xFFF, I40E_TXUPDBG_MAC2IN_CTL_FLOW_ID_SHIFT)
+#define I40E_TXUPDBG_MAC2IN_CTL_EVENT_ID_SHIFT 13
+#define I40E_TXUPDBG_MAC2IN_CTL_EVENT_ID_MASK I40E_MASK(0x7, I40E_TXUPDBG_MAC2IN_CTL_EVENT_ID_SHIFT)
+
+#define I40E_TXUPDBG_MAC3IN_CTL 0x000E2014 /* Reset: CORER */
+#define I40E_TXUPDBG_MAC3IN_CTL_FILTER_FLOW_EN_SHIFT 0
+#define I40E_TXUPDBG_MAC3IN_CTL_FILTER_FLOW_EN_MASK I40E_MASK(0x1, I40E_TXUPDBG_MAC3IN_CTL_FILTER_FLOW_EN_SHIFT)
+#define I40E_TXUPDBG_MAC3IN_CTL_FLOW_ID_SHIFT 1
+#define I40E_TXUPDBG_MAC3IN_CTL_FLOW_ID_MASK I40E_MASK(0xFFF, I40E_TXUPDBG_MAC3IN_CTL_FLOW_ID_SHIFT)
+#define I40E_TXUPDBG_MAC3IN_CTL_EVENT_ID_SHIFT 13
+#define I40E_TXUPDBG_MAC3IN_CTL_EVENT_ID_MASK I40E_MASK(0x7, I40E_TXUPDBG_MAC3IN_CTL_EVENT_ID_SHIFT)
+
+#define I40E_TXUPDBG_MSIX_CTL 0x000BC00C /* Reset: CORER */
+#define I40E_TXUPDBG_MSIX_CTL_FILTER_FLOW_EN_SHIFT 0
+#define I40E_TXUPDBG_MSIX_CTL_FILTER_FLOW_EN_MASK I40E_MASK(0x1, I40E_TXUPDBG_MSIX_CTL_FILTER_FLOW_EN_SHIFT)
+#define I40E_TXUPDBG_MSIX_CTL_FLOW_ID_SHIFT 1
+#define I40E_TXUPDBG_MSIX_CTL_FLOW_ID_MASK I40E_MASK(0xFFF, I40E_TXUPDBG_MSIX_CTL_FLOW_ID_SHIFT)
+#define I40E_TXUPDBG_MSIX_CTL_EVENT_ID_SHIFT 13
+#define I40E_TXUPDBG_MSIX_CTL_EVENT_ID_MASK I40E_MASK(0x7, I40E_TXUPDBG_MSIX_CTL_EVENT_ID_SHIFT)
+
+#define I40E_TXUPDBG_Q_SCHED_CTL 0x000E000C /* Reset: CORER */
+#define I40E_TXUPDBG_Q_SCHED_CTL_FILTER_FLOW_EN_SHIFT 0
+#define I40E_TXUPDBG_Q_SCHED_CTL_FILTER_FLOW_EN_MASK I40E_MASK(0x1, I40E_TXUPDBG_Q_SCHED_CTL_FILTER_FLOW_EN_SHIFT)
+#define I40E_TXUPDBG_Q_SCHED_CTL_FLOW_ID_SHIFT 1
+#define I40E_TXUPDBG_Q_SCHED_CTL_FLOW_ID_MASK I40E_MASK(0xFFF, I40E_TXUPDBG_Q_SCHED_CTL_FLOW_ID_SHIFT)
+#define I40E_TXUPDBG_Q_SCHED_CTL_FLOW_CHOOSER_SHIFT 13
+#define I40E_TXUPDBG_Q_SCHED_CTL_FLOW_CHOOSER_MASK I40E_MASK(0x3, I40E_TXUPDBG_Q_SCHED_CTL_FLOW_CHOOSER_SHIFT)
+#define I40E_TXUPDBG_Q_SCHED_CTL_EVENT_ID_SHIFT 15
+#define I40E_TXUPDBG_Q_SCHED_CTL_EVENT_ID_MASK I40E_MASK(0x7, I40E_TXUPDBG_Q_SCHED_CTL_EVENT_ID_SHIFT)
+
+#define I40E_TXUPDBG_QG_SCHED_CTL 0x000E0008 /* Reset: CORER */
+#define I40E_TXUPDBG_QG_SCHED_CTL_FILTER_FLOW_EN_SHIFT 0
+#define I40E_TXUPDBG_QG_SCHED_CTL_FILTER_FLOW_EN_MASK I40E_MASK(0x1, I40E_TXUPDBG_QG_SCHED_CTL_FILTER_FLOW_EN_SHIFT)
+#define I40E_TXUPDBG_QG_SCHED_CTL_FLOW_ID_SHIFT 1
+#define I40E_TXUPDBG_QG_SCHED_CTL_FLOW_ID_MASK I40E_MASK(0xFFF, I40E_TXUPDBG_QG_SCHED_CTL_FLOW_ID_SHIFT)
+#define I40E_TXUPDBG_QG_SCHED_CTL_EVENT_ID_SHIFT 13
+#define I40E_TXUPDBG_QG_SCHED_CTL_EVENT_ID_MASK I40E_MASK(0x7, I40E_TXUPDBG_QG_SCHED_CTL_EVENT_ID_SHIFT)
+
+#define I40E_TXUPDBG_TAIL_BUMP_CTL 0x000E0000 /* Reset: CORER */
+#define I40E_TXUPDBG_TAIL_BUMP_CTL_FILTER_FLOW_EN_SHIFT 0
+#define I40E_TXUPDBG_TAIL_BUMP_CTL_FILTER_FLOW_EN_MASK I40E_MASK(0x1, I40E_TXUPDBG_TAIL_BUMP_CTL_FILTER_FLOW_EN_SHIFT)
+#define I40E_TXUPDBG_TAIL_BUMP_CTL_FLOW_ID_SHIFT 1
+#define I40E_TXUPDBG_TAIL_BUMP_CTL_FLOW_ID_MASK I40E_MASK(0xFFF, I40E_TXUPDBG_TAIL_BUMP_CTL_FLOW_ID_SHIFT)
+#define I40E_TXUPDBG_TAIL_BUMP_CTL_EVENT_ID_SHIFT 13
+#define I40E_TXUPDBG_TAIL_BUMP_CTL_EVENT_ID_MASK I40E_MASK(0x7, I40E_TXUPDBG_TAIL_BUMP_CTL_EVENT_ID_SHIFT)
+
+#define I40E_TXUPDBG_TCBIN_CTL 0x000E0010 /* Reset: CORER */
+#define I40E_TXUPDBG_TCBIN_CTL_FILTER_FLOW_EN_SHIFT 0
+#define I40E_TXUPDBG_TCBIN_CTL_FILTER_FLOW_EN_MASK I40E_MASK(0x1, I40E_TXUPDBG_TCBIN_CTL_FILTER_FLOW_EN_SHIFT)
+#define I40E_TXUPDBG_TCBIN_CTL_FLOW_ID_SHIFT 1
+#define I40E_TXUPDBG_TCBIN_CTL_FLOW_ID_MASK I40E_MASK(0xFFF, I40E_TXUPDBG_TCBIN_CTL_FLOW_ID_SHIFT)
+#define I40E_TXUPDBG_TCBIN_CTL_FLOW_CHOOSER_SHIFT 13
+#define I40E_TXUPDBG_TCBIN_CTL_FLOW_CHOOSER_MASK I40E_MASK(0x3, I40E_TXUPDBG_TCBIN_CTL_FLOW_CHOOSER_SHIFT)
+#define I40E_TXUPDBG_TCBIN_CTL_EVENT_ID_SHIFT 15
+#define I40E_TXUPDBG_TCBIN_CTL_EVENT_ID_MASK I40E_MASK(0x7, I40E_TXUPDBG_TCBIN_CTL_EVENT_ID_SHIFT)
+
+#define I40E_TXUPDBG_TDPUIN_CTL 0x000E2000 /* Reset: CORER */
+#define I40E_TXUPDBG_TDPUIN_CTL_FILTER_FLOW_EN_SHIFT 0
+#define I40E_TXUPDBG_TDPUIN_CTL_FILTER_FLOW_EN_MASK I40E_MASK(0x1, I40E_TXUPDBG_TDPUIN_CTL_FILTER_FLOW_EN_SHIFT)
+#define I40E_TXUPDBG_TDPUIN_CTL_FLOW_ID_SHIFT 1
+#define I40E_TXUPDBG_TDPUIN_CTL_FLOW_ID_MASK I40E_MASK(0xFFF, I40E_TXUPDBG_TDPUIN_CTL_FLOW_ID_SHIFT)
+#define I40E_TXUPDBG_TDPUIN_CTL_FLOW_CHOOSER_SHIFT 13
+#define I40E_TXUPDBG_TDPUIN_CTL_FLOW_CHOOSER_MASK I40E_MASK(0x3, I40E_TXUPDBG_TDPUIN_CTL_FLOW_CHOOSER_SHIFT)
+#define I40E_TXUPDBG_TDPUIN_CTL_EVENT_ID_SHIFT 15
+#define I40E_TXUPDBG_TDPUIN_CTL_EVENT_ID_MASK I40E_MASK(0x7, I40E_TXUPDBG_TDPUIN_CTL_EVENT_ID_SHIFT)
+
+#define I40E_TXUPDBG_TLAN2_CTL 0x000E0014 /* Reset: CORER */
+#define I40E_TXUPDBG_TLAN2_CTL_FILTER_FLOW_EN_SHIFT 0
+#define I40E_TXUPDBG_TLAN2_CTL_FILTER_FLOW_EN_MASK I40E_MASK(0x1, I40E_TXUPDBG_TLAN2_CTL_FILTER_FLOW_EN_SHIFT)
+#define I40E_TXUPDBG_TLAN2_CTL_FLOW_ID_SHIFT 1
+#define I40E_TXUPDBG_TLAN2_CTL_FLOW_ID_MASK I40E_MASK(0xFFF, I40E_TXUPDBG_TLAN2_CTL_FLOW_ID_SHIFT)
+#define I40E_TXUPDBG_TLAN2_CTL_FLOW_CHOOSER_SHIFT 13
+#define I40E_TXUPDBG_TLAN2_CTL_FLOW_CHOOSER_MASK I40E_MASK(0x3, I40E_TXUPDBG_TLAN2_CTL_FLOW_CHOOSER_SHIFT)
+#define I40E_TXUPDBG_TLAN2_CTL_EVENT_ID_A_SHIFT 15
+#define I40E_TXUPDBG_TLAN2_CTL_EVENT_ID_A_MASK I40E_MASK(0x7, I40E_TXUPDBG_TLAN2_CTL_EVENT_ID_A_SHIFT)
+#define I40E_TXUPDBG_TLAN2_CTL_EVENT_ID_B_SHIFT 18
+#define I40E_TXUPDBG_TLAN2_CTL_EVENT_ID_B_MASK I40E_MASK(0x7, I40E_TXUPDBG_TLAN2_CTL_EVENT_ID_B_SHIFT)
+
+#define I40E_TXUPDBG_TPBIN_CTL 0x000E2004 /* Reset: CORER */
+#define I40E_TXUPDBG_TPBIN_CTL_FILTER_FLOW_EN_SHIFT 0
+#define I40E_TXUPDBG_TPBIN_CTL_FILTER_FLOW_EN_MASK I40E_MASK(0x1, I40E_TXUPDBG_TPBIN_CTL_FILTER_FLOW_EN_SHIFT)
+#define I40E_TXUPDBG_TPBIN_CTL_FLOW_ID_SHIFT 1
+#define I40E_TXUPDBG_TPBIN_CTL_FLOW_ID_MASK I40E_MASK(0xFFF, I40E_TXUPDBG_TPBIN_CTL_FLOW_ID_SHIFT)
+#define I40E_TXUPDBG_TPBIN_CTL_FLOW_CHOOSER_SHIFT 13
+#define I40E_TXUPDBG_TPBIN_CTL_FLOW_CHOOSER_MASK I40E_MASK(0x3, I40E_TXUPDBG_TPBIN_CTL_FLOW_CHOOSER_SHIFT)
+#define I40E_TXUPDBG_TPBIN_CTL_EVENT_ID_SHIFT 15
+#define I40E_TXUPDBG_TPBIN_CTL_EVENT_ID_MASK I40E_MASK(0x7, I40E_TXUPDBG_TPBIN_CTL_EVENT_ID_SHIFT)
+
+#define I40E_TXUPDBG_WA_CTL 0x000E0004 /* Reset: CORER */
+#define I40E_TXUPDBG_WA_CTL_FILTER_FLOW_EN_SHIFT 0
+#define I40E_TXUPDBG_WA_CTL_FILTER_FLOW_EN_MASK I40E_MASK(0x1, I40E_TXUPDBG_WA_CTL_FILTER_FLOW_EN_SHIFT)
+#define I40E_TXUPDBG_WA_CTL_FLOW_ID_SHIFT 1
+#define I40E_TXUPDBG_WA_CTL_FLOW_ID_MASK I40E_MASK(0xFFF, I40E_TXUPDBG_WA_CTL_FLOW_ID_SHIFT)
+#define I40E_TXUPDBG_WA_CTL_EVENT_ID_A_SHIFT 13
+#define I40E_TXUPDBG_WA_CTL_EVENT_ID_A_MASK I40E_MASK(0x7, I40E_TXUPDBG_WA_CTL_EVENT_ID_A_SHIFT)
+#define I40E_TXUPDBG_WA_CTL_EVENT_ID_B_SHIFT 16
+#define I40E_TXUPDBG_WA_CTL_EVENT_ID_B_MASK I40E_MASK(0x7, I40E_TXUPDBG_WA_CTL_EVENT_ID_B_SHIFT)
+
+#define I40E_WAIT_CMD_BUF_MEM_CFG 0x000AE088 /* Reset: POR */
+#define I40E_WAIT_CMD_BUF_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_WAIT_CMD_BUF_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_WAIT_CMD_BUF_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_WAIT_CMD_BUF_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_WAIT_CMD_BUF_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_WAIT_CMD_BUF_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_WAIT_CMD_BUF_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_WAIT_CMD_BUF_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_WAIT_CMD_BUF_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_WAIT_CMD_BUF_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_WAIT_CMD_BUF_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_WAIT_CMD_BUF_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_WAIT_CMD_BUF_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_WAIT_CMD_BUF_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_WAIT_CMD_BUF_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_WAIT_CMD_BUF_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_WAIT_CMD_BUF_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_WAIT_CMD_BUF_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_WAIT_CMD_BUF_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_WAIT_CMD_BUF_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_WAIT_CMD_BUF_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_WAIT_CMD_BUF_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_WAIT_CMD_BUF_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_WAIT_CMD_BUF_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_WAIT_CMD_BUF_MEM_CFG_RME_SHIFT 12
+#define I40E_WAIT_CMD_BUF_MEM_CFG_RME_MASK I40E_MASK(0x1, I40E_WAIT_CMD_BUF_MEM_CFG_RME_SHIFT)
+#define I40E_WAIT_CMD_BUF_MEM_CFG_RM_SHIFT 16
+#define I40E_WAIT_CMD_BUF_MEM_CFG_RM_MASK I40E_MASK(0xF, I40E_WAIT_CMD_BUF_MEM_CFG_RM_SHIFT)
+
+#define I40E_WAIT_CMD_BUF_MEM_STATUS 0x000AE08C /* Reset: POR */
+#define I40E_WAIT_CMD_BUF_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_WAIT_CMD_BUF_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_WAIT_CMD_BUF_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_WAIT_CMD_BUF_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_WAIT_CMD_BUF_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_WAIT_CMD_BUF_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_WAIT_CMD_BUF_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_WAIT_CMD_BUF_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_WAIT_CMD_BUF_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_WAIT_CMD_BUF_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_WAIT_CMD_BUF_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_WAIT_CMD_BUF_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_WAIT_CMD_MNG_MEM_CFG 0x000AE084 /* Reset: POR */
+#define I40E_WAIT_CMD_MNG_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_WAIT_CMD_MNG_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_WAIT_CMD_MNG_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_WAIT_CMD_MNG_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_WAIT_CMD_MNG_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_WAIT_CMD_MNG_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_WAIT_CMD_MNG_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_WAIT_CMD_MNG_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_WAIT_CMD_MNG_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_WAIT_CMD_MNG_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_WAIT_CMD_MNG_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_WAIT_CMD_MNG_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_WAIT_CMD_MNG_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_WAIT_CMD_MNG_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_WAIT_CMD_MNG_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_WAIT_CMD_MNG_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_WAIT_CMD_MNG_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_WAIT_CMD_MNG_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_WAIT_CMD_MNG_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_WAIT_CMD_MNG_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_WAIT_CMD_MNG_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_WAIT_CMD_MNG_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_WAIT_CMD_MNG_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_WAIT_CMD_MNG_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_WAIT_CMD_MNG_MEM_CFG_RME_SHIFT 12
+#define I40E_WAIT_CMD_MNG_MEM_CFG_RME_MASK I40E_MASK(0x1, I40E_WAIT_CMD_MNG_MEM_CFG_RME_SHIFT)
+#define I40E_WAIT_CMD_MNG_MEM_CFG_RM_SHIFT 16
+#define I40E_WAIT_CMD_MNG_MEM_CFG_RM_MASK I40E_MASK(0xF, I40E_WAIT_CMD_MNG_MEM_CFG_RM_SHIFT)
+
+#define I40E_WAIT_CMD_MNG_MEM_STATUS 0x000AE090 /* Reset: POR */
+#define I40E_WAIT_CMD_MNG_MEM_STATUS_ECC_ERR_SHIFT 0
+#define I40E_WAIT_CMD_MNG_MEM_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_WAIT_CMD_MNG_MEM_STATUS_ECC_ERR_SHIFT)
+#define I40E_WAIT_CMD_MNG_MEM_STATUS_ECC_FIX_SHIFT 1
+#define I40E_WAIT_CMD_MNG_MEM_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_WAIT_CMD_MNG_MEM_STATUS_ECC_FIX_SHIFT)
+#define I40E_WAIT_CMD_MNG_MEM_STATUS_INIT_DONE_SHIFT 2
+#define I40E_WAIT_CMD_MNG_MEM_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_WAIT_CMD_MNG_MEM_STATUS_INIT_DONE_SHIFT)
+#define I40E_WAIT_CMD_MNG_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_WAIT_CMD_MNG_MEM_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_WAIT_CMD_MNG_MEM_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_WUC_ECC_COR_ERR 0x0006E8AC /* Reset: POR */
+#define I40E_WUC_ECC_COR_ERR_CNT_SHIFT 0
+#define I40E_WUC_ECC_COR_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_WUC_ECC_COR_ERR_CNT_SHIFT)
+
+#define I40E_WUC_ECC_UNCOR_ERR 0x0006E8A8 /* Reset: POR */
+#define I40E_WUC_ECC_UNCOR_ERR_CNT_SHIFT 0
+#define I40E_WUC_ECC_UNCOR_ERR_CNT_MASK I40E_MASK(0xFFF, I40E_WUC_ECC_UNCOR_ERR_CNT_SHIFT)
+
+#define I40E_WUC_SP_FLEX_CFG 0x0006E898 /* Reset: POR */
+#define I40E_WUC_SP_FLEX_CFG_ECC_EN_SHIFT 0
+#define I40E_WUC_SP_FLEX_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_WUC_SP_FLEX_CFG_ECC_EN_SHIFT)
+#define I40E_WUC_SP_FLEX_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_WUC_SP_FLEX_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_WUC_SP_FLEX_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_WUC_SP_FLEX_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_WUC_SP_FLEX_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_WUC_SP_FLEX_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_WUC_SP_FLEX_CFG_LS_FORCE_SHIFT 3
+#define I40E_WUC_SP_FLEX_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_WUC_SP_FLEX_CFG_LS_FORCE_SHIFT)
+#define I40E_WUC_SP_FLEX_CFG_LS_BYPASS_SHIFT 4
+#define I40E_WUC_SP_FLEX_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_WUC_SP_FLEX_CFG_LS_BYPASS_SHIFT)
+#define I40E_WUC_SP_FLEX_CFG_MASK_INT_SHIFT 5
+#define I40E_WUC_SP_FLEX_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_WUC_SP_FLEX_CFG_MASK_INT_SHIFT)
+#define I40E_WUC_SP_FLEX_CFG_FIX_CNT_SHIFT 8
+#define I40E_WUC_SP_FLEX_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_WUC_SP_FLEX_CFG_FIX_CNT_SHIFT)
+#define I40E_WUC_SP_FLEX_CFG_ERR_CNT_SHIFT 9
+#define I40E_WUC_SP_FLEX_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_WUC_SP_FLEX_CFG_ERR_CNT_SHIFT)
+#define I40E_WUC_SP_FLEX_CFG_RME_SHIFT 12
+#define I40E_WUC_SP_FLEX_CFG_RME_MASK I40E_MASK(0x1, I40E_WUC_SP_FLEX_CFG_RME_SHIFT)
+#define I40E_WUC_SP_FLEX_CFG_RM_SHIFT 16
+#define I40E_WUC_SP_FLEX_CFG_RM_MASK I40E_MASK(0xF, I40E_WUC_SP_FLEX_CFG_RM_SHIFT)
+
+#define I40E_WUC_SP_FLEX_MASK_MEM_CFG 0x0006E890 /* Reset: POR */
+#define I40E_WUC_SP_FLEX_MASK_MEM_CFG_ECC_EN_SHIFT 0
+#define I40E_WUC_SP_FLEX_MASK_MEM_CFG_ECC_EN_MASK I40E_MASK(0x1, I40E_WUC_SP_FLEX_MASK_MEM_CFG_ECC_EN_SHIFT)
+#define I40E_WUC_SP_FLEX_MASK_MEM_CFG_ECC_INVERT_1_SHIFT 1
+#define I40E_WUC_SP_FLEX_MASK_MEM_CFG_ECC_INVERT_1_MASK I40E_MASK(0x1, I40E_WUC_SP_FLEX_MASK_MEM_CFG_ECC_INVERT_1_SHIFT)
+#define I40E_WUC_SP_FLEX_MASK_MEM_CFG_ECC_INVERT_2_SHIFT 2
+#define I40E_WUC_SP_FLEX_MASK_MEM_CFG_ECC_INVERT_2_MASK I40E_MASK(0x1, I40E_WUC_SP_FLEX_MASK_MEM_CFG_ECC_INVERT_2_SHIFT)
+#define I40E_WUC_SP_FLEX_MASK_MEM_CFG_LS_FORCE_SHIFT 3
+#define I40E_WUC_SP_FLEX_MASK_MEM_CFG_LS_FORCE_MASK I40E_MASK(0x1, I40E_WUC_SP_FLEX_MASK_MEM_CFG_LS_FORCE_SHIFT)
+#define I40E_WUC_SP_FLEX_MASK_MEM_CFG_LS_BYPASS_SHIFT 4
+#define I40E_WUC_SP_FLEX_MASK_MEM_CFG_LS_BYPASS_MASK I40E_MASK(0x1, I40E_WUC_SP_FLEX_MASK_MEM_CFG_LS_BYPASS_SHIFT)
+#define I40E_WUC_SP_FLEX_MASK_MEM_CFG_MASK_INT_SHIFT 5
+#define I40E_WUC_SP_FLEX_MASK_MEM_CFG_MASK_INT_MASK I40E_MASK(0x1, I40E_WUC_SP_FLEX_MASK_MEM_CFG_MASK_INT_SHIFT)
+#define I40E_WUC_SP_FLEX_MASK_MEM_CFG_FIX_CNT_SHIFT 8
+#define I40E_WUC_SP_FLEX_MASK_MEM_CFG_FIX_CNT_MASK I40E_MASK(0x1, I40E_WUC_SP_FLEX_MASK_MEM_CFG_FIX_CNT_SHIFT)
+#define I40E_WUC_SP_FLEX_MASK_MEM_CFG_ERR_CNT_SHIFT 9
+#define I40E_WUC_SP_FLEX_MASK_MEM_CFG_ERR_CNT_MASK I40E_MASK(0x1, I40E_WUC_SP_FLEX_MASK_MEM_CFG_ERR_CNT_SHIFT)
+#define I40E_WUC_SP_FLEX_MASK_MEM_CFG_RME_SHIFT 12
+#define I40E_WUC_SP_FLEX_MASK_MEM_CFG_RME_MASK I40E_MASK(0x1, I40E_WUC_SP_FLEX_MASK_MEM_CFG_RME_SHIFT)
+#define I40E_WUC_SP_FLEX_MASK_MEM_CFG_RM_SHIFT 16
+#define I40E_WUC_SP_FLEX_MASK_MEM_CFG_RM_MASK I40E_MASK(0xF, I40E_WUC_SP_FLEX_MASK_MEM_CFG_RM_SHIFT)
+
+#define I40E_WUC_SP_FLEX_MASK_STATUS 0x0006E894 /* Reset: POR */
+#define I40E_WUC_SP_FLEX_MASK_STATUS_ECC_ERR_SHIFT 0
+#define I40E_WUC_SP_FLEX_MASK_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_WUC_SP_FLEX_MASK_STATUS_ECC_ERR_SHIFT)
+#define I40E_WUC_SP_FLEX_MASK_STATUS_ECC_FIX_SHIFT 1
+#define I40E_WUC_SP_FLEX_MASK_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_WUC_SP_FLEX_MASK_STATUS_ECC_FIX_SHIFT)
+#define I40E_WUC_SP_FLEX_MASK_STATUS_INIT_DONE_SHIFT 2
+#define I40E_WUC_SP_FLEX_MASK_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_WUC_SP_FLEX_MASK_STATUS_INIT_DONE_SHIFT)
+#define I40E_WUC_SP_FLEX_MASK_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_WUC_SP_FLEX_MASK_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_WUC_SP_FLEX_MASK_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+#define I40E_WUC_SP_FLEX_STATUS 0x0006E89C /* Reset: POR */
+#define I40E_WUC_SP_FLEX_STATUS_ECC_ERR_SHIFT 0
+#define I40E_WUC_SP_FLEX_STATUS_ECC_ERR_MASK I40E_MASK(0x1, I40E_WUC_SP_FLEX_STATUS_ECC_ERR_SHIFT)
+#define I40E_WUC_SP_FLEX_STATUS_ECC_FIX_SHIFT 1
+#define I40E_WUC_SP_FLEX_STATUS_ECC_FIX_MASK I40E_MASK(0x1, I40E_WUC_SP_FLEX_STATUS_ECC_FIX_SHIFT)
+#define I40E_WUC_SP_FLEX_STATUS_INIT_DONE_SHIFT 2
+#define I40E_WUC_SP_FLEX_STATUS_INIT_DONE_MASK I40E_MASK(0x1, I40E_WUC_SP_FLEX_STATUS_INIT_DONE_SHIFT)
+#define I40E_WUC_SP_FLEX_STATUS_GLOBAL_INIT_DONE_SHIFT 3
+#define I40E_WUC_SP_FLEX_STATUS_GLOBAL_INIT_DONE_MASK I40E_MASK(0x1, I40E_WUC_SP_FLEX_STATUS_GLOBAL_INIT_DONE_SHIFT)
+
+/* PF - Internal Fuses */
+
+/* PF - Interrupt Registers */
+
+#define I40E_GLINT_CTL 0x0003F800 /* Reset: CORER */
+#define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT 0
+#define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT)
+#define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT 1
+#define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT)
+#define I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT 2
+#define I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT)
+
+#define I40E_PFINT_ITR0_STAT(_i) (0x00038200 + ((_i) * 128)) /* _i=0...2 */ /* Reset: PFR */
+#define I40E_PFINT_ITR0_STAT_MAX_INDEX 2
+#define I40E_PFINT_ITR0_STAT_ITR_EXPIRE_SHIFT 0
+#define I40E_PFINT_ITR0_STAT_ITR_EXPIRE_MASK I40E_MASK(0x1, I40E_PFINT_ITR0_STAT_ITR_EXPIRE_SHIFT)
+#define I40E_PFINT_ITR0_STAT_EVENT_SHIFT 1
+#define I40E_PFINT_ITR0_STAT_EVENT_MASK I40E_MASK(0x1, I40E_PFINT_ITR0_STAT_EVENT_SHIFT)
+#define I40E_PFINT_ITR0_STAT_ITR_TIME_SHIFT 2
+#define I40E_PFINT_ITR0_STAT_ITR_TIME_MASK I40E_MASK(0xFFF, I40E_PFINT_ITR0_STAT_ITR_TIME_SHIFT)
+
+#define I40E_PFINT_ITRN_STAT(_i, _INTPF) (0x00032000 + ((_i) * 2048 + (_INTPF) * 4)) /* _i=0...2, _INTPF=0...511 */ /* Reset: PFR */
+#define I40E_PFINT_ITRN_STAT_MAX_INDEX 2
+#define I40E_PFINT_ITRN_STAT_ITR_EXPIRE_SHIFT 0
+#define I40E_PFINT_ITRN_STAT_ITR_EXPIRE_MASK I40E_MASK(0x1, I40E_PFINT_ITRN_STAT_ITR_EXPIRE_SHIFT)
+#define I40E_PFINT_ITRN_STAT_EVENT_SHIFT 1
+#define I40E_PFINT_ITRN_STAT_EVENT_MASK I40E_MASK(0x1, I40E_PFINT_ITRN_STAT_EVENT_SHIFT)
+#define I40E_PFINT_ITRN_STAT_ITR_TIME_SHIFT 2
+#define I40E_PFINT_ITRN_STAT_ITR_TIME_MASK I40E_MASK(0xFFF, I40E_PFINT_ITRN_STAT_ITR_TIME_SHIFT)
+
+#define I40E_PFINT_RATE0_STAT 0x00038600 /* Reset: PFR */
+#define I40E_PFINT_RATE0_STAT_CREDIT_SHIFT 0
+#define I40E_PFINT_RATE0_STAT_CREDIT_MASK I40E_MASK(0xF, I40E_PFINT_RATE0_STAT_CREDIT_SHIFT)
+#define I40E_PFINT_RATE0_STAT_INTRL_TIME_SHIFT 4
+#define I40E_PFINT_RATE0_STAT_INTRL_TIME_MASK I40E_MASK(0x3F, I40E_PFINT_RATE0_STAT_INTRL_TIME_SHIFT)
+
+#define I40E_PFINT_RATEN_STAT(_INTPF) (0x00036000 + ((_INTPF) * 4)) /* _INTPF=0...511 */ /* Reset: PFR */
+#define I40E_PFINT_RATEN_STAT_MAX_INDEX 511
+#define I40E_PFINT_RATEN_STAT_CREDIT_SHIFT 0
+#define I40E_PFINT_RATEN_STAT_CREDIT_MASK I40E_MASK(0xF, I40E_PFINT_RATEN_STAT_CREDIT_SHIFT)
+#define I40E_PFINT_RATEN_STAT_INTRL_TIME_SHIFT 4
+#define I40E_PFINT_RATEN_STAT_INTRL_TIME_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_STAT_INTRL_TIME_SHIFT)
+
+#define I40E_VFINT_ITR0_STAT(_i, _VF) (0x00029000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFINT_ITR0_STAT_MAX_INDEX 2
+#define I40E_VFINT_ITR0_STAT_ITR_EXPIRE_SHIFT 0
+#define I40E_VFINT_ITR0_STAT_ITR_EXPIRE_MASK I40E_MASK(0x1, I40E_VFINT_ITR0_STAT_ITR_EXPIRE_SHIFT)
+#define I40E_VFINT_ITR0_STAT_EVENT_SHIFT 1
+#define I40E_VFINT_ITR0_STAT_EVENT_MASK I40E_MASK(0x1, I40E_VFINT_ITR0_STAT_EVENT_SHIFT)
+#define I40E_VFINT_ITR0_STAT_ITR_TIME_SHIFT 2
+#define I40E_VFINT_ITR0_STAT_ITR_TIME_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR0_STAT_ITR_TIME_SHIFT)
+
+#define I40E_VFINT_ITRN_STAT(_i, _INTVF) (0x00022000 + ((_i) * 2048 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...511 */ /* Reset: VFR */
+#define I40E_VFINT_ITRN_STAT_MAX_INDEX 2
+#define I40E_VFINT_ITRN_STAT_ITR_EXPIRE_SHIFT 0
+#define I40E_VFINT_ITRN_STAT_ITR_EXPIRE_MASK I40E_MASK(0x1, I40E_VFINT_ITRN_STAT_ITR_EXPIRE_SHIFT)
+#define I40E_VFINT_ITRN_STAT_EVENT_SHIFT 1
+#define I40E_VFINT_ITRN_STAT_EVENT_MASK I40E_MASK(0x1, I40E_VFINT_ITRN_STAT_EVENT_SHIFT)
+#define I40E_VFINT_ITRN_STAT_ITR_TIME_SHIFT 2
+#define I40E_VFINT_ITRN_STAT_ITR_TIME_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_STAT_ITR_TIME_SHIFT)
+
+#define I40E_VFINT_RATE0_STAT(_VF) (0x0002B000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFINT_RATE0_STAT_MAX_INDEX 127
+#define I40E_VFINT_RATE0_STAT_CREDIT_SHIFT 0
+#define I40E_VFINT_RATE0_STAT_CREDIT_MASK I40E_MASK(0xF, I40E_VFINT_RATE0_STAT_CREDIT_SHIFT)
+#define I40E_VFINT_RATE0_STAT_INTRL_TIME_SHIFT 4
+#define I40E_VFINT_RATE0_STAT_INTRL_TIME_MASK I40E_MASK(0x3F, I40E_VFINT_RATE0_STAT_INTRL_TIME_SHIFT)
+
+#define I40E_VFINT_RATEN_STAT(_INTVF) (0x00026000 + ((_INTVF) * 4)) /* _INTVF=0...511 */ /* Reset: VFR */
+#define I40E_VFINT_RATEN_STAT_MAX_INDEX 511
+#define I40E_VFINT_RATEN_STAT_CREDIT_SHIFT 0
+#define I40E_VFINT_RATEN_STAT_CREDIT_MASK I40E_MASK(0xF, I40E_VFINT_RATEN_STAT_CREDIT_SHIFT)
+#define I40E_VFINT_RATEN_STAT_INTRL_TIME_SHIFT 4
+#define I40E_VFINT_RATEN_STAT_INTRL_TIME_MASK I40E_MASK(0x3F, I40E_VFINT_RATEN_STAT_INTRL_TIME_SHIFT)
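+
+/*
+ * Illustrative sketch only, not part of the register map: each register
+ * above is described by a base define plus _SHIFT/_MASK pairs, and the
+ * driver is expected to combine them to extract a field from a register
+ * read.  Assuming the driver's usual rd32() accessor and an i40e_hw
+ * handle named hw, the ITR expiration time of PF vector 0 could be read
+ * roughly as follows:
+ *
+ *	u32 stat = rd32(hw, I40E_PFINT_ITR0_STAT(0));
+ *	u32 itr_time = (stat & I40E_PFINT_ITR0_STAT_ITR_TIME_MASK) >>
+ *	    I40E_PFINT_ITR0_STAT_ITR_TIME_SHIFT;
+ */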
+
+/* PF - LAN Transmit Receive Registers */
+
+#define I40E_GLLAN_PF_RECIPE(_i) (0x0012A5E0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLLAN_PF_RECIPE_MAX_INDEX 15
+#define I40E_GLLAN_PF_RECIPE_RECIPE_SHIFT 0
+#define I40E_GLLAN_PF_RECIPE_RECIPE_MASK I40E_MASK(0x3, I40E_GLLAN_PF_RECIPE_RECIPE_SHIFT)
+
+#define I40E_GLLAN_RCTL_1 0x0012A504 /* Reset: CORER */
+#define I40E_GLLAN_RCTL_1_RXMAX_EXPANSION_SHIFT 12
+#define I40E_GLLAN_RCTL_1_RXMAX_EXPANSION_MASK I40E_MASK(0xF, I40E_GLLAN_RCTL_1_RXMAX_EXPANSION_SHIFT)
+#define I40E_GLLAN_RCTL_1_RXDWBCTL_SHIFT 16
+#define I40E_GLLAN_RCTL_1_RXDWBCTL_MASK I40E_MASK(0x1, I40E_GLLAN_RCTL_1_RXDWBCTL_SHIFT)
+#define I40E_GLLAN_RCTL_1_RXDRDCTL_SHIFT 17
+#define I40E_GLLAN_RCTL_1_RXDRDCTL_MASK I40E_MASK(0x1, I40E_GLLAN_RCTL_1_RXDRDCTL_SHIFT)
+#define I40E_GLLAN_RCTL_1_RXDESCRDROEN_SHIFT 18
+#define I40E_GLLAN_RCTL_1_RXDESCRDROEN_MASK I40E_MASK(0x1, I40E_GLLAN_RCTL_1_RXDESCRDROEN_SHIFT)
+#define I40E_GLLAN_RCTL_1_RXDATAWRROEN_SHIFT 19
+#define I40E_GLLAN_RCTL_1_RXDATAWRROEN_MASK I40E_MASK(0x1, I40E_GLLAN_RCTL_1_RXDATAWRROEN_SHIFT)
+
+#define I40E_GLLAN_TCTL_0 0x000E6488 /* Reset: CORER */
+#define I40E_GLLAN_TCTL_0_TXLANTH_SHIFT 0
+#define I40E_GLLAN_TCTL_0_TXLANTH_MASK I40E_MASK(0x3F, I40E_GLLAN_TCTL_0_TXLANTH_SHIFT)
+#define I40E_GLLAN_TCTL_0_TXDESCRDROEN_SHIFT 6
+#define I40E_GLLAN_TCTL_0_TXDESCRDROEN_MASK I40E_MASK(0x1, I40E_GLLAN_TCTL_0_TXDESCRDROEN_SHIFT)
+
+#define I40E_GLLAN_TCTL_1 0x000442F0 /* Reset: CORER */
+#define I40E_GLLAN_TCTL_1_TXMAX_EXPANSION_SHIFT 0
+#define I40E_GLLAN_TCTL_1_TXMAX_EXPANSION_MASK I40E_MASK(0xF, I40E_GLLAN_TCTL_1_TXMAX_EXPANSION_SHIFT)
+#define I40E_GLLAN_TCTL_1_TXDATARDROEN_SHIFT 4
+#define I40E_GLLAN_TCTL_1_TXDATARDROEN_MASK I40E_MASK(0x1, I40E_GLLAN_TCTL_1_TXDATARDROEN_SHIFT)
+#define I40E_GLLAN_TCTL_1_RCU_BYPASS_SHIFT 5
+#define I40E_GLLAN_TCTL_1_RCU_BYPASS_MASK I40E_MASK(0x1, I40E_GLLAN_TCTL_1_RCU_BYPASS_SHIFT)
+#define I40E_GLLAN_TCTL_1_LSO_CACHE_BYPASS_SHIFT 6
+#define I40E_GLLAN_TCTL_1_LSO_CACHE_BYPASS_MASK I40E_MASK(0x1, I40E_GLLAN_TCTL_1_LSO_CACHE_BYPASS_SHIFT)
+#define I40E_GLLAN_TCTL_1_DBG_WB_SEL_SHIFT 7
+#define I40E_GLLAN_TCTL_1_DBG_WB_SEL_MASK I40E_MASK(0xF, I40E_GLLAN_TCTL_1_DBG_WB_SEL_SHIFT)
+#define I40E_GLLAN_TCTL_1_DBG_FORCE_RS_SHIFT 11
+#define I40E_GLLAN_TCTL_1_DBG_FORCE_RS_MASK I40E_MASK(0x1, I40E_GLLAN_TCTL_1_DBG_FORCE_RS_SHIFT)
+#define I40E_GLLAN_TCTL_1_DBG_BYPASS_SHIFT 12
+#define I40E_GLLAN_TCTL_1_DBG_BYPASS_MASK I40E_MASK(0x3FF, I40E_GLLAN_TCTL_1_DBG_BYPASS_SHIFT)
+#define I40E_GLLAN_TCTL_1_PRE_L2_ENA_SHIFT 22
+#define I40E_GLLAN_TCTL_1_PRE_L2_ENA_MASK I40E_MASK(0x1, I40E_GLLAN_TCTL_1_PRE_L2_ENA_SHIFT)
+#define I40E_GLLAN_TCTL_1_UR_PROT_DIS_SHIFT 23
+#define I40E_GLLAN_TCTL_1_UR_PROT_DIS_MASK I40E_MASK(0x1, I40E_GLLAN_TCTL_1_UR_PROT_DIS_SHIFT)
+#define I40E_GLLAN_TCTL_1_DBG_ECO_SHIFT 24
+#define I40E_GLLAN_TCTL_1_DBG_ECO_MASK I40E_MASK(0xFF, I40E_GLLAN_TCTL_1_DBG_ECO_SHIFT)
+
+#define I40E_GLLAN_TCTL_2 0x000AE080 /* Reset: CORER */
+#define I40E_GLLAN_TCTL_2_TXMAX_EXPANSION_SHIFT 0
+#define I40E_GLLAN_TCTL_2_TXMAX_EXPANSION_MASK I40E_MASK(0xF, I40E_GLLAN_TCTL_2_TXMAX_EXPANSION_SHIFT)
+#define I40E_GLLAN_TCTL_2_STAT_DBG_ADDR_SHIFT 4
+#define I40E_GLLAN_TCTL_2_STAT_DBG_ADDR_MASK I40E_MASK(0x1F, I40E_GLLAN_TCTL_2_STAT_DBG_ADDR_SHIFT)
+#define I40E_GLLAN_TCTL_2_STAT_DBG_DSEL_SHIFT 9
+#define I40E_GLLAN_TCTL_2_STAT_DBG_DSEL_MASK I40E_MASK(0x7, I40E_GLLAN_TCTL_2_STAT_DBG_DSEL_SHIFT)
+#define I40E_GLLAN_TCTL_2_ECO_SHIFT 12
+#define I40E_GLLAN_TCTL_2_ECO_MASK I40E_MASK(0xFFFFF, I40E_GLLAN_TCTL_2_ECO_SHIFT)
+
+#define I40E_GLLAN_TXEMP_EN 0x000AE0AC /* Reset: CORER */
+#define I40E_GLLAN_TXEMP_EN_TXHOST_EN_SHIFT 0
+#define I40E_GLLAN_TXEMP_EN_TXHOST_EN_MASK I40E_MASK(0x1, I40E_GLLAN_TXEMP_EN_TXHOST_EN_SHIFT)
+
+#define I40E_GLLAN_TXHOST_EN 0x000A2208 /* Reset: CORER */
+#define I40E_GLLAN_TXHOST_EN_TXHOST_EN_SHIFT 0
+#define I40E_GLLAN_TXHOST_EN_TXHOST_EN_MASK I40E_MASK(0x1, I40E_GLLAN_TXHOST_EN_TXHOST_EN_SHIFT)
+
+#define I40E_GLRCU_INDIRECT_ADDRESS 0x001C0AA4 /* Reset: CORER */
+#define I40E_GLRCU_INDIRECT_ADDRESS_GLRCU_INDIRECT_ADDRESS_SHIFT 0
+#define I40E_GLRCU_INDIRECT_ADDRESS_GLRCU_INDIRECT_ADDRESS_MASK I40E_MASK(0xFFFF, I40E_GLRCU_INDIRECT_ADDRESS_GLRCU_INDIRECT_ADDRESS_SHIFT)
+
+#define I40E_GLRCU_INDIRECT_DATA(_i) (0x001C0AA8 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_GLRCU_INDIRECT_DATA_MAX_INDEX 1
+#define I40E_GLRCU_INDIRECT_DATA_GLRCU_INDIRECT_DATA_SHIFT 0
+#define I40E_GLRCU_INDIRECT_DATA_GLRCU_INDIRECT_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLRCU_INDIRECT_DATA_GLRCU_INDIRECT_DATA_SHIFT)
+
+#define I40E_GLRCU_LB_INDIRECT_ADDRESS 0x00269BD4 /* Reset: CORER */
+#define I40E_GLRCU_LB_INDIRECT_ADDRESS_GLRCU_INDIRECT_ADDRESS_SHIFT 0
+#define I40E_GLRCU_LB_INDIRECT_ADDRESS_GLRCU_INDIRECT_ADDRESS_MASK I40E_MASK(0xFFFF, I40E_GLRCU_LB_INDIRECT_ADDRESS_GLRCU_INDIRECT_ADDRESS_SHIFT)
+
+#define I40E_GLRCU_LB_INDIRECT_DATA(_i) (0x00269898 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLRCU_LB_INDIRECT_DATA_MAX_INDEX 3
+#define I40E_GLRCU_LB_INDIRECT_DATA_GLRCU_INDIRECT_DATA_SHIFT 0
+#define I40E_GLRCU_LB_INDIRECT_DATA_GLRCU_INDIRECT_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLRCU_LB_INDIRECT_DATA_GLRCU_INDIRECT_DATA_SHIFT)
+
+#define I40E_GLRCU_RX_INDIRECT_ADDRESS 0x00269BCC /* Reset: CORER */
+#define I40E_GLRCU_RX_INDIRECT_ADDRESS_GLRCU_INDIRECT_ADDRESS_SHIFT 0
+#define I40E_GLRCU_RX_INDIRECT_ADDRESS_GLRCU_INDIRECT_ADDRESS_MASK I40E_MASK(0xFFFF, I40E_GLRCU_RX_INDIRECT_ADDRESS_GLRCU_INDIRECT_ADDRESS_SHIFT)
+
+#define I40E_GLRCU_RX_INDIRECT_DATA(_i) (0x00269888 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLRCU_RX_INDIRECT_DATA_MAX_INDEX 3
+#define I40E_GLRCU_RX_INDIRECT_DATA_GLRCU_INDIRECT_DATA_SHIFT 0
+#define I40E_GLRCU_RX_INDIRECT_DATA_GLRCU_INDIRECT_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLRCU_RX_INDIRECT_DATA_GLRCU_INDIRECT_DATA_SHIFT)
+
+#define I40E_GLRDPU_INDIRECT_ADDRESS 0x00051040 /* Reset: CORER */
+#define I40E_GLRDPU_INDIRECT_ADDRESS_GLRCU_INDIRECT_ADDRESS_SHIFT 0
+#define I40E_GLRDPU_INDIRECT_ADDRESS_GLRCU_INDIRECT_ADDRESS_MASK I40E_MASK(0xFFFF, I40E_GLRDPU_INDIRECT_ADDRESS_GLRCU_INDIRECT_ADDRESS_SHIFT)
+
+#define I40E_GLRDPU_INDIRECT_DATA(_i) (0x00051044 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLRDPU_INDIRECT_DATA_MAX_INDEX 3
+#define I40E_GLRDPU_INDIRECT_DATA_GLRCU_INDIRECT_DATA_SHIFT 0
+#define I40E_GLRDPU_INDIRECT_DATA_GLRCU_INDIRECT_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLRDPU_INDIRECT_DATA_GLRCU_INDIRECT_DATA_SHIFT)
+
+#define I40E_GLTDPU_INDIRECT_ADDRESS 0x00044264 /* Reset: CORER */
+#define I40E_GLTDPU_INDIRECT_ADDRESS_GLRCU_INDIRECT_ADDRESS_SHIFT 0
+#define I40E_GLTDPU_INDIRECT_ADDRESS_GLRCU_INDIRECT_ADDRESS_MASK I40E_MASK(0xFFFF, I40E_GLTDPU_INDIRECT_ADDRESS_GLRCU_INDIRECT_ADDRESS_SHIFT)
+
+#define I40E_GLTDPU_INDIRECT_DATA(_i) (0x00044268 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLTDPU_INDIRECT_DATA_MAX_INDEX 3
+#define I40E_GLTDPU_INDIRECT_DATA_GLRCU_INDIRECT_DATA_SHIFT 0
+#define I40E_GLTDPU_INDIRECT_DATA_GLRCU_INDIRECT_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLTDPU_INDIRECT_DATA_GLRCU_INDIRECT_DATA_SHIFT)
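+
+/*
+ * Illustrative sketch only, not part of the register map: the
+ * *_INDIRECT_ADDRESS and *_INDIRECT_DATA pairs above appear to follow
+ * the usual indirect-access pattern of writing a target offset into the
+ * ADDRESS register and then reading the DATA register(s).  Assuming the
+ * driver's usual rd32()/wr32() accessors, that would look roughly like:
+ *
+ *	wr32(hw, I40E_GLRCU_INDIRECT_ADDRESS, offset);
+ *	u32 lo = rd32(hw, I40E_GLRCU_INDIRECT_DATA(0));
+ *	u32 hi = rd32(hw, I40E_GLRCU_INDIRECT_DATA(1));
+ */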
+
+#define I40E_GLTLAN_MIN_MAX_MSS 0x000E64DC /* Reset: CORER */
+#define I40E_GLTLAN_MIN_MAX_MSS_MAHDL_SHIFT 0
+#define I40E_GLTLAN_MIN_MAX_MSS_MAHDL_MASK I40E_MASK(0x3FFF, I40E_GLTLAN_MIN_MAX_MSS_MAHDL_SHIFT)
+#define I40E_GLTLAN_MIN_MAX_MSS_MIHDL_SHIFT 16
+#define I40E_GLTLAN_MIN_MAX_MSS_MIHDL_MASK I40E_MASK(0x3FF, I40E_GLTLAN_MIN_MAX_MSS_MIHDL_SHIFT)
+#define I40E_GLTLAN_MIN_MAX_MSS_RSV_SHIFT 26
+#define I40E_GLTLAN_MIN_MAX_MSS_RSV_MASK I40E_MASK(0x3F, I40E_GLTLAN_MIN_MAX_MSS_RSV_SHIFT)
+
+#define I40E_GLTLAN_MIN_MAX_PKT 0x000E64DC /* Reset: CORER */
+#define I40E_GLTLAN_MIN_MAX_PKT_MAHDL_SHIFT 0
+#define I40E_GLTLAN_MIN_MAX_PKT_MAHDL_MASK I40E_MASK(0x3FFF, I40E_GLTLAN_MIN_MAX_PKT_MAHDL_SHIFT)
+#define I40E_GLTLAN_MIN_MAX_PKT_MIHDL_SHIFT 16
+#define I40E_GLTLAN_MIN_MAX_PKT_MIHDL_MASK I40E_MASK(0x3F, I40E_GLTLAN_MIN_MAX_PKT_MIHDL_SHIFT)
+#define I40E_GLTLAN_MIN_MAX_PKT_RSV_SHIFT 22
+#define I40E_GLTLAN_MIN_MAX_PKT_RSV_MASK I40E_MASK(0x3FF, I40E_GLTLAN_MIN_MAX_PKT_RSV_SHIFT)
+
+#define I40E_PF_VT_PFALLOC_RLAN 0x0012A480 /* Reset: CORER */
+#define I40E_PF_VT_PFALLOC_RLAN_FIRSTVF_SHIFT 0
+#define I40E_PF_VT_PFALLOC_RLAN_FIRSTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_RLAN_FIRSTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_RLAN_LASTVF_SHIFT 8
+#define I40E_PF_VT_PFALLOC_RLAN_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_RLAN_LASTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_RLAN_VALID_SHIFT 31
+#define I40E_PF_VT_PFALLOC_RLAN_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_RLAN_VALID_SHIFT)
+
+#define I40E_PFLAN_QALLOC_CSR 0x00078E00 /* Reset: CORER */
+#define I40E_PFLAN_QALLOC_CSR_FIRSTQ_SHIFT 0
+#define I40E_PFLAN_QALLOC_CSR_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_CSR_FIRSTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_CSR_LASTQ_SHIFT 16
+#define I40E_PFLAN_QALLOC_CSR_LASTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_CSR_LASTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_CSR_VALID_SHIFT 31
+#define I40E_PFLAN_QALLOC_CSR_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_CSR_VALID_SHIFT)
+
+#define I40E_PFLAN_QALLOC_INT 0x0003F000 /* Reset: CORER */
+#define I40E_PFLAN_QALLOC_INT_FIRSTQ_SHIFT 0
+#define I40E_PFLAN_QALLOC_INT_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_INT_FIRSTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_INT_LASTQ_SHIFT 16
+#define I40E_PFLAN_QALLOC_INT_LASTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_INT_LASTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_INT_VALID_SHIFT 31
+#define I40E_PFLAN_QALLOC_INT_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_INT_VALID_SHIFT)
+
+#define I40E_PFLAN_QALLOC_PMAT 0x000C0600 /* Reset: CORER */
+#define I40E_PFLAN_QALLOC_PMAT_FIRSTQ_SHIFT 0
+#define I40E_PFLAN_QALLOC_PMAT_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_PMAT_FIRSTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_PMAT_LASTQ_SHIFT 16
+#define I40E_PFLAN_QALLOC_PMAT_LASTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_PMAT_LASTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_PMAT_VALID_SHIFT 31
+#define I40E_PFLAN_QALLOC_PMAT_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_PMAT_VALID_SHIFT)
+
+#define I40E_PFLAN_QALLOC_RCB 0x00122080 /* Reset: CORER */
+#define I40E_PFLAN_QALLOC_RCB_FIRSTQ_SHIFT 0
+#define I40E_PFLAN_QALLOC_RCB_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_RCB_FIRSTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_RCB_LASTQ_SHIFT 16
+#define I40E_PFLAN_QALLOC_RCB_LASTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_RCB_LASTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_RCB_VALID_SHIFT 31
+#define I40E_PFLAN_QALLOC_RCB_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_RCB_VALID_SHIFT)
+
+#define I40E_PFLAN_QALLOC_RCU 0x00246780 /* Reset: CORER */
+#define I40E_PFLAN_QALLOC_RCU_FIRSTQ_SHIFT 0
+#define I40E_PFLAN_QALLOC_RCU_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_RCU_FIRSTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_RCU_LASTQ_SHIFT 16
+#define I40E_PFLAN_QALLOC_RCU_LASTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_RCU_LASTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_RCU_VALID_SHIFT 31
+#define I40E_PFLAN_QALLOC_RCU_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_RCU_VALID_SHIFT)
+
+#define I40E_PRTLAN_RXEMP_EN 0x001E4780 /* Reset: GLOBR */
+#define I40E_PRTLAN_RXEMP_EN_RXHOST_EN_SHIFT 0
+#define I40E_PRTLAN_RXEMP_EN_RXHOST_EN_MASK I40E_MASK(0x1, I40E_PRTLAN_RXEMP_EN_RXHOST_EN_SHIFT)
+
+/* PF - MAC Registers */
+
+#define I40E_PRTDCB_MPVCTL 0x001E2460 /* Reset: GLOBR */
+#define I40E_PRTDCB_MPVCTL_PFCV_SHIFT 0
+#define I40E_PRTDCB_MPVCTL_PFCV_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_MPVCTL_PFCV_SHIFT)
+#define I40E_PRTDCB_MPVCTL_RFCV_SHIFT 16
+#define I40E_PRTDCB_MPVCTL_RFCV_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_MPVCTL_RFCV_SHIFT)
+
+#define I40E_PRTMAC_AN_LP_STATUS1 0x0008C680 /* Reset: GLOBR */
+#define I40E_PRTMAC_AN_LP_STATUS1_LP_AN_PAGE_LOW_SHIFT 0
+#define I40E_PRTMAC_AN_LP_STATUS1_LP_AN_PAGE_LOW_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_AN_LP_STATUS1_LP_AN_PAGE_LOW_SHIFT)
+#define I40E_PRTMAC_AN_LP_STATUS1_AN_ARB_STATE_SHIFT 16
+#define I40E_PRTMAC_AN_LP_STATUS1_AN_ARB_STATE_MASK I40E_MASK(0xF, I40E_PRTMAC_AN_LP_STATUS1_AN_ARB_STATE_SHIFT)
+#define I40E_PRTMAC_AN_LP_STATUS1_RSVD_SHIFT 20
+#define I40E_PRTMAC_AN_LP_STATUS1_RSVD_MASK I40E_MASK(0xFFF, I40E_PRTMAC_AN_LP_STATUS1_RSVD_SHIFT)
+
+#define I40E_PRTMAC_HLCTL 0x001E2000 /* Reset: GLOBR */
+#define I40E_PRTMAC_HLCTL_APPEND_CRC_SHIFT 0
+#define I40E_PRTMAC_HLCTL_APPEND_CRC_MASK I40E_MASK(0x1, I40E_PRTMAC_HLCTL_APPEND_CRC_SHIFT)
+#define I40E_PRTMAC_HLCTL_RXCRCSTRP_SHIFT 1
+#define I40E_PRTMAC_HLCTL_RXCRCSTRP_MASK I40E_MASK(0x1, I40E_PRTMAC_HLCTL_RXCRCSTRP_SHIFT)
+#define I40E_PRTMAC_HLCTL_JUMBOEN_SHIFT 2
+#define I40E_PRTMAC_HLCTL_JUMBOEN_MASK I40E_MASK(0x1, I40E_PRTMAC_HLCTL_JUMBOEN_SHIFT)
+#define I40E_PRTMAC_HLCTL_LEGACY_RSVD3_SHIFT 3
+#define I40E_PRTMAC_HLCTL_LEGACY_RSVD3_MASK I40E_MASK(0x1, I40E_PRTMAC_HLCTL_LEGACY_RSVD3_SHIFT)
+#define I40E_PRTMAC_HLCTL_LEGACY_RSVD4_SHIFT 4
+#define I40E_PRTMAC_HLCTL_LEGACY_RSVD4_MASK I40E_MASK(0x1, I40E_PRTMAC_HLCTL_LEGACY_RSVD4_SHIFT)
+#define I40E_PRTMAC_HLCTL_LEGACY_RSVD5_SHIFT 5
+#define I40E_PRTMAC_HLCTL_LEGACY_RSVD5_MASK I40E_MASK(0x1, I40E_PRTMAC_HLCTL_LEGACY_RSVD5_SHIFT)
+#define I40E_PRTMAC_HLCTL_LEGACY_RSVD6_SHIFT 6
+#define I40E_PRTMAC_HLCTL_LEGACY_RSVD6_MASK I40E_MASK(0x1, I40E_PRTMAC_HLCTL_LEGACY_RSVD6_SHIFT)
+#define I40E_PRTMAC_HLCTL_LEGACY_RSVD7_SHIFT 7
+#define I40E_PRTMAC_HLCTL_LEGACY_RSVD7_MASK I40E_MASK(0x3, I40E_PRTMAC_HLCTL_LEGACY_RSVD7_SHIFT)
+#define I40E_PRTMAC_HLCTL_SOFTRESET_SHIFT 9
+#define I40E_PRTMAC_HLCTL_SOFTRESET_MASK I40E_MASK(0x1, I40E_PRTMAC_HLCTL_SOFTRESET_SHIFT)
+#define I40E_PRTMAC_HLCTL_TXPADEN_SHIFT 10
+#define I40E_PRTMAC_HLCTL_TXPADEN_MASK I40E_MASK(0x1, I40E_PRTMAC_HLCTL_TXPADEN_SHIFT)
+#define I40E_PRTMAC_HLCTL_TX_ENABLE_SHIFT 11
+#define I40E_PRTMAC_HLCTL_TX_ENABLE_MASK I40E_MASK(0x1, I40E_PRTMAC_HLCTL_TX_ENABLE_SHIFT)
+#define I40E_PRTMAC_HLCTL_LEGACY_RSVD12_SHIFT 12
+#define I40E_PRTMAC_HLCTL_LEGACY_RSVD12_MASK I40E_MASK(0x1, I40E_PRTMAC_HLCTL_LEGACY_RSVD12_SHIFT)
+#define I40E_PRTMAC_HLCTL_RX_ENABLE_SHIFT 13
+#define I40E_PRTMAC_HLCTL_RX_ENABLE_MASK I40E_MASK(0x1, I40E_PRTMAC_HLCTL_RX_ENABLE_SHIFT)
+#define I40E_PRTMAC_HLCTL_LEGACY_RSVD14_SHIFT 14
+#define I40E_PRTMAC_HLCTL_LEGACY_RSVD14_MASK I40E_MASK(0x1, I40E_PRTMAC_HLCTL_LEGACY_RSVD14_SHIFT)
+#define I40E_PRTMAC_HLCTL_LPBK_SHIFT 15
+#define I40E_PRTMAC_HLCTL_LPBK_MASK I40E_MASK(0x1, I40E_PRTMAC_HLCTL_LPBK_SHIFT)
+#define I40E_PRTMAC_HLCTL_LEGACY_RSVD16_SHIFT 16
+#define I40E_PRTMAC_HLCTL_LEGACY_RSVD16_MASK I40E_MASK(0x1, I40E_PRTMAC_HLCTL_LEGACY_RSVD16_SHIFT)
+#define I40E_PRTMAC_HLCTL_LEGACY_RSVD17_SHIFT 17
+#define I40E_PRTMAC_HLCTL_LEGACY_RSVD17_MASK I40E_MASK(0x1, I40E_PRTMAC_HLCTL_LEGACY_RSVD17_SHIFT)
+#define I40E_PRTMAC_HLCTL_LEGACY_RSVD18_SHIFT 18
+#define I40E_PRTMAC_HLCTL_LEGACY_RSVD18_MASK I40E_MASK(0x1, I40E_PRTMAC_HLCTL_LEGACY_RSVD18_SHIFT)
+#define I40E_PRTMAC_HLCTL_TXLB2NET_SHIFT 19
+#define I40E_PRTMAC_HLCTL_TXLB2NET_MASK I40E_MASK(0x1, I40E_PRTMAC_HLCTL_TXLB2NET_SHIFT)
+#define I40E_PRTMAC_HLCTL_LEGACY_RSVD20_SHIFT 20
+#define I40E_PRTMAC_HLCTL_LEGACY_RSVD20_MASK I40E_MASK(0xF, I40E_PRTMAC_HLCTL_LEGACY_RSVD20_SHIFT)
+#define I40E_PRTMAC_HLCTL_LEGACY_RSVD24_SHIFT 24
+#define I40E_PRTMAC_HLCTL_LEGACY_RSVD24_MASK I40E_MASK(0x7, I40E_PRTMAC_HLCTL_LEGACY_RSVD24_SHIFT)
+#define I40E_PRTMAC_HLCTL_RXLNGTHERREN_SHIFT 27
+#define I40E_PRTMAC_HLCTL_RXLNGTHERREN_MASK I40E_MASK(0x1, I40E_PRTMAC_HLCTL_RXLNGTHERREN_SHIFT)
+#define I40E_PRTMAC_HLCTL_RXPADSTRIPEN_SHIFT 28
+#define I40E_PRTMAC_HLCTL_RXPADSTRIPEN_MASK I40E_MASK(0x1, I40E_PRTMAC_HLCTL_RXPADSTRIPEN_SHIFT)
+
+#define I40E_PRTMAC_HLCTLA 0x001E4760 /* Reset: GLOBR */
+#define I40E_PRTMAC_HLCTLA_DROP_US_PKTS_SHIFT 0
+#define I40E_PRTMAC_HLCTLA_DROP_US_PKTS_MASK I40E_MASK(0x1, I40E_PRTMAC_HLCTLA_DROP_US_PKTS_SHIFT)
+#define I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_SHIFT 1
+#define I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_MASK I40E_MASK(0x1, I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_SHIFT)
+#define I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_SHIFT 2
+#define I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_MASK I40E_MASK(0x1, I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_SHIFT)
+#define I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_SHIFT 4
+#define I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_MASK I40E_MASK(0x7, I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_SHIFT)
+#define I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_SHIFT 7
+#define I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_MASK I40E_MASK(0x1, I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_SHIFT)
+
+#define I40E_PRTMAC_HLSTA 0x001E2020 /* Reset: GLOBR */
+#define I40E_PRTMAC_HLSTA_REVID_SHIFT 0
+#define I40E_PRTMAC_HLSTA_REVID_MASK I40E_MASK(0xF, I40E_PRTMAC_HLSTA_REVID_SHIFT)
+#define I40E_PRTMAC_HLSTA_RESERVED_2_SHIFT 4
+#define I40E_PRTMAC_HLSTA_RESERVED_2_MASK I40E_MASK(0x1, I40E_PRTMAC_HLSTA_RESERVED_2_SHIFT)
+#define I40E_PRTMAC_HLSTA_RXERRSYM_SHIFT 5
+#define I40E_PRTMAC_HLSTA_RXERRSYM_MASK I40E_MASK(0x1, I40E_PRTMAC_HLSTA_RXERRSYM_SHIFT)
+#define I40E_PRTMAC_HLSTA_RXILLSYM_SHIFT 6
+#define I40E_PRTMAC_HLSTA_RXILLSYM_MASK I40E_MASK(0x1, I40E_PRTMAC_HLSTA_RXILLSYM_SHIFT)
+#define I40E_PRTMAC_HLSTA_RXIDLERR_SHIFT 7
+#define I40E_PRTMAC_HLSTA_RXIDLERR_MASK I40E_MASK(0x1, I40E_PRTMAC_HLSTA_RXIDLERR_SHIFT)
+#define I40E_PRTMAC_HLSTA_LEGACY_RSVD1_SHIFT 8
+#define I40E_PRTMAC_HLSTA_LEGACY_RSVD1_MASK I40E_MASK(0x1, I40E_PRTMAC_HLSTA_LEGACY_RSVD1_SHIFT)
+#define I40E_PRTMAC_HLSTA_LEGACY_RSVD2_SHIFT 9
+#define I40E_PRTMAC_HLSTA_LEGACY_RSVD2_MASK I40E_MASK(0x1, I40E_PRTMAC_HLSTA_LEGACY_RSVD2_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_INTERNAL 0x001E3530 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_INTERNAL_HSEC_RX_SWZL_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_INTERNAL_HSEC_RX_SWZL_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_INTERNAL_HSEC_RX_SWZL_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_INTERNAL_HSEC_TX_SWZL_SHIFT 1
+#define I40E_PRTMAC_HSEC_CTL_INTERNAL_HSEC_TX_SWZL_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_INTERNAL_HSEC_TX_SWZL_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_INTERNAL_HSEC_CTL_RX_CHECK_ACK_SHIFT 2
+#define I40E_PRTMAC_HSEC_CTL_INTERNAL_HSEC_CTL_RX_CHECK_ACK_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_INTERNAL_HSEC_CTL_RX_CHECK_ACK_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_ETYPE_GCP 0x001E3160 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_ETYPE_GCP_HSEC_CTL_RX_CHECK_ETYPE_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_ETYPE_GCP_HSEC_CTL_RX_CHECK_ETYPE_GCP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_CHECK_ETYPE_GCP_HSEC_CTL_RX_CHECK_ETYPE_GCP_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_ETYPE_GPP 0x001E32A0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_ETYPE_GPP_HSEC_CTL_RX_CHECK_ETYPE_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_ETYPE_GPP_HSEC_CTL_RX_CHECK_ETYPE_GPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_CHECK_ETYPE_GPP_HSEC_CTL_RX_CHECK_ETYPE_GPP_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_ETYPE_PCP 0x001E3210 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_ETYPE_PCP_HSEC_CTL_RX_CHECK_ETYPE_PCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_ETYPE_PCP_HSEC_CTL_RX_CHECK_ETYPE_PCP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_CHECK_ETYPE_PCP_HSEC_CTL_RX_CHECK_ETYPE_PCP_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_ETYPE_PPP 0x001E3320 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_ETYPE_PPP_HSEC_CTL_RX_CHECK_ETYPE_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_ETYPE_PPP_HSEC_CTL_RX_CHECK_ETYPE_PPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_CHECK_ETYPE_PPP_HSEC_CTL_RX_CHECK_ETYPE_PPP_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_MCAST_GCP 0x001E30F0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_MCAST_GCP_HSEC_CTL_RX_CHECK_MCAST_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_MCAST_GCP_HSEC_CTL_RX_CHECK_MCAST_GCP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_CHECK_MCAST_GCP_HSEC_CTL_RX_CHECK_MCAST_GCP_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_MCAST_GPP 0x001E3270 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_MCAST_GPP_HSEC_CTL_RX_CHECK_MCAST_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_MCAST_GPP_HSEC_CTL_RX_CHECK_MCAST_GPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_CHECK_MCAST_GPP_HSEC_CTL_RX_CHECK_MCAST_GPP_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_MCAST_PCP 0x001E31C0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_MCAST_PCP_HSEC_CTL_RX_CHECK_MCAST_PCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_MCAST_PCP_HSEC_CTL_RX_CHECK_MCAST_PCP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_CHECK_MCAST_PCP_HSEC_CTL_RX_CHECK_MCAST_PCP_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_MCAST_PPP 0x001E32F0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_MCAST_PPP_HSEC_CTL_RX_CHECK_MCAST_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_MCAST_PPP_HSEC_CTL_RX_CHECK_MCAST_PPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_CHECK_MCAST_PPP_HSEC_CTL_RX_CHECK_MCAST_PPP_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_OPCODE_GCP 0x001E3170 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_OPCODE_GCP_HSEC_CTL_RX_CHECK_OPCODE_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_OPCODE_GCP_HSEC_CTL_RX_CHECK_OPCODE_GCP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_CHECK_OPCODE_GCP_HSEC_CTL_RX_CHECK_OPCODE_GCP_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_OPCODE_GPP 0x001E32C0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_OPCODE_GPP_HSEC_CTL_RX_CHECK_OPCODE_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_OPCODE_GPP_HSEC_CTL_RX_CHECK_OPCODE_GPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_CHECK_OPCODE_GPP_HSEC_CTL_RX_CHECK_OPCODE_GPP_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_OPCODE_PCP 0x001E3230 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_OPCODE_PCP_HSEC_CTL_RX_CHECK_OPCODE_PCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_OPCODE_PCP_HSEC_CTL_RX_CHECK_OPCODE_PCP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_CHECK_OPCODE_PCP_HSEC_CTL_RX_CHECK_OPCODE_PCP_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_OPCODE_PPP 0x001E3340 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_OPCODE_PPP_HSEC_CTL_RX_CHECK_OPCODE_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_OPCODE_PPP_HSEC_CTL_RX_CHECK_OPCODE_PPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_CHECK_OPCODE_PPP_HSEC_CTL_RX_CHECK_OPCODE_PPP_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP 0x001E3130 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP 0x001E3290 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PCP 0x001E3200 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PCP_HSEC_CTL_RX_CHECK_SA_PCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PCP_HSEC_CTL_RX_CHECK_SA_PCP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PCP_HSEC_CTL_RX_CHECK_SA_PCP_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP 0x001E3310 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP 0x001E3100 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP 0x001E3280 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PCP 0x001E31D0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PCP_HSEC_CTL_RX_CHECK_UCAST_PCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PCP_HSEC_CTL_RX_CHECK_UCAST_PCP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PCP_HSEC_CTL_RX_CHECK_UCAST_PCP_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP 0x001E3300 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_RX_DELETE_FCS 0x001E3080 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_DELETE_FCS_HSEC_CTL_RX_DELETE_FCS_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_DELETE_FCS_HSEC_CTL_RX_DELETE_FCS_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_DELETE_FCS_HSEC_CTL_RX_DELETE_FCS_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE 0x001E3070 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_HSEC_CTL_RX_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_HSEC_CTL_RX_ENABLE_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_HSEC_CTL_RX_ENABLE_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PCP 0x001E31B0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PCP_HSEC_CTL_RX_ENABLE_PCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PCP_HSEC_CTL_RX_ENABLE_PCP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PCP_HSEC_CTL_RX_ENABLE_PCP_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_RX_ETYPE_GCP 0x001E31A0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_ETYPE_GCP_HSEC_CTL_RX_ETYPE_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ETYPE_GCP_HSEC_CTL_RX_ETYPE_GCP_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_ETYPE_GCP_HSEC_CTL_RX_ETYPE_GCP_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_RX_ETYPE_GPP 0x001E32B0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_ETYPE_GPP_HSEC_CTL_RX_ETYPE_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ETYPE_GPP_HSEC_CTL_RX_ETYPE_GPP_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_ETYPE_GPP_HSEC_CTL_RX_ETYPE_GPP_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_RX_ETYPE_PCP 0x001E3220 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_ETYPE_PCP_HSEC_CTL_RX_ETYPE_PCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ETYPE_PCP_HSEC_CTL_RX_ETYPE_PCP_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_ETYPE_PCP_HSEC_CTL_RX_ETYPE_PCP_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_RX_ETYPE_PPP 0x001E3330 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_ETYPE_PPP_HSEC_CTL_RX_ETYPE_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ETYPE_PPP_HSEC_CTL_RX_ETYPE_PPP_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_ETYPE_PPP_HSEC_CTL_RX_ETYPE_PPP_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_RX_IGNORE_FCS 0x001E3090 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_IGNORE_FCS_HSEC_CTL_RX_IGNORE_FCS_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_IGNORE_FCS_HSEC_CTL_RX_IGNORE_FCS_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_IGNORE_FCS_HSEC_CTL_RX_IGNORE_FCS_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_RX_MAX_PACKET_LEN 0x001E30A0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_MAX_PACKET_LEN_HSEC_CTL_RX_MAX_PACKET_LEN_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_MAX_PACKET_LEN_HSEC_CTL_RX_MAX_PACKET_LEN_MASK I40E_MASK(0x7FFF, I40E_PRTMAC_HSEC_CTL_RX_MAX_PACKET_LEN_HSEC_CTL_RX_MAX_PACKET_LEN_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_RX_MIN_PACKET_LEN 0x001E30B0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_MIN_PACKET_LEN_HSEC_CTL_RX_MIN_PACKET_LEN_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_MIN_PACKET_LEN_HSEC_CTL_RX_MIN_PACKET_LEN_MASK I40E_MASK(0xFF, I40E_PRTMAC_HSEC_CTL_RX_MIN_PACKET_LEN_HSEC_CTL_RX_MIN_PACKET_LEN_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_RX_OPCODE_GPP 0x001E32D0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_OPCODE_GPP_HSEC_CTL_RX_OPCODE_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_OPCODE_GPP_HSEC_CTL_RX_OPCODE_GPP_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_OPCODE_GPP_HSEC_CTL_RX_OPCODE_GPP_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_RX_OPCODE_MAX_GCP 0x001E3190 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_OPCODE_MAX_GCP_HSEC_CTL_RX_OPCODE_MAX_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_OPCODE_MAX_GCP_HSEC_CTL_RX_OPCODE_MAX_GCP_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_OPCODE_MAX_GCP_HSEC_CTL_RX_OPCODE_MAX_GCP_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_RX_OPCODE_MAX_PCP 0x001E3250 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_OPCODE_MAX_PCP_HSEC_CTL_RX_OPCODE_MAX_PCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_OPCODE_MAX_PCP_HSEC_CTL_RX_OPCODE_MAX_PCP_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_OPCODE_MAX_PCP_HSEC_CTL_RX_OPCODE_MAX_PCP_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_RX_OPCODE_MIN_GCP 0x001E3180 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_OPCODE_MIN_GCP_HSEC_CTL_RX_OPCODE_MIN_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_OPCODE_MIN_GCP_HSEC_CTL_RX_OPCODE_MIN_GCP_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_OPCODE_MIN_GCP_HSEC_CTL_RX_OPCODE_MIN_GCP_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_RX_OPCODE_MIN_PCP 0x001E3240 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_OPCODE_MIN_PCP_HSEC_CTL_RX_OPCODE_MIN_PCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_OPCODE_MIN_PCP_HSEC_CTL_RX_OPCODE_MIN_PCP_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_OPCODE_MIN_PCP_HSEC_CTL_RX_OPCODE_MIN_PCP_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_RX_OPCODE_PPP 0x001E3350 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_OPCODE_PPP_HSEC_CTL_RX_OPCODE_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_OPCODE_PPP_HSEC_CTL_RX_OPCODE_PPP_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_OPCODE_PPP_HSEC_CTL_RX_OPCODE_PPP_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_MCAST_PART1 0x001E31E0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_MCAST_PART1_HSEC_CTL_RX_PAUSE_DA_MCAST_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_MCAST_PART1_HSEC_CTL_RX_PAUSE_DA_MCAST_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_MCAST_PART1_HSEC_CTL_RX_PAUSE_DA_MCAST_PART1_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_MCAST_PART2 0x001E31F0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_MCAST_PART2_HSEC_CTL_RX_PAUSE_DA_MCAST_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_MCAST_PART2_HSEC_CTL_RX_PAUSE_DA_MCAST_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_MCAST_PART2_HSEC_CTL_RX_PAUSE_DA_MCAST_PART2_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_TX_DA_GPP_PART1 0x001E3490 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_DA_GPP_PART1_HSEC_CTL_TX_DA_GPP_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_DA_GPP_PART1_HSEC_CTL_TX_DA_GPP_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_TX_DA_GPP_PART1_HSEC_CTL_TX_DA_GPP_PART1_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_TX_DA_GPP_PART2 0x001E34A0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_DA_GPP_PART2_HSEC_CTL_TX_DA_GPP_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_DA_GPP_PART2_HSEC_CTL_TX_DA_GPP_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_DA_GPP_PART2_HSEC_CTL_TX_DA_GPP_PART2_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_TX_DA_PPP_PART1 0x001E34F0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_DA_PPP_PART1_HSEC_CTL_TX_DA_PPP_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_DA_PPP_PART1_HSEC_CTL_TX_DA_PPP_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_TX_DA_PPP_PART1_HSEC_CTL_TX_DA_PPP_PART1_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_TX_DA_PPP_PART2 0x001E3500 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_DA_PPP_PART2_HSEC_CTL_TX_DA_PPP_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_DA_PPP_PART2_HSEC_CTL_TX_DA_PPP_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_DA_PPP_PART2_HSEC_CTL_TX_DA_PPP_PART2_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE 0x001E3000 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_TX_ERR_PKT_MODE 0x001E3060 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_ERR_PKT_MODE_HSEC_CTL_TX_ERR_PKT_MODE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_ERR_PKT_MODE_HSEC_CTL_TX_ERR_PKT_MODE_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_TX_ERR_PKT_MODE_HSEC_CTL_TX_ERR_PKT_MODE_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_TX_ETHERTYPE_GPP 0x001E34D0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_ETHERTYPE_GPP_HSEC_CTL_TX_ETHERTYPE_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_ETHERTYPE_GPP_HSEC_CTL_TX_ETHERTYPE_GPP_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_ETHERTYPE_GPP_HSEC_CTL_TX_ETHERTYPE_GPP_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_TX_ETHERTYPE_PPP 0x001E3510 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_ETHERTYPE_PPP_HSEC_CTL_TX_ETHERTYPE_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_ETHERTYPE_PPP_HSEC_CTL_TX_ETHERTYPE_PPP_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_ETHERTYPE_PPP_HSEC_CTL_TX_ETHERTYPE_PPP_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_TX_FCS_INS_ENABLE 0x001E3020 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_FCS_INS_ENABLE_TX_FCS_INS_EN_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_FCS_INS_ENABLE_TX_FCS_INS_EN_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_TX_FCS_INS_ENABLE_TX_FCS_INS_EN_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_TX_FCS_STOMP 0x001E3030 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_FCS_STOMP_HSEC_CTL_TX_FCS_STOMP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_FCS_STOMP_HSEC_CTL_TX_FCS_STOMP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_TX_FCS_STOMP_HSEC_CTL_TX_FCS_STOMP_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_TX_IGNORE_FCS 0x001E3040 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_IGNORE_FCS_HSEC_CTL_TX_IGNORE_FCS_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_IGNORE_FCS_HSEC_CTL_TX_IGNORE_FCS_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_TX_IGNORE_FCS_HSEC_CTL_TX_IGNORE_FCS_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_TX_OPCODE_GPP 0x001E34E0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_OPCODE_GPP_HSEC_CTL_TX_OPCODE_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_OPCODE_GPP_HSEC_CTL_TX_OPCODE_GPP_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_OPCODE_GPP_HSEC_CTL_TX_OPCODE_GPP_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_TX_OPCODE_PPP 0x001E3520 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_OPCODE_PPP_HSEC_CTL_TX_OPCODE_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_OPCODE_PPP_HSEC_CTL_TX_OPCODE_PPP_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_OPCODE_PPP_HSEC_CTL_TX_OPCODE_PPP_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_TX_RDYOUT_THRESH 0x001E3010 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_RDYOUT_THRESH_HSEC_CTL_TX_RDYOUT_THRESH_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_RDYOUT_THRESH_HSEC_CTL_TX_RDYOUT_THRESH_MASK I40E_MASK(0xF, I40E_PRTMAC_HSEC_CTL_TX_RDYOUT_THRESH_HSEC_CTL_TX_RDYOUT_THRESH_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_TX_TO_RX_LOOPBACK 0x001E3050 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_TO_RX_LOOPBACK_HSEC_CTL_TX_TO_RX_LOOPBACK_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_TO_RX_LOOPBACK_HSEC_CTL_TX_TO_RX_LOOPBACK_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_TX_TO_RX_LOOPBACK_HSEC_CTL_TX_TO_RX_LOOPBACK_SHIFT)
+
+#define I40E_PRTMAC_HSEC_CTL_XLGMII 0x001E3550 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_XLGMII_LB_PHY_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_XLGMII_LB_PHY_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_XLGMII_LB_PHY_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_XLGMII_HI_TH_SHIFT 1
+#define I40E_PRTMAC_HSEC_CTL_XLGMII_HI_TH_MASK I40E_MASK(0xF, I40E_PRTMAC_HSEC_CTL_XLGMII_HI_TH_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_XLGMII_LO_TH_SHIFT 5
+#define I40E_PRTMAC_HSEC_CTL_XLGMII_LO_TH_MASK I40E_MASK(0xF, I40E_PRTMAC_HSEC_CTL_XLGMII_LO_TH_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_XLGMII_RX_SWP_CTL_SHIFT 9
+#define I40E_PRTMAC_HSEC_CTL_XLGMII_RX_SWP_CTL_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_XLGMII_RX_SWP_CTL_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_XLGMII_RX_SWP_DAT_SHIFT 10
+#define I40E_PRTMAC_HSEC_CTL_XLGMII_RX_SWP_DAT_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_XLGMII_RX_SWP_DAT_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_XLGMII_RX_SWZL_DAT_SHIFT 11
+#define I40E_PRTMAC_HSEC_CTL_XLGMII_RX_SWZL_DAT_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_XLGMII_RX_SWZL_DAT_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_XLGMII_TX_SWP_CTL_SHIFT 12
+#define I40E_PRTMAC_HSEC_CTL_XLGMII_TX_SWP_CTL_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_XLGMII_TX_SWP_CTL_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_XLGMII_TX_SWP_DAT_SHIFT 13
+#define I40E_PRTMAC_HSEC_CTL_XLGMII_TX_SWP_DAT_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_XLGMII_TX_SWP_DAT_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_XLGMII_TX_SWZL_DAT_SHIFT 14
+#define I40E_PRTMAC_HSEC_CTL_XLGMII_TX_SWZL_DAT_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_XLGMII_TX_SWZL_DAT_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_XLGMII_RX_BYP_INP_SHIFT 15
+#define I40E_PRTMAC_HSEC_CTL_XLGMII_RX_BYP_INP_MASK I40E_MASK(0x3, I40E_PRTMAC_HSEC_CTL_XLGMII_RX_BYP_INP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_XLGMII_LB2TX_SHIFT 17
+#define I40E_PRTMAC_HSEC_CTL_XLGMII_LB2TX_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_XLGMII_LB2TX_SHIFT)
+
+#define I40E_PRTMAC_HSEC_SINGLE_40G_PORT_SELECT 0x001E3540 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_SINGLE_40G_PORT_SELECT_MAC_SINGLE_40G_PORT_SELECT_SHIFT 0
+#define I40E_PRTMAC_HSEC_SINGLE_40G_PORT_SELECT_MAC_SINGLE_40G_PORT_SELECT_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_SINGLE_40G_PORT_SELECT_MAC_SINGLE_40G_PORT_SELECT_SHIFT)
+
+#define I40E_PRTMAC_HSECTL1 0x001E3560 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSECTL1_DROP_US_PKTS_SHIFT 0
+#define I40E_PRTMAC_HSECTL1_DROP_US_PKTS_MASK I40E_MASK(0x1, I40E_PRTMAC_HSECTL1_DROP_US_PKTS_SHIFT)
+#define I40E_PRTMAC_HSECTL1_PAD_US_PKT_SHIFT 3
+#define I40E_PRTMAC_HSECTL1_PAD_US_PKT_MASK I40E_MASK(0x1, I40E_PRTMAC_HSECTL1_PAD_US_PKT_SHIFT)
+#define I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_SHIFT 4
+#define I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_MASK I40E_MASK(0x7, I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_SHIFT)
+#define I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_SHIFT 7
+#define I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_MASK I40E_MASK(0x1, I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_SHIFT)
+#define I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_SHIFT 30
+#define I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_MASK I40E_MASK(0x1, I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_SHIFT)
+#define I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_SHIFT 31
+#define I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_MASK I40E_MASK(0x1, I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_SHIFT)
+
+#define I40E_PRTMAC_LINKSTA 0x001E2420 /* Reset: GLOBR */
+#define I40E_PRTMAC_LINKSTA_FIFO_MTAR_STS_RX_EMPTY_SHIFT 0
+#define I40E_PRTMAC_LINKSTA_FIFO_MTAR_STS_RX_EMPTY_MASK I40E_MASK(0x1, I40E_PRTMAC_LINKSTA_FIFO_MTAR_STS_RX_EMPTY_SHIFT)
+#define I40E_PRTMAC_LINKSTA_FIFO_MTAR_STS_RX_FULL_SHIFT 1
+#define I40E_PRTMAC_LINKSTA_FIFO_MTAR_STS_RX_FULL_MASK I40E_MASK(0x1, I40E_PRTMAC_LINKSTA_FIFO_MTAR_STS_RX_FULL_SHIFT)
+#define I40E_PRTMAC_LINKSTA_MAC_RX_LINK_FAULT_RF_SHIFT 2
+#define I40E_PRTMAC_LINKSTA_MAC_RX_LINK_FAULT_RF_MASK I40E_MASK(0x1, I40E_PRTMAC_LINKSTA_MAC_RX_LINK_FAULT_RF_SHIFT)
+#define I40E_PRTMAC_LINKSTA_MAC_RX_LINK_FAULT_LF_SHIFT 3
+#define I40E_PRTMAC_LINKSTA_MAC_RX_LINK_FAULT_LF_MASK I40E_MASK(0x1, I40E_PRTMAC_LINKSTA_MAC_RX_LINK_FAULT_LF_SHIFT)
+#define I40E_PRTMAC_LINKSTA_MAC_LINK_UP_PREV_SHIFT 7
+#define I40E_PRTMAC_LINKSTA_MAC_LINK_UP_PREV_MASK I40E_MASK(0x1, I40E_PRTMAC_LINKSTA_MAC_LINK_UP_PREV_SHIFT)
+#define I40E_PRTMAC_LINKSTA_MAC_LINK_SPEED_SHIFT 27
+#define I40E_PRTMAC_LINKSTA_MAC_LINK_SPEED_MASK I40E_MASK(0x7, I40E_PRTMAC_LINKSTA_MAC_LINK_SPEED_SHIFT)
+#define I40E_PRTMAC_LINKSTA_MAC_LINK_UP_SHIFT 30
+#define I40E_PRTMAC_LINKSTA_MAC_LINK_UP_MASK I40E_MASK(0x1, I40E_PRTMAC_LINKSTA_MAC_LINK_UP_SHIFT)
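+
+/*
+ * Illustrative sketch only, not part of the register map: MAC link state
+ * and speed can be decoded from PRTMAC_LINKSTA with the field macros
+ * above.  Assuming the driver's usual rd32() accessor and an i40e_hw
+ * handle named hw:
+ *
+ *	u32 reg = rd32(hw, I40E_PRTMAC_LINKSTA);
+ *	bool link_up = (reg & I40E_PRTMAC_LINKSTA_MAC_LINK_UP_MASK) != 0;
+ *	u32 speed = (reg & I40E_PRTMAC_LINKSTA_MAC_LINK_SPEED_MASK) >>
+ *	    I40E_PRTMAC_LINKSTA_MAC_LINK_SPEED_SHIFT;
+ */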
+
+#define I40E_PRTMAC_MACC 0x001E24E0 /* Reset: GLOBR */
+#define I40E_PRTMAC_MACC_FORCE_LINK_SHIFT 0
+#define I40E_PRTMAC_MACC_FORCE_LINK_MASK I40E_MASK(0x1, I40E_PRTMAC_MACC_FORCE_LINK_SHIFT)
+#define I40E_PRTMAC_MACC_PHY_LOOP_BACK_SHIFT 1
+#define I40E_PRTMAC_MACC_PHY_LOOP_BACK_MASK I40E_MASK(0x1, I40E_PRTMAC_MACC_PHY_LOOP_BACK_SHIFT)
+#define I40E_PRTMAC_MACC_TX_SWIZZLE_DATA_SHIFT 2
+#define I40E_PRTMAC_MACC_TX_SWIZZLE_DATA_MASK I40E_MASK(0x1, I40E_PRTMAC_MACC_TX_SWIZZLE_DATA_SHIFT)
+#define I40E_PRTMAC_MACC_TX_SWAP_DATA_SHIFT 3
+#define I40E_PRTMAC_MACC_TX_SWAP_DATA_MASK I40E_MASK(0x1, I40E_PRTMAC_MACC_TX_SWAP_DATA_SHIFT)
+#define I40E_PRTMAC_MACC_TX_SWAP_CTRL_SHIFT 4
+#define I40E_PRTMAC_MACC_TX_SWAP_CTRL_MASK I40E_MASK(0x1, I40E_PRTMAC_MACC_TX_SWAP_CTRL_SHIFT)
+#define I40E_PRTMAC_MACC_RX_SWIZZLE_DATA_SHIFT 5
+#define I40E_PRTMAC_MACC_RX_SWIZZLE_DATA_MASK I40E_MASK(0x1, I40E_PRTMAC_MACC_RX_SWIZZLE_DATA_SHIFT)
+#define I40E_PRTMAC_MACC_RX_SWAP_DATA_SHIFT 6
+#define I40E_PRTMAC_MACC_RX_SWAP_DATA_MASK I40E_MASK(0x1, I40E_PRTMAC_MACC_RX_SWAP_DATA_SHIFT)
+#define I40E_PRTMAC_MACC_RX_SWAP_CTRL_SHIFT 7
+#define I40E_PRTMAC_MACC_RX_SWAP_CTRL_MASK I40E_MASK(0x1, I40E_PRTMAC_MACC_RX_SWAP_CTRL_SHIFT)
+#define I40E_PRTMAC_MACC_FIFO_THRSHLD_HI_SHIFT 8
+#define I40E_PRTMAC_MACC_FIFO_THRSHLD_HI_MASK I40E_MASK(0xF, I40E_PRTMAC_MACC_FIFO_THRSHLD_HI_SHIFT)
+#define I40E_PRTMAC_MACC_FIFO_THRSHLD_LO_SHIFT 12
+#define I40E_PRTMAC_MACC_FIFO_THRSHLD_LO_MASK I40E_MASK(0xF, I40E_PRTMAC_MACC_FIFO_THRSHLD_LO_SHIFT)
+#define I40E_PRTMAC_MACC_LEGACY_RSRVD_SHIFT 16
+#define I40E_PRTMAC_MACC_LEGACY_RSRVD_MASK I40E_MASK(0x7, I40E_PRTMAC_MACC_LEGACY_RSRVD_SHIFT)
+#define I40E_PRTMAC_MACC_MASK_FAULT_STATE_SHIFT 19
+#define I40E_PRTMAC_MACC_MASK_FAULT_STATE_MASK I40E_MASK(0x1, I40E_PRTMAC_MACC_MASK_FAULT_STATE_SHIFT)
+#define I40E_PRTMAC_MACC_MASK_XGMII_IF_SHIFT 20
+#define I40E_PRTMAC_MACC_MASK_XGMII_IF_MASK I40E_MASK(0x3, I40E_PRTMAC_MACC_MASK_XGMII_IF_SHIFT)
+#define I40E_PRTMAC_MACC_MASK_LINK_SHIFT 22
+#define I40E_PRTMAC_MACC_MASK_LINK_MASK I40E_MASK(0x1, I40E_PRTMAC_MACC_MASK_LINK_SHIFT)
+#define I40E_PRTMAC_MACC_FORCE_SPEED_VALUE_SHIFT 23
+#define I40E_PRTMAC_MACC_FORCE_SPEED_VALUE_MASK I40E_MASK(0x7, I40E_PRTMAC_MACC_FORCE_SPEED_VALUE_SHIFT)
+#define I40E_PRTMAC_MACC_FORCE_SPEED_EN_SHIFT 26
+#define I40E_PRTMAC_MACC_FORCE_SPEED_EN_MASK I40E_MASK(0x1, I40E_PRTMAC_MACC_FORCE_SPEED_EN_SHIFT)
+
+#define I40E_PRTMAC_PAP 0x001E2040 /* Reset: GLOBR */
+#define I40E_PRTMAC_PAP_TXPAUSECNT_SHIFT 0
+#define I40E_PRTMAC_PAP_TXPAUSECNT_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_PAP_TXPAUSECNT_SHIFT)
+#define I40E_PRTMAC_PAP_PACE_SHIFT 16
+#define I40E_PRTMAC_PAP_PACE_MASK I40E_MASK(0xF, I40E_PRTMAC_PAP_PACE_SHIFT)
+
+#define I40E_PRTMAC_PCS_AN_CONTROL1 0x0008C600 /* Reset: GLOBR */
+#define I40E_PRTMAC_PCS_AN_CONTROL1_ANACK2_SHIFT 1
+#define I40E_PRTMAC_PCS_AN_CONTROL1_ANACK2_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_AN_CONTROL1_ANACK2_SHIFT)
+#define I40E_PRTMAC_PCS_AN_CONTROL1_ANSF_SHIFT 2
+#define I40E_PRTMAC_PCS_AN_CONTROL1_ANSF_MASK I40E_MASK(0x1F, I40E_PRTMAC_PCS_AN_CONTROL1_ANSF_SHIFT)
+#define I40E_PRTMAC_PCS_AN_CONTROL1_D10GMP_SHIFT 10
+#define I40E_PRTMAC_PCS_AN_CONTROL1_D10GMP_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_AN_CONTROL1_D10GMP_SHIFT)
+#define I40E_PRTMAC_PCS_AN_CONTROL1_RATD_SHIFT 11
+#define I40E_PRTMAC_PCS_AN_CONTROL1_RATD_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_AN_CONTROL1_RATD_SHIFT)
+#define I40E_PRTMAC_PCS_AN_CONTROL1_ANRXAT_SHIFT 19
+#define I40E_PRTMAC_PCS_AN_CONTROL1_ANRXAT_MASK I40E_MASK(0xF, I40E_PRTMAC_PCS_AN_CONTROL1_ANRXAT_SHIFT)
+#define I40E_PRTMAC_PCS_AN_CONTROL1_ANRXDM_SHIFT 23
+#define I40E_PRTMAC_PCS_AN_CONTROL1_ANRXDM_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_AN_CONTROL1_ANRXDM_SHIFT)
+#define I40E_PRTMAC_PCS_AN_CONTROL1_ANRXLM_SHIFT 24
+#define I40E_PRTMAC_PCS_AN_CONTROL1_ANRXLM_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_AN_CONTROL1_ANRXLM_SHIFT)
+#define I40E_PRTMAC_PCS_AN_CONTROL1_ANPDT_SHIFT 25
+#define I40E_PRTMAC_PCS_AN_CONTROL1_ANPDT_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_AN_CONTROL1_ANPDT_SHIFT)
+#define I40E_PRTMAC_PCS_AN_CONTROL1_RF_SHIFT 27
+#define I40E_PRTMAC_PCS_AN_CONTROL1_RF_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_AN_CONTROL1_RF_SHIFT)
+#define I40E_PRTMAC_PCS_AN_CONTROL1_PB_SHIFT 28
+#define I40E_PRTMAC_PCS_AN_CONTROL1_PB_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_AN_CONTROL1_PB_SHIFT)
+
+#define I40E_PRTMAC_PCS_AN_CONTROL2 0x0008C620 /* Reset: GLOBR */
+#define I40E_PRTMAC_PCS_AN_CONTROL2_AN_PAGE_D_LOW_OVRD_SHIFT 0
+#define I40E_PRTMAC_PCS_AN_CONTROL2_AN_PAGE_D_LOW_OVRD_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_PCS_AN_CONTROL2_AN_PAGE_D_LOW_OVRD_SHIFT)
+#define I40E_PRTMAC_PCS_AN_CONTROL2_RSVD_SHIFT 16
+#define I40E_PRTMAC_PCS_AN_CONTROL2_RSVD_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_AN_CONTROL2_RSVD_SHIFT)
+#define I40E_PRTMAC_PCS_AN_CONTROL2_DDPT_SHIFT 18
+#define I40E_PRTMAC_PCS_AN_CONTROL2_DDPT_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_AN_CONTROL2_DDPT_SHIFT)
+#define I40E_PRTMAC_PCS_AN_CONTROL2_AAPLL_SHIFT 19
+#define I40E_PRTMAC_PCS_AN_CONTROL2_AAPLL_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_AN_CONTROL2_AAPLL_SHIFT)
+#define I40E_PRTMAC_PCS_AN_CONTROL2_ANAPO_SHIFT 20
+#define I40E_PRTMAC_PCS_AN_CONTROL2_ANAPO_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_AN_CONTROL2_ANAPO_SHIFT)
+#define I40E_PRTMAC_PCS_AN_CONTROL2_LH1GSI_SHIFT 22
+#define I40E_PRTMAC_PCS_AN_CONTROL2_LH1GSI_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_AN_CONTROL2_LH1GSI_SHIFT)
+#define I40E_PRTMAC_PCS_AN_CONTROL2_LH1GAI_SHIFT 23
+#define I40E_PRTMAC_PCS_AN_CONTROL2_LH1GAI_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_AN_CONTROL2_LH1GAI_SHIFT)
+#define I40E_PRTMAC_PCS_AN_CONTROL2_FANAS_SHIFT 24
+#define I40E_PRTMAC_PCS_AN_CONTROL2_FANAS_MASK I40E_MASK(0xF, I40E_PRTMAC_PCS_AN_CONTROL2_FANAS_SHIFT)
+#define I40E_PRTMAC_PCS_AN_CONTROL2_FASM_SHIFT 28
+#define I40E_PRTMAC_PCS_AN_CONTROL2_FASM_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_AN_CONTROL2_FASM_SHIFT)
+#define I40E_PRTMAC_PCS_AN_CONTROL2_PDD_SHIFT 30
+#define I40E_PRTMAC_PCS_AN_CONTROL2_PDD_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_AN_CONTROL2_PDD_SHIFT)
+#define I40E_PRTMAC_PCS_AN_CONTROL2_FEC_FORCE_SHIFT 31
+#define I40E_PRTMAC_PCS_AN_CONTROL2_FEC_FORCE_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_AN_CONTROL2_FEC_FORCE_SHIFT)
+
+#define I40E_PRTMAC_PCS_AN_CONTROL4 0x0008C660 /* Reset: GLOBR */
+#define I40E_PRTMAC_PCS_AN_CONTROL4_RESERVED0_SHIFT 0
+#define I40E_PRTMAC_PCS_AN_CONTROL4_RESERVED0_MASK I40E_MASK(0x1FFF, I40E_PRTMAC_PCS_AN_CONTROL4_RESERVED0_SHIFT)
+#define I40E_PRTMAC_PCS_AN_CONTROL4_FORCE_KR_EEE_AN_VALUE_SHIFT 13
+#define I40E_PRTMAC_PCS_AN_CONTROL4_FORCE_KR_EEE_AN_VALUE_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_AN_CONTROL4_FORCE_KR_EEE_AN_VALUE_SHIFT)
+#define I40E_PRTMAC_PCS_AN_CONTROL4_FORCE_KR_EEE_AN_RESULT_SHIFT 14
+#define I40E_PRTMAC_PCS_AN_CONTROL4_FORCE_KR_EEE_AN_RESULT_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_AN_CONTROL4_FORCE_KR_EEE_AN_RESULT_SHIFT)
+#define I40E_PRTMAC_PCS_AN_CONTROL4_FORCE_KX4_EEE_AN_VALUE_SHIFT 15
+#define I40E_PRTMAC_PCS_AN_CONTROL4_FORCE_KX4_EEE_AN_VALUE_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_AN_CONTROL4_FORCE_KX4_EEE_AN_VALUE_SHIFT)
+#define I40E_PRTMAC_PCS_AN_CONTROL4_FORCE_KX4_EEE_AN_RESULT_SHIFT 16
+#define I40E_PRTMAC_PCS_AN_CONTROL4_FORCE_KX4_EEE_AN_RESULT_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_AN_CONTROL4_FORCE_KX4_EEE_AN_RESULT_SHIFT)
+#define I40E_PRTMAC_PCS_AN_CONTROL4_FORCE_KX_EEE_AN_VALUE_SHIFT 17
+#define I40E_PRTMAC_PCS_AN_CONTROL4_FORCE_KX_EEE_AN_VALUE_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_AN_CONTROL4_FORCE_KX_EEE_AN_VALUE_SHIFT)
+#define I40E_PRTMAC_PCS_AN_CONTROL4_FORCE_KX_EEE_AN_RESULT_SHIFT 18
+#define I40E_PRTMAC_PCS_AN_CONTROL4_FORCE_KX_EEE_AN_RESULT_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_AN_CONTROL4_FORCE_KX_EEE_AN_RESULT_SHIFT)
+#define I40E_PRTMAC_PCS_AN_CONTROL4_FORCE_SGMII_EEE_AN_VALUE_SHIFT 19
+#define I40E_PRTMAC_PCS_AN_CONTROL4_FORCE_SGMII_EEE_AN_VALUE_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_AN_CONTROL4_FORCE_SGMII_EEE_AN_VALUE_SHIFT)
+#define I40E_PRTMAC_PCS_AN_CONTROL4_FORCE_SGMII_EEE_AN_RESULT_SHIFT 20
+#define I40E_PRTMAC_PCS_AN_CONTROL4_FORCE_SGMII_EEE_AN_RESULT_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_AN_CONTROL4_FORCE_SGMII_EEE_AN_RESULT_SHIFT)
+#define I40E_PRTMAC_PCS_AN_CONTROL4_RESERVED1_SHIFT 21
+#define I40E_PRTMAC_PCS_AN_CONTROL4_RESERVED1_MASK I40E_MASK(0x7FF, I40E_PRTMAC_PCS_AN_CONTROL4_RESERVED1_SHIFT)
+
+#define I40E_PRTMAC_PCS_LINK_CTRL 0x0008C260 /* Reset: GLOBR */
+#define I40E_PRTMAC_PCS_LINK_CTRL_PMD_40G_R_TYPE_SELECTION_SHIFT 0
+#define I40E_PRTMAC_PCS_LINK_CTRL_PMD_40G_R_TYPE_SELECTION_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_LINK_CTRL_PMD_40G_R_TYPE_SELECTION_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_CTRL_PMD_10G_R_TYPE_SELECTION_SHIFT 2
+#define I40E_PRTMAC_PCS_LINK_CTRL_PMD_10G_R_TYPE_SELECTION_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_LINK_CTRL_PMD_10G_R_TYPE_SELECTION_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_CTRL_PMD_10G_X_TYPE_SELECTION_SHIFT 4
+#define I40E_PRTMAC_PCS_LINK_CTRL_PMD_10G_X_TYPE_SELECTION_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_LINK_CTRL_PMD_10G_X_TYPE_SELECTION_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_CTRL_PMD_1G_X_TYPE_SELECTION0_SHIFT 6
+#define I40E_PRTMAC_PCS_LINK_CTRL_PMD_1G_X_TYPE_SELECTION0_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_CTRL_PMD_1G_X_TYPE_SELECTION0_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_CTRL_SPEED_SELECTION_SHIFT 8
+#define I40E_PRTMAC_PCS_LINK_CTRL_SPEED_SELECTION_MASK I40E_MASK(0x7, I40E_PRTMAC_PCS_LINK_CTRL_SPEED_SELECTION_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_CTRL_PMD_1G_X_TYPE_SELECTION1_SHIFT 12
+#define I40E_PRTMAC_PCS_LINK_CTRL_PMD_1G_X_TYPE_SELECTION1_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_CTRL_PMD_1G_X_TYPE_SELECTION1_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_CTRL_AN_CLAUSE_37_ENABLE_SHIFT 13
+#define I40E_PRTMAC_PCS_LINK_CTRL_AN_CLAUSE_37_ENABLE_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_CTRL_AN_CLAUSE_37_ENABLE_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_CTRL_FEC_CAPABILITY_SHIFT 14
+#define I40E_PRTMAC_PCS_LINK_CTRL_FEC_CAPABILITY_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_LINK_CTRL_FEC_CAPABILITY_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_CTRL_KX_ABILITY_SHIFT 16
+#define I40E_PRTMAC_PCS_LINK_CTRL_KX_ABILITY_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_CTRL_KX_ABILITY_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_CTRL_KX4_ABILITY_SHIFT 17
+#define I40E_PRTMAC_PCS_LINK_CTRL_KX4_ABILITY_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_CTRL_KX4_ABILITY_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_CTRL_KR_ABILITY_SHIFT 18
+#define I40E_PRTMAC_PCS_LINK_CTRL_KR_ABILITY_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_CTRL_KR_ABILITY_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_CTRL_KR4_ABILITY_SHIFT 19
+#define I40E_PRTMAC_PCS_LINK_CTRL_KR4_ABILITY_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_CTRL_KR4_ABILITY_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_CTRL_CR4_ABILITY_SHIFT 20
+#define I40E_PRTMAC_PCS_LINK_CTRL_CR4_ABILITY_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_CTRL_CR4_ABILITY_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_CTRL_KR2_ABILITY_SHIFT 21
+#define I40E_PRTMAC_PCS_LINK_CTRL_KR2_ABILITY_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_CTRL_KR2_ABILITY_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_CTRL_POWER_DOWN_SHIFT 23
+#define I40E_PRTMAC_PCS_LINK_CTRL_POWER_DOWN_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_CTRL_POWER_DOWN_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_CTRL_KX_EEE_ABILITY_SHIFT 24
+#define I40E_PRTMAC_PCS_LINK_CTRL_KX_EEE_ABILITY_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_CTRL_KX_EEE_ABILITY_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_CTRL_KX4_EEE_ABILITY_SHIFT 25
+#define I40E_PRTMAC_PCS_LINK_CTRL_KX4_EEE_ABILITY_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_CTRL_KX4_EEE_ABILITY_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_CTRL_KR_EEE_ABILITY_SHIFT 26
+#define I40E_PRTMAC_PCS_LINK_CTRL_KR_EEE_ABILITY_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_CTRL_KR_EEE_ABILITY_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_CTRL_AUTO_NEG_ENABLE_SHIFT 29
+#define I40E_PRTMAC_PCS_LINK_CTRL_AUTO_NEG_ENABLE_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_CTRL_AUTO_NEG_ENABLE_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_CTRL_FORCE_LINK_UP_SHIFT 30
+#define I40E_PRTMAC_PCS_LINK_CTRL_FORCE_LINK_UP_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_CTRL_FORCE_LINK_UP_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_CTRL_RESTART_AUTO_NEG_SHIFT 31
+#define I40E_PRTMAC_PCS_LINK_CTRL_RESTART_AUTO_NEG_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_CTRL_RESTART_AUTO_NEG_SHIFT)
+
+#define I40E_PRTMAC_PCS_LINK_STATUS1 0x0008C200 /* Reset: GLOBR */
+#define I40E_PRTMAC_PCS_LINK_STATUS1_SIGNAL_DETECTED_1G_MODE_SHIFT 5
+#define I40E_PRTMAC_PCS_LINK_STATUS1_SIGNAL_DETECTED_1G_MODE_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_STATUS1_SIGNAL_DETECTED_1G_MODE_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_STATUS1_SIGNAL_DETECTED_LANE_0_SHIFT 6
+#define I40E_PRTMAC_PCS_LINK_STATUS1_SIGNAL_DETECTED_LANE_0_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_STATUS1_SIGNAL_DETECTED_LANE_0_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_STATUS1_SIGNAL_DETECTED_LANE_1_SHIFT 7
+#define I40E_PRTMAC_PCS_LINK_STATUS1_SIGNAL_DETECTED_LANE_1_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_STATUS1_SIGNAL_DETECTED_LANE_1_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_STATUS1_SIGNAL_DETECTED_LANE_2_SHIFT 8
+#define I40E_PRTMAC_PCS_LINK_STATUS1_SIGNAL_DETECTED_LANE_2_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_STATUS1_SIGNAL_DETECTED_LANE_2_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_STATUS1_SIGNAL_DETECTED_LANE_3_SHIFT 9
+#define I40E_PRTMAC_PCS_LINK_STATUS1_SIGNAL_DETECTED_LANE_3_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_STATUS1_SIGNAL_DETECTED_LANE_3_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_STATUS1_SIGNAL_DETECTED_COMBINED_SHIFT 10
+#define I40E_PRTMAC_PCS_LINK_STATUS1_SIGNAL_DETECTED_COMBINED_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_STATUS1_SIGNAL_DETECTED_COMBINED_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_STATUS1_SYNCH_LANE0_SHIFT 11
+#define I40E_PRTMAC_PCS_LINK_STATUS1_SYNCH_LANE0_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_STATUS1_SYNCH_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_STATUS1_SYNCH_LANE1_SHIFT 12
+#define I40E_PRTMAC_PCS_LINK_STATUS1_SYNCH_LANE1_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_STATUS1_SYNCH_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_STATUS1_SYNCH_LANE2_SHIFT 13
+#define I40E_PRTMAC_PCS_LINK_STATUS1_SYNCH_LANE2_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_STATUS1_SYNCH_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_STATUS1_SYNCH_LANE3_SHIFT 14
+#define I40E_PRTMAC_PCS_LINK_STATUS1_SYNCH_LANE3_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_STATUS1_SYNCH_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_STATUS1_LINK_1000_BASE_X_AN_SHIFT 15
+#define I40E_PRTMAC_PCS_LINK_STATUS1_LINK_1000_BASE_X_AN_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_STATUS1_LINK_1000_BASE_X_AN_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_STATUS1_LINK_1000_BASE_X_SHIFT 16
+#define I40E_PRTMAC_PCS_LINK_STATUS1_LINK_1000_BASE_X_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_STATUS1_LINK_1000_BASE_X_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_STATUS1_LINK_10G_BASE_X4_PARALLEL_SHIFT 17
+#define I40E_PRTMAC_PCS_LINK_STATUS1_LINK_10G_BASE_X4_PARALLEL_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_STATUS1_LINK_10G_BASE_X4_PARALLEL_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_STATUS1_FEC_10G_ENABLED_SHIFT 18
+#define I40E_PRTMAC_PCS_LINK_STATUS1_FEC_10G_ENABLED_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_STATUS1_FEC_10G_ENABLED_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_STATUS1_LINK_40G_BASE_R4_SHIFT 19
+#define I40E_PRTMAC_PCS_LINK_STATUS1_LINK_40G_BASE_R4_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_STATUS1_LINK_40G_BASE_R4_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_STATUS1_LINK_10G_BASE_R1_SHIFT 20
+#define I40E_PRTMAC_PCS_LINK_STATUS1_LINK_10G_BASE_R1_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_STATUS1_LINK_10G_BASE_R1_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_STATUS1_LINK_1G_SGMII_SHIFT 21
+#define I40E_PRTMAC_PCS_LINK_STATUS1_LINK_1G_SGMII_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_STATUS1_LINK_1G_SGMII_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_STATUS1_LINK_MODE_SHIFT 22
+#define I40E_PRTMAC_PCS_LINK_STATUS1_LINK_MODE_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_LINK_STATUS1_LINK_MODE_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_STATUS1_LINK_SPEED_SHIFT 24
+#define I40E_PRTMAC_PCS_LINK_STATUS1_LINK_SPEED_MASK I40E_MASK(0x7, I40E_PRTMAC_PCS_LINK_STATUS1_LINK_SPEED_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_STATUS1_LINK_UP_SHIFT 27
+#define I40E_PRTMAC_PCS_LINK_STATUS1_LINK_UP_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_STATUS1_LINK_UP_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_STATUS1_AN_COMPLETED_SHIFT 28
+#define I40E_PRTMAC_PCS_LINK_STATUS1_AN_COMPLETED_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_STATUS1_AN_COMPLETED_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_STATUS1_PCS_READY_SHIFT 29
+#define I40E_PRTMAC_PCS_LINK_STATUS1_PCS_READY_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_STATUS1_PCS_READY_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_STATUS1_MAC_READY_SHIFT 30
+#define I40E_PRTMAC_PCS_LINK_STATUS1_MAC_READY_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_STATUS1_MAC_READY_SHIFT)
+
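Each register above follows the same convention: a *_SHIFT define gives the bit position of a field and the matching *_MASK define gives the already-shifted mask built with I40E_MASK(). Below is a minimal sketch of decoding PRTMAC_PCS_LINK_STATUS1 with these pairs; it assumes I40E_MASK(mask, shift) expands to (mask << shift) and that rd32() is the register-read helper from i40e_osdep.h, and it is illustrative only rather than code taken from the driver.

/* Illustrative sketch, not part of the header or the driver sources. */
static inline void
ixl_show_pcs_link_status(struct i40e_hw *hw)
{
	u32 reg = rd32(hw, I40E_PRTMAC_PCS_LINK_STATUS1);
	bool link_up;
	u32 speed;

	/* Single-bit fields can be tested directly against the mask. */
	link_up = (reg & I40E_PRTMAC_PCS_LINK_STATUS1_LINK_UP_MASK) != 0;

	/* Multi-bit fields are masked first, then shifted down. */
	speed = (reg & I40E_PRTMAC_PCS_LINK_STATUS1_LINK_SPEED_MASK) >>
	    I40E_PRTMAC_PCS_LINK_STATUS1_LINK_SPEED_SHIFT;

	printf("PCS link %s, speed code %u\n",
	    link_up ? "up" : "down", speed);
}
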
+#define I40E_PRTMAC_PCS_LINK_STATUS2 0x0008C220 /* Reset: GLOBR */
+#define I40E_PRTMAC_PCS_LINK_STATUS2_SIGNAL_DETECTED_FEC_SHIFT 1
+#define I40E_PRTMAC_PCS_LINK_STATUS2_SIGNAL_DETECTED_FEC_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_STATUS2_SIGNAL_DETECTED_FEC_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_STATUS2_FEC_BLOCK_LOCK_SHIFT 2
+#define I40E_PRTMAC_PCS_LINK_STATUS2_FEC_BLOCK_LOCK_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_STATUS2_FEC_BLOCK_LOCK_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_STATUS2_KR_HI_BERR_SHIFT 3
+#define I40E_PRTMAC_PCS_LINK_STATUS2_KR_HI_BERR_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_STATUS2_KR_HI_BERR_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_STATUS2_KR_10G_PCS_LCOK_SHIFT 4
+#define I40E_PRTMAC_PCS_LINK_STATUS2_KR_10G_PCS_LCOK_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_STATUS2_KR_10G_PCS_LCOK_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_STATUS2_AN_NEXT_PAGE_RECEIVED_SHIFT 5
+#define I40E_PRTMAC_PCS_LINK_STATUS2_AN_NEXT_PAGE_RECEIVED_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_STATUS2_AN_NEXT_PAGE_RECEIVED_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_STATUS2_AN_PAGE_RECEIVED_SHIFT 6
+#define I40E_PRTMAC_PCS_LINK_STATUS2_AN_PAGE_RECEIVED_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_STATUS2_AN_PAGE_RECEIVED_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_STATUS2_LINK_STATUS_SHIFT 7
+#define I40E_PRTMAC_PCS_LINK_STATUS2_LINK_STATUS_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_STATUS2_LINK_STATUS_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_STATUS2_ALIGNMENT_STATUS_10G_SHIFT 17
+#define I40E_PRTMAC_PCS_LINK_STATUS2_ALIGNMENT_STATUS_10G_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_STATUS2_ALIGNMENT_STATUS_10G_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_STATUS2_ALIGNMENT_STATUS_1G_SHIFT 18
+#define I40E_PRTMAC_PCS_LINK_STATUS2_ALIGNMENT_STATUS_1G_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_STATUS2_ALIGNMENT_STATUS_1G_SHIFT)
+#define I40E_PRTMAC_PCS_LINK_STATUS2_BP_AN_RECEIVER_IDLE_SHIFT 19
+#define I40E_PRTMAC_PCS_LINK_STATUS2_BP_AN_RECEIVER_IDLE_MASK I40E_MASK(0x1, I40E_PRTMAC_PCS_LINK_STATUS2_BP_AN_RECEIVER_IDLE_SHIFT)
+
+#define I40E_PRTMAC_PCS_MUX_KR 0x0008C000 /* Reset: GLOBR */
+#define I40E_PRTMAC_PCS_MUX_KR_PCS_MUX_KR_SHIFT 0
+#define I40E_PRTMAC_PCS_MUX_KR_PCS_MUX_KR_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_PCS_MUX_KR_PCS_MUX_KR_SHIFT)
+
+#define I40E_PRTMAC_PCS_MUX_KX 0x0008C008 /* Reset: GLOBR */
+#define I40E_PRTMAC_PCS_MUX_KX_PCS_MUX_KX_SHIFT 0
+#define I40E_PRTMAC_PCS_MUX_KX_PCS_MUX_KX_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_PCS_MUX_KX_PCS_MUX_KX_SHIFT)
+
+#define I40E_PRTMAC_PHY_ANA_ADD 0x000A4038 /* Reset: GLOBR */
+#define I40E_PRTMAC_PHY_ANA_ADD_ADDRESS_SHIFT 0
+#define I40E_PRTMAC_PHY_ANA_ADD_ADDRESS_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_PHY_ANA_ADD_ADDRESS_SHIFT)
+#define I40E_PRTMAC_PHY_ANA_ADD_BYTE_EN_SHIFT 28
+#define I40E_PRTMAC_PHY_ANA_ADD_BYTE_EN_MASK I40E_MASK(0xF, I40E_PRTMAC_PHY_ANA_ADD_BYTE_EN_SHIFT)
+
+#define I40E_PRTMAC_PHY_ANA_DATA 0x000A403c /* Reset: GLOBR */
+#define I40E_PRTMAC_PHY_ANA_DATA_DATA_SHIFT 0
+#define I40E_PRTMAC_PHY_ANA_DATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_PHY_ANA_DATA_DATA_SHIFT)
+
+#define I40E_PRTMAC_PMD_MUX_KR 0x0008C004 /* Reset: GLOBR */
+#define I40E_PRTMAC_PMD_MUX_KR_PMD_MUX_KR_SHIFT 0
+#define I40E_PRTMAC_PMD_MUX_KR_PMD_MUX_KR_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_PMD_MUX_KR_PMD_MUX_KR_SHIFT)
+
+#define I40E_PRTMAC_PMD_MUX_KX 0x0008C00C /* Reset: GLOBR */
+#define I40E_PRTMAC_PMD_MUX_KX_PMD_MUX_KX_SHIFT 0
+#define I40E_PRTMAC_PMD_MUX_KX_PMD_MUX_KX_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_PMD_MUX_KX_PMD_MUX_KX_SHIFT)
+
+#define I40E_PRTMAC_TREG 0x001E2160 /* Reset: GLOBR */
+#define I40E_PRTMAC_TREG_ILGLCODETXERRTST_SHIFT 0
+#define I40E_PRTMAC_TREG_ILGLCODETXERRTST_MASK I40E_MASK(0xFF, I40E_PRTMAC_TREG_ILGLCODETXERRTST_SHIFT)
+#define I40E_PRTMAC_TREG_CTRLTXERRTST_SHIFT 8
+#define I40E_PRTMAC_TREG_CTRLTXERRTST_MASK I40E_MASK(0x1, I40E_PRTMAC_TREG_CTRLTXERRTST_SHIFT)
+#define I40E_PRTMAC_TREG_TXXGMIITSTMODE_SHIFT 9
+#define I40E_PRTMAC_TREG_TXXGMIITSTMODE_MASK I40E_MASK(0x1, I40E_PRTMAC_TREG_TXXGMIITSTMODE_SHIFT)
+#define I40E_PRTMAC_TREG_BUSYIDLCODE_SHIFT 15
+#define I40E_PRTMAC_TREG_BUSYIDLCODE_MASK I40E_MASK(0xFF, I40E_PRTMAC_TREG_BUSYIDLCODE_SHIFT)
+#define I40E_PRTMAC_TREG_BUSYIDLEN_SHIFT 23
+#define I40E_PRTMAC_TREG_BUSYIDLEN_MASK I40E_MASK(0x1, I40E_PRTMAC_TREG_BUSYIDLEN_SHIFT)
+
+/* PF - Manageability Registers */
+
+#define I40E_EMP_TCO_ISOLATE 0x00078E80 /* Reset: POR */
+#define I40E_EMP_TCO_ISOLATE_EMP_TCO_ISOLATE_SHIFT 0
+#define I40E_EMP_TCO_ISOLATE_EMP_TCO_ISOLATE_MASK I40E_MASK(0xFFFF, I40E_EMP_TCO_ISOLATE_EMP_TCO_ISOLATE_SHIFT)
+
+#define I40E_GL_MNG_FRIACR 0x00083240 /* Reset: EMPR */
+#define I40E_GL_MNG_FRIACR_ADDR_SHIFT 0
+#define I40E_GL_MNG_FRIACR_ADDR_MASK I40E_MASK(0x1FFFFF, I40E_GL_MNG_FRIACR_ADDR_SHIFT)
+#define I40E_GL_MNG_FRIACR_WR_SHIFT 24
+#define I40E_GL_MNG_FRIACR_WR_MASK I40E_MASK(0x1, I40E_GL_MNG_FRIACR_WR_SHIFT)
+#define I40E_GL_MNG_FRIACR_RD_SHIFT 25
+#define I40E_GL_MNG_FRIACR_RD_MASK I40E_MASK(0x1, I40E_GL_MNG_FRIACR_RD_SHIFT)
+
+#define I40E_GL_MNG_FRIARDR 0x00083248 /* Reset: EMPR */
+#define I40E_GL_MNG_FRIARDR_RDATA_SHIFT 0
+#define I40E_GL_MNG_FRIARDR_RDATA_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_MNG_FRIARDR_RDATA_SHIFT)
+
+#define I40E_GL_MNG_FRIARR 0x0008324C /* Reset: EMPR */
+#define I40E_GL_MNG_FRIARR_HALT_SHIFT 0
+#define I40E_GL_MNG_FRIARR_HALT_MASK I40E_MASK(0x1, I40E_GL_MNG_FRIARR_HALT_SHIFT)
+#define I40E_GL_MNG_FRIARR_RST_EN_SHIFT 1
+#define I40E_GL_MNG_FRIARR_RST_EN_MASK I40E_MASK(0x1, I40E_GL_MNG_FRIARR_RST_EN_SHIFT)
+
+#define I40E_GL_MNG_FRIAWDR 0x00083244 /* Reset: EMPR */
+#define I40E_GL_MNG_FRIAWDR_WDATA_SHIFT 0
+#define I40E_GL_MNG_FRIAWDR_WDATA_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_MNG_FRIAWDR_WDATA_SHIFT)
+
+#define I40E_GL_MNG_RRDFM 0x00083040 /* Reset: EMPR */
+#define I40E_GL_MNG_RRDFM_RMII_DBG_FIL_0_SHIFT 0
+#define I40E_GL_MNG_RRDFM_RMII_DBG_FIL_0_MASK I40E_MASK(0x1, I40E_GL_MNG_RRDFM_RMII_DBG_FIL_0_SHIFT)
+#define I40E_GL_MNG_RRDFM_RMII_DBG_FIL_1_SHIFT 1
+#define I40E_GL_MNG_RRDFM_RMII_DBG_FIL_1_MASK I40E_MASK(0x1, I40E_GL_MNG_RRDFM_RMII_DBG_FIL_1_SHIFT)
+#define I40E_GL_MNG_RRDFM_RMII_DBG_FIL_2_SHIFT 2
+#define I40E_GL_MNG_RRDFM_RMII_DBG_FIL_2_MASK I40E_MASK(0x1, I40E_GL_MNG_RRDFM_RMII_DBG_FIL_2_SHIFT)
+#define I40E_GL_MNG_RRDFM_RMII_DBG_FIL_3_SHIFT 3
+#define I40E_GL_MNG_RRDFM_RMII_DBG_FIL_3_MASK I40E_MASK(0x1, I40E_GL_MNG_RRDFM_RMII_DBG_FIL_3_SHIFT)
+
+#define I40E_GL_SWR_PL_THR 0x00269FDC /* Reset: CORER */
+#define I40E_GL_SWR_PL_THR_PIPE_LIMIT_SHIFT 0
+#define I40E_GL_SWR_PL_THR_PIPE_LIMIT_MASK I40E_MASK(0xFF, I40E_GL_SWR_PL_THR_PIPE_LIMIT_SHIFT)
+
+#define I40E_GL_SWR_PM_UP_THR 0x00269FBC /* Reset: CORER */
+#define I40E_GL_SWR_PM_UP_THR_UP_PORT_0_SHIFT 0
+#define I40E_GL_SWR_PM_UP_THR_UP_PORT_0_MASK I40E_MASK(0xFF, I40E_GL_SWR_PM_UP_THR_UP_PORT_0_SHIFT)
+#define I40E_GL_SWR_PM_UP_THR_UP_PORT_1_SHIFT 8
+#define I40E_GL_SWR_PM_UP_THR_UP_PORT_1_MASK I40E_MASK(0xFF, I40E_GL_SWR_PM_UP_THR_UP_PORT_1_SHIFT)
+#define I40E_GL_SWR_PM_UP_THR_UP_PORT_2_SHIFT 16
+#define I40E_GL_SWR_PM_UP_THR_UP_PORT_2_MASK I40E_MASK(0xFF, I40E_GL_SWR_PM_UP_THR_UP_PORT_2_SHIFT)
+#define I40E_GL_SWR_PM_UP_THR_UP_PORT_3_SHIFT 24
+#define I40E_GL_SWR_PM_UP_THR_UP_PORT_3_MASK I40E_MASK(0xFF, I40E_GL_SWR_PM_UP_THR_UP_PORT_3_SHIFT)
+
+#define I40E_PRT_MNG_FTFT_IGNORETAGS 0x00085280 /* Reset: POR */
+#define I40E_PRT_MNG_FTFT_IGNORETAGS_PRT_MNG_FTFT_IGNORETAGS_0_SHIFT 0
+#define I40E_PRT_MNG_FTFT_IGNORETAGS_PRT_MNG_FTFT_IGNORETAGS_0_MASK I40E_MASK(0x1, I40E_PRT_MNG_FTFT_IGNORETAGS_PRT_MNG_FTFT_IGNORETAGS_0_SHIFT)
+#define I40E_PRT_MNG_FTFT_IGNORETAGS_PRT_MNG_FTFT_IGNORETAGS_SHIFT 2
+#define I40E_PRT_MNG_FTFT_IGNORETAGS_PRT_MNG_FTFT_IGNORETAGS_MASK I40E_MASK(0xFF, I40E_PRT_MNG_FTFT_IGNORETAGS_PRT_MNG_FTFT_IGNORETAGS_SHIFT)
+
+/* PF - MSI-X Table Registers */
+
+/* PF - NVM Registers */
+
+#define I40E_EMPNVM_FLCNT 0x000B6128 /* Reset: POR */
+#define I40E_EMPNVM_FLCNT_RDCNT_SHIFT 0
+#define I40E_EMPNVM_FLCNT_RDCNT_MASK I40E_MASK(0x1FFFFFF, I40E_EMPNVM_FLCNT_RDCNT_SHIFT)
+#define I40E_EMPNVM_FLCNT_ABORT_SHIFT 31
+#define I40E_EMPNVM_FLCNT_ABORT_MASK I40E_MASK(0x1, I40E_EMPNVM_FLCNT_ABORT_SHIFT)
+
+#define I40E_EMPNVM_FLCTL 0x000B6120 /* Reset: POR */
+#define I40E_EMPNVM_FLCTL_ADDR_SHIFT 0
+#define I40E_EMPNVM_FLCTL_ADDR_MASK I40E_MASK(0xFFFFFF, I40E_EMPNVM_FLCTL_ADDR_SHIFT)
+#define I40E_EMPNVM_FLCTL_CMD_SHIFT 24
+#define I40E_EMPNVM_FLCTL_CMD_MASK I40E_MASK(0x3, I40E_EMPNVM_FLCTL_CMD_SHIFT)
+#define I40E_EMPNVM_FLCTL_CMDV_SHIFT 26
+#define I40E_EMPNVM_FLCTL_CMDV_MASK I40E_MASK(0x1, I40E_EMPNVM_FLCTL_CMDV_SHIFT)
+#define I40E_EMPNVM_FLCTL_FLBUSY_SHIFT 27
+#define I40E_EMPNVM_FLCTL_FLBUSY_MASK I40E_MASK(0x1, I40E_EMPNVM_FLCTL_FLBUSY_SHIFT)
+#define I40E_EMPNVM_FLCTL_DONE_SHIFT 30
+#define I40E_EMPNVM_FLCTL_DONE_MASK I40E_MASK(0x1, I40E_EMPNVM_FLCTL_DONE_SHIFT)
+#define I40E_EMPNVM_FLCTL_GLDONE_SHIFT 31
+#define I40E_EMPNVM_FLCTL_GLDONE_MASK I40E_MASK(0x1, I40E_EMPNVM_FLCTL_GLDONE_SHIFT)
+
+#define I40E_EMPNVM_FLDATA 0x000B6124 /* Reset: POR */
+#define I40E_EMPNVM_FLDATA_FLMNGDATA_SHIFT 0
+#define I40E_EMPNVM_FLDATA_FLMNGDATA_MASK I40E_MASK(0xFFFFFFFF, I40E_EMPNVM_FLDATA_FLMNGDATA_SHIFT)
+
+#define I40E_EMPNVM_SRCTL 0x000B6118 /* Reset: POR */
+#define I40E_EMPNVM_SRCTL_ADDR_SHIFT 0
+#define I40E_EMPNVM_SRCTL_ADDR_MASK I40E_MASK(0x7FFF, I40E_EMPNVM_SRCTL_ADDR_SHIFT)
+#define I40E_EMPNVM_SRCTL_START_SHIFT 15
+#define I40E_EMPNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_EMPNVM_SRCTL_START_SHIFT)
+#define I40E_EMPNVM_SRCTL_WRITE_SHIFT 16
+#define I40E_EMPNVM_SRCTL_WRITE_MASK I40E_MASK(0x1, I40E_EMPNVM_SRCTL_WRITE_SHIFT)
+#define I40E_EMPNVM_SRCTL_SRBUSY_SHIFT 17
+#define I40E_EMPNVM_SRCTL_SRBUSY_MASK I40E_MASK(0x1, I40E_EMPNVM_SRCTL_SRBUSY_SHIFT)
+#define I40E_EMPNVM_SRCTL_TRANS_ABORTED_SHIFT 20
+#define I40E_EMPNVM_SRCTL_TRANS_ABORTED_MASK I40E_MASK(0x1, I40E_EMPNVM_SRCTL_TRANS_ABORTED_SHIFT)
+#define I40E_EMPNVM_SRCTL_DEFERAL_SHIFT 29
+#define I40E_EMPNVM_SRCTL_DEFERAL_MASK I40E_MASK(0x1, I40E_EMPNVM_SRCTL_DEFERAL_SHIFT)
+#define I40E_EMPNVM_SRCTL_SR_LOAD_SHIFT 30
+#define I40E_EMPNVM_SRCTL_SR_LOAD_MASK I40E_MASK(0x1, I40E_EMPNVM_SRCTL_SR_LOAD_SHIFT)
+#define I40E_EMPNVM_SRCTL_DONE_SHIFT 31
+#define I40E_EMPNVM_SRCTL_DONE_MASK I40E_MASK(0x1, I40E_EMPNVM_SRCTL_DONE_SHIFT)
+
+#define I40E_EMPNVM_SRDATA 0x000B611C /* Reset: POR */
+#define I40E_EMPNVM_SRDATA_WRDATA_SHIFT 0
+#define I40E_EMPNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_EMPNVM_SRDATA_WRDATA_SHIFT)
+#define I40E_EMPNVM_SRDATA_RDDATA_SHIFT 16
+#define I40E_EMPNVM_SRDATA_RDDATA_MASK I40E_MASK(0xFFFF, I40E_EMPNVM_SRDATA_RDDATA_SHIFT)
+
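The EMPNVM_SRCTL/SRDATA pair above follows a common command/data layout: the word address and a START bit are written to SRCTL, DONE is polled, and the result is read back from the RDDATA half of SRDATA. The sketch below shows that flow using the fields defined above; wr32(), rd32() and i40e_usec_delay() are assumed to be the osdep helpers, and real NVM access in this driver goes through i40e_nvm.c, so treat this purely as a field-usage illustration.

/* Illustrative sketch only; assumes the osdep helpers named above. */
static enum i40e_status_code
empnvm_read_word_sketch(struct i40e_hw *hw, u16 offset, u16 *data)
{
	u32 srctl, i;

	/* Program the word address and kick off the read. */
	srctl = ((u32)offset << I40E_EMPNVM_SRCTL_ADDR_SHIFT) |
	    I40E_EMPNVM_SRCTL_START_MASK;
	wr32(hw, I40E_EMPNVM_SRCTL, srctl);

	/* Poll DONE, then pull the 16-bit result out of SRDATA. */
	for (i = 0; i < 1000; i++) {
		if (rd32(hw, I40E_EMPNVM_SRCTL) &
		    I40E_EMPNVM_SRCTL_DONE_MASK) {
			*data = (u16)(rd32(hw, I40E_EMPNVM_SRDATA) >>
			    I40E_EMPNVM_SRDATA_RDDATA_SHIFT);
			return I40E_SUCCESS;
		}
		i40e_usec_delay(5);
	}
	return I40E_ERR_TIMEOUT;
}
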
+#define I40E_GLNVM_ALTIMERS 0x000B6140 /* Reset: POR */
+#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT 0
+#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_MASK I40E_MASK(0xFFF, I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT)
+#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT 12
+#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_MASK I40E_MASK(0xFFFFF, I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT)
+
+#define I40E_GLNVM_EMPLD 0x000B610C /* Reset: POR */
+#define I40E_GLNVM_EMPLD_EMP_CORE_DONE_SHIFT 3
+#define I40E_GLNVM_EMPLD_EMP_CORE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_EMPLD_EMP_CORE_DONE_SHIFT)
+#define I40E_GLNVM_EMPLD_EMP_GLOBAL_DONE_SHIFT 4
+#define I40E_GLNVM_EMPLD_EMP_GLOBAL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_EMPLD_EMP_GLOBAL_DONE_SHIFT)
+
+#define I40E_GLNVM_EMPRQ 0x000B613C /* Reset: POR */
+#define I40E_GLNVM_EMPRQ_EMP_CORE_REQD_SHIFT 3
+#define I40E_GLNVM_EMPRQ_EMP_CORE_REQD_MASK I40E_MASK(0x1, I40E_GLNVM_EMPRQ_EMP_CORE_REQD_SHIFT)
+#define I40E_GLNVM_EMPRQ_EMP_GLOBAL_REQD_SHIFT 4
+#define I40E_GLNVM_EMPRQ_EMP_GLOBAL_REQD_MASK I40E_MASK(0x1, I40E_GLNVM_EMPRQ_EMP_GLOBAL_REQD_SHIFT)
+
+#define I40E_GLNVM_SRLD 0x000B600C /* Reset: POR */
+#define I40E_GLNVM_SRLD_HW_PCIR_DONE_SHIFT 0
+#define I40E_GLNVM_SRLD_HW_PCIR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRLD_HW_PCIR_DONE_SHIFT)
+#define I40E_GLNVM_SRLD_HW_PCIRTL_DONE_SHIFT 1
+#define I40E_GLNVM_SRLD_HW_PCIRTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRLD_HW_PCIRTL_DONE_SHIFT)
+#define I40E_GLNVM_SRLD_HW_LCB_DONE_SHIFT 2
+#define I40E_GLNVM_SRLD_HW_LCB_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRLD_HW_LCB_DONE_SHIFT)
+#define I40E_GLNVM_SRLD_HW_CORE_DONE_SHIFT 3
+#define I40E_GLNVM_SRLD_HW_CORE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRLD_HW_CORE_DONE_SHIFT)
+#define I40E_GLNVM_SRLD_HW_GLOBAL_DONE_SHIFT 4
+#define I40E_GLNVM_SRLD_HW_GLOBAL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRLD_HW_GLOBAL_DONE_SHIFT)
+#define I40E_GLNVM_SRLD_HW_POR_DONE_SHIFT 5
+#define I40E_GLNVM_SRLD_HW_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRLD_HW_POR_DONE_SHIFT)
+#define I40E_GLNVM_SRLD_HW_PCIE_ANA_DONE_SHIFT 6
+#define I40E_GLNVM_SRLD_HW_PCIE_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRLD_HW_PCIE_ANA_DONE_SHIFT)
+#define I40E_GLNVM_SRLD_HW_PHY_ANA_DONE_SHIFT 7
+#define I40E_GLNVM_SRLD_HW_PHY_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRLD_HW_PHY_ANA_DONE_SHIFT)
+#define I40E_GLNVM_SRLD_HW_EMP_DONE_SHIFT 8
+#define I40E_GLNVM_SRLD_HW_EMP_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRLD_HW_EMP_DONE_SHIFT)
+#define I40E_GLNVM_SRLD_HW_PCIALT_DONE_SHIFT 9
+#define I40E_GLNVM_SRLD_HW_PCIALT_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRLD_HW_PCIALT_DONE_SHIFT)
+
+#define I40E_GLNVM_ULT 0x000B6154 /* Reset: POR */
+#define I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT 0
+#define I40E_GLNVM_ULT_CONF_PCIR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT 1
+#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_LCB_AE_SHIFT 2
+#define I40E_GLNVM_ULT_CONF_LCB_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_LCB_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT 3
+#define I40E_GLNVM_ULT_CONF_CORE_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT 4
+#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_POR_AE_SHIFT 5
+#define I40E_GLNVM_ULT_CONF_POR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_POR_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT 8
+#define I40E_GLNVM_ULT_CONF_EMP_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT 9
+#define I40E_GLNVM_ULT_CONF_PCIALT_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT)
+
+#define I40E_MEM_INIT_GATE_AL_DONE 0x000B6004 /* Reset: POR */
+#define I40E_MEM_INIT_GATE_AL_DONE_CMLAN_INIT_DONE_GATE_AL_DONE_SHIFT 0
+#define I40E_MEM_INIT_GATE_AL_DONE_CMLAN_INIT_DONE_GATE_AL_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_GATE_AL_DONE_CMLAN_INIT_DONE_GATE_AL_DONE_SHIFT)
+#define I40E_MEM_INIT_GATE_AL_DONE_PMAT_INIT_DONE_GATE_AL_DONE_SHIFT 1
+#define I40E_MEM_INIT_GATE_AL_DONE_PMAT_INIT_DONE_GATE_AL_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_GATE_AL_DONE_PMAT_INIT_DONE_GATE_AL_DONE_SHIFT)
+#define I40E_MEM_INIT_GATE_AL_DONE_RCU_INIT_DONE_GATE_AL_DONE_SHIFT 2
+#define I40E_MEM_INIT_GATE_AL_DONE_RCU_INIT_DONE_GATE_AL_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_GATE_AL_DONE_RCU_INIT_DONE_GATE_AL_DONE_SHIFT)
+#define I40E_MEM_INIT_GATE_AL_DONE_TDPU_INIT_DONE_GATE_AL_DONE_SHIFT 3
+#define I40E_MEM_INIT_GATE_AL_DONE_TDPU_INIT_DONE_GATE_AL_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_GATE_AL_DONE_TDPU_INIT_DONE_GATE_AL_DONE_SHIFT)
+#define I40E_MEM_INIT_GATE_AL_DONE_TLAN_INIT_DONE_GATE_AL_DONE_SHIFT 4
+#define I40E_MEM_INIT_GATE_AL_DONE_TLAN_INIT_DONE_GATE_AL_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_GATE_AL_DONE_TLAN_INIT_DONE_GATE_AL_DONE_SHIFT)
+#define I40E_MEM_INIT_GATE_AL_DONE_RLAN_INIT_DONE_GATE_AL_DONE_SHIFT 5
+#define I40E_MEM_INIT_GATE_AL_DONE_RLAN_INIT_DONE_GATE_AL_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_GATE_AL_DONE_RLAN_INIT_DONE_GATE_AL_DONE_SHIFT)
+#define I40E_MEM_INIT_GATE_AL_DONE_RDPU_INIT_DONE_GATE_AL_DONE_SHIFT 6
+#define I40E_MEM_INIT_GATE_AL_DONE_RDPU_INIT_DONE_GATE_AL_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_GATE_AL_DONE_RDPU_INIT_DONE_GATE_AL_DONE_SHIFT)
+#define I40E_MEM_INIT_GATE_AL_DONE_PPRS_INIT_DONE_GATE_AL_DONE_SHIFT 7
+#define I40E_MEM_INIT_GATE_AL_DONE_PPRS_INIT_DONE_GATE_AL_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_GATE_AL_DONE_PPRS_INIT_DONE_GATE_AL_DONE_SHIFT)
+#define I40E_MEM_INIT_GATE_AL_DONE_RPB_INIT_DONE_GATE_AL_DONE_SHIFT 8
+#define I40E_MEM_INIT_GATE_AL_DONE_RPB_INIT_DONE_GATE_AL_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_GATE_AL_DONE_RPB_INIT_DONE_GATE_AL_DONE_SHIFT)
+#define I40E_MEM_INIT_GATE_AL_DONE_TPB_INIT_DONE_GATE_AL_DONE_SHIFT 9
+#define I40E_MEM_INIT_GATE_AL_DONE_TPB_INIT_DONE_GATE_AL_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_GATE_AL_DONE_TPB_INIT_DONE_GATE_AL_DONE_SHIFT)
+#define I40E_MEM_INIT_GATE_AL_DONE_FOC_INIT_DONE_GATE_AL_DONE_SHIFT 10
+#define I40E_MEM_INIT_GATE_AL_DONE_FOC_INIT_DONE_GATE_AL_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_GATE_AL_DONE_FOC_INIT_DONE_GATE_AL_DONE_SHIFT)
+#define I40E_MEM_INIT_GATE_AL_DONE_TSCD_INIT_DONE_GATE_AL_DONE_SHIFT 11
+#define I40E_MEM_INIT_GATE_AL_DONE_TSCD_INIT_DONE_GATE_AL_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_GATE_AL_DONE_TSCD_INIT_DONE_GATE_AL_DONE_SHIFT)
+#define I40E_MEM_INIT_GATE_AL_DONE_TCB_INIT_DONE_GATE_AL_DONE_SHIFT 12
+#define I40E_MEM_INIT_GATE_AL_DONE_TCB_INIT_DONE_GATE_AL_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_GATE_AL_DONE_TCB_INIT_DONE_GATE_AL_DONE_SHIFT)
+#define I40E_MEM_INIT_GATE_AL_DONE_RCB_INIT_DONE_GATE_AL_DONE_SHIFT 13
+#define I40E_MEM_INIT_GATE_AL_DONE_RCB_INIT_DONE_GATE_AL_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_GATE_AL_DONE_RCB_INIT_DONE_GATE_AL_DONE_SHIFT)
+#define I40E_MEM_INIT_GATE_AL_DONE_WUC_INIT_DONE_GATE_AL_DONE_SHIFT 14
+#define I40E_MEM_INIT_GATE_AL_DONE_WUC_INIT_DONE_GATE_AL_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_GATE_AL_DONE_WUC_INIT_DONE_GATE_AL_DONE_SHIFT)
+#define I40E_MEM_INIT_GATE_AL_DONE_STAT_INIT_DONE_GATE_AL_DONE_SHIFT 15
+#define I40E_MEM_INIT_GATE_AL_DONE_STAT_INIT_DONE_GATE_AL_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_GATE_AL_DONE_STAT_INIT_DONE_GATE_AL_DONE_SHIFT)
+#define I40E_MEM_INIT_GATE_AL_DONE_ITR_INIT_DONE_GATE_AL_DONE_SHIFT 16
+#define I40E_MEM_INIT_GATE_AL_DONE_ITR_INIT_DONE_GATE_AL_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_GATE_AL_DONE_ITR_INIT_DONE_GATE_AL_DONE_SHIFT)
+
+#define I40E_MEM_INIT_GATE_AL_STR 0x000B6000 /* Reset: POR */
+#define I40E_MEM_INIT_GATE_AL_STR_CMLAN_INIT_DONE_GATE_AL_STRT_SHIFT 0
+#define I40E_MEM_INIT_GATE_AL_STR_CMLAN_INIT_DONE_GATE_AL_STRT_MASK I40E_MASK(0x1, I40E_MEM_INIT_GATE_AL_STR_CMLAN_INIT_DONE_GATE_AL_STRT_SHIFT)
+#define I40E_MEM_INIT_GATE_AL_STR_PMAT_INIT_DONE_GATE_AL_STRT_SHIFT 1
+#define I40E_MEM_INIT_GATE_AL_STR_PMAT_INIT_DONE_GATE_AL_STRT_MASK I40E_MASK(0x1, I40E_MEM_INIT_GATE_AL_STR_PMAT_INIT_DONE_GATE_AL_STRT_SHIFT)
+#define I40E_MEM_INIT_GATE_AL_STR_RCU_INIT_DONE_GATE_AL_STRT_SHIFT 2
+#define I40E_MEM_INIT_GATE_AL_STR_RCU_INIT_DONE_GATE_AL_STRT_MASK I40E_MASK(0x1, I40E_MEM_INIT_GATE_AL_STR_RCU_INIT_DONE_GATE_AL_STRT_SHIFT)
+#define I40E_MEM_INIT_GATE_AL_STR_TDPU_INIT_DONE_GATE_AL_STRT_SHIFT 3
+#define I40E_MEM_INIT_GATE_AL_STR_TDPU_INIT_DONE_GATE_AL_STRT_MASK I40E_MASK(0x1, I40E_MEM_INIT_GATE_AL_STR_TDPU_INIT_DONE_GATE_AL_STRT_SHIFT)
+#define I40E_MEM_INIT_GATE_AL_STR_TLAN_INIT_DONE_GATE_AL_STRT_SHIFT 4
+#define I40E_MEM_INIT_GATE_AL_STR_TLAN_INIT_DONE_GATE_AL_STRT_MASK I40E_MASK(0x1, I40E_MEM_INIT_GATE_AL_STR_TLAN_INIT_DONE_GATE_AL_STRT_SHIFT)
+#define I40E_MEM_INIT_GATE_AL_STR_RLAN_INIT_DONE_GATE_AL_STRT_SHIFT 5
+#define I40E_MEM_INIT_GATE_AL_STR_RLAN_INIT_DONE_GATE_AL_STRT_MASK I40E_MASK(0x1, I40E_MEM_INIT_GATE_AL_STR_RLAN_INIT_DONE_GATE_AL_STRT_SHIFT)
+#define I40E_MEM_INIT_GATE_AL_STR_RDPU_INIT_DONE_GATE_AL_STRT_SHIFT 6
+#define I40E_MEM_INIT_GATE_AL_STR_RDPU_INIT_DONE_GATE_AL_STRT_MASK I40E_MASK(0x1, I40E_MEM_INIT_GATE_AL_STR_RDPU_INIT_DONE_GATE_AL_STRT_SHIFT)
+#define I40E_MEM_INIT_GATE_AL_STR_PPRS_INIT_DONE_GATE_AL_STRT_SHIFT 7
+#define I40E_MEM_INIT_GATE_AL_STR_PPRS_INIT_DONE_GATE_AL_STRT_MASK I40E_MASK(0x1, I40E_MEM_INIT_GATE_AL_STR_PPRS_INIT_DONE_GATE_AL_STRT_SHIFT)
+#define I40E_MEM_INIT_GATE_AL_STR_RPB_INIT_DONE_GATE_AL_STRT_SHIFT 8
+#define I40E_MEM_INIT_GATE_AL_STR_RPB_INIT_DONE_GATE_AL_STRT_MASK I40E_MASK(0x1, I40E_MEM_INIT_GATE_AL_STR_RPB_INIT_DONE_GATE_AL_STRT_SHIFT)
+#define I40E_MEM_INIT_GATE_AL_STR_TPB_INIT_DONE_GATE_AL_STRT_SHIFT 9
+#define I40E_MEM_INIT_GATE_AL_STR_TPB_INIT_DONE_GATE_AL_STRT_MASK I40E_MASK(0x1, I40E_MEM_INIT_GATE_AL_STR_TPB_INIT_DONE_GATE_AL_STRT_SHIFT)
+#define I40E_MEM_INIT_GATE_AL_STR_FOC_INIT_DONE_GATE_AL_STRT_SHIFT 10
+#define I40E_MEM_INIT_GATE_AL_STR_FOC_INIT_DONE_GATE_AL_STRT_MASK I40E_MASK(0x1, I40E_MEM_INIT_GATE_AL_STR_FOC_INIT_DONE_GATE_AL_STRT_SHIFT)
+#define I40E_MEM_INIT_GATE_AL_STR_TSCD_INIT_DONE_GATE_AL_STRT_SHIFT 11
+#define I40E_MEM_INIT_GATE_AL_STR_TSCD_INIT_DONE_GATE_AL_STRT_MASK I40E_MASK(0x1, I40E_MEM_INIT_GATE_AL_STR_TSCD_INIT_DONE_GATE_AL_STRT_SHIFT)
+#define I40E_MEM_INIT_GATE_AL_STR_TCB_INIT_DONE_GATE_AL_STRT_SHIFT 12
+#define I40E_MEM_INIT_GATE_AL_STR_TCB_INIT_DONE_GATE_AL_STRT_MASK I40E_MASK(0x1, I40E_MEM_INIT_GATE_AL_STR_TCB_INIT_DONE_GATE_AL_STRT_SHIFT)
+#define I40E_MEM_INIT_GATE_AL_STR_RCB_INIT_DONE_GATE_AL_STRT_SHIFT 13
+#define I40E_MEM_INIT_GATE_AL_STR_RCB_INIT_DONE_GATE_AL_STRT_MASK I40E_MASK(0x1, I40E_MEM_INIT_GATE_AL_STR_RCB_INIT_DONE_GATE_AL_STRT_SHIFT)
+#define I40E_MEM_INIT_GATE_AL_STR_WUC_INIT_DONE_GATE_AL_STRT_SHIFT 14
+#define I40E_MEM_INIT_GATE_AL_STR_WUC_INIT_DONE_GATE_AL_STRT_MASK I40E_MASK(0x1, I40E_MEM_INIT_GATE_AL_STR_WUC_INIT_DONE_GATE_AL_STRT_SHIFT)
+#define I40E_MEM_INIT_GATE_AL_STR_STAT_INIT_DONE_GATE_AL_STRT_SHIFT 15
+#define I40E_MEM_INIT_GATE_AL_STR_STAT_INIT_DONE_GATE_AL_STRT_MASK I40E_MASK(0x1, I40E_MEM_INIT_GATE_AL_STR_STAT_INIT_DONE_GATE_AL_STRT_SHIFT)
+#define I40E_MEM_INIT_GATE_AL_STR_ITR_INIT_DONE_GATE_AL_STRT_SHIFT 16
+#define I40E_MEM_INIT_GATE_AL_STR_ITR_INIT_DONE_GATE_AL_STRT_MASK I40E_MASK(0x1, I40E_MEM_INIT_GATE_AL_STR_ITR_INIT_DONE_GATE_AL_STRT_SHIFT)
+
+/* PF - PCIe Registers */
+
+#define I40E_EMP_PCI_CIAA 0x0009C4D0 /* Reset: PCIR */
+#define I40E_EMP_PCI_CIAA_ADDRESS_SHIFT 0
+#define I40E_EMP_PCI_CIAA_ADDRESS_MASK I40E_MASK(0xFFF, I40E_EMP_PCI_CIAA_ADDRESS_SHIFT)
+#define I40E_EMP_PCI_CIAA_FNUM_SHIFT 12
+#define I40E_EMP_PCI_CIAA_FNUM_MASK I40E_MASK(0x7F, I40E_EMP_PCI_CIAA_FNUM_SHIFT)
+#define I40E_EMP_PCI_CIAA_PF_SHIFT 19
+#define I40E_EMP_PCI_CIAA_PF_MASK I40E_MASK(0x1, I40E_EMP_PCI_CIAA_PF_SHIFT)
+
+#define I40E_EMP_PCI_CIAD 0x0009C4D4 /* Reset: PCIR */
+#define I40E_EMP_PCI_CIAD_DATA_SHIFT 0
+#define I40E_EMP_PCI_CIAD_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_EMP_PCI_CIAD_DATA_SHIFT)
+
+#define I40E_GL_PCI_DBGCTL 0x000BE4F4 /* Reset: PCIR */
+#define I40E_GL_PCI_DBGCTL_CONFIG_ACCESS_ENABLE_SHIFT 0
+#define I40E_GL_PCI_DBGCTL_CONFIG_ACCESS_ENABLE_MASK I40E_MASK(0x1, I40E_GL_PCI_DBGCTL_CONFIG_ACCESS_ENABLE_SHIFT)
+
+#define I40E_GLGEN_FWPFRSTAT 0x0009C4E8 /* Reset: PCIR */
+#define I40E_GLGEN_FWPFRSTAT_PF_FLR_SHIFT 0
+#define I40E_GLGEN_FWPFRSTAT_PF_FLR_MASK I40E_MASK(0xFFFF, I40E_GLGEN_FWPFRSTAT_PF_FLR_SHIFT)
+
+#define I40E_GLGEN_FWVFRSTAT(_i) (0x0009C4D8 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */
+#define I40E_GLGEN_FWVFRSTAT_MAX_INDEX 3
+#define I40E_GLGEN_FWVFRSTAT_VF_FLR_SHIFT 0
+#define I40E_GLGEN_FWVFRSTAT_VF_FLR_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_FWVFRSTAT_VF_FLR_SHIFT)
+
+#define I40E_GLGEN_PCIFCNCNT_PCI 0x000BE4A0 /* Reset: PCIR */
+#define I40E_GLGEN_PCIFCNCNT_PCI_PCIPFCNT_SHIFT 0
+#define I40E_GLGEN_PCIFCNCNT_PCI_PCIPFCNT_MASK I40E_MASK(0x1F, I40E_GLGEN_PCIFCNCNT_PCI_PCIPFCNT_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT_PCI_PCIVFCNT_SHIFT 16
+#define I40E_GLGEN_PCIFCNCNT_PCI_PCIVFCNT_MASK I40E_MASK(0xFF, I40E_GLGEN_PCIFCNCNT_PCI_PCIVFCNT_SHIFT)
+
+#define I40E_GLPCI_ANA_ADD 0x000BA000 /* Reset: POR */
+#define I40E_GLPCI_ANA_ADD_ADDRESS_SHIFT 0
+#define I40E_GLPCI_ANA_ADD_ADDRESS_MASK I40E_MASK(0xFFFF, I40E_GLPCI_ANA_ADD_ADDRESS_SHIFT)
+#define I40E_GLPCI_ANA_ADD_BYTE_EN_SHIFT 28
+#define I40E_GLPCI_ANA_ADD_BYTE_EN_MASK I40E_MASK(0xF, I40E_GLPCI_ANA_ADD_BYTE_EN_SHIFT)
+
+#define I40E_GLPCI_ANA_DATA 0x000BA004 /* Reset: POR */
+#define I40E_GLPCI_ANA_DATA_DATA_SHIFT 0
+#define I40E_GLPCI_ANA_DATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_ANA_DATA_DATA_SHIFT)
+
+#define I40E_GLPCI_LCBADD 0x0009C4C0 /* Reset: PCIR */
+#define I40E_GLPCI_LCBADD_ADDRESS_SHIFT 0
+#define I40E_GLPCI_LCBADD_ADDRESS_MASK I40E_MASK(0x3FFFF, I40E_GLPCI_LCBADD_ADDRESS_SHIFT)
+#define I40E_GLPCI_LCBADD_BLOCK_ID_SHIFT 20
+#define I40E_GLPCI_LCBADD_BLOCK_ID_MASK I40E_MASK(0x7FF, I40E_GLPCI_LCBADD_BLOCK_ID_SHIFT)
+#define I40E_GLPCI_LCBADD_LOCK_SHIFT 31
+#define I40E_GLPCI_LCBADD_LOCK_MASK I40E_MASK(0x1, I40E_GLPCI_LCBADD_LOCK_SHIFT)
+
+#define I40E_GLPCI_LCBDATA 0x0009C4C4 /* Reset: PCIR */
+#define I40E_GLPCI_LCBDATA_LCB_DATA_SHIFT 0
+#define I40E_GLPCI_LCBDATA_LCB_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_LCBDATA_LCB_DATA_SHIFT)
+
+#define I40E_GLPCI_PCITEST1 0x000BE488 /* Reset: PCIR */
+#define I40E_GLPCI_PCITEST1_IGNORE_RID_SHIFT 0
+#define I40E_GLPCI_PCITEST1_IGNORE_RID_MASK I40E_MASK(0x1, I40E_GLPCI_PCITEST1_IGNORE_RID_SHIFT)
+#define I40E_GLPCI_PCITEST1_V_MSIX_EN_SHIFT 2
+#define I40E_GLPCI_PCITEST1_V_MSIX_EN_MASK I40E_MASK(0x1, I40E_GLPCI_PCITEST1_V_MSIX_EN_SHIFT)
+
+#define I40E_GLPCI_PCITEST2 0x000BE4BC /* Reset: PCIR */
+#define I40E_GLPCI_PCITEST2_IOV_TEST_MODE_SHIFT 0
+#define I40E_GLPCI_PCITEST2_IOV_TEST_MODE_MASK I40E_MASK(0x1, I40E_GLPCI_PCITEST2_IOV_TEST_MODE_SHIFT)
+#define I40E_GLPCI_PCITEST2_TAG_ALLOC_SHIFT 1
+#define I40E_GLPCI_PCITEST2_TAG_ALLOC_MASK I40E_MASK(0x1, I40E_GLPCI_PCITEST2_TAG_ALLOC_SHIFT)
+
+#define I40E_GLTPH_CTRL 0x000BE480 /* Reset: PCIR */
+#define I40E_GLTPH_CTRL_DISABLE_READ_HINT_SHIFT 8
+#define I40E_GLTPH_CTRL_DISABLE_READ_HINT_MASK I40E_MASK(0x1, I40E_GLTPH_CTRL_DISABLE_READ_HINT_SHIFT)
+#define I40E_GLTPH_CTRL_DESC_PH_SHIFT 9
+#define I40E_GLTPH_CTRL_DESC_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DESC_PH_SHIFT)
+#define I40E_GLTPH_CTRL_DATA_PH_SHIFT 11
+#define I40E_GLTPH_CTRL_DATA_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DATA_PH_SHIFT)
+#define I40E_GLTPH_CTRL_TPH_AUTOLEARN_SHIFT 13
+#define I40E_GLTPH_CTRL_TPH_AUTOLEARN_MASK I40E_MASK(0x1, I40E_GLTPH_CTRL_TPH_AUTOLEARN_SHIFT)
+
+#define I40E_PF_VT_PFALLOC_PCIE 0x000BE380 /* Reset: PCIR */
+#define I40E_PF_VT_PFALLOC_PCIE_FIRSTVF_SHIFT 0
+#define I40E_PF_VT_PFALLOC_PCIE_FIRSTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_PCIE_FIRSTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_PCIE_LASTVF_SHIFT 8
+#define I40E_PF_VT_PFALLOC_PCIE_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_PCIE_LASTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_PCIE_VALID_SHIFT 31
+#define I40E_PF_VT_PFALLOC_PCIE_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_PCIE_VALID_SHIFT)
+
+/* PF - Power Management Registers */
+
+#define I40E_GLPCI_PM_EN_STAT 0x000BE4E4 /* Reset: POR */
+#define I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF0_SHIFT 0
+#define I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF0_MASK I40E_MASK(0x1, I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF0_SHIFT)
+#define I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF1_SHIFT 1
+#define I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF1_MASK I40E_MASK(0x1, I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF1_SHIFT)
+#define I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF2_SHIFT 2
+#define I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF2_MASK I40E_MASK(0x1, I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF2_SHIFT)
+#define I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF3_SHIFT 3
+#define I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF3_MASK I40E_MASK(0x1, I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF3_SHIFT)
+#define I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF4_SHIFT 4
+#define I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF4_MASK I40E_MASK(0x1, I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF4_SHIFT)
+#define I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF5_SHIFT 5
+#define I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF5_MASK I40E_MASK(0x1, I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF5_SHIFT)
+#define I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF6_SHIFT 6
+#define I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF6_MASK I40E_MASK(0x1, I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF6_SHIFT)
+#define I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF7_SHIFT 7
+#define I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF7_MASK I40E_MASK(0x1, I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF7_SHIFT)
+#define I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF8_SHIFT 8
+#define I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF8_MASK I40E_MASK(0x1, I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF8_SHIFT)
+#define I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF9_SHIFT 9
+#define I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF9_MASK I40E_MASK(0x1, I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF9_SHIFT)
+#define I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF10_SHIFT 10
+#define I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF10_MASK I40E_MASK(0x1, I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF10_SHIFT)
+#define I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF11_SHIFT 11
+#define I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF11_MASK I40E_MASK(0x1, I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF11_SHIFT)
+#define I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF12_SHIFT 12
+#define I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF12_MASK I40E_MASK(0x1, I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF12_SHIFT)
+#define I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF13_SHIFT 13
+#define I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF13_MASK I40E_MASK(0x1, I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF13_SHIFT)
+#define I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF14_SHIFT 14
+#define I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF14_MASK I40E_MASK(0x1, I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF14_SHIFT)
+#define I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF15_SHIFT 15
+#define I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF15_MASK I40E_MASK(0x1, I40E_GLPCI_PM_EN_STAT_PCIE_PME_EN_PF15_SHIFT)
+
+#define I40E_GLPM_DMAC_ENC 0x000881F0 /* Reset: CORER */
+#define I40E_GLPM_DMAC_ENC_DMACENTRY_SHIFT 0
+#define I40E_GLPM_DMAC_ENC_DMACENTRY_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPM_DMAC_ENC_DMACENTRY_SHIFT)
+
+#define I40E_GLPM_DMAC_EXC 0x000881FC /* Reset: CORER */
+#define I40E_GLPM_DMAC_EXC_DMACTIMEREXIT_SHIFT 0
+#define I40E_GLPM_DMAC_EXC_DMACTIMEREXIT_MASK I40E_MASK(0xFFFF, I40E_GLPM_DMAC_EXC_DMACTIMEREXIT_SHIFT)
+#define I40E_GLPM_DMAC_EXC_DMAIMMEXIT_SHIFT 16
+#define I40E_GLPM_DMAC_EXC_DMAIMMEXIT_MASK I40E_MASK(0xFFFF, I40E_GLPM_DMAC_EXC_DMAIMMEXIT_SHIFT)
+
+#define I40E_GLPM_DMACR 0x000881F4 /* Reset: CORER */
+#define I40E_GLPM_DMACR_DMACWT_SHIFT 0
+#define I40E_GLPM_DMACR_DMACWT_MASK I40E_MASK(0xFFFF, I40E_GLPM_DMACR_DMACWT_SHIFT)
+#define I40E_GLPM_DMACR_EXIT_DC_SHIFT 29
+#define I40E_GLPM_DMACR_EXIT_DC_MASK I40E_MASK(0x1, I40E_GLPM_DMACR_EXIT_DC_SHIFT)
+#define I40E_GLPM_DMACR_LX_COALESCING_INDICATION_SHIFT 30
+#define I40E_GLPM_DMACR_LX_COALESCING_INDICATION_MASK I40E_MASK(0x1, I40E_GLPM_DMACR_LX_COALESCING_INDICATION_SHIFT)
+#define I40E_GLPM_DMACR_DMAC_EN_SHIFT 31
+#define I40E_GLPM_DMACR_DMAC_EN_MASK I40E_MASK(0x1, I40E_GLPM_DMACR_DMAC_EN_SHIFT)
+
+#define I40E_GLPM_DMCTH 0x000AC7E4 /* Reset: CORER */
+#define I40E_GLPM_DMCTH_DMACRXT_SHIFT 0
+#define I40E_GLPM_DMCTH_DMACRXT_MASK I40E_MASK(0x3FF, I40E_GLPM_DMCTH_DMACRXT_SHIFT)
+
+#define I40E_GLPM_DMCTLX 0x000881F8 /* Reset: CORER */
+#define I40E_GLPM_DMCTLX_TTLX_SHIFT 0
+#define I40E_GLPM_DMCTLX_TTLX_MASK I40E_MASK(0xFFF, I40E_GLPM_DMCTLX_TTLX_SHIFT)
+
+#define I40E_GLPM_EEE_SU 0x001E4340 /* Reset: GLOBR */
+#define I40E_GLPM_EEE_SU_DTW_MIN_1000_BASE_T_SHIFT 0
+#define I40E_GLPM_EEE_SU_DTW_MIN_1000_BASE_T_MASK I40E_MASK(0xFF, I40E_GLPM_EEE_SU_DTW_MIN_1000_BASE_T_SHIFT)
+#define I40E_GLPM_EEE_SU_DTW_MIN_100_BASE_TX_SHIFT 8
+#define I40E_GLPM_EEE_SU_DTW_MIN_100_BASE_TX_MASK I40E_MASK(0xFF, I40E_GLPM_EEE_SU_DTW_MIN_100_BASE_TX_SHIFT)
+
+#define I40E_GLPM_EEE_SU_EXT 0x001E4344 /* Reset: GLOBR */
+#define I40E_GLPM_EEE_SU_EXT_DTW_MIN_1000_BASE_KX_SHIFT 0
+#define I40E_GLPM_EEE_SU_EXT_DTW_MIN_1000_BASE_KX_MASK I40E_MASK(0xFF, I40E_GLPM_EEE_SU_EXT_DTW_MIN_1000_BASE_KX_SHIFT)
+#define I40E_GLPM_EEE_SU_EXT_DTW_MIN_10GBASE_KX4_SHIFT 8
+#define I40E_GLPM_EEE_SU_EXT_DTW_MIN_10GBASE_KX4_MASK I40E_MASK(0xFF, I40E_GLPM_EEE_SU_EXT_DTW_MIN_10GBASE_KX4_SHIFT)
+#define I40E_GLPM_EEE_SU_EXT_DTW_MIN_10GBASE_KR_SHIFT 16
+#define I40E_GLPM_EEE_SU_EXT_DTW_MIN_10GBASE_KR_MASK I40E_MASK(0xFF, I40E_GLPM_EEE_SU_EXT_DTW_MIN_10GBASE_KR_SHIFT)
+#define I40E_GLPM_EEE_SU_EXT_DTW_MIN_10GBASE_T_SHIFT 24
+#define I40E_GLPM_EEE_SU_EXT_DTW_MIN_10GBASE_T_MASK I40E_MASK(0xFF, I40E_GLPM_EEE_SU_EXT_DTW_MIN_10GBASE_T_SHIFT)
+
+#define I40E_GLPM_LTRC 0x000BE500 /* Reset: PCIR */
+#define I40E_GLPM_LTRC_SLTRV_SHIFT 0
+#define I40E_GLPM_LTRC_SLTRV_MASK I40E_MASK(0x3FF, I40E_GLPM_LTRC_SLTRV_SHIFT)
+#define I40E_GLPM_LTRC_SSCALE_SHIFT 10
+#define I40E_GLPM_LTRC_SSCALE_MASK I40E_MASK(0x7, I40E_GLPM_LTRC_SSCALE_SHIFT)
+#define I40E_GLPM_LTRC_LTRS_REQUIREMENT_SHIFT 15
+#define I40E_GLPM_LTRC_LTRS_REQUIREMENT_MASK I40E_MASK(0x1, I40E_GLPM_LTRC_LTRS_REQUIREMENT_SHIFT)
+#define I40E_GLPM_LTRC_NSLTRV_SHIFT 16
+#define I40E_GLPM_LTRC_NSLTRV_MASK I40E_MASK(0x3FF, I40E_GLPM_LTRC_NSLTRV_SHIFT)
+#define I40E_GLPM_LTRC_NSSCALE_SHIFT 26
+#define I40E_GLPM_LTRC_NSSCALE_MASK I40E_MASK(0x7, I40E_GLPM_LTRC_NSSCALE_SHIFT)
+#define I40E_GLPM_LTRC_LTR_SEND_SHIFT 30
+#define I40E_GLPM_LTRC_LTR_SEND_MASK I40E_MASK(0x1, I40E_GLPM_LTRC_LTR_SEND_SHIFT)
+#define I40E_GLPM_LTRC_LTRNS_REQUIREMENT_SHIFT 31
+#define I40E_GLPM_LTRC_LTRNS_REQUIREMENT_MASK I40E_MASK(0x1, I40E_GLPM_LTRC_LTRNS_REQUIREMENT_SHIFT)
+
+#define I40E_PRTPM_EEEDBG 0x001E4420 /* Reset: GLOBR */
+#define I40E_PRTPM_EEEDBG_FORCE_TLPI_SHIFT 0
+#define I40E_PRTPM_EEEDBG_FORCE_TLPI_MASK I40E_MASK(0x1, I40E_PRTPM_EEEDBG_FORCE_TLPI_SHIFT)
+
+#define I40E_PRTPM_HPTC 0x000AC800 /* Reset: CORER */
+#define I40E_PRTPM_HPTC_HIGH_PRI_TC_SHIFT 0
+#define I40E_PRTPM_HPTC_HIGH_PRI_TC_MASK I40E_MASK(0xFF, I40E_PRTPM_HPTC_HIGH_PRI_TC_SHIFT)
+
+/* PF - Receive Packet Buffer Registers */
+
+#define I40E_GLRPB_DHWS 0x000AC820 /* Reset: CORER */
+#define I40E_GLRPB_DHWS_DHW_TCN_SHIFT 0
+#define I40E_GLRPB_DHWS_DHW_TCN_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_DHWS_DHW_TCN_SHIFT)
+
+#define I40E_GLRPB_DLWS 0x000AC824 /* Reset: CORER */
+#define I40E_GLRPB_DLWS_DLW_TCN_SHIFT 0
+#define I40E_GLRPB_DLWS_DLW_TCN_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_DLWS_DLW_TCN_SHIFT)
+
+#define I40E_GLRPB_GFC 0x000AC82C /* Reset: CORER */
+#define I40E_GLRPB_GFC_GFC_SHIFT 0
+#define I40E_GLRPB_GFC_GFC_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GFC_GFC_SHIFT)
+
+#define I40E_GLRPB_GPC 0x000AC838 /* Reset: CORER */
+#define I40E_GLRPB_GPC_GPC_SHIFT 0
+#define I40E_GLRPB_GPC_GPC_MASK I40E_MASK(0x3FFF, I40E_GLRPB_GPC_GPC_SHIFT)
+
+#define I40E_GLRPB_LTRTL 0x000AC83C /* Reset: CORER */
+#define I40E_GLRPB_LTRTL_LTRTL_SHIFT 0
+#define I40E_GLRPB_LTRTL_LTRTL_MASK I40E_MASK(0x3FF, I40E_GLRPB_LTRTL_LTRTL_SHIFT)
+
+#define I40E_GLRPB_LTRTV 0x000AC840 /* Reset: CORER */
+#define I40E_GLRPB_LTRTV_LTRTV_SHIFT 0
+#define I40E_GLRPB_LTRTV_LTRTV_MASK I40E_MASK(0x3FF, I40E_GLRPB_LTRTV_LTRTV_SHIFT)
+
+#define I40E_GLRPB_SHTS 0x000AC84C /* Reset: CORER */
+#define I40E_GLRPB_SHTS_SHT_TCN_SHIFT 0
+#define I40E_GLRPB_SHTS_SHT_TCN_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_SHTS_SHT_TCN_SHIFT)
+
+#define I40E_GLRPB_SHWS 0x000AC850 /* Reset: CORER */
+#define I40E_GLRPB_SHWS_SHW_SHIFT 0
+#define I40E_GLRPB_SHWS_SHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_SHWS_SHW_SHIFT)
+
+#define I40E_GLRPB_SLTS 0x000AC854 /* Reset: CORER */
+#define I40E_GLRPB_SLTS_SLT_TCN_SHIFT 0
+#define I40E_GLRPB_SLTS_SLT_TCN_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_SLTS_SLT_TCN_SHIFT)
+
+#define I40E_GLRPB_SLWS 0x000AC858 /* Reset: CORER */
+#define I40E_GLRPB_SLWS_SLW_SHIFT 0
+#define I40E_GLRPB_SLWS_SLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_SLWS_SLW_SHIFT)
+
+#define I40E_GLRPB_SPSS 0x000AC85C /* Reset: CORER */
+#define I40E_GLRPB_SPSS_SPS_SHIFT 0
+#define I40E_GLRPB_SPSS_SPS_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_SPSS_SPS_SHIFT)
+
+#define I40E_PRTRPB_DFC(_i) (0x000AC000 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_DFC_MAX_INDEX 7
+#define I40E_PRTRPB_DFC_DFC_TCN_SHIFT 0
+#define I40E_PRTRPB_DFC_DFC_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DFC_DFC_TCN_SHIFT)
+
+#define I40E_PRTRPB_PFC 0x000AC420 /* Reset: CORER */
+#define I40E_PRTRPB_PFC_PFC_SHIFT 0
+#define I40E_PRTRPB_PFC_PFC_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_PFC_PFC_SHIFT)
+
+#define I40E_PRTRPB_RUP2TC 0x000AC440 /* Reset: CORER */
+#define I40E_PRTRPB_RUP2TC_UP0TC_SHIFT 0
+#define I40E_PRTRPB_RUP2TC_UP0TC_MASK I40E_MASK(0x7, I40E_PRTRPB_RUP2TC_UP0TC_SHIFT)
+#define I40E_PRTRPB_RUP2TC_UP1TC_SHIFT 3
+#define I40E_PRTRPB_RUP2TC_UP1TC_MASK I40E_MASK(0x7, I40E_PRTRPB_RUP2TC_UP1TC_SHIFT)
+#define I40E_PRTRPB_RUP2TC_UP2TC_SHIFT 6
+#define I40E_PRTRPB_RUP2TC_UP2TC_MASK I40E_MASK(0x7, I40E_PRTRPB_RUP2TC_UP2TC_SHIFT)
+#define I40E_PRTRPB_RUP2TC_UP3TC_SHIFT 9
+#define I40E_PRTRPB_RUP2TC_UP3TC_MASK I40E_MASK(0x7, I40E_PRTRPB_RUP2TC_UP3TC_SHIFT)
+#define I40E_PRTRPB_RUP2TC_UP4TC_SHIFT 12
+#define I40E_PRTRPB_RUP2TC_UP4TC_MASK I40E_MASK(0x7, I40E_PRTRPB_RUP2TC_UP4TC_SHIFT)
+#define I40E_PRTRPB_RUP2TC_UP5TC_SHIFT 15
+#define I40E_PRTRPB_RUP2TC_UP5TC_MASK I40E_MASK(0x7, I40E_PRTRPB_RUP2TC_UP5TC_SHIFT)
+#define I40E_PRTRPB_RUP2TC_UP6TC_SHIFT 18
+#define I40E_PRTRPB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTRPB_RUP2TC_UP6TC_SHIFT)
+#define I40E_PRTRPB_RUP2TC_UP7TC_SHIFT 21
+#define I40E_PRTRPB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTRPB_RUP2TC_UP7TC_SHIFT)
+
+#define I40E_PRTRPB_SFC 0x000AC460 /* Reset: CORER */
+#define I40E_PRTRPB_SFC_SFC_SHIFT 0
+#define I40E_PRTRPB_SFC_SFC_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SFC_SFC_SHIFT)
+
+#define I40E_PRTRPB_SOC(_i) (0x000AC6C0 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_SOC_MAX_INDEX 7
+#define I40E_PRTRPB_SOC_SOC_TCN_SHIFT 0
+#define I40E_PRTRPB_SOC_SOC_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SOC_SOC_TCN_SHIFT)
+
+#define I40E_PRTRPB_TC2PFC 0x000AC200 /* Reset: CORER */
+#define I40E_PRTRPB_TC2PFC_TC2PFC_SHIFT 0
+#define I40E_PRTRPB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTRPB_TC2PFC_TC2PFC_SHIFT)
+
+/* PF - Rx Filters Registers */
+
+#define I40E_GL_PRS_FVBM(_i) (0x00269760 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GL_PRS_FVBM_MAX_INDEX 3
+#define I40E_GL_PRS_FVBM_FV_BYTE_INDX_SHIFT 0
+#define I40E_GL_PRS_FVBM_FV_BYTE_INDX_MASK I40E_MASK(0x7F, I40E_GL_PRS_FVBM_FV_BYTE_INDX_SHIFT)
+#define I40E_GL_PRS_FVBM_RULE_BUS_INDX_SHIFT 8
+#define I40E_GL_PRS_FVBM_RULE_BUS_INDX_MASK I40E_MASK(0x3F, I40E_GL_PRS_FVBM_RULE_BUS_INDX_SHIFT)
+#define I40E_GL_PRS_FVBM_MSK_ENA_SHIFT 31
+#define I40E_GL_PRS_FVBM_MSK_ENA_MASK I40E_MASK(0x1, I40E_GL_PRS_FVBM_MSK_ENA_SHIFT)
+
+#define I40E_GLCM_LAN_FCOEQCNT 0x0010C438 /* Reset: CORER */
+#define I40E_GLCM_LAN_FCOEQCNT_FCOE_DDP_CNT_SHIFT 10
+#define I40E_GLCM_LAN_FCOEQCNT_FCOE_DDP_CNT_MASK I40E_MASK(0x3FF, I40E_GLCM_LAN_FCOEQCNT_FCOE_DDP_CNT_SHIFT)
+
+#define I40E_GLCM_LAN_LANQCNT 0x0010C434 /* Reset: CORER */
+#define I40E_GLCM_LAN_LANQCNT_LANTX_CNT_SHIFT 0
+#define I40E_GLCM_LAN_LANQCNT_LANTX_CNT_MASK I40E_MASK(0x3FF, I40E_GLCM_LAN_LANQCNT_LANTX_CNT_SHIFT)
+#define I40E_GLCM_LAN_LANQCNT_LANRX_CNT_SHIFT 10
+#define I40E_GLCM_LAN_LANQCNT_LANRX_CNT_MASK I40E_MASK(0x3FF, I40E_GLCM_LAN_LANQCNT_LANRX_CNT_SHIFT)
+
+#define I40E_GLFOC_CACHE_CTL 0x000AA000 /* Reset: CORER */
+#define I40E_GLFOC_CACHE_CTL_FD_ALLOCATION_SHIFT 0
+#define I40E_GLFOC_CACHE_CTL_FD_ALLOCATION_MASK I40E_MASK(0x3, I40E_GLFOC_CACHE_CTL_FD_ALLOCATION_SHIFT)
+#define I40E_GLFOC_CACHE_CTL_SCALE_FACTOR_SHIFT 2
+#define I40E_GLFOC_CACHE_CTL_SCALE_FACTOR_MASK I40E_MASK(0x3, I40E_GLFOC_CACHE_CTL_SCALE_FACTOR_SHIFT)
+#define I40E_GLFOC_CACHE_CTL_DBGMUX_EN_SHIFT 4
+#define I40E_GLFOC_CACHE_CTL_DBGMUX_EN_MASK I40E_MASK(0x1, I40E_GLFOC_CACHE_CTL_DBGMUX_EN_SHIFT)
+#define I40E_GLFOC_CACHE_CTL_DBGMUX_SEL_LO_SHIFT 8
+#define I40E_GLFOC_CACHE_CTL_DBGMUX_SEL_LO_MASK I40E_MASK(0x1F, I40E_GLFOC_CACHE_CTL_DBGMUX_SEL_LO_SHIFT)
+#define I40E_GLFOC_CACHE_CTL_DBGMUX_SEL_HI_SHIFT 16
+#define I40E_GLFOC_CACHE_CTL_DBGMUX_SEL_HI_MASK I40E_MASK(0x1F, I40E_GLFOC_CACHE_CTL_DBGMUX_SEL_HI_SHIFT)
+
+#define I40E_GLFOC_FSTAT 0x000AA004 /* Reset: CORER */
+#define I40E_GLFOC_FSTAT_PE_CNT_SHIFT 0
+#define I40E_GLFOC_FSTAT_PE_CNT_MASK I40E_MASK(0x7FF, I40E_GLFOC_FSTAT_PE_CNT_SHIFT)
+#define I40E_GLFOC_FSTAT_FC_CNT_SHIFT 16
+#define I40E_GLFOC_FSTAT_FC_CNT_MASK I40E_MASK(0x7FF, I40E_GLFOC_FSTAT_FC_CNT_SHIFT)
+
+#define I40E_GLQF_FC_INSET(_i, _j) (0x002695A0 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...3 */ /* Reset: CORER */
+#define I40E_GLQF_FC_INSET_MAX_INDEX 1
+#define I40E_GLQF_FC_INSET_INSET_SHIFT 0
+#define I40E_GLQF_FC_INSET_INSET_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_FC_INSET_INSET_SHIFT)
+
+#define I40E_GLQF_FC_MSK(_i, _j) (0x002690C0 + ((_i) * 4 + (_j) * 16)) /* _i=0...3, _j=0...3 */ /* Reset: CORER */
+#define I40E_GLQF_FC_MSK_MAX_INDEX 3
+#define I40E_GLQF_FC_MSK_MASK_SHIFT 0
+#define I40E_GLQF_FC_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_GLQF_FC_MSK_MASK_SHIFT)
+#define I40E_GLQF_FC_MSK_OFFSET_SHIFT 16
+#define I40E_GLQF_FC_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_GLQF_FC_MSK_OFFSET_SHIFT)
+
+#define I40E_GLQF_FCTYPE(_i) (0x00269520 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLQF_FCTYPE_MAX_INDEX 3
+#define I40E_GLQF_FCTYPE_PCTYPE_INDEX_SHIFT 0
+#define I40E_GLQF_FCTYPE_PCTYPE_INDEX_MASK I40E_MASK(0x3F, I40E_GLQF_FCTYPE_PCTYPE_INDEX_SHIFT)
+#define I40E_GLQF_FCTYPE_PCTYPE_ENA_SHIFT 7
+#define I40E_GLQF_FCTYPE_PCTYPE_ENA_MASK I40E_MASK(0x1, I40E_GLQF_FCTYPE_PCTYPE_ENA_SHIFT)
+
+#define I40E_GLQF_FD_MSK(_i, _j) (0x00267200 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_FD_MSK_MAX_INDEX 1
+#define I40E_GLQF_FD_MSK_MASK_SHIFT 0
+#define I40E_GLQF_FD_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_GLQF_FD_MSK_MASK_SHIFT)
+#define I40E_GLQF_FD_MSK_OFFSET_SHIFT 16
+#define I40E_GLQF_FD_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_GLQF_FD_MSK_OFFSET_SHIFT)
+
+#define I40E_GLQF_FDCNT_1 0x00269BB4 /* Reset: CORER */
+#define I40E_GLQF_FDCNT_1_BUCKETCNT_SHIFT 0
+#define I40E_GLQF_FDCNT_1_BUCKETCNT_MASK I40E_MASK(0x3FFF, I40E_GLQF_FDCNT_1_BUCKETCNT_SHIFT)
+
+#define I40E_GLQF_FDCNT_2 0x00269BBC /* Reset: CORER */
+#define I40E_GLQF_FDCNT_2_HITSBCNT_SHIFT 0
+#define I40E_GLQF_FDCNT_2_HITSBCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_FDCNT_2_HITSBCNT_SHIFT)
+
+#define I40E_GLQF_FDCNT_3 0x00269BC4 /* Reset: CORER */
+#define I40E_GLQF_FDCNT_3_HITLBCNT_SHIFT 0
+#define I40E_GLQF_FDCNT_3_HITLBCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_FDCNT_3_HITLBCNT_SHIFT)
+
+#define I40E_GLQF_FDENA(_i) (0x002698A8 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_GLQF_FDENA_MAX_INDEX 1
+#define I40E_GLQF_FDENA_FD_ENA_SHIFT 0
+#define I40E_GLQF_FDENA_FD_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_FDENA_FD_ENA_SHIFT)
+
+#define I40E_GLQF_HASH_INSET(_i, _j) (0x00267600 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_HASH_INSET_MAX_INDEX 1
+#define I40E_GLQF_HASH_INSET_INSET_SHIFT 0
+#define I40E_GLQF_HASH_INSET_INSET_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_HASH_INSET_INSET_SHIFT)
+
+#define I40E_GLQF_HASH_MSK(_i, _j) (0x00267A00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_HASH_MSK_MAX_INDEX 1
+#define I40E_GLQF_HASH_MSK_MASK_SHIFT 0
+#define I40E_GLQF_HASH_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_GLQF_HASH_MSK_MASK_SHIFT)
+#define I40E_GLQF_HASH_MSK_OFFSET_SHIFT 16
+#define I40E_GLQF_HASH_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_GLQF_HASH_MSK_OFFSET_SHIFT)
+
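The doubly indexed GLQF_HASH_INSET/GLQF_HASH_MSK macros above take two arguments, so I40E_GLQF_HASH_INSET(_i, _j) resolves to 0x00267600 + (_i) * 4 + (_j) * 8. Reading _i as selecting the low (0) or high (1) 32-bit word of a 64-bit input set and _j as selecting the table entry is an assumption made here for illustration; a minimal sketch under that assumption, using wr32() from i40e_osdep.h, follows.

/* Sketch only: the low/high word split of _i is an assumption. */
static inline void
ixl_set_hash_inset_sketch(struct i40e_hw *hw, u8 entry, u64 inset)
{
	wr32(hw, I40E_GLQF_HASH_INSET(0, entry), (u32)inset);
	wr32(hw, I40E_GLQF_HASH_INSET(1, entry), (u32)(inset >> 32));
}
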
+#define I40E_GLQF_ORT(_i) (0x00268900 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_ORT_MAX_INDEX 63
+#define I40E_GLQF_ORT_PIT_INDX_SHIFT 0
+#define I40E_GLQF_ORT_PIT_INDX_MASK I40E_MASK(0x1F, I40E_GLQF_ORT_PIT_INDX_SHIFT)
+#define I40E_GLQF_ORT_FIELD_CNT_SHIFT 5
+#define I40E_GLQF_ORT_FIELD_CNT_MASK I40E_MASK(0x3, I40E_GLQF_ORT_FIELD_CNT_SHIFT)
+#define I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT 7
+#define I40E_GLQF_ORT_FLX_PAYLOAD_MASK I40E_MASK(0x1, I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT)
+
+#define I40E_GLQF_PE_INSET(_i, _j) (0x00269140 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...7 */ /* Reset: CORER */
+#define I40E_GLQF_PE_INSET_MAX_INDEX 1
+#define I40E_GLQF_PE_INSET_INSET_SHIFT 0
+#define I40E_GLQF_PE_INSET_INSET_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_PE_INSET_INSET_SHIFT)
+
+#define I40E_GLQF_PE_MSK(_i, _j) (0x002691C0 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...7 */ /* Reset: CORER */
+#define I40E_GLQF_PE_MSK_MAX_INDEX 1
+#define I40E_GLQF_PE_MSK_MASK_SHIFT 0
+#define I40E_GLQF_PE_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_GLQF_PE_MSK_MASK_SHIFT)
+#define I40E_GLQF_PE_MSK_OFFSET_SHIFT 16
+#define I40E_GLQF_PE_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_GLQF_PE_MSK_OFFSET_SHIFT)
+
+#define I40E_GLQF_PECNT_0 0x00269FA4 /* Reset: CORER */
+#define I40E_GLQF_PECNT_0_PROG_CNT_SHIFT 0
+#define I40E_GLQF_PECNT_0_PROG_CNT_MASK I40E_MASK(0x1F, I40E_GLQF_PECNT_0_PROG_CNT_SHIFT)
+
+#define I40E_GLQF_PECNT_1 0x00269FAC /* Reset: CORER */
+#define I40E_GLQF_PECNT_1_ADD_OK_SHIFT 0
+#define I40E_GLQF_PECNT_1_ADD_OK_MASK I40E_MASK(0x1F, I40E_GLQF_PECNT_1_ADD_OK_SHIFT)
+#define I40E_GLQF_PECNT_1_ADD_FAIL_SHIFT 8
+#define I40E_GLQF_PECNT_1_ADD_FAIL_MASK I40E_MASK(0x1F, I40E_GLQF_PECNT_1_ADD_FAIL_SHIFT)
+#define I40E_GLQF_PECNT_1_REMOVE_OK_SHIFT 16
+#define I40E_GLQF_PECNT_1_REMOVE_OK_MASK I40E_MASK(0x1F, I40E_GLQF_PECNT_1_REMOVE_OK_SHIFT)
+#define I40E_GLQF_PECNT_1_REMOVE_FAIL_SHIFT 24
+#define I40E_GLQF_PECNT_1_REMOVE_FAIL_MASK I40E_MASK(0x1F, I40E_GLQF_PECNT_1_REMOVE_FAIL_SHIFT)
+
+#define I40E_GLQF_PETYPE(_i) (0x00269560 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_GLQF_PETYPE_MAX_INDEX 7
+#define I40E_GLQF_PETYPE_PCTYPE_INDEX_SHIFT 0
+#define I40E_GLQF_PETYPE_PCTYPE_INDEX_MASK I40E_MASK(0x3F, I40E_GLQF_PETYPE_PCTYPE_INDEX_SHIFT)
+#define I40E_GLQF_PETYPE_PCTYPE_ENA_SHIFT 7
+#define I40E_GLQF_PETYPE_PCTYPE_ENA_MASK I40E_MASK(0x1, I40E_GLQF_PETYPE_PCTYPE_ENA_SHIFT)
+
+#define I40E_GLQF_PIT(_i) (0x00268C80 + ((_i) * 4)) /* _i=0...23 */ /* Reset: CORER */
+#define I40E_GLQF_PIT_MAX_INDEX 23
+#define I40E_GLQF_PIT_SOURCE_OFF_SHIFT 0
+#define I40E_GLQF_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_GLQF_PIT_SOURCE_OFF_SHIFT)
+#define I40E_GLQF_PIT_FSIZE_SHIFT 5
+#define I40E_GLQF_PIT_FSIZE_MASK I40E_MASK(0x1F, I40E_GLQF_PIT_FSIZE_SHIFT)
+#define I40E_GLQF_PIT_DEST_OFF_SHIFT 10
+#define I40E_GLQF_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_GLQF_PIT_DEST_OFF_SHIFT)
+
+#define I40E_GLQF_PTYPE(_i, _j) (0x00268200 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_PTYPE_MAX_INDEX 1
+#define I40E_GLQF_PTYPE_PROT_LAYER_SHIFT 0
+#define I40E_GLQF_PTYPE_PROT_LAYER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_PTYPE_PROT_LAYER_SHIFT)
+
+#define I40E_GLQF_PTYPE_ENA(_i, _j) (0x00268600 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_PTYPE_ENA_MAX_INDEX 1
+#define I40E_GLQF_PTYPE_ENA_PROT_LAYER_SHIFT 0
+#define I40E_GLQF_PTYPE_ENA_PROT_LAYER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_PTYPE_ENA_PROT_LAYER_SHIFT)
+
+#define I40E_PFQF_CTL_0_PMAT 0x000C0700 /* Reset: CORER */
+#define I40E_PFQF_CTL_0_PMAT_PEHSIZE_SHIFT 0
+#define I40E_PFQF_CTL_0_PMAT_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PMAT_PEHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PMAT_PEDSIZE_SHIFT 5
+#define I40E_PFQF_CTL_0_PMAT_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PMAT_PEDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PMAT_PFFCHSIZE_SHIFT 10
+#define I40E_PFQF_CTL_0_PMAT_PFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_PMAT_PFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PMAT_PFFCDSIZE_SHIFT 14
+#define I40E_PFQF_CTL_0_PMAT_PFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_PMAT_PFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PMAT_HASHLUTSIZE_SHIFT 16
+#define I40E_PFQF_CTL_0_PMAT_HASHLUTSIZE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_PMAT_HASHLUTSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PMAT_FD_ENA_SHIFT 17
+#define I40E_PFQF_CTL_0_PMAT_FD_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_PMAT_FD_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_PMAT_ETYPE_ENA_SHIFT 18
+#define I40E_PFQF_CTL_0_PMAT_ETYPE_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_PMAT_ETYPE_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_PMAT_MACVLAN_ENA_SHIFT 19
+#define I40E_PFQF_CTL_0_PMAT_MACVLAN_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_PMAT_MACVLAN_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_PMAT_VFFCHSIZE_SHIFT 20
+#define I40E_PFQF_CTL_0_PMAT_VFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_PMAT_VFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PMAT_VFFCDSIZE_SHIFT 24
+#define I40E_PFQF_CTL_0_PMAT_VFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_PMAT_VFFCDSIZE_SHIFT)
+
+#define I40E_PFQF_CTL_0_RCU 0x00245C80 /* Reset: CORER */
+#define I40E_PFQF_CTL_0_RCU_PEHSIZE_SHIFT 0
+#define I40E_PFQF_CTL_0_RCU_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_RCU_PEHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_RCU_PEDSIZE_SHIFT 5
+#define I40E_PFQF_CTL_0_RCU_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_RCU_PEDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_RCU_PFFCHSIZE_SHIFT 10
+#define I40E_PFQF_CTL_0_RCU_PFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_RCU_PFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_RCU_PFFCDSIZE_SHIFT 14
+#define I40E_PFQF_CTL_0_RCU_PFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_RCU_PFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_RCU_HASHLUTSIZE_SHIFT 16
+#define I40E_PFQF_CTL_0_RCU_HASHLUTSIZE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_RCU_HASHLUTSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_RCU_FD_ENA_SHIFT 17
+#define I40E_PFQF_CTL_0_RCU_FD_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_RCU_FD_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_RCU_ETYPE_ENA_SHIFT 18
+#define I40E_PFQF_CTL_0_RCU_ETYPE_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_RCU_ETYPE_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_RCU_MACVLAN_ENA_SHIFT 19
+#define I40E_PFQF_CTL_0_RCU_MACVLAN_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_RCU_MACVLAN_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_RCU_VFFCHSIZE_SHIFT 20
+#define I40E_PFQF_CTL_0_RCU_VFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_RCU_VFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_RCU_VFFCDSIZE_SHIFT 24
+#define I40E_PFQF_CTL_0_RCU_VFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_RCU_VFFCDSIZE_SHIFT)
+
+#define I40E_PFQF_DDPCNT 0x00246180 /* Reset: CORER */
+#define I40E_PFQF_DDPCNT_DDP_CNT_SHIFT 0
+#define I40E_PFQF_DDPCNT_DDP_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_DDPCNT_DDP_CNT_SHIFT)
+
+#define I40E_PFQF_FCCNT_0 0x00245E80 /* Reset: CORER */
+#define I40E_PFQF_FCCNT_0_BUCKETCNT_SHIFT 0
+#define I40E_PFQF_FCCNT_0_BUCKETCNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FCCNT_0_BUCKETCNT_SHIFT)
+
+#define I40E_PFQF_FCCNT_1 0x00245F80 /* Reset: PFR */
+#define I40E_PFQF_FCCNT_1_HITSBCNT_SHIFT 0
+#define I40E_PFQF_FCCNT_1_HITSBCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_PFQF_FCCNT_1_HITSBCNT_SHIFT)
+
+#define I40E_PFQF_FCCNT_2 0x00246080 /* Reset: PFR */
+#define I40E_PFQF_FCCNT_2_HITLBCNT_SHIFT 0
+#define I40E_PFQF_FCCNT_2_HITLBCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_PFQF_FCCNT_2_HITLBCNT_SHIFT)
+
+#define I40E_PFQF_HREGION(_i) (0x00245400 + ((_i) * 128)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PFQF_HREGION_MAX_INDEX 7
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_PFQF_HREGION_REGION_0_SHIFT 1
+#define I40E_PFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_0_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_PFQF_HREGION_REGION_1_SHIFT 5
+#define I40E_PFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_1_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_PFQF_HREGION_REGION_2_SHIFT 9
+#define I40E_PFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_2_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_PFQF_HREGION_REGION_3_SHIFT 13
+#define I40E_PFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_3_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_PFQF_HREGION_REGION_4_SHIFT 17
+#define I40E_PFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_4_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_PFQF_HREGION_REGION_5_SHIFT 21
+#define I40E_PFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_5_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_PFQF_HREGION_REGION_6_SHIFT 25
+#define I40E_PFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_6_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_PFQF_HREGION_REGION_7_SHIFT 29
+#define I40E_PFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_7_SHIFT)
+
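+/*
+ * Usage sketch (illustrative only, not part of the hardware definition):
+ * each field above is read out of a raw 32-bit register value with its
+ * matching *_MASK/*_SHIFT pair, and array registers such as
+ * I40E_PFQF_HREGION(_i) are addressed by index.  The u32 type and the
+ * caller having already read the register (e.g. through the OS-dependent
+ * rd32() accessor) are assumptions borrowed from the surrounding driver,
+ * not something this header provides.
+ */
+static inline u32
+i40e_example_get_hregion_region0(u32 reg)
+{
+	/* Isolate REGION_0 (bits 3:1) of a PFQF_HREGION(_i) value. */
+	return ((reg & I40E_PFQF_HREGION_REGION_0_MASK) >>
+	    I40E_PFQF_HREGION_REGION_0_SHIFT);
+}
+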
+#define I40E_PFQF_PECNT_0 0x00246480 /* Reset: CORER */
+#define I40E_PFQF_PECNT_0_BUCKETCNT_SHIFT 0
+#define I40E_PFQF_PECNT_0_BUCKETCNT_MASK I40E_MASK(0x7FFFF, I40E_PFQF_PECNT_0_BUCKETCNT_SHIFT)
+
+#define I40E_PFQF_PECNT_1 0x00246580 /* Reset: PFR */
+#define I40E_PFQF_PECNT_1_HITSBCNT_SHIFT 0
+#define I40E_PFQF_PECNT_1_HITSBCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_PFQF_PECNT_1_HITSBCNT_SHIFT)
+
+#define I40E_PFQF_PECNT_2 0x00246680 /* Reset: PFR */
+#define I40E_PFQF_PECNT_2_HITLBCNT_SHIFT 0
+#define I40E_PFQF_PECNT_2_HITLBCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_PFQF_PECNT_2_HITLBCNT_SHIFT)
+
+#define I40E_PFQF_PECNT_CNTX 0x0026CA80 /* Reset: CORER */
+#define I40E_PFQF_PECNT_CNTX_FLTCNT_SHIFT 0
+#define I40E_PFQF_PECNT_CNTX_FLTCNT_MASK I40E_MASK(0x7FFFF, I40E_PFQF_PECNT_CNTX_FLTCNT_SHIFT)
+
+#define I40E_PRTQF_FD_INSET(_i, _j) (0x00250000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */
+#define I40E_PRTQF_FD_INSET_MAX_INDEX 63
+#define I40E_PRTQF_FD_INSET_INSET_SHIFT 0
+#define I40E_PRTQF_FD_INSET_INSET_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTQF_FD_INSET_INSET_SHIFT)
+
+#define I40E_VPQF_CTL_RCU(_VF) (0x00231C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_VPQF_CTL_RCU_MAX_INDEX 127
+#define I40E_VPQF_CTL_RCU_PEHSIZE_SHIFT 0
+#define I40E_VPQF_CTL_RCU_PEHSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_RCU_PEHSIZE_SHIFT)
+#define I40E_VPQF_CTL_RCU_PEDSIZE_SHIFT 5
+#define I40E_VPQF_CTL_RCU_PEDSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_RCU_PEDSIZE_SHIFT)
+#define I40E_VPQF_CTL_RCU_FCHSIZE_SHIFT 10
+#define I40E_VPQF_CTL_RCU_FCHSIZE_MASK I40E_MASK(0xF, I40E_VPQF_CTL_RCU_FCHSIZE_SHIFT)
+#define I40E_VPQF_CTL_RCU_FCDSIZE_SHIFT 14
+#define I40E_VPQF_CTL_RCU_FCDSIZE_MASK I40E_MASK(0x3, I40E_VPQF_CTL_RCU_FCDSIZE_SHIFT)
+
+#define I40E_VPQF_DDPCNT1(_VF) (0x00231400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_VPQF_DDPCNT1_MAX_INDEX 127
+#define I40E_VPQF_DDPCNT1_DDP_CNT_SHIFT 0
+#define I40E_VPQF_DDPCNT1_DDP_CNT_MASK I40E_MASK(0x1FFF, I40E_VPQF_DDPCNT1_DDP_CNT_SHIFT)
+
+#define I40E_VPQF_FCCNT_0(_VF) (0x0026A400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_VPQF_FCCNT_0_MAX_INDEX 127
+#define I40E_VPQF_FCCNT_0_BUCKETCNT_SHIFT 0
+#define I40E_VPQF_FCCNT_0_BUCKETCNT_MASK I40E_MASK(0x1FFF, I40E_VPQF_FCCNT_0_BUCKETCNT_SHIFT)
+
+#define I40E_VPQF_PECNT_0(_VF) (0x0026B400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_VPQF_PECNT_0_MAX_INDEX 127
+#define I40E_VPQF_PECNT_0_BUCKETCNT_SHIFT 0
+#define I40E_VPQF_PECNT_0_BUCKETCNT_MASK I40E_MASK(0x7FFFF, I40E_VPQF_PECNT_0_BUCKETCNT_SHIFT)
+
+#define I40E_VPQF_PECNT_1(_VF) (0x0026BC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_VPQF_PECNT_1_MAX_INDEX 127
+#define I40E_VPQF_PECNT_1_FLTCNT_SHIFT 0
+#define I40E_VPQF_PECNT_1_FLTCNT_MASK I40E_MASK(0x7FFFF, I40E_VPQF_PECNT_1_FLTCNT_SHIFT)
+
+/* PF - Statistics Registers */
+
+#define I40E_GLPRT_AORCH(_i) (0x00300A44 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_AORCH_MAX_INDEX 3
+#define I40E_GLPRT_AORCH_AORCH_SHIFT 0
+#define I40E_GLPRT_AORCH_AORCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_AORCH_AORCH_SHIFT)
+
+#define I40E_GLPRT_AORCL(_i) (0x00300A40 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_AORCL_MAX_INDEX 3
+#define I40E_GLPRT_AORCL_VGORC_SHIFT 0
+#define I40E_GLPRT_AORCL_VGORC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_AORCL_VGORC_SHIFT)
+
+#define I40E_GLPRT_ERRBC(_i) (0x003000C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_ERRBC_MAX_INDEX 3
+#define I40E_GLPRT_ERRBC_ERRBC_SHIFT 0
+#define I40E_GLPRT_ERRBC_ERRBC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ERRBC_ERRBC_SHIFT)
+
+#define I40E_GLPRT_MSPDC(_i) (0x00300060 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MSPDC_MAX_INDEX 3
+#define I40E_GLPRT_MSPDC_MSPDC_SHIFT 0
+#define I40E_GLPRT_MSPDC_MSPDC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MSPDC_MSPDC_SHIFT)
+
+#define I40E_GLPRT_STDC(_i) (0x00300640 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_STDC_MAX_INDEX 3
+#define I40E_GLPRT_STDC_STDC_SHIFT 0
+#define I40E_GLPRT_STDC_STDC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_STDC_STDC_SHIFT)
+
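+/*
+ * Usage sketch (illustrative, with assumptions): the _H/_L naming of the
+ * statistics registers above suggests each per-port counter is split into
+ * a low 32-bit word and a high 16-bit word (a 48-bit counter, one copy per
+ * port, _i=0...3).  A minimal helper that combines the two raw register
+ * values might look like the following; the u32/u64 types and the
+ * caller-supplied reads are assumptions, and the pairing is inferred from
+ * the register names rather than stated by this header.
+ */
+static inline u64
+i40e_example_stat48(u32 reg_hi, u32 reg_lo)
+{
+	/* Upper 16 bits live in the *_H register, lower 32 in the *_L one. */
+	u64 hi = (reg_hi & I40E_GLPRT_AORCH_AORCH_MASK) >>
+	    I40E_GLPRT_AORCH_AORCH_SHIFT;
+
+	return ((hi << 32) | (u64)reg_lo);
+}
+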
+/* PF - Switch Registers */
+
+#define I40E_EMP_MTG_FLU_ICH 0x00269BE4 /* Reset: CORER */
+#define I40E_EMP_MTG_FLU_ICH_PROTOCOL_ID_SHIFT 0
+#define I40E_EMP_MTG_FLU_ICH_PROTOCOL_ID_MASK I40E_MASK(0x3F, I40E_EMP_MTG_FLU_ICH_PROTOCOL_ID_SHIFT)
+#define I40E_EMP_MTG_FLU_ICH_IGNORE_PROTOCOL_SHIFT 6
+#define I40E_EMP_MTG_FLU_ICH_IGNORE_PROTOCOL_MASK I40E_MASK(0x1, I40E_EMP_MTG_FLU_ICH_IGNORE_PROTOCOL_SHIFT)
+#define I40E_EMP_MTG_FLU_ICH_USE_MAN_SHIFT 7
+#define I40E_EMP_MTG_FLU_ICH_USE_MAN_MASK I40E_MASK(0x1, I40E_EMP_MTG_FLU_ICH_USE_MAN_SHIFT)
+
+#define I40E_EMP_MTG_FLU_ICL 0x00269BDC /* Reset: CORER */
+#define I40E_EMP_MTG_FLU_ICL_W0_OFFSET_SHIFT 0
+#define I40E_EMP_MTG_FLU_ICL_W0_OFFSET_MASK I40E_MASK(0x3F, I40E_EMP_MTG_FLU_ICL_W0_OFFSET_SHIFT)
+#define I40E_EMP_MTG_FLU_ICL_W0_STATUS_SHIFT 6
+#define I40E_EMP_MTG_FLU_ICL_W0_STATUS_MASK I40E_MASK(0x1, I40E_EMP_MTG_FLU_ICL_W0_STATUS_SHIFT)
+#define I40E_EMP_MTG_FLU_ICL_W1_OFFSET_SHIFT 8
+#define I40E_EMP_MTG_FLU_ICL_W1_OFFSET_MASK I40E_MASK(0x3F, I40E_EMP_MTG_FLU_ICL_W1_OFFSET_SHIFT)
+#define I40E_EMP_MTG_FLU_ICL_W1_STATUS_SHIFT 14
+#define I40E_EMP_MTG_FLU_ICL_W1_STATUS_MASK I40E_MASK(0x1, I40E_EMP_MTG_FLU_ICL_W1_STATUS_SHIFT)
+#define I40E_EMP_MTG_FLU_ICL_W2_OFFSET_SHIFT 16
+#define I40E_EMP_MTG_FLU_ICL_W2_OFFSET_MASK I40E_MASK(0x3F, I40E_EMP_MTG_FLU_ICL_W2_OFFSET_SHIFT)
+#define I40E_EMP_MTG_FLU_ICL_W2_STATUS_SHIFT 22
+#define I40E_EMP_MTG_FLU_ICL_W2_STATUS_MASK I40E_MASK(0x1, I40E_EMP_MTG_FLU_ICL_W2_STATUS_SHIFT)
+#define I40E_EMP_MTG_FLU_ICL_ETYPE_ENABLE_SHIFT 28
+#define I40E_EMP_MTG_FLU_ICL_ETYPE_ENABLE_MASK I40E_MASK(0x1, I40E_EMP_MTG_FLU_ICL_ETYPE_ENABLE_SHIFT)
+#define I40E_EMP_MTG_FLU_ICL_IGNORE_PHASE_SHIFT 29
+#define I40E_EMP_MTG_FLU_ICL_IGNORE_PHASE_MASK I40E_MASK(0x1, I40E_EMP_MTG_FLU_ICL_IGNORE_PHASE_SHIFT)
+#define I40E_EMP_MTG_FLU_ICL_EGRESS_SHIFT 30
+#define I40E_EMP_MTG_FLU_ICL_EGRESS_MASK I40E_MASK(0x1, I40E_EMP_MTG_FLU_ICL_EGRESS_SHIFT)
+#define I40E_EMP_MTG_FLU_ICL_PORT_ENABLE_SHIFT 31
+#define I40E_EMP_MTG_FLU_ICL_PORT_ENABLE_MASK I40E_MASK(0x1, I40E_EMP_MTG_FLU_ICL_PORT_ENABLE_SHIFT)
+
+#define I40E_EMP_SWT_CCTRL 0x00269770 /* Reset: POR */
+#define I40E_EMP_SWT_CCTRL_LLVSI_SHIFT 10
+#define I40E_EMP_SWT_CCTRL_LLVSI_MASK I40E_MASK(0x3FF, I40E_EMP_SWT_CCTRL_LLVSI_SHIFT)
+#define I40E_EMP_SWT_CCTRL_PROXYVSI_SHIFT 20
+#define I40E_EMP_SWT_CCTRL_PROXYVSI_MASK I40E_MASK(0x3FF, I40E_EMP_SWT_CCTRL_PROXYVSI_SHIFT)
+
+#define I40E_EMP_SWT_CGEN 0x0006D000 /* Reset: POR */
+#define I40E_EMP_SWT_CGEN_GLEN_SHIFT 0
+#define I40E_EMP_SWT_CGEN_GLEN_MASK I40E_MASK(0x1, I40E_EMP_SWT_CGEN_GLEN_SHIFT)
+
+#define I40E_EMP_SWT_CLLE(_i) (0x00269790 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_EMP_SWT_CLLE_MAX_INDEX 3
+#define I40E_EMP_SWT_CLLE_TAG_SHIFT 0
+#define I40E_EMP_SWT_CLLE_TAG_MASK I40E_MASK(0xFFFF, I40E_EMP_SWT_CLLE_TAG_SHIFT)
+#define I40E_EMP_SWT_CLLE_IGNORE_TAG_SHIFT 16
+#define I40E_EMP_SWT_CLLE_IGNORE_TAG_MASK I40E_MASK(0x1, I40E_EMP_SWT_CLLE_IGNORE_TAG_SHIFT)
+#define I40E_EMP_SWT_CLLE_PORT_NUMBER_SHIFT 17
+#define I40E_EMP_SWT_CLLE_PORT_NUMBER_MASK I40E_MASK(0x3, I40E_EMP_SWT_CLLE_PORT_NUMBER_SHIFT)
+#define I40E_EMP_SWT_CLLE_ENABLE_SHIFT 31
+#define I40E_EMP_SWT_CLLE_ENABLE_MASK I40E_MASK(0x1, I40E_EMP_SWT_CLLE_ENABLE_SHIFT)
+
+#define I40E_EMP_SWT_CMASK 0x0006D180 /* Reset: POR */
+#define I40E_EMP_SWT_CMASK_UNICASTTAGMASK_SHIFT 0
+#define I40E_EMP_SWT_CMASK_UNICASTTAGMASK_MASK I40E_MASK(0xFFFF, I40E_EMP_SWT_CMASK_UNICASTTAGMASK_SHIFT)
+#define I40E_EMP_SWT_CMASK_MULTICASTTAGMASK_SHIFT 16
+#define I40E_EMP_SWT_CMASK_MULTICASTTAGMASK_MASK I40E_MASK(0xFFFF, I40E_EMP_SWT_CMASK_MULTICASTTAGMASK_SHIFT)
+
+#define I40E_EMP_SWT_CMTTD(_i) (0x0006E000 + ((_i) * 4)) /* _i=0...511 */ /* Reset: POR */
+#define I40E_EMP_SWT_CMTTD_MAX_INDEX 511
+#define I40E_EMP_SWT_CMTTD_PFLIST_SHIFT 0
+#define I40E_EMP_SWT_CMTTD_PFLIST_MASK I40E_MASK(0xFFFF, I40E_EMP_SWT_CMTTD_PFLIST_SHIFT)
+
+#define I40E_EMP_SWT_CMTTL(_i) (0x0006D800 + ((_i) * 4)) /* _i=0...511 */ /* Reset: POR */
+#define I40E_EMP_SWT_CMTTL_MAX_INDEX 511
+#define I40E_EMP_SWT_CMTTL_MTAG_SHIFT 0
+#define I40E_EMP_SWT_CMTTL_MTAG_MASK I40E_MASK(0xFFFF, I40E_EMP_SWT_CMTTL_MTAG_SHIFT)
+#define I40E_EMP_SWT_CMTTL_PORT_SHIFT 16
+#define I40E_EMP_SWT_CMTTL_PORT_MASK I40E_MASK(0x3, I40E_EMP_SWT_CMTTL_PORT_SHIFT)
+#define I40E_EMP_SWT_CMTTL_ENABLE_SHIFT 18
+#define I40E_EMP_SWT_CMTTL_ENABLE_MASK I40E_MASK(0x1, I40E_EMP_SWT_CMTTL_ENABLE_SHIFT)
+
+#define I40E_EMP_SWT_COFFSET 0x0006D200 /* Reset: POR */
+#define I40E_EMP_SWT_COFFSET_UNICASTTAGOFFSET_SHIFT 0
+#define I40E_EMP_SWT_COFFSET_UNICASTTAGOFFSET_MASK I40E_MASK(0x1F, I40E_EMP_SWT_COFFSET_UNICASTTAGOFFSET_SHIFT)
+#define I40E_EMP_SWT_COFFSET_RESERVED_2_SHIFT 5
+#define I40E_EMP_SWT_COFFSET_RESERVED_2_MASK I40E_MASK(0x7, I40E_EMP_SWT_COFFSET_RESERVED_2_SHIFT)
+#define I40E_EMP_SWT_COFFSET_MULTICASTTAGOFFSET_SHIFT 8
+#define I40E_EMP_SWT_COFFSET_MULTICASTTAGOFFSET_MASK I40E_MASK(0x1F, I40E_EMP_SWT_COFFSET_MULTICASTTAGOFFSET_SHIFT)
+
+#define I40E_EMP_SWT_CPFE(_i) (0x001C09E0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: POR */
+#define I40E_EMP_SWT_CPFE_MAX_INDEX 15
+#define I40E_EMP_SWT_CPFE_TAG_SHIFT 0
+#define I40E_EMP_SWT_CPFE_TAG_MASK I40E_MASK(0xFFFF, I40E_EMP_SWT_CPFE_TAG_SHIFT)
+#define I40E_EMP_SWT_CPFE_IGNORE_TAG_SHIFT 16
+#define I40E_EMP_SWT_CPFE_IGNORE_TAG_MASK I40E_MASK(0x1, I40E_EMP_SWT_CPFE_IGNORE_TAG_SHIFT)
+#define I40E_EMP_SWT_CPFE_PORT_NUMBER_SHIFT 17
+#define I40E_EMP_SWT_CPFE_PORT_NUMBER_MASK I40E_MASK(0x3, I40E_EMP_SWT_CPFE_PORT_NUMBER_SHIFT)
+#define I40E_EMP_SWT_CPFE_ENABLE_SHIFT 31
+#define I40E_EMP_SWT_CPFE_ENABLE_MASK I40E_MASK(0x1, I40E_EMP_SWT_CPFE_ENABLE_SHIFT)
+
+#define I40E_EMP_SWT_CPFE_RCU(_i) (0x00269040 + ((_i) * 4)) /* _i=0...15 */ /* Reset: POR */
+#define I40E_EMP_SWT_CPFE_RCU_MAX_INDEX 15
+#define I40E_EMP_SWT_CPFE_RCU_TAG_SHIFT 0
+#define I40E_EMP_SWT_CPFE_RCU_TAG_MASK I40E_MASK(0xFFFF, I40E_EMP_SWT_CPFE_RCU_TAG_SHIFT)
+#define I40E_EMP_SWT_CPFE_RCU_IGNORE_TAG_SHIFT 16
+#define I40E_EMP_SWT_CPFE_RCU_IGNORE_TAG_MASK I40E_MASK(0x1, I40E_EMP_SWT_CPFE_RCU_IGNORE_TAG_SHIFT)
+#define I40E_EMP_SWT_CPFE_RCU_PORT_NUMBER_SHIFT 17
+#define I40E_EMP_SWT_CPFE_RCU_PORT_NUMBER_MASK I40E_MASK(0x3, I40E_EMP_SWT_CPFE_RCU_PORT_NUMBER_SHIFT)
+#define I40E_EMP_SWT_CPFE_RCU_ENABLE_SHIFT 31
+#define I40E_EMP_SWT_CPFE_RCU_ENABLE_MASK I40E_MASK(0x1, I40E_EMP_SWT_CPFE_RCU_ENABLE_SHIFT)
+
+#define I40E_EMP_SWT_CPFE_WUC(_i) (0x0006D080 + ((_i) * 4)) /* _i=0...15 */ /* Reset: POR */
+#define I40E_EMP_SWT_CPFE_WUC_MAX_INDEX 15
+#define I40E_EMP_SWT_CPFE_WUC_TAG_SHIFT 0
+#define I40E_EMP_SWT_CPFE_WUC_TAG_MASK I40E_MASK(0xFFFF, I40E_EMP_SWT_CPFE_WUC_TAG_SHIFT)
+#define I40E_EMP_SWT_CPFE_WUC_IGNORE_TAG_SHIFT 16
+#define I40E_EMP_SWT_CPFE_WUC_IGNORE_TAG_MASK I40E_MASK(0x1, I40E_EMP_SWT_CPFE_WUC_IGNORE_TAG_SHIFT)
+#define I40E_EMP_SWT_CPFE_WUC_PORT_NUMBER_SHIFT 17
+#define I40E_EMP_SWT_CPFE_WUC_PORT_NUMBER_MASK I40E_MASK(0x3, I40E_EMP_SWT_CPFE_WUC_PORT_NUMBER_SHIFT)
+#define I40E_EMP_SWT_CPFE_WUC_ENABLE_SHIFT 31
+#define I40E_EMP_SWT_CPFE_WUC_ENABLE_MASK I40E_MASK(0x1, I40E_EMP_SWT_CPFE_WUC_ENABLE_SHIFT)
+
+#define I40E_EMP_SWT_CPTE(_i) (0x002697B0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_EMP_SWT_CPTE_MAX_INDEX 3
+#define I40E_EMP_SWT_CPTE_TAG_SHIFT 0
+#define I40E_EMP_SWT_CPTE_TAG_MASK I40E_MASK(0xFFFF, I40E_EMP_SWT_CPTE_TAG_SHIFT)
+#define I40E_EMP_SWT_CPTE_IGNORE_TAG_SHIFT 16
+#define I40E_EMP_SWT_CPTE_IGNORE_TAG_MASK I40E_MASK(0x1, I40E_EMP_SWT_CPTE_IGNORE_TAG_SHIFT)
+#define I40E_EMP_SWT_CPTE_PORT_NUMBER_SHIFT 17
+#define I40E_EMP_SWT_CPTE_PORT_NUMBER_MASK I40E_MASK(0x3, I40E_EMP_SWT_CPTE_PORT_NUMBER_SHIFT)
+#define I40E_EMP_SWT_CPTE_ENABLE_SHIFT 31
+#define I40E_EMP_SWT_CPTE_ENABLE_MASK I40E_MASK(0x1, I40E_EMP_SWT_CPTE_ENABLE_SHIFT)
+
+#define I40E_EMP_SWT_CPTE2(_i) (0x002697D0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_EMP_SWT_CPTE2_MAX_INDEX 3
+#define I40E_EMP_SWT_CPTE2_TAG_SHIFT 0
+#define I40E_EMP_SWT_CPTE2_TAG_MASK I40E_MASK(0xFFFF, I40E_EMP_SWT_CPTE2_TAG_SHIFT)
+#define I40E_EMP_SWT_CPTE2_IGNORE_TAG_SHIFT 16
+#define I40E_EMP_SWT_CPTE2_IGNORE_TAG_MASK I40E_MASK(0x1, I40E_EMP_SWT_CPTE2_IGNORE_TAG_SHIFT)
+#define I40E_EMP_SWT_CPTE2_PORT_NUMBER_SHIFT 17
+#define I40E_EMP_SWT_CPTE2_PORT_NUMBER_MASK I40E_MASK(0x3, I40E_EMP_SWT_CPTE2_PORT_NUMBER_SHIFT)
+#define I40E_EMP_SWT_CPTE2_ENABLE_SHIFT 31
+#define I40E_EMP_SWT_CPTE2_ENABLE_MASK I40E_MASK(0x1, I40E_EMP_SWT_CPTE2_ENABLE_SHIFT)
+
+#define I40E_EMP_SWT_CTAG 0x00269B64 /* Reset: POR */
+#define I40E_EMP_SWT_CTAG_TAG_INDEX_SHIFT 0
+#define I40E_EMP_SWT_CTAG_TAG_INDEX_MASK I40E_MASK(0x3F, I40E_EMP_SWT_CTAG_TAG_INDEX_SHIFT)
+#define I40E_EMP_SWT_CTAG_TAG_MASK_SHIFT 10
+#define I40E_EMP_SWT_CTAG_TAG_MASK_MASK I40E_MASK(0xFFFF, I40E_EMP_SWT_CTAG_TAG_MASK_SHIFT)
+
+#define I40E_EMP_SWT_CUPD 0x0006D100 /* Reset: POR */
+#define I40E_EMP_SWT_CUPD_UNTAGGED_PORT0_PF_SHIFT 0
+#define I40E_EMP_SWT_CUPD_UNTAGGED_PORT0_PF_MASK I40E_MASK(0xF, I40E_EMP_SWT_CUPD_UNTAGGED_PORT0_PF_SHIFT)
+#define I40E_EMP_SWT_CUPD_UNTAGGED_PORT1_PF_SHIFT 4
+#define I40E_EMP_SWT_CUPD_UNTAGGED_PORT1_PF_MASK I40E_MASK(0xF, I40E_EMP_SWT_CUPD_UNTAGGED_PORT1_PF_SHIFT)
+#define I40E_EMP_SWT_CUPD_UNTAGGED_PORT2_PF_SHIFT 8
+#define I40E_EMP_SWT_CUPD_UNTAGGED_PORT2_PF_MASK I40E_MASK(0xF, I40E_EMP_SWT_CUPD_UNTAGGED_PORT2_PF_SHIFT)
+#define I40E_EMP_SWT_CUPD_UNTAGGED_PORT3_PF_SHIFT 12
+#define I40E_EMP_SWT_CUPD_UNTAGGED_PORT3_PF_MASK I40E_MASK(0xF, I40E_EMP_SWT_CUPD_UNTAGGED_PORT3_PF_SHIFT)
+#define I40E_EMP_SWT_CUPD_ACCEPTUNTAGGEDPORT0_SHIFT 26
+#define I40E_EMP_SWT_CUPD_ACCEPTUNTAGGEDPORT0_MASK I40E_MASK(0x1, I40E_EMP_SWT_CUPD_ACCEPTUNTAGGEDPORT0_SHIFT)
+#define I40E_EMP_SWT_CUPD_ACCEPTUNTAGGEDPORT1_SHIFT 27
+#define I40E_EMP_SWT_CUPD_ACCEPTUNTAGGEDPORT1_MASK I40E_MASK(0x1, I40E_EMP_SWT_CUPD_ACCEPTUNTAGGEDPORT1_SHIFT)
+#define I40E_EMP_SWT_CUPD_ACCEPTUNTAGGEDPORT2_SHIFT 28
+#define I40E_EMP_SWT_CUPD_ACCEPTUNTAGGEDPORT2_MASK I40E_MASK(0x1, I40E_EMP_SWT_CUPD_ACCEPTUNTAGGEDPORT2_SHIFT)
+#define I40E_EMP_SWT_CUPD_ACCEPTUNTAGGEDPORT3_SHIFT 29
+#define I40E_EMP_SWT_CUPD_ACCEPTUNTAGGEDPORT3_MASK I40E_MASK(0x1, I40E_EMP_SWT_CUPD_ACCEPTUNTAGGEDPORT3_SHIFT)
+#define I40E_EMP_SWT_CUPD_ACCEPTUNMATCHEDUCTST_SHIFT 30
+#define I40E_EMP_SWT_CUPD_ACCEPTUNMATCHEDUCTST_MASK I40E_MASK(0x1, I40E_EMP_SWT_CUPD_ACCEPTUNMATCHEDUCTST_SHIFT)
+#define I40E_EMP_SWT_CUPD_ACCEPTUNMATCHEDMCTST_SHIFT 31
+#define I40E_EMP_SWT_CUPD_ACCEPTUNMATCHEDMCTST_MASK I40E_MASK(0x1, I40E_EMP_SWT_CUPD_ACCEPTUNMATCHEDMCTST_SHIFT)
+
+#define I40E_EMP_SWT_ETHMATCH 0x00269B6C /* Reset: POR */
+#define I40E_EMP_SWT_ETHMATCH_ETHMATCH_SHIFT 0
+#define I40E_EMP_SWT_ETHMATCH_ETHMATCH_MASK I40E_MASK(0xFFFF, I40E_EMP_SWT_ETHMATCH_ETHMATCH_SHIFT)
+
+#define I40E_EMP_SWT_FLU_L1_ICH_PHASE0(_i) (0x002695E0 + ((_i) * 4)) /* _i=0...4 */ /* Reset: CORER */
+#define I40E_EMP_SWT_FLU_L1_ICH_PHASE0_MAX_INDEX 4
+#define I40E_EMP_SWT_FLU_L1_ICH_PHASE0_PROTOCOL_ID_SHIFT 0
+#define I40E_EMP_SWT_FLU_L1_ICH_PHASE0_PROTOCOL_ID_MASK I40E_MASK(0x3F, I40E_EMP_SWT_FLU_L1_ICH_PHASE0_PROTOCOL_ID_SHIFT)
+#define I40E_EMP_SWT_FLU_L1_ICH_PHASE0_IGNORE_PROTOCOL_SHIFT 6
+#define I40E_EMP_SWT_FLU_L1_ICH_PHASE0_IGNORE_PROTOCOL_MASK I40E_MASK(0x1, I40E_EMP_SWT_FLU_L1_ICH_PHASE0_IGNORE_PROTOCOL_SHIFT)
+#define I40E_EMP_SWT_FLU_L1_ICH_PHASE0_USE_MAN_SHIFT 7
+#define I40E_EMP_SWT_FLU_L1_ICH_PHASE0_USE_MAN_MASK I40E_MASK(0x1, I40E_EMP_SWT_FLU_L1_ICH_PHASE0_USE_MAN_SHIFT)
+
+#define I40E_EMP_SWT_FLU_L1_ICH_PHASE1(_i) (0x00269660 + ((_i) * 4)) /* _i=0...4 */ /* Reset: CORER */
+#define I40E_EMP_SWT_FLU_L1_ICH_PHASE1_MAX_INDEX 4
+#define I40E_EMP_SWT_FLU_L1_ICH_PHASE1_PROTOCOL_ID_SHIFT 0
+#define I40E_EMP_SWT_FLU_L1_ICH_PHASE1_PROTOCOL_ID_MASK I40E_MASK(0x3F, I40E_EMP_SWT_FLU_L1_ICH_PHASE1_PROTOCOL_ID_SHIFT)
+#define I40E_EMP_SWT_FLU_L1_ICH_PHASE1_IGNORE_PROTOCOL_SHIFT 6
+#define I40E_EMP_SWT_FLU_L1_ICH_PHASE1_IGNORE_PROTOCOL_MASK I40E_MASK(0x1, I40E_EMP_SWT_FLU_L1_ICH_PHASE1_IGNORE_PROTOCOL_SHIFT)
+#define I40E_EMP_SWT_FLU_L1_ICH_PHASE1_USE_MAN_SHIFT 7
+#define I40E_EMP_SWT_FLU_L1_ICH_PHASE1_USE_MAN_MASK I40E_MASK(0x1, I40E_EMP_SWT_FLU_L1_ICH_PHASE1_USE_MAN_SHIFT)
+
+#define I40E_EMP_SWT_FLU_L1_ICL_PHASE0(_i) (0x00269620 + ((_i) * 4)) /* _i=0...6 */ /* Reset: CORER */
+#define I40E_EMP_SWT_FLU_L1_ICL_PHASE0_MAX_INDEX 6
+#define I40E_EMP_SWT_FLU_L1_ICL_PHASE0_W0_OFFSET_SHIFT 0
+#define I40E_EMP_SWT_FLU_L1_ICL_PHASE0_W0_OFFSET_MASK I40E_MASK(0x3F, I40E_EMP_SWT_FLU_L1_ICL_PHASE0_W0_OFFSET_SHIFT)
+#define I40E_EMP_SWT_FLU_L1_ICL_PHASE0_W0_STATUS_SHIFT 6
+#define I40E_EMP_SWT_FLU_L1_ICL_PHASE0_W0_STATUS_MASK I40E_MASK(0x1, I40E_EMP_SWT_FLU_L1_ICL_PHASE0_W0_STATUS_SHIFT)
+#define I40E_EMP_SWT_FLU_L1_ICL_PHASE0_W1_OFFSET_SHIFT 8
+#define I40E_EMP_SWT_FLU_L1_ICL_PHASE0_W1_OFFSET_MASK I40E_MASK(0x3F, I40E_EMP_SWT_FLU_L1_ICL_PHASE0_W1_OFFSET_SHIFT)
+#define I40E_EMP_SWT_FLU_L1_ICL_PHASE0_W1_STATUS_SHIFT 14
+#define I40E_EMP_SWT_FLU_L1_ICL_PHASE0_W1_STATUS_MASK I40E_MASK(0x1, I40E_EMP_SWT_FLU_L1_ICL_PHASE0_W1_STATUS_SHIFT)
+#define I40E_EMP_SWT_FLU_L1_ICL_PHASE0_W2_OFFSET_SHIFT 16
+#define I40E_EMP_SWT_FLU_L1_ICL_PHASE0_W2_OFFSET_MASK I40E_MASK(0x3F, I40E_EMP_SWT_FLU_L1_ICL_PHASE0_W2_OFFSET_SHIFT)
+#define I40E_EMP_SWT_FLU_L1_ICL_PHASE0_W2_STATUS_SHIFT 22
+#define I40E_EMP_SWT_FLU_L1_ICL_PHASE0_W2_STATUS_MASK I40E_MASK(0x1, I40E_EMP_SWT_FLU_L1_ICL_PHASE0_W2_STATUS_SHIFT)
+#define I40E_EMP_SWT_FLU_L1_ICL_PHASE0_ETYPE_ENABLE_SHIFT 28
+#define I40E_EMP_SWT_FLU_L1_ICL_PHASE0_ETYPE_ENABLE_MASK I40E_MASK(0x1, I40E_EMP_SWT_FLU_L1_ICL_PHASE0_ETYPE_ENABLE_SHIFT)
+#define I40E_EMP_SWT_FLU_L1_ICL_PHASE0_IGNORE_PHASE_SHIFT 29
+#define I40E_EMP_SWT_FLU_L1_ICL_PHASE0_IGNORE_PHASE_MASK I40E_MASK(0x1, I40E_EMP_SWT_FLU_L1_ICL_PHASE0_IGNORE_PHASE_SHIFT)
+#define I40E_EMP_SWT_FLU_L1_ICL_PHASE0_EGRESS_SHIFT 30
+#define I40E_EMP_SWT_FLU_L1_ICL_PHASE0_EGRESS_MASK I40E_MASK(0x1, I40E_EMP_SWT_FLU_L1_ICL_PHASE0_EGRESS_SHIFT)
+#define I40E_EMP_SWT_FLU_L1_ICL_PHASE0_PORT_ENABLE_SHIFT 31
+#define I40E_EMP_SWT_FLU_L1_ICL_PHASE0_PORT_ENABLE_MASK I40E_MASK(0x1, I40E_EMP_SWT_FLU_L1_ICL_PHASE0_PORT_ENABLE_SHIFT)
+
+#define I40E_EMP_SWT_FLU_L1_ICL_PHASE1(_i) (0x002696A0 + ((_i) * 4)) /* _i=0...6 */ /* Reset: CORER */
+#define I40E_EMP_SWT_FLU_L1_ICL_PHASE1_MAX_INDEX 6
+#define I40E_EMP_SWT_FLU_L1_ICL_PHASE1_W0_OFFSET_SHIFT 0
+#define I40E_EMP_SWT_FLU_L1_ICL_PHASE1_W0_OFFSET_MASK I40E_MASK(0x3F, I40E_EMP_SWT_FLU_L1_ICL_PHASE1_W0_OFFSET_SHIFT)
+#define I40E_EMP_SWT_FLU_L1_ICL_PHASE1_W0_STATUS_SHIFT 6
+#define I40E_EMP_SWT_FLU_L1_ICL_PHASE1_W0_STATUS_MASK I40E_MASK(0x1, I40E_EMP_SWT_FLU_L1_ICL_PHASE1_W0_STATUS_SHIFT)
+#define I40E_EMP_SWT_FLU_L1_ICL_PHASE1_W1_OFFSET_SHIFT 8
+#define I40E_EMP_SWT_FLU_L1_ICL_PHASE1_W1_OFFSET_MASK I40E_MASK(0x3F, I40E_EMP_SWT_FLU_L1_ICL_PHASE1_W1_OFFSET_SHIFT)
+#define I40E_EMP_SWT_FLU_L1_ICL_PHASE1_W1_STATUS_SHIFT 14
+#define I40E_EMP_SWT_FLU_L1_ICL_PHASE1_W1_STATUS_MASK I40E_MASK(0x1, I40E_EMP_SWT_FLU_L1_ICL_PHASE1_W1_STATUS_SHIFT)
+#define I40E_EMP_SWT_FLU_L1_ICL_PHASE1_W2_OFFSET_SHIFT 16
+#define I40E_EMP_SWT_FLU_L1_ICL_PHASE1_W2_OFFSET_MASK I40E_MASK(0x3F, I40E_EMP_SWT_FLU_L1_ICL_PHASE1_W2_OFFSET_SHIFT)
+#define I40E_EMP_SWT_FLU_L1_ICL_PHASE1_W2_STATUS_SHIFT 22
+#define I40E_EMP_SWT_FLU_L1_ICL_PHASE1_W2_STATUS_MASK I40E_MASK(0x1, I40E_EMP_SWT_FLU_L1_ICL_PHASE1_W2_STATUS_SHIFT)
+#define I40E_EMP_SWT_FLU_L1_ICL_PHASE1_ETYPE_ENABLE_SHIFT 28
+#define I40E_EMP_SWT_FLU_L1_ICL_PHASE1_ETYPE_ENABLE_MASK I40E_MASK(0x1, I40E_EMP_SWT_FLU_L1_ICL_PHASE1_ETYPE_ENABLE_SHIFT)
+#define I40E_EMP_SWT_FLU_L1_ICL_PHASE1_IGNORE_PHASE_SHIFT 29
+#define I40E_EMP_SWT_FLU_L1_ICL_PHASE1_IGNORE_PHASE_MASK I40E_MASK(0x1, I40E_EMP_SWT_FLU_L1_ICL_PHASE1_IGNORE_PHASE_SHIFT)
+#define I40E_EMP_SWT_FLU_L1_ICL_PHASE1_EGRESS_SHIFT 30
+#define I40E_EMP_SWT_FLU_L1_ICL_PHASE1_EGRESS_MASK I40E_MASK(0x1, I40E_EMP_SWT_FLU_L1_ICL_PHASE1_EGRESS_SHIFT)
+#define I40E_EMP_SWT_FLU_L1_ICL_PHASE1_PORT_ENABLE_SHIFT 31
+#define I40E_EMP_SWT_FLU_L1_ICL_PHASE1_PORT_ENABLE_MASK I40E_MASK(0x1, I40E_EMP_SWT_FLU_L1_ICL_PHASE1_PORT_ENABLE_SHIFT)
+
+#define I40E_EMP_SWT_FLU_L2_IC_PHASE0(_i) (0x002696E0 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_EMP_SWT_FLU_L2_IC_PHASE0_MAX_INDEX 7
+#define I40E_EMP_SWT_FLU_L2_IC_PHASE0_FIELD0_L1_OBJECT_TYPE_SHIFT 0
+#define I40E_EMP_SWT_FLU_L2_IC_PHASE0_FIELD0_L1_OBJECT_TYPE_MASK I40E_MASK(0xF, I40E_EMP_SWT_FLU_L2_IC_PHASE0_FIELD0_L1_OBJECT_TYPE_SHIFT)
+#define I40E_EMP_SWT_FLU_L2_IC_PHASE0_FIELD0_ENABLE_SHIFT 4
+#define I40E_EMP_SWT_FLU_L2_IC_PHASE0_FIELD0_ENABLE_MASK I40E_MASK(0x1, I40E_EMP_SWT_FLU_L2_IC_PHASE0_FIELD0_ENABLE_SHIFT)
+#define I40E_EMP_SWT_FLU_L2_IC_PHASE0_FIELD1_L1_OBJECT_TYPE_SHIFT 5
+#define I40E_EMP_SWT_FLU_L2_IC_PHASE0_FIELD1_L1_OBJECT_TYPE_MASK I40E_MASK(0xF, I40E_EMP_SWT_FLU_L2_IC_PHASE0_FIELD1_L1_OBJECT_TYPE_SHIFT)
+#define I40E_EMP_SWT_FLU_L2_IC_PHASE0_FIELD1_ENABLE_SHIFT 9
+#define I40E_EMP_SWT_FLU_L2_IC_PHASE0_FIELD1_ENABLE_MASK I40E_MASK(0x1, I40E_EMP_SWT_FLU_L2_IC_PHASE0_FIELD1_ENABLE_SHIFT)
+#define I40E_EMP_SWT_FLU_L2_IC_PHASE0_FIELD2_L1_OBJECT_TYPE_SHIFT 10
+#define I40E_EMP_SWT_FLU_L2_IC_PHASE0_FIELD2_L1_OBJECT_TYPE_MASK I40E_MASK(0xF, I40E_EMP_SWT_FLU_L2_IC_PHASE0_FIELD2_L1_OBJECT_TYPE_SHIFT)
+#define I40E_EMP_SWT_FLU_L2_IC_PHASE0_FIELD2_ENABLE_SHIFT 14
+#define I40E_EMP_SWT_FLU_L2_IC_PHASE0_FIELD2_ENABLE_MASK I40E_MASK(0x1, I40E_EMP_SWT_FLU_L2_IC_PHASE0_FIELD2_ENABLE_SHIFT)
+#define I40E_EMP_SWT_FLU_L2_IC_PHASE0_ETYPE_ENABLE_SHIFT 18
+#define I40E_EMP_SWT_FLU_L2_IC_PHASE0_ETYPE_ENABLE_MASK I40E_MASK(0x1, I40E_EMP_SWT_FLU_L2_IC_PHASE0_ETYPE_ENABLE_SHIFT)
+#define I40E_EMP_SWT_FLU_L2_IC_PHASE0_IGNORE_PHASE_SHIFT 29
+#define I40E_EMP_SWT_FLU_L2_IC_PHASE0_IGNORE_PHASE_MASK I40E_MASK(0x1, I40E_EMP_SWT_FLU_L2_IC_PHASE0_IGNORE_PHASE_SHIFT)
+#define I40E_EMP_SWT_FLU_L2_IC_PHASE0_EGRESS_INGRESS_SHIFT 30
+#define I40E_EMP_SWT_FLU_L2_IC_PHASE0_EGRESS_INGRESS_MASK I40E_MASK(0x1, I40E_EMP_SWT_FLU_L2_IC_PHASE0_EGRESS_INGRESS_SHIFT)
+#define I40E_EMP_SWT_FLU_L2_IC_PHASE0_PORT_ENABLE_SHIFT 31
+#define I40E_EMP_SWT_FLU_L2_IC_PHASE0_PORT_ENABLE_MASK I40E_MASK(0x1, I40E_EMP_SWT_FLU_L2_IC_PHASE0_PORT_ENABLE_SHIFT)
+
+#define I40E_EMP_SWT_FLU_L2_IC_PHASE1(_i) (0x00269720 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_EMP_SWT_FLU_L2_IC_PHASE1_MAX_INDEX 7
+#define I40E_EMP_SWT_FLU_L2_IC_PHASE1_FIELD0_L1_OBJECT_TYPE_SHIFT 0
+#define I40E_EMP_SWT_FLU_L2_IC_PHASE1_FIELD0_L1_OBJECT_TYPE_MASK I40E_MASK(0xF, I40E_EMP_SWT_FLU_L2_IC_PHASE1_FIELD0_L1_OBJECT_TYPE_SHIFT)
+#define I40E_EMP_SWT_FLU_L2_IC_PHASE1_FIELD0_ENABLE_SHIFT 4
+#define I40E_EMP_SWT_FLU_L2_IC_PHASE1_FIELD0_ENABLE_MASK I40E_MASK(0x1, I40E_EMP_SWT_FLU_L2_IC_PHASE1_FIELD0_ENABLE_SHIFT)
+#define I40E_EMP_SWT_FLU_L2_IC_PHASE1_FIELD1_L1_OBJECT_TYPE_SHIFT 5
+#define I40E_EMP_SWT_FLU_L2_IC_PHASE1_FIELD1_L1_OBJECT_TYPE_MASK I40E_MASK(0xF, I40E_EMP_SWT_FLU_L2_IC_PHASE1_FIELD1_L1_OBJECT_TYPE_SHIFT)
+#define I40E_EMP_SWT_FLU_L2_IC_PHASE1_FIELD1_ENABLE_SHIFT 9
+#define I40E_EMP_SWT_FLU_L2_IC_PHASE1_FIELD1_ENABLE_MASK I40E_MASK(0x1, I40E_EMP_SWT_FLU_L2_IC_PHASE1_FIELD1_ENABLE_SHIFT)
+#define I40E_EMP_SWT_FLU_L2_IC_PHASE1_FIELD2_L1_OBJECT_TYPE_SHIFT 10
+#define I40E_EMP_SWT_FLU_L2_IC_PHASE1_FIELD2_L1_OBJECT_TYPE_MASK I40E_MASK(0xF, I40E_EMP_SWT_FLU_L2_IC_PHASE1_FIELD2_L1_OBJECT_TYPE_SHIFT)
+#define I40E_EMP_SWT_FLU_L2_IC_PHASE1_FIELD2_ENABLE_SHIFT 14
+#define I40E_EMP_SWT_FLU_L2_IC_PHASE1_FIELD2_ENABLE_MASK I40E_MASK(0x1, I40E_EMP_SWT_FLU_L2_IC_PHASE1_FIELD2_ENABLE_SHIFT)
+#define I40E_EMP_SWT_FLU_L2_IC_PHASE1_ETYPE_ENABLE_SHIFT 18
+#define I40E_EMP_SWT_FLU_L2_IC_PHASE1_ETYPE_ENABLE_MASK I40E_MASK(0x1, I40E_EMP_SWT_FLU_L2_IC_PHASE1_ETYPE_ENABLE_SHIFT)
+#define I40E_EMP_SWT_FLU_L2_IC_PHASE1_IGNORE_PHASE_SHIFT 29
+#define I40E_EMP_SWT_FLU_L2_IC_PHASE1_IGNORE_PHASE_MASK I40E_MASK(0x1, I40E_EMP_SWT_FLU_L2_IC_PHASE1_IGNORE_PHASE_SHIFT)
+#define I40E_EMP_SWT_FLU_L2_IC_PHASE1_EGRESS_INGRESS_SHIFT 30
+#define I40E_EMP_SWT_FLU_L2_IC_PHASE1_EGRESS_INGRESS_MASK I40E_MASK(0x1, I40E_EMP_SWT_FLU_L2_IC_PHASE1_EGRESS_INGRESS_SHIFT)
+#define I40E_EMP_SWT_FLU_L2_IC_PHASE1_PORT_ENABLE_SHIFT 31
+#define I40E_EMP_SWT_FLU_L2_IC_PHASE1_PORT_ENABLE_MASK I40E_MASK(0x1, I40E_EMP_SWT_FLU_L2_IC_PHASE1_PORT_ENABLE_SHIFT)
+
+#define I40E_EMP_SWT_LOCMD(_i) (0x00269460 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_EMP_SWT_LOCMD_MAX_INDEX 7
+#define I40E_EMP_SWT_LOCMD_COMMAND_SHIFT 0
+#define I40E_EMP_SWT_LOCMD_COMMAND_MASK I40E_MASK(0xFFFFFFFF, I40E_EMP_SWT_LOCMD_COMMAND_SHIFT)
+
+#define I40E_EMP_SWT_LOFV(_i) (0x00268D80 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_EMP_SWT_LOFV_MAX_INDEX 31
+#define I40E_EMP_SWT_LOFV_FIELDVECTOR_SHIFT 0
+#define I40E_EMP_SWT_LOFV_FIELDVECTOR_MASK I40E_MASK(0xFFFFFFFF, I40E_EMP_SWT_LOFV_FIELDVECTOR_SHIFT)
+
+#define I40E_EMP_SWT_MIREGVSI(_i, _j) (0x00263000 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...383 */ /* Reset: CORER */
+#define I40E_EMP_SWT_MIREGVSI_MAX_INDEX 1
+#define I40E_EMP_SWT_MIREGVSI_ENABLEDRULES_SHIFT 0
+#define I40E_EMP_SWT_MIREGVSI_ENABLEDRULES_MASK I40E_MASK(0xFFFFFFFF, I40E_EMP_SWT_MIREGVSI_ENABLEDRULES_SHIFT)
+
+#define I40E_EMP_SWT_MIRIGVSI(_i, _j) (0x00265000 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...383 */ /* Reset: CORER */
+#define I40E_EMP_SWT_MIRIGVSI_MAX_INDEX 1
+#define I40E_EMP_SWT_MIRIGVSI_ENABLEDRULES_SHIFT 0
+#define I40E_EMP_SWT_MIRIGVSI_ENABLEDRULES_MASK I40E_MASK(0xFFFFFFFF, I40E_EMP_SWT_MIRIGVSI_ENABLEDRULES_SHIFT)
+
+#define I40E_EMP_SWT_MIRTARVSI(_i) (0x00268B00 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */
+#define I40E_EMP_SWT_MIRTARVSI_MAX_INDEX 63
+#define I40E_EMP_SWT_MIRTARVSI_TARGETVSI_SHIFT 0
+#define I40E_EMP_SWT_MIRTARVSI_TARGETVSI_MASK I40E_MASK(0x1FF, I40E_EMP_SWT_MIRTARVSI_TARGETVSI_SHIFT)
+#define I40E_EMP_SWT_MIRTARVSI_VFVMNUMBER_SHIFT 9
+#define I40E_EMP_SWT_MIRTARVSI_VFVMNUMBER_MASK I40E_MASK(0x3FF, I40E_EMP_SWT_MIRTARVSI_VFVMNUMBER_SHIFT)
+#define I40E_EMP_SWT_MIRTARVSI_PFNUMBER_SHIFT 19
+#define I40E_EMP_SWT_MIRTARVSI_PFNUMBER_MASK I40E_MASK(0xF, I40E_EMP_SWT_MIRTARVSI_PFNUMBER_SHIFT)
+#define I40E_EMP_SWT_MIRTARVSI_FUNCTIONTYPE_SHIFT 23
+#define I40E_EMP_SWT_MIRTARVSI_FUNCTIONTYPE_MASK I40E_MASK(0x3, I40E_EMP_SWT_MIRTARVSI_FUNCTIONTYPE_SHIFT)
+#define I40E_EMP_SWT_MIRTARVSI_RULEENABLE_SHIFT 31
+#define I40E_EMP_SWT_MIRTARVSI_RULEENABLE_MASK I40E_MASK(0x1, I40E_EMP_SWT_MIRTARVSI_RULEENABLE_SHIFT)
+
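+/*
+ * Usage sketch (illustrative only): registers such as
+ * I40E_EMP_SWT_MIRTARVSI(_i) pack several fields into one 32-bit word.
+ * A value is typically composed by shifting each field into place and
+ * masking it to its width; the helper below is a hedged example of that
+ * pattern, not part of the hardware definition, and the u32 type is an
+ * assumption from the OS-dependent layer.
+ */
+static inline u32
+i40e_example_mirtarvsi_value(u32 target_vsi, u32 vf_vm, u32 pf, u32 ftype)
+{
+	/* Shift each field into position, mask to width, set RULEENABLE. */
+	return (((target_vsi << I40E_EMP_SWT_MIRTARVSI_TARGETVSI_SHIFT) &
+	    I40E_EMP_SWT_MIRTARVSI_TARGETVSI_MASK) |
+	    ((vf_vm << I40E_EMP_SWT_MIRTARVSI_VFVMNUMBER_SHIFT) &
+	    I40E_EMP_SWT_MIRTARVSI_VFVMNUMBER_MASK) |
+	    ((pf << I40E_EMP_SWT_MIRTARVSI_PFNUMBER_SHIFT) &
+	    I40E_EMP_SWT_MIRTARVSI_PFNUMBER_MASK) |
+	    ((ftype << I40E_EMP_SWT_MIRTARVSI_FUNCTIONTYPE_SHIFT) &
+	    I40E_EMP_SWT_MIRTARVSI_FUNCTIONTYPE_MASK) |
+	    I40E_EMP_SWT_MIRTARVSI_RULEENABLE_MASK);
+}
+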
+#define I40E_EMP_SWT_STS(_i) (0x002692C0 + ((_i) * 4)) /* _i=0...9 */ /* Reset: CORER */
+#define I40E_EMP_SWT_STS_MAX_INDEX 9
+#define I40E_EMP_SWT_STS_EMP_SWT_STS_SHIFT 0
+#define I40E_EMP_SWT_STS_EMP_SWT_STS_MASK I40E_MASK(0xFFFFFFFF, I40E_EMP_SWT_STS_EMP_SWT_STS_SHIFT)
+
+#define I40E_GL_MTG_FLU_MSK_L 0x00269F44 /* Reset: CORER */
+#define I40E_GL_MTG_FLU_MSK_L_MASK_LOW_SHIFT 0
+#define I40E_GL_MTG_FLU_MSK_L_MASK_LOW_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_MTG_FLU_MSK_L_MASK_LOW_SHIFT)
+
+#define I40E_GL_PRE_FLU_MSK_PH0_H(_i) (0x00269EA0 + ((_i) * 4)) /* _i=0...6 */ /* Reset: CORER */
+#define I40E_GL_PRE_FLU_MSK_PH0_H_MAX_INDEX 6
+#define I40E_GL_PRE_FLU_MSK_PH0_H_MASK_HIGH_SHIFT 0
+#define I40E_GL_PRE_FLU_MSK_PH0_H_MASK_HIGH_MASK I40E_MASK(0xFFFF, I40E_GL_PRE_FLU_MSK_PH0_H_MASK_HIGH_SHIFT)
+
+#define I40E_GL_PRE_FLU_MSK_PH0_L(_i) (0x00269E60 + ((_i) * 4)) /* _i=0...6 */ /* Reset: CORER */
+#define I40E_GL_PRE_FLU_MSK_PH0_L_MAX_INDEX 6
+#define I40E_GL_PRE_FLU_MSK_PH0_L_MASK_LOW_SHIFT 0
+#define I40E_GL_PRE_FLU_MSK_PH0_L_MASK_LOW_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_PRE_FLU_MSK_PH0_L_MASK_LOW_SHIFT)
+
+#define I40E_GL_PRE_FLU_MSK_PH1_H(_i) (0x00269F20 + ((_i) * 4)) /* _i=0...6 */ /* Reset: CORER */
+#define I40E_GL_PRE_FLU_MSK_PH1_H_MAX_INDEX 6
+#define I40E_GL_PRE_FLU_MSK_PH1_H_MASK_HIGH_SHIFT 0
+#define I40E_GL_PRE_FLU_MSK_PH1_H_MASK_HIGH_MASK I40E_MASK(0xFFFF, I40E_GL_PRE_FLU_MSK_PH1_H_MASK_HIGH_SHIFT)
+
+#define I40E_GL_PRE_FLU_MSK_PH1_L(_i) (0x00269EE0 + ((_i) * 4)) /* _i=0...6 */ /* Reset: CORER */
+#define I40E_GL_PRE_FLU_MSK_PH1_L_MAX_INDEX 6
+#define I40E_GL_PRE_FLU_MSK_PH1_L_MASK_LOW_SHIFT 0
+#define I40E_GL_PRE_FLU_MSK_PH1_L_MASK_LOW_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_PRE_FLU_MSK_PH1_L_MASK_LOW_SHIFT)
+
+#define I40E_GL_PRE_GEN_CFG 0x002699A4 /* Reset: CORER */
+#define I40E_GL_PRE_GEN_CFG_FILTER_ENABLE_SHIFT 0
+#define I40E_GL_PRE_GEN_CFG_FILTER_ENABLE_MASK I40E_MASK(0x1, I40E_GL_PRE_GEN_CFG_FILTER_ENABLE_SHIFT)
+#define I40E_GL_PRE_GEN_CFG_HASH_MODE_SHIFT 6
+#define I40E_GL_PRE_GEN_CFG_HASH_MODE_MASK I40E_MASK(0x3, I40E_GL_PRE_GEN_CFG_HASH_MODE_SHIFT)
+
+#define I40E_GL_PRE_PRX_BIG_ENT_D0 0x002699C4 /* Reset: CORER */
+#define I40E_GL_PRE_PRX_BIG_ENT_D0_F0_SRC_IDX_SHIFT 0
+#define I40E_GL_PRE_PRX_BIG_ENT_D0_F0_SRC_IDX_MASK I40E_MASK(0x3F, I40E_GL_PRE_PRX_BIG_ENT_D0_F0_SRC_IDX_SHIFT)
+#define I40E_GL_PRE_PRX_BIG_ENT_D0_F0_SRC_SEL_SHIFT 6
+#define I40E_GL_PRE_PRX_BIG_ENT_D0_F0_SRC_SEL_MASK I40E_MASK(0x1, I40E_GL_PRE_PRX_BIG_ENT_D0_F0_SRC_SEL_SHIFT)
+#define I40E_GL_PRE_PRX_BIG_ENT_D0_F0_SRC_VLD_SHIFT 7
+#define I40E_GL_PRE_PRX_BIG_ENT_D0_F0_SRC_VLD_MASK I40E_MASK(0x1, I40E_GL_PRE_PRX_BIG_ENT_D0_F0_SRC_VLD_SHIFT)
+#define I40E_GL_PRE_PRX_BIG_ENT_D0_F1_SRC_IDX_SHIFT 8
+#define I40E_GL_PRE_PRX_BIG_ENT_D0_F1_SRC_IDX_MASK I40E_MASK(0x3F, I40E_GL_PRE_PRX_BIG_ENT_D0_F1_SRC_IDX_SHIFT)
+#define I40E_GL_PRE_PRX_BIG_ENT_D0_F1_SRC_SEL_SHIFT 14
+#define I40E_GL_PRE_PRX_BIG_ENT_D0_F1_SRC_SEL_MASK I40E_MASK(0x1, I40E_GL_PRE_PRX_BIG_ENT_D0_F1_SRC_SEL_SHIFT)
+#define I40E_GL_PRE_PRX_BIG_ENT_D0_F1_SRC_VLD_SHIFT 15
+#define I40E_GL_PRE_PRX_BIG_ENT_D0_F1_SRC_VLD_MASK I40E_MASK(0x1, I40E_GL_PRE_PRX_BIG_ENT_D0_F1_SRC_VLD_SHIFT)
+#define I40E_GL_PRE_PRX_BIG_ENT_D0_F2_SRC_VLD_SHIFT 16
+#define I40E_GL_PRE_PRX_BIG_ENT_D0_F2_SRC_VLD_MASK I40E_MASK(0x3F, I40E_GL_PRE_PRX_BIG_ENT_D0_F2_SRC_VLD_SHIFT)
+#define I40E_GL_PRE_PRX_BIG_ENT_D0_F2_SRC_SEL_SHIFT 22
+#define I40E_GL_PRE_PRX_BIG_ENT_D0_F2_SRC_SEL_MASK I40E_MASK(0x1, I40E_GL_PRE_PRX_BIG_ENT_D0_F2_SRC_SEL_SHIFT)
+#define I40E_GL_PRE_PRX_BIG_ENT_D0_F2_SRC_IDX_SHIFT 23
+#define I40E_GL_PRE_PRX_BIG_ENT_D0_F2_SRC_IDX_MASK I40E_MASK(0x1, I40E_GL_PRE_PRX_BIG_ENT_D0_F2_SRC_IDX_SHIFT)
+#define I40E_GL_PRE_PRX_BIG_ENT_D0_F3_SRC_VLD_SHIFT 24
+#define I40E_GL_PRE_PRX_BIG_ENT_D0_F3_SRC_VLD_MASK I40E_MASK(0x3F, I40E_GL_PRE_PRX_BIG_ENT_D0_F3_SRC_VLD_SHIFT)
+#define I40E_GL_PRE_PRX_BIG_ENT_D0_F3_SRC_IDX_SHIFT 31
+#define I40E_GL_PRE_PRX_BIG_ENT_D0_F3_SRC_IDX_MASK I40E_MASK(0x1, I40E_GL_PRE_PRX_BIG_ENT_D0_F3_SRC_IDX_SHIFT)
+
+#define I40E_GL_PRE_PRX_BIG_ENT_D1 0x002699D4 /* Reset: CORER */
+#define I40E_GL_PRE_PRX_BIG_ENT_D1_F4_SRC_IDX_SHIFT 0
+#define I40E_GL_PRE_PRX_BIG_ENT_D1_F4_SRC_IDX_MASK I40E_MASK(0x3F, I40E_GL_PRE_PRX_BIG_ENT_D1_F4_SRC_IDX_SHIFT)
+#define I40E_GL_PRE_PRX_BIG_ENT_D1_F4_SRC_SEL_SHIFT 6
+#define I40E_GL_PRE_PRX_BIG_ENT_D1_F4_SRC_SEL_MASK I40E_MASK(0x1, I40E_GL_PRE_PRX_BIG_ENT_D1_F4_SRC_SEL_SHIFT)
+#define I40E_GL_PRE_PRX_BIG_ENT_D1_F4_SRC_VLD_SHIFT 7
+#define I40E_GL_PRE_PRX_BIG_ENT_D1_F4_SRC_VLD_MASK I40E_MASK(0x1, I40E_GL_PRE_PRX_BIG_ENT_D1_F4_SRC_VLD_SHIFT)
+#define I40E_GL_PRE_PRX_BIG_ENT_D1_F5_SRC_IDX_SHIFT 8
+#define I40E_GL_PRE_PRX_BIG_ENT_D1_F5_SRC_IDX_MASK I40E_MASK(0x3F, I40E_GL_PRE_PRX_BIG_ENT_D1_F5_SRC_IDX_SHIFT)
+#define I40E_GL_PRE_PRX_BIG_ENT_D1_F5_SRC_SEL_SHIFT 14
+#define I40E_GL_PRE_PRX_BIG_ENT_D1_F5_SRC_SEL_MASK I40E_MASK(0x1, I40E_GL_PRE_PRX_BIG_ENT_D1_F5_SRC_SEL_SHIFT)
+#define I40E_GL_PRE_PRX_BIG_ENT_D1_F5_SRC_VLD_SHIFT 15
+#define I40E_GL_PRE_PRX_BIG_ENT_D1_F5_SRC_VLD_MASK I40E_MASK(0x1, I40E_GL_PRE_PRX_BIG_ENT_D1_F5_SRC_VLD_SHIFT)
+#define I40E_GL_PRE_PRX_BIG_ENT_D1_F6_SRC_VLD_SHIFT 16
+#define I40E_GL_PRE_PRX_BIG_ENT_D1_F6_SRC_VLD_MASK I40E_MASK(0x3F, I40E_GL_PRE_PRX_BIG_ENT_D1_F6_SRC_VLD_SHIFT)
+#define I40E_GL_PRE_PRX_BIG_ENT_D1_F6_SRC_SEL_SHIFT 22
+#define I40E_GL_PRE_PRX_BIG_ENT_D1_F6_SRC_SEL_MASK I40E_MASK(0x1, I40E_GL_PRE_PRX_BIG_ENT_D1_F6_SRC_SEL_SHIFT)
+#define I40E_GL_PRE_PRX_BIG_ENT_D1_F6_SRC_IDX_SHIFT 23
+#define I40E_GL_PRE_PRX_BIG_ENT_D1_F6_SRC_IDX_MASK I40E_MASK(0x1, I40E_GL_PRE_PRX_BIG_ENT_D1_F6_SRC_IDX_SHIFT)
+#define I40E_GL_PRE_PRX_BIG_ENT_D1_F7_SRC_VLD_SHIFT 24
+#define I40E_GL_PRE_PRX_BIG_ENT_D1_F7_SRC_VLD_MASK I40E_MASK(0x3F, I40E_GL_PRE_PRX_BIG_ENT_D1_F7_SRC_VLD_SHIFT)
+#define I40E_GL_PRE_PRX_BIG_ENT_D1_F7_SRC_IDX_SHIFT 31
+#define I40E_GL_PRE_PRX_BIG_ENT_D1_F7_SRC_IDX_MASK I40E_MASK(0x1, I40E_GL_PRE_PRX_BIG_ENT_D1_F7_SRC_IDX_SHIFT)
+
+#define I40E_GL_PRE_PRX_BIG_ENT_D3 0x00269A0C /* Reset: CORER */
+#define I40E_GL_PRE_PRX_BIG_ENT_D3_BIT_MSK0_SHIFT 0
+#define I40E_GL_PRE_PRX_BIG_ENT_D3_BIT_MSK0_MASK I40E_MASK(0xFF, I40E_GL_PRE_PRX_BIG_ENT_D3_BIT_MSK0_SHIFT)
+
+#define I40E_GL_PRE_PRX_BIG_HSH_KEY_D1 0x00269A34 /* Reset: CORER */
+#define I40E_GL_PRE_PRX_BIG_HSH_KEY_D1_H1_SHIFT 0
+#define I40E_GL_PRE_PRX_BIG_HSH_KEY_D1_H1_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_PRE_PRX_BIG_HSH_KEY_D1_H1_SHIFT)
+
+#define I40E_GL_PRE_PRX_BIG_HSH_KEY_D3 0x00269A54 /* Reset: CORER */
+#define I40E_GL_PRE_PRX_BIG_HSH_KEY_D3_H3_SHIFT 0
+#define I40E_GL_PRE_PRX_BIG_HSH_KEY_D3_H3_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_PRE_PRX_BIG_HSH_KEY_D3_H3_SHIFT)
+
+#define I40E_GL_PRE_PRX_H_PHASE0 0x00269B74 /* Reset: CORER */
+#define I40E_GL_PRE_PRX_H_PHASE0_PROTOCOL_ID_SHIFT 0
+#define I40E_GL_PRE_PRX_H_PHASE0_PROTOCOL_ID_MASK I40E_MASK(0x3F, I40E_GL_PRE_PRX_H_PHASE0_PROTOCOL_ID_SHIFT)
+#define I40E_GL_PRE_PRX_H_PHASE0_IGNORE_PROTOCOL_SHIFT 6
+#define I40E_GL_PRE_PRX_H_PHASE0_IGNORE_PROTOCOL_MASK I40E_MASK(0x1, I40E_GL_PRE_PRX_H_PHASE0_IGNORE_PROTOCOL_SHIFT)
+#define I40E_GL_PRE_PRX_H_PHASE0_MASK0_INDEX_SHIFT 8
+#define I40E_GL_PRE_PRX_H_PHASE0_MASK0_INDEX_MASK I40E_MASK(0xF, I40E_GL_PRE_PRX_H_PHASE0_MASK0_INDEX_SHIFT)
+#define I40E_GL_PRE_PRX_H_PHASE0_MASK1_INDEX_SHIFT 12
+#define I40E_GL_PRE_PRX_H_PHASE0_MASK1_INDEX_MASK I40E_MASK(0xF, I40E_GL_PRE_PRX_H_PHASE0_MASK1_INDEX_SHIFT)
+#define I40E_GL_PRE_PRX_H_PHASE0_MASK0_BITS_SHIFT 16
+#define I40E_GL_PRE_PRX_H_PHASE0_MASK0_BITS_MASK I40E_MASK(0xFF, I40E_GL_PRE_PRX_H_PHASE0_MASK0_BITS_SHIFT)
+#define I40E_GL_PRE_PRX_H_PHASE0_MASK1_BITS_SHIFT 24
+#define I40E_GL_PRE_PRX_H_PHASE0_MASK1_BITS_MASK I40E_MASK(0xFF, I40E_GL_PRE_PRX_H_PHASE0_MASK1_BITS_SHIFT)
+
+#define I40E_GL_PRE_PRX_H_PHASE1 0x00269B7C /* Reset: CORER */
+#define I40E_GL_PRE_PRX_H_PHASE1_PROTOCOL_ID_SHIFT 0
+#define I40E_GL_PRE_PRX_H_PHASE1_PROTOCOL_ID_MASK I40E_MASK(0x3F, I40E_GL_PRE_PRX_H_PHASE1_PROTOCOL_ID_SHIFT)
+#define I40E_GL_PRE_PRX_H_PHASE1_IGNORE_PROTOCOL_SHIFT 6
+#define I40E_GL_PRE_PRX_H_PHASE1_IGNORE_PROTOCOL_MASK I40E_MASK(0x1, I40E_GL_PRE_PRX_H_PHASE1_IGNORE_PROTOCOL_SHIFT)
+#define I40E_GL_PRE_PRX_H_PHASE1_MASK0_INDEX_SHIFT 8
+#define I40E_GL_PRE_PRX_H_PHASE1_MASK0_INDEX_MASK I40E_MASK(0xF, I40E_GL_PRE_PRX_H_PHASE1_MASK0_INDEX_SHIFT)
+#define I40E_GL_PRE_PRX_H_PHASE1_MASK1_INDEX_SHIFT 12
+#define I40E_GL_PRE_PRX_H_PHASE1_MASK1_INDEX_MASK I40E_MASK(0xF, I40E_GL_PRE_PRX_H_PHASE1_MASK1_INDEX_SHIFT)
+#define I40E_GL_PRE_PRX_H_PHASE1_MASK0_BITS_SHIFT 16
+#define I40E_GL_PRE_PRX_H_PHASE1_MASK0_BITS_MASK I40E_MASK(0xFF, I40E_GL_PRE_PRX_H_PHASE1_MASK0_BITS_SHIFT)
+#define I40E_GL_PRE_PRX_H_PHASE1_MASK1_BITS_SHIFT 24
+#define I40E_GL_PRE_PRX_H_PHASE1_MASK1_BITS_MASK I40E_MASK(0xFF, I40E_GL_PRE_PRX_H_PHASE1_MASK1_BITS_SHIFT)
+
+#define I40E_GL_PRE_PRX_HSH_KEY_D0 0x00269A24 /* Reset: CORER */
+#define I40E_GL_PRE_PRX_HSH_KEY_D0_H0_SHIFT 0
+#define I40E_GL_PRE_PRX_HSH_KEY_D0_H0_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_PRE_PRX_HSH_KEY_D0_H0_SHIFT)
+
+#define I40E_GL_PRE_PRX_L_PHASE0 0x00269B8C /* Reset: CORER */
+#define I40E_GL_PRE_PRX_L_PHASE0_W0_OFFSET_SHIFT 0
+#define I40E_GL_PRE_PRX_L_PHASE0_W0_OFFSET_MASK I40E_MASK(0x3F, I40E_GL_PRE_PRX_L_PHASE0_W0_OFFSET_SHIFT)
+#define I40E_GL_PRE_PRX_L_PHASE0_W0_STATUS_SHIFT 6
+#define I40E_GL_PRE_PRX_L_PHASE0_W0_STATUS_MASK I40E_MASK(0x1, I40E_GL_PRE_PRX_L_PHASE0_W0_STATUS_SHIFT)
+#define I40E_GL_PRE_PRX_L_PHASE0_W0_VALID_SHIFT 7
+#define I40E_GL_PRE_PRX_L_PHASE0_W0_VALID_MASK I40E_MASK(0x1, I40E_GL_PRE_PRX_L_PHASE0_W0_VALID_SHIFT)
+#define I40E_GL_PRE_PRX_L_PHASE0_W1_OFFSET_SHIFT 8
+#define I40E_GL_PRE_PRX_L_PHASE0_W1_OFFSET_MASK I40E_MASK(0x3F, I40E_GL_PRE_PRX_L_PHASE0_W1_OFFSET_SHIFT)
+#define I40E_GL_PRE_PRX_L_PHASE0_W1_STATUS_SHIFT 14
+#define I40E_GL_PRE_PRX_L_PHASE0_W1_STATUS_MASK I40E_MASK(0x1, I40E_GL_PRE_PRX_L_PHASE0_W1_STATUS_SHIFT)
+#define I40E_GL_PRE_PRX_L_PHASE0_W1_VALID_SHIFT 15
+#define I40E_GL_PRE_PRX_L_PHASE0_W1_VALID_MASK I40E_MASK(0x1, I40E_GL_PRE_PRX_L_PHASE0_W1_VALID_SHIFT)
+#define I40E_GL_PRE_PRX_L_PHASE0_W2_OFFSET_SHIFT 16
+#define I40E_GL_PRE_PRX_L_PHASE0_W2_OFFSET_MASK I40E_MASK(0x3F, I40E_GL_PRE_PRX_L_PHASE0_W2_OFFSET_SHIFT)
+#define I40E_GL_PRE_PRX_L_PHASE0_W2_STATUS_SHIFT 22
+#define I40E_GL_PRE_PRX_L_PHASE0_W2_STATUS_MASK I40E_MASK(0x1, I40E_GL_PRE_PRX_L_PHASE0_W2_STATUS_SHIFT)
+#define I40E_GL_PRE_PRX_L_PHASE0_W2_VALID_SHIFT 23
+#define I40E_GL_PRE_PRX_L_PHASE0_W2_VALID_MASK I40E_MASK(0x1, I40E_GL_PRE_PRX_L_PHASE0_W2_VALID_SHIFT)
+#define I40E_GL_PRE_PRX_L_PHASE0_ETYPE_ENABLE_SHIFT 28
+#define I40E_GL_PRE_PRX_L_PHASE0_ETYPE_ENABLE_MASK I40E_MASK(0x1, I40E_GL_PRE_PRX_L_PHASE0_ETYPE_ENABLE_SHIFT)
+#define I40E_GL_PRE_PRX_L_PHASE0_PRUNE_SHIFT 29
+#define I40E_GL_PRE_PRX_L_PHASE0_PRUNE_MASK I40E_MASK(0x1, I40E_GL_PRE_PRX_L_PHASE0_PRUNE_SHIFT)
+#define I40E_GL_PRE_PRX_L_PHASE0_EGRESS_SHIFT 30
+#define I40E_GL_PRE_PRX_L_PHASE0_EGRESS_MASK I40E_MASK(0x1, I40E_GL_PRE_PRX_L_PHASE0_EGRESS_SHIFT)
+#define I40E_GL_PRE_PRX_L_PHASE0_PORT_ENABLE_SHIFT 31
+#define I40E_GL_PRE_PRX_L_PHASE0_PORT_ENABLE_MASK I40E_MASK(0x1, I40E_GL_PRE_PRX_L_PHASE0_PORT_ENABLE_SHIFT)
+
+#define I40E_GL_PRE_PRX_L_PHASE1 0x00269B84 /* Reset: CORER */
+#define I40E_GL_PRE_PRX_L_PHASE1_W0_OFFSET_SHIFT 0
+#define I40E_GL_PRE_PRX_L_PHASE1_W0_OFFSET_MASK I40E_MASK(0x3F, I40E_GL_PRE_PRX_L_PHASE1_W0_OFFSET_SHIFT)
+#define I40E_GL_PRE_PRX_L_PHASE1_W0_STATUS_SHIFT 6
+#define I40E_GL_PRE_PRX_L_PHASE1_W0_STATUS_MASK I40E_MASK(0x1, I40E_GL_PRE_PRX_L_PHASE1_W0_STATUS_SHIFT)
+#define I40E_GL_PRE_PRX_L_PHASE1_W0_VALID_SHIFT 7
+#define I40E_GL_PRE_PRX_L_PHASE1_W0_VALID_MASK I40E_MASK(0x1, I40E_GL_PRE_PRX_L_PHASE1_W0_VALID_SHIFT)
+#define I40E_GL_PRE_PRX_L_PHASE1_W1_OFFSET_SHIFT 8
+#define I40E_GL_PRE_PRX_L_PHASE1_W1_OFFSET_MASK I40E_MASK(0x3F, I40E_GL_PRE_PRX_L_PHASE1_W1_OFFSET_SHIFT)
+#define I40E_GL_PRE_PRX_L_PHASE1_W1_STATUS_SHIFT 14
+#define I40E_GL_PRE_PRX_L_PHASE1_W1_STATUS_MASK I40E_MASK(0x1, I40E_GL_PRE_PRX_L_PHASE1_W1_STATUS_SHIFT)
+#define I40E_GL_PRE_PRX_L_PHASE1_W1_VALID_SHIFT 15
+#define I40E_GL_PRE_PRX_L_PHASE1_W1_VALID_MASK I40E_MASK(0x1, I40E_GL_PRE_PRX_L_PHASE1_W1_VALID_SHIFT)
+#define I40E_GL_PRE_PRX_L_PHASE1_W2_OFFSET_SHIFT 16
+#define I40E_GL_PRE_PRX_L_PHASE1_W2_OFFSET_MASK I40E_MASK(0x3F, I40E_GL_PRE_PRX_L_PHASE1_W2_OFFSET_SHIFT)
+#define I40E_GL_PRE_PRX_L_PHASE1_W2_STATUS_SHIFT 22
+#define I40E_GL_PRE_PRX_L_PHASE1_W2_STATUS_MASK I40E_MASK(0x1, I40E_GL_PRE_PRX_L_PHASE1_W2_STATUS_SHIFT)
+#define I40E_GL_PRE_PRX_L_PHASE1_W2_VALID_SHIFT 23
+#define I40E_GL_PRE_PRX_L_PHASE1_W2_VALID_MASK I40E_MASK(0x1, I40E_GL_PRE_PRX_L_PHASE1_W2_VALID_SHIFT)
+#define I40E_GL_PRE_PRX_L_PHASE1_ETYPE_ENABLE_SHIFT 28
+#define I40E_GL_PRE_PRX_L_PHASE1_ETYPE_ENABLE_MASK I40E_MASK(0x1, I40E_GL_PRE_PRX_L_PHASE1_ETYPE_ENABLE_SHIFT)
+#define I40E_GL_PRE_PRX_L_PHASE1_PRUNE_SHIFT 29
+#define I40E_GL_PRE_PRX_L_PHASE1_PRUNE_MASK I40E_MASK(0x1, I40E_GL_PRE_PRX_L_PHASE1_PRUNE_SHIFT)
+#define I40E_GL_PRE_PRX_L_PHASE1_EGRESS_SHIFT 30
+#define I40E_GL_PRE_PRX_L_PHASE1_EGRESS_MASK I40E_MASK(0x1, I40E_GL_PRE_PRX_L_PHASE1_EGRESS_SHIFT)
+#define I40E_GL_PRE_PRX_L_PHASE1_PORT_ENABLE_SHIFT 31
+#define I40E_GL_PRE_PRX_L_PHASE1_PORT_ENABLE_MASK I40E_MASK(0x1, I40E_GL_PRE_PRX_L_PHASE1_PORT_ENABLE_SHIFT)
+
+#define I40E_GL_SW_SWT_STS(_i) (0x00269340 + ((_i) * 4)) /* _i=0...9 */ /* Reset: CORER */
+#define I40E_GL_SW_SWT_STS_MAX_INDEX 9
+#define I40E_GL_SW_SWT_STS_EMP_SWT_STS_SHIFT 0
+#define I40E_GL_SW_SWT_STS_EMP_SWT_STS_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SW_SWT_STS_EMP_SWT_STS_SHIFT)
+
+#define I40E_GL_SWR_FILTERS_NEED_HIT(_i) (0x0026CF00 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_GL_SWR_FILTERS_NEED_HIT_MAX_INDEX 1
+#define I40E_GL_SWR_FILTERS_NEED_HIT_FILTERS_NEED_HIT_SHIFT 0
+#define I40E_GL_SWR_FILTERS_NEED_HIT_FILTERS_NEED_HIT_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_FILTERS_NEED_HIT_FILTERS_NEED_HIT_SHIFT)
+
+#define I40E_GL_SWR_FILTERS_NEED_MISS(_i) (0x0026CF10 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_GL_SWR_FILTERS_NEED_MISS_MAX_INDEX 1
+#define I40E_GL_SWR_FILTERS_NEED_MISS_FILTERS_NEED_MISS_SHIFT 0
+#define I40E_GL_SWR_FILTERS_NEED_MISS_FILTERS_NEED_MISS_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_FILTERS_NEED_MISS_FILTERS_NEED_MISS_SHIFT)
+
+#define I40E_GL_SWR_HIT_FILTERS(_i) (0x0026CF08 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_GL_SWR_HIT_FILTERS_MAX_INDEX 1
+#define I40E_GL_SWR_HIT_FILTERS_HIT_FILTERS_SHIFT 0
+#define I40E_GL_SWR_HIT_FILTERS_HIT_FILTERS_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_HIT_FILTERS_HIT_FILTERS_SHIFT)
+
+#define I40E_GL_SWR_MISS_FILTERS(_i) (0x0026CF18 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_GL_SWR_MISS_FILTERS_MAX_INDEX 1
+#define I40E_GL_SWR_MISS_FILTERS_MISS_FILTERS_SHIFT 0
+#define I40E_GL_SWR_MISS_FILTERS_MISS_FILTERS_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_MISS_FILTERS_MISS_FILTERS_SHIFT)
+
+#define I40E_GL_SWR_PRI_JOIN_MAP(_i) (0x0026CE20 + ((_i) * 4)) /* _i=0...8 */ /* Reset: CORER */
+#define I40E_GL_SWR_PRI_JOIN_MAP_MAX_INDEX 8
+#define I40E_GL_SWR_PRI_JOIN_MAP_GL_SWR_PRI_MAP_SHIFT 0
+#define I40E_GL_SWR_PRI_JOIN_MAP_GL_SWR_PRI_MAP_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_PRI_JOIN_MAP_GL_SWR_PRI_MAP_SHIFT)
+
+#define I40E_GL_SWR_PRI_MAP(_i) (0x0026CDE0 + ((_i) * 4)) /* _i=0...8 */ /* Reset: CORER */
+#define I40E_GL_SWR_PRI_MAP_MAX_INDEX 8
+#define I40E_GL_SWR_PRI_MAP_GL_SWR_PRI_MAP_SHIFT 0
+#define I40E_GL_SWR_PRI_MAP_GL_SWR_PRI_MAP_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_PRI_MAP_GL_SWR_PRI_MAP_SHIFT)
+
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D0 0x002699BC /* Reset: CORER */
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D0_F0_SRC_IDX_SHIFT 0
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D0_F0_SRC_IDX_MASK I40E_MASK(0x3F, I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D0_F0_SRC_IDX_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D0_F0_SRC_SEL_SHIFT 6
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D0_F0_SRC_SEL_MASK I40E_MASK(0x1, I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D0_F0_SRC_SEL_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D0_F0_SRC_VLD_SHIFT 7
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D0_F0_SRC_VLD_MASK I40E_MASK(0x1, I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D0_F0_SRC_VLD_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D0_F1_SRC_IDX_SHIFT 8
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D0_F1_SRC_IDX_MASK I40E_MASK(0x3F, I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D0_F1_SRC_IDX_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D0_F1_SRC_SEL_SHIFT 14
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D0_F1_SRC_SEL_MASK I40E_MASK(0x1, I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D0_F1_SRC_SEL_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D0_F1_SRC_VLD_SHIFT 15
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D0_F1_SRC_VLD_MASK I40E_MASK(0x1, I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D0_F1_SRC_VLD_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D0_F2_SRC_VLD_SHIFT 16
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D0_F2_SRC_VLD_MASK I40E_MASK(0x3F, I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D0_F2_SRC_VLD_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D0_F2_SRC_SEL_SHIFT 22
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D0_F2_SRC_SEL_MASK I40E_MASK(0x1, I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D0_F2_SRC_SEL_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D0_F2_SRC_IDX_SHIFT 23
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D0_F2_SRC_IDX_MASK I40E_MASK(0x1, I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D0_F2_SRC_IDX_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D0_F3_SRC_VLD_SHIFT 24
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D0_F3_SRC_VLD_MASK I40E_MASK(0x3F, I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D0_F3_SRC_VLD_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D0_F3_SRC_IDX_SHIFT 31
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D0_F3_SRC_IDX_MASK I40E_MASK(0x1, I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D0_F3_SRC_IDX_SHIFT)
+
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D1 0x002699CC /* Reset: CORER */
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D1_F4_SRC_IDX_SHIFT 0
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D1_F4_SRC_IDX_MASK I40E_MASK(0x3F, I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D1_F4_SRC_IDX_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D1_F4_SRC_SEL_SHIFT 6
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D1_F4_SRC_SEL_MASK I40E_MASK(0x1, I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D1_F4_SRC_SEL_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D1_F4_SRC_VLD_SHIFT 7
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D1_F4_SRC_VLD_MASK I40E_MASK(0x1, I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D1_F4_SRC_VLD_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D1_F5_SRC_IDX_SHIFT 8
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D1_F5_SRC_IDX_MASK I40E_MASK(0x3F, I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D1_F5_SRC_IDX_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D1_F5_SRC_SEL_SHIFT 14
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D1_F5_SRC_SEL_MASK I40E_MASK(0x1, I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D1_F5_SRC_SEL_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D1_F5_SRC_VLD_SHIFT 15
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D1_F5_SRC_VLD_MASK I40E_MASK(0x1, I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D1_F5_SRC_VLD_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D1_F6_SRC_VLD_SHIFT 16
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D1_F6_SRC_VLD_MASK I40E_MASK(0x3F, I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D1_F6_SRC_VLD_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D1_F6_SRC_SEL_SHIFT 22
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D1_F6_SRC_SEL_MASK I40E_MASK(0x1, I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D1_F6_SRC_SEL_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D1_F6_SRC_IDX_SHIFT 23
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D1_F6_SRC_IDX_MASK I40E_MASK(0x1, I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D1_F6_SRC_IDX_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D1_F7_SRC_VLD_SHIFT 24
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D1_F7_SRC_VLD_MASK I40E_MASK(0x3F, I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D1_F7_SRC_VLD_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D1_F7_SRC_IDX_SHIFT 31
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D1_F7_SRC_IDX_MASK I40E_MASK(0x1, I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D1_F7_SRC_IDX_SHIFT)
+
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D2 0x002699FC /* Reset: CORER */
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D2_USE_PHASE_SHIFT 0
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D2_USE_PHASE_MASK I40E_MASK(0x1, I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D2_USE_PHASE_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D2_USE_INGR_SHIFT 1
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D2_USE_INGR_MASK I40E_MASK(0x1, I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D2_USE_INGR_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D2_USE_PORT_SHIFT 7
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D2_USE_PORT_MASK I40E_MASK(0x1, I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D2_USE_PORT_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D2_TR_INDEX_SHIFT 8
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D2_TR_INDEX_MASK I40E_MASK(0x3F, I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D2_TR_INDEX_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D2_USE_MAN_SHIFT 14
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D2_USE_MAN_MASK I40E_MASK(0x1, I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D2_USE_MAN_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D2_USE_TR_SHIFT 15
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D2_USE_TR_MASK I40E_MASK(0x1, I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D2_USE_TR_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D2_BYTE_MSK0_SHIFT 16
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D2_BYTE_MSK0_MASK I40E_MASK(0xF, I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D2_BYTE_MSK0_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D2_BYTE_MSK1_SHIFT 20
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D2_BYTE_MSK1_MASK I40E_MASK(0xF, I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D2_BYTE_MSK1_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D2_BIT_MSK0_SHIFT 24
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D2_BIT_MSK0_MASK I40E_MASK(0xFF, I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D2_BIT_MSK0_SHIFT)
+
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D3 0x00269A14 /* Reset: CORER */
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D3_BIT_MSK0_SHIFT 0
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D3_BIT_MSK0_MASK I40E_MASK(0xFF, I40E_GL_SWT_FLU_BIG_ENT_PHASE0_D3_BIT_MSK0_SHIFT)
+
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D0 0x002699DC /* Reset: CORER */
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D0_F0_SRC_IDX_SHIFT 0
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D0_F0_SRC_IDX_MASK I40E_MASK(0x3F, I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D0_F0_SRC_IDX_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D0_F0_SRC_SEL_SHIFT 6
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D0_F0_SRC_SEL_MASK I40E_MASK(0x1, I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D0_F0_SRC_SEL_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D0_F0_SRC_VLD_SHIFT 7
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D0_F0_SRC_VLD_MASK I40E_MASK(0x1, I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D0_F0_SRC_VLD_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D0_F1_SRC_IDX_SHIFT 8
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D0_F1_SRC_IDX_MASK I40E_MASK(0x3F, I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D0_F1_SRC_IDX_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D0_F1_SRC_SEL_SHIFT 14
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D0_F1_SRC_SEL_MASK I40E_MASK(0x1, I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D0_F1_SRC_SEL_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D0_F1_SRC_VLD_SHIFT 15
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D0_F1_SRC_VLD_MASK I40E_MASK(0x1, I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D0_F1_SRC_VLD_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D0_F2_SRC_VLD_SHIFT 16
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D0_F2_SRC_VLD_MASK I40E_MASK(0x3F, I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D0_F2_SRC_VLD_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D0_F2_SRC_SEL_SHIFT 22
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D0_F2_SRC_SEL_MASK I40E_MASK(0x1, I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D0_F2_SRC_SEL_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D0_F2_SRC_IDX_SHIFT 23
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D0_F2_SRC_IDX_MASK I40E_MASK(0x1, I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D0_F2_SRC_IDX_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D0_F3_SRC_VLD_SHIFT 24
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D0_F3_SRC_VLD_MASK I40E_MASK(0x3F, I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D0_F3_SRC_VLD_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D0_F3_SRC_IDX_SHIFT 31
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D0_F3_SRC_IDX_MASK I40E_MASK(0x1, I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D0_F3_SRC_IDX_SHIFT)
+
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D1 0x002699E4 /* Reset: CORER */
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D1_F4_SRC_IDX_SHIFT 0
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D1_F4_SRC_IDX_MASK I40E_MASK(0x3F, I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D1_F4_SRC_IDX_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D1_F4_SRC_SEL_SHIFT 6
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D1_F4_SRC_SEL_MASK I40E_MASK(0x1, I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D1_F4_SRC_SEL_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D1_F4_SRC_VLD_SHIFT 7
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D1_F4_SRC_VLD_MASK I40E_MASK(0x1, I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D1_F4_SRC_VLD_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D1_F5_SRC_IDX_SHIFT 8
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D1_F5_SRC_IDX_MASK I40E_MASK(0x3F, I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D1_F5_SRC_IDX_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D1_F5_SRC_SEL_SHIFT 14
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D1_F5_SRC_SEL_MASK I40E_MASK(0x1, I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D1_F5_SRC_SEL_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D1_F5_SRC_VLD_SHIFT 15
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D1_F5_SRC_VLD_MASK I40E_MASK(0x1, I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D1_F5_SRC_VLD_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D1_F6_SRC_VLD_SHIFT 16
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D1_F6_SRC_VLD_MASK I40E_MASK(0x3F, I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D1_F6_SRC_VLD_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D1_F6_SRC_SEL_SHIFT 22
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D1_F6_SRC_SEL_MASK I40E_MASK(0x1, I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D1_F6_SRC_SEL_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D1_F6_SRC_IDX_SHIFT 23
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D1_F6_SRC_IDX_MASK I40E_MASK(0x1, I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D1_F6_SRC_IDX_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D1_F7_SRC_VLD_SHIFT 24
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D1_F7_SRC_VLD_MASK I40E_MASK(0x3F, I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D1_F7_SRC_VLD_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D1_F7_SRC_IDX_SHIFT 31
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D1_F7_SRC_IDX_MASK I40E_MASK(0x1, I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D1_F7_SRC_IDX_SHIFT)
+
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D2 0x002699F4 /* Reset: CORER */
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D2_USE_PHASE_SHIFT 0
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D2_USE_PHASE_MASK I40E_MASK(0x1, I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D2_USE_PHASE_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D2_USE_INGR_SHIFT 1
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D2_USE_INGR_MASK I40E_MASK(0x1, I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D2_USE_INGR_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D2_USE_PORT_SHIFT 7
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D2_USE_PORT_MASK I40E_MASK(0x1, I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D2_USE_PORT_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D2_TR_INDEX_SHIFT 8
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D2_TR_INDEX_MASK I40E_MASK(0x3F, I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D2_TR_INDEX_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D2_USE_MAN_SHIFT 14
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D2_USE_MAN_MASK I40E_MASK(0x1, I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D2_USE_MAN_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D2_USE_TR_SHIFT 15
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D2_USE_TR_MASK I40E_MASK(0x1, I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D2_USE_TR_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D2_BYTE_MSK0_SHIFT 16
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D2_BYTE_MSK0_MASK I40E_MASK(0xF, I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D2_BYTE_MSK0_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D2_BYTE_MSK1_SHIFT 20
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D2_BYTE_MSK1_MASK I40E_MASK(0xF, I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D2_BYTE_MSK1_SHIFT)
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D2_BIT_MSK0_SHIFT 24
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D2_BIT_MSK0_MASK I40E_MASK(0xFF, I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D2_BIT_MSK0_SHIFT)
+
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D3 0x00269A04 /* Reset: CORER */
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D3_BIT_MSK0_SHIFT 0
+#define I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D3_BIT_MSK0_MASK I40E_MASK(0xFF, I40E_GL_SWT_FLU_BIG_ENT_PHASE1_D3_BIT_MSK0_SHIFT)
+
+#define I40E_GL_SWT_LOCMD_PE(_i) (0x002694A0 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_GL_SWT_LOCMD_PE_MAX_INDEX 7
+#define I40E_GL_SWT_LOCMD_PE_COMMAND_SHIFT 0
+#define I40E_GL_SWT_LOCMD_PE_COMMAND_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWT_LOCMD_PE_COMMAND_SHIFT)
+
+#define I40E_GL_SWT_LOCMD_SW(_i) (0x002694E0 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_GL_SWT_LOCMD_SW_MAX_INDEX 7
+#define I40E_GL_SWT_LOCMD_SW_COMMAND_SHIFT 0
+#define I40E_GL_SWT_LOCMD_SW_COMMAND_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWT_LOCMD_SW_COMMAND_SHIFT)
+
+#define I40E_GL_SWT_LOFV_PE(_i) (0x00268E80 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GL_SWT_LOFV_PE_MAX_INDEX 31
+#define I40E_GL_SWT_LOFV_PE_FIELDVECTOR_SHIFT 0
+#define I40E_GL_SWT_LOFV_PE_FIELDVECTOR_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWT_LOFV_PE_FIELDVECTOR_SHIFT)
+
+#define I40E_GL_SWT_LOFV_SW(_i) (0x00268F80 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GL_SWT_LOFV_SW_MAX_INDEX 31
+#define I40E_GL_SWT_LOFV_SW_FIELDVECTOR_SHIFT 0
+#define I40E_GL_SWT_LOFV_SW_FIELDVECTOR_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWT_LOFV_SW_FIELDVECTOR_SHIFT)
+
+#define I40E_PRT_MSCCNT 0x00256BA0 /* Reset: CORER */
+#define I40E_PRT_MSCCNT_CCOUNT_SHIFT 0
+#define I40E_PRT_MSCCNT_CCOUNT_MASK I40E_MASK(0x1FFFFFF, I40E_PRT_MSCCNT_CCOUNT_SHIFT)
+
+#define I40E_PRT_SBPVSI 0x00256BE0 /* Reset: CORER */
+#define I40E_PRT_SBPVSI_BAD_FRAMES_VSI_SHIFT 0
+#define I40E_PRT_SBPVSI_BAD_FRAMES_VSI_MASK I40E_MASK(0x1FF, I40E_PRT_SBPVSI_BAD_FRAMES_VSI_SHIFT)
+#define I40E_PRT_SBPVSI_SBP_SHIFT 31
+#define I40E_PRT_SBPVSI_SBP_MASK I40E_MASK(0x1, I40E_PRT_SBPVSI_SBP_SHIFT)
+
+#define I40E_PRT_SCSTS 0x00256C20 /* Reset: CORER */
+#define I40E_PRT_SCSTS_BSCA_SHIFT 0
+#define I40E_PRT_SCSTS_BSCA_MASK I40E_MASK(0x1, I40E_PRT_SCSTS_BSCA_SHIFT)
+#define I40E_PRT_SCSTS_BSCAP_SHIFT 1
+#define I40E_PRT_SCSTS_BSCAP_MASK I40E_MASK(0x1, I40E_PRT_SCSTS_BSCAP_SHIFT)
+#define I40E_PRT_SCSTS_MSCA_SHIFT 2
+#define I40E_PRT_SCSTS_MSCA_MASK I40E_MASK(0x1, I40E_PRT_SCSTS_MSCA_SHIFT)
+#define I40E_PRT_SCSTS_MSCAP_SHIFT 3
+#define I40E_PRT_SCSTS_MSCAP_MASK I40E_MASK(0x1, I40E_PRT_SCSTS_MSCAP_SHIFT)
+
+#define I40E_PRT_SWT_BSCCNT 0x00256C60 /* Reset: CORER */
+#define I40E_PRT_SWT_BSCCNT_CCOUNT_SHIFT 0
+#define I40E_PRT_SWT_BSCCNT_CCOUNT_MASK I40E_MASK(0x1FFFFFF, I40E_PRT_SWT_BSCCNT_CCOUNT_SHIFT)
+
+#define I40E_PRT_SWT_BSCTRH 0x00256CA0 /* Reset: CORER */
+#define I40E_PRT_SWT_BSCTRH_UTRESH_SHIFT 0
+#define I40E_PRT_SWT_BSCTRH_UTRESH_MASK I40E_MASK(0x7FFFF, I40E_PRT_SWT_BSCTRH_UTRESH_SHIFT)
+
+#define I40E_PRT_SWT_DEFPORTS 0x00256CE0 /* Reset: CORER */
+#define I40E_PRT_SWT_DEFPORTS_DEFAULT_VSI_SHIFT 0
+#define I40E_PRT_SWT_DEFPORTS_DEFAULT_VSI_MASK I40E_MASK(0x1FF, I40E_PRT_SWT_DEFPORTS_DEFAULT_VSI_SHIFT)
+#define I40E_PRT_SWT_DEFPORTS_DEFAULT_VSI_VALID_SHIFT 31
+#define I40E_PRT_SWT_DEFPORTS_DEFAULT_VSI_VALID_MASK I40E_MASK(0x1, I40E_PRT_SWT_DEFPORTS_DEFAULT_VSI_VALID_SHIFT)
+
+#define I40E_PRT_SWT_MSCTRH 0x00256D20 /* Reset: CORER */
+#define I40E_PRT_SWT_MSCTRH_UTRESH_SHIFT 0
+#define I40E_PRT_SWT_MSCTRH_UTRESH_MASK I40E_MASK(0x7FFFF, I40E_PRT_SWT_MSCTRH_UTRESH_SHIFT)
+
+#define I40E_PRT_SWT_SCBI 0x00256D60 /* Reset: CORER */
+#define I40E_PRT_SWT_SCBI_BI_SHIFT 0
+#define I40E_PRT_SWT_SCBI_BI_MASK I40E_MASK(0x1FFFFFF, I40E_PRT_SWT_SCBI_BI_SHIFT)
+
+#define I40E_PRT_SWT_SCCRL 0x00256DA0 /* Reset: CORER */
+#define I40E_PRT_SWT_SCCRL_MDIPW_SHIFT 0
+#define I40E_PRT_SWT_SCCRL_MDIPW_MASK I40E_MASK(0x1, I40E_PRT_SWT_SCCRL_MDIPW_SHIFT)
+#define I40E_PRT_SWT_SCCRL_MDICW_SHIFT 1
+#define I40E_PRT_SWT_SCCRL_MDICW_MASK I40E_MASK(0x1, I40E_PRT_SWT_SCCRL_MDICW_SHIFT)
+#define I40E_PRT_SWT_SCCRL_BDIPW_SHIFT 2
+#define I40E_PRT_SWT_SCCRL_BDIPW_MASK I40E_MASK(0x1, I40E_PRT_SWT_SCCRL_BDIPW_SHIFT)
+#define I40E_PRT_SWT_SCCRL_BDICW_SHIFT 3
+#define I40E_PRT_SWT_SCCRL_BDICW_MASK I40E_MASK(0x1, I40E_PRT_SWT_SCCRL_BDICW_SHIFT)
+#define I40E_PRT_SWT_SCCRL_BIDU_SHIFT 4
+#define I40E_PRT_SWT_SCCRL_BIDU_MASK I40E_MASK(0x1, I40E_PRT_SWT_SCCRL_BIDU_SHIFT)
+#define I40E_PRT_SWT_SCCRL_INTERVAL_SHIFT 8
+#define I40E_PRT_SWT_SCCRL_INTERVAL_MASK I40E_MASK(0x3FF, I40E_PRT_SWT_SCCRL_INTERVAL_SHIFT)
+
+#define I40E_PRT_SWT_SCTC 0x00256DE0 /* Reset: CORER */
+#define I40E_PRT_SWT_SCTC_COUNT_SHIFT 0
+#define I40E_PRT_SWT_SCTC_COUNT_MASK I40E_MASK(0x3FF, I40E_PRT_SWT_SCTC_COUNT_SHIFT)
+
+#define I40E_PRT_SWT_SWITCHID 0x00256E20 /* Reset: CORER */
+#define I40E_PRT_SWT_SWITCHID_SWID_SHIFT 0
+#define I40E_PRT_SWT_SWITCHID_SWID_MASK I40E_MASK(0xFFF, I40E_PRT_SWT_SWITCHID_SWID_SHIFT)
+#define I40E_PRT_SWT_SWITCHID_ISNSTAG_SHIFT 12
+#define I40E_PRT_SWT_SWITCHID_ISNSTAG_MASK I40E_MASK(0x1, I40E_PRT_SWT_SWITCHID_ISNSTAG_SHIFT)
+#define I40E_PRT_SWT_SWITCHID_SWIDVALID_SHIFT 13
+#define I40E_PRT_SWT_SWITCHID_SWIDVALID_MASK I40E_MASK(0x1, I40E_PRT_SWT_SWITCHID_SWIDVALID_SHIFT)
+#define I40E_PRT_SWT_SWITCHID_FORWARD_MUTICAST_ETAG_SHIFT 31
+#define I40E_PRT_SWT_SWITCHID_FORWARD_MUTICAST_ETAG_MASK I40E_MASK(0x1, I40E_PRT_SWT_SWITCHID_FORWARD_MUTICAST_ETAG_SHIFT)
+
+#define I40E_PRT_TCTUPR(_i) (0x00044000 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRT_TCTUPR_MAX_INDEX 7
+#define I40E_PRT_TCTUPR_UP0_SHIFT 0
+#define I40E_PRT_TCTUPR_UP0_MASK I40E_MASK(0x7, I40E_PRT_TCTUPR_UP0_SHIFT)
+#define I40E_PRT_TCTUPR_UP1_SHIFT 3
+#define I40E_PRT_TCTUPR_UP1_MASK I40E_MASK(0x7, I40E_PRT_TCTUPR_UP1_SHIFT)
+#define I40E_PRT_TCTUPR_UP2_SHIFT 6
+#define I40E_PRT_TCTUPR_UP2_MASK I40E_MASK(0x7, I40E_PRT_TCTUPR_UP2_SHIFT)
+#define I40E_PRT_TCTUPR_UP3_SHIFT 9
+#define I40E_PRT_TCTUPR_UP3_MASK I40E_MASK(0x7, I40E_PRT_TCTUPR_UP3_SHIFT)
+#define I40E_PRT_TCTUPR_UP4_SHIFT 12
+#define I40E_PRT_TCTUPR_UP4_MASK I40E_MASK(0x7, I40E_PRT_TCTUPR_UP4_SHIFT)
+#define I40E_PRT_TCTUPR_UP5_SHIFT 15
+#define I40E_PRT_TCTUPR_UP5_MASK I40E_MASK(0x7, I40E_PRT_TCTUPR_UP5_SHIFT)
+#define I40E_PRT_TCTUPR_UP6_SHIFT 18
+#define I40E_PRT_TCTUPR_UP6_MASK I40E_MASK(0x7, I40E_PRT_TCTUPR_UP6_SHIFT)
+#define I40E_PRT_TCTUPR_UP7_SHIFT 21
+#define I40E_PRT_TCTUPR_UP7_MASK I40E_MASK(0x7, I40E_PRT_TCTUPR_UP7_SHIFT)
+
+/* PF - TimeSync (IEEE 1588) Registers */
+
+#define I40E_PRTTSYN_VFTIME_H 0x001E4020 /* Reset: GLOBR */
+#define I40E_PRTTSYN_VFTIME_H_TSYNTIME_H_SHIFT 0
+#define I40E_PRTTSYN_VFTIME_H_TSYNTIME_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_VFTIME_H_TSYNTIME_H_SHIFT)
+
+#define I40E_PRTTSYN_VFTIME_L 0x001E4000 /* Reset: GLOBR */
+#define I40E_PRTTSYN_VFTIME_L_TSYNTIME_L_SHIFT 0
+#define I40E_PRTTSYN_VFTIME_L_TSYNTIME_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_VFTIME_L_TSYNTIME_L_SHIFT)
+
+/* PF - Transmit Scheduler Registers */
+
+#define I40E_GLSCD_BWLCREDUPDATE 0x000B2148 /* Reset: CORER */
+#define I40E_GLSCD_BWLCREDUPDATE_BWLCREDUPDATE_SHIFT 0
+#define I40E_GLSCD_BWLCREDUPDATE_BWLCREDUPDATE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSCD_BWLCREDUPDATE_BWLCREDUPDATE_SHIFT)
+
+#define I40E_GLSCD_BWLLINESPERARB 0x000B214C /* Reset: CORER */
+#define I40E_GLSCD_BWLLINESPERARB_BWLLINESPERARB_SHIFT 0
+#define I40E_GLSCD_BWLLINESPERARB_BWLLINESPERARB_MASK I40E_MASK(0x7FF, I40E_GLSCD_BWLLINESPERARB_BWLLINESPERARB_SHIFT)
+
+#define I40E_GLSCD_CREDITSPERQUANTA 0x000B2144 /* Reset: CORER */
+#define I40E_GLSCD_CREDITSPERQUANTA_TSCDCREDITSPERQUANTA_SHIFT 0
+#define I40E_GLSCD_CREDITSPERQUANTA_TSCDCREDITSPERQUANTA_MASK I40E_MASK(0xFFFF, I40E_GLSCD_CREDITSPERQUANTA_TSCDCREDITSPERQUANTA_SHIFT)
+
+#define I40E_GLSCD_ERRSTATREG 0x000B2150 /* Reset: CORER */
+#define I40E_GLSCD_ERRSTATREG_LOOP_DETECTED_SHIFT 0
+#define I40E_GLSCD_ERRSTATREG_LOOP_DETECTED_MASK I40E_MASK(0x1, I40E_GLSCD_ERRSTATREG_LOOP_DETECTED_SHIFT)
+#define I40E_GLSCD_ERRSTATREG_SHRTBWLIMUPDATEPER_SHIFT 1
+#define I40E_GLSCD_ERRSTATREG_SHRTBWLIMUPDATEPER_MASK I40E_MASK(0x1, I40E_GLSCD_ERRSTATREG_SHRTBWLIMUPDATEPER_SHIFT)
+
+#define I40E_GLSCD_IFBCMDH 0x000B20A0 /* Reset: CORER */
+#define I40E_GLSCD_IFBCMDH_FLDOFFS_NUMENTS_SHIFT 0
+#define I40E_GLSCD_IFBCMDH_FLDOFFS_NUMENTS_MASK I40E_MASK(0x7F, I40E_GLSCD_IFBCMDH_FLDOFFS_NUMENTS_SHIFT)
+#define I40E_GLSCD_IFBCMDH_FLDSZ_SHIFT 7
+#define I40E_GLSCD_IFBCMDH_FLDSZ_MASK I40E_MASK(0x1F, I40E_GLSCD_IFBCMDH_FLDSZ_SHIFT)
+#define I40E_GLSCD_IFBCMDH_VALUE_ENTRYIDX_SHIFT 12
+#define I40E_GLSCD_IFBCMDH_VALUE_ENTRYIDX_MASK I40E_MASK(0x7FFFF, I40E_GLSCD_IFBCMDH_VALUE_ENTRYIDX_SHIFT)
+#define I40E_GLSCD_IFBCMDH_RSVD_SHIFT 31
+#define I40E_GLSCD_IFBCMDH_RSVD_MASK I40E_MASK(0x1, I40E_GLSCD_IFBCMDH_RSVD_SHIFT)
+
+#define I40E_GLSCD_IFBCMDL 0x000B209c /* Reset: CORER */
+#define I40E_GLSCD_IFBCMDL_OPCODE_SHIFT 0
+#define I40E_GLSCD_IFBCMDL_OPCODE_MASK I40E_MASK(0xF, I40E_GLSCD_IFBCMDL_OPCODE_SHIFT)
+#define I40E_GLSCD_IFBCMDL_TBLTYPE_SHIFT 4
+#define I40E_GLSCD_IFBCMDL_TBLTYPE_MASK I40E_MASK(0xF, I40E_GLSCD_IFBCMDL_TBLTYPE_SHIFT)
+#define I40E_GLSCD_IFBCMDL_TBLENTRYIDX_SHIFT 8
+#define I40E_GLSCD_IFBCMDL_TBLENTRYIDX_MASK I40E_MASK(0x7FF, I40E_GLSCD_IFBCMDL_TBLENTRYIDX_SHIFT)
+#define I40E_GLSCD_IFBCMDL_CTRLTYPE_SHIFT 19
+#define I40E_GLSCD_IFBCMDL_CTRLTYPE_MASK I40E_MASK(0x7, I40E_GLSCD_IFBCMDL_CTRLTYPE_SHIFT)
+#define I40E_GLSCD_IFBCMDL_RSVD_SHIFT 22
+#define I40E_GLSCD_IFBCMDL_RSVD_MASK I40E_MASK(0x3FF, I40E_GLSCD_IFBCMDL_RSVD_SHIFT)
+
+#define I40E_GLSCD_IFCTRL 0x000B20A8 /* Reset: CORER */
+#define I40E_GLSCD_IFCTRL_BCMDDB_SHIFT 0
+#define I40E_GLSCD_IFCTRL_BCMDDB_MASK I40E_MASK(0x1, I40E_GLSCD_IFCTRL_BCMDDB_SHIFT)
+#define I40E_GLSCD_IFCTRL_ICMDCLRERR_SHIFT 1
+#define I40E_GLSCD_IFCTRL_ICMDCLRERR_MASK I40E_MASK(0x1, I40E_GLSCD_IFCTRL_ICMDCLRERR_SHIFT)
+#define I40E_GLSCD_IFCTRL_BCMDCLRERR_SHIFT 2
+#define I40E_GLSCD_IFCTRL_BCMDCLRERR_MASK I40E_MASK(0x1, I40E_GLSCD_IFCTRL_BCMDCLRERR_SHIFT)
+#define I40E_GLSCD_IFCTRL_SCH_ENA_SHIFT 3
+#define I40E_GLSCD_IFCTRL_SCH_ENA_MASK I40E_MASK(0x1, I40E_GLSCD_IFCTRL_SCH_ENA_SHIFT)
+#define I40E_GLSCD_IFCTRL_SMALL_CRED_DISABLE_SHIFT 4
+#define I40E_GLSCD_IFCTRL_SMALL_CRED_DISABLE_MASK I40E_MASK(0x1, I40E_GLSCD_IFCTRL_SMALL_CRED_DISABLE_SHIFT)
+
+#define I40E_GLSCD_IFDATA(_i) (0x000B2084 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLSCD_IFDATA_MAX_INDEX 3
+#define I40E_GLSCD_IFDATA_TSCDIFDATA_SHIFT 0
+#define I40E_GLSCD_IFDATA_TSCDIFDATA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSCD_IFDATA_TSCDIFDATA_SHIFT)
+
+#define I40E_GLSCD_IFICMDH 0x000B2098 /* Reset: CORER */
+#define I40E_GLSCD_IFICMDH_FLDOFFS_NUMENTS_SHIFT 0
+#define I40E_GLSCD_IFICMDH_FLDOFFS_NUMENTS_MASK I40E_MASK(0x7F, I40E_GLSCD_IFICMDH_FLDOFFS_NUMENTS_SHIFT)
+#define I40E_GLSCD_IFICMDH_FLDSZ_SHIFT 7
+#define I40E_GLSCD_IFICMDH_FLDSZ_MASK I40E_MASK(0x1F, I40E_GLSCD_IFICMDH_FLDSZ_SHIFT)
+#define I40E_GLSCD_IFICMDH_VALUE_ENTRYIDX_SHIFT 12
+#define I40E_GLSCD_IFICMDH_VALUE_ENTRYIDX_MASK I40E_MASK(0x7FFFF, I40E_GLSCD_IFICMDH_VALUE_ENTRYIDX_SHIFT)
+#define I40E_GLSCD_IFICMDH_RSVD_SHIFT 31
+#define I40E_GLSCD_IFICMDH_RSVD_MASK I40E_MASK(0x1, I40E_GLSCD_IFICMDH_RSVD_SHIFT)
+
+#define I40E_GLSCD_IFICMDL 0x000B2094 /* Reset: CORER */
+#define I40E_GLSCD_IFICMDL_OPCODE_SHIFT 0
+#define I40E_GLSCD_IFICMDL_OPCODE_MASK I40E_MASK(0xF, I40E_GLSCD_IFICMDL_OPCODE_SHIFT)
+#define I40E_GLSCD_IFICMDL_TBLTYPE_SHIFT 4
+#define I40E_GLSCD_IFICMDL_TBLTYPE_MASK I40E_MASK(0xF, I40E_GLSCD_IFICMDL_TBLTYPE_SHIFT)
+#define I40E_GLSCD_IFICMDL_TBLENTRYIDX_SHIFT 8
+#define I40E_GLSCD_IFICMDL_TBLENTRYIDX_MASK I40E_MASK(0x7FF, I40E_GLSCD_IFICMDL_TBLENTRYIDX_SHIFT)
+#define I40E_GLSCD_IFICMDL_CTRLTYPE_SHIFT 19
+#define I40E_GLSCD_IFICMDL_CTRLTYPE_MASK I40E_MASK(0x7, I40E_GLSCD_IFICMDL_CTRLTYPE_SHIFT)
+#define I40E_GLSCD_IFICMDL_RSVD_SHIFT 22
+#define I40E_GLSCD_IFICMDL_RSVD_MASK I40E_MASK(0x3FF, I40E_GLSCD_IFICMDL_RSVD_SHIFT)
+
+#define I40E_GLSCD_IFSTATUS 0x000B20A4 /* Reset: CORER */
+#define I40E_GLSCD_IFSTATUS_ENTRAVAIL_SHIFT 0
+#define I40E_GLSCD_IFSTATUS_ENTRAVAIL_MASK I40E_MASK(0x3F, I40E_GLSCD_IFSTATUS_ENTRAVAIL_SHIFT)
+#define I40E_GLSCD_IFSTATUS_ICMDBZ_SHIFT 6
+#define I40E_GLSCD_IFSTATUS_ICMDBZ_MASK I40E_MASK(0x1, I40E_GLSCD_IFSTATUS_ICMDBZ_SHIFT)
+#define I40E_GLSCD_IFSTATUS_ICMDERR_SHIFT 7
+#define I40E_GLSCD_IFSTATUS_ICMDERR_MASK I40E_MASK(0x1, I40E_GLSCD_IFSTATUS_ICMDERR_SHIFT)
+#define I40E_GLSCD_IFSTATUS_BCMDERR_SHIFT 8
+#define I40E_GLSCD_IFSTATUS_BCMDERR_MASK I40E_MASK(0x1, I40E_GLSCD_IFSTATUS_BCMDERR_SHIFT)
+#define I40E_GLSCD_IFSTATUS_SCH_ENA_SHIFT 9
+#define I40E_GLSCD_IFSTATUS_SCH_ENA_MASK I40E_MASK(0x1, I40E_GLSCD_IFSTATUS_SCH_ENA_SHIFT)
+#define I40E_GLSCD_IFSTATUS_RSVD_SHIFT 10
+#define I40E_GLSCD_IFSTATUS_RSVD_MASK I40E_MASK(0x3FFFFF, I40E_GLSCD_IFSTATUS_RSVD_SHIFT)
+
+#define I40E_GLSCD_INCSCHEDCFGCOUNT 0x000B2140 /* Reset: CORER */
+#define I40E_GLSCD_INCSCHEDCFGCOUNT_INCSCHEDCFGCOUNT_SHIFT 0
+#define I40E_GLSCD_INCSCHEDCFGCOUNT_INCSCHEDCFGCOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSCD_INCSCHEDCFGCOUNT_INCSCHEDCFGCOUNT_SHIFT)
+
+#define I40E_GLSCD_LANTCBCMDS 0x000B2154 /* Reset: CORER */
+#define I40E_GLSCD_LANTCBCMDS_NUMLANTCBCMDS_SHIFT 0
+#define I40E_GLSCD_LANTCBCMDS_NUMLANTCBCMDS_MASK I40E_MASK(0x7F, I40E_GLSCD_LANTCBCMDS_NUMLANTCBCMDS_SHIFT)
+
+#define I40E_GLSCD_LLPREALTHRESH 0x000B213C /* Reset: CORER */
+#define I40E_GLSCD_LLPREALTHRESH_LLPREALTHRESH_SHIFT 0
+#define I40E_GLSCD_LLPREALTHRESH_LLPREALTHRESH_MASK I40E_MASK(0xF, I40E_GLSCD_LLPREALTHRESH_LLPREALTHRESH_SHIFT)
+
+#define I40E_GLSCD_PRGPERFCONTROL(_i) (0x000B20FC + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSCD_PRGPERFCONTROL_MAX_INDEX 15
+#define I40E_GLSCD_PRGPERFCONTROL_COUNTERTYPE_SHIFT 0
+#define I40E_GLSCD_PRGPERFCONTROL_COUNTERTYPE_MASK I40E_MASK(0x7, I40E_GLSCD_PRGPERFCONTROL_COUNTERTYPE_SHIFT)
+#define I40E_GLSCD_PRGPERFCONTROL_RESOURCESELECT_SHIFT 4
+#define I40E_GLSCD_PRGPERFCONTROL_RESOURCESELECT_MASK I40E_MASK(0x3, I40E_GLSCD_PRGPERFCONTROL_RESOURCESELECT_SHIFT)
+#define I40E_GLSCD_PRGPERFCONTROL_PORTINDEX_SHIFT 6
+#define I40E_GLSCD_PRGPERFCONTROL_PORTINDEX_MASK I40E_MASK(0x3, I40E_GLSCD_PRGPERFCONTROL_PORTINDEX_SHIFT)
+#define I40E_GLSCD_PRGPERFCONTROL_TCINDEX_SHIFT 8
+#define I40E_GLSCD_PRGPERFCONTROL_TCINDEX_MASK I40E_MASK(0x7, I40E_GLSCD_PRGPERFCONTROL_TCINDEX_SHIFT)
+#define I40E_GLSCD_PRGPERFCONTROL_QSINDEX_SHIFT 16
+#define I40E_GLSCD_PRGPERFCONTROL_QSINDEX_MASK I40E_MASK(0x3FF, I40E_GLSCD_PRGPERFCONTROL_QSINDEX_SHIFT)
+
+#define I40E_GLSCD_PRGPERFCOUNT(_i) (0x000B20BC + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSCD_PRGPERFCOUNT_MAX_INDEX 15
+#define I40E_GLSCD_PRGPERFCOUNT_PRGPERFCOUNT_SHIFT 0
+#define I40E_GLSCD_PRGPERFCOUNT_PRGPERFCOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSCD_PRGPERFCOUNT_PRGPERFCOUNT_SHIFT)
+
+#define I40E_GLSCD_RAM_DBG_CTL(_i) (0x000B28c0 + ((_i) * 4)) /* _i=0...9 */ /* Reset: POR */
+#define I40E_GLSCD_RAM_DBG_CTL_MAX_INDEX 9
+#define I40E_GLSCD_RAM_DBG_CTL_ADR_SHIFT 0
+#define I40E_GLSCD_RAM_DBG_CTL_ADR_MASK I40E_MASK(0x3FFFF, I40E_GLSCD_RAM_DBG_CTL_ADR_SHIFT)
+#define I40E_GLSCD_RAM_DBG_CTL_DW_SEL_SHIFT 18
+#define I40E_GLSCD_RAM_DBG_CTL_DW_SEL_MASK I40E_MASK(0xFF, I40E_GLSCD_RAM_DBG_CTL_DW_SEL_SHIFT)
+#define I40E_GLSCD_RAM_DBG_CTL_RD_EN_SHIFT 30
+#define I40E_GLSCD_RAM_DBG_CTL_RD_EN_MASK I40E_MASK(0x1, I40E_GLSCD_RAM_DBG_CTL_RD_EN_SHIFT)
+#define I40E_GLSCD_RAM_DBG_CTL_DONE_SHIFT 31
+#define I40E_GLSCD_RAM_DBG_CTL_DONE_MASK I40E_MASK(0x1, I40E_GLSCD_RAM_DBG_CTL_DONE_SHIFT)
+
+#define I40E_GLSCD_RAM_DBG_DATA(_i) (0x000b28e8 + ((_i) * 4)) /* _i=0...9 */ /* Reset: POR */
+#define I40E_GLSCD_RAM_DBG_DATA_MAX_INDEX 9
+#define I40E_GLSCD_RAM_DBG_DATA_GLSCD_RAM_DBG_DATA_SHIFT 0
+#define I40E_GLSCD_RAM_DBG_DATA_GLSCD_RAM_DBG_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSCD_RAM_DBG_DATA_GLSCD_RAM_DBG_DATA_SHIFT)
+
+#define I40E_GLSCD_RLMTBLRD2CMD 0x000B2158 /* Reset: CORER */
+#define I40E_GLSCD_RLMTBLRD2CMD_RLMTBLIDX_SHIFT 0
+#define I40E_GLSCD_RLMTBLRD2CMD_RLMTBLIDX_MASK I40E_MASK(0x3FF, I40E_GLSCD_RLMTBLRD2CMD_RLMTBLIDX_SHIFT)
+
+#define I40E_GLSCD_RLMTBLRD2DATAHI 0x000B2164 /* Reset: CORER */
+#define I40E_GLSCD_RLMTBLRD2DATAHI_DATA_SHIFT 0
+#define I40E_GLSCD_RLMTBLRD2DATAHI_DATA_MASK I40E_MASK(0x7FFFFFF, I40E_GLSCD_RLMTBLRD2DATAHI_DATA_SHIFT)
+
+#define I40E_GLSCD_RLMTBLRD2DATALO 0x000B2160 /* Reset: CORER */
+#define I40E_GLSCD_RLMTBLRD2DATALO_DATA_SHIFT 0
+#define I40E_GLSCD_RLMTBLRD2DATALO_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSCD_RLMTBLRD2DATALO_DATA_SHIFT)
+
+#define I40E_GLSCD_RLMTBLRD2STATUS 0x000B215C /* Reset: CORER */
+#define I40E_GLSCD_RLMTBLRD2STATUS_VALID_SHIFT 0
+#define I40E_GLSCD_RLMTBLRD2STATUS_VALID_MASK I40E_MASK(0x1, I40E_GLSCD_RLMTBLRD2STATUS_VALID_SHIFT)
+#define I40E_GLSCD_RLMTBLRD2STATUS_RSVD_SHIFT 1
+#define I40E_GLSCD_RLMTBLRD2STATUS_RSVD_MASK I40E_MASK(0x7FFFFFFF, I40E_GLSCD_RLMTBLRD2STATUS_RSVD_SHIFT)
+
+#define I40E_GLSCD_RLMTBLRDCMD 0x000B20AC /* Reset: CORER */
+#define I40E_GLSCD_RLMTBLRDCMD_RLMTBLIDX_SHIFT 0
+#define I40E_GLSCD_RLMTBLRDCMD_RLMTBLIDX_MASK I40E_MASK(0x3FF, I40E_GLSCD_RLMTBLRDCMD_RLMTBLIDX_SHIFT)
+
+#define I40E_GLSCD_RLMTBLRDDATAHI 0x000B20B8 /* Reset: CORER */
+#define I40E_GLSCD_RLMTBLRDDATAHI_DATA_SHIFT 0
+#define I40E_GLSCD_RLMTBLRDDATAHI_DATA_MASK I40E_MASK(0x7FFFFFF, I40E_GLSCD_RLMTBLRDDATAHI_DATA_SHIFT)
+
+#define I40E_GLSCD_RLMTBLRDDATALO 0x000B20B4 /* Reset: CORER */
+#define I40E_GLSCD_RLMTBLRDDATALO_DATA_SHIFT 0
+#define I40E_GLSCD_RLMTBLRDDATALO_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSCD_RLMTBLRDDATALO_DATA_SHIFT)
+
+#define I40E_GLSCD_RLMTBLRDSTATUS 0x000B20B0 /* Reset: CORER */
+#define I40E_GLSCD_RLMTBLRDSTATUS_VALID_SHIFT 0
+#define I40E_GLSCD_RLMTBLRDSTATUS_VALID_MASK I40E_MASK(0x1, I40E_GLSCD_RLMTBLRDSTATUS_VALID_SHIFT)
+#define I40E_GLSCD_RLMTBLRDSTATUS_RSVD_SHIFT 1
+#define I40E_GLSCD_RLMTBLRDSTATUS_RSVD_MASK I40E_MASK(0x7FFFFFFF, I40E_GLSCD_RLMTBLRDSTATUS_RSVD_SHIFT)
+
+#define I40E_PFSCD_DEFQSETHNDL 0x000B2000 /* Reset: PFR */
+#define I40E_PFSCD_DEFQSETHNDL_DEFQSETHNDL_SHIFT 0
+#define I40E_PFSCD_DEFQSETHNDL_DEFQSETHNDL_MASK I40E_MASK(0xFFFF, I40E_PFSCD_DEFQSETHNDL_DEFQSETHNDL_SHIFT)
+
+/* PF - Virtualization PF Registers */
+
+#define I40E_GL_MDCK_RX 0x0012A50C /* Reset: CORER */
+#define I40E_GL_MDCK_RX_DESC_ADDR_SHIFT 0
+#define I40E_GL_MDCK_RX_DESC_ADDR_MASK I40E_MASK(0x1, I40E_GL_MDCK_RX_DESC_ADDR_SHIFT)
+
+#define I40E_GL_MDCK_TCMD 0x000E648C /* Reset: CORER */
+#define I40E_GL_MDCK_TCMD_DESC_ADDR_SHIFT 0
+#define I40E_GL_MDCK_TCMD_DESC_ADDR_MASK I40E_MASK(0x1, I40E_GL_MDCK_TCMD_DESC_ADDR_SHIFT)
+#define I40E_GL_MDCK_TCMD_MAX_BUFF_SHIFT 2
+#define I40E_GL_MDCK_TCMD_MAX_BUFF_MASK I40E_MASK(0x1, I40E_GL_MDCK_TCMD_MAX_BUFF_SHIFT)
+#define I40E_GL_MDCK_TCMD_MAX_HEAD_SHIFT 3
+#define I40E_GL_MDCK_TCMD_MAX_HEAD_MASK I40E_MASK(0x1, I40E_GL_MDCK_TCMD_MAX_HEAD_SHIFT)
+#define I40E_GL_MDCK_TCMD_NO_HEAD_SHIFT 4
+#define I40E_GL_MDCK_TCMD_NO_HEAD_MASK I40E_MASK(0x1, I40E_GL_MDCK_TCMD_NO_HEAD_SHIFT)
+#define I40E_GL_MDCK_TCMD_TOO_LONG_SHIFT 5
+#define I40E_GL_MDCK_TCMD_TOO_LONG_MASK I40E_MASK(0x1, I40E_GL_MDCK_TCMD_TOO_LONG_SHIFT)
+#define I40E_GL_MDCK_TCMD_SINGLE_TX_SIZE_SHIFT 6
+#define I40E_GL_MDCK_TCMD_SINGLE_TX_SIZE_MASK I40E_MASK(0x1, I40E_GL_MDCK_TCMD_SINGLE_TX_SIZE_SHIFT)
+#define I40E_GL_MDCK_TCMD_ENDLESS_TX_SHIFT 7
+#define I40E_GL_MDCK_TCMD_ENDLESS_TX_MASK I40E_MASK(0x1, I40E_GL_MDCK_TCMD_ENDLESS_TX_SHIFT)
+#define I40E_GL_MDCK_TCMD_BAD_LSO_LEN_SHIFT 8
+#define I40E_GL_MDCK_TCMD_BAD_LSO_LEN_MASK I40E_MASK(0x1, I40E_GL_MDCK_TCMD_BAD_LSO_LEN_SHIFT)
+#define I40E_GL_MDCK_TCMD_BAD_LSO_MSS_SHIFT 9
+#define I40E_GL_MDCK_TCMD_BAD_LSO_MSS_MASK I40E_MASK(0x1, I40E_GL_MDCK_TCMD_BAD_LSO_MSS_SHIFT)
+#define I40E_GL_MDCK_TCMD_M_CONTEXTS_SHIFT 12
+#define I40E_GL_MDCK_TCMD_M_CONTEXTS_MASK I40E_MASK(0x1, I40E_GL_MDCK_TCMD_M_CONTEXTS_SHIFT)
+#define I40E_GL_MDCK_TCMD_BAD_DESC_SEQUENCE_SHIFT 13
+#define I40E_GL_MDCK_TCMD_BAD_DESC_SEQUENCE_MASK I40E_MASK(0x1, I40E_GL_MDCK_TCMD_BAD_DESC_SEQUENCE_SHIFT)
+#define I40E_GL_MDCK_TCMD_BAD_FC_FD_DESC_SHIFT 14
+#define I40E_GL_MDCK_TCMD_BAD_FC_FD_DESC_MASK I40E_MASK(0x1, I40E_GL_MDCK_TCMD_BAD_FC_FD_DESC_SHIFT)
+#define I40E_GL_MDCK_TCMD_NO_PACKET_SHIFT 15
+#define I40E_GL_MDCK_TCMD_NO_PACKET_MASK I40E_MASK(0x1, I40E_GL_MDCK_TCMD_NO_PACKET_SHIFT)
+#define I40E_GL_MDCK_TCMD_DIS_DIF_DIX_SHIFT 16
+#define I40E_GL_MDCK_TCMD_DIS_DIF_DIX_MASK I40E_MASK(0x1, I40E_GL_MDCK_TCMD_DIS_DIF_DIX_SHIFT)
+#define I40E_GL_MDCK_TCMD_DIS_FLEX_SHIFT 17
+#define I40E_GL_MDCK_TCMD_DIS_FLEX_MASK I40E_MASK(0x1, I40E_GL_MDCK_TCMD_DIS_FLEX_SHIFT)
+#define I40E_GL_MDCK_TCMD_ZERO_BSIZE_SHIFT 18
+#define I40E_GL_MDCK_TCMD_ZERO_BSIZE_MASK I40E_MASK(0x1, I40E_GL_MDCK_TCMD_ZERO_BSIZE_SHIFT)
+
+#define I40E_GL_MDCK_TDAT 0x000442F4 /* Reset: CORER */
+#define I40E_GL_MDCK_TDAT_BIG_OFFSET_SHIFT 0
+#define I40E_GL_MDCK_TDAT_BIG_OFFSET_MASK I40E_MASK(0x1, I40E_GL_MDCK_TDAT_BIG_OFFSET_SHIFT)
+#define I40E_GL_MDCK_TDAT_BUFF_ADDR_SHIFT 1
+#define I40E_GL_MDCK_TDAT_BUFF_ADDR_MASK I40E_MASK(0x1, I40E_GL_MDCK_TDAT_BUFF_ADDR_SHIFT)
+#define I40E_GL_MDCK_TDAT_MAL_LENGTH_DIS_SHIFT 2
+#define I40E_GL_MDCK_TDAT_MAL_LENGTH_DIS_MASK I40E_MASK(0x1, I40E_GL_MDCK_TDAT_MAL_LENGTH_DIS_SHIFT)
+#define I40E_GL_MDCK_TDAT_MAL_CMD_DIS_SHIFT 3
+#define I40E_GL_MDCK_TDAT_MAL_CMD_DIS_MASK I40E_MASK(0x1, I40E_GL_MDCK_TDAT_MAL_CMD_DIS_SHIFT)
+
+#define I40E_PF_VIRT_VSTATUS 0x0009C400 /* Reset: PFR */
+#define I40E_PF_VIRT_VSTATUS_NUM_VFS_SHIFT 0
+#define I40E_PF_VIRT_VSTATUS_NUM_VFS_MASK I40E_MASK(0xFF, I40E_PF_VIRT_VSTATUS_NUM_VFS_SHIFT)
+#define I40E_PF_VIRT_VSTATUS_TOTAL_VFS_SHIFT 8
+#define I40E_PF_VIRT_VSTATUS_TOTAL_VFS_MASK I40E_MASK(0xFF, I40E_PF_VIRT_VSTATUS_TOTAL_VFS_SHIFT)
+#define I40E_PF_VIRT_VSTATUS_IOV_ACTIVE_SHIFT 16
+#define I40E_PF_VIRT_VSTATUS_IOV_ACTIVE_MASK I40E_MASK(0x1, I40E_PF_VIRT_VSTATUS_IOV_ACTIVE_SHIFT)
+
+#define I40E_PF_VT_PFALLOC_CSR 0x00078D80 /* Reset: CORER */
+#define I40E_PF_VT_PFALLOC_CSR_FIRSTVF_SHIFT 0
+#define I40E_PF_VT_PFALLOC_CSR_FIRSTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_CSR_FIRSTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_CSR_LASTVF_SHIFT 8
+#define I40E_PF_VT_PFALLOC_CSR_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_CSR_LASTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_CSR_VALID_SHIFT 31
+#define I40E_PF_VT_PFALLOC_CSR_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_CSR_VALID_SHIFT)
+
+#define I40E_PF_VT_PFALLOC_INT 0x0003F080 /* Reset: CORER */
+#define I40E_PF_VT_PFALLOC_INT_FIRSTVF_SHIFT 0
+#define I40E_PF_VT_PFALLOC_INT_FIRSTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_INT_FIRSTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_INT_LASTVF_SHIFT 8
+#define I40E_PF_VT_PFALLOC_INT_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_INT_LASTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_INT_VALID_SHIFT 31
+#define I40E_PF_VT_PFALLOC_INT_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_INT_VALID_SHIFT)
+
+#define I40E_PF_VT_PFALLOC_PMAT 0x000C0680 /* Reset: CORER */
+#define I40E_PF_VT_PFALLOC_PMAT_FIRSTVF_SHIFT 0
+#define I40E_PF_VT_PFALLOC_PMAT_FIRSTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_PMAT_FIRSTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_PMAT_LASTVF_SHIFT 8
+#define I40E_PF_VT_PFALLOC_PMAT_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_PMAT_LASTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_PMAT_VALID_SHIFT 31
+#define I40E_PF_VT_PFALLOC_PMAT_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_PMAT_VALID_SHIFT)
+
+#define I40E_PF_VT_PFALLOC_TSCD 0x000B2280 /* Reset: CORER */
+#define I40E_PF_VT_PFALLOC_TSCD_FIRSTVF_SHIFT 0
+#define I40E_PF_VT_PFALLOC_TSCD_FIRSTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_TSCD_FIRSTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_TSCD_LASTVF_SHIFT 8
+#define I40E_PF_VT_PFALLOC_TSCD_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_TSCD_LASTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_TSCD_VALID_SHIFT 31
+#define I40E_PF_VT_PFALLOC_TSCD_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_TSCD_VALID_SHIFT)
+
+#define I40E_PF_VT_PFALLOC_VMLR 0x00092580 /* Reset: CORER */
+#define I40E_PF_VT_PFALLOC_VMLR_FIRSTVF_SHIFT 0
+#define I40E_PF_VT_PFALLOC_VMLR_FIRSTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_VMLR_FIRSTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_VMLR_LASTVF_SHIFT 8
+#define I40E_PF_VT_PFALLOC_VMLR_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_VMLR_LASTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_VMLR_VALID_SHIFT 31
+#define I40E_PF_VT_PFALLOC_VMLR_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_VMLR_VALID_SHIFT)
+
+/* PF - VSI Context */
+
+#define I40E_VSI_L2TAGSTXVALID(_VSI) (0x00042800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */
+#define I40E_VSI_L2TAGSTXVALID_MAX_INDEX 383
+#define I40E_VSI_L2TAGSTXVALID_L2TAG1INSERTID_SHIFT 0
+#define I40E_VSI_L2TAGSTXVALID_L2TAG1INSERTID_MASK I40E_MASK(0x7, I40E_VSI_L2TAGSTXVALID_L2TAG1INSERTID_SHIFT)
+#define I40E_VSI_L2TAGSTXVALID_L2TAG1INSERTID_VALID_SHIFT 3
+#define I40E_VSI_L2TAGSTXVALID_L2TAG1INSERTID_VALID_MASK I40E_MASK(0x1, I40E_VSI_L2TAGSTXVALID_L2TAG1INSERTID_VALID_SHIFT)
+#define I40E_VSI_L2TAGSTXVALID_L2TAG2INSERTID_SHIFT 4
+#define I40E_VSI_L2TAGSTXVALID_L2TAG2INSERTID_MASK I40E_MASK(0x7, I40E_VSI_L2TAGSTXVALID_L2TAG2INSERTID_SHIFT)
+#define I40E_VSI_L2TAGSTXVALID_L2TAG2INSERTID_VALID_SHIFT 7
+#define I40E_VSI_L2TAGSTXVALID_L2TAG2INSERTID_VALID_MASK I40E_MASK(0x1, I40E_VSI_L2TAGSTXVALID_L2TAG2INSERTID_VALID_SHIFT)
+#define I40E_VSI_L2TAGSTXVALID_TIR0INSERTID_SHIFT 16
+#define I40E_VSI_L2TAGSTXVALID_TIR0INSERTID_MASK I40E_MASK(0x7, I40E_VSI_L2TAGSTXVALID_TIR0INSERTID_SHIFT)
+#define I40E_VSI_L2TAGSTXVALID_TIR0_INSERT_SHIFT 19
+#define I40E_VSI_L2TAGSTXVALID_TIR0_INSERT_MASK I40E_MASK(0x1, I40E_VSI_L2TAGSTXVALID_TIR0_INSERT_SHIFT)
+#define I40E_VSI_L2TAGSTXVALID_TIR1INSERTID_SHIFT 20
+#define I40E_VSI_L2TAGSTXVALID_TIR1INSERTID_MASK I40E_MASK(0x7, I40E_VSI_L2TAGSTXVALID_TIR1INSERTID_SHIFT)
+#define I40E_VSI_L2TAGSTXVALID_TIR1_INSERT_SHIFT 23
+#define I40E_VSI_L2TAGSTXVALID_TIR1_INSERT_MASK I40E_MASK(0x1, I40E_VSI_L2TAGSTXVALID_TIR1_INSERT_SHIFT)
+#define I40E_VSI_L2TAGSTXVALID_TIR2INSERTID_SHIFT 24
+#define I40E_VSI_L2TAGSTXVALID_TIR2INSERTID_MASK I40E_MASK(0x7, I40E_VSI_L2TAGSTXVALID_TIR2INSERTID_SHIFT)
+#define I40E_VSI_L2TAGSTXVALID_TIR2_INSERT_SHIFT 27
+#define I40E_VSI_L2TAGSTXVALID_TIR2_INSERT_MASK I40E_MASK(0x1, I40E_VSI_L2TAGSTXVALID_TIR2_INSERT_SHIFT)
+
+#define I40E_VSI_PORT(_VSI) (0x000B22C0 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */
+#define I40E_VSI_PORT_MAX_INDEX 383
+#define I40E_VSI_PORT_PORT_NUM_SHIFT 0
+#define I40E_VSI_PORT_PORT_NUM_MASK I40E_MASK(0x3, I40E_VSI_PORT_PORT_NUM_SHIFT)
+
+#define I40E_VSI_RUPR(_VSI) (0x00050000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */
+#define I40E_VSI_RUPR_MAX_INDEX 383
+#define I40E_VSI_RUPR_UP0_SHIFT 0
+#define I40E_VSI_RUPR_UP0_MASK I40E_MASK(0x7, I40E_VSI_RUPR_UP0_SHIFT)
+#define I40E_VSI_RUPR_UP1_SHIFT 3
+#define I40E_VSI_RUPR_UP1_MASK I40E_MASK(0x7, I40E_VSI_RUPR_UP1_SHIFT)
+#define I40E_VSI_RUPR_UP2_SHIFT 6
+#define I40E_VSI_RUPR_UP2_MASK I40E_MASK(0x7, I40E_VSI_RUPR_UP2_SHIFT)
+#define I40E_VSI_RUPR_UP3_SHIFT 9
+#define I40E_VSI_RUPR_UP3_MASK I40E_MASK(0x7, I40E_VSI_RUPR_UP3_SHIFT)
+#define I40E_VSI_RUPR_UP4_SHIFT 12
+#define I40E_VSI_RUPR_UP4_MASK I40E_MASK(0x7, I40E_VSI_RUPR_UP4_SHIFT)
+#define I40E_VSI_RUPR_UP5_SHIFT 15
+#define I40E_VSI_RUPR_UP5_MASK I40E_MASK(0x7, I40E_VSI_RUPR_UP5_SHIFT)
+#define I40E_VSI_RUPR_UP6_SHIFT 18
+#define I40E_VSI_RUPR_UP6_MASK I40E_MASK(0x7, I40E_VSI_RUPR_UP6_SHIFT)
+#define I40E_VSI_RUPR_UP7_SHIFT 21
+#define I40E_VSI_RUPR_UP7_MASK I40E_MASK(0x7, I40E_VSI_RUPR_UP7_SHIFT)
+
+#define I40E_VSI_RXSWCTRL(_VSI) (0x00208800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */
+#define I40E_VSI_RXSWCTRL_MAX_INDEX 383
+#define I40E_VSI_RXSWCTRL_MACVSIPRUNEENABLE_SHIFT 0
+#define I40E_VSI_RXSWCTRL_MACVSIPRUNEENABLE_MASK I40E_MASK(0x1, I40E_VSI_RXSWCTRL_MACVSIPRUNEENABLE_SHIFT)
+#define I40E_VSI_RXSWCTRL_VLANPRUNEENABLE_SHIFT 1
+#define I40E_VSI_RXSWCTRL_VLANPRUNEENABLE_MASK I40E_MASK(0x1, I40E_VSI_RXSWCTRL_VLANPRUNEENABLE_SHIFT)
+
+#define I40E_VSI_SRCSWCTRL(_VSI) (0x00209800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */
+#define I40E_VSI_SRCSWCTRL_MAX_INDEX 383
+#define I40E_VSI_SRCSWCTRL_SWID_SHIFT 0
+#define I40E_VSI_SRCSWCTRL_SWID_MASK I40E_MASK(0xFFF, I40E_VSI_SRCSWCTRL_SWID_SHIFT)
+#define I40E_VSI_SRCSWCTRL_ISNSTAG_SHIFT 12
+#define I40E_VSI_SRCSWCTRL_ISNSTAG_MASK I40E_MASK(0x1, I40E_VSI_SRCSWCTRL_ISNSTAG_SHIFT)
+#define I40E_VSI_SRCSWCTRL_SWIDVALID_SHIFT 17
+#define I40E_VSI_SRCSWCTRL_SWIDVALID_MASK I40E_MASK(0x1, I40E_VSI_SRCSWCTRL_SWIDVALID_SHIFT)
+#define I40E_VSI_SRCSWCTRL_ALLOWDESTOVERRIDE_SHIFT 19
+#define I40E_VSI_SRCSWCTRL_ALLOWDESTOVERRIDE_MASK I40E_MASK(0x1, I40E_VSI_SRCSWCTRL_ALLOWDESTOVERRIDE_SHIFT)
+#define I40E_VSI_SRCSWCTRL_ALLOWLOOPBACK_SHIFT 20
+#define I40E_VSI_SRCSWCTRL_ALLOWLOOPBACK_MASK I40E_MASK(0x1, I40E_VSI_SRCSWCTRL_ALLOWLOOPBACK_SHIFT)
+#define I40E_VSI_SRCSWCTRL_LANENABLE_SHIFT 21
+#define I40E_VSI_SRCSWCTRL_LANENABLE_MASK I40E_MASK(0x1, I40E_VSI_SRCSWCTRL_LANENABLE_SHIFT)
+#define I40E_VSI_SRCSWCTRL_VLANAS_SHIFT 22
+#define I40E_VSI_SRCSWCTRL_VLANAS_MASK I40E_MASK(0x1, I40E_VSI_SRCSWCTRL_VLANAS_SHIFT)
+#define I40E_VSI_SRCSWCTRL_MACAS_SHIFT 23
+#define I40E_VSI_SRCSWCTRL_MACAS_MASK I40E_MASK(0x1, I40E_VSI_SRCSWCTRL_MACAS_SHIFT)
+
+#define I40E_VSI_TAIR(_VSI) (0x00041800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */
+#define I40E_VSI_TAIR_MAX_INDEX 383
+#define I40E_VSI_TAIR_PORT_TAG_ID_SHIFT 0
+#define I40E_VSI_TAIR_PORT_TAG_ID_MASK I40E_MASK(0xFFFF, I40E_VSI_TAIR_PORT_TAG_ID_SHIFT)
+
+#define I40E_VSI_TAR(_VSI) (0x00042000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */
+#define I40E_VSI_TAR_MAX_INDEX 383
+#define I40E_VSI_TAR_ACCEPTTAGGED_SHIFT 0
+#define I40E_VSI_TAR_ACCEPTTAGGED_MASK I40E_MASK(0x3FF, I40E_VSI_TAR_ACCEPTTAGGED_SHIFT)
+#define I40E_VSI_TAR_ACCEPTUNTAGGED_SHIFT 16
+#define I40E_VSI_TAR_ACCEPTUNTAGGED_MASK I40E_MASK(0x3FF, I40E_VSI_TAR_ACCEPTUNTAGGED_SHIFT)
+
+#define I40E_VSI_TIR_0(_VSI) (0x00040000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */
+#define I40E_VSI_TIR_0_MAX_INDEX 383
+#define I40E_VSI_TIR_0_PORT_TAG_ID_SHIFT 0
+#define I40E_VSI_TIR_0_PORT_TAG_ID_MASK I40E_MASK(0xFFFF, I40E_VSI_TIR_0_PORT_TAG_ID_SHIFT)
+
+#define I40E_VSI_TIR_1(_VSI) (0x00040800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */
+#define I40E_VSI_TIR_1_MAX_INDEX 383
+#define I40E_VSI_TIR_1_PORT_TAG_ID_SHIFT 0
+#define I40E_VSI_TIR_1_PORT_TAG_ID_MASK I40E_MASK(0xFFFFFFFF, I40E_VSI_TIR_1_PORT_TAG_ID_SHIFT)
+
+#define I40E_VSI_TIR_2(_VSI) (0x00041000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */
+#define I40E_VSI_TIR_2_MAX_INDEX 383
+#define I40E_VSI_TIR_2_PORT_TAG_ID_SHIFT 0
+#define I40E_VSI_TIR_2_PORT_TAG_ID_MASK I40E_MASK(0xFFFF, I40E_VSI_TIR_2_PORT_TAG_ID_SHIFT)
+
+#define I40E_VSI_TSR(_VSI) (0x00050800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */
+#define I40E_VSI_TSR_MAX_INDEX 383
+#define I40E_VSI_TSR_STRIPTAG_SHIFT 0
+#define I40E_VSI_TSR_STRIPTAG_MASK I40E_MASK(0x3FF, I40E_VSI_TSR_STRIPTAG_SHIFT)
+#define I40E_VSI_TSR_SHOWTAG_SHIFT 10
+#define I40E_VSI_TSR_SHOWTAG_MASK I40E_MASK(0x3FF, I40E_VSI_TSR_SHOWTAG_SHIFT)
+#define I40E_VSI_TSR_SHOWPRIONLY_SHIFT 20
+#define I40E_VSI_TSR_SHOWPRIONLY_MASK I40E_MASK(0x3FF, I40E_VSI_TSR_SHOWPRIONLY_SHIFT)
+
+#define I40E_VSI_TUPIOM(_VSI) (0x00043800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */
+#define I40E_VSI_TUPIOM_MAX_INDEX 383
+#define I40E_VSI_TUPIOM_UP0_SHIFT 0
+#define I40E_VSI_TUPIOM_UP0_MASK I40E_MASK(0x7, I40E_VSI_TUPIOM_UP0_SHIFT)
+#define I40E_VSI_TUPIOM_UP1_SHIFT 3
+#define I40E_VSI_TUPIOM_UP1_MASK I40E_MASK(0x7, I40E_VSI_TUPIOM_UP1_SHIFT)
+#define I40E_VSI_TUPIOM_UP2_SHIFT 6
+#define I40E_VSI_TUPIOM_UP2_MASK I40E_MASK(0x7, I40E_VSI_TUPIOM_UP2_SHIFT)
+#define I40E_VSI_TUPIOM_UP3_SHIFT 9
+#define I40E_VSI_TUPIOM_UP3_MASK I40E_MASK(0x7, I40E_VSI_TUPIOM_UP3_SHIFT)
+#define I40E_VSI_TUPIOM_UP4_SHIFT 12
+#define I40E_VSI_TUPIOM_UP4_MASK I40E_MASK(0x7, I40E_VSI_TUPIOM_UP4_SHIFT)
+#define I40E_VSI_TUPIOM_UP5_SHIFT 15
+#define I40E_VSI_TUPIOM_UP5_MASK I40E_MASK(0x7, I40E_VSI_TUPIOM_UP5_SHIFT)
+#define I40E_VSI_TUPIOM_UP6_SHIFT 18
+#define I40E_VSI_TUPIOM_UP6_MASK I40E_MASK(0x7, I40E_VSI_TUPIOM_UP6_SHIFT)
+#define I40E_VSI_TUPIOM_UP7_SHIFT 21
+#define I40E_VSI_TUPIOM_UP7_MASK I40E_MASK(0x7, I40E_VSI_TUPIOM_UP7_SHIFT)
+
+#define I40E_VSI_TUPR(_VSI) (0x00043000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */
+#define I40E_VSI_TUPR_MAX_INDEX 383
+#define I40E_VSI_TUPR_UP0_SHIFT 0
+#define I40E_VSI_TUPR_UP0_MASK I40E_MASK(0x7, I40E_VSI_TUPR_UP0_SHIFT)
+#define I40E_VSI_TUPR_UP1_SHIFT 3
+#define I40E_VSI_TUPR_UP1_MASK I40E_MASK(0x7, I40E_VSI_TUPR_UP1_SHIFT)
+#define I40E_VSI_TUPR_UP2_SHIFT 6
+#define I40E_VSI_TUPR_UP2_MASK I40E_MASK(0x7, I40E_VSI_TUPR_UP2_SHIFT)
+#define I40E_VSI_TUPR_UP3_SHIFT 9
+#define I40E_VSI_TUPR_UP3_MASK I40E_MASK(0x7, I40E_VSI_TUPR_UP3_SHIFT)
+#define I40E_VSI_TUPR_UP4_SHIFT 12
+#define I40E_VSI_TUPR_UP4_MASK I40E_MASK(0x7, I40E_VSI_TUPR_UP4_SHIFT)
+#define I40E_VSI_TUPR_UP5_SHIFT 15
+#define I40E_VSI_TUPR_UP5_MASK I40E_MASK(0x7, I40E_VSI_TUPR_UP5_SHIFT)
+#define I40E_VSI_TUPR_UP6_SHIFT 18
+#define I40E_VSI_TUPR_UP6_MASK I40E_MASK(0x7, I40E_VSI_TUPR_UP6_SHIFT)
+#define I40E_VSI_TUPR_UP7_SHIFT 21
+#define I40E_VSI_TUPR_UP7_MASK I40E_MASK(0x7, I40E_VSI_TUPR_UP7_SHIFT)
+
+#define I40E_VSI_VSI2F(_VSI) (0x0020B800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */
+#define I40E_VSI_VSI2F_MAX_INDEX 383
+#define I40E_VSI_VSI2F_VFVMNUMBER_SHIFT 0
+#define I40E_VSI_VSI2F_VFVMNUMBER_MASK I40E_MASK(0x3FF, I40E_VSI_VSI2F_VFVMNUMBER_SHIFT)
+#define I40E_VSI_VSI2F_PFNUMBER_SHIFT 10
+#define I40E_VSI_VSI2F_PFNUMBER_MASK I40E_MASK(0xF, I40E_VSI_VSI2F_PFNUMBER_SHIFT)
+#define I40E_VSI_VSI2F_FUNCTIONTYPE_SHIFT 14
+#define I40E_VSI_VSI2F_FUNCTIONTYPE_MASK I40E_MASK(0x3, I40E_VSI_VSI2F_FUNCTIONTYPE_SHIFT)
+#define I40E_VSI_VSI2F_BUFFERNUMBER_SHIFT 16
+#define I40E_VSI_VSI2F_BUFFERNUMBER_MASK I40E_MASK(0x7, I40E_VSI_VSI2F_BUFFERNUMBER_SHIFT)
+#define I40E_VSI_VSI2F_RESERVED_5_SHIFT 19
+#define I40E_VSI_VSI2F_RESERVED_5_MASK I40E_MASK(0x7, I40E_VSI_VSI2F_RESERVED_5_SHIFT)
+#define I40E_VSI_VSI2F_VSI_ENABLE_SHIFT 22
+#define I40E_VSI_VSI2F_VSI_ENABLE_MASK I40E_MASK(0x1, I40E_VSI_VSI2F_VSI_ENABLE_SHIFT)
+#define I40E_VSI_VSI2F_VSI_NUMBER_SHIFT 23
+#define I40E_VSI_VSI2F_VSI_NUMBER_MASK I40E_MASK(0x1FF, I40E_VSI_VSI2F_VSI_NUMBER_SHIFT)
+
+/* PF - Wake-Up and Proxying Registers */
+
+#define I40E_PFPM_FHFT_DATA(_i, _j) (0x00060000 + ((_i) * 4096 + (_j) * 128)) /* _i=0...7, _j=0...31 */ /* Reset: POR */
+#define I40E_PFPM_FHFT_DATA_MAX_INDEX 7
+#define I40E_PFPM_FHFT_DATA_DWORD_SHIFT 0
+#define I40E_PFPM_FHFT_DATA_DWORD_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPM_FHFT_DATA_DWORD_SHIFT)
+
+#define I40E_PFPM_FHFT_MASK(_i, _j) (0x00068000 + ((_i) * 1024 + (_j) * 128)) /* _i=0...7, _j=0...7 */ /* Reset: POR */
+#define I40E_PFPM_FHFT_MASK_MAX_INDEX 7
+#define I40E_PFPM_FHFT_MASK_MASK_SHIFT 0
+#define I40E_PFPM_FHFT_MASK_MASK_MASK I40E_MASK(0xFFFF, I40E_PFPM_FHFT_MASK_MASK_SHIFT)
+
+#define I40E_PFPM_PROXYFC 0x00245A80 /* Reset: POR */
+#define I40E_PFPM_PROXYFC_PPROXYE_SHIFT 0
+#define I40E_PFPM_PROXYFC_PPROXYE_MASK I40E_MASK(0x1, I40E_PFPM_PROXYFC_PPROXYE_SHIFT)
+#define I40E_PFPM_PROXYFC_EX_SHIFT 1
+#define I40E_PFPM_PROXYFC_EX_MASK I40E_MASK(0x1, I40E_PFPM_PROXYFC_EX_SHIFT)
+#define I40E_PFPM_PROXYFC_ARP_SHIFT 4
+#define I40E_PFPM_PROXYFC_ARP_MASK I40E_MASK(0x1, I40E_PFPM_PROXYFC_ARP_SHIFT)
+#define I40E_PFPM_PROXYFC_ARP_DIRECTED_SHIFT 5
+#define I40E_PFPM_PROXYFC_ARP_DIRECTED_MASK I40E_MASK(0x1, I40E_PFPM_PROXYFC_ARP_DIRECTED_SHIFT)
+#define I40E_PFPM_PROXYFC_NS_SHIFT 9
+#define I40E_PFPM_PROXYFC_NS_MASK I40E_MASK(0x1, I40E_PFPM_PROXYFC_NS_SHIFT)
+#define I40E_PFPM_PROXYFC_NS_DIRECTED_SHIFT 10
+#define I40E_PFPM_PROXYFC_NS_DIRECTED_MASK I40E_MASK(0x1, I40E_PFPM_PROXYFC_NS_DIRECTED_SHIFT)
+#define I40E_PFPM_PROXYFC_MLD_SHIFT 12
+#define I40E_PFPM_PROXYFC_MLD_MASK I40E_MASK(0x1, I40E_PFPM_PROXYFC_MLD_SHIFT)
+
+#define I40E_PFPM_PROXYS 0x00245B80 /* Reset: POR */
+#define I40E_PFPM_PROXYS_EX_SHIFT 1
+#define I40E_PFPM_PROXYS_EX_MASK I40E_MASK(0x1, I40E_PFPM_PROXYS_EX_SHIFT)
+#define I40E_PFPM_PROXYS_ARP_SHIFT 4
+#define I40E_PFPM_PROXYS_ARP_MASK I40E_MASK(0x1, I40E_PFPM_PROXYS_ARP_SHIFT)
+#define I40E_PFPM_PROXYS_ARP_DIRECTED_SHIFT 5
+#define I40E_PFPM_PROXYS_ARP_DIRECTED_MASK I40E_MASK(0x1, I40E_PFPM_PROXYS_ARP_DIRECTED_SHIFT)
+#define I40E_PFPM_PROXYS_NS_SHIFT 9
+#define I40E_PFPM_PROXYS_NS_MASK I40E_MASK(0x1, I40E_PFPM_PROXYS_NS_SHIFT)
+#define I40E_PFPM_PROXYS_NS_DIRECTED_SHIFT 10
+#define I40E_PFPM_PROXYS_NS_DIRECTED_MASK I40E_MASK(0x1, I40E_PFPM_PROXYS_NS_DIRECTED_SHIFT)
+#define I40E_PFPM_PROXYS_MLD_SHIFT 12
+#define I40E_PFPM_PROXYS_MLD_MASK I40E_MASK(0x1, I40E_PFPM_PROXYS_MLD_SHIFT)
+
+/* VF - Admin Queue */
+
+/* VF - General Registers */
+
+/* VF - Interrupts */
+
+#define I40E_VFINT_ITR0_STAT1(_i) (0x00004400 + ((_i) * 4)) /* _i=0...2 */ /* Reset: VFR */
+#define I40E_VFINT_ITR0_STAT1_MAX_INDEX 2
+#define I40E_VFINT_ITR0_STAT1_ITR_EXPIRE_SHIFT 0
+#define I40E_VFINT_ITR0_STAT1_ITR_EXPIRE_MASK I40E_MASK(0x1, I40E_VFINT_ITR0_STAT1_ITR_EXPIRE_SHIFT)
+#define I40E_VFINT_ITR0_STAT1_EVENT_SHIFT 1
+#define I40E_VFINT_ITR0_STAT1_EVENT_MASK I40E_MASK(0x1, I40E_VFINT_ITR0_STAT1_EVENT_SHIFT)
+#define I40E_VFINT_ITR0_STAT1_ITR_TIME_SHIFT 2
+#define I40E_VFINT_ITR0_STAT1_ITR_TIME_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR0_STAT1_ITR_TIME_SHIFT)
+
+#define I40E_VFINT_ITRN_STAT1(_i, _INTVF) (0x00003000 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */
+#define I40E_VFINT_ITRN_STAT1_MAX_INDEX 2
+#define I40E_VFINT_ITRN_STAT1_ITR_EXPIRE_SHIFT 0
+#define I40E_VFINT_ITRN_STAT1_ITR_EXPIRE_MASK I40E_MASK(0x1, I40E_VFINT_ITRN_STAT1_ITR_EXPIRE_SHIFT)
+#define I40E_VFINT_ITRN_STAT1_EVENT_SHIFT 1
+#define I40E_VFINT_ITRN_STAT1_EVENT_MASK I40E_MASK(0x1, I40E_VFINT_ITRN_STAT1_EVENT_SHIFT)
+#define I40E_VFINT_ITRN_STAT1_ITR_TIME_SHIFT 2
+#define I40E_VFINT_ITRN_STAT1_ITR_TIME_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_STAT1_ITR_TIME_SHIFT)
+
+#define I40E_VFINT_RATE0_STAT1 0x00005800 /* Reset: VFR */
+#define I40E_VFINT_RATE0_STAT1_CREDIT_SHIFT 0
+#define I40E_VFINT_RATE0_STAT1_CREDIT_MASK I40E_MASK(0xF, I40E_VFINT_RATE0_STAT1_CREDIT_SHIFT)
+#define I40E_VFINT_RATE0_STAT1_INTRL_TIME_SHIFT 4
+#define I40E_VFINT_RATE0_STAT1_INTRL_TIME_MASK I40E_MASK(0x3F, I40E_VFINT_RATE0_STAT1_INTRL_TIME_SHIFT)
+
+#define I40E_VFINT_RATEN_STAT1(_INTVF) (0x00004000 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */
+#define I40E_VFINT_RATEN_STAT1_MAX_INDEX 15
+#define I40E_VFINT_RATEN_STAT1_CREDIT_SHIFT 0
+#define I40E_VFINT_RATEN_STAT1_CREDIT_MASK I40E_MASK(0xF, I40E_VFINT_RATEN_STAT1_CREDIT_SHIFT)
+#define I40E_VFINT_RATEN_STAT1_INTRL_TIME_SHIFT 4
+#define I40E_VFINT_RATEN_STAT1_INTRL_TIME_MASK I40E_MASK(0x3F, I40E_VFINT_RATEN_STAT1_INTRL_TIME_SHIFT)
+
+/* VF - LAN Transmit Receive Registers */
+
+/* VF - MSI-X Table Registers */
+
+/* VF - PE Registers */
+
+/* VF - Rx Filters Registers */
+
+#define I40E_VPQF_DDPCNT 0x0000C800 /* Reset: CORER */
+#define I40E_VPQF_DDPCNT_DDP_CNT_SHIFT 0
+#define I40E_VPQF_DDPCNT_DDP_CNT_MASK I40E_MASK(0x1FFF, I40E_VPQF_DDPCNT_DDP_CNT_SHIFT)
+
+/* VF - Time Sync Registers */
+
+#define I40E_PRTTSYN_VFTIME_H1 0x0000E020 /* Reset: GLOBR */
+#define I40E_PRTTSYN_VFTIME_H1_TSYNTIME_H_SHIFT 0
+#define I40E_PRTTSYN_VFTIME_H1_TSYNTIME_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_VFTIME_H1_TSYNTIME_H_SHIFT)
+
+#define I40E_PRTTSYN_VFTIME_L1 0x0000E000 /* Reset: GLOBR */
+#define I40E_PRTTSYN_VFTIME_L1_TSYNTIME_L_SHIFT 0
+#define I40E_PRTTSYN_VFTIME_L1_TSYNTIME_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_VFTIME_L1_TSYNTIME_L_SHIFT)
+
+/* Used in A0 code flow */
+#define I40E_GLHMC_PEXFMAX 0x000C2048
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK (0x3FFFFFF << I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT)
+#endif
diff --git a/sys/dev/ixl/i40e_status.h b/sys/dev/ixl/i40e_status.h
new file mode 100755
index 0000000..24d5e6b
--- /dev/null
+++ b/sys/dev/ixl/i40e_status.h
@@ -0,0 +1,108 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2014, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _I40E_STATUS_H_
+#define _I40E_STATUS_H_
+
+/* Error Codes */
+enum i40e_status_code {
+ I40E_SUCCESS = 0,
+ I40E_ERR_NVM = -1,
+ I40E_ERR_NVM_CHECKSUM = -2,
+ I40E_ERR_PHY = -3,
+ I40E_ERR_CONFIG = -4,
+ I40E_ERR_PARAM = -5,
+ I40E_ERR_MAC_TYPE = -6,
+ I40E_ERR_UNKNOWN_PHY = -7,
+ I40E_ERR_LINK_SETUP = -8,
+ I40E_ERR_ADAPTER_STOPPED = -9,
+ I40E_ERR_INVALID_MAC_ADDR = -10,
+ I40E_ERR_DEVICE_NOT_SUPPORTED = -11,
+ I40E_ERR_MASTER_REQUESTS_PENDING = -12,
+ I40E_ERR_INVALID_LINK_SETTINGS = -13,
+ I40E_ERR_AUTONEG_NOT_COMPLETE = -14,
+ I40E_ERR_RESET_FAILED = -15,
+ I40E_ERR_SWFW_SYNC = -16,
+ I40E_ERR_NO_AVAILABLE_VSI = -17,
+ I40E_ERR_NO_MEMORY = -18,
+ I40E_ERR_BAD_PTR = -19,
+ I40E_ERR_RING_FULL = -20,
+ I40E_ERR_INVALID_PD_ID = -21,
+ I40E_ERR_INVALID_QP_ID = -22,
+ I40E_ERR_INVALID_CQ_ID = -23,
+ I40E_ERR_INVALID_CEQ_ID = -24,
+ I40E_ERR_INVALID_AEQ_ID = -25,
+ I40E_ERR_INVALID_SIZE = -26,
+ I40E_ERR_INVALID_ARP_INDEX = -27,
+ I40E_ERR_INVALID_FPM_FUNC_ID = -28,
+ I40E_ERR_QP_INVALID_MSG_SIZE = -29,
+ I40E_ERR_QP_TOOMANY_WRS_POSTED = -30,
+ I40E_ERR_INVALID_FRAG_COUNT = -31,
+ I40E_ERR_QUEUE_EMPTY = -32,
+ I40E_ERR_INVALID_ALIGNMENT = -33,
+ I40E_ERR_FLUSHED_QUEUE = -34,
+ I40E_ERR_INVALID_PUSH_PAGE_INDEX = -35,
+ I40E_ERR_INVALID_IMM_DATA_SIZE = -36,
+ I40E_ERR_TIMEOUT = -37,
+ I40E_ERR_OPCODE_MISMATCH = -38,
+ I40E_ERR_CQP_COMPL_ERROR = -39,
+ I40E_ERR_INVALID_VF_ID = -40,
+ I40E_ERR_INVALID_HMCFN_ID = -41,
+ I40E_ERR_BACKING_PAGE_ERROR = -42,
+ I40E_ERR_NO_PBLCHUNKS_AVAILABLE = -43,
+ I40E_ERR_INVALID_PBLE_INDEX = -44,
+ I40E_ERR_INVALID_SD_INDEX = -45,
+ I40E_ERR_INVALID_PAGE_DESC_INDEX = -46,
+ I40E_ERR_INVALID_SD_TYPE = -47,
+ I40E_ERR_MEMCPY_FAILED = -48,
+ I40E_ERR_INVALID_HMC_OBJ_INDEX = -49,
+ I40E_ERR_INVALID_HMC_OBJ_COUNT = -50,
+ I40E_ERR_INVALID_SRQ_ARM_LIMIT = -51,
+ I40E_ERR_SRQ_ENABLED = -52,
+ I40E_ERR_ADMIN_QUEUE_ERROR = -53,
+ I40E_ERR_ADMIN_QUEUE_TIMEOUT = -54,
+ I40E_ERR_BUF_TOO_SHORT = -55,
+ I40E_ERR_ADMIN_QUEUE_FULL = -56,
+ I40E_ERR_ADMIN_QUEUE_NO_WORK = -57,
+ I40E_ERR_BAD_IWARP_CQE = -58,
+ I40E_ERR_NVM_BLANK_MODE = -59,
+ I40E_ERR_NOT_IMPLEMENTED = -60,
+ I40E_ERR_PE_DOORBELL_NOT_ENABLED = -61,
+ I40E_ERR_DIAG_TEST_FAILED = -62,
+ I40E_ERR_NOT_READY = -63,
+ I40E_NOT_SUPPORTED = -64,
+ I40E_ERR_FIRMWARE_API_VERSION = -65,
+};
+
+#endif /* _I40E_STATUS_H_ */
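
Most of the shared-code entry points added by this commit report their result through the enum above, with I40E_SUCCESS as the only non-error value. A minimal sketch of the calling convention, assuming the usual struct i40e_hw *hw and device_t dev locals and the i40e_init_adminq() prototype from i40e_prototype.h:

	enum i40e_status_code status;

	status = i40e_init_adminq(hw);
	if (status != I40E_SUCCESS)
		device_printf(dev, "Admin queue init failed: %d\n", status);
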
diff --git a/sys/dev/ixl/i40e_type.h b/sys/dev/ixl/i40e_type.h
new file mode 100755
index 0000000..c1d13f2
--- /dev/null
+++ b/sys/dev/ixl/i40e_type.h
@@ -0,0 +1,1422 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2014, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _I40E_TYPE_H_
+#define _I40E_TYPE_H_
+
+#include "i40e_status.h"
+#include "i40e_osdep.h"
+#include "i40e_register.h"
+#include "i40e_adminq.h"
+#include "i40e_hmc.h"
+#include "i40e_lan_hmc.h"
+
+#define UNREFERENCED_XPARAMETER
+
+/* Vendor ID */
+#define I40E_INTEL_VENDOR_ID 0x8086
+
+/* Device IDs */
+#define I40E_DEV_ID_SFP_XL710 0x1572
+#define I40E_DEV_ID_QEMU 0x1574
+#define I40E_DEV_ID_KX_A 0x157F
+#define I40E_DEV_ID_KX_B 0x1580
+#define I40E_DEV_ID_KX_C 0x1581
+#define I40E_DEV_ID_QSFP_A 0x1583
+#define I40E_DEV_ID_QSFP_B 0x1584
+#define I40E_DEV_ID_QSFP_C 0x1585
+#define I40E_DEV_ID_10G_BASE_T 0x1586
+#define I40E_DEV_ID_VF 0x154C
+#define I40E_DEV_ID_VF_HV 0x1571
+
+#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
+ (d) == I40E_DEV_ID_QSFP_B || \
+ (d) == I40E_DEV_ID_QSFP_C)
+
+#ifndef I40E_MASK
+/* I40E_MASK is a macro used on 32 bit registers */
+#define I40E_MASK(mask, shift) (mask << shift)
+#endif
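
Each register in these headers is described by an address plus _SHIFT/_MASK pairs built with this macro, so field access is a plain read-modify-write. A hedged sketch of reading one field, using the I40E_PF_VT_PFALLOC_CSR FIRSTVF definitions shown earlier in this diff purely as an example and assuming the rd32() accessor from i40e_osdep.h:

	u32 reg, firstvf;

	reg = rd32(hw, I40E_PF_VT_PFALLOC_CSR);
	firstvf = (reg & I40E_PF_VT_PFALLOC_CSR_FIRSTVF_MASK) >>
	    I40E_PF_VT_PFALLOC_CSR_FIRSTVF_SHIFT;

Writes to writable registers use the inverse pattern: clear the field with ~_MASK, then OR in I40E_MASK(value, _SHIFT).
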
+
+#define I40E_MAX_PF 16
+#define I40E_MAX_PF_VSI 64
+#define I40E_MAX_PF_QP 128
+#define I40E_MAX_VSI_QP 16
+#define I40E_MAX_VF_VSI 3
+#define I40E_MAX_CHAINED_RX_BUFFERS 5
+#define I40E_MAX_PF_UDP_OFFLOAD_PORTS 16
+
+/* something less than 1 minute */
+#define I40E_HEARTBEAT_TIMEOUT (HZ * 50)
+
+/* Max default timeout in ms */
+#define I40E_MAX_NVM_TIMEOUT 18000
+
+/* Check whether address is multicast. */
+#define I40E_IS_MULTICAST(address) (bool)(((u8 *)(address))[0] & ((u8)0x01))
+
+/* Check whether an address is broadcast. */
+#define I40E_IS_BROADCAST(address) \
+ ((((u8 *)(address))[0] == ((u8)0xff)) && \
+ (((u8 *)(address))[1] == ((u8)0xff)))
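
Both checks look only at the raw bytes of the address: bit 0 of the first octet marks multicast, and this broadcast test is satisfied once the first two octets are 0xff. For example:

	u8 bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	u8 mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	/* I40E_IS_BROADCAST(bcast) -> TRUE                               */
	/* I40E_IS_MULTICAST(mcast) -> TRUE                               */
	/* I40E_IS_MULTICAST(bcast) -> TRUE (broadcast is also multicast) */
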
+
+/* Switch from ms to the 1usec global time (this is the GTIME resolution) */
+#define I40E_MS_TO_GTIME(time) ((time) * 1000)
+
+/* forward declaration */
+struct i40e_hw;
+typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
+
+#define I40E_ETH_LENGTH_OF_ADDRESS 6
+/* Data type manipulation macros. */
+#define I40E_HI_DWORD(x) ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF))
+#define I40E_LO_DWORD(x) ((u32)((x) & 0xFFFFFFFF))
+
+#define I40E_HI_WORD(x) ((u16)(((x) >> 16) & 0xFFFF))
+#define I40E_LO_WORD(x) ((u16)((x) & 0xFFFF))
+
+#define I40E_HI_BYTE(x) ((u8)(((x) >> 8) & 0xFF))
+#define I40E_LO_BYTE(x) ((u8)((x) & 0xFF))
+
+/* Number of Transmit Descriptors must be a multiple of 8. */
+#define I40E_REQ_TX_DESCRIPTOR_MULTIPLE 8
+/* Number of Receive Descriptors must be a multiple of 32 if
+ * the number of descriptors is greater than 32.
+ */
+#define I40E_REQ_RX_DESCRIPTOR_MULTIPLE 32
+
+#define I40E_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
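
The ternary handles index wrap-around, and the trailing "- 1" keeps one descriptor permanently unused so that next_to_use == next_to_clean always means an empty ring rather than a full one. Worked numbers for a hypothetical 512-entry ring:

	/*
	 * count = 512:
	 *   next_to_clean = 10, next_to_use = 4  (use wrapped past the end):
	 *       0   + 10 - 4  - 1 = 5 descriptors unused
	 *   next_to_clean = 4,  next_to_use = 10 (no wrap):
	 *       512 + 4  - 10 - 1 = 505 descriptors unused
	 */
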
+
+/* bitfields for Tx queue mapping in QTX_CTL */
+#define I40E_QTX_CTL_VF_QUEUE 0x0
+#define I40E_QTX_CTL_VM_QUEUE 0x1
+#define I40E_QTX_CTL_PF_QUEUE 0x2
+
+/* debug masks - set these bits in hw->debug_mask to control output */
+enum i40e_debug_mask {
+ I40E_DEBUG_INIT = 0x00000001,
+ I40E_DEBUG_RELEASE = 0x00000002,
+
+ I40E_DEBUG_LINK = 0x00000010,
+ I40E_DEBUG_PHY = 0x00000020,
+ I40E_DEBUG_HMC = 0x00000040,
+ I40E_DEBUG_NVM = 0x00000080,
+ I40E_DEBUG_LAN = 0x00000100,
+ I40E_DEBUG_FLOW = 0x00000200,
+ I40E_DEBUG_DCB = 0x00000400,
+ I40E_DEBUG_DIAG = 0x00000800,
+ I40E_DEBUG_FD = 0x00001000,
+
+ I40E_DEBUG_AQ_MESSAGE = 0x01000000,
+ I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
+ I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000,
+ I40E_DEBUG_AQ_COMMAND = 0x06000000,
+ I40E_DEBUG_AQ = 0x0F000000,
+
+ I40E_DEBUG_USER = 0xF0000000,
+
+ I40E_DEBUG_ALL = 0xFFFFFFFF
+};
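
The values are individual bits (plus a few composite masks such as I40E_DEBUG_AQ), so categories are OR'd together into hw->debug_mask, and i40e_debug() from i40e_osdep.h is intended to emit only messages whose category bit is set. A small illustrative sketch:

	/* Trace link events and all admin queue traffic on this port. */
	hw->debug_mask = I40E_DEBUG_LINK | I40E_DEBUG_AQ;

	i40e_debug(hw, I40E_DEBUG_LINK, "link event\n");
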
+
+/* PCI Bus Info */
+#define I40E_PCI_LINK_STATUS 0xB2
+#define I40E_PCI_LINK_WIDTH 0x3F0
+#define I40E_PCI_LINK_WIDTH_1 0x10
+#define I40E_PCI_LINK_WIDTH_2 0x20
+#define I40E_PCI_LINK_WIDTH_4 0x40
+#define I40E_PCI_LINK_WIDTH_8 0x80
+#define I40E_PCI_LINK_SPEED 0xF
+#define I40E_PCI_LINK_SPEED_2500 0x1
+#define I40E_PCI_LINK_SPEED_5000 0x2
+#define I40E_PCI_LINK_SPEED_8000 0x3
+
+/* Memory types */
+enum i40e_memset_type {
+ I40E_NONDMA_MEM = 0,
+ I40E_DMA_MEM
+};
+
+/* Memcpy types */
+enum i40e_memcpy_type {
+ I40E_NONDMA_TO_NONDMA = 0,
+ I40E_NONDMA_TO_DMA,
+ I40E_DMA_TO_DMA,
+ I40E_DMA_TO_NONDMA
+};
+
+/* These are structs for managing the hardware information and the operations.
+ * The structures of function pointers are filled out at init time when we
+ * know for sure exactly which hardware we're working with. This gives us the
+ * flexibility of using the same main driver code but adapting to slightly
+ * different hardware needs as new parts are developed. For this architecture,
+ * the Firmware and AdminQ are intended to insulate the driver from most of the
+ * future changes, but these structures will also do part of the job.
+ */
+enum i40e_mac_type {
+ I40E_MAC_UNKNOWN = 0,
+ I40E_MAC_X710,
+ I40E_MAC_XL710,
+ I40E_MAC_VF,
+ I40E_MAC_GENERIC,
+};
+
+enum i40e_media_type {
+ I40E_MEDIA_TYPE_UNKNOWN = 0,
+ I40E_MEDIA_TYPE_FIBER,
+ I40E_MEDIA_TYPE_BASET,
+ I40E_MEDIA_TYPE_BACKPLANE,
+ I40E_MEDIA_TYPE_CX4,
+ I40E_MEDIA_TYPE_DA,
+ I40E_MEDIA_TYPE_VIRTUAL
+};
+
+enum i40e_fc_mode {
+ I40E_FC_NONE = 0,
+ I40E_FC_RX_PAUSE,
+ I40E_FC_TX_PAUSE,
+ I40E_FC_FULL,
+ I40E_FC_PFC,
+ I40E_FC_DEFAULT
+};
+
+enum i40e_set_fc_aq_failures {
+ I40E_SET_FC_AQ_FAIL_NONE = 0,
+ I40E_SET_FC_AQ_FAIL_GET = 1,
+ I40E_SET_FC_AQ_FAIL_SET = 2,
+ I40E_SET_FC_AQ_FAIL_UPDATE = 4,
+ I40E_SET_FC_AQ_FAIL_SET_UPDATE = 6
+};
+
+enum i40e_vsi_type {
+ I40E_VSI_MAIN = 0,
+ I40E_VSI_VMDQ1,
+ I40E_VSI_VMDQ2,
+ I40E_VSI_CTRL,
+ I40E_VSI_FCOE,
+ I40E_VSI_MIRROR,
+ I40E_VSI_SRIOV,
+ I40E_VSI_FDIR,
+ I40E_VSI_TYPE_UNKNOWN
+};
+
+enum i40e_queue_type {
+ I40E_QUEUE_TYPE_RX = 0,
+ I40E_QUEUE_TYPE_TX,
+ I40E_QUEUE_TYPE_PE_CEQ,
+ I40E_QUEUE_TYPE_UNKNOWN
+};
+
+struct i40e_link_status {
+ enum i40e_aq_phy_type phy_type;
+ enum i40e_aq_link_speed link_speed;
+ u8 link_info;
+ u8 an_info;
+ u8 ext_info;
+ u8 loopback;
+ bool an_enabled;
+ /* is Link Status Event notification to SW enabled */
+ bool lse_enable;
+ u16 max_frame_size;
+ bool crc_enable;
+ u8 pacing;
+};
+
+struct i40e_phy_info {
+ struct i40e_link_status link_info;
+ struct i40e_link_status link_info_old;
+ u32 autoneg_advertised;
+ u32 phy_id;
+ u32 module_type;
+ bool get_link_info;
+ enum i40e_media_type media_type;
+};
+
+#define I40E_HW_CAP_MAX_GPIO 30
+#define I40E_HW_CAP_MDIO_PORT_MODE_MDIO 0
+#define I40E_HW_CAP_MDIO_PORT_MODE_I2C 1
+
+/* Capabilities of a PF or a VF or the whole device */
+struct i40e_hw_capabilities {
+ u32 switch_mode;
+#define I40E_NVM_IMAGE_TYPE_EVB 0x0
+#define I40E_NVM_IMAGE_TYPE_CLOUD 0x2
+#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3
+
+ u32 management_mode;
+ u32 npar_enable;
+ u32 os2bmc;
+ u32 valid_functions;
+ bool sr_iov_1_1;
+ bool vmdq;
+ bool evb_802_1_qbg; /* Edge Virtual Bridging */
+ bool evb_802_1_qbh; /* Bridge Port Extension */
+ bool dcb;
+ bool fcoe;
+ bool mfp_mode_1;
+ bool mgmt_cem;
+ bool ieee_1588;
+ bool iwarp;
+ bool fd;
+ u32 fd_filters_guaranteed;
+ u32 fd_filters_best_effort;
+ bool rss;
+ u32 rss_table_size;
+ u32 rss_table_entry_width;
+ bool led[I40E_HW_CAP_MAX_GPIO];
+ bool sdp[I40E_HW_CAP_MAX_GPIO];
+ u32 nvm_image_type;
+ u32 num_flow_director_filters;
+ u32 num_vfs;
+ u32 vf_base_id;
+ u32 num_vsis;
+ u32 num_rx_qp;
+ u32 num_tx_qp;
+ u32 base_queue;
+ u32 num_msix_vectors;
+ u32 num_msix_vectors_vf;
+ u32 led_pin_num;
+ u32 sdp_pin_num;
+ u32 mdio_port_num;
+ u32 mdio_port_mode;
+ u8 rx_buf_chain_len;
+ u32 enabled_tcmap;
+ u32 maxtc;
+};
+
+struct i40e_mac_info {
+ enum i40e_mac_type type;
+ u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 perm_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 san_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 port_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u16 max_fcoeq;
+};
+
+enum i40e_aq_resources_ids {
+ I40E_NVM_RESOURCE_ID = 1
+};
+
+enum i40e_aq_resource_access_type {
+ I40E_RESOURCE_READ = 1,
+ I40E_RESOURCE_WRITE
+};
+
+struct i40e_nvm_info {
+ u64 hw_semaphore_timeout; /* 2usec global time (GTIME resolution) */
+ u64 hw_semaphore_wait; /* - || - */
+ u32 timeout; /* [ms] */
+ u16 sr_size; /* Shadow RAM size in words */
+ bool blank_nvm_mode; /* is NVM empty (no FW present)*/
+ u16 version; /* NVM package version */
+ u32 eetrack; /* NVM data version */
+};
+
+/* definitions used in NVM update support */
+
+enum i40e_nvmupd_cmd {
+ I40E_NVMUPD_INVALID,
+ I40E_NVMUPD_READ_CON,
+ I40E_NVMUPD_READ_SNT,
+ I40E_NVMUPD_READ_LCB,
+ I40E_NVMUPD_READ_SA,
+ I40E_NVMUPD_WRITE_ERA,
+ I40E_NVMUPD_WRITE_CON,
+ I40E_NVMUPD_WRITE_SNT,
+ I40E_NVMUPD_WRITE_LCB,
+ I40E_NVMUPD_WRITE_SA,
+ I40E_NVMUPD_CSUM_CON,
+ I40E_NVMUPD_CSUM_SA,
+ I40E_NVMUPD_CSUM_LCB,
+};
+
+enum i40e_nvmupd_state {
+ I40E_NVMUPD_STATE_INIT,
+ I40E_NVMUPD_STATE_READING,
+ I40E_NVMUPD_STATE_WRITING
+};
+
+/* nvm_access definition and its masks/shifts need to be accessible to
+ * application, core driver, and shared code. Where is the right file?
+ */
+#define I40E_NVM_READ 0xB
+#define I40E_NVM_WRITE 0xC
+
+#define I40E_NVM_MOD_PNT_MASK 0xFF
+
+#define I40E_NVM_TRANS_SHIFT 8
+#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT)
+#define I40E_NVM_CON 0x0
+#define I40E_NVM_SNT 0x1
+#define I40E_NVM_LCB 0x2
+#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB)
+#define I40E_NVM_ERA 0x4
+#define I40E_NVM_CSUM 0x8
+
+#define I40E_NVM_ADAPT_SHIFT 16
+#define I40E_NVM_ADAPT_MASK (0xffffULL << I40E_NVM_ADAPT_SHIFT)
+
+#define I40E_NVMUPD_MAX_DATA 4096
+#define I40E_NVMUPD_IFACE_TIMEOUT 2 /* seconds */
+
+struct i40e_nvm_access {
+ u32 command;
+ u32 config;
+ u32 offset; /* in bytes */
+ u32 data_size; /* in bytes */
+ u8 data[1];
+};
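
A request's config word packs a module pointer, a transaction type and adapter-specific data according to the masks above; roughly, the NVM-update path decodes it as sketched here (cmd is an assumed struct i40e_nvm_access pointer):

	u8 module      = (u8)(cmd->config & I40E_NVM_MOD_PNT_MASK);
	u8 transaction = (u8)((cmd->config & I40E_NVM_TRANS_MASK) >>
	    I40E_NVM_TRANS_SHIFT);

	/* e.g. command == I40E_NVM_READ with transaction == I40E_NVM_SA is a
	 * self-contained read: take the NVM resource, read cmd->data_size
	 * bytes at cmd->offset into cmd->data, then release the resource. */
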
+
+/* PCI bus types */
+enum i40e_bus_type {
+ i40e_bus_type_unknown = 0,
+ i40e_bus_type_pci,
+ i40e_bus_type_pcix,
+ i40e_bus_type_pci_express,
+ i40e_bus_type_reserved
+};
+
+/* PCI bus speeds */
+enum i40e_bus_speed {
+ i40e_bus_speed_unknown = 0,
+ i40e_bus_speed_33 = 33,
+ i40e_bus_speed_66 = 66,
+ i40e_bus_speed_100 = 100,
+ i40e_bus_speed_120 = 120,
+ i40e_bus_speed_133 = 133,
+ i40e_bus_speed_2500 = 2500,
+ i40e_bus_speed_5000 = 5000,
+ i40e_bus_speed_8000 = 8000,
+ i40e_bus_speed_reserved
+};
+
+/* PCI bus widths */
+enum i40e_bus_width {
+ i40e_bus_width_unknown = 0,
+ i40e_bus_width_pcie_x1 = 1,
+ i40e_bus_width_pcie_x2 = 2,
+ i40e_bus_width_pcie_x4 = 4,
+ i40e_bus_width_pcie_x8 = 8,
+ i40e_bus_width_32 = 32,
+ i40e_bus_width_64 = 64,
+ i40e_bus_width_reserved
+};
+
+/* Bus parameters */
+struct i40e_bus_info {
+ enum i40e_bus_speed speed;
+ enum i40e_bus_width width;
+ enum i40e_bus_type type;
+
+ u16 func;
+ u16 device;
+ u16 lan_id;
+};
+
+/* Flow control (FC) parameters */
+struct i40e_fc_info {
+ enum i40e_fc_mode current_mode; /* FC mode in effect */
+ enum i40e_fc_mode requested_mode; /* FC mode requested by caller */
+};
+
+#define I40E_MAX_TRAFFIC_CLASS 8
+#define I40E_MAX_USER_PRIORITY 8
+#define I40E_DCBX_MAX_APPS 32
+#define I40E_LLDPDU_SIZE 1500
+
+/* IEEE 802.1Qaz ETS Configuration data */
+struct i40e_ieee_ets_config {
+ u8 willing;
+ u8 cbs;
+ u8 maxtcs;
+ u8 prioritytable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
+};
+
+/* IEEE 802.1Qaz ETS Recommendation data */
+struct i40e_ieee_ets_recommend {
+ u8 prioritytable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
+};
+
+/* IEEE 802.1Qaz PFC Configuration data */
+struct i40e_ieee_pfc_config {
+ u8 willing;
+ u8 mbc;
+ u8 pfccap;
+ u8 pfcenable;
+};
+
+/* IEEE 802.1Qaz Application Priority data */
+struct i40e_ieee_app_priority_table {
+ u8 priority;
+ u8 selector;
+ u16 protocolid;
+};
+
+struct i40e_dcbx_config {
+ u32 numapps;
+ struct i40e_ieee_ets_config etscfg;
+ struct i40e_ieee_ets_recommend etsrec;
+ struct i40e_ieee_pfc_config pfc;
+ struct i40e_ieee_app_priority_table app[I40E_DCBX_MAX_APPS];
+};
+
+/* Port hardware description */
+struct i40e_hw {
+ u8 *hw_addr;
+ void *back;
+
+ /* function pointer structs */
+ struct i40e_phy_info phy;
+ struct i40e_mac_info mac;
+ struct i40e_bus_info bus;
+ struct i40e_nvm_info nvm;
+ struct i40e_fc_info fc;
+
+ /* pci info */
+ u16 device_id;
+ u16 vendor_id;
+ u16 subsystem_device_id;
+ u16 subsystem_vendor_id;
+ u8 revision_id;
+ u8 port;
+ bool adapter_stopped;
+
+ /* capabilities for entire device and PCI func */
+ struct i40e_hw_capabilities dev_caps;
+ struct i40e_hw_capabilities func_caps;
+
+ /* Flow Director shared filter space */
+ u16 fdir_shared_filter_count;
+
+ /* device profile info */
+ u8 pf_id;
+ u16 main_vsi_seid;
+
+ /* Closest numa node to the device */
+ u16 numa_node;
+
+ /* Admin Queue info */
+ struct i40e_adminq_info aq;
+
+ /* state of nvm update process */
+ enum i40e_nvmupd_state nvmupd_state;
+
+ /* HMC info */
+ struct i40e_hmc_info hmc; /* HMC info struct */
+
+ /* LLDP/DCBX Status */
+ u16 dcbx_status;
+
+ /* DCBX info */
+ struct i40e_dcbx_config local_dcbx_config;
+ struct i40e_dcbx_config remote_dcbx_config;
+
+ /* debug mask */
+ u32 debug_mask;
+};
+#define i40e_is_vf(_hw) ((_hw)->mac.type == I40E_MAC_VF)
+
+struct i40e_driver_version {
+ u8 major_version;
+ u8 minor_version;
+ u8 build_version;
+ u8 subbuild_version;
+ u8 driver_string[32];
+};
+
+/* RX Descriptors */
+union i40e_16byte_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+ struct {
+ union {
+ __le16 mirroring_status;
+ __le16 fcoe_ctx_id;
+ } mirr_fcoe;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ __le32 fd_id; /* Flow director filter id */
+ __le32 fcoe_param; /* FCoE DDP Context id */
+ } hi_dword;
+ } qword0;
+ struct {
+ /* ext status/error/pktype/length */
+ __le64 status_error_len;
+ } qword1;
+ } wb; /* writeback */
+};
+
+union i40e_32byte_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ /* bit 0 of hdr_buffer_addr is DD bit */
+ __le64 rsvd1;
+ __le64 rsvd2;
+ } read;
+ struct {
+ struct {
+ struct {
+ union {
+ __le16 mirroring_status;
+ __le16 fcoe_ctx_id;
+ } mirr_fcoe;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ __le32 fcoe_param; /* FCoE DDP Context id */
+ /* Flow director filter id in case of
+ * Programming status desc WB
+ */
+ __le32 fd_id;
+ } hi_dword;
+ } qword0;
+ struct {
+ /* status/error/pktype/length */
+ __le64 status_error_len;
+ } qword1;
+ struct {
+ __le16 ext_status; /* extended status */
+ __le16 rsvd;
+ __le16 l2tag2_1;
+ __le16 l2tag2_2;
+ } qword2;
+ struct {
+ union {
+ __le32 flex_bytes_lo;
+ __le32 pe_status;
+ } lo_dword;
+ union {
+ __le32 flex_bytes_hi;
+ __le32 fd_id;
+ } hi_dword;
+ } qword3;
+ } wb; /* writeback */
+};
+
+#define I40E_RXD_QW0_MIRROR_STATUS_SHIFT 8
+#define I40E_RXD_QW0_MIRROR_STATUS_MASK (0x3FUL << \
+ I40E_RXD_QW0_MIRROR_STATUS_SHIFT)
+#define I40E_RXD_QW0_FCOEINDX_SHIFT 0
+#define I40E_RXD_QW0_FCOEINDX_MASK (0xFFFUL << \
+ I40E_RXD_QW0_FCOEINDX_SHIFT)
+
+enum i40e_rx_desc_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_STATUS_DD_SHIFT = 0,
+ I40E_RX_DESC_STATUS_EOF_SHIFT = 1,
+ I40E_RX_DESC_STATUS_L2TAG1P_SHIFT = 2,
+ I40E_RX_DESC_STATUS_L3L4P_SHIFT = 3,
+ I40E_RX_DESC_STATUS_CRCP_SHIFT = 4,
+ I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
+ I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
+ I40E_RX_DESC_STATUS_PIF_SHIFT = 8,
+ I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
+ I40E_RX_DESC_STATUS_FLM_SHIFT = 11,
+ I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */
+ I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
+ I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
+ I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
+ I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18,
+ I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
+};
+
+#define I40E_RXD_QW1_STATUS_SHIFT 0
+#define I40E_RXD_QW1_STATUS_MASK (((1 << I40E_RX_DESC_STATUS_LAST) - 1) << \
+ I40E_RXD_QW1_STATUS_SHIFT)
+
+#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
+#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
+ I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
+
+#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
+#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK (0x1UL << \
+ I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+
+#define I40E_RXD_QW1_STATUS_UMBCAST_SHIFT I40E_RX_DESC_STATUS_UMBCAST_SHIFT
+#define I40E_RXD_QW1_STATUS_UMBCAST_MASK (0x3UL << \
+ I40E_RXD_QW1_STATUS_UMBCAST_SHIFT)
+
+enum i40e_rx_desc_fltstat_values {
+ I40E_RX_DESC_FLTSTAT_NO_DATA = 0,
+ I40E_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? FD_ID : RSV */
+ I40E_RX_DESC_FLTSTAT_RSV = 2,
+ I40E_RX_DESC_FLTSTAT_RSS_HASH = 3,
+};
+
+#define I40E_RXD_PACKET_TYPE_UNICAST 0
+#define I40E_RXD_PACKET_TYPE_MULTICAST 1
+#define I40E_RXD_PACKET_TYPE_BROADCAST 2
+#define I40E_RXD_PACKET_TYPE_MIRRORED 3
+
+#define I40E_RXD_QW1_ERROR_SHIFT 19
+#define I40E_RXD_QW1_ERROR_MASK (0xFFUL << I40E_RXD_QW1_ERROR_SHIFT)
+
+enum i40e_rx_desc_error_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_ERROR_RXE_SHIFT = 0,
+ I40E_RX_DESC_ERROR_RECIPE_SHIFT = 1,
+ I40E_RX_DESC_ERROR_HBO_SHIFT = 2,
+ I40E_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */
+ I40E_RX_DESC_ERROR_IPE_SHIFT = 3,
+ I40E_RX_DESC_ERROR_L4E_SHIFT = 4,
+ I40E_RX_DESC_ERROR_EIPE_SHIFT = 5,
+ I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6,
+ I40E_RX_DESC_ERROR_PPRS_SHIFT = 7
+};
+
+enum i40e_rx_desc_error_l3l4e_fcoe_masks {
+ I40E_RX_DESC_ERROR_L3L4E_NONE = 0,
+ I40E_RX_DESC_ERROR_L3L4E_PROT = 1,
+ I40E_RX_DESC_ERROR_L3L4E_FC = 2,
+ I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR = 3,
+ I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4
+};
+
+#define I40E_RXD_QW1_PTYPE_SHIFT 30
+#define I40E_RXD_QW1_PTYPE_MASK (0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT)
+
+/* Packet type non-ip values */
+enum i40e_rx_l2_ptype {
+ I40E_RX_PTYPE_L2_RESERVED = 0,
+ I40E_RX_PTYPE_L2_MAC_PAY2 = 1,
+ I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
+ I40E_RX_PTYPE_L2_FIP_PAY2 = 3,
+ I40E_RX_PTYPE_L2_OUI_PAY2 = 4,
+ I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
+ I40E_RX_PTYPE_L2_LLDP_PAY2 = 6,
+ I40E_RX_PTYPE_L2_ECP_PAY2 = 7,
+ I40E_RX_PTYPE_L2_EVB_PAY2 = 8,
+ I40E_RX_PTYPE_L2_QCN_PAY2 = 9,
+ I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10,
+ I40E_RX_PTYPE_L2_ARP = 11,
+ I40E_RX_PTYPE_L2_FCOE_PAY3 = 12,
+ I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
+ I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
+ I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
+ I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
+ I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21,
+ I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58,
+ I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87,
+ I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124,
+ I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153
+};
+
+struct i40e_rx_ptype_decoded {
+ u32 ptype:8;
+ u32 known:1;
+ u32 outer_ip:1;
+ u32 outer_ip_ver:1;
+ u32 outer_frag:1;
+ u32 tunnel_type:3;
+ u32 tunnel_end_prot:2;
+ u32 tunnel_end_frag:1;
+ u32 inner_prot:4;
+ u32 payload_layer:3;
+};
+
+enum i40e_rx_ptype_outer_ip {
+ I40E_RX_PTYPE_OUTER_L2 = 0,
+ I40E_RX_PTYPE_OUTER_IP = 1
+};
+
+enum i40e_rx_ptype_outer_ip_ver {
+ I40E_RX_PTYPE_OUTER_NONE = 0,
+ I40E_RX_PTYPE_OUTER_IPV4 = 0,
+ I40E_RX_PTYPE_OUTER_IPV6 = 1
+};
+
+enum i40e_rx_ptype_outer_fragmented {
+ I40E_RX_PTYPE_NOT_FRAG = 0,
+ I40E_RX_PTYPE_FRAG = 1
+};
+
+enum i40e_rx_ptype_tunnel_type {
+ I40E_RX_PTYPE_TUNNEL_NONE = 0,
+ I40E_RX_PTYPE_TUNNEL_IP_IP = 1,
+ I40E_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
+ I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
+ I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
+};
+
+enum i40e_rx_ptype_tunnel_end_prot {
+ I40E_RX_PTYPE_TUNNEL_END_NONE = 0,
+ I40E_RX_PTYPE_TUNNEL_END_IPV4 = 1,
+ I40E_RX_PTYPE_TUNNEL_END_IPV6 = 2,
+};
+
+enum i40e_rx_ptype_inner_prot {
+ I40E_RX_PTYPE_INNER_PROT_NONE = 0,
+ I40E_RX_PTYPE_INNER_PROT_UDP = 1,
+ I40E_RX_PTYPE_INNER_PROT_TCP = 2,
+ I40E_RX_PTYPE_INNER_PROT_SCTP = 3,
+ I40E_RX_PTYPE_INNER_PROT_ICMP = 4,
+ I40E_RX_PTYPE_INNER_PROT_TIMESYNC = 5
+};
+
+enum i40e_rx_ptype_payload_layer {
+ I40E_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
+ I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
+ I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
+ I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
+};
+
+#define I40E_RX_PTYPE_BIT_MASK 0x0FFFFFFF
+#define I40E_RX_PTYPE_SHIFT 56
+
+#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38
+#define I40E_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT)
+
+#define I40E_RXD_QW1_LENGTH_HBUF_SHIFT 52
+#define I40E_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \
+ I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
+
+#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63
+#define I40E_RXD_QW1_LENGTH_SPH_MASK (0x1ULL << \
+ I40E_RXD_QW1_LENGTH_SPH_SHIFT)
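+
+/* Illustrative sketch (editor's example, not part of the shared code):
+ * decoding the writeback qword1 of an RX descriptor with the masks and
+ * shifts defined above.  The helper returns whether the DD (descriptor
+ * done) bit is set and extracts the packet type and packet buffer
+ * length; the name and parameters are hypothetical.
+ */
+static inline bool
+i40e_example_rx_desc_done(u64 qword1, u32 *ptype, u32 *plen)
+{
+    u32 status = (u32)((qword1 & I40E_RXD_QW1_STATUS_MASK) >>
+        I40E_RXD_QW1_STATUS_SHIFT);
+
+    *ptype = (u32)((qword1 & I40E_RXD_QW1_PTYPE_MASK) >>
+        I40E_RXD_QW1_PTYPE_SHIFT);
+    *plen = (u32)((qword1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+        I40E_RXD_QW1_LENGTH_PBUF_SHIFT);
+    return ((status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) != 0);
+}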
+
+#define I40E_RXD_QW1_NEXTP_SHIFT 38
+#define I40E_RXD_QW1_NEXTP_MASK (0x1FFFULL << I40E_RXD_QW1_NEXTP_SHIFT)
+
+#define I40E_RXD_QW2_EXT_STATUS_SHIFT 0
+#define I40E_RXD_QW2_EXT_STATUS_MASK (0xFFFFFUL << \
+ I40E_RXD_QW2_EXT_STATUS_SHIFT)
+
+enum i40e_rx_desc_ext_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT = 0,
+ I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1,
+ I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */
+ I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */
+ I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9,
+ I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10,
+ I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
+};
+
+#define I40E_RXD_QW2_L2TAG2_SHIFT 0
+#define I40E_RXD_QW2_L2TAG2_MASK (0xFFFFUL << I40E_RXD_QW2_L2TAG2_SHIFT)
+
+#define I40E_RXD_QW2_L2TAG3_SHIFT 16
+#define I40E_RXD_QW2_L2TAG3_MASK (0xFFFFUL << I40E_RXD_QW2_L2TAG3_SHIFT)
+
+enum i40e_rx_desc_pe_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */
+ I40E_RX_DESC_PE_STATUS_L4PORT_SHIFT = 0, /* 16 BITS */
+ I40E_RX_DESC_PE_STATUS_IPINDEX_SHIFT = 16, /* 8 BITS */
+ I40E_RX_DESC_PE_STATUS_QPIDHIT_SHIFT = 24,
+ I40E_RX_DESC_PE_STATUS_APBVTHIT_SHIFT = 25,
+ I40E_RX_DESC_PE_STATUS_PORTV_SHIFT = 26,
+ I40E_RX_DESC_PE_STATUS_URG_SHIFT = 27,
+ I40E_RX_DESC_PE_STATUS_IPFRAG_SHIFT = 28,
+ I40E_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29
+};
+
+#define I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38
+#define I40E_RX_PROG_STATUS_DESC_LENGTH 0x2000000
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2
+#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK (0x7UL << \
+ I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT)
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_STATUS_SHIFT 0
+#define I40E_RX_PROG_STATUS_DESC_QW1_STATUS_MASK (0x7FFFUL << \
+ I40E_RX_PROG_STATUS_DESC_QW1_STATUS_SHIFT)
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT 19
+#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK (0x3FUL << \
+ I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT)
+
+enum i40e_rx_prog_status_desc_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_PROG_STATUS_DESC_DD_SHIFT = 0,
+ I40E_RX_PROG_STATUS_DESC_PROG_ID_SHIFT = 2 /* 3 BITS */
+};
+
+enum i40e_rx_prog_status_desc_prog_id_masks {
+ I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS = 1,
+ I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS = 2,
+ I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS = 4,
+};
+
+enum i40e_rx_prog_status_desc_error_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0,
+ I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1,
+ I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2,
+ I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3
+};
+
+#define I40E_TWO_BIT_MASK 0x3
+#define I40E_THREE_BIT_MASK 0x7
+#define I40E_FOUR_BIT_MASK 0xF
+#define I40E_EIGHTEEN_BIT_MASK 0x3FFFF
+
+/* TX Descriptor */
+struct i40e_tx_desc {
+ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le64 cmd_type_offset_bsz;
+};
+
+#define I40E_TXD_QW1_DTYPE_SHIFT 0
+#define I40E_TXD_QW1_DTYPE_MASK (0xFUL << I40E_TXD_QW1_DTYPE_SHIFT)
+
+enum i40e_tx_desc_dtype_value {
+ I40E_TX_DESC_DTYPE_DATA = 0x0,
+ I40E_TX_DESC_DTYPE_NOP = 0x1, /* same as Context desc */
+ I40E_TX_DESC_DTYPE_CONTEXT = 0x1,
+ I40E_TX_DESC_DTYPE_FCOE_CTX = 0x2,
+ I40E_TX_DESC_DTYPE_FILTER_PROG = 0x8,
+ I40E_TX_DESC_DTYPE_DDP_CTX = 0x9,
+ I40E_TX_DESC_DTYPE_FLEX_DATA = 0xB,
+ I40E_TX_DESC_DTYPE_FLEX_CTX_1 = 0xC,
+ I40E_TX_DESC_DTYPE_FLEX_CTX_2 = 0xD,
+ I40E_TX_DESC_DTYPE_DESC_DONE = 0xF
+};
+
+#define I40E_TXD_QW1_CMD_SHIFT 4
+#define I40E_TXD_QW1_CMD_MASK (0x3FFUL << I40E_TXD_QW1_CMD_SHIFT)
+
+enum i40e_tx_desc_cmd_bits {
+ I40E_TX_DESC_CMD_EOP = 0x0001,
+ I40E_TX_DESC_CMD_RS = 0x0002,
+ I40E_TX_DESC_CMD_ICRC = 0x0004,
+ I40E_TX_DESC_CMD_IL2TAG1 = 0x0008,
+ I40E_TX_DESC_CMD_DUMMY = 0x0010,
+ I40E_TX_DESC_CMD_IIPT_NONIP = 0x0000, /* 2 BITS */
+ I40E_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */
+ I40E_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */
+ I40E_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */
+ I40E_TX_DESC_CMD_FCOET = 0x0080,
+ I40E_TX_DESC_CMD_L4T_EOFT_UNK = 0x0000, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_N = 0x0000, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_T = 0x0100, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI = 0x0200, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_A = 0x0300, /* 2 BITS */
+};
+
+#define I40E_TXD_QW1_OFFSET_SHIFT 16
+#define I40E_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \
+ I40E_TXD_QW1_OFFSET_SHIFT)
+
+enum i40e_tx_desc_length_fields {
+ /* Note: These are predefined bit offsets */
+ I40E_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */
+ I40E_TX_DESC_LENGTH_IPLEN_SHIFT = 7, /* 7 BITS */
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT = 14 /* 4 BITS */
+};
+
+#define I40E_TXD_QW1_MACLEN_MASK (0x7FUL << I40E_TX_DESC_LENGTH_MACLEN_SHIFT)
+#define I40E_TXD_QW1_IPLEN_MASK (0x7FUL << I40E_TX_DESC_LENGTH_IPLEN_SHIFT)
+#define I40E_TXD_QW1_L4LEN_MASK (0xFUL << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+#define I40E_TXD_QW1_FCLEN_MASK (0xFUL << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+
+#define I40E_TXD_QW1_TX_BUF_SZ_SHIFT 34
+#define I40E_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \
+ I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
+
+#define I40E_TXD_QW1_L2TAG1_SHIFT 48
+#define I40E_TXD_QW1_L2TAG1_MASK (0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT)
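+
+/* Illustrative sketch (editor's example, not part of the shared code):
+ * packing the cmd_type_offset_bsz qword of a TX data descriptor from the
+ * fields defined above.  DTYPE_DATA is zero but is OR-ed in for clarity;
+ * the caller would convert the result to little endian (e.g. htole64())
+ * before storing it in the descriptor.  Parameter names are hypothetical.
+ */
+static inline u64
+i40e_example_build_tx_qword1(u32 td_cmd, u32 td_offset, u32 buf_len,
+    u16 l2tag1)
+{
+    return ((u64)I40E_TX_DESC_DTYPE_DATA |
+        ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
+        ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
+        ((u64)buf_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
+        ((u64)l2tag1 << I40E_TXD_QW1_L2TAG1_SHIFT));
+}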
+
+/* Context descriptors */
+struct i40e_tx_context_desc {
+ __le32 tunneling_params;
+ __le16 l2tag2;
+ __le16 rsvd;
+ __le64 type_cmd_tso_mss;
+};
+
+#define I40E_TXD_CTX_QW1_DTYPE_SHIFT 0
+#define I40E_TXD_CTX_QW1_DTYPE_MASK (0xFUL << I40E_TXD_CTX_QW1_DTYPE_SHIFT)
+
+#define I40E_TXD_CTX_QW1_CMD_SHIFT 4
+#define I40E_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT)
+
+enum i40e_tx_ctx_desc_cmd_bits {
+ I40E_TX_CTX_DESC_TSO = 0x01,
+ I40E_TX_CTX_DESC_TSYN = 0x02,
+ I40E_TX_CTX_DESC_IL2TAG2 = 0x04,
+ I40E_TX_CTX_DESC_IL2TAG2_IL2H = 0x08,
+ I40E_TX_CTX_DESC_SWTCH_NOTAG = 0x00,
+ I40E_TX_CTX_DESC_SWTCH_UPLINK = 0x10,
+ I40E_TX_CTX_DESC_SWTCH_LOCAL = 0x20,
+ I40E_TX_CTX_DESC_SWTCH_VSI = 0x30,
+ I40E_TX_CTX_DESC_SWPE = 0x40
+};
+
+#define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT 30
+#define I40E_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \
+ I40E_TXD_CTX_QW1_TSO_LEN_SHIFT)
+
+#define I40E_TXD_CTX_QW1_MSS_SHIFT 50
+#define I40E_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \
+ I40E_TXD_CTX_QW1_MSS_SHIFT)
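+
+/* Illustrative sketch (editor's example, not part of the shared code):
+ * packing the type_cmd_tso_mss qword of a TSO context descriptor from
+ * the fields above.  tso_len is the TSO payload length and mss the
+ * segment size; the result would be converted to little endian before
+ * being written into the descriptor.
+ */
+static inline u64
+i40e_example_build_tso_ctx_qword1(u32 tso_len, u32 mss)
+{
+    return ((u64)I40E_TX_DESC_DTYPE_CONTEXT |
+        ((u64)I40E_TX_CTX_DESC_TSO << I40E_TXD_CTX_QW1_CMD_SHIFT) |
+        ((u64)tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
+        ((u64)mss << I40E_TXD_CTX_QW1_MSS_SHIFT));
+}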
+
+#define I40E_TXD_CTX_QW1_VSI_SHIFT 50
+#define I40E_TXD_CTX_QW1_VSI_MASK (0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT)
+
+#define I40E_TXD_CTX_QW0_EXT_IP_SHIFT 0
+#define I40E_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \
+ I40E_TXD_CTX_QW0_EXT_IP_SHIFT)
+
+enum i40e_tx_ctx_desc_eipt_offload {
+ I40E_TX_CTX_EXT_IP_NONE = 0x0,
+ I40E_TX_CTX_EXT_IP_IPV6 = 0x1,
+ I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2,
+ I40E_TX_CTX_EXT_IP_IPV4 = 0x3
+};
+
+#define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2
+#define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \
+ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT)
+
+#define I40E_TXD_CTX_QW0_NATT_SHIFT 9
+#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+
+#define I40E_TXD_CTX_UDP_TUNNELING (0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+#define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+
+#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11
+#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK (0x1ULL << \
+ I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+
+#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK
+
+#define I40E_TXD_CTX_QW0_NATLEN_SHIFT 12
+#define I40E_TXD_CTX_QW0_NATLEN_MASK (0X7FULL << \
+ I40E_TXD_CTX_QW0_NATLEN_SHIFT)
+
+#define I40E_TXD_CTX_QW0_DECTTL_SHIFT 19
+#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \
+ I40E_TXD_CTX_QW0_DECTTL_SHIFT)
+
+struct i40e_nop_desc {
+ __le64 rsvd;
+ __le64 dtype_cmd;
+};
+
+#define I40E_TXD_NOP_QW1_DTYPE_SHIFT 0
+#define I40E_TXD_NOP_QW1_DTYPE_MASK (0xFUL << I40E_TXD_NOP_QW1_DTYPE_SHIFT)
+
+#define I40E_TXD_NOP_QW1_CMD_SHIFT 4
+#define I40E_TXD_NOP_QW1_CMD_MASK (0x7FUL << I40E_TXD_NOP_QW1_CMD_SHIFT)
+
+enum i40e_tx_nop_desc_cmd_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_TX_NOP_DESC_EOP_SHIFT = 0,
+ I40E_TX_NOP_DESC_RS_SHIFT = 1,
+ I40E_TX_NOP_DESC_RSV_SHIFT = 2 /* 5 bits */
+};
+
+struct i40e_filter_program_desc {
+ __le32 qindex_flex_ptype_vsi;
+ __le32 rsvd;
+ __le32 dtype_cmd_cntindex;
+ __le32 fd_id;
+};
+#define I40E_TXD_FLTR_QW0_QINDEX_SHIFT 0
+#define I40E_TXD_FLTR_QW0_QINDEX_MASK (0x7FFUL << \
+ I40E_TXD_FLTR_QW0_QINDEX_SHIFT)
+#define I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT 11
+#define I40E_TXD_FLTR_QW0_FLEXOFF_MASK (0x7UL << \
+ I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT)
+#define I40E_TXD_FLTR_QW0_PCTYPE_SHIFT 17
+#define I40E_TXD_FLTR_QW0_PCTYPE_MASK (0x3FUL << \
+ I40E_TXD_FLTR_QW0_PCTYPE_SHIFT)
+
+/* Packet Classifier Types for filters */
+enum i40e_filter_pctype {
+ /* Note: Values 0-30 are reserved for future use */
+ I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
+ /* Note: Value 32 is reserved for future use */
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
+ I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
+ I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
+ I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
+ /* Note: Values 37-40 are reserved for future use */
+ I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
+ I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
+ I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
+ I40E_FILTER_PCTYPE_FRAG_IPV6 = 46,
+ /* Note: Value 47 is reserved for future use */
+ I40E_FILTER_PCTYPE_FCOE_OX = 48,
+ I40E_FILTER_PCTYPE_FCOE_RX = 49,
+ I40E_FILTER_PCTYPE_FCOE_OTHER = 50,
+ /* Note: Values 51-62 are reserved for future use */
+ I40E_FILTER_PCTYPE_L2_PAYLOAD = 63,
+};
+
+enum i40e_filter_program_desc_dest {
+ I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET = 0x0,
+ I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX = 0x1,
+ I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER = 0x2,
+};
+
+enum i40e_filter_program_desc_fd_status {
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_NONE = 0x0,
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID = 0x1,
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID_4FLEX_BYTES = 0x2,
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_8FLEX_BYTES = 0x3,
+};
+
+#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23
+#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \
+ I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_DTYPE_SHIFT 0
+#define I40E_TXD_FLTR_QW1_DTYPE_MASK (0xFUL << I40E_TXD_FLTR_QW1_DTYPE_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CMD_SHIFT 4
+#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \
+ I40E_TXD_FLTR_QW1_CMD_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_PCMD_SHIFT (0x0ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_PCMD_MASK (0x7ULL << I40E_TXD_FLTR_QW1_PCMD_SHIFT)
+
+enum i40e_filter_program_desc_pcmd {
+ I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE = 0x1,
+ I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE = 0x2,
+};
+
+#define I40E_TXD_FLTR_QW1_DEST_SHIFT (0x3ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK (0x1ULL << \
+ I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \
+ I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \
+ I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
+#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \
+ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
+
+enum i40e_filter_type {
+ I40E_FLOW_DIRECTOR_FLTR = 0,
+ I40E_PE_QUAD_HASH_FLTR = 1,
+ I40E_ETHERTYPE_FLTR,
+ I40E_FCOE_CTX_FLTR,
+ I40E_MAC_VLAN_FLTR,
+ I40E_HASH_FLTR
+};
+
+struct i40e_vsi_context {
+ u16 seid;
+ u16 uplink_seid;
+ u16 vsi_number;
+ u16 vsis_allocated;
+ u16 vsis_unallocated;
+ u16 flags;
+ u8 pf_num;
+ u8 vf_num;
+ u8 connection_type;
+ struct i40e_aqc_vsi_properties_data info;
+};
+
+struct i40e_veb_context {
+ u16 seid;
+ u16 uplink_seid;
+ u16 veb_number;
+ u16 vebs_allocated;
+ u16 vebs_unallocated;
+ u16 flags;
+ struct i40e_aqc_get_veb_parameters_completion info;
+};
+
+/* Statistics collected by each port, VSI, VEB, and S-channel */
+struct i40e_eth_stats {
+ u64 rx_bytes; /* gorc */
+ u64 rx_unicast; /* uprc */
+ u64 rx_multicast; /* mprc */
+ u64 rx_broadcast; /* bprc */
+ u64 rx_discards; /* rdpc */
+ u64 rx_unknown_protocol; /* rupp */
+ u64 tx_bytes; /* gotc */
+ u64 tx_unicast; /* uptc */
+ u64 tx_multicast; /* mptc */
+ u64 tx_broadcast; /* bptc */
+ u64 tx_discards; /* tdpc */
+ u64 tx_errors; /* tepc */
+};
+
+/* Statistics collected by the MAC */
+struct i40e_hw_port_stats {
+ /* eth stats collected by the port */
+ struct i40e_eth_stats eth;
+
+ /* additional port specific stats */
+ u64 tx_dropped_link_down; /* tdold */
+ u64 crc_errors; /* crcerrs */
+ u64 illegal_bytes; /* illerrc */
+ u64 error_bytes; /* errbc */
+ u64 mac_local_faults; /* mlfc */
+ u64 mac_remote_faults; /* mrfc */
+ u64 rx_length_errors; /* rlec */
+ u64 link_xon_rx; /* lxonrxc */
+ u64 link_xoff_rx; /* lxoffrxc */
+ u64 priority_xon_rx[8]; /* pxonrxc[8] */
+ u64 priority_xoff_rx[8]; /* pxoffrxc[8] */
+ u64 link_xon_tx; /* lxontxc */
+ u64 link_xoff_tx; /* lxofftxc */
+ u64 priority_xon_tx[8]; /* pxontxc[8] */
+ u64 priority_xoff_tx[8]; /* pxofftxc[8] */
+ u64 priority_xon_2_xoff[8]; /* pxon2offc[8] */
+ u64 rx_size_64; /* prc64 */
+ u64 rx_size_127; /* prc127 */
+ u64 rx_size_255; /* prc255 */
+ u64 rx_size_511; /* prc511 */
+ u64 rx_size_1023; /* prc1023 */
+ u64 rx_size_1522; /* prc1522 */
+ u64 rx_size_big; /* prc9522 */
+ u64 rx_undersize; /* ruc */
+ u64 rx_fragments; /* rfc */
+ u64 rx_oversize; /* roc */
+ u64 rx_jabber; /* rjc */
+ u64 tx_size_64; /* ptc64 */
+ u64 tx_size_127; /* ptc127 */
+ u64 tx_size_255; /* ptc255 */
+ u64 tx_size_511; /* ptc511 */
+ u64 tx_size_1023; /* ptc1023 */
+ u64 tx_size_1522; /* ptc1522 */
+ u64 tx_size_big; /* ptc9522 */
+ u64 mac_short_packet_dropped; /* mspdc */
+ u64 checksum_error; /* xec */
+ /* flow director stats */
+ u64 fd_atr_match;
+ u64 fd_sb_match;
+ /* EEE LPI */
+ u32 tx_lpi_status;
+ u32 rx_lpi_status;
+ u64 tx_lpi_count; /* etlpic */
+ u64 rx_lpi_count; /* erlpic */
+};
+
+/* Checksum and Shadow RAM pointers */
+#define I40E_SR_NVM_CONTROL_WORD 0x00
+#define I40E_SR_PCIE_ANALOG_CONFIG_PTR 0x03
+#define I40E_SR_PHY_ANALOG_CONFIG_PTR 0x04
+#define I40E_SR_OPTION_ROM_PTR 0x05
+#define I40E_SR_RO_PCIR_REGS_AUTO_LOAD_PTR 0x06
+#define I40E_SR_AUTO_GENERATED_POINTERS_PTR 0x07
+#define I40E_SR_PCIR_REGS_AUTO_LOAD_PTR 0x08
+#define I40E_SR_EMP_GLOBAL_MODULE_PTR 0x09
+#define I40E_SR_RO_PCIE_LCB_PTR 0x0A
+#define I40E_SR_EMP_IMAGE_PTR 0x0B
+#define I40E_SR_PE_IMAGE_PTR 0x0C
+#define I40E_SR_CSR_PROTECTED_LIST_PTR 0x0D
+#define I40E_SR_MNG_CONFIG_PTR 0x0E
+#define I40E_SR_EMP_MODULE_PTR 0x0F
+#define I40E_SR_PBA_BLOCK_PTR 0x16
+#define I40E_SR_BOOT_CONFIG_PTR 0x17
+#define I40E_SR_NVM_IMAGE_VERSION 0x18
+#define I40E_SR_NVM_WAKE_ON_LAN 0x19
+#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27
+#define I40E_SR_PERMANENT_SAN_MAC_ADDRESS_PTR 0x28
+#define I40E_SR_NVM_EETRACK_LO 0x2D
+#define I40E_SR_NVM_EETRACK_HI 0x2E
+#define I40E_SR_VPD_PTR 0x2F
+#define I40E_SR_PXE_SETUP_PTR 0x30
+#define I40E_SR_PXE_CONFIG_CUST_OPTIONS_PTR 0x31
+#define I40E_SR_SW_ETHERNET_MAC_ADDRESS_PTR 0x37
+#define I40E_SR_POR_REGS_AUTO_LOAD_PTR 0x38
+#define I40E_SR_EMPR_REGS_AUTO_LOAD_PTR 0x3A
+#define I40E_SR_GLOBR_REGS_AUTO_LOAD_PTR 0x3B
+#define I40E_SR_CORER_REGS_AUTO_LOAD_PTR 0x3C
+#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E
+#define I40E_SR_SW_CHECKSUM_WORD 0x3F
+#define I40E_SR_1ST_FREE_PROVISION_AREA_PTR 0x40
+#define I40E_SR_4TH_FREE_PROVISION_AREA_PTR 0x42
+#define I40E_SR_3RD_FREE_PROVISION_AREA_PTR 0x44
+#define I40E_SR_2ND_FREE_PROVISION_AREA_PTR 0x46
+#define I40E_SR_EMP_SR_SETTINGS_PTR 0x48
+
+/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
+#define I40E_SR_VPD_MODULE_MAX_SIZE 1024
+#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024
+#define I40E_SR_CONTROL_WORD_1_SHIFT 0x06
+#define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT)
+
+/* Shadow RAM related */
+#define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800
+#define I40E_SR_BUF_ALIGNMENT 4096
+#define I40E_SR_WORDS_IN_1KB 512
+/* Checksum should be calculated such that after adding all the words,
+ * including the checksum word itself, the sum should be 0xBABA.
+ */
+#define I40E_SR_SW_CHECKSUM_BASE 0xBABA
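+
+/* Illustrative sketch (editor's example, not part of the shared code):
+ * the rule above means the stored checksum word is chosen so that the
+ * 16-bit sum of every Shadow RAM word equals I40E_SR_SW_CHECKSUM_BASE.
+ * A minimal verification over an in-memory copy could look like this;
+ * the real NVM code applies additional rules when computing the value.
+ */
+static inline bool
+i40e_example_sr_checksum_ok(const u16 *sr_words, u32 nwords)
+{
+    u16 sum = 0;
+    u32 i;
+
+    for (i = 0; i < nwords; i++)
+        sum += sr_words[i];    /* includes the checksum word itself */
+    return (sum == I40E_SR_SW_CHECKSUM_BASE);
+}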
+
+#define I40E_SRRD_SRCTL_ATTEMPTS 100000
+
+enum i40e_switch_element_types {
+ I40E_SWITCH_ELEMENT_TYPE_MAC = 1,
+ I40E_SWITCH_ELEMENT_TYPE_PF = 2,
+ I40E_SWITCH_ELEMENT_TYPE_VF = 3,
+ I40E_SWITCH_ELEMENT_TYPE_EMP = 4,
+ I40E_SWITCH_ELEMENT_TYPE_BMC = 6,
+ I40E_SWITCH_ELEMENT_TYPE_PE = 16,
+ I40E_SWITCH_ELEMENT_TYPE_VEB = 17,
+ I40E_SWITCH_ELEMENT_TYPE_PA = 18,
+ I40E_SWITCH_ELEMENT_TYPE_VSI = 19,
+};
+
+/* Supported EtherType filters */
+enum i40e_ether_type_index {
+ I40E_ETHER_TYPE_1588 = 0,
+ I40E_ETHER_TYPE_FIP = 1,
+ I40E_ETHER_TYPE_OUI_EXTENDED = 2,
+ I40E_ETHER_TYPE_MAC_CONTROL = 3,
+ I40E_ETHER_TYPE_LLDP = 4,
+ I40E_ETHER_TYPE_EVB_PROTOCOL1 = 5,
+ I40E_ETHER_TYPE_EVB_PROTOCOL2 = 6,
+ I40E_ETHER_TYPE_QCN_CNM = 7,
+ I40E_ETHER_TYPE_8021X = 8,
+ I40E_ETHER_TYPE_ARP = 9,
+ I40E_ETHER_TYPE_RSV1 = 10,
+ I40E_ETHER_TYPE_RSV2 = 11,
+};
+
+/* Filter context base size is 1K */
+#define I40E_HASH_FILTER_BASE_SIZE 1024
+/* Supported Hash filter values */
+enum i40e_hash_filter_size {
+ I40E_HASH_FILTER_SIZE_1K = 0,
+ I40E_HASH_FILTER_SIZE_2K = 1,
+ I40E_HASH_FILTER_SIZE_4K = 2,
+ I40E_HASH_FILTER_SIZE_8K = 3,
+ I40E_HASH_FILTER_SIZE_16K = 4,
+ I40E_HASH_FILTER_SIZE_32K = 5,
+ I40E_HASH_FILTER_SIZE_64K = 6,
+ I40E_HASH_FILTER_SIZE_128K = 7,
+ I40E_HASH_FILTER_SIZE_256K = 8,
+ I40E_HASH_FILTER_SIZE_512K = 9,
+ I40E_HASH_FILTER_SIZE_1M = 10,
+};
+
+/* DMA context base size is 0.5K */
+#define I40E_DMA_CNTX_BASE_SIZE 512
+/* Supported DMA context values */
+enum i40e_dma_cntx_size {
+ I40E_DMA_CNTX_SIZE_512 = 0,
+ I40E_DMA_CNTX_SIZE_1K = 1,
+ I40E_DMA_CNTX_SIZE_2K = 2,
+ I40E_DMA_CNTX_SIZE_4K = 3,
+ I40E_DMA_CNTX_SIZE_8K = 4,
+ I40E_DMA_CNTX_SIZE_16K = 5,
+ I40E_DMA_CNTX_SIZE_32K = 6,
+ I40E_DMA_CNTX_SIZE_64K = 7,
+ I40E_DMA_CNTX_SIZE_128K = 8,
+ I40E_DMA_CNTX_SIZE_256K = 9,
+};
+
+/* Supported Hash look up table (LUT) sizes */
+enum i40e_hash_lut_size {
+ I40E_HASH_LUT_SIZE_128 = 0,
+ I40E_HASH_LUT_SIZE_512 = 1,
+};
+
+/* Structure to hold a per PF filter control settings */
+struct i40e_filter_control_settings {
+ /* number of PE Quad Hash filter buckets */
+ enum i40e_hash_filter_size pe_filt_num;
+ /* number of PE Quad Hash contexts */
+ enum i40e_dma_cntx_size pe_cntx_num;
+ /* number of FCoE filter buckets */
+ enum i40e_hash_filter_size fcoe_filt_num;
+ /* number of FCoE DDP contexts */
+ enum i40e_dma_cntx_size fcoe_cntx_num;
+ /* size of the Hash LUT */
+ enum i40e_hash_lut_size hash_lut_size;
+ /* enable FDIR filters for PF and its VFs */
+ bool enable_fdir;
+ /* enable Ethertype filters for PF and its VFs */
+ bool enable_ethtype;
+ /* enable MAC/VLAN filters for PF and its VFs */
+ bool enable_macvlan;
+};
+
+/* Structure to hold device level control filter counts */
+struct i40e_control_filter_stats {
+ u16 mac_etype_used; /* Used perfect match MAC/EtherType filters */
+ u16 etype_used; /* Used perfect EtherType filters */
+ u16 mac_etype_free; /* Un-used perfect match MAC/EtherType filters */
+ u16 etype_free; /* Un-used perfect EtherType filters */
+};
+
+enum i40e_reset_type {
+ I40E_RESET_POR = 0,
+ I40E_RESET_CORER = 1,
+ I40E_RESET_GLOBR = 2,
+ I40E_RESET_EMPR = 3,
+};
+
+/* Offsets into Alternate RAM */
+#define I40E_ALT_STRUCT_FIRST_PF_OFFSET 0 /* in dwords */
+#define I40E_ALT_STRUCT_DWORDS_PER_PF 64 /* in dwords */
+#define I40E_ALT_STRUCT_OUTER_VLAN_TAG_OFFSET 0xD /* in dwords */
+#define I40E_ALT_STRUCT_USER_PRIORITY_OFFSET 0xC /* in dwords */
+#define I40E_ALT_STRUCT_MIN_BW_OFFSET 0xE /* in dwords */
+#define I40E_ALT_STRUCT_MAX_BW_OFFSET 0xF /* in dwords */
+
+/* Alternate RAM Bandwidth Masks */
+#define I40E_ALT_BW_VALUE_MASK 0xFF
+#define I40E_ALT_BW_RELATIVE_MASK 0x40000000
+#define I40E_ALT_BW_VALID_MASK 0x80000000
+
+/* RSS Hash Table Size */
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000
+#endif /* _I40E_TYPE_H_ */
diff --git a/sys/dev/ixl/i40e_virtchnl.h b/sys/dev/ixl/i40e_virtchnl.h
new file mode 100755
index 0000000..034d276
--- /dev/null
+++ b/sys/dev/ixl/i40e_virtchnl.h
@@ -0,0 +1,375 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2014, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _I40E_VIRTCHNL_H_
+#define _I40E_VIRTCHNL_H_
+
+#include "i40e_type.h"
+
+/* Description:
+ * This header file describes the VF-PF communication protocol used
+ * by the various i40e drivers.
+ *
+ * Admin queue buffer usage:
+ * desc->opcode is always i40e_aqc_opc_send_msg_to_pf
+ * flags, retval, datalen, and data addr are all used normally.
+ * Firmware copies the cookie fields when sending messages between the PF and
+ * VF, but uses all other fields internally. Due to this limitation, we
+ * must send all messages as "indirect", i.e. using an external buffer.
+ *
+ * All the VSI indexes are relative to the VF. Each VF can have a maximum of
+ * three VSIs. All the queue indexes are relative to the VSI. Each VF can
+ * have a maximum of sixteen queues for all of its VSIs.
+ *
+ * The PF is required to return a status code in v_retval for all messages
+ * except RESET_VF, which does not require any response. The return value is
+ * of i40e_status_code type, defined in i40e_type.h.
+ *
+ * In general, VF driver initialization should roughly follow the order of these
+ * opcodes. The VF driver must first validate the API version of the PF driver,
+ * then request a reset, then get resources, then configure queues and
+ * interrupts. After these operations are complete, the VF driver may start
+ * its queues, optionally add MAC and VLAN filters, and process traffic.
+ */
+
+/* Opcodes for VF-PF communication. These are placed in the v_opcode field
+ * of the virtchnl_msg structure.
+ */
+enum i40e_virtchnl_ops {
+/* VF sends requests to the PF for the following ops. */
+ I40E_VIRTCHNL_OP_UNKNOWN = 0,
+ I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
+ I40E_VIRTCHNL_OP_RESET_VF,
+ I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE,
+ I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE,
+ I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+ I40E_VIRTCHNL_OP_ADD_VLAN,
+ I40E_VIRTCHNL_OP_DEL_VLAN,
+ I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+ I40E_VIRTCHNL_OP_GET_STATS,
+ I40E_VIRTCHNL_OP_FCOE,
+ I40E_VIRTCHNL_OP_CONFIG_RSS,
+/* PF sends status change events to VFs using the following op. */
+ I40E_VIRTCHNL_OP_EVENT,
+};
+
+/* Virtual channel message descriptor. This overlays the admin queue
+ * descriptor. All other data is passed in external buffers.
+ */
+
+struct i40e_virtchnl_msg {
+ u8 pad[8]; /* AQ flags/opcode/len/retval fields */
+ enum i40e_virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
+ enum i40e_status_code v_retval; /* ditto for desc->retval */
+ u32 vfid; /* used by PF when sending to VF */
+};
+
+/* Message descriptions and data structures.*/
+
+/* I40E_VIRTCHNL_OP_VERSION
+ * VF posts its version number to the PF. PF responds with its version number
+ * in the same format, along with a return code.
+ * Reply from PF has its major/minor versions also in param0 and param1.
+ * If there is a major version mismatch, then the VF cannot operate.
+ * If there is a minor version mismatch, then the VF can operate but should
+ * add a warning to the system log.
+ *
+ * This enum element MUST always be specified as == 1, regardless of other
+ * changes in the API. The PF must always respond to this message without
+ * error regardless of version mismatch.
+ */
+#define I40E_VIRTCHNL_VERSION_MAJOR 1
+#define I40E_VIRTCHNL_VERSION_MINOR 0
+struct i40e_virtchnl_version_info {
+ u32 major;
+ u32 minor;
+};
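+
+/* Illustrative sketch (editor's example, not part of the protocol
+ * definition): the version rule above expressed as a check a VF driver
+ * might apply to the PF's reply.  A major mismatch is fatal; a minor
+ * mismatch only warrants a warning from the caller.
+ */
+static inline bool
+i40e_example_version_ok(const struct i40e_virtchnl_version_info *pf_ver)
+{
+    return (pf_ver->major == I40E_VIRTCHNL_VERSION_MAJOR);
+}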
+
+/* I40E_VIRTCHNL_OP_RESET_VF
+ * VF sends this request to PF with no parameters
+ * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
+ * until reset completion is indicated. The admin queue must be reinitialized
+ * after this operation.
+ *
+ * When reset is complete, PF must ensure that all queues in all VSIs associated
+ * with the VF are stopped, all queue configurations in the HMC are set to 0,
+ * and all MAC and VLAN filters (except the default MAC address) on all VSIs
+ * are cleared.
+ */
+
+/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
+ * VF sends this request to PF with no parameters
+ * PF responds with an indirect message containing
+ * i40e_virtchnl_vf_resource and one or more
+ * i40e_virtchnl_vsi_resource structures.
+ */
+
+struct i40e_virtchnl_vsi_resource {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ enum i40e_vsi_type vsi_type;
+ u16 qset_handle;
+ u8 default_mac_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+};
+/* VF offload flags */
+#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
+#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
+#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
+#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
+
+struct i40e_virtchnl_vf_resource {
+ u16 num_vsis;
+ u16 num_queue_pairs;
+ u16 max_vectors;
+ u16 max_mtu;
+
+ u32 vf_offload_flags;
+ u32 max_fcoe_contexts;
+ u32 max_fcoe_filters;
+
+ struct i40e_virtchnl_vsi_resource vsi_res[1];
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE
+ * VF sends this message to set up parameters for one TX queue.
+ * External data buffer contains one instance of i40e_virtchnl_txq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Tx queue config info */
+struct i40e_virtchnl_txq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u16 ring_len; /* number of descriptors, multiple of 8 */
+ u16 headwb_enabled;
+ u64 dma_ring_addr;
+ u64 dma_headwb_addr;
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE
+ * VF sends this message to set up parameters for one RX queue.
+ * External data buffer contains one instance of i40e_virtchnl_rxq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Rx queue config info */
+struct i40e_virtchnl_rxq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u32 ring_len; /* number of descriptors, multiple of 32 */
+ u16 hdr_size;
+ u16 splithdr_enabled;
+ u32 databuffer_size;
+ u32 max_pkt_size;
+ u64 dma_ring_addr;
+ enum i40e_hmc_obj_rx_hsplit_0 rx_split_pos;
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
+ * VF sends this message to set parameters for all active TX and RX queues
+ * associated with the specified VSI.
+ * PF configures queues and returns status.
+ * If the number of queues specified is greater than the number of queues
+ * associated with the VSI, an error is returned and no queues are configured.
+ */
+struct i40e_virtchnl_queue_pair_info {
+ /* NOTE: vsi_id and queue_id should be identical for both queues. */
+ struct i40e_virtchnl_txq_info txq;
+ struct i40e_virtchnl_rxq_info rxq;
+};
+
+struct i40e_virtchnl_vsi_queue_config_info {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ struct i40e_virtchnl_queue_pair_info qpair[1];
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
+ * VF uses this message to map vectors to queues.
+ * The rxq_map and txq_map fields are bitmaps used to indicate which queues
+ * are to be associated with the specified vector.
+ * The "other" causes are always mapped to vector 0.
+ * PF configures interrupt mapping and returns status.
+ */
+struct i40e_virtchnl_vector_map {
+ u16 vsi_id;
+ u16 vector_id;
+ u16 rxq_map;
+ u16 txq_map;
+ u16 rxitr_idx;
+ u16 txitr_idx;
+};
+
+struct i40e_virtchnl_irq_map_info {
+ u16 num_vectors;
+ struct i40e_virtchnl_vector_map vecmap[1];
+};
+
+/* I40E_VIRTCHNL_OP_ENABLE_QUEUES
+ * I40E_VIRTCHNL_OP_DISABLE_QUEUES
+ * VF sends these messages to enable or disable TX/RX queue pairs.
+ * The queues fields are bitmaps indicating which queues to act upon.
+ * (Currently, we only support 16 queues per VF, but we make the field
+ * u32 to allow for expansion.)
+ * PF performs requested action and returns status.
+ */
+struct i40e_virtchnl_queue_select {
+ u16 vsi_id;
+ u16 pad;
+ u32 rx_queues;
+ u32 tx_queues;
+};
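+
+/* Illustrative sketch (editor's example, not part of the protocol
+ * definition): filling a queue_select for the first num_pairs TX/RX
+ * queue pairs of a VSI, as used with the ENABLE/DISABLE_QUEUES ops.
+ * Names are hypothetical; num_pairs is assumed to be 16 or less.
+ */
+static inline void
+i40e_example_fill_queue_select(struct i40e_virtchnl_queue_select *qsel,
+    u16 vsi_id, u16 num_pairs)
+{
+    qsel->vsi_id = vsi_id;
+    qsel->pad = 0;
+    qsel->rx_queues = ((u32)1 << num_pairs) - 1; /* queues 0..n-1 */
+    qsel->tx_queues = qsel->rx_queues;
+}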
+
+/* I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
+ * VF sends this message in order to add one or more unicast or multicast
+ * address filters for the specified VSI.
+ * PF adds the filters and returns status.
+ */
+
+/* I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
+ * VF sends this message in order to remove one or more unicast or multicast
+ * filters for the specified VSI.
+ * PF removes the filters and returns status.
+ */
+
+struct i40e_virtchnl_ether_addr {
+ u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 pad[2];
+};
+
+struct i40e_virtchnl_ether_addr_list {
+ u16 vsi_id;
+ u16 num_elements;
+ struct i40e_virtchnl_ether_addr list[1];
+};
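+
+/* Illustrative note (editor's example, not part of the protocol
+ * definition): lists such as this one end in a one-element array, so the
+ * buffer handed to the admin queue must be sized for the extra elements,
+ * e.g. for n addresses:
+ *
+ *    len = sizeof(struct i40e_virtchnl_ether_addr_list) +
+ *        (n - 1) * sizeof(struct i40e_virtchnl_ether_addr);
+ */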
+
+/* I40E_VIRTCHNL_OP_ADD_VLAN
+ * VF sends this message to add one or more VLAN tag filters for receives.
+ * PF adds the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+/* I40E_VIRTCHNL_OP_DEL_VLAN
+ * VF sends this message to remove one or more VLAN tag filters for receives.
+ * PF removes the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+struct i40e_virtchnl_vlan_filter_list {
+ u16 vsi_id;
+ u16 num_elements;
+ u16 vlan_id[1];
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
+ * VF sends VSI id and flags.
+ * PF returns status code in retval.
+ * Note: we assume that broadcast accept mode is always enabled.
+ */
+struct i40e_virtchnl_promisc_info {
+ u16 vsi_id;
+ u16 flags;
+};
+
+#define I40E_FLAG_VF_UNICAST_PROMISC 0x00000001
+#define I40E_FLAG_VF_MULTICAST_PROMISC 0x00000002
+
+/* I40E_VIRTCHNL_OP_GET_STATS
+ * VF sends this message to request stats for the selected VSI. VF uses
+ * the i40e_virtchnl_queue_select struct to specify the VSI. The queue_id
+ * field is ignored by the PF.
+ *
+ * PF replies with struct i40e_eth_stats in an external buffer.
+ */
+
+/* I40E_VIRTCHNL_OP_EVENT
+ * PF sends this message to inform the VF driver of events that may affect it.
+ * No direct response is expected from the VF, though it may generate other
+ * messages in response to this one.
+ */
+enum i40e_virtchnl_event_codes {
+ I40E_VIRTCHNL_EVENT_UNKNOWN = 0,
+ I40E_VIRTCHNL_EVENT_LINK_CHANGE,
+ I40E_VIRTCHNL_EVENT_RESET_IMPENDING,
+ I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
+};
+#define I40E_PF_EVENT_SEVERITY_INFO 0
+#define I40E_PF_EVENT_SEVERITY_ATTENTION 1
+#define I40E_PF_EVENT_SEVERITY_ACTION_REQUIRED 2
+#define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM 255
+
+struct i40e_virtchnl_pf_event {
+ enum i40e_virtchnl_event_codes event;
+ union {
+ struct {
+ enum i40e_aq_link_speed link_speed;
+ bool link_status;
+ } link_event;
+ } event_data;
+
+ int severity;
+};
+
+/* VF reset states - these are written into the RSTAT register:
+ * I40E_VFGEN_RSTAT1 on the PF
+ * I40E_VFGEN_RSTAT on the VF
+ * When the PF initiates a reset, it writes 0
+ * When the reset is complete, it writes 1
+ * When the PF detects that the VF has recovered, it writes 2
+ * VF checks this register periodically to determine if a reset has occurred,
+ * then polls it to know when the reset is complete.
+ * If either the PF or VF reads the register while the hardware
+ * is in a reset state, it will return 0xDEADBEEF, which, when masked,
+ * will result in 3 (I40E_VFR_UNKNOWN).
+ */
+enum i40e_vfr_states {
+ I40E_VFR_INPROGRESS = 0,
+ I40E_VFR_COMPLETED,
+ I40E_VFR_VFACTIVE,
+ I40E_VFR_UNKNOWN,
+};
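+
+/* Illustrative sketch (editor's example, not part of the protocol
+ * definition): a VF driver polling its reset state as described above.
+ * The state fits in the two low-order bits, which is why a 0xDEADBEEF
+ * read decodes as 3 (I40E_VFR_UNKNOWN).  rd32() is the osdep register
+ * read used elsewhere in this driver.
+ */
+static inline enum i40e_vfr_states
+i40e_example_read_vfr_state(struct i40e_hw *hw)
+{
+    u32 rstat = rd32(hw, I40E_VFGEN_RSTAT);
+
+    return ((enum i40e_vfr_states)(rstat & 0x3));
+}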
+
+#endif /* _I40E_VIRTCHNL_H_ */
diff --git a/sys/dev/ixl/if_ixl.c b/sys/dev/ixl/if_ixl.c
new file mode 100755
index 0000000..de3f817
--- /dev/null
+++ b/sys/dev/ixl/if_ixl.c
@@ -0,0 +1,4707 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2014, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#include "opt_inet.h"
+#include "opt_inet6.h"
+#include "ixl.h"
+#include "ixl_pf.h"
+
+/*********************************************************************
+ * Driver version
+ *********************************************************************/
+char ixl_driver_version[] = "1.2.2";
+
+/*********************************************************************
+ * PCI Device ID Table
+ *
+ * Used by probe to select devices to load on
+ * Last field stores an index into ixl_strings
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
+ *********************************************************************/
+
+static ixl_vendor_info_t ixl_vendor_info_array[] =
+{
+ {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0},
+ {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_A, 0, 0, 0},
+ {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, 0, 0, 0},
+ {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, 0, 0, 0},
+ {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0},
+ {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0},
+ {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0},
+ {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0},
+ /* required last entry */
+ {0, 0, 0, 0, 0}
+};
+
+/*********************************************************************
+ * Table of branding strings
+ *********************************************************************/
+
+static char *ixl_strings[] = {
+ "Intel(R) Ethernet Connection XL710 Driver"
+};
+
+
+/*********************************************************************
+ * Function prototypes
+ *********************************************************************/
+static int ixl_probe(device_t);
+static int ixl_attach(device_t);
+static int ixl_detach(device_t);
+static int ixl_shutdown(device_t);
+static int ixl_get_hw_capabilities(struct ixl_pf *);
+static void ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
+static int ixl_ioctl(struct ifnet *, u_long, caddr_t);
+static void ixl_init(void *);
+static void ixl_init_locked(struct ixl_pf *);
+static void ixl_stop(struct ixl_pf *);
+static void ixl_media_status(struct ifnet *, struct ifmediareq *);
+static int ixl_media_change(struct ifnet *);
+static void ixl_update_link_status(struct ixl_pf *);
+static int ixl_allocate_pci_resources(struct ixl_pf *);
+static u16 ixl_get_bus_info(struct i40e_hw *, device_t);
+static int ixl_setup_stations(struct ixl_pf *);
+static int ixl_setup_vsi(struct ixl_vsi *);
+static int ixl_initialize_vsi(struct ixl_vsi *);
+static int ixl_assign_vsi_msix(struct ixl_pf *);
+static int ixl_assign_vsi_legacy(struct ixl_pf *);
+static int ixl_init_msix(struct ixl_pf *);
+static void ixl_configure_msix(struct ixl_pf *);
+static void ixl_configure_itr(struct ixl_pf *);
+static void ixl_configure_legacy(struct ixl_pf *);
+static void ixl_free_pci_resources(struct ixl_pf *);
+static void ixl_local_timer(void *);
+static int ixl_setup_interface(device_t, struct ixl_vsi *);
+static bool ixl_config_link(struct i40e_hw *);
+static void ixl_config_rss(struct ixl_vsi *);
+static void ixl_set_queue_rx_itr(struct ixl_queue *);
+static void ixl_set_queue_tx_itr(struct ixl_queue *);
+
+static void ixl_enable_rings(struct ixl_vsi *);
+static void ixl_disable_rings(struct ixl_vsi *);
+static void ixl_enable_intr(struct ixl_vsi *);
+static void ixl_disable_intr(struct ixl_vsi *);
+
+static void ixl_enable_adminq(struct i40e_hw *);
+static void ixl_disable_adminq(struct i40e_hw *);
+static void ixl_enable_queue(struct i40e_hw *, int);
+static void ixl_disable_queue(struct i40e_hw *, int);
+static void ixl_enable_legacy(struct i40e_hw *);
+static void ixl_disable_legacy(struct i40e_hw *);
+
+static void ixl_set_promisc(struct ixl_vsi *);
+static void ixl_add_multi(struct ixl_vsi *);
+static void ixl_del_multi(struct ixl_vsi *);
+static void ixl_register_vlan(void *, struct ifnet *, u16);
+static void ixl_unregister_vlan(void *, struct ifnet *, u16);
+static void ixl_setup_vlan_filters(struct ixl_vsi *);
+
+static void ixl_init_filters(struct ixl_vsi *);
+static void ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan);
+static void ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan);
+static void ixl_add_hw_filters(struct ixl_vsi *, int, int);
+static void ixl_del_hw_filters(struct ixl_vsi *, int);
+static struct ixl_mac_filter *
+ ixl_find_filter(struct ixl_vsi *, u8 *, s16);
+static void ixl_add_mc_filter(struct ixl_vsi *, u8 *);
+
+/* Sysctl debug interface */
+static int ixl_debug_info(SYSCTL_HANDLER_ARGS);
+static void ixl_print_debug_info(struct ixl_pf *);
+
+/* The MSI/X Interrupt handlers */
+static void ixl_intr(void *);
+static void ixl_msix_que(void *);
+static void ixl_msix_adminq(void *);
+static void ixl_handle_mdd_event(struct ixl_pf *);
+
+/* Deferred interrupt tasklets */
+static void ixl_do_adminq(void *, int);
+
+/* Sysctl handlers */
+static int ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
+static int ixl_set_advertise(SYSCTL_HANDLER_ARGS);
+static int ixl_current_speed(SYSCTL_HANDLER_ARGS);
+
+/* Statistics */
+static void ixl_add_hw_stats(struct ixl_pf *);
+static void ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *,
+ struct sysctl_oid_list *, struct i40e_hw_port_stats *);
+static void ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *,
+ struct sysctl_oid_list *,
+ struct i40e_eth_stats *);
+static void ixl_update_stats_counters(struct ixl_pf *);
+static void ixl_update_eth_stats(struct ixl_vsi *);
+static void ixl_pf_reset_stats(struct ixl_pf *);
+static void ixl_vsi_reset_stats(struct ixl_vsi *);
+static void ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
+ u64 *, u64 *);
+static void ixl_stat_update32(struct i40e_hw *, u32, bool,
+ u64 *, u64 *);
+
+#ifdef IXL_DEBUG
+static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_hw_res_info(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_dump_txd(SYSCTL_HANDLER_ARGS);
+#endif
+
+/*********************************************************************
+ * FreeBSD Device Interface Entry Points
+ *********************************************************************/
+
+static device_method_t ixl_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, ixl_probe),
+ DEVMETHOD(device_attach, ixl_attach),
+ DEVMETHOD(device_detach, ixl_detach),
+ DEVMETHOD(device_shutdown, ixl_shutdown),
+ {0, 0}
+};
+
+static driver_t ixl_driver = {
+ "ixl", ixl_methods, sizeof(struct ixl_pf),
+};
+
+devclass_t ixl_devclass;
+DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);
+
+MODULE_DEPEND(ixl, pci, 1, 1, 1);
+MODULE_DEPEND(ixl, ether, 1, 1, 1);
+
+/*
+** Global reset mutex
+*/
+static struct mtx ixl_reset_mtx;
+
+/*
+** TUNEABLE PARAMETERS:
+*/
+
+static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0,
+ "IXL driver parameters");
+
+/*
+ * MSIX should be the default for best performance,
+ * but this allows it to be forced off for testing.
+ */
+static int ixl_enable_msix = 1;
+TUNABLE_INT("hw.ixl.enable_msix", &ixl_enable_msix);
+SYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0,
+ "Enable MSI-X interrupts");
+
+/*
+** Number of descriptors per ring:
+** - TX and RX are the same size
+*/
+static int ixl_ringsz = DEFAULT_RING;
+TUNABLE_INT("hw.ixl.ringsz", &ixl_ringsz);
+SYSCTL_INT(_hw_ixl, OID_AUTO, ring_size, CTLFLAG_RDTUN,
+ &ixl_ringsz, 0, "Descriptor Ring Size");
+
+/*
+** This can be set manually; if left as 0, the
+** number of queues will be calculated based on
+** the CPUs and MSI-X vectors available.
+*/
+int ixl_max_queues = 0;
+TUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues);
+SYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN,
+ &ixl_max_queues, 0, "Number of Queues");
+
+/*
+** Controls for Interrupt Throttling
+** - true/false for dynamic adjustment
+** - default values for static ITR
+*/
+int ixl_dynamic_rx_itr = 0;
+TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
+SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
+ &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");
+
+int ixl_dynamic_tx_itr = 0;
+TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
+SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
+ &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
+
+int ixl_rx_itr = IXL_ITR_8K;
+TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
+SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
+ &ixl_rx_itr, 0, "RX Interrupt Rate");
+
+int ixl_tx_itr = IXL_ITR_4K;
+TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
+SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
+ &ixl_tx_itr, 0, "TX Interrupt Rate");
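+
+/*
+** Illustrative usage (editor's example): the tunables above are normally
+** set from /boot/loader.conf before the module loads.  The values below
+** are examples only, not recommendations:
+**
+**    hw.ixl.enable_msix="1"
+**    hw.ixl.max_queues="4"
+**    hw.ixl.dynamic_rx_itr="1"
+**    hw.ixl.dynamic_tx_itr="1"
+*/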
+
+#ifdef IXL_FDIR
+static int ixl_enable_fdir = 1;
+TUNABLE_INT("hw.ixl.enable_fdir", &ixl_enable_fdir);
+/* Rate at which we sample */
+int ixl_atr_rate = 20;
+TUNABLE_INT("hw.ixl.atr_rate", &ixl_atr_rate);
+#endif
+
+#ifdef DEV_NETMAP
+#include <dev/netmap/if_ixl_netmap.h>
+#endif /* DEV_NETMAP */
+
+static char *ixl_fc_string[6] = {
+ "None",
+ "Rx",
+ "Tx",
+ "Full",
+ "Priority",
+ "Default"
+};
+
+
+/*********************************************************************
+ * Device identification routine
+ *
+ * ixl_probe determines if the driver should be loaded on
+ * the hardware based on the PCI vendor/device ID of the device.
+ *
+ * return BUS_PROBE_DEFAULT on success, positive on failure
+ *********************************************************************/
+
+static int
+ixl_probe(device_t dev)
+{
+ ixl_vendor_info_t *ent;
+
+ u16 pci_vendor_id, pci_device_id;
+ u16 pci_subvendor_id, pci_subdevice_id;
+ char device_name[256];
+ static bool lock_init = FALSE;
+
+ INIT_DEBUGOUT("ixl_probe: begin");
+
+ pci_vendor_id = pci_get_vendor(dev);
+ if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
+ return (ENXIO);
+
+ pci_device_id = pci_get_device(dev);
+ pci_subvendor_id = pci_get_subvendor(dev);
+ pci_subdevice_id = pci_get_subdevice(dev);
+
+ ent = ixl_vendor_info_array;
+ while (ent->vendor_id != 0) {
+ if ((pci_vendor_id == ent->vendor_id) &&
+ (pci_device_id == ent->device_id) &&
+
+ ((pci_subvendor_id == ent->subvendor_id) ||
+ (ent->subvendor_id == 0)) &&
+
+ ((pci_subdevice_id == ent->subdevice_id) ||
+ (ent->subdevice_id == 0))) {
+ sprintf(device_name, "%s, Version - %s",
+ ixl_strings[ent->index],
+ ixl_driver_version);
+ device_set_desc_copy(dev, device_name);
+ /* One shot mutex init */
+ if (lock_init == FALSE) {
+ lock_init = TRUE;
+ mtx_init(&ixl_reset_mtx,
+ "ixl_reset",
+ "IXL RESET Lock", MTX_DEF);
+ }
+ return (BUS_PROBE_DEFAULT);
+ }
+ ent++;
+ }
+ return (ENXIO);
+}
+
+/*********************************************************************
+ * Device initialization routine
+ *
+ * The attach entry point is called when the driver is being loaded.
+ * This routine identifies the type of hardware, allocates all resources
+ * and initializes the hardware.
+ *
+ * return 0 on success, positive on failure
+ *********************************************************************/
+
+static int
+ixl_attach(device_t dev)
+{
+ struct ixl_pf *pf;
+ struct i40e_hw *hw;
+ struct ixl_vsi *vsi;
+ u16 bus;
+ int error = 0;
+
+ INIT_DEBUGOUT("ixl_attach: begin");
+
+ /* Allocate, clear, and link in our primary soft structure */
+ pf = device_get_softc(dev);
+ pf->dev = pf->osdep.dev = dev;
+ hw = &pf->hw;
+
+ /*
+ ** Note this assumes we have a single embedded VSI;
+ ** this could be enhanced later to allocate multiple.
+ */
+ vsi = &pf->vsi;
+ vsi->dev = pf->dev;
+
+ /* Core Lock Init*/
+ IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev));
+
+ /* Set up the timer callout */
+ callout_init_mtx(&pf->timer, &pf->pf_mtx, 0);
+
+ /* Set up sysctls */
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
+ pf, 0, ixl_set_flowcntl, "I", "Flow Control");
+
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
+ pf, 0, ixl_set_advertise, "I", "Advertised Speed");
+
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
+ pf, 0, ixl_current_speed, "A", "Current Port Speed");
+
+ SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW,
+ &ixl_rx_itr, IXL_ITR_8K, "RX ITR");
+
+ SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "dynamic_rx_itr", CTLTYPE_INT | CTLFLAG_RW,
+ &ixl_dynamic_rx_itr, 0, "Dynamic RX ITR");
+
+ SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW,
+ &ixl_tx_itr, IXL_ITR_4K, "TX ITR");
+
+ SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "dynamic_tx_itr", CTLTYPE_INT | CTLFLAG_RW,
+ &ixl_dynamic_tx_itr, 0, "Dynamic TX ITR");
+
+#ifdef IXL_DEBUG
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
+ pf, 0, ixl_sysctl_link_status, "A", "Current Link Status");
+
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
+ pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
+
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
+ pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
+
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "hw_res_info", CTLTYPE_STRING | CTLFLAG_RD,
+ pf, 0, ixl_sysctl_hw_res_info, "A", "HW Resource Allocation");
+
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "dump_desc", CTLTYPE_INT | CTLFLAG_WR,
+ pf, 0, ixl_sysctl_dump_txd, "I", "Desc dump");
+#endif
+
+ /* Save off the information about this board */
+ hw->vendor_id = pci_get_vendor(dev);
+ hw->device_id = pci_get_device(dev);
+ hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
+ hw->subsystem_vendor_id =
+ pci_read_config(dev, PCIR_SUBVEND_0, 2);
+ hw->subsystem_device_id =
+ pci_read_config(dev, PCIR_SUBDEV_0, 2);
+
+ hw->bus.device = pci_get_slot(dev);
+ hw->bus.func = pci_get_function(dev);
+
+ /* Do PCI setup - map BAR0, etc */
+ if (ixl_allocate_pci_resources(pf)) {
+ device_printf(dev, "Allocation of PCI resources failed\n");
+ error = ENXIO;
+ goto err_out;
+ }
+
+	/* Create a sysctl for initial debugging use */
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, pf, 0,
+ ixl_debug_info, "I", "Debug Information");
+
+
+ /* Establish a clean starting point */
+ i40e_clear_hw(hw);
+ error = i40e_pf_reset(hw);
+ if (error) {
+ device_printf(dev,"PF reset failure %x\n", error);
+ error = EIO;
+ goto err_out;
+ }
+
+ /* For now always do an initial CORE reset on first device */
+ {
+ static int ixl_dev_count;
+ static int ixl_dev_track[32];
+ u32 my_dev;
+ int i, found = FALSE;
+ u16 bus = pci_get_bus(dev);
+
+ mtx_lock(&ixl_reset_mtx);
+ my_dev = (bus << 8) | hw->bus.device;
+
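+		/*
+		** Track bus/device pairs already seen so that each
+		** physical adapter gets exactly one CORE reset.
+		*/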
+ for (i = 0; i < ixl_dev_count; i++) {
+ if (ixl_dev_track[i] == my_dev)
+ found = TRUE;
+ }
+
+ if (!found) {
+ u32 reg;
+
+ ixl_dev_track[ixl_dev_count] = my_dev;
+ ixl_dev_count++;
+
+ INIT_DEBUGOUT("Initial CORE RESET\n");
+ wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
+ ixl_flush(hw);
+ i = 50;
+ do {
+ i40e_msec_delay(50);
+ reg = rd32(hw, I40E_GLGEN_RSTAT);
+ if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
+ break;
+ } while (i--);
+
+			/* Paranoia: zero the PF admin transmit queue registers */
+ wr32(hw, I40E_PF_ATQLEN, 0);
+ wr32(hw, I40E_PF_ATQBAL, 0);
+ wr32(hw, I40E_PF_ATQBAH, 0);
+ i40e_clear_pxe_mode(hw);
+ }
+ mtx_unlock(&ixl_reset_mtx);
+ }
+
+ /* Set admin queue parameters */
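+	/*
+	** These set the descriptor counts and buffer sizes for the
+	** receive (ARQ) and send (ASQ) admin queues; they are consumed
+	** by i40e_init_adminq() below.
+	*/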
+ hw->aq.num_arq_entries = IXL_AQ_LEN;
+ hw->aq.num_asq_entries = IXL_AQ_LEN;
+ hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
+ hw->aq.asq_buf_size = IXL_AQ_BUFSZ;
+
+ /* Initialize the shared code */
+ error = i40e_init_shared_code(hw);
+ if (error) {
+ device_printf(dev,"Unable to initialize the shared code\n");
+ error = EIO;
+ goto err_out;
+ }
+
+ /* Set up the admin queue */
+ error = i40e_init_adminq(hw);
+ if (error) {
+ device_printf(dev, "The driver for the device stopped "
+ "because the NVM image is newer than expected.\n"
+ "You must install the most recent version of "
+		    "the network driver.\n");
+ goto err_out;
+ }
+ device_printf(dev, "%s\n", ixl_fw_version_str(hw));
+
+ if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
+ hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
+ device_printf(dev, "The driver for the device detected "
+ "a newer version of the NVM image than expected.\n"
+ "Please install the most recent version of the network driver.\n");
+ else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
+ hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
+ device_printf(dev, "The driver for the device detected "
+ "an older version of the NVM image than expected.\n"
+ "Please update the NVM image.\n");
+
+ /* Clear PXE mode */
+ i40e_clear_pxe_mode(hw);
+
+ /* Get capabilities from the device */
+ error = ixl_get_hw_capabilities(pf);
+ if (error) {
+ device_printf(dev, "HW capabilities failure!\n");
+ goto err_get_cap;
+ }
+
+ /* Set up host memory cache */
+ error = i40e_init_lan_hmc(hw, vsi->num_queues, vsi->num_queues, 0, 0);
+ if (error) {
+ device_printf(dev, "init_lan_hmc failed: %d\n", error);
+ goto err_get_cap;
+ }
+
+ error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
+ if (error) {
+ device_printf(dev, "configure_lan_hmc failed: %d\n", error);
+ goto err_mac_hmc;
+ }
+
+ /* Disable LLDP from the firmware */
+ i40e_aq_stop_lldp(hw, TRUE, NULL);
+
+ i40e_get_mac_addr(hw, hw->mac.addr);
+ error = i40e_validate_mac_addr(hw->mac.addr);
+ if (error) {
+ device_printf(dev, "validate_mac_addr failed: %d\n", error);
+ goto err_mac_hmc;
+ }
+ bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
+ i40e_get_port_mac_addr(hw, hw->mac.port_addr);
+
+ if (ixl_setup_stations(pf) != 0) {
+ device_printf(dev, "setup stations failed!\n");
+ error = ENOMEM;
+ goto err_mac_hmc;
+ }
+
+ /* Initialize mac filter list for VSI */
+ SLIST_INIT(&vsi->ftl);
+
+ /* Set up interrupt routing here */
+ if (pf->msix > 1)
+ error = ixl_assign_vsi_msix(pf);
+ else
+ error = ixl_assign_vsi_legacy(pf);
+ if (error)
+ goto err_late;
+
+ i40e_msec_delay(75);
+ error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
+ if (error) {
+ device_printf(dev, "link restart failed, aq_err=%d\n",
+ pf->hw.aq.asq_last_status);
+ }
+
+ /* Determine link state */
+ vsi->link_up = ixl_config_link(hw);
+
+ /* Report if Unqualified modules are found */
+ if ((vsi->link_up == FALSE) &&
+ (pf->hw.phy.link_info.link_info &
+ I40E_AQ_MEDIA_AVAILABLE) &&
+ (!(pf->hw.phy.link_info.an_info &
+ I40E_AQ_QUALIFIED_MODULE)))
+ device_printf(dev, "Link failed because "
+ "an unqualified module was detected\n");
+
+ /* Setup OS specific network interface */
+ if (ixl_setup_interface(dev, vsi) != 0)
+ goto err_late;
+
+ /* Get the bus configuration and set the shared code */
+ bus = ixl_get_bus_info(hw, dev);
+ i40e_set_pci_config_data(hw, bus);
+
+ /* Initialize statistics */
+ ixl_pf_reset_stats(pf);
+ ixl_update_stats_counters(pf);
+ ixl_add_hw_stats(pf);
+
+ /* Register for VLAN events */
+ vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
+ ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
+ vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
+ ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);
+
+#ifdef DEV_NETMAP
+ ixl_netmap_attach(pf);
+#endif /* DEV_NETMAP */
+
+ INIT_DEBUGOUT("ixl_attach: end");
+ return (0);
+
+err_late:
+ ixl_free_vsi(vsi);
+err_mac_hmc:
+ i40e_shutdown_lan_hmc(hw);
+err_get_cap:
+ i40e_shutdown_adminq(hw);
+err_out:
+ if (vsi->ifp != NULL)
+ if_free(vsi->ifp);
+ ixl_free_pci_resources(pf);
+ IXL_PF_LOCK_DESTROY(pf);
+ return (error);
+}
+
+/*********************************************************************
+ * Device removal routine
+ *
+ * The detach entry point is called when the driver is being removed.
+ * This routine stops the adapter and deallocates all the resources
+ * that were allocated for driver operation.
+ *
+ * return 0 on success, positive on failure
+ *********************************************************************/
+
+static int
+ixl_detach(device_t dev)
+{
+ struct ixl_pf *pf = device_get_softc(dev);
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct ixl_queue *que = vsi->queues;
+ i40e_status status;
+
+ INIT_DEBUGOUT("ixl_detach: begin");
+
+ /* Make sure VLANS are not using driver */
+ if (vsi->ifp->if_vlantrunk != NULL) {
+ device_printf(dev,"Vlan in use, detach first\n");
+ return (EBUSY);
+ }
+
+ IXL_PF_LOCK(pf);
+ ixl_stop(pf);
+ IXL_PF_UNLOCK(pf);
+
+ for (int i = 0; i < vsi->num_queues; i++, que++) {
+ if (que->tq) {
+ taskqueue_drain(que->tq, &que->task);
+ taskqueue_drain(que->tq, &que->tx_task);
+ taskqueue_free(que->tq);
+ }
+ }
+
+ /* Shutdown LAN HMC */
+ status = i40e_shutdown_lan_hmc(hw);
+ if (status)
+ device_printf(dev,
+ "Shutdown LAN HMC failed with code %d\n", status);
+
+ /* Shutdown admin queue */
+ status = i40e_shutdown_adminq(hw);
+ if (status)
+ device_printf(dev,
+ "Shutdown Admin queue failed with code %d\n", status);
+
+ /* Unregister VLAN events */
+ if (vsi->vlan_attach != NULL)
+ EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
+ if (vsi->vlan_detach != NULL)
+ EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);
+
+ ether_ifdetach(vsi->ifp);
+ callout_drain(&pf->timer);
+
+#ifdef DEV_NETMAP
+ netmap_detach(vsi->ifp);
+#endif /* DEV_NETMAP */
+
+ ixl_free_pci_resources(pf);
+ bus_generic_detach(dev);
+ if_free(vsi->ifp);
+ ixl_free_vsi(vsi);
+ IXL_PF_LOCK_DESTROY(pf);
+ return (0);
+}
+
+/*********************************************************************
+ *
+ * Shutdown entry point
+ *
+ **********************************************************************/
+
+static int
+ixl_shutdown(device_t dev)
+{
+ struct ixl_pf *pf = device_get_softc(dev);
+ IXL_PF_LOCK(pf);
+ ixl_stop(pf);
+ IXL_PF_UNLOCK(pf);
+ return (0);
+}
+
+
+/*********************************************************************
+ *
+ * Get the hardware capabilities
+ *
+ **********************************************************************/
+
+static int
+ixl_get_hw_capabilities(struct ixl_pf *pf)
+{
+ struct i40e_aqc_list_capabilities_element_resp *buf;
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ int error, len;
+ u16 needed;
+ bool again = TRUE;
+
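+	/*
+	** Start with room for 40 capability elements; if the firmware
+	** returns ENOMEM we retry once with the buffer size it reports
+	** as needed.
+	*/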
+ len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
+retry:
+ if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
+ malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ device_printf(dev, "Unable to allocate cap memory\n");
+ return (ENOMEM);
+ }
+
+ /* This populates the hw struct */
+ error = i40e_aq_discover_capabilities(hw, buf, len,
+ &needed, i40e_aqc_opc_list_func_capabilities, NULL);
+ free(buf, M_DEVBUF);
+ if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
+ (again == TRUE)) {
+ /* retry once with a larger buffer */
+ again = FALSE;
+ len = needed;
+ goto retry;
+ } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
+ device_printf(dev, "capability discovery failed: %d\n",
+ pf->hw.aq.asq_last_status);
+ return (ENODEV);
+ }
+
+ /* Capture this PF's starting queue pair */
+ pf->qbase = hw->func_caps.base_queue;
+
+#ifdef IXL_DEBUG
+ device_printf(dev,"pf_id=%d, num_vfs=%d, msix_pf=%d, "
+ "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
+ hw->pf_id, hw->func_caps.num_vfs,
+ hw->func_caps.num_msix_vectors,
+ hw->func_caps.num_msix_vectors_vf,
+ hw->func_caps.fd_filters_guaranteed,
+ hw->func_caps.fd_filters_best_effort,
+ hw->func_caps.num_tx_qp,
+ hw->func_caps.num_rx_qp,
+ hw->func_caps.base_queue);
+#endif
+ return (error);
+}
+
+static void
+ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
+{
+ device_t dev = vsi->dev;
+
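+	/*
+	** TSO requires the matching transmit checksum offload, so the
+	** KEEP_TSO flags remember that TSO was enabled when txcsum is
+	** turned off, allowing TSO to be restored automatically when
+	** txcsum is re-enabled.
+	*/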
+ /* Enable/disable TXCSUM/TSO4 */
+ if (!(ifp->if_capenable & IFCAP_TXCSUM)
+ && !(ifp->if_capenable & IFCAP_TSO4)) {
+ if (mask & IFCAP_TXCSUM) {
+ ifp->if_capenable |= IFCAP_TXCSUM;
+ /* enable TXCSUM, restore TSO if previously enabled */
+ if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
+ vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
+ ifp->if_capenable |= IFCAP_TSO4;
+ }
+ }
+ else if (mask & IFCAP_TSO4) {
+ ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
+ vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
+ device_printf(dev,
+ "TSO4 requires txcsum, enabling both...\n");
+ }
+ } else if((ifp->if_capenable & IFCAP_TXCSUM)
+ && !(ifp->if_capenable & IFCAP_TSO4)) {
+ if (mask & IFCAP_TXCSUM)
+ ifp->if_capenable &= ~IFCAP_TXCSUM;
+ else if (mask & IFCAP_TSO4)
+ ifp->if_capenable |= IFCAP_TSO4;
+ } else if((ifp->if_capenable & IFCAP_TXCSUM)
+ && (ifp->if_capenable & IFCAP_TSO4)) {
+ if (mask & IFCAP_TXCSUM) {
+ vsi->flags |= IXL_FLAGS_KEEP_TSO4;
+ ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
+ device_printf(dev,
+ "TSO4 requires txcsum, disabling both...\n");
+ } else if (mask & IFCAP_TSO4)
+ ifp->if_capenable &= ~IFCAP_TSO4;
+ }
+
+ /* Enable/disable TXCSUM_IPV6/TSO6 */
+ if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
+ && !(ifp->if_capenable & IFCAP_TSO6)) {
+ if (mask & IFCAP_TXCSUM_IPV6) {
+ ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
+ if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
+ vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
+ ifp->if_capenable |= IFCAP_TSO6;
+ }
+ } else if (mask & IFCAP_TSO6) {
+ ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
+ vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
+ device_printf(dev,
+ "TSO6 requires txcsum6, enabling both...\n");
+ }
+ } else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
+ && !(ifp->if_capenable & IFCAP_TSO6)) {
+ if (mask & IFCAP_TXCSUM_IPV6)
+ ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
+ else if (mask & IFCAP_TSO6)
+ ifp->if_capenable |= IFCAP_TSO6;
+ } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
+ && (ifp->if_capenable & IFCAP_TSO6)) {
+ if (mask & IFCAP_TXCSUM_IPV6) {
+ vsi->flags |= IXL_FLAGS_KEEP_TSO6;
+ ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
+ device_printf(dev,
+ "TSO6 requires txcsum6, disabling both...\n");
+ } else if (mask & IFCAP_TSO6)
+ ifp->if_capenable &= ~IFCAP_TSO6;
+ }
+}
+
+/*********************************************************************
+ * Ioctl entry point
+ *
+ * ixl_ioctl is called when the user wants to configure the
+ * interface.
+ *
+ * return 0 on success, positive on failure
+ **********************************************************************/
+
+static int
+ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
+{
+ struct ixl_vsi *vsi = ifp->if_softc;
+ struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
+ struct ifreq *ifr = (struct ifreq *) data;
+#if defined(INET) || defined(INET6)
+ struct ifaddr *ifa = (struct ifaddr *)data;
+ bool avoid_reset = FALSE;
+#endif
+ int error = 0;
+
+ switch (command) {
+
+ case SIOCSIFADDR:
+#ifdef INET
+ if (ifa->ifa_addr->sa_family == AF_INET)
+ avoid_reset = TRUE;
+#endif
+#ifdef INET6
+ if (ifa->ifa_addr->sa_family == AF_INET6)
+ avoid_reset = TRUE;
+#endif
+#if defined(INET) || defined(INET6)
+ /*
+ ** Calling init results in link renegotiation,
+ ** so we avoid doing it when possible.
+ */
+ if (avoid_reset) {
+ ifp->if_flags |= IFF_UP;
+ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
+ ixl_init(pf);
+ if (!(ifp->if_flags & IFF_NOARP))
+ arp_ifinit(ifp, ifa);
+ } else
+ error = ether_ioctl(ifp, command, data);
+ break;
+#endif
+ case SIOCSIFMTU:
+ IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
+ if (ifr->ifr_mtu > IXL_MAX_FRAME -
+ ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
+ error = EINVAL;
+ } else {
+ IXL_PF_LOCK(pf);
+ ifp->if_mtu = ifr->ifr_mtu;
+ vsi->max_frame_size =
+ ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
+ + ETHER_VLAN_ENCAP_LEN;
+ ixl_init_locked(pf);
+ IXL_PF_UNLOCK(pf);
+ }
+ break;
+ case SIOCSIFFLAGS:
+ IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
+ IXL_PF_LOCK(pf);
+ if (ifp->if_flags & IFF_UP) {
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ if ((ifp->if_flags ^ pf->if_flags) &
+ (IFF_PROMISC | IFF_ALLMULTI)) {
+ ixl_set_promisc(vsi);
+ }
+ } else
+ ixl_init_locked(pf);
+ } else
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ ixl_stop(pf);
+ pf->if_flags = ifp->if_flags;
+ IXL_PF_UNLOCK(pf);
+ break;
+ case SIOCADDMULTI:
+ IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ IXL_PF_LOCK(pf);
+ ixl_disable_intr(vsi);
+ ixl_add_multi(vsi);
+ ixl_enable_intr(vsi);
+ IXL_PF_UNLOCK(pf);
+ }
+ break;
+ case SIOCDELMULTI:
+ IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ IXL_PF_LOCK(pf);
+ ixl_disable_intr(vsi);
+ ixl_del_multi(vsi);
+ ixl_enable_intr(vsi);
+ IXL_PF_UNLOCK(pf);
+ }
+ break;
+ case SIOCSIFMEDIA:
+ case SIOCGIFMEDIA:
+ IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
+ error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
+ break;
+ case SIOCSIFCAP:
+ {
+ int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
+ IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
+
+ ixl_cap_txcsum_tso(vsi, ifp, mask);
+
+ if (mask & IFCAP_RXCSUM)
+ ifp->if_capenable ^= IFCAP_RXCSUM;
+ if (mask & IFCAP_RXCSUM_IPV6)
+ ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
+ if (mask & IFCAP_LRO)
+ ifp->if_capenable ^= IFCAP_LRO;
+ if (mask & IFCAP_VLAN_HWTAGGING)
+ ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
+ if (mask & IFCAP_VLAN_HWFILTER)
+ ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
+ if (mask & IFCAP_VLAN_HWTSO)
+ ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ IXL_PF_LOCK(pf);
+ ixl_init_locked(pf);
+ IXL_PF_UNLOCK(pf);
+ }
+ VLAN_CAPABILITIES(ifp);
+
+ break;
+ }
+
+ default:
+ IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
+ error = ether_ioctl(ifp, command, data);
+ break;
+ }
+
+ return (error);
+}
+
+
+/*********************************************************************
+ * Init entry point
+ *
+ * This routine is used in two ways. It is used by the stack as
+ * init entry point in network interface structure. It is also used
+ * by the driver as a hw/sw initialization routine to get to a
+ * consistent state.
+ *
+ * return 0 on success, positive on failure
+ **********************************************************************/
+
+static void
+ixl_init_locked(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct ifnet *ifp = vsi->ifp;
+ device_t dev = pf->dev;
+ struct i40e_filter_control_settings filter;
+ u8 tmpaddr[ETHER_ADDR_LEN];
+ int ret;
+
+ mtx_assert(&pf->pf_mtx, MA_OWNED);
+ INIT_DEBUGOUT("ixl_init: begin");
+ ixl_stop(pf);
+
+ /* Get the latest mac address... User might use a LAA */
+ bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
+ I40E_ETH_LENGTH_OF_ADDRESS);
+ if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
+ i40e_validate_mac_addr(tmpaddr)) {
+ bcopy(tmpaddr, hw->mac.addr,
+ I40E_ETH_LENGTH_OF_ADDRESS);
+ ret = i40e_aq_mac_address_write(hw,
+ I40E_AQC_WRITE_TYPE_LAA_ONLY,
+ hw->mac.addr, NULL);
+ if (ret) {
+			device_printf(dev, "LLA address "
+ "change failed!!\n");
+ return;
+ }
+ }
+
+ /* Set the various hardware offload abilities */
+ ifp->if_hwassist = 0;
+ if (ifp->if_capenable & IFCAP_TSO)
+ ifp->if_hwassist |= CSUM_TSO;
+ if (ifp->if_capenable & IFCAP_TXCSUM)
+ ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
+ if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
+ ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);
+
+ /* Set up the device filtering */
+ bzero(&filter, sizeof(filter));
+ filter.enable_ethtype = TRUE;
+ filter.enable_macvlan = TRUE;
+#ifdef IXL_FDIR
+ filter.enable_fdir = TRUE;
+#endif
+ if (i40e_set_filter_control(hw, &filter))
+ device_printf(dev, "set_filter_control() failed\n");
+
+ /* Set up RSS */
+ ixl_config_rss(vsi);
+
+ /* Setup the VSI */
+ ixl_setup_vsi(vsi);
+
+ /*
+ ** Prepare the rings, hmc contexts, etc...
+ */
+ if (ixl_initialize_vsi(vsi)) {
+ device_printf(dev, "initialize vsi failed!!\n");
+ return;
+ }
+
+ /* Add protocol filters to list */
+ ixl_init_filters(vsi);
+
+ /* Setup vlan's if needed */
+ ixl_setup_vlan_filters(vsi);
+
+ /* Start the local timer */
+ callout_reset(&pf->timer, hz, ixl_local_timer, pf);
+
+ /* Set up MSI/X routing and the ITR settings */
+ if (ixl_enable_msix) {
+ ixl_configure_msix(pf);
+ ixl_configure_itr(pf);
+ } else
+ ixl_configure_legacy(pf);
+
+ ixl_enable_rings(vsi);
+
+ i40e_aq_set_default_vsi(hw, vsi->seid, NULL);
+
+	/* Set MTU in hardware */
+ int aq_error = i40e_aq_set_mac_config(hw, vsi->max_frame_size,
+ TRUE, 0, NULL);
+ if (aq_error)
+ device_printf(vsi->dev,
+ "aq_set_mac_config in init error, code %d\n",
+ aq_error);
+
+ /* And now turn on interrupts */
+ ixl_enable_intr(vsi);
+
+ /* Now inform the stack we're ready */
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+
+ return;
+}
+
+static void
+ixl_init(void *arg)
+{
+ struct ixl_pf *pf = arg;
+
+ IXL_PF_LOCK(pf);
+ ixl_init_locked(pf);
+ IXL_PF_UNLOCK(pf);
+ return;
+}
+
+/*
+**
+** MSIX Interrupt Handlers and Tasklets
+**
+*/
+static void
+ixl_handle_que(void *context, int pending)
+{
+ struct ixl_queue *que = context;
+ struct ixl_vsi *vsi = que->vsi;
+ struct i40e_hw *hw = vsi->hw;
+ struct tx_ring *txr = &que->txr;
+ struct ifnet *ifp = vsi->ifp;
+ bool more;
+
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ more = ixl_rxeof(que, IXL_RX_LIMIT);
+ IXL_TX_LOCK(txr);
+ ixl_txeof(que);
+ if (!drbr_empty(ifp, txr->br))
+ ixl_mq_start_locked(ifp, txr);
+ IXL_TX_UNLOCK(txr);
+ if (more) {
+ taskqueue_enqueue(que->tq, &que->task);
+ return;
+ }
+ }
+
+	/* Re-enable this queue's interrupt */
+ ixl_enable_queue(hw, que->me);
+ return;
+}
+
+
+/*********************************************************************
+ *
+ * Legacy Interrupt Service routine
+ *
+ **********************************************************************/
+void
+ixl_intr(void *arg)
+{
+ struct ixl_pf *pf = arg;
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct ixl_queue *que = vsi->queues;
+ struct ifnet *ifp = vsi->ifp;
+ struct tx_ring *txr = &que->txr;
+ u32 reg, icr0, mask;
+ bool more_tx, more_rx;
+
+ ++que->irqs;
+
+ /* Protect against spurious interrupts */
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ return;
+
+ icr0 = rd32(hw, I40E_PFINT_ICR0);
+
+ reg = rd32(hw, I40E_PFINT_DYN_CTL0);
+ reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
+ wr32(hw, I40E_PFINT_DYN_CTL0, reg);
+
+ mask = rd32(hw, I40E_PFINT_ICR0_ENA);
+
+ if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
+ taskqueue_enqueue(pf->tq, &pf->adminq);
+ return;
+ }
+
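+	/* Service the single RX/TX queue pair directly from this handler */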
+ more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
+
+ IXL_TX_LOCK(txr);
+ more_tx = ixl_txeof(que);
+ if (!drbr_empty(vsi->ifp, txr->br))
+ more_tx = 1;
+ IXL_TX_UNLOCK(txr);
+
+ /* re-enable other interrupt causes */
+ wr32(hw, I40E_PFINT_ICR0_ENA, mask);
+
+ /* And now the queues */
+ reg = rd32(hw, I40E_QINT_RQCTL(0));
+ reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
+ wr32(hw, I40E_QINT_RQCTL(0), reg);
+
+ reg = rd32(hw, I40E_QINT_TQCTL(0));
+ reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
+ reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK;
+ wr32(hw, I40E_QINT_TQCTL(0), reg);
+
+ ixl_enable_legacy(hw);
+
+ return;
+}
+
+
+/*********************************************************************
+ *
+ * MSIX VSI Interrupt Service routine
+ *
+ **********************************************************************/
+void
+ixl_msix_que(void *arg)
+{
+ struct ixl_queue *que = arg;
+ struct ixl_vsi *vsi = que->vsi;
+ struct i40e_hw *hw = vsi->hw;
+ struct tx_ring *txr = &que->txr;
+ bool more_tx, more_rx;
+
+ /* Protect against spurious interrupts */
+ if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
+ return;
+
+ ++que->irqs;
+
+ more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
+
+ IXL_TX_LOCK(txr);
+ more_tx = ixl_txeof(que);
+ /*
+ ** Make certain that if the stack
+ ** has anything queued the task gets
+ ** scheduled to handle it.
+ */
+ if (!drbr_empty(vsi->ifp, txr->br))
+ more_tx = 1;
+ IXL_TX_UNLOCK(txr);
+
+ ixl_set_queue_rx_itr(que);
+ ixl_set_queue_tx_itr(que);
+
+ if (more_tx || more_rx)
+ taskqueue_enqueue(que->tq, &que->task);
+ else
+ ixl_enable_queue(hw, que->me);
+
+ return;
+}
+
+
+/*********************************************************************
+ *
+ * MSIX Admin Queue Interrupt Service routine
+ *
+ **********************************************************************/
+static void
+ixl_msix_adminq(void *arg)
+{
+ struct ixl_pf *pf = arg;
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg, mask;
+
+ ++pf->admin_irq;
+
+ reg = rd32(hw, I40E_PFINT_ICR0);
+ mask = rd32(hw, I40E_PFINT_ICR0_ENA);
+
+ /* Check on the cause */
+ if (reg & I40E_PFINT_ICR0_ADMINQ_MASK)
+ mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
+
+ if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
+ ixl_handle_mdd_event(pf);
+ mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
+ }
+
+ if (reg & I40E_PFINT_ICR0_VFLR_MASK)
+ mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
+
+ reg = rd32(hw, I40E_PFINT_DYN_CTL0);
+ reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
+ wr32(hw, I40E_PFINT_DYN_CTL0, reg);
+
+ taskqueue_enqueue(pf->tq, &pf->adminq);
+ return;
+}
+
+/*********************************************************************
+ *
+ * Media Ioctl callback
+ *
+ * This routine is called whenever the user queries the status of
+ * the interface using ifconfig.
+ *
+ **********************************************************************/
+static void
+ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
+{
+ struct ixl_vsi *vsi = ifp->if_softc;
+ struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+
+ INIT_DEBUGOUT("ixl_media_status: begin");
+ IXL_PF_LOCK(pf);
+
+ ixl_update_link_status(pf);
+
+ ifmr->ifm_status = IFM_AVALID;
+ ifmr->ifm_active = IFM_ETHER;
+
+ if (!vsi->link_up) {
+ IXL_PF_UNLOCK(pf);
+ return;
+ }
+
+ ifmr->ifm_status |= IFM_ACTIVE;
+ /* Hardware is always full-duplex */
+ ifmr->ifm_active |= IFM_FDX;
+
+ switch (hw->phy.link_info.phy_type) {
+ /* 100 M */
+ case I40E_PHY_TYPE_100BASE_TX:
+ ifmr->ifm_active |= IFM_100_TX;
+ break;
+ /* 1 G */
+ case I40E_PHY_TYPE_1000BASE_T:
+ ifmr->ifm_active |= IFM_1000_T;
+ break;
+ case I40E_PHY_TYPE_1000BASE_SX:
+ ifmr->ifm_active |= IFM_1000_SX;
+ break;
+ case I40E_PHY_TYPE_1000BASE_LX:
+ ifmr->ifm_active |= IFM_1000_LX;
+ break;
+ /* 10 G */
+ case I40E_PHY_TYPE_10GBASE_CR1_CU:
+ case I40E_PHY_TYPE_10GBASE_SFPP_CU:
+ ifmr->ifm_active |= IFM_10G_TWINAX;
+ break;
+ case I40E_PHY_TYPE_10GBASE_SR:
+ ifmr->ifm_active |= IFM_10G_SR;
+ break;
+ case I40E_PHY_TYPE_10GBASE_LR:
+ ifmr->ifm_active |= IFM_10G_LR;
+ break;
+ case I40E_PHY_TYPE_10GBASE_T:
+ ifmr->ifm_active |= IFM_10G_T;
+ break;
+ /* 40 G */
+ case I40E_PHY_TYPE_40GBASE_CR4:
+ case I40E_PHY_TYPE_40GBASE_CR4_CU:
+ ifmr->ifm_active |= IFM_40G_CR4;
+ break;
+ case I40E_PHY_TYPE_40GBASE_SR4:
+ ifmr->ifm_active |= IFM_40G_SR4;
+ break;
+ case I40E_PHY_TYPE_40GBASE_LR4:
+ ifmr->ifm_active |= IFM_40G_LR4;
+ break;
+ default:
+ ifmr->ifm_active |= IFM_UNKNOWN;
+ break;
+ }
+ /* Report flow control status as well */
+ if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
+ ifmr->ifm_active |= IFM_ETH_TXPAUSE;
+ if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
+ ifmr->ifm_active |= IFM_ETH_RXPAUSE;
+
+ IXL_PF_UNLOCK(pf);
+
+ return;
+}
+
+/*********************************************************************
+ *
+ * Media Ioctl callback
+ *
+ * This routine is called when the user changes speed/duplex using
+ *  media/mediaopt option with ifconfig.
+ *
+ **********************************************************************/
+static int
+ixl_media_change(struct ifnet * ifp)
+{
+ struct ixl_vsi *vsi = ifp->if_softc;
+ struct ifmedia *ifm = &vsi->media;
+
+ INIT_DEBUGOUT("ixl_media_change: begin");
+
+ if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
+ return (EINVAL);
+
+ if_printf(ifp, "Media change is currently not supported.\n");
+
+ return (ENODEV);
+}
+
+
+#ifdef IXL_FDIR
+/*
+** ATR: Application Targeted Receive - creates a filter
+** based on TX flow info that will keep the receive
+** portion of the flow on the same queue. Based on the
+** implementation this is only available for TCP connections.
+*/
+void
+ixl_atr(struct ixl_queue *que, struct tcphdr *th, int etype)
+{
+ struct ixl_vsi *vsi = que->vsi;
+ struct tx_ring *txr = &que->txr;
+ struct i40e_filter_program_desc *FDIR;
+ u32 ptype, dtype;
+ int idx;
+
+ /* check if ATR is enabled and sample rate */
+ if ((!ixl_enable_fdir) || (!txr->atr_rate))
+ return;
+ /*
+ ** We sample all TCP SYN/FIN packets,
+ ** or at the selected sample rate
+ */
+ txr->atr_count++;
+ if (((th->th_flags & (TH_FIN | TH_SYN)) == 0) &&
+ (txr->atr_count < txr->atr_rate))
+ return;
+ txr->atr_count = 0;
+
+ /* Get a descriptor to use */
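+	/* (the filter-program descriptor consumes a normal TX ring slot) */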
+ idx = txr->next_avail;
+ FDIR = (struct i40e_filter_program_desc *) &txr->base[idx];
+ if (++idx == que->num_desc)
+ idx = 0;
+ txr->avail--;
+ txr->next_avail = idx;
+
+ ptype = (que->me << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
+ I40E_TXD_FLTR_QW0_QINDEX_MASK;
+
+ ptype |= (etype == ETHERTYPE_IP) ?
+ (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
+ I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
+ (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
+ I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
+
+ ptype |= vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
+
+ dtype = I40E_TX_DESC_DTYPE_FILTER_PROG;
+
+ /*
+ ** We use the TCP TH_FIN as a trigger to remove
+	** the filter, otherwise it's an update.
+ */
+ dtype |= (th->th_flags & TH_FIN) ?
+ (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
+ I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
+ (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
+ I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+
+ dtype |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
+ I40E_TXD_FLTR_QW1_DEST_SHIFT;
+
+ dtype |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
+ I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
+
+ FDIR->qindex_flex_ptype_vsi = htole32(ptype);
+ FDIR->dtype_cmd_cntindex = htole32(dtype);
+ return;
+}
+#endif
+
+
+static void
+ixl_set_promisc(struct ixl_vsi *vsi)
+{
+ struct ifnet *ifp = vsi->ifp;
+ struct i40e_hw *hw = vsi->hw;
+ int err, mcnt = 0;
+ bool uni = FALSE, multi = FALSE;
+
+ if (ifp->if_flags & IFF_ALLMULTI)
+ multi = TRUE;
+ else { /* Need to count the multicast addresses */
+ struct ifmultiaddr *ifma;
+ if_maddr_rlock(ifp);
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_LINK)
+ continue;
+ if (mcnt == MAX_MULTICAST_ADDR)
+ break;
+ mcnt++;
+ }
+ if_maddr_runlock(ifp);
+ }
+
+ if (mcnt >= MAX_MULTICAST_ADDR)
+ multi = TRUE;
+ if (ifp->if_flags & IFF_PROMISC)
+ uni = TRUE;
+
+ err = i40e_aq_set_vsi_unicast_promiscuous(hw,
+ vsi->seid, uni, NULL);
+ err = i40e_aq_set_vsi_multicast_promiscuous(hw,
+ vsi->seid, multi, NULL);
+ return;
+}
+
+/*********************************************************************
+ * Filter Routines
+ *
+ * Routines for multicast and vlan filter management.
+ *
+ *********************************************************************/
+static void
+ixl_add_multi(struct ixl_vsi *vsi)
+{
+ struct ifmultiaddr *ifma;
+ struct ifnet *ifp = vsi->ifp;
+ struct i40e_hw *hw = vsi->hw;
+ int mcnt = 0, flags;
+
+ IOCTL_DEBUGOUT("ixl_add_multi: begin");
+
+ if_maddr_rlock(ifp);
+ /*
+	** First just get a count, to decide if
+	** we simply use multicast promiscuous.
+ */
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_LINK)
+ continue;
+ mcnt++;
+ }
+ if_maddr_runlock(ifp);
+
+ if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
+ /* delete existing MC filters */
+ ixl_del_hw_filters(vsi, mcnt);
+ i40e_aq_set_vsi_multicast_promiscuous(hw,
+ vsi->seid, TRUE, NULL);
+ return;
+ }
+
+ mcnt = 0;
+ if_maddr_rlock(ifp);
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_LINK)
+ continue;
+ ixl_add_mc_filter(vsi,
+ (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
+ mcnt++;
+ }
+ if_maddr_runlock(ifp);
+ if (mcnt > 0) {
+ flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
+ ixl_add_hw_filters(vsi, flags, mcnt);
+ }
+
+ IOCTL_DEBUGOUT("ixl_add_multi: end");
+ return;
+}
+
+static void
+ixl_del_multi(struct ixl_vsi *vsi)
+{
+ struct ifnet *ifp = vsi->ifp;
+ struct ifmultiaddr *ifma;
+ struct ixl_mac_filter *f;
+ int mcnt = 0;
+ bool match = FALSE;
+
+ IOCTL_DEBUGOUT("ixl_del_multi: begin");
+
+ /* Search for removed multicast addresses */
+ if_maddr_rlock(ifp);
+ SLIST_FOREACH(f, &vsi->ftl, next) {
+ if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
+ match = FALSE;
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_LINK)
+ continue;
+ u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
+ if (cmp_etheraddr(f->macaddr, mc_addr)) {
+ match = TRUE;
+ break;
+ }
+ }
+ if (match == FALSE) {
+ f->flags |= IXL_FILTER_DEL;
+ mcnt++;
+ }
+ }
+ }
+ if_maddr_runlock(ifp);
+
+ if (mcnt > 0)
+ ixl_del_hw_filters(vsi, mcnt);
+}
+
+
+/*********************************************************************
+ * Timer routine
+ *
+ *  This routine checks for link status, updates statistics,
+ * and runs the watchdog check.
+ *
+ **********************************************************************/
+
+static void
+ixl_local_timer(void *arg)
+{
+ struct ixl_pf *pf = arg;
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct ixl_queue *que = vsi->queues;
+ device_t dev = pf->dev;
+ int hung = 0;
+ u32 mask;
+
+ mtx_assert(&pf->pf_mtx, MA_OWNED);
+
+ /* Fire off the adminq task */
+ taskqueue_enqueue(pf->tq, &pf->adminq);
+
+ /* Update stats */
+ ixl_update_stats_counters(pf);
+
+ /*
+ ** Check status of the queues
+ */
+ mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
+
+ for (int i = 0; i < vsi->num_queues; i++,que++) {
+ /* Any queues with outstanding work get a sw irq */
+ if (que->busy)
+ wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
+ /*
+		** Each time txeof runs without cleaning, while there
+		** are still uncleaned descriptors, busy is incremented;
+		** if it gets to 5 we declare the queue hung.
+ */
+ if (que->busy == IXL_QUEUE_HUNG) {
+ ++hung;
+ /* Mark the queue as inactive */
+ vsi->active_queues &= ~((u64)1 << que->me);
+ continue;
+ } else {
+ /* Check if we've come back from hung */
+ if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
+ vsi->active_queues |= ((u64)1 << que->me);
+ }
+ if (que->busy >= IXL_MAX_TX_BUSY) {
+			device_printf(dev,"Warning: queue %d "
+ "appears to be hung!\n", i);
+ que->busy = IXL_QUEUE_HUNG;
+ ++hung;
+ }
+ }
+ /* Only reinit if all queues show hung */
+ if (hung == vsi->num_queues)
+ goto hung;
+
+ callout_reset(&pf->timer, hz, ixl_local_timer, pf);
+ return;
+
+hung:
+ device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n");
+ ixl_init_locked(pf);
+}
+
+/*
+** Note: this routine updates the OS on the link state;
+** the real check of the hardware only happens with
+** a link interrupt.
+*/
+static void
+ixl_update_link_status(struct ixl_pf *pf)
+{
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct i40e_hw *hw = &pf->hw;
+ struct ifnet *ifp = vsi->ifp;
+ device_t dev = pf->dev;
+ enum i40e_fc_mode fc;
+
+
+ if (vsi->link_up){
+ if (vsi->link_active == FALSE) {
+ i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
+ if (bootverbose) {
+ fc = hw->fc.current_mode;
+ device_printf(dev,"Link is up %d Gbps %s,"
+ " Flow Control: %s\n",
+ ((vsi->link_speed == I40E_LINK_SPEED_40GB)? 40:10),
+ "Full Duplex", ixl_fc_string[fc]);
+ }
+ vsi->link_active = TRUE;
+ if_link_state_change(ifp, LINK_STATE_UP);
+ }
+ } else { /* Link down */
+ if (vsi->link_active == TRUE) {
+ if (bootverbose)
+ device_printf(dev,"Link is Down\n");
+ if_link_state_change(ifp, LINK_STATE_DOWN);
+ vsi->link_active = FALSE;
+ }
+ }
+
+ return;
+}
+
+/*********************************************************************
+ *
+ * This routine disables all traffic on the adapter by issuing a
+ * global reset on the MAC and deallocates TX/RX buffers.
+ *
+ **********************************************************************/
+
+static void
+ixl_stop(struct ixl_pf *pf)
+{
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct ifnet *ifp = vsi->ifp;
+
+ mtx_assert(&pf->pf_mtx, MA_OWNED);
+
+ INIT_DEBUGOUT("ixl_stop: begin\n");
+ ixl_disable_intr(vsi);
+ ixl_disable_rings(vsi);
+
+ /* Tell the stack that the interface is no longer active */
+ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+
+ /* Stop the local timer */
+ callout_stop(&pf->timer);
+
+ return;
+}
+
+
+/*********************************************************************
+ *
+ *  Setup Legacy or MSI Interrupt resources and handlers for the VSI
+ *
+ **********************************************************************/
+static int
+ixl_assign_vsi_legacy(struct ixl_pf *pf)
+{
+ device_t dev = pf->dev;
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct ixl_queue *que = vsi->queues;
+ int error, rid = 0;
+
+ if (pf->msix == 1)
+ rid = 1;
+ pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
+ &rid, RF_SHAREABLE | RF_ACTIVE);
+ if (pf->res == NULL) {
+ device_printf(dev,"Unable to allocate"
+ " bus resource: vsi legacy/msi interrupt\n");
+ return (ENXIO);
+ }
+
+ /* Set the handler function */
+ error = bus_setup_intr(dev, pf->res,
+ INTR_TYPE_NET | INTR_MPSAFE, NULL,
+ ixl_intr, pf, &pf->tag);
+ if (error) {
+ pf->res = NULL;
+ device_printf(dev, "Failed to register legacy/msi handler");
+ return (error);
+ }
+ bus_describe_intr(dev, pf->res, pf->tag, "irq0");
+ TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
+ TASK_INIT(&que->task, 0, ixl_handle_que, que);
+ que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
+ taskqueue_thread_enqueue, &que->tq);
+ taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
+ device_get_nameunit(dev));
+ TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
+ pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
+ taskqueue_thread_enqueue, &pf->tq);
+ taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
+ device_get_nameunit(dev));
+
+ return (0);
+}
+
+
+/*********************************************************************
+ *
+ * Setup MSIX Interrupt resources and handlers for the VSI
+ *
+ **********************************************************************/
+static int
+ixl_assign_vsi_msix(struct ixl_pf *pf)
+{
+ device_t dev = pf->dev;
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct ixl_queue *que = vsi->queues;
+ struct tx_ring *txr;
+ int error, rid, vector = 0;
+
+	/* Admin Queue is vector 0 */
+ rid = vector + 1;
+ pf->res = bus_alloc_resource_any(dev,
+ SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
+ if (!pf->res) {
+ device_printf(dev,"Unable to allocate"
+ " bus resource: Adminq interrupt [%d]\n", rid);
+ return (ENXIO);
+ }
+ /* Set the adminq vector and handler */
+ error = bus_setup_intr(dev, pf->res,
+ INTR_TYPE_NET | INTR_MPSAFE, NULL,
+ ixl_msix_adminq, pf, &pf->tag);
+ if (error) {
+ pf->res = NULL;
+ device_printf(dev, "Failed to register Admin que handler");
+ return (error);
+ }
+ bus_describe_intr(dev, pf->res, pf->tag, "aq");
+ pf->admvec = vector;
+ /* Tasklet for Admin Queue */
+ TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
+ pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
+ taskqueue_thread_enqueue, &pf->tq);
+ taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
+ device_get_nameunit(pf->dev));
+ ++vector;
+
+ /* Now set up the stations */
+ for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
+ rid = vector + 1;
+ txr = &que->txr;
+ que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
+ RF_SHAREABLE | RF_ACTIVE);
+ if (que->res == NULL) {
+ device_printf(dev,"Unable to allocate"
+ " bus resource: que interrupt [%d]\n", vector);
+ return (ENXIO);
+ }
+ /* Set the handler function */
+ error = bus_setup_intr(dev, que->res,
+ INTR_TYPE_NET | INTR_MPSAFE, NULL,
+ ixl_msix_que, que, &que->tag);
+ if (error) {
+ que->res = NULL;
+ device_printf(dev, "Failed to register que handler");
+ return (error);
+ }
+ bus_describe_intr(dev, que->res, que->tag, "q%d", i);
+ /* Bind the vector to a CPU */
+ bus_bind_intr(dev, que->res, i);
+ que->msix = vector;
+ TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
+ TASK_INIT(&que->task, 0, ixl_handle_que, que);
+ que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
+ taskqueue_thread_enqueue, &que->tq);
+ taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
+ device_get_nameunit(pf->dev));
+ }
+
+ return (0);
+}
+
+
+/*
+ * Allocate MSI/X vectors
+ */
+static int
+ixl_init_msix(struct ixl_pf *pf)
+{
+ device_t dev = pf->dev;
+ int rid, want, vectors, queues, available;
+
+ /* Override by tuneable */
+ if (ixl_enable_msix == 0)
+ goto msi;
+
+ /*
+	** When used in a virtualized environment the
+	** PCI BUSMASTER capability may not be set,
+	** so explicitly set it here and rewrite
+	** the ENABLE bit in the MSIX control register
+	** at this point to allow the host to
+	** successfully initialize us.
+ */
+ {
+ u16 pci_cmd_word;
+ int msix_ctrl;
+ pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
+ pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
+ pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
+ pci_find_cap(dev, PCIY_MSIX, &rid);
+ rid += PCIR_MSIX_CTRL;
+ msix_ctrl = pci_read_config(dev, rid, 2);
+ msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
+ pci_write_config(dev, rid, msix_ctrl, 2);
+ }
+
+ /* First try MSI/X */
+ rid = PCIR_BAR(IXL_BAR);
+ pf->msix_mem = bus_alloc_resource_any(dev,
+ SYS_RES_MEMORY, &rid, RF_ACTIVE);
+ if (!pf->msix_mem) {
+ /* May not be enabled */
+ device_printf(pf->dev,
+		    "Unable to map MSIX table\n");
+ goto msi;
+ }
+
+ available = pci_msix_count(dev);
+ if (available == 0) { /* system has msix disabled */
+ bus_release_resource(dev, SYS_RES_MEMORY,
+ rid, pf->msix_mem);
+ pf->msix_mem = NULL;
+ goto msi;
+ }
+
+ /* Figure out a reasonable auto config value */
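+	/* (one queue per CPU, but leave one vector for the admin queue) */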
+ queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;
+
+ /* Override with hardcoded value if sane */
+ if ((ixl_max_queues != 0) && (ixl_max_queues <= queues))
+ queues = ixl_max_queues;
+
+ /*
+ ** Want one vector (RX/TX pair) per queue
+	** plus an additional one for the admin queue.
+ */
+ want = queues + 1;
+ if (want <= available) /* Have enough */
+ vectors = want;
+ else {
+ device_printf(pf->dev,
+ "MSIX Configuration Problem, "
+ "%d vectors available but %d wanted!\n",
+ available, want);
+ return (0); /* Will go to Legacy setup */
+ }
+
+ if (pci_alloc_msix(dev, &vectors) == 0) {
+ device_printf(pf->dev,
+ "Using MSIX interrupts with %d vectors\n", vectors);
+ pf->msix = vectors;
+ pf->vsi.num_queues = queues;
+ return (vectors);
+ }
+msi:
+ vectors = pci_msi_count(dev);
+ pf->vsi.num_queues = 1;
+ pf->msix = 1;
+ ixl_max_queues = 1;
+ ixl_enable_msix = 0;
+ if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
+ device_printf(pf->dev,"Using an MSI interrupt\n");
+ else {
+ pf->msix = 0;
+ device_printf(pf->dev,"Using a Legacy interrupt\n");
+ }
+ return (vectors);
+}
+
+
+/*
+ * Plumb MSI/X vectors
+ */
+static void
+ixl_configure_msix(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_vsi *vsi = &pf->vsi;
+ u32 reg;
+ u16 vector = 1;
+
+ /* First set up the adminq - vector 0 */
+ wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
+ rd32(hw, I40E_PFINT_ICR0); /* read to clear */
+
+ reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
+ I40E_PFINT_ICR0_ENA_GRST_MASK |
+ I40E_PFINT_ICR0_HMC_ERR_MASK |
+ I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
+ I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
+ I40E_PFINT_ICR0_ENA_VFLR_MASK |
+ I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
+ wr32(hw, I40E_PFINT_ICR0_ENA, reg);
+
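+	/*
+	** No queues are attached to the adminq vector: 0x7FF marks the
+	** end of its interrupt linked list, and ITR0 gets a default
+	** throttling value.
+	*/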
+ wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
+ wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x003E);
+
+ wr32(hw, I40E_PFINT_DYN_CTL0,
+ I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
+ I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
+
+ wr32(hw, I40E_PFINT_STAT_CTL0, 0);
+
+ /* Next configure the queues */
+ for (int i = 0; i < vsi->num_queues; i++, vector++) {
+ wr32(hw, I40E_PFINT_DYN_CTLN(i), i);
+ wr32(hw, I40E_PFINT_LNKLSTN(i), i);
+
+ reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
+ (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
+ (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
+ (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
+ (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
+ wr32(hw, I40E_QINT_RQCTL(i), reg);
+
+ reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
+ (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
+ (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
+ ((i+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
+ (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
+ if (i == (vsi->num_queues - 1))
+ reg |= (IXL_QUEUE_EOL
+ << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
+ wr32(hw, I40E_QINT_TQCTL(i), reg);
+ }
+}
+
+/*
+ * Configure for MSI single vector operation
+ */
+static void
+ixl_configure_legacy(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg;
+
+
+ wr32(hw, I40E_PFINT_ITR0(0), 0);
+ wr32(hw, I40E_PFINT_ITR0(1), 0);
+
+
+ /* Setup "other" causes */
+ reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
+ | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
+ | I40E_PFINT_ICR0_ENA_GRST_MASK
+ | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
+ | I40E_PFINT_ICR0_ENA_GPIO_MASK
+ | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
+ | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
+ | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
+ | I40E_PFINT_ICR0_ENA_VFLR_MASK
+ | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
+ ;
+ wr32(hw, I40E_PFINT_ICR0_ENA, reg);
+
+ /* SW_ITR_IDX = 0, but don't change INTENA */
+ wr32(hw, I40E_PFINT_DYN_CTL0,
+ I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
+ I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
+ /* SW_ITR_IDX = 0, OTHER_ITR_IDX = 0 */
+ wr32(hw, I40E_PFINT_STAT_CTL0, 0);
+
+ /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
+ wr32(hw, I40E_PFINT_LNKLST0, 0);
+
+ /* Associate the queue pair to the vector and enable the q int */
+ reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
+ | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
+ | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
+ wr32(hw, I40E_QINT_RQCTL(0), reg);
+
+ reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
+ | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
+ | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
+ wr32(hw, I40E_QINT_TQCTL(0), reg);
+
+ /* Next enable the queue pair */
+ reg = rd32(hw, I40E_QTX_ENA(0));
+ reg |= I40E_QTX_ENA_QENA_REQ_MASK;
+ wr32(hw, I40E_QTX_ENA(0), reg);
+
+ reg = rd32(hw, I40E_QRX_ENA(0));
+ reg |= I40E_QRX_ENA_QENA_REQ_MASK;
+ wr32(hw, I40E_QRX_ENA(0), reg);
+}
+
+
+/*
+ * Set the Initial ITR state
+ */
+static void
+ixl_configure_itr(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct ixl_queue *que = vsi->queues;
+
+ vsi->rx_itr_setting = ixl_rx_itr;
+ if (ixl_dynamic_rx_itr)
+ vsi->rx_itr_setting |= IXL_ITR_DYNAMIC;
+ vsi->tx_itr_setting = ixl_tx_itr;
+ if (ixl_dynamic_tx_itr)
+ vsi->tx_itr_setting |= IXL_ITR_DYNAMIC;
+
+ for (int i = 0; i < vsi->num_queues; i++, que++) {
+ struct tx_ring *txr = &que->txr;
+ struct rx_ring *rxr = &que->rxr;
+
+ wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
+ vsi->rx_itr_setting);
+ rxr->itr = vsi->rx_itr_setting;
+ rxr->latency = IXL_AVE_LATENCY;
+ wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
+ vsi->tx_itr_setting);
+ txr->itr = vsi->tx_itr_setting;
+ txr->latency = IXL_AVE_LATENCY;
+ }
+}
+
+
+static int
+ixl_allocate_pci_resources(struct ixl_pf *pf)
+{
+ int rid;
+ device_t dev = pf->dev;
+
+ rid = PCIR_BAR(0);
+ pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
+ &rid, RF_ACTIVE);
+
+ if (!(pf->pci_mem)) {
+ device_printf(dev,"Unable to allocate bus resource: memory\n");
+ return (ENXIO);
+ }
+
+ pf->osdep.mem_bus_space_tag =
+ rman_get_bustag(pf->pci_mem);
+ pf->osdep.mem_bus_space_handle =
+ rman_get_bushandle(pf->pci_mem);
+ pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
+ pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
+
+ pf->hw.back = &pf->osdep;
+
+ /*
+	** Now set up MSI or MSI/X; this should
+	** return the number of supported
+	** vectors. (Will be 1 for MSI)
+ */
+ pf->msix = ixl_init_msix(pf);
+ return (0);
+}
+
+static void
+ixl_free_pci_resources(struct ixl_pf * pf)
+{
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct ixl_queue *que = vsi->queues;
+ device_t dev = pf->dev;
+ int rid, memrid;
+
+ memrid = PCIR_BAR(IXL_BAR);
+
+	/* We may get here before stations are set up */
+ if ((!ixl_enable_msix) || (que == NULL))
+ goto early;
+
+ /*
+ ** Release all msix VSI resources:
+ */
+ for (int i = 0; i < vsi->num_queues; i++, que++) {
+ rid = que->msix + 1;
+ if (que->tag != NULL) {
+ bus_teardown_intr(dev, que->res, que->tag);
+ que->tag = NULL;
+ }
+ if (que->res != NULL)
+ bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
+ }
+
+early:
+ /* Clean the AdminQ interrupt last */
+ if (pf->admvec) /* we are doing MSIX */
+ rid = pf->admvec + 1;
+ else
+ (pf->msix != 0) ? (rid = 1):(rid = 0);
+
+ if (pf->tag != NULL) {
+ bus_teardown_intr(dev, pf->res, pf->tag);
+ pf->tag = NULL;
+ }
+ if (pf->res != NULL)
+ bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
+
+ if (pf->msix)
+ pci_release_msi(dev);
+
+ if (pf->msix_mem != NULL)
+ bus_release_resource(dev, SYS_RES_MEMORY,
+ memrid, pf->msix_mem);
+
+ if (pf->pci_mem != NULL)
+ bus_release_resource(dev, SYS_RES_MEMORY,
+ PCIR_BAR(0), pf->pci_mem);
+
+ return;
+}
+
+
+/*********************************************************************
+ *
+ * Setup networking device structure and register an interface.
+ *
+ **********************************************************************/
+static int
+ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
+{
+ struct ifnet *ifp;
+ struct i40e_hw *hw = vsi->hw;
+ struct ixl_queue *que = vsi->queues;
+ struct i40e_aq_get_phy_abilities_resp abilities_resp;
+ enum i40e_status_code aq_error = 0;
+
+ INIT_DEBUGOUT("ixl_setup_interface: begin");
+
+ ifp = vsi->ifp = if_alloc(IFT_ETHER);
+ if (ifp == NULL) {
+ device_printf(dev, "can not allocate ifnet structure\n");
+ return (-1);
+ }
+ if_initname(ifp, device_get_name(dev), device_get_unit(dev));
+ ifp->if_mtu = ETHERMTU;
+ ifp->if_baudrate = 4000000000; // ??
+ ifp->if_init = ixl_init;
+ ifp->if_softc = vsi;
+ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+ ifp->if_ioctl = ixl_ioctl;
+
+ ifp->if_transmit = ixl_mq_start;
+
+ ifp->if_qflush = ixl_qflush;
+
+ ifp->if_snd.ifq_maxlen = que->num_desc - 2;
+
+ ether_ifattach(ifp, hw->mac.addr);
+
+ vsi->max_frame_size =
+ ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
+ + ETHER_VLAN_ENCAP_LEN;
+
+ /*
+ * Tell the upper layer(s) we support long frames.
+ */
+ ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
+
+ ifp->if_capabilities |= IFCAP_HWCSUM;
+ ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
+ ifp->if_capabilities |= IFCAP_TSO;
+ ifp->if_capabilities |= IFCAP_JUMBO_MTU;
+ ifp->if_capabilities |= IFCAP_LRO;
+
+ /* VLAN capabilties */
+ ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
+ | IFCAP_VLAN_HWTSO
+ | IFCAP_VLAN_MTU
+ | IFCAP_VLAN_HWCSUM;
+ ifp->if_capenable = ifp->if_capabilities;
+
+ /*
+	** Don't turn this on by default: if vlans are
+	** created on another pseudo device (e.g. lagg)
+	** then vlan events are not passed thru, breaking
+	** operation; with HW FILTER off it works. If
+ ** using vlans directly on the ixl driver you can
+ ** enable this and get full hardware tag filtering.
+ */
+ ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
+
+ /*
+ * Specify the media types supported by this adapter and register
+ * callbacks to update media and link information
+ */
+ ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
+ ixl_media_status);
+
+ aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, TRUE, &abilities_resp, NULL);
+ if (aq_error) {
+ printf("Error getting supported media types, AQ error %d\n", aq_error);
+ return (EPERM);
+ }
+
+ /* Display supported media types */
+ if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
+
+ if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
+
+ if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) ||
+ abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
+ if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
+ if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
+ if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
+
+ if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
+ abilities_resp.phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
+ if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
+ if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
+
+ /* Use autoselect media by default */
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
+ ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
+
+ return (0);
+}
+
+static bool
+ixl_config_link(struct i40e_hw *hw)
+{
+ bool check;
+
+ i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
+ check = i40e_get_link_status(hw);
+#ifdef IXL_DEBUG
+ printf("Link is %s\n", check ? "up":"down");
+#endif
+ return (check);
+}
+
+/*********************************************************************
+ *
+ * Initialize this VSI
+ *
+ **********************************************************************/
+static int
+ixl_setup_vsi(struct ixl_vsi *vsi)
+{
+ struct i40e_hw *hw = vsi->hw;
+ device_t dev = vsi->dev;
+ struct i40e_aqc_get_switch_config_resp *sw_config;
+ struct i40e_vsi_context ctxt;
+ u8 aq_buf[I40E_AQ_LARGE_BUF];
+ int ret = I40E_SUCCESS;
+ u16 next = 0;
+
+ sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
+ ret = i40e_aq_get_switch_config(hw, sw_config,
+ sizeof(aq_buf), &next, NULL);
+ if (ret) {
+ device_printf(dev,"aq_get_switch_config failed!!\n");
+ return (ret);
+ }
+#ifdef IXL_DEBUG
+ printf("Switch config: header reported: %d in structure, %d total\n",
+ sw_config->header.num_reported, sw_config->header.num_total);
+ printf("type=%d seid=%d uplink=%d downlink=%d\n",
+ sw_config->element[0].element_type,
+ sw_config->element[0].seid,
+ sw_config->element[0].uplink_seid,
+ sw_config->element[0].downlink_seid);
+#endif
+	/* Save off the switch element ID (SEID) of this VSI */
+ vsi->seid = sw_config->element[0].seid;
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ ctxt.seid = vsi->seid;
+ ctxt.pf_num = hw->pf_id;
+ ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
+ if (ret) {
+ device_printf(dev,"get vsi params failed %x!!\n", ret);
+ return (ret);
+ }
+#ifdef IXL_DEBUG
+ printf("get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
+ "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
+ "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
+ ctxt.uplink_seid, ctxt.vsi_number,
+ ctxt.vsis_allocated, ctxt.vsis_unallocated,
+ ctxt.flags, ctxt.pf_num, ctxt.vf_num,
+ ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
+#endif
+ /*
+ ** Set the queue and traffic class bits
+ ** - when multiple traffic classes are supported
+ ** this will need to be more robust.
+ */
+ ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
+ ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
+ ctxt.info.queue_mapping[0] = 0;
+ ctxt.info.tc_mapping[0] = 0x0800;
+
+ /* Set VLAN receive stripping mode */
+ ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
+ ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
+ if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
+ ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
+ else
+ ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
+
+ /* Keep copy of VSI info in VSI for statistic counters */
+ memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
+
+ /* Reset VSI statistics */
+ ixl_vsi_reset_stats(vsi);
+ vsi->hw_filters_add = 0;
+ vsi->hw_filters_del = 0;
+
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret)
+ device_printf(dev,"update vsi params failed %x!!\n",
+ hw->aq.asq_last_status);
+ return (ret);
+}
+
+
+/*********************************************************************
+ *
+ *  Initialize the VSI: this sets up the HMC queue contexts,
+ *  meaning things like the number of descriptors and buffer
+ *  sizes, and it also initializes the TX and RX rings.
+ *
+ **********************************************************************/
+static int
+ixl_initialize_vsi(struct ixl_vsi *vsi)
+{
+ struct ixl_queue *que = vsi->queues;
+ device_t dev = vsi->dev;
+ struct i40e_hw *hw = vsi->hw;
+ int err = 0;
+
+
+ for (int i = 0; i < vsi->num_queues; i++, que++) {
+ struct tx_ring *txr = &que->txr;
+ struct rx_ring *rxr = &que->rxr;
+ struct i40e_hmc_obj_txq tctx;
+ struct i40e_hmc_obj_rxq rctx;
+ u32 txctl;
+ u16 size;
+
+
+ /* Setup the HMC TX Context */
+ size = que->num_desc * sizeof(struct i40e_tx_desc);
+ memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
+ tctx.new_context = 1;
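+		/* HMC queue base addresses are programmed in 128-byte units */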
+ tctx.base = (txr->dma.pa/128);
+ tctx.qlen = que->num_desc;
+ tctx.fc_ena = 0;
+ tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
+ /* Enable HEAD writeback */
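+		/*
+		** With head writeback the hardware reports TX progress by
+		** writing the head index to host memory just past the end
+		** of the descriptor ring (base + ring size below).
+		*/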
+ tctx.head_wb_ena = 1;
+ tctx.head_wb_addr = txr->dma.pa +
+ (que->num_desc * sizeof(struct i40e_tx_desc));
+ tctx.rdylist_act = 0;
+ err = i40e_clear_lan_tx_queue_context(hw, i);
+ if (err) {
+ device_printf(dev, "Unable to clear TX context\n");
+ break;
+ }
+ err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
+ if (err) {
+ device_printf(dev, "Unable to set TX context\n");
+ break;
+ }
+ /* Associate the ring with this PF */
+ txctl = I40E_QTX_CTL_PF_QUEUE;
+ txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
+ I40E_QTX_CTL_PF_INDX_MASK);
+ wr32(hw, I40E_QTX_CTL(i), txctl);
+ ixl_flush(hw);
+
+ /* Do ring (re)init */
+ ixl_init_tx_ring(que);
+
+ /* Next setup the HMC RX Context */
+ if (vsi->max_frame_size <= 2048)
+ rxr->mbuf_sz = MCLBYTES;
+ else
+ rxr->mbuf_sz = MJUMPAGESIZE;
+
+ u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
+
+ /* Set up an RX context for the HMC */
+ memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
+ rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
+ /* ignore header split for now */
+ rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
+ rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
+ vsi->max_frame_size : max_rxmax;
+ rctx.dtype = 0;
+ rctx.dsize = 1; /* do 32byte descriptors */
+ rctx.hsplit_0 = 0; /* no HDR split initially */
+ rctx.base = (rxr->dma.pa/128);
+ rctx.qlen = que->num_desc;
+ rctx.tphrdesc_ena = 1;
+ rctx.tphwdesc_ena = 1;
+ rctx.tphdata_ena = 0;
+ rctx.tphhead_ena = 0;
+ rctx.lrxqthresh = 2;
+#ifdef DEV_NETMAP
+ /* "CRC strip in netmap is conditional" */
+ if (vsi->ifp->if_capenable & IFCAP_NETMAP && !ixl_crcstrip)
+ rctx.crcstrip = 0;
+ else
+#endif /* DEV_NETMAP */
+ rctx.crcstrip = 1;
+ rctx.l2tsel = 1;
+ rctx.showiv = 1;
+ rctx.fc_ena = 0;
+ rctx.prefena = 1;
+
+ err = i40e_clear_lan_rx_queue_context(hw, i);
+ if (err) {
+ device_printf(dev,
+ "Unable to clear RX context %d\n", i);
+ break;
+ }
+ err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
+ if (err) {
+ device_printf(dev, "Unable to set RX context %d\n", i);
+ break;
+ }
+ err = ixl_init_rx_ring(que);
+ if (err) {
+ device_printf(dev, "Fail in init_rx_ring %d\n", i);
+ break;
+ }
+ wr32(vsi->hw, I40E_QRX_TAIL(que->me), 0);
+#ifdef DEV_NETMAP
+		/*
+		 * Based on the netmap code in ixgbe_init_locked():
+		 * this overrides what the software would otherwise set
+		 * as the queue descriptor tail in hardware.
+		 */
+ if (vsi->ifp->if_capenable & IFCAP_NETMAP)
+ {
+ struct netmap_adapter *na = NA(vsi->ifp);
+ struct netmap_kring *kring = &na->rx_rings[que->me];
+ int t = na->num_rx_desc - 1 - kring->nr_hwavail;
+
+ wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
+ } else
+#endif /* DEV_NETMAP */
+ wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
+ }
+ return (err);
+}
+
+
+/*********************************************************************
+ *
+ * Free all VSI structs.
+ *
+ **********************************************************************/
+void
+ixl_free_vsi(struct ixl_vsi *vsi)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
+ struct ixl_queue *que = vsi->queues;
+ struct ixl_mac_filter *f;
+
+ /* Free station queues */
+ for (int i = 0; i < vsi->num_queues; i++, que++) {
+ struct tx_ring *txr = &que->txr;
+ struct rx_ring *rxr = &que->rxr;
+
+ if (!mtx_initialized(&txr->mtx)) /* uninitialized */
+ continue;
+ IXL_TX_LOCK(txr);
+ ixl_free_que_tx(que);
+ if (txr->base)
+ i40e_free_dma(&pf->hw, &txr->dma);
+ IXL_TX_UNLOCK(txr);
+ IXL_TX_LOCK_DESTROY(txr);
+
+ if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
+ continue;
+ IXL_RX_LOCK(rxr);
+ ixl_free_que_rx(que);
+ if (rxr->base)
+ i40e_free_dma(&pf->hw, &rxr->dma);
+ IXL_RX_UNLOCK(rxr);
+ IXL_RX_LOCK_DESTROY(rxr);
+
+ }
+ free(vsi->queues, M_DEVBUF);
+
+ /* Free VSI filter list */
+ while (!SLIST_EMPTY(&vsi->ftl)) {
+ f = SLIST_FIRST(&vsi->ftl);
+ SLIST_REMOVE_HEAD(&vsi->ftl, next);
+ free(f, M_DEVBUF);
+ }
+}
+
+
+/*********************************************************************
+ *
+ *  Allocate memory for the VSI (virtual station interface) and its
+ *  associated queues, rings and descriptors; called only once
+ *  at attach.
+ *
+ **********************************************************************/
+static int
+ixl_setup_stations(struct ixl_pf *pf)
+{
+ device_t dev = pf->dev;
+ struct ixl_vsi *vsi;
+ struct ixl_queue *que;
+ struct tx_ring *txr;
+ struct rx_ring *rxr;
+ int rsize, tsize;
+ int error = I40E_SUCCESS;
+
+ vsi = &pf->vsi;
+ vsi->back = (void *)pf;
+ vsi->hw = &pf->hw;
+ vsi->id = 0;
+ vsi->num_vlans = 0;
+
+ /* Get memory for the station queues */
+ if (!(vsi->queues =
+ (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
+ vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ device_printf(dev, "Unable to allocate queue memory\n");
+ error = ENOMEM;
+ goto early;
+ }
+
+ for (int i = 0; i < vsi->num_queues; i++) {
+ que = &vsi->queues[i];
+ que->num_desc = ixl_ringsz;
+ que->me = i;
+ que->vsi = vsi;
+ /* mark the queue as active */
+ vsi->active_queues |= (u64)1 << que->me;
+ txr = &que->txr;
+ txr->que = que;
+ txr->tail = I40E_QTX_TAIL(que->me);
+
+ /* Initialize the TX lock */
+ snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
+ device_get_nameunit(dev), que->me);
+ mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
+ /* Create the TX descriptor ring */
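+		/* The extra u32 leaves room for the TX head write-back
+		 * word enabled in ixl_initialize_vsi(). */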
+ tsize = roundup2((que->num_desc *
+ sizeof(struct i40e_tx_desc)) +
+ sizeof(u32), DBA_ALIGN);
+ if (i40e_allocate_dma(&pf->hw,
+ &txr->dma, tsize, DBA_ALIGN)) {
+ device_printf(dev,
+ "Unable to allocate TX Descriptor memory\n");
+ error = ENOMEM;
+ goto fail;
+ }
+ txr->base = (struct i40e_tx_desc *)txr->dma.va;
+ bzero((void *)txr->base, tsize);
+ /* Now allocate transmit soft structs for the ring */
+ if (ixl_allocate_tx_data(que)) {
+ device_printf(dev,
+ "Critical Failure setting up TX structures\n");
+ error = ENOMEM;
+ goto fail;
+ }
+ /* Allocate a buf ring */
+ txr->br = buf_ring_alloc(4096, M_DEVBUF,
+ M_WAITOK, &txr->mtx);
+ if (txr->br == NULL) {
+ device_printf(dev,
+ "Critical Failure setting up TX buf ring\n");
+ error = ENOMEM;
+ goto fail;
+ }
+
+ /*
+ * Next the RX queues...
+ */
+ rsize = roundup2(que->num_desc *
+ sizeof(union i40e_rx_desc), DBA_ALIGN);
+ rxr = &que->rxr;
+ rxr->que = que;
+ rxr->tail = I40E_QRX_TAIL(que->me);
+
+ /* Initialize the RX side lock */
+ snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
+ device_get_nameunit(dev), que->me);
+ mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
+
+ if (i40e_allocate_dma(&pf->hw,
+ &rxr->dma, rsize, 4096)) {
+ device_printf(dev,
+ "Unable to allocate RX Descriptor memory\n");
+ error = ENOMEM;
+ goto fail;
+ }
+ rxr->base = (union i40e_rx_desc *)rxr->dma.va;
+ bzero((void *)rxr->base, rsize);
+
+ /* Allocate receive soft structs for the ring*/
+ if (ixl_allocate_rx_data(que)) {
+ device_printf(dev,
+ "Critical Failure setting up receive structs\n");
+ error = ENOMEM;
+ goto fail;
+ }
+ }
+
+ return (0);
+
+fail:
+ for (int i = 0; i < vsi->num_queues; i++) {
+ que = &vsi->queues[i];
+ rxr = &que->rxr;
+ txr = &que->txr;
+ if (rxr->base)
+ i40e_free_dma(&pf->hw, &rxr->dma);
+ if (txr->base)
+ i40e_free_dma(&pf->hw, &txr->dma);
+ }
+
+early:
+ return (error);
+}
+
+/*
+** Provide an update to the queue RX
+** interrupt moderation value.
+*/
+static void
+ixl_set_queue_rx_itr(struct ixl_queue *que)
+{
+ struct ixl_vsi *vsi = que->vsi;
+ struct i40e_hw *hw = vsi->hw;
+ struct rx_ring *rxr = &que->rxr;
+ u16 rx_itr;
+ u16 rx_latency = 0;
+ int rx_bytes;
+
+
+ /* Idle, do nothing */
+ if (rxr->bytes == 0)
+ return;
+
+ if (ixl_dynamic_rx_itr) {
+ rx_bytes = rxr->bytes/rxr->itr;
+ rx_itr = rxr->itr;
+
+ /* Adjust latency range */
+ switch (rxr->latency) {
+ case IXL_LOW_LATENCY:
+ if (rx_bytes > 10) {
+ rx_latency = IXL_AVE_LATENCY;
+ rx_itr = IXL_ITR_20K;
+ }
+ break;
+ case IXL_AVE_LATENCY:
+ if (rx_bytes > 20) {
+ rx_latency = IXL_BULK_LATENCY;
+ rx_itr = IXL_ITR_8K;
+ } else if (rx_bytes <= 10) {
+ rx_latency = IXL_LOW_LATENCY;
+ rx_itr = IXL_ITR_100K;
+ }
+ break;
+ case IXL_BULK_LATENCY:
+ if (rx_bytes <= 20) {
+ rx_latency = IXL_AVE_LATENCY;
+ rx_itr = IXL_ITR_20K;
+ }
+ break;
+ }
+
+ rxr->latency = rx_latency;
+
+ if (rx_itr != rxr->itr) {
+ /* do an exponential smoothing */
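+			/*
+			 * This is a weighted harmonic mean that weights the
+			 * existing value (rxr->itr) 9:1 over the new target,
+			 * so the ITR converges gradually instead of jumping.
+			 */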
+ rx_itr = (10 * rx_itr * rxr->itr) /
+ ((9 * rx_itr) + rxr->itr);
+ rxr->itr = rx_itr & IXL_MAX_ITR;
+ wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
+ que->me), rxr->itr);
+ }
+	} else { /* We may have toggled to non-dynamic */
+ if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
+ vsi->rx_itr_setting = ixl_rx_itr;
+ /* Update the hardware if needed */
+ if (rxr->itr != vsi->rx_itr_setting) {
+ rxr->itr = vsi->rx_itr_setting;
+ wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
+ que->me), rxr->itr);
+ }
+ }
+ rxr->bytes = 0;
+ rxr->packets = 0;
+ return;
+}
+
+
+/*
+** Provide an update to the queue TX
+** interrupt moderation value.
+*/
+static void
+ixl_set_queue_tx_itr(struct ixl_queue *que)
+{
+ struct ixl_vsi *vsi = que->vsi;
+ struct i40e_hw *hw = vsi->hw;
+ struct tx_ring *txr = &que->txr;
+ u16 tx_itr;
+ u16 tx_latency = 0;
+ int tx_bytes;
+
+
+ /* Idle, do nothing */
+ if (txr->bytes == 0)
+ return;
+
+ if (ixl_dynamic_tx_itr) {
+ tx_bytes = txr->bytes/txr->itr;
+ tx_itr = txr->itr;
+
+ switch (txr->latency) {
+ case IXL_LOW_LATENCY:
+ if (tx_bytes > 10) {
+ tx_latency = IXL_AVE_LATENCY;
+ tx_itr = IXL_ITR_20K;
+ }
+ break;
+ case IXL_AVE_LATENCY:
+ if (tx_bytes > 20) {
+ tx_latency = IXL_BULK_LATENCY;
+ tx_itr = IXL_ITR_8K;
+ } else if (tx_bytes <= 10) {
+ tx_latency = IXL_LOW_LATENCY;
+ tx_itr = IXL_ITR_100K;
+ }
+ break;
+ case IXL_BULK_LATENCY:
+ if (tx_bytes <= 20) {
+ tx_latency = IXL_AVE_LATENCY;
+ tx_itr = IXL_ITR_20K;
+ }
+ break;
+ }
+
+ txr->latency = tx_latency;
+
+ if (tx_itr != txr->itr) {
+ /* do an exponential smoothing */
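+			/* Same 9:1 weighted smoothing as the RX path above */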
+ tx_itr = (10 * tx_itr * txr->itr) /
+ ((9 * tx_itr) + txr->itr);
+ txr->itr = tx_itr & IXL_MAX_ITR;
+ wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
+ que->me), txr->itr);
+ }
+
+	} else { /* We may have toggled to non-dynamic */
+ if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
+ vsi->tx_itr_setting = ixl_tx_itr;
+ /* Update the hardware if needed */
+ if (txr->itr != vsi->tx_itr_setting) {
+ txr->itr = vsi->tx_itr_setting;
+ wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
+ que->me), txr->itr);
+ }
+ }
+ txr->bytes = 0;
+ txr->packets = 0;
+ return;
+}
+
+
+static void
+ixl_add_hw_stats(struct ixl_pf *pf)
+{
+ device_t dev = pf->dev;
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct ixl_queue *queues = vsi->queues;
+ struct i40e_eth_stats *vsi_stats = &vsi->eth_stats;
+ struct i40e_hw_port_stats *pf_stats = &pf->stats;
+
+ struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
+ struct sysctl_oid *tree = device_get_sysctl_tree(dev);
+ struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
+
+ struct sysctl_oid *vsi_node, *queue_node;
+ struct sysctl_oid_list *vsi_list, *queue_list;
+
+ struct tx_ring *txr;
+ struct rx_ring *rxr;
+
+ /* Driver statistics */
+ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
+ CTLFLAG_RD, &pf->watchdog_events,
+ "Watchdog timeouts");
+ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
+ CTLFLAG_RD, &pf->admin_irq,
+ "Admin Queue IRQ Handled");
+
+ /* VSI statistics */
+#define QUEUE_NAME_LEN 32
+ char queue_namebuf[QUEUE_NAME_LEN];
+
+ // ERJ: Only one vsi now, re-do when >1 VSI enabled
+ // snprintf(vsi_namebuf, QUEUE_NAME_LEN, "vsi%d", vsi->info.stat_counter_idx);
+ vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
+ CTLFLAG_RD, NULL, "VSI-specific stats");
+ vsi_list = SYSCTL_CHILDREN(vsi_node);
+
+ ixl_add_sysctls_eth_stats(ctx, vsi_list, vsi_stats);
+
+ /* Queue statistics */
+ for (int q = 0; q < vsi->num_queues; q++) {
+ snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
+ queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf,
+ CTLFLAG_RD, NULL, "Queue #");
+ queue_list = SYSCTL_CHILDREN(queue_node);
+
+ txr = &(queues[q].txr);
+ rxr = &(queues[q].rxr);
+
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
+ CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
+ "m_defrag() failed");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
+ CTLFLAG_RD, &(queues[q].dropped_pkts),
+ "Driver dropped packets");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
+ CTLFLAG_RD, &(queues[q].irqs),
+ "irqs on this queue");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
+ CTLFLAG_RD, &(queues[q].tso),
+ "TSO");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
+ CTLFLAG_RD, &(queues[q].tx_dma_setup),
+ "Driver tx dma failure in xmit");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
+ CTLFLAG_RD, &(txr->no_desc),
+ "Queue No Descriptor Available");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
+ CTLFLAG_RD, &(txr->total_packets),
+ "Queue Packets Transmitted");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
+ CTLFLAG_RD, &(txr->tx_bytes),
+ "Queue Bytes Transmitted");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
+ CTLFLAG_RD, &(rxr->rx_packets),
+ "Queue Packets Received");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
+ CTLFLAG_RD, &(rxr->rx_bytes),
+ "Queue Bytes Received");
+ }
+
+ /* MAC stats */
+ ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
+}
+
+static void
+ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
+ struct sysctl_oid_list *child,
+ struct i40e_eth_stats *eth_stats)
+{
+ struct ixl_sysctl_info ctls[] =
+ {
+ {&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
+ {&eth_stats->rx_unicast, "ucast_pkts_rcvd",
+ "Unicast Packets Received"},
+ {&eth_stats->rx_multicast, "mcast_pkts_rcvd",
+ "Multicast Packets Received"},
+ {&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
+ "Broadcast Packets Received"},
+ {&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
+ {&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
+ {&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
+ {&eth_stats->tx_multicast, "mcast_pkts_txd",
+ "Multicast Packets Transmitted"},
+ {&eth_stats->tx_broadcast, "bcast_pkts_txd",
+ "Broadcast Packets Transmitted"},
+ {&eth_stats->tx_discards, "tx_discards", "Discarded TX packets"},
+ // end
+ {0,0,0}
+ };
+
+ struct ixl_sysctl_info *entry = ctls;
+ while (entry->stat != 0)
+ {
+ SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
+ CTLFLAG_RD, entry->stat,
+ entry->description);
+ entry++;
+ }
+}
+
+static void
+ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
+ struct sysctl_oid_list *child,
+ struct i40e_hw_port_stats *stats)
+{
+ struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
+ CTLFLAG_RD, NULL, "Mac Statistics");
+ struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
+
+ struct i40e_eth_stats *eth_stats = &stats->eth;
+ ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
+
+ struct ixl_sysctl_info ctls[] =
+ {
+ {&stats->crc_errors, "crc_errors", "CRC Errors"},
+ {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
+ {&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
+ {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
+ {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
+ /* Packet Reception Stats */
+ {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
+ {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
+ {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
+ {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
+ {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
+ {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
+ {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
+ {&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
+ {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
+ {&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
+ {&stats->rx_jabber, "rx_jabber", "Received Jabber"},
+ {&stats->checksum_error, "checksum_errors", "Checksum Errors"},
+ /* Packet Transmission Stats */
+ {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
+ {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
+ {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
+ {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
+ {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
+ {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
+ {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
+ /* Flow control */
+ {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
+ {&stats->link_xon_rx, "xon_recvd", "Link XON received"},
+ {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
+ {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
+ /* End */
+ {0,0,0}
+ };
+
+ struct ixl_sysctl_info *entry = ctls;
+ while (entry->stat != 0)
+ {
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
+ CTLFLAG_RD, entry->stat,
+ entry->description);
+ entry++;
+ }
+}
+
+/*
+** ixl_config_rss - set up RSS
+** - note this is done only for the single VSI
+*/
+static void ixl_config_rss(struct ixl_vsi *vsi)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
+ struct i40e_hw *hw = vsi->hw;
+ u32 lut = 0;
+ u64 set_hena, hena;
+ int i, j;
+
+ static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687,
+ 0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377,
+ 0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d,
+ 0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be};
+
+ /* Fill out hash function seed */
+ for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
+ wr32(hw, I40E_PFQF_HKEY(i), seed[i]);
+
+ /* Enable PCTYPES for RSS: */
+ set_hena =
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
+ ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
+
+ hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
+ ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
+ hena |= set_hena;
+ wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
+ wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
+
+ /* Populate the LUT with max no. of queues in round robin fashion */
+ for (i = j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
+ if (j == vsi->num_queues)
+ j = 0;
+ /* lut = 4-byte sliding window of 4 lut entries */
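+		/*
+		 * For example, with 4 queues j cycles 0,1,2,3 and each
+		 * HLUT register is written as 0x00010203 (the entry
+		 * packed first ends up in the most significant byte).
+		 */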
+ lut = (lut << 8) | (j &
+ ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
+ /* On i = 3, we have 4 entries in lut; write to the register */
+ if ((i & 3) == 3)
+ wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
+ }
+ ixl_flush(hw);
+}
+
+
+/*
+** This routine is run via a VLAN config EVENT;
+** it lets us use the HW filter table since we
+** can get the VLAN id. It only creates the entry
+** in the soft version of the VFTA; init will
+** repopulate the real table.
+*/
+static void
+ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
+{
+ struct ixl_vsi *vsi = ifp->if_softc;
+ struct i40e_hw *hw = vsi->hw;
+ struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
+
+ if (ifp->if_softc != arg) /* Not our event */
+ return;
+
+ if ((vtag == 0) || (vtag > 4095)) /* Invalid */
+ return;
+
+ IXL_PF_LOCK(pf);
+ ++vsi->num_vlans;
+ ixl_add_filter(vsi, hw->mac.addr, vtag);
+ IXL_PF_UNLOCK(pf);
+}
+
+/*
+** This routine is run via a VLAN unconfig
+** EVENT; it removes our entry from
+** the soft VFTA.
+*/
+static void
+ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
+{
+ struct ixl_vsi *vsi = ifp->if_softc;
+ struct i40e_hw *hw = vsi->hw;
+ struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
+
+ if (ifp->if_softc != arg)
+ return;
+
+ if ((vtag == 0) || (vtag > 4095)) /* Invalid */
+ return;
+
+ IXL_PF_LOCK(pf);
+ --vsi->num_vlans;
+ ixl_del_filter(vsi, hw->mac.addr, vtag);
+ IXL_PF_UNLOCK(pf);
+}
+
+/*
+** This routine updates VLAN filters; called by init,
+** it scans the filter table and then updates the HW
+** after a soft reset.
+*/
+static void
+ixl_setup_vlan_filters(struct ixl_vsi *vsi)
+{
+ struct ixl_mac_filter *f;
+ int cnt = 0, flags;
+
+ if (vsi->num_vlans == 0)
+ return;
+ /*
+ ** Scan the filter list for vlan entries,
+ ** mark them for addition and then call
+ ** for the AQ update.
+ */
+ SLIST_FOREACH(f, &vsi->ftl, next) {
+ if (f->flags & IXL_FILTER_VLAN) {
+ f->flags |=
+ (IXL_FILTER_ADD |
+ IXL_FILTER_USED);
+ cnt++;
+ }
+ }
+ if (cnt == 0) {
+ printf("setup vlan: no filters found!\n");
+ return;
+ }
+ flags = IXL_FILTER_VLAN;
+ flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
+ ixl_add_hw_filters(vsi, flags, cnt);
+ return;
+}
+
+/*
+** Initialize filter list and add filters that the hardware
+** needs to know about.
+*/
+static void
+ixl_init_filters(struct ixl_vsi *vsi)
+{
+ /* Add broadcast address */
+ u8 bc[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ ixl_add_filter(vsi, bc, IXL_VLAN_ANY);
+}
+
+/*
+** This routine adds multicast filters
+*/
+static void
+ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
+{
+ struct ixl_mac_filter *f;
+
+ /* Does one already exist */
+ f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
+ if (f != NULL)
+ return;
+
+ f = ixl_get_filter(vsi);
+ if (f == NULL) {
+ printf("WARNING: no filter available!!\n");
+ return;
+ }
+ bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
+ f->vlan = IXL_VLAN_ANY;
+ f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
+ | IXL_FILTER_MC);
+
+ return;
+}
+
+/*
+** This routine adds macvlan filters
+*/
+static void
+ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
+{
+ struct ixl_mac_filter *f, *tmp;
+ device_t dev = vsi->dev;
+
+ DEBUGOUT("ixl_add_filter: begin");
+
+ /* Does one already exist */
+ f = ixl_find_filter(vsi, macaddr, vlan);
+ if (f != NULL)
+ return;
+ /*
+	** If this is the first VLAN being registered, we need
+	** to remove the ANY filter that indicates we are not
+	** in a VLAN, and replace it with a VLAN-0 filter.
+ */
+ if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
+ tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
+ if (tmp != NULL) {
+ ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
+ ixl_add_filter(vsi, macaddr, 0);
+ }
+ }
+
+ f = ixl_get_filter(vsi);
+ if (f == NULL) {
+ device_printf(dev, "WARNING: no filter available!!\n");
+ return;
+ }
+ bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
+ f->vlan = vlan;
+ f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
+ if (f->vlan != IXL_VLAN_ANY)
+ f->flags |= IXL_FILTER_VLAN;
+
+ ixl_add_hw_filters(vsi, f->flags, 1);
+ return;
+}
+
+static void
+ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
+{
+ struct ixl_mac_filter *f;
+
+ f = ixl_find_filter(vsi, macaddr, vlan);
+ if (f == NULL)
+ return;
+
+ f->flags |= IXL_FILTER_DEL;
+ ixl_del_hw_filters(vsi, 1);
+
+ /* Check if this is the last vlan removal */
+ if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
+ /* Switch back to a non-vlan filter */
+ ixl_del_filter(vsi, macaddr, 0);
+ ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
+ }
+ return;
+}
+
+/*
+** Find the filter with both matching mac addr and vlan id
+*/
+static struct ixl_mac_filter *
+ixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
+{
+ struct ixl_mac_filter *f;
+ bool match = FALSE;
+
+ SLIST_FOREACH(f, &vsi->ftl, next) {
+ if (!cmp_etheraddr(f->macaddr, macaddr))
+ continue;
+ if (f->vlan == vlan) {
+ match = TRUE;
+ break;
+ }
+ }
+
+ if (!match)
+ f = NULL;
+ return (f);
+}
+
+/*
+** This routine takes additions to the vsi filter
+** table and creates an Admin Queue call to create
+** the filters in the hardware.
+*/
+static void
+ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
+{
+ struct i40e_aqc_add_macvlan_element_data *a, *b;
+ struct ixl_mac_filter *f;
+ struct i40e_hw *hw = vsi->hw;
+ device_t dev = vsi->dev;
+ int err, j = 0;
+
+ a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
+ M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (a == NULL) {
+ device_printf(dev, "add hw filter failed to get memory\n");
+ return;
+ }
+
+ /*
+	** Scan the filter list; each time we find one,
+	** we add it to the admin queue array and clear
+	** the add bit.
+ */
+ SLIST_FOREACH(f, &vsi->ftl, next) {
+ if (f->flags == flags) {
+ b = &a[j]; // a pox on fvl long names :)
+ bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
+ b->vlan_tag =
+ (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
+ b->flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
+ f->flags &= ~IXL_FILTER_ADD;
+ j++;
+ }
+ if (j == cnt)
+ break;
+ }
+ if (j > 0) {
+ err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
+ if (err)
+ device_printf(dev, "aq_add_macvlan failure %d\n",
+ hw->aq.asq_last_status);
+ else
+ vsi->hw_filters_add += j;
+ }
+ free(a, M_DEVBUF);
+ return;
+}
+
+/*
+** This routine takes removals in the vsi filter
+** table and creates an Admin Queue call to delete
+** the filters in the hardware.
+*/
+static void
+ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
+{
+ struct i40e_aqc_remove_macvlan_element_data *d, *e;
+ struct i40e_hw *hw = vsi->hw;
+ device_t dev = vsi->dev;
+ struct ixl_mac_filter *f, *f_temp;
+ int err, j = 0;
+
+ DEBUGOUT("ixl_del_hw_filters: begin\n");
+
+ d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
+ M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (d == NULL) {
+ printf("del hw filter failed to get memory\n");
+ return;
+ }
+
+ SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
+ if (f->flags & IXL_FILTER_DEL) {
+ e = &d[j]; // a pox on fvl long names :)
+ bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
+ e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
+ e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
+ /* delete entry from vsi list */
+ SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
+ free(f, M_DEVBUF);
+ j++;
+ }
+ if (j == cnt)
+ break;
+ }
+ if (j > 0) {
+ err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
+ /* NOTE: returns ENOENT every time but seems to work fine,
+ so we'll ignore that specific error. */
+ if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
+ int sc = 0;
+ for (int i = 0; i < j; i++)
+ sc += (!d[i].error_code);
+ vsi->hw_filters_del += sc;
+ device_printf(dev,
+ "Failed to remove %d/%d filters, aq error %d\n",
+ j - sc, j, hw->aq.asq_last_status);
+ } else
+ vsi->hw_filters_del += j;
+ }
+ free(d, M_DEVBUF);
+
+ DEBUGOUT("ixl_del_hw_filters: end\n");
+ return;
+}
+
+
+static void
+ixl_enable_rings(struct ixl_vsi *vsi)
+{
+ struct i40e_hw *hw = vsi->hw;
+ u32 reg;
+
+ for (int i = 0; i < vsi->num_queues; i++) {
+ i40e_pre_tx_queue_cfg(hw, i, TRUE);
+
+ reg = rd32(hw, I40E_QTX_ENA(i));
+ reg |= I40E_QTX_ENA_QENA_REQ_MASK |
+ I40E_QTX_ENA_QENA_STAT_MASK;
+ wr32(hw, I40E_QTX_ENA(i), reg);
+ /* Verify the enable took */
+ for (int j = 0; j < 10; j++) {
+ reg = rd32(hw, I40E_QTX_ENA(i));
+ if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
+ break;
+ i40e_msec_delay(10);
+ }
+ if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0)
+ printf("TX queue %d disabled!\n", i);
+
+ reg = rd32(hw, I40E_QRX_ENA(i));
+ reg |= I40E_QRX_ENA_QENA_REQ_MASK |
+ I40E_QRX_ENA_QENA_STAT_MASK;
+ wr32(hw, I40E_QRX_ENA(i), reg);
+ /* Verify the enable took */
+ for (int j = 0; j < 10; j++) {
+ reg = rd32(hw, I40E_QRX_ENA(i));
+ if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
+ break;
+ i40e_msec_delay(10);
+ }
+ if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0)
+ printf("RX queue %d disabled!\n", i);
+ }
+}
+
+static void
+ixl_disable_rings(struct ixl_vsi *vsi)
+{
+ struct i40e_hw *hw = vsi->hw;
+ u32 reg;
+
+ for (int i = 0; i < vsi->num_queues; i++) {
+ i40e_pre_tx_queue_cfg(hw, i, FALSE);
+ i40e_usec_delay(500);
+
+ reg = rd32(hw, I40E_QTX_ENA(i));
+ reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
+ wr32(hw, I40E_QTX_ENA(i), reg);
+ /* Verify the disable took */
+ for (int j = 0; j < 10; j++) {
+ reg = rd32(hw, I40E_QTX_ENA(i));
+ if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ break;
+ i40e_msec_delay(10);
+ }
+ if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
+ printf("TX queue %d still enabled!\n", i);
+
+ reg = rd32(hw, I40E_QRX_ENA(i));
+ reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
+ wr32(hw, I40E_QRX_ENA(i), reg);
+ /* Verify the disable took */
+ for (int j = 0; j < 10; j++) {
+ reg = rd32(hw, I40E_QRX_ENA(i));
+ if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
+ break;
+ i40e_msec_delay(10);
+ }
+ if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
+ printf("RX queue %d still enabled!\n", i);
+ }
+}
+
+/**
+ * ixl_handle_mdd_event
+ *
+ * Called from the interrupt handler to identify possibly malicious VFs
+ * (it also detects events from the PF itself)
+ **/
+static void ixl_handle_mdd_event(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ bool mdd_detected = false;
+ bool pf_mdd_detected = false;
+ u32 reg;
+
+ /* find what triggered the MDD event */
+ reg = rd32(hw, I40E_GL_MDET_TX);
+ if (reg & I40E_GL_MDET_TX_VALID_MASK) {
+ u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
+ I40E_GL_MDET_TX_PF_NUM_SHIFT;
+ u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
+ I40E_GL_MDET_TX_EVENT_SHIFT;
+ u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
+ I40E_GL_MDET_TX_QUEUE_SHIFT;
+ device_printf(dev,
+ "Malicious Driver Detection event 0x%02x"
+ " on TX queue %d pf number 0x%02x\n",
+ event, queue, pf_num);
+ wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
+ mdd_detected = true;
+ }
+ reg = rd32(hw, I40E_GL_MDET_RX);
+ if (reg & I40E_GL_MDET_RX_VALID_MASK) {
+ u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
+ I40E_GL_MDET_RX_FUNCTION_SHIFT;
+ u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
+ I40E_GL_MDET_RX_EVENT_SHIFT;
+ u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
+ I40E_GL_MDET_RX_QUEUE_SHIFT;
+ device_printf(dev,
+ "Malicious Driver Detection event 0x%02x"
+ " on RX queue %d of function 0x%02x\n",
+ event, queue, func);
+ wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
+ mdd_detected = true;
+ }
+
+ if (mdd_detected) {
+ reg = rd32(hw, I40E_PF_MDET_TX);
+ if (reg & I40E_PF_MDET_TX_VALID_MASK) {
+ wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
+ device_printf(dev,
+ "MDD TX event is for this function 0x%08x",
+ reg);
+ pf_mdd_detected = true;
+ }
+ reg = rd32(hw, I40E_PF_MDET_RX);
+ if (reg & I40E_PF_MDET_RX_VALID_MASK) {
+ wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
+ device_printf(dev,
+ "MDD RX event is for this function 0x%08x",
+ reg);
+ pf_mdd_detected = true;
+ }
+ }
+
+ /* re-enable mdd interrupt cause */
+ reg = rd32(hw, I40E_PFINT_ICR0_ENA);
+ reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
+ wr32(hw, I40E_PFINT_ICR0_ENA, reg);
+ ixl_flush(hw);
+}
+
+static void
+ixl_enable_intr(struct ixl_vsi *vsi)
+{
+ struct i40e_hw *hw = vsi->hw;
+ struct ixl_queue *que = vsi->queues;
+
+ if (ixl_enable_msix) {
+ ixl_enable_adminq(hw);
+ for (int i = 0; i < vsi->num_queues; i++, que++)
+ ixl_enable_queue(hw, que->me);
+ } else
+ ixl_enable_legacy(hw);
+}
+
+static void
+ixl_disable_intr(struct ixl_vsi *vsi)
+{
+ struct i40e_hw *hw = vsi->hw;
+ struct ixl_queue *que = vsi->queues;
+
+ if (ixl_enable_msix) {
+ ixl_disable_adminq(hw);
+ for (int i = 0; i < vsi->num_queues; i++, que++)
+ ixl_disable_queue(hw, que->me);
+ } else
+ ixl_disable_legacy(hw);
+}
+
+static void
+ixl_enable_adminq(struct i40e_hw *hw)
+{
+ u32 reg;
+
+ reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
+ I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
+ (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
+ wr32(hw, I40E_PFINT_DYN_CTL0, reg);
+ ixl_flush(hw);
+ return;
+}
+
+static void
+ixl_disable_adminq(struct i40e_hw *hw)
+{
+ u32 reg;
+
+ reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
+ wr32(hw, I40E_PFINT_DYN_CTL0, reg);
+
+ return;
+}
+
+static void
+ixl_enable_queue(struct i40e_hw *hw, int id)
+{
+ u32 reg;
+
+ reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+ (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+ wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
+}
+
+static void
+ixl_disable_queue(struct i40e_hw *hw, int id)
+{
+ u32 reg;
+
+ reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
+ wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
+
+ return;
+}
+
+static void
+ixl_enable_legacy(struct i40e_hw *hw)
+{
+ u32 reg;
+ reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
+ I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
+ (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
+ wr32(hw, I40E_PFINT_DYN_CTL0, reg);
+}
+
+static void
+ixl_disable_legacy(struct i40e_hw *hw)
+{
+ u32 reg;
+
+ reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
+ wr32(hw, I40E_PFINT_DYN_CTL0, reg);
+
+ return;
+}
+
+static void
+ixl_update_stats_counters(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct ifnet *ifp = vsi->ifp;
+
+ struct i40e_hw_port_stats *nsd = &pf->stats;
+ struct i40e_hw_port_stats *osd = &pf->stats_offsets;
+
+ /* Update hw stats */
+ ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->crc_errors, &nsd->crc_errors);
+ ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->illegal_bytes, &nsd->illegal_bytes);
+ ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
+ I40E_GLPRT_GORCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
+ ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
+ I40E_GLPRT_GOTCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
+ ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_discards,
+ &nsd->eth.rx_discards);
+ ixl_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.tx_discards,
+ &nsd->eth.tx_discards);
+ ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
+ I40E_GLPRT_UPRCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_unicast,
+ &nsd->eth.rx_unicast);
+ ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
+ I40E_GLPRT_UPTCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.tx_unicast,
+ &nsd->eth.tx_unicast);
+ ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
+ I40E_GLPRT_MPRCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_multicast,
+ &nsd->eth.rx_multicast);
+ ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
+ I40E_GLPRT_MPTCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.tx_multicast,
+ &nsd->eth.tx_multicast);
+ ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
+ I40E_GLPRT_BPRCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_broadcast,
+ &nsd->eth.rx_broadcast);
+ ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
+ I40E_GLPRT_BPTCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.tx_broadcast,
+ &nsd->eth.tx_broadcast);
+
+ ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_dropped_link_down,
+ &nsd->tx_dropped_link_down);
+ ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->mac_local_faults,
+ &nsd->mac_local_faults);
+ ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->mac_remote_faults,
+ &nsd->mac_remote_faults);
+ ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_length_errors,
+ &nsd->rx_length_errors);
+
+ /* Flow control (LFC) stats */
+ ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->link_xon_rx, &nsd->link_xon_rx);
+ ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->link_xon_tx, &nsd->link_xon_tx);
+ ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->link_xoff_rx, &nsd->link_xoff_rx);
+ ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->link_xoff_tx, &nsd->link_xoff_tx);
+
+ /* Priority flow control stats */
+#if 0
+ for (int i = 0; i < 8; i++) {
+ ixl_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
+ pf->stat_offsets_loaded,
+ &osd->priority_xon_rx[i],
+ &nsd->priority_xon_rx[i]);
+ ixl_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
+ pf->stat_offsets_loaded,
+ &osd->priority_xon_tx[i],
+ &nsd->priority_xon_tx[i]);
+ ixl_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
+ pf->stat_offsets_loaded,
+ &osd->priority_xoff_tx[i],
+ &nsd->priority_xoff_tx[i]);
+ ixl_stat_update32(hw,
+ I40E_GLPRT_RXON2OFFCNT(hw->port, i),
+ pf->stat_offsets_loaded,
+ &osd->priority_xon_2_xoff[i],
+ &nsd->priority_xon_2_xoff[i]);
+ }
+#endif
+
+ /* Packet size stats rx */
+ ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
+ I40E_GLPRT_PRC64L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_64, &nsd->rx_size_64);
+ ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
+ I40E_GLPRT_PRC127L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_127, &nsd->rx_size_127);
+ ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
+ I40E_GLPRT_PRC255L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_255, &nsd->rx_size_255);
+ ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
+ I40E_GLPRT_PRC511L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_511, &nsd->rx_size_511);
+ ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
+ I40E_GLPRT_PRC1023L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_1023, &nsd->rx_size_1023);
+ ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
+ I40E_GLPRT_PRC1522L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_1522, &nsd->rx_size_1522);
+ ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
+ I40E_GLPRT_PRC9522L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_big, &nsd->rx_size_big);
+
+ /* Packet size stats tx */
+ ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
+ I40E_GLPRT_PTC64L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_64, &nsd->tx_size_64);
+ ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
+ I40E_GLPRT_PTC127L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_127, &nsd->tx_size_127);
+ ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
+ I40E_GLPRT_PTC255L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_255, &nsd->tx_size_255);
+ ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
+ I40E_GLPRT_PTC511L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_511, &nsd->tx_size_511);
+ ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
+ I40E_GLPRT_PTC1023L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_1023, &nsd->tx_size_1023);
+ ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
+ I40E_GLPRT_PTC1522L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_1522, &nsd->tx_size_1522);
+ ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
+ I40E_GLPRT_PTC9522L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_big, &nsd->tx_size_big);
+
+ ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_undersize, &nsd->rx_undersize);
+ ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_fragments, &nsd->rx_fragments);
+ ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_oversize, &nsd->rx_oversize);
+ ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_jabber, &nsd->rx_jabber);
+ pf->stat_offsets_loaded = true;
+ /* End hw stats */
+
+ /* Update vsi stats */
+ ixl_update_eth_stats(vsi);
+
+ /* OS statistics */
+ // ERJ - these are per-port, update all vsis?
+ ifp->if_ierrors = nsd->crc_errors + nsd->illegal_bytes;
+}
+
+/*
+** Tasklet handler for MSIX Adminq interrupts
+** - runs outside interrupt context since it might sleep
+*/
+static void
+ixl_do_adminq(void *context, int pending)
+{
+ struct ixl_pf *pf = context;
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct i40e_arq_event_info event;
+ i40e_status ret;
+ u32 reg, loop = 0;
+ u16 opcode, result;
+
+ event.msg_len = IXL_AQ_BUF_SZ;
+ event.msg_buf = malloc(event.msg_len,
+ M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (!event.msg_buf) {
+ printf("Unable to allocate adminq memory\n");
+ return;
+ }
+
+ /* clean and process any events */
+ do {
+ ret = i40e_clean_arq_element(hw, &event, &result);
+ if (ret)
+ break;
+ opcode = LE16_TO_CPU(event.desc.opcode);
+ switch (opcode) {
+ case i40e_aqc_opc_get_link_status:
+ vsi->link_up = ixl_config_link(hw);
+ ixl_update_link_status(pf);
+ break;
+ case i40e_aqc_opc_send_msg_to_pf:
+ /* process pf/vf communication here */
+ break;
+ case i40e_aqc_opc_event_lan_overflow:
+ break;
+ default:
+#ifdef IXL_DEBUG
+ printf("AdminQ unknown event %x\n", opcode);
+#endif
+ break;
+ }
+
+ } while (result && (loop++ < IXL_ADM_LIMIT));
+
+ reg = rd32(hw, I40E_PFINT_ICR0_ENA);
+ reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
+ wr32(hw, I40E_PFINT_ICR0_ENA, reg);
+ free(event.msg_buf, M_DEVBUF);
+
+ if (pf->msix > 1)
+ ixl_enable_adminq(&pf->hw);
+ else
+ ixl_enable_intr(vsi);
+}
+
+static int
+ixl_debug_info(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf;
+ int error, input = 0;
+
+ error = sysctl_handle_int(oidp, &input, 0, req);
+
+ if (error || !req->newptr)
+ return (error);
+
+ if (input == 1) {
+ pf = (struct ixl_pf *)arg1;
+ ixl_print_debug_info(pf);
+ }
+
+ return (error);
+}
+
+static void
+ixl_print_debug_info(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct ixl_queue *que = vsi->queues;
+ struct rx_ring *rxr = &que->rxr;
+ struct tx_ring *txr = &que->txr;
+ u32 reg;
+
+
+ printf("Queue irqs = %lx\n", que->irqs);
+ printf("AdminQ irqs = %lx\n", pf->admin_irq);
+ printf("RX next check = %x\n", rxr->next_check);
+ printf("RX not ready = %lx\n", rxr->not_done);
+ printf("RX packets = %lx\n", rxr->rx_packets);
+ printf("TX desc avail = %x\n", txr->avail);
+
+ reg = rd32(hw, I40E_GLV_GORCL(0xc));
+ printf("RX Bytes = %x\n", reg);
+ reg = rd32(hw, I40E_GLPRT_GORCL(hw->port));
+ printf("Port RX Bytes = %x\n", reg);
+ reg = rd32(hw, I40E_GLV_RDPC(0xc));
+ printf("RX discard = %x\n", reg);
+ reg = rd32(hw, I40E_GLPRT_RDPC(hw->port));
+ printf("Port RX discard = %x\n", reg);
+
+ reg = rd32(hw, I40E_GLV_TEPC(0xc));
+ printf("TX errors = %x\n", reg);
+ reg = rd32(hw, I40E_GLV_GOTCL(0xc));
+ printf("TX Bytes = %x\n", reg);
+
+ reg = rd32(hw, I40E_GLPRT_RUC(hw->port));
+ printf("RX undersize = %x\n", reg);
+ reg = rd32(hw, I40E_GLPRT_RFC(hw->port));
+ printf("RX fragments = %x\n", reg);
+ reg = rd32(hw, I40E_GLPRT_ROC(hw->port));
+ printf("RX oversize = %x\n", reg);
+ reg = rd32(hw, I40E_GLPRT_RLEC(hw->port));
+ printf("RX length error = %x\n", reg);
+ reg = rd32(hw, I40E_GLPRT_MRFC(hw->port));
+ printf("mac remote fault = %x\n", reg);
+ reg = rd32(hw, I40E_GLPRT_MLFC(hw->port));
+ printf("mac local fault = %x\n", reg);
+}
+
+/**
+ * Update VSI-specific ethernet statistics counters.
+ **/
+void ixl_update_eth_stats(struct ixl_vsi *vsi)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ struct ifnet *ifp = vsi->ifp;
+ struct i40e_eth_stats *es;
+ struct i40e_eth_stats *oes;
+ u16 stat_idx = vsi->info.stat_counter_idx;
+
+ es = &vsi->eth_stats;
+ oes = &vsi->eth_stats_offsets;
+
+ /* Gather up the stats that the hw collects */
+ ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->tx_errors, &es->tx_errors);
+ ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->rx_discards, &es->rx_discards);
+
+ ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
+ I40E_GLV_GORCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->rx_bytes, &es->rx_bytes);
+ ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
+ I40E_GLV_UPRCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->rx_unicast, &es->rx_unicast);
+ ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
+ I40E_GLV_MPRCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->rx_multicast, &es->rx_multicast);
+ ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
+ I40E_GLV_BPRCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->rx_broadcast, &es->rx_broadcast);
+
+ ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
+ I40E_GLV_GOTCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->tx_bytes, &es->tx_bytes);
+ ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
+ I40E_GLV_UPTCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->tx_unicast, &es->tx_unicast);
+ ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
+ I40E_GLV_MPTCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->tx_multicast, &es->tx_multicast);
+ ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
+ I40E_GLV_BPTCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->tx_broadcast, &es->tx_broadcast);
+ vsi->stat_offsets_loaded = true;
+
+ /* Update ifnet stats */
+ ifp->if_ipackets = es->rx_unicast +
+ es->rx_multicast +
+ es->rx_broadcast;
+ ifp->if_opackets = es->tx_unicast +
+ es->tx_multicast +
+ es->tx_broadcast;
+ ifp->if_ibytes = es->rx_bytes;
+ ifp->if_obytes = es->tx_bytes;
+ ifp->if_imcasts = es->rx_multicast;
+ ifp->if_omcasts = es->tx_multicast;
+
+ ifp->if_oerrors = es->tx_errors;
+ ifp->if_iqdrops = es->rx_discards;
+ ifp->if_noproto = es->rx_unknown_protocol;
+ ifp->if_collisions = 0;
+}
+
+/**
+ * Reset all of the stats for the given pf
+ **/
+void ixl_pf_reset_stats(struct ixl_pf *pf)
+{
+ bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
+ bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
+ pf->stat_offsets_loaded = false;
+}
+
+/**
+ * Resets all stats of the given vsi
+ **/
+void ixl_vsi_reset_stats(struct ixl_vsi *vsi)
+{
+ bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
+ bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
+ vsi->stat_offsets_loaded = false;
+}
+
+/**
+ * Read and update a 48 bit stat from the hw
+ *
+ * Since the device stats are not reset at PFReset, they likely will not
+ * be zeroed when the driver starts. We'll save the first values read
+ * and use them as offsets to be subtracted from the raw values in order
+ * to report stats that count from zero.
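+ *
+ * A wrap is handled by adding 2^48 before subtracting; for example,
+ * an offset of 0xFFFFFFFFFFF0 and a new reading of 0x10 yields a
+ * reported value of 0x20.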
+ **/
+static void
+ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
+ bool offset_loaded, u64 *offset, u64 *stat)
+{
+ u64 new_data;
+
+#if __FreeBSD__ >= 10 && __amd64__
+ new_data = rd64(hw, loreg);
+#else
+ /*
+ * Use two rd32's instead of one rd64; FreeBSD versions before
+ * 10 don't support 8 byte bus reads/writes.
+ */
+ new_data = rd32(hw, loreg);
+ new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
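+	/* Only the low 16 bits of the high register are significant
+	 * for a 48-bit counter (32 + 16 bits). */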
+#endif
+
+ if (!offset_loaded)
+ *offset = new_data;
+ if (new_data >= *offset)
+ *stat = new_data - *offset;
+ else
+ *stat = (new_data + ((u64)1 << 48)) - *offset;
+ *stat &= 0xFFFFFFFFFFFFULL;
+}
+
+/**
+ * Read and update a 32 bit stat from the hw
+ **/
+static void
+ixl_stat_update32(struct i40e_hw *hw, u32 reg,
+ bool offset_loaded, u64 *offset, u64 *stat)
+{
+ u32 new_data;
+
+ new_data = rd32(hw, reg);
+ if (!offset_loaded)
+ *offset = new_data;
+ if (new_data >= *offset)
+ *stat = (u32)(new_data - *offset);
+ else
+ *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
+}
+
+/*
+** Set flow control using sysctl:
+** 0 - off
+** 1 - rx pause
+** 2 - tx pause
+** 3 - full
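+**
+** Reading this sysctl returns the current mode (hw->fc.current_mode);
+** writing 0-3 requests a new mode, e.g. 3 requests full (rx + tx) pause.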
+*/
+static int
+ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
+{
+ /*
+ * TODO: ensure flow control is disabled if
+ * priority flow control is enabled
+ *
+ * TODO: ensure tx CRC by hardware should be enabled
+ * if tx flow control is enabled.
+ */
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ int requested_fc = 0, error = 0;
+ enum i40e_status_code aq_error = 0;
+ u8 fc_aq_err = 0;
+
+ aq_error = i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
+ if (aq_error) {
+ device_printf(dev,
+ "%s: Error retrieving link info from aq, %d\n",
+ __func__, aq_error);
+ return (EAGAIN);
+ }
+
+ /* Read in new mode */
+ requested_fc = hw->fc.current_mode;
+ error = sysctl_handle_int(oidp, &requested_fc, 0, req);
+ if ((error) || (req->newptr == NULL))
+ return (error);
+ if (requested_fc < 0 || requested_fc > 3) {
+ device_printf(dev,
+ "Invalid fc mode; valid modes are 0 through 3\n");
+ return (EINVAL);
+ }
+
+ /*
+ ** Changing flow control mode currently does not work on
+ ** 40GBASE-CR4 PHYs
+ */
+ if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4
+ || hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4_CU) {
+ device_printf(dev, "Changing flow control mode unsupported"
+ " on 40GBase-CR4 media.\n");
+ return (ENODEV);
+ }
+
+ /* Set fc ability for port */
+ hw->fc.requested_mode = requested_fc;
+ aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
+ if (aq_error) {
+ device_printf(dev,
+ "%s: Error setting new fc mode %d; fc_err %#x\n",
+ __func__, aq_error, fc_aq_err);
+ return (EAGAIN);
+ }
+
+ if (hw->fc.current_mode != hw->fc.requested_mode) {
+ device_printf(dev, "%s: FC set failure:\n", __func__);
+ device_printf(dev, "%s: Current: %s / Requested: %s\n",
+ __func__,
+ ixl_fc_string[hw->fc.current_mode],
+ ixl_fc_string[hw->fc.requested_mode]);
+ }
+
+ return (0);
+}
+
+static int
+ixl_current_speed(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ struct i40e_hw *hw = &pf->hw;
+ int error = 0, index = 0;
+
+ char *speeds[] = {
+ "Unknown",
+ "100M",
+ "1G",
+ "10G",
+ "40G",
+ "20G"
+ };
+
+ ixl_update_link_status(pf);
+
+ switch (hw->phy.link_info.link_speed) {
+ case I40E_LINK_SPEED_100MB:
+ index = 1;
+ break;
+ case I40E_LINK_SPEED_1GB:
+ index = 2;
+ break;
+ case I40E_LINK_SPEED_10GB:
+ index = 3;
+ break;
+ case I40E_LINK_SPEED_40GB:
+ index = 4;
+ break;
+ case I40E_LINK_SPEED_20GB:
+ index = 5;
+ break;
+ case I40E_LINK_SPEED_UNKNOWN:
+ default:
+ index = 0;
+ break;
+ }
+
+ error = sysctl_handle_string(oidp, speeds[index],
+ strlen(speeds[index]), req);
+ return (error);
+}
+
+/*
+** Control link advertise speed:
+** Flags:
+** 0x1 - advertise 100 Mb
+** 0x2 - advertise 1G
+** 0x4 - advertise 10G
+**
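+**
+** Flags may be OR'd together, e.g. 0x6 advertises both 1G and 10G.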
+** Does not work on 40G devices.
+*/
+static int
+ixl_set_advertise(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ struct i40e_aq_set_phy_config config;
+ int requested_ls = 0;
+ enum i40e_status_code aq_error = 0;
+ int error = 0;
+
+ /*
+ ** FW doesn't support changing advertised speed
+ ** for 40G devices; speed is always 40G.
+ */
+ if (i40e_is_40G_device(hw->device_id))
+ return (ENODEV);
+
+ /* Read in new mode */
+ requested_ls = pf->advertised_speed;
+ error = sysctl_handle_int(oidp, &requested_ls, 0, req);
+ if ((error) || (req->newptr == NULL))
+ return (error);
+ if (requested_ls < 1 || requested_ls > 7) {
+ device_printf(dev,
+ "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
+ return (EINVAL);
+ }
+
+ /* Exit if no change */
+ if (pf->advertised_speed == requested_ls)
+ return (0);
+
+ /* Get current capability information */
+ aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities, NULL);
+ if (aq_error) {
+ device_printf(dev, "%s: Error getting phy capabilities %d,"
+ " aq error: %d\n", __func__, aq_error,
+ hw->aq.asq_last_status);
+ return (EAGAIN);
+ }
+
+ /* Prepare new config */
+ bzero(&config, sizeof(config));
+ config.phy_type = abilities.phy_type;
+ config.abilities = abilities.abilities
+ | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+ config.eee_capability = abilities.eee_capability;
+ config.eeer = abilities.eeer_val;
+ config.low_power_ctrl = abilities.d3_lpan;
+ /* Translate into aq cmd link_speed */
+ if (requested_ls & 0x4)
+ config.link_speed |= I40E_LINK_SPEED_10GB;
+ if (requested_ls & 0x2)
+ config.link_speed |= I40E_LINK_SPEED_1GB;
+ if (requested_ls & 0x1)
+ config.link_speed |= I40E_LINK_SPEED_100MB;
+
+ /* Do aq command & restart link */
+ aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
+ if (aq_error) {
+ device_printf(dev, "%s: Error setting new phy config %d,"
+ " aq error: %d\n", __func__, aq_error,
+ hw->aq.asq_last_status);
+ return (EAGAIN);
+ }
+
+ pf->advertised_speed = requested_ls;
+ ixl_update_link_status(pf);
+ return (0);
+}
+
+/*
+** Get the width and transaction speed of
+** the bus this adapter is plugged into.
+*/
+static u16
+ixl_get_bus_info(struct i40e_hw *hw, device_t dev)
+{
+ u16 link;
+ u32 offset;
+
+
+ /* Get the PCI Express Capabilities offset */
+ pci_find_cap(dev, PCIY_EXPRESS, &offset);
+
+ /* ...and read the Link Status Register */
+ link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
+
+ switch (link & I40E_PCI_LINK_WIDTH) {
+ case I40E_PCI_LINK_WIDTH_1:
+ hw->bus.width = i40e_bus_width_pcie_x1;
+ break;
+ case I40E_PCI_LINK_WIDTH_2:
+ hw->bus.width = i40e_bus_width_pcie_x2;
+ break;
+ case I40E_PCI_LINK_WIDTH_4:
+ hw->bus.width = i40e_bus_width_pcie_x4;
+ break;
+ case I40E_PCI_LINK_WIDTH_8:
+ hw->bus.width = i40e_bus_width_pcie_x8;
+ break;
+ default:
+ hw->bus.width = i40e_bus_width_unknown;
+ break;
+ }
+
+ switch (link & I40E_PCI_LINK_SPEED) {
+ case I40E_PCI_LINK_SPEED_2500:
+ hw->bus.speed = i40e_bus_speed_2500;
+ break;
+ case I40E_PCI_LINK_SPEED_5000:
+ hw->bus.speed = i40e_bus_speed_5000;
+ break;
+ case I40E_PCI_LINK_SPEED_8000:
+ hw->bus.speed = i40e_bus_speed_8000;
+ break;
+ default:
+ hw->bus.speed = i40e_bus_speed_unknown;
+ break;
+ }
+
+
+ device_printf(dev,"PCI Express Bus: Speed %s %s\n",
+ ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
+ (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
+ (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
+ (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
+ (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
+ (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
+ ("Unknown"));
+
+ if ((hw->bus.width <= i40e_bus_width_pcie_x8) &&
+ (hw->bus.speed < i40e_bus_speed_8000)) {
+		device_printf(dev, "PCI-Express bandwidth available"
+		    " for this device is not sufficient for"
+		    " normal operation.\n");
+ device_printf(dev, "For expected performance a x8 "
+ "PCIE Gen3 slot is required.\n");
+ }
+
+ return (link);
+}
+
+#ifdef IXL_DEBUG
+static int
+ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_link_status link_status;
+ char buf[512];
+
+ enum i40e_status_code aq_error = 0;
+
+ aq_error = i40e_aq_get_link_info(hw, TRUE, &link_status, NULL);
+ if (aq_error) {
+ printf("i40e_aq_get_link_info() error %d\n", aq_error);
+ return (EPERM);
+ }
+
+ sprintf(buf, "\n"
+ "PHY Type : %#04x\n"
+ "Speed : %#04x\n"
+ "Link info: %#04x\n"
+ "AN info : %#04x\n"
+ "Ext info : %#04x",
+ link_status.phy_type, link_status.link_speed,
+ link_status.link_info, link_status.an_info,
+ link_status.ext_info);
+
+ return (sysctl_handle_string(oidp, buf, strlen(buf), req));
+}
+
+static int
+ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_aq_get_phy_abilities_resp abilities_resp;
+ char buf[512];
+
+ enum i40e_status_code aq_error = 0;
+
+ // TODO: Print out list of qualified modules as well?
+ aq_error = i40e_aq_get_phy_capabilities(hw, TRUE, FALSE, &abilities_resp, NULL);
+ if (aq_error) {
+ printf("i40e_aq_get_phy_capabilities() error %d\n", aq_error);
+ return (EPERM);
+ }
+
+ sprintf(buf, "\n"
+ "PHY Type : %#010x\n"
+ "Speed : %#04x\n"
+ "Abilities: %#04x\n"
+ "EEE cap : %#06x\n"
+ "EEER reg : %#010x\n"
+ "D3 Lpan : %#04x",
+ abilities_resp.phy_type, abilities_resp.link_speed,
+ abilities_resp.abilities, abilities_resp.eee_capability,
+ abilities_resp.eeer_val, abilities_resp.d3_lpan);
+
+ return (sysctl_handle_string(oidp, buf, strlen(buf), req));
+}
+
+static int
+ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct ixl_mac_filter *f;
+ char *buf, *buf_i;
+
+ int error = 0;
+ int ftl_len = 0;
+ int ftl_counter = 0;
+ int buf_len = 0;
+ int entry_len = 42;
+
+ SLIST_FOREACH(f, &vsi->ftl, next) {
+ ftl_len++;
+ }
+
+ if (ftl_len < 1) {
+ sysctl_handle_string(oidp, "(none)", 6, req);
+ return (0);
+ }
+
+ buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
+ buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
+
+ sprintf(buf_i++, "\n");
+ SLIST_FOREACH(f, &vsi->ftl, next) {
+ sprintf(buf_i,
+ MAC_FORMAT ", vlan %4d, flags %#06x",
+ MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
+ buf_i += entry_len;
+ /* don't print '\n' for last entry */
+ if (++ftl_counter != ftl_len) {
+ sprintf(buf_i, "\n");
+ buf_i++;
+ }
+ }
+
+ error = sysctl_handle_string(oidp, buf, strlen(buf), req);
+ if (error)
+ printf("sysctl error: %d\n", error);
+ free(buf, M_DEVBUF);
+ return error;
+}
+
+#define IXL_SW_RES_SIZE 0x14
+static int
+ixl_sysctl_hw_res_info(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ struct sbuf *buf;
+ int error = 0;
+
+ u8 num_entries;
+ struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
+
+ buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
+ if (!buf) {
+ device_printf(dev, "Could not allocate sbuf for output.\n");
+ return (ENOMEM);
+ }
+
+ error = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
+ resp,
+ IXL_SW_RES_SIZE,
+ NULL);
+ if (error) {
+ device_printf(dev, "%s: get_switch_resource_alloc() error %d, aq error %d\n",
+ __func__, error, hw->aq.asq_last_status);
+ sbuf_delete(buf);
+ return error;
+ }
+ device_printf(dev, "Num_entries: %d\n", num_entries);
+
+ sbuf_cat(buf, "\n");
+ sbuf_printf(buf,
+ "Type | Guaranteed | Total | Used | Un-allocated\n"
+ " | (this) | (all) | (this) | (all) \n");
+ for (int i = 0; i < num_entries; i++) {
+ sbuf_printf(buf,
+ "%#4x | %10d %5d %6d %12d",
+ resp[i].resource_type,
+ resp[i].guaranteed,
+ resp[i].total,
+ resp[i].used,
+ resp[i].total_unalloced);
+ if (i < num_entries - 1)
+ sbuf_cat(buf, "\n");
+ }
+
+ error = sbuf_finish(buf);
+ if (error) {
+ device_printf(dev, "Error finishing sbuf: %d\n", error);
+ sbuf_delete(buf);
+ return error;
+ }
+
+ error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
+ if (error)
+ device_printf(dev, "sysctl error: %d\n", error);
+ sbuf_delete(buf);
+ return error;
+
+}
+
+/*
+** Dump TX desc given index.
+** Doesn't work; don't use.
+** TODO: Also needs a queue index input!
+**/
+static int
+ixl_sysctl_dump_txd(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ device_t dev = pf->dev;
+ struct sbuf *buf;
+ int error = 0;
+
+ u16 desc_idx = 0;
+
+ buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
+ if (!buf) {
+ device_printf(dev, "Could not allocate sbuf for output.\n");
+ return (ENOMEM);
+ }
+
+ /* Read in index */
+ error = sysctl_handle_int(oidp, &desc_idx, 0, req);
+ if (error)
+ return (error);
+ if (req->newptr == NULL)
+ return (EIO); // fix
+ if (desc_idx > 1024) { // fix
+ device_printf(dev,
+ "Invalid descriptor index, needs to be < 1024\n"); // fix
+ return (EINVAL);
+ }
+
+ // Don't use this sysctl yet
+ if (TRUE)
+ return (ENODEV);
+
+ sbuf_cat(buf, "\n");
+
+ // set to queue 1?
+ struct ixl_queue *que = pf->vsi.queues;
+ struct tx_ring *txr = &(que[1].txr);
+ struct i40e_tx_desc *txd = &txr->base[desc_idx];
+
+ sbuf_printf(buf, "Que: %d, Desc: %d\n", que->me, desc_idx);
+ sbuf_printf(buf, "Addr: %#18lx\n", txd->buffer_addr);
+ sbuf_printf(buf, "Opts: %#18lx\n", txd->cmd_type_offset_bsz);
+
+ error = sbuf_finish(buf);
+ if (error) {
+ device_printf(dev, "Error finishing sbuf: %d\n", error);
+ sbuf_delete(buf);
+ return error;
+ }
+
+ error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
+ if (error)
+ device_printf(dev, "sysctl error: %d\n", error);
+ sbuf_delete(buf);
+ return error;
+}
+#endif
+
diff --git a/sys/dev/ixl/if_ixlv.c b/sys/dev/ixl/if_ixlv.c
new file mode 100644
index 0000000..0e6e572
--- /dev/null
+++ b/sys/dev/ixl/if_ixlv.c
@@ -0,0 +1,2742 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2014, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#include "opt_inet.h"
+#include "opt_inet6.h"
+#include "ixl.h"
+#include "ixlv.h"
+
+/*********************************************************************
+ * Driver version
+ *********************************************************************/
+char ixlv_driver_version[] = "1.1.4";
+
+/*********************************************************************
+ * PCI Device ID Table
+ *
+ * Used by probe to select devices to load on
+ * Last field stores an index into ixlv_strings
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
+ *********************************************************************/
+
+static ixl_vendor_info_t ixlv_vendor_info_array[] =
+{
+ {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, 0, 0, 0},
+ {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF_HV, 0, 0, 0},
+ /* required last entry */
+ {0, 0, 0, 0, 0}
+};
+
+/*********************************************************************
+ * Table of branding strings
+ *********************************************************************/
+
+static char *ixlv_strings[] = {
+ "Intel(R) Ethernet Connection XL710 VF Driver"
+};
+
+
+/*********************************************************************
+ * Function prototypes
+ *********************************************************************/
+static int ixlv_probe(device_t);
+static int ixlv_attach(device_t);
+static int ixlv_detach(device_t);
+static int ixlv_shutdown(device_t);
+static void ixlv_init_locked(struct ixlv_sc *);
+static int ixlv_allocate_pci_resources(struct ixlv_sc *);
+static void ixlv_free_pci_resources(struct ixlv_sc *);
+static int ixlv_assign_msix(struct ixlv_sc *);
+static int ixlv_init_msix(struct ixlv_sc *);
+static int ixlv_init_taskqueue(struct ixlv_sc *);
+static int ixlv_setup_queues(struct ixlv_sc *);
+static void ixlv_config_rss(struct ixlv_sc *);
+static void ixlv_stop(struct ixlv_sc *);
+static void ixlv_add_multi(struct ixl_vsi *);
+static void ixlv_del_multi(struct ixl_vsi *);
+static void ixlv_update_link_status(struct ixlv_sc *);
+static void ixlv_free_queues(struct ixl_vsi *);
+static int ixlv_setup_interface(device_t, struct ixlv_sc *);
+
+static int ixlv_media_change(struct ifnet *);
+static void ixlv_media_status(struct ifnet *, struct ifmediareq *);
+
+static void ixlv_local_timer(void *);
+
+static int ixlv_add_mac_filter(struct ixlv_sc *, u8 *, u16);
+static void ixlv_init_filters(struct ixlv_sc *);
+static void ixlv_free_filters(struct ixlv_sc *);
+
+static void ixlv_msix_que(void *);
+static void ixlv_msix_adminq(void *);
+static void ixlv_do_adminq(void *, int);
+static void ixlv_sched_aq(void *);
+static void ixlv_handle_que(void *, int);
+static int ixlv_reset(struct ixlv_sc *);
+static int ixlv_reset_complete(struct i40e_hw *);
+static void ixlv_set_queue_rx_itr(struct ixl_queue *);
+static void ixlv_set_queue_tx_itr(struct ixl_queue *);
+
+static void ixlv_enable_adminq_irq(struct i40e_hw *);
+static void ixlv_disable_adminq_irq(struct i40e_hw *);
+static void ixlv_enable_queue_irq(struct i40e_hw *, int);
+static void ixlv_disable_queue_irq(struct i40e_hw *, int);
+
+static void ixlv_setup_vlan_filters(struct ixlv_sc *);
+static void ixlv_register_vlan(void *, struct ifnet *, u16);
+static void ixlv_unregister_vlan(void *, struct ifnet *, u16);
+
+static void ixlv_cap_txcsum_tso(struct ixl_vsi *,
+ struct ifnet *, int);
+
+static void ixlv_add_stats_sysctls(struct ixlv_sc *);
+
+/*********************************************************************
+ * FreeBSD Device Interface Entry Points
+ *********************************************************************/
+
+static device_method_t ixlv_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, ixlv_probe),
+ DEVMETHOD(device_attach, ixlv_attach),
+ DEVMETHOD(device_detach, ixlv_detach),
+ DEVMETHOD(device_shutdown, ixlv_shutdown),
+ {0, 0}
+};
+
+static driver_t ixlv_driver = {
+ "ixlv", ixlv_methods, sizeof(struct ixlv_sc),
+};
+
+devclass_t ixlv_devclass;
+DRIVER_MODULE(ixlv, pci, ixlv_driver, ixlv_devclass, 0, 0);
+
+MODULE_DEPEND(ixlv, pci, 1, 1, 1);
+MODULE_DEPEND(ixlv, ether, 1, 1, 1);
+
+/*
+** TUNEABLE PARAMETERS:
+*/
+
+static SYSCTL_NODE(_hw, OID_AUTO, ixlv, CTLFLAG_RD, 0,
+ "IXLV driver parameters");
+
+/*
+** Number of descriptors per ring:
+** - TX and RX are the same size
+*/
+static int ixlv_ringsz = DEFAULT_RING;
+TUNABLE_INT("hw.ixlv.ringsz", &ixlv_ringsz);
+SYSCTL_INT(_hw_ixlv, OID_AUTO, ring_size, CTLFLAG_RDTUN,
+ &ixlv_ringsz, 0, "Descriptor Ring Size");
+
+/* Set to zero to auto calculate */
+int ixlv_max_queues = 0;
+TUNABLE_INT("hw.ixlv.max_queues", &ixlv_max_queues);
+SYSCTL_INT(_hw_ixlv, OID_AUTO, max_queues, CTLFLAG_RDTUN,
+ &ixlv_max_queues, 0, "Number of Queues");
+
+/*
+** Number of entries in Tx queue buf_ring.
+** Increasing this will reduce the number of
+** errors when transmitting fragmented UDP
+** packets.
+*/
+static int ixlv_txbrsz = DEFAULT_TXBRSZ;
+TUNABLE_INT("hw.ixlv.txbrsz", &ixlv_txbrsz);
+SYSCTL_INT(_hw_ixlv, OID_AUTO, txbr_size, CTLFLAG_RDTUN,
+ &ixlv_txbrsz, 0, "TX Buf Ring Size");
+
+/*
+** Controls for Interrupt Throttling
+** - true/false for dynamic adjustment
+** - default values for static ITR
+*/
+int ixlv_dynamic_rx_itr = 0;
+TUNABLE_INT("hw.ixlv.dynamic_rx_itr", &ixlv_dynamic_rx_itr);
+SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
+ &ixlv_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");
+
+int ixlv_dynamic_tx_itr = 0;
+TUNABLE_INT("hw.ixlv.dynamic_tx_itr", &ixlv_dynamic_tx_itr);
+SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
+ &ixlv_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
+
+int ixlv_rx_itr = IXL_ITR_8K;
+TUNABLE_INT("hw.ixlv.rx_itr", &ixlv_rx_itr);
+SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
+ &ixlv_rx_itr, 0, "RX Interrupt Rate");
+
+int ixlv_tx_itr = IXL_ITR_4K;
+TUNABLE_INT("hw.ixlv.tx_itr", &ixlv_tx_itr);
+SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
+ &ixlv_tx_itr, 0, "TX Interrupt Rate");
+
+
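+/*
+** Example (values are illustrative only): these are boot-time
+** tunables, so they would typically be set in /boot/loader.conf
+** before the module is loaded, e.g.:
+**
+**	hw.ixlv.ringsz=2048
+**	hw.ixlv.max_queues=4
+**	hw.ixlv.dynamic_rx_itr=1
+**
+** The matching sysctls above are CTLFLAG_RDTUN, so they only report
+** the values read-only at runtime.
+*/
+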
+/*********************************************************************
+ * Device identification routine
+ *
+ * ixlv_probe determines if the driver should be loaded on
+ * the hardware based on PCI vendor/device id of the device.
+ *
+ * return BUS_PROBE_DEFAULT on success, positive on failure
+ *********************************************************************/
+
+static int
+ixlv_probe(device_t dev)
+{
+ ixl_vendor_info_t *ent;
+
+ u16 pci_vendor_id, pci_device_id;
+ u16 pci_subvendor_id, pci_subdevice_id;
+ char device_name[256];
+
+ INIT_DEBUGOUT("ixlv_probe: begin");
+
+ pci_vendor_id = pci_get_vendor(dev);
+ if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
+ return (ENXIO);
+
+ pci_device_id = pci_get_device(dev);
+ pci_subvendor_id = pci_get_subvendor(dev);
+ pci_subdevice_id = pci_get_subdevice(dev);
+
+ ent = ixlv_vendor_info_array;
+ while (ent->vendor_id != 0) {
+ if ((pci_vendor_id == ent->vendor_id) &&
+ (pci_device_id == ent->device_id) &&
+
+ ((pci_subvendor_id == ent->subvendor_id) ||
+ (ent->subvendor_id == 0)) &&
+
+ ((pci_subdevice_id == ent->subdevice_id) ||
+ (ent->subdevice_id == 0))) {
+ sprintf(device_name, "%s, Version - %s",
+ ixlv_strings[ent->index],
+ ixlv_driver_version);
+ device_set_desc_copy(dev, device_name);
+ return (BUS_PROBE_DEFAULT);
+ }
+ ent++;
+ }
+ return (ENXIO);
+}
+
+/*********************************************************************
+ * Device initialization routine
+ *
+ * The attach entry point is called when the driver is being loaded.
+ * This routine identifies the type of hardware, allocates all resources
+ * and initializes the hardware.
+ *
+ * return 0 on success, positive on failure
+ *********************************************************************/
+
+static int
+ixlv_attach(device_t dev)
+{
+ struct ixlv_sc *sc;
+ struct i40e_hw *hw;
+ struct ixl_vsi *vsi;
+ int bufsz, error = 0, retries = 0;
+
+ INIT_DBG_DEV(dev, "begin");
+
+ /* Allocate, clear, and link in our primary soft structure */
+ sc = device_get_softc(dev);
+ sc->dev = sc->osdep.dev = dev;
+ hw = &sc->hw;
+ vsi = &sc->vsi;
+ vsi->dev = dev;
+
+ /* Allocate filter lists */
+ ixlv_init_filters(sc);
+
+ /* Core Lock Init*/
+ mtx_init(&sc->mtx, device_get_nameunit(dev),
+ "IXL SC Lock", MTX_DEF);
+ mtx_init(&sc->aq_task_mtx, device_get_nameunit(dev),
+ "IXL AQ Task Lock", MTX_DEF);
+
+ /* Set up the timer & aq watchdog callouts */
+ callout_init_mtx(&sc->timer, &sc->mtx, 0);
+ callout_init_mtx(&sc->aq_task, &sc->aq_task_mtx, 0);
+
+ /* Save off the information about this board */
+ hw->vendor_id = pci_get_vendor(dev);
+ hw->device_id = pci_get_device(dev);
+ hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
+ hw->subsystem_vendor_id =
+ pci_read_config(dev, PCIR_SUBVEND_0, 2);
+ hw->subsystem_device_id =
+ pci_read_config(dev, PCIR_SUBDEV_0, 2);
+
+ hw->bus.device = pci_get_slot(dev);
+ hw->bus.func = pci_get_function(dev);
+
+ /* Do PCI setup - map BAR0, etc */
+ if (ixlv_allocate_pci_resources(sc)) {
+ device_printf(dev, "%s: Allocation of PCI resources failed\n",
+ __func__);
+ error = ENXIO;
+ goto err_early;
+ }
+
+ INIT_DBG_DEV(dev, "Allocated PCI resources and MSIX vectors");
+
+ error = i40e_set_mac_type(hw);
+ if (error) {
+ device_printf(dev, "%s: set_mac_type failed: %d\n",
+ __func__, error);
+ goto err_pci_res;
+ }
+
+ error = ixlv_reset_complete(hw);
+ if (error) {
+ device_printf(dev, "%s: Device is still being reset\n",
+ __func__);
+ goto err_pci_res;
+ }
+
+ INIT_DBG_DEV(dev, "VF Device is ready for configuration");
+
+ hw->aq.num_arq_entries = IXL_AQ_LEN;
+ hw->aq.num_asq_entries = IXL_AQ_LEN;
+ hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
+ hw->aq.asq_buf_size = IXL_AQ_BUFSZ;
+
+ error = i40e_init_adminq(hw);
+ if (error) {
+ device_printf(dev, "%s: init_adminq failed: %d\n",
+ __func__, error);
+ goto err_pci_res;
+ }
+
+ INIT_DBG_DEV(dev, "Initialized Admin Queue");
+
+ error = ixlv_send_api_ver(sc);
+ if (error) {
+ device_printf(dev, "%s: unable to send to PF (%d)\n",
+ __func__, error);
+ goto err_aq;
+ }
+
+ while (!i40e_asq_done(hw)) {
+ if (++retries > IXLV_AQ_MAX_ERR) {
+ device_printf(dev, "%s: Admin Queue timeout "
+ "(waiting for send_api_ver)\n", __func__);
+ error = ENXIO;
+ goto err_aq;
+ }
+ i40e_msec_delay(10);
+ }
+
+ INIT_DBG_DEV(dev, "Sent API version message to PF");
+
+ /* Wait for API version msg to arrive */
+ error = ixlv_verify_api_ver(sc);
+ if (error) {
+ device_printf(dev,
+ "%s: Unable to verify API version, error %d\n",
+ __func__, error);
+ goto err_aq;
+ }
+
+ INIT_DBG_DEV(dev, "PF API version verified");
+
+ /* Need API version before sending reset message */
+ error = ixlv_reset(sc);
+ if (error) {
+ device_printf(dev, "VF reset failed; reload the driver\n");
+ goto err_aq;
+ }
+
+ INIT_DBG_DEV(dev, "VF reset complete");
+
+ /* Ask for VF config from PF */
+ error = ixlv_send_vf_config_msg(sc);
+ if (error) {
+ device_printf(dev,
+ "%s: Unable to send VF config request, error %d\n",
+ __func__, error);
+ goto err_aq;
+ }
+
+ retries = 0;
+ while (!i40e_asq_done(hw)) {
+ if (++retries > IXLV_AQ_MAX_ERR) {
+ device_printf(dev, "%s: Admin Queue timeout "
+ "(waiting for send_vf_config_msg)\n", __func__);
+ error = ENXIO;
+ goto err_aq;
+ }
+ i40e_msec_delay(10);
+ }
+
+ INIT_DBG_DEV(dev, "Sent VF config message to PF");
+
+ bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
+ (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
+ sc->vf_res = malloc(bufsz, M_DEVBUF, M_NOWAIT);
+ if (!sc->vf_res) {
+ device_printf(dev,
+ "%s: Unable to allocate memory for VF configuration"
+ " message from PF\n", __func__);
+ error = ENOMEM;
+ goto err_aq;
+ }
+
+ /* Check for VF config response */
+ error = ixlv_get_vf_config(sc);
+ if (error) {
+ device_printf(dev,
+ "%s: Unable to get VF configuration from PF\n",
+ __func__);
+ error = EBUSY;
+ goto err_res_buf;
+ }
+
+ INIT_DBG_DEV(dev, "Received valid VF config from PF");
+ INIT_DBG_DEV(dev, "VSIs %d, Queues %d, Max Vectors %d, Max MTU %d",
+ sc->vf_res->num_vsis,
+ sc->vf_res->num_queue_pairs,
+ sc->vf_res->max_vectors,
+ sc->vf_res->max_mtu);
+ INIT_DBG_DEV(dev, "Offload flags: %#010x",
+ sc->vf_res->vf_offload_flags);
+
+ /* got VF config message back from PF, now we can parse it */
+ for (int i = 0; i < sc->vf_res->num_vsis; i++) {
+ if (sc->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
+ sc->vsi_res = &sc->vf_res->vsi_res[i];
+ }
+ if (!sc->vsi_res) {
+ device_printf(dev, "%s: no LAN VSI found\n", __func__);
+ goto err_res_buf;
+ }
+
+ INIT_DBG_DEV(dev, "Resource Acquisition complete");
+
+ /* If no mac address was assigned just make a random one */
+ if (!ixlv_check_ether_addr(hw->mac.addr)) {
+ u8 addr[ETHER_ADDR_LEN];
+ arc4rand(&addr, sizeof(addr), 0);
+ addr[0] &= 0xFE;
+ addr[0] |= 0x02;
+ bcopy(addr, hw->mac.addr, sizeof(addr));
+ }
+
+ vsi->id = sc->vsi_res->vsi_id;
+ vsi->back = (void *)sc;
+
+ /* Link in this virtual environment is always 'up' */
+ vsi->link_up = TRUE;
+
+ /* This allocates the memory and early settings */
+ if (ixlv_setup_queues(sc) != 0) {
+ device_printf(dev, "%s: setup queues failed!\n",
+ __func__);
+ goto out;
+ }
+
+ /* Setup the stack interface */
+ if (ixlv_setup_interface(dev, sc) != 0) {
+ device_printf(dev, "%s: setup interface failed!\n",
+ __func__);
+ goto out;
+ }
+
+ INIT_DBG_DEV(dev, "Queue memory and interface setup");
+
+ /* Do queue interrupt setup */
+ ixlv_assign_msix(sc);
+
+ /* Start AdminQ taskqueue */
+ ixlv_init_taskqueue(sc);
+
+ /* Start the admin queue scheduler timer */
+ callout_reset(&sc->aq_task, 2 * hz, ixlv_sched_aq, sc);
+
+ /* Initialize stats */
+ bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats));
+ ixlv_add_stats_sysctls(sc);
+
+ /* Register for VLAN events */
+ vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
+ ixlv_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
+ vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
+ ixlv_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);
+
+ /* We want AQ enabled early */
+ ixlv_enable_adminq_irq(hw);
+
+ /* Set things up to run init */
+ sc->aq_pending = 0;
+ sc->aq_required = 0;
+ sc->init_state = IXLV_INIT_READY;
+
+ INIT_DBG_DEV(dev, "end");
+ return (error);
+
+out:
+ ixlv_free_queues(vsi);
+err_res_buf:
+ free(sc->vf_res, M_DEVBUF);
+err_aq:
+ i40e_shutdown_adminq(hw);
+err_pci_res:
+ ixlv_free_pci_resources(sc);
+err_early:
+ mtx_destroy(&sc->mtx);
+ mtx_destroy(&sc->aq_task_mtx);
+ ixlv_free_filters(sc);
+ INIT_DBG_DEV(dev, "end: error %d", error);
+ return (error);
+}
+
+/*********************************************************************
+ * Device removal routine
+ *
+ * The detach entry point is called when the driver is being removed.
+ * This routine stops the adapter and deallocates all the resources
+ * that were allocated for driver operation.
+ *
+ * return 0 on success, positive on failure
+ *********************************************************************/
+
+static int
+ixlv_detach(device_t dev)
+{
+ struct ixlv_sc *sc = device_get_softc(dev);
+ struct ixl_vsi *vsi = &sc->vsi;
+ int retries = 0;
+
+ INIT_DBG_DEV(dev, "begin");
+
+ /* Make sure VLANS are not using driver */
+ if (vsi->ifp->if_vlantrunk != NULL) {
+ device_printf(dev, "Vlan in use, detach first\n");
+ INIT_DBG_DEV(dev, "end");
+ return (EBUSY);
+ }
+
+ /* Stop driver */
+ if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ mtx_lock(&sc->mtx);
+ ixlv_stop(sc);
+ mtx_unlock(&sc->mtx);
+
+ /*
+ ** Ensure queues are disabled before examining
+ ** admin queue state later in detach.
+ */
+ while (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING
+ && ++retries < IXLV_AQ_MAX_ERR) {
+ i40e_msec_delay(10);
+ }
+#ifdef IXL_DEBUG
+ if (retries >= IXLV_AQ_MAX_ERR)
+ device_printf(dev, "Issue disabling queues for detach\n");
+#endif
+ }
+
+ /* Unregister VLAN events */
+ if (vsi->vlan_attach != NULL)
+ EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
+ if (vsi->vlan_detach != NULL)
+ EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);
+
+ /* Stop AQ callout */
+ callout_drain(&sc->aq_task);
+ callout_stop(&sc->aq_task);
+
+#ifdef IXL_DEBUG
+ /* Report on possible AQ failures */
+ if (sc->aq_required || sc->aq_pending) {
+ device_printf(dev, "AQ status on detach:\n");
+ device_printf(dev, "required : 0x%4b\n", sc->aq_required,
+ IXLV_FLAGS);
+ device_printf(dev, "pending : 0x%4b\n", sc->aq_pending,
+ IXLV_FLAGS);
+ device_printf(dev, "current_op: %d\n", sc->current_op);
+ }
+#endif
+
+ i40e_shutdown_adminq(&sc->hw);
+ while (taskqueue_cancel(sc->tq, &sc->aq_irq, NULL) != 0)
+ taskqueue_drain(sc->tq, &sc->aq_irq);
+ taskqueue_free(sc->tq);
+
+ /* force the state down */
+ vsi->ifp->if_flags &= ~IFF_UP;
+ ether_ifdetach(vsi->ifp);
+ if_free(vsi->ifp);
+
+ free(sc->vf_res, M_DEVBUF);
+ ixlv_free_pci_resources(sc);
+ ixlv_free_queues(vsi);
+ mtx_destroy(&sc->mtx);
+ mtx_destroy(&sc->aq_task_mtx);
+ ixlv_free_filters(sc);
+
+ bus_generic_detach(dev);
+ INIT_DBG_DEV(dev, "end");
+ return (0);
+}
+
+/*********************************************************************
+ *
+ * Shutdown entry point
+ *
+ **********************************************************************/
+
+static int
+ixlv_shutdown(device_t dev)
+{
+ struct ixlv_sc *sc = device_get_softc(dev);
+
+ INIT_DBG_DEV(dev, "begin");
+
+ mtx_lock(&sc->mtx);
+ ixlv_stop(sc);
+ mtx_unlock(&sc->mtx);
+
+ INIT_DBG_DEV(dev, "end");
+ return (0);
+}
+
+/*
+ * Configure TXCSUM(IPV6) and TSO(4/6)
+ * - the hardware handles these together so we
+ * need to tweak them
+ */
+static void
+ixlv_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
+{
+ /* Enable/disable TXCSUM/TSO4 */
+ if (!(ifp->if_capenable & IFCAP_TXCSUM)
+ && !(ifp->if_capenable & IFCAP_TSO4)) {
+ if (mask & IFCAP_TXCSUM) {
+ ifp->if_capenable |= IFCAP_TXCSUM;
+ /* enable TXCSUM, restore TSO if previously enabled */
+ if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
+ vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
+ ifp->if_capenable |= IFCAP_TSO4;
+ }
+ }
+ else if (mask & IFCAP_TSO4) {
+ ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
+ vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
+ if_printf(ifp,
+ "TSO4 requires txcsum, enabling both...\n");
+ }
+ } else if((ifp->if_capenable & IFCAP_TXCSUM)
+ && !(ifp->if_capenable & IFCAP_TSO4)) {
+ if (mask & IFCAP_TXCSUM)
+ ifp->if_capenable &= ~IFCAP_TXCSUM;
+ else if (mask & IFCAP_TSO4)
+ ifp->if_capenable |= IFCAP_TSO4;
+ } else if((ifp->if_capenable & IFCAP_TXCSUM)
+ && (ifp->if_capenable & IFCAP_TSO4)) {
+ if (mask & IFCAP_TXCSUM) {
+ vsi->flags |= IXL_FLAGS_KEEP_TSO4;
+ ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
+ if_printf(ifp,
+ "TSO4 requires txcsum, disabling both...\n");
+ } else if (mask & IFCAP_TSO4)
+ ifp->if_capenable &= ~IFCAP_TSO4;
+ }
+
+ /* Enable/disable TXCSUM_IPV6/TSO6 */
+ if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
+ && !(ifp->if_capenable & IFCAP_TSO6)) {
+ if (mask & IFCAP_TXCSUM_IPV6) {
+ ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
+ if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
+ vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
+ ifp->if_capenable |= IFCAP_TSO6;
+ }
+ } else if (mask & IFCAP_TSO6) {
+ ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
+ vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
+ if_printf(ifp,
+ "TSO6 requires txcsum6, enabling both...\n");
+ }
+ } else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
+ && !(ifp->if_capenable & IFCAP_TSO6)) {
+ if (mask & IFCAP_TXCSUM_IPV6)
+ ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
+ else if (mask & IFCAP_TSO6)
+ ifp->if_capenable |= IFCAP_TSO6;
+ } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
+ && (ifp->if_capenable & IFCAP_TSO6)) {
+ if (mask & IFCAP_TXCSUM_IPV6) {
+ vsi->flags |= IXL_FLAGS_KEEP_TSO6;
+ ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
+ if_printf(ifp,
+ "TSO6 requires txcsum6, disabling both...\n");
+ } else if (mask & IFCAP_TSO6)
+ ifp->if_capenable &= ~IFCAP_TSO6;
+ }
+}
+
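+/*
+ * Note on ixlv_cap_txcsum_tso() above: the net effect is that TSO4 is
+ * only left enabled while TXCSUM is enabled (and TSO6 only while
+ * TXCSUM_IPV6 is). The IXL_FLAGS_KEEP_TSO4/6 bits remember a TSO
+ * setting that was dropped solely because checksum offload was turned
+ * off, so it can be restored when checksum offload is re-enabled.
+ */
+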
+/*********************************************************************
+ * Ioctl entry point
+ *
+ * ixlv_ioctl is called when the user wants to configure the
+ * interface.
+ *
+ * return 0 on success, positive on failure
+ **********************************************************************/
+
+static int
+ixlv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
+{
+ struct ixl_vsi *vsi = ifp->if_softc;
+ struct ixlv_sc *sc = vsi->back;
+ struct ifreq *ifr = (struct ifreq *)data;
+#if defined(INET) || defined(INET6)
+ struct ifaddr *ifa = (struct ifaddr *)data;
+ bool avoid_reset = FALSE;
+#endif
+ int error = 0;
+
+
+ switch (command) {
+
+ case SIOCSIFADDR:
+#ifdef INET
+ if (ifa->ifa_addr->sa_family == AF_INET)
+ avoid_reset = TRUE;
+#endif
+#ifdef INET6
+ if (ifa->ifa_addr->sa_family == AF_INET6)
+ avoid_reset = TRUE;
+#endif
+#if defined(INET) || defined(INET6)
+ /*
+ ** Calling init results in link renegotiation,
+ ** so we avoid doing it when possible.
+ */
+ if (avoid_reset) {
+ ifp->if_flags |= IFF_UP;
+ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
+ ixlv_init(sc);
+ if (!(ifp->if_flags & IFF_NOARP))
+ arp_ifinit(ifp, ifa);
+ } else
+ error = ether_ioctl(ifp, command, data);
+ break;
+#endif
+ case SIOCSIFMTU:
+ IOCTL_DBG_IF2(ifp, "SIOCSIFMTU (Set Interface MTU)");
+ mtx_lock(&sc->mtx);
+ if (ifr->ifr_mtu > IXL_MAX_FRAME -
+ ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
+ error = EINVAL;
+ IOCTL_DBG_IF(ifp, "mtu too large");
+ } else {
+ IOCTL_DBG_IF2(ifp, "mtu: %lu -> %d", ifp->if_mtu, ifr->ifr_mtu);
+ // ERJ: Interestingly enough, these types don't match
+ ifp->if_mtu = ifr->ifr_mtu;
+ vsi->max_frame_size =
+ ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
+ + ETHER_VLAN_ENCAP_LEN;
+
+ ixlv_init_locked(sc);
+ }
+ mtx_unlock(&sc->mtx);
+ break;
+ case SIOCSIFFLAGS:
+ IOCTL_DBG_IF2(ifp, "SIOCSIFFLAGS (Set Interface Flags)");
+ mtx_lock(&sc->mtx);
+ if (ifp->if_flags & IFF_UP) {
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ ixlv_init_locked(sc);
+ } else
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ ixlv_stop(sc);
+ sc->if_flags = ifp->if_flags;
+ mtx_unlock(&sc->mtx);
+ break;
+ case SIOCADDMULTI:
+ IOCTL_DBG_IF2(ifp, "SIOCADDMULTI");
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ mtx_lock(&sc->mtx);
+ ixlv_disable_intr(vsi);
+ ixlv_add_multi(vsi);
+ ixlv_enable_intr(vsi);
+ mtx_unlock(&sc->mtx);
+ }
+ break;
+ case SIOCDELMULTI:
+ IOCTL_DBG_IF2(ifp, "SIOCDELMULTI");
+ if (sc->init_state == IXLV_RUNNING) {
+ mtx_lock(&sc->mtx);
+ ixlv_disable_intr(vsi);
+ ixlv_del_multi(vsi);
+ ixlv_enable_intr(vsi);
+ mtx_unlock(&sc->mtx);
+ }
+ break;
+ case SIOCSIFMEDIA:
+ case SIOCGIFMEDIA:
+ IOCTL_DBG_IF2(ifp, "SIOCxIFMEDIA (Get/Set Interface Media)");
+ error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
+ break;
+ case SIOCSIFCAP:
+ {
+ int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
+ IOCTL_DBG_IF2(ifp, "SIOCSIFCAP (Set Capabilities)");
+
+ ixlv_cap_txcsum_tso(vsi, ifp, mask);
+
+ if (mask & IFCAP_RXCSUM)
+ ifp->if_capenable ^= IFCAP_RXCSUM;
+ if (mask & IFCAP_RXCSUM_IPV6)
+ ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
+ if (mask & IFCAP_LRO)
+ ifp->if_capenable ^= IFCAP_LRO;
+ if (mask & IFCAP_VLAN_HWTAGGING)
+ ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
+ if (mask & IFCAP_VLAN_HWFILTER)
+ ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
+ if (mask & IFCAP_VLAN_HWTSO)
+ ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ ixlv_init(sc);
+ }
+ VLAN_CAPABILITIES(ifp);
+
+ break;
+ }
+
+ default:
+ IOCTL_DBG_IF2(ifp, "UNKNOWN (0x%X)", (int)command);
+ error = ether_ioctl(ifp, command, data);
+ break;
+ }
+
+ return (error);
+}
+
+/*
+** Reinitializing the VF is unfortunately more complicated than
+** for a physical device: the PF must more or less completely
+** recreate our memory, so many things that were done only once
+** at attach in traditional drivers must now be redone at each
+** reinitialization. This function does that 'prelude' so we can
+** then call the normal locked init code.
+*/
+int
+ixlv_reinit_locked(struct ixlv_sc *sc)
+{
+ struct i40e_hw *hw = &sc->hw;
+ struct ixl_vsi *vsi = &sc->vsi;
+ struct ifnet *ifp = vsi->ifp;
+ struct ixlv_vlan_filter *vf;
+ int error = 0;
+
+ INIT_DBG_IF(ifp, "begin");
+
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ ixlv_stop(sc);
+
+ if ((sc->init_state == IXLV_RESET_REQUIRED) ||
+ (sc->init_state == IXLV_RESET_PENDING))
+ error = ixlv_reset(sc);
+
+ /* set the state in case we went thru RESET */
+ sc->init_state = IXLV_RUNNING;
+
+ if (vsi->num_vlans != 0)
+ SLIST_FOREACH(vf, sc->vlan_filters, next)
+ vf->flags = IXL_FILTER_ADD;
+ else { /* clean any stale filters */
+ while (!SLIST_EMPTY(sc->vlan_filters)) {
+ vf = SLIST_FIRST(sc->vlan_filters);
+ SLIST_REMOVE_HEAD(sc->vlan_filters, next);
+ free(vf, M_DEVBUF);
+ }
+ }
+
+ ixlv_enable_adminq_irq(hw);
+ sc->aq_pending = 0;
+ sc->aq_required = 0;
+
+ INIT_DBG_IF(ifp, "end");
+ return (error);
+}
+
+
+static void
+ixlv_init_locked(struct ixlv_sc *sc)
+{
+ struct i40e_hw *hw = &sc->hw;
+ struct ixl_vsi *vsi = &sc->vsi;
+ struct ixl_queue *que = vsi->queues;
+ struct ifnet *ifp = vsi->ifp;
+ int error = 0;
+
+ INIT_DBG_IF(ifp, "begin");
+
+ /* Verify we have the core lock */
+ if (!mtx_owned(&sc->mtx)) {
+		if_printf(ifp, "%s: sc mutex not owned; acquire "
+ "before calling this function!\n", __func__);
+ goto init_done;
+ }
+
+ /* Do a reinit first if an init has already been done */
+ if ((sc->init_state == IXLV_RUNNING) ||
+ (sc->init_state == IXLV_RESET_REQUIRED) ||
+ (sc->init_state == IXLV_RESET_PENDING))
+ error = ixlv_reinit_locked(sc);
+ /* Don't bother with init if we failed reinit */
+ if (error)
+ goto init_done;
+
+ /* Check for an LAA mac address... */
+ bcopy(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN);
+
+ ifp->if_hwassist = 0;
+ if (ifp->if_capenable & IFCAP_TSO)
+ ifp->if_hwassist |= CSUM_TSO;
+ if (ifp->if_capenable & IFCAP_TXCSUM)
+ ifp->if_hwassist |= (CSUM_OFFLOAD_IPV4 & ~CSUM_IP);
+ if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
+ ifp->if_hwassist |= CSUM_OFFLOAD_IPV6;
+
+ /* Add mac filter for this VF to PF */
+ error = ixlv_add_mac_filter(sc, hw->mac.addr, 0);
+
+ // send message, then enqueue another task
+ if (!error || error == EEXIST) {
+ sc->aq_required |= IXLV_FLAG_AQ_ADD_MAC_FILTER;
+ callout_reset(&sc->aq_task, IXLV_CALLOUT_TIMO,
+ ixlv_sched_aq, sc);
+ }
+
+ /* Setup vlan's if needed */
+ ixlv_setup_vlan_filters(sc);
+
+ /*
+ ** Prepare the queues for operation
+ */
+ for (int i = 0; i < vsi->num_queues; i++, que++) {
+ struct rx_ring *rxr = &que->rxr;
+
+ ixl_init_tx_ring(que);
+
+ /* Need to set mbuf size now */
+ if (vsi->max_frame_size <= 2048)
+ rxr->mbuf_sz = MCLBYTES;
+ else
+ rxr->mbuf_sz = MJUMPAGESIZE;
+ ixl_init_rx_ring(que);
+ }
+
+ /* Configure queues */
+ sc->aq_required |= IXLV_FLAG_AQ_CONFIGURE_QUEUES;
+ callout_reset(&sc->aq_task, IXLV_CALLOUT_TIMO,
+ ixlv_sched_aq, sc);
+
+ /* Set up RSS */
+ ixlv_config_rss(sc);
+
+ /* Map vectors */
+ sc->aq_required |= IXLV_FLAG_AQ_MAP_VECTORS;
+ callout_reset(&sc->aq_task, IXLV_CALLOUT_TIMO,
+ ixlv_sched_aq, sc);
+
+ /* Enable queues */
+ sc->aq_required |= IXLV_FLAG_AQ_ENABLE_QUEUES;
+ callout_reset(&sc->aq_task, IXLV_CALLOUT_TIMO,
+ ixlv_sched_aq, sc);
+
+ /* Start the local timer */
+ callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
+
+ sc->init_state = IXLV_RUNNING;
+
+init_done:
+ INIT_DBG_IF(ifp, "end");
+ return;
+}
+
+/*
+** Init entry point for the stack
+*/
+void
+ixlv_init(void *arg)
+{
+ struct ixlv_sc *sc = arg;
+
+ mtx_lock(&sc->mtx);
+ ixlv_init_locked(sc);
+ mtx_unlock(&sc->mtx);
+ return;
+}
+
+/*
+ * Allocate MSI/X vectors, setup the AQ vector early
+ */
+static int
+ixlv_init_msix(struct ixlv_sc *sc)
+{
+ device_t dev = sc->dev;
+ int rid, want, vectors, queues, available;
+
+ rid = PCIR_BAR(IXL_BAR);
+ sc->msix_mem = bus_alloc_resource_any(dev,
+ SYS_RES_MEMORY, &rid, RF_ACTIVE);
+ if (!sc->msix_mem) {
+ /* May not be enabled */
+ device_printf(sc->dev,
+		    "Unable to map MSIX table\n");
+ goto fail;
+ }
+
+ available = pci_msix_count(dev);
+ if (available == 0) { /* system has msix disabled */
+ bus_release_resource(dev, SYS_RES_MEMORY,
+ rid, sc->msix_mem);
+ sc->msix_mem = NULL;
+ goto fail;
+ }
+
+ /* Figure out a reasonable auto config value */
+ queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;
+
+ /* Override with hardcoded value if sane */
+ if ((ixlv_max_queues != 0) && (ixlv_max_queues <= queues))
+ queues = ixlv_max_queues;
+
+ /* Enforce the VF max value */
+ if (queues > IXLV_MAX_QUEUES)
+ queues = IXLV_MAX_QUEUES;
+
+ /*
+ ** Want one vector (RX/TX pair) per queue
+ ** plus an additional for the admin queue.
+ */
+ want = queues + 1;
+ if (want <= available) /* Have enough */
+ vectors = want;
+ else {
+ device_printf(sc->dev,
+ "MSIX Configuration Problem, "
+ "%d vectors available but %d wanted!\n",
+ available, want);
+ goto fail;
+ }
+
+ if (pci_alloc_msix(dev, &vectors) == 0) {
+ device_printf(sc->dev,
+ "Using MSIX interrupts with %d vectors\n", vectors);
+ sc->msix = vectors;
+ sc->vsi.num_queues = queues;
+ }
+
+ /*
+	** Explicitly set the guest PCI BUSMASTER capability, and
+	** rewrite the ENABLE bit in the MSIX control register at
+	** this point, so that the host successfully initializes us.
+ */
+ {
+ u16 pci_cmd_word;
+ int msix_ctrl;
+ pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
+ pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
+ pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
+ pci_find_cap(dev, PCIY_MSIX, &rid);
+ rid += PCIR_MSIX_CTRL;
+ msix_ctrl = pci_read_config(dev, rid, 2);
+ msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
+ pci_write_config(dev, rid, msix_ctrl, 2);
+ }
+
+ /* Next we need to setup the vector for the Admin Queue */
+ rid = 1; // zero vector + 1
+ sc->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
+ &rid, RF_SHAREABLE | RF_ACTIVE);
+ if (sc->res == NULL) {
+ device_printf(dev,"Unable to allocate"
+		    " bus resource: AQ interrupt\n");
+ goto fail;
+ }
+ if (bus_setup_intr(dev, sc->res,
+ INTR_TYPE_NET | INTR_MPSAFE, NULL,
+ ixlv_msix_adminq, sc, &sc->tag)) {
+ sc->res = NULL;
+ device_printf(dev, "Failed to register AQ handler");
+ goto fail;
+ }
+ bus_describe_intr(dev, sc->res, sc->tag, "adminq");
+
+ return (vectors);
+
+fail:
+ /* The VF driver MUST use MSIX */
+ return (0);
+}
+
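+/*
+** Sizing example for ixlv_init_msix() above (numbers are purely
+** illustrative): with mp_ncpus = 8 but only 4 MSIX vectors available,
+** queues = 3 and want = 4 (one RX/TX pair vector per queue plus one
+** for the admin queue). hw.ixlv.max_queues and IXLV_MAX_QUEUES can
+** only lower the queue count, never raise it.
+*/
+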
+static int
+ixlv_allocate_pci_resources(struct ixlv_sc *sc)
+{
+ int rid;
+ device_t dev = sc->dev;
+
+ rid = PCIR_BAR(0);
+ sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
+ &rid, RF_ACTIVE);
+
+ if (!(sc->pci_mem)) {
+ device_printf(dev,"Unable to allocate bus resource: memory\n");
+ return (ENXIO);
+ }
+
+ sc->osdep.mem_bus_space_tag =
+ rman_get_bustag(sc->pci_mem);
+ sc->osdep.mem_bus_space_handle =
+ rman_get_bushandle(sc->pci_mem);
+ sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem);
+ sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle;
+
+ sc->hw.back = &sc->osdep;
+
+ /* May need to pre-emptively disable adminq interrupts */
+ ixlv_disable_adminq_irq(&sc->hw);
+
+ /*
+	** Now set up MSI/X; it will return
+	** the number of supported vectors.
+ */
+ sc->msix = ixlv_init_msix(sc);
+
+ /* We fail without MSIX support */
+ if (sc->msix == 0)
+ return (ENXIO);
+
+ return (0);
+}
+
+static void
+ixlv_free_pci_resources(struct ixlv_sc *sc)
+{
+ struct ixl_vsi *vsi = &sc->vsi;
+ struct ixl_queue *que = vsi->queues;
+ device_t dev = sc->dev;
+
+ /* We may get here before stations are setup */
+ if (que == NULL)
+ goto early;
+
+ /*
+ ** Release all msix queue resources:
+ */
+ for (int i = 0; i < vsi->num_queues; i++, que++) {
+ int rid = que->msix + 1;
+ if (que->tag != NULL) {
+ bus_teardown_intr(dev, que->res, que->tag);
+ que->tag = NULL;
+ }
+ if (que->res != NULL)
+ bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
+ }
+
+early:
+ /* Clean the AdminQ interrupt */
+ if (sc->tag != NULL) {
+ bus_teardown_intr(dev, sc->res, sc->tag);
+ sc->tag = NULL;
+ }
+ if (sc->res != NULL)
+ bus_release_resource(dev, SYS_RES_IRQ, 1, sc->res);
+
+ pci_release_msi(dev);
+
+ if (sc->msix_mem != NULL)
+ bus_release_resource(dev, SYS_RES_MEMORY,
+ PCIR_BAR(IXL_BAR), sc->msix_mem);
+
+ if (sc->pci_mem != NULL)
+ bus_release_resource(dev, SYS_RES_MEMORY,
+ PCIR_BAR(0), sc->pci_mem);
+
+ return;
+}
+
+static int
+ixlv_init_taskqueue(struct ixlv_sc *sc)
+{
+ int error = 0;
+
+ /* Tasklet for AQ Interrupts */
+ TASK_INIT(&sc->aq_irq, 0, ixlv_do_adminq, sc);
+
+ sc->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
+ taskqueue_thread_enqueue, &sc->tq);
+ taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s sc->tq",
+ device_get_nameunit(sc->dev));
+
+ return (error);
+}
+
+/*********************************************************************
+ *
+ * Setup MSIX Interrupt resources and handlers for the VSI queues
+ *
+ **********************************************************************/
+static int
+ixlv_assign_msix(struct ixlv_sc *sc)
+{
+ device_t dev = sc->dev;
+ struct ixl_vsi *vsi = &sc->vsi;
+ struct ixl_queue *que = vsi->queues;
+ struct tx_ring *txr;
+ int error, rid, vector = 1;
+
+ for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
+ rid = vector + 1;
+ txr = &que->txr;
+ que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
+ RF_SHAREABLE | RF_ACTIVE);
+ if (que->res == NULL) {
+ device_printf(dev,"Unable to allocate"
+ " bus resource: que interrupt [%d]\n", vector);
+ return (ENXIO);
+ }
+ /* Set the handler function */
+ error = bus_setup_intr(dev, que->res,
+ INTR_TYPE_NET | INTR_MPSAFE, NULL,
+ ixlv_msix_que, que, &que->tag);
+ if (error) {
+ que->res = NULL;
+ device_printf(dev, "Failed to register que handler");
+ return (error);
+ }
+ bus_describe_intr(dev, que->res, que->tag, "que %d", i);
+ /* Bind the vector to a CPU */
+ bus_bind_intr(dev, que->res, i);
+ que->msix = vector;
+ vsi->que_mask |= (u64)(1 << que->msix);
+ TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
+ TASK_INIT(&que->task, 0, ixlv_handle_que, que);
+ que->tq = taskqueue_create_fast("ixlv_que", M_NOWAIT,
+ taskqueue_thread_enqueue, &que->tq);
+ taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
+ device_get_nameunit(sc->dev));
+ }
+
+ return (0);
+}
+
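+/*
+** Note: MSIX vector 0 is taken by the admin queue (set up in
+** ixlv_init_msix() with IRQ rid 1), so each queue pair i is given
+** vector i + 1 here, with IRQ rid = vector + 1.
+*/
+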
+/*
+** XXX: Assumes the vf's admin queue has been initialized.
+*/
+static int
+ixlv_reset(struct ixlv_sc *sc)
+{
+ struct i40e_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ int error = 0;
+
+ /* Ask the PF to reset us if we are initiating */
+ if (sc->init_state != IXLV_RESET_PENDING)
+ ixlv_request_reset(sc);
+
+ i40e_msec_delay(100);
+ error = ixlv_reset_complete(hw);
+ if (error) {
+ device_printf(dev, "%s: VF reset failed\n",
+ __func__);
+ return (error);
+ }
+
+ error = i40e_shutdown_adminq(hw);
+ if (error) {
+ device_printf(dev, "%s: shutdown_adminq failed: %d\n",
+ __func__, error);
+ return (error);
+ }
+
+ error = i40e_init_adminq(hw);
+ if (error) {
+ device_printf(dev, "%s: init_adminq failed: %d\n",
+ __func__, error);
+ return(error);
+ }
+
+ return (0);
+}
+
+static int
+ixlv_reset_complete(struct i40e_hw *hw)
+{
+ u32 reg;
+
+ for (int i = 0; i < 100; i++) {
+ reg = rd32(hw, I40E_VFGEN_RSTAT) &
+ I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+
+ if ((reg == I40E_VFR_VFACTIVE) ||
+ (reg == I40E_VFR_COMPLETED))
+ return (0);
+ i40e_usec_delay(20);
+ }
+
+ return (EBUSY);
+}
+
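+/*
+** Note: ixlv_reset_complete() polls VFGEN_RSTAT up to 100 times with
+** a 20 usec delay per try, so a VF reset gets roughly 2 ms to report
+** VFACTIVE/COMPLETED before EBUSY is returned.
+*/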
+
+/*********************************************************************
+ *
+ * Setup networking device structure and register an interface.
+ *
+ **********************************************************************/
+static int
+ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
+{
+ struct ifnet *ifp;
+ struct ixl_vsi *vsi = &sc->vsi;
+ struct ixl_queue *que = vsi->queues;
+
+ INIT_DBG_DEV(dev, "begin");
+
+ ifp = vsi->ifp = if_alloc(IFT_ETHER);
+ if (ifp == NULL) {
+ device_printf(dev, "can not allocate ifnet structure\n");
+ return (-1);
+ }
+
+ if_initname(ifp, device_get_name(dev), device_get_unit(dev));
+
+ ifp->if_mtu = ETHERMTU;
+ ifp->if_baudrate = 4000000000; // ??
+ ifp->if_init = ixlv_init;
+ ifp->if_softc = vsi;
+ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+ ifp->if_ioctl = ixlv_ioctl;
+
+ ifp->if_transmit = ixl_mq_start;
+
+ ifp->if_qflush = ixl_qflush;
+ ifp->if_snd.ifq_maxlen = que->num_desc - 2;
+
+ ether_ifattach(ifp, sc->hw.mac.addr);
+
+ vsi->max_frame_size =
+ ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
+ + ETHER_VLAN_ENCAP_LEN;
+
+ /*
+ * Tell the upper layer(s) we support long frames.
+ */
+ ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
+
+ ifp->if_capabilities |= IFCAP_HWCSUM;
+ ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
+ ifp->if_capabilities |= IFCAP_TSO;
+ ifp->if_capabilities |= IFCAP_JUMBO_MTU;
+
+ ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
+ | IFCAP_VLAN_HWTSO
+ | IFCAP_VLAN_MTU
+ | IFCAP_VLAN_HWCSUM
+ | IFCAP_LRO;
+ ifp->if_capenable = ifp->if_capabilities;
+
+ /*
+	** Don't turn this on by default: if vlans are
+	** created on another pseudo device (e.g. lagg),
+	** vlan events are not passed through, breaking
+	** operation, but with HW FILTER off it works. If
+	** you use vlans directly on the ixl driver you can
+	** enable this and get full hardware tag filtering.
+ */
+ ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
+
+ /*
+ * Specify the media types supported by this adapter and register
+ * callbacks to update media and link information
+ */
+ ifmedia_init(&sc->media, IFM_IMASK, ixlv_media_change,
+ ixlv_media_status);
+
+ // JFV Add media types later?
+
+ ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
+ ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
+
+ INIT_DBG_DEV(dev, "end");
+ return (0);
+}
+
+/*
+** Allocate and setup the interface queues
+*/
+static int
+ixlv_setup_queues(struct ixlv_sc *sc)
+{
+ device_t dev = sc->dev;
+ struct ixl_vsi *vsi;
+ struct ixl_queue *que;
+ struct tx_ring *txr;
+ struct rx_ring *rxr;
+ int rsize, tsize;
+ int error = I40E_SUCCESS;
+
+ vsi = &sc->vsi;
+ vsi->back = (void *)sc;
+ vsi->hw = &sc->hw;
+ vsi->num_vlans = 0;
+
+ /* Get memory for the station queues */
+ if (!(vsi->queues =
+ (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
+ vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ device_printf(dev, "Unable to allocate queue memory\n");
+ error = ENOMEM;
+ goto early;
+ }
+
+ for (int i = 0; i < vsi->num_queues; i++) {
+ que = &vsi->queues[i];
+ que->num_desc = ixlv_ringsz;
+ que->me = i;
+ que->vsi = vsi;
+ /* mark the queue as active */
+ vsi->active_queues |= (u64)1 << que->me;
+
+ txr = &que->txr;
+ txr->que = que;
+ txr->tail = I40E_QTX_TAIL1(que->me);
+ /* Initialize the TX lock */
+ snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
+ device_get_nameunit(dev), que->me);
+ mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
+ /*
+		** Create the TX descriptor ring; the extra u32 is
+		** added as the location for the HEAD writeback (WB).
+ */
+ tsize = roundup2((que->num_desc *
+ sizeof(struct i40e_tx_desc)) +
+ sizeof(u32), DBA_ALIGN);
+ if (i40e_allocate_dma(&sc->hw,
+ &txr->dma, tsize, DBA_ALIGN)) {
+ device_printf(dev,
+ "Unable to allocate TX Descriptor memory\n");
+ error = ENOMEM;
+ goto fail;
+ }
+ txr->base = (struct i40e_tx_desc *)txr->dma.va;
+ bzero((void *)txr->base, tsize);
+ /* Now allocate transmit soft structs for the ring */
+ if (ixl_allocate_tx_data(que)) {
+ device_printf(dev,
+ "Critical Failure setting up TX structures\n");
+ error = ENOMEM;
+ goto fail;
+ }
+ /* Allocate a buf ring */
+ txr->br = buf_ring_alloc(ixlv_txbrsz, M_DEVBUF,
+ M_WAITOK, &txr->mtx);
+ if (txr->br == NULL) {
+ device_printf(dev,
+ "Critical Failure setting up TX buf ring\n");
+ error = ENOMEM;
+ goto fail;
+ }
+
+ /*
+ * Next the RX queues...
+ */
+ rsize = roundup2(que->num_desc *
+ sizeof(union i40e_rx_desc), DBA_ALIGN);
+ rxr = &que->rxr;
+ rxr->que = que;
+ rxr->tail = I40E_QRX_TAIL1(que->me);
+
+ /* Initialize the RX side lock */
+ snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
+ device_get_nameunit(dev), que->me);
+ mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
+
+ if (i40e_allocate_dma(&sc->hw,
+ &rxr->dma, rsize, 4096)) { //JFV - should this be DBA?
+ device_printf(dev,
+ "Unable to allocate RX Descriptor memory\n");
+ error = ENOMEM;
+ goto fail;
+ }
+ rxr->base = (union i40e_rx_desc *)rxr->dma.va;
+ bzero((void *)rxr->base, rsize);
+
+ /* Allocate receive soft structs for the ring*/
+ if (ixl_allocate_rx_data(que)) {
+ device_printf(dev,
+ "Critical Failure setting up receive structs\n");
+ error = ENOMEM;
+ goto fail;
+ }
+ }
+
+ return (0);
+
+fail:
+ free(vsi->queues, M_DEVBUF);
+ for (int i = 0; i < vsi->num_queues; i++) {
+ que = &vsi->queues[i];
+ rxr = &que->rxr;
+ txr = &que->txr;
+ if (rxr->base)
+ i40e_free_dma(&sc->hw, &rxr->dma);
+ if (txr->base)
+ i40e_free_dma(&sc->hw, &txr->dma);
+ }
+
+early:
+ return (error);
+}
+
+/*
+** This routine is run via a vlan config EVENT; it
+** enables us to use the HW Filter table since we
+** can get the vlan id. This just creates the entry
+** in the soft version of the VFTA; init will
+** repopulate the real table.
+*/
+static void
+ixlv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
+{
+ struct ixl_vsi *vsi = ifp->if_softc;
+ struct ixlv_sc *sc = vsi->back;
+ struct ixlv_vlan_filter *v;
+
+
+ if (ifp->if_softc != arg) /* Not our event */
+ return;
+
+ if ((vtag == 0) || (vtag > 4095)) /* Invalid */
+ return;
+
+ /* Sanity check - make sure it doesn't already exist */
+ SLIST_FOREACH(v, sc->vlan_filters, next) {
+ if (v->vlan == vtag)
+ return;
+ }
+
+ mtx_lock(&sc->mtx);
+ ++vsi->num_vlans;
+ v = malloc(sizeof(struct ixlv_vlan_filter), M_DEVBUF, M_NOWAIT | M_ZERO);
+ SLIST_INSERT_HEAD(sc->vlan_filters, v, next);
+ v->vlan = vtag;
+ v->flags = IXL_FILTER_ADD;
+ sc->aq_required |= IXLV_FLAG_AQ_ADD_VLAN_FILTER;
+ mtx_unlock(&sc->mtx);
+ return;
+}
+
+/*
+** This routine is run via a vlan
+** unconfig EVENT, removing our entry
+** from the soft vfta.
+*/
+static void
+ixlv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
+{
+ struct ixl_vsi *vsi = ifp->if_softc;
+ struct ixlv_sc *sc = vsi->back;
+ struct ixlv_vlan_filter *v;
+ int i = 0;
+
+ if (ifp->if_softc != arg)
+ return;
+
+ if ((vtag == 0) || (vtag > 4095)) /* Invalid */
+ return;
+
+ mtx_lock(&sc->mtx);
+ SLIST_FOREACH(v, sc->vlan_filters, next) {
+ if (v->vlan == vtag) {
+ v->flags = IXL_FILTER_DEL;
+ ++i;
+ --vsi->num_vlans;
+ }
+ }
+ if (i)
+ sc->aq_required |= IXLV_FLAG_AQ_DEL_VLAN_FILTER;
+ mtx_unlock(&sc->mtx);
+ return;
+}
+
+/*
+** Get a new filter and add it to the mac filter list.
+*/
+static struct ixlv_mac_filter *
+ixlv_get_mac_filter(struct ixlv_sc *sc)
+{
+ struct ixlv_mac_filter *f;
+
+ f = malloc(sizeof(struct ixlv_mac_filter), M_DEVBUF, M_NOWAIT | M_ZERO);
+ SLIST_INSERT_HEAD(sc->mac_filters, f, next);
+
+ return (f);
+}
+
+/*
+** Find the filter with matching MAC address
+*/
+static struct ixlv_mac_filter *
+ixlv_find_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
+{
+ struct ixlv_mac_filter *f;
+ bool match = FALSE;
+
+ SLIST_FOREACH(f, sc->mac_filters, next) {
+ if (cmp_etheraddr(f->macaddr, macaddr)) {
+ match = TRUE;
+ break;
+ }
+ }
+
+ if (!match)
+ f = NULL;
+ return (f);
+}
+
+/*
+** Admin Queue interrupt handler
+*/
+static void
+ixlv_msix_adminq(void *arg)
+{
+ struct ixlv_sc *sc = arg;
+ struct i40e_hw *hw = &sc->hw;
+ u32 reg, mask;
+
+ reg = rd32(hw, I40E_VFINT_ICR01);
+ mask = rd32(hw, I40E_VFINT_ICR0_ENA1);
+
+ reg = rd32(hw, I40E_VFINT_DYN_CTL01);
+ reg |= I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
+ wr32(hw, I40E_VFINT_DYN_CTL01, reg);
+
+ /* re-enable interrupt causes */
+ wr32(hw, I40E_VFINT_ICR0_ENA1, mask);
+ wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK);
+
+ /* schedule task */
+ taskqueue_enqueue(sc->tq, &sc->aq_irq);
+ return;
+}
+
+void
+ixlv_enable_intr(struct ixl_vsi *vsi)
+{
+ struct i40e_hw *hw = vsi->hw;
+ struct ixl_queue *que = vsi->queues;
+
+ ixlv_enable_adminq_irq(hw);
+ for (int i = 0; i < vsi->num_queues; i++, que++)
+ ixlv_enable_queue_irq(hw, que->me);
+}
+
+void
+ixlv_disable_intr(struct ixl_vsi *vsi)
+{
+ struct i40e_hw *hw = vsi->hw;
+ struct ixl_queue *que = vsi->queues;
+
+ ixlv_disable_adminq_irq(hw);
+ for (int i = 0; i < vsi->num_queues; i++, que++)
+ ixlv_disable_queue_irq(hw, que->me);
+}
+
+
+static void
+ixlv_disable_adminq_irq(struct i40e_hw *hw)
+{
+ wr32(hw, I40E_VFINT_DYN_CTL01, 0);
+ wr32(hw, I40E_VFINT_ICR0_ENA1, 0);
+ /* flush */
+ rd32(hw, I40E_VFGEN_RSTAT);
+ return;
+}
+
+static void
+ixlv_enable_adminq_irq(struct i40e_hw *hw)
+{
+ wr32(hw, I40E_VFINT_DYN_CTL01,
+ I40E_VFINT_DYN_CTL01_INTENA_MASK |
+ I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
+ wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA_ADMINQ_MASK);
+ /* flush */
+ rd32(hw, I40E_VFGEN_RSTAT);
+ return;
+}
+
+static void
+ixlv_enable_queue_irq(struct i40e_hw *hw, int id)
+{
+ u32 reg;
+
+ reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+ I40E_VFINT_DYN_CTLN_CLEARPBA_MASK;
+ wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg);
+}
+
+static void
+ixlv_disable_queue_irq(struct i40e_hw *hw, int id)
+{
+ wr32(hw, I40E_VFINT_DYN_CTLN1(id), 0);
+ rd32(hw, I40E_VFGEN_RSTAT);
+ return;
+}
+
+
+/*
+** Provide an update to the queue RX
+** interrupt moderation value.
+*/
+static void
+ixlv_set_queue_rx_itr(struct ixl_queue *que)
+{
+ struct ixl_vsi *vsi = que->vsi;
+ struct i40e_hw *hw = vsi->hw;
+ struct rx_ring *rxr = &que->rxr;
+ u16 rx_itr;
+ u16 rx_latency = 0;
+ int rx_bytes;
+
+
+ /* Idle, do nothing */
+ if (rxr->bytes == 0)
+ return;
+
+ if (ixlv_dynamic_rx_itr) {
+ rx_bytes = rxr->bytes/rxr->itr;
+ rx_itr = rxr->itr;
+
+ /* Adjust latency range */
+ switch (rxr->latency) {
+ case IXL_LOW_LATENCY:
+ if (rx_bytes > 10) {
+ rx_latency = IXL_AVE_LATENCY;
+ rx_itr = IXL_ITR_20K;
+ }
+ break;
+ case IXL_AVE_LATENCY:
+ if (rx_bytes > 20) {
+ rx_latency = IXL_BULK_LATENCY;
+ rx_itr = IXL_ITR_8K;
+ } else if (rx_bytes <= 10) {
+ rx_latency = IXL_LOW_LATENCY;
+ rx_itr = IXL_ITR_100K;
+ }
+ break;
+ case IXL_BULK_LATENCY:
+ if (rx_bytes <= 20) {
+ rx_latency = IXL_AVE_LATENCY;
+ rx_itr = IXL_ITR_20K;
+ }
+ break;
+ }
+
+ rxr->latency = rx_latency;
+
+ if (rx_itr != rxr->itr) {
+ /* do an exponential smoothing */
+ rx_itr = (10 * rx_itr * rxr->itr) /
+ ((9 * rx_itr) + rxr->itr);
+ rxr->itr = rx_itr & IXL_MAX_ITR;
+ wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
+ que->me), rxr->itr);
+ }
+	} else { /* We may have toggled to non-dynamic */
+ if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
+ vsi->rx_itr_setting = ixlv_rx_itr;
+ /* Update the hardware if needed */
+ if (rxr->itr != vsi->rx_itr_setting) {
+ rxr->itr = vsi->rx_itr_setting;
+ wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
+ que->me), rxr->itr);
+ }
+ }
+ rxr->bytes = 0;
+ rxr->packets = 0;
+ return;
+}
+
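+/*
+** The "exponential smoothing" above blends the old and target ITR as
+** (10 * target * old) / (9 * target + old), which equals the target
+** when they already match. For example (illustrative values), an old
+** ITR of 1000 with a target of 100 blends to roughly 526 before the
+** IXL_MAX_ITR mask is applied.
+*/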
+
+/*
+** Provide an update to the queue TX
+** interrupt moderation value.
+*/
+static void
+ixlv_set_queue_tx_itr(struct ixl_queue *que)
+{
+ struct ixl_vsi *vsi = que->vsi;
+ struct i40e_hw *hw = vsi->hw;
+ struct tx_ring *txr = &que->txr;
+ u16 tx_itr;
+ u16 tx_latency = 0;
+ int tx_bytes;
+
+
+ /* Idle, do nothing */
+ if (txr->bytes == 0)
+ return;
+
+ if (ixlv_dynamic_tx_itr) {
+ tx_bytes = txr->bytes/txr->itr;
+ tx_itr = txr->itr;
+
+ switch (txr->latency) {
+ case IXL_LOW_LATENCY:
+ if (tx_bytes > 10) {
+ tx_latency = IXL_AVE_LATENCY;
+ tx_itr = IXL_ITR_20K;
+ }
+ break;
+ case IXL_AVE_LATENCY:
+ if (tx_bytes > 20) {
+ tx_latency = IXL_BULK_LATENCY;
+ tx_itr = IXL_ITR_8K;
+ } else if (tx_bytes <= 10) {
+ tx_latency = IXL_LOW_LATENCY;
+ tx_itr = IXL_ITR_100K;
+ }
+ break;
+ case IXL_BULK_LATENCY:
+ if (tx_bytes <= 20) {
+ tx_latency = IXL_AVE_LATENCY;
+ tx_itr = IXL_ITR_20K;
+ }
+ break;
+ }
+
+ txr->latency = tx_latency;
+
+ if (tx_itr != txr->itr) {
+ /* do an exponential smoothing */
+ tx_itr = (10 * tx_itr * txr->itr) /
+ ((9 * tx_itr) + txr->itr);
+ txr->itr = tx_itr & IXL_MAX_ITR;
+ wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
+ que->me), txr->itr);
+ }
+
+	} else { /* We may have toggled to non-dynamic */
+ if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
+ vsi->tx_itr_setting = ixlv_tx_itr;
+ /* Update the hardware if needed */
+ if (txr->itr != vsi->tx_itr_setting) {
+ txr->itr = vsi->tx_itr_setting;
+ wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
+ que->me), txr->itr);
+ }
+ }
+ txr->bytes = 0;
+ txr->packets = 0;
+ return;
+}
+
+
+/*
+**
+** MSIX Interrupt Handlers and Tasklets
+**
+*/
+static void
+ixlv_handle_que(void *context, int pending)
+{
+ struct ixl_queue *que = context;
+ struct ixl_vsi *vsi = que->vsi;
+ struct i40e_hw *hw = vsi->hw;
+ struct tx_ring *txr = &que->txr;
+ struct ifnet *ifp = vsi->ifp;
+ bool more;
+
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ more = ixl_rxeof(que, IXL_RX_LIMIT);
+ mtx_lock(&txr->mtx);
+ ixl_txeof(que);
+ if (!drbr_empty(ifp, txr->br))
+ ixl_mq_start_locked(ifp, txr);
+ mtx_unlock(&txr->mtx);
+ if (more) {
+ taskqueue_enqueue(que->tq, &que->task);
+ return;
+ }
+ }
+
+ /* Reenable this interrupt - hmmm */
+ ixlv_enable_queue_irq(hw, que->me);
+ return;
+}
+
+
+/*********************************************************************
+ *
+ * MSIX Queue Interrupt Service routine
+ *
+ **********************************************************************/
+static void
+ixlv_msix_que(void *arg)
+{
+ struct ixl_queue *que = arg;
+ struct ixl_vsi *vsi = que->vsi;
+ struct i40e_hw *hw = vsi->hw;
+ struct tx_ring *txr = &que->txr;
+ bool more_tx, more_rx;
+
+ /* Spurious interrupts are ignored */
+ if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
+ return;
+
+ ++que->irqs;
+
+ more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
+
+ mtx_lock(&txr->mtx);
+ more_tx = ixl_txeof(que);
+ /*
+ ** Make certain that if the stack
+ ** has anything queued the task gets
+ ** scheduled to handle it.
+ */
+ if (!drbr_empty(vsi->ifp, txr->br))
+ more_tx = 1;
+ mtx_unlock(&txr->mtx);
+
+ ixlv_set_queue_rx_itr(que);
+ ixlv_set_queue_tx_itr(que);
+
+ if (more_tx || more_rx)
+ taskqueue_enqueue(que->tq, &que->task);
+ else
+ ixlv_enable_queue_irq(hw, que->me);
+
+ return;
+}
+
+
+/*********************************************************************
+ *
+ * Media Ioctl callback
+ *
+ * This routine is called whenever the user queries the status of
+ * the interface using ifconfig.
+ *
+ **********************************************************************/
+static void
+ixlv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
+{
+ struct ixl_vsi *vsi = ifp->if_softc;
+ struct ixlv_sc *sc = vsi->back;
+
+ INIT_DBG_IF(ifp, "begin");
+
+ mtx_lock(&sc->mtx);
+
+ ixlv_update_link_status(sc);
+
+ ifmr->ifm_status = IFM_AVALID;
+ ifmr->ifm_active = IFM_ETHER;
+
+ if (!vsi->link_up) {
+ mtx_unlock(&sc->mtx);
+ INIT_DBG_IF(ifp, "end: link not up");
+ return;
+ }
+
+ ifmr->ifm_status |= IFM_ACTIVE;
+ /* Hardware is always full-duplex */
+ ifmr->ifm_active |= IFM_FDX;
+ mtx_unlock(&sc->mtx);
+ INIT_DBG_IF(ifp, "end");
+ return;
+}
+
+/*********************************************************************
+ *
+ * Media Ioctl callback
+ *
+ * This routine is called when the user changes speed/duplex using
+ * the media/mediaopt options with ifconfig.
+ *
+ **********************************************************************/
+static int
+ixlv_media_change(struct ifnet * ifp)
+{
+ struct ixl_vsi *vsi = ifp->if_softc;
+ struct ifmedia *ifm = &vsi->media;
+
+ INIT_DBG_IF(ifp, "begin");
+
+ if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
+ return (EINVAL);
+
+ INIT_DBG_IF(ifp, "end");
+ return (0);
+}
+
+
+/*********************************************************************
+ * Multicast Initialization
+ *
+ * This routine is called by init to reset to a fresh state.
+ *
+ **********************************************************************/
+
+static void
+ixlv_init_multi(struct ixl_vsi *vsi)
+{
+ struct ixlv_mac_filter *f;
+ struct ixlv_sc *sc = vsi->back;
+ int mcnt = 0;
+
+ IOCTL_DBG_IF(vsi->ifp, "begin");
+
+ /* First clear any multicast filters */
+ SLIST_FOREACH(f, sc->mac_filters, next) {
+ if ((f->flags & IXL_FILTER_USED)
+ && (f->flags & IXL_FILTER_MC)) {
+ f->flags |= IXL_FILTER_DEL;
+ mcnt++;
+ }
+ }
+ if (mcnt > 0)
+ sc->aq_required |= IXLV_FLAG_AQ_DEL_MAC_FILTER;
+
+ IOCTL_DBG_IF(vsi->ifp, "end");
+}
+
+static void
+ixlv_add_multi(struct ixl_vsi *vsi)
+{
+ struct ifmultiaddr *ifma;
+ struct ifnet *ifp = vsi->ifp;
+ struct ixlv_sc *sc = vsi->back;
+ int mcnt = 0;
+
+ IOCTL_DBG_IF(ifp, "begin");
+
+ if_maddr_rlock(ifp);
+ /*
+ ** Get a count, to decide if we
+ ** simply use multicast promiscuous.
+ */
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_LINK)
+ continue;
+ mcnt++;
+ }
+ if_maddr_runlock(ifp);
+
+ if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
+ /* delete all multicast filters */
+ ixlv_init_multi(vsi);
+ sc->promiscuous_flags |= I40E_FLAG_VF_MULTICAST_PROMISC;
+ sc->aq_required |= IXLV_FLAG_AQ_CONFIGURE_PROMISC;
+ IOCTL_DEBUGOUT("%s: end: too many filters", __func__);
+ return;
+ }
+
+ mcnt = 0;
+ if_maddr_rlock(ifp);
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_LINK)
+ continue;
+ if (!ixlv_add_mac_filter(sc,
+ (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
+ IXL_FILTER_MC))
+ mcnt++;
+ }
+ if_maddr_runlock(ifp);
+ /*
+ ** Notify AQ task that sw filters need to be
+ ** added to hw list
+ */
+ if (mcnt > 0)
+ sc->aq_required |= IXLV_FLAG_AQ_ADD_MAC_FILTER;
+
+ IOCTL_DBG_IF(ifp, "end");
+}
+
+static void
+ixlv_del_multi(struct ixl_vsi *vsi)
+{
+ struct ixlv_mac_filter *f;
+ struct ifmultiaddr *ifma;
+ struct ifnet *ifp = vsi->ifp;
+ struct ixlv_sc *sc = vsi->back;
+ int mcnt = 0;
+ bool match = FALSE;
+
+ IOCTL_DBG_IF(ifp, "begin");
+
+ /* Search for removed multicast addresses */
+ if_maddr_rlock(ifp);
+ SLIST_FOREACH(f, sc->mac_filters, next) {
+ if ((f->flags & IXL_FILTER_USED)
+ && (f->flags & IXL_FILTER_MC)) {
+ /* check if mac address in filter is in sc's list */
+ match = FALSE;
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_LINK)
+ continue;
+ u8 *mc_addr =
+ (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
+ if (cmp_etheraddr(f->macaddr, mc_addr)) {
+ match = TRUE;
+ break;
+ }
+ }
+ /* if this filter is not in the sc's list, remove it */
+ if (match == FALSE && !(f->flags & IXL_FILTER_DEL)) {
+ f->flags |= IXL_FILTER_DEL;
+ mcnt++;
+ IOCTL_DBG_IF(ifp, "marked: " MAC_FORMAT,
+ MAC_FORMAT_ARGS(f->macaddr));
+ }
+ else if (match == FALSE)
+ IOCTL_DBG_IF(ifp, "exists: " MAC_FORMAT,
+ MAC_FORMAT_ARGS(f->macaddr));
+ }
+ }
+ if_maddr_runlock(ifp);
+
+ if (mcnt > 0)
+ sc->aq_required |= IXLV_FLAG_AQ_DEL_MAC_FILTER;
+
+ IOCTL_DBG_IF(ifp, "end");
+}
+
+/*********************************************************************
+ * Timer routine
+ *
+ * This routine checks for link status, updates statistics,
+ * and runs the watchdog check.
+ *
+ **********************************************************************/
+
+static void
+ixlv_local_timer(void *arg)
+{
+ struct ixlv_sc *sc = arg;
+ struct i40e_hw *hw = &sc->hw;
+ struct ixl_vsi *vsi = &sc->vsi;
+ struct ixl_queue *que = vsi->queues;
+ device_t dev = sc->dev;
+ int hung = 0;
+ u32 mask, val, oldval;
+
+ mtx_assert(&sc->mtx, MA_OWNED);
+
+ /* If Reset is in progress just bail */
+ if (sc->init_state == IXLV_RESET_PENDING)
+ return;
+
+ /* Check for when PF triggers a VF reset */
+ val = rd32(hw, I40E_VFGEN_RSTAT) &
+ I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+
+ if (val != I40E_VFR_VFACTIVE
+ && val != I40E_VFR_COMPLETED) {
+#ifdef IXL_DEBUG
+ device_printf(dev, "%s: reset in progress! (%d)\n",
+ __func__, val);
+#endif
+ return;
+ }
+
+ /* check for Admin queue errors */
+ val = rd32(hw, hw->aq.arq.len);
+ oldval = val;
+ if (val & I40E_VF_ARQLEN_ARQVFE_MASK) {
+ device_printf(dev, "ARQ VF Error detected\n");
+ val &= ~I40E_VF_ARQLEN_ARQVFE_MASK;
+ }
+ if (val & I40E_VF_ARQLEN_ARQOVFL_MASK) {
+ device_printf(dev, "ARQ Overflow Error detected\n");
+ val &= ~I40E_VF_ARQLEN_ARQOVFL_MASK;
+ }
+ if (val & I40E_VF_ARQLEN_ARQCRIT_MASK) {
+ device_printf(dev, "ARQ Critical Error detected\n");
+ val &= ~I40E_VF_ARQLEN_ARQCRIT_MASK;
+ }
+ if (oldval != val)
+ wr32(hw, hw->aq.arq.len, val);
+
+ val = rd32(hw, hw->aq.asq.len);
+ oldval = val;
+ if (val & I40E_VF_ATQLEN_ATQVFE_MASK) {
+ device_printf(dev, "ASQ VF Error detected\n");
+ val &= ~I40E_VF_ATQLEN_ATQVFE_MASK;
+ }
+ if (val & I40E_VF_ATQLEN_ATQOVFL_MASK) {
+ device_printf(dev, "ASQ Overflow Error detected\n");
+ val &= ~I40E_VF_ATQLEN_ATQOVFL_MASK;
+ }
+ if (val & I40E_VF_ATQLEN_ATQCRIT_MASK) {
+ device_printf(dev, "ASQ Critical Error detected\n");
+ val &= ~I40E_VF_ATQLEN_ATQCRIT_MASK;
+ }
+ if (oldval != val)
+ wr32(hw, hw->aq.asq.len, val);
+
+ /* clean and process any events */
+ taskqueue_enqueue(sc->tq, &sc->aq_irq);
+
+ /*
+ ** Check status on the queues for a hang
+ */
+ mask = (I40E_VFINT_DYN_CTLN_INTENA_MASK |
+ I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK);
+
+ for (int i = 0; i < vsi->num_queues; i++,que++) {
+ /* Any queues with outstanding work get a sw irq */
+ if (que->busy)
+ wr32(hw, I40E_VFINT_DYN_CTLN1(que->me), mask);
+ /*
+		** Each time txeof runs without cleaning while there
+		** are uncleaned descriptors, it increments busy. Once
+		** busy reaches IXL_MAX_TX_BUSY the queue is declared hung.
+ */
+ if (que->busy == IXL_QUEUE_HUNG) {
+ ++hung;
+ /* Mark the queue as inactive */
+ vsi->active_queues &= ~((u64)1 << que->me);
+ continue;
+ } else {
+ /* Check if we've come back from hung */
+ if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
+ vsi->active_queues |= ((u64)1 << que->me);
+ }
+ if (que->busy >= IXL_MAX_TX_BUSY) {
+ device_printf(dev,"Warning queue %d "
+ "appears to be hung!\n", i);
+ que->busy = IXL_QUEUE_HUNG;
+ ++hung;
+ }
+ }
+ /* Only reset when all queues show hung */
+ if (hung == vsi->num_queues)
+ goto hung;
+ callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
+ return;
+
+hung:
+ device_printf(dev, "Local Timer: TX HANG DETECTED - Resetting!!\n");
+ sc->init_state = IXLV_RESET_REQUIRED;
+ ixlv_init_locked(sc);
+}
+
+/*
+** Note: this routine updates the OS on the link state;
+** the real check of the hardware only happens with
+** a link interrupt.
+*/
+static void
+ixlv_update_link_status(struct ixlv_sc *sc)
+{
+ struct ixl_vsi *vsi = &sc->vsi;
+ struct ifnet *ifp = vsi->ifp;
+ device_t dev = sc->dev;
+
+ if (vsi->link_up){
+ if (vsi->link_active == FALSE) {
+ if (bootverbose)
+ device_printf(dev,"Link is Up, %d Gbps\n",
+ (vsi->link_speed == I40E_LINK_SPEED_40GB) ? 40:10);
+ vsi->link_active = TRUE;
+ if_link_state_change(ifp, LINK_STATE_UP);
+ }
+ } else { /* Link down */
+ if (vsi->link_active == TRUE) {
+ if (bootverbose)
+ device_printf(dev,"Link is Down\n");
+ if_link_state_change(ifp, LINK_STATE_DOWN);
+ vsi->link_active = FALSE;
+ }
+ }
+
+ return;
+}
+
+/*********************************************************************
+ *
+ * This routine disables all traffic on the adapter by issuing a
+ * global reset on the MAC and deallocates TX/RX buffers.
+ *
+ **********************************************************************/
+
+static void
+ixlv_stop(struct ixlv_sc *sc)
+{
+	mtx_assert(&sc->mtx, MA_OWNED);
+
+	INIT_DBG_IF(sc->vsi.ifp, "begin");
+
+ sc->aq_required |= IXLV_FLAG_AQ_DISABLE_QUEUES;
+ callout_reset(&sc->aq_task, IXLV_CALLOUT_TIMO,
+ ixlv_sched_aq, sc);
+
+ /* Stop the local timer */
+ callout_stop(&sc->timer);
+
+	INIT_DBG_IF(sc->vsi.ifp, "end");
+}
+
+
+/*********************************************************************
+ *
+ * Free all station queue structs.
+ *
+ **********************************************************************/
+static void
+ixlv_free_queues(struct ixl_vsi *vsi)
+{
+ struct ixlv_sc *sc = (struct ixlv_sc *)vsi->back;
+ struct ixl_queue *que = vsi->queues;
+
+ for (int i = 0; i < vsi->num_queues; i++, que++) {
+ struct tx_ring *txr = &que->txr;
+ struct rx_ring *rxr = &que->rxr;
+
+ if (!mtx_initialized(&txr->mtx)) /* uninitialized */
+ continue;
+ IXL_TX_LOCK(txr);
+ ixl_free_que_tx(que);
+ if (txr->base)
+ i40e_free_dma(&sc->hw, &txr->dma);
+ IXL_TX_UNLOCK(txr);
+ IXL_TX_LOCK_DESTROY(txr);
+
+ if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
+ continue;
+ IXL_RX_LOCK(rxr);
+ ixl_free_que_rx(que);
+ if (rxr->base)
+ i40e_free_dma(&sc->hw, &rxr->dma);
+ IXL_RX_UNLOCK(rxr);
+ IXL_RX_LOCK_DESTROY(rxr);
+
+ }
+ free(vsi->queues, M_DEVBUF);
+}
+
+
+/*
+** ixlv_config_rss - setup RSS
+*/
+static void
+ixlv_config_rss(struct ixlv_sc *sc)
+{
+ struct i40e_hw *hw = &sc->hw;
+ struct ixl_vsi *vsi = &sc->vsi;
+ u32 lut = 0;
+ u64 set_hena, hena;
+ int i, j;
+
+ /* set up random bits */
+ static const u32 seed[I40E_VFQF_HKEY_MAX_INDEX + 1] = {
+ 0x794221b4, 0xbca0c5ab, 0x6cd5ebd9, 0x1ada6127,
+ 0x983b3aa1, 0x1c4e71eb, 0x7f6328b2, 0xfcdc0da0,
+ 0xc135cafa, 0x7a6f7e2d, 0xe7102d28, 0x163cd12e,
+ 0x4954b126 };
+
+ /* Fill out hash function seed */
+ for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
+ wr32(hw, I40E_VFQF_HKEY(i), seed[i]);
+
+ /* Enable PCTYPES for RSS: */
+ set_hena =
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
+ ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
+
+ hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
+ ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
+ hena |= set_hena;
+ wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
+ wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
+
+	/* Populate the LUT with the available queues in round-robin fashion */
+ for (i = j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
+ if (j == vsi->num_queues)
+ j = 0;
+ /* lut = 4-byte sliding window of 4 lut entries */
+ lut = (lut << 8) | (j &
+ ((0x1 << hw->func_caps.rss_table_entry_width) - 1));
+ /* On i = 3, we have 4 entries in lut; write to the register */
+ if ((i & 3) == 3)
+ wr32(hw, I40E_VFQF_HLUT(i >> 2), lut);
+ }
+ ixl_flush(hw);
+}
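
The LUT fill loop above packs four queue indices, one byte apiece, into each
32-bit VFQF_HLUT register. A minimal userland sketch of the same packing
follows; the queue count, table size, and 4-bit entry width are assumed
example values for illustration, not values read from the hardware.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	const unsigned num_queues = 4;	/* assumed VF queue count */
	const unsigned table_size = 64;	/* assumed RSS table size */
	uint32_t lut = 0;

	for (unsigned i = 0, j = 0; i < table_size; i++, j++) {
		if (j == num_queues)
			j = 0;
		/* 8-bit sliding window over 4-bit queue indices */
		lut = (lut << 8) | (j & 0xF);
		if ((i & 3) == 3)	/* four entries accumulated */
			printf("HLUT[%u] = 0x%08x\n", i >> 2, lut);
	}
	return (0);
}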
+
+
+/*
+** This routine refreshes vlan filters; called by init,
+** it scans the filter table and then updates the AQ.
+*/
+static void
+ixlv_setup_vlan_filters(struct ixlv_sc *sc)
+{
+ struct ixl_vsi *vsi = &sc->vsi;
+ struct ixlv_vlan_filter *f;
+ int cnt = 0;
+
+ if (vsi->num_vlans == 0)
+ return;
+ /*
+ ** Scan the filter table for vlan entries,
+ ** and if found call for the AQ update.
+ */
+ SLIST_FOREACH(f, sc->vlan_filters, next)
+ if (f->flags & IXL_FILTER_ADD)
+ cnt++;
+ if (cnt == 0)
+ return;
+
+ sc->aq_required |= IXLV_FLAG_AQ_ADD_VLAN_FILTER;
+ return;
+}
+
+
+/*
+** This routine adds new MAC filters to the sc's list;
+** these are later added in hardware by the periodic
+** aq task.
+*/
+static int
+ixlv_add_mac_filter(struct ixlv_sc *sc, u8 *macaddr, u16 flags)
+{
+ struct ixlv_mac_filter *f;
+ device_t dev = sc->dev;
+
+ /* Does one already exist? */
+ f = ixlv_find_mac_filter(sc, macaddr);
+ if (f != NULL) {
+ IDPRINTF(sc->vsi.ifp, "exists: " MAC_FORMAT,
+ MAC_FORMAT_ARGS(macaddr));
+ return (EEXIST);
+ }
+
+ /* If not, get a new empty filter */
+ f = ixlv_get_mac_filter(sc);
+ if (f == NULL) {
+ device_printf(dev, "%s: no filters available!!\n",
+ __func__);
+ return (ENOMEM);
+ }
+
+ IDPRINTF(sc->vsi.ifp, "marked: " MAC_FORMAT,
+ MAC_FORMAT_ARGS(macaddr));
+
+ bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
+ f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
+ f->flags |= flags;
+ return (0);
+}
+
+/*
+** Tasklet handler for MSIX Adminq interrupts
+** - done outside interrupt context since it might sleep
+*/
+static void
+ixlv_do_adminq(void *context, int pending)
+{
+ struct ixlv_sc *sc = context;
+ struct i40e_hw *hw = &sc->hw;
+ struct i40e_arq_event_info event;
+ struct i40e_virtchnl_msg *v_msg;
+ i40e_status ret;
+ u16 result = 0;
+
+
+ event.buf_len = IXL_AQ_BUF_SZ;
+ event.msg_buf = malloc(event.buf_len,
+ M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (!event.msg_buf) {
+ printf("Unable to allocate adminq memory\n");
+ return;
+ }
+ v_msg = (struct i40e_virtchnl_msg *)&event.desc;
+
+ mtx_lock(&sc->mtx);
+ /* clean and process any events */
+ do {
+ ret = i40e_clean_arq_element(hw, &event, &result);
+ if (ret)
+ break;
+ ixlv_vc_completion(sc, v_msg->v_opcode,
+ v_msg->v_retval, event.msg_buf, event.msg_len);
+ if (result != 0)
+ bzero(event.msg_buf, IXL_AQ_BUF_SZ);
+ } while (result);
+
+ ixlv_enable_adminq_irq(hw);
+ free(event.msg_buf, M_DEVBUF);
+ mtx_unlock(&sc->mtx);
+ return;
+}
+
+/*
+** ixlv_sched_aq - Periodic scheduling tasklet
+**
+*/
+static void
+ixlv_sched_aq(void *context)
+{
+ struct ixlv_sc *sc = context;
+ struct ixl_vsi *vsi = &sc->vsi;
+
+ /* This is driven by a callout, don't spin */
+ if (!mtx_trylock(&sc->mtx))
+ goto done_nolock;
+
+ if (sc->init_state == IXLV_RESET_PENDING)
+ goto done;
+
+ /* Process requested admin queue tasks */
+ if (sc->aq_pending)
+ goto done;
+
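+	/*
+	** Only one request is issued per pass; the order of the
+	** checks below sets the relative priority of the pending
+	** admin queue operations.
+	*/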
+ if (sc->aq_required & IXLV_FLAG_AQ_MAP_VECTORS) {
+ ixlv_map_queues(sc);
+ goto done;
+ }
+
+ if (sc->aq_required & IXLV_FLAG_AQ_ADD_MAC_FILTER) {
+ ixlv_add_ether_filters(sc);
+ goto done;
+ }
+
+ if (sc->aq_required & IXLV_FLAG_AQ_ADD_VLAN_FILTER) {
+ ixlv_add_vlans(sc);
+ goto done;
+ }
+
+ if (sc->aq_required & IXLV_FLAG_AQ_DEL_MAC_FILTER) {
+ ixlv_del_ether_filters(sc);
+ goto done;
+ }
+
+ if (sc->aq_required & IXLV_FLAG_AQ_DEL_VLAN_FILTER) {
+ ixlv_del_vlans(sc);
+ goto done;
+ }
+
+ if (sc->aq_required & IXLV_FLAG_AQ_CONFIGURE_QUEUES) {
+ ixlv_configure_queues(sc);
+ goto done;
+ }
+
+ if (sc->aq_required & IXLV_FLAG_AQ_DISABLE_QUEUES) {
+ ixlv_disable_queues(sc);
+ goto done;
+ }
+
+ if (sc->aq_required & IXLV_FLAG_AQ_ENABLE_QUEUES) {
+ ixlv_enable_queues(sc);
+ goto done;
+ }
+
+ /* Do stats request only if no other AQ operations requested */
+ if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
+ ixlv_request_stats(sc);
+
+done:
+ mtx_unlock(&sc->mtx);
+done_nolock:
+ if (sc->aq_required) /* Reschedule */
+ callout_reset(&sc->aq_task, IXLV_CALLOUT_TIMO,
+ ixlv_sched_aq, sc);
+ else
+ callout_reset(&sc->aq_task, 2 * hz, ixlv_sched_aq, sc);
+}
+
+static void
+ixlv_add_stats_sysctls(struct ixlv_sc *sc)
+{
+ device_t dev = sc->dev;
+ struct ixl_vsi *vsi = &sc->vsi;
+ struct i40e_eth_stats *es = &vsi->eth_stats;
+
+ struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
+ struct sysctl_oid *tree = device_get_sysctl_tree(dev);
+ struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
+
+ struct sysctl_oid *vsi_node, *queue_node;
+ struct sysctl_oid_list *vsi_list, *queue_list;
+
+#define QUEUE_NAME_LEN 32
+ char queue_namebuf[QUEUE_NAME_LEN];
+
+ struct ixl_queue *queues = vsi->queues;
+ struct tx_ring *txr;
+ struct rx_ring *rxr;
+
+ /* Driver statistics */
+ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
+ CTLFLAG_RD, &sc->watchdog_events,
+ "Watchdog timeouts");
+ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
+ CTLFLAG_RD, &sc->admin_irq,
+ "Admin Queue IRQ Handled");
+
+ /* VSI statistics */
+ vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
+ CTLFLAG_RD, NULL, "VSI-specific statistics");
+ vsi_list = SYSCTL_CHILDREN(vsi_node);
+
+ struct ixl_sysctl_info ctls[] =
+ {
+ {&es->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
+ {&es->rx_unicast, "ucast_pkts_rcvd",
+ "Unicast Packets Received"},
+ {&es->rx_multicast, "mcast_pkts_rcvd",
+ "Multicast Packets Received"},
+ {&es->rx_broadcast, "bcast_pkts_rcvd",
+ "Broadcast Packets Received"},
+ {&es->rx_discards, "rx_discards", "Discarded RX packets"},
+ {&es->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
+ {&es->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
+ {&es->tx_multicast, "mcast_pkts_txd",
+ "Multicast Packets Transmitted"},
+ {&es->tx_broadcast, "bcast_pkts_txd",
+ "Broadcast Packets Transmitted"},
+ {&es->tx_discards, "tx_discards", "Discarded TX packets"},
+ // end
+ {0,0,0}
+ };
+ struct ixl_sysctl_info *entry = ctls;
+ while (entry->stat != 0)
+ {
+ SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
+ CTLFLAG_RD, entry->stat,
+ entry->description);
+ entry++;
+ }
+
+ /* Queue statistics */
+ for (int q = 0; q < vsi->num_queues; q++) {
+ snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
+ queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf,
+ CTLFLAG_RD, NULL, "Queue Name");
+ queue_list = SYSCTL_CHILDREN(queue_node);
+
+ txr = &(queues[q].txr);
+ rxr = &(queues[q].rxr);
+
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
+ CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
+ "m_defrag() failed");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
+ CTLFLAG_RD, &(queues[q].dropped_pkts),
+ "Driver dropped packets");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
+ CTLFLAG_RD, &(queues[q].irqs),
+ "irqs on this queue");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
+ CTLFLAG_RD, &(queues[q].tso),
+ "TSO");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
+ CTLFLAG_RD, &(queues[q].tx_dma_setup),
+ "Driver tx dma failure in xmit");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
+ CTLFLAG_RD, &(txr->no_desc),
+ "Queue No Descriptor Available");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
+ CTLFLAG_RD, &(txr->total_packets),
+ "Queue Packets Transmitted");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
+ CTLFLAG_RD, &(txr->tx_bytes),
+ "Queue Bytes Transmitted");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
+ CTLFLAG_RD, &(rxr->rx_packets),
+ "Queue Packets Received");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
+ CTLFLAG_RD, &(rxr->rx_bytes),
+ "Queue Bytes Received");
+ }
+}
+
+static void
+ixlv_init_filters(struct ixlv_sc *sc)
+{
+ sc->mac_filters = malloc(sizeof(struct ixlv_mac_filter),
+ M_DEVBUF, M_NOWAIT | M_ZERO);
+ SLIST_INIT(sc->mac_filters);
+ sc->vlan_filters = malloc(sizeof(struct ixlv_vlan_filter),
+ M_DEVBUF, M_NOWAIT | M_ZERO);
+ SLIST_INIT(sc->vlan_filters);
+ return;
+}
+
+static void
+ixlv_free_filters(struct ixlv_sc *sc)
+{
+ struct ixlv_mac_filter *f;
+ struct ixlv_vlan_filter *v;
+
+ while (!SLIST_EMPTY(sc->mac_filters)) {
+ f = SLIST_FIRST(sc->mac_filters);
+ SLIST_REMOVE_HEAD(sc->mac_filters, next);
+ free(f, M_DEVBUF);
+ }
+ while (!SLIST_EMPTY(sc->vlan_filters)) {
+ v = SLIST_FIRST(sc->vlan_filters);
+ SLIST_REMOVE_HEAD(sc->vlan_filters, next);
+ free(v, M_DEVBUF);
+ }
+ return;
+}
+
diff --git a/sys/dev/ixl/ixl.h b/sys/dev/ixl/ixl.h
new file mode 100644
index 0000000..25e6d27
--- /dev/null
+++ b/sys/dev/ixl/ixl.h
@@ -0,0 +1,559 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2014, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+
+#ifndef _IXL_H_
+#define _IXL_H_
+
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/buf_ring.h>
+#include <sys/mbuf.h>
+#include <sys/protosw.h>
+#include <sys/socket.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/sockio.h>
+
+#include <net/if.h>
+#include <net/if_arp.h>
+#include <net/bpf.h>
+#include <net/ethernet.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+
+#include <net/bpf.h>
+#include <net/if_types.h>
+#include <net/if_vlan_var.h>
+
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+#include <netinet/if_ether.h>
+#include <netinet/ip.h>
+#include <netinet/ip6.h>
+#include <netinet/tcp.h>
+#include <netinet/tcp_lro.h>
+#include <netinet/udp.h>
+#include <netinet/sctp.h>
+
+#include <machine/in_cksum.h>
+
+#include <sys/bus.h>
+#include <machine/bus.h>
+#include <sys/rman.h>
+#include <machine/resource.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <machine/clock.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcireg.h>
+#include <sys/proc.h>
+#include <sys/sysctl.h>
+#include <sys/endian.h>
+#include <sys/taskqueue.h>
+#include <sys/pcpu.h>
+#include <sys/smp.h>
+#include <machine/smp.h>
+
+#include "i40e_type.h"
+#include "i40e_prototype.h"
+
+#ifdef IXL_DEBUG
+#include <sys/sbuf.h>
+
+#define MAC_FORMAT "%02x:%02x:%02x:%02x:%02x:%02x"
+#define MAC_FORMAT_ARGS(mac_addr) \
+ (mac_addr)[0], (mac_addr)[1], (mac_addr)[2], (mac_addr)[3], \
+ (mac_addr)[4], (mac_addr)[5]
+#define ON_OFF_STR(is_set) ((is_set) ? "On" : "Off")
+
+
+#define _DBG_PRINTF(S, ...) printf("%s: " S "\n", __func__, ##__VA_ARGS__)
+#define _DEV_DBG_PRINTF(dev, S, ...) device_printf(dev, "%s: " S "\n", __func__, ##__VA_ARGS__)
+#define _IF_DBG_PRINTF(ifp, S, ...) if_printf(ifp, "%s: " S "\n", __func__, ##__VA_ARGS__)
+
+/* Defines for printing generic debug information */
+#define DPRINTF(...) _DBG_PRINTF(__VA_ARGS__)
+#define DDPRINTF(...) _DEV_DBG_PRINTF(__VA_ARGS__)
+#define IDPRINTF(...) _IF_DBG_PRINTF(__VA_ARGS__)
+
+/* Defines for printing specific debug information */
+#define DEBUG_INIT 1
+#define DEBUG_IOCTL 1
+#define DEBUG_HW 1
+
+#define INIT_DEBUGOUT(...) if (DEBUG_INIT) _DBG_PRINTF(__VA_ARGS__)
+#define INIT_DBG_DEV(...) if (DEBUG_INIT) _DEV_DBG_PRINTF(__VA_ARGS__)
+#define INIT_DBG_IF(...) if (DEBUG_INIT) _IF_DBG_PRINTF(__VA_ARGS__)
+
+#define IOCTL_DEBUGOUT(...) if (DEBUG_IOCTL) _DBG_PRINTF(__VA_ARGS__)
+#define IOCTL_DBG_IF2(ifp, S, ...) if (DEBUG_IOCTL) \
+ if_printf(ifp, S "\n", ##__VA_ARGS__)
+#define IOCTL_DBG_IF(...) if (DEBUG_IOCTL) _IF_DBG_PRINTF(__VA_ARGS__)
+
+#define HW_DEBUGOUT(...) if (DEBUG_HW) _DBG_PRINTF(__VA_ARGS__)
+
+#else
+#define DEBUG_INIT 0
+#define DEBUG_IOCTL 0
+#define DEBUG_HW 0
+
+#define DPRINTF(...)
+#define DDPRINTF(...)
+#define IDPRINTF(...)
+
+#define INIT_DEBUGOUT(...)
+#define INIT_DBG_DEV(...)
+#define INIT_DBG_IF(...)
+#define IOCTL_DEBUGOUT(...)
+#define IOCTL_DBG_IF2(...)
+#define IOCTL_DBG_IF(...)
+#define HW_DEBUGOUT(...)
+#endif
+
+/* Tunables */
+
+/*
+ * Ring Descriptors, Valid Range: 32-4096, Default Value: 1024.
+ * This value is the number of tx/rx descriptors allocated by the driver.
+ * Increasing this value allows the driver to queue more operations.
+ * Each descriptor is 16 or 32 bytes (configurable in FVL).
+ */
+#define DEFAULT_RING 1024
+#define PERFORM_RING 2048
+#define MAX_RING 4096
+#define MIN_RING 32
+
+/*
+** Default number of entries in Tx queue buf_ring.
+*/
+#define DEFAULT_TXBRSZ (4096 * 4096)
+
+/* Alignment for rings */
+#define DBA_ALIGN 128
+
+/*
+ * This parameter controls the maximum number of times the driver will
+ * loop in the ISR. Minimum value: 1.
+ */
+#define MAX_LOOP 10
+
+/*
+ * This is the max watchdog interval, i.e. the time that can
+ * pass between any two TX clean operations, which only happen
+ * while the TX hardware is functioning.
+ */
+#define IXL_WATCHDOG (10 * hz)
+
+/*
+ * These parameters control when the driver calls the routine to reclaim
+ * transmit descriptors.
+ */
+#define IXL_TX_CLEANUP_THRESHOLD (que->num_desc / 8)
+#define IXL_TX_OP_THRESHOLD (que->num_desc / 32)
+
+/* Flow control constants */
+#define IXL_FC_PAUSE 0xFFFF
+#define IXL_FC_HI 0x20000
+#define IXL_FC_LO 0x10000
+
+#define MAX_MULTICAST_ADDR 128
+
+#define IXL_BAR 3
+#define IXL_ADM_LIMIT 2
+#define IXL_TSO_SIZE 65535
+#define IXL_TX_BUF_SZ ((u32) 1514)
+#define IXL_AQ_BUF_SZ ((u32) 4096)
+#define IXL_RX_HDR 128
+#define IXL_AQ_LEN 256
+#define IXL_AQ_BUFSZ 4096
+#define IXL_RX_LIMIT 512
+#define IXL_RX_ITR 0
+#define IXL_TX_ITR 1
+#define IXL_ITR_NONE 3
+#define IXL_QUEUE_EOL 0x7FF
+#define IXL_MAX_FRAME 0x2600
+#define IXL_MAX_TX_SEGS 8
+#define IXL_MAX_TSO_SEGS 66
+#define IXL_SPARSE_CHAIN 6
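+/* Sentinel stored in que->busy once a queue has been declared hung */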
+#define IXL_QUEUE_HUNG 0x80000000
+
+/* ERJ: hardware can support ~1.5k filters between all functions */
+#define IXL_MAX_FILTERS 256
+#define IXL_MAX_TX_BUSY 10
+
+#define IXL_NVM_VERSION_LO_SHIFT 0
+#define IXL_NVM_VERSION_LO_MASK (0xff << IXL_NVM_VERSION_LO_SHIFT)
+#define IXL_NVM_VERSION_HI_SHIFT 12
+#define IXL_NVM_VERSION_HI_MASK (0xf << IXL_NVM_VERSION_HI_SHIFT)
+
+
+/*
+ * Interrupt Moderation parameters
+ */
+#define IXL_MAX_ITR 0x07FF
+#define IXL_ITR_100K 0x0005
+#define IXL_ITR_20K 0x0019
+#define IXL_ITR_8K 0x003E
+#define IXL_ITR_4K 0x007A
+#define IXL_ITR_DYNAMIC 0x8000
+#define IXL_LOW_LATENCY 0
+#define IXL_AVE_LATENCY 1
+#define IXL_BULK_LATENCY 2
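
Assuming the usual 2-microsecond ITR granularity on this hardware (an
assumption, not stated in this header), the named values above map to
approximate interrupt ceilings as this standalone sketch computes:

#include <stdio.h>

int
main(void)
{
	/* ITR register values from above; each unit assumed to be 2 us */
	const unsigned itr[] = { 0x0005, 0x0019, 0x003E, 0x007A };
	const char *name[] = { "100K", "20K", "8K", "4K" };

	for (int i = 0; i < 4; i++)
		printf("IXL_ITR_%s: %u us -> ~%u intr/s\n",
		    name[i], itr[i] * 2, 1000000 / (itr[i] * 2));
	return (0);
}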
+
+/* MacVlan Flags */
+#define IXL_FILTER_USED (u16)(1 << 0)
+#define IXL_FILTER_VLAN (u16)(1 << 1)
+#define IXL_FILTER_ADD (u16)(1 << 2)
+#define IXL_FILTER_DEL (u16)(1 << 3)
+#define IXL_FILTER_MC (u16)(1 << 4)
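+/*
+** A filter entry is marked USED when allocated, ADD or DEL while an
+** admin queue update for it is pending, and MC/VLAN to note its type.
+*/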
+
+/* used in the vlan field of the filter when not a vlan */
+#define IXL_VLAN_ANY -1
+
+#define CSUM_OFFLOAD_IPV4 (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP)
+#define CSUM_OFFLOAD_IPV6 (CSUM_TCP_IPV6|CSUM_UDP_IPV6|CSUM_SCTP_IPV6)
+#define CSUM_OFFLOAD (CSUM_OFFLOAD_IPV4|CSUM_OFFLOAD_IPV6|CSUM_TSO)
+
+/* Misc flags for ixl_vsi.flags */
+#define IXL_FLAGS_KEEP_TSO4 (1 << 0)
+#define IXL_FLAGS_KEEP_TSO6 (1 << 1)
+
+#define IXL_TX_LOCK(_sc) mtx_lock(&(_sc)->mtx)
+#define IXL_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->mtx)
+#define IXL_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->mtx)
+#define IXL_TX_TRYLOCK(_sc) mtx_trylock(&(_sc)->mtx)
+#define IXL_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->mtx, MA_OWNED)
+
+#define IXL_RX_LOCK(_sc) mtx_lock(&(_sc)->mtx)
+#define IXL_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->mtx)
+#define IXL_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->mtx)
+
+/*
+ *****************************************************************************
+ * vendor_info_array
+ *
+ * This array contains the list of Subvendor/Subdevice IDs on which the driver
+ * should load.
+ *
+ *****************************************************************************
+ */
+typedef struct _ixl_vendor_info_t {
+ unsigned int vendor_id;
+ unsigned int device_id;
+ unsigned int subvendor_id;
+ unsigned int subdevice_id;
+ unsigned int index;
+} ixl_vendor_info_t;
+
+
+struct ixl_tx_buf {
+ u32 eop_index;
+ struct mbuf *m_head;
+ bus_dmamap_t map;
+ bus_dma_tag_t tag;
+};
+
+struct ixl_rx_buf {
+ struct mbuf *m_head;
+ struct mbuf *m_pack;
+ struct mbuf *fmp;
+ bus_dmamap_t hmap;
+ bus_dmamap_t pmap;
+#ifdef DEV_NETMAP
+ u64 addr;
+#endif
+};
+
+/*
+** This struct has multiple uses: multicast
+** addresses, vlans, and mac filters all use it.
+*/
+struct ixl_mac_filter {
+ SLIST_ENTRY(ixl_mac_filter) next;
+ u8 macaddr[ETHER_ADDR_LEN];
+ s16 vlan;
+ u16 flags;
+};
+
+
+/*
+ * The Transmit ring control struct
+ */
+struct tx_ring {
+ struct ixl_queue *que;
+ struct mtx mtx;
+ u32 tail;
+ struct i40e_tx_desc *base;
+ struct i40e_dma_mem dma;
+ u16 next_avail;
+ u16 next_to_clean;
+ u16 atr_rate;
+ u16 atr_count;
+ u16 itr;
+ u16 latency;
+ struct ixl_tx_buf *buffers;
+ volatile u16 avail;
+ u32 cmd;
+ bus_dma_tag_t tx_tag;
+ bus_dma_tag_t tso_tag;
+ char mtx_name[16];
+ struct buf_ring *br;
+
+ /* Used for Dynamic ITR calculation */
+ u32 packets;
+ u32 bytes;
+
+ /* Soft Stats */
+ u64 tx_bytes;
+ u64 no_desc;
+ u64 total_packets;
+};
+
+
+/*
+ * The Receive ring control struct
+ */
+struct rx_ring {
+ struct ixl_queue *que;
+ struct mtx mtx;
+ union i40e_rx_desc *base;
+ struct i40e_dma_mem dma;
+ struct lro_ctrl lro;
+ bool lro_enabled;
+ bool hdr_split;
+ bool discard;
+ u16 next_refresh;
+ u16 next_check;
+ u16 itr;
+ u16 latency;
+ char mtx_name[16];
+ struct ixl_rx_buf *buffers;
+ u32 mbuf_sz;
+ u32 tail;
+ bus_dma_tag_t htag;
+ bus_dma_tag_t ptag;
+
+ /* Used for Dynamic ITR calculation */
+ u32 packets;
+ u32 bytes;
+
+ /* Soft stats */
+ u64 split;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 discarded;
+ u64 not_done;
+};
+
+/*
+** Driver queue struct: this is the interrupt container
+** for the associated tx and rx ring pair.
+*/
+struct ixl_queue {
+ struct ixl_vsi *vsi;
+ u32 me;
+ u32 msix; /* This queue's MSIX vector */
+ u32 eims; /* This queue's EIMS bit */
+ struct resource *res;
+ void *tag;
+ int num_desc; /* both tx and rx */
+ int busy;
+ struct tx_ring txr;
+ struct rx_ring rxr;
+ struct task task;
+ struct task tx_task;
+ struct taskqueue *tq;
+
+ /* Queue stats */
+ u64 irqs;
+ u64 tso;
+ u64 mbuf_defrag_failed;
+ u64 mbuf_hdr_failed;
+ u64 mbuf_pkt_failed;
+ u64 tx_map_avail;
+ u64 tx_dma_setup;
+ u64 dropped_pkts;
+};
+
+/*
+** Virtual Station interface:
+** there would be one of these per traffic class/type;
+** for now there is just one, and it is embedded in the pf.
+*/
+SLIST_HEAD(ixl_ftl_head, ixl_mac_filter);
+struct ixl_vsi {
+ void *back;
+ struct ifnet *ifp;
+ struct device *dev;
+ struct i40e_hw *hw;
+ struct ifmedia media;
+ u64 que_mask;
+ int id;
+ u16 msix_base; /* station base MSIX vector */
+ u16 num_queues;
+ u16 rx_itr_setting;
+ u16 tx_itr_setting;
+ struct ixl_queue *queues; /* head of queues */
+ bool link_active;
+ u16 seid;
+ u16 max_frame_size;
+ u32 link_speed;
+ bool link_up;
+ u32 fc; /* local flow ctrl setting */
+
+ /* MAC/VLAN Filter list */
+ struct ixl_ftl_head ftl;
+
+ struct i40e_aqc_vsi_properties_data info;
+
+ eventhandler_tag vlan_attach;
+ eventhandler_tag vlan_detach;
+ u16 num_vlans;
+
+ /* Per-VSI stats from hardware */
+ struct i40e_eth_stats eth_stats;
+ struct i40e_eth_stats eth_stats_offsets;
+ bool stat_offsets_loaded;
+
+ /* Driver statistics */
+ u64 hw_filters_del;
+ u64 hw_filters_add;
+
+ /* Misc. */
+ u64 active_queues;
+ u64 flags;
+};
+
+/*
+** Find the number of unrefreshed RX descriptors
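+** (the "- 1" keeps the refresh index at least one slot behind
+** next_check, so a fully refreshed ring is never confused with
+** an empty one when the indices wrap)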
+*/
+static inline u16
+ixl_rx_unrefreshed(struct ixl_queue *que)
+{
+ struct rx_ring *rxr = &que->rxr;
+
+ if (rxr->next_check > rxr->next_refresh)
+ return (rxr->next_check - rxr->next_refresh - 1);
+ else
+ return ((que->num_desc + rxr->next_check) -
+ rxr->next_refresh - 1);
+}
+
+/*
+** Allocate a new filter entry and insert it into the VSI filter list
+*/
+static inline struct ixl_mac_filter *
+ixl_get_filter(struct ixl_vsi *vsi)
+{
+ struct ixl_mac_filter *f;
+
+ /* create a new empty filter */
+ f = malloc(sizeof(struct ixl_mac_filter),
+ M_DEVBUF, M_NOWAIT | M_ZERO);
+ SLIST_INSERT_HEAD(&vsi->ftl, f, next);
+
+ return (f);
+}
+
+/*
+** Compare two ethernet addresses
+*/
+static inline bool
+cmp_etheraddr(u8 *ea1, u8 *ea2)
+{
+ bool cmp = FALSE;
+
+ if ((ea1[0] == ea2[0]) && (ea1[1] == ea2[1]) &&
+ (ea1[2] == ea2[2]) && (ea1[3] == ea2[3]) &&
+ (ea1[4] == ea2[4]) && (ea1[5] == ea2[5]))
+ cmp = TRUE;
+
+ return (cmp);
+}
+
+/*
+ * Info for stats sysctls
+ */
+struct ixl_sysctl_info {
+ u64 *stat;
+ char *name;
+ char *description;
+};
+
+extern int ixl_atr_rate;
+
+/*
+** ixl_fw_version_str - format the FW and NVM version strings
+*/
+static inline char *
+ixl_fw_version_str(struct i40e_hw *hw)
+{
+ static char buf[32];
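+	/* note: formats into a static buffer, so the result is not reentrant */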
+
+ snprintf(buf, sizeof(buf),
+ "f%d.%d a%d.%d n%02x.%02x e%08x",
+ hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
+ hw->aq.api_maj_ver, hw->aq.api_min_ver,
+ (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
+ IXL_NVM_VERSION_HI_SHIFT,
+ (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
+ IXL_NVM_VERSION_LO_SHIFT,
+ hw->nvm.eetrack);
+ return buf;
+}
+
+/*********************************************************************
+ * TXRX Function prototypes
+ *********************************************************************/
+int ixl_allocate_tx_data(struct ixl_queue *);
+int ixl_allocate_rx_data(struct ixl_queue *);
+void ixl_init_tx_ring(struct ixl_queue *);
+int ixl_init_rx_ring(struct ixl_queue *);
+bool ixl_rxeof(struct ixl_queue *, int);
+bool ixl_txeof(struct ixl_queue *);
+int ixl_mq_start(struct ifnet *, struct mbuf *);
+int ixl_mq_start_locked(struct ifnet *, struct tx_ring *);
+void ixl_deferred_mq_start(void *, int);
+void ixl_qflush(struct ifnet *);
+void ixl_free_vsi(struct ixl_vsi *);
+void ixl_free_que_tx(struct ixl_queue *);
+void ixl_free_que_rx(struct ixl_queue *);
+#ifdef IXL_FDIR
+void ixl_atr(struct ixl_queue *, struct tcphdr *, int);
+#endif
+
+#endif /* _IXL_H_ */
diff --git a/sys/dev/ixl/ixl_pf.h b/sys/dev/ixl/ixl_pf.h
new file mode 100644
index 0000000..055c54f
--- /dev/null
+++ b/sys/dev/ixl/ixl_pf.h
@@ -0,0 +1,96 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2014, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+
+#ifndef _IXL_PF_H_
+#define _IXL_PF_H_
+
+/* Physical controller structure */
+struct ixl_pf {
+ struct i40e_hw hw;
+ struct i40e_osdep osdep;
+ struct device *dev;
+
+ struct resource *pci_mem;
+ struct resource *msix_mem;
+
+ /*
+ * Interrupt resources: this set is
+ * either used for legacy, or for Link
+ * when doing MSIX
+ */
+ void *tag;
+ struct resource *res;
+
+ struct callout timer;
+ int msix;
+ int if_flags;
+
+ struct mtx pf_mtx;
+
+ u32 qbase;
+ u32 admvec;
+ struct task adminq;
+ struct taskqueue *tq;
+
+ int advertised_speed;
+
+ /*
+ ** VSI - Stations:
+ ** These are the traffic class holders, and
+ ** will have a stack interface and queues
+ ** associated with them.
+ ** NOTE: for now using just one, so embed it.
+ */
+ struct ixl_vsi vsi;
+
+ /* Misc stats maintained by the driver */
+ u64 watchdog_events;
+ u64 admin_irq;
+
+ /* Statistics from hw */
+ struct i40e_hw_port_stats stats;
+ struct i40e_hw_port_stats stats_offsets;
+ bool stat_offsets_loaded;
+};
+
+
+#define IXL_PF_LOCK_INIT(_sc, _name) \
+ mtx_init(&(_sc)->pf_mtx, _name, "IXL PF Lock", MTX_DEF)
+#define IXL_PF_LOCK(_sc) mtx_lock(&(_sc)->pf_mtx)
+#define IXL_PF_UNLOCK(_sc) mtx_unlock(&(_sc)->pf_mtx)
+#define IXL_PF_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->pf_mtx)
+#define IXL_PF_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->pf_mtx, MA_OWNED)
+
+#endif /* _IXL_PF_H_ */
diff --git a/sys/dev/ixl/ixl_txrx.c b/sys/dev/ixl/ixl_txrx.c
new file mode 100755
index 0000000..80678ca
--- /dev/null
+++ b/sys/dev/ixl/ixl_txrx.c
@@ -0,0 +1,1696 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2014, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+/*
+** IXL driver TX/RX Routines:
+** This was separated to allow usage by
+** both the BASE and the VF drivers.
+*/
+
+#include "opt_inet.h"
+#include "opt_inet6.h"
+#include "ixl.h"
+
+/* Local Prototypes */
+static void ixl_rx_checksum(struct mbuf *, u32, u32, u8);
+static void ixl_refresh_mbufs(struct ixl_queue *, int);
+static int ixl_xmit(struct ixl_queue *, struct mbuf **);
+static int ixl_tx_setup_offload(struct ixl_queue *,
+ struct mbuf *, u32 *, u32 *);
+static bool ixl_tso_setup(struct ixl_queue *, struct mbuf *);
+
+static __inline void ixl_rx_discard(struct rx_ring *, int);
+static __inline void ixl_rx_input(struct rx_ring *, struct ifnet *,
+ struct mbuf *, u8);
+
+/*
+** Multiqueue Transmit driver
+**
+*/
+int
+ixl_mq_start(struct ifnet *ifp, struct mbuf *m)
+{
+ struct ixl_vsi *vsi = ifp->if_softc;
+ struct ixl_queue *que;
+ struct tx_ring *txr;
+ int err, i;
+
+ /* Which queue to use */
+ if ((m->m_flags & M_FLOWID) != 0)
+ i = m->m_pkthdr.flowid % vsi->num_queues;
+ else
+ i = curcpu % vsi->num_queues;
+
+ /* Check for a hung queue and pick alternative */
+ if (((1 << i) & vsi->active_queues) == 0)
+ i = ffsl(vsi->active_queues);
+
+ que = &vsi->queues[i];
+ txr = &que->txr;
+
+ err = drbr_enqueue(ifp, txr->br, m);
+ if (err)
+ return(err);
+ if (IXL_TX_TRYLOCK(txr)) {
+ ixl_mq_start_locked(ifp, txr);
+ IXL_TX_UNLOCK(txr);
+ } else
+ taskqueue_enqueue(que->tq, &que->tx_task);
+
+ return (0);
+}
+
+int
+ixl_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr)
+{
+ struct ixl_queue *que = txr->que;
+ struct ixl_vsi *vsi = que->vsi;
+ struct mbuf *next;
+ int err = 0;
+
+
+ if (((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) ||
+ vsi->link_active == 0)
+ return (ENETDOWN);
+
+ /* Process the transmit queue */
+ while ((next = drbr_peek(ifp, txr->br)) != NULL) {
+ if ((err = ixl_xmit(que, &next)) != 0) {
+ if (next == NULL)
+ drbr_advance(ifp, txr->br);
+ else
+ drbr_putback(ifp, txr->br, next);
+ break;
+ }
+ drbr_advance(ifp, txr->br);
+ /* Send a copy of the frame to the BPF listener */
+ ETHER_BPF_MTAP(ifp, next);
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ break;
+ }
+
+ if (txr->avail < IXL_TX_CLEANUP_THRESHOLD)
+ ixl_txeof(que);
+
+ return (err);
+}
+
+/*
+ * Called from a taskqueue to drain queued transmit packets.
+ */
+void
+ixl_deferred_mq_start(void *arg, int pending)
+{
+ struct ixl_queue *que = arg;
+ struct tx_ring *txr = &que->txr;
+ struct ixl_vsi *vsi = que->vsi;
+ struct ifnet *ifp = vsi->ifp;
+
+ IXL_TX_LOCK(txr);
+ if (!drbr_empty(ifp, txr->br))
+ ixl_mq_start_locked(ifp, txr);
+ IXL_TX_UNLOCK(txr);
+}
+
+/*
+** Flush all queue ring buffers
+*/
+void
+ixl_qflush(struct ifnet *ifp)
+{
+ struct ixl_vsi *vsi = ifp->if_softc;
+
+ for (int i = 0; i < vsi->num_queues; i++) {
+ struct ixl_queue *que = &vsi->queues[i];
+ struct tx_ring *txr = &que->txr;
+ struct mbuf *m;
+ IXL_TX_LOCK(txr);
+ while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
+ m_freem(m);
+ IXL_TX_UNLOCK(txr);
+ }
+ if_qflush(ifp);
+}
+
+/*
+** Find mbuf chains passed to the driver
+** that are 'sparse', using more than 8
+** mbufs to deliver an mss-size chunk of data
+*/
+static inline bool
+ixl_tso_detect_sparse(struct mbuf *mp)
+{
+ struct mbuf *m;
+ int num = 0, mss;
+ bool ret = FALSE;
+
+ mss = mp->m_pkthdr.tso_segsz;
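+	/* Count how many mbufs it takes to cover one MSS of payload */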
+ for (m = mp->m_next; m != NULL; m = m->m_next) {
+ num++;
+ mss -= m->m_len;
+ if (mss < 1)
+ break;
+ if (m->m_next == NULL)
+ break;
+ }
+ if (num > IXL_SPARSE_CHAIN)
+ ret = TRUE;
+
+ return (ret);
+}
+
+
+/*********************************************************************
+ *
+ * This routine maps the mbufs to tx descriptors, allowing the
+ * TX engine to transmit the packets.
+ * - return 0 on success, positive on failure
+ *
+ **********************************************************************/
+#define IXL_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
+
+static int
+ixl_xmit(struct ixl_queue *que, struct mbuf **m_headp)
+{
+ struct ixl_vsi *vsi = que->vsi;
+ struct i40e_hw *hw = vsi->hw;
+ struct tx_ring *txr = &que->txr;
+ struct ixl_tx_buf *buf;
+ struct i40e_tx_desc *txd = NULL;
+ struct mbuf *m_head, *m;
+ int i, j, error, nsegs, maxsegs;
+ int first, last = 0;
+ u16 vtag = 0;
+ u32 cmd, off;
+ bus_dmamap_t map;
+ bus_dma_tag_t tag;
+ bus_dma_segment_t segs[IXL_MAX_TSO_SEGS];
+
+
+ cmd = off = 0;
+ m_head = *m_headp;
+
+ /*
+ * Important to capture the first descriptor
+ * used because it will contain the index of
+ * the one we tell the hardware to report back
+ */
+ first = txr->next_avail;
+ buf = &txr->buffers[first];
+ map = buf->map;
+ tag = txr->tx_tag;
+ maxsegs = IXL_MAX_TX_SEGS;
+
+ if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
+ /* Use larger mapping for TSO */
+ tag = txr->tso_tag;
+ maxsegs = IXL_MAX_TSO_SEGS;
+ if (ixl_tso_detect_sparse(m_head)) {
+ m = m_defrag(m_head, M_NOWAIT);
+			if (m != NULL)	/* keep original chain on failure */
+				*m_headp = m;
+ }
+ }
+
+ /*
+ * Map the packet for DMA.
+ */
+ error = bus_dmamap_load_mbuf_sg(tag, map,
+ *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
+
+ if (error == EFBIG) {
+ struct mbuf *m;
+
+ m = m_collapse(*m_headp, M_NOWAIT, maxsegs);
+ if (m == NULL) {
+ que->mbuf_defrag_failed++;
+ m_freem(*m_headp);
+ *m_headp = NULL;
+ return (ENOBUFS);
+ }
+ *m_headp = m;
+
+ /* Try it again */
+ error = bus_dmamap_load_mbuf_sg(tag, map,
+ *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
+
+ if (error == ENOMEM) {
+ que->tx_dma_setup++;
+ return (error);
+ } else if (error != 0) {
+ que->tx_dma_setup++;
+ m_freem(*m_headp);
+ *m_headp = NULL;
+ return (error);
+ }
+ } else if (error == ENOMEM) {
+ que->tx_dma_setup++;
+ return (error);
+ } else if (error != 0) {
+ que->tx_dma_setup++;
+ m_freem(*m_headp);
+ *m_headp = NULL;
+ return (error);
+ }
+
+ /* Make certain there are enough descriptors */
+ if (nsegs > txr->avail - 2) {
+ txr->no_desc++;
+ error = ENOBUFS;
+ goto xmit_fail;
+ }
+ m_head = *m_headp;
+
+ /* Set up the TSO/CSUM offload */
+ if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) {
+ error = ixl_tx_setup_offload(que, m_head, &cmd, &off);
+ if (error)
+ goto xmit_fail;
+ }
+
+ cmd |= I40E_TX_DESC_CMD_ICRC;
+ /* Grab the VLAN tag */
+ if (m_head->m_flags & M_VLANTAG) {
+ cmd |= I40E_TX_DESC_CMD_IL2TAG1;
+ vtag = htole16(m_head->m_pkthdr.ether_vtag);
+ }
+
+ i = txr->next_avail;
+ for (j = 0; j < nsegs; j++) {
+ bus_size_t seglen;
+
+ buf = &txr->buffers[i];
+ buf->tag = tag; /* Keep track of the type tag */
+ txd = &txr->base[i];
+ seglen = segs[j].ds_len;
+
+ txd->buffer_addr = htole64(segs[j].ds_addr);
+ txd->cmd_type_offset_bsz =
+ htole64(I40E_TX_DESC_DTYPE_DATA
+ | ((u64)cmd << I40E_TXD_QW1_CMD_SHIFT)
+ | ((u64)off << I40E_TXD_QW1_OFFSET_SHIFT)
+ | ((u64)seglen << I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
+ | ((u64)vtag << I40E_TXD_QW1_L2TAG1_SHIFT));
+
+ last = i; /* descriptor that will get completion IRQ */
+
+ if (++i == que->num_desc)
+ i = 0;
+
+ buf->m_head = NULL;
+ buf->eop_index = -1;
+ }
+ /* Set the last descriptor for report */
+ txd->cmd_type_offset_bsz |=
+ htole64(((u64)IXL_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT));
+ txr->avail -= nsegs;
+ txr->next_avail = i;
+
+ buf->m_head = m_head;
+ /* Swap the dma map between the first and last descriptor */
+ txr->buffers[first].map = buf->map;
+ buf->map = map;
+ bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE);
+
+ /* Set the index of the descriptor that will be marked done */
+ buf = &txr->buffers[first];
+ buf->eop_index = last;
+
+ bus_dmamap_sync(txr->dma.tag, txr->dma.map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ /*
+	 * Advance the Transmit Descriptor Tail (TDT); this tells the
+ * hardware that this frame is available to transmit.
+ */
+ ++txr->total_packets;
+ wr32(hw, txr->tail, i);
+
+ ixl_flush(hw);
+ /* Mark outstanding work */
+ if (que->busy == 0)
+ que->busy = 1;
+ return (0);
+
+xmit_fail:
+ bus_dmamap_unload(tag, buf->map);
+ return (error);
+}
+
+
+/*********************************************************************
+ *
+ * Allocate memory for tx_buffer structures. The tx_buffer stores all
+ * the information needed to transmit a packet on the wire. This is
+ * called only once at attach, setup is done every reset.
+ *
+ **********************************************************************/
+int
+ixl_allocate_tx_data(struct ixl_queue *que)
+{
+ struct tx_ring *txr = &que->txr;
+ struct ixl_vsi *vsi = que->vsi;
+ device_t dev = vsi->dev;
+ struct ixl_tx_buf *buf;
+ int error = 0;
+
+ /*
+ * Setup DMA descriptor areas.
+ */
+ if ((error = bus_dma_tag_create(NULL, /* parent */
+ 1, 0, /* alignment, bounds */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ IXL_TSO_SIZE, /* maxsize */
+ IXL_MAX_TX_SEGS, /* nsegments */
+ PAGE_SIZE, /* maxsegsize */
+ 0, /* flags */
+ NULL, /* lockfunc */
+ NULL, /* lockfuncarg */
+ &txr->tx_tag))) {
+ device_printf(dev,"Unable to allocate TX DMA tag\n");
+ goto fail;
+ }
+
+ /* Make a special tag for TSO */
+ if ((error = bus_dma_tag_create(NULL, /* parent */
+ 1, 0, /* alignment, bounds */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ IXL_TSO_SIZE, /* maxsize */
+ IXL_MAX_TSO_SEGS, /* nsegments */
+ PAGE_SIZE, /* maxsegsize */
+ 0, /* flags */
+ NULL, /* lockfunc */
+ NULL, /* lockfuncarg */
+ &txr->tso_tag))) {
+ device_printf(dev,"Unable to allocate TX TSO DMA tag\n");
+ goto fail;
+ }
+
+ if (!(txr->buffers =
+ (struct ixl_tx_buf *) malloc(sizeof(struct ixl_tx_buf) *
+ que->num_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ device_printf(dev, "Unable to allocate tx_buffer memory\n");
+ error = ENOMEM;
+ goto fail;
+ }
+
+ /* Create the descriptor buffer default dma maps */
+ buf = txr->buffers;
+ for (int i = 0; i < que->num_desc; i++, buf++) {
+ buf->tag = txr->tx_tag;
+ error = bus_dmamap_create(buf->tag, 0, &buf->map);
+ if (error != 0) {
+ device_printf(dev, "Unable to create TX DMA map\n");
+ goto fail;
+ }
+ }
+fail:
+ return (error);
+}
+
+
+/*********************************************************************
+ *
+ * (Re)Initialize a queue transmit ring.
+ * - called by init, it clears the descriptor ring,
+ * and frees any stale mbufs
+ *
+ **********************************************************************/
+void
+ixl_init_tx_ring(struct ixl_queue *que)
+{
+ struct tx_ring *txr = &que->txr;
+ struct ixl_tx_buf *buf;
+#ifdef DEV_NETMAP
+ struct ixl_vsi *vsi = que->vsi;
+ struct netmap_adapter *na = NA(vsi->ifp);
+ struct netmap_slot *slot;
+#endif /* DEV_NETMAP */
+
+ /* Clear the old ring contents */
+ IXL_TX_LOCK(txr);
+#ifdef DEV_NETMAP
+ slot = netmap_reset(na, NR_TX, que->me, 0);
+#endif
+ bzero((void *)txr->base,
+ (sizeof(struct i40e_tx_desc)) * que->num_desc);
+
+ /* Reset indices */
+ txr->next_avail = 0;
+ txr->next_to_clean = 0;
+
+#ifdef IXL_FDIR
+ /* Initialize flow director */
+ txr->atr_rate = ixl_atr_rate;
+ txr->atr_count = 0;
+#endif
+
+ /* Free any existing tx mbufs. */
+ buf = txr->buffers;
+ for (int i = 0; i < que->num_desc; i++, buf++) {
+ if (buf->m_head != NULL) {
+ bus_dmamap_sync(buf->tag, buf->map,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(buf->tag, buf->map);
+ m_freem(buf->m_head);
+ buf->m_head = NULL;
+ }
+#ifdef DEV_NETMAP
+ if (slot)
+ {
+ int si = netmap_idx_n2k(&na->tx_rings[que->me], i);
+ netmap_load_map(txr->tag, buf->map, NMB(slot + si));
+ }
+#endif
+ /* Clear the EOP index */
+ buf->eop_index = -1;
+ }
+
+ /* Set number of descriptors available */
+ txr->avail = que->num_desc;
+
+ bus_dmamap_sync(txr->dma.tag, txr->dma.map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ IXL_TX_UNLOCK(txr);
+}
+
+
+/*********************************************************************
+ *
+ * Free transmit ring related data structures.
+ *
+ **********************************************************************/
+void
+ixl_free_que_tx(struct ixl_queue *que)
+{
+ struct tx_ring *txr = &que->txr;
+ struct ixl_tx_buf *buf;
+
+ INIT_DBG_IF(que->vsi->ifp, "queue %d: begin", que->me);
+
+ for (int i = 0; i < que->num_desc; i++) {
+ buf = &txr->buffers[i];
+ if (buf->m_head != NULL) {
+ bus_dmamap_sync(buf->tag, buf->map,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(buf->tag,
+ buf->map);
+ m_freem(buf->m_head);
+ buf->m_head = NULL;
+ if (buf->map != NULL) {
+ bus_dmamap_destroy(buf->tag,
+ buf->map);
+ buf->map = NULL;
+ }
+ } else if (buf->map != NULL) {
+ bus_dmamap_unload(buf->tag,
+ buf->map);
+ bus_dmamap_destroy(buf->tag,
+ buf->map);
+ buf->map = NULL;
+ }
+ }
+ if (txr->br != NULL)
+ buf_ring_free(txr->br, M_DEVBUF);
+ if (txr->buffers != NULL) {
+ free(txr->buffers, M_DEVBUF);
+ txr->buffers = NULL;
+ }
+ if (txr->tx_tag != NULL) {
+ bus_dma_tag_destroy(txr->tx_tag);
+ txr->tx_tag = NULL;
+ }
+ if (txr->tso_tag != NULL) {
+ bus_dma_tag_destroy(txr->tso_tag);
+ txr->tso_tag = NULL;
+ }
+
+ INIT_DBG_IF(que->vsi->ifp, "queue %d: end", que->me);
+ return;
+}
+
+/*********************************************************************
+ *
+ * Setup descriptor for hw offloads
+ *
+ **********************************************************************/
+
+static int
+ixl_tx_setup_offload(struct ixl_queue *que,
+ struct mbuf *mp, u32 *cmd, u32 *off)
+{
+ struct ether_vlan_header *eh;
+ struct ip *ip = NULL;
+ struct tcphdr *th = NULL;
+ struct ip6_hdr *ip6;
+ int elen, ip_hlen = 0, tcp_hlen;
+ u16 etype;
+ u8 ipproto = 0;
+ bool tso = FALSE;
+
+
+ /* Set up the TSO context descriptor if required */
+ if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
+ tso = ixl_tso_setup(que, mp);
+ if (tso)
+ ++que->tso;
+ else
+ return (ENXIO);
+ }
+
+ /*
+ * Determine where frame payload starts.
+ * Jump over vlan headers if already present,
+ * helpful for QinQ too.
+ */
+ eh = mtod(mp, struct ether_vlan_header *);
+ if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
+ etype = ntohs(eh->evl_proto);
+ elen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
+ } else {
+ etype = ntohs(eh->evl_encap_proto);
+ elen = ETHER_HDR_LEN;
+ }
+
+ switch (etype) {
+ case ETHERTYPE_IP:
+ ip = (struct ip *)(mp->m_data + elen);
+ ip_hlen = ip->ip_hl << 2;
+ ipproto = ip->ip_p;
+ th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
+ /* The IP checksum must be recalculated with TSO */
+ if (tso)
+ *cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
+ else
+ *cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
+ break;
+ case ETHERTYPE_IPV6:
+ ip6 = (struct ip6_hdr *)(mp->m_data + elen);
+ ip_hlen = sizeof(struct ip6_hdr);
+ ipproto = ip6->ip6_nxt;
+ th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
+ *cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
+ /* Falls thru */
+ default:
+ break;
+ }
+
+ *off |= (elen >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
+ *off |= (ip_hlen >> 2) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+
+ switch (ipproto) {
+ case IPPROTO_TCP:
+ tcp_hlen = th->th_off << 2;
+ if (mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_TCP_IPV6)) {
+ *cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
+ *off |= (tcp_hlen >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ }
+#ifdef IXL_FDIR
+ ixl_atr(que, th, etype);
+#endif
+ break;
+ case IPPROTO_UDP:
+ if (mp->m_pkthdr.csum_flags & (CSUM_UDP|CSUM_UDP_IPV6)) {
+ *cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
+ *off |= (sizeof(struct udphdr) >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ }
+ break;
+
+ case IPPROTO_SCTP:
+ if (mp->m_pkthdr.csum_flags & (CSUM_SCTP|CSUM_SCTP_IPV6)) {
+ *cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
+ *off |= (sizeof(struct sctphdr) >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ }
+ /* Fall Thru */
+ default:
+ break;
+ }
+
+ return (0);
+}
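
The cmd/off words built above are later folded into the data descriptor in
ixl_xmit. For a plain TCP/IPv4 frame the offset packing reduces to the
arithmetic in this standalone sketch; the shift positions (0/7/14) and the
2-byte/4-byte length units are assumptions drawn from how the
I40E_TX_DESC_LENGTH_* macros are used above, not checked against the
register header.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* 14-byte Ethernet, 20-byte IPv4, 20-byte TCP headers */
	const unsigned maclen_shift = 0;	/* assumed MACLEN shift */
	const unsigned iplen_shift = 7;		/* assumed IPLEN shift */
	const unsigned l4len_shift = 14;	/* assumed L4LEN shift */
	uint32_t off = 0;

	off |= (14 >> 1) << maclen_shift;	/* MACLEN in 2-byte units */
	off |= (20 >> 2) << iplen_shift;	/* IPLEN in 4-byte units */
	off |= (20 >> 2) << l4len_shift;	/* L4LEN in 4-byte units */
	printf("descriptor offset word = 0x%x\n", (unsigned)off);
	return (0);
}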
+
+
+/**********************************************************************
+ *
+ * Setup context for hardware segmentation offload (TSO)
+ *
+ **********************************************************************/
+static bool
+ixl_tso_setup(struct ixl_queue *que, struct mbuf *mp)
+{
+ struct tx_ring *txr = &que->txr;
+ struct i40e_tx_context_desc *TXD;
+ struct ixl_tx_buf *buf;
+ u32 cmd, mss, type, tsolen;
+ u16 etype;
+ int idx, elen, ip_hlen, tcp_hlen;
+ struct ether_vlan_header *eh;
+ struct ip *ip;
+ struct ip6_hdr *ip6;
+ struct tcphdr *th;
+ u64 type_cmd_tso_mss;
+
+ /*
+ * Determine where frame payload starts.
+ * Jump over vlan headers if already present
+ */
+ eh = mtod(mp, struct ether_vlan_header *);
+ if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
+ elen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
+ etype = eh->evl_proto;
+ } else {
+ elen = ETHER_HDR_LEN;
+ etype = eh->evl_encap_proto;
+ }
+
+ switch (ntohs(etype)) {
+#ifdef INET6
+ case ETHERTYPE_IPV6:
+ ip6 = (struct ip6_hdr *)(mp->m_data + elen);
+ if (ip6->ip6_nxt != IPPROTO_TCP)
+ return (ENXIO);
+ ip_hlen = sizeof(struct ip6_hdr);
+ th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
+ th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
+ tcp_hlen = th->th_off << 2;
+ break;
+#endif
+#ifdef INET
+ case ETHERTYPE_IP:
+ ip = (struct ip *)(mp->m_data + elen);
+ if (ip->ip_p != IPPROTO_TCP)
+ return (ENXIO);
+ ip->ip_sum = 0;
+ ip_hlen = ip->ip_hl << 2;
+ th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
+ th->th_sum = in_pseudo(ip->ip_src.s_addr,
+ ip->ip_dst.s_addr, htons(IPPROTO_TCP));
+ tcp_hlen = th->th_off << 2;
+ break;
+#endif
+ default:
+ panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
+ __func__, ntohs(etype));
+ break;
+ }
+
+ /* Ensure we have at least the IP+TCP header in the first mbuf. */
+ if (mp->m_len < elen + ip_hlen + sizeof(struct tcphdr))
+ return FALSE;
+
+ idx = txr->next_avail;
+ buf = &txr->buffers[idx];
+ TXD = (struct i40e_tx_context_desc *) &txr->base[idx];
+ tsolen = mp->m_pkthdr.len - (elen + ip_hlen + tcp_hlen);
+
+ type = I40E_TX_DESC_DTYPE_CONTEXT;
+ cmd = I40E_TX_CTX_DESC_TSO;
+ mss = mp->m_pkthdr.tso_segsz;
+
+ type_cmd_tso_mss = ((u64)type << I40E_TXD_CTX_QW1_DTYPE_SHIFT) |
+ ((u64)cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
+ ((u64)tsolen << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
+ ((u64)mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
+ TXD->type_cmd_tso_mss = htole64(type_cmd_tso_mss);
+
+ TXD->tunneling_params = htole32(0);
+ buf->m_head = NULL;
+ buf->eop_index = -1;
+
+ if (++idx == que->num_desc)
+ idx = 0;
+
+ txr->avail--;
+ txr->next_avail = idx;
+
+ return TRUE;
+}
+
+/*
+** ixl_get_tx_head - Retrieve the value from the
+** location the HW records its HEAD index
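+** (head writeback lands in the slot just past the last real
+** descriptor, which is why base[num_desc] is read here)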
+*/
+static inline u32
+ixl_get_tx_head(struct ixl_queue *que)
+{
+ struct tx_ring *txr = &que->txr;
+ void *head = &txr->base[que->num_desc];
+ return LE32_TO_CPU(*(volatile __le32 *)head);
+}
+
+/**********************************************************************
+ *
+ * Examine each tx_buffer in the used queue. If the hardware is done
+ * processing the packet then free associated resources. The
+ * tx_buffer is put back on the free queue.
+ *
+ **********************************************************************/
+bool
+ixl_txeof(struct ixl_queue *que)
+{
+ struct ixl_vsi *vsi = que->vsi;
+ struct ifnet *ifp = vsi->ifp;
+ struct tx_ring *txr = &que->txr;
+ u32 first, last, head, done, processed;
+ struct ixl_tx_buf *buf;
+ struct i40e_tx_desc *tx_desc, *eop_desc;
+
+
+ mtx_assert(&txr->mtx, MA_OWNED);
+
+#ifdef DEV_NETMAP
+ if (ifp->if_capenable & IFCAP_NETMAP) {
+ struct netmap_adapter *na = NA(ifp);
+ struct netmap_kring *kring = &na->tx_rings[que->me];
+ tx_desc = txr->base;
+ bus_dmamap_sync(txr->dma.tag, txr->dma.map,
+ BUS_DMASYNC_POSTREAD);
+ if (!netmap_mitigate ||
+ (kring->nr_kflags < kring->nkr_num_slots &&
+ tx_desc[kring->nr_kflags].cmd_type_offset_bsz &
+ htole32(I40E_TX_DESC_DTYPE_DESC_DONE)))
+ {
+#if NETMAP_API < 4
+ struct ixl_pf *pf = vsi->pf;
+ kring->nr_kflags = kring->nkr_num_slots;
+ selwakeuppri(&na->tx_rings[que->me].si, PI_NET);
+ IXL_TX_UNLOCK(txr);
+ IXL_PF_LOCK(pf);
+ selwakeuppri(&na->tx_si, PI_NET);
+ IXL_PF_UNLOCK(pf);
+ IXL_TX_LOCK(txr);
+#else /* NETMAP_API >= 4 */
+ netmap_tx_irq(ifp, txr->que->me);
+#endif /* NETMAP_API */
+ }
+ // XXX guessing there is no more work to be done
+ return FALSE;
+ }
+#endif /* DEV_NETMAP */
+
+ /* These are not the descriptors you seek, move along :) */
+ if (txr->avail == que->num_desc) {
+ que->busy = 0;
+ return FALSE;
+ }
+
+ processed = 0;
+ first = txr->next_to_clean;
+ buf = &txr->buffers[first];
+ tx_desc = (struct i40e_tx_desc *)&txr->base[first];
+ last = buf->eop_index;
+ if (last == -1)
+ return FALSE;
+ eop_desc = (struct i40e_tx_desc *)&txr->base[last];
+
+ /* Get the Head WB value */
+ head = ixl_get_tx_head(que);
+
+ /*
+ ** Get the index of the first descriptor
+ ** BEYOND the EOP and call that 'done'.
+ ** I do this so the comparison in the
+ ** inner while loop below can be simple
+ */
+ if (++last == que->num_desc) last = 0;
+ done = last;
+
+ bus_dmamap_sync(txr->dma.tag, txr->dma.map,
+ BUS_DMASYNC_POSTREAD);
+ /*
+	** The HEAD index of the ring is written to a
+	** defined location; this, rather than a done bit,
+ ** is what is used to keep track of what must be
+ ** 'cleaned'.
+ */
+ while (first != head) {
+ /* We clean the range of the packet */
+ while (first != done) {
+ ++txr->avail;
+ ++processed;
+
+ if (buf->m_head) {
+ txr->bytes += /* for ITR adjustment */
+ buf->m_head->m_pkthdr.len;
+ txr->tx_bytes += /* for TX stats */
+ buf->m_head->m_pkthdr.len;
+ bus_dmamap_sync(buf->tag,
+ buf->map,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(buf->tag,
+ buf->map);
+ m_freem(buf->m_head);
+ buf->m_head = NULL;
+ buf->map = NULL;
+ }
+ buf->eop_index = -1;
+
+ if (++first == que->num_desc)
+ first = 0;
+
+ buf = &txr->buffers[first];
+ tx_desc = &txr->base[first];
+ }
+ ++txr->packets;
+ ++ifp->if_opackets;
+ /* See if there is more work now */
+ last = buf->eop_index;
+ if (last != -1) {
+ eop_desc = &txr->base[last];
+ /* Get next done point */
+ if (++last == que->num_desc) last = 0;
+ done = last;
+ } else
+ break;
+ }
+ bus_dmamap_sync(txr->dma.tag, txr->dma.map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+ txr->next_to_clean = first;
+
+
+ /*
+	** Hang detection: we know there's work
+	** outstanding or the first return would
+	** have been taken, so indicate an
+	** unsuccessful pass; in local_timer, if
+	** the value grows too great the queue will
+	** be considered hung. If anything has been
+	** cleaned then reset the state.
+ */
+ if ((processed == 0) && (que->busy != IXL_QUEUE_HUNG))
+ ++que->busy;
+
+ if (processed)
+ que->busy = 1; /* Note this turns off HUNG */
+
+ /*
+ * If there are no pending descriptors, clear the timeout.
+ */
+ if (txr->avail == que->num_desc) {
+ que->busy = 0;
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+/*********************************************************************
+ *
+ * Refresh mbuf buffers for RX descriptor rings
+ *  - now keeps its own state, so discards due to resource
+ *    exhaustion are unnecessary; if an mbuf cannot be obtained
+ *    it just returns, keeping its placeholder, so it can simply
+ *    be recalled to try again.
+ *
+ **********************************************************************/
+static void
+ixl_refresh_mbufs(struct ixl_queue *que, int limit)
+{
+ struct ixl_vsi *vsi = que->vsi;
+ struct rx_ring *rxr = &que->rxr;
+ bus_dma_segment_t hseg[1];
+ bus_dma_segment_t pseg[1];
+ struct ixl_rx_buf *buf;
+ struct mbuf *mh, *mp;
+ int i, j, nsegs, error;
+ bool refreshed = FALSE;
+
+ i = j = rxr->next_refresh;
+ /* Control the loop with one beyond */
+ if (++j == que->num_desc)
+ j = 0;
+
+ while (j != limit) {
+ buf = &rxr->buffers[i];
+ if (rxr->hdr_split == FALSE)
+ goto no_split;
+
+ if (buf->m_head == NULL) {
+ mh = m_gethdr(M_NOWAIT, MT_DATA);
+ if (mh == NULL)
+ goto update;
+ } else
+ mh = buf->m_head;
+
+ mh->m_pkthdr.len = mh->m_len = MHLEN;
+ mh->m_len = MHLEN;
+ mh->m_flags |= M_PKTHDR;
+ /* Get the memory mapping */
+ error = bus_dmamap_load_mbuf_sg(rxr->htag,
+ buf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
+ if (error != 0) {
+ printf("Refresh mbufs: hdr dmamap load"
+ " failure - %d\n", error);
+ m_free(mh);
+ buf->m_head = NULL;
+ goto update;
+ }
+ buf->m_head = mh;
+ bus_dmamap_sync(rxr->htag, buf->hmap,
+ BUS_DMASYNC_PREREAD);
+ rxr->base[i].read.hdr_addr =
+ htole64(hseg[0].ds_addr);
+
+no_split:
+ if (buf->m_pack == NULL) {
+ mp = m_getjcl(M_NOWAIT, MT_DATA,
+ M_PKTHDR, rxr->mbuf_sz);
+ if (mp == NULL)
+ goto update;
+ } else
+ mp = buf->m_pack;
+
+ mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
+ /* Get the memory mapping */
+ error = bus_dmamap_load_mbuf_sg(rxr->ptag,
+ buf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
+ if (error != 0) {
+ printf("Refresh mbufs: payload dmamap load"
+ " failure - %d\n", error);
+ m_free(mp);
+ buf->m_pack = NULL;
+ goto update;
+ }
+ buf->m_pack = mp;
+ bus_dmamap_sync(rxr->ptag, buf->pmap,
+ BUS_DMASYNC_PREREAD);
+#ifdef DEV_NETMAP
+ rxr->base[i].read.pkt_addr = buf->addr;
+#else /* !DEV_NETMAP */
+ rxr->base[i].read.pkt_addr =
+ htole64(pseg[0].ds_addr);
+#endif /* DEV_NETMAP */
+ /* Used only when doing header split */
+ rxr->base[i].read.hdr_addr = 0;
+
+ refreshed = TRUE;
+ /* Next is precalculated */
+ i = j;
+ rxr->next_refresh = i;
+ if (++j == que->num_desc)
+ j = 0;
+ }
+update:
+ if (refreshed) /* Update hardware tail index */
+ wr32(vsi->hw, rxr->tail, rxr->next_refresh);
+ return;
+}
+
+
+/*********************************************************************
+ *
+ * Allocate memory for rx_buffer structures. Since we use one
+ * rx_buffer per descriptor, the maximum number of rx_buffer's
+ * that we'll need is equal to the number of receive descriptors
+ * that we've defined.
+ *
+ **********************************************************************/
+int
+ixl_allocate_rx_data(struct ixl_queue *que)
+{
+ struct rx_ring *rxr = &que->rxr;
+ struct ixl_vsi *vsi = que->vsi;
+ device_t dev = vsi->dev;
+ struct ixl_rx_buf *buf;
+ int i, bsize, error;
+
+ bsize = sizeof(struct ixl_rx_buf) * que->num_desc;
+ if (!(rxr->buffers =
+ (struct ixl_rx_buf *) malloc(bsize,
+ M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ device_printf(dev, "Unable to allocate rx_buffer memory\n");
+ error = ENOMEM;
+ return (error);
+ }
+
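+	/* Header tag: a single segment of at most MSIZE bytes for the (optional) header-split mbuf */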
+ if ((error = bus_dma_tag_create(NULL, /* parent */
+ 1, 0, /* alignment, bounds */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ MSIZE, /* maxsize */
+ 1, /* nsegments */
+ MSIZE, /* maxsegsize */
+ 0, /* flags */
+ NULL, /* lockfunc */
+ NULL, /* lockfuncarg */
+ &rxr->htag))) {
+ device_printf(dev, "Unable to create RX DMA htag\n");
+ return (error);
+ }
+
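+	/* Payload tag: a single segment of up to a 16KB jumbo cluster (MJUM16BYTES) per descriptor */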
+ if ((error = bus_dma_tag_create(NULL, /* parent */
+ 1, 0, /* alignment, bounds */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ MJUM16BYTES, /* maxsize */
+ 1, /* nsegments */
+ MJUM16BYTES, /* maxsegsize */
+ 0, /* flags */
+ NULL, /* lockfunc */
+ NULL, /* lockfuncarg */
+ &rxr->ptag))) {
+ device_printf(dev, "Unable to create RX DMA ptag\n");
+ return (error);
+ }
+
+ for (i = 0; i < que->num_desc; i++) {
+ buf = &rxr->buffers[i];
+ error = bus_dmamap_create(rxr->htag,
+ BUS_DMA_NOWAIT, &buf->hmap);
+ if (error) {
+ device_printf(dev, "Unable to create RX head map\n");
+ break;
+ }
+ error = bus_dmamap_create(rxr->ptag,
+ BUS_DMA_NOWAIT, &buf->pmap);
+ if (error) {
+ device_printf(dev, "Unable to create RX pkt map\n");
+ break;
+ }
+ }
+
+ return (error);
+}
+
+
+/*********************************************************************
+ *
+ * (Re)Initialize the queue receive ring and its buffers.
+ *
+ **********************************************************************/
+int
+ixl_init_rx_ring(struct ixl_queue *que)
+{
+ struct ixl_vsi *vsi = que->vsi;
+ struct ifnet *ifp = vsi->ifp;
+ struct rx_ring *rxr = &que->rxr;
+ struct lro_ctrl *lro = &rxr->lro;
+ struct ixl_rx_buf *buf;
+ bus_dma_segment_t pseg[1], hseg[1];
+ int rsize, nsegs, error = 0;
+#ifdef DEV_NETMAP
+ struct netmap_adapter *na = NA(ifp);
+ struct netmap_slot *slot;
+#endif /* DEV_NETMAP */
+
+ IXL_RX_LOCK(rxr);
+#ifdef DEV_NETMAP
+ slot = netmap_reset(na, NR_RX, que->me, 0);
+#endif
+ /* Clear the ring contents */
+ rsize = roundup2(que->num_desc *
+ sizeof(union i40e_rx_desc), DBA_ALIGN);
+ bzero((void *)rxr->base, rsize);
+ /* Cleanup any existing buffers */
+ for (int i = 0; i < que->num_desc; i++) {
+ buf = &rxr->buffers[i];
+ if (buf->m_head != NULL) {
+ bus_dmamap_sync(rxr->htag, buf->hmap,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(rxr->htag, buf->hmap);
+ buf->m_head->m_flags |= M_PKTHDR;
+ m_freem(buf->m_head);
+ }
+ if (buf->m_pack != NULL) {
+ bus_dmamap_sync(rxr->ptag, buf->pmap,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(rxr->ptag, buf->pmap);
+ buf->m_pack->m_flags |= M_PKTHDR;
+ m_freem(buf->m_pack);
+ }
+ buf->m_head = NULL;
+ buf->m_pack = NULL;
+ }
+
+ /* header split is off */
+ rxr->hdr_split = FALSE;
+
+ /* Now replenish the mbufs */
+ for (int j = 0; j != que->num_desc; ++j) {
+ struct mbuf *mh, *mp;
+
+ buf = &rxr->buffers[j];
+#ifdef DEV_NETMAP
+ if (slot)
+ {
+ int sj = netmap_idx_n2k(&na->rx_rings[que->me], j);
+ u64 paddr;
+ void *addr;
+
+ addr = PNMB(slot + sj, &paddr);
+ netmap_load_map(rxr->ptag, buf->pmap, addr);
+ /* Update descriptor and cached value */
+ rxr->base[j].read.pkt_addr = htole64(paddr);
+ buf->addr = htole64(paddr);
+ continue;
+ }
+#endif /* DEV_NETMAP */
+ /*
+ ** Don't allocate mbufs if not
+		** doing header split, it's wasteful
+ */
+ if (rxr->hdr_split == FALSE)
+ goto skip_head;
+
+ /* First the header */
+ buf->m_head = m_gethdr(M_NOWAIT, MT_DATA);
+ if (buf->m_head == NULL) {
+ error = ENOBUFS;
+ goto fail;
+ }
+ m_adj(buf->m_head, ETHER_ALIGN);
+ mh = buf->m_head;
+ mh->m_len = mh->m_pkthdr.len = MHLEN;
+ mh->m_flags |= M_PKTHDR;
+ /* Get the memory mapping */
+ error = bus_dmamap_load_mbuf_sg(rxr->htag,
+ buf->hmap, buf->m_head, hseg,
+ &nsegs, BUS_DMA_NOWAIT);
+ if (error != 0) /* Nothing elegant to do here */
+ goto fail;
+ bus_dmamap_sync(rxr->htag,
+ buf->hmap, BUS_DMASYNC_PREREAD);
+ /* Update descriptor */
+ rxr->base[j].read.hdr_addr = htole64(hseg[0].ds_addr);
+
+skip_head:
+ /* Now the payload cluster */
+ buf->m_pack = m_getjcl(M_NOWAIT, MT_DATA,
+ M_PKTHDR, rxr->mbuf_sz);
+ if (buf->m_pack == NULL) {
+ error = ENOBUFS;
+ goto fail;
+ }
+ mp = buf->m_pack;
+ mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
+ /* Get the memory mapping */
+ error = bus_dmamap_load_mbuf_sg(rxr->ptag,
+ buf->pmap, mp, pseg,
+ &nsegs, BUS_DMA_NOWAIT);
+ if (error != 0)
+ goto fail;
+ bus_dmamap_sync(rxr->ptag,
+ buf->pmap, BUS_DMASYNC_PREREAD);
+ /* Update descriptor */
+ rxr->base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
+ rxr->base[j].read.hdr_addr = 0;
+ }
+
+
+ /* Setup our descriptor indices */
+ rxr->next_check = 0;
+ rxr->next_refresh = 0;
+ rxr->lro_enabled = FALSE;
+ rxr->split = 0;
+ rxr->bytes = 0;
+ rxr->discard = FALSE;
+
+ /*
+ ** Now set up the LRO interface:
+ */
+ if (ifp->if_capenable & IFCAP_LRO) {
+ int err = tcp_lro_init(lro);
+ if (err) {
+ if_printf(ifp, "queue %d: LRO Initialization failed!\n", que->me);
+ goto fail;
+ }
+ INIT_DBG_IF(ifp, "queue %d: RX Soft LRO Initialized", que->me);
+ rxr->lro_enabled = TRUE;
+ lro->ifp = vsi->ifp;
+ }
+
+ bus_dmamap_sync(rxr->dma.tag, rxr->dma.map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+fail:
+ IXL_RX_UNLOCK(rxr);
+ return (error);
+}
+
+
+/*********************************************************************
+ *
+ * Free station receive ring data structures
+ *
+ **********************************************************************/
+void
+ixl_free_que_rx(struct ixl_queue *que)
+{
+ struct rx_ring *rxr = &que->rxr;
+ struct ixl_rx_buf *buf;
+
+ INIT_DBG_IF(que->vsi->ifp, "queue %d: begin", que->me);
+
+ /* Cleanup any existing buffers */
+ if (rxr->buffers != NULL) {
+ for (int i = 0; i < que->num_desc; i++) {
+ buf = &rxr->buffers[i];
+ if (buf->m_head != NULL) {
+ bus_dmamap_sync(rxr->htag, buf->hmap,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(rxr->htag, buf->hmap);
+ buf->m_head->m_flags |= M_PKTHDR;
+ m_freem(buf->m_head);
+ }
+ if (buf->m_pack != NULL) {
+ bus_dmamap_sync(rxr->ptag, buf->pmap,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(rxr->ptag, buf->pmap);
+ buf->m_pack->m_flags |= M_PKTHDR;
+ m_freem(buf->m_pack);
+ }
+ buf->m_head = NULL;
+ buf->m_pack = NULL;
+ if (buf->hmap != NULL) {
+ bus_dmamap_destroy(rxr->htag, buf->hmap);
+ buf->hmap = NULL;
+ }
+ if (buf->pmap != NULL) {
+ bus_dmamap_destroy(rxr->ptag, buf->pmap);
+ buf->pmap = NULL;
+ }
+ }
+ if (rxr->buffers != NULL) {
+ free(rxr->buffers, M_DEVBUF);
+ rxr->buffers = NULL;
+ }
+ }
+
+ if (rxr->htag != NULL) {
+ bus_dma_tag_destroy(rxr->htag);
+ rxr->htag = NULL;
+ }
+ if (rxr->ptag != NULL) {
+ bus_dma_tag_destroy(rxr->ptag);
+ rxr->ptag = NULL;
+ }
+
+ INIT_DBG_IF(que->vsi->ifp, "queue %d: end", que->me);
+ return;
+}
+
+static __inline void
+ixl_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u8 ptype)
+{
+ /*
+	 * At the moment LRO is only for IPv4/TCP packets and the TCP checksum
+	 * of the packet should be computed by hardware. Also, the packet should
+	 * not have a VLAN tag in its Ethernet header.
+ */
+ if (rxr->lro_enabled &&
+ (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
+ (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
+ (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
+ /*
+ * Send to the stack if:
+	 *  - LRO not enabled, or
+	 *  - no LRO resources, or
+	 *  - lro enqueue fails
+ */
+ if (rxr->lro.lro_cnt != 0)
+ if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
+ return;
+ }
+ IXL_RX_UNLOCK(rxr);
+ (*ifp->if_input)(ifp, m);
+ IXL_RX_LOCK(rxr);
+}
+
+
+static __inline void
+ixl_rx_discard(struct rx_ring *rxr, int i)
+{
+ struct ixl_rx_buf *rbuf;
+
+ rbuf = &rxr->buffers[i];
+
+ if (rbuf->fmp != NULL) {/* Partial chain ? */
+ rbuf->fmp->m_flags |= M_PKTHDR;
+ m_freem(rbuf->fmp);
+ rbuf->fmp = NULL;
+ }
+
+ /*
+ ** With advanced descriptors the writeback
+	** clobbers the buffer addrs, so it's easier
+ ** to just free the existing mbufs and take
+ ** the normal refresh path to get new buffers
+ ** and mapping.
+ */
+ if (rbuf->m_head) {
+ m_free(rbuf->m_head);
+ rbuf->m_head = NULL;
+ }
+
+ if (rbuf->m_pack) {
+ m_free(rbuf->m_pack);
+ rbuf->m_pack = NULL;
+ }
+
+ return;
+}
+
+
+/*********************************************************************
+ *
+ * This routine executes in interrupt context. It replenishes
+ *  the mbufs in the descriptor ring and sends data which has been
+ *  DMA'd into host memory to the upper layer.
+ *
+ * We loop at most count times if count is > 0, or until done if
+ * count < 0.
+ *
+ * Return TRUE for more work, FALSE for all clean.
+ *********************************************************************/
+bool
+ixl_rxeof(struct ixl_queue *que, int count)
+{
+ struct ixl_vsi *vsi = que->vsi;
+ struct rx_ring *rxr = &que->rxr;
+ struct ifnet *ifp = vsi->ifp;
+ struct lro_ctrl *lro = &rxr->lro;
+ struct lro_entry *queued;
+ int i, nextp, processed = 0;
+ union i40e_rx_desc *cur;
+ struct ixl_rx_buf *rbuf, *nbuf;
+
+
+ IXL_RX_LOCK(rxr);
+
+#ifdef DEV_NETMAP
+#if NETMAP_API < 4
+ if (ifp->if_capenable & IFCAP_NETMAP)
+ {
+ struct netmap_adapter *na = NA(ifp);
+
+ na->rx_rings[que->me].nr_kflags |= NKR_PENDINTR;
+ selwakeuppri(&na->rx_rings[que->me].si, PI_NET);
+ IXL_RX_UNLOCK(rxr);
+ IXL_PF_LOCK(vsi->pf);
+ selwakeuppri(&na->rx_si, PI_NET);
+ IXL_PF_UNLOCK(vsi->pf);
+ return (FALSE);
+ }
+#else /* NETMAP_API >= 4 */
+ if (netmap_rx_irq(ifp, que->me, &processed))
+ {
+ IXL_RX_UNLOCK(rxr);
+ return (FALSE);
+ }
+#endif /* NETMAP_API */
+#endif /* DEV_NETMAP */
+
+ for (i = rxr->next_check; count != 0;) {
+ struct mbuf *sendmp, *mh, *mp;
+ u32 rsc, status, error;
+ u16 hlen, plen, vtag;
+ u64 qword;
+ u8 ptype;
+ bool eop;
+
+ /* Sync the ring. */
+ bus_dmamap_sync(rxr->dma.tag, rxr->dma.map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+ cur = &rxr->base[i];
+ qword = le64toh(cur->wb.qword1.status_error_len);
+ status = (qword & I40E_RXD_QW1_STATUS_MASK)
+ >> I40E_RXD_QW1_STATUS_SHIFT;
+ error = (qword & I40E_RXD_QW1_ERROR_MASK)
+ >> I40E_RXD_QW1_ERROR_SHIFT;
+ plen = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK)
+ >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+ hlen = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK)
+ >> I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
+ ptype = (qword & I40E_RXD_QW1_PTYPE_MASK)
+ >> I40E_RXD_QW1_PTYPE_SHIFT;
+
+ if ((status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) == 0) {
+ ++rxr->not_done;
+ break;
+ }
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ break;
+
+ count--;
+ sendmp = NULL;
+ nbuf = NULL;
+ rsc = 0;
+ cur->wb.qword1.status_error_len = 0;
+ rbuf = &rxr->buffers[i];
+ mh = rbuf->m_head;
+ mp = rbuf->m_pack;
+ eop = (status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT));
+ if (status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT))
+ vtag = le16toh(cur->wb.qword0.lo_dword.l2tag1);
+ else
+ vtag = 0;
+
+ /*
+		** Make sure bad packets are discarded;
+		** note that only the EOP descriptor has valid
+ ** error results.
+ */
+ if (eop && (error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+ ifp->if_ierrors++;
+ rxr->discarded++;
+ ixl_rx_discard(rxr, i);
+ goto next_desc;
+ }
+
+ /* Prefetch the next buffer */
+ if (!eop) {
+ nextp = i + 1;
+ if (nextp == que->num_desc)
+ nextp = 0;
+ nbuf = &rxr->buffers[nextp];
+ prefetch(nbuf);
+ }
+
+ /*
+ ** The header mbuf is ONLY used when header
+		** split is enabled; otherwise we get normal
+		** behavior, i.e., both header and payload
+ ** are DMA'd into the payload buffer.
+ **
+ ** Rather than using the fmp/lmp global pointers
+ ** we now keep the head of a packet chain in the
+ ** buffer struct and pass this along from one
+ ** descriptor to the next, until we get EOP.
+ */
+ if (rxr->hdr_split && (rbuf->fmp == NULL)) {
+ if (hlen > IXL_RX_HDR)
+ hlen = IXL_RX_HDR;
+ mh->m_len = hlen;
+ mh->m_flags |= M_PKTHDR;
+ mh->m_next = NULL;
+ mh->m_pkthdr.len = mh->m_len;
+ /* Null buf pointer so it is refreshed */
+ rbuf->m_head = NULL;
+ /*
+			** Check the payload length; this
+			** could be zero if it's a small
+ ** packet.
+ */
+ if (plen > 0) {
+ mp->m_len = plen;
+ mp->m_next = NULL;
+ mp->m_flags &= ~M_PKTHDR;
+ mh->m_next = mp;
+ mh->m_pkthdr.len += mp->m_len;
+ /* Null buf pointer so it is refreshed */
+ rbuf->m_pack = NULL;
+ rxr->split++;
+ }
+ /*
+ ** Now create the forward
+ ** chain so when complete
+			** we won't have to.
+ */
+ if (eop == 0) {
+ /* stash the chain head */
+ nbuf->fmp = mh;
+ /* Make forward chain */
+ if (plen)
+ mp->m_next = nbuf->m_pack;
+ else
+ mh->m_next = nbuf->m_pack;
+ } else {
+ /* Singlet, prepare to send */
+ sendmp = mh;
+ if (vtag) {
+ sendmp->m_pkthdr.ether_vtag = vtag;
+ sendmp->m_flags |= M_VLANTAG;
+ }
+ }
+ } else {
+ /*
+ ** Either no header split, or a
+ ** secondary piece of a fragmented
+ ** split packet.
+ */
+ mp->m_len = plen;
+ /*
+ ** See if there is a stored head
+ ** that determines what we are
+ */
+ sendmp = rbuf->fmp;
+ rbuf->m_pack = rbuf->fmp = NULL;
+
+ if (sendmp != NULL) /* secondary frag */
+ sendmp->m_pkthdr.len += mp->m_len;
+ else {
+ /* first desc of a non-ps chain */
+ sendmp = mp;
+ sendmp->m_flags |= M_PKTHDR;
+ sendmp->m_pkthdr.len = mp->m_len;
+ if (vtag) {
+ sendmp->m_pkthdr.ether_vtag = vtag;
+ sendmp->m_flags |= M_VLANTAG;
+ }
+ }
+ /* Pass the head pointer on */
+ if (eop == 0) {
+ nbuf->fmp = sendmp;
+ sendmp = NULL;
+ mp->m_next = nbuf->m_pack;
+ }
+ }
+ ++processed;
+ /* Sending this frame? */
+ if (eop) {
+ sendmp->m_pkthdr.rcvif = ifp;
+ /* gather stats */
+ ifp->if_ipackets++;
+ rxr->rx_packets++;
+ rxr->rx_bytes += sendmp->m_pkthdr.len;
+ /* capture data for dynamic ITR adjustment */
+ rxr->packets++;
+ rxr->bytes += sendmp->m_pkthdr.len;
+ if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
+ ixl_rx_checksum(sendmp, status, error, ptype);
+ sendmp->m_pkthdr.flowid = que->msix;
+ sendmp->m_flags |= M_FLOWID;
+ }
+next_desc:
+ bus_dmamap_sync(rxr->dma.tag, rxr->dma.map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+ /* Advance our pointers to the next descriptor. */
+ if (++i == que->num_desc)
+ i = 0;
+
+ /* Now send to the stack or do LRO */
+ if (sendmp != NULL) {
+ rxr->next_check = i;
+ ixl_rx_input(rxr, ifp, sendmp, ptype);
+ i = rxr->next_check;
+ }
+
+ /* Every 8 descriptors we go to refresh mbufs */
+ if (processed == 8) {
+ ixl_refresh_mbufs(que, i);
+ processed = 0;
+ }
+ }
+
+ /* Refresh any remaining buf structs */
+ if (ixl_rx_unrefreshed(que))
+ ixl_refresh_mbufs(que, i);
+
+ rxr->next_check = i;
+
+ /*
+ * Flush any outstanding LRO work
+ */
+ while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
+ SLIST_REMOVE_HEAD(&lro->lro_active, next);
+ tcp_lro_flush(lro, queued);
+ }
+
+ IXL_RX_UNLOCK(rxr);
+ return (FALSE);
+}
+
+
+/*********************************************************************
+ *
+ * Verify that the hardware indicated that the checksum is valid.
+ *  Inform the stack about the status of the checksum so that the stack
+ * doesn't spend time verifying the checksum.
+ *
+ *********************************************************************/
+static void
+ixl_rx_checksum(struct mbuf * mp, u32 status, u32 error, u8 ptype)
+{
+ struct i40e_rx_ptype_decoded decoded;
+
+ decoded = decode_rx_desc_ptype(ptype);
+
+ /* Errors? */
+ if (error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
+ (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))) {
+ mp->m_pkthdr.csum_flags = 0;
+ return;
+ }
+
+ /* IPv6 with extension headers likely have bad csum */
+ if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+ decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
+ if (status &
+ (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) {
+ mp->m_pkthdr.csum_flags = 0;
+ return;
+ }
+
+
+ /* IP Checksum Good */
+ mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
+ mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
+
+ if (status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)) {
+ mp->m_pkthdr.csum_flags |=
+ (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
+ mp->m_pkthdr.csum_data |= htons(0xffff);
+ }
+ return;
+}
diff --git a/sys/dev/ixl/ixlv.h b/sys/dev/ixl/ixlv.h
new file mode 100644
index 0000000..a5bfe13
--- /dev/null
+++ b/sys/dev/ixl/ixlv.h
@@ -0,0 +1,205 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2014, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+
+#ifndef _IXLV_H_
+#define _IXLV_H_
+
+#define IXLV_AQ_MAX_ERR 100
+#define IXLV_MAX_FILTERS 128
+#define IXLV_MAX_QUEUES 16
+#define IXLV_AQ_TIMEOUT (1 * hz)
+#define IXLV_CALLOUT_TIMO (hz / 50) // 20 msec
+
+#define IXLV_FLAG_AQ_ENABLE_QUEUES (u32)(1)
+#define IXLV_FLAG_AQ_DISABLE_QUEUES (u32)(1 << 1)
+#define IXLV_FLAG_AQ_ADD_MAC_FILTER (u32)(1 << 2)
+#define IXLV_FLAG_AQ_ADD_VLAN_FILTER (u32)(1 << 3)
+#define IXLV_FLAG_AQ_DEL_MAC_FILTER (u32)(1 << 4)
+#define IXLV_FLAG_AQ_DEL_VLAN_FILTER (u32)(1 << 5)
+#define IXLV_FLAG_AQ_CONFIGURE_QUEUES (u32)(1 << 6)
+#define IXLV_FLAG_AQ_MAP_VECTORS (u32)(1 << 7)
+#define IXLV_FLAG_AQ_HANDLE_RESET (u32)(1 << 8)
+#define IXLV_FLAG_AQ_CONFIGURE_PROMISC (u32)(1 << 9)
+#define IXLV_FLAG_AQ_GET_STATS (u32)(1 << 10)
+
+/* printf %b arg */
+#define IXLV_FLAGS \
+ "\20\1ENABLE_QUEUES\2DISABLE_QUEUES\3ADD_MAC_FILTER" \
+ "\4ADD_VLAN_FILTER\5DEL_MAC_FILTER\6DEL_VLAN_FILTER" \
+ "\7CONFIGURE_QUEUES\10MAP_VECTORS\11HANDLE_RESET" \
+ "\12CONFIGURE_PROMISC\13GET_STATS"
+
+/* Driver state */
+enum ixlv_state_t {
+ IXLV_START,
+ IXLV_FAILED,
+ IXLV_RESET_REQUIRED,
+ IXLV_RESET_PENDING,
+ IXLV_VERSION_CHECK,
+ IXLV_GET_RESOURCES,
+ IXLV_INIT_READY,
+ IXLV_INIT_START,
+ IXLV_INIT_CONFIG,
+ IXLV_INIT_MAPPING,
+ IXLV_INIT_ENABLE,
+ IXLV_INIT_COMPLETE,
+ IXLV_RUNNING,
+};
+
+struct ixlv_mac_filter {
+ SLIST_ENTRY(ixlv_mac_filter) next;
+ u8 macaddr[ETHER_ADDR_LEN];
+ u16 flags;
+};
+SLIST_HEAD(mac_list, ixlv_mac_filter);
+
+struct ixlv_vlan_filter {
+ SLIST_ENTRY(ixlv_vlan_filter) next;
+ u16 vlan;
+ u16 flags;
+};
+SLIST_HEAD(vlan_list, ixlv_vlan_filter);
+
+/* Software controller structure */
+struct ixlv_sc {
+ struct i40e_hw hw;
+ struct i40e_osdep osdep;
+ struct device *dev;
+
+ struct resource *pci_mem;
+ struct resource *msix_mem;
+
+ enum ixlv_state_t init_state;
+
+ /*
+ * Interrupt resources
+ */
+ void *tag;
+ struct resource *res; /* For the AQ */
+
+ struct ifmedia media;
+ struct callout timer;
+ struct callout aq_task;
+ int msix;
+ int if_flags;
+
+ struct mtx mtx;
+ struct mtx aq_task_mtx;
+
+ u32 qbase;
+ u32 admvec;
+ struct timeout_task timeout;
+ struct task aq_irq;
+ struct task aq_sched;
+ struct taskqueue *tq;
+
+ struct ixl_vsi vsi;
+
+ /* Mac Filter List */
+ struct mac_list *mac_filters;
+
+ /* Vlan Filter List */
+ struct vlan_list *vlan_filters;
+
+ /* Promiscuous mode */
+ u32 promiscuous_flags;
+
+ /* Admin queue task flags */
+ u32 aq_wait_count;
+ u32 aq_required;
+ u32 aq_pending;
+
+ /* Virtual comm channel */
+ enum i40e_virtchnl_ops current_op;
+ struct i40e_virtchnl_vf_resource *vf_res;
+ struct i40e_virtchnl_vsi_resource *vsi_res;
+
+ /* Misc stats maintained by the driver */
+ u64 watchdog_events;
+ u64 admin_irq;
+
+ /* Signaling channels */
+ u8 init_done;
+ u8 config_queues_done;
+ u8 map_vectors_done;
+ u8 enable_queues_done;
+ u8 disable_queues_done;
+ u8 add_ether_done;
+ u8 del_ether_done;
+};
+
+/*
+** This checks for a zero MAC address, which is likely
+** unless the Admin on the Host has assigned one.
+*/
+static inline bool
+ixlv_check_ether_addr(u8 *addr)
+{
+ bool status = TRUE;
+
+ if ((addr[0] == 0 && addr[1]== 0 && addr[2] == 0 &&
+ addr[3] == 0 && addr[4]== 0 && addr[5] == 0))
+ status = FALSE;
+ return (status);
+}
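+
+/*
+** A minimal caller sketch (hypothetical): reject an unassigned address and
+** fall back to a locally generated one, e.g.
+**	if (!ixlv_check_ether_addr(addr))
+**		arc4rand(addr, ETHER_ADDR_LEN, 0);
+*/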
+
+/*
+** VF Common function prototypes
+*/
+int ixlv_send_api_ver(struct ixlv_sc *);
+int ixlv_verify_api_ver(struct ixlv_sc *);
+int ixlv_send_vf_config_msg(struct ixlv_sc *);
+int ixlv_get_vf_config(struct ixlv_sc *);
+void ixlv_init(void *);
+int ixlv_reinit_locked(struct ixlv_sc *);
+void ixlv_configure_queues(struct ixlv_sc *);
+void ixlv_enable_queues(struct ixlv_sc *);
+void ixlv_disable_queues(struct ixlv_sc *);
+void ixlv_map_queues(struct ixlv_sc *);
+void ixlv_enable_intr(struct ixl_vsi *);
+void ixlv_disable_intr(struct ixl_vsi *);
+void ixlv_add_ether_filters(struct ixlv_sc *);
+void ixlv_del_ether_filters(struct ixlv_sc *);
+void ixlv_request_stats(struct ixlv_sc *);
+void ixlv_request_reset(struct ixlv_sc *);
+void ixlv_vc_completion(struct ixlv_sc *,
+ enum i40e_virtchnl_ops, i40e_status, u8 *, u16);
+void ixlv_add_ether_filter(struct ixlv_sc *);
+void ixlv_add_vlans(struct ixlv_sc *);
+void ixlv_del_vlans(struct ixlv_sc *);
+void ixlv_update_stats_counters(struct ixlv_sc *,
+ struct i40e_eth_stats *);
+
+#endif /* _IXLV_H_ */
diff --git a/sys/dev/ixl/ixlvc.c b/sys/dev/ixl/ixlvc.c
new file mode 100644
index 0000000..1f912b5
--- /dev/null
+++ b/sys/dev/ixl/ixlvc.c
@@ -0,0 +1,976 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2014, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+/*
+** Virtual Channel support
+** These are support functions for communication
+** between the VF and PF drivers.
+*/
+
+#include "ixl.h"
+#include "ixlv.h"
+#include "i40e_prototype.h"
+
+
+/* busy wait delay in msec */
+#define IXLV_BUSY_WAIT_DELAY 10
+#define IXLV_BUSY_WAIT_COUNT 50
+
+/*
+** Validate VF messages
+*/
+static int ixl_vc_validate_vf_msg(struct ixlv_sc *sc, u32 v_opcode,
+ u8 *msg, u16 msglen)
+{
+ bool err_msg_format = false;
+ int valid_len;
+
+ /* Validate message length. */
+ switch (v_opcode) {
+ case I40E_VIRTCHNL_OP_VERSION:
+ valid_len = sizeof(struct i40e_virtchnl_version_info);
+ break;
+ case I40E_VIRTCHNL_OP_RESET_VF:
+ case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+ valid_len = 0;
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
+ valid_len = sizeof(struct i40e_virtchnl_txq_info);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
+ valid_len = sizeof(struct i40e_virtchnl_rxq_info);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info);
+ if (msglen >= valid_len) {
+ struct i40e_virtchnl_vsi_queue_config_info *vqc =
+ (struct i40e_virtchnl_vsi_queue_config_info *)msg;
+ valid_len += (vqc->num_queue_pairs *
+ sizeof(struct
+ i40e_virtchnl_queue_pair_info));
+ if (vqc->num_queue_pairs == 0)
+ err_msg_format = true;
+ }
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ valid_len = sizeof(struct i40e_virtchnl_irq_map_info);
+ if (msglen >= valid_len) {
+ struct i40e_virtchnl_irq_map_info *vimi =
+ (struct i40e_virtchnl_irq_map_info *)msg;
+ valid_len += (vimi->num_vectors *
+ sizeof(struct i40e_virtchnl_vector_map));
+ if (vimi->num_vectors == 0)
+ err_msg_format = true;
+ }
+ break;
+ case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
+ case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
+ valid_len = sizeof(struct i40e_virtchnl_queue_select);
+ break;
+ case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
+ case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
+ valid_len = sizeof(struct i40e_virtchnl_ether_addr_list);
+ if (msglen >= valid_len) {
+ struct i40e_virtchnl_ether_addr_list *veal =
+ (struct i40e_virtchnl_ether_addr_list *)msg;
+ valid_len += veal->num_elements *
+ sizeof(struct i40e_virtchnl_ether_addr);
+ if (veal->num_elements == 0)
+ err_msg_format = true;
+ }
+ break;
+ case I40E_VIRTCHNL_OP_ADD_VLAN:
+ case I40E_VIRTCHNL_OP_DEL_VLAN:
+ valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list);
+ if (msglen >= valid_len) {
+ struct i40e_virtchnl_vlan_filter_list *vfl =
+ (struct i40e_virtchnl_vlan_filter_list *)msg;
+ valid_len += vfl->num_elements * sizeof(u16);
+ if (vfl->num_elements == 0)
+ err_msg_format = true;
+ }
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ valid_len = sizeof(struct i40e_virtchnl_promisc_info);
+ break;
+ case I40E_VIRTCHNL_OP_GET_STATS:
+ valid_len = sizeof(struct i40e_virtchnl_queue_select);
+ break;
+ /* These are always errors coming from the VF. */
+ case I40E_VIRTCHNL_OP_EVENT:
+ case I40E_VIRTCHNL_OP_UNKNOWN:
+ default:
+ return EPERM;
+ break;
+ }
+ /* few more checks */
+ if ((valid_len != msglen) || (err_msg_format))
+ return EINVAL;
+ else
+ return 0;
+}
+
+/*
+** ixlv_send_pf_msg
+**
+** Send message to PF and print status if failure.
+*/
+static int
+ixlv_send_pf_msg(struct ixlv_sc *sc,
+ enum i40e_virtchnl_ops op, u8 *msg, u16 len)
+{
+ struct i40e_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ i40e_status err;
+ int val_err;
+
+ /*
+	** Pre-validate messages to the PF; this might be
+	** removed later for performance.
+ */
+ val_err = ixl_vc_validate_vf_msg(sc, op, msg, len);
+ if (val_err)
+ device_printf(dev, "Error validating msg to PF for op %d,"
+ " msglen %d: error %d\n", op, len, val_err);
+
+ err = i40e_aq_send_msg_to_pf(hw, op, I40E_SUCCESS, msg, len, NULL);
+ if (err)
+ device_printf(dev, "Unable to send opcode %d to PF, "
+ "error %d, aq status %d\n", op, err, hw->aq.asq_last_status);
+ return err;
+}
+
+
+/*
+** ixlv_send_api_ver
+**
+** Send API version admin queue message to the PF. The reply is not checked
+** in this function. Returns 0 if the message was successfully
+** sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
+*/
+int
+ixlv_send_api_ver(struct ixlv_sc *sc)
+{
+ struct i40e_virtchnl_version_info vvi;
+
+ vvi.major = I40E_VIRTCHNL_VERSION_MAJOR;
+ vvi.minor = I40E_VIRTCHNL_VERSION_MINOR;
+
+ return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_VERSION,
+ (u8 *)&vvi, sizeof(vvi));
+}
+
+/*
+** ixlv_verify_api_ver
+**
+** Compare API versions with the PF. Must be called after admin queue is
+** initialized. Returns 0 if API versions match, EIO if
+** they do not, or I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty.
+*/
+int ixlv_verify_api_ver(struct ixlv_sc *sc)
+{
+ struct i40e_virtchnl_version_info *pf_vvi;
+ struct i40e_hw *hw = &sc->hw;
+ struct i40e_arq_event_info event;
+ i40e_status err;
+ int retries = 0;
+
+ event.buf_len = IXL_AQ_BUFSZ;
+ event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT);
+ if (!event.msg_buf) {
+ err = ENOMEM;
+ goto out;
+ }
+
+ do {
+ if (++retries > IXLV_AQ_MAX_ERR)
+ goto out_alloc;
+
+ /* NOTE: initial delay is necessary */
+ i40e_msec_delay(100);
+ err = i40e_clean_arq_element(hw, &event, NULL);
+ } while (err == I40E_ERR_ADMIN_QUEUE_NO_WORK);
+ if (err)
+ goto out_alloc;
+
+ err = (i40e_status)le32toh(event.desc.cookie_low);
+ if (err) {
+ err = EIO;
+ goto out_alloc;
+ }
+
+ if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) !=
+ I40E_VIRTCHNL_OP_VERSION) {
+ err = EIO;
+ goto out_alloc;
+ }
+
+ pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf;
+ if ((pf_vvi->major != I40E_VIRTCHNL_VERSION_MAJOR) ||
+ (pf_vvi->minor != I40E_VIRTCHNL_VERSION_MINOR))
+ err = EIO;
+
+out_alloc:
+ free(event.msg_buf, M_DEVBUF);
+out:
+ return err;
+}
+
+/*
+** ixlv_send_vf_config_msg
+**
+** Send VF configuration request admin queue message to the PF. The reply
+** is not checked in this function. Returns 0 if the message was
+** successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
+*/
+int
+ixlv_send_vf_config_msg(struct ixlv_sc *sc)
+{
+ return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ NULL, 0);
+}
+
+/*
+** ixlv_get_vf_config
+**
+** Get VF configuration from PF and populate hw structure. Must be called after
+** admin queue is initialized. Busy waits until response is received from PF,
+** with maximum timeout. Response from PF is returned in the buffer for further
+** processing by the caller.
+*/
+int
+ixlv_get_vf_config(struct ixlv_sc *sc)
+{
+ struct i40e_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ struct i40e_arq_event_info event;
+ u16 len;
+ i40e_status err = 0;
+ u32 retries = 0;
+
+ /* Note this assumes a single VSI */
+ len = sizeof(struct i40e_virtchnl_vf_resource) +
+ sizeof(struct i40e_virtchnl_vsi_resource);
+ event.buf_len = len;
+ event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT);
+ if (!event.msg_buf) {
+ err = ENOMEM;
+ goto out;
+ }
+
+ do {
+ err = i40e_clean_arq_element(hw, &event, NULL);
+ if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
+ if (++retries <= IXLV_AQ_MAX_ERR)
+ i40e_msec_delay(100);
+ } else if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) !=
+ I40E_VIRTCHNL_OP_GET_VF_RESOURCES) {
+ device_printf(dev, "%s: Received a response from PF,"
+ " opcode %d, error %d\n", __func__,
+ le32toh(event.desc.cookie_high),
+ le32toh(event.desc.cookie_low));
+ retries++;
+ continue;
+ } else {
+ err = (i40e_status)le32toh(event.desc.cookie_low);
+ if (err) {
+ device_printf(dev, "%s: Error returned from PF,"
+ " opcode %d, error %d\n", __func__,
+ le32toh(event.desc.cookie_high),
+ le32toh(event.desc.cookie_low));
+ err = EIO;
+ goto out_alloc;
+ }
+ break;
+ }
+
+ if (retries > IXLV_AQ_MAX_ERR) {
+ INIT_DBG_DEV(dev, "Did not receive response after %d tries.",
+ retries);
+ goto out_alloc;
+ }
+
+ } while (err);
+
+ memcpy(sc->vf_res, event.msg_buf, min(event.msg_len, len));
+ i40e_vf_parse_hw_config(hw, sc->vf_res);
+
+out_alloc:
+ free(event.msg_buf, M_DEVBUF);
+out:
+ return err;
+}
+
+/*
+** ixlv_configure_queues
+**
+** Request that the PF set up our queues.
+*/
+void
+ixlv_configure_queues(struct ixlv_sc *sc)
+{
+ device_t dev = sc->dev;
+ struct ixl_vsi *vsi = &sc->vsi;
+ struct ixl_queue *que = vsi->queues;
+ struct tx_ring *txr;
+ struct rx_ring *rxr;
+	int			len, pairs;
+
+ struct i40e_virtchnl_vsi_queue_config_info *vqci;
+ struct i40e_virtchnl_queue_pair_info *vqpi;
+
+
+ if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+#ifdef IXL_DEBUG
+ device_printf(dev, "%s: command %d pending\n",
+ __func__, sc->current_op);
+#endif
+ return;
+ }
+
+ pairs = vsi->num_queues;
+ sc->current_op = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES;
+ len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
+ (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs);
+ vqci = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (!vqci) {
+ device_printf(dev, "%s: unable to allocate memory\n", __func__);
+ return;
+ }
+ vqci->vsi_id = sc->vsi_res->vsi_id;
+ vqci->num_queue_pairs = pairs;
+ vqpi = vqci->qpair;
+ /* Size check is not needed here - HW max is 16 queue pairs, and we
+ * can fit info for 31 of them into the AQ buffer before it overflows.
+ */
+ for (int i = 0; i < pairs; i++, que++) {
+ txr = &que->txr;
+ rxr = &que->rxr;
+ vqpi->txq.vsi_id = vqci->vsi_id;
+ vqpi->txq.queue_id = i;
+ vqpi->txq.ring_len = que->num_desc;
+ vqpi->txq.dma_ring_addr = txr->dma.pa;
+ /* Enable Head writeback */
+ vqpi->txq.headwb_enabled = 1;
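+		/* The writeback area sits just past the last TX descriptor; ixl_get_tx_head() reads it */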
+ vqpi->txq.dma_headwb_addr = txr->dma.pa +
+ (que->num_desc * sizeof(struct i40e_tx_desc));
+
+ vqpi->rxq.vsi_id = vqci->vsi_id;
+ vqpi->rxq.queue_id = i;
+ vqpi->rxq.ring_len = que->num_desc;
+ vqpi->rxq.dma_ring_addr = rxr->dma.pa;
+ vqpi->rxq.max_pkt_size = vsi->max_frame_size;
+ vqpi->rxq.databuffer_size = rxr->mbuf_sz;
+ vqpi++;
+ }
+
+ ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ (u8 *)vqci, len);
+ free(vqci, M_DEVBUF);
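+	/* Mark the request as outstanding and clear it from the required set */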
+ sc->aq_pending |= IXLV_FLAG_AQ_CONFIGURE_QUEUES;
+ sc->aq_required &= ~IXLV_FLAG_AQ_CONFIGURE_QUEUES;
+}
+
+/*
+** ixlv_enable_queues
+**
+** Request that the PF enable all of our queues.
+*/
+void
+ixlv_enable_queues(struct ixlv_sc *sc)
+{
+ struct i40e_virtchnl_queue_select vqs;
+
+ if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* we already have a command pending */
+#ifdef IXL_DEBUG
+ device_printf(sc->dev, "%s: command %d pending\n",
+ __func__, sc->current_op);
+#endif
+ return;
+ }
+ sc->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
+ vqs.vsi_id = sc->vsi_res->vsi_id;
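+	/* Build a bitmask that selects every queue pair owned by this VF */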
+ vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1;
+ vqs.rx_queues = vqs.tx_queues;
+ ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ (u8 *)&vqs, sizeof(vqs));
+ sc->aq_pending |= IXLV_FLAG_AQ_ENABLE_QUEUES;
+ sc->aq_required &= ~IXLV_FLAG_AQ_ENABLE_QUEUES;
+}
+
+/*
+** ixlv_disable_queues
+**
+** Request that the PF disable all of our queues.
+*/
+void
+ixlv_disable_queues(struct ixlv_sc *sc)
+{
+ struct i40e_virtchnl_queue_select vqs;
+
+ if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* we already have a command pending */
+#ifdef IXL_DEBUG
+ device_printf(sc->dev, "%s: command %d pending\n",
+ __func__, sc->current_op);
+#endif
+ return;
+ }
+ sc->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
+ vqs.vsi_id = sc->vsi_res->vsi_id;
+ vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1;
+ vqs.rx_queues = vqs.tx_queues;
+ ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ (u8 *)&vqs, sizeof(vqs));
+ sc->aq_pending |= IXLV_FLAG_AQ_DISABLE_QUEUES;
+ sc->aq_required &= ~IXLV_FLAG_AQ_DISABLE_QUEUES;
+}
+
+/*
+** ixlv_map_queues
+**
+** Request that the PF map queues to interrupt vectors. Misc causes, including
+** admin queue, are always mapped to vector 0.
+*/
+void
+ixlv_map_queues(struct ixlv_sc *sc)
+{
+ struct i40e_virtchnl_irq_map_info *vm;
+ int i, q, len;
+ struct ixl_vsi *vsi = &sc->vsi;
+ struct ixl_queue *que = vsi->queues;
+
+ if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* we already have a command pending */
+#ifdef IXL_DEBUG
+ device_printf(sc->dev, "%s: command %d pending\n",
+ __func__, sc->current_op);
+#endif
+ return;
+ }
+ sc->current_op = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP;
+
+ /* How many queue vectors, adminq uses one */
+ q = sc->msix - 1;
+
+ len = sizeof(struct i40e_virtchnl_irq_map_info) +
+ (sc->msix * sizeof(struct i40e_virtchnl_vector_map));
+ vm = malloc(len, M_DEVBUF, M_NOWAIT);
+ if (!vm) {
+ printf("%s: unable to allocate memory\n", __func__);
+ return;
+ }
+
+ vm->num_vectors = sc->msix;
+ /* Queue vectors first */
+ for (i = 0; i < q; i++, que++) {
+ vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id;
+ vm->vecmap[i].vector_id = i + 1; /* first is adminq */
+ vm->vecmap[i].txq_map = (1 << que->me);
+ vm->vecmap[i].rxq_map = (1 << que->me);
+ }
+
+ /* Misc vector last - this is only for AdminQ messages */
+ vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id;
+ vm->vecmap[i].vector_id = 0;
+ vm->vecmap[i].txq_map = 0;
+ vm->vecmap[i].rxq_map = 0;
+
+ ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ (u8 *)vm, len);
+ free(vm, M_DEVBUF);
+ sc->aq_pending |= IXLV_FLAG_AQ_MAP_VECTORS;
+ sc->aq_required &= ~IXLV_FLAG_AQ_MAP_VECTORS;
+}
+
+/*
+** Scan the Filter List looking for vlans that need
+** to be added, then create the data to hand to the AQ
+** for handling.
+*/
+void
+ixlv_add_vlans(struct ixlv_sc *sc)
+{
+ struct i40e_virtchnl_vlan_filter_list *v;
+ struct ixlv_vlan_filter *f, *ftmp;
+ device_t dev = sc->dev;
+ int len, i = 0, cnt = 0;
+
+ if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN)
+ return;
+
+ sc->current_op = I40E_VIRTCHNL_OP_ADD_VLAN;
+
+ /* Get count of VLAN filters to add */
+ SLIST_FOREACH(f, sc->vlan_filters, next) {
+ if (f->flags & IXL_FILTER_ADD)
+ cnt++;
+ }
+
+ if (!cnt) { /* no work... */
+ sc->aq_required &= ~IXLV_FLAG_AQ_ADD_VLAN_FILTER;
+ sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+ return;
+ }
+
+ len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
+ (cnt * sizeof(u16));
+
+ if (len > IXL_AQ_BUF_SZ) {
+ device_printf(dev, "%s: Exceeded Max AQ Buf size\n",
+ __func__);
+ return;
+ }
+
+ v = malloc(len, M_DEVBUF, M_NOWAIT);
+ if (!v) {
+ device_printf(dev, "%s: unable to allocate memory\n",
+ __func__);
+ return;
+ }
+
+ v->vsi_id = sc->vsi_res->vsi_id;
+ v->num_elements = cnt;
+
+ /* Scan the filter array */
+ SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) {
+ if (f->flags & IXL_FILTER_ADD) {
+ bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16));
+ f->flags = IXL_FILTER_USED;
+ i++;
+ }
+ if (i == cnt)
+ break;
+ }
+ if (i == 0) { /* Should not happen... */
+ device_printf(dev, "%s: i == 0?\n", __func__);
+ return;
+ }
+
+ ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)v, len);
+ free(v, M_DEVBUF);
+ /* add stats? */
+ sc->aq_pending |= IXLV_FLAG_AQ_ADD_VLAN_FILTER;
+ sc->aq_required &= ~IXLV_FLAG_AQ_ADD_VLAN_FILTER;
+}
+
+/*
+** Scan the Filter Table looking for vlans that need
+** to be removed, then create the data to hand to the AQ
+** for handling.
+*/
+void
+ixlv_del_vlans(struct ixlv_sc *sc)
+{
+ device_t dev = sc->dev;
+ struct i40e_virtchnl_vlan_filter_list *v;
+ struct ixlv_vlan_filter *f, *ftmp;
+ int len, i = 0, cnt = 0;
+
+ if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN)
+ return;
+
+ sc->current_op = I40E_VIRTCHNL_OP_DEL_VLAN;
+
+ /* Get count of VLAN filters to delete */
+ SLIST_FOREACH(f, sc->vlan_filters, next) {
+ if (f->flags & IXL_FILTER_DEL)
+ cnt++;
+ }
+
+ if (!cnt) { /* no work... */
+ sc->aq_required &= ~IXLV_FLAG_AQ_DEL_VLAN_FILTER;
+ sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+ return;
+ }
+
+ len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
+ (cnt * sizeof(u16));
+
+ if (len > IXL_AQ_BUF_SZ) {
+ device_printf(dev, "%s: Exceeded Max AQ Buf size\n",
+ __func__);
+ return;
+ }
+
+ v = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (!v) {
+ device_printf(dev, "%s: unable to allocate memory\n",
+ __func__);
+ return;
+ }
+
+ v->vsi_id = sc->vsi_res->vsi_id;
+ v->num_elements = cnt;
+
+ /* Scan the filter array */
+ SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) {
+ if (f->flags & IXL_FILTER_DEL) {
+ bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16));
+ i++;
+ SLIST_REMOVE(sc->vlan_filters, f, ixlv_vlan_filter, next);
+ free(f, M_DEVBUF);
+ }
+ if (i == cnt)
+ break;
+ }
+ if (i == 0) { /* Should not happen... */
+ device_printf(dev, "%s: i == 0?\n", __func__);
+ return;
+ }
+
+ ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)v, len);
+ free(v, M_DEVBUF);
+ /* add stats? */
+ sc->aq_pending |= IXLV_FLAG_AQ_DEL_VLAN_FILTER;
+ sc->aq_required &= ~IXLV_FLAG_AQ_DEL_VLAN_FILTER;
+}
+
+
+/*
+** This routine takes additions to the vsi filter
+** table and creates an Admin Queue call to create
+** the filters in the hardware.
+*/
+void
+ixlv_add_ether_filters(struct ixlv_sc *sc)
+{
+ struct i40e_virtchnl_ether_addr_list *a;
+ struct ixlv_mac_filter *f;
+ device_t dev = sc->dev;
+ int len, j = 0, cnt = 0;
+
+ if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN)
+ return;
+
+ sc->current_op = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS;
+
+ /* Get count of MAC addresses to add */
+ SLIST_FOREACH(f, sc->mac_filters, next) {
+ if (f->flags & IXL_FILTER_ADD)
+ cnt++;
+ }
+ if (cnt == 0) { /* Should not happen... */
+ DDPRINTF(dev, "cnt == 0, exiting...");
+ sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+ sc->aq_required &= ~IXLV_FLAG_AQ_ADD_MAC_FILTER;
+ wakeup(&sc->add_ether_done);
+ return;
+ }
+
+ len = sizeof(struct i40e_virtchnl_ether_addr_list) +
+ (cnt * sizeof(struct i40e_virtchnl_ether_addr));
+
+ a = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (a == NULL) {
+ device_printf(dev, "%s: Failed to get memory for "
+ "virtchnl_ether_addr_list\n", __func__);
+ return;
+ }
+ a->vsi_id = sc->vsi.id;
+ a->num_elements = cnt;
+
+ /* Scan the filter array */
+ SLIST_FOREACH(f, sc->mac_filters, next) {
+ if (f->flags & IXL_FILTER_ADD) {
+ bcopy(f->macaddr, a->list[j].addr, ETHER_ADDR_LEN);
+ f->flags &= ~IXL_FILTER_ADD;
+ j++;
+
+ DDPRINTF(dev, "ADD: " MAC_FORMAT,
+ MAC_FORMAT_ARGS(f->macaddr));
+ }
+ if (j == cnt)
+ break;
+ }
+ DDPRINTF(dev, "len %d, j %d, cnt %d",
+ len, j, cnt);
+ ixlv_send_pf_msg(sc,
+ I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, (u8 *)a, len);
+ /* add stats? */
+ free(a, M_DEVBUF);
+ sc->aq_pending |= IXLV_FLAG_AQ_ADD_MAC_FILTER;
+ sc->aq_required &= ~IXLV_FLAG_AQ_ADD_MAC_FILTER;
+ return;
+}
+
+/*
+** This routine takes filters flagged for deletion in the
+** sc MAC filter list and creates an Admin Queue call
+** to delete those filters in the hardware.
+*/
+void
+ixlv_del_ether_filters(struct ixlv_sc *sc)
+{
+ struct i40e_virtchnl_ether_addr_list *d;
+ device_t dev = sc->dev;
+ struct ixlv_mac_filter *f, *f_temp;
+ int len, j = 0, cnt = 0;
+
+ if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN)
+ return;
+
+ sc->current_op = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS;
+
+ /* Get count of MAC addresses to delete */
+ SLIST_FOREACH(f, sc->mac_filters, next) {
+ if (f->flags & IXL_FILTER_DEL)
+ cnt++;
+ }
+ if (cnt == 0) {
+ DDPRINTF(dev, "cnt == 0, exiting...");
+ sc->aq_required &= ~IXLV_FLAG_AQ_DEL_MAC_FILTER;
+ sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+ wakeup(&sc->del_ether_done);
+ return;
+ }
+
+ len = sizeof(struct i40e_virtchnl_ether_addr_list) +
+ (cnt * sizeof(struct i40e_virtchnl_ether_addr));
+
+ d = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (d == NULL) {
+ device_printf(dev, "%s: Failed to get memory for "
+ "virtchnl_ether_addr_list\n", __func__);
+ return;
+ }
+ d->vsi_id = sc->vsi.id;
+ d->num_elements = cnt;
+
+ /* Scan the filter array */
+ SLIST_FOREACH_SAFE(f, sc->mac_filters, next, f_temp) {
+ if (f->flags & IXL_FILTER_DEL) {
+ bcopy(f->macaddr, d->list[j].addr, ETHER_ADDR_LEN);
+ DDPRINTF(dev, "DEL: " MAC_FORMAT,
+ MAC_FORMAT_ARGS(f->macaddr));
+ j++;
+ SLIST_REMOVE(sc->mac_filters, f, ixlv_mac_filter, next);
+ free(f, M_DEVBUF);
+ }
+ if (j == cnt)
+ break;
+ }
+ ixlv_send_pf_msg(sc,
+ I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, (u8 *)d, len);
+ /* add stats? */
+ free(d, M_DEVBUF);
+ sc->aq_pending |= IXLV_FLAG_AQ_DEL_MAC_FILTER;
+ sc->aq_required &= ~IXLV_FLAG_AQ_DEL_MAC_FILTER;
+ return;
+}
+
+/*
+** ixlv_request_reset
+** Request that the PF reset this VF. No response is expected.
+*/
+void
+ixlv_request_reset(struct ixlv_sc *sc)
+{
+ /*
+ ** Set the reset status to "in progress" before
+	** the request; this avoids any possibility of
+ ** a mistaken early detection of completion.
+ */
+ wr32(&sc->hw, I40E_VFGEN_RSTAT, I40E_VFR_INPROGRESS);
+ ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_RESET_VF, NULL, 0);
+ sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+}
+
+/*
+** ixlv_request_stats
+** Request the statistics for this VF's VSI from PF.
+*/
+void
+ixlv_request_stats(struct ixlv_sc *sc)
+{
+ struct i40e_virtchnl_queue_select vqs;
+ int error = 0;
+
+ if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN)
+ return;
+
+ sc->current_op = I40E_VIRTCHNL_OP_GET_STATS;
+ vqs.vsi_id = sc->vsi_res->vsi_id;
+ error = ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_STATS,
+ (u8 *)&vqs, sizeof(vqs));
+ /* Low priority, ok if it fails */
+ if (error)
+ sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+}
+
+/*
+** Updates driver's stats counters with VSI stats returned from PF.
+*/
+void
+ixlv_update_stats_counters(struct ixlv_sc *sc, struct i40e_eth_stats *es)
+{
+ struct ifnet *ifp = sc->vsi.ifp;
+
+ ifp->if_ipackets = es->rx_unicast +
+ es->rx_multicast +
+ es->rx_broadcast;
+ ifp->if_opackets = es->tx_unicast +
+ es->tx_multicast +
+ es->tx_broadcast;
+ ifp->if_ibytes = es->rx_bytes;
+ ifp->if_obytes = es->tx_bytes;
+ ifp->if_imcasts = es->rx_multicast;
+ ifp->if_omcasts = es->tx_multicast;
+
+ ifp->if_oerrors = es->tx_errors;
+ ifp->if_iqdrops = es->rx_discards;
+ ifp->if_noproto = es->rx_unknown_protocol;
+
+ sc->vsi.eth_stats = *es;
+}
+
+/*
+** ixlv_vc_completion
+**
+** Asynchronous completion function for admin queue messages. Rather than busy
+** wait, we fire off our requests and assume that no errors will be returned.
+** This function handles the reply messages.
+*/
+void
+ixlv_vc_completion(struct ixlv_sc *sc,
+ enum i40e_virtchnl_ops v_opcode,
+ i40e_status v_retval, u8 *msg, u16 msglen)
+{
+ device_t dev = sc->dev;
+ struct ixl_vsi *vsi = &sc->vsi;
+
+ if (v_opcode == I40E_VIRTCHNL_OP_EVENT) {
+ struct i40e_virtchnl_pf_event *vpe =
+ (struct i40e_virtchnl_pf_event *)msg;
+
+ switch (vpe->event) {
+ case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
+ vsi->link_up =
+ vpe->event_data.link_event.link_status;
+ vsi->link_speed =
+ vpe->event_data.link_event.link_speed;
+ break;
+ case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
+ device_printf(dev, "PF initiated reset!\n");
+ sc->init_state = IXLV_RESET_PENDING;
+ ixlv_init(sc);
+ break;
+ default:
+ device_printf(dev, "%s: Unknown event %d from AQ\n",
+ __func__, vpe->event);
+ break;
+ }
+
+ return;
+ }
+
+ if (v_opcode != sc->current_op
+ && sc->current_op != I40E_VIRTCHNL_OP_GET_STATS) {
+ device_printf(dev, "%s: Pending op is %d, received %d.\n",
+ __func__, sc->current_op, v_opcode);
+ sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+ return;
+ }
+
+ /* Catch-all error response */
+ if (v_retval) {
+ device_printf(dev,
+ "%s: AQ returned error %d to our request %d!\n",
+ __func__, v_retval, v_opcode);
+ }
+
+#ifdef IXL_DEBUG
+ if (v_opcode != I40E_VIRTCHNL_OP_GET_STATS)
+ DDPRINTF(dev, "opcode %d", v_opcode);
+#endif
+
+ switch (v_opcode) {
+ case I40E_VIRTCHNL_OP_GET_STATS:
+ ixlv_update_stats_counters(sc, (struct i40e_eth_stats *)msg);
+ break;
+ case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
+ sc->aq_pending &= ~(IXLV_FLAG_AQ_ADD_MAC_FILTER);
+ if (v_retval) {
+ device_printf(dev, "WARNING: Error adding VF mac filter!\n");
+ device_printf(dev, "WARNING: Device may not receive traffic!\n");
+ }
+ break;
+ case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
+ sc->aq_pending &= ~(IXLV_FLAG_AQ_DEL_MAC_FILTER);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ sc->aq_pending &= ~(IXLV_FLAG_AQ_CONFIGURE_PROMISC);
+ break;
+ case I40E_VIRTCHNL_OP_ADD_VLAN:
+ sc->aq_pending &= ~(IXLV_FLAG_AQ_ADD_VLAN_FILTER);
+ break;
+ case I40E_VIRTCHNL_OP_DEL_VLAN:
+ sc->aq_pending &= ~(IXLV_FLAG_AQ_DEL_VLAN_FILTER);
+ break;
+ case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
+ sc->aq_pending &= ~(IXLV_FLAG_AQ_ENABLE_QUEUES);
+ if (v_retval == 0) {
+ /* Turn on all interrupts */
+ ixlv_enable_intr(vsi);
+ /* And inform the stack we're ready */
+ vsi->ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ vsi->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+ }
+ break;
+ case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
+ sc->aq_pending &= ~(IXLV_FLAG_AQ_DISABLE_QUEUES);
+ if (v_retval == 0) {
+ /* Turn off all interrupts */
+ ixlv_disable_intr(vsi);
+ /* Tell the stack that the interface is no longer active */
+ vsi->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+ }
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ sc->aq_pending &= ~(IXLV_FLAG_AQ_CONFIGURE_QUEUES);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ sc->aq_pending &= ~(IXLV_FLAG_AQ_MAP_VECTORS);
+ break;
+ default:
+ device_printf(dev,
+ "%s: Received unexpected message %d from PF.\n",
+ __func__, v_opcode);
+ break;
+ }
+ sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+ return;
+}