-rw-r--r--  share/man/man4/Makefile                     1
-rw-r--r--  share/man/man4/altq.4                       1
-rw-r--r--  share/man/man4/cxgbe.4                    167
-rw-r--r--  share/man/man4/vlan.4                       1
-rw-r--r--  sys/conf/NOTES                              3
-rw-r--r--  sys/conf/files                              6
-rw-r--r--  sys/conf/kern.pre.mk                        4
-rw-r--r--  sys/dev/cxgbe/adapter.h                   569
-rw-r--r--  sys/dev/cxgbe/common/common.h             517
-rw-r--r--  sys/dev/cxgbe/common/t4_hw.c             4590
-rw-r--r--  sys/dev/cxgbe/common/t4_hw.h              185
-rw-r--r--  sys/dev/cxgbe/common/t4_msg.h            2138
-rw-r--r--  sys/dev/cxgbe/common/t4_regs.h          23972
-rw-r--r--  sys/dev/cxgbe/common/t4_regs_values.h     192
-rw-r--r--  sys/dev/cxgbe/common/t4_tcb.h             753
-rw-r--r--  sys/dev/cxgbe/common/t4fw_interface.h    5392
-rw-r--r--  sys/dev/cxgbe/offload.h                    86
-rw-r--r--  sys/dev/cxgbe/osdep.h                     153
-rw-r--r--  sys/dev/cxgbe/t4_ioctl.h                   58
-rw-r--r--  sys/dev/cxgbe/t4_main.c                  2747
-rw-r--r--  sys/dev/cxgbe/t4_sge.c                   2392
-rw-r--r--  sys/modules/Makefile                        1
-rw-r--r--  sys/modules/cxgbe/Makefile                 16
-rw-r--r--  usr.sbin/sysinstall/devices.c               1
24 files changed, 43943 insertions, 2 deletions
diff --git a/share/man/man4/Makefile b/share/man/man4/Makefile
index 91a421b..399460b 100644
--- a/share/man/man4/Makefile
+++ b/share/man/man4/Makefile
@@ -83,6 +83,7 @@ MAN= aac.4 \
crypto.4 \
cue.4 \
cxgb.4 \
+ cxgbe.4 \
cy.4 \
da.4 \
dc.4 \
diff --git a/share/man/man4/altq.4 b/share/man/man4/altq.4
index 01a3b2f..7cfced4 100644
--- a/share/man/man4/altq.4
+++ b/share/man/man4/altq.4
@@ -127,6 +127,7 @@ They have been applied to the following hardware drivers:
.Xr bfe 4 ,
.Xr bge 4 ,
.Xr cas 4 ,
+.Xr cxgbe 4 ,
.Xr dc 4 ,
.Xr de 4 ,
.Xr ed 4 ,
diff --git a/share/man/man4/cxgbe.4 b/share/man/man4/cxgbe.4
new file mode 100644
index 0000000..467a15b
--- /dev/null
+++ b/share/man/man4/cxgbe.4
@@ -0,0 +1,167 @@
+.\" Copyright (c) 2011, Chelsio Inc
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions are met:
+.\"
+.\" 1. Redistributions of source code must retain the above copyright notice,
+.\" this list of conditions and the following disclaimer.
+.\"
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" 3. Neither the name of the Chelsio Inc nor the names of its
+.\" contributors may be used to endorse or promote products derived from
+.\" this software without specific prior written permission.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+.\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+.\" LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+.\" CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+.\" SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+.\" INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+.\" CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+.\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+.\" POSSIBILITY OF SUCH DAMAGE.
+.\"
+.\" * Other names and brands may be claimed as the property of others.
+.\"
+.\" $FreeBSD$
+.\"
+.Dd February 14, 2011
+.Dt CXGBE 4
+.Os
+.Sh NAME
+.Nm cxgbe
+.Nd "Chelsio T4 10Gb and 1Gb Ethernet adapter driver"
+.Sh SYNOPSIS
+To compile this driver into the kernel,
+place the following lines in your
+kernel configuration file:
+.Bd -ragged -offset indent
+.Cd "device cxgbe"
+.Ed
+.Pp
+To load the driver as a
+module at boot time, place the following line in
+.Xr loader.conf 5 :
+.Bd -literal -offset indent
+if_cxgbe_load="YES"
+.Ed
+.Sh DESCRIPTION
+The
+.Nm
+driver provides support for PCI Express Ethernet adapters based on
+the Chelsio Terminator 4 (T4) ASIC.
+The driver supports Jumbo Frames, Transmit/Receive checksum offload,
+TCP segmentation offload (TSO), Large Receive Offload (LRO), VLAN
+tag insertion/extraction, VLAN checksum offload, VLAN TSO, and
+Receive Side Steering (RSS).
+.Pp
+For further hardware information and questions related to hardware
+requirements, see
+.Pa http://www.chelsio.com/ .
+.Pp
+For more information on configuring this device, see
+.Xr ifconfig 8 .
+.Sh HARDWARE
+The
+.Nm
+driver supports 10Gb and 1Gb Ethernet adapters based on the T4 ASIC:
+.Pp
+.Bl -bullet -compact
+.It
+Chelsio T420-CR
+.It
+Chelsio T422-CR
+.It
+Chelsio T440-CR
+.It
+Chelsio T420-BCH
+.It
+Chelsio T440-BCH
+.It
+Chelsio T440-CH
+.It
+Chelsio T420-SO
+.It
+Chelsio T420-CX
+.It
+Chelsio T420-BT
+.It
+Chelsio T404-BT
+.El
+.Sh LOADER TUNABLES
+Tunables can be set at the
+.Xr loader 8
+prompt before booting the kernel or stored in
+.Xr loader.conf 5 .
+.Bl -tag -width indent
+.It Va hw.cxgbe.max_ntxq_10G_port
+The maximum number of tx queues to use for a 10Gb port.
+The default value is 8.
+.It Va hw.cxgbe.max_nrxq_10G_port
+The maximum number of rx queues to use for a 10Gb port.
+The default value is 8.
+.It Va hw.cxgbe.max_ntxq_1G_port
+The maximum number of tx queues to use for a 1Gb port.
+The default value is 2.
+.It Va hw.cxgbe.max_nrxq_1G_port
+The maximum number of rx queues to use for a 1Gb port.
+The default value is 2.
+.It Va hw.cxgbe.holdoff_timer_idx_10G
+.It Va hw.cxgbe.holdoff_timer_idx_1G
+The timer index value to use to delay interrupts.
+The holdoff timer list has the values 1, 5, 10, 50, 100, and 200
+by default (all values are in microseconds) and the index selects a
+value from this list.
+The default value is 1 for both 10Gb and 1Gb ports, which means the
+timer value is 5us.
+.It Va hw.cxgbe.holdoff_pktc_idx_10G
+.It Va hw.cxgbe.holdoff_pktc_idx_1G
+The packet-count index value to use to delay interrupts.
+The packet-count list has the values 1, 8, 16, and 32 by default
+and the index selects a value from this list.
+The default value is 2 for both 10Gb and 1Gb ports, which means 16
+packets (or the holdoff timer going off) before an interrupt is
+generated.
+.It Va hw.cxgbe.qsize_txq
+The size, in number of entries, of the descriptor ring used for a tx
+queue.
+A buf_ring of the same size is also allocated for additional
+software queuing. See
+.Xr ifnet 9 .
+The default value is 1024.
+.It Va hw.cxgbe.qsize_rxq
+The size, in number of entries, of the descriptor ring used for an
+rx queue.
+The default value is 1024.
+.El
+.Sh SUPPORT
+For general information and support,
+go to the Chelsio support website at:
+.Pa http://www.chelsio.com/ .
+.Pp
+If an issue is identified with this driver with a supported adapter,
+email all the specific information related to the issue to
+.Aq support@chelsio.com .
+.Sh SEE ALSO
+.Xr altq 4 ,
+.Xr arp 4 ,
+.Xr cxgb 4 ,
+.Xr netintro 4 ,
+.Xr ng_ether 4 ,
+.Xr ifconfig 8
+.Sh HISTORY
+The
+.Nm
+device driver first appeared in
+.Fx 9.0 .
+.Sh AUTHORS
+.An -nosplit
+The
+.Nm
+driver was written by
+.An Navdeep Parhar Aq np@FreeBSD.org .
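Purely as an illustration of the LOADER TUNABLES documented in the manual page above (not part of this commit), a loader.conf fragment that loads the module and overrides a few of the defaults might look like this; the values are arbitrary examples, not recommendations:

    # Load the driver and override some of the defaults described above.
    if_cxgbe_load="YES"
    hw.cxgbe.max_ntxq_10G_port="4"
    hw.cxgbe.max_nrxq_10G_port="4"
    # Index 2 selects 10us from the default holdoff timer list (1, 5, 10, 50, 100, 200).
    hw.cxgbe.holdoff_timer_idx_10G="2"
    hw.cxgbe.qsize_txq="2048"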
diff --git a/share/man/man4/vlan.4 b/share/man/man4/vlan.4
index 43e8ab5..e0e6152 100644
--- a/share/man/man4/vlan.4
+++ b/share/man/man4/vlan.4
@@ -128,6 +128,7 @@ in the hardware is limited to the following devices:
.Xr bce 4 ,
.Xr bge 4 ,
.Xr cxgb 4 ,
+.Xr cxgbe 4 ,
.Xr em 4 ,
.Xr igb 4 ,
.Xr ixgb 4 ,
diff --git a/sys/conf/NOTES b/sys/conf/NOTES
index 74754f5..462894f 100644
--- a/sys/conf/NOTES
+++ b/sys/conf/NOTES
@@ -1902,6 +1902,8 @@ device xmphy # XaQti XMAC II
# cas: Sun Cassini/Cassini+ and National Semiconductor DP83065 Saturn
# cm: Arcnet SMC COM90c26 / SMC COM90c56
# (and SMC COM90c66 in '56 compatibility mode) adapters.
+# cxgbe: Support for PCI express 10Gb/1Gb adapters based on the Chelsio T4
+# (Terminator 4) ASIC.
# dc: Support for PCI fast ethernet adapters based on the DEC/Intel 21143
# and various workalikes including:
# the ADMtek AL981 Comet and AN985 Centaur, the ASIX Electronics
@@ -2073,6 +2075,7 @@ device wb # Winbond W89C840F
device xl # 3Com 3c90x (``Boomerang'', ``Cyclone'')
# PCI Ethernet NICs.
+device cxgbe # Chelsio T4 10GbE PCIe adapter
device de # DEC/Intel DC21x4x (``Tulip'')
device em # Intel Pro/1000 Gigabit Ethernet
device igb # Intel Pro/1000 PCIE Gigabit Ethernet
diff --git a/sys/conf/files b/sys/conf/files
index 8e47500..86ab783 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -853,6 +853,12 @@ dev/cxgb/sys/uipc_mvec.c optional cxgb pci \
compile-with "${NORMAL_C} -I$S/dev/cxgb"
dev/cxgb/cxgb_t3fw.c optional cxgb cxgb_t3fw \
compile-with "${NORMAL_C} -I$S/dev/cxgb"
+dev/cxgbe/t4_main.c optional cxgbe pci \
+ compile-with "${NORMAL_C} -I$S/dev/cxgbe"
+dev/cxgbe/t4_sge.c optional cxgbe pci \
+ compile-with "${NORMAL_C} -I$S/dev/cxgbe"
+dev/cxgbe/common/t4_hw.c optional cxgbe pci \
+ compile-with "${NORMAL_C} -I$S/dev/cxgbe"
dev/cy/cy.c optional cy
dev/cy/cy_isa.c optional cy isa
dev/cy/cy_pci.c optional cy pci
diff --git a/sys/conf/kern.pre.mk b/sys/conf/kern.pre.mk
index b10d8d7..818787e 100644
--- a/sys/conf/kern.pre.mk
+++ b/sys/conf/kern.pre.mk
@@ -82,8 +82,8 @@ INCLUDES+= -I$S/dev/twa
# ... and XFS
INCLUDES+= -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs
-# ... and the same for cxgb
-INCLUDES+= -I$S/dev/cxgb
+# ... and the same for cxgb and cxgbe
+INCLUDES+= -I$S/dev/cxgb -I$S/dev/cxgbe
.endif
diff --git a/sys/dev/cxgbe/adapter.h b/sys/dev/cxgbe/adapter.h
new file mode 100644
index 0000000..c0da697
--- /dev/null
+++ b/sys/dev/cxgbe/adapter.h
@@ -0,0 +1,569 @@
+/*-
+ * Copyright (c) 2011 Chelsio Communications, Inc.
+ * All rights reserved.
+ * Written by: Navdeep Parhar <np@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef __T4_ADAPTER_H__
+#define __T4_ADAPTER_H__
+
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/types.h>
+#include <sys/malloc.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcireg.h>
+#include <machine/bus.h>
+#include <sys/socket.h>
+#include <sys/sysctl.h>
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <net/if_media.h>
+#include <netinet/tcp_lro.h>
+
+#include "offload.h"
+#include "common/t4fw_interface.h"
+
+#define T4_FWNAME "t4fw"
+
+MALLOC_DECLARE(M_CXGBE);
+#define CXGBE_UNIMPLEMENTED(s) \
+ panic("%s (%s, line %d) not implemented yet.", s, __FILE__, __LINE__)
+
+#if defined(__i386__) || defined(__amd64__)
+static __inline void
+prefetch(void *x)
+{
+ __asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
+}
+#else
+#define prefetch(x)
+#endif
+
+#ifdef __amd64__
+/* XXX: need systemwide bus_space_read_8/bus_space_write_8 */
+static __inline uint64_t
+t4_bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle,
+ bus_size_t offset)
+{
+ KASSERT(tag != X86_BUS_SPACE_IO,
+ ("64-bit reads from I/O space not possible."));
+
+ return (*(volatile uint64_t *)(handle + offset));
+}
+
+static __inline void
+t4_bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, uint64_t value)
+{
+ KASSERT(tag != X86_BUS_SPACE_IO,
+ ("64-bit writes to I/O space not possible."));
+ *(volatile uint64_t *)(bsh + offset) = value;
+}
+#else
+static __inline uint64_t
+t4_bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle,
+ bus_size_t offset)
+{
+ return (uint64_t)bus_space_read_4(tag, handle, offset) +
+ ((uint64_t)bus_space_read_4(tag, handle, offset + 4) << 32);
+}
+
+static __inline void
+t4_bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t bsh,
+ bus_size_t offset, uint64_t value)
+{
+ bus_space_write_4(tag, bsh, offset, value);
+ bus_space_write_4(tag, bsh, offset + 4, value >> 32);
+}
+#endif
+
+struct adapter;
+typedef struct adapter adapter_t;
+
+enum {
+ FW_IQ_QSIZE = 256,
+ FW_IQ_ESIZE = 64, /* At least 64 mandated by the firmware spec */
+
+ RX_IQ_QSIZE = 1024,
+ RX_IQ_ESIZE = 64, /* At least 64 so CPL_RX_PKT will fit */
+
+ RX_FL_ESIZE = 64, /* 8 64bit addresses */
+
+ FL_BUF_SIZES = 4,
+
+ TX_EQ_QSIZE = 1024,
+ TX_EQ_ESIZE = 64,
+ TX_SGL_SEGS = 36,
+ TX_WR_FLITS = SGE_MAX_WR_LEN / 8
+};
+
+enum {
+ /* adapter flags */
+ FULL_INIT_DONE = (1 << 0),
+ FW_OK = (1 << 1),
+ INTR_FWD = (1 << 2),
+
+ CXGBE_BUSY = (1 << 9),
+
+ /* port flags */
+ DOOMED = (1 << 0),
+ VI_ENABLED = (1 << 1),
+};
+
+#define IS_DOOMED(pi) (pi->flags & DOOMED)
+#define SET_DOOMED(pi) do {pi->flags |= DOOMED;} while (0)
+#define IS_BUSY(sc) (sc->flags & CXGBE_BUSY)
+#define SET_BUSY(sc) do {sc->flags |= CXGBE_BUSY;} while (0)
+#define CLR_BUSY(sc) do {sc->flags &= ~CXGBE_BUSY;} while (0)
+
+struct port_info {
+ device_t dev;
+ struct adapter *adapter;
+
+ struct ifnet *ifp;
+ struct ifmedia media;
+
+ struct mtx pi_lock;
+ char lockname[16];
+ unsigned long flags;
+ int if_flags;
+
+ uint16_t viid;
+ int16_t xact_addr_filt;/* index of exact MAC address filter */
+ uint16_t rss_size; /* size of VI's RSS table slice */
+ uint8_t lport; /* associated offload logical port */
+ int8_t mdio_addr;
+ uint8_t port_type;
+ uint8_t mod_type;
+ uint8_t port_id;
+ uint8_t tx_chan;
+
+ /* These need to be int as they are used in sysctl */
+ int ntxq; /* # of tx queues */
+ int first_txq; /* index of first tx queue */
+ int nrxq; /* # of rx queues */
+ int first_rxq; /* index of first rx queue */
+ int tmr_idx;
+ int pktc_idx;
+ int qsize_rxq;
+ int qsize_txq;
+
+ struct link_config link_cfg;
+ struct port_stats stats;
+
+ struct callout tick;
+ struct sysctl_ctx_list ctx; /* lives from ifconfig up to down */
+ struct sysctl_oid *oid_rxq;
+ struct sysctl_oid *oid_txq;
+
+ uint8_t hw_addr[ETHER_ADDR_LEN]; /* factory MAC address, won't change */
+};
+
+struct fl_sdesc {
+ struct mbuf *m;
+ bus_dmamap_t map;
+ caddr_t cl;
+ uint8_t tag_idx; /* the sc->fl_tag this map comes from */
+#ifdef INVARIANTS
+ __be64 ba_tag;
+#endif
+};
+
+struct tx_desc {
+ __be64 flit[8];
+};
+
+struct tx_map {
+ struct mbuf *m;
+ bus_dmamap_t map;
+};
+
+struct tx_sdesc {
+ uint8_t desc_used; /* # of hardware descriptors used by the WR */
+ uint8_t map_used; /* # of frames sent out in the WR */
+};
+
+typedef void (iq_intr_handler_t)(void *);
+
+enum {
+ /* iq flags */
+ IQ_ALLOCATED = (1 << 1), /* firmware resources allocated */
+ IQ_STARTED = (1 << 2), /* started */
+};
+
+/*
+ * Ingress Queue: T4 is producer, driver is consumer.
+ */
+struct sge_iq {
+ bus_dma_tag_t desc_tag;
+ bus_dmamap_t desc_map;
+ struct mtx iq_lock;
+ char lockname[16];
+ unsigned int flags;
+ struct adapter *adapter;
+
+ __be64 *desc; /* KVA of descriptor ring */
+ bus_addr_t ba; /* bus address of descriptor ring */
+ const __be64 *cdesc; /* current descriptor */
+ uint8_t gen; /* generation bit */
+ uint8_t intr_params; /* interrupt holdoff parameters */
+ int8_t intr_pktc_idx; /* packet count threshold index */
+ uint8_t intr_next; /* holdoff for next interrupt */
+ uint8_t esize; /* size (bytes) of each entry in the queue */
+ uint16_t qsize; /* size (# of entries) of the queue */
+ uint16_t cidx; /* consumer index */
+ uint16_t cntxt_id; /* SGE context id for the iq */
+ uint16_t abs_id; /* absolute SGE id for the iq */
+ iq_intr_handler_t *handler;
+};
+
+enum {
+ /* eq flags */
+ EQ_ALLOCATED = (1 << 1), /* firmware resources allocated */
+ EQ_STARTED = (1 << 2), /* started */
+ EQ_STALLED = (1 << 3), /* currently stalled */
+};
+
+/*
+ * Egress Queue: driver is producer, T4 is consumer.
+ *
+ * Note: A free list is an egress queue (driver produces the buffers and T4
+ * consumes them) but it's special enough to have its own struct (see sge_fl).
+ */
+struct sge_eq {
+ bus_dma_tag_t tx_tag; /* tag for transmit buffers */
+ bus_dma_tag_t desc_tag;
+ bus_dmamap_t desc_map;
+ char lockname[16];
+ unsigned int flags;
+ struct mtx eq_lock;
+
+ struct tx_desc *desc; /* KVA of descriptor ring */
+ bus_addr_t ba; /* bus address of descriptor ring */
+ struct tx_sdesc *sdesc; /* KVA of software descriptor ring */
+ struct buf_ring *br; /* tx buffer ring */
+ struct sge_qstat *spg; /* status page, for convenience */
+ uint16_t cap; /* max # of desc, for convenience */
+ uint16_t avail; /* available descriptors, for convenience */
+ uint16_t qsize; /* size (# of entries) of the queue */
+ uint16_t cidx; /* consumer idx (desc idx) */
+ uint16_t pidx; /* producer idx (desc idx) */
+ uint16_t pending; /* # of descriptors used since last doorbell */
+ uint32_t cntxt_id; /* SGE context id for the eq */
+
+ /* DMA maps used for tx */
+ struct tx_map *maps;
+ uint32_t map_total; /* # of DMA maps */
+ uint32_t map_pidx; /* next map to be used */
+ uint32_t map_cidx; /* reclaimed up to this index */
+ uint32_t map_avail; /* # of available maps */
+} __aligned(CACHE_LINE_SIZE);
+
+struct sge_fl {
+ bus_dma_tag_t desc_tag;
+ bus_dmamap_t desc_map;
+ bus_dma_tag_t tag[FL_BUF_SIZES];
+ uint8_t tag_idx;
+ struct mtx fl_lock;
+ char lockname[16];
+
+ __be64 *desc; /* KVA of descriptor ring, ptr to addresses */
+ bus_addr_t ba; /* bus address of descriptor ring */
+ struct fl_sdesc *sdesc; /* KVA of software descriptor ring */
+ uint32_t cap; /* max # of buffers, for convenience */
+ uint16_t qsize; /* size (# of entries) of the queue */
+ uint16_t cntxt_id; /* SGE context id for the freelist */
+ uint32_t cidx; /* consumer idx (buffer idx, NOT hw desc idx) */
+ uint32_t pidx; /* producer idx (buffer idx, NOT hw desc idx) */
+ uint32_t needed; /* # of buffers needed to fill up fl. */
+ uint32_t pending; /* # of bufs allocated since last doorbell */
+ unsigned int dmamap_failed;
+};
+
+/* txq: SGE egress queue + miscellaneous items */
+struct sge_txq {
+ struct sge_eq eq; /* MUST be first */
+ struct mbuf *m; /* held up due to temporary resource shortage */
+
+ /* stats for common events first */
+
+ uint64_t txcsum; /* # of times hardware assisted with checksum */
+ uint64_t tso_wrs; /* # of IPv4 TSO work requests */
+ uint64_t vlan_insertion;/* # of times VLAN tag was inserted */
+ uint64_t imm_wrs; /* # of work requests with immediate data */
+ uint64_t sgl_wrs; /* # of work requests with direct SGL */
+ uint64_t txpkt_wrs; /* # of txpkt work requests (not coalesced) */
+ uint64_t txpkts_wrs; /* # of coalesced tx work requests */
+ uint64_t txpkts_pkts; /* # of frames in coalesced tx work requests */
+
+ /* stats for not-that-common events */
+
+ uint32_t no_dmamap; /* no DMA map to load the mbuf */
+ uint32_t no_desc; /* out of hardware descriptors */
+ uint32_t egr_update; /* # of SGE_EGR_UPDATE notifications for txq */
+};
+
+enum {
+ RXQ_LRO_ENABLED = (1 << 0)
+};
+/* rxq: SGE ingress queue + SGE free list + miscellaneous items */
+struct sge_rxq {
+ struct sge_iq iq; /* MUST be first */
+ struct sge_fl fl;
+
+ unsigned int flags;
+ struct port_info *port; /* the port this rxq belongs to */
+ struct lro_ctrl lro; /* LRO state */
+
+ /* stats for common events first */
+
+ uint64_t rxcsum; /* # of times hardware assisted with checksum */
+ uint64_t vlan_extraction;/* # of times VLAN tag was extracted */
+
+ /* stats for not-that-common events */
+
+} __aligned(CACHE_LINE_SIZE);
+
+struct sge {
+ uint16_t timer_val[SGE_NTIMERS];
+ uint8_t counter_val[SGE_NCOUNTERS];
+
+ int nrxq; /* total rx queues (all ports and the rest) */
+ int ntxq; /* total tx queues (all ports and the rest) */
+ int niq; /* total ingress queues */
+ int neq; /* total egress queues */
+
+ struct sge_iq fwq; /* Firmware event queue */
+ struct sge_iq *fiq; /* Forwarded interrupt queues (INTR_FWD) */
+ struct sge_txq *txq; /* NIC tx queues */
+ struct sge_rxq *rxq; /* NIC rx queues */
+
+ uint16_t iq_start;
+ int eq_start;
+ struct sge_iq **iqmap; /* iq->cntxt_id to iq mapping */
+ struct sge_eq **eqmap; /* eq->cntxt_id to eq mapping */
+};
+
+struct adapter {
+ device_t dev;
+ struct cdev *cdev;
+
+ /* PCIe register resources */
+ int regs_rid;
+ struct resource *regs_res;
+ int msix_rid;
+ struct resource *msix_res;
+ bus_space_handle_t bh;
+ bus_space_tag_t bt;
+ bus_size_t mmio_len;
+
+ unsigned int pf;
+ unsigned int mbox;
+
+ /* Interrupt information */
+ int intr_type;
+ int intr_count;
+ struct irq {
+ struct resource *res;
+ int rid;
+ void *tag;
+ } *irq;
+
+ bus_dma_tag_t dmat; /* Parent DMA tag */
+
+ struct sge sge;
+
+ struct port_info *port[MAX_NPORTS];
+ uint8_t chan_map[NCHAN];
+
+ struct tid_info tids;
+
+ int registered_device_map;
+ int open_device_map;
+ int flags;
+
+ char fw_version[32];
+ struct adapter_params params;
+ struct t4_virt_res vres;
+
+ struct mtx sc_lock;
+ char lockname[16];
+};
+
+#define ADAPTER_LOCK(sc) mtx_lock(&(sc)->sc_lock)
+#define ADAPTER_UNLOCK(sc) mtx_unlock(&(sc)->sc_lock)
+#define ADAPTER_LOCK_ASSERT_OWNED(sc) mtx_assert(&(sc)->sc_lock, MA_OWNED)
+#define ADAPTER_LOCK_ASSERT_NOTOWNED(sc) mtx_assert(&(sc)->sc_lock, MA_NOTOWNED)
+
+#define PORT_LOCK(pi) mtx_lock(&(pi)->pi_lock)
+#define PORT_UNLOCK(pi) mtx_unlock(&(pi)->pi_lock)
+#define PORT_LOCK_ASSERT_OWNED(pi) mtx_assert(&(pi)->pi_lock, MA_OWNED)
+#define PORT_LOCK_ASSERT_NOTOWNED(pi) mtx_assert(&(pi)->pi_lock, MA_NOTOWNED)
+
+#define IQ_LOCK(iq) mtx_lock(&(iq)->iq_lock)
+#define IQ_UNLOCK(iq) mtx_unlock(&(iq)->iq_lock)
+#define IQ_LOCK_ASSERT_OWNED(iq) mtx_assert(&(iq)->iq_lock, MA_OWNED)
+#define IQ_LOCK_ASSERT_NOTOWNED(iq) mtx_assert(&(iq)->iq_lock, MA_NOTOWNED)
+
+#define FL_LOCK(fl) mtx_lock(&(fl)->fl_lock)
+#define FL_TRYLOCK(fl) mtx_trylock(&(fl)->fl_lock)
+#define FL_UNLOCK(fl) mtx_unlock(&(fl)->fl_lock)
+#define FL_LOCK_ASSERT_OWNED(fl) mtx_assert(&(fl)->fl_lock, MA_OWNED)
+#define FL_LOCK_ASSERT_NOTOWNED(fl) mtx_assert(&(fl)->fl_lock, MA_NOTOWNED)
+
+#define RXQ_LOCK(rxq) IQ_LOCK(&(rxq)->iq)
+#define RXQ_UNLOCK(rxq) IQ_UNLOCK(&(rxq)->iq)
+#define RXQ_LOCK_ASSERT_OWNED(rxq) IQ_LOCK_ASSERT_OWNED(&(rxq)->iq)
+#define RXQ_LOCK_ASSERT_NOTOWNED(rxq) IQ_LOCK_ASSERT_NOTOWNED(&(rxq)->iq)
+
+#define RXQ_FL_LOCK(rxq) FL_LOCK(&(rxq)->fl)
+#define RXQ_FL_UNLOCK(rxq) FL_UNLOCK(&(rxq)->fl)
+#define RXQ_FL_LOCK_ASSERT_OWNED(rxq) FL_LOCK_ASSERT_OWNED(&(rxq)->fl)
+#define RXQ_FL_LOCK_ASSERT_NOTOWNED(rxq) FL_LOCK_ASSERT_NOTOWNED(&(rxq)->fl)
+
+#define EQ_LOCK(eq) mtx_lock(&(eq)->eq_lock)
+#define EQ_TRYLOCK(eq) mtx_trylock(&(eq)->eq_lock)
+#define EQ_UNLOCK(eq) mtx_unlock(&(eq)->eq_lock)
+#define EQ_LOCK_ASSERT_OWNED(eq) mtx_assert(&(eq)->eq_lock, MA_OWNED)
+#define EQ_LOCK_ASSERT_NOTOWNED(eq) mtx_assert(&(eq)->eq_lock, MA_NOTOWNED)
+
+#define TXQ_LOCK(txq) EQ_LOCK(&(txq)->eq)
+#define TXQ_TRYLOCK(txq) EQ_TRYLOCK(&(txq)->eq)
+#define TXQ_UNLOCK(txq) EQ_UNLOCK(&(txq)->eq)
+#define TXQ_LOCK_ASSERT_OWNED(txq) EQ_LOCK_ASSERT_OWNED(&(txq)->eq)
+#define TXQ_LOCK_ASSERT_NOTOWNED(txq) EQ_LOCK_ASSERT_NOTOWNED(&(txq)->eq)
+
+#define for_each_txq(pi, iter, txq) \
+ txq = &pi->adapter->sge.txq[pi->first_txq]; \
+ for (iter = 0; iter < pi->ntxq; ++iter, ++txq)
+#define for_each_rxq(pi, iter, rxq) \
+ rxq = &pi->adapter->sge.rxq[pi->first_rxq]; \
+ for (iter = 0; iter < pi->nrxq; ++iter, ++rxq)
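These iteration macros expand to a pointer-seeding statement followed by a for loop, so the caller supplies both the loop counter and the queue pointer. A minimal usage sketch (illustrative only, not taken from this commit; pi is assumed to be a struct port_info *):

    /* Sketch: walk a port's NIC tx queues; pi, i and txq belong to the caller. */
    struct sge_txq *txq;
    int i;

    for_each_txq(pi, i, txq) {
            /* txq now points at the i-th tx queue owned by this port. */
            TXQ_LOCK(txq);
            /* ... reclaim descriptors, restart transmission, etc. ... */
            TXQ_UNLOCK(txq);
    }

Because the expansion is two statements, the macro cannot be used as the lone body of an unbraced if or else.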
+
+#define NFIQ(sc) ((sc)->intr_count > 1 ? (sc)->intr_count - 1 : 1)
+
+static inline uint32_t
+t4_read_reg(struct adapter *sc, uint32_t reg)
+{
+ return bus_space_read_4(sc->bt, sc->bh, reg);
+}
+
+static inline void
+t4_write_reg(struct adapter *sc, uint32_t reg, uint32_t val)
+{
+ bus_space_write_4(sc->bt, sc->bh, reg, val);
+}
+
+static inline uint64_t
+t4_read_reg64(struct adapter *sc, uint32_t reg)
+{
+ return t4_bus_space_read_8(sc->bt, sc->bh, reg);
+}
+
+static inline void
+t4_write_reg64(struct adapter *sc, uint32_t reg, uint64_t val)
+{
+ t4_bus_space_write_8(sc->bt, sc->bh, reg, val);
+}
+
+static inline void
+t4_os_pci_read_cfg1(struct adapter *sc, int reg, uint8_t *val)
+{
+ *val = pci_read_config(sc->dev, reg, 1);
+}
+
+static inline void
+t4_os_pci_write_cfg1(struct adapter *sc, int reg, uint8_t val)
+{
+ pci_write_config(sc->dev, reg, val, 1);
+}
+
+static inline void
+t4_os_pci_read_cfg2(struct adapter *sc, int reg, uint16_t *val)
+{
+ *val = pci_read_config(sc->dev, reg, 2);
+}
+
+static inline void
+t4_os_pci_write_cfg2(struct adapter *sc, int reg, uint16_t val)
+{
+ pci_write_config(sc->dev, reg, val, 2);
+}
+
+static inline void
+t4_os_pci_read_cfg4(struct adapter *sc, int reg, uint32_t *val)
+{
+ *val = pci_read_config(sc->dev, reg, 4);
+}
+
+static inline void
+t4_os_pci_write_cfg4(struct adapter *sc, int reg, uint32_t val)
+{
+ pci_write_config(sc->dev, reg, val, 4);
+}
+
+static inline struct port_info *
+adap2pinfo(struct adapter *sc, int idx)
+{
+ return (sc->port[idx]);
+}
+
+static inline void
+t4_os_set_hw_addr(struct adapter *sc, int idx, uint8_t hw_addr[])
+{
+ bcopy(hw_addr, sc->port[idx]->hw_addr, ETHER_ADDR_LEN);
+}
+
+static inline bool is_10G_port(const struct port_info *pi)
+{
+ return ((pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G) != 0);
+}
+
+int t4_os_find_pci_capability(struct adapter *, int);
+int t4_os_pci_save_state(struct adapter *);
+int t4_os_pci_restore_state(struct adapter *);
+
+void t4_os_portmod_changed(const struct adapter *, int);
+void t4_os_link_changed(struct adapter *, int, int);
+
+void t4_sge_init(struct adapter *);
+int t4_create_dma_tag(struct adapter *);
+int t4_destroy_dma_tag(struct adapter *);
+int t4_setup_adapter_iqs(struct adapter *);
+int t4_teardown_adapter_iqs(struct adapter *);
+int t4_setup_eth_queues(struct port_info *);
+int t4_teardown_eth_queues(struct port_info *);
+void t4_intr_all(void *);
+void t4_intr_fwd(void *);
+void t4_intr_err(void *);
+void t4_intr_evt(void *);
+void t4_intr_data(void *);
+int t4_eth_tx(struct ifnet *, struct sge_txq *, struct mbuf *);
+void t4_update_fl_bufsize(struct ifnet *);
+
+#endif
diff --git a/sys/dev/cxgbe/common/common.h b/sys/dev/cxgbe/common/common.h
new file mode 100644
index 0000000..13b2baa
--- /dev/null
+++ b/sys/dev/cxgbe/common/common.h
@@ -0,0 +1,517 @@
+/*-
+ * Copyright (c) 2011 Chelsio Communications, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef __CHELSIO_COMMON_H
+#define __CHELSIO_COMMON_H
+
+#include "t4_hw.h"
+
+
+enum {
+ MAX_NPORTS = 4, /* max # of ports */
+ SERNUM_LEN = 24, /* Serial # length */
+ EC_LEN = 16, /* E/C length */
+ ID_LEN = 16, /* ID length */
+};
+
+enum { MEM_EDC0, MEM_EDC1, MEM_MC };
+
+enum dev_master { MASTER_CANT, MASTER_MAY, MASTER_MUST };
+
+enum dev_state { DEV_STATE_UNINIT, DEV_STATE_INIT, DEV_STATE_ERR };
+
+enum {
+ PAUSE_RX = 1 << 0,
+ PAUSE_TX = 1 << 1,
+ PAUSE_AUTONEG = 1 << 2
+};
+
+#define FW_VERSION_MAJOR 1
+#define FW_VERSION_MINOR 2
+#define FW_VERSION_MICRO 65
+
+struct port_stats {
+ u64 tx_octets; /* total # of octets in good frames */
+ u64 tx_frames; /* all good frames */
+ u64 tx_bcast_frames; /* all broadcast frames */
+ u64 tx_mcast_frames; /* all multicast frames */
+ u64 tx_ucast_frames; /* all unicast frames */
+ u64 tx_error_frames; /* all error frames */
+
+ u64 tx_frames_64; /* # of Tx frames in a particular range */
+ u64 tx_frames_65_127;
+ u64 tx_frames_128_255;
+ u64 tx_frames_256_511;
+ u64 tx_frames_512_1023;
+ u64 tx_frames_1024_1518;
+ u64 tx_frames_1519_max;
+
+ u64 tx_drop; /* # of dropped Tx frames */
+ u64 tx_pause; /* # of transmitted pause frames */
+ u64 tx_ppp0; /* # of transmitted PPP prio 0 frames */
+ u64 tx_ppp1; /* # of transmitted PPP prio 1 frames */
+ u64 tx_ppp2; /* # of transmitted PPP prio 2 frames */
+ u64 tx_ppp3; /* # of transmitted PPP prio 3 frames */
+ u64 tx_ppp4; /* # of transmitted PPP prio 4 frames */
+ u64 tx_ppp5; /* # of transmitted PPP prio 5 frames */
+ u64 tx_ppp6; /* # of transmitted PPP prio 6 frames */
+ u64 tx_ppp7; /* # of transmitted PPP prio 7 frames */
+
+ u64 rx_octets; /* total # of octets in good frames */
+ u64 rx_frames; /* all good frames */
+ u64 rx_bcast_frames; /* all broadcast frames */
+ u64 rx_mcast_frames; /* all multicast frames */
+ u64 rx_ucast_frames; /* all unicast frames */
+ u64 rx_too_long; /* # of frames exceeding MTU */
+ u64 rx_jabber; /* # of jabber frames */
+ u64 rx_fcs_err; /* # of received frames with bad FCS */
+ u64 rx_len_err; /* # of received frames with length error */
+ u64 rx_symbol_err; /* symbol errors */
+ u64 rx_runt; /* # of short frames */
+
+ u64 rx_frames_64; /* # of Rx frames in a particular range */
+ u64 rx_frames_65_127;
+ u64 rx_frames_128_255;
+ u64 rx_frames_256_511;
+ u64 rx_frames_512_1023;
+ u64 rx_frames_1024_1518;
+ u64 rx_frames_1519_max;
+
+ u64 rx_pause; /* # of received pause frames */
+ u64 rx_ppp0; /* # of received PPP prio 0 frames */
+ u64 rx_ppp1; /* # of received PPP prio 1 frames */
+ u64 rx_ppp2; /* # of received PPP prio 2 frames */
+ u64 rx_ppp3; /* # of received PPP prio 3 frames */
+ u64 rx_ppp4; /* # of received PPP prio 4 frames */
+ u64 rx_ppp5; /* # of received PPP prio 5 frames */
+ u64 rx_ppp6; /* # of received PPP prio 6 frames */
+ u64 rx_ppp7; /* # of received PPP prio 7 frames */
+
+ u64 rx_ovflow0; /* drops due to buffer-group 0 overflows */
+ u64 rx_ovflow1; /* drops due to buffer-group 1 overflows */
+ u64 rx_ovflow2; /* drops due to buffer-group 2 overflows */
+ u64 rx_ovflow3; /* drops due to buffer-group 3 overflows */
+ u64 rx_trunc0; /* buffer-group 0 truncated packets */
+ u64 rx_trunc1; /* buffer-group 1 truncated packets */
+ u64 rx_trunc2; /* buffer-group 2 truncated packets */
+ u64 rx_trunc3; /* buffer-group 3 truncated packets */
+};
+
+struct lb_port_stats {
+ u64 octets;
+ u64 frames;
+ u64 bcast_frames;
+ u64 mcast_frames;
+ u64 ucast_frames;
+ u64 error_frames;
+
+ u64 frames_64;
+ u64 frames_65_127;
+ u64 frames_128_255;
+ u64 frames_256_511;
+ u64 frames_512_1023;
+ u64 frames_1024_1518;
+ u64 frames_1519_max;
+
+ u64 drop;
+
+ u64 ovflow0;
+ u64 ovflow1;
+ u64 ovflow2;
+ u64 ovflow3;
+ u64 trunc0;
+ u64 trunc1;
+ u64 trunc2;
+ u64 trunc3;
+};
+
+struct tp_tcp_stats {
+ u32 tcpOutRsts;
+ u64 tcpInSegs;
+ u64 tcpOutSegs;
+ u64 tcpRetransSegs;
+};
+
+struct tp_usm_stats {
+ u32 frames;
+ u32 drops;
+ u64 octets;
+};
+
+struct tp_fcoe_stats {
+ u32 framesDDP;
+ u32 framesDrop;
+ u64 octetsDDP;
+};
+
+struct tp_err_stats {
+ u32 macInErrs[4];
+ u32 hdrInErrs[4];
+ u32 tcpInErrs[4];
+ u32 tnlCongDrops[4];
+ u32 ofldChanDrops[4];
+ u32 tnlTxDrops[4];
+ u32 ofldVlanDrops[4];
+ u32 tcp6InErrs[4];
+ u32 ofldNoNeigh;
+ u32 ofldCongDefer;
+};
+
+struct tp_proxy_stats {
+ u32 proxy[4];
+};
+
+struct tp_cpl_stats {
+ u32 req[4];
+ u32 rsp[4];
+ u32 tx_err[4];
+};
+
+struct tp_rdma_stats {
+ u32 rqe_dfr_mod;
+ u32 rqe_dfr_pkt;
+};
+
+struct tp_params {
+ unsigned int ntxchan; /* # of Tx channels */
+ unsigned int tre; /* log2 of core clocks per TP tick */
+ unsigned int dack_re; /* DACK timer resolution */
+ unsigned int la_mask; /* what events are recorded by TP LA */
+ unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */
+};
+
+struct vpd_params {
+ unsigned int cclk;
+ u8 ec[EC_LEN + 1];
+ u8 sn[SERNUM_LEN + 1];
+ u8 id[ID_LEN + 1];
+};
+
+struct pci_params {
+ unsigned int vpd_cap_addr;
+ unsigned char speed;
+ unsigned char width;
+};
+
+/*
+ * Firmware device log.
+ */
+struct devlog_params {
+ u32 memtype; /* which memory (EDC0, EDC1, MC) */
+ u32 start; /* start of log in firmware memory */
+ u32 size; /* size of log */
+};
+
+struct adapter_params {
+ struct tp_params tp;
+ struct vpd_params vpd;
+ struct pci_params pci;
+ struct devlog_params devlog;
+
+ unsigned int sf_size; /* serial flash size in bytes */
+ unsigned int sf_nsec; /* # of flash sectors */
+
+ unsigned int fw_vers;
+ unsigned int tp_vers;
+ u8 api_vers[7];
+
+ unsigned short mtus[NMTUS];
+ unsigned short a_wnd[NCCTRL_WIN];
+ unsigned short b_wnd[NCCTRL_WIN];
+
+ unsigned int mc_size; /* MC memory size */
+ unsigned int nfilters; /* size of filter region */
+
+ unsigned int cim_la_size;
+
+ unsigned int nports; /* # of ethernet ports */
+ unsigned int portvec;
+ unsigned int rev; /* chip revision */
+ unsigned int offload;
+
+ unsigned int ofldq_wr_cred;
+};
+
+enum { /* chip revisions */
+ T4_REV_A = 0,
+};
+
+struct trace_params {
+ u32 data[TRACE_LEN / 4];
+ u32 mask[TRACE_LEN / 4];
+ unsigned short snap_len;
+ unsigned short min_len;
+ unsigned char skip_ofst;
+ unsigned char skip_len;
+ unsigned char invert;
+ unsigned char port;
+};
+
+struct link_config {
+ unsigned short supported; /* link capabilities */
+ unsigned short advertising; /* advertised capabilities */
+ unsigned short requested_speed; /* speed user has requested */
+ unsigned short speed; /* actual link speed */
+ unsigned char requested_fc; /* flow control user has requested */
+ unsigned char fc; /* actual link flow control */
+ unsigned char autoneg; /* autonegotiating? */
+ unsigned char link_ok; /* link up? */
+};
+
+#include "adapter.h"
+
+#ifndef PCI_VENDOR_ID_CHELSIO
+# define PCI_VENDOR_ID_CHELSIO 0x1425
+#endif
+
+#define for_each_port(adapter, iter) \
+ for (iter = 0; iter < (adapter)->params.nports; ++iter)
+
+static inline int is_offload(const struct adapter *adap)
+{
+ return adap->params.offload;
+}
+
+static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
+{
+ return adap->params.vpd.cclk / 1000;
+}
+
+static inline unsigned int us_to_core_ticks(const struct adapter *adap,
+ unsigned int us)
+{
+ return (us * adap->params.vpd.cclk) / 1000;
+}
+
+static inline unsigned int dack_ticks_to_usec(const struct adapter *adap,
+ unsigned int ticks)
+{
+ return (ticks << adap->params.tp.dack_re) / core_ticks_per_usec(adap);
+}
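A quick worked example of the three conversion helpers above, which imply that params.vpd.cclk holds the core clock in kHz: with cclk = 250000 (a 250 MHz core clock), core_ticks_per_usec() returns 250, us_to_core_ticks(adap, 4) returns 1000 ticks, and with a DACK resolution of dack_re = 4, dack_ticks_to_usec(adap, 100) works out to (100 << 4) / 250 = 6 microseconds after integer division.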
+
+void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask, u32 val);
+int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask, int polarity,
+ int attempts, int delay, u32 *valp);
+
+static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
+ int polarity, int attempts, int delay)
+{
+ return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
+ delay, NULL);
+}
+
+int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
+ void *rpl, bool sleep_ok);
+
+static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd,
+ int size, void *rpl)
+{
+ return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true);
+}
+
+static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
+ int size, void *rpl)
+{
+ return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false);
+}
+
+void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
+ unsigned int data_reg, u32 *vals, unsigned int nregs,
+ unsigned int start_idx);
+void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
+ unsigned int data_reg, const u32 *vals,
+ unsigned int nregs, unsigned int start_idx);
+
+struct fw_filter_wr;
+
+void t4_intr_enable(struct adapter *adapter);
+void t4_intr_disable(struct adapter *adapter);
+void t4_intr_clear(struct adapter *adapter);
+int t4_slow_intr_handler(struct adapter *adapter);
+
+int t4_hash_mac_addr(const u8 *addr);
+int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
+ struct link_config *lc);
+int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
+int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data);
+int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data);
+int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz);
+int t4_seeprom_wp(struct adapter *adapter, int enable);
+int t4_read_flash(struct adapter *adapter, unsigned int addr, unsigned int nwords,
+ u32 *data, int byte_oriented);
+int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
+int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size);
+int t4_get_fw_version(struct adapter *adapter, u32 *vers);
+int t4_get_tp_version(struct adapter *adapter, u32 *vers);
+int t4_check_fw_version(struct adapter *adapter);
+int t4_init_hw(struct adapter *adapter, u32 fw_params);
+int t4_prep_adapter(struct adapter *adapter);
+int t4_port_init(struct port_info *p, int mbox, int pf, int vf);
+int t4_reinit_adapter(struct adapter *adap);
+void t4_fatal_err(struct adapter *adapter);
+int t4_set_trace_filter(struct adapter *adapter, const struct trace_params *tp,
+ int filter_index, int enable);
+void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp,
+ int filter_index, int *enabled);
+int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
+ int start, int n, const u16 *rspq, unsigned int nrspq);
+int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
+ unsigned int flags);
+int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
+ unsigned int flags, unsigned int defq);
+int t4_read_rss(struct adapter *adapter, u16 *entries);
+void t4_read_rss_key(struct adapter *adapter, u32 *key);
+void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx);
+void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, u32 *valp);
+void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index, u32 val);
+void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
+ u32 *vfl, u32 *vfh);
+void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
+ u32 vfl, u32 vfh);
+u32 t4_read_rss_pf_map(struct adapter *adapter);
+void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap);
+u32 t4_read_rss_pf_mask(struct adapter *adapter);
+void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask);
+int t4_mps_set_active_ports(struct adapter *adap, unsigned int port_mask);
+void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
+void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
+void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres);
+int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n);
+int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n);
+int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
+ unsigned int *valp);
+int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
+ const unsigned int *valp);
+int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
+ unsigned int *valp);
+int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr);
+void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
+ unsigned int *pif_req_wrptr, unsigned int *pif_rsp_wrptr);
+void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp);
+int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *parity);
+int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *parity);
+int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 size,
+ __be32 *data);
+
+void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
+void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p);
+void t4_clr_port_stats(struct adapter *adap, int idx);
+
+void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
+void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN]);
+void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED]);
+void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
+ unsigned int *ipg);
+void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
+ unsigned int mask, unsigned int val);
+void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr);
+void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st);
+void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st);
+void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st);
+void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st);
+void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st);
+void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
+ struct tp_tcp_stats *v6);
+void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
+ struct tp_fcoe_stats *st);
+void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
+ const unsigned short *alpha, const unsigned short *beta);
+
+void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf);
+
+int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps);
+int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg);
+int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
+ unsigned int start, unsigned int n);
+void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate);
+int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map);
+void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid);
+
+void t4_wol_magic_enable(struct adapter *adap, unsigned int port, const u8 *addr);
+int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
+ u64 mask0, u64 mask1, unsigned int crc, bool enable);
+
+int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
+ enum dev_master master, enum dev_state *state);
+int t4_fw_bye(struct adapter *adap, unsigned int mbox);
+int t4_early_init(struct adapter *adap, unsigned int mbox);
+int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
+int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int nparams, const u32 *params,
+ u32 *val);
+int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int nparams, const u32 *params,
+ const u32 *val);
+int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
+ unsigned int rxqi, unsigned int rxq, unsigned int tc,
+ unsigned int vi, unsigned int cmask, unsigned int pmask,
+ unsigned int exactf, unsigned int rcaps, unsigned int wxcaps);
+int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
+ unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
+ unsigned int *rss_size);
+int t4_free_vi(struct adapter *adap, unsigned int mbox,
+ unsigned int pf, unsigned int vf,
+ unsigned int viid);
+int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ int mtu, int promisc, int all_multi, int bcast, int vlanex,
+ bool sleep_ok);
+int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ bool free, unsigned int naddr, const u8 **addr, u16 *idx,
+ u64 *hash, bool sleep_ok);
+int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ int idx, const u8 *addr, bool persist, bool add_smt);
+int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ bool ucast, u64 vec, bool sleep_ok);
+int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ bool rx_en, bool tx_en);
+int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ unsigned int nblinks);
+int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
+ unsigned int mmd, unsigned int reg, unsigned int *valp);
+int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
+ unsigned int mmd, unsigned int reg, unsigned int val);
+int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
+ unsigned int pf, unsigned int vf, unsigned int iqid,
+ unsigned int fl0id, unsigned int fl1id);
+int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int iqtype, unsigned int iqid,
+ unsigned int fl0id, unsigned int fl1id);
+int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int eqid);
+int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int eqid);
+int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int eqid);
+int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
+ enum ctxt_type ctype, u32 *data);
+int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
+ u32 *data);
+int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
+#endif /* __CHELSIO_COMMON_H */
diff --git a/sys/dev/cxgbe/common/t4_hw.c b/sys/dev/cxgbe/common/t4_hw.c
new file mode 100644
index 0000000..ce96cb0
--- /dev/null
+++ b/sys/dev/cxgbe/common/t4_hw.c
@@ -0,0 +1,4590 @@
+/*-
+ * Copyright (c) 2011 Chelsio Communications, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "common.h"
+#include "t4_regs.h"
+#include "t4_regs_values.h"
+#include "t4fw_interface.h"
+
+
+/**
+ * t4_wait_op_done_val - wait until an operation is completed
+ * @adapter: the adapter performing the operation
+ * @reg: the register to check for completion
+ * @mask: a single-bit field within @reg that indicates completion
+ * @polarity: the value of the field when the operation is completed
+ * @attempts: number of check iterations
+ * @delay: delay in usecs between iterations
+ * @valp: where to store the value of the register at completion time
+ *
+ * Wait until an operation is completed by checking a bit in a register
+ * up to @attempts times. If @valp is not NULL the value of the register
+ * at the time it indicated completion is stored there. Returns 0 if the
+ * operation completes and -EAGAIN otherwise.
+ */
+int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
+ int polarity, int attempts, int delay, u32 *valp)
+{
+ while (1) {
+ u32 val = t4_read_reg(adapter, reg);
+
+ if (!!(val & mask) == polarity) {
+ if (valp)
+ *valp = val;
+ return 0;
+ }
+ if (--attempts == 0)
+ return -EAGAIN;
+ if (delay)
+ udelay(delay);
+ }
+}
+
+/**
+ * t4_set_reg_field - set a register field to a value
+ * @adapter: the adapter to program
+ * @addr: the register address
+ * @mask: specifies the portion of the register to modify
+ * @val: the new value for the register field
+ *
+ * Sets a register field specified by the supplied mask to the
+ * given value.
+ */
+void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
+ u32 val)
+{
+ u32 v = t4_read_reg(adapter, addr) & ~mask;
+
+ t4_write_reg(adapter, addr, v | val);
+ (void) t4_read_reg(adapter, addr); /* flush */
+}
+
+/**
+ * t4_read_indirect - read indirectly addressed registers
+ * @adap: the adapter
+ * @addr_reg: register holding the indirect address
+ * @data_reg: register holding the value of the indirect register
+ * @vals: where the read register values are stored
+ * @nregs: how many indirect registers to read
+ * @start_idx: index of first indirect register to read
+ *
+ * Reads registers that are accessed indirectly through an address/data
+ * register pair.
+ */
+void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
+ unsigned int data_reg, u32 *vals, unsigned int nregs,
+ unsigned int start_idx)
+{
+ while (nregs--) {
+ t4_write_reg(adap, addr_reg, start_idx);
+ *vals++ = t4_read_reg(adap, data_reg);
+ start_idx++;
+ }
+}
+
+/**
+ * t4_write_indirect - write indirectly addressed registers
+ * @adap: the adapter
+ * @addr_reg: register holding the indirect addresses
+ * @data_reg: register holding the value for the indirect registers
+ * @vals: values to write
+ * @nregs: how many indirect registers to write
+ * @start_idx: address of first indirect register to write
+ *
+ * Writes a sequential block of registers that are accessed indirectly
+ * through an address/data register pair.
+ */
+void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
+ unsigned int data_reg, const u32 *vals,
+ unsigned int nregs, unsigned int start_idx)
+{
+ while (nregs--) {
+ t4_write_reg(adap, addr_reg, start_idx++);
+ t4_write_reg(adap, data_reg, *vals++);
+ }
+}
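The address/data indirection these two helpers wrap is used for several register blocks in the chip (TP, CIM, MPS and others). As a hedged illustration only, reading a run of TP PIO registers might look like the sketch below; the A_TP_PIO_ADDR and A_TP_PIO_DATA names are assumed to come from t4_regs.h, which is not shown in this excerpt:

    /* Sketch: read 4 consecutive TP PIO registers starting at tp_reg
     * through the TP address/data register pair (register names assumed). */
    u32 vals[4];

    t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, vals,
        ARRAY_SIZE(vals), tp_reg);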
+
+/*
+ * Get the reply to a mailbox command and store it in @rpl in big-endian order.
+ */
+static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
+ u32 mbox_addr)
+{
+ for ( ; nflit; nflit--, mbox_addr += 8)
+ *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
+}
+
+/*
+ * Handle a FW assertion reported in a mailbox.
+ */
+static void fw_asrt(struct adapter *adap, u32 mbox_addr)
+{
+ struct fw_debug_cmd asrt;
+
+ get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
+ CH_ALERT(adap, "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
+ asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
+ ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
+}
+
+#define X_CIM_PF_NOACCESS 0xeeeeeeee
+/**
+ * t4_wr_mbox_meat - send a command to FW through the given mailbox
+ * @adap: the adapter
+ * @mbox: index of the mailbox to use
+ * @cmd: the command to write
+ * @size: command length in bytes
+ * @rpl: where to optionally store the reply
+ * @sleep_ok: if true we may sleep while awaiting command completion
+ *
+ * Sends the given command to FW through the selected mailbox and waits
+ * for the FW to execute the command. If @rpl is not %NULL it is used to
+ * store the FW's reply to the command. The command and its optional
+ * reply are of the same length. Some FW commands like RESET and
+ * INITIALIZE can take a considerable amount of time to execute.
+ * @sleep_ok determines whether we may sleep while awaiting the response.
+ * If sleeping is allowed we use progressive backoff otherwise we spin.
+ *
+ * The return value is 0 on success or a negative errno on failure. A
+ * failure can happen either because we are not able to execute the
+ * command or FW executes it but signals an error. In the latter case
+ * the return value is the error code indicated by FW (negated).
+ */
+int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
+ void *rpl, bool sleep_ok)
+{
+ /*
+ * We delay in small increments at first in an effort to maintain
+ * responsiveness for simple, fast executing commands but then back
+ * off to larger delays to a maximum retry delay.
+ */
+ static const int delay[] = {
+ 1, 1, 3, 5, 10, 10, 20, 50, 100, 200
+ };
+
+ u32 v;
+ u64 res;
+ int i, ms, delay_idx;
+ const __be64 *p = cmd;
+
+ u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
+ u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
+
+ if ((size & 15) || size > MBOX_LEN)
+ return -EINVAL;
+
+ v = G_MBOWNER(t4_read_reg(adap, ctl_reg));
+ for (i = 0; v == X_MBOWNER_NONE && i < 3; i++)
+ v = G_MBOWNER(t4_read_reg(adap, ctl_reg));
+
+ if (v != X_MBOWNER_PL)
+ return v ? -EBUSY : -ETIMEDOUT;
+
+ for (i = 0; i < size; i += 8, p++)
+ t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));
+
+ t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
+ t4_read_reg(adap, ctl_reg); /* flush write */
+
+ delay_idx = 0;
+ ms = delay[0];
+
+ for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
+ if (sleep_ok) {
+ ms = delay[delay_idx]; /* last element may repeat */
+ if (delay_idx < ARRAY_SIZE(delay) - 1)
+ delay_idx++;
+ msleep(ms);
+ } else
+ mdelay(ms);
+
+ v = t4_read_reg(adap, ctl_reg);
+ if (v == X_CIM_PF_NOACCESS)
+ continue;
+ if (G_MBOWNER(v) == X_MBOWNER_PL) {
+ if (!(v & F_MBMSGVALID)) {
+ t4_write_reg(adap, ctl_reg,
+ V_MBOWNER(X_MBOWNER_NONE));
+ continue;
+ }
+
+ res = t4_read_reg64(adap, data_reg);
+ if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
+ fw_asrt(adap, data_reg);
+ res = V_FW_CMD_RETVAL(EIO);
+ } else if (rpl)
+ get_mbox_rpl(adap, rpl, size / 8, data_reg);
+ t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
+ return -G_FW_CMD_RETVAL((int)res);
+ }
+ }
+
+ CH_ERR(adap, "command %#x in mailbox %d timed out\n",
+ *(const u8 *)cmd, mbox);
+ return -ETIMEDOUT;
+}
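To make the mailbox flow above concrete, here is a sketch of how a firmware command is typically issued through the t4_wr_mbox() wrapper declared in common.h. The command layout and the FW_*/V_*/F_* macros live in t4fw_interface.h, which is not part of this excerpt, so treat the exact struct fields and macro names as assumptions rather than a copy of the driver's code:

    /* Sketch: ask the firmware to reset the chip (field/macro names assumed). */
    static int
    example_fw_reset(struct adapter *adap, unsigned int mbox)
    {
            struct fw_reset_cmd c;

            memset(&c, 0, sizeof(c));
            c.op_to_write = htonl(V_FW_CMD_OP(FW_RESET_CMD) | F_FW_CMD_REQUEST |
                F_FW_CMD_WRITE);
            c.retval_len16 = htonl(FW_LEN16(c));

            /* Sleeps while polling for the reply; returns 0 or -errno. */
            return (t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL));
    }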
+
+/**
+ * t4_mc_read - read from MC through backdoor accesses
+ * @adap: the adapter
+ * @addr: address of first byte requested
+ * @data: 64 bytes of data containing the requested address
+ * @ecc: where to store the corresponding 64-bit ECC word
+ *
+ * Read 64 bytes of data from MC starting at a 64-byte-aligned address
+ * that covers the requested address @addr. If @ecc is not %NULL it
+ * is assigned the 64-bit ECC word for the read data.
+ */
+int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
+{
+ int i;
+
+ if (t4_read_reg(adap, A_MC_BIST_CMD) & F_START_BIST)
+ return -EBUSY;
+ t4_write_reg(adap, A_MC_BIST_CMD_ADDR, addr & ~0x3fU);
+ t4_write_reg(adap, A_MC_BIST_CMD_LEN, 64);
+ t4_write_reg(adap, A_MC_BIST_DATA_PATTERN, 0xc);
+ t4_write_reg(adap, A_MC_BIST_CMD, V_BIST_OPCODE(1) | F_START_BIST |
+ V_BIST_CMD_GAP(1));
+ i = t4_wait_op_done(adap, A_MC_BIST_CMD, F_START_BIST, 0, 10, 1);
+ if (i)
+ return i;
+
+#define MC_DATA(i) MC_BIST_STATUS_REG(A_MC_BIST_STATUS_RDATA, i)
+
+ for (i = 15; i >= 0; i--)
+ *data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
+ if (ecc)
+ *ecc = t4_read_reg64(adap, MC_DATA(16));
+#undef MC_DATA
+ return 0;
+}
+
+/**
+ * t4_edc_read - read from EDC through backdoor accesses
+ * @adap: the adapter
+ * @idx: which EDC to access
+ * @addr: address of first byte requested
+ * @data: 64 bytes of data containing the requested address
+ * @ecc: where to store the corresponding 64-bit ECC word
+ *
+ * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
+ * that covers the requested address @addr. If @ecc is not %NULL it
+ * is assigned the 64-bit ECC word for the read data.
+ */
+int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
+{
+ int i;
+
+ idx *= EDC_STRIDE;
+ if (t4_read_reg(adap, A_EDC_BIST_CMD + idx) & F_START_BIST)
+ return -EBUSY;
+ t4_write_reg(adap, A_EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
+ t4_write_reg(adap, A_EDC_BIST_CMD_LEN + idx, 64);
+ t4_write_reg(adap, A_EDC_BIST_DATA_PATTERN + idx, 0xc);
+ t4_write_reg(adap, A_EDC_BIST_CMD + idx,
+ V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
+ i = t4_wait_op_done(adap, A_EDC_BIST_CMD + idx, F_START_BIST, 0, 10, 1);
+ if (i)
+ return i;
+
+#define EDC_DATA(i) (EDC_BIST_STATUS_REG(A_EDC_BIST_STATUS_RDATA, i) + idx)
+
+ for (i = 15; i >= 0; i--)
+ *data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
+ if (ecc)
+ *ecc = t4_read_reg64(adap, EDC_DATA(16));
+#undef EDC_DATA
+ return 0;
+}
+
+/**
+ * t4_mem_read - read EDC 0, EDC 1 or MC into buffer
+ * @adap: the adapter
+ * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
+ * @addr: address within indicated memory type
+ * @len: amount of memory to read
+ * @buf: host memory buffer
+ *
+ * Reads an [almost] arbitrary memory region in the firmware: the
+ * firmware memory address, length and host buffer must be aligned on
+ * 32-bit boundaries. The memory is returned as a raw byte sequence from
+ * the firmware's memory. If this memory contains data structures which
+ * contain multi-byte integers, it's the caller's responsibility to
+ * perform appropriate byte order conversions.
+ */
+int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
+ __be32 *buf)
+{
+ u32 pos, start, end, offset;
+ int ret;
+
+ /*
+ * Argument sanity checks ...
+ */
+ if ((addr & 0x3) || (len & 0x3))
+ return -EINVAL;
+
+ /*
+	 * The underlying EDC/MC read routines read 64 bytes at a time, so we
+ * need to round down the start and round up the end. We'll start
+ * copying out of the first line at (addr - start) a word at a time.
+ */
+ start = addr & ~(64-1);
+ end = (addr + len + 64-1) & ~(64-1);
+ offset = (addr - start)/sizeof(__be32);
+
+ for (pos = start; pos < end; pos += 64, offset = 0) {
+ __be32 data[16];
+
+ /*
+ * Read the chip's memory block and bail if there's an error.
+ */
+ if (mtype == MEM_MC)
+ ret = t4_mc_read(adap, pos, data, NULL);
+ else
+ ret = t4_edc_read(adap, mtype, pos, data, NULL);
+ if (ret)
+ return ret;
+
+ /*
+ * Copy the data into the caller's memory buffer.
+ */
+ while (offset < 16 && len > 0) {
+ *buf++ = data[offset++];
+ len -= sizeof(__be32);
+ }
+ }
+
+ return 0;
+}
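+
+/*
+ * Example (illustrative sketch, not part of the driver): reading a small,
+ * 32-bit aligned region out of EDC0 with t4_mem_read(). The address and
+ * buffer size here are arbitrary placeholders and "adap" stands for an
+ * already initialized adapter.
+ *
+ *	__be32 buf[64];
+ *	int ret;
+ *
+ *	ret = t4_mem_read(adap, MEM_EDC0, 0x1000, sizeof(buf), buf);
+ *	if (ret)
+ *		return ret;
+ *	// buf[] holds raw big-endian data; convert fields as needed,
+ *	// e.g. ntohl(buf[0]).
+ */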
+
+/*
+ * Partial EEPROM Vital Product Data structure. Includes only the ID and
+ * VPD-R header.
+ */
+struct t4_vpd_hdr {
+ u8 id_tag;
+ u8 id_len[2];
+ u8 id_data[ID_LEN];
+ u8 vpdr_tag;
+ u8 vpdr_len[2];
+};
+
+/*
+ * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
+ */
+#define EEPROM_MAX_RD_POLL 40
+#define EEPROM_MAX_WR_POLL 6
+#define EEPROM_STAT_ADDR 0x7bfc
+#define VPD_BASE 0x400
+#define VPD_BASE_OLD 0
+#define VPD_LEN 512
+#define VPD_INFO_FLD_HDR_SIZE 3
+
+/**
+ * t4_seeprom_read - read a serial EEPROM location
+ * @adapter: adapter to read
+ * @addr: EEPROM virtual address
+ * @data: where to store the read data
+ *
+ * Read a 32-bit word from a location in serial EEPROM using the card's PCI
+ * VPD capability. Note that this function must be called with a virtual
+ * address.
+ */
+int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
+{
+ u16 val;
+ int attempts = EEPROM_MAX_RD_POLL;
+ unsigned int base = adapter->params.pci.vpd_cap_addr;
+
+ if (addr >= EEPROMVSIZE || (addr & 3))
+ return -EINVAL;
+
+ t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
+ do {
+ udelay(10);
+ t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
+ } while (!(val & PCI_VPD_ADDR_F) && --attempts);
+
+ if (!(val & PCI_VPD_ADDR_F)) {
+ CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
+ return -EIO;
+ }
+ t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
+ *data = le32_to_cpu(*data);
+ return 0;
+}
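+
+/*
+ * Example (illustrative sketch, not part of the driver): reading the first
+ * 32-bit word of the VPD area through the virtual EEPROM address space.
+ * "adapter" is assumed to be a fully initialized adapter.
+ *
+ *	u32 word;
+ *	int ret;
+ *
+ *	ret = t4_seeprom_read(adapter, VPD_BASE, &word);
+ *	// on success the low byte of "word" is the VPD ID tag (0x82 on
+ *	// cards whose information area starts at VPD_BASE)
+ */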
+
+/**
+ * t4_seeprom_write - write a serial EEPROM location
+ * @adapter: adapter to write
+ * @addr: virtual EEPROM address
+ * @data: value to write
+ *
+ * Write a 32-bit word to a location in serial EEPROM using the card's PCI
+ * VPD capability. Note that this function must be called with a virtual
+ * address.
+ */
+int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
+{
+ u16 val;
+ int attempts = EEPROM_MAX_WR_POLL;
+ unsigned int base = adapter->params.pci.vpd_cap_addr;
+
+ if (addr >= EEPROMVSIZE || (addr & 3))
+ return -EINVAL;
+
+ t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
+ cpu_to_le32(data));
+ t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
+ (u16)addr | PCI_VPD_ADDR_F);
+ do {
+ msleep(1);
+ t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
+ } while ((val & PCI_VPD_ADDR_F) && --attempts);
+
+ if (val & PCI_VPD_ADDR_F) {
+ CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
+ return -EIO;
+ }
+ return 0;
+}
+
+/**
+ * t4_eeprom_ptov - translate a physical EEPROM address to virtual
+ * @phys_addr: the physical EEPROM address
+ * @fn: the PCI function number
+ * @sz: size of function-specific area
+ *
+ * Translate a physical EEPROM address to virtual. The first 1K is
+ * accessed through virtual addresses starting at 31K, the rest is
+ * accessed through virtual addresses starting at 0.
+ *
+ * The mapping is as follows:
+ * [0..1K) -> [31K..32K)
+ * [1K..1K+A) -> [ES-A..ES)
+ * [1K+A..ES) -> [0..ES-A-1K)
+ *
+ * where A = @fn * @sz, and ES = EEPROM size.
+ */
+int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
+{
+ fn *= sz;
+ if (phys_addr < 1024)
+ return phys_addr + (31 << 10);
+ if (phys_addr < 1024 + fn)
+ return EEPROMSIZE - fn + phys_addr - 1024;
+ if (phys_addr < EEPROMSIZE)
+ return phys_addr - 1024 - fn;
+ return -EINVAL;
+}
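+
+/*
+ * Example (worked through by hand): with a 1KB per-function area
+ * (sz = 1024) on PCI function 4, so A = 4KB, the translation gives:
+ *
+ *	t4_eeprom_ptov(0x0000, 4, 1024) == 0x7c00 (first 1K -> 31K..32K)
+ *	t4_eeprom_ptov(0x0400, 4, 1024) == EEPROMSIZE - 4096
+ *	t4_eeprom_ptov(0x1400, 4, 1024) == 0
+ */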
+
+/**
+ * t4_seeprom_wp - enable/disable EEPROM write protection
+ * @adapter: the adapter
+ * @enable: whether to enable or disable write protection
+ *
+ * Enables or disables write protection on the serial EEPROM.
+ */
+int t4_seeprom_wp(struct adapter *adapter, int enable)
+{
+ return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
+}
+
+/**
+ * get_vpd_keyword_val - Locates an information field keyword in the VPD
+ * @v: Pointer to buffered vpd data structure
+ * @kw: The keyword to search for
+ *
+ * Returns the offset within the VPD of the keyword's data area,
+ * or -ENOENT if the keyword is not found.
+ */
+static int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
+{
+ int i;
+	unsigned int offset, len;
+ const u8 *buf = &v->id_tag;
+ const u8 *vpdr_len = &v->vpdr_tag;
+ offset = sizeof(struct t4_vpd_hdr);
+ len = (u16)vpdr_len[1] + ((u16)vpdr_len[2] << 8);
+
+ if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
+ return -ENOENT;
+ }
+
+ for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
+		if (memcmp(buf + i, kw, 2) == 0) {
+ i += VPD_INFO_FLD_HDR_SIZE;
+ return i;
+ }
+
+ i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
+ }
+
+ return -ENOENT;
+}
+
+
+/**
+ * get_vpd_params - read VPD parameters from VPD EEPROM
+ * @adapter: adapter to read
+ * @p: where to store the parameters
+ *
+ * Reads card parameters stored in VPD EEPROM.
+ */
+static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
+{
+ int i, ret, addr;
+ int ec, sn;
+ u8 vpd[VPD_LEN], csum;
+ const struct t4_vpd_hdr *v;
+
+ /*
+ * Card information normally starts at VPD_BASE but early cards had
+ * it at 0.
+ */
+	ret = t4_seeprom_read(adapter, VPD_BASE, (u32 *)(vpd));
+	if (ret)
+		return ret;
+	addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;
+
+ for (i = 0; i < sizeof(vpd); i += 4) {
+ ret = t4_seeprom_read(adapter, addr + i, (u32 *)(vpd + i));
+ if (ret)
+ return ret;
+ }
+ v = (const struct t4_vpd_hdr *)vpd;
+
+#define FIND_VPD_KW(var,name) do { \
+ var = get_vpd_keyword_val(v , name); \
+ if (var < 0) { \
+ CH_ERR(adapter, "missing VPD keyword " name "\n"); \
+ return -EINVAL; \
+ } \
+} while (0)
+
+ FIND_VPD_KW(i, "RV");
+ for (csum = 0; i >= 0; i--)
+ csum += vpd[i];
+
+ if (csum) {
+ CH_ERR(adapter, "corrupted VPD EEPROM, actual csum %u\n", csum);
+ return -EINVAL;
+ }
+ FIND_VPD_KW(ec, "EC");
+ FIND_VPD_KW(sn, "SN");
+#undef FIND_VPD_KW
+
+ memcpy(p->id, v->id_data, ID_LEN);
+ strstrip(p->id);
+ memcpy(p->ec, vpd + ec, EC_LEN);
+ strstrip(p->ec);
+ i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
+ memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
+ strstrip(p->sn);
+
+ return 0;
+}
+
+/* serial flash and firmware constants and flash config file constants */
+enum {
+ SF_ATTEMPTS = 10, /* max retries for SF operations */
+
+ /* flash command opcodes */
+ SF_PROG_PAGE = 2, /* program page */
+ SF_WR_DISABLE = 4, /* disable writes */
+ SF_RD_STATUS = 5, /* read status register */
+ SF_WR_ENABLE = 6, /* enable writes */
+ SF_RD_DATA_FAST = 0xb, /* read flash */
+ SF_RD_ID = 0x9f, /* read ID */
+ SF_ERASE_SECTOR = 0xd8, /* erase sector */
+
+ FW_START_SEC = 8, /* first flash sector for FW */
+ FW_END_SEC = 15, /* last flash sector for FW */
+ FW_IMG_START = FW_START_SEC * SF_SEC_SIZE,
+ FW_MAX_SIZE = (FW_END_SEC - FW_START_SEC + 1) * SF_SEC_SIZE,
+
+	FLASH_CFG_MAX_SIZE = 0x10000, /* max size of the flash config file */
+ FLASH_CFG_OFFSET = 0x1f0000,
+ FLASH_CFG_START_SEC = FLASH_CFG_OFFSET / SF_SEC_SIZE,
+	FPGA_FLASH_CFG_OFFSET = 0xf0000, /* if FPGA mode, then cfg file is at 1MB - 64KB */
+ FPGA_FLASH_CFG_START_SEC = FPGA_FLASH_CFG_OFFSET / SF_SEC_SIZE,
+};
+
+/**
+ * sf1_read - read data from the serial flash
+ * @adapter: the adapter
+ * @byte_cnt: number of bytes to read
+ * @cont: whether another operation will be chained
+ * @lock: whether to lock SF for PL access only
+ * @valp: where to store the read data
+ *
+ * Reads up to 4 bytes of data from the serial flash. The location of
+ * the read needs to be specified prior to calling this by issuing the
+ * appropriate commands to the serial flash.
+ */
+static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
+ int lock, u32 *valp)
+{
+ int ret;
+
+ if (!byte_cnt || byte_cnt > 4)
+ return -EINVAL;
+ if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
+ return -EBUSY;
+ t4_write_reg(adapter, A_SF_OP,
+ V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
+ ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
+ if (!ret)
+ *valp = t4_read_reg(adapter, A_SF_DATA);
+ return ret;
+}
+
+/**
+ * sf1_write - write data to the serial flash
+ * @adapter: the adapter
+ * @byte_cnt: number of bytes to write
+ * @cont: whether another operation will be chained
+ * @lock: whether to lock SF for PL access only
+ * @val: value to write
+ *
+ * Writes up to 4 bytes of data to the serial flash. The location of
+ * the write needs to be specified prior to calling this by issuing the
+ * appropriate commands to the serial flash.
+ */
+static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
+ int lock, u32 val)
+{
+ if (!byte_cnt || byte_cnt > 4)
+ return -EINVAL;
+ if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
+ return -EBUSY;
+ t4_write_reg(adapter, A_SF_DATA, val);
+ t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
+ V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
+ return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
+}
+
+/**
+ * flash_wait_op - wait for a flash operation to complete
+ * @adapter: the adapter
+ * @attempts: max number of polls of the status register
+ * @delay: delay between polls in ms
+ *
+ * Wait for a flash operation to complete by polling the status register.
+ */
+static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
+{
+ int ret;
+ u32 status;
+
+ while (1) {
+ if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
+ (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
+ return ret;
+ if (!(status & 1))
+ return 0;
+ if (--attempts == 0)
+ return -EAGAIN;
+ if (delay)
+ msleep(delay);
+ }
+}
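+
+/*
+ * Example (illustrative sketch, not part of the driver): the chained
+ * write/read pattern used by flash_wait_op() above works for other
+ * single-opcode flash commands as well, e.g. reading the JEDEC ID with
+ * SF_RD_ID. The 3-byte ID length is an assumption about the attached part.
+ *
+ *	u32 id;
+ *	int ret;
+ *
+ *	ret = sf1_write(adapter, 1, 1, 1, SF_RD_ID);
+ *	if (!ret)
+ *		ret = sf1_read(adapter, 3, 0, 1, &id);
+ *	t4_write_reg(adapter, A_SF_OP, 0);	// unlock SF
+ */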
+
+/**
+ * t4_read_flash - read words from serial flash
+ * @adapter: the adapter
+ * @addr: the start address for the read
+ * @nwords: how many 32-bit words to read
+ * @data: where to store the read data
+ * @byte_oriented: whether to store data as bytes or as words
+ *
+ * Read the specified number of 32-bit words from the serial flash.
+ * If @byte_oriented is set the read data is stored as a byte array
+ * (i.e., big-endian), otherwise as 32-bit words in the platform's
+ * natural endianness.
+ */
+int t4_read_flash(struct adapter *adapter, unsigned int addr,
+ unsigned int nwords, u32 *data, int byte_oriented)
+{
+ int ret;
+
+ if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
+ return -EINVAL;
+
+ addr = swab32(addr) | SF_RD_DATA_FAST;
+
+ if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
+ (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
+ return ret;
+
+ for ( ; nwords; nwords--, data++) {
+ ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
+ if (nwords == 1)
+ t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
+ if (ret)
+ return ret;
+ if (byte_oriented)
+ *data = htonl(*data);
+ }
+ return 0;
+}
+
+/**
+ * t4_write_flash - write up to a page of data to the serial flash
+ * @adapter: the adapter
+ * @addr: the start address to write
+ * @n: length of data to write in bytes
+ * @data: the data to write
+ *
+ * Writes up to a page of data (256 bytes) to the serial flash starting
+ * at the given address. All the data must be written to the same page.
+ */
+static int t4_write_flash(struct adapter *adapter, unsigned int addr,
+ unsigned int n, const u8 *data)
+{
+ int ret;
+ u32 buf[SF_PAGE_SIZE / 4];
+ unsigned int i, c, left, val, offset = addr & 0xff;
+
+ if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
+ return -EINVAL;
+
+ val = swab32(addr) | SF_PROG_PAGE;
+
+ if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
+ (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
+ goto unlock;
+
+ for (left = n; left; left -= c) {
+ c = min(left, 4U);
+ for (val = 0, i = 0; i < c; ++i)
+ val = (val << 8) + *data++;
+
+ ret = sf1_write(adapter, c, c != left, 1, val);
+ if (ret)
+ goto unlock;
+ }
+ ret = flash_wait_op(adapter, 8, 1);
+ if (ret)
+ goto unlock;
+
+ t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
+
+ /* Read the page to verify the write succeeded */
+ ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
+ if (ret)
+ return ret;
+
+ if (memcmp(data - n, (u8 *)buf + offset, n)) {
+ CH_ERR(adapter, "failed to correctly write the flash page "
+ "at %#x\n", addr);
+ return -EIO;
+ }
+ return 0;
+
+unlock:
+ t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
+ return ret;
+}
+
+/**
+ * t4_get_fw_version - read the firmware version
+ * @adapter: the adapter
+ * @vers: where to place the version
+ *
+ * Reads the FW version from flash.
+ */
+int t4_get_fw_version(struct adapter *adapter, u32 *vers)
+{
+ return t4_read_flash(adapter,
+ FW_IMG_START + offsetof(struct fw_hdr, fw_ver), 1,
+ vers, 0);
+}
+
+/**
+ * t4_get_tp_version - read the TP microcode version
+ * @adapter: the adapter
+ * @vers: where to place the version
+ *
+ * Reads the TP microcode version from flash.
+ */
+int t4_get_tp_version(struct adapter *adapter, u32 *vers)
+{
+ return t4_read_flash(adapter, FW_IMG_START + offsetof(struct fw_hdr,
+ tp_microcode_ver),
+ 1, vers, 0);
+}
+
+/**
+ * t4_check_fw_version - check if the FW is compatible with this driver
+ * @adapter: the adapter
+ *
+ * Checks if an adapter's FW is compatible with the driver. Returns 0
+ * if there's an exact match, a negative error if the version could not be
+ * read or there's a major version mismatch, and a positive value if the
+ * expected major version is found but there's a minor version mismatch.
+ */
+int t4_check_fw_version(struct adapter *adapter)
+{
+ u32 api_vers[2];
+ int ret, major, minor, micro;
+
+ ret = t4_get_fw_version(adapter, &adapter->params.fw_vers);
+ if (!ret)
+ ret = t4_get_tp_version(adapter, &adapter->params.tp_vers);
+ if (!ret)
+ ret = t4_read_flash(adapter,
+ FW_IMG_START + offsetof(struct fw_hdr, intfver_nic),
+ 2, api_vers, 1);
+ if (ret)
+ return ret;
+
+ major = G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers);
+ minor = G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers);
+ micro = G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers);
+ memcpy(adapter->params.api_vers, api_vers,
+ sizeof(adapter->params.api_vers));
+
+ if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */
+ CH_ERR(adapter, "card FW has major version %u, driver wants "
+ "%u\n", major, FW_VERSION_MAJOR);
+ return -EINVAL;
+ }
+
+ if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
+ return 0; /* perfect match */
+
+ /* Minor/micro version mismatch. Report it but often it's OK. */
+ return 1;
+}
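+
+/*
+ * Example (illustrative sketch): how a caller might act on the three
+ * possible outcomes of t4_check_fw_version(); the warning text is a
+ * placeholder, not a message the driver actually prints.
+ *
+ *	int ret = t4_check_fw_version(adapter);
+ *
+ *	if (ret < 0)
+ *		return ret;	// unreadable or major version mismatch
+ *	if (ret > 0)
+ *		CH_WARN_RATELIMIT(adapter,
+ *		    "FW minor/micro version differs from the driver\n");
+ *	// ret == 0: exact match
+ */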
+
+/**
+ * t4_flash_erase_sectors - erase a range of flash sectors
+ * @adapter: the adapter
+ * @start: the first sector to erase
+ * @end: the last sector to erase
+ *
+ * Erases the sectors in the given inclusive range.
+ */
+static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
+{
+ int ret = 0;
+
+ while (start <= end) {
+ if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
+ (ret = sf1_write(adapter, 4, 0, 1,
+ SF_ERASE_SECTOR | (start << 8))) != 0 ||
+ (ret = flash_wait_op(adapter, 14, 500)) != 0) {
+ CH_ERR(adapter, "erase of flash sector %d failed, "
+ "error %d\n", start, ret);
+ break;
+ }
+ start++;
+ }
+ t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
+ return ret;
+}
+
+/**
+ * t4_load_cfg - download config file
+ * @adap: the adapter
+ * @cfg_data: the cfg text file to write
+ * @size: text file size
+ *
+ * Write the supplied config text file to the card's serial flash.
+ */
+int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
+{
+ int ret, i, n;
+ unsigned int addr;
+ unsigned int flash_cfg_start_sec;
+ unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
+
+ if (adap->params.sf_size == 0x100000) {
+ addr = FPGA_FLASH_CFG_OFFSET;
+ flash_cfg_start_sec = FPGA_FLASH_CFG_START_SEC;
+ } else {
+ addr = FLASH_CFG_OFFSET;
+ flash_cfg_start_sec = FLASH_CFG_START_SEC;
+ }
+ if (!size) {
+ CH_ERR(adap, "cfg file has no data\n");
+ return -EINVAL;
+ }
+
+ if (size > FLASH_CFG_MAX_SIZE) {
+ CH_ERR(adap, "cfg file too large, max is %u bytes\n",
+ FLASH_CFG_MAX_SIZE);
+ return -EFBIG;
+ }
+
+ i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */
+ sf_sec_size);
+ ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
+ flash_cfg_start_sec + i - 1);
+ if (ret)
+ goto out;
+
+ /* this will write to the flash up to SF_PAGE_SIZE at a time */
+	for (i = 0; i < size; i += SF_PAGE_SIZE) {
+		if ((size - i) < SF_PAGE_SIZE)
+ n = size - i;
+ else
+ n = SF_PAGE_SIZE;
+ ret = t4_write_flash(adap, addr, n, cfg_data);
+ if (ret)
+ goto out;
+
+ addr += SF_PAGE_SIZE;
+ cfg_data += SF_PAGE_SIZE;
+ }
+
+out:
+ if (ret)
+ CH_ERR(adap, "config file download failed %d\n", ret);
+ return ret;
+}
+
+
+/**
+ * t4_load_fw - download firmware
+ * @adap: the adapter
+ * @fw_data: the firmware image to write
+ * @size: image size
+ *
+ * Write the supplied firmware image to the card's serial flash.
+ */
+int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
+{
+ u32 csum;
+ int ret, addr;
+ unsigned int i;
+ u8 first_page[SF_PAGE_SIZE];
+ const u32 *p = (const u32 *)fw_data;
+ const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
+ unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
+
+ if (!size) {
+ CH_ERR(adap, "FW image has no data\n");
+ return -EINVAL;
+ }
+ if (size & 511) {
+ CH_ERR(adap, "FW image size not multiple of 512 bytes\n");
+ return -EINVAL;
+ }
+ if (ntohs(hdr->len512) * 512 != size) {
+ CH_ERR(adap, "FW image size differs from size in FW header\n");
+ return -EINVAL;
+ }
+ if (size > FW_MAX_SIZE) {
+ CH_ERR(adap, "FW image too large, max is %u bytes\n",
+ FW_MAX_SIZE);
+ return -EFBIG;
+ }
+
+ for (csum = 0, i = 0; i < size / sizeof(csum); i++)
+ csum += ntohl(p[i]);
+
+ if (csum != 0xffffffff) {
+ CH_ERR(adap, "corrupted firmware image, checksum %#x\n",
+ csum);
+ return -EINVAL;
+ }
+
+ i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
+ ret = t4_flash_erase_sectors(adap, FW_START_SEC, FW_START_SEC + i - 1);
+ if (ret)
+ goto out;
+
+ /*
+ * We write the correct version at the end so the driver can see a bad
+ * version if the FW write fails. Start by writing a copy of the
+ * first page with a bad version.
+ */
+ memcpy(first_page, fw_data, SF_PAGE_SIZE);
+ ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
+ ret = t4_write_flash(adap, FW_IMG_START, SF_PAGE_SIZE, first_page);
+ if (ret)
+ goto out;
+
+ addr = FW_IMG_START;
+ for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
+ addr += SF_PAGE_SIZE;
+ fw_data += SF_PAGE_SIZE;
+ ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
+ if (ret)
+ goto out;
+ }
+
+ ret = t4_write_flash(adap,
+ FW_IMG_START + offsetof(struct fw_hdr, fw_ver),
+ sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
+out:
+ if (ret)
+ CH_ERR(adap, "firmware download failed, error %d\n", ret);
+ return ret;
+}
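+
+/*
+ * Example (illustrative sketch): since t4_load_fw() writes the real
+ * firmware version last, a caller can re-read it afterwards to confirm
+ * the flash update completed. "fw_data" and "size" are placeholders for
+ * the image the caller already holds.
+ *
+ *	u32 vers;
+ *	int ret;
+ *
+ *	ret = t4_load_fw(adap, fw_data, size);
+ *	if (!ret)
+ *		ret = t4_get_fw_version(adap, &vers);
+ */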
+
+/**
+ * t4_read_cimq_cfg - read CIM queue configuration
+ * @adap: the adapter
+ * @base: holds the queue base addresses in bytes
+ * @size: holds the queue sizes in bytes
+ * @thres: holds the queue full thresholds in bytes
+ *
+ * Returns the current configuration of the CIM queues, starting with
+ * the IBQs, then the OBQs.
+ */
+void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
+{
+ unsigned int i, v;
+
+ for (i = 0; i < CIM_NUM_IBQ; i++) {
+ t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
+ V_QUENUMSELECT(i));
+ v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
+ *base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
+ *size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
+ *thres++ = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */
+ }
+ for (i = 0; i < CIM_NUM_OBQ; i++) {
+ t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
+ V_QUENUMSELECT(i));
+ v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
+ *base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
+ *size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
+ }
+}
+
+/**
+ * t4_read_cim_ibq - read the contents of a CIM inbound queue
+ * @adap: the adapter
+ * @qid: the queue index
+ * @data: where to store the queue contents
+ * @n: capacity of @data in 32-bit words
+ *
+ * Reads the contents of the selected CIM queue starting at address 0 up
+ * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
+ * error and the number of 32-bit words actually read on success.
+ */
+int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
+{
+ int i, err;
+ unsigned int addr;
+ const unsigned int nwords = CIM_IBQ_SIZE * 4;
+
+ if (qid > 5 || (n & 3))
+ return -EINVAL;
+
+ addr = qid * nwords;
+ if (n > nwords)
+ n = nwords;
+
+ for (i = 0; i < n; i++, addr++) {
+ t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
+ F_IBQDBGEN);
+ err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
+ 2, 1);
+ if (err)
+ return err;
+ *data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
+ }
+ t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
+ return i;
+}
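+
+/*
+ * Example (illustrative sketch): dumping CIM IBQ 0. The 128-word buffer
+ * is an arbitrary choice; the count only has to be a multiple of 4 and is
+ * capped at the queue size internally.
+ *
+ *	u32 ibq[128];
+ *	int n;
+ *
+ *	n = t4_read_cim_ibq(adap, 0, ibq, ARRAY_SIZE(ibq));
+ *	if (n < 0)
+ *		return n;
+ *	// n is the number of 32-bit words actually read
+ */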
+
+/**
+ * t4_read_cim_obq - read the contents of a CIM outbound queue
+ * @adap: the adapter
+ * @qid: the queue index
+ * @data: where to store the queue contents
+ * @n: capacity of @data in 32-bit words
+ *
+ * Reads the contents of the selected CIM queue starting at address 0 up
+ * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
+ * error and the number of 32-bit words actually read on success.
+ */
+int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
+{
+ int i, err;
+ unsigned int addr, v, nwords;
+
+ if (qid > 5 || (n & 3))
+ return -EINVAL;
+
+ t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
+ V_QUENUMSELECT(qid));
+ v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
+
+	addr = G_CIMQBASE(v) * 64; /* multiple of 256 -> multiple of 4 */
+ nwords = G_CIMQSIZE(v) * 64; /* same */
+ if (n > nwords)
+ n = nwords;
+
+ for (i = 0; i < n; i++, addr++) {
+ t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
+ F_OBQDBGEN);
+ err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
+ 2, 1);
+ if (err)
+ return err;
+ *data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
+ }
+ t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
+ return i;
+}
+
+enum {
+ CIM_QCTL_BASE = 0,
+ CIM_CTL_BASE = 0x2000,
+ CIM_PBT_ADDR_BASE = 0x2800,
+ CIM_PBT_LRF_BASE = 0x3000,
+ CIM_PBT_DATA_BASE = 0x3800
+};
+
+/**
+ * t4_cim_read - read a block from CIM internal address space
+ * @adap: the adapter
+ * @addr: the start address within the CIM address space
+ * @n: number of words to read
+ * @valp: where to store the result
+ *
+ * Reads a block of 4-byte words from the CIM internal address space.
+ */
+int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
+ unsigned int *valp)
+{
+ int ret = 0;
+
+ if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
+ return -EBUSY;
+
+ for ( ; !ret && n--; addr += 4) {
+ t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
+ ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
+ 0, 5, 2);
+ if (!ret)
+ *valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
+ }
+ return ret;
+}
+
+/**
+ * t4_cim_write - write a block into CIM internal address space
+ * @adap: the adapter
+ * @addr: the start address within the CIM address space
+ * @n: number of words to write
+ * @valp: set of values to write
+ *
+ * Writes a block of 4-byte words into the CIM internal address space.
+ */
+int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
+ const unsigned int *valp)
+{
+ int ret = 0;
+
+ if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
+ return -EBUSY;
+
+ for ( ; !ret && n--; addr += 4) {
+ t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
+ t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
+ ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
+ 0, 5, 2);
+ }
+ return ret;
+}
+
+static int t4_cim_write1(struct adapter *adap, unsigned int addr, unsigned int val)
+{
+ return t4_cim_write(adap, addr, 1, &val);
+}
+
+/**
+ * t4_cim_ctl_read - read a block from CIM control region
+ * @adap: the adapter
+ * @addr: the start address within the CIM control region
+ * @n: number of words to read
+ * @valp: where to store the result
+ *
+ * Reads a block of 4-byte words from the CIM control region.
+ */
+int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
+ unsigned int *valp)
+{
+ return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
+}
+
+/**
+ * t4_cim_read_la - read CIM LA capture buffer
+ * @adap: the adapter
+ * @la_buf: where to store the LA data
+ * @wrptr: the HW write pointer within the capture buffer
+ *
+ * Reads the contents of the CIM LA buffer with the most recent entry at
+ * the end of the returned data and with the entry at @wrptr first.
+ * We try to leave the LA in the running state we find it in.
+ */
+int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
+{
+ int i, ret;
+ unsigned int cfg, val, idx;
+
+ ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
+ if (ret)
+ return ret;
+
+ if (cfg & F_UPDBGLAEN) { /* LA is running, freeze it */
+ ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
+ if (ret)
+ return ret;
+ }
+
+ ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
+ if (ret)
+ goto restart;
+
+ idx = G_UPDBGLAWRPTR(val);
+ if (wrptr)
+ *wrptr = idx;
+
+ for (i = 0; i < adap->params.cim_la_size; i++) {
+ ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
+ V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
+ if (ret)
+ break;
+ ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
+ if (ret)
+ break;
+ if (val & F_UPDBGLARDEN) {
+ ret = -ETIMEDOUT;
+ break;
+ }
+ ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
+ if (ret)
+ break;
+ idx = (idx + 1) & M_UPDBGLARDPTR;
+ }
+restart:
+ if (cfg & F_UPDBGLAEN) {
+ int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
+ cfg & ~F_UPDBGLARDEN);
+ if (!ret)
+ ret = r;
+ }
+ return ret;
+}
+
+void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
+ unsigned int *pif_req_wrptr,
+ unsigned int *pif_rsp_wrptr)
+{
+ int i, j;
+ u32 cfg, val, req, rsp;
+
+ cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
+ if (cfg & F_LADBGEN)
+ t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
+
+ val = t4_read_reg(adap, A_CIM_DEBUGSTS);
+ req = G_POLADBGWRPTR(val);
+ rsp = G_PILADBGWRPTR(val);
+ if (pif_req_wrptr)
+ *pif_req_wrptr = req;
+ if (pif_rsp_wrptr)
+ *pif_rsp_wrptr = rsp;
+
+ for (i = 0; i < CIM_PIFLA_SIZE; i++) {
+ for (j = 0; j < 6; j++) {
+ t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
+ V_PILADBGRDPTR(rsp));
+ *pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
+ *pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
+ req++;
+ rsp++;
+ }
+ req = (req + 2) & M_POLADBGRDPTR;
+ rsp = (rsp + 2) & M_PILADBGRDPTR;
+ }
+ t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
+}
+
+void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
+{
+ u32 cfg;
+ int i, j, idx;
+
+ cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
+ if (cfg & F_LADBGEN)
+ t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
+
+ for (i = 0; i < CIM_MALA_SIZE; i++) {
+ for (j = 0; j < 5; j++) {
+ idx = 8 * i + j;
+ t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
+ V_PILADBGRDPTR(idx));
+ *ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
+ *ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
+ }
+ }
+ t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
+}
+
+/**
+ * t4_tp_read_la - read TP LA capture buffer
+ * @adap: the adapter
+ * @la_buf: where to store the LA data
+ * @wrptr: the HW write pointer within the capture buffer
+ *
+ * Reads the contents of the TP LA buffer with the most recent entry at
+ * the end of the returned data and with the entry at @wrptr first.
+ * We leave the LA in the running state we find it in.
+ */
+void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
+{
+ bool last_incomplete;
+ unsigned int i, cfg, val, idx;
+
+ cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
+ if (cfg & F_DBGLAENABLE) /* freeze LA */
+ t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
+ adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));
+
+ val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
+ idx = G_DBGLAWPTR(val);
+ last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
+ if (last_incomplete)
+ idx = (idx + 1) & M_DBGLARPTR;
+ if (wrptr)
+ *wrptr = idx;
+
+ val &= 0xffff;
+ val &= ~V_DBGLARPTR(M_DBGLARPTR);
+ val |= adap->params.tp.la_mask;
+
+ for (i = 0; i < TPLA_SIZE; i++) {
+ t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
+ la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
+ idx = (idx + 1) & M_DBGLARPTR;
+ }
+
+ /* Wipe out last entry if it isn't valid */
+ if (last_incomplete)
+ la_buf[TPLA_SIZE - 1] = ~0ULL;
+
+ if (cfg & F_DBGLAENABLE) /* restore running state */
+ t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
+ cfg | adap->params.tp.la_mask);
+}
+
+void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < 8; i++) {
+ u32 *p = la_buf + i;
+
+ t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
+ j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
+ t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
+ for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
+ *p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
+ }
+}
+
+#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
+ FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
+
+/**
+ * t4_link_start - apply link configuration to MAC/PHY
+ * @adap: the adapter
+ * @mbox: mbox to use for the FW command
+ * @port: the port id
+ * @lc: the requested link configuration
+ *
+ * Set up a port's MAC and PHY according to a desired link configuration.
+ * - If the PHY can auto-negotiate first decide what to advertise, then
+ * enable/disable auto-negotiation as desired, and reset.
+ * - If the PHY does not auto-negotiate just reset it.
+ * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
+ * otherwise do it later based on the outcome of auto-negotiation.
+ */
+int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
+ struct link_config *lc)
+{
+ struct fw_port_cmd c;
+ unsigned int fc = 0, mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
+
+ lc->link_ok = 0;
+ if (lc->requested_fc & PAUSE_RX)
+ fc |= FW_PORT_CAP_FC_RX;
+ if (lc->requested_fc & PAUSE_TX)
+ fc |= FW_PORT_CAP_FC_TX;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
+ c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
+ FW_LEN16(c));
+
+ if (!(lc->supported & FW_PORT_CAP_ANEG)) {
+ c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
+ lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+ } else if (lc->autoneg == AUTONEG_DISABLE) {
+ c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
+ lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+ } else
+ c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
+
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
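+
+/*
+ * Example (illustrative sketch): requesting full flow control and letting
+ * autonegotiation (if supported) pick the speed. "pi->link_cfg", "mbox"
+ * and "port" are placeholders for state the caller already has.
+ *
+ *	struct link_config *lc = &pi->link_cfg;
+ *	int ret;
+ *
+ *	lc->requested_fc = PAUSE_RX | PAUSE_TX;
+ *	ret = t4_link_start(adap, mbox, port, lc);
+ */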
+
+/**
+ * t4_restart_aneg - restart autonegotiation
+ * @adap: the adapter
+ * @mbox: mbox to use for the FW command
+ * @port: the port id
+ *
+ * Restarts autonegotiation for the selected port.
+ */
+int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
+{
+ struct fw_port_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
+ c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
+ FW_LEN16(c));
+ c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+struct intr_info {
+ unsigned int mask; /* bits to check in interrupt status */
+ const char *msg; /* message to print or NULL */
+ short stat_idx; /* stat counter to increment or -1 */
+ unsigned short fatal; /* whether the condition reported is fatal */
+};
+
+/**
+ * t4_handle_intr_status - table driven interrupt handler
+ * @adapter: the adapter that generated the interrupt
+ * @reg: the interrupt status register to process
+ * @acts: table of interrupt actions
+ *
+ * A table driven interrupt handler that applies a set of masks to an
+ * interrupt status word and performs the corresponding actions if the
+ * interrupts described by the mask have occurred. The actions include
+ * optionally emitting a warning or alert message. The table is terminated
+ * by an entry specifying mask 0. Returns the number of fatal interrupt
+ * conditions.
+ */
+static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
+ const struct intr_info *acts)
+{
+ int fatal = 0;
+ unsigned int mask = 0;
+ unsigned int status = t4_read_reg(adapter, reg);
+
+ for ( ; acts->mask; ++acts) {
+ if (!(status & acts->mask))
+ continue;
+ if (acts->fatal) {
+ fatal++;
+ CH_ALERT(adapter, "%s (0x%x)\n",
+ acts->msg, status & acts->mask);
+ } else if (acts->msg)
+ CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n",
+ acts->msg, status & acts->mask);
+ mask |= acts->mask;
+ }
+ status &= mask;
+ if (status) /* clear processed interrupts */
+ t4_write_reg(adapter, reg, status);
+ return fatal;
+}
+
+/*
+ * Interrupt handler for the PCIE module.
+ */
+static void pcie_intr_handler(struct adapter *adapter)
+{
+ static struct intr_info sysbus_intr_info[] = {
+ { F_RNPP, "RXNP array parity error", -1, 1 },
+ { F_RPCP, "RXPC array parity error", -1, 1 },
+ { F_RCIP, "RXCIF array parity error", -1, 1 },
+ { F_RCCP, "Rx completions control array parity error", -1, 1 },
+ { F_RFTP, "RXFT array parity error", -1, 1 },
+ { 0 }
+ };
+ static struct intr_info pcie_port_intr_info[] = {
+ { F_TPCP, "TXPC array parity error", -1, 1 },
+ { F_TNPP, "TXNP array parity error", -1, 1 },
+ { F_TFTP, "TXFT array parity error", -1, 1 },
+ { F_TCAP, "TXCA array parity error", -1, 1 },
+ { F_TCIP, "TXCIF array parity error", -1, 1 },
+ { F_RCAP, "RXCA array parity error", -1, 1 },
+ { F_OTDD, "outbound request TLP discarded", -1, 1 },
+ { F_RDPE, "Rx data parity error", -1, 1 },
+ { F_TDUE, "Tx uncorrectable data error", -1, 1 },
+ { 0 }
+ };
+ static struct intr_info pcie_intr_info[] = {
+ { F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
+ { F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
+ { F_MSIDATAPERR, "MSI data parity error", -1, 1 },
+ { F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
+ { F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
+ { F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
+ { F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
+ { F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
+ { F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
+ { F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
+ { F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
+ { F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
+ { F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
+ { F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
+ { F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
+ { F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
+ { F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
+ { F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
+ { F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
+ { F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
+ { F_FIDPERR, "PCI FID parity error", -1, 1 },
+ { F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
+ { F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
+ { F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
+ { F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
+ { F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
+ { F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
+ { F_PCIESINT, "PCI core secondary fault", -1, 1 },
+ { F_PCIEPINT, "PCI core primary fault", -1, 1 },
+ { F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
+ 0 },
+ { 0 }
+ };
+
+ int fat;
+
+ fat = t4_handle_intr_status(adapter,
+ A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
+ sysbus_intr_info) +
+ t4_handle_intr_status(adapter,
+ A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
+ pcie_port_intr_info) +
+ t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE, pcie_intr_info);
+ if (fat)
+ t4_fatal_err(adapter);
+}
+
+/*
+ * TP interrupt handler.
+ */
+static void tp_intr_handler(struct adapter *adapter)
+{
+ static struct intr_info tp_intr_info[] = {
+ { 0x3fffffff, "TP parity error", -1, 1 },
+ { F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
+ { 0 }
+ };
+
+ if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
+ t4_fatal_err(adapter);
+}
+
+/*
+ * SGE interrupt handler.
+ */
+static void sge_intr_handler(struct adapter *adapter)
+{
+ u64 v;
+ u32 err;
+
+ static struct intr_info sge_intr_info[] = {
+ { F_ERR_CPL_EXCEED_IQE_SIZE,
+ "SGE received CPL exceeding IQE size", -1, 1 },
+ { F_ERR_INVALID_CIDX_INC,
+ "SGE GTS CIDX increment too large", -1, 0 },
+ { F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
+ { F_ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
+ { F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
+ "SGE IQID > 1023 received CPL for FL", -1, 0 },
+ { F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
+ 0 },
+ { F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
+ 0 },
+ { F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
+ 0 },
+ { F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
+ 0 },
+ { F_ERR_ING_CTXT_PRIO,
+ "SGE too many priority ingress contexts", -1, 0 },
+ { F_ERR_EGR_CTXT_PRIO,
+ "SGE too many priority egress contexts", -1, 0 },
+ { F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
+ { F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
+ { 0 }
+ };
+
+ v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) |
+ ((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32);
+ if (v) {
+ CH_ALERT(adapter, "SGE parity error (%#llx)\n",
+ (unsigned long long)v);
+ t4_write_reg(adapter, A_SGE_INT_CAUSE1, v);
+ t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32);
+ }
+
+ v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
+
+ err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
+ if (err & F_ERROR_QID_VALID) {
+ CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
+ t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID);
+ }
+
+ if (v != 0)
+ t4_fatal_err(adapter);
+}
+
+#define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
+ F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
+#define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
+ F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)
+
+/*
+ * CIM interrupt handler.
+ */
+static void cim_intr_handler(struct adapter *adapter)
+{
+ static struct intr_info cim_intr_info[] = {
+ { F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
+ { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
+ { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
+ { F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
+ { F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
+ { F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
+ { F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
+ { 0 }
+ };
+ static struct intr_info cim_upintr_info[] = {
+ { F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
+ { F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
+ { F_ILLWRINT, "CIM illegal write", -1, 1 },
+ { F_ILLRDINT, "CIM illegal read", -1, 1 },
+ { F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
+ { F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
+ { F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
+ { F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
+ { F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
+ { F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
+ { F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
+ { F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
+ { F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
+ { F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
+ { F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
+ { F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
+ { F_SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
+ { F_SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
+ { F_BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
+ { F_BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
+ { F_SGLRDPLINT , "CIM single read from PL space", -1, 1 },
+ { F_SGLWRPLINT , "CIM single write to PL space", -1, 1 },
+ { F_BLKRDPLINT , "CIM block read from PL space", -1, 1 },
+ { F_BLKWRPLINT , "CIM block write to PL space", -1, 1 },
+ { F_REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
+ { F_RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
+ { F_TIMEOUTINT , "CIM PIF timeout", -1, 1 },
+ { F_TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
+ { 0 }
+ };
+
+ int fat;
+
+ fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
+ cim_intr_info) +
+ t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
+ cim_upintr_info);
+ if (fat)
+ t4_fatal_err(adapter);
+}
+
+/*
+ * ULP RX interrupt handler.
+ */
+static void ulprx_intr_handler(struct adapter *adapter)
+{
+ static struct intr_info ulprx_intr_info[] = {
+ { F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
+ { F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
+ { 0x7fffff, "ULPRX parity error", -1, 1 },
+ { 0 }
+ };
+
+ if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
+ t4_fatal_err(adapter);
+}
+
+/*
+ * ULP TX interrupt handler.
+ */
+static void ulptx_intr_handler(struct adapter *adapter)
+{
+ static struct intr_info ulptx_intr_info[] = {
+ { F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
+ 0 },
+ { F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
+ 0 },
+ { F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
+ 0 },
+ { F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
+ 0 },
+ { 0xfffffff, "ULPTX parity error", -1, 1 },
+ { 0 }
+ };
+
+ if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info))
+ t4_fatal_err(adapter);
+}
+
+/*
+ * PM TX interrupt handler.
+ */
+static void pmtx_intr_handler(struct adapter *adapter)
+{
+ static struct intr_info pmtx_intr_info[] = {
+ { F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
+ { F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
+ { F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
+ { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
+ { 0xffffff0, "PMTX framing error", -1, 1 },
+ { F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
+ { F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
+ 1 },
+ { F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
+ { F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
+ { 0 }
+ };
+
+ if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
+ t4_fatal_err(adapter);
+}
+
+/*
+ * PM RX interrupt handler.
+ */
+static void pmrx_intr_handler(struct adapter *adapter)
+{
+ static struct intr_info pmrx_intr_info[] = {
+ { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
+ { 0x3ffff0, "PMRX framing error", -1, 1 },
+ { F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
+ { F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
+ 1 },
+ { F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
+ { F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
+ { 0 }
+ };
+
+ if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
+ t4_fatal_err(adapter);
+}
+
+/*
+ * CPL switch interrupt handler.
+ */
+static void cplsw_intr_handler(struct adapter *adapter)
+{
+ static struct intr_info cplsw_intr_info[] = {
+ { F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
+ { F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
+ { F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
+ { F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
+ { F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
+ { F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
+ { 0 }
+ };
+
+ if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
+ t4_fatal_err(adapter);
+}
+
+/*
+ * LE interrupt handler.
+ */
+static void le_intr_handler(struct adapter *adap)
+{
+ static struct intr_info le_intr_info[] = {
+ { F_LIPMISS, "LE LIP miss", -1, 0 },
+ { F_LIP0, "LE 0 LIP error", -1, 0 },
+ { F_PARITYERR, "LE parity error", -1, 1 },
+ { F_UNKNOWNCMD, "LE unknown command", -1, 1 },
+ { F_REQQPARERR, "LE request queue parity error", -1, 1 },
+ { 0 }
+ };
+
+ if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE, le_intr_info))
+ t4_fatal_err(adap);
+}
+
+/*
+ * MPS interrupt handler.
+ */
+static void mps_intr_handler(struct adapter *adapter)
+{
+ static struct intr_info mps_rx_intr_info[] = {
+ { 0xffffff, "MPS Rx parity error", -1, 1 },
+ { 0 }
+ };
+ static struct intr_info mps_tx_intr_info[] = {
+ { V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
+ { F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
+ { V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
+ -1, 1 },
+ { V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
+ -1, 1 },
+ { F_BUBBLE, "MPS Tx underflow", -1, 1 },
+ { F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
+ { F_FRMERR, "MPS Tx framing error", -1, 1 },
+ { 0 }
+ };
+ static struct intr_info mps_trc_intr_info[] = {
+ { V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
+ { V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
+ 1 },
+ { F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
+ { 0 }
+ };
+ static struct intr_info mps_stat_sram_intr_info[] = {
+ { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
+ { 0 }
+ };
+ static struct intr_info mps_stat_tx_intr_info[] = {
+ { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
+ { 0 }
+ };
+ static struct intr_info mps_stat_rx_intr_info[] = {
+ { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
+ { 0 }
+ };
+ static struct intr_info mps_cls_intr_info[] = {
+ { F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
+ { F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
+ { F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
+ { 0 }
+ };
+
+ int fat;
+
+ fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
+ mps_rx_intr_info) +
+ t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
+ mps_tx_intr_info) +
+ t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
+ mps_trc_intr_info) +
+ t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
+ mps_stat_sram_intr_info) +
+ t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
+ mps_stat_tx_intr_info) +
+ t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
+ mps_stat_rx_intr_info) +
+ t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
+ mps_cls_intr_info);
+
+ t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
+ t4_read_reg(adapter, A_MPS_INT_CAUSE); /* flush */
+ if (fat)
+ t4_fatal_err(adapter);
+}
+
+#define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | F_ECC_UE_INT_CAUSE)
+
+/*
+ * EDC/MC interrupt handler.
+ */
+static void mem_intr_handler(struct adapter *adapter, int idx)
+{
+ static const char name[3][5] = { "EDC0", "EDC1", "MC" };
+
+ unsigned int addr, cnt_addr, v;
+
+ if (idx <= MEM_EDC1) {
+ addr = EDC_REG(A_EDC_INT_CAUSE, idx);
+ cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
+ } else {
+ addr = A_MC_INT_CAUSE;
+ cnt_addr = A_MC_ECC_STATUS;
+ }
+
+ v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
+ if (v & F_PERR_INT_CAUSE)
+ CH_ALERT(adapter, "%s FIFO parity error\n", name[idx]);
+ if (v & F_ECC_CE_INT_CAUSE) {
+ u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));
+
+ t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
+ CH_WARN_RATELIMIT(adapter,
+ "%u %s correctable ECC data error%s\n",
+ cnt, name[idx], cnt > 1 ? "s" : "");
+ }
+ if (v & F_ECC_UE_INT_CAUSE)
+ CH_ALERT(adapter, "%s uncorrectable ECC data error\n",
+ name[idx]);
+
+ t4_write_reg(adapter, addr, v);
+ if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
+ t4_fatal_err(adapter);
+}
+
+/*
+ * MA interrupt handler.
+ */
+static void ma_intr_handler(struct adapter *adapter)
+{
+ u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);
+
+ if (status & F_MEM_PERR_INT_CAUSE)
+ CH_ALERT(adapter, "MA parity error, parity status %#x\n",
+ t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS));
+ if (status & F_MEM_WRAP_INT_CAUSE) {
+ v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
+ CH_ALERT(adapter, "MA address wrap-around error by client %u to"
+ " address %#x\n", G_MEM_WRAP_CLIENT_NUM(v),
+ G_MEM_WRAP_ADDRESS(v) << 4);
+ }
+ t4_write_reg(adapter, A_MA_INT_CAUSE, status);
+ t4_fatal_err(adapter);
+}
+
+/*
+ * SMB interrupt handler.
+ */
+static void smb_intr_handler(struct adapter *adap)
+{
+ static struct intr_info smb_intr_info[] = {
+ { F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
+ { F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
+ { F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
+ { 0 }
+ };
+
+ if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info))
+ t4_fatal_err(adap);
+}
+
+/*
+ * NC-SI interrupt handler.
+ */
+static void ncsi_intr_handler(struct adapter *adap)
+{
+ static struct intr_info ncsi_intr_info[] = {
+ { F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
+ { F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
+ { F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
+ { F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
+ { 0 }
+ };
+
+ if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info))
+ t4_fatal_err(adap);
+}
+
+/*
+ * XGMAC interrupt handler.
+ */
+static void xgmac_intr_handler(struct adapter *adap, int port)
+{
+ u32 v = t4_read_reg(adap, PORT_REG(port, A_XGMAC_PORT_INT_CAUSE));
+
+ v &= F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR;
+ if (!v)
+ return;
+
+ if (v & F_TXFIFO_PRTY_ERR)
+ CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n", port);
+ if (v & F_RXFIFO_PRTY_ERR)
+ CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n", port);
+ t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_INT_CAUSE), v);
+ t4_fatal_err(adap);
+}
+
+/*
+ * PL interrupt handler.
+ */
+static void pl_intr_handler(struct adapter *adap)
+{
+ static struct intr_info pl_intr_info[] = {
+ { F_FATALPERR, "T4 fatal parity error", -1, 1 },
+ { F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
+ { 0 }
+ };
+
+ if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE, pl_intr_info))
+ t4_fatal_err(adap);
+}
+
+#define PF_INTR_MASK (F_PFSW | F_PFCIM)
+#define GLBL_INTR_MASK (F_CIM | F_MPS | F_PL | F_PCIE | F_MC | F_EDC0 | \
+ F_EDC1 | F_LE | F_TP | F_MA | F_PM_TX | F_PM_RX | F_ULP_RX | \
+ F_CPL_SWITCH | F_SGE | F_ULP_TX)
+
+/**
+ * t4_slow_intr_handler - control path interrupt handler
+ * @adapter: the adapter
+ *
+ * T4 interrupt handler for non-data global interrupt events, e.g., errors.
+ * The designation 'slow' is because it involves register reads, while
+ * data interrupts typically don't involve any MMIOs.
+ */
+int t4_slow_intr_handler(struct adapter *adapter)
+{
+ u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE);
+
+ if (!(cause & GLBL_INTR_MASK))
+ return 0;
+ if (cause & F_CIM)
+ cim_intr_handler(adapter);
+ if (cause & F_MPS)
+ mps_intr_handler(adapter);
+ if (cause & F_NCSI)
+ ncsi_intr_handler(adapter);
+ if (cause & F_PL)
+ pl_intr_handler(adapter);
+ if (cause & F_SMB)
+ smb_intr_handler(adapter);
+ if (cause & F_XGMAC0)
+ xgmac_intr_handler(adapter, 0);
+ if (cause & F_XGMAC1)
+ xgmac_intr_handler(adapter, 1);
+ if (cause & F_XGMAC_KR0)
+ xgmac_intr_handler(adapter, 2);
+ if (cause & F_XGMAC_KR1)
+ xgmac_intr_handler(adapter, 3);
+ if (cause & F_PCIE)
+ pcie_intr_handler(adapter);
+ if (cause & F_MC)
+ mem_intr_handler(adapter, MEM_MC);
+ if (cause & F_EDC0)
+ mem_intr_handler(adapter, MEM_EDC0);
+ if (cause & F_EDC1)
+ mem_intr_handler(adapter, MEM_EDC1);
+ if (cause & F_LE)
+ le_intr_handler(adapter);
+ if (cause & F_TP)
+ tp_intr_handler(adapter);
+ if (cause & F_MA)
+ ma_intr_handler(adapter);
+ if (cause & F_PM_TX)
+ pmtx_intr_handler(adapter);
+ if (cause & F_PM_RX)
+ pmrx_intr_handler(adapter);
+ if (cause & F_ULP_RX)
+ ulprx_intr_handler(adapter);
+ if (cause & F_CPL_SWITCH)
+ cplsw_intr_handler(adapter);
+ if (cause & F_SGE)
+ sge_intr_handler(adapter);
+ if (cause & F_ULP_TX)
+ ulptx_intr_handler(adapter);
+
+ /* Clear the interrupts just processed for which we are the master. */
+ t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK);
+ (void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
+ return 1;
+}
+
+/**
+ * t4_intr_enable - enable interrupts
+ * @adapter: the adapter whose interrupts should be enabled
+ *
+ * Enable PF-specific interrupts for the calling function and the top-level
+ * interrupt concentrator for global interrupts. Interrupts are already
+ * enabled at each module, here we just enable the roots of the interrupt
+ * hierarchies.
+ *
+ * Note: this function should be called only when the driver manages
+ * non PF-specific interrupts from the various HW modules. Only one PCI
+ * function at a time should be doing this.
+ */
+void t4_intr_enable(struct adapter *adapter)
+{
+ u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
+
+ t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
+ F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
+ F_ERR_DROPPED_DB | F_ERR_DATA_CPL_ON_HIGH_QID1 |
+ F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
+ F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
+ F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
+ F_ERR_EGR_CTXT_PRIO | F_INGRESS_SIZE_ERR |
+ F_EGRESS_SIZE_ERR);
+ t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
+ t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
+}
+
+/**
+ * t4_intr_disable - disable interrupts
+ * @adapter: the adapter whose interrupts should be disabled
+ *
+ * Disable interrupts. We only disable the top-level interrupt
+ * concentrators. The caller must be a PCI function managing global
+ * interrupts.
+ */
+void t4_intr_disable(struct adapter *adapter)
+{
+ u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
+
+ t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
+ t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
+}
+
+/**
+ * t4_intr_clear - clear all interrupts
+ * @adapter: the adapter whose interrupts should be cleared
+ *
+ * Clears all interrupts. The caller must be a PCI function managing
+ * global interrupts.
+ */
+void t4_intr_clear(struct adapter *adapter)
+{
+ static const unsigned int cause_reg[] = {
+ A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3,
+ A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
+ A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
+ A_PCIE_NONFAT_ERR, A_PCIE_INT_CAUSE,
+ A_MC_INT_CAUSE,
+ A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS, A_MA_INT_CAUSE,
+ A_EDC_INT_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 1),
+ A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE,
+ MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
+ A_TP_INT_CAUSE,
+ A_ULP_RX_INT_CAUSE, A_ULP_TX_INT_CAUSE,
+ A_PM_RX_INT_CAUSE, A_PM_TX_INT_CAUSE,
+ A_MPS_RX_PERR_INT_CAUSE,
+ A_CPL_INTR_CAUSE,
+ MYPF_REG(A_PL_PF_INT_CAUSE),
+ A_PL_PL_INT_CAUSE,
+ A_LE_DB_INT_CAUSE,
+ };
+
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
+ t4_write_reg(adapter, cause_reg[i], 0xffffffff);
+
+ t4_write_reg(adapter, A_PL_INT_CAUSE, GLBL_INTR_MASK);
+ (void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
+}
+
+/**
+ * hash_mac_addr - return the hash value of a MAC address
+ * @addr: the 48-bit Ethernet MAC address
+ *
+ * Hashes a MAC address according to the hash function used by HW inexact
+ * (hash) address matching.
+ */
+static int hash_mac_addr(const u8 *addr)
+{
+ u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
+ u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
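+ /* Fold the two 24-bit halves together, then fold the result down to 6 bits. */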
+ a ^= b;
+ a ^= (a >> 12);
+ a ^= (a >> 6);
+ return a & 0x3f;
+}
+
+/**
+ * t4_config_rss_range - configure a portion of the RSS mapping table
+ * @adapter: the adapter
+ * @mbox: mbox to use for the FW command
+ * @viid: virtual interface whose RSS subtable is to be written
+ * @start: start entry in the table to write
+ * @n: how many table entries to write
+ * @rspq: values for the "response queue" (Ingress Queue) lookup table
+ * @nrspq: number of values in @rspq
+ *
+ * Programs the selected part of the VI's RSS mapping table with the
+ * provided values. If @nrspq < @n the supplied values are used repeatedly
+ * until the full table range is populated.
+ *
+ * The caller must ensure the values in @rspq are in the range allowed for
+ * @viid.
+ */
+int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
+ int start, int n, const u16 *rspq, unsigned int nrspq)
+{
+ int ret;
+ const u16 *rsp = rspq;
+ const u16 *rsp_end = rspq + nrspq;
+ struct fw_rss_ind_tbl_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
+ V_FW_RSS_IND_TBL_CMD_VIID(viid));
+ cmd.retval_len16 = htonl(FW_LEN16(cmd));
+
+ /*
+ * Each firmware RSS command can accommodate up to 32 RSS Ingress
+ * Queue Identifiers. These Ingress Queue IDs are packed three to
+ * a 32-bit word as 10-bit values with the upper remaining 2 bits
+ * reserved.
+ */
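+ /*
+ * With a full command (nq == 32) the inner loop below packs the IDs
+ * into ceil(32 / 3) = 11 such words.
+ */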
+ while (n > 0) {
+ int nq = min(n, 32);
+ __be32 *qp = &cmd.iq0_to_iq2;
+
+ /*
+ * Set up the firmware RSS command header to send the next
+ * "nq" Ingress Queue IDs to the firmware.
+ */
+ cmd.niqid = htons(nq);
+ cmd.startidx = htons(start);
+
+ /*
+ * "nq" more done for the start of the next loop.
+ */
+ start += nq;
+ n -= nq;
+
+ /*
+ * While there are still Ingress Queue IDs to stuff into the
+ * current firmware RSS command, retrieve them from the
+ * Ingress Queue ID array and insert them into the command.
+ */
+ while (nq > 0) {
+ unsigned int v;
+ /*
+ * Grab up to the next 3 Ingress Queue IDs (wrapping
+ * around the Ingress Queue ID array if necessary) and
+ * insert them into the firmware RSS command at the
+ * current 3-tuple position within the command.
+ */
+ v = V_FW_RSS_IND_TBL_CMD_IQ0(*rsp);
+ if (++rsp >= rsp_end)
+ rsp = rspq;
+ v |= V_FW_RSS_IND_TBL_CMD_IQ1(*rsp);
+ if (++rsp >= rsp_end)
+ rsp = rspq;
+ v |= V_FW_RSS_IND_TBL_CMD_IQ2(*rsp);
+ if (++rsp >= rsp_end)
+ rsp = rspq;
+
+ *qp++ = htonl(v);
+ nq -= 3;
+ }
+
+ /*
+ * Send this portion of the RSS table update to the firmware;
+ * bail out on any errors.
+ */
+ ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * t4_config_glbl_rss - configure the global RSS mode
+ * @adapter: the adapter
+ * @mbox: mbox to use for the FW command
+ * @mode: global RSS mode
+ * @flags: mode-specific flags
+ *
+ * Sets the global RSS mode.
+ */
+int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
+ unsigned int flags)
+{
+ struct fw_rss_glb_config_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_write = htonl(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
+ c.retval_len16 = htonl(FW_LEN16(c));
+ if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
+ c.u.manual.mode_pkd = htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
+ } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
+ c.u.basicvirtual.mode_pkd =
+ htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
+ c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
+ } else
+ return -EINVAL;
+ return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_config_vi_rss - configure per VI RSS settings
+ * @adapter: the adapter
+ * @mbox: mbox to use for the FW command
+ * @viid: the VI id
+ * @flags: RSS flags
+ * @defq: id of the default RSS queue for the VI.
+ *
+ * Configures VI-specific RSS properties.
+ */
+int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
+ unsigned int flags, unsigned int defq)
+{
+ struct fw_rss_vi_config_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
+ V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
+ c.retval_len16 = htonl(FW_LEN16(c));
+ c.u.basicvirtual.defaultq_to_udpen = htonl(flags |
+ V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
+ return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
+}
+
+/* Read an RSS table row */
+static int rd_rss_row(struct adapter *adap, int row, u32 *val)
+{
+ t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
+ return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
+ 5, 0, val);
+}
+
+/**
+ * t4_read_rss - read the contents of the RSS mapping table
+ * @adapter: the adapter
+ * @map: holds the contents of the RSS mapping table
+ *
+ * Reads the contents of the RSS hash->queue mapping table.
+ */
+int t4_read_rss(struct adapter *adapter, u16 *map)
+{
+ u32 val;
+ int i, ret;
+
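+ /* Each lookup table row holds two queue entries, hence RSS_NENTRIES / 2 reads. */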
+ for (i = 0; i < RSS_NENTRIES / 2; ++i) {
+ ret = rd_rss_row(adapter, i, &val);
+ if (ret)
+ return ret;
+ *map++ = G_LKPTBLQUEUE0(val);
+ *map++ = G_LKPTBLQUEUE1(val);
+ }
+ return 0;
+}
+
+/**
+ * t4_read_rss_key - read the global RSS key
+ * @adap: the adapter
+ * @key: 10-entry array holding the 320-bit RSS key
+ *
+ * Reads the global 320-bit RSS key.
+ */
+void t4_read_rss_key(struct adapter *adap, u32 *key)
+{
+ t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
+ A_TP_RSS_SECRET_KEY0);
+}
+
+/**
+ * t4_write_rss_key - program one of the RSS keys
+ * @adap: the adapter
+ * @key: 10-entry array holding the 320-bit RSS key
+ * @idx: which RSS key to write
+ *
+ * Writes one of the RSS keys with the given 320-bit value. If @idx is
+ * 0..15 the corresponding entry in the RSS key table is written,
+ * otherwise the global RSS key is written.
+ */
+void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
+{
+ t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
+ A_TP_RSS_SECRET_KEY0);
+ if (idx >= 0 && idx < 16)
+ t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
+ V_KEYWRADDR(idx) | F_KEYWREN);
+}
+
+/**
+ * t4_read_rss_pf_config - read PF RSS Configuration Table
+ * @adapter: the adapter
+ * @index: the entry in the PF RSS table to read
+ * @valp: where to store the returned value
+ *
+ * Reads the PF RSS Configuration Table at the specified index and returns
+ * the value found there.
+ */
+void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, u32 *valp)
+{
+ t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
+ valp, 1, A_TP_RSS_PF0_CONFIG + index);
+}
+
+/**
+ * t4_write_rss_pf_config - write PF RSS Configuration Table
+ * @adapter: the adapter
+ * @index: the entry in the PF RSS table to write
+ * @val: the value to store
+ *
+ * Writes the PF RSS Configuration Table at the specified index with the
+ * specified value.
+ */
+void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index, u32 val)
+{
+ t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
+ &val, 1, A_TP_RSS_PF0_CONFIG + index);
+}
+
+/**
+ * t4_read_rss_vf_config - read VF RSS Configuration Table
+ * @adapter: the adapter
+ * @index: the entry in the VF RSS table to read
+ * @vfl: where to store the returned VFL
+ * @vfh: where to store the returned VFH
+ *
+ * Reads the VF RSS Configuration Table at the specified index and returns
+ * the (VFL, VFH) values found there.
+ */
+void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
+ u32 *vfl, u32 *vfh)
+{
+ u32 vrt;
+
+ /*
+ * Request that the index'th VF Table values be read into VFL/VFH.
+ */
+ vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
+ vrt &= ~(F_VFRDRG | V_VFWRADDR(M_VFWRADDR) | F_VFWREN | F_KEYWREN);
+ vrt |= V_VFWRADDR(index) | F_VFRDEN;
+ t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
+
+ /*
+ * Grab the VFL/VFH values ...
+ */
+ t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
+ vfl, 1, A_TP_RSS_VFL_CONFIG);
+ t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
+ vfh, 1, A_TP_RSS_VFH_CONFIG);
+}
+
+/**
+ * t4_write_rss_vf_config - write VF RSS Configuration Table
+ *
+ * @adapter: the adapter
+ * @index: the entry in the VF RSS table to write
+ * @vfl: the VFL to store
+ * @vfh: the VFH to store
+ *
+ * Writes the VF RSS Configuration Table at the specified index with the
+ * specified (VFL, VFH) values.
+ */
+void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
+ u32 vfl, u32 vfh)
+{
+ u32 vrt;
+
+ /*
+ * Load up VFL/VFH with the values to be written ...
+ */
+ t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
+ &vfl, 1, A_TP_RSS_VFL_CONFIG);
+ t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
+ &vfh, 1, A_TP_RSS_VFH_CONFIG);
+
+ /*
+ * Write the VFL/VFH into the VF Table at index'th location.
+ */
+ vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
+ vrt &= ~(F_VFRDRG | F_VFRDEN | V_VFWRADDR(M_VFWRADDR) | F_KEYWREN);
+ vrt |= V_VFWRADDR(index) | F_VFWREN;
+ t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
+}
+
+/**
+ * t4_read_rss_pf_map - read PF RSS Map
+ * @adapter: the adapter
+ *
+ * Reads the PF RSS Map register and returns its value.
+ */
+u32 t4_read_rss_pf_map(struct adapter *adapter)
+{
+ u32 pfmap;
+
+ t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
+ &pfmap, 1, A_TP_RSS_PF_MAP);
+ return pfmap;
+}
+
+/**
+ * t4_write_rss_pf_map - write PF RSS Map
+ * @adapter: the adapter
+ * @pfmap: PF RSS Map value
+ *
+ * Writes the specified value to the PF RSS Map register.
+ */
+void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap)
+{
+ t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
+ &pfmap, 1, A_TP_RSS_PF_MAP);
+}
+
+/**
+ * t4_read_rss_pf_mask - read PF RSS Mask
+ * @adapter: the adapter
+ *
+ * Reads the PF RSS Mask register and returns its value.
+ */
+u32 t4_read_rss_pf_mask(struct adapter *adapter)
+{
+ u32 pfmask;
+
+ t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
+ &pfmask, 1, A_TP_RSS_PF_MSK);
+ return pfmask;
+}
+
+/**
+ * t4_write_rss_pf_mask - write PF RSS Mask
+ * @adapter: the adapter
+ * @pfmask: PF RSS Mask value
+ *
+ * Writes the specified value to the PF RSS Mask register.
+ */
+void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask)
+{
+ t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
+ &pfmask, 1, A_TP_RSS_PF_MSK);
+}
+
+/**
+ * t4_set_filter_mode - configure the optional components of filter tuples
+ * @adap: the adapter
+ * @mode_map: a bitmap selecting which optional filter components to enable
+ *
+ * Sets the filter mode by selecting the optional components to enable
+ * in filter tuples. Returns 0 on success and a negative error if the
+ * requested mode needs more bits than are available for optional
+ * components.
+ */
+int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map)
+{
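+ /* Width, in bits, of each optional filter tuple field, indexed S_FCOE..S_FRAGMENTATION. */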
+ static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
+
+ int i, nbits = 0;
+
+ for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
+ if (mode_map & (1 << i))
+ nbits += width[i];
+ if (nbits > FILTER_OPT_LEN)
+ return -EINVAL;
+ t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, &mode_map, 1,
+ A_TP_VLAN_PRI_MAP);
+ return 0;
+}
+
+/**
+ * t4_tp_get_tcp_stats - read TP's TCP MIB counters
+ * @adap: the adapter
+ * @v4: holds the TCP/IP counter values
+ * @v6: holds the TCP/IPv6 counter values
+ *
+ * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
+ * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
+ */
+void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
+ struct tp_tcp_stats *v6)
+{
+ u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];
+
+#define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
+#define STAT(x) val[STAT_IDX(x)]
+#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
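+ /* STAT64() combines a counter's _HI/_LO register pair into one 64-bit value. */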
+
+ if (v4) {
+ t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
+ ARRAY_SIZE(val), A_TP_MIB_TCP_OUT_RST);
+ v4->tcpOutRsts = STAT(OUT_RST);
+ v4->tcpInSegs = STAT64(IN_SEG);
+ v4->tcpOutSegs = STAT64(OUT_SEG);
+ v4->tcpRetransSegs = STAT64(RXT_SEG);
+ }
+ if (v6) {
+ t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
+ ARRAY_SIZE(val), A_TP_MIB_TCP_V6OUT_RST);
+ v6->tcpOutRsts = STAT(OUT_RST);
+ v6->tcpInSegs = STAT64(IN_SEG);
+ v6->tcpOutSegs = STAT64(OUT_SEG);
+ v6->tcpRetransSegs = STAT64(RXT_SEG);
+ }
+#undef STAT64
+#undef STAT
+#undef STAT_IDX
+}
+
+/**
+ * t4_tp_get_err_stats - read TP's error MIB counters
+ * @adap: the adapter
+ * @st: holds the counter values
+ *
+ * Returns the values of TP's error counters.
+ */
+void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
+{
+ t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->macInErrs,
+ 12, A_TP_MIB_MAC_IN_ERR_0);
+ t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlCongDrops,
+ 8, A_TP_MIB_TNL_CNG_DROP_0);
+ t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlTxDrops,
+ 4, A_TP_MIB_TNL_DROP_0);
+ t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->ofldVlanDrops,
+ 4, A_TP_MIB_OFD_VLN_DROP_0);
+ t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tcp6InErrs,
+ 4, A_TP_MIB_TCP_V6IN_ERR_0);
+ t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->ofldNoNeigh,
+ 2, A_TP_MIB_OFD_ARP_DROP);
+}
+
+/**
+ * t4_tp_get_proxy_stats - read TP's proxy MIB counters
+ * @adap: the adapter
+ * @st: holds the counter values
+ *
+ * Returns the values of TP's proxy counters.
+ */
+void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st)
+{
+ t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->proxy,
+ 4, A_TP_MIB_TNL_LPBK_0);
+}
+
+/**
+ * t4_tp_get_cpl_stats - read TP's CPL MIB counters
+ * @adap: the adapter
+ * @st: holds the counter values
+ *
+ * Returns the values of TP's CPL counters.
+ */
+void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
+{
+ t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->req,
+ 8, A_TP_MIB_CPL_IN_REQ_0);
+ t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tx_err,
+ 4, A_TP_MIB_CPL_OUT_ERR_0);
+}
+
+/**
+ * t4_tp_get_rdma_stats - read TP's RDMA MIB counters
+ * @adap: the adapter
+ * @st: holds the counter values
+ *
+ * Returns the values of TP's RDMA counters.
+ */
+void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
+{
+ t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->rqe_dfr_mod,
+ 2, A_TP_MIB_RQE_DFR_MOD);
+}
+
+/**
+ * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
+ * @adap: the adapter
+ * @idx: the port index
+ * @st: holds the counter values
+ *
+ * Returns the values of TP's FCoE counters for the selected port.
+ */
+void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
+ struct tp_fcoe_stats *st)
+{
+ u32 val[2];
+
+ t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDDP,
+ 1, A_TP_MIB_FCOE_DDP_0 + idx);
+ t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDrop,
+ 1, A_TP_MIB_FCOE_DROP_0 + idx);
+ t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
+ 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx);
+ st->octetsDDP = ((u64)val[0] << 32) | val[1];
+}
+
+/**
+ * t4_get_usm_stats - read TP's non-TCP DDP MIB counters
+ * @adap: the adapter
+ * @st: holds the counter values
+ *
+ * Returns the values of TP's counters for non-TCP directly-placed packets.
+ */
+void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
+{
+ u32 val[4];
+
+ t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 4,
+ A_TP_MIB_USM_PKTS);
+ st->frames = val[0];
+ st->drops = val[1];
+ st->octets = ((u64)val[2] << 32) | val[3];
+}
+
+/**
+ * t4_read_mtu_tbl - returns the values in the HW path MTU table
+ * @adap: the adapter
+ * @mtus: where to store the MTU values
+ * @mtu_log: where to store the MTU base-2 log (may be %NULL)
+ *
+ * Reads the HW path MTU table.
+ */
+void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
+{
+ u32 v;
+ int i;
+
+ for (i = 0; i < NMTUS; ++i) {
+ t4_write_reg(adap, A_TP_MTU_TABLE,
+ V_MTUINDEX(0xff) | V_MTUVALUE(i));
+ v = t4_read_reg(adap, A_TP_MTU_TABLE);
+ mtus[i] = G_MTUVALUE(v);
+ if (mtu_log)
+ mtu_log[i] = G_MTUWIDTH(v);
+ }
+}
+
+/**
+ * t4_read_cong_tbl - reads the congestion control table
+ * @adap: the adapter
+ * @incr: where to store the additive increments
+ *
+ * Reads the additive increments programmed into the HW congestion
+ * control table.
+ */
+void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
+{
+ unsigned int mtu, w;
+
+ for (mtu = 0; mtu < NMTUS; ++mtu)
+ for (w = 0; w < NCCTRL_WIN; ++w) {
+ t4_write_reg(adap, A_TP_CCTRL_TABLE,
+ V_ROWINDEX(0xffff) | (mtu << 5) | w);
+ incr[mtu][w] = (u16)t4_read_reg(adap,
+ A_TP_CCTRL_TABLE) & 0x1fff;
+ }
+}
+
+/**
+ * t4_read_pace_tbl - read the pace table
+ * @adap: the adapter
+ * @pace_vals: holds the returned values
+ *
+ * Returns the values of TP's pace table in microseconds.
+ */
+void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
+{
+ unsigned int i, v;
+
+ for (i = 0; i < NTX_SCHED; i++) {
+ t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
+ v = t4_read_reg(adap, A_TP_PACE_TABLE);
+ pace_vals[i] = dack_ticks_to_usec(adap, v);
+ }
+}
+
+/**
+ * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
+ * @adap: the adapter
+ * @addr: the indirect TP register address
+ * @mask: specifies the field within the register to modify
+ * @val: new value for the field
+ *
+ * Sets a field of an indirect TP register to the given value.
+ */
+void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
+ unsigned int mask, unsigned int val)
+{
+ t4_write_reg(adap, A_TP_PIO_ADDR, addr);
+ val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
+ t4_write_reg(adap, A_TP_PIO_DATA, val);
+}
+
+/**
+ * init_cong_ctrl - initialize congestion control parameters
+ * @a: the alpha values for congestion control
+ * @b: the beta values for congestion control
+ *
+ * Initialize the congestion control parameters.
+ */
+static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
+{
+ a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
+ a[9] = 2;
+ a[10] = 3;
+ a[11] = 4;
+ a[12] = 5;
+ a[13] = 6;
+ a[14] = 7;
+ a[15] = 8;
+ a[16] = 9;
+ a[17] = 10;
+ a[18] = 14;
+ a[19] = 17;
+ a[20] = 21;
+ a[21] = 25;
+ a[22] = 30;
+ a[23] = 35;
+ a[24] = 45;
+ a[25] = 60;
+ a[26] = 80;
+ a[27] = 100;
+ a[28] = 200;
+ a[29] = 300;
+ a[30] = 400;
+ a[31] = 500;
+
+ b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
+ b[9] = b[10] = 1;
+ b[11] = b[12] = 2;
+ b[13] = b[14] = b[15] = b[16] = 3;
+ b[17] = b[18] = b[19] = b[20] = b[21] = 4;
+ b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
+ b[28] = b[29] = 6;
+ b[30] = b[31] = 7;
+}
+
+/* The minimum additive increment value for the congestion control table */
+#define CC_MIN_INCR 2U
+
+/**
+ * t4_load_mtus - write the MTU and congestion control HW tables
+ * @adap: the adapter
+ * @mtus: the values for the MTU table
+ * @alpha: the values for the congestion control alpha parameter
+ * @beta: the values for the congestion control beta parameter
+ *
+ * Write the HW MTU table with the supplied MTUs and the high-speed
+ * congestion control table with the supplied alpha, beta, and MTUs.
+ * We write the two tables together because the additive increments
+ * depend on the MTUs.
+ */
+void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
+ const unsigned short *alpha, const unsigned short *beta)
+{
+ static const unsigned int avg_pkts[NCCTRL_WIN] = {
+ 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
+ 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
+ 28672, 40960, 57344, 81920, 114688, 163840, 229376
+ };
+
+ unsigned int i, w;
+
+ for (i = 0; i < NMTUS; ++i) {
+ unsigned int mtu = mtus[i];
+ unsigned int log2 = fls(mtu);
+
+ if (!(mtu & ((1 << log2) >> 2))) /* round */
+ log2--;
+ t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
+ V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
+
+ for (w = 0; w < NCCTRL_WIN; ++w) {
+ unsigned int inc;
+
+ inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
+ CC_MIN_INCR);
+
+ t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
+ (w << 16) | (beta[w] << 13) | inc);
+ }
+ }
+}
+
+/**
+ * t4_set_pace_tbl - set the pace table
+ * @adap: the adapter
+ * @pace_vals: the pace values in microseconds
+ * @start: index of the first entry in the HW pace table to set
+ * @n: how many entries to set
+ *
+ * Sets (a subset of the) HW pace table.
+ */
+int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
+ unsigned int start, unsigned int n)
+{
+ unsigned int vals[NTX_SCHED], i;
+ unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
+
+ if (n > NTX_SCHED)
+ return -ERANGE;
+
+ /* convert values from us to dack ticks, rounding to closest value */
+ for (i = 0; i < n; i++, pace_vals++) {
+ vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
+ if (vals[i] > 0x7ff)
+ return -ERANGE;
+ if (*pace_vals && vals[i] == 0)
+ return -ERANGE;
+ }
+ for (i = 0; i < n; i++, start++)
+ t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
+ return 0;
+}
+
+/**
+ * t4_set_sched_bps - set the bit rate for a HW traffic scheduler
+ * @adap: the adapter
+ * @kbps: target rate in Kbps
+ * @sched: the scheduler index
+ *
+ * Configure a Tx HW scheduler for the target rate.
+ */
+int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
+{
+ unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
+ unsigned int clk = adap->params.vpd.cclk * 1000;
+ unsigned int selected_cpt = 0, selected_bpt = 0;
+
+ if (kbps > 0) {
+ kbps *= 125; /* -> bytes */
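+ /*
+ * Try every clocks-per-tick value and keep the (cpt, bpt) pair
+ * whose byte rate is closest to the requested rate.
+ */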
+ for (cpt = 1; cpt <= 255; cpt++) {
+ tps = clk / cpt;
+ bpt = (kbps + tps / 2) / tps;
+ if (bpt > 0 && bpt <= 255) {
+ v = bpt * tps;
+ delta = v >= kbps ? v - kbps : kbps - v;
+ if (delta < mindelta) {
+ mindelta = delta;
+ selected_cpt = cpt;
+ selected_bpt = bpt;
+ }
+ } else if (selected_cpt)
+ break;
+ }
+ if (!selected_cpt)
+ return -EINVAL;
+ }
+ t4_write_reg(adap, A_TP_TM_PIO_ADDR,
+ A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
+ v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
+ if (sched & 1)
+ v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
+ else
+ v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
+ t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
+ return 0;
+}
+
+/**
+ * t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
+ * @adap: the adapter
+ * @sched: the scheduler index
+ * @ipg: the interpacket delay in tenths of nanoseconds
+ *
+ * Set the interpacket delay for a HW packet rate scheduler.
+ */
+int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
+{
+ unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
+
+ /* convert ipg to nearest number of core clocks */
+ ipg *= core_ticks_per_usec(adap);
+ ipg = (ipg + 5000) / 10000;
+ if (ipg > M_TXTIMERSEPQ0)
+ return -EINVAL;
+
+ t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
+ v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
+ if (sched & 1)
+ v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
+ else
+ v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
+ t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
+ t4_read_reg(adap, A_TP_TM_PIO_DATA);
+ return 0;
+}
+
+/**
+ * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
+ * @adap: the adapter
+ * @sched: the scheduler index
+ * @kbps: the current rate in Kbps
+ * @ipg: the interpacket delay in tenths of nanoseconds
+ *
+ * Return the current configuration of a HW Tx scheduler.
+ */
+void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
+ unsigned int *ipg)
+{
+ unsigned int v, addr, bpt, cpt;
+
+ if (kbps) {
+ addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
+ t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
+ v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
+ if (sched & 1)
+ v >>= 16;
+ bpt = (v >> 8) & 0xff;
+ cpt = v & 0xff;
+ if (!cpt)
+ *kbps = 0; /* scheduler disabled */
+ else {
+ v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
+ *kbps = (v * bpt) / 125;
+ }
+ }
+ if (ipg) {
+ addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
+ t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
+ v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
+ if (sched & 1)
+ v >>= 16;
+ v &= 0xffff;
+ *ipg = (10000 * v) / core_ticks_per_usec(adap);
+ }
+}
+
+/*
+ * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
+ * clocks. The formula is
+ *
+ * bytes/s = bytes256 * 256 * ClkFreq / 4096
+ *
+ * which is equivalent to
+ *
+ * bytes/s = 62.5 * bytes256 * ClkFreq_kHz
+ */
+static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
+{
+ u64 v = bytes256 * adap->params.vpd.cclk;
+
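+ /* 62.5 * v, computed with integer arithmetic as 62 * v + v / 2. */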
+ return v * 62 + v / 2;
+}
+
+/**
+ * t4_get_chan_txrate - get the current per channel Tx rates
+ * @adap: the adapter
+ * @nic_rate: rates for NIC traffic
+ * @ofld_rate: rates for offloaded traffic
+ *
+ * Return the current Tx rates in bytes/s for NIC and offloaded traffic
+ * for each channel.
+ */
+void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
+{
+ u32 v;
+
+ v = t4_read_reg(adap, A_TP_TX_TRATE);
+ nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
+ nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
+ nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
+ nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
+
+ v = t4_read_reg(adap, A_TP_TX_ORATE);
+ ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
+ ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
+ ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
+ ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
+}
+
+/**
+ * t4_set_trace_filter - configure one of the tracing filters
+ * @adap: the adapter
+ * @tp: the desired trace filter parameters
+ * @idx: which filter to configure
+ * @enable: whether to enable or disable the filter
+ *
+ * Configures one of the tracing filters available in HW. If @enable is
+ * %0 @tp is not examined and may be %NULL.
+ */
+int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp, int idx,
+ int enable)
+{
+ int i, ofst = idx * 4;
+ u32 data_reg, mask_reg, cfg;
+ u32 multitrc = F_TRCMULTIFILTER;
+
+ if (!enable) {
+ t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
+ goto out;
+ }
+
+ if (tp->port > 11 || tp->invert > 1 || tp->skip_len > M_TFLENGTH ||
+ tp->skip_ofst > M_TFOFFSET || tp->min_len > M_TFMINPKTSIZE ||
+ tp->snap_len > 9600 || (idx && tp->snap_len > 256))
+ return -EINVAL;
+
+ if (tp->snap_len > 256) { /* must be tracer 0 */
+ if ((t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + 4) |
+ t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + 8) |
+ t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + 12)) &
+ F_TFEN)
+ return -EINVAL; /* other tracers are enabled */
+ multitrc = 0;
+ } else if (idx) {
+ i = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B);
+ if (G_TFCAPTUREMAX(i) > 256 &&
+ (t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A) & F_TFEN))
+ return -EINVAL;
+ }
+
+ /* stop the tracer we'll be changing */
+ t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
+
+ /* disable tracing globally if running in the wrong single/multi mode */
+ cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
+ if ((cfg & F_TRCEN) && multitrc != (cfg & F_TRCMULTIFILTER)) {
+ t4_write_reg(adap, A_MPS_TRC_CFG, cfg ^ F_TRCEN);
+ t4_read_reg(adap, A_MPS_TRC_CFG); /* flush */
+ msleep(1);
+ if (!(t4_read_reg(adap, A_MPS_TRC_CFG) & F_TRCFIFOEMPTY))
+ return -ETIMEDOUT;
+ }
+ /*
+ * At this point either the tracing is enabled and in the right mode or
+ * disabled.
+ */
+
+ idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
+ data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
+ mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;
+
+ for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
+ t4_write_reg(adap, data_reg, tp->data[i]);
+ t4_write_reg(adap, mask_reg, ~tp->mask[i]);
+ }
+ t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
+ V_TFCAPTUREMAX(tp->snap_len) |
+ V_TFMINPKTSIZE(tp->min_len));
+ t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
+ V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) |
+ V_TFPORT(tp->port) | F_TFEN | V_TFINVERTMATCH(tp->invert));
+
+ cfg &= ~F_TRCMULTIFILTER;
+ t4_write_reg(adap, A_MPS_TRC_CFG, cfg | F_TRCEN | multitrc);
+out: t4_read_reg(adap, A_MPS_TRC_CFG); /* flush */
+ return 0;
+}
+
+/**
+ * t4_get_trace_filter - query one of the tracing filters
+ * @adap: the adapter
+ * @tp: the current trace filter parameters
+ * @idx: which trace filter to query
+ * @enabled: non-zero if the filter is enabled
+ *
+ * Returns the current settings of one of the HW tracing filters.
+ */
+void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
+ int *enabled)
+{
+ u32 ctla, ctlb;
+ int i, ofst = idx * 4;
+ u32 data_reg, mask_reg;
+
+ ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
+ ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
+
+ *enabled = !!(ctla & F_TFEN);
+ tp->snap_len = G_TFCAPTUREMAX(ctlb);
+ tp->min_len = G_TFMINPKTSIZE(ctlb);
+ tp->skip_ofst = G_TFOFFSET(ctla);
+ tp->skip_len = G_TFLENGTH(ctla);
+ tp->invert = !!(ctla & F_TFINVERTMATCH);
+ tp->port = G_TFPORT(ctla);
+
+ ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
+ data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
+ mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
+
+ for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
+ tp->mask[i] = ~t4_read_reg(adap, mask_reg);
+ tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
+ }
+}
+
+/**
+ * t4_pmtx_get_stats - returns the HW stats from PMTX
+ * @adap: the adapter
+ * @cnt: where to store the count statistics
+ * @cycles: where to store the cycle statistics
+ *
+ * Returns performance statistics from PMTX.
+ */
+void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
+{
+ int i;
+
+ for (i = 0; i < PM_NSTATS; i++) {
+ t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
+ cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
+ cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
+ }
+}
+
+/**
+ * t4_pmrx_get_stats - returns the HW stats from PMRX
+ * @adap: the adapter
+ * @cnt: where to store the count statistics
+ * @cycles: where to store the cycle statistics
+ *
+ * Returns performance statistics from PMRX.
+ */
+void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
+{
+ int i;
+
+ for (i = 0; i < PM_NSTATS; i++) {
+ t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
+ cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
+ cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
+ }
+}
+
+/**
+ * get_mps_bg_map - return the buffer groups associated with a port
+ * @adap: the adapter
+ * @idx: the port index
+ *
+ * Returns a bitmap indicating which MPS buffer groups are associated
+ * with the given port. Bit i is set if buffer group i is used by the
+ * port.
+ */
+static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
+{
+ u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
+
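+ /*
+ * NUMPORTS == 0: a single port owns all 4 buffer groups; == 1: two
+ * ports with 2 buffer groups each; otherwise one buffer group per port.
+ */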
+ if (n == 0)
+ return idx == 0 ? 0xf : 0;
+ if (n == 1)
+ return idx < 2 ? (3 << (2 * idx)) : 0;
+ return 1 << idx;
+}
+
+/**
+ * t4_get_port_stats - collect port statistics
+ * @adap: the adapter
+ * @idx: the port index
+ * @p: the stats structure to fill
+ *
+ * Collect statistics related to the given port from HW.
+ */
+void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
+{
+ u32 bgmap = get_mps_bg_map(adap, idx);
+
+#define GET_STAT(name) \
+ t4_read_reg64(adap, PORT_REG(idx, A_MPS_PORT_STAT_##name##_L))
+#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
+
+ p->tx_octets = GET_STAT(TX_PORT_BYTES);
+ p->tx_frames = GET_STAT(TX_PORT_FRAMES);
+ p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
+ p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
+ p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
+ p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
+ p->tx_frames_64 = GET_STAT(TX_PORT_64B);
+ p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
+ p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
+ p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
+ p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
+ p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
+ p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
+ p->tx_drop = GET_STAT(TX_PORT_DROP);
+ p->tx_pause = GET_STAT(TX_PORT_PAUSE);
+ p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
+ p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
+ p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
+ p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
+ p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
+ p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
+ p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
+ p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
+
+ p->rx_octets = GET_STAT(RX_PORT_BYTES);
+ p->rx_frames = GET_STAT(RX_PORT_FRAMES);
+ p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
+ p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
+ p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
+ p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
+ p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
+ p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
+ p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
+ p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
+ p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
+ p->rx_frames_64 = GET_STAT(RX_PORT_64B);
+ p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
+ p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
+ p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
+ p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
+ p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
+ p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
+ p->rx_pause = GET_STAT(RX_PORT_PAUSE);
+ p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
+ p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
+ p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
+ p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
+ p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
+ p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
+ p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
+ p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
+
+ p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
+ p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
+ p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
+ p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
+ p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
+ p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
+ p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
+ p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
+
+#undef GET_STAT
+#undef GET_STAT_COM
+}
+
+/**
+ * t4_clr_port_stats - clear port statistics
+ * @adap: the adapter
+ * @idx: the port index
+ *
+ * Clear HW statistics for the given port.
+ */
+void t4_clr_port_stats(struct adapter *adap, int idx)
+{
+ unsigned int i;
+ u32 bgmap = get_mps_bg_map(adap, idx);
+
+ for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
+ i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
+ t4_write_reg(adap, PORT_REG(idx, i), 0);
+ for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
+ i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
+ t4_write_reg(adap, PORT_REG(idx, i), 0);
+ for (i = 0; i < 4; i++)
+ if (bgmap & (1 << i)) {
+ t4_write_reg(adap,
+ A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
+ t4_write_reg(adap,
+ A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
+ }
+}
+
+/**
+ * t4_get_lb_stats - collect loopback port statistics
+ * @adap: the adapter
+ * @idx: the loopback port index
+ * @p: the stats structure to fill
+ *
+ * Return HW statistics for the given loopback port.
+ */
+void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
+{
+ u32 bgmap = get_mps_bg_map(adap, idx);
+
+#define GET_STAT(name) \
+ t4_read_reg64(adap, PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L))
+#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
+
+ p->octets = GET_STAT(BYTES);
+ p->frames = GET_STAT(FRAMES);
+ p->bcast_frames = GET_STAT(BCAST);
+ p->mcast_frames = GET_STAT(MCAST);
+ p->ucast_frames = GET_STAT(UCAST);
+ p->error_frames = GET_STAT(ERROR);
+
+ p->frames_64 = GET_STAT(64B);
+ p->frames_65_127 = GET_STAT(65B_127B);
+ p->frames_128_255 = GET_STAT(128B_255B);
+ p->frames_256_511 = GET_STAT(256B_511B);
+ p->frames_512_1023 = GET_STAT(512B_1023B);
+ p->frames_1024_1518 = GET_STAT(1024B_1518B);
+ p->frames_1519_max = GET_STAT(1519B_MAX);
+ p->drop = t4_read_reg(adap, PORT_REG(idx,
+ A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES));
+
+ p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
+ p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
+ p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
+ p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
+ p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
+ p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
+ p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
+ p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
+
+#undef GET_STAT
+#undef GET_STAT_COM
+}
+
+/**
+ * t4_wol_magic_enable - enable/disable magic packet WoL
+ * @adap: the adapter
+ * @port: the physical port index
+ * @addr: MAC address expected in magic packets, %NULL to disable
+ *
+ * Enables/disables magic packet wake-on-LAN for the selected port.
+ */
+void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
+ const u8 *addr)
+{
+ if (addr) {
+ t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO),
+ (addr[2] << 24) | (addr[3] << 16) |
+ (addr[4] << 8) | addr[5]);
+ t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI),
+ (addr[0] << 8) | addr[1]);
+ }
+ t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2), F_MAGICEN,
+ V_MAGICEN(addr != NULL));
+}
+
+/**
+ * t4_wol_pat_enable - enable/disable pattern-based WoL
+ * @adap: the adapter
+ * @port: the physical port index
+ * @map: bitmap of which HW pattern filters to set
+ * @mask0: byte mask for bytes 0-63 of a packet
+ * @mask1: byte mask for bytes 64-127 of a packet
+ * @crc: Ethernet CRC for selected bytes
+ * @enable: enable/disable switch
+ *
+ * Sets the pattern filters indicated in @map to mask out the bytes
+ * specified in @mask0/@mask1 in received packets and compare the CRC of
+ * the resulting packet against @crc. If @enable is %true pattern-based
+ * WoL is enabled, otherwise disabled.
+ */
+int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
+ u64 mask0, u64 mask1, unsigned int crc, bool enable)
+{
+ int i;
+
+ if (!enable) {
+ t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2),
+ F_PATEN, 0);
+ return 0;
+ }
+ if (map > 0xff)
+ return -EINVAL;
+
+#define EPIO_REG(name) PORT_REG(port, A_XGMAC_PORT_EPIO_##name)
+
+ t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
+ t4_write_reg(adap, EPIO_REG(DATA2), mask1);
+ t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
+
+ for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
+ if (!(map & 1))
+ continue;
+
+ /* write byte masks */
+ t4_write_reg(adap, EPIO_REG(DATA0), mask0);
+ t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
+ t4_read_reg(adap, EPIO_REG(OP)); /* flush */
+ if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
+ return -ETIMEDOUT;
+
+ /* write CRC */
+ t4_write_reg(adap, EPIO_REG(DATA0), crc);
+ t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
+ t4_read_reg(adap, EPIO_REG(OP)); /* flush */
+ if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
+ return -ETIMEDOUT;
+ }
+#undef EPIO_REG
+
+ t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2), 0, F_PATEN);
+ return 0;
+}
+
+/**
+ * t4_mk_filtdelwr - create a delete filter WR
+ * @ftid: the filter ID
+ * @wr: the filter work request to populate
+ * @qid: ingress queue to receive the delete notification
+ *
+ * Creates a filter work request to delete the supplied filter. If @qid is
+ * negative the delete notification is suppressed.
+ */
+void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
+{
+ memset(wr, 0, sizeof(*wr));
+ wr->op_pkd = htonl(V_FW_WR_OP(FW_FILTER_WR));
+ wr->len16_pkd = htonl(V_FW_WR_LEN16(sizeof(*wr) / 16));
+ wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
+ V_FW_FILTER_WR_NOREPLY(qid < 0));
+ wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
+ if (qid >= 0)
+ wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
+}
+
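+/* Fill in a simple FW command header: opcode, request and read/write flags, and length. */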
+#define INIT_CMD(var, cmd, rd_wr) do { \
+ (var).op_to_write = htonl(V_FW_CMD_OP(FW_##cmd##_CMD) | \
+ F_FW_CMD_REQUEST | F_FW_CMD_##rd_wr); \
+ (var).retval_len16 = htonl(FW_LEN16(var)); \
+} while (0)
+
+/**
+ * t4_mdio_rd - read a PHY register through MDIO
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @phy_addr: the PHY address
+ * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
+ * @reg: the register to read
+ * @valp: where to store the value
+ *
+ * Issues a FW command through the given mailbox to read a PHY register.
+ */
+int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
+ unsigned int mmd, unsigned int reg, unsigned int *valp)
+{
+ int ret;
+ struct fw_ldst_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
+ c.cycles_to_len16 = htonl(FW_LEN16(c));
+ c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
+ V_FW_LDST_CMD_MMD(mmd));
+ c.u.mdio.raddr = htons(reg);
+
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (ret == 0)
+ *valp = ntohs(c.u.mdio.rval);
+ return ret;
+}
+
+/**
+ * t4_mdio_wr - write a PHY register through MDIO
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @phy_addr: the PHY address
+ * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
+ * @reg: the register to write
+ * @val: value to write
+ *
+ * Issues a FW command through the given mailbox to write a PHY register.
+ */
+int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
+ unsigned int mmd, unsigned int reg, unsigned int val)
+{
+ struct fw_ldst_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
+ c.cycles_to_len16 = htonl(FW_LEN16(c));
+ c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
+ V_FW_LDST_CMD_MMD(mmd));
+ c.u.mdio.raddr = htons(reg);
+ c.u.mdio.rval = htons(val);
+
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_sge_ctxt_rd - read an SGE context through FW
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @cid: the context id
+ * @ctype: the context type
+ * @data: where to store the context data
+ *
+ * Issues a FW command through the given mailbox to read an SGE context.
+ */
+int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
+ enum ctxt_type ctype, u32 *data)
+{
+ int ret;
+ struct fw_ldst_cmd c;
+
+ if (ctype == CTXT_EGRESS)
+ ret = FW_LDST_ADDRSPC_SGE_EGRC;
+ else if (ctype == CTXT_INGRESS)
+ ret = FW_LDST_ADDRSPC_SGE_INGC;
+ else if (ctype == CTXT_FLM)
+ ret = FW_LDST_ADDRSPC_SGE_FLMC;
+ else
+ ret = FW_LDST_ADDRSPC_SGE_CONMC;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(ret));
+ c.cycles_to_len16 = htonl(FW_LEN16(c));
+ c.u.idctxt.physid = htonl(cid);
+
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (ret == 0) {
+ data[0] = ntohl(c.u.idctxt.ctxt_data0);
+ data[1] = ntohl(c.u.idctxt.ctxt_data1);
+ data[2] = ntohl(c.u.idctxt.ctxt_data2);
+ data[3] = ntohl(c.u.idctxt.ctxt_data3);
+ data[4] = ntohl(c.u.idctxt.ctxt_data4);
+ data[5] = ntohl(c.u.idctxt.ctxt_data5);
+ }
+ return ret;
+}
+
+/**
+ * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
+ * @adap: the adapter
+ * @cid: the context id
+ * @ctype: the context type
+ * @data: where to store the context data
+ *
+ * Reads an SGE context directly, bypassing FW. This is only for
+ * debugging when FW is unavailable.
+ */
+int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
+ u32 *data)
+{
+ int i, ret;
+
+ t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
+ ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
+ if (!ret)
+ for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
+ *data++ = t4_read_reg(adap, i);
+ return ret;
+}
+
+/**
+ * t4_fw_hello - establish communication with FW
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @evt_mbox: mailbox to receive async FW events
+ * @master: specifies the caller's willingness to be the device master
+ * @state: returns the current device state
+ *
+ * Issues a command to establish communication with FW.
+ */
+int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
+ enum dev_master master, enum dev_state *state)
+{
+ int ret;
+ struct fw_hello_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ INIT_CMD(c, HELLO, WRITE);
+ c.err_to_mbasyncnot = htonl(
+ V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
+ V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
+ V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
+ M_FW_HELLO_CMD_MBMASTER) |
+ V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox));
+
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (ret == 0 && state) {
+ u32 v = ntohl(c.err_to_mbasyncnot);
+ if (v & F_FW_HELLO_CMD_INIT)
+ *state = DEV_STATE_INIT;
+ else if (v & F_FW_HELLO_CMD_ERR)
+ *state = DEV_STATE_ERR;
+ else
+ *state = DEV_STATE_UNINIT;
+ return G_FW_HELLO_CMD_MBMASTER(v);
+ }
+ return ret;
+}
+
+/**
+ * t4_fw_bye - end communication with FW
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ *
+ * Issues a command to terminate communication with FW.
+ */
+int t4_fw_bye(struct adapter *adap, unsigned int mbox)
+{
+ struct fw_bye_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ INIT_CMD(c, BYE, WRITE);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_init_cmd - ask FW to initialize the device
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ *
+ * Issues a command to FW to partially initialize the device. This
+ * performs initialization that generally doesn't depend on user input.
+ */
+int t4_early_init(struct adapter *adap, unsigned int mbox)
+{
+ struct fw_initialize_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ INIT_CMD(c, INITIALIZE, WRITE);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_fw_reset - issue a reset to FW
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @reset: specifies the type of reset to perform
+ *
+ * Issues a reset command of the specified type to FW.
+ */
+int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
+{
+ struct fw_reset_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ INIT_CMD(c, RESET, WRITE);
+ c.val = htonl(reset);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_query_params - query FW or device parameters
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF
+ * @vf: the VF
+ * @nparams: the number of parameters
+ * @params: the parameter names
+ * @val: the parameter values
+ *
+ * Reads the value of FW or device parameters. Up to 7 parameters can be
+ * queried at once.
+ */
+int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int nparams, const u32 *params,
+ u32 *val)
+{
+ int i, ret;
+ struct fw_params_cmd c;
+ __be32 *p = &c.param[0].mnem;
+
+ if (nparams > 7)
+ return -EINVAL;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_READ | V_FW_PARAMS_CMD_PFN(pf) |
+ V_FW_PARAMS_CMD_VFN(vf));
+ c.retval_len16 = htonl(FW_LEN16(c));
+
+ for (i = 0; i < nparams; i++, p += 2)
+ *p = htonl(*params++);
+
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (ret == 0)
+ for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
+ *val++ = ntohl(*p);
+ return ret;
+}
+
+/**
+ * t4_set_params - sets FW or device parameters
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF
+ * @vf: the VF
+ * @nparams: the number of parameters
+ * @params: the parameter names
+ * @val: the parameter values
+ *
+ * Sets the value of FW or device parameters. Up to 7 parameters can be
+ * specified at once.
+ */
+int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int nparams, const u32 *params,
+ const u32 *val)
+{
+ struct fw_params_cmd c;
+ __be32 *p = &c.param[0].mnem;
+
+ if (nparams > 7)
+ return -EINVAL;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_WRITE | V_FW_PARAMS_CMD_PFN(pf) |
+ V_FW_PARAMS_CMD_VFN(vf));
+ c.retval_len16 = htonl(FW_LEN16(c));
+
+ while (nparams--) {
+ *p++ = htonl(*params++);
+ *p++ = htonl(*val++);
+ }
+
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_cfg_pfvf - configure PF/VF resource limits
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF being configured
+ * @vf: the VF being configured
+ * @txq: the max number of egress queues
+ * @txq_eth_ctrl: the max number of egress Ethernet or control queues
+ * @rxqi: the max number of interrupt-capable ingress queues
+ * @rxq: the max number of interruptless ingress queues
+ * @tc: the PCI traffic class
+ * @vi: the max number of virtual interfaces
+ * @cmask: the channel access rights mask for the PF/VF
+ * @pmask: the port access rights mask for the PF/VF
+ * @nexact: the maximum number of exact MPS filters
+ * @rcaps: read capabilities
+ * @wxcaps: write/execute capabilities
+ *
+ * Configures resource limits and capabilities for a physical or virtual
+ * function.
+ */
+int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
+ unsigned int rxqi, unsigned int rxq, unsigned int tc,
+ unsigned int vi, unsigned int cmask, unsigned int pmask,
+ unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
+{
+ struct fw_pfvf_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
+ V_FW_PFVF_CMD_VFN(vf));
+ c.retval_len16 = htonl(FW_LEN16(c));
+ c.niqflint_niq = htonl(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
+ V_FW_PFVF_CMD_NIQ(rxq));
+ c.type_to_neq = htonl(V_FW_PFVF_CMD_CMASK(cmask) |
+ V_FW_PFVF_CMD_PMASK(pmask) |
+ V_FW_PFVF_CMD_NEQ(txq));
+ c.tc_to_nexactf = htonl(V_FW_PFVF_CMD_TC(tc) | V_FW_PFVF_CMD_NVI(vi) |
+ V_FW_PFVF_CMD_NEXACTF(nexact));
+ c.r_caps_to_nethctrl = htonl(V_FW_PFVF_CMD_R_CAPS(rcaps) |
+ V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
+ V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_alloc_vi - allocate a virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @port: physical port associated with the VI
+ * @pf: the PF owning the VI
+ * @vf: the VF owning the VI
+ * @nmac: number of MAC addresses needed (1 to 5)
+ * @mac: the MAC addresses of the VI
+ * @rss_size: size of RSS table slice associated with this VI
+ *
+ * Allocates a virtual interface for the given physical port. If @mac is
+ * not %NULL it contains the MAC addresses of the VI as assigned by FW.
+ * @mac should be large enough to hold @nmac Ethernet addresses; they are
+ * stored consecutively, so the space needed is @nmac * 6 bytes.
+ * Returns a negative error number or the non-negative VI id.
+ */
+int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
+ unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
+ unsigned int *rss_size)
+{
+ int ret;
+ struct fw_vi_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_WRITE | F_FW_CMD_EXEC |
+ V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
+ c.alloc_to_len16 = htonl(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
+ c.portid_pkd = V_FW_VI_CMD_PORTID(port);
+ c.nmac = nmac - 1;
+
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (ret)
+ return ret;
+
+ if (mac) {
+ memcpy(mac, c.mac, sizeof(c.mac));
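+ /* The cases fall through so each additional requested address is copied. */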
+ switch (nmac) {
+ case 5:
+ memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
+ case 4:
+ memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
+ case 3:
+ memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
+ case 2:
+ memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
+ }
+ }
+ if (rss_size)
+ *rss_size = G_FW_VI_CMD_RSSSIZE(ntohs(c.rsssize_pkd));
+ return G_FW_VI_CMD_VIID(ntohs(c.type_to_viid));
+}
+
+/**
+ * t4_free_vi - free a virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the VI
+ * @vf: the VF owning the VI
+ * @viid: virtual interface identifier
+ *
+ * Free a previously allocated virtual interface.
+ */
+int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int viid)
+{
+ struct fw_vi_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_EXEC |
+ V_FW_VI_CMD_PFN(pf) |
+ V_FW_VI_CMD_VFN(vf));
+ c.alloc_to_len16 = htonl(F_FW_VI_CMD_FREE | FW_LEN16(c));
+ c.type_to_viid = htons(V_FW_VI_CMD_VIID(viid));
+
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+}
+
+/**
+ * t4_set_rxmode - set Rx properties of a virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @viid: the VI id
+ * @mtu: the new MTU or -1
+ * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
+ * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
+ * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
+ * @vlanex: 1 to enable HVLAN extraction, 0 to disable it, -1 no change
+ * @sleep_ok: if true we may sleep while awaiting command completion
+ *
+ * Sets Rx properties of a virtual interface.
+ */
+int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ int mtu, int promisc, int all_multi, int bcast, int vlanex,
+ bool sleep_ok)
+{
+ struct fw_vi_rxmode_cmd c;
+
+ /* convert to FW values */
+ if (mtu < 0)
+ mtu = M_FW_VI_RXMODE_CMD_MTU;
+ if (promisc < 0)
+ promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
+ if (all_multi < 0)
+ all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
+ if (bcast < 0)
+ bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
+ if (vlanex < 0)
+ vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_RXMODE_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_WRITE | V_FW_VI_RXMODE_CMD_VIID(viid));
+ c.retval_len16 = htonl(FW_LEN16(c));
+ c.mtu_to_vlanexen = htonl(V_FW_VI_RXMODE_CMD_MTU(mtu) |
+ V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
+ V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
+ V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
+ V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
+ return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
+}
+
+/**
+ * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @viid: the VI id
+ * @free: if true any existing filters for this VI id are first removed
+ * @naddr: the number of MAC addresses to allocate filters for (up to 7)
+ * @addr: the MAC address(es)
+ * @idx: where to store the index of each allocated filter
+ * @hash: pointer to hash address filter bitmap
+ * @sleep_ok: call is allowed to sleep
+ *
+ * Allocates an exact-match filter for each of the supplied addresses and
+ * sets it to the corresponding address. If @idx is not %NULL it should
+ * have at least @naddr entries, each of which will be set to the index of
+ * the filter allocated for the corresponding MAC address. If a filter
+ * could not be allocated for an address its index is set to 0xffff.
+ * If @hash is not %NULL, addresses that fail to allocate an exact filter
+ * are hashed and the corresponding bits are set in the hash filter bitmap
+ * pointed at by @hash.
+ *
+ * Returns a negative error number or the number of filters allocated.
+ */
+int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
+ unsigned int viid, bool free, unsigned int naddr,
+ const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
+{
+ int offset, ret = 0;
+ struct fw_vi_mac_cmd c;
+ unsigned int nfilters = 0;
+ unsigned int rem = naddr;
+
+ if (naddr > FW_CLS_TCAM_NUM_ENTRIES)
+ return -EINVAL;
+
+ for (offset = 0; offset < naddr ; /**/) {
+ unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
+ ? rem
+ : ARRAY_SIZE(c.u.exact));
+ size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
+ u.exact[fw_naddr]), 16);
+ struct fw_vi_mac_exact *p;
+ int i;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_WRITE |
+ V_FW_CMD_EXEC(free) |
+ V_FW_VI_MAC_CMD_VIID(viid));
+ c.freemacs_to_len16 = htonl(V_FW_VI_MAC_CMD_FREEMACS(free) |
+ V_FW_CMD_LEN16(len16));
+
+ for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
+ p->valid_to_idx = htons(
+ F_FW_VI_MAC_CMD_VALID |
+ V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
+ memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
+ }
+
+ /*
+ * It's okay if we run out of space in our MAC address arena.
+ * Some of the addresses we submit may get stored so we need
+ * to run through the reply to see what the results were ...
+ */
+ ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
+ if (ret && ret != -FW_ENOMEM)
+ break;
+
+ for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
+ u16 index = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
+
+ if (idx)
+ idx[offset+i] = (index >= FW_CLS_TCAM_NUM_ENTRIES
+ ? 0xffff
+ : index);
+ if (index < FW_CLS_TCAM_NUM_ENTRIES)
+ nfilters++;
+ else if (hash)
+ *hash |= (1ULL << hash_mac_addr(addr[offset+i]));
+ }
+
+ free = false;
+ offset += fw_naddr;
+ rem -= fw_naddr;
+ }
+
+ if (ret == 0 || ret == -FW_ENOMEM)
+ ret = nfilters;
+ return ret;
+}
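+
+/*
+ * Illustrative sketch (editor's note, not part of the driver): install
+ * exact-match filters for two unicast addresses on a VI.  The addresses are
+ * arbitrary example values; any address that does not fit in the TCAM shows
+ * up as 0xffff in idx[] and as a bit in *hash, which could subsequently be
+ * programmed with t4_set_addr_hash.
+ */
+static inline int example_add_two_macs(struct adapter *adap, unsigned int mbox,
+                                       unsigned int viid, u64 *hash)
+{
+        static const u8 mac0[6] = { 0x00, 0x07, 0x43, 0x00, 0x00, 0x01 };
+        static const u8 mac1[6] = { 0x00, 0x07, 0x43, 0x00, 0x00, 0x02 };
+        const u8 *macs[2] = { mac0, mac1 };
+        u16 idx[2];
+
+        *hash = 0;
+        return t4_alloc_mac_filt(adap, mbox, viid, false, 2, macs, idx, hash,
+            true);
+}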
+
+/**
+ * t4_change_mac - modifies the exact-match filter for a MAC address
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @viid: the VI id
+ * @idx: index of existing filter for old value of MAC address, or -1
+ * @addr: the new MAC address value
+ * @persist: whether a new MAC allocation should be persistent
+ * @add_smt: if true also add the address to the HW SMT
+ *
+ * Modifies an exact-match filter and sets it to the new MAC address if
+ * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the
+ * latter case the address is added persistently if @persist is %true.
+ *
+ * Note that in general it is not possible to modify the value of a given
+ * filter so the generic way to modify an address filter is to free the one
+ * being used by the old address value and allocate a new filter for the
+ * new address value.
+ *
+ * Returns a negative error number or the index of the filter with the new
+ * MAC value. Note that this index may differ from @idx.
+ */
+int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ int idx, const u8 *addr, bool persist, bool add_smt)
+{
+ int ret, mode;
+ struct fw_vi_mac_cmd c;
+ struct fw_vi_mac_exact *p = c.u.exact;
+
+ if (idx < 0) /* new allocation */
+ idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
+ mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_WRITE | V_FW_VI_MAC_CMD_VIID(viid));
+ c.freemacs_to_len16 = htonl(V_FW_CMD_LEN16(1));
+ p->valid_to_idx = htons(F_FW_VI_MAC_CMD_VALID |
+ V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
+ V_FW_VI_MAC_CMD_IDX(idx));
+ memcpy(p->macaddr, addr, sizeof(p->macaddr));
+
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (ret == 0) {
+ ret = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
+ if (ret >= FW_CLS_TCAM_NUM_ENTRIES)
+ ret = -ENOMEM;
+ }
+ return ret;
+}
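+
+/*
+ * Illustrative sketch (editor's note, not part of the driver): replace a VI's
+ * MAC address and remember the (possibly different) filter index returned by
+ * the firmware.  The address is an arbitrary example value.
+ */
+static inline int example_change_mac(struct adapter *adap, unsigned int mbox,
+                                     unsigned int viid, int *mac_idx)
+{
+        static const u8 new_mac[6] = { 0x00, 0x07, 0x43, 0x12, 0x34, 0x56 };
+        int ret;
+
+        ret = t4_change_mac(adap, mbox, viid, *mac_idx, new_mac, true, true);
+        if (ret >= 0)
+                *mac_idx = ret;        /* index may differ from the old one */
+        return (ret < 0 ? ret : 0);
+}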
+
+/**
+ * t4_set_addr_hash - program the MAC inexact-match hash filter
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @viid: the VI id
+ * @ucast: whether the hash filter should also match unicast addresses
+ * @vec: the value to be written to the hash filter
+ * @sleep_ok: call is allowed to sleep
+ *
+ * Sets the 64-bit inexact-match hash filter for a virtual interface.
+ */
+int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ bool ucast, u64 vec, bool sleep_ok)
+{
+ struct fw_vi_mac_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_WRITE | V_FW_VI_ENABLE_CMD_VIID(viid));
+ c.freemacs_to_len16 = htonl(F_FW_VI_MAC_CMD_HASHVECEN |
+ V_FW_VI_MAC_CMD_HASHUNIEN(ucast) |
+ V_FW_CMD_LEN16(1));
+ c.u.hash.hashvec = cpu_to_be64(vec);
+ return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
+}
+
+/**
+ * t4_enable_vi - enable/disable a virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @viid: the VI id
+ * @rx_en: 1=enable Rx, 0=disable Rx
+ * @tx_en: 1=enable Tx, 0=disable Tx
+ *
+ * Enables/disables a virtual interface.
+ */
+int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ bool rx_en, bool tx_en)
+{
+ struct fw_vi_enable_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
+ c.ien_to_len16 = htonl(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
+ V_FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_identify_port - identify a VI's port by blinking its LED
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @viid: the VI id
+ * @nblinks: how many times to blink LED at 2.5 Hz
+ *
+ * Identifies a VI's port by blinking its LED.
+ */
+int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ unsigned int nblinks)
+{
+ struct fw_vi_enable_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
+ c.ien_to_len16 = htonl(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
+ c.blinkdur = htons(nblinks);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_iq_start_stop - enable/disable an ingress queue and its FLs
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @start: %true to enable the queues, %false to disable them
+ * @pf: the PF owning the queues
+ * @vf: the VF owning the queues
+ * @iqid: ingress queue id
+ * @fl0id: FL0 queue id or 0xffff if no attached FL0
+ * @fl1id: FL1 queue id or 0xffff if no attached FL1
+ *
+ * Starts or stops an ingress queue and its associated FLs, if any.
+ */
+int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
+ unsigned int pf, unsigned int vf, unsigned int iqid,
+ unsigned int fl0id, unsigned int fl1id)
+{
+ struct fw_iq_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
+ V_FW_IQ_CMD_VFN(vf));
+ c.alloc_to_len16 = htonl(V_FW_IQ_CMD_IQSTART(start) |
+ V_FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c));
+ c.iqid = htons(iqid);
+ c.fl0id = htons(fl0id);
+ c.fl1id = htons(fl1id);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_iq_free - free an ingress queue and its FLs
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the queues
+ * @vf: the VF owning the queues
+ * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
+ * @iqid: ingress queue id
+ * @fl0id: FL0 queue id or 0xffff if no attached FL0
+ * @fl1id: FL1 queue id or 0xffff if no attached FL1
+ *
+ * Frees an ingress queue and its associated FLs, if any.
+ */
+int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int iqtype, unsigned int iqid,
+ unsigned int fl0id, unsigned int fl1id)
+{
+ struct fw_iq_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
+ V_FW_IQ_CMD_VFN(vf));
+ c.alloc_to_len16 = htonl(F_FW_IQ_CMD_FREE | FW_LEN16(c));
+ c.type_to_iqandstindex = htonl(V_FW_IQ_CMD_TYPE(iqtype));
+ c.iqid = htons(iqid);
+ c.fl0id = htons(fl0id);
+ c.fl1id = htons(fl1id);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_eth_eq_free - free an Ethernet egress queue
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the queue
+ * @vf: the VF owning the queue
+ * @eqid: egress queue id
+ *
+ * Frees an Ethernet egress queue.
+ */
+int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int eqid)
+{
+ struct fw_eq_eth_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(pf) |
+ V_FW_EQ_ETH_CMD_VFN(vf));
+ c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
+ c.eqid_pkd = htonl(V_FW_EQ_ETH_CMD_EQID(eqid));
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_ctrl_eq_free - free a control egress queue
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the queue
+ * @vf: the VF owning the queue
+ * @eqid: egress queue id
+ *
+ * Frees a control egress queue.
+ */
+int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int eqid)
+{
+ struct fw_eq_ctrl_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(pf) |
+ V_FW_EQ_CTRL_CMD_VFN(vf));
+ c.alloc_to_len16 = htonl(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
+ c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_EQID(eqid));
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_ofld_eq_free - free an offload egress queue
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the queue
+ * @vf: the VF owning the queue
+ * @eqid: egress queue id
+ *
+ * Frees an offload egress queue.
+ */
+int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int eqid)
+{
+ struct fw_eq_ofld_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(pf) |
+ V_FW_EQ_OFLD_CMD_VFN(vf));
+ c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
+ c.eqid_pkd = htonl(V_FW_EQ_OFLD_CMD_EQID(eqid));
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_handle_fw_rpl - process a FW reply message
+ * @adap: the adapter
+ * @rpl: start of the FW message
+ *
+ * Processes a FW message, such as link state change messages.
+ */
+int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
+{
+ u8 opcode = *(const u8 *)rpl;
+
+ if (opcode == FW_PORT_CMD) { /* link/module state change message */
+ int speed = 0, fc = 0, i;
+ const struct fw_port_cmd *p = (const void *)rpl;
+ int chan = G_FW_PORT_CMD_PORTID(ntohl(p->op_to_portid));
+ struct port_info *pi = NULL;
+ struct link_config *lc;
+ u32 stat = ntohl(p->u.info.lstatus_to_modtype);
+ int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
+ u32 mod = G_FW_PORT_CMD_MODTYPE(stat);
+
+ if (stat & F_FW_PORT_CMD_RXPAUSE)
+ fc |= PAUSE_RX;
+ if (stat & F_FW_PORT_CMD_TXPAUSE)
+ fc |= PAUSE_TX;
+ if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
+ speed = SPEED_100;
+ else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
+ speed = SPEED_1000;
+ else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
+ speed = SPEED_10000;
+
+ for_each_port(adap, i) {
+ pi = adap2pinfo(adap, i);
+ if (pi->tx_chan == chan)
+ break;
+ }
+ lc = &pi->link_cfg;
+
+ if (link_ok != lc->link_ok || speed != lc->speed ||
+ fc != lc->fc) { /* something changed */
+ lc->link_ok = link_ok;
+ lc->speed = speed;
+ lc->fc = fc;
+ t4_os_link_changed(adap, i, link_ok);
+ }
+ if (mod != pi->mod_type) {
+ pi->mod_type = mod;
+ t4_os_portmod_changed(adap, i);
+ }
+ }
+ return 0;
+}
+
+/**
+ * get_pci_mode - determine a card's PCI mode
+ * @adapter: the adapter
+ * @p: where to store the PCI settings
+ *
+ * Determines a card's PCI mode and associated parameters, such as speed
+ * and width.
+ */
+static void __devinit get_pci_mode(struct adapter *adapter,
+ struct pci_params *p)
+{
+ u16 val;
+ u32 pcie_cap;
+
+ pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
+ if (pcie_cap) {
+ t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
+ p->speed = val & PCI_EXP_LNKSTA_CLS;
+ p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
+ }
+}
+
+/**
+ * init_link_config - initialize a link's SW state
+ * @lc: structure holding the link state
+ * @caps: link capabilities
+ *
+ * Initializes the SW state maintained for each link, including the link's
+ * capabilities and default speed/flow-control/autonegotiation settings.
+ */
+static void __devinit init_link_config(struct link_config *lc,
+ unsigned int caps)
+{
+ lc->supported = caps;
+ lc->requested_speed = 0;
+ lc->speed = 0;
+ lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
+ if (lc->supported & FW_PORT_CAP_ANEG) {
+ lc->advertising = lc->supported & ADVERT_MASK;
+ lc->autoneg = AUTONEG_ENABLE;
+ lc->requested_fc |= PAUSE_AUTONEG;
+ } else {
+ lc->advertising = 0;
+ lc->autoneg = AUTONEG_DISABLE;
+ }
+}
+
+static int __devinit wait_dev_ready(struct adapter *adap)
+{
+ u32 whoami;
+
+ whoami = t4_read_reg(adap, A_PL_WHOAMI);
+
+ if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
+ return 0;
+
+ msleep(500);
+ whoami = t4_read_reg(adap, A_PL_WHOAMI);
+ return (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS
+ ? 0 : -EIO);
+}
+
+static int __devinit get_flash_params(struct adapter *adapter)
+{
+ int ret;
+ u32 info = 0;
+
+ ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
+ if (!ret)
+ ret = sf1_read(adapter, 3, 0, 1, &info);
+ t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
+ if (ret < 0)
+ return ret;
+
+ if ((info & 0xff) != 0x20) /* not a Numonix flash */
+ return -EINVAL;
+ info >>= 16; /* log2 of size */
+ if (info >= 0x14 && info < 0x18)
+ adapter->params.sf_nsec = 1 << (info - 16);
+ else if (info == 0x18)
+ adapter->params.sf_nsec = 64;
+ else
+ return -EINVAL;
+ adapter->params.sf_size = 1 << info;
+ return 0;
+}
+
+/**
+ * t4_prep_adapter - prepare SW and HW for operation
+ * @adapter: the adapter
+ *
+ * Initialize adapter SW state for the various HW modules, set initial
+ * values for some adapter tunables, take PHYs out of reset, and
+ * initialize the MDIO interface.
+ */
+int __devinit t4_prep_adapter(struct adapter *adapter)
+{
+ int ret;
+
+ ret = wait_dev_ready(adapter);
+ if (ret < 0)
+ return ret;
+
+ get_pci_mode(adapter, &adapter->params.pci);
+
+ adapter->params.rev = t4_read_reg(adapter, A_PL_REV);
+ adapter->params.pci.vpd_cap_addr =
+ t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
+
+ ret = get_flash_params(adapter);
+ if (ret < 0)
+ return ret;
+
+ ret = get_vpd_params(adapter, &adapter->params.vpd);
+ if (ret < 0)
+ return ret;
+
+ if (t4_read_reg(adapter, A_SGE_PC0_REQ_BIST_CMD) != 0xffffffff) {
+ adapter->params.cim_la_size = 2 * CIMLA_SIZE;
+ } else {
+ adapter->params.cim_la_size = CIMLA_SIZE;
+ }
+
+ init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
+
+ /*
+ * Default port and clock for debugging in case we can't reach FW.
+ */
+ adapter->params.nports = 1;
+ adapter->params.portvec = 1;
+ adapter->params.vpd.cclk = 50000;
+
+ return 0;
+}
+
+int __devinit t4_port_init(struct port_info *p, int mbox, int pf, int vf)
+{
+ u8 addr[6];
+ int ret, i, j;
+ struct fw_port_cmd c;
+ unsigned int rss_size;
+ adapter_t *adap = p->adapter;
+
+ memset(&c, 0, sizeof(c));
+
+ for (i = 0, j = -1; i <= p->port_id; i++) {
+ do {
+ j++;
+ } while ((adap->params.portvec & (1 << j)) == 0);
+ }
+
+ c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_READ |
+ V_FW_PORT_CMD_PORTID(j));
+ c.action_to_len16 = htonl(
+ V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
+ FW_LEN16(c));
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (ret)
+ return ret;
+
+ ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
+ if (ret < 0)
+ return ret;
+
+ p->viid = ret;
+ p->tx_chan = j;
+ p->lport = j;
+ p->rss_size = rss_size;
+ t4_os_set_hw_addr(adap, p->port_id, addr);
+
+ ret = ntohl(c.u.info.lstatus_to_modtype);
+ p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
+ G_FW_PORT_CMD_MDIOADDR(ret) : -1;
+ p->port_type = G_FW_PORT_CMD_PTYPE(ret);
+ p->mod_type = G_FW_PORT_CMD_MODTYPE(ret);
+
+ init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
+
+ return 0;
+}
diff --git a/sys/dev/cxgbe/common/t4_hw.h b/sys/dev/cxgbe/common/t4_hw.h
new file mode 100644
index 0000000..fd48aab
--- /dev/null
+++ b/sys/dev/cxgbe/common/t4_hw.h
@@ -0,0 +1,185 @@
+/*-
+ * Copyright (c) 2011 Chelsio Communications, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef __T4_HW_H
+#define __T4_HW_H
+
+#include "osdep.h"
+
+enum {
+ NCHAN = 4, /* # of HW channels */
+ MAX_MTU = 9600, /* max MAC MTU, excluding header + FCS */
+ EEPROMSIZE = 17408, /* Serial EEPROM physical size */
+ EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */
+ EEPROMPFSIZE = 1024, /* EEPROM writable area size for PFn, n>0 */
+ RSS_NENTRIES = 2048, /* # of entries in RSS mapping table */
+ TCB_SIZE = 128, /* TCB size */
+ NMTUS = 16, /* size of MTU table */
+ NCCTRL_WIN = 32, /* # of congestion control windows */
+ NTX_SCHED = 8, /* # of HW Tx scheduling queues */
+ PM_NSTATS = 5, /* # of PM stats */
+ MBOX_LEN = 64, /* mailbox size in bytes */
+ TRACE_LEN = 112, /* length of trace data and mask */
+ FILTER_OPT_LEN = 36, /* filter tuple width for optional components */
+ NWOL_PAT = 8, /* # of WoL patterns */
+ WOL_PAT_LEN = 128, /* length of WoL patterns */
+};
+
+enum {
+ CIM_NUM_IBQ = 6, /* # of CIM IBQs */
+ CIM_NUM_OBQ = 6, /* # of CIM OBQs */
+ CIMLA_SIZE = 2048, /* # of 32-bit words in CIM LA */
+ CIM_PIFLA_SIZE = 64, /* # of 192-bit words in CIM PIF LA */
+ CIM_MALA_SIZE = 64, /* # of 160-bit words in CIM MA LA */
+ CIM_IBQ_SIZE = 128, /* # of 128-bit words in a CIM IBQ */
+ TPLA_SIZE = 128, /* # of 64-bit words in TP LA */
+ ULPRX_LA_SIZE = 512, /* # of 256-bit words in ULP_RX LA */
+};
+
+enum {
+ SF_PAGE_SIZE = 256, /* serial flash page size */
+ SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
+};
+
+/* SGE context types */
+enum ctxt_type { CTXT_EGRESS, CTXT_INGRESS, CTXT_FLM, CTXT_CNM };
+
+enum { RSP_TYPE_FLBUF, RSP_TYPE_CPL, RSP_TYPE_INTR }; /* response entry types */
+
+enum { MBOX_OWNER_NONE, MBOX_OWNER_FW, MBOX_OWNER_DRV }; /* mailbox owners */
+
+enum {
+ SGE_MAX_WR_LEN = 512, /* max WR size in bytes */
+ SGE_CTXT_SIZE = 24, /* size of SGE context */
+ SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */
+ SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */
+};
+
+struct sge_qstat { /* data written to SGE queue status entries */
+ volatile __be32 qid;
+ volatile __be16 cidx;
+ volatile __be16 pidx;
+};
+
+#define S_QSTAT_PIDX 0
+#define M_QSTAT_PIDX 0xffff
+#define G_QSTAT_PIDX(x) (((x) >> S_QSTAT_PIDX) & M_QSTAT_PIDX)
+
+#define S_QSTAT_CIDX 16
+#define M_QSTAT_CIDX 0xffff
+#define G_QSTAT_CIDX(x) (((x) >> S_QSTAT_CIDX) & M_QSTAT_CIDX)
+
+/*
+ * Structure for last 128 bits of response descriptors
+ */
+struct rsp_ctrl {
+ __be32 hdrbuflen_pidx;
+ __be32 pldbuflen_qid;
+ union {
+ u8 type_gen;
+ __be64 last_flit;
+ } u;
+};
+
+#define S_RSPD_NEWBUF 31
+#define V_RSPD_NEWBUF(x) ((x) << S_RSPD_NEWBUF)
+#define F_RSPD_NEWBUF V_RSPD_NEWBUF(1U)
+
+#define S_RSPD_LEN 0
+#define M_RSPD_LEN 0x7fffffff
+#define V_RSPD_LEN(x) ((x) << S_RSPD_LEN)
+#define G_RSPD_LEN(x) (((x) >> S_RSPD_LEN) & M_RSPD_LEN)
+
+#define S_RSPD_QID S_RSPD_LEN
+#define M_RSPD_QID M_RSPD_LEN
+#define V_RSPD_QID(x) V_RSPD_LEN(x)
+#define G_RSPD_QID(x) G_RSPD_LEN(x)
+
+#define S_RSPD_GEN 7
+#define V_RSPD_GEN(x) ((x) << S_RSPD_GEN)
+#define F_RSPD_GEN V_RSPD_GEN(1U)
+
+#define S_RSPD_QOVFL 6
+#define V_RSPD_QOVFL(x) ((x) << S_RSPD_QOVFL)
+#define F_RSPD_QOVFL V_RSPD_QOVFL(1U)
+
+#define S_RSPD_TYPE 4
+#define M_RSPD_TYPE 0x3
+#define V_RSPD_TYPE(x) ((x) << S_RSPD_TYPE)
+#define G_RSPD_TYPE(x) (((x) >> S_RSPD_TYPE) & M_RSPD_TYPE)
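+
+/*
+ * Illustrative sketch (editor's note, not part of this header as committed):
+ * decode the type/gen byte of a response descriptor with the accessors above.
+ * The expected generation bit (0 or F_RSPD_GEN) comes from the consumer's own
+ * queue state.
+ */
+static inline int rspd_type_example(const struct rsp_ctrl *ctrl, u8 gen)
+{
+        u8 tg = ctrl->u.type_gen;
+
+        if ((tg & F_RSPD_GEN) != gen)
+                return -1;              /* descriptor not yet written by HW */
+        return G_RSPD_TYPE(tg);         /* RSP_TYPE_FLBUF, _CPL or _INTR */
+}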
+
+/* Rx queue interrupt deferral fields: counter enable and timer index */
+#define S_QINTR_CNT_EN 0
+#define V_QINTR_CNT_EN(x) ((x) << S_QINTR_CNT_EN)
+#define F_QINTR_CNT_EN V_QINTR_CNT_EN(1U)
+
+#define S_QINTR_TIMER_IDX 1
+#define M_QINTR_TIMER_IDX 0x7
+#define V_QINTR_TIMER_IDX(x) ((x) << S_QINTR_TIMER_IDX)
+#define G_QINTR_TIMER_IDX(x) (((x) >> S_QINTR_TIMER_IDX) & M_QINTR_TIMER_IDX)
+
+/* # of pages a pagepod can hold without needing another pagepod */
+#define PPOD_PAGES 4U
+
+struct pagepod {
+ __be64 vld_tid_pgsz_tag_color;
+ __be64 len_offset;
+ __be64 rsvd;
+ __be64 addr[PPOD_PAGES + 1];
+};
+
+#define S_PPOD_COLOR 0
+#define M_PPOD_COLOR 0x3F
+#define V_PPOD_COLOR(x) ((x) << S_PPOD_COLOR)
+
+#define S_PPOD_TAG 6
+#define M_PPOD_TAG 0xFFFFFF
+#define V_PPOD_TAG(x) ((x) << S_PPOD_TAG)
+
+#define S_PPOD_PGSZ 30
+#define M_PPOD_PGSZ 0x3
+#define V_PPOD_PGSZ(x) ((x) << S_PPOD_PGSZ)
+
+#define S_PPOD_TID 32
+#define M_PPOD_TID 0xFFFFFF
+#define V_PPOD_TID(x) ((__u64)(x) << S_PPOD_TID)
+
+#define S_PPOD_VALID 56
+#define V_PPOD_VALID(x) ((__u64)(x) << S_PPOD_VALID)
+#define F_PPOD_VALID V_PPOD_VALID(1ULL)
+
+#define S_PPOD_LEN 32
+#define M_PPOD_LEN 0xFFFFFFFF
+#define V_PPOD_LEN(x) ((__u64)(x) << S_PPOD_LEN)
+
+#define S_PPOD_OFST 0
+#define M_PPOD_OFST 0xFFFFFFFF
+#define V_PPOD_OFST(x) ((x) << S_PPOD_OFST)
+
+#endif /* __T4_HW_H */
diff --git a/sys/dev/cxgbe/common/t4_msg.h b/sys/dev/cxgbe/common/t4_msg.h
new file mode 100644
index 0000000..d3def60
--- /dev/null
+++ b/sys/dev/cxgbe/common/t4_msg.h
@@ -0,0 +1,2138 @@
+/*-
+ * Copyright (c) 2011 Chelsio Communications, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef T4_MSG_H
+#define T4_MSG_H
+
+enum {
+ CPL_PASS_OPEN_REQ = 0x1,
+ CPL_PASS_ACCEPT_RPL = 0x2,
+ CPL_ACT_OPEN_REQ = 0x3,
+ CPL_SET_TCB = 0x4,
+ CPL_SET_TCB_FIELD = 0x5,
+ CPL_GET_TCB = 0x6,
+ CPL_PCMD = 0x7,
+ CPL_CLOSE_CON_REQ = 0x8,
+ CPL_CLOSE_LISTSRV_REQ = 0x9,
+ CPL_ABORT_REQ = 0xA,
+ CPL_ABORT_RPL = 0xB,
+ CPL_TX_DATA = 0xC,
+ CPL_RX_DATA_ACK = 0xD,
+ CPL_TX_PKT = 0xE,
+ CPL_RTE_DELETE_REQ = 0xF,
+ CPL_RTE_WRITE_REQ = 0x10,
+ CPL_RTE_READ_REQ = 0x11,
+ CPL_L2T_WRITE_REQ = 0x12,
+ CPL_L2T_READ_REQ = 0x13,
+ CPL_SMT_WRITE_REQ = 0x14,
+ CPL_SMT_READ_REQ = 0x15,
+ CPL_BARRIER = 0x18,
+ CPL_TID_RELEASE = 0x1A,
+ CPL_RX_MPS_PKT = 0x1B,
+
+ CPL_CLOSE_LISTSRV_RPL = 0x20,
+ CPL_ERROR = 0x21,
+ CPL_GET_TCB_RPL = 0x22,
+ CPL_L2T_WRITE_RPL = 0x23,
+ CPL_PASS_OPEN_RPL = 0x24,
+ CPL_ACT_OPEN_RPL = 0x25,
+ CPL_PEER_CLOSE = 0x26,
+ CPL_RTE_DELETE_RPL = 0x27,
+ CPL_RTE_WRITE_RPL = 0x28,
+ CPL_RX_URG_PKT = 0x29,
+ CPL_ABORT_REQ_RSS = 0x2B,
+ CPL_RX_URG_NOTIFY = 0x2C,
+ CPL_ABORT_RPL_RSS = 0x2D,
+ CPL_SMT_WRITE_RPL = 0x2E,
+ CPL_TX_DATA_ACK = 0x2F,
+
+ CPL_RX_PHYS_ADDR = 0x30,
+ CPL_PCMD_READ_RPL = 0x31,
+ CPL_CLOSE_CON_RPL = 0x32,
+ CPL_ISCSI_HDR = 0x33,
+ CPL_L2T_READ_RPL = 0x34,
+ CPL_RDMA_CQE = 0x35,
+ CPL_RDMA_CQE_READ_RSP = 0x36,
+ CPL_RDMA_CQE_ERR = 0x37,
+ CPL_RTE_READ_RPL = 0x38,
+ CPL_RX_DATA = 0x39,
+ CPL_SET_TCB_RPL = 0x3A,
+ CPL_RX_PKT = 0x3B,
+ CPL_PCMD_RPL = 0x3C,
+ CPL_HIT_NOTIFY = 0x3D,
+ CPL_PKT_NOTIFY = 0x3E,
+ CPL_RX_DDP_COMPLETE = 0x3F,
+
+ CPL_ACT_ESTABLISH = 0x40,
+ CPL_PASS_ESTABLISH = 0x41,
+ CPL_RX_DATA_DDP = 0x42,
+ CPL_SMT_READ_RPL = 0x43,
+ CPL_PASS_ACCEPT_REQ = 0x44,
+ CPL_RX2TX_PKT = 0x45,
+ CPL_RX_FCOE_DDP = 0x46,
+ CPL_FCOE_HDR = 0x47,
+
+ CPL_RDMA_READ_REQ = 0x60,
+
+ CPL_SET_LE_REQ = 0x80,
+ CPL_PASS_OPEN_REQ6 = 0x81,
+ CPL_ACT_OPEN_REQ6 = 0x83,
+
+ CPL_TX_DMA_ACK = 0xA0,
+ CPL_RDMA_TERMINATE = 0xA2,
+ CPL_RDMA_WRITE = 0xA4,
+ CPL_SGE_EGR_UPDATE = 0xA5,
+ CPL_SET_LE_RPL = 0xA6,
+ CPL_FW2_MSG = 0xA7,
+ CPL_FW2_PLD = 0xA8,
+
+ CPL_TRACE_PKT = 0xB0,
+ CPL_RX2TX_DATA = 0xB1,
+
+ CPL_FW4_MSG = 0xC0,
+ CPL_FW4_PLD = 0xC1,
+ CPL_FW4_ACK = 0xC3,
+
+ CPL_FW6_MSG = 0xE0,
+ CPL_FW6_PLD = 0xE1,
+ CPL_TX_PKT_LSO = 0xED,
+ CPL_TX_PKT_XT = 0xEE,
+
+ NUM_CPL_CMDS /* must be last and previous entries must be sorted */
+};
+
+enum CPL_error {
+ CPL_ERR_NONE = 0,
+ CPL_ERR_TCAM_PARITY = 1,
+ CPL_ERR_TCAM_FULL = 3,
+ CPL_ERR_BAD_LENGTH = 15,
+ CPL_ERR_BAD_ROUTE = 18,
+ CPL_ERR_CONN_RESET = 20,
+ CPL_ERR_CONN_EXIST_SYNRECV = 21,
+ CPL_ERR_CONN_EXIST = 22,
+ CPL_ERR_ARP_MISS = 23,
+ CPL_ERR_BAD_SYN = 24,
+ CPL_ERR_CONN_TIMEDOUT = 30,
+ CPL_ERR_XMIT_TIMEDOUT = 31,
+ CPL_ERR_PERSIST_TIMEDOUT = 32,
+ CPL_ERR_FINWAIT2_TIMEDOUT = 33,
+ CPL_ERR_KEEPALIVE_TIMEDOUT = 34,
+ CPL_ERR_RTX_NEG_ADVICE = 35,
+ CPL_ERR_PERSIST_NEG_ADVICE = 36,
+ CPL_ERR_ABORT_FAILED = 42,
+ CPL_ERR_IWARP_FLM = 50,
+};
+
+enum {
+ CPL_CONN_POLICY_AUTO = 0,
+ CPL_CONN_POLICY_ASK = 1,
+ CPL_CONN_POLICY_FILTER = 2,
+ CPL_CONN_POLICY_DENY = 3
+};
+
+enum {
+ ULP_MODE_NONE = 0,
+ ULP_MODE_ISCSI = 2,
+ ULP_MODE_RDMA = 4,
+ ULP_MODE_TCPDDP = 5,
+ ULP_MODE_FCOE = 6,
+};
+
+enum {
+ ULP_CRC_HEADER = 1 << 0,
+ ULP_CRC_DATA = 1 << 1
+};
+
+enum {
+ CPL_PASS_OPEN_ACCEPT,
+ CPL_PASS_OPEN_REJECT,
+ CPL_PASS_OPEN_ACCEPT_TNL
+};
+
+enum {
+ CPL_ABORT_SEND_RST = 0,
+ CPL_ABORT_NO_RST,
+};
+
+enum { /* TX_PKT_XT checksum types */
+ TX_CSUM_TCP = 0,
+ TX_CSUM_UDP = 1,
+ TX_CSUM_CRC16 = 4,
+ TX_CSUM_CRC32 = 5,
+ TX_CSUM_CRC32C = 6,
+ TX_CSUM_FCOE = 7,
+ TX_CSUM_TCPIP = 8,
+ TX_CSUM_UDPIP = 9,
+ TX_CSUM_TCPIP6 = 10,
+ TX_CSUM_UDPIP6 = 11,
+ TX_CSUM_IP = 12,
+};
+
+enum { /* packet type in CPL_RX_PKT */
+ PKTYPE_XACT_UCAST = 0,
+ PKTYPE_HASH_UCAST = 1,
+ PKTYPE_XACT_MCAST = 2,
+ PKTYPE_HASH_MCAST = 3,
+ PKTYPE_PROMISC = 4,
+ PKTYPE_HPROMISC = 5,
+ PKTYPE_BCAST = 6
+};
+
+enum { /* DMAC type in CPL_RX_PKT */
+ DATYPE_UCAST,
+ DATYPE_MCAST,
+ DATYPE_BCAST
+};
+
+enum { /* TCP congestion control algorithms */
+ CONG_ALG_RENO,
+ CONG_ALG_TAHOE,
+ CONG_ALG_NEWRENO,
+ CONG_ALG_HIGHSPEED
+};
+
+enum { /* RSS hash type */
+ RSS_HASH_NONE = 0, /* no hash computed */
+ RSS_HASH_IP = 1, /* IP or IPv6 2-tuple hash */
+ RSS_HASH_TCP = 2, /* TCP 4-tuple hash */
+ RSS_HASH_UDP = 3 /* UDP 4-tuple hash */
+};
+
+enum { /* LE commands */
+ LE_CMD_READ = 0x4,
+ LE_CMD_WRITE = 0xb
+};
+
+enum { /* LE request size */
+ LE_SZ_NONE = 0,
+ LE_SZ_33 = 1,
+ LE_SZ_66 = 2,
+ LE_SZ_132 = 3,
+ LE_SZ_264 = 4,
+ LE_SZ_528 = 5
+};
+
+union opcode_tid {
+ __be32 opcode_tid;
+ __u8 opcode;
+};
+
+#define S_CPL_OPCODE 24
+#define V_CPL_OPCODE(x) ((x) << S_CPL_OPCODE)
+#define G_CPL_OPCODE(x) (((x) >> S_CPL_OPCODE) & 0xFF)
+#define G_TID(x) ((x) & 0xFFFFFF)
+
+/* tid is assumed to be 24-bits */
+#define MK_OPCODE_TID(opcode, tid) (V_CPL_OPCODE(opcode) | (tid))
+
+#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
+
+/* extract the TID from a CPL command */
+#define GET_TID(cmd) (G_TID(ntohl(OPCODE_TID(cmd))))
+
+/* partitioning of TID fields that also carry a queue id */
+#define S_TID_TID 0
+#define M_TID_TID 0x3fff
+#define V_TID_TID(x) ((x) << S_TID_TID)
+#define G_TID_TID(x) (((x) >> S_TID_TID) & M_TID_TID)
+
+#define S_TID_QID 14
+#define M_TID_QID 0x3ff
+#define V_TID_QID(x) ((x) << S_TID_QID)
+#define G_TID_QID(x) (((x) >> S_TID_QID) & M_TID_QID)
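+
+/*
+ * Illustrative sketch (editor's note, not part of this header as committed):
+ * extract the 24-bit TID field from a received CPL and, for TIDs that also
+ * carry a queue id, split it into its two components with the accessors above.
+ */
+static inline void tid_qid_example(const union opcode_tid *ot,
+                                   unsigned int *tid, unsigned int *qid)
+{
+        unsigned int t = G_TID(ntohl(ot->opcode_tid));  /* full 24-bit field */
+
+        *tid = G_TID_TID(t);    /* low 14 bits: the TID proper */
+        *qid = G_TID_QID(t);    /* next 10 bits: the queue id */
+}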
+
+union opcode_info {
+ __be64 opcode_info;
+ __u8 opcode;
+};
+
+struct tcp_options {
+ __be16 mss;
+ __u8 wsf;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 :4;
+ __u8 unknown:1;
+ __u8 :1;
+ __u8 sack:1;
+ __u8 tstamp:1;
+#else
+ __u8 tstamp:1;
+ __u8 sack:1;
+ __u8 :1;
+ __u8 unknown:1;
+ __u8 :4;
+#endif
+};
+
+struct rss_header {
+ __u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 channel:2;
+ __u8 filter_hit:1;
+ __u8 filter_tid:1;
+ __u8 hash_type:2;
+ __u8 ipv6:1;
+ __u8 send2fw:1;
+#else
+ __u8 send2fw:1;
+ __u8 ipv6:1;
+ __u8 hash_type:2;
+ __u8 filter_tid:1;
+ __u8 filter_hit:1;
+ __u8 channel:2;
+#endif
+ __be16 qid;
+ __be32 hash_val;
+};
+
+#define S_HASHTYPE 20
+#define M_HASHTYPE 0x3
+#define G_HASHTYPE(x) (((x) >> S_HASHTYPE) & M_HASHTYPE)
+
+#define S_QNUM 0
+#define M_QNUM 0xFFFF
+#define G_QNUM(x) (((x) >> S_QNUM) & M_QNUM)
+
+#ifndef CHELSIO_FW
+struct work_request_hdr {
+ __be32 wr_hi;
+ __be32 wr_mid;
+ __be64 wr_lo;
+};
+
+/* wr_mid fields */
+#define S_WR_LEN16 0
+#define M_WR_LEN16 0xFF
+#define V_WR_LEN16(x) ((x) << S_WR_LEN16)
+#define G_WR_LEN16(x) (((x) >> S_WR_LEN16) & M_WR_LEN16)
+
+/* wr_hi fields */
+#define S_WR_OP 24
+#define M_WR_OP 0xFF
+#define V_WR_OP(x) ((__u64)(x) << S_WR_OP)
+#define G_WR_OP(x) (((x) >> S_WR_OP) & M_WR_OP)
+
+# define WR_HDR struct work_request_hdr wr
+# define WR_HDR_SIZE sizeof(struct work_request_hdr)
+# define RSS_HDR
+#else
+# define WR_HDR
+# define WR_HDR_SIZE 0
+# define RSS_HDR struct rss_header rss_hdr;
+#endif
+
+/* option 0 fields */
+#define S_ACCEPT_MODE 0
+#define M_ACCEPT_MODE 0x3
+#define V_ACCEPT_MODE(x) ((x) << S_ACCEPT_MODE)
+#define G_ACCEPT_MODE(x) (((x) >> S_ACCEPT_MODE) & M_ACCEPT_MODE)
+
+#define S_TX_CHAN 2
+#define M_TX_CHAN 0x3
+#define V_TX_CHAN(x) ((x) << S_TX_CHAN)
+#define G_TX_CHAN(x) (((x) >> S_TX_CHAN) & M_TX_CHAN)
+
+#define S_NO_CONG 4
+#define V_NO_CONG(x) ((x) << S_NO_CONG)
+#define F_NO_CONG V_NO_CONG(1U)
+
+#define S_DELACK 5
+#define V_DELACK(x) ((x) << S_DELACK)
+#define F_DELACK V_DELACK(1U)
+
+#define S_INJECT_TIMER 6
+#define V_INJECT_TIMER(x) ((x) << S_INJECT_TIMER)
+#define F_INJECT_TIMER V_INJECT_TIMER(1U)
+
+#define S_NON_OFFLOAD 7
+#define V_NON_OFFLOAD(x) ((x) << S_NON_OFFLOAD)
+#define F_NON_OFFLOAD V_NON_OFFLOAD(1U)
+
+#define S_ULP_MODE 8
+#define M_ULP_MODE 0xF
+#define V_ULP_MODE(x) ((x) << S_ULP_MODE)
+#define G_ULP_MODE(x) (((x) >> S_ULP_MODE) & M_ULP_MODE)
+
+#define S_RCV_BUFSIZ 12
+#define M_RCV_BUFSIZ 0x3FFU
+#define V_RCV_BUFSIZ(x) ((x) << S_RCV_BUFSIZ)
+#define G_RCV_BUFSIZ(x) (((x) >> S_RCV_BUFSIZ) & M_RCV_BUFSIZ)
+
+#define S_DSCP 22
+#define M_DSCP 0x3F
+#define V_DSCP(x) ((x) << S_DSCP)
+#define G_DSCP(x) (((x) >> S_DSCP) & M_DSCP)
+
+#define S_SMAC_SEL 28
+#define M_SMAC_SEL 0xFF
+#define V_SMAC_SEL(x) ((__u64)(x) << S_SMAC_SEL)
+#define G_SMAC_SEL(x) (((x) >> S_SMAC_SEL) & M_SMAC_SEL)
+
+#define S_L2T_IDX 36
+#define M_L2T_IDX 0xFFF
+#define V_L2T_IDX(x) ((__u64)(x) << S_L2T_IDX)
+#define G_L2T_IDX(x) (((x) >> S_L2T_IDX) & M_L2T_IDX)
+
+#define S_TCAM_BYPASS 48
+#define V_TCAM_BYPASS(x) ((__u64)(x) << S_TCAM_BYPASS)
+#define F_TCAM_BYPASS V_TCAM_BYPASS(1ULL)
+
+#define S_NAGLE 49
+#define V_NAGLE(x) ((__u64)(x) << S_NAGLE)
+#define F_NAGLE V_NAGLE(1ULL)
+
+#define S_WND_SCALE 50
+#define M_WND_SCALE 0xF
+#define V_WND_SCALE(x) ((__u64)(x) << S_WND_SCALE)
+#define G_WND_SCALE(x) (((x) >> S_WND_SCALE) & M_WND_SCALE)
+
+#define S_KEEP_ALIVE 54
+#define V_KEEP_ALIVE(x) ((__u64)(x) << S_KEEP_ALIVE)
+#define F_KEEP_ALIVE V_KEEP_ALIVE(1ULL)
+
+#define S_MAX_RT 55
+#define M_MAX_RT 0xF
+#define V_MAX_RT(x) ((__u64)(x) << S_MAX_RT)
+#define G_MAX_RT(x) (((x) >> S_MAX_RT) & M_MAX_RT)
+
+#define S_MAX_RT_OVERRIDE 59
+#define V_MAX_RT_OVERRIDE(x) ((__u64)(x) << S_MAX_RT_OVERRIDE)
+#define F_MAX_RT_OVERRIDE V_MAX_RT_OVERRIDE(1ULL)
+
+#define S_MSS_IDX 60
+#define M_MSS_IDX 0xF
+#define V_MSS_IDX(x) ((__u64)(x) << S_MSS_IDX)
+#define G_MSS_IDX(x) (((x) >> S_MSS_IDX) & M_MSS_IDX)
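+
+/*
+ * Illustrative sketch (editor's note, not part of this header as committed):
+ * compose an option 0 word from the fields above.  The ULP mode, receive
+ * buffer size, window scale and MSS index used here are made-up example
+ * values.
+ */
+static inline __be64 opt0_example(void)
+{
+        return cpu_to_be64(V_ULP_MODE(ULP_MODE_NONE) |
+            V_RCV_BUFSIZ(16) |
+            V_WND_SCALE(2) |
+            V_MSS_IDX(1) |
+            F_TCAM_BYPASS);
+}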
+
+/* option 1 fields */
+#define S_SYN_RSS_ENABLE 0
+#define V_SYN_RSS_ENABLE(x) ((x) << S_SYN_RSS_ENABLE)
+#define F_SYN_RSS_ENABLE V_SYN_RSS_ENABLE(1U)
+
+#define S_SYN_RSS_USE_HASH 1
+#define V_SYN_RSS_USE_HASH(x) ((x) << S_SYN_RSS_USE_HASH)
+#define F_SYN_RSS_USE_HASH V_SYN_RSS_USE_HASH(1U)
+
+#define S_SYN_RSS_QUEUE 2
+#define M_SYN_RSS_QUEUE 0x3FF
+#define V_SYN_RSS_QUEUE(x) ((x) << S_SYN_RSS_QUEUE)
+#define G_SYN_RSS_QUEUE(x) (((x) >> S_SYN_RSS_QUEUE) & M_SYN_RSS_QUEUE)
+
+#define S_LISTEN_INTF 12
+#define M_LISTEN_INTF 0xFF
+#define V_LISTEN_INTF(x) ((x) << S_LISTEN_INTF)
+#define G_LISTEN_INTF(x) (((x) >> S_LISTEN_INTF) & M_LISTEN_INTF)
+
+#define S_LISTEN_FILTER 20
+#define V_LISTEN_FILTER(x) ((x) << S_LISTEN_FILTER)
+#define F_LISTEN_FILTER V_LISTEN_FILTER(1U)
+
+#define S_SYN_DEFENSE 21
+#define V_SYN_DEFENSE(x) ((x) << S_SYN_DEFENSE)
+#define F_SYN_DEFENSE V_SYN_DEFENSE(1U)
+
+#define S_CONN_POLICY 22
+#define M_CONN_POLICY 0x3
+#define V_CONN_POLICY(x) ((x) << S_CONN_POLICY)
+#define G_CONN_POLICY(x) (((x) >> S_CONN_POLICY) & M_CONN_POLICY)
+
+/* option 2 fields */
+#define S_RSS_QUEUE 0
+#define M_RSS_QUEUE 0x3FF
+#define V_RSS_QUEUE(x) ((x) << S_RSS_QUEUE)
+#define G_RSS_QUEUE(x) (((x) >> S_RSS_QUEUE) & M_RSS_QUEUE)
+
+#define S_RSS_QUEUE_VALID 10
+#define V_RSS_QUEUE_VALID(x) ((x) << S_RSS_QUEUE_VALID)
+#define F_RSS_QUEUE_VALID V_RSS_QUEUE_VALID(1U)
+
+#define S_RX_COALESCE_VALID 11
+#define V_RX_COALESCE_VALID(x) ((x) << S_RX_COALESCE_VALID)
+#define F_RX_COALESCE_VALID V_RX_COALESCE_VALID(1U)
+
+#define S_RX_COALESCE 12
+#define M_RX_COALESCE 0x3
+#define V_RX_COALESCE(x) ((x) << S_RX_COALESCE)
+#define G_RX_COALESCE(x) (((x) >> S_RX_COALESCE) & M_RX_COALESCE)
+
+#define S_CONG_CNTRL 14
+#define M_CONG_CNTRL 0x3
+#define V_CONG_CNTRL(x) ((x) << S_CONG_CNTRL)
+#define G_CONG_CNTRL(x) (((x) >> S_CONG_CNTRL) & M_CONG_CNTRL)
+
+#define S_PACE 16
+#define M_PACE 0x3
+#define V_PACE(x) ((x) << S_PACE)
+#define G_PACE(x) (((x) >> S_PACE) & M_PACE)
+
+#define S_CONG_CNTRL_VALID 18
+#define V_CONG_CNTRL_VALID(x) ((x) << S_CONG_CNTRL_VALID)
+#define F_CONG_CNTRL_VALID V_CONG_CNTRL_VALID(1U)
+
+#define S_PACE_VALID 19
+#define V_PACE_VALID(x) ((x) << S_PACE_VALID)
+#define F_PACE_VALID V_PACE_VALID(1U)
+
+#define S_RX_FC_DISABLE 20
+#define V_RX_FC_DISABLE(x) ((x) << S_RX_FC_DISABLE)
+#define F_RX_FC_DISABLE V_RX_FC_DISABLE(1U)
+
+#define S_RX_FC_DDP 21
+#define V_RX_FC_DDP(x) ((x) << S_RX_FC_DDP)
+#define F_RX_FC_DDP V_RX_FC_DDP(1U)
+
+#define S_RX_FC_VALID 22
+#define V_RX_FC_VALID(x) ((x) << S_RX_FC_VALID)
+#define F_RX_FC_VALID V_RX_FC_VALID(1U)
+
+#define S_TX_QUEUE 23
+#define M_TX_QUEUE 0x7
+#define V_TX_QUEUE(x) ((x) << S_TX_QUEUE)
+#define G_TX_QUEUE(x) (((x) >> S_TX_QUEUE) & M_TX_QUEUE)
+
+#define S_RX_CHANNEL 26
+#define V_RX_CHANNEL(x) ((x) << S_RX_CHANNEL)
+#define F_RX_CHANNEL V_RX_CHANNEL(1U)
+
+#define S_CCTRL_ECN 27
+#define V_CCTRL_ECN(x) ((x) << S_CCTRL_ECN)
+#define F_CCTRL_ECN V_CCTRL_ECN(1U)
+
+#define S_WND_SCALE_EN 28
+#define V_WND_SCALE_EN(x) ((x) << S_WND_SCALE_EN)
+#define F_WND_SCALE_EN V_WND_SCALE_EN(1U)
+
+#define S_TSTAMPS_EN 29
+#define V_TSTAMPS_EN(x) ((x) << S_TSTAMPS_EN)
+#define F_TSTAMPS_EN V_TSTAMPS_EN(1U)
+
+#define S_SACK_EN 30
+#define V_SACK_EN(x) ((x) << S_SACK_EN)
+#define F_SACK_EN V_SACK_EN(1U)
+
+struct cpl_pass_open_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be32 local_ip;
+ __be32 peer_ip;
+ __be64 opt0;
+ __be64 opt1;
+};
+
+struct cpl_pass_open_req6 {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be64 local_ip_hi;
+ __be64 local_ip_lo;
+ __be64 peer_ip_hi;
+ __be64 peer_ip_lo;
+ __be64 opt0;
+ __be64 opt1;
+};
+
+struct cpl_pass_open_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __u8 rsvd[3];
+ __u8 status;
+};
+
+struct cpl_pass_establish {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 rsvd;
+ __be32 tos_stid;
+ __be16 mac_idx;
+ __be16 tcp_opt;
+ __be32 snd_isn;
+ __be32 rcv_isn;
+};
+
+/* cpl_pass_establish.tos_stid fields */
+#define S_PASS_OPEN_TID 0
+#define M_PASS_OPEN_TID 0xFFFFFF
+#define V_PASS_OPEN_TID(x) ((x) << S_PASS_OPEN_TID)
+#define G_PASS_OPEN_TID(x) (((x) >> S_PASS_OPEN_TID) & M_PASS_OPEN_TID)
+
+#define S_PASS_OPEN_TOS 24
+#define M_PASS_OPEN_TOS 0xFF
+#define V_PASS_OPEN_TOS(x) ((x) << S_PASS_OPEN_TOS)
+#define G_PASS_OPEN_TOS(x) (((x) >> S_PASS_OPEN_TOS) & M_PASS_OPEN_TOS)
+
+/* cpl_pass_establish.tcp_opt fields (also applies to act_open_establish) */
+#define G_TCPOPT_WSCALE_OK(x) (((x) >> 5) & 1)
+#define G_TCPOPT_SACK(x) (((x) >> 6) & 1)
+#define G_TCPOPT_TSTAMP(x) (((x) >> 7) & 1)
+#define G_TCPOPT_SND_WSCALE(x) (((x) >> 8) & 0xf)
+#define G_TCPOPT_MSS(x) (((x) >> 12) & 0xf)
+
+struct cpl_pass_accept_req {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 rsvd;
+ __be16 len;
+ __be32 hdr_len;
+ __be16 vlan;
+ __be16 l2info;
+ __be32 tos_stid;
+ struct tcp_options tcpopt;
+};
+
+/* cpl_pass_accept_req.hdr_len fields */
+#define S_SYN_RX_CHAN 0
+#define M_SYN_RX_CHAN 0xF
+#define V_SYN_RX_CHAN(x) ((x) << S_SYN_RX_CHAN)
+#define G_SYN_RX_CHAN(x) (((x) >> S_SYN_RX_CHAN) & M_SYN_RX_CHAN)
+
+#define S_TCP_HDR_LEN 10
+#define M_TCP_HDR_LEN 0x3F
+#define V_TCP_HDR_LEN(x) ((x) << S_TCP_HDR_LEN)
+#define G_TCP_HDR_LEN(x) (((x) >> S_TCP_HDR_LEN) & M_TCP_HDR_LEN)
+
+#define S_IP_HDR_LEN 16
+#define M_IP_HDR_LEN 0x3FF
+#define V_IP_HDR_LEN(x) ((x) << S_IP_HDR_LEN)
+#define G_IP_HDR_LEN(x) (((x) >> S_IP_HDR_LEN) & M_IP_HDR_LEN)
+
+#define S_ETH_HDR_LEN 26
+#define M_ETH_HDR_LEN 0x1F
+#define V_ETH_HDR_LEN(x) ((x) << S_ETH_HDR_LEN)
+#define G_ETH_HDR_LEN(x) (((x) >> S_ETH_HDR_LEN) & M_ETH_HDR_LEN)
+
+/* cpl_pass_accept_req.l2info fields */
+#define S_SYN_MAC_IDX 0
+#define M_SYN_MAC_IDX 0x1FF
+#define V_SYN_MAC_IDX(x) ((x) << S_SYN_MAC_IDX)
+#define G_SYN_MAC_IDX(x) (((x) >> S_SYN_MAC_IDX) & M_SYN_MAC_IDX)
+
+#define S_SYN_XACT_MATCH 9
+#define V_SYN_XACT_MATCH(x) ((x) << S_SYN_XACT_MATCH)
+#define F_SYN_XACT_MATCH V_SYN_XACT_MATCH(1U)
+
+#define S_SYN_INTF 12
+#define M_SYN_INTF 0xF
+#define V_SYN_INTF(x) ((x) << S_SYN_INTF)
+#define G_SYN_INTF(x) (((x) >> S_SYN_INTF) & M_SYN_INTF)
+
+struct cpl_pass_accept_rpl {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 opt2;
+ __be64 opt0;
+};
+
+struct cpl_act_open_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be32 local_ip;
+ __be32 peer_ip;
+ __be64 opt0;
+ __be32 params;
+ __be32 opt2;
+};
+
+/* cpl_act_open_req.params fields XXX */
+#define S_AOPEN_VLAN_PRI 9
+#define M_AOPEN_VLAN_PRI 0x3
+#define V_AOPEN_VLAN_PRI(x) ((x) << S_AOPEN_VLAN_PRI)
+#define G_AOPEN_VLAN_PRI(x) (((x) >> S_AOPEN_VLAN_PRI) & M_AOPEN_VLAN_PRI)
+
+#define S_AOPEN_VLAN_PRI_VALID 11
+#define V_AOPEN_VLAN_PRI_VALID(x) ((x) << S_AOPEN_VLAN_PRI_VALID)
+#define F_AOPEN_VLAN_PRI_VALID V_AOPEN_VLAN_PRI_VALID(1U)
+
+#define S_AOPEN_PKT_TYPE 12
+#define M_AOPEN_PKT_TYPE 0x3
+#define V_AOPEN_PKT_TYPE(x) ((x) << S_AOPEN_PKT_TYPE)
+#define G_AOPEN_PKT_TYPE(x) (((x) >> S_AOPEN_PKT_TYPE) & M_AOPEN_PKT_TYPE)
+
+#define S_AOPEN_MAC_MATCH 14
+#define M_AOPEN_MAC_MATCH 0x1F
+#define V_AOPEN_MAC_MATCH(x) ((x) << S_AOPEN_MAC_MATCH)
+#define G_AOPEN_MAC_MATCH(x) (((x) >> S_AOPEN_MAC_MATCH) & M_AOPEN_MAC_MATCH)
+
+#define S_AOPEN_MAC_MATCH_VALID 19
+#define V_AOPEN_MAC_MATCH_VALID(x) ((x) << S_AOPEN_MAC_MATCH_VALID)
+#define F_AOPEN_MAC_MATCH_VALID V_AOPEN_MAC_MATCH_VALID(1U)
+
+#define S_AOPEN_IFF_VLAN 20
+#define M_AOPEN_IFF_VLAN 0xFFF
+#define V_AOPEN_IFF_VLAN(x) ((x) << S_AOPEN_IFF_VLAN)
+#define G_AOPEN_IFF_VLAN(x) (((x) >> S_AOPEN_IFF_VLAN) & M_AOPEN_IFF_VLAN)
+
+struct cpl_act_open_req6 {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be64 local_ip_hi;
+ __be64 local_ip_lo;
+ __be64 peer_ip_hi;
+ __be64 peer_ip_lo;
+ __be64 opt0;
+ __be32 params;
+ __be32 opt2;
+};
+
+struct cpl_act_open_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 atid_status;
+};
+
+/* cpl_act_open_rpl.atid_status fields */
+#define S_AOPEN_STATUS 0
+#define M_AOPEN_STATUS 0xFF
+#define V_AOPEN_STATUS(x) ((x) << S_AOPEN_STATUS)
+#define G_AOPEN_STATUS(x) (((x) >> S_AOPEN_STATUS) & M_AOPEN_STATUS)
+
+#define S_AOPEN_ATID 8
+#define M_AOPEN_ATID 0xFFFFFF
+#define V_AOPEN_ATID(x) ((x) << S_AOPEN_ATID)
+#define G_AOPEN_ATID(x) (((x) >> S_AOPEN_ATID) & M_AOPEN_ATID)
+
+struct cpl_act_establish {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 rsvd;
+ __be32 tos_atid;
+ __be16 mac_idx;
+ __be16 tcp_opt;
+ __be32 snd_isn;
+ __be32 rcv_isn;
+};
+
+struct cpl_get_tcb {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 reply_ctrl;
+ __be16 cookie;
+};
+
+/* cpl_get_tcb.reply_ctrl fields */
+#define S_QUEUENO 0
+#define M_QUEUENO 0x3FF
+#define V_QUEUENO(x) ((x) << S_QUEUENO)
+#define G_QUEUENO(x) (((x) >> S_QUEUENO) & M_QUEUENO)
+
+#define S_REPLY_CHAN 14
+#define V_REPLY_CHAN(x) ((x) << S_REPLY_CHAN)
+#define F_REPLY_CHAN V_REPLY_CHAN(1U)
+
+#define S_NO_REPLY 15
+#define V_NO_REPLY(x) ((x) << S_NO_REPLY)
+#define F_NO_REPLY V_NO_REPLY(1U)
+
+struct cpl_get_tcb_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __u8 cookie;
+ __u8 status;
+ __be16 len;
+};
+
+struct cpl_set_tcb {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 reply_ctrl;
+ __be16 cookie;
+};
+
+struct cpl_set_tcb_field {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 reply_ctrl;
+ __be16 word_cookie;
+ __be64 mask;
+ __be64 val;
+};
+
+/* cpl_set_tcb_field.word_cookie fields */
+#define S_WORD 0
+#define M_WORD 0x1F
+#define V_WORD(x) ((x) << S_WORD)
+#define G_WORD(x) (((x) >> S_WORD) & M_WORD)
+
+#define S_COOKIE 5
+#define M_COOKIE 0x7
+#define V_COOKIE(x) ((x) << S_COOKIE)
+#define G_COOKIE(x) (((x) >> S_COOKIE) & M_COOKIE)
+
+struct cpl_set_tcb_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 rsvd;
+ __u8 cookie;
+ __u8 status;
+ __be64 oldval;
+};
+
+struct cpl_close_con_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 rsvd;
+};
+
+struct cpl_close_con_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __u8 rsvd[3];
+ __u8 status;
+ __be32 snd_nxt;
+ __be32 rcv_nxt;
+};
+
+struct cpl_close_listsvr_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 reply_ctrl;
+ __be16 rsvd;
+};
+
+/* additional cpl_close_listsvr_req.reply_ctrl field */
+#define S_LISTSVR_IPV6 14
+#define V_LISTSVR_IPV6(x) ((x) << S_LISTSVR_IPV6)
+#define F_LISTSVR_IPV6 V_LISTSVR_IPV6(1U)
+
+struct cpl_close_listsvr_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __u8 rsvd[3];
+ __u8 status;
+};
+
+struct cpl_abort_req_rss {
+ RSS_HDR
+ union opcode_tid ot;
+ __u8 rsvd[3];
+ __u8 status;
+};
+
+struct cpl_abort_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 rsvd0;
+ __u8 rsvd1;
+ __u8 cmd;
+ __u8 rsvd2[6];
+};
+
+struct cpl_abort_rpl_rss {
+ RSS_HDR
+ union opcode_tid ot;
+ __u8 rsvd[3];
+ __u8 status;
+};
+
+struct cpl_abort_rpl {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 rsvd0;
+ __u8 rsvd1;
+ __u8 cmd;
+ __u8 rsvd2[6];
+};
+
+struct cpl_peer_close {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 rcv_nxt;
+};
+
+struct cpl_tid_release {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 rsvd;
+};
+
+struct tx_data_wr {
+ __be32 wr_hi;
+ __be32 wr_lo;
+ __be32 len;
+ __be32 flags;
+ __be32 sndseq;
+ __be32 param;
+};
+
+/* tx_data_wr.flags fields */
+#define S_TX_ACK_PAGES 21
+#define M_TX_ACK_PAGES 0x7
+#define V_TX_ACK_PAGES(x) ((x) << S_TX_ACK_PAGES)
+#define G_TX_ACK_PAGES(x) (((x) >> S_TX_ACK_PAGES) & M_TX_ACK_PAGES)
+
+/* tx_data_wr.param fields */
+#define S_TX_PORT 0
+#define M_TX_PORT 0x7
+#define V_TX_PORT(x) ((x) << S_TX_PORT)
+#define G_TX_PORT(x) (((x) >> S_TX_PORT) & M_TX_PORT)
+
+#define S_TX_MSS 4
+#define M_TX_MSS 0xF
+#define V_TX_MSS(x) ((x) << S_TX_MSS)
+#define G_TX_MSS(x) (((x) >> S_TX_MSS) & M_TX_MSS)
+
+#define S_TX_QOS 8
+#define M_TX_QOS 0xFF
+#define V_TX_QOS(x) ((x) << S_TX_QOS)
+#define G_TX_QOS(x) (((x) >> S_TX_QOS) & M_TX_QOS)
+
+#define S_TX_SNDBUF 16
+#define M_TX_SNDBUF 0xFFFF
+#define V_TX_SNDBUF(x) ((x) << S_TX_SNDBUF)
+#define G_TX_SNDBUF(x) (((x) >> S_TX_SNDBUF) & M_TX_SNDBUF)
+
+struct cpl_tx_data {
+ union opcode_tid ot;
+ __be32 len;
+ __be32 rsvd;
+ __be32 flags;
+};
+
+/* cpl_tx_data.flags fields */
+#define S_TX_PROXY 5
+#define V_TX_PROXY(x) ((x) << S_TX_PROXY)
+#define F_TX_PROXY V_TX_PROXY(1U)
+
+#define S_TX_ULP_SUBMODE 6
+#define M_TX_ULP_SUBMODE 0xF
+#define V_TX_ULP_SUBMODE(x) ((x) << S_TX_ULP_SUBMODE)
+#define G_TX_ULP_SUBMODE(x) (((x) >> S_TX_ULP_SUBMODE) & M_TX_ULP_SUBMODE)
+
+#define S_TX_ULP_MODE 10
+#define M_TX_ULP_MODE 0xF
+#define V_TX_ULP_MODE(x) ((x) << S_TX_ULP_MODE)
+#define G_TX_ULP_MODE(x) (((x) >> S_TX_ULP_MODE) & M_TX_ULP_MODE)
+
+#define S_TX_SHOVE 14
+#define V_TX_SHOVE(x) ((x) << S_TX_SHOVE)
+#define F_TX_SHOVE V_TX_SHOVE(1U)
+
+#define S_TX_MORE 15
+#define V_TX_MORE(x) ((x) << S_TX_MORE)
+#define F_TX_MORE V_TX_MORE(1U)
+
+#define S_TX_URG 16
+#define V_TX_URG(x) ((x) << S_TX_URG)
+#define F_TX_URG V_TX_URG(1U)
+
+#define S_TX_FLUSH 17
+#define V_TX_FLUSH(x) ((x) << S_TX_FLUSH)
+#define F_TX_FLUSH V_TX_FLUSH(1U)
+
+#define S_TX_SAVE 18
+#define V_TX_SAVE(x) ((x) << S_TX_SAVE)
+#define F_TX_SAVE V_TX_SAVE(1U)
+
+#define S_TX_TNL 19
+#define V_TX_TNL(x) ((x) << S_TX_TNL)
+#define F_TX_TNL V_TX_TNL(1U)
+
+/* additional tx_data_wr.flags fields */
+#define S_TX_CPU_IDX 0
+#define M_TX_CPU_IDX 0x3F
+#define V_TX_CPU_IDX(x) ((x) << S_TX_CPU_IDX)
+#define G_TX_CPU_IDX(x) (((x) >> S_TX_CPU_IDX) & M_TX_CPU_IDX)
+
+#define S_TX_CLOSE 17
+#define V_TX_CLOSE(x) ((x) << S_TX_CLOSE)
+#define F_TX_CLOSE V_TX_CLOSE(1U)
+
+#define S_TX_INIT 18
+#define V_TX_INIT(x) ((x) << S_TX_INIT)
+#define F_TX_INIT V_TX_INIT(1U)
+
+#define S_TX_IMM_ACK 19
+#define V_TX_IMM_ACK(x) ((x) << S_TX_IMM_ACK)
+#define F_TX_IMM_ACK V_TX_IMM_ACK(1U)
+
+#define S_TX_IMM_DMA 20
+#define V_TX_IMM_DMA(x) ((x) << S_TX_IMM_DMA)
+#define F_TX_IMM_DMA V_TX_IMM_DMA(1U)
+
+struct cpl_tx_data_ack {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 snd_una;
+};
+
+struct cpl_wr_ack { /* XXX */
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 credits;
+ __be16 rsvd;
+ __be32 snd_nxt;
+ __be32 snd_una;
+};
+
+struct cpl_tx_pkt_core {
+ __be32 ctrl0;
+ __be16 pack;
+ __be16 len;
+ __be64 ctrl1;
+};
+
+struct cpl_tx_pkt {
+ WR_HDR;
+ struct cpl_tx_pkt_core c;
+};
+
+#define cpl_tx_pkt_xt cpl_tx_pkt
+
+/* cpl_tx_pkt_core.ctrl0 fields */
+#define S_TXPKT_VF 0
+#define M_TXPKT_VF 0xFF
+#define V_TXPKT_VF(x) ((x) << S_TXPKT_VF)
+#define G_TXPKT_VF(x) (((x) >> S_TXPKT_VF) & M_TXPKT_VF)
+
+#define S_TXPKT_PF 8
+#define M_TXPKT_PF 0x7
+#define V_TXPKT_PF(x) ((x) << S_TXPKT_PF)
+#define G_TXPKT_PF(x) (((x) >> S_TXPKT_PF) & M_TXPKT_PF)
+
+#define S_TXPKT_VF_VLD 11
+#define V_TXPKT_VF_VLD(x) ((x) << S_TXPKT_VF_VLD)
+#define F_TXPKT_VF_VLD V_TXPKT_VF_VLD(1U)
+
+#define S_TXPKT_OVLAN_IDX 12
+#define M_TXPKT_OVLAN_IDX 0xF
+#define V_TXPKT_OVLAN_IDX(x) ((x) << S_TXPKT_OVLAN_IDX)
+#define G_TXPKT_OVLAN_IDX(x) (((x) >> S_TXPKT_OVLAN_IDX) & M_TXPKT_OVLAN_IDX)
+
+#define S_TXPKT_INTF 16
+#define M_TXPKT_INTF 0xF
+#define V_TXPKT_INTF(x) ((x) << S_TXPKT_INTF)
+#define G_TXPKT_INTF(x) (((x) >> S_TXPKT_INTF) & M_TXPKT_INTF)
+
+#define S_TXPKT_SPECIAL_STAT 20
+#define V_TXPKT_SPECIAL_STAT(x) ((x) << S_TXPKT_SPECIAL_STAT)
+#define F_TXPKT_SPECIAL_STAT V_TXPKT_SPECIAL_STAT(1U)
+
+#define S_TXPKT_INS_OVLAN 21
+#define V_TXPKT_INS_OVLAN(x) ((x) << S_TXPKT_INS_OVLAN)
+#define F_TXPKT_INS_OVLAN V_TXPKT_INS_OVLAN(1U)
+
+#define S_TXPKT_STAT_DIS 22
+#define V_TXPKT_STAT_DIS(x) ((x) << S_TXPKT_STAT_DIS)
+#define F_TXPKT_STAT_DIS V_TXPKT_STAT_DIS(1U)
+
+#define S_TXPKT_LOOPBACK 23
+#define V_TXPKT_LOOPBACK(x) ((x) << S_TXPKT_LOOPBACK)
+#define F_TXPKT_LOOPBACK V_TXPKT_LOOPBACK(1U)
+
+#define S_TXPKT_OPCODE 24
+#define M_TXPKT_OPCODE 0xFF
+#define V_TXPKT_OPCODE(x) ((x) << S_TXPKT_OPCODE)
+#define G_TXPKT_OPCODE(x) (((x) >> S_TXPKT_OPCODE) & M_TXPKT_OPCODE)
+
+/* cpl_tx_pkt_core.ctrl1 fields */
+#define S_TXPKT_SA_IDX 0
+#define M_TXPKT_SA_IDX 0xFFF
+#define V_TXPKT_SA_IDX(x) ((x) << S_TXPKT_SA_IDX)
+#define G_TXPKT_SA_IDX(x) (((x) >> S_TXPKT_SA_IDX) & M_TXPKT_SA_IDX)
+
+#define S_TXPKT_CSUM_END 12
+#define M_TXPKT_CSUM_END 0xFF
+#define V_TXPKT_CSUM_END(x) ((x) << S_TXPKT_CSUM_END)
+#define G_TXPKT_CSUM_END(x) (((x) >> S_TXPKT_CSUM_END) & M_TXPKT_CSUM_END)
+
+#define S_TXPKT_CSUM_START 20
+#define M_TXPKT_CSUM_START 0x3FF
+#define V_TXPKT_CSUM_START(x) ((x) << S_TXPKT_CSUM_START)
+#define G_TXPKT_CSUM_START(x) (((x) >> S_TXPKT_CSUM_START) & M_TXPKT_CSUM_START)
+
+#define S_TXPKT_IPHDR_LEN 20
+#define M_TXPKT_IPHDR_LEN 0x3FFF
+#define V_TXPKT_IPHDR_LEN(x) ((__u64)(x) << S_TXPKT_IPHDR_LEN)
+#define G_TXPKT_IPHDR_LEN(x) (((x) >> S_TXPKT_IPHDR_LEN) & M_TXPKT_IPHDR_LEN)
+
+#define S_TXPKT_CSUM_LOC 30
+#define M_TXPKT_CSUM_LOC 0x3FF
+#define V_TXPKT_CSUM_LOC(x) ((__u64)(x) << S_TXPKT_CSUM_LOC)
+#define G_TXPKT_CSUM_LOC(x) (((x) >> S_TXPKT_CSUM_LOC) & M_TXPKT_CSUM_LOC)
+
+#define S_TXPKT_ETHHDR_LEN 34
+#define M_TXPKT_ETHHDR_LEN 0x3F
+#define V_TXPKT_ETHHDR_LEN(x) ((__u64)(x) << S_TXPKT_ETHHDR_LEN)
+#define G_TXPKT_ETHHDR_LEN(x) (((x) >> S_TXPKT_ETHHDR_LEN) & M_TXPKT_ETHHDR_LEN)
+
+#define S_TXPKT_CSUM_TYPE 40
+#define M_TXPKT_CSUM_TYPE 0xF
+#define V_TXPKT_CSUM_TYPE(x) ((__u64)(x) << S_TXPKT_CSUM_TYPE)
+#define G_TXPKT_CSUM_TYPE(x) (((x) >> S_TXPKT_CSUM_TYPE) & M_TXPKT_CSUM_TYPE)
+
+#define S_TXPKT_VLAN 44
+#define M_TXPKT_VLAN 0xFFFF
+#define V_TXPKT_VLAN(x) ((__u64)(x) << S_TXPKT_VLAN)
+#define G_TXPKT_VLAN(x) (((x) >> S_TXPKT_VLAN) & M_TXPKT_VLAN)
+
+#define S_TXPKT_VLAN_VLD 60
+#define V_TXPKT_VLAN_VLD(x) ((__u64)(x) << S_TXPKT_VLAN_VLD)
+#define F_TXPKT_VLAN_VLD V_TXPKT_VLAN_VLD(1ULL)
+
+#define S_TXPKT_IPSEC 61
+#define V_TXPKT_IPSEC(x) ((__u64)(x) << S_TXPKT_IPSEC)
+#define F_TXPKT_IPSEC V_TXPKT_IPSEC(1ULL)
+
+#define S_TXPKT_IPCSUM_DIS 62
+#define V_TXPKT_IPCSUM_DIS(x) ((__u64)(x) << S_TXPKT_IPCSUM_DIS)
+#define F_TXPKT_IPCSUM_DIS V_TXPKT_IPCSUM_DIS(1ULL)
+
+#define S_TXPKT_L4CSUM_DIS 63
+#define V_TXPKT_L4CSUM_DIS(x) ((__u64)(x) << S_TXPKT_L4CSUM_DIS)
+#define F_TXPKT_L4CSUM_DIS V_TXPKT_L4CSUM_DIS(1ULL)
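+
+/*
+ * Illustrative sketch (editor's note, not part of this header as committed):
+ * fill in the core of a CPL_TX_PKT work request with the ctrl0/ctrl1 fields
+ * above.  The interface number and the choice to disable checksum offload
+ * are arbitrary example values.
+ */
+static inline void tx_pkt_core_example(struct cpl_tx_pkt_core *cpl,
+                                       unsigned int intf, unsigned int len)
+{
+        cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT) |
+            V_TXPKT_INTF(intf) | V_TXPKT_PF(0));
+        cpl->pack = 0;
+        cpl->len = htons(len);
+        cpl->ctrl1 = cpu_to_be64(F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS);
+}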
+
+struct cpl_tx_pkt_lso {
+ __be32 lso_ctrl;
+ __be16 ipid_ofst;
+ __be16 mss;
+ __be32 seqno_offset;
+ __be32 len;
+ /* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */
+};
+
+/* cpl_tx_pkt_lso.lso_ctrl fields */
+#define S_LSO_TCPHDR_LEN 0
+#define M_LSO_TCPHDR_LEN 0xF
+#define V_LSO_TCPHDR_LEN(x) ((x) << S_LSO_TCPHDR_LEN)
+#define G_LSO_TCPHDR_LEN(x) (((x) >> S_LSO_TCPHDR_LEN) & M_LSO_TCPHDR_LEN)
+
+#define S_LSO_IPHDR_LEN 4
+#define M_LSO_IPHDR_LEN 0xFFF
+#define V_LSO_IPHDR_LEN(x) ((x) << S_LSO_IPHDR_LEN)
+#define G_LSO_IPHDR_LEN(x) (((x) >> S_LSO_IPHDR_LEN) & M_LSO_IPHDR_LEN)
+
+#define S_LSO_ETHHDR_LEN 16
+#define M_LSO_ETHHDR_LEN 0xF
+#define V_LSO_ETHHDR_LEN(x) ((x) << S_LSO_ETHHDR_LEN)
+#define G_LSO_ETHHDR_LEN(x) (((x) >> S_LSO_ETHHDR_LEN) & M_LSO_ETHHDR_LEN)
+
+#define S_LSO_IPV6 20
+#define V_LSO_IPV6(x) ((x) << S_LSO_IPV6)
+#define F_LSO_IPV6 V_LSO_IPV6(1U)
+
+#define S_LSO_OFLD_ENCAP 21
+#define V_LSO_OFLD_ENCAP(x) ((x) << S_LSO_OFLD_ENCAP)
+#define F_LSO_OFLD_ENCAP V_LSO_OFLD_ENCAP(1U)
+
+#define S_LSO_LAST_SLICE 22
+#define V_LSO_LAST_SLICE(x) ((x) << S_LSO_LAST_SLICE)
+#define F_LSO_LAST_SLICE V_LSO_LAST_SLICE(1U)
+
+#define S_LSO_FIRST_SLICE 23
+#define V_LSO_FIRST_SLICE(x) ((x) << S_LSO_FIRST_SLICE)
+#define F_LSO_FIRST_SLICE V_LSO_FIRST_SLICE(1U)
+
+#define S_LSO_OPCODE 24
+#define M_LSO_OPCODE 0xFF
+#define V_LSO_OPCODE(x) ((x) << S_LSO_OPCODE)
+#define G_LSO_OPCODE(x) (((x) >> S_LSO_OPCODE) & M_LSO_OPCODE)
+
+/* cpl_tx_pkt_lso.mss fields */
+#define S_LSO_MSS 0
+#define M_LSO_MSS 0x3FFF
+#define V_LSO_MSS(x) ((x) << S_LSO_MSS)
+#define G_LSO_MSS(x) (((x) >> S_LSO_MSS) & M_LSO_MSS)
+
+#define S_LSO_IPID_SPLIT 15
+#define V_LSO_IPID_SPLIT(x) ((x) << S_LSO_IPID_SPLIT)
+#define F_LSO_IPID_SPLIT V_LSO_IPID_SPLIT(1U)
+
+struct cpl_tx_pkt_coalesce {
+ __be32 cntrl;
+ __be32 len;
+ __be64 addr;
+};
+
+struct tx_pkt_coalesce_wr {
+ WR_HDR;
+#if !(defined C99_NOT_SUPPORTED)
+ struct cpl_tx_pkt_coalesce cpl[0];
+#endif
+};
+
+struct mngt_pktsched_wr {
+ __be32 wr_hi;
+ __be32 wr_lo;
+ __u8 mngt_opcode;
+ __u8 rsvd[7];
+ __u8 sched;
+ __u8 idx;
+ __u8 min;
+ __u8 max;
+ __u8 binding;
+ __u8 rsvd1[3];
+};
+
+struct cpl_iscsi_hdr_no_rss {
+ union opcode_tid ot;
+ __be16 pdu_len_ddp;
+ __be16 len;
+ __be32 seq;
+ __be16 urg;
+ __u8 rsvd;
+ __u8 status;
+};
+
+struct cpl_iscsi_hdr {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 pdu_len_ddp;
+ __be16 len;
+ __be32 seq;
+ __be16 urg;
+ __u8 rsvd;
+ __u8 status;
+};
+
+/* cpl_iscsi_hdr.pdu_len_ddp fields */
+#define S_ISCSI_PDU_LEN 0
+#define M_ISCSI_PDU_LEN 0x7FFF
+#define V_ISCSI_PDU_LEN(x) ((x) << S_ISCSI_PDU_LEN)
+#define G_ISCSI_PDU_LEN(x) (((x) >> S_ISCSI_PDU_LEN) & M_ISCSI_PDU_LEN)
+
+#define S_ISCSI_DDP 15
+#define V_ISCSI_DDP(x) ((x) << S_ISCSI_DDP)
+#define F_ISCSI_DDP V_ISCSI_DDP(1U)
+
+struct cpl_rx_data {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 rsvd;
+ __be16 len;
+ __be32 seq;
+ __be16 urg;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 dack_mode:2;
+ __u8 psh:1;
+ __u8 heartbeat:1;
+ __u8 ddp_off:1;
+ __u8 :3;
+#else
+ __u8 :3;
+ __u8 ddp_off:1;
+ __u8 heartbeat:1;
+ __u8 psh:1;
+ __u8 dack_mode:2;
+#endif
+ __u8 status;
+};
+
+struct cpl_fcoe_hdr {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 oxid;
+ __be16 len;
+ __be32 rctl_fctl;
+ __u8 cs_ctl;
+ __u8 df_ctl;
+ __u8 sof;
+ __u8 eof;
+ __be16 seq_cnt;
+ __u8 seq_id;
+ __u8 type;
+ __be32 param;
+};
+
+struct cpl_rx_urg_notify {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 seq;
+};
+
+struct cpl_rx_urg_pkt {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 rsvd;
+ __be16 len;
+};
+
+struct cpl_rx_data_ack {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 credit_dack;
+};
+
+/* cpl_rx_data_ack.credit_dack fields */
+#define S_RX_CREDITS 0
+#define M_RX_CREDITS 0x3FFFFFF
+#define V_RX_CREDITS(x) ((x) << S_RX_CREDITS)
+#define G_RX_CREDITS(x) (((x) >> S_RX_CREDITS) & M_RX_CREDITS)
+
+#define S_RX_MODULATE_TX 26
+#define V_RX_MODULATE_TX(x) ((x) << S_RX_MODULATE_TX)
+#define F_RX_MODULATE_TX V_RX_MODULATE_TX(1U)
+
+#define S_RX_MODULATE_RX 27
+#define V_RX_MODULATE_RX(x) ((x) << S_RX_MODULATE_RX)
+#define F_RX_MODULATE_RX V_RX_MODULATE_RX(1U)
+
+#define S_RX_FORCE_ACK 28
+#define V_RX_FORCE_ACK(x) ((x) << S_RX_FORCE_ACK)
+#define F_RX_FORCE_ACK V_RX_FORCE_ACK(1U)
+
+#define S_RX_DACK_MODE 29
+#define M_RX_DACK_MODE 0x3
+#define V_RX_DACK_MODE(x) ((x) << S_RX_DACK_MODE)
+#define G_RX_DACK_MODE(x) (((x) >> S_RX_DACK_MODE) & M_RX_DACK_MODE)
+
+#define S_RX_DACK_CHANGE 31
+#define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE)
+#define F_RX_DACK_CHANGE V_RX_DACK_CHANGE(1U)
+
+struct cpl_rx_ddp_complete {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 ddp_report;
+ __be32 rcv_nxt;
+ __be32 rsvd;
+};
+
+struct cpl_rx_data_ddp {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 urg;
+ __be16 len;
+ __be32 seq;
+ union {
+ __be32 nxt_seq;
+ __be32 ddp_report;
+ } u;
+ __be32 ulp_crc;
+ __be32 ddpvld;
+};
+
+struct cpl_rx_fcoe_ddp {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 rsvd;
+ __be16 len;
+ __be32 seq;
+ __be32 ddp_report;
+ __be32 ulp_crc;
+ __be32 ddpvld;
+};
+
+/* cpl_rx_{data,fcoe}_ddp.ddpvld fields */
+#define S_DDP_VALID 15
+#define M_DDP_VALID 0x1FFFF
+#define V_DDP_VALID(x) ((x) << S_DDP_VALID)
+#define G_DDP_VALID(x) (((x) >> S_DDP_VALID) & M_DDP_VALID)
+
+#define S_DDP_PPOD_MISMATCH 15
+#define V_DDP_PPOD_MISMATCH(x) ((x) << S_DDP_PPOD_MISMATCH)
+#define F_DDP_PPOD_MISMATCH V_DDP_PPOD_MISMATCH(1U)
+
+#define S_DDP_PDU 16
+#define V_DDP_PDU(x) ((x) << S_DDP_PDU)
+#define F_DDP_PDU V_DDP_PDU(1U)
+
+#define S_DDP_LLIMIT_ERR 17
+#define V_DDP_LLIMIT_ERR(x) ((x) << S_DDP_LLIMIT_ERR)
+#define F_DDP_LLIMIT_ERR V_DDP_LLIMIT_ERR(1U)
+
+#define S_DDP_PPOD_PARITY_ERR 18
+#define V_DDP_PPOD_PARITY_ERR(x) ((x) << S_DDP_PPOD_PARITY_ERR)
+#define F_DDP_PPOD_PARITY_ERR V_DDP_PPOD_PARITY_ERR(1U)
+
+#define S_DDP_PADDING_ERR 19
+#define V_DDP_PADDING_ERR(x) ((x) << S_DDP_PADDING_ERR)
+#define F_DDP_PADDING_ERR V_DDP_PADDING_ERR(1U)
+
+#define S_DDP_HDRCRC_ERR 20
+#define V_DDP_HDRCRC_ERR(x) ((x) << S_DDP_HDRCRC_ERR)
+#define F_DDP_HDRCRC_ERR V_DDP_HDRCRC_ERR(1U)
+
+#define S_DDP_DATACRC_ERR 21
+#define V_DDP_DATACRC_ERR(x) ((x) << S_DDP_DATACRC_ERR)
+#define F_DDP_DATACRC_ERR V_DDP_DATACRC_ERR(1U)
+
+#define S_DDP_INVALID_TAG 22
+#define V_DDP_INVALID_TAG(x) ((x) << S_DDP_INVALID_TAG)
+#define F_DDP_INVALID_TAG V_DDP_INVALID_TAG(1U)
+
+#define S_DDP_ULIMIT_ERR 23
+#define V_DDP_ULIMIT_ERR(x) ((x) << S_DDP_ULIMIT_ERR)
+#define F_DDP_ULIMIT_ERR V_DDP_ULIMIT_ERR(1U)
+
+#define S_DDP_OFFSET_ERR 24
+#define V_DDP_OFFSET_ERR(x) ((x) << S_DDP_OFFSET_ERR)
+#define F_DDP_OFFSET_ERR V_DDP_OFFSET_ERR(1U)
+
+#define S_DDP_COLOR_ERR 25
+#define V_DDP_COLOR_ERR(x) ((x) << S_DDP_COLOR_ERR)
+#define F_DDP_COLOR_ERR V_DDP_COLOR_ERR(1U)
+
+#define S_DDP_TID_MISMATCH 26
+#define V_DDP_TID_MISMATCH(x) ((x) << S_DDP_TID_MISMATCH)
+#define F_DDP_TID_MISMATCH V_DDP_TID_MISMATCH(1U)
+
+#define S_DDP_INVALID_PPOD 27
+#define V_DDP_INVALID_PPOD(x) ((x) << S_DDP_INVALID_PPOD)
+#define F_DDP_INVALID_PPOD V_DDP_INVALID_PPOD(1U)
+
+#define S_DDP_ULP_MODE 28
+#define M_DDP_ULP_MODE 0xF
+#define V_DDP_ULP_MODE(x) ((x) << S_DDP_ULP_MODE)
+#define G_DDP_ULP_MODE(x) (((x) >> S_DDP_ULP_MODE) & M_DDP_ULP_MODE)
+
+/* cpl_rx_{data,fcoe}_ddp.ddp_report fields */
+#define S_DDP_OFFSET 0
+#define M_DDP_OFFSET 0xFFFFFF
+#define V_DDP_OFFSET(x) ((x) << S_DDP_OFFSET)
+#define G_DDP_OFFSET(x) (((x) >> S_DDP_OFFSET) & M_DDP_OFFSET)
+
+#define S_DDP_DACK_MODE 24
+#define M_DDP_DACK_MODE 0x3
+#define V_DDP_DACK_MODE(x) ((x) << S_DDP_DACK_MODE)
+#define G_DDP_DACK_MODE(x) (((x) >> S_DDP_DACK_MODE) & M_DDP_DACK_MODE)
+
+#define S_DDP_BUF_IDX 26
+#define V_DDP_BUF_IDX(x) ((x) << S_DDP_BUF_IDX)
+#define F_DDP_BUF_IDX V_DDP_BUF_IDX(1U)
+
+#define S_DDP_URG 27
+#define V_DDP_URG(x) ((x) << S_DDP_URG)
+#define F_DDP_URG V_DDP_URG(1U)
+
+#define S_DDP_PSH 28
+#define V_DDP_PSH(x) ((x) << S_DDP_PSH)
+#define F_DDP_PSH V_DDP_PSH(1U)
+
+#define S_DDP_BUF_COMPLETE 29
+#define V_DDP_BUF_COMPLETE(x) ((x) << S_DDP_BUF_COMPLETE)
+#define F_DDP_BUF_COMPLETE V_DDP_BUF_COMPLETE(1U)
+
+#define S_DDP_BUF_TIMED_OUT 30
+#define V_DDP_BUF_TIMED_OUT(x) ((x) << S_DDP_BUF_TIMED_OUT)
+#define F_DDP_BUF_TIMED_OUT V_DDP_BUF_TIMED_OUT(1U)
+
+#define S_DDP_INV 31
+#define V_DDP_INV(x) ((x) << S_DDP_INV)
+#define F_DDP_INV V_DDP_INV(1U)
+
+struct cpl_rx_pkt {
+ RSS_HDR
+ __u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 iff:4;
+ __u8 csum_calc:1;
+ __u8 ipmi_pkt:1;
+ __u8 vlan_ex:1;
+ __u8 ip_frag:1;
+#else
+ __u8 ip_frag:1;
+ __u8 vlan_ex:1;
+ __u8 ipmi_pkt:1;
+ __u8 csum_calc:1;
+ __u8 iff:4;
+#endif
+ __be16 csum;
+ __be16 vlan;
+ __be16 len;
+ __be32 l2info;
+ __be16 hdr_len;
+ __be16 err_vec;
+};
+
+/* rx_pkt.l2info fields */
+#define S_RX_ETHHDR_LEN 0
+#define M_RX_ETHHDR_LEN 0x1F
+#define V_RX_ETHHDR_LEN(x) ((x) << S_RX_ETHHDR_LEN)
+#define G_RX_ETHHDR_LEN(x) (((x) >> S_RX_ETHHDR_LEN) & M_RX_ETHHDR_LEN)
+
+#define S_RX_PKTYPE 5
+#define M_RX_PKTYPE 0x7
+#define V_RX_PKTYPE(x) ((x) << S_RX_PKTYPE)
+#define G_RX_PKTYPE(x) (((x) >> S_RX_PKTYPE) & M_RX_PKTYPE)
+
+#define S_RX_MACIDX 8
+#define M_RX_MACIDX 0x1FF
+#define V_RX_MACIDX(x) ((x) << S_RX_MACIDX)
+#define G_RX_MACIDX(x) (((x) >> S_RX_MACIDX) & M_RX_MACIDX)
+
+#define S_RX_DATYPE 18
+#define M_RX_DATYPE 0x3
+#define V_RX_DATYPE(x) ((x) << S_RX_DATYPE)
+#define G_RX_DATYPE(x) (((x) >> S_RX_DATYPE) & M_RX_DATYPE)
+
+#define S_RXF_PSH 20
+#define V_RXF_PSH(x) ((x) << S_RXF_PSH)
+#define F_RXF_PSH V_RXF_PSH(1U)
+
+#define S_RXF_SYN 21
+#define V_RXF_SYN(x) ((x) << S_RXF_SYN)
+#define F_RXF_SYN V_RXF_SYN(1U)
+
+#define S_RXF_UDP 22
+#define V_RXF_UDP(x) ((x) << S_RXF_UDP)
+#define F_RXF_UDP V_RXF_UDP(1U)
+
+#define S_RXF_TCP 23
+#define V_RXF_TCP(x) ((x) << S_RXF_TCP)
+#define F_RXF_TCP V_RXF_TCP(1U)
+
+#define S_RXF_IP 24
+#define V_RXF_IP(x) ((x) << S_RXF_IP)
+#define F_RXF_IP V_RXF_IP(1U)
+
+#define S_RXF_IP6 25
+#define V_RXF_IP6(x) ((x) << S_RXF_IP6)
+#define F_RXF_IP6 V_RXF_IP6(1U)
+
+#define S_RXF_SYN_COOKIE 26
+#define V_RXF_SYN_COOKIE(x) ((x) << S_RXF_SYN_COOKIE)
+#define F_RXF_SYN_COOKIE V_RXF_SYN_COOKIE(1U)
+
+#define S_RXF_FCOE 26
+#define V_RXF_FCOE(x) ((x) << S_RXF_FCOE)
+#define F_RXF_FCOE V_RXF_FCOE(1U)
+
+#define S_RXF_LRO 27
+#define V_RXF_LRO(x) ((x) << S_RXF_LRO)
+#define F_RXF_LRO V_RXF_LRO(1U)
+
+#define S_RX_CHAN 28
+#define M_RX_CHAN 0xF
+#define V_RX_CHAN(x) ((x) << S_RX_CHAN)
+#define G_RX_CHAN(x) (((x) >> S_RX_CHAN) & M_RX_CHAN)
+
+/* rx_pkt.hdr_len fields */
+#define S_RX_TCPHDR_LEN 0
+#define M_RX_TCPHDR_LEN 0x3F
+#define V_RX_TCPHDR_LEN(x) ((x) << S_RX_TCPHDR_LEN)
+#define G_RX_TCPHDR_LEN(x) (((x) >> S_RX_TCPHDR_LEN) & M_RX_TCPHDR_LEN)
+
+#define S_RX_IPHDR_LEN 6
+#define M_RX_IPHDR_LEN 0x3FF
+#define V_RX_IPHDR_LEN(x) ((x) << S_RX_IPHDR_LEN)
+#define G_RX_IPHDR_LEN(x) (((x) >> S_RX_IPHDR_LEN) & M_RX_IPHDR_LEN)
+
+/* rx_pkt.err_vec fields */
+#define S_RXERR_OR 0
+#define V_RXERR_OR(x) ((x) << S_RXERR_OR)
+#define F_RXERR_OR V_RXERR_OR(1U)
+
+#define S_RXERR_MAC 1
+#define V_RXERR_MAC(x) ((x) << S_RXERR_MAC)
+#define F_RXERR_MAC V_RXERR_MAC(1U)
+
+#define S_RXERR_IPVERS 2
+#define V_RXERR_IPVERS(x) ((x) << S_RXERR_IPVERS)
+#define F_RXERR_IPVERS V_RXERR_IPVERS(1U)
+
+#define S_RXERR_FRAG 3
+#define V_RXERR_FRAG(x) ((x) << S_RXERR_FRAG)
+#define F_RXERR_FRAG V_RXERR_FRAG(1U)
+
+#define S_RXERR_ATTACK 4
+#define V_RXERR_ATTACK(x) ((x) << S_RXERR_ATTACK)
+#define F_RXERR_ATTACK V_RXERR_ATTACK(1U)
+
+#define S_RXERR_ETHHDR_LEN 5
+#define V_RXERR_ETHHDR_LEN(x) ((x) << S_RXERR_ETHHDR_LEN)
+#define F_RXERR_ETHHDR_LEN V_RXERR_ETHHDR_LEN(1U)
+
+#define S_RXERR_IPHDR_LEN 6
+#define V_RXERR_IPHDR_LEN(x) ((x) << S_RXERR_IPHDR_LEN)
+#define F_RXERR_IPHDR_LEN V_RXERR_IPHDR_LEN(1U)
+
+#define S_RXERR_TCPHDR_LEN 7
+#define V_RXERR_TCPHDR_LEN(x) ((x) << S_RXERR_TCPHDR_LEN)
+#define F_RXERR_TCPHDR_LEN V_RXERR_TCPHDR_LEN(1U)
+
+#define S_RXERR_PKT_LEN 8
+#define V_RXERR_PKT_LEN(x) ((x) << S_RXERR_PKT_LEN)
+#define F_RXERR_PKT_LEN V_RXERR_PKT_LEN(1U)
+
+#define S_RXERR_TCP_OPT 9
+#define V_RXERR_TCP_OPT(x) ((x) << S_RXERR_TCP_OPT)
+#define F_RXERR_TCP_OPT V_RXERR_TCP_OPT(1U)
+
+#define S_RXERR_IPCSUM 12
+#define V_RXERR_IPCSUM(x) ((x) << S_RXERR_IPCSUM)
+#define F_RXERR_IPCSUM V_RXERR_IPCSUM(1U)
+
+#define S_RXERR_CSUM 13
+#define V_RXERR_CSUM(x) ((x) << S_RXERR_CSUM)
+#define F_RXERR_CSUM V_RXERR_CSUM(1U)
+
+#define S_RXERR_PING 14
+#define V_RXERR_PING(x) ((x) << S_RXERR_PING)
+#define F_RXERR_PING V_RXERR_PING(1U)
+
+struct cpl_trace_pkt {
+ RSS_HDR
+ __u8 opcode;
+ __u8 intf;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 runt:4;
+ __u8 filter_hit:4;
+ __u8 :6;
+ __u8 err:1;
+ __u8 trunc:1;
+#else
+ __u8 filter_hit:4;
+ __u8 runt:4;
+ __u8 trunc:1;
+ __u8 err:1;
+ __u8 :6;
+#endif
+ __be16 rsvd;
+ __be16 len;
+ __be64 tstamp;
+};
+
+struct cpl_rte_delete_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 params;
+};
+
+/* {cpl_rte_delete_req, cpl_rte_read_req}.params fields */
+#define S_RTE_REQ_LUT_IX 8
+#define M_RTE_REQ_LUT_IX 0x7FF
+#define V_RTE_REQ_LUT_IX(x) ((x) << S_RTE_REQ_LUT_IX)
+#define G_RTE_REQ_LUT_IX(x) (((x) >> S_RTE_REQ_LUT_IX) & M_RTE_REQ_LUT_IX)
+
+#define S_RTE_REQ_LUT_BASE 19
+#define M_RTE_REQ_LUT_BASE 0x7FF
+#define V_RTE_REQ_LUT_BASE(x) ((x) << S_RTE_REQ_LUT_BASE)
+#define G_RTE_REQ_LUT_BASE(x) (((x) >> S_RTE_REQ_LUT_BASE) & M_RTE_REQ_LUT_BASE)
+
+#define S_RTE_READ_REQ_SELECT 31
+#define V_RTE_READ_REQ_SELECT(x) ((x) << S_RTE_READ_REQ_SELECT)
+#define F_RTE_READ_REQ_SELECT V_RTE_READ_REQ_SELECT(1U)
+
+struct cpl_rte_delete_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __u8 status;
+ __u8 rsvd[3];
+};
+
+struct cpl_rte_write_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __u32 write_sel;
+ __be32 lut_params;
+ __be32 l2t_idx;
+ __be32 netmask;
+ __be32 faddr;
+};
+
+/* cpl_rte_write_req.write_sel fields */
+#define S_RTE_WR_L2TIDX 31
+#define V_RTE_WR_L2TIDX(x) ((x) << S_RTE_WR_L2TIDX)
+#define F_RTE_WR_L2TIDX V_RTE_WR_L2TIDX(1U)
+
+#define S_RTE_WR_FADDR 30
+#define V_RTE_WR_FADDR(x) ((x) << S_RTE_WR_FADDR)
+#define F_RTE_WR_FADDR V_RTE_WR_FADDR(1U)
+
+/* cpl_rte_write_req.lut_params fields */
+#define S_RTE_WR_LUT_IX 10
+#define M_RTE_WR_LUT_IX 0x7FF
+#define V_RTE_WR_LUT_IX(x) ((x) << S_RTE_WR_LUT_IX)
+#define G_RTE_WR_LUT_IX(x) (((x) >> S_RTE_WR_LUT_IX) & M_RTE_WR_LUT_IX)
+
+#define S_RTE_WR_LUT_BASE 21
+#define M_RTE_WR_LUT_BASE 0x7FF
+#define V_RTE_WR_LUT_BASE(x) ((x) << S_RTE_WR_LUT_BASE)
+#define G_RTE_WR_LUT_BASE(x) (((x) >> S_RTE_WR_LUT_BASE) & M_RTE_WR_LUT_BASE)
+
+struct cpl_rte_write_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __u8 status;
+ __u8 rsvd[3];
+};
+
+struct cpl_rte_read_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 params;
+};
+
+struct cpl_rte_read_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __u8 status;
+ __u8 rsvd;
+ __be16 l2t_idx;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u32 :30;
+ __u32 select:1;
+#else
+ __u32 select:1;
+ __u32 :30;
+#endif
+ __be32 addr;
+};
+
+struct cpl_l2t_write_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 params;
+ __be16 l2t_idx;
+ __be16 vlan;
+ __u8 dst_mac[6];
+};
+
+/* cpl_l2t_write_req.params fields */
+#define S_L2T_W_INFO 2
+#define M_L2T_W_INFO 0x3F
+#define V_L2T_W_INFO(x) ((x) << S_L2T_W_INFO)
+#define G_L2T_W_INFO(x) (((x) >> S_L2T_W_INFO) & M_L2T_W_INFO)
+
+#define S_L2T_W_PORT 8
+#define M_L2T_W_PORT 0xF
+#define V_L2T_W_PORT(x) ((x) << S_L2T_W_PORT)
+#define G_L2T_W_PORT(x) (((x) >> S_L2T_W_PORT) & M_L2T_W_PORT)
+
+#define S_L2T_W_NOREPLY 15
+#define V_L2T_W_NOREPLY(x) ((x) << S_L2T_W_NOREPLY)
+#define F_L2T_W_NOREPLY V_L2T_W_NOREPLY(1U)
+
+struct cpl_l2t_write_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __u8 status;
+ __u8 rsvd[3];
+};
+
+struct cpl_l2t_read_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 l2t_idx;
+};
+
+struct cpl_l2t_read_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __u8 status;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 :4;
+ __u8 iff:4;
+#else
+ __u8 iff:4;
+ __u8 :4;
+#endif
+ __be16 vlan;
+ __be16 info;
+ __u8 dst_mac[6];
+};
+
+struct cpl_smt_write_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 params;
+ __be16 pfvf1;
+ __u8 src_mac1[6];
+ __be16 pfvf0;
+ __u8 src_mac0[6];
+};
+
+/* cpl_smt_{read,write}_req.params fields */
+#define S_SMTW_OVLAN_IDX 16
+#define M_SMTW_OVLAN_IDX 0xF
+#define V_SMTW_OVLAN_IDX(x) ((x) << S_SMTW_OVLAN_IDX)
+#define G_SMTW_OVLAN_IDX(x) (((x) >> S_SMTW_OVLAN_IDX) & M_SMTW_OVLAN_IDX)
+
+#define S_SMTW_IDX 20
+#define M_SMTW_IDX 0x7F
+#define V_SMTW_IDX(x) ((x) << S_SMTW_IDX)
+#define G_SMTW_IDX(x) (((x) >> S_SMTW_IDX) & M_SMTW_IDX)
+
+#define S_SMTW_NORPL 31
+#define V_SMTW_NORPL(x) ((x) << S_SMTW_NORPL)
+#define F_SMTW_NORPL V_SMTW_NORPL(1U)
+
+/* cpl_smt_{read,write}_req.pfvf? fields */
+#define S_SMTW_VF 0
+#define M_SMTW_VF 0xFF
+#define V_SMTW_VF(x) ((x) << S_SMTW_VF)
+#define G_SMTW_VF(x) (((x) >> S_SMTW_VF) & M_SMTW_VF)
+
+#define S_SMTW_PF 8
+#define M_SMTW_PF 0x7
+#define V_SMTW_PF(x) ((x) << S_SMTW_PF)
+#define G_SMTW_PF(x) (((x) >> S_SMTW_PF) & M_SMTW_PF)
+
+#define S_SMTW_VF_VLD 11
+#define V_SMTW_VF_VLD(x) ((x) << S_SMTW_VF_VLD)
+#define F_SMTW_VF_VLD V_SMTW_VF_VLD(1U)
+
+struct cpl_smt_write_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __u8 status;
+ __u8 rsvd[3];
+};
+
+struct cpl_smt_read_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 params;
+};
+
+struct cpl_smt_read_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __u8 status;
+ __u8 ovlan_idx;
+ __be16 rsvd;
+ __be16 pfvf1;
+ __u8 src_mac1[6];
+ __be16 pfvf0;
+ __u8 src_mac0[6];
+};
+
+struct cpl_barrier {
+ WR_HDR;
+ __u8 opcode;
+ __u8 chan_map;
+ __be16 rsvd0;
+ __be32 rsvd1;
+};
+
+/* cpl_barrier.chan_map fields */
+#define S_CHAN_MAP 4
+#define M_CHAN_MAP 0xF
+#define V_CHAN_MAP(x) ((x) << S_CHAN_MAP)
+#define G_CHAN_MAP(x) (((x) >> S_CHAN_MAP) & M_CHAN_MAP)
+
+struct cpl_error {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 error;
+};
+
+struct cpl_hit_notify {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 rsvd;
+ __be32 info;
+ __be32 reason;
+};
+
+struct cpl_pkt_notify {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 rsvd;
+ __be16 len;
+ __be32 info;
+ __be32 reason;
+};
+
+/* cpl_{hit,pkt}_notify.info fields */
+#define S_NTFY_MAC_IDX 0
+#define M_NTFY_MAC_IDX 0x1FF
+#define V_NTFY_MAC_IDX(x) ((x) << S_NTFY_MAC_IDX)
+#define G_NTFY_MAC_IDX(x) (((x) >> S_NTFY_MAC_IDX) & M_NTFY_MAC_IDX)
+
+#define S_NTFY_INTF 10
+#define M_NTFY_INTF 0xF
+#define V_NTFY_INTF(x) ((x) << S_NTFY_INTF)
+#define G_NTFY_INTF(x) (((x) >> S_NTFY_INTF) & M_NTFY_INTF)
+
+#define S_NTFY_TCPHDR_LEN 14
+#define M_NTFY_TCPHDR_LEN 0xF
+#define V_NTFY_TCPHDR_LEN(x) ((x) << S_NTFY_TCPHDR_LEN)
+#define G_NTFY_TCPHDR_LEN(x) (((x) >> S_NTFY_TCPHDR_LEN) & M_NTFY_TCPHDR_LEN)
+
+#define S_NTFY_IPHDR_LEN 18
+#define M_NTFY_IPHDR_LEN 0x1FF
+#define V_NTFY_IPHDR_LEN(x) ((x) << S_NTFY_IPHDR_LEN)
+#define G_NTFY_IPHDR_LEN(x) (((x) >> S_NTFY_IPHDR_LEN) & M_NTFY_IPHDR_LEN)
+
+#define S_NTFY_ETHHDR_LEN 27
+#define M_NTFY_ETHHDR_LEN 0x1F
+#define V_NTFY_ETHHDR_LEN(x) ((x) << S_NTFY_ETHHDR_LEN)
+#define G_NTFY_ETHHDR_LEN(x) (((x) >> S_NTFY_ETHHDR_LEN) & M_NTFY_ETHHDR_LEN)
+
+struct cpl_rdma_terminate {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 rsvd;
+ __be16 len;
+};
+
+struct cpl_set_le_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 reply_ctrl;
+ __be16 params;
+ __be64 mask_hi;
+ __be64 mask_lo;
+ __be64 val_hi;
+ __be64 val_lo;
+};
+
+/* cpl_set_le_req.reply_ctrl additional fields */
+#define S_LE_REQ_IP6 13
+#define V_LE_REQ_IP6(x) ((x) << S_LE_REQ_IP6)
+#define F_LE_REQ_IP6 V_LE_REQ_IP6(1U)
+
+/* cpl_set_le_req.params fields */
+#define S_LE_CHAN 0
+#define M_LE_CHAN 0x3
+#define V_LE_CHAN(x) ((x) << S_LE_CHAN)
+#define G_LE_CHAN(x) (((x) >> S_LE_CHAN) & M_LE_CHAN)
+
+#define S_LE_OFFSET 5
+#define M_LE_OFFSET 0x7
+#define V_LE_OFFSET(x) ((x) << S_LE_OFFSET)
+#define G_LE_OFFSET(x) (((x) >> S_LE_OFFSET) & M_LE_OFFSET)
+
+#define S_LE_MORE 8
+#define V_LE_MORE(x) ((x) << S_LE_MORE)
+#define F_LE_MORE V_LE_MORE(1U)
+
+#define S_LE_REQSIZE 9
+#define M_LE_REQSIZE 0x7
+#define V_LE_REQSIZE(x) ((x) << S_LE_REQSIZE)
+#define G_LE_REQSIZE(x) (((x) >> S_LE_REQSIZE) & M_LE_REQSIZE)
+
+#define S_LE_REQCMD 12
+#define M_LE_REQCMD 0xF
+#define V_LE_REQCMD(x) ((x) << S_LE_REQCMD)
+#define G_LE_REQCMD(x) (((x) >> S_LE_REQCMD) & M_LE_REQCMD)
+
+struct cpl_set_le_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __u8 chan;
+ __u8 info;
+ __be16 len;
+};
+
+/* cpl_set_le_rpl.info fields */
+#define S_LE_RSPCMD 0
+#define M_LE_RSPCMD 0xF
+#define V_LE_RSPCMD(x) ((x) << S_LE_RSPCMD)
+#define G_LE_RSPCMD(x) (((x) >> S_LE_RSPCMD) & M_LE_RSPCMD)
+
+#define S_LE_RSPSIZE 4
+#define M_LE_RSPSIZE 0x7
+#define V_LE_RSPSIZE(x) ((x) << S_LE_RSPSIZE)
+#define G_LE_RSPSIZE(x) (((x) >> S_LE_RSPSIZE) & M_LE_RSPSIZE)
+
+#define S_LE_RSPTYPE 7
+#define V_LE_RSPTYPE(x) ((x) << S_LE_RSPTYPE)
+#define F_LE_RSPTYPE V_LE_RSPTYPE(1U)
+
+struct cpl_sge_egr_update {
+ RSS_HDR
+ __be32 opcode_qid;
+ __be16 cidx;
+ __be16 pidx;
+};
+
+/* cpl_sge_egr_update.opcode_qid fields */
+#define S_EGR_QID 0
+#define M_EGR_QID 0x1FFFF
+#define V_EGR_QID(x) ((x) << S_EGR_QID)
+#define G_EGR_QID(x) (((x) >> S_EGR_QID) & M_EGR_QID)
+
+struct cpl_fw2_pld {
+ RSS_HDR
+ u8 opcode;
+ u8 rsvd[5];
+ __be16 len;
+};
+
+struct cpl_fw4_pld {
+ RSS_HDR
+ u8 opcode;
+ u8 rsvd0[3];
+ u8 type;
+ u8 rsvd1;
+ __be16 len;
+ __be64 data;
+ __be64 rsvd2;
+};
+
+struct cpl_fw6_pld {
+ RSS_HDR
+ u8 opcode;
+ u8 rsvd[5];
+ __be16 len;
+ __be64 data[4];
+};
+
+struct cpl_fw2_msg {
+ RSS_HDR
+ union opcode_info oi;
+};
+
+struct cpl_fw4_msg {
+ RSS_HDR
+ u8 opcode;
+ u8 type;
+ __be16 rsvd0;
+ __be32 rsvd1;
+ __be64 data[2];
+};
+
+struct cpl_fw4_ack {
+ RSS_HDR
+ union opcode_tid ot;
+ u8 credits;
+ u8 rsvd0[2];
+ u8 seq_vld;
+ __be32 snd_nxt;
+ __be32 snd_una;
+ __be64 rsvd1;
+};
+
+struct cpl_fw6_msg {
+ RSS_HDR
+ u8 opcode;
+ u8 type;
+ __be16 rsvd0;
+ __be32 rsvd1;
+ __be64 data[4];
+};
+
+/* cpl_fw6_msg.type values */
+enum {
+ FW6_TYPE_CMD_RPL = 0,
+};
+
+/* ULP_TX opcodes */
+enum {
+ ULP_TX_MEM_READ = 2,
+ ULP_TX_MEM_WRITE = 3,
+ ULP_TX_PKT = 4
+};
+
+enum {
+ ULP_TX_SC_NOOP = 0x80,
+ ULP_TX_SC_IMM = 0x81,
+ ULP_TX_SC_DSGL = 0x82,
+ ULP_TX_SC_ISGL = 0x83
+};
+
+#define S_ULPTX_CMD 24
+#define M_ULPTX_CMD 0xFF
+#define V_ULPTX_CMD(x) ((x) << S_ULPTX_CMD)
+
+#define S_ULPTX_LEN16 0
+#define M_ULPTX_LEN16 0xFF
+#define V_ULPTX_LEN16(x) ((x) << S_ULPTX_LEN16)
+
+#define S_ULP_TX_SC_MORE 23
+#define V_ULP_TX_SC_MORE(x) ((x) << S_ULP_TX_SC_MORE)
+#define F_ULP_TX_SC_MORE V_ULP_TX_SC_MORE(1U)
+
+struct ulptx_sge_pair {
+ __be32 len[2];
+ __be64 addr[2];
+};
+
+struct ulptx_sgl {
+ __be32 cmd_nsge;
+ __be32 len0;
+ __be64 addr0;
+#if !(defined C99_NOT_SUPPORTED)
+ struct ulptx_sge_pair sge[0];
+#endif
+};
+
+struct ulptx_isge {
+ __be32 stag;
+ __be32 len;
+ __be64 target_ofst;
+};
+
+struct ulptx_isgl {
+ __be32 cmd_nisge;
+ __be32 rsvd;
+#if !(defined C99_NOT_SUPPORTED)
+ struct ulptx_isge sge[0];
+#endif
+};
+
+struct ulptx_idata {
+ __be32 cmd_more;
+ __be32 len;
+};
+
+#define S_ULPTX_NSGE 0
+#define M_ULPTX_NSGE 0xFFFF
+#define V_ULPTX_NSGE(x) ((x) << S_ULPTX_NSGE)
+
+struct ulp_mem_io {
+ WR_HDR;
+ __be32 cmd;
+ __be32 len16; /* command length */
+ __be32 dlen; /* data length in 32-byte units */
+ __be32 lock_addr;
+};
+
+/* additional ulp_mem_io.cmd fields */
+#define S_ULP_MEMIO_ORDER 23
+#define V_ULP_MEMIO_ORDER(x) ((x) << S_ULP_MEMIO_ORDER)
+#define F_ULP_MEMIO_ORDER V_ULP_MEMIO_ORDER(1U)
+
+/* ulp_mem_io.lock_addr fields */
+#define S_ULP_MEMIO_ADDR 0
+#define M_ULP_MEMIO_ADDR 0x7FFFFFF
+#define V_ULP_MEMIO_ADDR(x) ((x) << S_ULP_MEMIO_ADDR)
+
+#define S_ULP_MEMIO_LOCK 31
+#define V_ULP_MEMIO_LOCK(x) ((x) << S_ULP_MEMIO_LOCK)
+#define F_ULP_MEMIO_LOCK V_ULP_MEMIO_LOCK(1U)
+
+/* ulp_mem_io.dlen fields */
+#define S_ULP_MEMIO_DATA_LEN 0
+#define M_ULP_MEMIO_DATA_LEN 0x1F
+#define V_ULP_MEMIO_DATA_LEN(x) ((x) << S_ULP_MEMIO_DATA_LEN)
+
+struct ulp_txpkt {
+ __be32 cmd_dest;
+ __be32 len;
+};
+
+/* ulp_txpkt.cmd_dest fields */
+#define S_ULP_TXPKT_DEST 16
+#define M_ULP_TXPKT_DEST 0x3
+#define V_ULP_TXPKT_DEST(x) ((x) << S_ULP_TXPKT_DEST)
+
+#define S_ULP_TXPKT_FID 4
+#define M_ULP_TXPKT_FID 0x7ff
+#define V_ULP_TXPKT_FID(x) ((x) << S_ULP_TXPKT_FID)
+
+#endif /* T4_MSG_H */
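A note on the macro naming used throughout this header (and in t4_regs.h below): for each hardware field FOO, S_FOO is its bit offset, M_FOO its unshifted mask, V_FOO(x) shifts a value into place, G_FOO(x) extracts it from a register or message word, and F_FOO is the single-bit form V_FOO(1U). Below is a minimal sketch of how these compose, using the cpl_rx_data_ack credit_dack word defined above; it is illustrative only (it assumes t4_msg.h and the FreeBSD byte-order helpers are in scope) and is not code from this commit.

    #include <sys/types.h>
    #include <sys/endian.h>	/* htobe32(), be32toh() */

    /*
     * Pack an RX credit return plus a forced ACK into a credit_dack word,
     * and pull the credit count back out of a received (big-endian) word.
     */
    static inline uint32_t
    make_credit_dack(unsigned int credits)
    {
    	uint32_t v;

    	v = V_RX_CREDITS(credits & M_RX_CREDITS) | F_RX_FORCE_ACK;
    	return (htobe32(v));	/* the struct member is __be32 */
    }

    static inline unsigned int
    credit_dack_credits(uint32_t credit_dack)
    {
    	return (G_RX_CREDITS(be32toh(credit_dack)));
    }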
diff --git a/sys/dev/cxgbe/common/t4_regs.h b/sys/dev/cxgbe/common/t4_regs.h
new file mode 100644
index 0000000..dfe733b
--- /dev/null
+++ b/sys/dev/cxgbe/common/t4_regs.h
@@ -0,0 +1,23972 @@
+/*-
+ * Copyright (c) 2011 Chelsio Communications, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+/* This file is automatically generated --- changes will be lost */
+
+#define MYPF_BASE 0x1b000
+#define MYPF_REG(reg_addr) (MYPF_BASE + (reg_addr))
+
+#define PF0_BASE 0x1e000
+#define PF0_REG(reg_addr) (PF0_BASE + (reg_addr))
+
+#define PF1_BASE 0x1e400
+#define PF1_REG(reg_addr) (PF1_BASE + (reg_addr))
+
+#define PF2_BASE 0x1e800
+#define PF2_REG(reg_addr) (PF2_BASE + (reg_addr))
+
+#define PF3_BASE 0x1ec00
+#define PF3_REG(reg_addr) (PF3_BASE + (reg_addr))
+
+#define PF4_BASE 0x1f000
+#define PF4_REG(reg_addr) (PF4_BASE + (reg_addr))
+
+#define PF5_BASE 0x1f400
+#define PF5_REG(reg_addr) (PF5_BASE + (reg_addr))
+
+#define PF6_BASE 0x1f800
+#define PF6_REG(reg_addr) (PF6_BASE + (reg_addr))
+
+#define PF7_BASE 0x1fc00
+#define PF7_REG(reg_addr) (PF7_BASE + (reg_addr))
+
+#define PF_STRIDE 0x400
+#define PF_BASE(idx) (PF0_BASE + (idx) * PF_STRIDE)
+#define PF_REG(idx, reg) (PF_BASE(idx) + (reg))
+
+#define MYPORT_BASE 0x1c000
+#define MYPORT_REG(reg_addr) (MYPORT_BASE + (reg_addr))
+
+#define PORT0_BASE 0x20000
+#define PORT0_REG(reg_addr) (PORT0_BASE + (reg_addr))
+
+#define PORT1_BASE 0x22000
+#define PORT1_REG(reg_addr) (PORT1_BASE + (reg_addr))
+
+#define PORT2_BASE 0x24000
+#define PORT2_REG(reg_addr) (PORT2_BASE + (reg_addr))
+
+#define PORT3_BASE 0x26000
+#define PORT3_REG(reg_addr) (PORT3_BASE + (reg_addr))
+
+#define PORT_STRIDE 0x2000
+#define PORT_BASE(idx) (PORT0_BASE + (idx) * PORT_STRIDE)
+#define PORT_REG(idx, reg) (PORT_BASE(idx) + (reg))
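The per-PF and per-port register blocks sit at fixed strides, so the generic macros above simply reproduce the hard-coded bases: PF_BASE(5) expands to 0x1e000 + 5 * 0x400 = 0x1f400 (the same value as PF5_BASE), and PORT_REG(2, 0x8) expands to 0x20000 + 2 * 0x2000 + 0x8 = 0x24008, i.e. offset 0x8 within the PORT2 block.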
+
+#define VF_SGE_BASE 0x0
+#define VF_SGE_REG(reg_addr) (VF_SGE_BASE + (reg_addr))
+
+#define VF_MPS_BASE 0x100
+#define VF_MPS_REG(reg_addr) (VF_MPS_BASE + (reg_addr))
+
+#define VF_PL_BASE 0x200
+#define VF_PL_REG(reg_addr) (VF_PL_BASE + (reg_addr))
+
+#define VF_MBDATA_BASE 0x240
+#define VF_MBDATA_REG(reg_addr) (VF_MBDATA_BASE + (reg_addr))
+
+#define VF_CIM_BASE 0x300
+#define VF_CIM_REG(reg_addr) (VF_CIM_BASE + (reg_addr))
+
+#define EDC_STRIDE (EDC_1_BASE_ADDR - EDC_0_BASE_ADDR)
+#define EDC_REG(reg, idx) ((reg) + EDC_STRIDE * (idx))
+
+#define SGE_QUEUE_BASE_MAP_HIGH(idx) (A_SGE_QUEUE_BASE_MAP_HIGH + (idx) * 8)
+#define NUM_SGE_QUEUE_BASE_MAP_HIGH_INSTANCES 136
+
+#define SGE_QUEUE_BASE_MAP_LOW(idx) (A_SGE_QUEUE_BASE_MAP_LOW + (idx) * 8)
+#define NUM_SGE_QUEUE_BASE_MAP_LOW_INSTANCES 136
+
+#define PCIE_DMA_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
+#define NUM_PCIE_DMA_INSTANCES 4
+
+#define PCIE_CMD_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
+#define NUM_PCIE_CMD_INSTANCES 2
+
+#define PCIE_HMA_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
+#define NUM_PCIE_HMA_INSTANCES 1
+
+#define PCIE_MEM_ACCESS_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
+#define NUM_PCIE_MEM_ACCESS_INSTANCES 8
+
+#define PCIE_MAILBOX_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
+#define NUM_PCIE_MAILBOX_INSTANCES 1
+
+#define PCIE_FW_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define NUM_PCIE_FW_INSTANCES 8
+
+#define PCIE_FUNC_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
+#define NUM_PCIE_FUNC_INSTANCES 256
+
+#define PCIE_FID(idx) (A_PCIE_FID + (idx) * 4)
+#define NUM_PCIE_FID_INSTANCES 2048
+
+#define PCIE_DMA_BUF_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
+#define NUM_PCIE_DMA_BUF_INSTANCES 4
+
+#define MC_DDR3PHYDATX8_REG(reg_addr, idx) ((reg_addr) + (idx) * 256)
+#define NUM_MC_DDR3PHYDATX8_INSTANCES 9
+
+#define MC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define NUM_MC_BIST_STATUS_INSTANCES 18
+
+#define EDC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define NUM_EDC_BIST_STATUS_INSTANCES 18
+
+#define CIM_PF_MAILBOX_DATA(idx) (A_CIM_PF_MAILBOX_DATA + (idx) * 4)
+#define NUM_CIM_PF_MAILBOX_DATA_INSTANCES 16
+
+#define MPS_TRC_FILTER_MATCH_CTL_A(idx) (A_MPS_TRC_FILTER_MATCH_CTL_A + (idx) * 4)
+#define NUM_MPS_TRC_FILTER_MATCH_CTL_A_INSTANCES 4
+
+#define MPS_TRC_FILTER_MATCH_CTL_B(idx) (A_MPS_TRC_FILTER_MATCH_CTL_B + (idx) * 4)
+#define NUM_MPS_TRC_FILTER_MATCH_CTL_B_INSTANCES 4
+
+#define MPS_TRC_FILTER_RUNT_CTL(idx) (A_MPS_TRC_FILTER_RUNT_CTL + (idx) * 4)
+#define NUM_MPS_TRC_FILTER_RUNT_CTL_INSTANCES 4
+
+#define MPS_TRC_FILTER_DROP(idx) (A_MPS_TRC_FILTER_DROP + (idx) * 4)
+#define NUM_MPS_TRC_FILTER_DROP_INSTANCES 4
+
+#define MPS_TRC_FILTER0_MATCH(idx) (A_MPS_TRC_FILTER0_MATCH + (idx) * 4)
+#define NUM_MPS_TRC_FILTER0_MATCH_INSTANCES 28
+
+#define MPS_TRC_FILTER0_DONT_CARE(idx) (A_MPS_TRC_FILTER0_DONT_CARE + (idx) * 4)
+#define NUM_MPS_TRC_FILTER0_DONT_CARE_INSTANCES 28
+
+#define MPS_TRC_FILTER1_MATCH(idx) (A_MPS_TRC_FILTER1_MATCH + (idx) * 4)
+#define NUM_MPS_TRC_FILTER1_MATCH_INSTANCES 28
+
+#define MPS_TRC_FILTER1_DONT_CARE(idx) (A_MPS_TRC_FILTER1_DONT_CARE + (idx) * 4)
+#define NUM_MPS_TRC_FILTER1_DONT_CARE_INSTANCES 28
+
+#define MPS_TRC_FILTER2_MATCH(idx) (A_MPS_TRC_FILTER2_MATCH + (idx) * 4)
+#define NUM_MPS_TRC_FILTER2_MATCH_INSTANCES 28
+
+#define MPS_TRC_FILTER2_DONT_CARE(idx) (A_MPS_TRC_FILTER2_DONT_CARE + (idx) * 4)
+#define NUM_MPS_TRC_FILTER2_DONT_CARE_INSTANCES 28
+
+#define MPS_TRC_FILTER3_MATCH(idx) (A_MPS_TRC_FILTER3_MATCH + (idx) * 4)
+#define NUM_MPS_TRC_FILTER3_MATCH_INSTANCES 28
+
+#define MPS_TRC_FILTER3_DONT_CARE(idx) (A_MPS_TRC_FILTER3_DONT_CARE + (idx) * 4)
+#define NUM_MPS_TRC_FILTER3_DONT_CARE_INSTANCES 28
+
+#define MPS_PORT_CLS_HASH_SRAM(idx) (A_MPS_PORT_CLS_HASH_SRAM + (idx) * 4)
+#define NUM_MPS_PORT_CLS_HASH_SRAM_INSTANCES 65
+
+#define MPS_CLS_VLAN_TABLE(idx) (A_MPS_CLS_VLAN_TABLE + (idx) * 4)
+#define NUM_MPS_CLS_VLAN_TABLE_INSTANCES 9
+
+#define MPS_CLS_SRAM_L(idx) (A_MPS_CLS_SRAM_L + (idx) * 8)
+#define NUM_MPS_CLS_SRAM_L_INSTANCES 336
+
+#define MPS_CLS_SRAM_H(idx) (A_MPS_CLS_SRAM_H + (idx) * 8)
+#define NUM_MPS_CLS_SRAM_H_INSTANCES 336
+
+#define MPS_CLS_TCAM_Y_L(idx) (A_MPS_CLS_TCAM_Y_L + (idx) * 16)
+#define NUM_MPS_CLS_TCAM_Y_L_INSTANCES 512
+
+#define MPS_CLS_TCAM_Y_H(idx) (A_MPS_CLS_TCAM_Y_H + (idx) * 16)
+#define NUM_MPS_CLS_TCAM_Y_H_INSTANCES 512
+
+#define MPS_CLS_TCAM_X_L(idx) (A_MPS_CLS_TCAM_X_L + (idx) * 16)
+#define NUM_MPS_CLS_TCAM_X_L_INSTANCES 512
+
+#define MPS_CLS_TCAM_X_H(idx) (A_MPS_CLS_TCAM_X_H + (idx) * 16)
+#define NUM_MPS_CLS_TCAM_X_H_INSTANCES 512
+
+#define PL_SEMAPHORE_LOCK(idx) (A_PL_SEMAPHORE_LOCK + (idx) * 4)
+#define NUM_PL_SEMAPHORE_LOCK_INSTANCES 8
+
+#define PL_VF_SLICE_L(idx) (A_PL_VF_SLICE_L + (idx) * 8)
+#define NUM_PL_VF_SLICE_L_INSTANCES 8
+
+#define PL_VF_SLICE_H(idx) (A_PL_VF_SLICE_H + (idx) * 8)
+#define NUM_PL_VF_SLICE_H_INSTANCES 8
+
+#define PL_FLR_VF_STATUS(idx) (A_PL_FLR_VF_STATUS + (idx) * 4)
+#define NUM_PL_FLR_VF_STATUS_INSTANCES 4
+
+#define PL_VFID_MAP(idx) (A_PL_VFID_MAP + (idx) * 4)
+#define NUM_PL_VFID_MAP_INSTANCES 256
+
+#define LE_DB_MASK_IPV4(idx) (A_LE_DB_MASK_IPV4 + (idx) * 4)
+#define NUM_LE_DB_MASK_IPV4_INSTANCES 17
+
+#define LE_DB_MASK_IPV6(idx) (A_LE_DB_MASK_IPV6 + (idx) * 4)
+#define NUM_LE_DB_MASK_IPV6_INSTANCES 17
+
+#define LE_DB_DBGI_REQ_DATA(idx) (A_LE_DB_DBGI_REQ_DATA + (idx) * 4)
+#define NUM_LE_DB_DBGI_REQ_DATA_INSTANCES 17
+
+#define LE_DB_DBGI_REQ_MASK(idx) (A_LE_DB_DBGI_REQ_MASK + (idx) * 4)
+#define NUM_LE_DB_DBGI_REQ_MASK_INSTANCES 17
+
+#define LE_DB_DBGI_RSP_DATA(idx) (A_LE_DB_DBGI_RSP_DATA + (idx) * 4)
+#define NUM_LE_DB_DBGI_RSP_DATA_INSTANCES 17
+
+#define LE_DB_ACTIVE_MASK_IPV4(idx) (A_LE_DB_ACTIVE_MASK_IPV4 + (idx) * 4)
+#define NUM_LE_DB_ACTIVE_MASK_IPV4_INSTANCES 17
+
+#define LE_DB_ACTIVE_MASK_IPV6(idx) (A_LE_DB_ACTIVE_MASK_IPV6 + (idx) * 4)
+#define NUM_LE_DB_ACTIVE_MASK_IPV6_INSTANCES 17
+
+#define LE_HASH_MASK_GEN_IPV4(idx) (A_LE_HASH_MASK_GEN_IPV4 + (idx) * 4)
+#define NUM_LE_HASH_MASK_GEN_IPV4_INSTANCES 4
+
+#define LE_HASH_MASK_GEN_IPV6(idx) (A_LE_HASH_MASK_GEN_IPV6 + (idx) * 4)
+#define NUM_LE_HASH_MASK_GEN_IPV6_INSTANCES 12
+
+#define LE_HASH_MASK_CMP_IPV4(idx) (A_LE_HASH_MASK_CMP_IPV4 + (idx) * 4)
+#define NUM_LE_HASH_MASK_CMP_IPV4_INSTANCES 4
+
+#define LE_HASH_MASK_CMP_IPV6(idx) (A_LE_HASH_MASK_CMP_IPV6 + (idx) * 4)
+#define NUM_LE_HASH_MASK_CMP_IPV6_INSTANCES 12
+
+#define UP_TSCH_CHANNEL_REG(reg_addr, idx) ((reg_addr) + (idx) * 16)
+#define NUM_UP_TSCH_CHANNEL_INSTANCES 4
+
+#define CIM_CTL_MAILBOX_VF_STATUS(idx) (A_CIM_CTL_MAILBOX_VF_STATUS + (idx) * 4)
+#define NUM_CIM_CTL_MAILBOX_VF_STATUS_INSTANCES 4
+
+#define CIM_CTL_MAILBOX_VFN_CTL(idx) (A_CIM_CTL_MAILBOX_VFN_CTL + (idx) * 16)
+#define NUM_CIM_CTL_MAILBOX_VFN_CTL_INSTANCES 128
+
+#define CIM_CTL_TSCH_CHANNEL_REG(reg_addr, idx) ((reg_addr) + (idx) * 288)
+#define NUM_CIM_CTL_TSCH_CHANNEL_INSTANCES 4
+
+#define CIM_CTL_TSCH_CHANNEL_TSCH_CLASS_REG(reg_addr, idx) ((reg_addr) + (idx) * 16)
+#define NUM_CIM_CTL_TSCH_CHANNEL_TSCH_CLASS_INSTANCES 16
+
+/* registers for module SGE */
+#define SGE_BASE_ADDR 0x1000
+
+#define A_SGE_PF_KDOORBELL 0x0
+
+#define S_QID 15
+#define M_QID 0x1ffffU
+#define V_QID(x) ((x) << S_QID)
+#define G_QID(x) (((x) >> S_QID) & M_QID)
+
+#define S_DBPRIO 14
+#define V_DBPRIO(x) ((x) << S_DBPRIO)
+#define F_DBPRIO V_DBPRIO(1U)
+
+#define S_PIDX 0
+#define M_PIDX 0x3fffU
+#define V_PIDX(x) ((x) << S_PIDX)
+#define G_PIDX(x) (((x) >> S_PIDX) & M_PIDX)
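The QID, DBPRIO and PIDX fields together form the egress-queue doorbell word. A hedged sketch of how a driver might ring it, assuming a 32-bit register-write helper (here called t4_write_reg()) and MYPF_REG() from earlier in this file; the helper name and signature are assumptions for illustration, not taken from this commit.

    /*
     * Notify hardware that 'npidx' new descriptors were posted on egress
     * queue 'qid' belonging to this PF.
     */
    static inline void
    ring_tx_db(struct adapter *sc, unsigned int qid, unsigned int npidx)
    {
    	t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
    	    V_QID(qid) | V_PIDX(npidx));
    }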
+
+#define A_SGE_VF_KDOORBELL 0x0
+#define A_SGE_PF_GTS 0x4
+
+#define S_INGRESSQID 16
+#define M_INGRESSQID 0xffffU
+#define V_INGRESSQID(x) ((x) << S_INGRESSQID)
+#define G_INGRESSQID(x) (((x) >> S_INGRESSQID) & M_INGRESSQID)
+
+#define S_TIMERREG 13
+#define M_TIMERREG 0x7U
+#define V_TIMERREG(x) ((x) << S_TIMERREG)
+#define G_TIMERREG(x) (((x) >> S_TIMERREG) & M_TIMERREG)
+
+#define S_SEINTARM 12
+#define V_SEINTARM(x) ((x) << S_SEINTARM)
+#define F_SEINTARM V_SEINTARM(1U)
+
+#define S_CIDXINC 0
+#define M_CIDXINC 0xfffU
+#define V_CIDXINC(x) ((x) << S_CIDXINC)
+#define G_CIDXINC(x) (((x) >> S_CIDXINC) & M_CIDXINC)
+
+#define A_SGE_VF_GTS 0x4
+#define A_SGE_CONTROL 0x1008
+
+#define S_IGRALLCPLTOFL 31
+#define V_IGRALLCPLTOFL(x) ((x) << S_IGRALLCPLTOFL)
+#define F_IGRALLCPLTOFL V_IGRALLCPLTOFL(1U)
+
+#define S_FLSPLITMIN 22
+#define M_FLSPLITMIN 0x1ffU
+#define V_FLSPLITMIN(x) ((x) << S_FLSPLITMIN)
+#define G_FLSPLITMIN(x) (((x) >> S_FLSPLITMIN) & M_FLSPLITMIN)
+
+#define S_FLSPLITMODE 20
+#define M_FLSPLITMODE 0x3U
+#define V_FLSPLITMODE(x) ((x) << S_FLSPLITMODE)
+#define G_FLSPLITMODE(x) (((x) >> S_FLSPLITMODE) & M_FLSPLITMODE)
+
+#define S_DCASYSTYPE 19
+#define V_DCASYSTYPE(x) ((x) << S_DCASYSTYPE)
+#define F_DCASYSTYPE V_DCASYSTYPE(1U)
+
+#define S_RXPKTCPLMODE 18
+#define V_RXPKTCPLMODE(x) ((x) << S_RXPKTCPLMODE)
+#define F_RXPKTCPLMODE V_RXPKTCPLMODE(1U)
+
+#define S_EGRSTATUSPAGESIZE 17
+#define V_EGRSTATUSPAGESIZE(x) ((x) << S_EGRSTATUSPAGESIZE)
+#define F_EGRSTATUSPAGESIZE V_EGRSTATUSPAGESIZE(1U)
+
+#define S_INGHINTENABLE1 15
+#define V_INGHINTENABLE1(x) ((x) << S_INGHINTENABLE1)
+#define F_INGHINTENABLE1 V_INGHINTENABLE1(1U)
+
+#define S_INGHINTENABLE0 14
+#define V_INGHINTENABLE0(x) ((x) << S_INGHINTENABLE0)
+#define F_INGHINTENABLE0 V_INGHINTENABLE0(1U)
+
+#define S_INGINTCOMPAREIDX 13
+#define V_INGINTCOMPAREIDX(x) ((x) << S_INGINTCOMPAREIDX)
+#define F_INGINTCOMPAREIDX V_INGINTCOMPAREIDX(1U)
+
+#define S_PKTSHIFT 10
+#define M_PKTSHIFT 0x7U
+#define V_PKTSHIFT(x) ((x) << S_PKTSHIFT)
+#define G_PKTSHIFT(x) (((x) >> S_PKTSHIFT) & M_PKTSHIFT)
+
+#define S_INGPCIEBOUNDARY 7
+#define M_INGPCIEBOUNDARY 0x7U
+#define V_INGPCIEBOUNDARY(x) ((x) << S_INGPCIEBOUNDARY)
+#define G_INGPCIEBOUNDARY(x) (((x) >> S_INGPCIEBOUNDARY) & M_INGPCIEBOUNDARY)
+
+#define S_INGPADBOUNDARY 4
+#define M_INGPADBOUNDARY 0x7U
+#define V_INGPADBOUNDARY(x) ((x) << S_INGPADBOUNDARY)
+#define G_INGPADBOUNDARY(x) (((x) >> S_INGPADBOUNDARY) & M_INGPADBOUNDARY)
+
+#define S_EGRPCIEBOUNDARY 1
+#define M_EGRPCIEBOUNDARY 0x7U
+#define V_EGRPCIEBOUNDARY(x) ((x) << S_EGRPCIEBOUNDARY)
+#define G_EGRPCIEBOUNDARY(x) (((x) >> S_EGRPCIEBOUNDARY) & M_EGRPCIEBOUNDARY)
+
+#define S_GLOBALENABLE 0
+#define V_GLOBALENABLE(x) ((x) << S_GLOBALENABLE)
+#define F_GLOBALENABLE V_GLOBALENABLE(1U)
+
+#define A_SGE_HOST_PAGE_SIZE 0x100c
+
+#define S_HOSTPAGESIZEPF7 28
+#define M_HOSTPAGESIZEPF7 0xfU
+#define V_HOSTPAGESIZEPF7(x) ((x) << S_HOSTPAGESIZEPF7)
+#define G_HOSTPAGESIZEPF7(x) (((x) >> S_HOSTPAGESIZEPF7) & M_HOSTPAGESIZEPF7)
+
+#define S_HOSTPAGESIZEPF6 24
+#define M_HOSTPAGESIZEPF6 0xfU
+#define V_HOSTPAGESIZEPF6(x) ((x) << S_HOSTPAGESIZEPF6)
+#define G_HOSTPAGESIZEPF6(x) (((x) >> S_HOSTPAGESIZEPF6) & M_HOSTPAGESIZEPF6)
+
+#define S_HOSTPAGESIZEPF5 20
+#define M_HOSTPAGESIZEPF5 0xfU
+#define V_HOSTPAGESIZEPF5(x) ((x) << S_HOSTPAGESIZEPF5)
+#define G_HOSTPAGESIZEPF5(x) (((x) >> S_HOSTPAGESIZEPF5) & M_HOSTPAGESIZEPF5)
+
+#define S_HOSTPAGESIZEPF4 16
+#define M_HOSTPAGESIZEPF4 0xfU
+#define V_HOSTPAGESIZEPF4(x) ((x) << S_HOSTPAGESIZEPF4)
+#define G_HOSTPAGESIZEPF4(x) (((x) >> S_HOSTPAGESIZEPF4) & M_HOSTPAGESIZEPF4)
+
+#define S_HOSTPAGESIZEPF3 12
+#define M_HOSTPAGESIZEPF3 0xfU
+#define V_HOSTPAGESIZEPF3(x) ((x) << S_HOSTPAGESIZEPF3)
+#define G_HOSTPAGESIZEPF3(x) (((x) >> S_HOSTPAGESIZEPF3) & M_HOSTPAGESIZEPF3)
+
+#define S_HOSTPAGESIZEPF2 8
+#define M_HOSTPAGESIZEPF2 0xfU
+#define V_HOSTPAGESIZEPF2(x) ((x) << S_HOSTPAGESIZEPF2)
+#define G_HOSTPAGESIZEPF2(x) (((x) >> S_HOSTPAGESIZEPF2) & M_HOSTPAGESIZEPF2)
+
+#define S_HOSTPAGESIZEPF1 4
+#define M_HOSTPAGESIZEPF1 0xfU
+#define V_HOSTPAGESIZEPF1(x) ((x) << S_HOSTPAGESIZEPF1)
+#define G_HOSTPAGESIZEPF1(x) (((x) >> S_HOSTPAGESIZEPF1) & M_HOSTPAGESIZEPF1)
+
+#define S_HOSTPAGESIZEPF0 0
+#define M_HOSTPAGESIZEPF0 0xfU
+#define V_HOSTPAGESIZEPF0(x) ((x) << S_HOSTPAGESIZEPF0)
+#define G_HOSTPAGESIZEPF0(x) (((x) >> S_HOSTPAGESIZEPF0) & M_HOSTPAGESIZEPF0)
+
+#define A_SGE_EGRESS_QUEUES_PER_PAGE_PF 0x1010
+
+#define S_QUEUESPERPAGEPF7 28
+#define M_QUEUESPERPAGEPF7 0xfU
+#define V_QUEUESPERPAGEPF7(x) ((x) << S_QUEUESPERPAGEPF7)
+#define G_QUEUESPERPAGEPF7(x) (((x) >> S_QUEUESPERPAGEPF7) & M_QUEUESPERPAGEPF7)
+
+#define S_QUEUESPERPAGEPF6 24
+#define M_QUEUESPERPAGEPF6 0xfU
+#define V_QUEUESPERPAGEPF6(x) ((x) << S_QUEUESPERPAGEPF6)
+#define G_QUEUESPERPAGEPF6(x) (((x) >> S_QUEUESPERPAGEPF6) & M_QUEUESPERPAGEPF6)
+
+#define S_QUEUESPERPAGEPF5 20
+#define M_QUEUESPERPAGEPF5 0xfU
+#define V_QUEUESPERPAGEPF5(x) ((x) << S_QUEUESPERPAGEPF5)
+#define G_QUEUESPERPAGEPF5(x) (((x) >> S_QUEUESPERPAGEPF5) & M_QUEUESPERPAGEPF5)
+
+#define S_QUEUESPERPAGEPF4 16
+#define M_QUEUESPERPAGEPF4 0xfU
+#define V_QUEUESPERPAGEPF4(x) ((x) << S_QUEUESPERPAGEPF4)
+#define G_QUEUESPERPAGEPF4(x) (((x) >> S_QUEUESPERPAGEPF4) & M_QUEUESPERPAGEPF4)
+
+#define S_QUEUESPERPAGEPF3 12
+#define M_QUEUESPERPAGEPF3 0xfU
+#define V_QUEUESPERPAGEPF3(x) ((x) << S_QUEUESPERPAGEPF3)
+#define G_QUEUESPERPAGEPF3(x) (((x) >> S_QUEUESPERPAGEPF3) & M_QUEUESPERPAGEPF3)
+
+#define S_QUEUESPERPAGEPF2 8
+#define M_QUEUESPERPAGEPF2 0xfU
+#define V_QUEUESPERPAGEPF2(x) ((x) << S_QUEUESPERPAGEPF2)
+#define G_QUEUESPERPAGEPF2(x) (((x) >> S_QUEUESPERPAGEPF2) & M_QUEUESPERPAGEPF2)
+
+#define S_QUEUESPERPAGEPF1 4
+#define M_QUEUESPERPAGEPF1 0xfU
+#define V_QUEUESPERPAGEPF1(x) ((x) << S_QUEUESPERPAGEPF1)
+#define G_QUEUESPERPAGEPF1(x) (((x) >> S_QUEUESPERPAGEPF1) & M_QUEUESPERPAGEPF1)
+
+#define S_QUEUESPERPAGEPF0 0
+#define M_QUEUESPERPAGEPF0 0xfU
+#define V_QUEUESPERPAGEPF0(x) ((x) << S_QUEUESPERPAGEPF0)
+#define G_QUEUESPERPAGEPF0(x) (((x) >> S_QUEUESPERPAGEPF0) & M_QUEUESPERPAGEPF0)
+
+#define A_SGE_EGRESS_QUEUES_PER_PAGE_VF 0x1014
+
+#define S_QUEUESPERPAGEVFPF7 28
+#define M_QUEUESPERPAGEVFPF7 0xfU
+#define V_QUEUESPERPAGEVFPF7(x) ((x) << S_QUEUESPERPAGEVFPF7)
+#define G_QUEUESPERPAGEVFPF7(x) (((x) >> S_QUEUESPERPAGEVFPF7) & M_QUEUESPERPAGEVFPF7)
+
+#define S_QUEUESPERPAGEVFPF6 24
+#define M_QUEUESPERPAGEVFPF6 0xfU
+#define V_QUEUESPERPAGEVFPF6(x) ((x) << S_QUEUESPERPAGEVFPF6)
+#define G_QUEUESPERPAGEVFPF6(x) (((x) >> S_QUEUESPERPAGEVFPF6) & M_QUEUESPERPAGEVFPF6)
+
+#define S_QUEUESPERPAGEVFPF5 20
+#define M_QUEUESPERPAGEVFPF5 0xfU
+#define V_QUEUESPERPAGEVFPF5(x) ((x) << S_QUEUESPERPAGEVFPF5)
+#define G_QUEUESPERPAGEVFPF5(x) (((x) >> S_QUEUESPERPAGEVFPF5) & M_QUEUESPERPAGEVFPF5)
+
+#define S_QUEUESPERPAGEVFPF4 16
+#define M_QUEUESPERPAGEVFPF4 0xfU
+#define V_QUEUESPERPAGEVFPF4(x) ((x) << S_QUEUESPERPAGEVFPF4)
+#define G_QUEUESPERPAGEVFPF4(x) (((x) >> S_QUEUESPERPAGEVFPF4) & M_QUEUESPERPAGEVFPF4)
+
+#define S_QUEUESPERPAGEVFPF3 12
+#define M_QUEUESPERPAGEVFPF3 0xfU
+#define V_QUEUESPERPAGEVFPF3(x) ((x) << S_QUEUESPERPAGEVFPF3)
+#define G_QUEUESPERPAGEVFPF3(x) (((x) >> S_QUEUESPERPAGEVFPF3) & M_QUEUESPERPAGEVFPF3)
+
+#define S_QUEUESPERPAGEVFPF2 8
+#define M_QUEUESPERPAGEVFPF2 0xfU
+#define V_QUEUESPERPAGEVFPF2(x) ((x) << S_QUEUESPERPAGEVFPF2)
+#define G_QUEUESPERPAGEVFPF2(x) (((x) >> S_QUEUESPERPAGEVFPF2) & M_QUEUESPERPAGEVFPF2)
+
+#define S_QUEUESPERPAGEVFPF1 4
+#define M_QUEUESPERPAGEVFPF1 0xfU
+#define V_QUEUESPERPAGEVFPF1(x) ((x) << S_QUEUESPERPAGEVFPF1)
+#define G_QUEUESPERPAGEVFPF1(x) (((x) >> S_QUEUESPERPAGEVFPF1) & M_QUEUESPERPAGEVFPF1)
+
+#define S_QUEUESPERPAGEVFPF0 0
+#define M_QUEUESPERPAGEVFPF0 0xfU
+#define V_QUEUESPERPAGEVFPF0(x) ((x) << S_QUEUESPERPAGEVFPF0)
+#define G_QUEUESPERPAGEVFPF0(x) (((x) >> S_QUEUESPERPAGEVFPF0) & M_QUEUESPERPAGEVFPF0)
+
+#define A_SGE_USER_MODE_LIMITS 0x1018
+
+#define S_OPCODE_MIN 24
+#define M_OPCODE_MIN 0xffU
+#define V_OPCODE_MIN(x) ((x) << S_OPCODE_MIN)
+#define G_OPCODE_MIN(x) (((x) >> S_OPCODE_MIN) & M_OPCODE_MIN)
+
+#define S_OPCODE_MAX 16
+#define M_OPCODE_MAX 0xffU
+#define V_OPCODE_MAX(x) ((x) << S_OPCODE_MAX)
+#define G_OPCODE_MAX(x) (((x) >> S_OPCODE_MAX) & M_OPCODE_MAX)
+
+#define S_LENGTH_MIN 8
+#define M_LENGTH_MIN 0xffU
+#define V_LENGTH_MIN(x) ((x) << S_LENGTH_MIN)
+#define G_LENGTH_MIN(x) (((x) >> S_LENGTH_MIN) & M_LENGTH_MIN)
+
+#define S_LENGTH_MAX 0
+#define M_LENGTH_MAX 0xffU
+#define V_LENGTH_MAX(x) ((x) << S_LENGTH_MAX)
+#define G_LENGTH_MAX(x) (((x) >> S_LENGTH_MAX) & M_LENGTH_MAX)
+
+#define A_SGE_WR_ERROR 0x101c
+
+#define S_WR_ERROR_OPCODE 0
+#define M_WR_ERROR_OPCODE 0xffU
+#define V_WR_ERROR_OPCODE(x) ((x) << S_WR_ERROR_OPCODE)
+#define G_WR_ERROR_OPCODE(x) (((x) >> S_WR_ERROR_OPCODE) & M_WR_ERROR_OPCODE)
+
+#define A_SGE_PERR_INJECT 0x1020
+
+#define S_MEMSEL 1
+#define M_MEMSEL 0x1fU
+#define V_MEMSEL(x) ((x) << S_MEMSEL)
+#define G_MEMSEL(x) (((x) >> S_MEMSEL) & M_MEMSEL)
+
+#define S_INJECTDATAERR 0
+#define V_INJECTDATAERR(x) ((x) << S_INJECTDATAERR)
+#define F_INJECTDATAERR V_INJECTDATAERR(1U)
+
+#define A_SGE_INT_CAUSE1 0x1024
+
+#define S_PERR_FLM_CREDITFIFO 30
+#define V_PERR_FLM_CREDITFIFO(x) ((x) << S_PERR_FLM_CREDITFIFO)
+#define F_PERR_FLM_CREDITFIFO V_PERR_FLM_CREDITFIFO(1U)
+
+#define S_PERR_IMSG_HINT_FIFO 29
+#define V_PERR_IMSG_HINT_FIFO(x) ((x) << S_PERR_IMSG_HINT_FIFO)
+#define F_PERR_IMSG_HINT_FIFO V_PERR_IMSG_HINT_FIFO(1U)
+
+#define S_PERR_MC_PC 28
+#define V_PERR_MC_PC(x) ((x) << S_PERR_MC_PC)
+#define F_PERR_MC_PC V_PERR_MC_PC(1U)
+
+#define S_PERR_MC_IGR_CTXT 27
+#define V_PERR_MC_IGR_CTXT(x) ((x) << S_PERR_MC_IGR_CTXT)
+#define F_PERR_MC_IGR_CTXT V_PERR_MC_IGR_CTXT(1U)
+
+#define S_PERR_MC_EGR_CTXT 26
+#define V_PERR_MC_EGR_CTXT(x) ((x) << S_PERR_MC_EGR_CTXT)
+#define F_PERR_MC_EGR_CTXT V_PERR_MC_EGR_CTXT(1U)
+
+#define S_PERR_MC_FLM 25
+#define V_PERR_MC_FLM(x) ((x) << S_PERR_MC_FLM)
+#define F_PERR_MC_FLM V_PERR_MC_FLM(1U)
+
+#define S_PERR_PC_MCTAG 24
+#define V_PERR_PC_MCTAG(x) ((x) << S_PERR_PC_MCTAG)
+#define F_PERR_PC_MCTAG V_PERR_PC_MCTAG(1U)
+
+#define S_PERR_PC_CHPI_RSP1 23
+#define V_PERR_PC_CHPI_RSP1(x) ((x) << S_PERR_PC_CHPI_RSP1)
+#define F_PERR_PC_CHPI_RSP1 V_PERR_PC_CHPI_RSP1(1U)
+
+#define S_PERR_PC_CHPI_RSP0 22
+#define V_PERR_PC_CHPI_RSP0(x) ((x) << S_PERR_PC_CHPI_RSP0)
+#define F_PERR_PC_CHPI_RSP0 V_PERR_PC_CHPI_RSP0(1U)
+
+#define S_PERR_DBP_PC_RSP_FIFO3 21
+#define V_PERR_DBP_PC_RSP_FIFO3(x) ((x) << S_PERR_DBP_PC_RSP_FIFO3)
+#define F_PERR_DBP_PC_RSP_FIFO3 V_PERR_DBP_PC_RSP_FIFO3(1U)
+
+#define S_PERR_DBP_PC_RSP_FIFO2 20
+#define V_PERR_DBP_PC_RSP_FIFO2(x) ((x) << S_PERR_DBP_PC_RSP_FIFO2)
+#define F_PERR_DBP_PC_RSP_FIFO2 V_PERR_DBP_PC_RSP_FIFO2(1U)
+
+#define S_PERR_DBP_PC_RSP_FIFO1 19
+#define V_PERR_DBP_PC_RSP_FIFO1(x) ((x) << S_PERR_DBP_PC_RSP_FIFO1)
+#define F_PERR_DBP_PC_RSP_FIFO1 V_PERR_DBP_PC_RSP_FIFO1(1U)
+
+#define S_PERR_DBP_PC_RSP_FIFO0 18
+#define V_PERR_DBP_PC_RSP_FIFO0(x) ((x) << S_PERR_DBP_PC_RSP_FIFO0)
+#define F_PERR_DBP_PC_RSP_FIFO0 V_PERR_DBP_PC_RSP_FIFO0(1U)
+
+#define S_PERR_DMARBT 17
+#define V_PERR_DMARBT(x) ((x) << S_PERR_DMARBT)
+#define F_PERR_DMARBT V_PERR_DMARBT(1U)
+
+#define S_PERR_FLM_DBPFIFO 16
+#define V_PERR_FLM_DBPFIFO(x) ((x) << S_PERR_FLM_DBPFIFO)
+#define F_PERR_FLM_DBPFIFO V_PERR_FLM_DBPFIFO(1U)
+
+#define S_PERR_FLM_MCREQ_FIFO 15
+#define V_PERR_FLM_MCREQ_FIFO(x) ((x) << S_PERR_FLM_MCREQ_FIFO)
+#define F_PERR_FLM_MCREQ_FIFO V_PERR_FLM_MCREQ_FIFO(1U)
+
+#define S_PERR_FLM_HINTFIFO 14
+#define V_PERR_FLM_HINTFIFO(x) ((x) << S_PERR_FLM_HINTFIFO)
+#define F_PERR_FLM_HINTFIFO V_PERR_FLM_HINTFIFO(1U)
+
+#define S_PERR_ALIGN_CTL_FIFO3 13
+#define V_PERR_ALIGN_CTL_FIFO3(x) ((x) << S_PERR_ALIGN_CTL_FIFO3)
+#define F_PERR_ALIGN_CTL_FIFO3 V_PERR_ALIGN_CTL_FIFO3(1U)
+
+#define S_PERR_ALIGN_CTL_FIFO2 12
+#define V_PERR_ALIGN_CTL_FIFO2(x) ((x) << S_PERR_ALIGN_CTL_FIFO2)
+#define F_PERR_ALIGN_CTL_FIFO2 V_PERR_ALIGN_CTL_FIFO2(1U)
+
+#define S_PERR_ALIGN_CTL_FIFO1 11
+#define V_PERR_ALIGN_CTL_FIFO1(x) ((x) << S_PERR_ALIGN_CTL_FIFO1)
+#define F_PERR_ALIGN_CTL_FIFO1 V_PERR_ALIGN_CTL_FIFO1(1U)
+
+#define S_PERR_ALIGN_CTL_FIFO0 10
+#define V_PERR_ALIGN_CTL_FIFO0(x) ((x) << S_PERR_ALIGN_CTL_FIFO0)
+#define F_PERR_ALIGN_CTL_FIFO0 V_PERR_ALIGN_CTL_FIFO0(1U)
+
+#define S_PERR_EDMA_FIFO3 9
+#define V_PERR_EDMA_FIFO3(x) ((x) << S_PERR_EDMA_FIFO3)
+#define F_PERR_EDMA_FIFO3 V_PERR_EDMA_FIFO3(1U)
+
+#define S_PERR_EDMA_FIFO2 8
+#define V_PERR_EDMA_FIFO2(x) ((x) << S_PERR_EDMA_FIFO2)
+#define F_PERR_EDMA_FIFO2 V_PERR_EDMA_FIFO2(1U)
+
+#define S_PERR_EDMA_FIFO1 7
+#define V_PERR_EDMA_FIFO1(x) ((x) << S_PERR_EDMA_FIFO1)
+#define F_PERR_EDMA_FIFO1 V_PERR_EDMA_FIFO1(1U)
+
+#define S_PERR_EDMA_FIFO0 6
+#define V_PERR_EDMA_FIFO0(x) ((x) << S_PERR_EDMA_FIFO0)
+#define F_PERR_EDMA_FIFO0 V_PERR_EDMA_FIFO0(1U)
+
+#define S_PERR_PD_FIFO3 5
+#define V_PERR_PD_FIFO3(x) ((x) << S_PERR_PD_FIFO3)
+#define F_PERR_PD_FIFO3 V_PERR_PD_FIFO3(1U)
+
+#define S_PERR_PD_FIFO2 4
+#define V_PERR_PD_FIFO2(x) ((x) << S_PERR_PD_FIFO2)
+#define F_PERR_PD_FIFO2 V_PERR_PD_FIFO2(1U)
+
+#define S_PERR_PD_FIFO1 3
+#define V_PERR_PD_FIFO1(x) ((x) << S_PERR_PD_FIFO1)
+#define F_PERR_PD_FIFO1 V_PERR_PD_FIFO1(1U)
+
+#define S_PERR_PD_FIFO0 2
+#define V_PERR_PD_FIFO0(x) ((x) << S_PERR_PD_FIFO0)
+#define F_PERR_PD_FIFO0 V_PERR_PD_FIFO0(1U)
+
+#define S_PERR_ING_CTXT_MIFRSP 1
+#define V_PERR_ING_CTXT_MIFRSP(x) ((x) << S_PERR_ING_CTXT_MIFRSP)
+#define F_PERR_ING_CTXT_MIFRSP V_PERR_ING_CTXT_MIFRSP(1U)
+
+#define S_PERR_EGR_CTXT_MIFRSP 0
+#define V_PERR_EGR_CTXT_MIFRSP(x) ((x) << S_PERR_EGR_CTXT_MIFRSP)
+#define F_PERR_EGR_CTXT_MIFRSP V_PERR_EGR_CTXT_MIFRSP(1U)
+
+#define A_SGE_INT_ENABLE1 0x1028
+#define A_SGE_PERR_ENABLE1 0x102c
+#define A_SGE_INT_CAUSE2 0x1030
+
+#define S_PERR_HINT_DELAY_FIFO1 30
+#define V_PERR_HINT_DELAY_FIFO1(x) ((x) << S_PERR_HINT_DELAY_FIFO1)
+#define F_PERR_HINT_DELAY_FIFO1 V_PERR_HINT_DELAY_FIFO1(1U)
+
+#define S_PERR_HINT_DELAY_FIFO0 29
+#define V_PERR_HINT_DELAY_FIFO0(x) ((x) << S_PERR_HINT_DELAY_FIFO0)
+#define F_PERR_HINT_DELAY_FIFO0 V_PERR_HINT_DELAY_FIFO0(1U)
+
+#define S_PERR_IMSG_PD_FIFO 28
+#define V_PERR_IMSG_PD_FIFO(x) ((x) << S_PERR_IMSG_PD_FIFO)
+#define F_PERR_IMSG_PD_FIFO V_PERR_IMSG_PD_FIFO(1U)
+
+#define S_PERR_ULPTX_FIFO1 27
+#define V_PERR_ULPTX_FIFO1(x) ((x) << S_PERR_ULPTX_FIFO1)
+#define F_PERR_ULPTX_FIFO1 V_PERR_ULPTX_FIFO1(1U)
+
+#define S_PERR_ULPTX_FIFO0 26
+#define V_PERR_ULPTX_FIFO0(x) ((x) << S_PERR_ULPTX_FIFO0)
+#define F_PERR_ULPTX_FIFO0 V_PERR_ULPTX_FIFO0(1U)
+
+#define S_PERR_IDMA2IMSG_FIFO1 25
+#define V_PERR_IDMA2IMSG_FIFO1(x) ((x) << S_PERR_IDMA2IMSG_FIFO1)
+#define F_PERR_IDMA2IMSG_FIFO1 V_PERR_IDMA2IMSG_FIFO1(1U)
+
+#define S_PERR_IDMA2IMSG_FIFO0 24
+#define V_PERR_IDMA2IMSG_FIFO0(x) ((x) << S_PERR_IDMA2IMSG_FIFO0)
+#define F_PERR_IDMA2IMSG_FIFO0 V_PERR_IDMA2IMSG_FIFO0(1U)
+
+#define S_PERR_HEADERSPLIT_FIFO1 23
+#define V_PERR_HEADERSPLIT_FIFO1(x) ((x) << S_PERR_HEADERSPLIT_FIFO1)
+#define F_PERR_HEADERSPLIT_FIFO1 V_PERR_HEADERSPLIT_FIFO1(1U)
+
+#define S_PERR_HEADERSPLIT_FIFO0 22
+#define V_PERR_HEADERSPLIT_FIFO0(x) ((x) << S_PERR_HEADERSPLIT_FIFO0)
+#define F_PERR_HEADERSPLIT_FIFO0 V_PERR_HEADERSPLIT_FIFO0(1U)
+
+#define S_PERR_ESWITCH_FIFO3 21
+#define V_PERR_ESWITCH_FIFO3(x) ((x) << S_PERR_ESWITCH_FIFO3)
+#define F_PERR_ESWITCH_FIFO3 V_PERR_ESWITCH_FIFO3(1U)
+
+#define S_PERR_ESWITCH_FIFO2 20
+#define V_PERR_ESWITCH_FIFO2(x) ((x) << S_PERR_ESWITCH_FIFO2)
+#define F_PERR_ESWITCH_FIFO2 V_PERR_ESWITCH_FIFO2(1U)
+
+#define S_PERR_ESWITCH_FIFO1 19
+#define V_PERR_ESWITCH_FIFO1(x) ((x) << S_PERR_ESWITCH_FIFO1)
+#define F_PERR_ESWITCH_FIFO1 V_PERR_ESWITCH_FIFO1(1U)
+
+#define S_PERR_ESWITCH_FIFO0 18
+#define V_PERR_ESWITCH_FIFO0(x) ((x) << S_PERR_ESWITCH_FIFO0)
+#define F_PERR_ESWITCH_FIFO0 V_PERR_ESWITCH_FIFO0(1U)
+
+#define S_PERR_PC_DBP1 17
+#define V_PERR_PC_DBP1(x) ((x) << S_PERR_PC_DBP1)
+#define F_PERR_PC_DBP1 V_PERR_PC_DBP1(1U)
+
+#define S_PERR_PC_DBP0 16
+#define V_PERR_PC_DBP0(x) ((x) << S_PERR_PC_DBP0)
+#define F_PERR_PC_DBP0 V_PERR_PC_DBP0(1U)
+
+#define S_PERR_IMSG_OB_FIFO 15
+#define V_PERR_IMSG_OB_FIFO(x) ((x) << S_PERR_IMSG_OB_FIFO)
+#define F_PERR_IMSG_OB_FIFO V_PERR_IMSG_OB_FIFO(1U)
+
+#define S_PERR_CONM_SRAM 14
+#define V_PERR_CONM_SRAM(x) ((x) << S_PERR_CONM_SRAM)
+#define F_PERR_CONM_SRAM V_PERR_CONM_SRAM(1U)
+
+#define S_PERR_PC_MC_RSP 13
+#define V_PERR_PC_MC_RSP(x) ((x) << S_PERR_PC_MC_RSP)
+#define F_PERR_PC_MC_RSP V_PERR_PC_MC_RSP(1U)
+
+#define S_PERR_ISW_IDMA0_FIFO 12
+#define V_PERR_ISW_IDMA0_FIFO(x) ((x) << S_PERR_ISW_IDMA0_FIFO)
+#define F_PERR_ISW_IDMA0_FIFO V_PERR_ISW_IDMA0_FIFO(1U)
+
+#define S_PERR_ISW_IDMA1_FIFO 11
+#define V_PERR_ISW_IDMA1_FIFO(x) ((x) << S_PERR_ISW_IDMA1_FIFO)
+#define F_PERR_ISW_IDMA1_FIFO V_PERR_ISW_IDMA1_FIFO(1U)
+
+#define S_PERR_ISW_DBP_FIFO 10
+#define V_PERR_ISW_DBP_FIFO(x) ((x) << S_PERR_ISW_DBP_FIFO)
+#define F_PERR_ISW_DBP_FIFO V_PERR_ISW_DBP_FIFO(1U)
+
+#define S_PERR_ISW_GTS_FIFO 9
+#define V_PERR_ISW_GTS_FIFO(x) ((x) << S_PERR_ISW_GTS_FIFO)
+#define F_PERR_ISW_GTS_FIFO V_PERR_ISW_GTS_FIFO(1U)
+
+#define S_PERR_ITP_EVR 8
+#define V_PERR_ITP_EVR(x) ((x) << S_PERR_ITP_EVR)
+#define F_PERR_ITP_EVR V_PERR_ITP_EVR(1U)
+
+#define S_PERR_FLM_CNTXMEM 7
+#define V_PERR_FLM_CNTXMEM(x) ((x) << S_PERR_FLM_CNTXMEM)
+#define F_PERR_FLM_CNTXMEM V_PERR_FLM_CNTXMEM(1U)
+
+#define S_PERR_FLM_L1CACHE 6
+#define V_PERR_FLM_L1CACHE(x) ((x) << S_PERR_FLM_L1CACHE)
+#define F_PERR_FLM_L1CACHE V_PERR_FLM_L1CACHE(1U)
+
+#define S_PERR_DBP_HINT_FIFO 5
+#define V_PERR_DBP_HINT_FIFO(x) ((x) << S_PERR_DBP_HINT_FIFO)
+#define F_PERR_DBP_HINT_FIFO V_PERR_DBP_HINT_FIFO(1U)
+
+#define S_PERR_DBP_HP_FIFO 4
+#define V_PERR_DBP_HP_FIFO(x) ((x) << S_PERR_DBP_HP_FIFO)
+#define F_PERR_DBP_HP_FIFO V_PERR_DBP_HP_FIFO(1U)
+
+#define S_PERR_DBP_LP_FIFO 3
+#define V_PERR_DBP_LP_FIFO(x) ((x) << S_PERR_DBP_LP_FIFO)
+#define F_PERR_DBP_LP_FIFO V_PERR_DBP_LP_FIFO(1U)
+
+#define S_PERR_ING_CTXT_CACHE 2
+#define V_PERR_ING_CTXT_CACHE(x) ((x) << S_PERR_ING_CTXT_CACHE)
+#define F_PERR_ING_CTXT_CACHE V_PERR_ING_CTXT_CACHE(1U)
+
+#define S_PERR_EGR_CTXT_CACHE 1
+#define V_PERR_EGR_CTXT_CACHE(x) ((x) << S_PERR_EGR_CTXT_CACHE)
+#define F_PERR_EGR_CTXT_CACHE V_PERR_EGR_CTXT_CACHE(1U)
+
+#define S_PERR_BASE_SIZE 0
+#define V_PERR_BASE_SIZE(x) ((x) << S_PERR_BASE_SIZE)
+#define F_PERR_BASE_SIZE V_PERR_BASE_SIZE(1U)
+
+#define A_SGE_INT_ENABLE2 0x1034
+#define A_SGE_PERR_ENABLE2 0x1038
+#define A_SGE_INT_CAUSE3 0x103c
+
+#define S_ERR_FLM_DBP 31
+#define V_ERR_FLM_DBP(x) ((x) << S_ERR_FLM_DBP)
+#define F_ERR_FLM_DBP V_ERR_FLM_DBP(1U)
+
+#define S_ERR_FLM_IDMA1 30
+#define V_ERR_FLM_IDMA1(x) ((x) << S_ERR_FLM_IDMA1)
+#define F_ERR_FLM_IDMA1 V_ERR_FLM_IDMA1(1U)
+
+#define S_ERR_FLM_IDMA0 29
+#define V_ERR_FLM_IDMA0(x) ((x) << S_ERR_FLM_IDMA0)
+#define F_ERR_FLM_IDMA0 V_ERR_FLM_IDMA0(1U)
+
+#define S_ERR_FLM_HINT 28
+#define V_ERR_FLM_HINT(x) ((x) << S_ERR_FLM_HINT)
+#define F_ERR_FLM_HINT V_ERR_FLM_HINT(1U)
+
+#define S_ERR_PCIE_ERROR3 27
+#define V_ERR_PCIE_ERROR3(x) ((x) << S_ERR_PCIE_ERROR3)
+#define F_ERR_PCIE_ERROR3 V_ERR_PCIE_ERROR3(1U)
+
+#define S_ERR_PCIE_ERROR2 26
+#define V_ERR_PCIE_ERROR2(x) ((x) << S_ERR_PCIE_ERROR2)
+#define F_ERR_PCIE_ERROR2 V_ERR_PCIE_ERROR2(1U)
+
+#define S_ERR_PCIE_ERROR1 25
+#define V_ERR_PCIE_ERROR1(x) ((x) << S_ERR_PCIE_ERROR1)
+#define F_ERR_PCIE_ERROR1 V_ERR_PCIE_ERROR1(1U)
+
+#define S_ERR_PCIE_ERROR0 24
+#define V_ERR_PCIE_ERROR0(x) ((x) << S_ERR_PCIE_ERROR0)
+#define F_ERR_PCIE_ERROR0 V_ERR_PCIE_ERROR0(1U)
+
+#define S_ERR_TIMER_ABOVE_MAX_QID 23
+#define V_ERR_TIMER_ABOVE_MAX_QID(x) ((x) << S_ERR_TIMER_ABOVE_MAX_QID)
+#define F_ERR_TIMER_ABOVE_MAX_QID V_ERR_TIMER_ABOVE_MAX_QID(1U)
+
+#define S_ERR_CPL_EXCEED_IQE_SIZE 22
+#define V_ERR_CPL_EXCEED_IQE_SIZE(x) ((x) << S_ERR_CPL_EXCEED_IQE_SIZE)
+#define F_ERR_CPL_EXCEED_IQE_SIZE V_ERR_CPL_EXCEED_IQE_SIZE(1U)
+
+#define S_ERR_INVALID_CIDX_INC 21
+#define V_ERR_INVALID_CIDX_INC(x) ((x) << S_ERR_INVALID_CIDX_INC)
+#define F_ERR_INVALID_CIDX_INC V_ERR_INVALID_CIDX_INC(1U)
+
+#define S_ERR_ITP_TIME_PAUSED 20
+#define V_ERR_ITP_TIME_PAUSED(x) ((x) << S_ERR_ITP_TIME_PAUSED)
+#define F_ERR_ITP_TIME_PAUSED V_ERR_ITP_TIME_PAUSED(1U)
+
+#define S_ERR_CPL_OPCODE_0 19
+#define V_ERR_CPL_OPCODE_0(x) ((x) << S_ERR_CPL_OPCODE_0)
+#define F_ERR_CPL_OPCODE_0 V_ERR_CPL_OPCODE_0(1U)
+
+#define S_ERR_DROPPED_DB 18
+#define V_ERR_DROPPED_DB(x) ((x) << S_ERR_DROPPED_DB)
+#define F_ERR_DROPPED_DB V_ERR_DROPPED_DB(1U)
+
+#define S_ERR_DATA_CPL_ON_HIGH_QID1 17
+#define V_ERR_DATA_CPL_ON_HIGH_QID1(x) ((x) << S_ERR_DATA_CPL_ON_HIGH_QID1)
+#define F_ERR_DATA_CPL_ON_HIGH_QID1 V_ERR_DATA_CPL_ON_HIGH_QID1(1U)
+
+#define S_ERR_DATA_CPL_ON_HIGH_QID0 16
+#define V_ERR_DATA_CPL_ON_HIGH_QID0(x) ((x) << S_ERR_DATA_CPL_ON_HIGH_QID0)
+#define F_ERR_DATA_CPL_ON_HIGH_QID0 V_ERR_DATA_CPL_ON_HIGH_QID0(1U)
+
+#define S_ERR_BAD_DB_PIDX3 15
+#define V_ERR_BAD_DB_PIDX3(x) ((x) << S_ERR_BAD_DB_PIDX3)
+#define F_ERR_BAD_DB_PIDX3 V_ERR_BAD_DB_PIDX3(1U)
+
+#define S_ERR_BAD_DB_PIDX2 14
+#define V_ERR_BAD_DB_PIDX2(x) ((x) << S_ERR_BAD_DB_PIDX2)
+#define F_ERR_BAD_DB_PIDX2 V_ERR_BAD_DB_PIDX2(1U)
+
+#define S_ERR_BAD_DB_PIDX1 13
+#define V_ERR_BAD_DB_PIDX1(x) ((x) << S_ERR_BAD_DB_PIDX1)
+#define F_ERR_BAD_DB_PIDX1 V_ERR_BAD_DB_PIDX1(1U)
+
+#define S_ERR_BAD_DB_PIDX0 12
+#define V_ERR_BAD_DB_PIDX0(x) ((x) << S_ERR_BAD_DB_PIDX0)
+#define F_ERR_BAD_DB_PIDX0 V_ERR_BAD_DB_PIDX0(1U)
+
+#define S_ERR_ING_PCIE_CHAN 11
+#define V_ERR_ING_PCIE_CHAN(x) ((x) << S_ERR_ING_PCIE_CHAN)
+#define F_ERR_ING_PCIE_CHAN V_ERR_ING_PCIE_CHAN(1U)
+
+#define S_ERR_ING_CTXT_PRIO 10
+#define V_ERR_ING_CTXT_PRIO(x) ((x) << S_ERR_ING_CTXT_PRIO)
+#define F_ERR_ING_CTXT_PRIO V_ERR_ING_CTXT_PRIO(1U)
+
+#define S_ERR_EGR_CTXT_PRIO 9
+#define V_ERR_EGR_CTXT_PRIO(x) ((x) << S_ERR_EGR_CTXT_PRIO)
+#define F_ERR_EGR_CTXT_PRIO V_ERR_EGR_CTXT_PRIO(1U)
+
+#define S_DBFIFO_HP_INT 8
+#define V_DBFIFO_HP_INT(x) ((x) << S_DBFIFO_HP_INT)
+#define F_DBFIFO_HP_INT V_DBFIFO_HP_INT(1U)
+
+#define S_DBFIFO_LP_INT 7
+#define V_DBFIFO_LP_INT(x) ((x) << S_DBFIFO_LP_INT)
+#define F_DBFIFO_LP_INT V_DBFIFO_LP_INT(1U)
+
+#define S_REG_ADDRESS_ERR 6
+#define V_REG_ADDRESS_ERR(x) ((x) << S_REG_ADDRESS_ERR)
+#define F_REG_ADDRESS_ERR V_REG_ADDRESS_ERR(1U)
+
+#define S_INGRESS_SIZE_ERR 5
+#define V_INGRESS_SIZE_ERR(x) ((x) << S_INGRESS_SIZE_ERR)
+#define F_INGRESS_SIZE_ERR V_INGRESS_SIZE_ERR(1U)
+
+#define S_EGRESS_SIZE_ERR 4
+#define V_EGRESS_SIZE_ERR(x) ((x) << S_EGRESS_SIZE_ERR)
+#define F_EGRESS_SIZE_ERR V_EGRESS_SIZE_ERR(1U)
+
+#define S_ERR_INV_CTXT3 3
+#define V_ERR_INV_CTXT3(x) ((x) << S_ERR_INV_CTXT3)
+#define F_ERR_INV_CTXT3 V_ERR_INV_CTXT3(1U)
+
+#define S_ERR_INV_CTXT2 2
+#define V_ERR_INV_CTXT2(x) ((x) << S_ERR_INV_CTXT2)
+#define F_ERR_INV_CTXT2 V_ERR_INV_CTXT2(1U)
+
+#define S_ERR_INV_CTXT1 1
+#define V_ERR_INV_CTXT1(x) ((x) << S_ERR_INV_CTXT1)
+#define F_ERR_INV_CTXT1 V_ERR_INV_CTXT1(1U)
+
+#define S_ERR_INV_CTXT0 0
+#define V_ERR_INV_CTXT0(x) ((x) << S_ERR_INV_CTXT0)
+#define F_ERR_INV_CTXT0 V_ERR_INV_CTXT0(1U)
+
+#define A_SGE_INT_ENABLE3 0x1040
+#define A_SGE_FL_BUFFER_SIZE0 0x1044
+
+#define S_SIZE 4
+#define M_SIZE 0xfffffffU
+#define V_SIZE(x) ((x) << S_SIZE)
+#define G_SIZE(x) (((x) >> S_SIZE) & M_SIZE)
+
+#define A_SGE_FL_BUFFER_SIZE1 0x1048
+#define A_SGE_FL_BUFFER_SIZE2 0x104c
+#define A_SGE_FL_BUFFER_SIZE3 0x1050
+#define A_SGE_FL_BUFFER_SIZE4 0x1054
+#define A_SGE_FL_BUFFER_SIZE5 0x1058
+#define A_SGE_FL_BUFFER_SIZE6 0x105c
+#define A_SGE_FL_BUFFER_SIZE7 0x1060
+#define A_SGE_FL_BUFFER_SIZE8 0x1064
+#define A_SGE_FL_BUFFER_SIZE9 0x1068
+#define A_SGE_FL_BUFFER_SIZE10 0x106c
+#define A_SGE_FL_BUFFER_SIZE11 0x1070
+#define A_SGE_FL_BUFFER_SIZE12 0x1074
+#define A_SGE_FL_BUFFER_SIZE13 0x1078
+#define A_SGE_FL_BUFFER_SIZE14 0x107c
+#define A_SGE_FL_BUFFER_SIZE15 0x1080
+#define A_SGE_DBQ_CTXT_BADDR 0x1084
+
+#define S_BASEADDR 3
+#define M_BASEADDR 0x1fffffffU
+#define V_BASEADDR(x) ((x) << S_BASEADDR)
+#define G_BASEADDR(x) (((x) >> S_BASEADDR) & M_BASEADDR)
+
+#define A_SGE_IMSG_CTXT_BADDR 0x1088
+#define A_SGE_FLM_CACHE_BADDR 0x108c
+#define A_SGE_FLM_CFG 0x1090
+
+#define S_OPMODE 26
+#define M_OPMODE 0x3fU
+#define V_OPMODE(x) ((x) << S_OPMODE)
+#define G_OPMODE(x) (((x) >> S_OPMODE) & M_OPMODE)
+
+#define S_NOHDR 18
+#define V_NOHDR(x) ((x) << S_NOHDR)
+#define F_NOHDR V_NOHDR(1U)
+
+#define S_CACHEPTRCNT 16
+#define M_CACHEPTRCNT 0x3U
+#define V_CACHEPTRCNT(x) ((x) << S_CACHEPTRCNT)
+#define G_CACHEPTRCNT(x) (((x) >> S_CACHEPTRCNT) & M_CACHEPTRCNT)
+
+#define S_EDRAMPTRCNT 14
+#define M_EDRAMPTRCNT 0x3U
+#define V_EDRAMPTRCNT(x) ((x) << S_EDRAMPTRCNT)
+#define G_EDRAMPTRCNT(x) (((x) >> S_EDRAMPTRCNT) & M_EDRAMPTRCNT)
+
+#define S_HDRSTARTFLQ 11
+#define M_HDRSTARTFLQ 0x7U
+#define V_HDRSTARTFLQ(x) ((x) << S_HDRSTARTFLQ)
+#define G_HDRSTARTFLQ(x) (((x) >> S_HDRSTARTFLQ) & M_HDRSTARTFLQ)
+
+#define S_FETCHTHRESH 6
+#define M_FETCHTHRESH 0x1fU
+#define V_FETCHTHRESH(x) ((x) << S_FETCHTHRESH)
+#define G_FETCHTHRESH(x) (((x) >> S_FETCHTHRESH) & M_FETCHTHRESH)
+
+#define S_CREDITCNT 4
+#define M_CREDITCNT 0x3U
+#define V_CREDITCNT(x) ((x) << S_CREDITCNT)
+#define G_CREDITCNT(x) (((x) >> S_CREDITCNT) & M_CREDITCNT)
+
+#define S_NOEDRAM 0
+#define V_NOEDRAM(x) ((x) << S_NOEDRAM)
+#define F_NOEDRAM V_NOEDRAM(1U)
+
+#define A_SGE_CONM_CTRL 0x1094
+
+#define S_EGRTHRESHOLD 8
+#define M_EGRTHRESHOLD 0x3fU
+#define V_EGRTHRESHOLD(x) ((x) << S_EGRTHRESHOLD)
+#define G_EGRTHRESHOLD(x) (((x) >> S_EGRTHRESHOLD) & M_EGRTHRESHOLD)
+
+#define S_INGTHRESHOLD 2
+#define M_INGTHRESHOLD 0x3fU
+#define V_INGTHRESHOLD(x) ((x) << S_INGTHRESHOLD)
+#define G_INGTHRESHOLD(x) (((x) >> S_INGTHRESHOLD) & M_INGTHRESHOLD)
+
+#define S_MPS_ENABLE 1
+#define V_MPS_ENABLE(x) ((x) << S_MPS_ENABLE)
+#define F_MPS_ENABLE V_MPS_ENABLE(1U)
+
+#define S_TP_ENABLE 0
+#define V_TP_ENABLE(x) ((x) << S_TP_ENABLE)
+#define F_TP_ENABLE V_TP_ENABLE(1U)
+
+#define A_SGE_TIMESTAMP_LO 0x1098
+#define A_SGE_TIMESTAMP_HI 0x109c
+
+#define S_TSOP 28
+#define M_TSOP 0x3U
+#define V_TSOP(x) ((x) << S_TSOP)
+#define G_TSOP(x) (((x) >> S_TSOP) & M_TSOP)
+
+#define S_TSVAL 0
+#define M_TSVAL 0xfffffffU
+#define V_TSVAL(x) ((x) << S_TSVAL)
+#define G_TSVAL(x) (((x) >> S_TSVAL) & M_TSVAL)
+
+#define A_SGE_INGRESS_RX_THRESHOLD 0x10a0
+
+#define S_THRESHOLD_0 24
+#define M_THRESHOLD_0 0x3fU
+#define V_THRESHOLD_0(x) ((x) << S_THRESHOLD_0)
+#define G_THRESHOLD_0(x) (((x) >> S_THRESHOLD_0) & M_THRESHOLD_0)
+
+#define S_THRESHOLD_1 16
+#define M_THRESHOLD_1 0x3fU
+#define V_THRESHOLD_1(x) ((x) << S_THRESHOLD_1)
+#define G_THRESHOLD_1(x) (((x) >> S_THRESHOLD_1) & M_THRESHOLD_1)
+
+#define S_THRESHOLD_2 8
+#define M_THRESHOLD_2 0x3fU
+#define V_THRESHOLD_2(x) ((x) << S_THRESHOLD_2)
+#define G_THRESHOLD_2(x) (((x) >> S_THRESHOLD_2) & M_THRESHOLD_2)
+
+#define S_THRESHOLD_3 0
+#define M_THRESHOLD_3 0x3fU
+#define V_THRESHOLD_3(x) ((x) << S_THRESHOLD_3)
+#define G_THRESHOLD_3(x) (((x) >> S_THRESHOLD_3) & M_THRESHOLD_3)
+
+#define A_SGE_DBFIFO_STATUS 0x10a4
+
+#define S_HP_INT_THRESH 28
+#define M_HP_INT_THRESH 0xfU
+#define V_HP_INT_THRESH(x) ((x) << S_HP_INT_THRESH)
+#define G_HP_INT_THRESH(x) (((x) >> S_HP_INT_THRESH) & M_HP_INT_THRESH)
+
+#define S_HP_COUNT 16
+#define M_HP_COUNT 0x7ffU
+#define V_HP_COUNT(x) ((x) << S_HP_COUNT)
+#define G_HP_COUNT(x) (((x) >> S_HP_COUNT) & M_HP_COUNT)
+
+#define S_LP_INT_THRESH 12
+#define M_LP_INT_THRESH 0xfU
+#define V_LP_INT_THRESH(x) ((x) << S_LP_INT_THRESH)
+#define G_LP_INT_THRESH(x) (((x) >> S_LP_INT_THRESH) & M_LP_INT_THRESH)
+
+#define S_LP_COUNT 0
+#define M_LP_COUNT 0x7ffU
+#define V_LP_COUNT(x) ((x) << S_LP_COUNT)
+#define G_LP_COUNT(x) (((x) >> S_LP_COUNT) & M_LP_COUNT)
+
+#define A_SGE_DOORBELL_CONTROL 0x10a8
+
+#define S_HINTDEPTHCTL 27
+#define M_HINTDEPTHCTL 0x1fU
+#define V_HINTDEPTHCTL(x) ((x) << S_HINTDEPTHCTL)
+#define G_HINTDEPTHCTL(x) (((x) >> S_HINTDEPTHCTL) & M_HINTDEPTHCTL)
+
+#define S_NOCOALESCE 26
+#define V_NOCOALESCE(x) ((x) << S_NOCOALESCE)
+#define F_NOCOALESCE V_NOCOALESCE(1U)
+
+#define S_HP_WEIGHT 24
+#define M_HP_WEIGHT 0x3U
+#define V_HP_WEIGHT(x) ((x) << S_HP_WEIGHT)
+#define G_HP_WEIGHT(x) (((x) >> S_HP_WEIGHT) & M_HP_WEIGHT)
+
+#define S_HP_DISABLE 23
+#define V_HP_DISABLE(x) ((x) << S_HP_DISABLE)
+#define F_HP_DISABLE V_HP_DISABLE(1U)
+
+#define S_FORCEUSERDBTOLP 22
+#define V_FORCEUSERDBTOLP(x) ((x) << S_FORCEUSERDBTOLP)
+#define F_FORCEUSERDBTOLP V_FORCEUSERDBTOLP(1U)
+
+#define S_FORCEVFPF0DBTOLP 21
+#define V_FORCEVFPF0DBTOLP(x) ((x) << S_FORCEVFPF0DBTOLP)
+#define F_FORCEVFPF0DBTOLP V_FORCEVFPF0DBTOLP(1U)
+
+#define S_FORCEVFPF1DBTOLP 20
+#define V_FORCEVFPF1DBTOLP(x) ((x) << S_FORCEVFPF1DBTOLP)
+#define F_FORCEVFPF1DBTOLP V_FORCEVFPF1DBTOLP(1U)
+
+#define S_FORCEVFPF2DBTOLP 19
+#define V_FORCEVFPF2DBTOLP(x) ((x) << S_FORCEVFPF2DBTOLP)
+#define F_FORCEVFPF2DBTOLP V_FORCEVFPF2DBTOLP(1U)
+
+#define S_FORCEVFPF3DBTOLP 18
+#define V_FORCEVFPF3DBTOLP(x) ((x) << S_FORCEVFPF3DBTOLP)
+#define F_FORCEVFPF3DBTOLP V_FORCEVFPF3DBTOLP(1U)
+
+#define S_FORCEVFPF4DBTOLP 17
+#define V_FORCEVFPF4DBTOLP(x) ((x) << S_FORCEVFPF4DBTOLP)
+#define F_FORCEVFPF4DBTOLP V_FORCEVFPF4DBTOLP(1U)
+
+#define S_FORCEVFPF5DBTOLP 16
+#define V_FORCEVFPF5DBTOLP(x) ((x) << S_FORCEVFPF5DBTOLP)
+#define F_FORCEVFPF5DBTOLP V_FORCEVFPF5DBTOLP(1U)
+
+#define S_FORCEVFPF6DBTOLP 15
+#define V_FORCEVFPF6DBTOLP(x) ((x) << S_FORCEVFPF6DBTOLP)
+#define F_FORCEVFPF6DBTOLP V_FORCEVFPF6DBTOLP(1U)
+
+#define S_FORCEVFPF7DBTOLP 14
+#define V_FORCEVFPF7DBTOLP(x) ((x) << S_FORCEVFPF7DBTOLP)
+#define F_FORCEVFPF7DBTOLP V_FORCEVFPF7DBTOLP(1U)
+
+#define S_ENABLE_DROP 13
+#define V_ENABLE_DROP(x) ((x) << S_ENABLE_DROP)
+#define F_ENABLE_DROP V_ENABLE_DROP(1U)
+
+#define S_DROP_TIMEOUT 1
+#define M_DROP_TIMEOUT 0xfffU
+#define V_DROP_TIMEOUT(x) ((x) << S_DROP_TIMEOUT)
+#define G_DROP_TIMEOUT(x) (((x) >> S_DROP_TIMEOUT) & M_DROP_TIMEOUT)
+
+#define S_DROPPED_DB 0
+#define V_DROPPED_DB(x) ((x) << S_DROPPED_DB)
+#define F_DROPPED_DB V_DROPPED_DB(1U)
+
+#define A_SGE_DROPPED_DOORBELL 0x10ac
+#define A_SGE_DOORBELL_THROTTLE_CONTROL 0x10b0
+
+#define S_THROTTLE_COUNT 1
+#define M_THROTTLE_COUNT 0xfffU
+#define V_THROTTLE_COUNT(x) ((x) << S_THROTTLE_COUNT)
+#define G_THROTTLE_COUNT(x) (((x) >> S_THROTTLE_COUNT) & M_THROTTLE_COUNT)
+
+#define S_THROTTLE_ENABLE 0
+#define V_THROTTLE_ENABLE(x) ((x) << S_THROTTLE_ENABLE)
+#define F_THROTTLE_ENABLE V_THROTTLE_ENABLE(1U)
+
+#define A_SGE_ITP_CONTROL 0x10b4
+
+#define S_CRITICAL_TIME 10
+#define M_CRITICAL_TIME 0x7fffU
+#define V_CRITICAL_TIME(x) ((x) << S_CRITICAL_TIME)
+#define G_CRITICAL_TIME(x) (((x) >> S_CRITICAL_TIME) & M_CRITICAL_TIME)
+
+#define S_LL_EMPTY 4
+#define M_LL_EMPTY 0x3fU
+#define V_LL_EMPTY(x) ((x) << S_LL_EMPTY)
+#define G_LL_EMPTY(x) (((x) >> S_LL_EMPTY) & M_LL_EMPTY)
+
+#define S_LL_READ_WAIT_DISABLE 0
+#define V_LL_READ_WAIT_DISABLE(x) ((x) << S_LL_READ_WAIT_DISABLE)
+#define F_LL_READ_WAIT_DISABLE V_LL_READ_WAIT_DISABLE(1U)
+
+#define A_SGE_TIMER_VALUE_0_AND_1 0x10b8
+
+#define S_TIMERVALUE0 16
+#define M_TIMERVALUE0 0xffffU
+#define V_TIMERVALUE0(x) ((x) << S_TIMERVALUE0)
+#define G_TIMERVALUE0(x) (((x) >> S_TIMERVALUE0) & M_TIMERVALUE0)
+
+#define S_TIMERVALUE1 0
+#define M_TIMERVALUE1 0xffffU
+#define V_TIMERVALUE1(x) ((x) << S_TIMERVALUE1)
+#define G_TIMERVALUE1(x) (((x) >> S_TIMERVALUE1) & M_TIMERVALUE1)
+
+#define A_SGE_TIMER_VALUE_2_AND_3 0x10bc
+
+#define S_TIMERVALUE2 16
+#define M_TIMERVALUE2 0xffffU
+#define V_TIMERVALUE2(x) ((x) << S_TIMERVALUE2)
+#define G_TIMERVALUE2(x) (((x) >> S_TIMERVALUE2) & M_TIMERVALUE2)
+
+#define S_TIMERVALUE3 0
+#define M_TIMERVALUE3 0xffffU
+#define V_TIMERVALUE3(x) ((x) << S_TIMERVALUE3)
+#define G_TIMERVALUE3(x) (((x) >> S_TIMERVALUE3) & M_TIMERVALUE3)
+
+#define A_SGE_TIMER_VALUE_4_AND_5 0x10c0
+
+#define S_TIMERVALUE4 16
+#define M_TIMERVALUE4 0xffffU
+#define V_TIMERVALUE4(x) ((x) << S_TIMERVALUE4)
+#define G_TIMERVALUE4(x) (((x) >> S_TIMERVALUE4) & M_TIMERVALUE4)
+
+#define S_TIMERVALUE5 0
+#define M_TIMERVALUE5 0xffffU
+#define V_TIMERVALUE5(x) ((x) << S_TIMERVALUE5)
+#define G_TIMERVALUE5(x) (((x) >> S_TIMERVALUE5) & M_TIMERVALUE5)
+
+#define A_SGE_PD_RSP_CREDIT01 0x10c4
+
+#define S_RSPCREDITEN0 31
+#define V_RSPCREDITEN0(x) ((x) << S_RSPCREDITEN0)
+#define F_RSPCREDITEN0 V_RSPCREDITEN0(1U)
+
+#define S_MAXTAG0 24
+#define M_MAXTAG0 0x7fU
+#define V_MAXTAG0(x) ((x) << S_MAXTAG0)
+#define G_MAXTAG0(x) (((x) >> S_MAXTAG0) & M_MAXTAG0)
+
+#define S_MAXRSPCNT0 16
+#define M_MAXRSPCNT0 0xffU
+#define V_MAXRSPCNT0(x) ((x) << S_MAXRSPCNT0)
+#define G_MAXRSPCNT0(x) (((x) >> S_MAXRSPCNT0) & M_MAXRSPCNT0)
+
+#define S_RSPCREDITEN1 15
+#define V_RSPCREDITEN1(x) ((x) << S_RSPCREDITEN1)
+#define F_RSPCREDITEN1 V_RSPCREDITEN1(1U)
+
+#define S_MAXTAG1 8
+#define M_MAXTAG1 0x7fU
+#define V_MAXTAG1(x) ((x) << S_MAXTAG1)
+#define G_MAXTAG1(x) (((x) >> S_MAXTAG1) & M_MAXTAG1)
+
+#define S_MAXRSPCNT1 0
+#define M_MAXRSPCNT1 0xffU
+#define V_MAXRSPCNT1(x) ((x) << S_MAXRSPCNT1)
+#define G_MAXRSPCNT1(x) (((x) >> S_MAXRSPCNT1) & M_MAXRSPCNT1)
+
+#define A_SGE_PD_RSP_CREDIT23 0x10c8
+
+#define S_RSPCREDITEN2 31
+#define V_RSPCREDITEN2(x) ((x) << S_RSPCREDITEN2)
+#define F_RSPCREDITEN2 V_RSPCREDITEN2(1U)
+
+#define S_MAXTAG2 24
+#define M_MAXTAG2 0x7fU
+#define V_MAXTAG2(x) ((x) << S_MAXTAG2)
+#define G_MAXTAG2(x) (((x) >> S_MAXTAG2) & M_MAXTAG2)
+
+#define S_MAXRSPCNT2 16
+#define M_MAXRSPCNT2 0xffU
+#define V_MAXRSPCNT2(x) ((x) << S_MAXRSPCNT2)
+#define G_MAXRSPCNT2(x) (((x) >> S_MAXRSPCNT2) & M_MAXRSPCNT2)
+
+#define S_RSPCREDITEN3 15
+#define V_RSPCREDITEN3(x) ((x) << S_RSPCREDITEN3)
+#define F_RSPCREDITEN3 V_RSPCREDITEN3(1U)
+
+#define S_MAXTAG3 8
+#define M_MAXTAG3 0x7fU
+#define V_MAXTAG3(x) ((x) << S_MAXTAG3)
+#define G_MAXTAG3(x) (((x) >> S_MAXTAG3) & M_MAXTAG3)
+
+#define S_MAXRSPCNT3 0
+#define M_MAXRSPCNT3 0xffU
+#define V_MAXRSPCNT3(x) ((x) << S_MAXRSPCNT3)
+#define G_MAXRSPCNT3(x) (((x) >> S_MAXRSPCNT3) & M_MAXRSPCNT3)
+
+#define A_SGE_DEBUG_INDEX 0x10cc
+#define A_SGE_DEBUG_DATA_HIGH 0x10d0
+#define A_SGE_DEBUG_DATA_LOW 0x10d4
+#define A_SGE_REVISION 0x10d8
+#define A_SGE_INT_CAUSE4 0x10dc
+
+#define S_ERR_BAD_UPFL_INC_CREDIT3 8
+#define V_ERR_BAD_UPFL_INC_CREDIT3(x) ((x) << S_ERR_BAD_UPFL_INC_CREDIT3)
+#define F_ERR_BAD_UPFL_INC_CREDIT3 V_ERR_BAD_UPFL_INC_CREDIT3(1U)
+
+#define S_ERR_BAD_UPFL_INC_CREDIT2 7
+#define V_ERR_BAD_UPFL_INC_CREDIT2(x) ((x) << S_ERR_BAD_UPFL_INC_CREDIT2)
+#define F_ERR_BAD_UPFL_INC_CREDIT2 V_ERR_BAD_UPFL_INC_CREDIT2(1U)
+
+#define S_ERR_BAD_UPFL_INC_CREDIT1 6
+#define V_ERR_BAD_UPFL_INC_CREDIT1(x) ((x) << S_ERR_BAD_UPFL_INC_CREDIT1)
+#define F_ERR_BAD_UPFL_INC_CREDIT1 V_ERR_BAD_UPFL_INC_CREDIT1(1U)
+
+#define S_ERR_BAD_UPFL_INC_CREDIT0 5
+#define V_ERR_BAD_UPFL_INC_CREDIT0(x) ((x) << S_ERR_BAD_UPFL_INC_CREDIT0)
+#define F_ERR_BAD_UPFL_INC_CREDIT0 V_ERR_BAD_UPFL_INC_CREDIT0(1U)
+
+#define S_ERR_PHYSADDR_LEN0_IDMA1 4
+#define V_ERR_PHYSADDR_LEN0_IDMA1(x) ((x) << S_ERR_PHYSADDR_LEN0_IDMA1)
+#define F_ERR_PHYSADDR_LEN0_IDMA1 V_ERR_PHYSADDR_LEN0_IDMA1(1U)
+
+#define S_ERR_PHYSADDR_LEN0_IDMA0 3
+#define V_ERR_PHYSADDR_LEN0_IDMA0(x) ((x) << S_ERR_PHYSADDR_LEN0_IDMA0)
+#define F_ERR_PHYSADDR_LEN0_IDMA0 V_ERR_PHYSADDR_LEN0_IDMA0(1U)
+
+#define S_ERR_FLM_INVALID_PKT_DROP1 2
+#define V_ERR_FLM_INVALID_PKT_DROP1(x) ((x) << S_ERR_FLM_INVALID_PKT_DROP1)
+#define F_ERR_FLM_INVALID_PKT_DROP1 V_ERR_FLM_INVALID_PKT_DROP1(1U)
+
+#define S_ERR_FLM_INVALID_PKT_DROP0 1
+#define V_ERR_FLM_INVALID_PKT_DROP0(x) ((x) << S_ERR_FLM_INVALID_PKT_DROP0)
+#define F_ERR_FLM_INVALID_PKT_DROP0 V_ERR_FLM_INVALID_PKT_DROP0(1U)
+
+#define S_ERR_UNEXPECTED_TIMER 0
+#define V_ERR_UNEXPECTED_TIMER(x) ((x) << S_ERR_UNEXPECTED_TIMER)
+#define F_ERR_UNEXPECTED_TIMER V_ERR_UNEXPECTED_TIMER(1U)
+
+#define A_SGE_INT_ENABLE4 0x10e0
+#define A_SGE_STAT_TOTAL 0x10e4
+#define A_SGE_STAT_MATCH 0x10e8
+#define A_SGE_STAT_CFG 0x10ec
+
+#define S_ITPOPMODE 8
+#define V_ITPOPMODE(x) ((x) << S_ITPOPMODE)
+#define F_ITPOPMODE V_ITPOPMODE(1U)
+
+#define S_EGRCTXTOPMODE 6
+#define M_EGRCTXTOPMODE 0x3U
+#define V_EGRCTXTOPMODE(x) ((x) << S_EGRCTXTOPMODE)
+#define G_EGRCTXTOPMODE(x) (((x) >> S_EGRCTXTOPMODE) & M_EGRCTXTOPMODE)
+
+#define S_INGCTXTOPMODE 4
+#define M_INGCTXTOPMODE 0x3U
+#define V_INGCTXTOPMODE(x) ((x) << S_INGCTXTOPMODE)
+#define G_INGCTXTOPMODE(x) (((x) >> S_INGCTXTOPMODE) & M_INGCTXTOPMODE)
+
+#define S_STATMODE 2
+#define M_STATMODE 0x3U
+#define V_STATMODE(x) ((x) << S_STATMODE)
+#define G_STATMODE(x) (((x) >> S_STATMODE) & M_STATMODE)
+
+#define S_STATSOURCE 0
+#define M_STATSOURCE 0x3U
+#define V_STATSOURCE(x) ((x) << S_STATSOURCE)
+#define G_STATSOURCE(x) (((x) >> S_STATSOURCE) & M_STATSOURCE)
+
+#define A_SGE_HINT_CFG 0x10f0
+
+#define S_HINTSALLOWEDNOHDR 6
+#define M_HINTSALLOWEDNOHDR 0x3fU
+#define V_HINTSALLOWEDNOHDR(x) ((x) << S_HINTSALLOWEDNOHDR)
+#define G_HINTSALLOWEDNOHDR(x) (((x) >> S_HINTSALLOWEDNOHDR) & M_HINTSALLOWEDNOHDR)
+
+#define S_HINTSALLOWEDHDR 0
+#define M_HINTSALLOWEDHDR 0x3fU
+#define V_HINTSALLOWEDHDR(x) ((x) << S_HINTSALLOWEDHDR)
+#define G_HINTSALLOWEDHDR(x) (((x) >> S_HINTSALLOWEDHDR) & M_HINTSALLOWEDHDR)
+
+#define A_SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4
+#define A_SGE_INGRESS_QUEUES_PER_PAGE_VF 0x10f8
+#define A_SGE_PD_WRR_CONFIG 0x10fc
+
+#define S_EDMA_WEIGHT 0
+#define M_EDMA_WEIGHT 0x3fU
+#define V_EDMA_WEIGHT(x) ((x) << S_EDMA_WEIGHT)
+#define G_EDMA_WEIGHT(x) (((x) >> S_EDMA_WEIGHT) & M_EDMA_WEIGHT)
+
+#define A_SGE_ERROR_STATS 0x1100
+
+#define S_UNCAPTURED_ERROR 18
+#define V_UNCAPTURED_ERROR(x) ((x) << S_UNCAPTURED_ERROR)
+#define F_UNCAPTURED_ERROR V_UNCAPTURED_ERROR(1U)
+
+#define S_ERROR_QID_VALID 17
+#define V_ERROR_QID_VALID(x) ((x) << S_ERROR_QID_VALID)
+#define F_ERROR_QID_VALID V_ERROR_QID_VALID(1U)
+
+#define S_ERROR_QID 0
+#define M_ERROR_QID 0x1ffffU
+#define V_ERROR_QID(x) ((x) << S_ERROR_QID)
+#define G_ERROR_QID(x) (((x) >> S_ERROR_QID) & M_ERROR_QID)
+
+#define A_SGE_SHARED_TAG_CHAN_CFG 0x1104
+
+#define S_MINTAG3 24
+#define M_MINTAG3 0xffU
+#define V_MINTAG3(x) ((x) << S_MINTAG3)
+#define G_MINTAG3(x) (((x) >> S_MINTAG3) & M_MINTAG3)
+
+#define S_MINTAG2 16
+#define M_MINTAG2 0xffU
+#define V_MINTAG2(x) ((x) << S_MINTAG2)
+#define G_MINTAG2(x) (((x) >> S_MINTAG2) & M_MINTAG2)
+
+#define S_MINTAG1 8
+#define M_MINTAG1 0xffU
+#define V_MINTAG1(x) ((x) << S_MINTAG1)
+#define G_MINTAG1(x) (((x) >> S_MINTAG1) & M_MINTAG1)
+
+#define S_MINTAG0 0
+#define M_MINTAG0 0xffU
+#define V_MINTAG0(x) ((x) << S_MINTAG0)
+#define G_MINTAG0(x) (((x) >> S_MINTAG0) & M_MINTAG0)
+
+#define A_SGE_SHARED_TAG_POOL_CFG 0x1108
+
+#define S_TAGPOOLTOTAL 0
+#define M_TAGPOOLTOTAL 0xffU
+#define V_TAGPOOLTOTAL(x) ((x) << S_TAGPOOLTOTAL)
+#define G_TAGPOOLTOTAL(x) (((x) >> S_TAGPOOLTOTAL) & M_TAGPOOLTOTAL)
+
+#define A_SGE_PC0_REQ_BIST_CMD 0x1180
+#define A_SGE_PC0_REQ_BIST_ERROR_CNT 0x1184
+#define A_SGE_PC1_REQ_BIST_CMD 0x1190
+#define A_SGE_PC1_REQ_BIST_ERROR_CNT 0x1194
+#define A_SGE_PC0_RSP_BIST_CMD 0x11a0
+#define A_SGE_PC0_RSP_BIST_ERROR_CNT 0x11a4
+#define A_SGE_PC1_RSP_BIST_CMD 0x11b0
+#define A_SGE_PC1_RSP_BIST_ERROR_CNT 0x11b4
+#define A_SGE_CTXT_CMD 0x11fc
+
+#define S_BUSY 31
+#define V_BUSY(x) ((x) << S_BUSY)
+#define F_BUSY V_BUSY(1U)
+
+#define S_CTXTOP 28
+#define M_CTXTOP 0x3U
+#define V_CTXTOP(x) ((x) << S_CTXTOP)
+#define G_CTXTOP(x) (((x) >> S_CTXTOP) & M_CTXTOP)
+
+#define S_CTXTTYPE 24
+#define M_CTXTTYPE 0x3U
+#define V_CTXTTYPE(x) ((x) << S_CTXTTYPE)
+#define G_CTXTTYPE(x) (((x) >> S_CTXTTYPE) & M_CTXTTYPE)
+
+#define S_CTXTQID 0
+#define M_CTXTQID 0x1ffffU
+#define V_CTXTQID(x) ((x) << S_CTXTQID)
+#define G_CTXTQID(x) (((x) >> S_CTXTQID) & M_CTXTQID)
+
+#define A_SGE_CTXT_DATA0 0x1200
+#define A_SGE_CTXT_DATA1 0x1204
+#define A_SGE_CTXT_DATA2 0x1208
+#define A_SGE_CTXT_DATA3 0x120c
+#define A_SGE_CTXT_DATA4 0x1210
+#define A_SGE_CTXT_DATA5 0x1214
+#define A_SGE_CTXT_DATA6 0x1218
+#define A_SGE_CTXT_DATA7 0x121c
+#define A_SGE_CTXT_MASK0 0x1220
+#define A_SGE_CTXT_MASK1 0x1224
+#define A_SGE_CTXT_MASK2 0x1228
+#define A_SGE_CTXT_MASK3 0x122c
+#define A_SGE_CTXT_MASK4 0x1230
+#define A_SGE_CTXT_MASK5 0x1234
+#define A_SGE_CTXT_MASK6 0x1238
+#define A_SGE_CTXT_MASK7 0x123c
+#define A_SGE_QUEUE_BASE_MAP_HIGH 0x1300
+
+#define S_EGRESS_LOG2SIZE 27
+#define M_EGRESS_LOG2SIZE 0x1fU
+#define V_EGRESS_LOG2SIZE(x) ((x) << S_EGRESS_LOG2SIZE)
+#define G_EGRESS_LOG2SIZE(x) (((x) >> S_EGRESS_LOG2SIZE) & M_EGRESS_LOG2SIZE)
+
+#define S_EGRESS_BASE 10
+#define M_EGRESS_BASE 0x1ffffU
+#define V_EGRESS_BASE(x) ((x) << S_EGRESS_BASE)
+#define G_EGRESS_BASE(x) (((x) >> S_EGRESS_BASE) & M_EGRESS_BASE)
+
+#define S_INGRESS2_LOG2SIZE 5
+#define M_INGRESS2_LOG2SIZE 0x1fU
+#define V_INGRESS2_LOG2SIZE(x) ((x) << S_INGRESS2_LOG2SIZE)
+#define G_INGRESS2_LOG2SIZE(x) (((x) >> S_INGRESS2_LOG2SIZE) & M_INGRESS2_LOG2SIZE)
+
+#define S_INGRESS1_LOG2SIZE 0
+#define M_INGRESS1_LOG2SIZE 0x1fU
+#define V_INGRESS1_LOG2SIZE(x) ((x) << S_INGRESS1_LOG2SIZE)
+#define G_INGRESS1_LOG2SIZE(x) (((x) >> S_INGRESS1_LOG2SIZE) & M_INGRESS1_LOG2SIZE)
+
+#define A_SGE_QUEUE_BASE_MAP_LOW 0x1304
+
+#define S_INGRESS2_BASE 16
+#define M_INGRESS2_BASE 0xffffU
+#define V_INGRESS2_BASE(x) ((x) << S_INGRESS2_BASE)
+#define G_INGRESS2_BASE(x) (((x) >> S_INGRESS2_BASE) & M_INGRESS2_BASE)
+
+#define S_INGRESS1_BASE 0
+#define M_INGRESS1_BASE 0xffffU
+#define V_INGRESS1_BASE(x) ((x) << S_INGRESS1_BASE)
+#define G_INGRESS1_BASE(x) (((x) >> S_INGRESS1_BASE) & M_INGRESS1_BASE)
+
+#define A_SGE_LA_RDPTR_0 0x1800
+#define A_SGE_LA_RDDATA_0 0x1804
+#define A_SGE_LA_WRPTR_0 0x1808
+#define A_SGE_LA_RESERVED_0 0x180c
+#define A_SGE_LA_RDPTR_1 0x1810
+#define A_SGE_LA_RDDATA_1 0x1814
+#define A_SGE_LA_WRPTR_1 0x1818
+#define A_SGE_LA_RESERVED_1 0x181c
+#define A_SGE_LA_RDPTR_2 0x1820
+#define A_SGE_LA_RDDATA_2 0x1824
+#define A_SGE_LA_WRPTR_2 0x1828
+#define A_SGE_LA_RESERVED_2 0x182c
+#define A_SGE_LA_RDPTR_3 0x1830
+#define A_SGE_LA_RDDATA_3 0x1834
+#define A_SGE_LA_WRPTR_3 0x1838
+#define A_SGE_LA_RESERVED_3 0x183c
+#define A_SGE_LA_RDPTR_4 0x1840
+#define A_SGE_LA_RDDATA_4 0x1844
+#define A_SGE_LA_WRPTR_4 0x1848
+#define A_SGE_LA_RESERVED_4 0x184c
+#define A_SGE_LA_RDPTR_5 0x1850
+#define A_SGE_LA_RDDATA_5 0x1854
+#define A_SGE_LA_WRPTR_5 0x1858
+#define A_SGE_LA_RESERVED_5 0x185c
+#define A_SGE_LA_RDPTR_6 0x1860
+#define A_SGE_LA_RDDATA_6 0x1864
+#define A_SGE_LA_WRPTR_6 0x1868
+#define A_SGE_LA_RESERVED_6 0x186c
+#define A_SGE_LA_RDPTR_7 0x1870
+#define A_SGE_LA_RDDATA_7 0x1874
+#define A_SGE_LA_WRPTR_7 0x1878
+#define A_SGE_LA_RESERVED_7 0x187c
+#define A_SGE_LA_RDPTR_8 0x1880
+#define A_SGE_LA_RDDATA_8 0x1884
+#define A_SGE_LA_WRPTR_8 0x1888
+#define A_SGE_LA_RESERVED_8 0x188c
+#define A_SGE_LA_RDPTR_9 0x1890
+#define A_SGE_LA_RDDATA_9 0x1894
+#define A_SGE_LA_WRPTR_9 0x1898
+#define A_SGE_LA_RESERVED_9 0x189c
+#define A_SGE_LA_RDPTR_10 0x18a0
+#define A_SGE_LA_RDDATA_10 0x18a4
+#define A_SGE_LA_WRPTR_10 0x18a8
+#define A_SGE_LA_RESERVED_10 0x18ac
+#define A_SGE_LA_RDPTR_11 0x18b0
+#define A_SGE_LA_RDDATA_11 0x18b4
+#define A_SGE_LA_WRPTR_11 0x18b8
+#define A_SGE_LA_RESERVED_11 0x18bc
+#define A_SGE_LA_RDPTR_12 0x18c0
+#define A_SGE_LA_RDDATA_12 0x18c4
+#define A_SGE_LA_WRPTR_12 0x18c8
+#define A_SGE_LA_RESERVED_12 0x18cc
+#define A_SGE_LA_RDPTR_13 0x18d0
+#define A_SGE_LA_RDDATA_13 0x18d4
+#define A_SGE_LA_WRPTR_13 0x18d8
+#define A_SGE_LA_RESERVED_13 0x18dc
+#define A_SGE_LA_RDPTR_14 0x18e0
+#define A_SGE_LA_RDDATA_14 0x18e4
+#define A_SGE_LA_WRPTR_14 0x18e8
+#define A_SGE_LA_RESERVED_14 0x18ec
+#define A_SGE_LA_RDPTR_15 0x18f0
+#define A_SGE_LA_RDDATA_15 0x18f4
+#define A_SGE_LA_WRPTR_15 0x18f8
+#define A_SGE_LA_RESERVED_15 0x18fc
+
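(Aside, not part of the committed header: every field in this file follows one naming convention. S_FOO is the field's bit offset within its 32-bit register, M_FOO is the unshifted width mask, V_FOO(x) places a value into the field, G_FOO(x) extracts it, and F_FOO is the set form of a single-bit field. The minimal standalone C sketch below shows how those macros pack and unpack fields; it assumes only that this t4_regs.h is on the include path, and the register value is a plain variable rather than a real device read.)

	/* Illustrative sketch of the S_/M_/V_/G_/F_ bitfield macros. */
	#include <assert.h>
	#include <stdint.h>

	#include "t4_regs.h"	/* assumed include path for this header */

	int
	main(void)
	{
		uint32_t v = 0;

		/* Pack two 6-bit fields of A_SGE_INGRESS_RX_THRESHOLD. */
		v |= V_THRESHOLD_0(5) | V_THRESHOLD_1(12);

		/* The matching G_ accessors shift and mask the field back out. */
		assert(G_THRESHOLD_0(v) == 5);
		assert(G_THRESHOLD_1(v) == 12);

		/* Single-bit fields are tested and set with the F_ form. */
		v |= F_NOCOALESCE;
		assert((v & F_NOCOALESCE) != 0);

		return (0);
	}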
+/* registers for module PCIE */
+#define PCIE_BASE_ADDR 0x3000
+
+#define A_PCIE_PF_CFG 0x40
+
+#define S_INTXSTAT 16
+#define V_INTXSTAT(x) ((x) << S_INTXSTAT)
+#define F_INTXSTAT V_INTXSTAT(1U)
+
+#define S_AUXPWRPMEN 15
+#define V_AUXPWRPMEN(x) ((x) << S_AUXPWRPMEN)
+#define F_AUXPWRPMEN V_AUXPWRPMEN(1U)
+
+#define S_NOSOFTRESET 14
+#define V_NOSOFTRESET(x) ((x) << S_NOSOFTRESET)
+#define F_NOSOFTRESET V_NOSOFTRESET(1U)
+
+#define S_AIVEC 4
+#define M_AIVEC 0x3ffU
+#define V_AIVEC(x) ((x) << S_AIVEC)
+#define G_AIVEC(x) (((x) >> S_AIVEC) & M_AIVEC)
+
+#define S_INTXTYPE 2
+#define M_INTXTYPE 0x3U
+#define V_INTXTYPE(x) ((x) << S_INTXTYPE)
+#define G_INTXTYPE(x) (((x) >> S_INTXTYPE) & M_INTXTYPE)
+
+#define S_D3HOTEN 1
+#define V_D3HOTEN(x) ((x) << S_D3HOTEN)
+#define F_D3HOTEN V_D3HOTEN(1U)
+
+#define S_CLIDECEN 0
+#define V_CLIDECEN(x) ((x) << S_CLIDECEN)
+#define F_CLIDECEN V_CLIDECEN(1U)
+
+#define A_PCIE_PF_CLI 0x44
+#define A_PCIE_PF_GEN_MSG 0x48
+
+#define S_MSGTYPE 0
+#define M_MSGTYPE 0xffU
+#define V_MSGTYPE(x) ((x) << S_MSGTYPE)
+#define G_MSGTYPE(x) (((x) >> S_MSGTYPE) & M_MSGTYPE)
+
+#define A_PCIE_PF_EXPROM_OFST 0x4c
+
+#define S_OFFSET 10
+#define M_OFFSET 0x3fffU
+#define V_OFFSET(x) ((x) << S_OFFSET)
+#define G_OFFSET(x) (((x) >> S_OFFSET) & M_OFFSET)
+
+#define A_PCIE_INT_ENABLE 0x3000
+
+#define S_NONFATALERR 30
+#define V_NONFATALERR(x) ((x) << S_NONFATALERR)
+#define F_NONFATALERR V_NONFATALERR(1U)
+
+#define S_UNXSPLCPLERR 29
+#define V_UNXSPLCPLERR(x) ((x) << S_UNXSPLCPLERR)
+#define F_UNXSPLCPLERR V_UNXSPLCPLERR(1U)
+
+#define S_PCIEPINT 28
+#define V_PCIEPINT(x) ((x) << S_PCIEPINT)
+#define F_PCIEPINT V_PCIEPINT(1U)
+
+#define S_PCIESINT 27
+#define V_PCIESINT(x) ((x) << S_PCIESINT)
+#define F_PCIESINT V_PCIESINT(1U)
+
+#define S_RPLPERR 26
+#define V_RPLPERR(x) ((x) << S_RPLPERR)
+#define F_RPLPERR V_RPLPERR(1U)
+
+#define S_RXWRPERR 25
+#define V_RXWRPERR(x) ((x) << S_RXWRPERR)
+#define F_RXWRPERR V_RXWRPERR(1U)
+
+#define S_RXCPLPERR 24
+#define V_RXCPLPERR(x) ((x) << S_RXCPLPERR)
+#define F_RXCPLPERR V_RXCPLPERR(1U)
+
+#define S_PIOTAGPERR 23
+#define V_PIOTAGPERR(x) ((x) << S_PIOTAGPERR)
+#define F_PIOTAGPERR V_PIOTAGPERR(1U)
+
+#define S_MATAGPERR 22
+#define V_MATAGPERR(x) ((x) << S_MATAGPERR)
+#define F_MATAGPERR V_MATAGPERR(1U)
+
+#define S_INTXCLRPERR 21
+#define V_INTXCLRPERR(x) ((x) << S_INTXCLRPERR)
+#define F_INTXCLRPERR V_INTXCLRPERR(1U)
+
+#define S_FIDPERR 20
+#define V_FIDPERR(x) ((x) << S_FIDPERR)
+#define F_FIDPERR V_FIDPERR(1U)
+
+#define S_CFGSNPPERR 19
+#define V_CFGSNPPERR(x) ((x) << S_CFGSNPPERR)
+#define F_CFGSNPPERR V_CFGSNPPERR(1U)
+
+#define S_HRSPPERR 18
+#define V_HRSPPERR(x) ((x) << S_HRSPPERR)
+#define F_HRSPPERR V_HRSPPERR(1U)
+
+#define S_HREQPERR 17
+#define V_HREQPERR(x) ((x) << S_HREQPERR)
+#define F_HREQPERR V_HREQPERR(1U)
+
+#define S_HCNTPERR 16
+#define V_HCNTPERR(x) ((x) << S_HCNTPERR)
+#define F_HCNTPERR V_HCNTPERR(1U)
+
+#define S_DRSPPERR 15
+#define V_DRSPPERR(x) ((x) << S_DRSPPERR)
+#define F_DRSPPERR V_DRSPPERR(1U)
+
+#define S_DREQPERR 14
+#define V_DREQPERR(x) ((x) << S_DREQPERR)
+#define F_DREQPERR V_DREQPERR(1U)
+
+#define S_DCNTPERR 13
+#define V_DCNTPERR(x) ((x) << S_DCNTPERR)
+#define F_DCNTPERR V_DCNTPERR(1U)
+
+#define S_CRSPPERR 12
+#define V_CRSPPERR(x) ((x) << S_CRSPPERR)
+#define F_CRSPPERR V_CRSPPERR(1U)
+
+#define S_CREQPERR 11
+#define V_CREQPERR(x) ((x) << S_CREQPERR)
+#define F_CREQPERR V_CREQPERR(1U)
+
+#define S_CCNTPERR 10
+#define V_CCNTPERR(x) ((x) << S_CCNTPERR)
+#define F_CCNTPERR V_CCNTPERR(1U)
+
+#define S_TARTAGPERR 9
+#define V_TARTAGPERR(x) ((x) << S_TARTAGPERR)
+#define F_TARTAGPERR V_TARTAGPERR(1U)
+
+#define S_PIOREQPERR 8
+#define V_PIOREQPERR(x) ((x) << S_PIOREQPERR)
+#define F_PIOREQPERR V_PIOREQPERR(1U)
+
+#define S_PIOCPLPERR 7
+#define V_PIOCPLPERR(x) ((x) << S_PIOCPLPERR)
+#define F_PIOCPLPERR V_PIOCPLPERR(1U)
+
+#define S_MSIXDIPERR 6
+#define V_MSIXDIPERR(x) ((x) << S_MSIXDIPERR)
+#define F_MSIXDIPERR V_MSIXDIPERR(1U)
+
+#define S_MSIXDATAPERR 5
+#define V_MSIXDATAPERR(x) ((x) << S_MSIXDATAPERR)
+#define F_MSIXDATAPERR V_MSIXDATAPERR(1U)
+
+#define S_MSIXADDRHPERR 4
+#define V_MSIXADDRHPERR(x) ((x) << S_MSIXADDRHPERR)
+#define F_MSIXADDRHPERR V_MSIXADDRHPERR(1U)
+
+#define S_MSIXADDRLPERR 3
+#define V_MSIXADDRLPERR(x) ((x) << S_MSIXADDRLPERR)
+#define F_MSIXADDRLPERR V_MSIXADDRLPERR(1U)
+
+#define S_MSIDATAPERR 2
+#define V_MSIDATAPERR(x) ((x) << S_MSIDATAPERR)
+#define F_MSIDATAPERR V_MSIDATAPERR(1U)
+
+#define S_MSIADDRHPERR 1
+#define V_MSIADDRHPERR(x) ((x) << S_MSIADDRHPERR)
+#define F_MSIADDRHPERR V_MSIADDRHPERR(1U)
+
+#define S_MSIADDRLPERR 0
+#define V_MSIADDRLPERR(x) ((x) << S_MSIADDRLPERR)
+#define F_MSIADDRLPERR V_MSIADDRLPERR(1U)
+
+#define A_PCIE_INT_CAUSE 0x3004
+#define A_PCIE_PERR_ENABLE 0x3008
+#define A_PCIE_PERR_INJECT 0x300c
+
+#define S_IDE 0
+#define V_IDE(x) ((x) << S_IDE)
+#define F_IDE V_IDE(1U)
+
+#define A_PCIE_NONFAT_ERR 0x3010
+
+#define S_RDRSPERR 9
+#define V_RDRSPERR(x) ((x) << S_RDRSPERR)
+#define F_RDRSPERR V_RDRSPERR(1U)
+
+#define S_VPDRSPERR 8
+#define V_VPDRSPERR(x) ((x) << S_VPDRSPERR)
+#define F_VPDRSPERR V_VPDRSPERR(1U)
+
+#define S_POPD 7
+#define V_POPD(x) ((x) << S_POPD)
+#define F_POPD V_POPD(1U)
+
+#define S_POPH 6
+#define V_POPH(x) ((x) << S_POPH)
+#define F_POPH V_POPH(1U)
+
+#define S_POPC 5
+#define V_POPC(x) ((x) << S_POPC)
+#define F_POPC V_POPC(1U)
+
+#define S_MEMREQ 4
+#define V_MEMREQ(x) ((x) << S_MEMREQ)
+#define F_MEMREQ V_MEMREQ(1U)
+
+#define S_PIOREQ 3
+#define V_PIOREQ(x) ((x) << S_PIOREQ)
+#define F_PIOREQ V_PIOREQ(1U)
+
+#define S_TAGDROP 2
+#define V_TAGDROP(x) ((x) << S_TAGDROP)
+#define F_TAGDROP V_TAGDROP(1U)
+
+#define S_TAGCPL 1
+#define V_TAGCPL(x) ((x) << S_TAGCPL)
+#define F_TAGCPL V_TAGCPL(1U)
+
+#define S_CFGSNP 0
+#define V_CFGSNP(x) ((x) << S_CFGSNP)
+#define F_CFGSNP V_CFGSNP(1U)
+
+#define A_PCIE_CFG 0x3014
+
+#define S_CFGDMAXPYLDSZRX 26
+#define M_CFGDMAXPYLDSZRX 0x7U
+#define V_CFGDMAXPYLDSZRX(x) ((x) << S_CFGDMAXPYLDSZRX)
+#define G_CFGDMAXPYLDSZRX(x) (((x) >> S_CFGDMAXPYLDSZRX) & M_CFGDMAXPYLDSZRX)
+
+#define S_CFGDMAXPYLDSZTX 23
+#define M_CFGDMAXPYLDSZTX 0x7U
+#define V_CFGDMAXPYLDSZTX(x) ((x) << S_CFGDMAXPYLDSZTX)
+#define G_CFGDMAXPYLDSZTX(x) (((x) >> S_CFGDMAXPYLDSZTX) & M_CFGDMAXPYLDSZTX)
+
+#define S_CFGDMAXRDREQSZ 20
+#define M_CFGDMAXRDREQSZ 0x7U
+#define V_CFGDMAXRDREQSZ(x) ((x) << S_CFGDMAXRDREQSZ)
+#define G_CFGDMAXRDREQSZ(x) (((x) >> S_CFGDMAXRDREQSZ) & M_CFGDMAXRDREQSZ)
+
+#define S_MASYNCEN 19
+#define V_MASYNCEN(x) ((x) << S_MASYNCEN)
+#define F_MASYNCEN V_MASYNCEN(1U)
+
+#define S_DCAENDMA 18
+#define V_DCAENDMA(x) ((x) << S_DCAENDMA)
+#define F_DCAENDMA V_DCAENDMA(1U)
+
+#define S_DCAENCMD 17
+#define V_DCAENCMD(x) ((x) << S_DCAENCMD)
+#define F_DCAENCMD V_DCAENCMD(1U)
+
+#define S_VFMSIPNDEN 16
+#define V_VFMSIPNDEN(x) ((x) << S_VFMSIPNDEN)
+#define F_VFMSIPNDEN V_VFMSIPNDEN(1U)
+
+#define S_FORCETXERROR 15
+#define V_FORCETXERROR(x) ((x) << S_FORCETXERROR)
+#define F_FORCETXERROR V_FORCETXERROR(1U)
+
+#define S_VPDREQPROTECT 14
+#define V_VPDREQPROTECT(x) ((x) << S_VPDREQPROTECT)
+#define F_VPDREQPROTECT V_VPDREQPROTECT(1U)
+
+#define S_FIDTABLEINVALID 13
+#define V_FIDTABLEINVALID(x) ((x) << S_FIDTABLEINVALID)
+#define F_FIDTABLEINVALID V_FIDTABLEINVALID(1U)
+
+#define S_BYPASSMSIXCACHE 12
+#define V_BYPASSMSIXCACHE(x) ((x) << S_BYPASSMSIXCACHE)
+#define F_BYPASSMSIXCACHE V_BYPASSMSIXCACHE(1U)
+
+#define S_BYPASSMSICACHE 11
+#define V_BYPASSMSICACHE(x) ((x) << S_BYPASSMSICACHE)
+#define F_BYPASSMSICACHE V_BYPASSMSICACHE(1U)
+
+#define S_SIMSPEED 10
+#define V_SIMSPEED(x) ((x) << S_SIMSPEED)
+#define F_SIMSPEED V_SIMSPEED(1U)
+
+#define S_TC0_STAMP 9
+#define V_TC0_STAMP(x) ((x) << S_TC0_STAMP)
+#define F_TC0_STAMP V_TC0_STAMP(1U)
+
+#define S_AI_TCVAL 6
+#define M_AI_TCVAL 0x7U
+#define V_AI_TCVAL(x) ((x) << S_AI_TCVAL)
+#define G_AI_TCVAL(x) (((x) >> S_AI_TCVAL) & M_AI_TCVAL)
+
+#define S_DMASTOPEN 5
+#define V_DMASTOPEN(x) ((x) << S_DMASTOPEN)
+#define F_DMASTOPEN V_DMASTOPEN(1U)
+
+#define S_DEVSTATERSTMODE 4
+#define V_DEVSTATERSTMODE(x) ((x) << S_DEVSTATERSTMODE)
+#define F_DEVSTATERSTMODE V_DEVSTATERSTMODE(1U)
+
+#define S_HOTRSTPCIECRSTMODE 3
+#define V_HOTRSTPCIECRSTMODE(x) ((x) << S_HOTRSTPCIECRSTMODE)
+#define F_HOTRSTPCIECRSTMODE V_HOTRSTPCIECRSTMODE(1U)
+
+#define S_DLDNPCIECRSTMODE 2
+#define V_DLDNPCIECRSTMODE(x) ((x) << S_DLDNPCIECRSTMODE)
+#define F_DLDNPCIECRSTMODE V_DLDNPCIECRSTMODE(1U)
+
+#define S_DLDNPCIEPRECRSTMODE 1
+#define V_DLDNPCIEPRECRSTMODE(x) ((x) << S_DLDNPCIEPRECRSTMODE)
+#define F_DLDNPCIEPRECRSTMODE V_DLDNPCIEPRECRSTMODE(1U)
+
+#define S_LINKDNRSTEN 0
+#define V_LINKDNRSTEN(x) ((x) << S_LINKDNRSTEN)
+#define F_LINKDNRSTEN V_LINKDNRSTEN(1U)
+
+#define A_PCIE_DMA_CTRL 0x3018
+
+#define S_LITTLEENDIAN 7
+#define V_LITTLEENDIAN(x) ((x) << S_LITTLEENDIAN)
+#define F_LITTLEENDIAN V_LITTLEENDIAN(1U)
+
+#define A_PCIE_DMA_CFG 0x301c
+
+#define S_MAXPYLDSIZE 28
+#define M_MAXPYLDSIZE 0x7U
+#define V_MAXPYLDSIZE(x) ((x) << S_MAXPYLDSIZE)
+#define G_MAXPYLDSIZE(x) (((x) >> S_MAXPYLDSIZE) & M_MAXPYLDSIZE)
+
+#define S_MAXRDREQSIZE 25
+#define M_MAXRDREQSIZE 0x7U
+#define V_MAXRDREQSIZE(x) ((x) << S_MAXRDREQSIZE)
+#define G_MAXRDREQSIZE(x) (((x) >> S_MAXRDREQSIZE) & M_MAXRDREQSIZE)
+
+#define S_DMA_MAXRSPCNT 16
+#define M_DMA_MAXRSPCNT 0x1ffU
+#define V_DMA_MAXRSPCNT(x) ((x) << S_DMA_MAXRSPCNT)
+#define G_DMA_MAXRSPCNT(x) (((x) >> S_DMA_MAXRSPCNT) & M_DMA_MAXRSPCNT)
+
+#define S_DMA_MAXREQCNT 8
+#define M_DMA_MAXREQCNT 0xffU
+#define V_DMA_MAXREQCNT(x) ((x) << S_DMA_MAXREQCNT)
+#define G_DMA_MAXREQCNT(x) (((x) >> S_DMA_MAXREQCNT) & M_DMA_MAXREQCNT)
+
+#define S_MAXTAG 0
+#define M_MAXTAG 0x7fU
+#define V_MAXTAG(x) ((x) << S_MAXTAG)
+#define G_MAXTAG(x) (((x) >> S_MAXTAG) & M_MAXTAG)
+
+#define A_PCIE_DMA_STAT 0x3020
+
+#define S_STATEREQ 28
+#define M_STATEREQ 0xfU
+#define V_STATEREQ(x) ((x) << S_STATEREQ)
+#define G_STATEREQ(x) (((x) >> S_STATEREQ) & M_STATEREQ)
+
+#define S_DMA_RSPCNT 16
+#define M_DMA_RSPCNT 0xfffU
+#define V_DMA_RSPCNT(x) ((x) << S_DMA_RSPCNT)
+#define G_DMA_RSPCNT(x) (((x) >> S_DMA_RSPCNT) & M_DMA_RSPCNT)
+
+#define S_STATEAREQ 13
+#define M_STATEAREQ 0x7U
+#define V_STATEAREQ(x) ((x) << S_STATEAREQ)
+#define G_STATEAREQ(x) (((x) >> S_STATEAREQ) & M_STATEAREQ)
+
+#define S_TAGFREE 12
+#define V_TAGFREE(x) ((x) << S_TAGFREE)
+#define F_TAGFREE V_TAGFREE(1U)
+
+#define S_DMA_REQCNT 0
+#define M_DMA_REQCNT 0x7ffU
+#define V_DMA_REQCNT(x) ((x) << S_DMA_REQCNT)
+#define G_DMA_REQCNT(x) (((x) >> S_DMA_REQCNT) & M_DMA_REQCNT)
+
+#define A_PCIE_CMD_CTRL 0x303c
+#define A_PCIE_CMD_CFG 0x3040
+
+#define S_MAXRSPCNT 16
+#define M_MAXRSPCNT 0xfU
+#define V_MAXRSPCNT(x) ((x) << S_MAXRSPCNT)
+#define G_MAXRSPCNT(x) (((x) >> S_MAXRSPCNT) & M_MAXRSPCNT)
+
+#define S_MAXREQCNT 8
+#define M_MAXREQCNT 0x1fU
+#define V_MAXREQCNT(x) ((x) << S_MAXREQCNT)
+#define G_MAXREQCNT(x) (((x) >> S_MAXREQCNT) & M_MAXREQCNT)
+
+#define A_PCIE_CMD_STAT 0x3044
+
+#define S_RSPCNT 16
+#define M_RSPCNT 0x7fU
+#define V_RSPCNT(x) ((x) << S_RSPCNT)
+#define G_RSPCNT(x) (((x) >> S_RSPCNT) & M_RSPCNT)
+
+#define S_REQCNT 0
+#define M_REQCNT 0xffU
+#define V_REQCNT(x) ((x) << S_REQCNT)
+#define G_REQCNT(x) (((x) >> S_REQCNT) & M_REQCNT)
+
+#define A_PCIE_HMA_CTRL 0x3050
+
+#define S_IPLTSSM 12
+#define M_IPLTSSM 0xfU
+#define V_IPLTSSM(x) ((x) << S_IPLTSSM)
+#define G_IPLTSSM(x) (((x) >> S_IPLTSSM) & M_IPLTSSM)
+
+#define S_IPCONFIGDOWN 8
+#define M_IPCONFIGDOWN 0x7U
+#define V_IPCONFIGDOWN(x) ((x) << S_IPCONFIGDOWN)
+#define G_IPCONFIGDOWN(x) (((x) >> S_IPCONFIGDOWN) & M_IPCONFIGDOWN)
+
+#define A_PCIE_HMA_CFG 0x3054
+
+#define S_HMA_MAXRSPCNT 16
+#define M_HMA_MAXRSPCNT 0x1fU
+#define V_HMA_MAXRSPCNT(x) ((x) << S_HMA_MAXRSPCNT)
+#define G_HMA_MAXRSPCNT(x) (((x) >> S_HMA_MAXRSPCNT) & M_HMA_MAXRSPCNT)
+
+#define A_PCIE_HMA_STAT 0x3058
+
+#define S_HMA_RSPCNT 16
+#define M_HMA_RSPCNT 0xffU
+#define V_HMA_RSPCNT(x) ((x) << S_HMA_RSPCNT)
+#define G_HMA_RSPCNT(x) (((x) >> S_HMA_RSPCNT) & M_HMA_RSPCNT)
+
+#define A_PCIE_PIO_FIFO_CFG 0x305c
+
+#define S_CPLCONFIG 16
+#define M_CPLCONFIG 0xffffU
+#define V_CPLCONFIG(x) ((x) << S_CPLCONFIG)
+#define G_CPLCONFIG(x) (((x) >> S_CPLCONFIG) & M_CPLCONFIG)
+
+#define S_PIOSTOPEN 12
+#define V_PIOSTOPEN(x) ((x) << S_PIOSTOPEN)
+#define F_PIOSTOPEN V_PIOSTOPEN(1U)
+
+#define S_IPLANESWAP 11
+#define V_IPLANESWAP(x) ((x) << S_IPLANESWAP)
+#define F_IPLANESWAP V_IPLANESWAP(1U)
+
+#define S_FORCESTRICTTS1 10
+#define V_FORCESTRICTTS1(x) ((x) << S_FORCESTRICTTS1)
+#define F_FORCESTRICTTS1 V_FORCESTRICTTS1(1U)
+
+#define S_FORCEPROGRESSCNT 0
+#define M_FORCEPROGRESSCNT 0x3ffU
+#define V_FORCEPROGRESSCNT(x) ((x) << S_FORCEPROGRESSCNT)
+#define G_FORCEPROGRESSCNT(x) (((x) >> S_FORCEPROGRESSCNT) & M_FORCEPROGRESSCNT)
+
+#define A_PCIE_CFG_SPACE_REQ 0x3060
+
+#define S_ENABLE 30
+#define V_ENABLE(x) ((x) << S_ENABLE)
+#define F_ENABLE V_ENABLE(1U)
+
+#define S_AI 29
+#define V_AI(x) ((x) << S_AI)
+#define F_AI V_AI(1U)
+
+#define S_LOCALCFG 28
+#define V_LOCALCFG(x) ((x) << S_LOCALCFG)
+#define F_LOCALCFG V_LOCALCFG(1U)
+
+#define S_BUS 20
+#define M_BUS 0xffU
+#define V_BUS(x) ((x) << S_BUS)
+#define G_BUS(x) (((x) >> S_BUS) & M_BUS)
+
+#define S_DEVICE 15
+#define M_DEVICE 0x1fU
+#define V_DEVICE(x) ((x) << S_DEVICE)
+#define G_DEVICE(x) (((x) >> S_DEVICE) & M_DEVICE)
+
+#define S_FUNCTION 12
+#define M_FUNCTION 0x7U
+#define V_FUNCTION(x) ((x) << S_FUNCTION)
+#define G_FUNCTION(x) (((x) >> S_FUNCTION) & M_FUNCTION)
+
+#define S_EXTREGISTER 8
+#define M_EXTREGISTER 0xfU
+#define V_EXTREGISTER(x) ((x) << S_EXTREGISTER)
+#define G_EXTREGISTER(x) (((x) >> S_EXTREGISTER) & M_EXTREGISTER)
+
+#define S_REGISTER 0
+#define M_REGISTER 0xffU
+#define V_REGISTER(x) ((x) << S_REGISTER)
+#define G_REGISTER(x) (((x) >> S_REGISTER) & M_REGISTER)
+
+#define A_PCIE_CFG_SPACE_DATA 0x3064
+#define A_PCIE_MEM_ACCESS_BASE_WIN 0x3068
+
+#define S_PCIEOFST 10
+#define M_PCIEOFST 0x3fffffU
+#define V_PCIEOFST(x) ((x) << S_PCIEOFST)
+#define G_PCIEOFST(x) (((x) >> S_PCIEOFST) & M_PCIEOFST)
+
+#define S_BIR 8
+#define M_BIR 0x3U
+#define V_BIR(x) ((x) << S_BIR)
+#define G_BIR(x) (((x) >> S_BIR) & M_BIR)
+
+#define S_WINDOW 0
+#define M_WINDOW 0xffU
+#define V_WINDOW(x) ((x) << S_WINDOW)
+#define G_WINDOW(x) (((x) >> S_WINDOW) & M_WINDOW)
+
+#define A_PCIE_MEM_ACCESS_OFFSET 0x306c
+#define A_PCIE_MAILBOX_BASE_WIN 0x30a8
+
+#define S_MBOXPCIEOFST 6
+#define M_MBOXPCIEOFST 0x3ffffffU
+#define V_MBOXPCIEOFST(x) ((x) << S_MBOXPCIEOFST)
+#define G_MBOXPCIEOFST(x) (((x) >> S_MBOXPCIEOFST) & M_MBOXPCIEOFST)
+
+#define S_MBOXBIR 4
+#define M_MBOXBIR 0x3U
+#define V_MBOXBIR(x) ((x) << S_MBOXBIR)
+#define G_MBOXBIR(x) (((x) >> S_MBOXBIR) & M_MBOXBIR)
+
+#define S_MBOXWIN 0
+#define M_MBOXWIN 0x3U
+#define V_MBOXWIN(x) ((x) << S_MBOXWIN)
+#define G_MBOXWIN(x) (((x) >> S_MBOXWIN) & M_MBOXWIN)
+
+#define A_PCIE_MAILBOX_OFFSET 0x30ac
+#define A_PCIE_MA_CTRL 0x30b0
+
+#define S_MA_TAGFREE 29
+#define V_MA_TAGFREE(x) ((x) << S_MA_TAGFREE)
+#define F_MA_TAGFREE V_MA_TAGFREE(1U)
+
+#define S_MA_MAXRSPCNT 24
+#define M_MA_MAXRSPCNT 0x1fU
+#define V_MA_MAXRSPCNT(x) ((x) << S_MA_MAXRSPCNT)
+#define G_MA_MAXRSPCNT(x) (((x) >> S_MA_MAXRSPCNT) & M_MA_MAXRSPCNT)
+
+#define S_MA_MAXREQCNT 16
+#define M_MA_MAXREQCNT 0x1fU
+#define V_MA_MAXREQCNT(x) ((x) << S_MA_MAXREQCNT)
+#define G_MA_MAXREQCNT(x) (((x) >> S_MA_MAXREQCNT) & M_MA_MAXREQCNT)
+
+#define S_MA_LE 15
+#define V_MA_LE(x) ((x) << S_MA_LE)
+#define F_MA_LE V_MA_LE(1U)
+
+#define S_MA_MAXPYLDSIZE 12
+#define M_MA_MAXPYLDSIZE 0x7U
+#define V_MA_MAXPYLDSIZE(x) ((x) << S_MA_MAXPYLDSIZE)
+#define G_MA_MAXPYLDSIZE(x) (((x) >> S_MA_MAXPYLDSIZE) & M_MA_MAXPYLDSIZE)
+
+#define S_MA_MAXRDREQSIZE 8
+#define M_MA_MAXRDREQSIZE 0x7U
+#define V_MA_MAXRDREQSIZE(x) ((x) << S_MA_MAXRDREQSIZE)
+#define G_MA_MAXRDREQSIZE(x) (((x) >> S_MA_MAXRDREQSIZE) & M_MA_MAXRDREQSIZE)
+
+#define S_MA_MAXTAG 0
+#define M_MA_MAXTAG 0x1fU
+#define V_MA_MAXTAG(x) ((x) << S_MA_MAXTAG)
+#define G_MA_MAXTAG(x) (((x) >> S_MA_MAXTAG) & M_MA_MAXTAG)
+
+#define A_PCIE_MA_SYNC 0x30b4
+#define A_PCIE_FW 0x30b8
+#define A_PCIE_FW_PF 0x30bc
+#define A_PCIE_PIO_PAUSE 0x30dc
+
+#define S_PIOPAUSEDONE 31
+#define V_PIOPAUSEDONE(x) ((x) << S_PIOPAUSEDONE)
+#define F_PIOPAUSEDONE V_PIOPAUSEDONE(1U)
+
+#define S_PIOPAUSETIME 4
+#define M_PIOPAUSETIME 0xffffffU
+#define V_PIOPAUSETIME(x) ((x) << S_PIOPAUSETIME)
+#define G_PIOPAUSETIME(x) (((x) >> S_PIOPAUSETIME) & M_PIOPAUSETIME)
+
+#define S_PIOPAUSE 0
+#define V_PIOPAUSE(x) ((x) << S_PIOPAUSE)
+#define F_PIOPAUSE V_PIOPAUSE(1U)
+
+#define A_PCIE_SYS_CFG_READY 0x30e0
+#define A_PCIE_STATIC_CFG1 0x30e4
+
+#define S_LINKDOWN_RESET_EN 26
+#define V_LINKDOWN_RESET_EN(x) ((x) << S_LINKDOWN_RESET_EN)
+#define F_LINKDOWN_RESET_EN V_LINKDOWN_RESET_EN(1U)
+
+#define S_IN_WR_DISCONTIG 25
+#define V_IN_WR_DISCONTIG(x) ((x) << S_IN_WR_DISCONTIG)
+#define F_IN_WR_DISCONTIG V_IN_WR_DISCONTIG(1U)
+
+#define S_IN_RD_CPLSIZE 22
+#define M_IN_RD_CPLSIZE 0x7U
+#define V_IN_RD_CPLSIZE(x) ((x) << S_IN_RD_CPLSIZE)
+#define G_IN_RD_CPLSIZE(x) (((x) >> S_IN_RD_CPLSIZE) & M_IN_RD_CPLSIZE)
+
+#define S_IN_RD_BUFMODE 20
+#define M_IN_RD_BUFMODE 0x3U
+#define V_IN_RD_BUFMODE(x) ((x) << S_IN_RD_BUFMODE)
+#define G_IN_RD_BUFMODE(x) (((x) >> S_IN_RD_BUFMODE) & M_IN_RD_BUFMODE)
+
+#define S_GBIF_NPTRANS_TOT 18
+#define M_GBIF_NPTRANS_TOT 0x3U
+#define V_GBIF_NPTRANS_TOT(x) ((x) << S_GBIF_NPTRANS_TOT)
+#define G_GBIF_NPTRANS_TOT(x) (((x) >> S_GBIF_NPTRANS_TOT) & M_GBIF_NPTRANS_TOT)
+
+#define S_IN_PDAT_TOT 15
+#define M_IN_PDAT_TOT 0x7U
+#define V_IN_PDAT_TOT(x) ((x) << S_IN_PDAT_TOT)
+#define G_IN_PDAT_TOT(x) (((x) >> S_IN_PDAT_TOT) & M_IN_PDAT_TOT)
+
+#define S_PCIE_NPTRANS_TOT 12
+#define M_PCIE_NPTRANS_TOT 0x7U
+#define V_PCIE_NPTRANS_TOT(x) ((x) << S_PCIE_NPTRANS_TOT)
+#define G_PCIE_NPTRANS_TOT(x) (((x) >> S_PCIE_NPTRANS_TOT) & M_PCIE_NPTRANS_TOT)
+
+#define S_OUT_PDAT_TOT 9
+#define M_OUT_PDAT_TOT 0x7U
+#define V_OUT_PDAT_TOT(x) ((x) << S_OUT_PDAT_TOT)
+#define G_OUT_PDAT_TOT(x) (((x) >> S_OUT_PDAT_TOT) & M_OUT_PDAT_TOT)
+
+#define S_GBIF_MAX_WRSIZE 6
+#define M_GBIF_MAX_WRSIZE 0x7U
+#define V_GBIF_MAX_WRSIZE(x) ((x) << S_GBIF_MAX_WRSIZE)
+#define G_GBIF_MAX_WRSIZE(x) (((x) >> S_GBIF_MAX_WRSIZE) & M_GBIF_MAX_WRSIZE)
+
+#define S_GBIF_MAX_RDSIZE 3
+#define M_GBIF_MAX_RDSIZE 0x7U
+#define V_GBIF_MAX_RDSIZE(x) ((x) << S_GBIF_MAX_RDSIZE)
+#define G_GBIF_MAX_RDSIZE(x) (((x) >> S_GBIF_MAX_RDSIZE) & M_GBIF_MAX_RDSIZE)
+
+#define S_PCIE_MAX_RDSIZE 0
+#define M_PCIE_MAX_RDSIZE 0x7U
+#define V_PCIE_MAX_RDSIZE(x) ((x) << S_PCIE_MAX_RDSIZE)
+#define G_PCIE_MAX_RDSIZE(x) (((x) >> S_PCIE_MAX_RDSIZE) & M_PCIE_MAX_RDSIZE)
+
+#define A_PCIE_DBG_INDIR_REQ 0x30ec
+
+#define S_DBGENABLE 31
+#define V_DBGENABLE(x) ((x) << S_DBGENABLE)
+#define F_DBGENABLE V_DBGENABLE(1U)
+
+#define S_DBGAUTOINC 30
+#define V_DBGAUTOINC(x) ((x) << S_DBGAUTOINC)
+#define F_DBGAUTOINC V_DBGAUTOINC(1U)
+
+#define S_POINTER 8
+#define M_POINTER 0xffffU
+#define V_POINTER(x) ((x) << S_POINTER)
+#define G_POINTER(x) (((x) >> S_POINTER) & M_POINTER)
+
+#define S_SELECT 0
+#define M_SELECT 0xfU
+#define V_SELECT(x) ((x) << S_SELECT)
+#define G_SELECT(x) (((x) >> S_SELECT) & M_SELECT)
+
+#define A_PCIE_DBG_INDIR_DATA_0 0x30f0
+#define A_PCIE_DBG_INDIR_DATA_1 0x30f4
+#define A_PCIE_DBG_INDIR_DATA_2 0x30f8
+#define A_PCIE_DBG_INDIR_DATA_3 0x30fc
+#define A_PCIE_FUNC_INT_CFG 0x3100
+
+#define S_PBAOFST 28
+#define M_PBAOFST 0xfU
+#define V_PBAOFST(x) ((x) << S_PBAOFST)
+#define G_PBAOFST(x) (((x) >> S_PBAOFST) & M_PBAOFST)
+
+#define S_TABOFST 24
+#define M_TABOFST 0xfU
+#define V_TABOFST(x) ((x) << S_TABOFST)
+#define G_TABOFST(x) (((x) >> S_TABOFST) & M_TABOFST)
+
+#define S_VECNUM 12
+#define M_VECNUM 0x3ffU
+#define V_VECNUM(x) ((x) << S_VECNUM)
+#define G_VECNUM(x) (((x) >> S_VECNUM) & M_VECNUM)
+
+#define S_VECBASE 0
+#define M_VECBASE 0x7ffU
+#define V_VECBASE(x) ((x) << S_VECBASE)
+#define G_VECBASE(x) (((x) >> S_VECBASE) & M_VECBASE)
+
+#define A_PCIE_FUNC_CTL_STAT 0x3104
+
+#define S_SENDFLRRSP 31
+#define V_SENDFLRRSP(x) ((x) << S_SENDFLRRSP)
+#define F_SENDFLRRSP V_SENDFLRRSP(1U)
+
+#define S_IMMFLRRSP 24
+#define V_IMMFLRRSP(x) ((x) << S_IMMFLRRSP)
+#define F_IMMFLRRSP V_IMMFLRRSP(1U)
+
+#define S_TXNDISABLE 20
+#define V_TXNDISABLE(x) ((x) << S_TXNDISABLE)
+#define F_TXNDISABLE V_TXNDISABLE(1U)
+
+#define S_PNDTXNS 8
+#define M_PNDTXNS 0x3ffU
+#define V_PNDTXNS(x) ((x) << S_PNDTXNS)
+#define G_PNDTXNS(x) (((x) >> S_PNDTXNS) & M_PNDTXNS)
+
+#define S_VFVLD 3
+#define V_VFVLD(x) ((x) << S_VFVLD)
+#define F_VFVLD V_VFVLD(1U)
+
+#define S_PFNUM 0
+#define M_PFNUM 0x7U
+#define V_PFNUM(x) ((x) << S_PFNUM)
+#define G_PFNUM(x) (((x) >> S_PFNUM) & M_PFNUM)
+
+#define A_PCIE_FID 0x3900
+
+#define S_PAD 11
+#define V_PAD(x) ((x) << S_PAD)
+#define F_PAD V_PAD(1U)
+
+#define S_TC 8
+#define M_TC 0x7U
+#define V_TC(x) ((x) << S_TC)
+#define G_TC(x) (((x) >> S_TC) & M_TC)
+
+#define S_FUNC 0
+#define M_FUNC 0xffU
+#define V_FUNC(x) ((x) << S_FUNC)
+#define G_FUNC(x) (((x) >> S_FUNC) & M_FUNC)
+
+#define A_PCIE_CORE_UTL_SYSTEM_BUS_CONTROL 0x5900
+
+#define S_SMTD 27
+#define V_SMTD(x) ((x) << S_SMTD)
+#define F_SMTD V_SMTD(1U)
+
+#define S_SSTD 26
+#define V_SSTD(x) ((x) << S_SSTD)
+#define F_SSTD V_SSTD(1U)
+
+#define S_SWD0 23
+#define V_SWD0(x) ((x) << S_SWD0)
+#define F_SWD0 V_SWD0(1U)
+
+#define S_SWD1 22
+#define V_SWD1(x) ((x) << S_SWD1)
+#define F_SWD1 V_SWD1(1U)
+
+#define S_SWD2 21
+#define V_SWD2(x) ((x) << S_SWD2)
+#define F_SWD2 V_SWD2(1U)
+
+#define S_SWD3 20
+#define V_SWD3(x) ((x) << S_SWD3)
+#define F_SWD3 V_SWD3(1U)
+
+#define S_SWD4 19
+#define V_SWD4(x) ((x) << S_SWD4)
+#define F_SWD4 V_SWD4(1U)
+
+#define S_SWD5 18
+#define V_SWD5(x) ((x) << S_SWD5)
+#define F_SWD5 V_SWD5(1U)
+
+#define S_SWD6 17
+#define V_SWD6(x) ((x) << S_SWD6)
+#define F_SWD6 V_SWD6(1U)
+
+#define S_SWD7 16
+#define V_SWD7(x) ((x) << S_SWD7)
+#define F_SWD7 V_SWD7(1U)
+
+#define S_SWD8 15
+#define V_SWD8(x) ((x) << S_SWD8)
+#define F_SWD8 V_SWD8(1U)
+
+#define S_SRD0 13
+#define V_SRD0(x) ((x) << S_SRD0)
+#define F_SRD0 V_SRD0(1U)
+
+#define S_SRD1 12
+#define V_SRD1(x) ((x) << S_SRD1)
+#define F_SRD1 V_SRD1(1U)
+
+#define S_SRD2 11
+#define V_SRD2(x) ((x) << S_SRD2)
+#define F_SRD2 V_SRD2(1U)
+
+#define S_SRD3 10
+#define V_SRD3(x) ((x) << S_SRD3)
+#define F_SRD3 V_SRD3(1U)
+
+#define S_SRD4 9
+#define V_SRD4(x) ((x) << S_SRD4)
+#define F_SRD4 V_SRD4(1U)
+
+#define S_SRD5 8
+#define V_SRD5(x) ((x) << S_SRD5)
+#define F_SRD5 V_SRD5(1U)
+
+#define S_SRD6 7
+#define V_SRD6(x) ((x) << S_SRD6)
+#define F_SRD6 V_SRD6(1U)
+
+#define S_SRD7 6
+#define V_SRD7(x) ((x) << S_SRD7)
+#define F_SRD7 V_SRD7(1U)
+
+#define S_SRD8 5
+#define V_SRD8(x) ((x) << S_SRD8)
+#define F_SRD8 V_SRD8(1U)
+
+#define S_CRRE 3
+#define V_CRRE(x) ((x) << S_CRRE)
+#define F_CRRE V_CRRE(1U)
+
+#define S_CRMC 0
+#define M_CRMC 0x7U
+#define V_CRMC(x) ((x) << S_CRMC)
+#define G_CRMC(x) (((x) >> S_CRMC) & M_CRMC)
+
+#define A_PCIE_CORE_UTL_STATUS 0x5904
+
+#define S_USBP 31
+#define V_USBP(x) ((x) << S_USBP)
+#define F_USBP V_USBP(1U)
+
+#define S_UPEP 30
+#define V_UPEP(x) ((x) << S_UPEP)
+#define F_UPEP V_UPEP(1U)
+
+#define S_RCEP 29
+#define V_RCEP(x) ((x) << S_RCEP)
+#define F_RCEP V_RCEP(1U)
+
+#define S_EPEP 28
+#define V_EPEP(x) ((x) << S_EPEP)
+#define F_EPEP V_EPEP(1U)
+
+#define S_USBS 27
+#define V_USBS(x) ((x) << S_USBS)
+#define F_USBS V_USBS(1U)
+
+#define S_UPES 26
+#define V_UPES(x) ((x) << S_UPES)
+#define F_UPES V_UPES(1U)
+
+#define S_RCES 25
+#define V_RCES(x) ((x) << S_RCES)
+#define F_RCES V_RCES(1U)
+
+#define S_EPES 24
+#define V_EPES(x) ((x) << S_EPES)
+#define F_EPES V_EPES(1U)
+
+#define A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS 0x5908
+
+#define S_RNPP 31
+#define V_RNPP(x) ((x) << S_RNPP)
+#define F_RNPP V_RNPP(1U)
+
+#define S_RPCP 29
+#define V_RPCP(x) ((x) << S_RPCP)
+#define F_RPCP V_RPCP(1U)
+
+#define S_RCIP 27
+#define V_RCIP(x) ((x) << S_RCIP)
+#define F_RCIP V_RCIP(1U)
+
+#define S_RCCP 26
+#define V_RCCP(x) ((x) << S_RCCP)
+#define F_RCCP V_RCCP(1U)
+
+#define S_RFTP 23
+#define V_RFTP(x) ((x) << S_RFTP)
+#define F_RFTP V_RFTP(1U)
+
+#define S_PTRP 20
+#define V_PTRP(x) ((x) << S_PTRP)
+#define F_PTRP V_PTRP(1U)
+
+#define A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_ERROR_SEVERITY 0x590c
+
+#define S_RNPS 31
+#define V_RNPS(x) ((x) << S_RNPS)
+#define F_RNPS V_RNPS(1U)
+
+#define S_RPCS 29
+#define V_RPCS(x) ((x) << S_RPCS)
+#define F_RPCS V_RPCS(1U)
+
+#define S_RCIS 27
+#define V_RCIS(x) ((x) << S_RCIS)
+#define F_RCIS V_RCIS(1U)
+
+#define S_RCCS 26
+#define V_RCCS(x) ((x) << S_RCCS)
+#define F_RCCS V_RCCS(1U)
+
+#define S_RFTS 23
+#define V_RFTS(x) ((x) << S_RFTS)
+#define F_RFTS V_RFTS(1U)
+
+#define A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_INTERRUPT_ENABLE 0x5910
+
+#define S_RNPI 31
+#define V_RNPI(x) ((x) << S_RNPI)
+#define F_RNPI V_RNPI(1U)
+
+#define S_RPCI 29
+#define V_RPCI(x) ((x) << S_RPCI)
+#define F_RPCI V_RPCI(1U)
+
+#define S_RCII 27
+#define V_RCII(x) ((x) << S_RCII)
+#define F_RCII V_RCII(1U)
+
+#define S_RCCI 26
+#define V_RCCI(x) ((x) << S_RCCI)
+#define F_RCCI V_RCCI(1U)
+
+#define S_RFTI 23
+#define V_RFTI(x) ((x) << S_RFTI)
+#define F_RFTI V_RFTI(1U)
+
+#define A_PCIE_CORE_SYSTEM_BUS_BURST_SIZE_CONFIGURATION 0x5920
+
+#define S_SBRS 28
+#define M_SBRS 0x7U
+#define V_SBRS(x) ((x) << S_SBRS)
+#define G_SBRS(x) (((x) >> S_SBRS) & M_SBRS)
+
+#define S_OTWS 20
+#define M_OTWS 0x7U
+#define V_OTWS(x) ((x) << S_OTWS)
+#define G_OTWS(x) (((x) >> S_OTWS) & M_OTWS)
+
+#define A_PCIE_CORE_REVISION_ID 0x5924
+
+#define S_RVID 20
+#define M_RVID 0xfffU
+#define V_RVID(x) ((x) << S_RVID)
+#define G_RVID(x) (((x) >> S_RVID) & M_RVID)
+
+#define S_BRVN 12
+#define M_BRVN 0xffU
+#define V_BRVN(x) ((x) << S_BRVN)
+#define G_BRVN(x) (((x) >> S_BRVN) & M_BRVN)
+
+#define A_PCIE_CORE_OUTBOUND_POSTED_HEADER_BUFFER_ALLOCATION 0x5960
+
+#define S_OP0H 24
+#define M_OP0H 0xfU
+#define V_OP0H(x) ((x) << S_OP0H)
+#define G_OP0H(x) (((x) >> S_OP0H) & M_OP0H)
+
+#define S_OP1H 16
+#define M_OP1H 0xfU
+#define V_OP1H(x) ((x) << S_OP1H)
+#define G_OP1H(x) (((x) >> S_OP1H) & M_OP1H)
+
+#define S_OP2H 8
+#define M_OP2H 0xfU
+#define V_OP2H(x) ((x) << S_OP2H)
+#define G_OP2H(x) (((x) >> S_OP2H) & M_OP2H)
+
+#define S_OP3H 0
+#define M_OP3H 0xfU
+#define V_OP3H(x) ((x) << S_OP3H)
+#define G_OP3H(x) (((x) >> S_OP3H) & M_OP3H)
+
+#define A_PCIE_CORE_OUTBOUND_POSTED_DATA_BUFFER_ALLOCATION 0x5968
+
+#define S_OP0D 24
+#define M_OP0D 0x7fU
+#define V_OP0D(x) ((x) << S_OP0D)
+#define G_OP0D(x) (((x) >> S_OP0D) & M_OP0D)
+
+#define S_OP1D 16
+#define M_OP1D 0x7fU
+#define V_OP1D(x) ((x) << S_OP1D)
+#define G_OP1D(x) (((x) >> S_OP1D) & M_OP1D)
+
+#define S_OP2D 8
+#define M_OP2D 0x7fU
+#define V_OP2D(x) ((x) << S_OP2D)
+#define G_OP2D(x) (((x) >> S_OP2D) & M_OP2D)
+
+#define S_OP3D 0
+#define M_OP3D 0x7fU
+#define V_OP3D(x) ((x) << S_OP3D)
+#define G_OP3D(x) (((x) >> S_OP3D) & M_OP3D)
+
+#define A_PCIE_CORE_INBOUND_POSTED_HEADER_BUFFER_ALLOCATION 0x5970
+
+#define S_IP0H 24
+#define M_IP0H 0x3fU
+#define V_IP0H(x) ((x) << S_IP0H)
+#define G_IP0H(x) (((x) >> S_IP0H) & M_IP0H)
+
+#define S_IP1H 16
+#define M_IP1H 0x3fU
+#define V_IP1H(x) ((x) << S_IP1H)
+#define G_IP1H(x) (((x) >> S_IP1H) & M_IP1H)
+
+#define S_IP2H 8
+#define M_IP2H 0x3fU
+#define V_IP2H(x) ((x) << S_IP2H)
+#define G_IP2H(x) (((x) >> S_IP2H) & M_IP2H)
+
+#define S_IP3H 0
+#define M_IP3H 0x3fU
+#define V_IP3H(x) ((x) << S_IP3H)
+#define G_IP3H(x) (((x) >> S_IP3H) & M_IP3H)
+
+#define A_PCIE_CORE_INBOUND_POSTED_DATA_BUFFER_ALLOCATION 0x5978
+
+#define S_IP0D 24
+#define M_IP0D 0xffU
+#define V_IP0D(x) ((x) << S_IP0D)
+#define G_IP0D(x) (((x) >> S_IP0D) & M_IP0D)
+
+#define S_IP1D 16
+#define M_IP1D 0xffU
+#define V_IP1D(x) ((x) << S_IP1D)
+#define G_IP1D(x) (((x) >> S_IP1D) & M_IP1D)
+
+#define S_IP2D 8
+#define M_IP2D 0xffU
+#define V_IP2D(x) ((x) << S_IP2D)
+#define G_IP2D(x) (((x) >> S_IP2D) & M_IP2D)
+
+#define S_IP3D 0
+#define M_IP3D 0xffU
+#define V_IP3D(x) ((x) << S_IP3D)
+#define G_IP3D(x) (((x) >> S_IP3D) & M_IP3D)
+
+#define A_PCIE_CORE_OUTBOUND_NON_POSTED_BUFFER_ALLOCATION 0x5980
+
+#define S_ON0H 24
+#define M_ON0H 0xfU
+#define V_ON0H(x) ((x) << S_ON0H)
+#define G_ON0H(x) (((x) >> S_ON0H) & M_ON0H)
+
+#define S_ON1H 16
+#define M_ON1H 0xfU
+#define V_ON1H(x) ((x) << S_ON1H)
+#define G_ON1H(x) (((x) >> S_ON1H) & M_ON1H)
+
+#define S_ON2H 8
+#define M_ON2H 0xfU
+#define V_ON2H(x) ((x) << S_ON2H)
+#define G_ON2H(x) (((x) >> S_ON2H) & M_ON2H)
+
+#define S_ON3H 0
+#define M_ON3H 0xfU
+#define V_ON3H(x) ((x) << S_ON3H)
+#define G_ON3H(x) (((x) >> S_ON3H) & M_ON3H)
+
+#define A_PCIE_CORE_INBOUND_NON_POSTED_REQUESTS_BUFFER_ALLOCATION 0x5988
+
+#define S_IN0H 24
+#define M_IN0H 0x3fU
+#define V_IN0H(x) ((x) << S_IN0H)
+#define G_IN0H(x) (((x) >> S_IN0H) & M_IN0H)
+
+#define S_IN1H 16
+#define M_IN1H 0x3fU
+#define V_IN1H(x) ((x) << S_IN1H)
+#define G_IN1H(x) (((x) >> S_IN1H) & M_IN1H)
+
+#define S_IN2H 8
+#define M_IN2H 0x3fU
+#define V_IN2H(x) ((x) << S_IN2H)
+#define G_IN2H(x) (((x) >> S_IN2H) & M_IN2H)
+
+#define S_IN3H 0
+#define M_IN3H 0x3fU
+#define V_IN3H(x) ((x) << S_IN3H)
+#define G_IN3H(x) (((x) >> S_IN3H) & M_IN3H)
+
+#define A_PCIE_CORE_PCI_EXPRESS_TAGS_ALLOCATION 0x5990
+
+#define S_OC0T 24
+#define M_OC0T 0xffU
+#define V_OC0T(x) ((x) << S_OC0T)
+#define G_OC0T(x) (((x) >> S_OC0T) & M_OC0T)
+
+#define S_OC1T 16
+#define M_OC1T 0xffU
+#define V_OC1T(x) ((x) << S_OC1T)
+#define G_OC1T(x) (((x) >> S_OC1T) & M_OC1T)
+
+#define S_OC2T 8
+#define M_OC2T 0xffU
+#define V_OC2T(x) ((x) << S_OC2T)
+#define G_OC2T(x) (((x) >> S_OC2T) & M_OC2T)
+
+#define S_OC3T 0
+#define M_OC3T 0xffU
+#define V_OC3T(x) ((x) << S_OC3T)
+#define G_OC3T(x) (((x) >> S_OC3T) & M_OC3T)
+
+#define A_PCIE_CORE_GBIF_READ_TAGS_ALLOCATION 0x5998
+
+#define S_IC0T 24
+#define M_IC0T 0x3fU
+#define V_IC0T(x) ((x) << S_IC0T)
+#define G_IC0T(x) (((x) >> S_IC0T) & M_IC0T)
+
+#define S_IC1T 16
+#define M_IC1T 0x3fU
+#define V_IC1T(x) ((x) << S_IC1T)
+#define G_IC1T(x) (((x) >> S_IC1T) & M_IC1T)
+
+#define S_IC2T 8
+#define M_IC2T 0x3fU
+#define V_IC2T(x) ((x) << S_IC2T)
+#define G_IC2T(x) (((x) >> S_IC2T) & M_IC2T)
+
+#define S_IC3T 0
+#define M_IC3T 0x3fU
+#define V_IC3T(x) ((x) << S_IC3T)
+#define G_IC3T(x) (((x) >> S_IC3T) & M_IC3T)
+
+#define A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_CONTROL 0x59a0
+
+#define S_VRB0 31
+#define V_VRB0(x) ((x) << S_VRB0)
+#define F_VRB0 V_VRB0(1U)
+
+#define S_VRB1 30
+#define V_VRB1(x) ((x) << S_VRB1)
+#define F_VRB1 V_VRB1(1U)
+
+#define S_VRB2 29
+#define V_VRB2(x) ((x) << S_VRB2)
+#define F_VRB2 V_VRB2(1U)
+
+#define S_VRB3 28
+#define V_VRB3(x) ((x) << S_VRB3)
+#define F_VRB3 V_VRB3(1U)
+
+#define S_PSFE 26
+#define V_PSFE(x) ((x) << S_PSFE)
+#define F_PSFE V_PSFE(1U)
+
+#define S_RVDE 25
+#define V_RVDE(x) ((x) << S_RVDE)
+#define F_RVDE V_RVDE(1U)
+
+#define S_TXE0 23
+#define V_TXE0(x) ((x) << S_TXE0)
+#define F_TXE0 V_TXE0(1U)
+
+#define S_TXE1 22
+#define V_TXE1(x) ((x) << S_TXE1)
+#define F_TXE1 V_TXE1(1U)
+
+#define S_TXE2 21
+#define V_TXE2(x) ((x) << S_TXE2)
+#define F_TXE2 V_TXE2(1U)
+
+#define S_TXE3 20
+#define V_TXE3(x) ((x) << S_TXE3)
+#define F_TXE3 V_TXE3(1U)
+
+#define S_RPAM 13
+#define V_RPAM(x) ((x) << S_RPAM)
+#define F_RPAM V_RPAM(1U)
+
+#define S_RTOS 4
+#define M_RTOS 0xfU
+#define V_RTOS(x) ((x) << S_RTOS)
+#define G_RTOS(x) (((x) >> S_RTOS) & M_RTOS)
+
+#define A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS 0x59a4
+
+#define S_TPCP 30
+#define V_TPCP(x) ((x) << S_TPCP)
+#define F_TPCP V_TPCP(1U)
+
+#define S_TNPP 29
+#define V_TNPP(x) ((x) << S_TNPP)
+#define F_TNPP V_TNPP(1U)
+
+#define S_TFTP 28
+#define V_TFTP(x) ((x) << S_TFTP)
+#define F_TFTP V_TFTP(1U)
+
+#define S_TCAP 27
+#define V_TCAP(x) ((x) << S_TCAP)
+#define F_TCAP V_TCAP(1U)
+
+#define S_TCIP 26
+#define V_TCIP(x) ((x) << S_TCIP)
+#define F_TCIP V_TCIP(1U)
+
+#define S_RCAP 25
+#define V_RCAP(x) ((x) << S_RCAP)
+#define F_RCAP V_RCAP(1U)
+
+#define S_PLUP 23
+#define V_PLUP(x) ((x) << S_PLUP)
+#define F_PLUP V_PLUP(1U)
+
+#define S_PLDN 22
+#define V_PLDN(x) ((x) << S_PLDN)
+#define F_PLDN V_PLDN(1U)
+
+#define S_OTDD 21
+#define V_OTDD(x) ((x) << S_OTDD)
+#define F_OTDD V_OTDD(1U)
+
+#define S_GTRP 20
+#define V_GTRP(x) ((x) << S_GTRP)
+#define F_GTRP V_GTRP(1U)
+
+#define S_RDPE 18
+#define V_RDPE(x) ((x) << S_RDPE)
+#define F_RDPE V_RDPE(1U)
+
+#define S_TDCE 17
+#define V_TDCE(x) ((x) << S_TDCE)
+#define F_TDCE V_TDCE(1U)
+
+#define S_TDUE 16
+#define V_TDUE(x) ((x) << S_TDUE)
+#define F_TDUE V_TDUE(1U)
+
+#define A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_ERROR_SEVERITY 0x59a8
+
+#define S_TPCS 30
+#define V_TPCS(x) ((x) << S_TPCS)
+#define F_TPCS V_TPCS(1U)
+
+#define S_TNPS 29
+#define V_TNPS(x) ((x) << S_TNPS)
+#define F_TNPS V_TNPS(1U)
+
+#define S_TFTS 28
+#define V_TFTS(x) ((x) << S_TFTS)
+#define F_TFTS V_TFTS(1U)
+
+#define S_TCAS 27
+#define V_TCAS(x) ((x) << S_TCAS)
+#define F_TCAS V_TCAS(1U)
+
+#define S_TCIS 26
+#define V_TCIS(x) ((x) << S_TCIS)
+#define F_TCIS V_TCIS(1U)
+
+#define S_RCAS 25
+#define V_RCAS(x) ((x) << S_RCAS)
+#define F_RCAS V_RCAS(1U)
+
+#define S_PLUS 23
+#define V_PLUS(x) ((x) << S_PLUS)
+#define F_PLUS V_PLUS(1U)
+
+#define S_PLDS 22
+#define V_PLDS(x) ((x) << S_PLDS)
+#define F_PLDS V_PLDS(1U)
+
+#define S_OTDS 21
+#define V_OTDS(x) ((x) << S_OTDS)
+#define F_OTDS V_OTDS(1U)
+
+#define S_RDPS 18
+#define V_RDPS(x) ((x) << S_RDPS)
+#define F_RDPS V_RDPS(1U)
+
+#define S_TDCS 17
+#define V_TDCS(x) ((x) << S_TDCS)
+#define F_TDCS V_TDCS(1U)
+
+#define S_TDUS 16
+#define V_TDUS(x) ((x) << S_TDUS)
+#define F_TDUS V_TDUS(1U)
+
+#define A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_INTERRUPT_ENABLE 0x59ac
+
+#define S_TPCI 30
+#define V_TPCI(x) ((x) << S_TPCI)
+#define F_TPCI V_TPCI(1U)
+
+#define S_TNPI 29
+#define V_TNPI(x) ((x) << S_TNPI)
+#define F_TNPI V_TNPI(1U)
+
+#define S_TFTI 28
+#define V_TFTI(x) ((x) << S_TFTI)
+#define F_TFTI V_TFTI(1U)
+
+#define S_TCAI 27
+#define V_TCAI(x) ((x) << S_TCAI)
+#define F_TCAI V_TCAI(1U)
+
+#define S_TCII 26
+#define V_TCII(x) ((x) << S_TCII)
+#define F_TCII V_TCII(1U)
+
+#define S_RCAI 25
+#define V_RCAI(x) ((x) << S_RCAI)
+#define F_RCAI V_RCAI(1U)
+
+#define S_PLUI 23
+#define V_PLUI(x) ((x) << S_PLUI)
+#define F_PLUI V_PLUI(1U)
+
+#define S_PLDI 22
+#define V_PLDI(x) ((x) << S_PLDI)
+#define F_PLDI V_PLDI(1U)
+
+#define S_OTDI 21
+#define V_OTDI(x) ((x) << S_OTDI)
+#define F_OTDI V_OTDI(1U)
+
+#define A_PCIE_CORE_ROOT_COMPLEX_STATUS 0x59b0
+
+#define S_RLCE 31
+#define V_RLCE(x) ((x) << S_RLCE)
+#define F_RLCE V_RLCE(1U)
+
+#define S_RLNE 30
+#define V_RLNE(x) ((x) << S_RLNE)
+#define F_RLNE V_RLNE(1U)
+
+#define S_RLFE 29
+#define V_RLFE(x) ((x) << S_RLFE)
+#define F_RLFE V_RLFE(1U)
+
+#define S_RCPE 25
+#define V_RCPE(x) ((x) << S_RCPE)
+#define F_RCPE V_RCPE(1U)
+
+#define S_RCTO 24
+#define V_RCTO(x) ((x) << S_RCTO)
+#define F_RCTO V_RCTO(1U)
+
+#define S_PINA 23
+#define V_PINA(x) ((x) << S_PINA)
+#define F_PINA V_PINA(1U)
+
+#define S_PINB 22
+#define V_PINB(x) ((x) << S_PINB)
+#define F_PINB V_PINB(1U)
+
+#define S_PINC 21
+#define V_PINC(x) ((x) << S_PINC)
+#define F_PINC V_PINC(1U)
+
+#define S_PIND 20
+#define V_PIND(x) ((x) << S_PIND)
+#define F_PIND V_PIND(1U)
+
+#define S_ALER 19
+#define V_ALER(x) ((x) << S_ALER)
+#define F_ALER V_ALER(1U)
+
+#define S_CRSE 18
+#define V_CRSE(x) ((x) << S_CRSE)
+#define F_CRSE V_CRSE(1U)
+
+#define A_PCIE_CORE_ROOT_COMPLEX_ERROR_SEVERITY 0x59b4
+
+#define S_RLCS 31
+#define V_RLCS(x) ((x) << S_RLCS)
+#define F_RLCS V_RLCS(1U)
+
+#define S_RLNS 30
+#define V_RLNS(x) ((x) << S_RLNS)
+#define F_RLNS V_RLNS(1U)
+
+#define S_RLFS 29
+#define V_RLFS(x) ((x) << S_RLFS)
+#define F_RLFS V_RLFS(1U)
+
+#define S_RCPS 25
+#define V_RCPS(x) ((x) << S_RCPS)
+#define F_RCPS V_RCPS(1U)
+
+#define S_RCTS 24
+#define V_RCTS(x) ((x) << S_RCTS)
+#define F_RCTS V_RCTS(1U)
+
+#define S_PAAS 23
+#define V_PAAS(x) ((x) << S_PAAS)
+#define F_PAAS V_PAAS(1U)
+
+#define S_PABS 22
+#define V_PABS(x) ((x) << S_PABS)
+#define F_PABS V_PABS(1U)
+
+#define S_PACS 21
+#define V_PACS(x) ((x) << S_PACS)
+#define F_PACS V_PACS(1U)
+
+#define S_PADS 20
+#define V_PADS(x) ((x) << S_PADS)
+#define F_PADS V_PADS(1U)
+
+#define S_ALES 19
+#define V_ALES(x) ((x) << S_ALES)
+#define F_ALES V_ALES(1U)
+
+#define S_CRSS 18
+#define V_CRSS(x) ((x) << S_CRSS)
+#define F_CRSS V_CRSS(1U)
+
+#define A_PCIE_CORE_ROOT_COMPLEX_INTERRUPT_ENABLE 0x59b8
+
+#define S_RLCI 31
+#define V_RLCI(x) ((x) << S_RLCI)
+#define F_RLCI V_RLCI(1U)
+
+#define S_RLNI 30
+#define V_RLNI(x) ((x) << S_RLNI)
+#define F_RLNI V_RLNI(1U)
+
+#define S_RLFI 29
+#define V_RLFI(x) ((x) << S_RLFI)
+#define F_RLFI V_RLFI(1U)
+
+#define S_RCPI 25
+#define V_RCPI(x) ((x) << S_RCPI)
+#define F_RCPI V_RCPI(1U)
+
+#define S_RCTI 24
+#define V_RCTI(x) ((x) << S_RCTI)
+#define F_RCTI V_RCTI(1U)
+
+#define S_PAAI 23
+#define V_PAAI(x) ((x) << S_PAAI)
+#define F_PAAI V_PAAI(1U)
+
+#define S_PABI 22
+#define V_PABI(x) ((x) << S_PABI)
+#define F_PABI V_PABI(1U)
+
+#define S_PACI 21
+#define V_PACI(x) ((x) << S_PACI)
+#define F_PACI V_PACI(1U)
+
+#define S_PADI 20
+#define V_PADI(x) ((x) << S_PADI)
+#define F_PADI V_PADI(1U)
+
+#define S_ALEI 19
+#define V_ALEI(x) ((x) << S_ALEI)
+#define F_ALEI V_ALEI(1U)
+
+#define S_CRSI 18
+#define V_CRSI(x) ((x) << S_CRSI)
+#define F_CRSI V_CRSI(1U)
+
+#define A_PCIE_CORE_ENDPOINT_STATUS 0x59bc
+
+#define S_PTOM 31
+#define V_PTOM(x) ((x) << S_PTOM)
+#define F_PTOM V_PTOM(1U)
+
+#define S_ALEA 29
+#define V_ALEA(x) ((x) << S_ALEA)
+#define F_ALEA V_ALEA(1U)
+
+#define S_PMC0 23
+#define V_PMC0(x) ((x) << S_PMC0)
+#define F_PMC0 V_PMC0(1U)
+
+#define S_PMC1 22
+#define V_PMC1(x) ((x) << S_PMC1)
+#define F_PMC1 V_PMC1(1U)
+
+#define S_PMC2 21
+#define V_PMC2(x) ((x) << S_PMC2)
+#define F_PMC2 V_PMC2(1U)
+
+#define S_PMC3 20
+#define V_PMC3(x) ((x) << S_PMC3)
+#define F_PMC3 V_PMC3(1U)
+
+#define S_PMC4 19
+#define V_PMC4(x) ((x) << S_PMC4)
+#define F_PMC4 V_PMC4(1U)
+
+#define S_PMC5 18
+#define V_PMC5(x) ((x) << S_PMC5)
+#define F_PMC5 V_PMC5(1U)
+
+#define S_PMC6 17
+#define V_PMC6(x) ((x) << S_PMC6)
+#define F_PMC6 V_PMC6(1U)
+
+#define S_PMC7 16
+#define V_PMC7(x) ((x) << S_PMC7)
+#define F_PMC7 V_PMC7(1U)
+
+#define A_PCIE_CORE_ENDPOINT_ERROR_SEVERITY 0x59c0
+
+#define S_PTOS 31
+#define V_PTOS(x) ((x) << S_PTOS)
+#define F_PTOS V_PTOS(1U)
+
+#define S_AENS 29
+#define V_AENS(x) ((x) << S_AENS)
+#define F_AENS V_AENS(1U)
+
+#define S_PC0S 23
+#define V_PC0S(x) ((x) << S_PC0S)
+#define F_PC0S V_PC0S(1U)
+
+#define S_PC1S 22
+#define V_PC1S(x) ((x) << S_PC1S)
+#define F_PC1S V_PC1S(1U)
+
+#define S_PC2S 21
+#define V_PC2S(x) ((x) << S_PC2S)
+#define F_PC2S V_PC2S(1U)
+
+#define S_PC3S 20
+#define V_PC3S(x) ((x) << S_PC3S)
+#define F_PC3S V_PC3S(1U)
+
+#define S_PC4S 19
+#define V_PC4S(x) ((x) << S_PC4S)
+#define F_PC4S V_PC4S(1U)
+
+#define S_PC5S 18
+#define V_PC5S(x) ((x) << S_PC5S)
+#define F_PC5S V_PC5S(1U)
+
+#define S_PC6S 17
+#define V_PC6S(x) ((x) << S_PC6S)
+#define F_PC6S V_PC6S(1U)
+
+#define S_PC7S 16
+#define V_PC7S(x) ((x) << S_PC7S)
+#define F_PC7S V_PC7S(1U)
+
+#define S_PME0 15
+#define V_PME0(x) ((x) << S_PME0)
+#define F_PME0 V_PME0(1U)
+
+#define S_PME1 14
+#define V_PME1(x) ((x) << S_PME1)
+#define F_PME1 V_PME1(1U)
+
+#define S_PME2 13
+#define V_PME2(x) ((x) << S_PME2)
+#define F_PME2 V_PME2(1U)
+
+#define S_PME3 12
+#define V_PME3(x) ((x) << S_PME3)
+#define F_PME3 V_PME3(1U)
+
+#define S_PME4 11
+#define V_PME4(x) ((x) << S_PME4)
+#define F_PME4 V_PME4(1U)
+
+#define S_PME5 10
+#define V_PME5(x) ((x) << S_PME5)
+#define F_PME5 V_PME5(1U)
+
+#define S_PME6 9
+#define V_PME6(x) ((x) << S_PME6)
+#define F_PME6 V_PME6(1U)
+
+#define S_PME7 8
+#define V_PME7(x) ((x) << S_PME7)
+#define F_PME7 V_PME7(1U)
+
+#define A_PCIE_CORE_ENDPOINT_INTERRUPT_ENABLE 0x59c4
+
+#define S_PTOI 31
+#define V_PTOI(x) ((x) << S_PTOI)
+#define F_PTOI V_PTOI(1U)
+
+#define S_AENI 29
+#define V_AENI(x) ((x) << S_AENI)
+#define F_AENI V_AENI(1U)
+
+#define S_PC0I 23
+#define V_PC0I(x) ((x) << S_PC0I)
+#define F_PC0I V_PC0I(1U)
+
+#define S_PC1I 22
+#define V_PC1I(x) ((x) << S_PC1I)
+#define F_PC1I V_PC1I(1U)
+
+#define S_PC2I 21
+#define V_PC2I(x) ((x) << S_PC2I)
+#define F_PC2I V_PC2I(1U)
+
+#define S_PC3I 20
+#define V_PC3I(x) ((x) << S_PC3I)
+#define F_PC3I V_PC3I(1U)
+
+#define S_PC4I 19
+#define V_PC4I(x) ((x) << S_PC4I)
+#define F_PC4I V_PC4I(1U)
+
+#define S_PC5I 18
+#define V_PC5I(x) ((x) << S_PC5I)
+#define F_PC5I V_PC5I(1U)
+
+#define S_PC6I 17
+#define V_PC6I(x) ((x) << S_PC6I)
+#define F_PC6I V_PC6I(1U)
+
+#define S_PC7I 16
+#define V_PC7I(x) ((x) << S_PC7I)
+#define F_PC7I V_PC7I(1U)
+
+#define A_PCIE_CORE_PCI_POWER_MANAGEMENT_CONTROL_1 0x59c8
+
+#define S_TOAK 31
+#define V_TOAK(x) ((x) << S_TOAK)
+#define F_TOAK V_TOAK(1U)
+
+#define S_L1RS 23
+#define V_L1RS(x) ((x) << S_L1RS)
+#define F_L1RS V_L1RS(1U)
+
+#define S_L23S 22
+#define V_L23S(x) ((x) << S_L23S)
+#define F_L23S V_L23S(1U)
+
+#define S_AL1S 21
+#define V_AL1S(x) ((x) << S_AL1S)
+#define F_AL1S V_AL1S(1U)
+
+#define S_ALET 19
+#define V_ALET(x) ((x) << S_ALET)
+#define F_ALET V_ALET(1U)
+
+#define A_PCIE_CORE_PCI_POWER_MANAGEMENT_CONTROL_2 0x59cc
+
+#define S_CPM0 30
+#define M_CPM0 0x3U
+#define V_CPM0(x) ((x) << S_CPM0)
+#define G_CPM0(x) (((x) >> S_CPM0) & M_CPM0)
+
+#define S_CPM1 28
+#define M_CPM1 0x3U
+#define V_CPM1(x) ((x) << S_CPM1)
+#define G_CPM1(x) (((x) >> S_CPM1) & M_CPM1)
+
+#define S_CPM2 26
+#define M_CPM2 0x3U
+#define V_CPM2(x) ((x) << S_CPM2)
+#define G_CPM2(x) (((x) >> S_CPM2) & M_CPM2)
+
+#define S_CPM3 24
+#define M_CPM3 0x3U
+#define V_CPM3(x) ((x) << S_CPM3)
+#define G_CPM3(x) (((x) >> S_CPM3) & M_CPM3)
+
+#define S_CPM4 22
+#define M_CPM4 0x3U
+#define V_CPM4(x) ((x) << S_CPM4)
+#define G_CPM4(x) (((x) >> S_CPM4) & M_CPM4)
+
+#define S_CPM5 20
+#define M_CPM5 0x3U
+#define V_CPM5(x) ((x) << S_CPM5)
+#define G_CPM5(x) (((x) >> S_CPM5) & M_CPM5)
+
+#define S_CPM6 18
+#define M_CPM6 0x3U
+#define V_CPM6(x) ((x) << S_CPM6)
+#define G_CPM6(x) (((x) >> S_CPM6) & M_CPM6)
+
+#define S_CPM7 16
+#define M_CPM7 0x3U
+#define V_CPM7(x) ((x) << S_CPM7)
+#define G_CPM7(x) (((x) >> S_CPM7) & M_CPM7)
+
+#define S_OPM0 14
+#define M_OPM0 0x3U
+#define V_OPM0(x) ((x) << S_OPM0)
+#define G_OPM0(x) (((x) >> S_OPM0) & M_OPM0)
+
+#define S_OPM1 12
+#define M_OPM1 0x3U
+#define V_OPM1(x) ((x) << S_OPM1)
+#define G_OPM1(x) (((x) >> S_OPM1) & M_OPM1)
+
+#define S_OPM2 10
+#define M_OPM2 0x3U
+#define V_OPM2(x) ((x) << S_OPM2)
+#define G_OPM2(x) (((x) >> S_OPM2) & M_OPM2)
+
+#define S_OPM3 8
+#define M_OPM3 0x3U
+#define V_OPM3(x) ((x) << S_OPM3)
+#define G_OPM3(x) (((x) >> S_OPM3) & M_OPM3)
+
+#define S_OPM4 6
+#define M_OPM4 0x3U
+#define V_OPM4(x) ((x) << S_OPM4)
+#define G_OPM4(x) (((x) >> S_OPM4) & M_OPM4)
+
+#define S_OPM5 4
+#define M_OPM5 0x3U
+#define V_OPM5(x) ((x) << S_OPM5)
+#define G_OPM5(x) (((x) >> S_OPM5) & M_OPM5)
+
+#define S_OPM6 2
+#define M_OPM6 0x3U
+#define V_OPM6(x) ((x) << S_OPM6)
+#define G_OPM6(x) (((x) >> S_OPM6) & M_OPM6)
+
+#define S_OPM7 0
+#define M_OPM7 0x3U
+#define V_OPM7(x) ((x) << S_OPM7)
+#define G_OPM7(x) (((x) >> S_OPM7) & M_OPM7)
+
+#define A_PCIE_CORE_GENERAL_PURPOSE_CONTROL_1 0x59d0
+#define A_PCIE_CORE_GENERAL_PURPOSE_CONTROL_2 0x59d4
+#define A_PCIE_REVISION 0x5a00
+#define A_PCIE_PDEBUG_INDEX 0x5a04
+
+#define S_PDEBUGSELH 16
+#define M_PDEBUGSELH 0x3fU
+#define V_PDEBUGSELH(x) ((x) << S_PDEBUGSELH)
+#define G_PDEBUGSELH(x) (((x) >> S_PDEBUGSELH) & M_PDEBUGSELH)
+
+#define S_PDEBUGSELL 0
+#define M_PDEBUGSELL 0x3fU
+#define V_PDEBUGSELL(x) ((x) << S_PDEBUGSELL)
+#define G_PDEBUGSELL(x) (((x) >> S_PDEBUGSELL) & M_PDEBUGSELL)
+
+#define A_PCIE_PDEBUG_DATA_HIGH 0x5a08
+#define A_PCIE_PDEBUG_DATA_LOW 0x5a0c
+#define A_PCIE_CDEBUG_INDEX 0x5a10
+
+#define S_CDEBUGSELH 16
+#define M_CDEBUGSELH 0xffU
+#define V_CDEBUGSELH(x) ((x) << S_CDEBUGSELH)
+#define G_CDEBUGSELH(x) (((x) >> S_CDEBUGSELH) & M_CDEBUGSELH)
+
+#define S_CDEBUGSELL 0
+#define M_CDEBUGSELL 0xffU
+#define V_CDEBUGSELL(x) ((x) << S_CDEBUGSELL)
+#define G_CDEBUGSELL(x) (((x) >> S_CDEBUGSELL) & M_CDEBUGSELL)
+
+#define A_PCIE_CDEBUG_DATA_HIGH 0x5a14
+#define A_PCIE_CDEBUG_DATA_LOW 0x5a18
+#define A_PCIE_DMAW_SOP_CNT 0x5a1c
+
+#define S_CH3 24
+#define M_CH3 0xffU
+#define V_CH3(x) ((x) << S_CH3)
+#define G_CH3(x) (((x) >> S_CH3) & M_CH3)
+
+#define S_CH2 16
+#define M_CH2 0xffU
+#define V_CH2(x) ((x) << S_CH2)
+#define G_CH2(x) (((x) >> S_CH2) & M_CH2)
+
+#define S_CH1 8
+#define M_CH1 0xffU
+#define V_CH1(x) ((x) << S_CH1)
+#define G_CH1(x) (((x) >> S_CH1) & M_CH1)
+
+#define S_CH0 0
+#define M_CH0 0xffU
+#define V_CH0(x) ((x) << S_CH0)
+#define G_CH0(x) (((x) >> S_CH0) & M_CH0)
+
+#define A_PCIE_DMAW_EOP_CNT 0x5a20
+#define A_PCIE_DMAR_REQ_CNT 0x5a24
+#define A_PCIE_DMAR_RSP_SOP_CNT 0x5a28
+#define A_PCIE_DMAR_RSP_EOP_CNT 0x5a2c
+#define A_PCIE_DMAR_RSP_ERR_CNT 0x5a30
+#define A_PCIE_DMAI_CNT 0x5a34
+#define A_PCIE_CMDW_CNT 0x5a38
+
+#define S_CH1_EOP 24
+#define M_CH1_EOP 0xffU
+#define V_CH1_EOP(x) ((x) << S_CH1_EOP)
+#define G_CH1_EOP(x) (((x) >> S_CH1_EOP) & M_CH1_EOP)
+
+#define S_CH1_SOP 16
+#define M_CH1_SOP 0xffU
+#define V_CH1_SOP(x) ((x) << S_CH1_SOP)
+#define G_CH1_SOP(x) (((x) >> S_CH1_SOP) & M_CH1_SOP)
+
+#define S_CH0_EOP 8
+#define M_CH0_EOP 0xffU
+#define V_CH0_EOP(x) ((x) << S_CH0_EOP)
+#define G_CH0_EOP(x) (((x) >> S_CH0_EOP) & M_CH0_EOP)
+
+#define S_CH0_SOP 0
+#define M_CH0_SOP 0xffU
+#define V_CH0_SOP(x) ((x) << S_CH0_SOP)
+#define G_CH0_SOP(x) (((x) >> S_CH0_SOP) & M_CH0_SOP)
+
+#define A_PCIE_CMDR_REQ_CNT 0x5a3c
+#define A_PCIE_CMDR_RSP_CNT 0x5a40
+#define A_PCIE_CMDR_RSP_ERR_CNT 0x5a44
+#define A_PCIE_HMA_REQ_CNT 0x5a48
+
+#define S_CH0_READ 16
+#define M_CH0_READ 0xffU
+#define V_CH0_READ(x) ((x) << S_CH0_READ)
+#define G_CH0_READ(x) (((x) >> S_CH0_READ) & M_CH0_READ)
+
+#define S_CH0_WEOP 8
+#define M_CH0_WEOP 0xffU
+#define V_CH0_WEOP(x) ((x) << S_CH0_WEOP)
+#define G_CH0_WEOP(x) (((x) >> S_CH0_WEOP) & M_CH0_WEOP)
+
+#define S_CH0_WSOP 0
+#define M_CH0_WSOP 0xffU
+#define V_CH0_WSOP(x) ((x) << S_CH0_WSOP)
+#define G_CH0_WSOP(x) (((x) >> S_CH0_WSOP) & M_CH0_WSOP)
+
+#define A_PCIE_HMA_RSP_CNT 0x5a4c
+#define A_PCIE_DMA10_RSP_FREE 0x5a50
+
+#define S_CH1_RSP_FREE 16
+#define M_CH1_RSP_FREE 0xfffU
+#define V_CH1_RSP_FREE(x) ((x) << S_CH1_RSP_FREE)
+#define G_CH1_RSP_FREE(x) (((x) >> S_CH1_RSP_FREE) & M_CH1_RSP_FREE)
+
+#define S_CH0_RSP_FREE 0
+#define M_CH0_RSP_FREE 0xfffU
+#define V_CH0_RSP_FREE(x) ((x) << S_CH0_RSP_FREE)
+#define G_CH0_RSP_FREE(x) (((x) >> S_CH0_RSP_FREE) & M_CH0_RSP_FREE)
+
+#define A_PCIE_DMA32_RSP_FREE 0x5a54
+
+#define S_CH3_RSP_FREE 16
+#define M_CH3_RSP_FREE 0xfffU
+#define V_CH3_RSP_FREE(x) ((x) << S_CH3_RSP_FREE)
+#define G_CH3_RSP_FREE(x) (((x) >> S_CH3_RSP_FREE) & M_CH3_RSP_FREE)
+
+#define S_CH2_RSP_FREE 0
+#define M_CH2_RSP_FREE 0xfffU
+#define V_CH2_RSP_FREE(x) ((x) << S_CH2_RSP_FREE)
+#define G_CH2_RSP_FREE(x) (((x) >> S_CH2_RSP_FREE) & M_CH2_RSP_FREE)
+
+#define A_PCIE_CMD_RSP_FREE 0x5a58
+
+#define S_CMD_CH1_RSP_FREE 16
+#define M_CMD_CH1_RSP_FREE 0x7fU
+#define V_CMD_CH1_RSP_FREE(x) ((x) << S_CMD_CH1_RSP_FREE)
+#define G_CMD_CH1_RSP_FREE(x) (((x) >> S_CMD_CH1_RSP_FREE) & M_CMD_CH1_RSP_FREE)
+
+#define S_CMD_CH0_RSP_FREE 0
+#define M_CMD_CH0_RSP_FREE 0x7fU
+#define V_CMD_CH0_RSP_FREE(x) ((x) << S_CMD_CH0_RSP_FREE)
+#define G_CMD_CH0_RSP_FREE(x) (((x) >> S_CMD_CH0_RSP_FREE) & M_CMD_CH0_RSP_FREE)
+
+#define A_PCIE_HMA_RSP_FREE 0x5a5c
+#define A_PCIE_BUS_MST_STAT_0 0x5a60
+#define A_PCIE_BUS_MST_STAT_1 0x5a64
+#define A_PCIE_BUS_MST_STAT_2 0x5a68
+#define A_PCIE_BUS_MST_STAT_3 0x5a6c
+#define A_PCIE_BUS_MST_STAT_4 0x5a70
+#define A_PCIE_BUS_MST_STAT_5 0x5a74
+#define A_PCIE_BUS_MST_STAT_6 0x5a78
+#define A_PCIE_BUS_MST_STAT_7 0x5a7c
+#define A_PCIE_RSP_ERR_STAT_0 0x5a80
+#define A_PCIE_RSP_ERR_STAT_1 0x5a84
+#define A_PCIE_RSP_ERR_STAT_2 0x5a88
+#define A_PCIE_RSP_ERR_STAT_3 0x5a8c
+#define A_PCIE_RSP_ERR_STAT_4 0x5a90
+#define A_PCIE_RSP_ERR_STAT_5 0x5a94
+#define A_PCIE_RSP_ERR_STAT_6 0x5a98
+#define A_PCIE_RSP_ERR_STAT_7 0x5a9c
+#define A_PCIE_MSI_EN_0 0x5aa0
+#define A_PCIE_MSI_EN_1 0x5aa4
+#define A_PCIE_MSI_EN_2 0x5aa8
+#define A_PCIE_MSI_EN_3 0x5aac
+#define A_PCIE_MSI_EN_4 0x5ab0
+#define A_PCIE_MSI_EN_5 0x5ab4
+#define A_PCIE_MSI_EN_6 0x5ab8
+#define A_PCIE_MSI_EN_7 0x5abc
+#define A_PCIE_MSIX_EN_0 0x5ac0
+#define A_PCIE_MSIX_EN_1 0x5ac4
+#define A_PCIE_MSIX_EN_2 0x5ac8
+#define A_PCIE_MSIX_EN_3 0x5acc
+#define A_PCIE_MSIX_EN_4 0x5ad0
+#define A_PCIE_MSIX_EN_5 0x5ad4
+#define A_PCIE_MSIX_EN_6 0x5ad8
+#define A_PCIE_MSIX_EN_7 0x5adc
+#define A_PCIE_DMA_BUF_CTL 0x5ae0
+
+#define S_BUFRDCNT 18
+#define M_BUFRDCNT 0x3fffU
+#define V_BUFRDCNT(x) ((x) << S_BUFRDCNT)
+#define G_BUFRDCNT(x) (((x) >> S_BUFRDCNT) & M_BUFRDCNT)
+
+#define S_BUFWRCNT 9
+#define M_BUFWRCNT 0x1ffU
+#define V_BUFWRCNT(x) ((x) << S_BUFWRCNT)
+#define G_BUFWRCNT(x) (((x) >> S_BUFWRCNT) & M_BUFWRCNT)
+
+#define S_MAXBUFWRREQ 0
+#define M_MAXBUFWRREQ 0x1ffU
+#define V_MAXBUFWRREQ(x) ((x) << S_MAXBUFWRREQ)
+#define G_MAXBUFWRREQ(x) (((x) >> S_MAXBUFWRREQ) & M_MAXBUFWRREQ)
+
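+/*
+ * Illustrative sketch, not part of the generated register list: for a field
+ * FOO the macros in this file provide S_FOO (bit offset), M_FOO (unshifted
+ * mask), V_FOO(x) (value shifted into place), G_FOO(x) (field extractor)
+ * and, for single-bit fields, F_FOO.  A hypothetical read-modify-write of
+ * the PDEBUGSELL field of A_PCIE_PDEBUG_INDEX, assuming reg_read32() and
+ * reg_write32() register accessors, could look like:
+ *
+ *	uint32_t v = reg_read32(adap, A_PCIE_PDEBUG_INDEX);
+ *	v &= ~V_PDEBUGSELL(M_PDEBUGSELL);	clear the 6-bit field
+ *	v |= V_PDEBUGSELL(sel);			insert the new selector
+ *	reg_write32(adap, A_PCIE_PDEBUG_INDEX, v);
+ *	sel = G_PDEBUGSELL(reg_read32(adap, A_PCIE_PDEBUG_INDEX));
+ */
+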
+/* registers for module DBG */
+#define DBG_BASE_ADDR 0x6000
+
+#define A_DBG_DBG0_CFG 0x6000
+
+#define S_MODULESELECT 12
+#define M_MODULESELECT 0xffU
+#define V_MODULESELECT(x) ((x) << S_MODULESELECT)
+#define G_MODULESELECT(x) (((x) >> S_MODULESELECT) & M_MODULESELECT)
+
+#define S_REGSELECT 4
+#define M_REGSELECT 0xffU
+#define V_REGSELECT(x) ((x) << S_REGSELECT)
+#define G_REGSELECT(x) (((x) >> S_REGSELECT) & M_REGSELECT)
+
+#define S_CLKSELECT 0
+#define M_CLKSELECT 0xfU
+#define V_CLKSELECT(x) ((x) << S_CLKSELECT)
+#define G_CLKSELECT(x) (((x) >> S_CLKSELECT) & M_CLKSELECT)
+
+#define A_DBG_DBG0_EN 0x6004
+
+#define S_PORTEN_PONR 16
+#define V_PORTEN_PONR(x) ((x) << S_PORTEN_PONR)
+#define F_PORTEN_PONR V_PORTEN_PONR(1U)
+
+#define S_PORTEN_POND 12
+#define V_PORTEN_POND(x) ((x) << S_PORTEN_POND)
+#define F_PORTEN_POND V_PORTEN_POND(1U)
+
+#define S_SDRHALFWORD0 8
+#define V_SDRHALFWORD0(x) ((x) << S_SDRHALFWORD0)
+#define F_SDRHALFWORD0 V_SDRHALFWORD0(1U)
+
+#define S_DDREN 4
+#define V_DDREN(x) ((x) << S_DDREN)
+#define F_DDREN V_DDREN(1U)
+
+#define S_DBG_PORTEN 0
+#define V_DBG_PORTEN(x) ((x) << S_DBG_PORTEN)
+#define F_DBG_PORTEN V_DBG_PORTEN(1U)
+
+#define A_DBG_DBG1_CFG 0x6008
+#define A_DBG_DBG1_EN 0x600c
+#define A_DBG_GPIO_EN 0x6010
+
+#define S_GPIO15_OEN 31
+#define V_GPIO15_OEN(x) ((x) << S_GPIO15_OEN)
+#define F_GPIO15_OEN V_GPIO15_OEN(1U)
+
+#define S_GPIO14_OEN 30
+#define V_GPIO14_OEN(x) ((x) << S_GPIO14_OEN)
+#define F_GPIO14_OEN V_GPIO14_OEN(1U)
+
+#define S_GPIO13_OEN 29
+#define V_GPIO13_OEN(x) ((x) << S_GPIO13_OEN)
+#define F_GPIO13_OEN V_GPIO13_OEN(1U)
+
+#define S_GPIO12_OEN 28
+#define V_GPIO12_OEN(x) ((x) << S_GPIO12_OEN)
+#define F_GPIO12_OEN V_GPIO12_OEN(1U)
+
+#define S_GPIO11_OEN 27
+#define V_GPIO11_OEN(x) ((x) << S_GPIO11_OEN)
+#define F_GPIO11_OEN V_GPIO11_OEN(1U)
+
+#define S_GPIO10_OEN 26
+#define V_GPIO10_OEN(x) ((x) << S_GPIO10_OEN)
+#define F_GPIO10_OEN V_GPIO10_OEN(1U)
+
+#define S_GPIO9_OEN 25
+#define V_GPIO9_OEN(x) ((x) << S_GPIO9_OEN)
+#define F_GPIO9_OEN V_GPIO9_OEN(1U)
+
+#define S_GPIO8_OEN 24
+#define V_GPIO8_OEN(x) ((x) << S_GPIO8_OEN)
+#define F_GPIO8_OEN V_GPIO8_OEN(1U)
+
+#define S_GPIO7_OEN 23
+#define V_GPIO7_OEN(x) ((x) << S_GPIO7_OEN)
+#define F_GPIO7_OEN V_GPIO7_OEN(1U)
+
+#define S_GPIO6_OEN 22
+#define V_GPIO6_OEN(x) ((x) << S_GPIO6_OEN)
+#define F_GPIO6_OEN V_GPIO6_OEN(1U)
+
+#define S_GPIO5_OEN 21
+#define V_GPIO5_OEN(x) ((x) << S_GPIO5_OEN)
+#define F_GPIO5_OEN V_GPIO5_OEN(1U)
+
+#define S_GPIO4_OEN 20
+#define V_GPIO4_OEN(x) ((x) << S_GPIO4_OEN)
+#define F_GPIO4_OEN V_GPIO4_OEN(1U)
+
+#define S_GPIO3_OEN 19
+#define V_GPIO3_OEN(x) ((x) << S_GPIO3_OEN)
+#define F_GPIO3_OEN V_GPIO3_OEN(1U)
+
+#define S_GPIO2_OEN 18
+#define V_GPIO2_OEN(x) ((x) << S_GPIO2_OEN)
+#define F_GPIO2_OEN V_GPIO2_OEN(1U)
+
+#define S_GPIO1_OEN 17
+#define V_GPIO1_OEN(x) ((x) << S_GPIO1_OEN)
+#define F_GPIO1_OEN V_GPIO1_OEN(1U)
+
+#define S_GPIO0_OEN 16
+#define V_GPIO0_OEN(x) ((x) << S_GPIO0_OEN)
+#define F_GPIO0_OEN V_GPIO0_OEN(1U)
+
+#define S_GPIO15_OUT_VAL 15
+#define V_GPIO15_OUT_VAL(x) ((x) << S_GPIO15_OUT_VAL)
+#define F_GPIO15_OUT_VAL V_GPIO15_OUT_VAL(1U)
+
+#define S_GPIO14_OUT_VAL 14
+#define V_GPIO14_OUT_VAL(x) ((x) << S_GPIO14_OUT_VAL)
+#define F_GPIO14_OUT_VAL V_GPIO14_OUT_VAL(1U)
+
+#define S_GPIO13_OUT_VAL 13
+#define V_GPIO13_OUT_VAL(x) ((x) << S_GPIO13_OUT_VAL)
+#define F_GPIO13_OUT_VAL V_GPIO13_OUT_VAL(1U)
+
+#define S_GPIO12_OUT_VAL 12
+#define V_GPIO12_OUT_VAL(x) ((x) << S_GPIO12_OUT_VAL)
+#define F_GPIO12_OUT_VAL V_GPIO12_OUT_VAL(1U)
+
+#define S_GPIO11_OUT_VAL 11
+#define V_GPIO11_OUT_VAL(x) ((x) << S_GPIO11_OUT_VAL)
+#define F_GPIO11_OUT_VAL V_GPIO11_OUT_VAL(1U)
+
+#define S_GPIO10_OUT_VAL 10
+#define V_GPIO10_OUT_VAL(x) ((x) << S_GPIO10_OUT_VAL)
+#define F_GPIO10_OUT_VAL V_GPIO10_OUT_VAL(1U)
+
+#define S_GPIO9_OUT_VAL 9
+#define V_GPIO9_OUT_VAL(x) ((x) << S_GPIO9_OUT_VAL)
+#define F_GPIO9_OUT_VAL V_GPIO9_OUT_VAL(1U)
+
+#define S_GPIO8_OUT_VAL 8
+#define V_GPIO8_OUT_VAL(x) ((x) << S_GPIO8_OUT_VAL)
+#define F_GPIO8_OUT_VAL V_GPIO8_OUT_VAL(1U)
+
+#define S_GPIO7_OUT_VAL 7
+#define V_GPIO7_OUT_VAL(x) ((x) << S_GPIO7_OUT_VAL)
+#define F_GPIO7_OUT_VAL V_GPIO7_OUT_VAL(1U)
+
+#define S_GPIO6_OUT_VAL 6
+#define V_GPIO6_OUT_VAL(x) ((x) << S_GPIO6_OUT_VAL)
+#define F_GPIO6_OUT_VAL V_GPIO6_OUT_VAL(1U)
+
+#define S_GPIO5_OUT_VAL 5
+#define V_GPIO5_OUT_VAL(x) ((x) << S_GPIO5_OUT_VAL)
+#define F_GPIO5_OUT_VAL V_GPIO5_OUT_VAL(1U)
+
+#define S_GPIO4_OUT_VAL 4
+#define V_GPIO4_OUT_VAL(x) ((x) << S_GPIO4_OUT_VAL)
+#define F_GPIO4_OUT_VAL V_GPIO4_OUT_VAL(1U)
+
+#define S_GPIO3_OUT_VAL 3
+#define V_GPIO3_OUT_VAL(x) ((x) << S_GPIO3_OUT_VAL)
+#define F_GPIO3_OUT_VAL V_GPIO3_OUT_VAL(1U)
+
+#define S_GPIO2_OUT_VAL 2
+#define V_GPIO2_OUT_VAL(x) ((x) << S_GPIO2_OUT_VAL)
+#define F_GPIO2_OUT_VAL V_GPIO2_OUT_VAL(1U)
+
+#define S_GPIO1_OUT_VAL 1
+#define V_GPIO1_OUT_VAL(x) ((x) << S_GPIO1_OUT_VAL)
+#define F_GPIO1_OUT_VAL V_GPIO1_OUT_VAL(1U)
+
+#define S_GPIO0_OUT_VAL 0
+#define V_GPIO0_OUT_VAL(x) ((x) << S_GPIO0_OUT_VAL)
+#define F_GPIO0_OUT_VAL V_GPIO0_OUT_VAL(1U)
+
+#define A_DBG_GPIO_IN 0x6014
+
+#define S_GPIO15_CHG_DET 31
+#define V_GPIO15_CHG_DET(x) ((x) << S_GPIO15_CHG_DET)
+#define F_GPIO15_CHG_DET V_GPIO15_CHG_DET(1U)
+
+#define S_GPIO14_CHG_DET 30
+#define V_GPIO14_CHG_DET(x) ((x) << S_GPIO14_CHG_DET)
+#define F_GPIO14_CHG_DET V_GPIO14_CHG_DET(1U)
+
+#define S_GPIO13_CHG_DET 29
+#define V_GPIO13_CHG_DET(x) ((x) << S_GPIO13_CHG_DET)
+#define F_GPIO13_CHG_DET V_GPIO13_CHG_DET(1U)
+
+#define S_GPIO12_CHG_DET 28
+#define V_GPIO12_CHG_DET(x) ((x) << S_GPIO12_CHG_DET)
+#define F_GPIO12_CHG_DET V_GPIO12_CHG_DET(1U)
+
+#define S_GPIO11_CHG_DET 27
+#define V_GPIO11_CHG_DET(x) ((x) << S_GPIO11_CHG_DET)
+#define F_GPIO11_CHG_DET V_GPIO11_CHG_DET(1U)
+
+#define S_GPIO10_CHG_DET 26
+#define V_GPIO10_CHG_DET(x) ((x) << S_GPIO10_CHG_DET)
+#define F_GPIO10_CHG_DET V_GPIO10_CHG_DET(1U)
+
+#define S_GPIO9_CHG_DET 25
+#define V_GPIO9_CHG_DET(x) ((x) << S_GPIO9_CHG_DET)
+#define F_GPIO9_CHG_DET V_GPIO9_CHG_DET(1U)
+
+#define S_GPIO8_CHG_DET 24
+#define V_GPIO8_CHG_DET(x) ((x) << S_GPIO8_CHG_DET)
+#define F_GPIO8_CHG_DET V_GPIO8_CHG_DET(1U)
+
+#define S_GPIO7_CHG_DET 23
+#define V_GPIO7_CHG_DET(x) ((x) << S_GPIO7_CHG_DET)
+#define F_GPIO7_CHG_DET V_GPIO7_CHG_DET(1U)
+
+#define S_GPIO6_CHG_DET 22
+#define V_GPIO6_CHG_DET(x) ((x) << S_GPIO6_CHG_DET)
+#define F_GPIO6_CHG_DET V_GPIO6_CHG_DET(1U)
+
+#define S_GPIO5_CHG_DET 21
+#define V_GPIO5_CHG_DET(x) ((x) << S_GPIO5_CHG_DET)
+#define F_GPIO5_CHG_DET V_GPIO5_CHG_DET(1U)
+
+#define S_GPIO4_CHG_DET 20
+#define V_GPIO4_CHG_DET(x) ((x) << S_GPIO4_CHG_DET)
+#define F_GPIO4_CHG_DET V_GPIO4_CHG_DET(1U)
+
+#define S_GPIO3_CHG_DET 19
+#define V_GPIO3_CHG_DET(x) ((x) << S_GPIO3_CHG_DET)
+#define F_GPIO3_CHG_DET V_GPIO3_CHG_DET(1U)
+
+#define S_GPIO2_CHG_DET 18
+#define V_GPIO2_CHG_DET(x) ((x) << S_GPIO2_CHG_DET)
+#define F_GPIO2_CHG_DET V_GPIO2_CHG_DET(1U)
+
+#define S_GPIO1_CHG_DET 17
+#define V_GPIO1_CHG_DET(x) ((x) << S_GPIO1_CHG_DET)
+#define F_GPIO1_CHG_DET V_GPIO1_CHG_DET(1U)
+
+#define S_GPIO0_CHG_DET 16
+#define V_GPIO0_CHG_DET(x) ((x) << S_GPIO0_CHG_DET)
+#define F_GPIO0_CHG_DET V_GPIO0_CHG_DET(1U)
+
+#define S_GPIO15_IN 15
+#define V_GPIO15_IN(x) ((x) << S_GPIO15_IN)
+#define F_GPIO15_IN V_GPIO15_IN(1U)
+
+#define S_GPIO14_IN 14
+#define V_GPIO14_IN(x) ((x) << S_GPIO14_IN)
+#define F_GPIO14_IN V_GPIO14_IN(1U)
+
+#define S_GPIO13_IN 13
+#define V_GPIO13_IN(x) ((x) << S_GPIO13_IN)
+#define F_GPIO13_IN V_GPIO13_IN(1U)
+
+#define S_GPIO12_IN 12
+#define V_GPIO12_IN(x) ((x) << S_GPIO12_IN)
+#define F_GPIO12_IN V_GPIO12_IN(1U)
+
+#define S_GPIO11_IN 11
+#define V_GPIO11_IN(x) ((x) << S_GPIO11_IN)
+#define F_GPIO11_IN V_GPIO11_IN(1U)
+
+#define S_GPIO10_IN 10
+#define V_GPIO10_IN(x) ((x) << S_GPIO10_IN)
+#define F_GPIO10_IN V_GPIO10_IN(1U)
+
+#define S_GPIO9_IN 9
+#define V_GPIO9_IN(x) ((x) << S_GPIO9_IN)
+#define F_GPIO9_IN V_GPIO9_IN(1U)
+
+#define S_GPIO8_IN 8
+#define V_GPIO8_IN(x) ((x) << S_GPIO8_IN)
+#define F_GPIO8_IN V_GPIO8_IN(1U)
+
+#define S_GPIO7_IN 7
+#define V_GPIO7_IN(x) ((x) << S_GPIO7_IN)
+#define F_GPIO7_IN V_GPIO7_IN(1U)
+
+#define S_GPIO6_IN 6
+#define V_GPIO6_IN(x) ((x) << S_GPIO6_IN)
+#define F_GPIO6_IN V_GPIO6_IN(1U)
+
+#define S_GPIO5_IN 5
+#define V_GPIO5_IN(x) ((x) << S_GPIO5_IN)
+#define F_GPIO5_IN V_GPIO5_IN(1U)
+
+#define S_GPIO4_IN 4
+#define V_GPIO4_IN(x) ((x) << S_GPIO4_IN)
+#define F_GPIO4_IN V_GPIO4_IN(1U)
+
+#define S_GPIO3_IN 3
+#define V_GPIO3_IN(x) ((x) << S_GPIO3_IN)
+#define F_GPIO3_IN V_GPIO3_IN(1U)
+
+#define S_GPIO2_IN 2
+#define V_GPIO2_IN(x) ((x) << S_GPIO2_IN)
+#define F_GPIO2_IN V_GPIO2_IN(1U)
+
+#define S_GPIO1_IN 1
+#define V_GPIO1_IN(x) ((x) << S_GPIO1_IN)
+#define F_GPIO1_IN V_GPIO1_IN(1U)
+
+#define S_GPIO0_IN 0
+#define V_GPIO0_IN(x) ((x) << S_GPIO0_IN)
+#define F_GPIO0_IN V_GPIO0_IN(1U)
+
+#define A_DBG_INT_ENABLE 0x6018
+
+#define S_IBM_FDL_FAIL_INT_ENBL 25
+#define V_IBM_FDL_FAIL_INT_ENBL(x) ((x) << S_IBM_FDL_FAIL_INT_ENBL)
+#define F_IBM_FDL_FAIL_INT_ENBL V_IBM_FDL_FAIL_INT_ENBL(1U)
+
+#define S_ARM_FAIL_INT_ENBL 24
+#define V_ARM_FAIL_INT_ENBL(x) ((x) << S_ARM_FAIL_INT_ENBL)
+#define F_ARM_FAIL_INT_ENBL V_ARM_FAIL_INT_ENBL(1U)
+
+#define S_ARM_ERROR_OUT_INT_ENBL 23
+#define V_ARM_ERROR_OUT_INT_ENBL(x) ((x) << S_ARM_ERROR_OUT_INT_ENBL)
+#define F_ARM_ERROR_OUT_INT_ENBL V_ARM_ERROR_OUT_INT_ENBL(1U)
+
+#define S_PLL_LOCK_LOST_INT_ENBL 22
+#define V_PLL_LOCK_LOST_INT_ENBL(x) ((x) << S_PLL_LOCK_LOST_INT_ENBL)
+#define F_PLL_LOCK_LOST_INT_ENBL V_PLL_LOCK_LOST_INT_ENBL(1U)
+
+#define S_C_LOCK 21
+#define V_C_LOCK(x) ((x) << S_C_LOCK)
+#define F_C_LOCK V_C_LOCK(1U)
+
+#define S_M_LOCK 20
+#define V_M_LOCK(x) ((x) << S_M_LOCK)
+#define F_M_LOCK V_M_LOCK(1U)
+
+#define S_U_LOCK 19
+#define V_U_LOCK(x) ((x) << S_U_LOCK)
+#define F_U_LOCK V_U_LOCK(1U)
+
+#define S_PCIE_LOCK 18
+#define V_PCIE_LOCK(x) ((x) << S_PCIE_LOCK)
+#define F_PCIE_LOCK V_PCIE_LOCK(1U)
+
+#define S_KX_LOCK 17
+#define V_KX_LOCK(x) ((x) << S_KX_LOCK)
+#define F_KX_LOCK V_KX_LOCK(1U)
+
+#define S_KR_LOCK 16
+#define V_KR_LOCK(x) ((x) << S_KR_LOCK)
+#define F_KR_LOCK V_KR_LOCK(1U)
+
+#define S_GPIO15 15
+#define V_GPIO15(x) ((x) << S_GPIO15)
+#define F_GPIO15 V_GPIO15(1U)
+
+#define S_GPIO14 14
+#define V_GPIO14(x) ((x) << S_GPIO14)
+#define F_GPIO14 V_GPIO14(1U)
+
+#define S_GPIO13 13
+#define V_GPIO13(x) ((x) << S_GPIO13)
+#define F_GPIO13 V_GPIO13(1U)
+
+#define S_GPIO12 12
+#define V_GPIO12(x) ((x) << S_GPIO12)
+#define F_GPIO12 V_GPIO12(1U)
+
+#define S_GPIO11 11
+#define V_GPIO11(x) ((x) << S_GPIO11)
+#define F_GPIO11 V_GPIO11(1U)
+
+#define S_GPIO10 10
+#define V_GPIO10(x) ((x) << S_GPIO10)
+#define F_GPIO10 V_GPIO10(1U)
+
+#define S_GPIO9 9
+#define V_GPIO9(x) ((x) << S_GPIO9)
+#define F_GPIO9 V_GPIO9(1U)
+
+#define S_GPIO8 8
+#define V_GPIO8(x) ((x) << S_GPIO8)
+#define F_GPIO8 V_GPIO8(1U)
+
+#define S_GPIO7 7
+#define V_GPIO7(x) ((x) << S_GPIO7)
+#define F_GPIO7 V_GPIO7(1U)
+
+#define S_GPIO6 6
+#define V_GPIO6(x) ((x) << S_GPIO6)
+#define F_GPIO6 V_GPIO6(1U)
+
+#define S_GPIO5 5
+#define V_GPIO5(x) ((x) << S_GPIO5)
+#define F_GPIO5 V_GPIO5(1U)
+
+#define S_GPIO4 4
+#define V_GPIO4(x) ((x) << S_GPIO4)
+#define F_GPIO4 V_GPIO4(1U)
+
+#define S_GPIO3 3
+#define V_GPIO3(x) ((x) << S_GPIO3)
+#define F_GPIO3 V_GPIO3(1U)
+
+#define S_GPIO2 2
+#define V_GPIO2(x) ((x) << S_GPIO2)
+#define F_GPIO2 V_GPIO2(1U)
+
+#define S_GPIO1 1
+#define V_GPIO1(x) ((x) << S_GPIO1)
+#define F_GPIO1 V_GPIO1(1U)
+
+#define S_GPIO0 0
+#define V_GPIO0(x) ((x) << S_GPIO0)
+#define F_GPIO0 V_GPIO0(1U)
+
+#define A_DBG_INT_CAUSE 0x601c
+
+#define S_IBM_FDL_FAIL_INT_CAUSE 25
+#define V_IBM_FDL_FAIL_INT_CAUSE(x) ((x) << S_IBM_FDL_FAIL_INT_CAUSE)
+#define F_IBM_FDL_FAIL_INT_CAUSE V_IBM_FDL_FAIL_INT_CAUSE(1U)
+
+#define S_ARM_FAIL_INT_CAUSE 24
+#define V_ARM_FAIL_INT_CAUSE(x) ((x) << S_ARM_FAIL_INT_CAUSE)
+#define F_ARM_FAIL_INT_CAUSE V_ARM_FAIL_INT_CAUSE(1U)
+
+#define S_ARM_ERROR_OUT_INT_CAUSE 23
+#define V_ARM_ERROR_OUT_INT_CAUSE(x) ((x) << S_ARM_ERROR_OUT_INT_CAUSE)
+#define F_ARM_ERROR_OUT_INT_CAUSE V_ARM_ERROR_OUT_INT_CAUSE(1U)
+
+#define S_PLL_LOCK_LOST_INT_CAUSE 22
+#define V_PLL_LOCK_LOST_INT_CAUSE(x) ((x) << S_PLL_LOCK_LOST_INT_CAUSE)
+#define F_PLL_LOCK_LOST_INT_CAUSE V_PLL_LOCK_LOST_INT_CAUSE(1U)
+
+#define A_DBG_DBG0_RST_VALUE 0x6020
+
+#define S_DEBUGDATA 0
+#define M_DEBUGDATA 0xffffU
+#define V_DEBUGDATA(x) ((x) << S_DEBUGDATA)
+#define G_DEBUGDATA(x) (((x) >> S_DEBUGDATA) & M_DEBUGDATA)
+
+#define A_DBG_OVERWRSERCFG_EN 0x6024
+
+#define S_OVERWRSERCFG_EN 0
+#define V_OVERWRSERCFG_EN(x) ((x) << S_OVERWRSERCFG_EN)
+#define F_OVERWRSERCFG_EN V_OVERWRSERCFG_EN(1U)
+
+#define A_DBG_PLL_OCLK_PAD_EN 0x6028
+
+#define S_PCIE_OCLK_EN 20
+#define V_PCIE_OCLK_EN(x) ((x) << S_PCIE_OCLK_EN)
+#define F_PCIE_OCLK_EN V_PCIE_OCLK_EN(1U)
+
+#define S_KX_OCLK_EN 16
+#define V_KX_OCLK_EN(x) ((x) << S_KX_OCLK_EN)
+#define F_KX_OCLK_EN V_KX_OCLK_EN(1U)
+
+#define S_U_OCLK_EN 12
+#define V_U_OCLK_EN(x) ((x) << S_U_OCLK_EN)
+#define F_U_OCLK_EN V_U_OCLK_EN(1U)
+
+#define S_KR_OCLK_EN 8
+#define V_KR_OCLK_EN(x) ((x) << S_KR_OCLK_EN)
+#define F_KR_OCLK_EN V_KR_OCLK_EN(1U)
+
+#define S_M_OCLK_EN 4
+#define V_M_OCLK_EN(x) ((x) << S_M_OCLK_EN)
+#define F_M_OCLK_EN V_M_OCLK_EN(1U)
+
+#define S_C_OCLK_EN 0
+#define V_C_OCLK_EN(x) ((x) << S_C_OCLK_EN)
+#define F_C_OCLK_EN V_C_OCLK_EN(1U)
+
+#define A_DBG_PLL_LOCK 0x602c
+
+#define S_PLL_P_LOCK 20
+#define V_PLL_P_LOCK(x) ((x) << S_PLL_P_LOCK)
+#define F_PLL_P_LOCK V_PLL_P_LOCK(1U)
+
+#define S_PLL_KX_LOCK 16
+#define V_PLL_KX_LOCK(x) ((x) << S_PLL_KX_LOCK)
+#define F_PLL_KX_LOCK V_PLL_KX_LOCK(1U)
+
+#define S_PLL_U_LOCK 12
+#define V_PLL_U_LOCK(x) ((x) << S_PLL_U_LOCK)
+#define F_PLL_U_LOCK V_PLL_U_LOCK(1U)
+
+#define S_PLL_KR_LOCK 8
+#define V_PLL_KR_LOCK(x) ((x) << S_PLL_KR_LOCK)
+#define F_PLL_KR_LOCK V_PLL_KR_LOCK(1U)
+
+#define S_PLL_M_LOCK 4
+#define V_PLL_M_LOCK(x) ((x) << S_PLL_M_LOCK)
+#define F_PLL_M_LOCK V_PLL_M_LOCK(1U)
+
+#define S_PLL_C_LOCK 0
+#define V_PLL_C_LOCK(x) ((x) << S_PLL_C_LOCK)
+#define F_PLL_C_LOCK V_PLL_C_LOCK(1U)
+
+#define A_DBG_GPIO_ACT_LOW 0x6030
+
+#define S_P_LOCK_ACT_LOW 21
+#define V_P_LOCK_ACT_LOW(x) ((x) << S_P_LOCK_ACT_LOW)
+#define F_P_LOCK_ACT_LOW V_P_LOCK_ACT_LOW(1U)
+
+#define S_C_LOCK_ACT_LOW 20
+#define V_C_LOCK_ACT_LOW(x) ((x) << S_C_LOCK_ACT_LOW)
+#define F_C_LOCK_ACT_LOW V_C_LOCK_ACT_LOW(1U)
+
+#define S_M_LOCK_ACT_LOW 19
+#define V_M_LOCK_ACT_LOW(x) ((x) << S_M_LOCK_ACT_LOW)
+#define F_M_LOCK_ACT_LOW V_M_LOCK_ACT_LOW(1U)
+
+#define S_U_LOCK_ACT_LOW 18
+#define V_U_LOCK_ACT_LOW(x) ((x) << S_U_LOCK_ACT_LOW)
+#define F_U_LOCK_ACT_LOW V_U_LOCK_ACT_LOW(1U)
+
+#define S_KR_LOCK_ACT_LOW 17
+#define V_KR_LOCK_ACT_LOW(x) ((x) << S_KR_LOCK_ACT_LOW)
+#define F_KR_LOCK_ACT_LOW V_KR_LOCK_ACT_LOW(1U)
+
+#define S_KX_LOCK_ACT_LOW 16
+#define V_KX_LOCK_ACT_LOW(x) ((x) << S_KX_LOCK_ACT_LOW)
+#define F_KX_LOCK_ACT_LOW V_KX_LOCK_ACT_LOW(1U)
+
+#define S_GPIO15_ACT_LOW 15
+#define V_GPIO15_ACT_LOW(x) ((x) << S_GPIO15_ACT_LOW)
+#define F_GPIO15_ACT_LOW V_GPIO15_ACT_LOW(1U)
+
+#define S_GPIO14_ACT_LOW 14
+#define V_GPIO14_ACT_LOW(x) ((x) << S_GPIO14_ACT_LOW)
+#define F_GPIO14_ACT_LOW V_GPIO14_ACT_LOW(1U)
+
+#define S_GPIO13_ACT_LOW 13
+#define V_GPIO13_ACT_LOW(x) ((x) << S_GPIO13_ACT_LOW)
+#define F_GPIO13_ACT_LOW V_GPIO13_ACT_LOW(1U)
+
+#define S_GPIO12_ACT_LOW 12
+#define V_GPIO12_ACT_LOW(x) ((x) << S_GPIO12_ACT_LOW)
+#define F_GPIO12_ACT_LOW V_GPIO12_ACT_LOW(1U)
+
+#define S_GPIO11_ACT_LOW 11
+#define V_GPIO11_ACT_LOW(x) ((x) << S_GPIO11_ACT_LOW)
+#define F_GPIO11_ACT_LOW V_GPIO11_ACT_LOW(1U)
+
+#define S_GPIO10_ACT_LOW 10
+#define V_GPIO10_ACT_LOW(x) ((x) << S_GPIO10_ACT_LOW)
+#define F_GPIO10_ACT_LOW V_GPIO10_ACT_LOW(1U)
+
+#define S_GPIO9_ACT_LOW 9
+#define V_GPIO9_ACT_LOW(x) ((x) << S_GPIO9_ACT_LOW)
+#define F_GPIO9_ACT_LOW V_GPIO9_ACT_LOW(1U)
+
+#define S_GPIO8_ACT_LOW 8
+#define V_GPIO8_ACT_LOW(x) ((x) << S_GPIO8_ACT_LOW)
+#define F_GPIO8_ACT_LOW V_GPIO8_ACT_LOW(1U)
+
+#define S_GPIO7_ACT_LOW 7
+#define V_GPIO7_ACT_LOW(x) ((x) << S_GPIO7_ACT_LOW)
+#define F_GPIO7_ACT_LOW V_GPIO7_ACT_LOW(1U)
+
+#define S_GPIO6_ACT_LOW 6
+#define V_GPIO6_ACT_LOW(x) ((x) << S_GPIO6_ACT_LOW)
+#define F_GPIO6_ACT_LOW V_GPIO6_ACT_LOW(1U)
+
+#define S_GPIO5_ACT_LOW 5
+#define V_GPIO5_ACT_LOW(x) ((x) << S_GPIO5_ACT_LOW)
+#define F_GPIO5_ACT_LOW V_GPIO5_ACT_LOW(1U)
+
+#define S_GPIO4_ACT_LOW 4
+#define V_GPIO4_ACT_LOW(x) ((x) << S_GPIO4_ACT_LOW)
+#define F_GPIO4_ACT_LOW V_GPIO4_ACT_LOW(1U)
+
+#define S_GPIO3_ACT_LOW 3
+#define V_GPIO3_ACT_LOW(x) ((x) << S_GPIO3_ACT_LOW)
+#define F_GPIO3_ACT_LOW V_GPIO3_ACT_LOW(1U)
+
+#define S_GPIO2_ACT_LOW 2
+#define V_GPIO2_ACT_LOW(x) ((x) << S_GPIO2_ACT_LOW)
+#define F_GPIO2_ACT_LOW V_GPIO2_ACT_LOW(1U)
+
+#define S_GPIO1_ACT_LOW 1
+#define V_GPIO1_ACT_LOW(x) ((x) << S_GPIO1_ACT_LOW)
+#define F_GPIO1_ACT_LOW V_GPIO1_ACT_LOW(1U)
+
+#define S_GPIO0_ACT_LOW 0
+#define V_GPIO0_ACT_LOW(x) ((x) << S_GPIO0_ACT_LOW)
+#define F_GPIO0_ACT_LOW V_GPIO0_ACT_LOW(1U)
+
+#define A_DBG_EFUSE_BYTE0_3 0x6034
+#define A_DBG_EFUSE_BYTE4_7 0x6038
+#define A_DBG_EFUSE_BYTE8_11 0x603c
+#define A_DBG_EFUSE_BYTE12_15 0x6040
+#define A_DBG_STATIC_U_PLL_CONF 0x6044
+
+#define S_STATIC_U_PLL_MULT 23
+#define M_STATIC_U_PLL_MULT 0x1ffU
+#define V_STATIC_U_PLL_MULT(x) ((x) << S_STATIC_U_PLL_MULT)
+#define G_STATIC_U_PLL_MULT(x) (((x) >> S_STATIC_U_PLL_MULT) & M_STATIC_U_PLL_MULT)
+
+#define S_STATIC_U_PLL_PREDIV 18
+#define M_STATIC_U_PLL_PREDIV 0x1fU
+#define V_STATIC_U_PLL_PREDIV(x) ((x) << S_STATIC_U_PLL_PREDIV)
+#define G_STATIC_U_PLL_PREDIV(x) (((x) >> S_STATIC_U_PLL_PREDIV) & M_STATIC_U_PLL_PREDIV)
+
+#define S_STATIC_U_PLL_RANGEA 14
+#define M_STATIC_U_PLL_RANGEA 0xfU
+#define V_STATIC_U_PLL_RANGEA(x) ((x) << S_STATIC_U_PLL_RANGEA)
+#define G_STATIC_U_PLL_RANGEA(x) (((x) >> S_STATIC_U_PLL_RANGEA) & M_STATIC_U_PLL_RANGEA)
+
+#define S_STATIC_U_PLL_RANGEB 10
+#define M_STATIC_U_PLL_RANGEB 0xfU
+#define V_STATIC_U_PLL_RANGEB(x) ((x) << S_STATIC_U_PLL_RANGEB)
+#define G_STATIC_U_PLL_RANGEB(x) (((x) >> S_STATIC_U_PLL_RANGEB) & M_STATIC_U_PLL_RANGEB)
+
+#define S_STATIC_U_PLL_TUNE 0
+#define M_STATIC_U_PLL_TUNE 0x3ffU
+#define V_STATIC_U_PLL_TUNE(x) ((x) << S_STATIC_U_PLL_TUNE)
+#define G_STATIC_U_PLL_TUNE(x) (((x) >> S_STATIC_U_PLL_TUNE) & M_STATIC_U_PLL_TUNE)
+
+#define A_DBG_STATIC_C_PLL_CONF 0x6048
+
+#define S_STATIC_C_PLL_MULT 23
+#define M_STATIC_C_PLL_MULT 0x1ffU
+#define V_STATIC_C_PLL_MULT(x) ((x) << S_STATIC_C_PLL_MULT)
+#define G_STATIC_C_PLL_MULT(x) (((x) >> S_STATIC_C_PLL_MULT) & M_STATIC_C_PLL_MULT)
+
+#define S_STATIC_C_PLL_PREDIV 18
+#define M_STATIC_C_PLL_PREDIV 0x1fU
+#define V_STATIC_C_PLL_PREDIV(x) ((x) << S_STATIC_C_PLL_PREDIV)
+#define G_STATIC_C_PLL_PREDIV(x) (((x) >> S_STATIC_C_PLL_PREDIV) & M_STATIC_C_PLL_PREDIV)
+
+#define S_STATIC_C_PLL_RANGEA 14
+#define M_STATIC_C_PLL_RANGEA 0xfU
+#define V_STATIC_C_PLL_RANGEA(x) ((x) << S_STATIC_C_PLL_RANGEA)
+#define G_STATIC_C_PLL_RANGEA(x) (((x) >> S_STATIC_C_PLL_RANGEA) & M_STATIC_C_PLL_RANGEA)
+
+#define S_STATIC_C_PLL_RANGEB 10
+#define M_STATIC_C_PLL_RANGEB 0xfU
+#define V_STATIC_C_PLL_RANGEB(x) ((x) << S_STATIC_C_PLL_RANGEB)
+#define G_STATIC_C_PLL_RANGEB(x) (((x) >> S_STATIC_C_PLL_RANGEB) & M_STATIC_C_PLL_RANGEB)
+
+#define S_STATIC_C_PLL_TUNE 0
+#define M_STATIC_C_PLL_TUNE 0x3ffU
+#define V_STATIC_C_PLL_TUNE(x) ((x) << S_STATIC_C_PLL_TUNE)
+#define G_STATIC_C_PLL_TUNE(x) (((x) >> S_STATIC_C_PLL_TUNE) & M_STATIC_C_PLL_TUNE)
+
+#define A_DBG_STATIC_M_PLL_CONF 0x604c
+
+#define S_STATIC_M_PLL_MULT 23
+#define M_STATIC_M_PLL_MULT 0x1ffU
+#define V_STATIC_M_PLL_MULT(x) ((x) << S_STATIC_M_PLL_MULT)
+#define G_STATIC_M_PLL_MULT(x) (((x) >> S_STATIC_M_PLL_MULT) & M_STATIC_M_PLL_MULT)
+
+#define S_STATIC_M_PLL_PREDIV 18
+#define M_STATIC_M_PLL_PREDIV 0x1fU
+#define V_STATIC_M_PLL_PREDIV(x) ((x) << S_STATIC_M_PLL_PREDIV)
+#define G_STATIC_M_PLL_PREDIV(x) (((x) >> S_STATIC_M_PLL_PREDIV) & M_STATIC_M_PLL_PREDIV)
+
+#define S_STATIC_M_PLL_RANGEA 14
+#define M_STATIC_M_PLL_RANGEA 0xfU
+#define V_STATIC_M_PLL_RANGEA(x) ((x) << S_STATIC_M_PLL_RANGEA)
+#define G_STATIC_M_PLL_RANGEA(x) (((x) >> S_STATIC_M_PLL_RANGEA) & M_STATIC_M_PLL_RANGEA)
+
+#define S_STATIC_M_PLL_RANGEB 10
+#define M_STATIC_M_PLL_RANGEB 0xfU
+#define V_STATIC_M_PLL_RANGEB(x) ((x) << S_STATIC_M_PLL_RANGEB)
+#define G_STATIC_M_PLL_RANGEB(x) (((x) >> S_STATIC_M_PLL_RANGEB) & M_STATIC_M_PLL_RANGEB)
+
+#define S_STATIC_M_PLL_TUNE 0
+#define M_STATIC_M_PLL_TUNE 0x3ffU
+#define V_STATIC_M_PLL_TUNE(x) ((x) << S_STATIC_M_PLL_TUNE)
+#define G_STATIC_M_PLL_TUNE(x) (((x) >> S_STATIC_M_PLL_TUNE) & M_STATIC_M_PLL_TUNE)
+
+#define A_DBG_STATIC_KX_PLL_CONF 0x6050
+
+#define S_STATIC_KX_PLL_C 21
+#define M_STATIC_KX_PLL_C 0xffU
+#define V_STATIC_KX_PLL_C(x) ((x) << S_STATIC_KX_PLL_C)
+#define G_STATIC_KX_PLL_C(x) (((x) >> S_STATIC_KX_PLL_C) & M_STATIC_KX_PLL_C)
+
+#define S_STATIC_KX_PLL_M 15
+#define M_STATIC_KX_PLL_M 0x3fU
+#define V_STATIC_KX_PLL_M(x) ((x) << S_STATIC_KX_PLL_M)
+#define G_STATIC_KX_PLL_M(x) (((x) >> S_STATIC_KX_PLL_M) & M_STATIC_KX_PLL_M)
+
+#define S_STATIC_KX_PLL_N1 11
+#define M_STATIC_KX_PLL_N1 0xfU
+#define V_STATIC_KX_PLL_N1(x) ((x) << S_STATIC_KX_PLL_N1)
+#define G_STATIC_KX_PLL_N1(x) (((x) >> S_STATIC_KX_PLL_N1) & M_STATIC_KX_PLL_N1)
+
+#define S_STATIC_KX_PLL_N2 7
+#define M_STATIC_KX_PLL_N2 0xfU
+#define V_STATIC_KX_PLL_N2(x) ((x) << S_STATIC_KX_PLL_N2)
+#define G_STATIC_KX_PLL_N2(x) (((x) >> S_STATIC_KX_PLL_N2) & M_STATIC_KX_PLL_N2)
+
+#define S_STATIC_KX_PLL_N3 3
+#define M_STATIC_KX_PLL_N3 0xfU
+#define V_STATIC_KX_PLL_N3(x) ((x) << S_STATIC_KX_PLL_N3)
+#define G_STATIC_KX_PLL_N3(x) (((x) >> S_STATIC_KX_PLL_N3) & M_STATIC_KX_PLL_N3)
+
+#define S_STATIC_KX_PLL_P 0
+#define M_STATIC_KX_PLL_P 0x7U
+#define V_STATIC_KX_PLL_P(x) ((x) << S_STATIC_KX_PLL_P)
+#define G_STATIC_KX_PLL_P(x) (((x) >> S_STATIC_KX_PLL_P) & M_STATIC_KX_PLL_P)
+
+#define A_DBG_STATIC_KR_PLL_CONF 0x6054
+
+#define S_STATIC_KR_PLL_C 21
+#define M_STATIC_KR_PLL_C 0xffU
+#define V_STATIC_KR_PLL_C(x) ((x) << S_STATIC_KR_PLL_C)
+#define G_STATIC_KR_PLL_C(x) (((x) >> S_STATIC_KR_PLL_C) & M_STATIC_KR_PLL_C)
+
+#define S_STATIC_KR_PLL_M 15
+#define M_STATIC_KR_PLL_M 0x3fU
+#define V_STATIC_KR_PLL_M(x) ((x) << S_STATIC_KR_PLL_M)
+#define G_STATIC_KR_PLL_M(x) (((x) >> S_STATIC_KR_PLL_M) & M_STATIC_KR_PLL_M)
+
+#define S_STATIC_KR_PLL_N1 11
+#define M_STATIC_KR_PLL_N1 0xfU
+#define V_STATIC_KR_PLL_N1(x) ((x) << S_STATIC_KR_PLL_N1)
+#define G_STATIC_KR_PLL_N1(x) (((x) >> S_STATIC_KR_PLL_N1) & M_STATIC_KR_PLL_N1)
+
+#define S_STATIC_KR_PLL_N2 7
+#define M_STATIC_KR_PLL_N2 0xfU
+#define V_STATIC_KR_PLL_N2(x) ((x) << S_STATIC_KR_PLL_N2)
+#define G_STATIC_KR_PLL_N2(x) (((x) >> S_STATIC_KR_PLL_N2) & M_STATIC_KR_PLL_N2)
+
+#define S_STATIC_KR_PLL_N3 3
+#define M_STATIC_KR_PLL_N3 0xfU
+#define V_STATIC_KR_PLL_N3(x) ((x) << S_STATIC_KR_PLL_N3)
+#define G_STATIC_KR_PLL_N3(x) (((x) >> S_STATIC_KR_PLL_N3) & M_STATIC_KR_PLL_N3)
+
+#define S_STATIC_KR_PLL_P 0
+#define M_STATIC_KR_PLL_P 0x7U
+#define V_STATIC_KR_PLL_P(x) ((x) << S_STATIC_KR_PLL_P)
+#define G_STATIC_KR_PLL_P(x) (((x) >> S_STATIC_KR_PLL_P) & M_STATIC_KR_PLL_P)
+
+#define A_DBG_EXTRA_STATIC_BITS_CONF 0x6058
+
+#define S_STATIC_M_PLL_RESET 30
+#define V_STATIC_M_PLL_RESET(x) ((x) << S_STATIC_M_PLL_RESET)
+#define F_STATIC_M_PLL_RESET V_STATIC_M_PLL_RESET(1U)
+
+#define S_STATIC_M_PLL_SLEEP 29
+#define V_STATIC_M_PLL_SLEEP(x) ((x) << S_STATIC_M_PLL_SLEEP)
+#define F_STATIC_M_PLL_SLEEP V_STATIC_M_PLL_SLEEP(1U)
+
+#define S_STATIC_M_PLL_BYPASS 28
+#define V_STATIC_M_PLL_BYPASS(x) ((x) << S_STATIC_M_PLL_BYPASS)
+#define F_STATIC_M_PLL_BYPASS V_STATIC_M_PLL_BYPASS(1U)
+
+#define S_STATIC_MPLL_CLK_SEL 27
+#define V_STATIC_MPLL_CLK_SEL(x) ((x) << S_STATIC_MPLL_CLK_SEL)
+#define F_STATIC_MPLL_CLK_SEL V_STATIC_MPLL_CLK_SEL(1U)
+
+#define S_STATIC_U_PLL_SLEEP 26
+#define V_STATIC_U_PLL_SLEEP(x) ((x) << S_STATIC_U_PLL_SLEEP)
+#define F_STATIC_U_PLL_SLEEP V_STATIC_U_PLL_SLEEP(1U)
+
+#define S_STATIC_C_PLL_SLEEP 25
+#define V_STATIC_C_PLL_SLEEP(x) ((x) << S_STATIC_C_PLL_SLEEP)
+#define F_STATIC_C_PLL_SLEEP V_STATIC_C_PLL_SLEEP(1U)
+
+#define S_STATIC_LVDS_CLKOUT_SEL 23
+#define M_STATIC_LVDS_CLKOUT_SEL 0x3U
+#define V_STATIC_LVDS_CLKOUT_SEL(x) ((x) << S_STATIC_LVDS_CLKOUT_SEL)
+#define G_STATIC_LVDS_CLKOUT_SEL(x) (((x) >> S_STATIC_LVDS_CLKOUT_SEL) & M_STATIC_LVDS_CLKOUT_SEL)
+
+#define S_STATIC_LVDS_CLKOUT_EN 22
+#define V_STATIC_LVDS_CLKOUT_EN(x) ((x) << S_STATIC_LVDS_CLKOUT_EN)
+#define F_STATIC_LVDS_CLKOUT_EN V_STATIC_LVDS_CLKOUT_EN(1U)
+
+#define S_STATIC_CCLK_FREQ_SEL 20
+#define M_STATIC_CCLK_FREQ_SEL 0x3U
+#define V_STATIC_CCLK_FREQ_SEL(x) ((x) << S_STATIC_CCLK_FREQ_SEL)
+#define G_STATIC_CCLK_FREQ_SEL(x) (((x) >> S_STATIC_CCLK_FREQ_SEL) & M_STATIC_CCLK_FREQ_SEL)
+
+#define S_STATIC_UCLK_FREQ_SEL 18
+#define M_STATIC_UCLK_FREQ_SEL 0x3U
+#define V_STATIC_UCLK_FREQ_SEL(x) ((x) << S_STATIC_UCLK_FREQ_SEL)
+#define G_STATIC_UCLK_FREQ_SEL(x) (((x) >> S_STATIC_UCLK_FREQ_SEL) & M_STATIC_UCLK_FREQ_SEL)
+
+#define S_EXPHYCLK_SEL_EN 17
+#define V_EXPHYCLK_SEL_EN(x) ((x) << S_EXPHYCLK_SEL_EN)
+#define F_EXPHYCLK_SEL_EN V_EXPHYCLK_SEL_EN(1U)
+
+#define S_EXPHYCLK_SEL 15
+#define M_EXPHYCLK_SEL 0x3U
+#define V_EXPHYCLK_SEL(x) ((x) << S_EXPHYCLK_SEL)
+#define G_EXPHYCLK_SEL(x) (((x) >> S_EXPHYCLK_SEL) & M_EXPHYCLK_SEL)
+
+#define S_STATIC_U_PLL_BYPASS 14
+#define V_STATIC_U_PLL_BYPASS(x) ((x) << S_STATIC_U_PLL_BYPASS)
+#define F_STATIC_U_PLL_BYPASS V_STATIC_U_PLL_BYPASS(1U)
+
+#define S_STATIC_C_PLL_BYPASS 13
+#define V_STATIC_C_PLL_BYPASS(x) ((x) << S_STATIC_C_PLL_BYPASS)
+#define F_STATIC_C_PLL_BYPASS V_STATIC_C_PLL_BYPASS(1U)
+
+#define S_STATIC_KR_PLL_BYPASS 12
+#define V_STATIC_KR_PLL_BYPASS(x) ((x) << S_STATIC_KR_PLL_BYPASS)
+#define F_STATIC_KR_PLL_BYPASS V_STATIC_KR_PLL_BYPASS(1U)
+
+#define S_STATIC_KX_PLL_BYPASS 11
+#define V_STATIC_KX_PLL_BYPASS(x) ((x) << S_STATIC_KX_PLL_BYPASS)
+#define F_STATIC_KX_PLL_BYPASS V_STATIC_KX_PLL_BYPASS(1U)
+
+#define S_STATIC_KX_PLL_V 7
+#define M_STATIC_KX_PLL_V 0xfU
+#define V_STATIC_KX_PLL_V(x) ((x) << S_STATIC_KX_PLL_V)
+#define G_STATIC_KX_PLL_V(x) (((x) >> S_STATIC_KX_PLL_V) & M_STATIC_KX_PLL_V)
+
+#define S_STATIC_KR_PLL_V 3
+#define M_STATIC_KR_PLL_V 0xfU
+#define V_STATIC_KR_PLL_V(x) ((x) << S_STATIC_KR_PLL_V)
+#define G_STATIC_KR_PLL_V(x) (((x) >> S_STATIC_KR_PLL_V) & M_STATIC_KR_PLL_V)
+
+#define S_PSRO_SEL 0
+#define M_PSRO_SEL 0x7U
+#define V_PSRO_SEL(x) ((x) << S_PSRO_SEL)
+#define G_PSRO_SEL(x) (((x) >> S_PSRO_SEL) & M_PSRO_SEL)
+
+#define A_DBG_STATIC_OCLK_MUXSEL_CONF 0x605c
+
+#define S_M_OCLK_MUXSEL 12
+#define V_M_OCLK_MUXSEL(x) ((x) << S_M_OCLK_MUXSEL)
+#define F_M_OCLK_MUXSEL V_M_OCLK_MUXSEL(1U)
+
+#define S_C_OCLK_MUXSEL 10
+#define M_C_OCLK_MUXSEL 0x3U
+#define V_C_OCLK_MUXSEL(x) ((x) << S_C_OCLK_MUXSEL)
+#define G_C_OCLK_MUXSEL(x) (((x) >> S_C_OCLK_MUXSEL) & M_C_OCLK_MUXSEL)
+
+#define S_U_OCLK_MUXSEL 8
+#define M_U_OCLK_MUXSEL 0x3U
+#define V_U_OCLK_MUXSEL(x) ((x) << S_U_OCLK_MUXSEL)
+#define G_U_OCLK_MUXSEL(x) (((x) >> S_U_OCLK_MUXSEL) & M_U_OCLK_MUXSEL)
+
+#define S_P_OCLK_MUXSEL 6
+#define M_P_OCLK_MUXSEL 0x3U
+#define V_P_OCLK_MUXSEL(x) ((x) << S_P_OCLK_MUXSEL)
+#define G_P_OCLK_MUXSEL(x) (((x) >> S_P_OCLK_MUXSEL) & M_P_OCLK_MUXSEL)
+
+#define S_KX_OCLK_MUXSEL 3
+#define M_KX_OCLK_MUXSEL 0x7U
+#define V_KX_OCLK_MUXSEL(x) ((x) << S_KX_OCLK_MUXSEL)
+#define G_KX_OCLK_MUXSEL(x) (((x) >> S_KX_OCLK_MUXSEL) & M_KX_OCLK_MUXSEL)
+
+#define S_KR_OCLK_MUXSEL 0
+#define M_KR_OCLK_MUXSEL 0x7U
+#define V_KR_OCLK_MUXSEL(x) ((x) << S_KR_OCLK_MUXSEL)
+#define G_KR_OCLK_MUXSEL(x) (((x) >> S_KR_OCLK_MUXSEL) & M_KR_OCLK_MUXSEL)
+
+#define A_DBG_TRACE0_CONF_COMPREG0 0x6060
+#define A_DBG_TRACE0_CONF_COMPREG1 0x6064
+#define A_DBG_TRACE1_CONF_COMPREG0 0x6068
+#define A_DBG_TRACE1_CONF_COMPREG1 0x606c
+#define A_DBG_TRACE0_CONF_MASKREG0 0x6070
+#define A_DBG_TRACE0_CONF_MASKREG1 0x6074
+#define A_DBG_TRACE1_CONF_MASKREG0 0x6078
+#define A_DBG_TRACE1_CONF_MASKREG1 0x607c
+#define A_DBG_TRACE_COUNTER 0x6080
+
+#define S_COUNTER1 16
+#define M_COUNTER1 0xffffU
+#define V_COUNTER1(x) ((x) << S_COUNTER1)
+#define G_COUNTER1(x) (((x) >> S_COUNTER1) & M_COUNTER1)
+
+#define S_COUNTER0 0
+#define M_COUNTER0 0xffffU
+#define V_COUNTER0(x) ((x) << S_COUNTER0)
+#define G_COUNTER0(x) (((x) >> S_COUNTER0) & M_COUNTER0)
+
+#define A_DBG_STATIC_REFCLK_PERIOD 0x6084
+
+#define S_STATIC_REFCLK_PERIOD 0
+#define M_STATIC_REFCLK_PERIOD 0xffffU
+#define V_STATIC_REFCLK_PERIOD(x) ((x) << S_STATIC_REFCLK_PERIOD)
+#define G_STATIC_REFCLK_PERIOD(x) (((x) >> S_STATIC_REFCLK_PERIOD) & M_STATIC_REFCLK_PERIOD)
+
+#define A_DBG_TRACE_CONF 0x6088
+
+#define S_DBG_TRACE_OPERATE_WITH_TRG 5
+#define V_DBG_TRACE_OPERATE_WITH_TRG(x) ((x) << S_DBG_TRACE_OPERATE_WITH_TRG)
+#define F_DBG_TRACE_OPERATE_WITH_TRG V_DBG_TRACE_OPERATE_WITH_TRG(1U)
+
+#define S_DBG_TRACE_OPERATE_EN 4
+#define V_DBG_TRACE_OPERATE_EN(x) ((x) << S_DBG_TRACE_OPERATE_EN)
+#define F_DBG_TRACE_OPERATE_EN V_DBG_TRACE_OPERATE_EN(1U)
+
+#define S_DBG_OPERATE_INDV_COMBINED 3
+#define V_DBG_OPERATE_INDV_COMBINED(x) ((x) << S_DBG_OPERATE_INDV_COMBINED)
+#define F_DBG_OPERATE_INDV_COMBINED V_DBG_OPERATE_INDV_COMBINED(1U)
+
+#define S_DBG_OPERATE_ORDER_OF_TRIGGER 2
+#define V_DBG_OPERATE_ORDER_OF_TRIGGER(x) ((x) << S_DBG_OPERATE_ORDER_OF_TRIGGER)
+#define F_DBG_OPERATE_ORDER_OF_TRIGGER V_DBG_OPERATE_ORDER_OF_TRIGGER(1U)
+
+#define S_DBG_OPERATE_SGL_DBL_TRIGGER 1
+#define V_DBG_OPERATE_SGL_DBL_TRIGGER(x) ((x) << S_DBG_OPERATE_SGL_DBL_TRIGGER)
+#define F_DBG_OPERATE_SGL_DBL_TRIGGER V_DBG_OPERATE_SGL_DBL_TRIGGER(1U)
+
+#define S_DBG_OPERATE0_OR_1 0
+#define V_DBG_OPERATE0_OR_1(x) ((x) << S_DBG_OPERATE0_OR_1)
+#define F_DBG_OPERATE0_OR_1 V_DBG_OPERATE0_OR_1(1U)
+
+#define A_DBG_TRACE_RDEN 0x608c
+
+#define S_RD_ADDR1 10
+#define M_RD_ADDR1 0xffU
+#define V_RD_ADDR1(x) ((x) << S_RD_ADDR1)
+#define G_RD_ADDR1(x) (((x) >> S_RD_ADDR1) & M_RD_ADDR1)
+
+#define S_RD_ADDR0 2
+#define M_RD_ADDR0 0xffU
+#define V_RD_ADDR0(x) ((x) << S_RD_ADDR0)
+#define G_RD_ADDR0(x) (((x) >> S_RD_ADDR0) & M_RD_ADDR0)
+
+#define S_RD_EN1 1
+#define V_RD_EN1(x) ((x) << S_RD_EN1)
+#define F_RD_EN1 V_RD_EN1(1U)
+
+#define S_RD_EN0 0
+#define V_RD_EN0(x) ((x) << S_RD_EN0)
+#define F_RD_EN0 V_RD_EN0(1U)
+
+#define A_DBG_TRACE_WRADDR 0x6090
+
+#define S_WR_POINTER_ADDR1 16
+#define M_WR_POINTER_ADDR1 0xffU
+#define V_WR_POINTER_ADDR1(x) ((x) << S_WR_POINTER_ADDR1)
+#define G_WR_POINTER_ADDR1(x) (((x) >> S_WR_POINTER_ADDR1) & M_WR_POINTER_ADDR1)
+
+#define S_WR_POINTER_ADDR0 0
+#define M_WR_POINTER_ADDR0 0xffU
+#define V_WR_POINTER_ADDR0(x) ((x) << S_WR_POINTER_ADDR0)
+#define G_WR_POINTER_ADDR0(x) (((x) >> S_WR_POINTER_ADDR0) & M_WR_POINTER_ADDR0)
+
+#define A_DBG_TRACE0_DATA_OUT 0x6094
+#define A_DBG_TRACE1_DATA_OUT 0x6098
+#define A_DBG_PVT_REG_CALIBRATE_CTL 0x6100
+
+#define S_HALT_CALIBRATE 1
+#define V_HALT_CALIBRATE(x) ((x) << S_HALT_CALIBRATE)
+#define F_HALT_CALIBRATE V_HALT_CALIBRATE(1U)
+
+#define S_RESET_CALIBRATE 0
+#define V_RESET_CALIBRATE(x) ((x) << S_RESET_CALIBRATE)
+#define F_RESET_CALIBRATE V_RESET_CALIBRATE(1U)
+
+#define A_DBG_PVT_REG_UPDATE_CTL 0x6104
+
+#define S_FAST_UPDATE 8
+#define V_FAST_UPDATE(x) ((x) << S_FAST_UPDATE)
+#define F_FAST_UPDATE V_FAST_UPDATE(1U)
+
+#define S_FORCE_REG_IN_VALUE 2
+#define V_FORCE_REG_IN_VALUE(x) ((x) << S_FORCE_REG_IN_VALUE)
+#define F_FORCE_REG_IN_VALUE V_FORCE_REG_IN_VALUE(1U)
+
+#define S_HALT_UPDATE 1
+#define V_HALT_UPDATE(x) ((x) << S_HALT_UPDATE)
+#define F_HALT_UPDATE V_HALT_UPDATE(1U)
+
+#define A_DBG_PVT_REG_LAST_MEASUREMENT 0x6108
+
+#define S_LAST_MEASUREMENT_SELECT 8
+#define M_LAST_MEASUREMENT_SELECT 0x3U
+#define V_LAST_MEASUREMENT_SELECT(x) ((x) << S_LAST_MEASUREMENT_SELECT)
+#define G_LAST_MEASUREMENT_SELECT(x) (((x) >> S_LAST_MEASUREMENT_SELECT) & M_LAST_MEASUREMENT_SELECT)
+
+#define S_LAST_MEASUREMENT_RESULT_BANK_B 4
+#define M_LAST_MEASUREMENT_RESULT_BANK_B 0xfU
+#define V_LAST_MEASUREMENT_RESULT_BANK_B(x) ((x) << S_LAST_MEASUREMENT_RESULT_BANK_B)
+#define G_LAST_MEASUREMENT_RESULT_BANK_B(x) (((x) >> S_LAST_MEASUREMENT_RESULT_BANK_B) & M_LAST_MEASUREMENT_RESULT_BANK_B)
+
+#define S_LAST_MEASUREMENT_RESULT_BANK_A 0
+#define M_LAST_MEASUREMENT_RESULT_BANK_A 0xfU
+#define V_LAST_MEASUREMENT_RESULT_BANK_A(x) ((x) << S_LAST_MEASUREMENT_RESULT_BANK_A)
+#define G_LAST_MEASUREMENT_RESULT_BANK_A(x) (((x) >> S_LAST_MEASUREMENT_RESULT_BANK_A) & M_LAST_MEASUREMENT_RESULT_BANK_A)
+
+#define A_DBG_PVT_REG_DRVN 0x610c
+
+#define S_PVT_REG_DRVN_EN 8
+#define V_PVT_REG_DRVN_EN(x) ((x) << S_PVT_REG_DRVN_EN)
+#define F_PVT_REG_DRVN_EN V_PVT_REG_DRVN_EN(1U)
+
+#define S_PVT_REG_DRVN_B 4
+#define M_PVT_REG_DRVN_B 0xfU
+#define V_PVT_REG_DRVN_B(x) ((x) << S_PVT_REG_DRVN_B)
+#define G_PVT_REG_DRVN_B(x) (((x) >> S_PVT_REG_DRVN_B) & M_PVT_REG_DRVN_B)
+
+#define S_PVT_REG_DRVN_A 0
+#define M_PVT_REG_DRVN_A 0xfU
+#define V_PVT_REG_DRVN_A(x) ((x) << S_PVT_REG_DRVN_A)
+#define G_PVT_REG_DRVN_A(x) (((x) >> S_PVT_REG_DRVN_A) & M_PVT_REG_DRVN_A)
+
+#define A_DBG_PVT_REG_DRVP 0x6110
+
+#define S_PVT_REG_DRVP_EN 8
+#define V_PVT_REG_DRVP_EN(x) ((x) << S_PVT_REG_DRVP_EN)
+#define F_PVT_REG_DRVP_EN V_PVT_REG_DRVP_EN(1U)
+
+#define S_PVT_REG_DRVP_B 4
+#define M_PVT_REG_DRVP_B 0xfU
+#define V_PVT_REG_DRVP_B(x) ((x) << S_PVT_REG_DRVP_B)
+#define G_PVT_REG_DRVP_B(x) (((x) >> S_PVT_REG_DRVP_B) & M_PVT_REG_DRVP_B)
+
+#define S_PVT_REG_DRVP_A 0
+#define M_PVT_REG_DRVP_A 0xfU
+#define V_PVT_REG_DRVP_A(x) ((x) << S_PVT_REG_DRVP_A)
+#define G_PVT_REG_DRVP_A(x) (((x) >> S_PVT_REG_DRVP_A) & M_PVT_REG_DRVP_A)
+
+#define A_DBG_PVT_REG_TERMN 0x6114
+
+#define S_PVT_REG_TERMN_EN 8
+#define V_PVT_REG_TERMN_EN(x) ((x) << S_PVT_REG_TERMN_EN)
+#define F_PVT_REG_TERMN_EN V_PVT_REG_TERMN_EN(1U)
+
+#define S_PVT_REG_TERMN_B 4
+#define M_PVT_REG_TERMN_B 0xfU
+#define V_PVT_REG_TERMN_B(x) ((x) << S_PVT_REG_TERMN_B)
+#define G_PVT_REG_TERMN_B(x) (((x) >> S_PVT_REG_TERMN_B) & M_PVT_REG_TERMN_B)
+
+#define S_PVT_REG_TERMN_A 0
+#define M_PVT_REG_TERMN_A 0xfU
+#define V_PVT_REG_TERMN_A(x) ((x) << S_PVT_REG_TERMN_A)
+#define G_PVT_REG_TERMN_A(x) (((x) >> S_PVT_REG_TERMN_A) & M_PVT_REG_TERMN_A)
+
+#define A_DBG_PVT_REG_TERMP 0x6118
+
+#define S_PVT_REG_TERMP_EN 8
+#define V_PVT_REG_TERMP_EN(x) ((x) << S_PVT_REG_TERMP_EN)
+#define F_PVT_REG_TERMP_EN V_PVT_REG_TERMP_EN(1U)
+
+#define S_PVT_REG_TERMP_B 4
+#define M_PVT_REG_TERMP_B 0xfU
+#define V_PVT_REG_TERMP_B(x) ((x) << S_PVT_REG_TERMP_B)
+#define G_PVT_REG_TERMP_B(x) (((x) >> S_PVT_REG_TERMP_B) & M_PVT_REG_TERMP_B)
+
+#define S_PVT_REG_TERMP_A 0
+#define M_PVT_REG_TERMP_A 0xfU
+#define V_PVT_REG_TERMP_A(x) ((x) << S_PVT_REG_TERMP_A)
+#define G_PVT_REG_TERMP_A(x) (((x) >> S_PVT_REG_TERMP_A) & M_PVT_REG_TERMP_A)
+
+#define A_DBG_PVT_REG_THRESHOLD 0x611c
+
+#define S_PVT_CALIBRATION_DONE 8
+#define V_PVT_CALIBRATION_DONE(x) ((x) << S_PVT_CALIBRATION_DONE)
+#define F_PVT_CALIBRATION_DONE V_PVT_CALIBRATION_DONE(1U)
+
+#define S_THRESHOLD_TERMP_MAX_SYNC 7
+#define V_THRESHOLD_TERMP_MAX_SYNC(x) ((x) << S_THRESHOLD_TERMP_MAX_SYNC)
+#define F_THRESHOLD_TERMP_MAX_SYNC V_THRESHOLD_TERMP_MAX_SYNC(1U)
+
+#define S_THRESHOLD_TERMP_MIN_SYNC 6
+#define V_THRESHOLD_TERMP_MIN_SYNC(x) ((x) << S_THRESHOLD_TERMP_MIN_SYNC)
+#define F_THRESHOLD_TERMP_MIN_SYNC V_THRESHOLD_TERMP_MIN_SYNC(1U)
+
+#define S_THRESHOLD_TERMN_MAX_SYNC 5
+#define V_THRESHOLD_TERMN_MAX_SYNC(x) ((x) << S_THRESHOLD_TERMN_MAX_SYNC)
+#define F_THRESHOLD_TERMN_MAX_SYNC V_THRESHOLD_TERMN_MAX_SYNC(1U)
+
+#define S_THRESHOLD_TERMN_MIN_SYNC 4
+#define V_THRESHOLD_TERMN_MIN_SYNC(x) ((x) << S_THRESHOLD_TERMN_MIN_SYNC)
+#define F_THRESHOLD_TERMN_MIN_SYNC V_THRESHOLD_TERMN_MIN_SYNC(1U)
+
+#define S_THRESHOLD_DRVP_MAX_SYNC 3
+#define V_THRESHOLD_DRVP_MAX_SYNC(x) ((x) << S_THRESHOLD_DRVP_MAX_SYNC)
+#define F_THRESHOLD_DRVP_MAX_SYNC V_THRESHOLD_DRVP_MAX_SYNC(1U)
+
+#define S_THRESHOLD_DRVP_MIN_SYNC 2
+#define V_THRESHOLD_DRVP_MIN_SYNC(x) ((x) << S_THRESHOLD_DRVP_MIN_SYNC)
+#define F_THRESHOLD_DRVP_MIN_SYNC V_THRESHOLD_DRVP_MIN_SYNC(1U)
+
+#define S_THRESHOLD_DRVN_MAX_SYNC 1
+#define V_THRESHOLD_DRVN_MAX_SYNC(x) ((x) << S_THRESHOLD_DRVN_MAX_SYNC)
+#define F_THRESHOLD_DRVN_MAX_SYNC V_THRESHOLD_DRVN_MAX_SYNC(1U)
+
+#define S_THRESHOLD_DRVN_MIN_SYNC 0
+#define V_THRESHOLD_DRVN_MIN_SYNC(x) ((x) << S_THRESHOLD_DRVN_MIN_SYNC)
+#define F_THRESHOLD_DRVN_MIN_SYNC V_THRESHOLD_DRVN_MIN_SYNC(1U)
+
+#define A_DBG_PVT_REG_IN_TERMP 0x6120
+
+#define S_REG_IN_TERMP_B 4
+#define M_REG_IN_TERMP_B 0xfU
+#define V_REG_IN_TERMP_B(x) ((x) << S_REG_IN_TERMP_B)
+#define G_REG_IN_TERMP_B(x) (((x) >> S_REG_IN_TERMP_B) & M_REG_IN_TERMP_B)
+
+#define S_REG_IN_TERMP_A 0
+#define M_REG_IN_TERMP_A 0xfU
+#define V_REG_IN_TERMP_A(x) ((x) << S_REG_IN_TERMP_A)
+#define G_REG_IN_TERMP_A(x) (((x) >> S_REG_IN_TERMP_A) & M_REG_IN_TERMP_A)
+
+#define A_DBG_PVT_REG_IN_TERMN 0x6124
+
+#define S_REG_IN_TERMN_B 4
+#define M_REG_IN_TERMN_B 0xfU
+#define V_REG_IN_TERMN_B(x) ((x) << S_REG_IN_TERMN_B)
+#define G_REG_IN_TERMN_B(x) (((x) >> S_REG_IN_TERMN_B) & M_REG_IN_TERMN_B)
+
+#define S_REG_IN_TERMN_A 0
+#define M_REG_IN_TERMN_A 0xfU
+#define V_REG_IN_TERMN_A(x) ((x) << S_REG_IN_TERMN_A)
+#define G_REG_IN_TERMN_A(x) (((x) >> S_REG_IN_TERMN_A) & M_REG_IN_TERMN_A)
+
+#define A_DBG_PVT_REG_IN_DRVP 0x6128
+
+#define S_REG_IN_DRVP_B 4
+#define M_REG_IN_DRVP_B 0xfU
+#define V_REG_IN_DRVP_B(x) ((x) << S_REG_IN_DRVP_B)
+#define G_REG_IN_DRVP_B(x) (((x) >> S_REG_IN_DRVP_B) & M_REG_IN_DRVP_B)
+
+#define S_REG_IN_DRVP_A 0
+#define M_REG_IN_DRVP_A 0xfU
+#define V_REG_IN_DRVP_A(x) ((x) << S_REG_IN_DRVP_A)
+#define G_REG_IN_DRVP_A(x) (((x) >> S_REG_IN_DRVP_A) & M_REG_IN_DRVP_A)
+
+#define A_DBG_PVT_REG_IN_DRVN 0x612c
+
+#define S_REG_IN_DRVN_B 4
+#define M_REG_IN_DRVN_B 0xfU
+#define V_REG_IN_DRVN_B(x) ((x) << S_REG_IN_DRVN_B)
+#define G_REG_IN_DRVN_B(x) (((x) >> S_REG_IN_DRVN_B) & M_REG_IN_DRVN_B)
+
+#define S_REG_IN_DRVN_A 0
+#define M_REG_IN_DRVN_A 0xfU
+#define V_REG_IN_DRVN_A(x) ((x) << S_REG_IN_DRVN_A)
+#define G_REG_IN_DRVN_A(x) (((x) >> S_REG_IN_DRVN_A) & M_REG_IN_DRVN_A)
+
+#define A_DBG_PVT_REG_OUT_TERMP 0x6130
+
+#define S_REG_OUT_TERMP_B 4
+#define M_REG_OUT_TERMP_B 0xfU
+#define V_REG_OUT_TERMP_B(x) ((x) << S_REG_OUT_TERMP_B)
+#define G_REG_OUT_TERMP_B(x) (((x) >> S_REG_OUT_TERMP_B) & M_REG_OUT_TERMP_B)
+
+#define S_REG_OUT_TERMP_A 0
+#define M_REG_OUT_TERMP_A 0xfU
+#define V_REG_OUT_TERMP_A(x) ((x) << S_REG_OUT_TERMP_A)
+#define G_REG_OUT_TERMP_A(x) (((x) >> S_REG_OUT_TERMP_A) & M_REG_OUT_TERMP_A)
+
+#define A_DBG_PVT_REG_OUT_TERMN 0x6134
+
+#define S_REG_OUT_TERMN_B 4
+#define M_REG_OUT_TERMN_B 0xfU
+#define V_REG_OUT_TERMN_B(x) ((x) << S_REG_OUT_TERMN_B)
+#define G_REG_OUT_TERMN_B(x) (((x) >> S_REG_OUT_TERMN_B) & M_REG_OUT_TERMN_B)
+
+#define S_REG_OUT_TERMN_A 0
+#define M_REG_OUT_TERMN_A 0xfU
+#define V_REG_OUT_TERMN_A(x) ((x) << S_REG_OUT_TERMN_A)
+#define G_REG_OUT_TERMN_A(x) (((x) >> S_REG_OUT_TERMN_A) & M_REG_OUT_TERMN_A)
+
+#define A_DBG_PVT_REG_OUT_DRVP 0x6138
+
+#define S_REG_OUT_DRVP_B 4
+#define M_REG_OUT_DRVP_B 0xfU
+#define V_REG_OUT_DRVP_B(x) ((x) << S_REG_OUT_DRVP_B)
+#define G_REG_OUT_DRVP_B(x) (((x) >> S_REG_OUT_DRVP_B) & M_REG_OUT_DRVP_B)
+
+#define S_REG_OUT_DRVP_A 0
+#define M_REG_OUT_DRVP_A 0xfU
+#define V_REG_OUT_DRVP_A(x) ((x) << S_REG_OUT_DRVP_A)
+#define G_REG_OUT_DRVP_A(x) (((x) >> S_REG_OUT_DRVP_A) & M_REG_OUT_DRVP_A)
+
+#define A_DBG_PVT_REG_OUT_DRVN 0x613c
+
+#define S_REG_OUT_DRVN_B 4
+#define M_REG_OUT_DRVN_B 0xfU
+#define V_REG_OUT_DRVN_B(x) ((x) << S_REG_OUT_DRVN_B)
+#define G_REG_OUT_DRVN_B(x) (((x) >> S_REG_OUT_DRVN_B) & M_REG_OUT_DRVN_B)
+
+#define S_REG_OUT_DRVN_A 0
+#define M_REG_OUT_DRVN_A 0xfU
+#define V_REG_OUT_DRVN_A(x) ((x) << S_REG_OUT_DRVN_A)
+#define G_REG_OUT_DRVN_A(x) (((x) >> S_REG_OUT_DRVN_A) & M_REG_OUT_DRVN_A)
+
+#define A_DBG_PVT_REG_HISTORY_TERMP 0x6140
+
+#define S_TERMP_B_HISTORY 4
+#define M_TERMP_B_HISTORY 0xfU
+#define V_TERMP_B_HISTORY(x) ((x) << S_TERMP_B_HISTORY)
+#define G_TERMP_B_HISTORY(x) (((x) >> S_TERMP_B_HISTORY) & M_TERMP_B_HISTORY)
+
+#define S_TERMP_A_HISTORY 0
+#define M_TERMP_A_HISTORY 0xfU
+#define V_TERMP_A_HISTORY(x) ((x) << S_TERMP_A_HISTORY)
+#define G_TERMP_A_HISTORY(x) (((x) >> S_TERMP_A_HISTORY) & M_TERMP_A_HISTORY)
+
+#define A_DBG_PVT_REG_HISTORY_TERMN 0x6144
+
+#define S_TERMN_B_HISTORY 4
+#define M_TERMN_B_HISTORY 0xfU
+#define V_TERMN_B_HISTORY(x) ((x) << S_TERMN_B_HISTORY)
+#define G_TERMN_B_HISTORY(x) (((x) >> S_TERMN_B_HISTORY) & M_TERMN_B_HISTORY)
+
+#define S_TERMN_A_HISTORY 0
+#define M_TERMN_A_HISTORY 0xfU
+#define V_TERMN_A_HISTORY(x) ((x) << S_TERMN_A_HISTORY)
+#define G_TERMN_A_HISTORY(x) (((x) >> S_TERMN_A_HISTORY) & M_TERMN_A_HISTORY)
+
+#define A_DBG_PVT_REG_HISTORY_DRVP 0x6148
+
+#define S_DRVP_B_HISTORY 4
+#define M_DRVP_B_HISTORY 0xfU
+#define V_DRVP_B_HISTORY(x) ((x) << S_DRVP_B_HISTORY)
+#define G_DRVP_B_HISTORY(x) (((x) >> S_DRVP_B_HISTORY) & M_DRVP_B_HISTORY)
+
+#define S_DRVP_A_HISTORY 0
+#define M_DRVP_A_HISTORY 0xfU
+#define V_DRVP_A_HISTORY(x) ((x) << S_DRVP_A_HISTORY)
+#define G_DRVP_A_HISTORY(x) (((x) >> S_DRVP_A_HISTORY) & M_DRVP_A_HISTORY)
+
+#define A_DBG_PVT_REG_HISTORY_DRVN 0x614c
+
+#define S_DRVN_B_HISTORY 4
+#define M_DRVN_B_HISTORY 0xfU
+#define V_DRVN_B_HISTORY(x) ((x) << S_DRVN_B_HISTORY)
+#define G_DRVN_B_HISTORY(x) (((x) >> S_DRVN_B_HISTORY) & M_DRVN_B_HISTORY)
+
+#define S_DRVN_A_HISTORY 0
+#define M_DRVN_A_HISTORY 0xfU
+#define V_DRVN_A_HISTORY(x) ((x) << S_DRVN_A_HISTORY)
+#define G_DRVN_A_HISTORY(x) (((x) >> S_DRVN_A_HISTORY) & M_DRVN_A_HISTORY)
+
+#define A_DBG_PVT_REG_SAMPLE_WAIT_CLKS 0x6150
+
+#define S_SAMPLE_WAIT_CLKS 0
+#define M_SAMPLE_WAIT_CLKS 0x1fU
+#define V_SAMPLE_WAIT_CLKS(x) ((x) << S_SAMPLE_WAIT_CLKS)
+#define G_SAMPLE_WAIT_CLKS(x) (((x) >> S_SAMPLE_WAIT_CLKS) & M_SAMPLE_WAIT_CLKS)
+
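+/*
+ * Illustrative sketch, not part of the generated register list: single-bit
+ * F_ flags are tested and set directly.  Polling for PVT calibration
+ * completion, again assuming a hypothetical reg_read32() accessor, could
+ * look like:
+ *
+ *	while (!(reg_read32(adap, A_DBG_PVT_REG_THRESHOLD) &
+ *	    F_PVT_CALIBRATION_DONE))
+ *		;	spin until calibration completes
+ */
+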
+/* registers for module MC */
+#define MC_BASE_ADDR 0x6200
+
+#define A_MC_PCTL_SCFG 0x6200
+
+#define S_RKINF_EN 5
+#define V_RKINF_EN(x) ((x) << S_RKINF_EN)
+#define F_RKINF_EN V_RKINF_EN(1U)
+
+#define S_DUAL_PCTL_EN 4
+#define V_DUAL_PCTL_EN(x) ((x) << S_DUAL_PCTL_EN)
+#define F_DUAL_PCTL_EN V_DUAL_PCTL_EN(1U)
+
+#define S_SLAVE_MODE 3
+#define V_SLAVE_MODE(x) ((x) << S_SLAVE_MODE)
+#define F_SLAVE_MODE V_SLAVE_MODE(1U)
+
+#define S_LOOPBACK_EN 1
+#define V_LOOPBACK_EN(x) ((x) << S_LOOPBACK_EN)
+#define F_LOOPBACK_EN V_LOOPBACK_EN(1U)
+
+#define S_HW_LOW_POWER_EN 0
+#define V_HW_LOW_POWER_EN(x) ((x) << S_HW_LOW_POWER_EN)
+#define F_HW_LOW_POWER_EN V_HW_LOW_POWER_EN(1U)
+
+#define A_MC_PCTL_SCTL 0x6204
+
+#define S_STATE_CMD 0
+#define M_STATE_CMD 0x7U
+#define V_STATE_CMD(x) ((x) << S_STATE_CMD)
+#define G_STATE_CMD(x) (((x) >> S_STATE_CMD) & M_STATE_CMD)
+
+#define A_MC_PCTL_STAT 0x6208
+
+#define S_CTL_STAT 0
+#define M_CTL_STAT 0x7U
+#define V_CTL_STAT(x) ((x) << S_CTL_STAT)
+#define G_CTL_STAT(x) (((x) >> S_CTL_STAT) & M_CTL_STAT)
+
+#define A_MC_PCTL_MCMD 0x6240
+
+#define S_START_CMD 31
+#define V_START_CMD(x) ((x) << S_START_CMD)
+#define F_START_CMD V_START_CMD(1U)
+
+#define S_CMD_ADD_DEL 24
+#define M_CMD_ADD_DEL 0xfU
+#define V_CMD_ADD_DEL(x) ((x) << S_CMD_ADD_DEL)
+#define G_CMD_ADD_DEL(x) (((x) >> S_CMD_ADD_DEL) & M_CMD_ADD_DEL)
+
+#define S_RANK_SEL 20
+#define M_RANK_SEL 0xfU
+#define V_RANK_SEL(x) ((x) << S_RANK_SEL)
+#define G_RANK_SEL(x) (((x) >> S_RANK_SEL) & M_RANK_SEL)
+
+#define S_BANK_ADDR 17
+#define M_BANK_ADDR 0x7U
+#define V_BANK_ADDR(x) ((x) << S_BANK_ADDR)
+#define G_BANK_ADDR(x) (((x) >> S_BANK_ADDR) & M_BANK_ADDR)
+
+#define S_CMD_ADDR 4
+#define M_CMD_ADDR 0x1fffU
+#define V_CMD_ADDR(x) ((x) << S_CMD_ADDR)
+#define G_CMD_ADDR(x) (((x) >> S_CMD_ADDR) & M_CMD_ADDR)
+
+#define S_CMD_OPCODE 0
+#define M_CMD_OPCODE 0x7U
+#define V_CMD_OPCODE(x) ((x) << S_CMD_OPCODE)
+#define G_CMD_OPCODE(x) (((x) >> S_CMD_OPCODE) & M_CMD_OPCODE)
+
+#define A_MC_PCTL_POWCTL 0x6244
+
+#define S_POWER_UP_START 0
+#define V_POWER_UP_START(x) ((x) << S_POWER_UP_START)
+#define F_POWER_UP_START V_POWER_UP_START(1U)
+
+#define A_MC_PCTL_POWSTAT 0x6248
+
+#define S_PHY_CALIBDONE 1
+#define V_PHY_CALIBDONE(x) ((x) << S_PHY_CALIBDONE)
+#define F_PHY_CALIBDONE V_PHY_CALIBDONE(1U)
+
+#define S_POWER_UP_DONE 0
+#define V_POWER_UP_DONE(x) ((x) << S_POWER_UP_DONE)
+#define F_POWER_UP_DONE V_POWER_UP_DONE(1U)
+
+#define A_MC_PCTL_MCFG 0x6280
+
+#define S_TFAW_CFG 18
+#define M_TFAW_CFG 0x3U
+#define V_TFAW_CFG(x) ((x) << S_TFAW_CFG)
+#define G_TFAW_CFG(x) (((x) >> S_TFAW_CFG) & M_TFAW_CFG)
+
+#define S_PD_EXIT_MODE 17
+#define V_PD_EXIT_MODE(x) ((x) << S_PD_EXIT_MODE)
+#define F_PD_EXIT_MODE V_PD_EXIT_MODE(1U)
+
+#define S_PD_TYPE 16
+#define V_PD_TYPE(x) ((x) << S_PD_TYPE)
+#define F_PD_TYPE V_PD_TYPE(1U)
+
+#define S_PD_IDLE 8
+#define M_PD_IDLE 0xffU
+#define V_PD_IDLE(x) ((x) << S_PD_IDLE)
+#define G_PD_IDLE(x) (((x) >> S_PD_IDLE) & M_PD_IDLE)
+
+#define S_PAGE_POLICY 6
+#define M_PAGE_POLICY 0x3U
+#define V_PAGE_POLICY(x) ((x) << S_PAGE_POLICY)
+#define G_PAGE_POLICY(x) (((x) >> S_PAGE_POLICY) & M_PAGE_POLICY)
+
+#define S_DDR3_EN 5
+#define V_DDR3_EN(x) ((x) << S_DDR3_EN)
+#define F_DDR3_EN V_DDR3_EN(1U)
+
+#define S_TWO_T_EN 3
+#define V_TWO_T_EN(x) ((x) << S_TWO_T_EN)
+#define F_TWO_T_EN V_TWO_T_EN(1U)
+
+#define S_BL8INT_EN 2
+#define V_BL8INT_EN(x) ((x) << S_BL8INT_EN)
+#define F_BL8INT_EN V_BL8INT_EN(1U)
+
+#define S_MEM_BL 0
+#define V_MEM_BL(x) ((x) << S_MEM_BL)
+#define F_MEM_BL V_MEM_BL(1U)
+
+#define A_MC_PCTL_PPCFG 0x6284
+
+#define S_RPMEM_DIS 1
+#define M_RPMEM_DIS 0xffU
+#define V_RPMEM_DIS(x) ((x) << S_RPMEM_DIS)
+#define G_RPMEM_DIS(x) (((x) >> S_RPMEM_DIS) & M_RPMEM_DIS)
+
+#define S_PPMEM_EN 0
+#define V_PPMEM_EN(x) ((x) << S_PPMEM_EN)
+#define F_PPMEM_EN V_PPMEM_EN(1U)
+
+#define A_MC_PCTL_MSTAT 0x6288
+
+#define S_POWER_DOWN 0
+#define V_POWER_DOWN(x) ((x) << S_POWER_DOWN)
+#define F_POWER_DOWN V_POWER_DOWN(1U)
+
+#define A_MC_PCTL_ODTCFG 0x628c
+
+#define S_RANK3_ODT_DEFAULT 28
+#define V_RANK3_ODT_DEFAULT(x) ((x) << S_RANK3_ODT_DEFAULT)
+#define F_RANK3_ODT_DEFAULT V_RANK3_ODT_DEFAULT(1U)
+
+#define S_RANK3_ODT_WRITE_SEL 27
+#define V_RANK3_ODT_WRITE_SEL(x) ((x) << S_RANK3_ODT_WRITE_SEL)
+#define F_RANK3_ODT_WRITE_SEL V_RANK3_ODT_WRITE_SEL(1U)
+
+#define S_RANK3_ODT_WRITE_NSE 26
+#define V_RANK3_ODT_WRITE_NSE(x) ((x) << S_RANK3_ODT_WRITE_NSE)
+#define F_RANK3_ODT_WRITE_NSE V_RANK3_ODT_WRITE_NSE(1U)
+
+#define S_RANK3_ODT_READ_SEL 25
+#define V_RANK3_ODT_READ_SEL(x) ((x) << S_RANK3_ODT_READ_SEL)
+#define F_RANK3_ODT_READ_SEL V_RANK3_ODT_READ_SEL(1U)
+
+#define S_RANK3_ODT_READ_NSEL 24
+#define V_RANK3_ODT_READ_NSEL(x) ((x) << S_RANK3_ODT_READ_NSEL)
+#define F_RANK3_ODT_READ_NSEL V_RANK3_ODT_READ_NSEL(1U)
+
+#define S_RANK2_ODT_DEFAULT 20
+#define V_RANK2_ODT_DEFAULT(x) ((x) << S_RANK2_ODT_DEFAULT)
+#define F_RANK2_ODT_DEFAULT V_RANK2_ODT_DEFAULT(1U)
+
+#define S_RANK2_ODT_WRITE_SEL 19
+#define V_RANK2_ODT_WRITE_SEL(x) ((x) << S_RANK2_ODT_WRITE_SEL)
+#define F_RANK2_ODT_WRITE_SEL V_RANK2_ODT_WRITE_SEL(1U)
+
+#define S_RANK2_ODT_WRITE_NSEL 18
+#define V_RANK2_ODT_WRITE_NSEL(x) ((x) << S_RANK2_ODT_WRITE_NSEL)
+#define F_RANK2_ODT_WRITE_NSEL V_RANK2_ODT_WRITE_NSEL(1U)
+
+#define S_RANK2_ODT_READ_SEL 17
+#define V_RANK2_ODT_READ_SEL(x) ((x) << S_RANK2_ODT_READ_SEL)
+#define F_RANK2_ODT_READ_SEL V_RANK2_ODT_READ_SEL(1U)
+
+#define S_RANK2_ODT_READ_NSEL 16
+#define V_RANK2_ODT_READ_NSEL(x) ((x) << S_RANK2_ODT_READ_NSEL)
+#define F_RANK2_ODT_READ_NSEL V_RANK2_ODT_READ_NSEL(1U)
+
+#define S_RANK1_ODT_DEFAULT 12
+#define V_RANK1_ODT_DEFAULT(x) ((x) << S_RANK1_ODT_DEFAULT)
+#define F_RANK1_ODT_DEFAULT V_RANK1_ODT_DEFAULT(1U)
+
+#define S_RANK1_ODT_WRITE_SEL 11
+#define V_RANK1_ODT_WRITE_SEL(x) ((x) << S_RANK1_ODT_WRITE_SEL)
+#define F_RANK1_ODT_WRITE_SEL V_RANK1_ODT_WRITE_SEL(1U)
+
+#define S_RANK1_ODT_WRITE_NSEL 10
+#define V_RANK1_ODT_WRITE_NSEL(x) ((x) << S_RANK1_ODT_WRITE_NSEL)
+#define F_RANK1_ODT_WRITE_NSEL V_RANK1_ODT_WRITE_NSEL(1U)
+
+#define S_RANK1_ODT_READ_SEL 9
+#define V_RANK1_ODT_READ_SEL(x) ((x) << S_RANK1_ODT_READ_SEL)
+#define F_RANK1_ODT_READ_SEL V_RANK1_ODT_READ_SEL(1U)
+
+#define S_RANK1_ODT_READ_NSEL 8
+#define V_RANK1_ODT_READ_NSEL(x) ((x) << S_RANK1_ODT_READ_NSEL)
+#define F_RANK1_ODT_READ_NSEL V_RANK1_ODT_READ_NSEL(1U)
+
+#define S_RANK0_ODT_DEFAULT 4
+#define V_RANK0_ODT_DEFAULT(x) ((x) << S_RANK0_ODT_DEFAULT)
+#define F_RANK0_ODT_DEFAULT V_RANK0_ODT_DEFAULT(1U)
+
+#define S_RANK0_ODT_WRITE_SEL 3
+#define V_RANK0_ODT_WRITE_SEL(x) ((x) << S_RANK0_ODT_WRITE_SEL)
+#define F_RANK0_ODT_WRITE_SEL V_RANK0_ODT_WRITE_SEL(1U)
+
+#define S_RANK0_ODT_WRITE_NSEL 2
+#define V_RANK0_ODT_WRITE_NSEL(x) ((x) << S_RANK0_ODT_WRITE_NSEL)
+#define F_RANK0_ODT_WRITE_NSEL V_RANK0_ODT_WRITE_NSEL(1U)
+
+#define S_RANK0_ODT_READ_SEL 1
+#define V_RANK0_ODT_READ_SEL(x) ((x) << S_RANK0_ODT_READ_SEL)
+#define F_RANK0_ODT_READ_SEL V_RANK0_ODT_READ_SEL(1U)
+
+#define S_RANK0_ODT_READ_NSEL 0
+#define V_RANK0_ODT_READ_NSEL(x) ((x) << S_RANK0_ODT_READ_NSEL)
+#define F_RANK0_ODT_READ_NSEL V_RANK0_ODT_READ_NSEL(1U)
+
+#define A_MC_PCTL_DQSECFG 0x6290
+
+#define S_DV_ALAT 20
+#define M_DV_ALAT 0xfU
+#define V_DV_ALAT(x) ((x) << S_DV_ALAT)
+#define G_DV_ALAT(x) (((x) >> S_DV_ALAT) & M_DV_ALAT)
+
+#define S_DV_ALEN 16
+#define M_DV_ALEN 0x3U
+#define V_DV_ALEN(x) ((x) << S_DV_ALEN)
+#define G_DV_ALEN(x) (((x) >> S_DV_ALEN) & M_DV_ALEN)
+
+#define S_DSE_ALAT 12
+#define M_DSE_ALAT 0xfU
+#define V_DSE_ALAT(x) ((x) << S_DSE_ALAT)
+#define G_DSE_ALAT(x) (((x) >> S_DSE_ALAT) & M_DSE_ALAT)
+
+#define S_DSE_ALEN 8
+#define M_DSE_ALEN 0x3U
+#define V_DSE_ALEN(x) ((x) << S_DSE_ALEN)
+#define G_DSE_ALEN(x) (((x) >> S_DSE_ALEN) & M_DSE_ALEN)
+
+#define S_QSE_ALAT 4
+#define M_QSE_ALAT 0xfU
+#define V_QSE_ALAT(x) ((x) << S_QSE_ALAT)
+#define G_QSE_ALAT(x) (((x) >> S_QSE_ALAT) & M_QSE_ALAT)
+
+#define S_QSE_ALEN 0
+#define M_QSE_ALEN 0x3U
+#define V_QSE_ALEN(x) ((x) << S_QSE_ALEN)
+#define G_QSE_ALEN(x) (((x) >> S_QSE_ALEN) & M_QSE_ALEN)
+
+#define A_MC_PCTL_DTUPDES 0x6294
+
+#define S_DTU_RD_MISSING 13
+#define V_DTU_RD_MISSING(x) ((x) << S_DTU_RD_MISSING)
+#define F_DTU_RD_MISSING V_DTU_RD_MISSING(1U)
+
+#define S_DTU_EAFFL 9
+#define M_DTU_EAFFL 0xfU
+#define V_DTU_EAFFL(x) ((x) << S_DTU_EAFFL)
+#define G_DTU_EAFFL(x) (((x) >> S_DTU_EAFFL) & M_DTU_EAFFL)
+
+#define S_DTU_RANDOM_ERROR 8
+#define V_DTU_RANDOM_ERROR(x) ((x) << S_DTU_RANDOM_ERROR)
+#define F_DTU_RANDOM_ERROR V_DTU_RANDOM_ERROR(1U)
+
+#define S_DTU_ERROR_B7 7
+#define V_DTU_ERROR_B7(x) ((x) << S_DTU_ERROR_B7)
+#define F_DTU_ERROR_B7 V_DTU_ERROR_B7(1U)
+
+#define S_DTU_ERR_B6 6
+#define V_DTU_ERR_B6(x) ((x) << S_DTU_ERR_B6)
+#define F_DTU_ERR_B6 V_DTU_ERR_B6(1U)
+
+#define S_DTU_ERR_B5 5
+#define V_DTU_ERR_B5(x) ((x) << S_DTU_ERR_B5)
+#define F_DTU_ERR_B5 V_DTU_ERR_B5(1U)
+
+#define S_DTU_ERR_B4 4
+#define V_DTU_ERR_B4(x) ((x) << S_DTU_ERR_B4)
+#define F_DTU_ERR_B4 V_DTU_ERR_B4(1U)
+
+#define S_DTU_ERR_B3 3
+#define V_DTU_ERR_B3(x) ((x) << S_DTU_ERR_B3)
+#define F_DTU_ERR_B3 V_DTU_ERR_B3(1U)
+
+#define S_DTU_ERR_B2 2
+#define V_DTU_ERR_B2(x) ((x) << S_DTU_ERR_B2)
+#define F_DTU_ERR_B2 V_DTU_ERR_B2(1U)
+
+#define S_DTU_ERR_B1 1
+#define V_DTU_ERR_B1(x) ((x) << S_DTU_ERR_B1)
+#define F_DTU_ERR_B1 V_DTU_ERR_B1(1U)
+
+#define S_DTU_ERR_B0 0
+#define V_DTU_ERR_B0(x) ((x) << S_DTU_ERR_B0)
+#define F_DTU_ERR_B0 V_DTU_ERR_B0(1U)
+
+#define A_MC_PCTL_DTUNA 0x6298
+#define A_MC_PCTL_DTUNE 0x629c
+#define A_MC_PCTL_DTUPRDO 0x62a0
+
+#define S_DTU_ALLBITS_1 16
+#define M_DTU_ALLBITS_1 0xffffU
+#define V_DTU_ALLBITS_1(x) ((x) << S_DTU_ALLBITS_1)
+#define G_DTU_ALLBITS_1(x) (((x) >> S_DTU_ALLBITS_1) & M_DTU_ALLBITS_1)
+
+#define S_DTU_ALLBITS_0 0
+#define M_DTU_ALLBITS_0 0xffffU
+#define V_DTU_ALLBITS_0(x) ((x) << S_DTU_ALLBITS_0)
+#define G_DTU_ALLBITS_0(x) (((x) >> S_DTU_ALLBITS_0) & M_DTU_ALLBITS_0)
+
+#define A_MC_PCTL_DTUPRD1 0x62a4
+
+#define S_DTU_ALLBITS_3 16
+#define M_DTU_ALLBITS_3 0xffffU
+#define V_DTU_ALLBITS_3(x) ((x) << S_DTU_ALLBITS_3)
+#define G_DTU_ALLBITS_3(x) (((x) >> S_DTU_ALLBITS_3) & M_DTU_ALLBITS_3)
+
+#define S_DTU_ALLBITS_2 0
+#define M_DTU_ALLBITS_2 0xffffU
+#define V_DTU_ALLBITS_2(x) ((x) << S_DTU_ALLBITS_2)
+#define G_DTU_ALLBITS_2(x) (((x) >> S_DTU_ALLBITS_2) & M_DTU_ALLBITS_2)
+
+#define A_MC_PCTL_DTUPRD2 0x62a8
+
+#define S_DTU_ALLBITS_5 16
+#define M_DTU_ALLBITS_5 0xffffU
+#define V_DTU_ALLBITS_5(x) ((x) << S_DTU_ALLBITS_5)
+#define G_DTU_ALLBITS_5(x) (((x) >> S_DTU_ALLBITS_5) & M_DTU_ALLBITS_5)
+
+#define S_DTU_ALLBITS_4 0
+#define M_DTU_ALLBITS_4 0xffffU
+#define V_DTU_ALLBITS_4(x) ((x) << S_DTU_ALLBITS_4)
+#define G_DTU_ALLBITS_4(x) (((x) >> S_DTU_ALLBITS_4) & M_DTU_ALLBITS_4)
+
+#define A_MC_PCTL_DTUPRD3 0x62ac
+
+#define S_DTU_ALLBITS_7 16
+#define M_DTU_ALLBITS_7 0xffffU
+#define V_DTU_ALLBITS_7(x) ((x) << S_DTU_ALLBITS_7)
+#define G_DTU_ALLBITS_7(x) (((x) >> S_DTU_ALLBITS_7) & M_DTU_ALLBITS_7)
+
+#define S_DTU_ALLBITS_6 0
+#define M_DTU_ALLBITS_6 0xffffU
+#define V_DTU_ALLBITS_6(x) ((x) << S_DTU_ALLBITS_6)
+#define G_DTU_ALLBITS_6(x) (((x) >> S_DTU_ALLBITS_6) & M_DTU_ALLBITS_6)
+
+#define A_MC_PCTL_DTUAWDT 0x62b0
+
+#define S_NUMBER_RANKS 9
+#define M_NUMBER_RANKS 0x3U
+#define V_NUMBER_RANKS(x) ((x) << S_NUMBER_RANKS)
+#define G_NUMBER_RANKS(x) (((x) >> S_NUMBER_RANKS) & M_NUMBER_RANKS)
+
+#define S_ROW_ADDR_WIDTH 6
+#define M_ROW_ADDR_WIDTH 0x3U
+#define V_ROW_ADDR_WIDTH(x) ((x) << S_ROW_ADDR_WIDTH)
+#define G_ROW_ADDR_WIDTH(x) (((x) >> S_ROW_ADDR_WIDTH) & M_ROW_ADDR_WIDTH)
+
+#define S_BANK_ADDR_WIDTH 3
+#define M_BANK_ADDR_WIDTH 0x3U
+#define V_BANK_ADDR_WIDTH(x) ((x) << S_BANK_ADDR_WIDTH)
+#define G_BANK_ADDR_WIDTH(x) (((x) >> S_BANK_ADDR_WIDTH) & M_BANK_ADDR_WIDTH)
+
+#define S_COLUMN_ADDR_WIDTH 0
+#define M_COLUMN_ADDR_WIDTH 0x3U
+#define V_COLUMN_ADDR_WIDTH(x) ((x) << S_COLUMN_ADDR_WIDTH)
+#define G_COLUMN_ADDR_WIDTH(x) (((x) >> S_COLUMN_ADDR_WIDTH) & M_COLUMN_ADDR_WIDTH)
+
+#define A_MC_PCTL_TOGCNT1U 0x62c0
+
+#define S_TOGGLE_COUNTER_1U 0
+#define M_TOGGLE_COUNTER_1U 0x3ffU
+#define V_TOGGLE_COUNTER_1U(x) ((x) << S_TOGGLE_COUNTER_1U)
+#define G_TOGGLE_COUNTER_1U(x) (((x) >> S_TOGGLE_COUNTER_1U) & M_TOGGLE_COUNTER_1U)
+
+#define A_MC_PCTL_TINIT 0x62c4
+
+#define S_T_INIT 0
+#define M_T_INIT 0x1ffU
+#define V_T_INIT(x) ((x) << S_T_INIT)
+#define G_T_INIT(x) (((x) >> S_T_INIT) & M_T_INIT)
+
+#define A_MC_PCTL_TRSTH 0x62c8
+
+#define S_T_RSTH 0
+#define M_T_RSTH 0x3ffU
+#define V_T_RSTH(x) ((x) << S_T_RSTH)
+#define G_T_RSTH(x) (((x) >> S_T_RSTH) & M_T_RSTH)
+
+#define A_MC_PCTL_TOGCNT100N 0x62cc
+
+#define S_TOGGLE_COUNTER_100N 0
+#define M_TOGGLE_COUNTER_100N 0x7fU
+#define V_TOGGLE_COUNTER_100N(x) ((x) << S_TOGGLE_COUNTER_100N)
+#define G_TOGGLE_COUNTER_100N(x) (((x) >> S_TOGGLE_COUNTER_100N) & M_TOGGLE_COUNTER_100N)
+
+#define A_MC_PCTL_TREFI 0x62d0
+
+#define S_T_REFI 0
+#define M_T_REFI 0xffU
+#define V_T_REFI(x) ((x) << S_T_REFI)
+#define G_T_REFI(x) (((x) >> S_T_REFI) & M_T_REFI)
+
+#define A_MC_PCTL_TMRD 0x62d4
+
+#define S_T_MRD 0
+#define M_T_MRD 0x7U
+#define V_T_MRD(x) ((x) << S_T_MRD)
+#define G_T_MRD(x) (((x) >> S_T_MRD) & M_T_MRD)
+
+#define A_MC_PCTL_TRFC 0x62d8
+
+#define S_T_RFC 0
+#define M_T_RFC 0xffU
+#define V_T_RFC(x) ((x) << S_T_RFC)
+#define G_T_RFC(x) (((x) >> S_T_RFC) & M_T_RFC)
+
+#define A_MC_PCTL_TRP 0x62dc
+
+#define S_T_RP 0
+#define M_T_RP 0xfU
+#define V_T_RP(x) ((x) << S_T_RP)
+#define G_T_RP(x) (((x) >> S_T_RP) & M_T_RP)
+
+#define A_MC_PCTL_TRTW 0x62e0
+
+#define S_T_RTW 0
+#define M_T_RTW 0x7U
+#define V_T_RTW(x) ((x) << S_T_RTW)
+#define G_T_RTW(x) (((x) >> S_T_RTW) & M_T_RTW)
+
+#define A_MC_PCTL_TAL 0x62e4
+
+#define S_T_AL 0
+#define M_T_AL 0xfU
+#define V_T_AL(x) ((x) << S_T_AL)
+#define G_T_AL(x) (((x) >> S_T_AL) & M_T_AL)
+
+#define A_MC_PCTL_TCL 0x62e8
+
+#define S_T_CL 0
+#define M_T_CL 0xfU
+#define V_T_CL(x) ((x) << S_T_CL)
+#define G_T_CL(x) (((x) >> S_T_CL) & M_T_CL)
+
+#define A_MC_PCTL_TCWL 0x62ec
+
+#define S_T_CWL 0
+#define M_T_CWL 0xfU
+#define V_T_CWL(x) ((x) << S_T_CWL)
+#define G_T_CWL(x) (((x) >> S_T_CWL) & M_T_CWL)
+
+#define A_MC_PCTL_TRAS 0x62f0
+
+#define S_T_RAS 0
+#define M_T_RAS 0x3fU
+#define V_T_RAS(x) ((x) << S_T_RAS)
+#define G_T_RAS(x) (((x) >> S_T_RAS) & M_T_RAS)
+
+#define A_MC_PCTL_TRC 0x62f4
+
+#define S_T_RC 0
+#define M_T_RC 0x3fU
+#define V_T_RC(x) ((x) << S_T_RC)
+#define G_T_RC(x) (((x) >> S_T_RC) & M_T_RC)
+
+#define A_MC_PCTL_TRCD 0x62f8
+
+#define S_T_RCD 0
+#define M_T_RCD 0xfU
+#define V_T_RCD(x) ((x) << S_T_RCD)
+#define G_T_RCD(x) (((x) >> S_T_RCD) & M_T_RCD)
+
+#define A_MC_PCTL_TRRD 0x62fc
+
+#define S_T_RRD 0
+#define M_T_RRD 0xfU
+#define V_T_RRD(x) ((x) << S_T_RRD)
+#define G_T_RRD(x) (((x) >> S_T_RRD) & M_T_RRD)
+
+#define A_MC_PCTL_TRTP 0x6300
+
+#define S_T_RTP 0
+#define M_T_RTP 0x7U
+#define V_T_RTP(x) ((x) << S_T_RTP)
+#define G_T_RTP(x) (((x) >> S_T_RTP) & M_T_RTP)
+
+#define A_MC_PCTL_TWR 0x6304
+
+#define S_T_WR 0
+#define M_T_WR 0x7U
+#define V_T_WR(x) ((x) << S_T_WR)
+#define G_T_WR(x) (((x) >> S_T_WR) & M_T_WR)
+
+#define A_MC_PCTL_TWTR 0x6308
+
+#define S_T_WTR 0
+#define M_T_WTR 0x7U
+#define V_T_WTR(x) ((x) << S_T_WTR)
+#define G_T_WTR(x) (((x) >> S_T_WTR) & M_T_WTR)
+
+#define A_MC_PCTL_TEXSR 0x630c
+
+#define S_T_EXSR 0
+#define M_T_EXSR 0x3ffU
+#define V_T_EXSR(x) ((x) << S_T_EXSR)
+#define G_T_EXSR(x) (((x) >> S_T_EXSR) & M_T_EXSR)
+
+#define A_MC_PCTL_TXP 0x6310
+
+#define S_T_XP 0
+#define M_T_XP 0x7U
+#define V_T_XP(x) ((x) << S_T_XP)
+#define G_T_XP(x) (((x) >> S_T_XP) & M_T_XP)
+
+#define A_MC_PCTL_TXPDLL 0x6314
+
+#define S_T_XPDLL 0
+#define M_T_XPDLL 0x3fU
+#define V_T_XPDLL(x) ((x) << S_T_XPDLL)
+#define G_T_XPDLL(x) (((x) >> S_T_XPDLL) & M_T_XPDLL)
+
+#define A_MC_PCTL_TZQCS 0x6318
+
+#define S_T_ZQCS 0
+#define M_T_ZQCS 0x7fU
+#define V_T_ZQCS(x) ((x) << S_T_ZQCS)
+#define G_T_ZQCS(x) (((x) >> S_T_ZQCS) & M_T_ZQCS)
+
+#define A_MC_PCTL_TZQCSI 0x631c
+
+#define S_T_ZQCSI 0
+#define M_T_ZQCSI 0xfffU
+#define V_T_ZQCSI(x) ((x) << S_T_ZQCSI)
+#define G_T_ZQCSI(x) (((x) >> S_T_ZQCSI) & M_T_ZQCSI)
+
+#define A_MC_PCTL_TDQS 0x6320
+
+#define S_T_DQS 0
+#define M_T_DQS 0x7U
+#define V_T_DQS(x) ((x) << S_T_DQS)
+#define G_T_DQS(x) (((x) >> S_T_DQS) & M_T_DQS)
+
+#define A_MC_PCTL_TCKSRE 0x6324
+
+#define S_T_CKSRE 0
+#define M_T_CKSRE 0xfU
+#define V_T_CKSRE(x) ((x) << S_T_CKSRE)
+#define G_T_CKSRE(x) (((x) >> S_T_CKSRE) & M_T_CKSRE)
+
+#define A_MC_PCTL_TCKSRX 0x6328
+
+#define S_T_CKSRX 0
+#define M_T_CKSRX 0xfU
+#define V_T_CKSRX(x) ((x) << S_T_CKSRX)
+#define G_T_CKSRX(x) (((x) >> S_T_CKSRX) & M_T_CKSRX)
+
+#define A_MC_PCTL_TCKE 0x632c
+
+#define S_T_CKE 0
+#define M_T_CKE 0x7U
+#define V_T_CKE(x) ((x) << S_T_CKE)
+#define G_T_CKE(x) (((x) >> S_T_CKE) & M_T_CKE)
+
+#define A_MC_PCTL_TMOD 0x6330
+
+#define S_T_MOD 0
+#define M_T_MOD 0xfU
+#define V_T_MOD(x) ((x) << S_T_MOD)
+#define G_T_MOD(x) (((x) >> S_T_MOD) & M_T_MOD)
+
+#define A_MC_PCTL_TRSTL 0x6334
+
+#define S_RSTHOLD 0
+#define M_RSTHOLD 0x7fU
+#define V_RSTHOLD(x) ((x) << S_RSTHOLD)
+#define G_RSTHOLD(x) (((x) >> S_RSTHOLD) & M_RSTHOLD)
+
+#define A_MC_PCTL_TZQCL 0x6338
+
+#define S_T_ZQCL 0
+#define M_T_ZQCL 0x3ffU
+#define V_T_ZQCL(x) ((x) << S_T_ZQCL)
+#define G_T_ZQCL(x) (((x) >> S_T_ZQCL) & M_T_ZQCL)
+
+#define A_MC_PCTL_DWLCFG0 0x6370
+
+#define S_T_ADWL_VEC 0
+#define M_T_ADWL_VEC 0x1ffU
+#define V_T_ADWL_VEC(x) ((x) << S_T_ADWL_VEC)
+#define G_T_ADWL_VEC(x) (((x) >> S_T_ADWL_VEC) & M_T_ADWL_VEC)
+
+#define A_MC_PCTL_DWLCFG1 0x6374
+#define A_MC_PCTL_DWLCFG2 0x6378
+#define A_MC_PCTL_DWLCFG3 0x637c
+#define A_MC_PCTL_ECCCFG 0x6380
+
+#define S_INLINE_SYN_EN 4
+#define V_INLINE_SYN_EN(x) ((x) << S_INLINE_SYN_EN)
+#define F_INLINE_SYN_EN V_INLINE_SYN_EN(1U)
+
+#define S_ECC_EN 3
+#define V_ECC_EN(x) ((x) << S_ECC_EN)
+#define F_ECC_EN V_ECC_EN(1U)
+
+#define S_ECC_INTR_EN 2
+#define V_ECC_INTR_EN(x) ((x) << S_ECC_INTR_EN)
+#define F_ECC_INTR_EN V_ECC_INTR_EN(1U)
+
+#define A_MC_PCTL_ECCTST 0x6384
+
+#define S_ECC_TEST_MASK 0
+#define M_ECC_TEST_MASK 0xffU
+#define V_ECC_TEST_MASK(x) ((x) << S_ECC_TEST_MASK)
+#define G_ECC_TEST_MASK(x) (((x) >> S_ECC_TEST_MASK) & M_ECC_TEST_MASK)
+
+#define A_MC_PCTL_ECCCLR 0x6388
+
+#define S_CLR_ECC_LOG 1
+#define V_CLR_ECC_LOG(x) ((x) << S_CLR_ECC_LOG)
+#define F_CLR_ECC_LOG V_CLR_ECC_LOG(1U)
+
+#define S_CLR_ECC_INTR 0
+#define V_CLR_ECC_INTR(x) ((x) << S_CLR_ECC_INTR)
+#define F_CLR_ECC_INTR V_CLR_ECC_INTR(1U)
+
+#define A_MC_PCTL_ECCLOG 0x638c
+#define A_MC_PCTL_DTUWACTL 0x6400
+
+#define S_DTU_WR_RANK 30
+#define M_DTU_WR_RANK 0x3U
+#define V_DTU_WR_RANK(x) ((x) << S_DTU_WR_RANK)
+#define G_DTU_WR_RANK(x) (((x) >> S_DTU_WR_RANK) & M_DTU_WR_RANK)
+
+#define S_DTU_WR_ROW 13
+#define M_DTU_WR_ROW 0x1ffffU
+#define V_DTU_WR_ROW(x) ((x) << S_DTU_WR_ROW)
+#define G_DTU_WR_ROW(x) (((x) >> S_DTU_WR_ROW) & M_DTU_WR_ROW)
+
+#define S_DTU_WR_BANK 10
+#define M_DTU_WR_BANK 0x7U
+#define V_DTU_WR_BANK(x) ((x) << S_DTU_WR_BANK)
+#define G_DTU_WR_BANK(x) (((x) >> S_DTU_WR_BANK) & M_DTU_WR_BANK)
+
+#define S_DTU_WR_COL 0
+#define M_DTU_WR_COL 0x3ffU
+#define V_DTU_WR_COL(x) ((x) << S_DTU_WR_COL)
+#define G_DTU_WR_COL(x) (((x) >> S_DTU_WR_COL) & M_DTU_WR_COL)
+
+#define A_MC_PCTL_DTURACTL 0x6404
+
+#define S_DTU_RD_RANK 30
+#define M_DTU_RD_RANK 0x3U
+#define V_DTU_RD_RANK(x) ((x) << S_DTU_RD_RANK)
+#define G_DTU_RD_RANK(x) (((x) >> S_DTU_RD_RANK) & M_DTU_RD_RANK)
+
+#define S_DTU_RD_ROW 13
+#define M_DTU_RD_ROW 0x1ffffU
+#define V_DTU_RD_ROW(x) ((x) << S_DTU_RD_ROW)
+#define G_DTU_RD_ROW(x) (((x) >> S_DTU_RD_ROW) & M_DTU_RD_ROW)
+
+#define S_DTU_RD_BANK 10
+#define M_DTU_RD_BANK 0x7U
+#define V_DTU_RD_BANK(x) ((x) << S_DTU_RD_BANK)
+#define G_DTU_RD_BANK(x) (((x) >> S_DTU_RD_BANK) & M_DTU_RD_BANK)
+
+#define S_DTU_RD_COL 0
+#define M_DTU_RD_COL 0x3ffU
+#define V_DTU_RD_COL(x) ((x) << S_DTU_RD_COL)
+#define G_DTU_RD_COL(x) (((x) >> S_DTU_RD_COL) & M_DTU_RD_COL)
+
+#define A_MC_PCTL_DTUCFG 0x6408
+
+#define S_DTU_ROW_INCREMENTS 16
+#define M_DTU_ROW_INCREMENTS 0x7fU
+#define V_DTU_ROW_INCREMENTS(x) ((x) << S_DTU_ROW_INCREMENTS)
+#define G_DTU_ROW_INCREMENTS(x) (((x) >> S_DTU_ROW_INCREMENTS) & M_DTU_ROW_INCREMENTS)
+
+#define S_DTU_WR_MULTI_RD 15
+#define V_DTU_WR_MULTI_RD(x) ((x) << S_DTU_WR_MULTI_RD)
+#define F_DTU_WR_MULTI_RD V_DTU_WR_MULTI_RD(1U)
+
+#define S_DTU_DATA_MASK_EN 14
+#define V_DTU_DATA_MASK_EN(x) ((x) << S_DTU_DATA_MASK_EN)
+#define F_DTU_DATA_MASK_EN V_DTU_DATA_MASK_EN(1U)
+
+#define S_DTU_TARGET_LANE 10
+#define M_DTU_TARGET_LANE 0xfU
+#define V_DTU_TARGET_LANE(x) ((x) << S_DTU_TARGET_LANE)
+#define G_DTU_TARGET_LANE(x) (((x) >> S_DTU_TARGET_LANE) & M_DTU_TARGET_LANE)
+
+#define S_DTU_GENERATE_RANDOM 9
+#define V_DTU_GENERATE_RANDOM(x) ((x) << S_DTU_GENERATE_RANDOM)
+#define F_DTU_GENERATE_RANDOM V_DTU_GENERATE_RANDOM(1U)
+
+#define S_DTU_INCR_BANKS 8
+#define V_DTU_INCR_BANKS(x) ((x) << S_DTU_INCR_BANKS)
+#define F_DTU_INCR_BANKS V_DTU_INCR_BANKS(1U)
+
+#define S_DTU_INCR_COLS 7
+#define V_DTU_INCR_COLS(x) ((x) << S_DTU_INCR_COLS)
+#define F_DTU_INCR_COLS V_DTU_INCR_COLS(1U)
+
+#define S_DTU_NALEN 1
+#define M_DTU_NALEN 0x3fU
+#define V_DTU_NALEN(x) ((x) << S_DTU_NALEN)
+#define G_DTU_NALEN(x) (((x) >> S_DTU_NALEN) & M_DTU_NALEN)
+
+#define S_DTU_ENABLE 0
+#define V_DTU_ENABLE(x) ((x) << S_DTU_ENABLE)
+#define F_DTU_ENABLE V_DTU_ENABLE(1U)
+
+#define A_MC_PCTL_DTUECTL 0x640c
+
+#define S_WR_MULTI_RD_RST 2
+#define V_WR_MULTI_RD_RST(x) ((x) << S_WR_MULTI_RD_RST)
+#define F_WR_MULTI_RD_RST V_WR_MULTI_RD_RST(1U)
+
+#define S_RUN_ERROR_REPORTS 1
+#define V_RUN_ERROR_REPORTS(x) ((x) << S_RUN_ERROR_REPORTS)
+#define F_RUN_ERROR_REPORTS V_RUN_ERROR_REPORTS(1U)
+
+#define S_RUN_DTU 0
+#define V_RUN_DTU(x) ((x) << S_RUN_DTU)
+#define F_RUN_DTU V_RUN_DTU(1U)
+
+#define A_MC_PCTL_DTUWD0 0x6410
+
+#define S_DTU_WR_BYTE3 24
+#define M_DTU_WR_BYTE3 0xffU
+#define V_DTU_WR_BYTE3(x) ((x) << S_DTU_WR_BYTE3)
+#define G_DTU_WR_BYTE3(x) (((x) >> S_DTU_WR_BYTE3) & M_DTU_WR_BYTE3)
+
+#define S_DTU_WR_BYTE2 16
+#define M_DTU_WR_BYTE2 0xffU
+#define V_DTU_WR_BYTE2(x) ((x) << S_DTU_WR_BYTE2)
+#define G_DTU_WR_BYTE2(x) (((x) >> S_DTU_WR_BYTE2) & M_DTU_WR_BYTE2)
+
+#define S_DTU_WR_BYTE1 8
+#define M_DTU_WR_BYTE1 0xffU
+#define V_DTU_WR_BYTE1(x) ((x) << S_DTU_WR_BYTE1)
+#define G_DTU_WR_BYTE1(x) (((x) >> S_DTU_WR_BYTE1) & M_DTU_WR_BYTE1)
+
+#define S_DTU_WR_BYTE0 0
+#define M_DTU_WR_BYTE0 0xffU
+#define V_DTU_WR_BYTE0(x) ((x) << S_DTU_WR_BYTE0)
+#define G_DTU_WR_BYTE0(x) (((x) >> S_DTU_WR_BYTE0) & M_DTU_WR_BYTE0)
+
+#define A_MC_PCTL_DTUWD1 0x6414
+
+#define S_DTU_WR_BYTE7 24
+#define M_DTU_WR_BYTE7 0xffU
+#define V_DTU_WR_BYTE7(x) ((x) << S_DTU_WR_BYTE7)
+#define G_DTU_WR_BYTE7(x) (((x) >> S_DTU_WR_BYTE7) & M_DTU_WR_BYTE7)
+
+#define S_DTU_WR_BYTE6 16
+#define M_DTU_WR_BYTE6 0xffU
+#define V_DTU_WR_BYTE6(x) ((x) << S_DTU_WR_BYTE6)
+#define G_DTU_WR_BYTE6(x) (((x) >> S_DTU_WR_BYTE6) & M_DTU_WR_BYTE6)
+
+#define S_DTU_WR_BYTE5 8
+#define M_DTU_WR_BYTE5 0xffU
+#define V_DTU_WR_BYTE5(x) ((x) << S_DTU_WR_BYTE5)
+#define G_DTU_WR_BYTE5(x) (((x) >> S_DTU_WR_BYTE5) & M_DTU_WR_BYTE5)
+
+#define S_DTU_WR_BYTE4 0
+#define M_DTU_WR_BYTE4 0xffU
+#define V_DTU_WR_BYTE4(x) ((x) << S_DTU_WR_BYTE4)
+#define G_DTU_WR_BYTE4(x) (((x) >> S_DTU_WR_BYTE4) & M_DTU_WR_BYTE4)
+
+#define A_MC_PCTL_DTUWD2 0x6418
+
+#define S_DTU_WR_BYTE11 24
+#define M_DTU_WR_BYTE11 0xffU
+#define V_DTU_WR_BYTE11(x) ((x) << S_DTU_WR_BYTE11)
+#define G_DTU_WR_BYTE11(x) (((x) >> S_DTU_WR_BYTE11) & M_DTU_WR_BYTE11)
+
+#define S_DTU_WR_BYTE10 16
+#define M_DTU_WR_BYTE10 0xffU
+#define V_DTU_WR_BYTE10(x) ((x) << S_DTU_WR_BYTE10)
+#define G_DTU_WR_BYTE10(x) (((x) >> S_DTU_WR_BYTE10) & M_DTU_WR_BYTE10)
+
+#define S_DTU_WR_BYTE9 8
+#define M_DTU_WR_BYTE9 0xffU
+#define V_DTU_WR_BYTE9(x) ((x) << S_DTU_WR_BYTE9)
+#define G_DTU_WR_BYTE9(x) (((x) >> S_DTU_WR_BYTE9) & M_DTU_WR_BYTE9)
+
+#define S_DTU_WR_BYTE8 0
+#define M_DTU_WR_BYTE8 0xffU
+#define V_DTU_WR_BYTE8(x) ((x) << S_DTU_WR_BYTE8)
+#define G_DTU_WR_BYTE8(x) (((x) >> S_DTU_WR_BYTE8) & M_DTU_WR_BYTE8)
+
+#define A_MC_PCTL_DTUWD3 0x641c
+
+#define S_DTU_WR_BYTE15 24
+#define M_DTU_WR_BYTE15 0xffU
+#define V_DTU_WR_BYTE15(x) ((x) << S_DTU_WR_BYTE15)
+#define G_DTU_WR_BYTE15(x) (((x) >> S_DTU_WR_BYTE15) & M_DTU_WR_BYTE15)
+
+#define S_DTU_WR_BYTE14 16
+#define M_DTU_WR_BYTE14 0xffU
+#define V_DTU_WR_BYTE14(x) ((x) << S_DTU_WR_BYTE14)
+#define G_DTU_WR_BYTE14(x) (((x) >> S_DTU_WR_BYTE14) & M_DTU_WR_BYTE14)
+
+#define S_DTU_WR_BYTE13 8
+#define M_DTU_WR_BYTE13 0xffU
+#define V_DTU_WR_BYTE13(x) ((x) << S_DTU_WR_BYTE13)
+#define G_DTU_WR_BYTE13(x) (((x) >> S_DTU_WR_BYTE13) & M_DTU_WR_BYTE13)
+
+#define S_DTU_WR_BYTE12 0
+#define M_DTU_WR_BYTE12 0xffU
+#define V_DTU_WR_BYTE12(x) ((x) << S_DTU_WR_BYTE12)
+#define G_DTU_WR_BYTE12(x) (((x) >> S_DTU_WR_BYTE12) & M_DTU_WR_BYTE12)
+
+#define A_MC_PCTL_DTUWDM 0x6420
+
+#define S_DM_WR_BYTE0 0
+#define M_DM_WR_BYTE0 0xffffU
+#define V_DM_WR_BYTE0(x) ((x) << S_DM_WR_BYTE0)
+#define G_DM_WR_BYTE0(x) (((x) >> S_DM_WR_BYTE0) & M_DM_WR_BYTE0)
+
+#define A_MC_PCTL_DTURD0 0x6424
+
+#define S_DTU_RD_BYTE3 24
+#define M_DTU_RD_BYTE3 0xffU
+#define V_DTU_RD_BYTE3(x) ((x) << S_DTU_RD_BYTE3)
+#define G_DTU_RD_BYTE3(x) (((x) >> S_DTU_RD_BYTE3) & M_DTU_RD_BYTE3)
+
+#define S_DTU_RD_BYTE2 16
+#define M_DTU_RD_BYTE2 0xffU
+#define V_DTU_RD_BYTE2(x) ((x) << S_DTU_RD_BYTE2)
+#define G_DTU_RD_BYTE2(x) (((x) >> S_DTU_RD_BYTE2) & M_DTU_RD_BYTE2)
+
+#define S_DTU_RD_BYTE1 8
+#define M_DTU_RD_BYTE1 0xffU
+#define V_DTU_RD_BYTE1(x) ((x) << S_DTU_RD_BYTE1)
+#define G_DTU_RD_BYTE1(x) (((x) >> S_DTU_RD_BYTE1) & M_DTU_RD_BYTE1)
+
+#define S_DTU_RD_BYTE0 0
+#define M_DTU_RD_BYTE0 0xffU
+#define V_DTU_RD_BYTE0(x) ((x) << S_DTU_RD_BYTE0)
+#define G_DTU_RD_BYTE0(x) (((x) >> S_DTU_RD_BYTE0) & M_DTU_RD_BYTE0)
+
+#define A_MC_PCTL_DTURD1 0x6428
+
+#define S_DTU_RD_BYTE7 24
+#define M_DTU_RD_BYTE7 0xffU
+#define V_DTU_RD_BYTE7(x) ((x) << S_DTU_RD_BYTE7)
+#define G_DTU_RD_BYTE7(x) (((x) >> S_DTU_RD_BYTE7) & M_DTU_RD_BYTE7)
+
+#define S_DTU_RD_BYTE6 16
+#define M_DTU_RD_BYTE6 0xffU
+#define V_DTU_RD_BYTE6(x) ((x) << S_DTU_RD_BYTE6)
+#define G_DTU_RD_BYTE6(x) (((x) >> S_DTU_RD_BYTE6) & M_DTU_RD_BYTE6)
+
+#define S_DTU_RD_BYTE5 8
+#define M_DTU_RD_BYTE5 0xffU
+#define V_DTU_RD_BYTE5(x) ((x) << S_DTU_RD_BYTE5)
+#define G_DTU_RD_BYTE5(x) (((x) >> S_DTU_RD_BYTE5) & M_DTU_RD_BYTE5)
+
+#define S_DTU_RD_BYTE4 0
+#define M_DTU_RD_BYTE4 0xffU
+#define V_DTU_RD_BYTE4(x) ((x) << S_DTU_RD_BYTE4)
+#define G_DTU_RD_BYTE4(x) (((x) >> S_DTU_RD_BYTE4) & M_DTU_RD_BYTE4)
+
+#define A_MC_PCTL_DTURD2 0x642c
+
+#define S_DTU_RD_BYTE11 24
+#define M_DTU_RD_BYTE11 0xffU
+#define V_DTU_RD_BYTE11(x) ((x) << S_DTU_RD_BYTE11)
+#define G_DTU_RD_BYTE11(x) (((x) >> S_DTU_RD_BYTE11) & M_DTU_RD_BYTE11)
+
+#define S_DTU_RD_BYTE10 16
+#define M_DTU_RD_BYTE10 0xffU
+#define V_DTU_RD_BYTE10(x) ((x) << S_DTU_RD_BYTE10)
+#define G_DTU_RD_BYTE10(x) (((x) >> S_DTU_RD_BYTE10) & M_DTU_RD_BYTE10)
+
+#define S_DTU_RD_BYTE9 8
+#define M_DTU_RD_BYTE9 0xffU
+#define V_DTU_RD_BYTE9(x) ((x) << S_DTU_RD_BYTE9)
+#define G_DTU_RD_BYTE9(x) (((x) >> S_DTU_RD_BYTE9) & M_DTU_RD_BYTE9)
+
+#define S_DTU_RD_BYTE8 0
+#define M_DTU_RD_BYTE8 0xffU
+#define V_DTU_RD_BYTE8(x) ((x) << S_DTU_RD_BYTE8)
+#define G_DTU_RD_BYTE8(x) (((x) >> S_DTU_RD_BYTE8) & M_DTU_RD_BYTE8)
+
+#define A_MC_PCTL_DTURD3 0x6430
+
+#define S_DTU_RD_BYTE15 24
+#define M_DTU_RD_BYTE15 0xffU
+#define V_DTU_RD_BYTE15(x) ((x) << S_DTU_RD_BYTE15)
+#define G_DTU_RD_BYTE15(x) (((x) >> S_DTU_RD_BYTE15) & M_DTU_RD_BYTE15)
+
+#define S_DTU_RD_BYTE14 16
+#define M_DTU_RD_BYTE14 0xffU
+#define V_DTU_RD_BYTE14(x) ((x) << S_DTU_RD_BYTE14)
+#define G_DTU_RD_BYTE14(x) (((x) >> S_DTU_RD_BYTE14) & M_DTU_RD_BYTE14)
+
+#define S_DTU_RD_BYTE13 8
+#define M_DTU_RD_BYTE13 0xffU
+#define V_DTU_RD_BYTE13(x) ((x) << S_DTU_RD_BYTE13)
+#define G_DTU_RD_BYTE13(x) (((x) >> S_DTU_RD_BYTE13) & M_DTU_RD_BYTE13)
+
+#define S_DTU_RD_BYTE12 0
+#define M_DTU_RD_BYTE12 0xffU
+#define V_DTU_RD_BYTE12(x) ((x) << S_DTU_RD_BYTE12)
+#define G_DTU_RD_BYTE12(x) (((x) >> S_DTU_RD_BYTE12) & M_DTU_RD_BYTE12)
+
+#define A_MC_DTULFSRWD 0x6434
+#define A_MC_PCTL_DTULFSRRD 0x6438
+#define A_MC_PCTL_DTUEAF 0x643c
+
+#define S_EA_RANK 30
+#define M_EA_RANK 0x3U
+#define V_EA_RANK(x) ((x) << S_EA_RANK)
+#define G_EA_RANK(x) (((x) >> S_EA_RANK) & M_EA_RANK)
+
+#define S_EA_ROW 13
+#define M_EA_ROW 0x1ffffU
+#define V_EA_ROW(x) ((x) << S_EA_ROW)
+#define G_EA_ROW(x) (((x) >> S_EA_ROW) & M_EA_ROW)
+
+#define S_EA_BANK 10
+#define M_EA_BANK 0x7U
+#define V_EA_BANK(x) ((x) << S_EA_BANK)
+#define G_EA_BANK(x) (((x) >> S_EA_BANK) & M_EA_BANK)
+
+#define S_EA_COLUMN 0
+#define M_EA_COLUMN 0x3ffU
+#define V_EA_COLUMN(x) ((x) << S_EA_COLUMN)
+#define G_EA_COLUMN(x) (((x) >> S_EA_COLUMN) & M_EA_COLUMN)
+
+#define A_MC_PCTL_PHYPVTCFG 0x6500
+
+#define S_PVT_UPD_REQ_EN 15
+#define V_PVT_UPD_REQ_EN(x) ((x) << S_PVT_UPD_REQ_EN)
+#define F_PVT_UPD_REQ_EN V_PVT_UPD_REQ_EN(1U)
+
+#define S_PVT_UPD_TRIG_POL 14
+#define V_PVT_UPD_TRIG_POL(x) ((x) << S_PVT_UPD_TRIG_POL)
+#define F_PVT_UPD_TRIG_POL V_PVT_UPD_TRIG_POL(1U)
+
+#define S_PVT_UPD_TRIG_TYPE 12
+#define V_PVT_UPD_TRIG_TYPE(x) ((x) << S_PVT_UPD_TRIG_TYPE)
+#define F_PVT_UPD_TRIG_TYPE V_PVT_UPD_TRIG_TYPE(1U)
+
+#define S_PVT_UPD_DONE_POL 10
+#define V_PVT_UPD_DONE_POL(x) ((x) << S_PVT_UPD_DONE_POL)
+#define F_PVT_UPD_DONE_POL V_PVT_UPD_DONE_POL(1U)
+
+#define S_PVT_UPD_DONE_TYPE 8
+#define M_PVT_UPD_DONE_TYPE 0x3U
+#define V_PVT_UPD_DONE_TYPE(x) ((x) << S_PVT_UPD_DONE_TYPE)
+#define G_PVT_UPD_DONE_TYPE(x) (((x) >> S_PVT_UPD_DONE_TYPE) & M_PVT_UPD_DONE_TYPE)
+
+#define S_PHY_UPD_REQ_EN 7
+#define V_PHY_UPD_REQ_EN(x) ((x) << S_PHY_UPD_REQ_EN)
+#define F_PHY_UPD_REQ_EN V_PHY_UPD_REQ_EN(1U)
+
+#define S_PHY_UPD_TRIG_POL 6
+#define V_PHY_UPD_TRIG_POL(x) ((x) << S_PHY_UPD_TRIG_POL)
+#define F_PHY_UPD_TRIG_POL V_PHY_UPD_TRIG_POL(1U)
+
+#define S_PHY_UPD_TRIG_TYPE 4
+#define V_PHY_UPD_TRIG_TYPE(x) ((x) << S_PHY_UPD_TRIG_TYPE)
+#define F_PHY_UPD_TRIG_TYPE V_PHY_UPD_TRIG_TYPE(1U)
+
+#define S_PHY_UPD_DONE_POL 2
+#define V_PHY_UPD_DONE_POL(x) ((x) << S_PHY_UPD_DONE_POL)
+#define F_PHY_UPD_DONE_POL V_PHY_UPD_DONE_POL(1U)
+
+#define S_PHY_UPD_DONE_TYPE 0
+#define M_PHY_UPD_DONE_TYPE 0x3U
+#define V_PHY_UPD_DONE_TYPE(x) ((x) << S_PHY_UPD_DONE_TYPE)
+#define G_PHY_UPD_DONE_TYPE(x) (((x) >> S_PHY_UPD_DONE_TYPE) & M_PHY_UPD_DONE_TYPE)
+
+#define A_MC_PCTL_PHYPVTSTAT 0x6504
+
+#define S_I_PVT_UPD_TRIG 5
+#define V_I_PVT_UPD_TRIG(x) ((x) << S_I_PVT_UPD_TRIG)
+#define F_I_PVT_UPD_TRIG V_I_PVT_UPD_TRIG(1U)
+
+#define S_I_PVT_UPD_DONE 4
+#define V_I_PVT_UPD_DONE(x) ((x) << S_I_PVT_UPD_DONE)
+#define F_I_PVT_UPD_DONE V_I_PVT_UPD_DONE(1U)
+
+#define S_I_PHY_UPD_TRIG 1
+#define V_I_PHY_UPD_TRIG(x) ((x) << S_I_PHY_UPD_TRIG)
+#define F_I_PHY_UPD_TRIG V_I_PHY_UPD_TRIG(1U)
+
+#define S_I_PHY_UPD_DONE 0
+#define V_I_PHY_UPD_DONE(x) ((x) << S_I_PHY_UPD_DONE)
+#define F_I_PHY_UPD_DONE V_I_PHY_UPD_DONE(1U)
+
+#define A_MC_PCTL_PHYTUPDON 0x6508
+
+#define S_PHY_T_UPDON 0
+#define M_PHY_T_UPDON 0xffU
+#define V_PHY_T_UPDON(x) ((x) << S_PHY_T_UPDON)
+#define G_PHY_T_UPDON(x) (((x) >> S_PHY_T_UPDON) & M_PHY_T_UPDON)
+
+#define A_MC_PCTL_PHYTUPDDLY 0x650c
+
+#define S_PHY_T_UPDDLY 0
+#define M_PHY_T_UPDDLY 0xfU
+#define V_PHY_T_UPDDLY(x) ((x) << S_PHY_T_UPDDLY)
+#define G_PHY_T_UPDDLY(x) (((x) >> S_PHY_T_UPDDLY) & M_PHY_T_UPDDLY)
+
+#define A_MC_PCTL_PVTTUPON 0x6510
+
+#define S_PVT_T_UPDON 0
+#define M_PVT_T_UPDON 0xffU
+#define V_PVT_T_UPDON(x) ((x) << S_PVT_T_UPDON)
+#define G_PVT_T_UPDON(x) (((x) >> S_PVT_T_UPDON) & M_PVT_T_UPDON)
+
+#define A_MC_PCTL_PVTTUPDDLY 0x6514
+
+#define S_PVT_T_UPDDLY 0
+#define M_PVT_T_UPDDLY 0xfU
+#define V_PVT_T_UPDDLY(x) ((x) << S_PVT_T_UPDDLY)
+#define G_PVT_T_UPDDLY(x) (((x) >> S_PVT_T_UPDDLY) & M_PVT_T_UPDDLY)
+
+#define A_MC_PCTL_PHYPVTUPDI 0x6518
+
+#define S_PHYPVT_T_UPDI 0
+#define M_PHYPVT_T_UPDI 0xffU
+#define V_PHYPVT_T_UPDI(x) ((x) << S_PHYPVT_T_UPDI)
+#define G_PHYPVT_T_UPDI(x) (((x) >> S_PHYPVT_T_UPDI) & M_PHYPVT_T_UPDI)
+
+#define A_MC_PCTL_PHYIOCRV1 0x651c
+
+#define S_BYTE_OE_CTL 16
+#define M_BYTE_OE_CTL 0x3U
+#define V_BYTE_OE_CTL(x) ((x) << S_BYTE_OE_CTL)
+#define G_BYTE_OE_CTL(x) (((x) >> S_BYTE_OE_CTL) & M_BYTE_OE_CTL)
+
+#define S_DYN_SOC_ODT_ALAT 12
+#define M_DYN_SOC_ODT_ALAT 0xfU
+#define V_DYN_SOC_ODT_ALAT(x) ((x) << S_DYN_SOC_ODT_ALAT)
+#define G_DYN_SOC_ODT_ALAT(x) (((x) >> S_DYN_SOC_ODT_ALAT) & M_DYN_SOC_ODT_ALAT)
+
+#define S_DYN_SOC_ODT_ATEN 8
+#define M_DYN_SOC_ODT_ATEN 0x3U
+#define V_DYN_SOC_ODT_ATEN(x) ((x) << S_DYN_SOC_ODT_ATEN)
+#define G_DYN_SOC_ODT_ATEN(x) (((x) >> S_DYN_SOC_ODT_ATEN) & M_DYN_SOC_ODT_ATEN)
+
+#define S_DYN_SOC_ODT 2
+#define V_DYN_SOC_ODT(x) ((x) << S_DYN_SOC_ODT)
+#define F_DYN_SOC_ODT V_DYN_SOC_ODT(1U)
+
+#define S_SOC_ODT_EN 0
+#define V_SOC_ODT_EN(x) ((x) << S_SOC_ODT_EN)
+#define F_SOC_ODT_EN V_SOC_ODT_EN(1U)
+
+#define A_MC_PCTL_PHYTUPDWAIT 0x6520
+
+#define S_PHY_T_UPDWAIT 0
+#define M_PHY_T_UPDWAIT 0x3fU
+#define V_PHY_T_UPDWAIT(x) ((x) << S_PHY_T_UPDWAIT)
+#define G_PHY_T_UPDWAIT(x) (((x) >> S_PHY_T_UPDWAIT) & M_PHY_T_UPDWAIT)
+
+#define A_MC_PCTL_PVTTUPDWAIT 0x6524
+
+#define S_PVT_T_UPDWAIT 0
+#define M_PVT_T_UPDWAIT 0x3fU
+#define V_PVT_T_UPDWAIT(x) ((x) << S_PVT_T_UPDWAIT)
+#define G_PVT_T_UPDWAIT(x) (((x) >> S_PVT_T_UPDWAIT) & M_PVT_T_UPDWAIT)
+
+#define A_MC_DDR3PHYAC_GCR 0x6a00
+
+#define S_WLRANK 8
+#define M_WLRANK 0x3U
+#define V_WLRANK(x) ((x) << S_WLRANK)
+#define G_WLRANK(x) (((x) >> S_WLRANK) & M_WLRANK)
+
+#define S_FDEPTH 6
+#define M_FDEPTH 0x3U
+#define V_FDEPTH(x) ((x) << S_FDEPTH)
+#define G_FDEPTH(x) (((x) >> S_FDEPTH) & M_FDEPTH)
+
+#define S_LPFDEPTH 4
+#define M_LPFDEPTH 0x3U
+#define V_LPFDEPTH(x) ((x) << S_LPFDEPTH)
+#define G_LPFDEPTH(x) (((x) >> S_LPFDEPTH) & M_LPFDEPTH)
+
+#define S_LPFEN 3
+#define V_LPFEN(x) ((x) << S_LPFEN)
+#define F_LPFEN V_LPFEN(1U)
+
+#define S_WL 2
+#define V_WL(x) ((x) << S_WL)
+#define F_WL V_WL(1U)
+
+#define S_CAL 1
+#define V_CAL(x) ((x) << S_CAL)
+#define F_CAL V_CAL(1U)
+
+#define S_MDLEN 0
+#define V_MDLEN(x) ((x) << S_MDLEN)
+#define F_MDLEN V_MDLEN(1U)
+
+#define A_MC_DDR3PHYAC_RCR0 0x6a04
+
+#define S_OCPONR 8
+#define V_OCPONR(x) ((x) << S_OCPONR)
+#define F_OCPONR V_OCPONR(1U)
+
+#define S_OCPOND 7
+#define V_OCPOND(x) ((x) << S_OCPOND)
+#define F_OCPOND V_OCPOND(1U)
+
+#define S_OCOEN 6
+#define V_OCOEN(x) ((x) << S_OCOEN)
+#define F_OCOEN V_OCOEN(1U)
+
+#define S_CKEPONR 5
+#define V_CKEPONR(x) ((x) << S_CKEPONR)
+#define F_CKEPONR V_CKEPONR(1U)
+
+#define S_CKEPOND 4
+#define V_CKEPOND(x) ((x) << S_CKEPOND)
+#define F_CKEPOND V_CKEPOND(1U)
+
+#define S_CKEOEN 3
+#define V_CKEOEN(x) ((x) << S_CKEOEN)
+#define F_CKEOEN V_CKEOEN(1U)
+
+#define S_CKPONR 2
+#define V_CKPONR(x) ((x) << S_CKPONR)
+#define F_CKPONR V_CKPONR(1U)
+
+#define S_CKPOND 1
+#define V_CKPOND(x) ((x) << S_CKPOND)
+#define F_CKPOND V_CKPOND(1U)
+
+#define S_CKOEN 0
+#define V_CKOEN(x) ((x) << S_CKOEN)
+#define F_CKOEN V_CKOEN(1U)
+
+#define A_MC_DDR3PHYAC_ACCR 0x6a14
+
+#define S_ACPONR 8
+#define V_ACPONR(x) ((x) << S_ACPONR)
+#define F_ACPONR V_ACPONR(1U)
+
+#define S_ACPOND 7
+#define V_ACPOND(x) ((x) << S_ACPOND)
+#define F_ACPOND V_ACPOND(1U)
+
+#define S_ACOEN 6
+#define V_ACOEN(x) ((x) << S_ACOEN)
+#define F_ACOEN V_ACOEN(1U)
+
+#define S_CK5PONR 5
+#define V_CK5PONR(x) ((x) << S_CK5PONR)
+#define F_CK5PONR V_CK5PONR(1U)
+
+#define S_CK5POND 4
+#define V_CK5POND(x) ((x) << S_CK5POND)
+#define F_CK5POND V_CK5POND(1U)
+
+#define S_CK5OEN 3
+#define V_CK5OEN(x) ((x) << S_CK5OEN)
+#define F_CK5OEN V_CK5OEN(1U)
+
+#define S_CK4PONR 2
+#define V_CK4PONR(x) ((x) << S_CK4PONR)
+#define F_CK4PONR V_CK4PONR(1U)
+
+#define S_CK4POND 1
+#define V_CK4POND(x) ((x) << S_CK4POND)
+#define F_CK4POND V_CK4POND(1U)
+
+#define S_CK4OEN 0
+#define V_CK4OEN(x) ((x) << S_CK4OEN)
+#define F_CK4OEN V_CK4OEN(1U)
+
+#define A_MC_DDR3PHYAC_GSR 0x6a18
+
+#define S_WLERR 4
+#define V_WLERR(x) ((x) << S_WLERR)
+#define F_WLERR V_WLERR(1U)
+
+#define S_INIT 3
+#define V_INIT(x) ((x) << S_INIT)
+#define F_INIT V_INIT(1U)
+
+#define S_ACCAL 0
+#define V_ACCAL(x) ((x) << S_ACCAL)
+#define F_ACCAL V_ACCAL(1U)
+
+#define A_MC_DDR3PHYAC_ECSR 0x6a1c
+
+#define S_WLDEC 1
+#define V_WLDEC(x) ((x) << S_WLDEC)
+#define F_WLDEC V_WLDEC(1U)
+
+#define S_WLINC 0
+#define V_WLINC(x) ((x) << S_WLINC)
+#define F_WLINC V_WLINC(1U)
+
+#define A_MC_DDR3PHYAC_OCSR 0x6a20
+#define A_MC_DDR3PHYAC_MDIPR 0x6a24
+
+#define S_PRD 0
+#define M_PRD 0x3ffU
+#define V_PRD(x) ((x) << S_PRD)
+#define G_PRD(x) (((x) >> S_PRD) & M_PRD)
+
+#define A_MC_DDR3PHYAC_MDTPR 0x6a28
+#define A_MC_DDR3PHYAC_MDPPR0 0x6a2c
+#define A_MC_DDR3PHYAC_MDPPR1 0x6a30
+#define A_MC_DDR3PHYAC_PMBDR0 0x6a34
+
+#define S_DFLTDLY 0
+#define M_DFLTDLY 0x7fU
+#define V_DFLTDLY(x) ((x) << S_DFLTDLY)
+#define G_DFLTDLY(x) (((x) >> S_DFLTDLY) & M_DFLTDLY)
+
+#define A_MC_DDR3PHYAC_PMBDR1 0x6a38
+#define A_MC_DDR3PHYAC_ACR 0x6a60
+
+#define S_TSEL 9
+#define V_TSEL(x) ((x) << S_TSEL)
+#define F_TSEL V_TSEL(1U)
+
+#define S_ISEL 7
+#define M_ISEL 0x3U
+#define V_ISEL(x) ((x) << S_ISEL)
+#define G_ISEL(x) (((x) >> S_ISEL) & M_ISEL)
+
+#define S_CALBYP 2
+#define V_CALBYP(x) ((x) << S_CALBYP)
+#define F_CALBYP V_CALBYP(1U)
+
+#define S_SDRSELINV 1
+#define V_SDRSELINV(x) ((x) << S_SDRSELINV)
+#define F_SDRSELINV V_SDRSELINV(1U)
+
+#define S_CKINV 0
+#define V_CKINV(x) ((x) << S_CKINV)
+#define F_CKINV V_CKINV(1U)
+
+#define A_MC_DDR3PHYAC_PSCR 0x6a64
+
+#define S_PSCALE 0
+#define M_PSCALE 0x3ffU
+#define V_PSCALE(x) ((x) << S_PSCALE)
+#define G_PSCALE(x) (((x) >> S_PSCALE) & M_PSCALE)
+
+#define A_MC_DDR3PHYAC_PRCR 0x6a68
+
+#define S_PHYINIT 9
+#define V_PHYINIT(x) ((x) << S_PHYINIT)
+#define F_PHYINIT V_PHYINIT(1U)
+
+#define S_PHYHRST 7
+#define V_PHYHRST(x) ((x) << S_PHYHRST)
+#define F_PHYHRST V_PHYHRST(1U)
+
+#define S_RSTCLKS 3
+#define M_RSTCLKS 0xfU
+#define V_RSTCLKS(x) ((x) << S_RSTCLKS)
+#define G_RSTCLKS(x) (((x) >> S_RSTCLKS) & M_RSTCLKS)
+
+#define S_PLLPD 2
+#define V_PLLPD(x) ((x) << S_PLLPD)
+#define F_PLLPD V_PLLPD(1U)
+
+#define S_PLLRST 1
+#define V_PLLRST(x) ((x) << S_PLLRST)
+#define F_PLLRST V_PLLRST(1U)
+
+#define S_PHYRST 0
+#define V_PHYRST(x) ((x) << S_PHYRST)
+#define F_PHYRST V_PHYRST(1U)
+
+#define A_MC_DDR3PHYAC_PLLCR0 0x6a6c
+
+#define S_RSTCXKS 4
+#define M_RSTCXKS 0x1fU
+#define V_RSTCXKS(x) ((x) << S_RSTCXKS)
+#define G_RSTCXKS(x) (((x) >> S_RSTCXKS) & M_RSTCXKS)
+
+#define S_ICPSEL 3
+#define V_ICPSEL(x) ((x) << S_ICPSEL)
+#define F_ICPSEL V_ICPSEL(1U)
+
+#define S_TESTA 0
+#define M_TESTA 0x7U
+#define V_TESTA(x) ((x) << S_TESTA)
+#define G_TESTA(x) (((x) >> S_TESTA) & M_TESTA)
+
+#define A_MC_DDR3PHYAC_PLLCR1 0x6a70
+
+#define S_BYPASS 9
+#define V_BYPASS(x) ((x) << S_BYPASS)
+#define F_BYPASS V_BYPASS(1U)
+
+#define S_BDIV 3
+#define M_BDIV 0x3U
+#define V_BDIV(x) ((x) << S_BDIV)
+#define G_BDIV(x) (((x) >> S_BDIV) & M_BDIV)
+
+#define S_TESTD 0
+#define M_TESTD 0x7U
+#define V_TESTD(x) ((x) << S_TESTD)
+#define G_TESTD(x) (((x) >> S_TESTD) & M_TESTD)
+
+#define A_MC_DDR3PHYAC_CLKENR 0x6a78
+
+#define S_CKCLKEN 3
+#define M_CKCLKEN 0x3fU
+#define V_CKCLKEN(x) ((x) << S_CKCLKEN)
+#define G_CKCLKEN(x) (((x) >> S_CKCLKEN) & M_CKCLKEN)
+
+#define S_HDRCLKEN 2
+#define V_HDRCLKEN(x) ((x) << S_HDRCLKEN)
+#define F_HDRCLKEN V_HDRCLKEN(1U)
+
+#define S_SDRCLKEN 1
+#define V_SDRCLKEN(x) ((x) << S_SDRCLKEN)
+#define F_SDRCLKEN V_SDRCLKEN(1U)
+
+#define S_DDRCLKEN 0
+#define V_DDRCLKEN(x) ((x) << S_DDRCLKEN)
+#define F_DDRCLKEN V_DDRCLKEN(1U)
+
+#define A_MC_DDR3PHYDATX8_GCR 0x6b00
+
+#define S_PONR 6
+#define V_PONR(x) ((x) << S_PONR)
+#define F_PONR V_PONR(1U)
+
+#define S_POND 5
+#define V_POND(x) ((x) << S_POND)
+#define F_POND V_POND(1U)
+
+#define S_RDBDVT 4
+#define V_RDBDVT(x) ((x) << S_RDBDVT)
+#define F_RDBDVT V_RDBDVT(1U)
+
+#define S_WDBDVT 3
+#define V_WDBDVT(x) ((x) << S_WDBDVT)
+#define F_WDBDVT V_WDBDVT(1U)
+
+#define S_RDSDVT 2
+#define V_RDSDVT(x) ((x) << S_RDSDVT)
+#define F_RDSDVT V_RDSDVT(1U)
+
+#define S_WDSDVT 1
+#define V_WDSDVT(x) ((x) << S_WDSDVT)
+#define F_WDSDVT V_WDSDVT(1U)
+
+#define S_WLSDVT 0
+#define V_WLSDVT(x) ((x) << S_WLSDVT)
+#define F_WLSDVT V_WLSDVT(1U)
+
+#define A_MC_DDR3PHYDATX8_WDSDR 0x6b04
+
+#define S_WDSDR_DLY 0
+#define M_WDSDR_DLY 0x3ffU
+#define V_WDSDR_DLY(x) ((x) << S_WDSDR_DLY)
+#define G_WDSDR_DLY(x) (((x) >> S_WDSDR_DLY) & M_WDSDR_DLY)
+
+#define A_MC_DDR3PHYDATX8_WLDPR 0x6b08
+#define A_MC_DDR3PHYDATX8_WLDR 0x6b0c
+
+#define S_WL_DLY 0
+#define M_WL_DLY 0x3ffU
+#define V_WL_DLY(x) ((x) << S_WL_DLY)
+#define G_WL_DLY(x) (((x) >> S_WL_DLY) & M_WL_DLY)
+
+#define A_MC_DDR3PHYDATX8_WDBDR0 0x6b1c
+
+#define S_DLY 0
+#define M_DLY 0x7fU
+#define V_DLY(x) ((x) << S_DLY)
+#define G_DLY(x) (((x) >> S_DLY) & M_DLY)
+
+#define A_MC_DDR3PHYDATX8_WDBDR1 0x6b20
+#define A_MC_DDR3PHYDATX8_WDBDR2 0x6b24
+#define A_MC_DDR3PHYDATX8_WDBDR3 0x6b28
+#define A_MC_DDR3PHYDATX8_WDBDR4 0x6b2c
+#define A_MC_DDR3PHYDATX8_WDBDR5 0x6b30
+#define A_MC_DDR3PHYDATX8_WDBDR6 0x6b34
+#define A_MC_DDR3PHYDATX8_WDBDR7 0x6b38
+#define A_MC_DDR3PHYDATX8_WDBDR8 0x6b3c
+#define A_MC_DDR3PHYDATX8_WDBDMR 0x6b40
+
+#define S_MAXDLY 0
+#define M_MAXDLY 0x7fU
+#define V_MAXDLY(x) ((x) << S_MAXDLY)
+#define G_MAXDLY(x) (((x) >> S_MAXDLY) & M_MAXDLY)
+
+#define A_MC_DDR3PHYDATX8_RDSDR 0x6b44
+
+#define S_RDSDR_DLY 0
+#define M_RDSDR_DLY 0x3ffU
+#define V_RDSDR_DLY(x) ((x) << S_RDSDR_DLY)
+#define G_RDSDR_DLY(x) (((x) >> S_RDSDR_DLY) & M_RDSDR_DLY)
+
+#define A_MC_DDR3PHYDATX8_RDBDR0 0x6b48
+#define A_MC_DDR3PHYDATX8_RDBDR1 0x6b4c
+#define A_MC_DDR3PHYDATX8_RDBDR2 0x6b50
+#define A_MC_DDR3PHYDATX8_RDBDR3 0x6b54
+#define A_MC_DDR3PHYDATX8_RDBDR4 0x6b58
+#define A_MC_DDR3PHYDATX8_RDBDR5 0x6b5c
+#define A_MC_DDR3PHYDATX8_RDBDR6 0x6b60
+#define A_MC_DDR3PHYDATX8_RDBDR7 0x6b64
+#define A_MC_DDR3PHYDATX8_RDBDMR 0x6b68
+#define A_MC_DDR3PHYDATX8_PMBDR0 0x6b6c
+#define A_MC_DDR3PHYDATX8_PMBDR1 0x6b70
+#define A_MC_DDR3PHYDATX8_PMBDR2 0x6b74
+#define A_MC_DDR3PHYDATX8_PMBDR3 0x6b78
+#define A_MC_DDR3PHYDATX8_WDBDPR 0x6b7c
+
+#define S_DP_DLY 0
+#define M_DP_DLY 0x1ffU
+#define V_DP_DLY(x) ((x) << S_DP_DLY)
+#define G_DP_DLY(x) (((x) >> S_DP_DLY) & M_DP_DLY)
+
+#define A_MC_DDR3PHYDATX8_RDBDPR 0x6b80
+#define A_MC_DDR3PHYDATX8_GSR 0x6b84
+
+#define S_WLDONE 3
+#define V_WLDONE(x) ((x) << S_WLDONE)
+#define F_WLDONE V_WLDONE(1U)
+
+#define S_WLCAL 2
+#define V_WLCAL(x) ((x) << S_WLCAL)
+#define F_WLCAL V_WLCAL(1U)
+
+#define S_READ 1
+#define V_READ(x) ((x) << S_READ)
+#define F_READ V_READ(1U)
+
+#define S_RDQSCAL 0
+#define V_RDQSCAL(x) ((x) << S_RDQSCAL)
+#define F_RDQSCAL V_RDQSCAL(1U)
+
+#define A_MC_DDR3PHYDATX8_ACR 0x6bf0
+
+#define S_PHYHSRST 9
+#define V_PHYHSRST(x) ((x) << S_PHYHSRST)
+#define F_PHYHSRST V_PHYHSRST(1U)
+
+#define S_WLSTEP 8
+#define V_WLSTEP(x) ((x) << S_WLSTEP)
+#define F_WLSTEP V_WLSTEP(1U)
+
+#define S_SDR_SEL_INV 2
+#define V_SDR_SEL_INV(x) ((x) << S_SDR_SEL_INV)
+#define F_SDR_SEL_INV V_SDR_SEL_INV(1U)
+
+#define S_DDRSELINV 1
+#define V_DDRSELINV(x) ((x) << S_DDRSELINV)
+#define F_DDRSELINV V_DDRSELINV(1U)
+
+#define S_DSINV 0
+#define V_DSINV(x) ((x) << S_DSINV)
+#define F_DSINV V_DSINV(1U)
+
+#define A_MC_DDR3PHYDATX8_RSR 0x6bf4
+
+#define S_WLRANKSEL 9
+#define V_WLRANKSEL(x) ((x) << S_WLRANKSEL)
+#define F_WLRANKSEL V_WLRANKSEL(1U)
+
+#define S_RANK 0
+#define M_RANK 0x3U
+#define V_RANK(x) ((x) << S_RANK)
+#define G_RANK(x) (((x) >> S_RANK) & M_RANK)
+
+#define A_MC_DDR3PHYDATX8_CLKENR 0x6bf8
+
+#define S_DTOSEL 8
+#define M_DTOSEL 0x3U
+#define V_DTOSEL(x) ((x) << S_DTOSEL)
+#define G_DTOSEL(x) (((x) >> S_DTOSEL) & M_DTOSEL)
+
+#define A_MC_PVT_REG_CALIBRATE_CTL 0x7400
+#define A_MC_PVT_REG_UPDATE_CTL 0x7404
+#define A_MC_PVT_REG_LAST_MEASUREMENT 0x7408
+#define A_MC_PVT_REG_DRVN 0x740c
+#define A_MC_PVT_REG_DRVP 0x7410
+#define A_MC_PVT_REG_TERMN 0x7414
+#define A_MC_PVT_REG_TERMP 0x7418
+#define A_MC_PVT_REG_THRESHOLD 0x741c
+#define A_MC_PVT_REG_IN_TERMP 0x7420
+#define A_MC_PVT_REG_IN_TERMN 0x7424
+#define A_MC_PVT_REG_IN_DRVP 0x7428
+#define A_MC_PVT_REG_IN_DRVN 0x742c
+#define A_MC_PVT_REG_OUT_TERMP 0x7430
+#define A_MC_PVT_REG_OUT_TERMN 0x7434
+#define A_MC_PVT_REG_OUT_DRVP 0x7438
+#define A_MC_PVT_REG_OUT_DRVN 0x743c
+#define A_MC_PVT_REG_HISTORY_TERMP 0x7440
+#define A_MC_PVT_REG_HISTORY_TERMN 0x7444
+#define A_MC_PVT_REG_HISTORY_DRVP 0x7448
+#define A_MC_PVT_REG_HISTORY_DRVN 0x744c
+#define A_MC_PVT_REG_SAMPLE_WAIT_CLKS 0x7450
+#define A_MC_DDRPHY_RST_CTRL 0x7500
+
+#define S_DDRIO_ENABLE 1
+#define V_DDRIO_ENABLE(x) ((x) << S_DDRIO_ENABLE)
+#define F_DDRIO_ENABLE V_DDRIO_ENABLE(1U)
+
+#define S_PHY_RST_N 0
+#define V_PHY_RST_N(x) ((x) << S_PHY_RST_N)
+#define F_PHY_RST_N V_PHY_RST_N(1U)
+
+#define A_MC_PERFORMANCE_CTRL 0x7504
+
+#define S_STALL_CHK_BIT 2
+#define V_STALL_CHK_BIT(x) ((x) << S_STALL_CHK_BIT)
+#define F_STALL_CHK_BIT V_STALL_CHK_BIT(1U)
+
+#define S_DDR3_BRC_MODE 1
+#define V_DDR3_BRC_MODE(x) ((x) << S_DDR3_BRC_MODE)
+#define F_DDR3_BRC_MODE V_DDR3_BRC_MODE(1U)
+
+#define S_RMW_PERF_CTRL 0
+#define V_RMW_PERF_CTRL(x) ((x) << S_RMW_PERF_CTRL)
+#define F_RMW_PERF_CTRL V_RMW_PERF_CTRL(1U)
+
+#define A_MC_ECC_CTRL 0x7508
+
+#define S_ECC_BYPASS_BIST 1
+#define V_ECC_BYPASS_BIST(x) ((x) << S_ECC_BYPASS_BIST)
+#define F_ECC_BYPASS_BIST V_ECC_BYPASS_BIST(1U)
+
+#define S_ECC_DISABLE 0
+#define V_ECC_DISABLE(x) ((x) << S_ECC_DISABLE)
+#define F_ECC_DISABLE V_ECC_DISABLE(1U)
+
+#define A_MC_PAR_ENABLE 0x750c
+
+#define S_ECC_UE_PAR_ENABLE 3
+#define V_ECC_UE_PAR_ENABLE(x) ((x) << S_ECC_UE_PAR_ENABLE)
+#define F_ECC_UE_PAR_ENABLE V_ECC_UE_PAR_ENABLE(1U)
+
+#define S_ECC_CE_PAR_ENABLE 2
+#define V_ECC_CE_PAR_ENABLE(x) ((x) << S_ECC_CE_PAR_ENABLE)
+#define F_ECC_CE_PAR_ENABLE V_ECC_CE_PAR_ENABLE(1U)
+
+#define S_PERR_REG_INT_ENABLE 1
+#define V_PERR_REG_INT_ENABLE(x) ((x) << S_PERR_REG_INT_ENABLE)
+#define F_PERR_REG_INT_ENABLE V_PERR_REG_INT_ENABLE(1U)
+
+#define S_PERR_BLK_INT_ENABLE 0
+#define V_PERR_BLK_INT_ENABLE(x) ((x) << S_PERR_BLK_INT_ENABLE)
+#define F_PERR_BLK_INT_ENABLE V_PERR_BLK_INT_ENABLE(1U)
+
+#define A_MC_PAR_CAUSE 0x7510
+
+#define S_ECC_UE_PAR_CAUSE 3
+#define V_ECC_UE_PAR_CAUSE(x) ((x) << S_ECC_UE_PAR_CAUSE)
+#define F_ECC_UE_PAR_CAUSE V_ECC_UE_PAR_CAUSE(1U)
+
+#define S_ECC_CE_PAR_CAUSE 2
+#define V_ECC_CE_PAR_CAUSE(x) ((x) << S_ECC_CE_PAR_CAUSE)
+#define F_ECC_CE_PAR_CAUSE V_ECC_CE_PAR_CAUSE(1U)
+
+#define S_FIFOR_PAR_CAUSE 1
+#define V_FIFOR_PAR_CAUSE(x) ((x) << S_FIFOR_PAR_CAUSE)
+#define F_FIFOR_PAR_CAUSE V_FIFOR_PAR_CAUSE(1U)
+
+#define S_RDATA_FIFOR_PAR_CAUSE 0
+#define V_RDATA_FIFOR_PAR_CAUSE(x) ((x) << S_RDATA_FIFOR_PAR_CAUSE)
+#define F_RDATA_FIFOR_PAR_CAUSE V_RDATA_FIFOR_PAR_CAUSE(1U)
+
+#define A_MC_INT_ENABLE 0x7514
+
+#define S_ECC_UE_INT_ENABLE 2
+#define V_ECC_UE_INT_ENABLE(x) ((x) << S_ECC_UE_INT_ENABLE)
+#define F_ECC_UE_INT_ENABLE V_ECC_UE_INT_ENABLE(1U)
+
+#define S_ECC_CE_INT_ENABLE 1
+#define V_ECC_CE_INT_ENABLE(x) ((x) << S_ECC_CE_INT_ENABLE)
+#define F_ECC_CE_INT_ENABLE V_ECC_CE_INT_ENABLE(1U)
+
+#define S_PERR_INT_ENABLE 0
+#define V_PERR_INT_ENABLE(x) ((x) << S_PERR_INT_ENABLE)
+#define F_PERR_INT_ENABLE V_PERR_INT_ENABLE(1U)
+
+#define A_MC_INT_CAUSE 0x7518
+
+#define S_ECC_UE_INT_CAUSE 2
+#define V_ECC_UE_INT_CAUSE(x) ((x) << S_ECC_UE_INT_CAUSE)
+#define F_ECC_UE_INT_CAUSE V_ECC_UE_INT_CAUSE(1U)
+
+#define S_ECC_CE_INT_CAUSE 1
+#define V_ECC_CE_INT_CAUSE(x) ((x) << S_ECC_CE_INT_CAUSE)
+#define F_ECC_CE_INT_CAUSE V_ECC_CE_INT_CAUSE(1U)
+
+#define S_PERR_INT_CAUSE 0
+#define V_PERR_INT_CAUSE(x) ((x) << S_PERR_INT_CAUSE)
+#define F_PERR_INT_CAUSE V_PERR_INT_CAUSE(1U)
+
+#define A_MC_ECC_STATUS 0x751c
+
+#define S_ECC_CECNT 16
+#define M_ECC_CECNT 0xffffU
+#define V_ECC_CECNT(x) ((x) << S_ECC_CECNT)
+#define G_ECC_CECNT(x) (((x) >> S_ECC_CECNT) & M_ECC_CECNT)
+
+#define S_ECC_UECNT 0
+#define M_ECC_UECNT 0xffffU
+#define V_ECC_UECNT(x) ((x) << S_ECC_UECNT)
+#define G_ECC_UECNT(x) (((x) >> S_ECC_UECNT) & M_ECC_UECNT)
+
+#define A_MC_PHY_CTRL 0x7520
+
+#define S_CTLPHYRR 0
+#define V_CTLPHYRR(x) ((x) << S_CTLPHYRR)
+#define F_CTLPHYRR V_CTLPHYRR(1U)
+
+#define A_MC_STATIC_CFG_STATUS 0x7524
+
+#define S_STATIC_MODE 9
+#define V_STATIC_MODE(x) ((x) << S_STATIC_MODE)
+#define F_STATIC_MODE V_STATIC_MODE(1U)
+
+#define S_STATIC_DEN 6
+#define M_STATIC_DEN 0x7U
+#define V_STATIC_DEN(x) ((x) << S_STATIC_DEN)
+#define G_STATIC_DEN(x) (((x) >> S_STATIC_DEN) & M_STATIC_DEN)
+
+#define S_STATIC_ORG 5
+#define V_STATIC_ORG(x) ((x) << S_STATIC_ORG)
+#define F_STATIC_ORG V_STATIC_ORG(1U)
+
+#define S_STATIC_RKS 4
+#define V_STATIC_RKS(x) ((x) << S_STATIC_RKS)
+#define F_STATIC_RKS V_STATIC_RKS(1U)
+
+#define S_STATIC_WIDTH 1
+#define M_STATIC_WIDTH 0x7U
+#define V_STATIC_WIDTH(x) ((x) << S_STATIC_WIDTH)
+#define G_STATIC_WIDTH(x) (((x) >> S_STATIC_WIDTH) & M_STATIC_WIDTH)
+
+#define S_STATIC_SLOW 0
+#define V_STATIC_SLOW(x) ((x) << S_STATIC_SLOW)
+#define F_STATIC_SLOW V_STATIC_SLOW(1U)
+
+#define A_MC_CORE_PCTL_STAT 0x7528
+
+#define S_PCTL_ACCESS_STAT 0
+#define M_PCTL_ACCESS_STAT 0x7U
+#define V_PCTL_ACCESS_STAT(x) ((x) << S_PCTL_ACCESS_STAT)
+#define G_PCTL_ACCESS_STAT(x) (((x) >> S_PCTL_ACCESS_STAT) & M_PCTL_ACCESS_STAT)
+
+#define A_MC_DEBUG_CNT 0x752c
+
+#define S_WDATA_OCNT 8
+#define M_WDATA_OCNT 0x1fU
+#define V_WDATA_OCNT(x) ((x) << S_WDATA_OCNT)
+#define G_WDATA_OCNT(x) (((x) >> S_WDATA_OCNT) & M_WDATA_OCNT)
+
+#define S_RDATA_OCNT 0
+#define M_RDATA_OCNT 0x1fU
+#define V_RDATA_OCNT(x) ((x) << S_RDATA_OCNT)
+#define G_RDATA_OCNT(x) (((x) >> S_RDATA_OCNT) & M_RDATA_OCNT)
+
+#define A_MC_BONUS 0x7530
+#define A_MC_BIST_CMD 0x7600
+
+#define S_START_BIST 31
+#define V_START_BIST(x) ((x) << S_START_BIST)
+#define F_START_BIST V_START_BIST(1U)
+
+#define S_BIST_CMD_GAP 8
+#define M_BIST_CMD_GAP 0xffU
+#define V_BIST_CMD_GAP(x) ((x) << S_BIST_CMD_GAP)
+#define G_BIST_CMD_GAP(x) (((x) >> S_BIST_CMD_GAP) & M_BIST_CMD_GAP)
+
+#define S_BIST_OPCODE 0
+#define M_BIST_OPCODE 0x3U
+#define V_BIST_OPCODE(x) ((x) << S_BIST_OPCODE)
+#define G_BIST_OPCODE(x) (((x) >> S_BIST_OPCODE) & M_BIST_OPCODE)
+
+#define A_MC_BIST_CMD_ADDR 0x7604
+#define A_MC_BIST_CMD_LEN 0x7608
+#define A_MC_BIST_DATA_PATTERN 0x760c
+
+#define S_BIST_DATA_TYPE 0
+#define M_BIST_DATA_TYPE 0xfU
+#define V_BIST_DATA_TYPE(x) ((x) << S_BIST_DATA_TYPE)
+#define G_BIST_DATA_TYPE(x) (((x) >> S_BIST_DATA_TYPE) & M_BIST_DATA_TYPE)
+
+#define A_MC_BIST_USER_WDATA0 0x7614
+#define A_MC_BIST_USER_WDATA1 0x7618
+#define A_MC_BIST_USER_WDATA2 0x761c
+
+#define S_USER_DATA2 0
+#define M_USER_DATA2 0xffU
+#define V_USER_DATA2(x) ((x) << S_USER_DATA2)
+#define G_USER_DATA2(x) (((x) >> S_USER_DATA2) & M_USER_DATA2)
+
+#define A_MC_BIST_NUM_ERR 0x7680
+#define A_MC_BIST_ERR_FIRST_ADDR 0x7684
+#define A_MC_BIST_STATUS_RDATA 0x7688
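+
+/*
+ * The MC_BIST registers above drive the memory controller's built-in
+ * self-test engine.  A minimal sketch of issuing a command with these
+ * fields, assuming the driver's t4_write_reg()/t4_read_reg() helpers
+ * and that the hardware clears START_BIST when the operation finishes
+ * (the opcode and gap values here are illustrative only):
+ *
+ *	t4_write_reg(sc, A_MC_BIST_CMD_ADDR, addr);
+ *	t4_write_reg(sc, A_MC_BIST_CMD_LEN, len);
+ *	t4_write_reg(sc, A_MC_BIST_CMD,
+ *	    F_START_BIST | V_BIST_CMD_GAP(gap) | V_BIST_OPCODE(op));
+ *	while (t4_read_reg(sc, A_MC_BIST_CMD) & F_START_BIST)
+ *		;	(busy-wait until the engine goes idle)
+ */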
+
+/* registers for module MA */
+#define MA_BASE_ADDR 0x7700
+
+#define A_MA_CLIENT0_RD_LATENCY_THRESHOLD 0x7700
+
+#define S_THRESHOLD1 17
+#define M_THRESHOLD1 0x7fffU
+#define V_THRESHOLD1(x) ((x) << S_THRESHOLD1)
+#define G_THRESHOLD1(x) (((x) >> S_THRESHOLD1) & M_THRESHOLD1)
+
+#define S_THRESHOLD1_EN 16
+#define V_THRESHOLD1_EN(x) ((x) << S_THRESHOLD1_EN)
+#define F_THRESHOLD1_EN V_THRESHOLD1_EN(1U)
+
+#define S_THRESHOLD0 1
+#define M_THRESHOLD0 0x7fffU
+#define V_THRESHOLD0(x) ((x) << S_THRESHOLD0)
+#define G_THRESHOLD0(x) (((x) >> S_THRESHOLD0) & M_THRESHOLD0)
+
+#define S_THRESHOLD0_EN 0
+#define V_THRESHOLD0_EN(x) ((x) << S_THRESHOLD0_EN)
+#define F_THRESHOLD0_EN V_THRESHOLD0_EN(1U)
+
+#define A_MA_CLIENT0_WR_LATENCY_THRESHOLD 0x7704
+#define A_MA_CLIENT1_RD_LATENCY_THRESHOLD 0x7708
+#define A_MA_CLIENT1_WR_LATENCY_THRESHOLD 0x770c
+#define A_MA_CLIENT2_RD_LATENCY_THRESHOLD 0x7710
+#define A_MA_CLIENT2_WR_LATENCY_THRESHOLD 0x7714
+#define A_MA_CLIENT3_RD_LATENCY_THRESHOLD 0x7718
+#define A_MA_CLIENT3_WR_LATENCY_THRESHOLD 0x771c
+#define A_MA_CLIENT4_RD_LATENCY_THRESHOLD 0x7720
+#define A_MA_CLIENT4_WR_LATENCY_THRESHOLD 0x7724
+#define A_MA_CLIENT5_RD_LATENCY_THRESHOLD 0x7728
+#define A_MA_CLIENT5_WR_LATENCY_THRESHOLD 0x772c
+#define A_MA_CLIENT6_RD_LATENCY_THRESHOLD 0x7730
+#define A_MA_CLIENT6_WR_LATENCY_THRESHOLD 0x7734
+#define A_MA_CLIENT7_RD_LATENCY_THRESHOLD 0x7738
+#define A_MA_CLIENT7_WR_LATENCY_THRESHOLD 0x773c
+#define A_MA_CLIENT8_RD_LATENCY_THRESHOLD 0x7740
+#define A_MA_CLIENT8_WR_LATENCY_THRESHOLD 0x7744
+#define A_MA_CLIENT9_RD_LATENCY_THRESHOLD 0x7748
+#define A_MA_CLIENT9_WR_LATENCY_THRESHOLD 0x774c
+#define A_MA_CLIENT10_RD_LATENCY_THRESHOLD 0x7750
+#define A_MA_CLIENT10_WR_LATENCY_THRESHOLD 0x7754
+#define A_MA_CLIENT11_RD_LATENCY_THRESHOLD 0x7758
+#define A_MA_CLIENT11_WR_LATENCY_THRESHOLD 0x775c
+#define A_MA_CLIENT12_RD_LATENCY_THRESHOLD 0x7760
+#define A_MA_CLIENT12_WR_LATENCY_THRESHOLD 0x7764
+#define A_MA_SGE_TH0_DEBUG_CNT 0x7768
+
+#define S_DBG_READ_DATA_CNT 24
+#define M_DBG_READ_DATA_CNT 0xffU
+#define V_DBG_READ_DATA_CNT(x) ((x) << S_DBG_READ_DATA_CNT)
+#define G_DBG_READ_DATA_CNT(x) (((x) >> S_DBG_READ_DATA_CNT) & M_DBG_READ_DATA_CNT)
+
+#define S_DBG_READ_REQ_CNT 16
+#define M_DBG_READ_REQ_CNT 0xffU
+#define V_DBG_READ_REQ_CNT(x) ((x) << S_DBG_READ_REQ_CNT)
+#define G_DBG_READ_REQ_CNT(x) (((x) >> S_DBG_READ_REQ_CNT) & M_DBG_READ_REQ_CNT)
+
+#define S_DBG_WRITE_DATA_CNT 8
+#define M_DBG_WRITE_DATA_CNT 0xffU
+#define V_DBG_WRITE_DATA_CNT(x) ((x) << S_DBG_WRITE_DATA_CNT)
+#define G_DBG_WRITE_DATA_CNT(x) (((x) >> S_DBG_WRITE_DATA_CNT) & M_DBG_WRITE_DATA_CNT)
+
+#define S_DBG_WRITE_REQ_CNT 0
+#define M_DBG_WRITE_REQ_CNT 0xffU
+#define V_DBG_WRITE_REQ_CNT(x) ((x) << S_DBG_WRITE_REQ_CNT)
+#define G_DBG_WRITE_REQ_CNT(x) (((x) >> S_DBG_WRITE_REQ_CNT) & M_DBG_WRITE_REQ_CNT)
+
+#define A_MA_SGE_TH1_DEBUG_CNT 0x776c
+#define A_MA_ULPTX_DEBUG_CNT 0x7770
+#define A_MA_ULPRX_DEBUG_CNT 0x7774
+#define A_MA_ULPTXRX_DEBUG_CNT 0x7778
+#define A_MA_TP_TH0_DEBUG_CNT 0x777c
+#define A_MA_TP_TH1_DEBUG_CNT 0x7780
+#define A_MA_LE_DEBUG_CNT 0x7784
+#define A_MA_CIM_DEBUG_CNT 0x7788
+#define A_MA_PCIE_DEBUG_CNT 0x778c
+#define A_MA_PMTX_DEBUG_CNT 0x7790
+#define A_MA_PMRX_DEBUG_CNT 0x7794
+#define A_MA_HMA_DEBUG_CNT 0x7798
+#define A_MA_EDRAM0_BAR 0x77c0
+
+#define S_EDRAM0_BASE 16
+#define M_EDRAM0_BASE 0xfffU
+#define V_EDRAM0_BASE(x) ((x) << S_EDRAM0_BASE)
+#define G_EDRAM0_BASE(x) (((x) >> S_EDRAM0_BASE) & M_EDRAM0_BASE)
+
+#define S_EDRAM0_SIZE 0
+#define M_EDRAM0_SIZE 0xfffU
+#define V_EDRAM0_SIZE(x) ((x) << S_EDRAM0_SIZE)
+#define G_EDRAM0_SIZE(x) (((x) >> S_EDRAM0_SIZE) & M_EDRAM0_SIZE)
+
+#define A_MA_EDRAM1_BAR 0x77c4
+
+#define S_EDRAM1_BASE 16
+#define M_EDRAM1_BASE 0xfffU
+#define V_EDRAM1_BASE(x) ((x) << S_EDRAM1_BASE)
+#define G_EDRAM1_BASE(x) (((x) >> S_EDRAM1_BASE) & M_EDRAM1_BASE)
+
+#define S_EDRAM1_SIZE 0
+#define M_EDRAM1_SIZE 0xfffU
+#define V_EDRAM1_SIZE(x) ((x) << S_EDRAM1_SIZE)
+#define G_EDRAM1_SIZE(x) (((x) >> S_EDRAM1_SIZE) & M_EDRAM1_SIZE)
+
+#define A_MA_EXT_MEMORY_BAR 0x77c8
+
+#define S_EXT_MEM_BASE 16
+#define M_EXT_MEM_BASE 0xfffU
+#define V_EXT_MEM_BASE(x) ((x) << S_EXT_MEM_BASE)
+#define G_EXT_MEM_BASE(x) (((x) >> S_EXT_MEM_BASE) & M_EXT_MEM_BASE)
+
+#define S_EXT_MEM_SIZE 0
+#define M_EXT_MEM_SIZE 0xfffU
+#define V_EXT_MEM_SIZE(x) ((x) << S_EXT_MEM_SIZE)
+#define G_EXT_MEM_SIZE(x) (((x) >> S_EXT_MEM_SIZE) & M_EXT_MEM_SIZE)
+
+#define A_MA_HOST_MEMORY_BAR 0x77cc
+
+#define S_HMA_BASE 16
+#define M_HMA_BASE 0xfffU
+#define V_HMA_BASE(x) ((x) << S_HMA_BASE)
+#define G_HMA_BASE(x) (((x) >> S_HMA_BASE) & M_HMA_BASE)
+
+#define S_HMA_SIZE 0
+#define M_HMA_SIZE 0xfffU
+#define V_HMA_SIZE(x) ((x) << S_HMA_SIZE)
+#define G_HMA_SIZE(x) (((x) >> S_HMA_SIZE) & M_HMA_SIZE)
+
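+/*
+ * The *_BAR registers above describe where each memory target (EDRAM0,
+ * EDRAM1, external memory, host memory) sits in the MA address space.
+ * A sketch of decoding one of them with the accessor macros, assuming
+ * the driver's t4_read_reg() helper and that the base and size fields
+ * are expressed in MB units:
+ *
+ *	uint32_t bar = t4_read_reg(sc, A_MA_EDRAM0_BAR);
+ *	uint64_t base = (uint64_t)G_EDRAM0_BASE(bar) << 20;
+ *	uint64_t size = (uint64_t)G_EDRAM0_SIZE(bar) << 20;
+ */
+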
+#define A_MA_EXT_MEM_PAGE_SIZE 0x77d0
+
+#define S_BRC_MODE 2
+#define V_BRC_MODE(x) ((x) << S_BRC_MODE)
+#define F_BRC_MODE V_BRC_MODE(1U)
+
+#define S_EXT_MEM_PAGE_SIZE 0
+#define M_EXT_MEM_PAGE_SIZE 0x3U
+#define V_EXT_MEM_PAGE_SIZE(x) ((x) << S_EXT_MEM_PAGE_SIZE)
+#define G_EXT_MEM_PAGE_SIZE(x) (((x) >> S_EXT_MEM_PAGE_SIZE) & M_EXT_MEM_PAGE_SIZE)
+
+#define A_MA_ARB_CTRL 0x77d4
+
+#define S_DIS_PAGE_HINT 1
+#define V_DIS_PAGE_HINT(x) ((x) << S_DIS_PAGE_HINT)
+#define F_DIS_PAGE_HINT V_DIS_PAGE_HINT(1U)
+
+#define S_DIS_ADV_ARB 0
+#define V_DIS_ADV_ARB(x) ((x) << S_DIS_ADV_ARB)
+#define F_DIS_ADV_ARB V_DIS_ADV_ARB(1U)
+
+#define A_MA_TARGET_MEM_ENABLE 0x77d8
+
+#define S_HMA_ENABLE 3
+#define V_HMA_ENABLE(x) ((x) << S_HMA_ENABLE)
+#define F_HMA_ENABLE V_HMA_ENABLE(1U)
+
+#define S_EXT_MEM_ENABLE 2
+#define V_EXT_MEM_ENABLE(x) ((x) << S_EXT_MEM_ENABLE)
+#define F_EXT_MEM_ENABLE V_EXT_MEM_ENABLE(1U)
+
+#define S_EDRAM1_ENABLE 1
+#define V_EDRAM1_ENABLE(x) ((x) << S_EDRAM1_ENABLE)
+#define F_EDRAM1_ENABLE V_EDRAM1_ENABLE(1U)
+
+#define S_EDRAM0_ENABLE 0
+#define V_EDRAM0_ENABLE(x) ((x) << S_EDRAM0_ENABLE)
+#define F_EDRAM0_ENABLE V_EDRAM0_ENABLE(1U)
+
+#define A_MA_INT_ENABLE 0x77dc
+
+#define S_MEM_PERR_INT_ENABLE 1
+#define V_MEM_PERR_INT_ENABLE(x) ((x) << S_MEM_PERR_INT_ENABLE)
+#define F_MEM_PERR_INT_ENABLE V_MEM_PERR_INT_ENABLE(1U)
+
+#define S_MEM_WRAP_INT_ENABLE 0
+#define V_MEM_WRAP_INT_ENABLE(x) ((x) << S_MEM_WRAP_INT_ENABLE)
+#define F_MEM_WRAP_INT_ENABLE V_MEM_WRAP_INT_ENABLE(1U)
+
+#define A_MA_INT_CAUSE 0x77e0
+
+#define S_MEM_PERR_INT_CAUSE 1
+#define V_MEM_PERR_INT_CAUSE(x) ((x) << S_MEM_PERR_INT_CAUSE)
+#define F_MEM_PERR_INT_CAUSE V_MEM_PERR_INT_CAUSE(1U)
+
+#define S_MEM_WRAP_INT_CAUSE 0
+#define V_MEM_WRAP_INT_CAUSE(x) ((x) << S_MEM_WRAP_INT_CAUSE)
+#define F_MEM_WRAP_INT_CAUSE V_MEM_WRAP_INT_CAUSE(1U)
+
+#define A_MA_INT_WRAP_STATUS 0x77e4
+
+#define S_MEM_WRAP_ADDRESS 4
+#define M_MEM_WRAP_ADDRESS 0xfffffffU
+#define V_MEM_WRAP_ADDRESS(x) ((x) << S_MEM_WRAP_ADDRESS)
+#define G_MEM_WRAP_ADDRESS(x) (((x) >> S_MEM_WRAP_ADDRESS) & M_MEM_WRAP_ADDRESS)
+
+#define S_MEM_WRAP_CLIENT_NUM 0
+#define M_MEM_WRAP_CLIENT_NUM 0xfU
+#define V_MEM_WRAP_CLIENT_NUM(x) ((x) << S_MEM_WRAP_CLIENT_NUM)
+#define G_MEM_WRAP_CLIENT_NUM(x) (((x) >> S_MEM_WRAP_CLIENT_NUM) & M_MEM_WRAP_CLIENT_NUM)
+
+#define A_MA_TP_THREAD1_MAPPER 0x77e8
+
+#define S_TP_THREAD1_EN 0
+#define M_TP_THREAD1_EN 0xffU
+#define V_TP_THREAD1_EN(x) ((x) << S_TP_THREAD1_EN)
+#define G_TP_THREAD1_EN(x) (((x) >> S_TP_THREAD1_EN) & M_TP_THREAD1_EN)
+
+#define A_MA_SGE_THREAD1_MAPPER 0x77ec
+
+#define S_SGE_THREAD1_EN 0
+#define M_SGE_THREAD1_EN 0xffU
+#define V_SGE_THREAD1_EN(x) ((x) << S_SGE_THREAD1_EN)
+#define G_SGE_THREAD1_EN(x) (((x) >> S_SGE_THREAD1_EN) & M_SGE_THREAD1_EN)
+
+#define A_MA_PARITY_ERROR_ENABLE 0x77f0
+
+#define S_TP_DMARBT_PAR_ERROR_EN 31
+#define V_TP_DMARBT_PAR_ERROR_EN(x) ((x) << S_TP_DMARBT_PAR_ERROR_EN)
+#define F_TP_DMARBT_PAR_ERROR_EN V_TP_DMARBT_PAR_ERROR_EN(1U)
+
+#define S_LOGIC_FIFO_PAR_ERROR_EN 30
+#define V_LOGIC_FIFO_PAR_ERROR_EN(x) ((x) << S_LOGIC_FIFO_PAR_ERROR_EN)
+#define F_LOGIC_FIFO_PAR_ERROR_EN V_LOGIC_FIFO_PAR_ERROR_EN(1U)
+
+#define S_ARB3_PAR_WRQUEUE_ERROR_EN 29
+#define V_ARB3_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_ARB3_PAR_WRQUEUE_ERROR_EN)
+#define F_ARB3_PAR_WRQUEUE_ERROR_EN V_ARB3_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_ARB2_PAR_WRQUEUE_ERROR_EN 28
+#define V_ARB2_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_ARB2_PAR_WRQUEUE_ERROR_EN)
+#define F_ARB2_PAR_WRQUEUE_ERROR_EN V_ARB2_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_ARB1_PAR_WRQUEUE_ERROR_EN 27
+#define V_ARB1_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_ARB1_PAR_WRQUEUE_ERROR_EN)
+#define F_ARB1_PAR_WRQUEUE_ERROR_EN V_ARB1_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_ARB0_PAR_WRQUEUE_ERROR_EN 26
+#define V_ARB0_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_ARB0_PAR_WRQUEUE_ERROR_EN)
+#define F_ARB0_PAR_WRQUEUE_ERROR_EN V_ARB0_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_ARB3_PAR_RDQUEUE_ERROR_EN 25
+#define V_ARB3_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_ARB3_PAR_RDQUEUE_ERROR_EN)
+#define F_ARB3_PAR_RDQUEUE_ERROR_EN V_ARB3_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_ARB2_PAR_RDQUEUE_ERROR_EN 24
+#define V_ARB2_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_ARB2_PAR_RDQUEUE_ERROR_EN)
+#define F_ARB2_PAR_RDQUEUE_ERROR_EN V_ARB2_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_ARB1_PAR_RDQUEUE_ERROR_EN 23
+#define V_ARB1_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_ARB1_PAR_RDQUEUE_ERROR_EN)
+#define F_ARB1_PAR_RDQUEUE_ERROR_EN V_ARB1_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_ARB0_PAR_RDQUEUE_ERROR_EN 22
+#define V_ARB0_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_ARB0_PAR_RDQUEUE_ERROR_EN)
+#define F_ARB0_PAR_RDQUEUE_ERROR_EN V_ARB0_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL10_PAR_WRQUEUE_ERROR_EN 21
+#define V_CL10_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_CL10_PAR_WRQUEUE_ERROR_EN)
+#define F_CL10_PAR_WRQUEUE_ERROR_EN V_CL10_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL9_PAR_WRQUEUE_ERROR_EN 20
+#define V_CL9_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_CL9_PAR_WRQUEUE_ERROR_EN)
+#define F_CL9_PAR_WRQUEUE_ERROR_EN V_CL9_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL8_PAR_WRQUEUE_ERROR_EN 19
+#define V_CL8_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_CL8_PAR_WRQUEUE_ERROR_EN)
+#define F_CL8_PAR_WRQUEUE_ERROR_EN V_CL8_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL7_PAR_WRQUEUE_ERROR_EN 18
+#define V_CL7_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_CL7_PAR_WRQUEUE_ERROR_EN)
+#define F_CL7_PAR_WRQUEUE_ERROR_EN V_CL7_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL6_PAR_WRQUEUE_ERROR_EN 17
+#define V_CL6_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_CL6_PAR_WRQUEUE_ERROR_EN)
+#define F_CL6_PAR_WRQUEUE_ERROR_EN V_CL6_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL5_PAR_WRQUEUE_ERROR_EN 16
+#define V_CL5_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_CL5_PAR_WRQUEUE_ERROR_EN)
+#define F_CL5_PAR_WRQUEUE_ERROR_EN V_CL5_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL4_PAR_WRQUEUE_ERROR_EN 15
+#define V_CL4_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_CL4_PAR_WRQUEUE_ERROR_EN)
+#define F_CL4_PAR_WRQUEUE_ERROR_EN V_CL4_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL3_PAR_WRQUEUE_ERROR_EN 14
+#define V_CL3_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_CL3_PAR_WRQUEUE_ERROR_EN)
+#define F_CL3_PAR_WRQUEUE_ERROR_EN V_CL3_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL2_PAR_WRQUEUE_ERROR_EN 13
+#define V_CL2_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_CL2_PAR_WRQUEUE_ERROR_EN)
+#define F_CL2_PAR_WRQUEUE_ERROR_EN V_CL2_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL1_PAR_WRQUEUE_ERROR_EN 12
+#define V_CL1_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_CL1_PAR_WRQUEUE_ERROR_EN)
+#define F_CL1_PAR_WRQUEUE_ERROR_EN V_CL1_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL0_PAR_WRQUEUE_ERROR_EN 11
+#define V_CL0_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_CL0_PAR_WRQUEUE_ERROR_EN)
+#define F_CL0_PAR_WRQUEUE_ERROR_EN V_CL0_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL10_PAR_RDQUEUE_ERROR_EN 10
+#define V_CL10_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_CL10_PAR_RDQUEUE_ERROR_EN)
+#define F_CL10_PAR_RDQUEUE_ERROR_EN V_CL10_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL9_PAR_RDQUEUE_ERROR_EN 9
+#define V_CL9_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_CL9_PAR_RDQUEUE_ERROR_EN)
+#define F_CL9_PAR_RDQUEUE_ERROR_EN V_CL9_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL8_PAR_RDQUEUE_ERROR_EN 8
+#define V_CL8_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_CL8_PAR_RDQUEUE_ERROR_EN)
+#define F_CL8_PAR_RDQUEUE_ERROR_EN V_CL8_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL7_PAR_RDQUEUE_ERROR_EN 7
+#define V_CL7_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_CL7_PAR_RDQUEUE_ERROR_EN)
+#define F_CL7_PAR_RDQUEUE_ERROR_EN V_CL7_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL6_PAR_RDQUEUE_ERROR_EN 6
+#define V_CL6_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_CL6_PAR_RDQUEUE_ERROR_EN)
+#define F_CL6_PAR_RDQUEUE_ERROR_EN V_CL6_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL5_PAR_RDQUEUE_ERROR_EN 5
+#define V_CL5_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_CL5_PAR_RDQUEUE_ERROR_EN)
+#define F_CL5_PAR_RDQUEUE_ERROR_EN V_CL5_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL4_PAR_RDQUEUE_ERROR_EN 4
+#define V_CL4_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_CL4_PAR_RDQUEUE_ERROR_EN)
+#define F_CL4_PAR_RDQUEUE_ERROR_EN V_CL4_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL3_PAR_RDQUEUE_ERROR_EN 3
+#define V_CL3_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_CL3_PAR_RDQUEUE_ERROR_EN)
+#define F_CL3_PAR_RDQUEUE_ERROR_EN V_CL3_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL2_PAR_RDQUEUE_ERROR_EN 2
+#define V_CL2_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_CL2_PAR_RDQUEUE_ERROR_EN)
+#define F_CL2_PAR_RDQUEUE_ERROR_EN V_CL2_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL1_PAR_RDQUEUE_ERROR_EN 1
+#define V_CL1_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_CL1_PAR_RDQUEUE_ERROR_EN)
+#define F_CL1_PAR_RDQUEUE_ERROR_EN V_CL1_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL0_PAR_RDQUEUE_ERROR_EN 0
+#define V_CL0_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_CL0_PAR_RDQUEUE_ERROR_EN)
+#define F_CL0_PAR_RDQUEUE_ERROR_EN V_CL0_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define A_MA_PARITY_ERROR_STATUS 0x77f4
+
+#define S_TP_DMARBT_PAR_ERROR 31
+#define V_TP_DMARBT_PAR_ERROR(x) ((x) << S_TP_DMARBT_PAR_ERROR)
+#define F_TP_DMARBT_PAR_ERROR V_TP_DMARBT_PAR_ERROR(1U)
+
+#define S_LOGIC_FIFO_PAR_ERROR 30
+#define V_LOGIC_FIFO_PAR_ERROR(x) ((x) << S_LOGIC_FIFO_PAR_ERROR)
+#define F_LOGIC_FIFO_PAR_ERROR V_LOGIC_FIFO_PAR_ERROR(1U)
+
+#define S_ARB3_PAR_WRQUEUE_ERROR 29
+#define V_ARB3_PAR_WRQUEUE_ERROR(x) ((x) << S_ARB3_PAR_WRQUEUE_ERROR)
+#define F_ARB3_PAR_WRQUEUE_ERROR V_ARB3_PAR_WRQUEUE_ERROR(1U)
+
+#define S_ARB2_PAR_WRQUEUE_ERROR 28
+#define V_ARB2_PAR_WRQUEUE_ERROR(x) ((x) << S_ARB2_PAR_WRQUEUE_ERROR)
+#define F_ARB2_PAR_WRQUEUE_ERROR V_ARB2_PAR_WRQUEUE_ERROR(1U)
+
+#define S_ARB1_PAR_WRQUEUE_ERROR 27
+#define V_ARB1_PAR_WRQUEUE_ERROR(x) ((x) << S_ARB1_PAR_WRQUEUE_ERROR)
+#define F_ARB1_PAR_WRQUEUE_ERROR V_ARB1_PAR_WRQUEUE_ERROR(1U)
+
+#define S_ARB0_PAR_WRQUEUE_ERROR 26
+#define V_ARB0_PAR_WRQUEUE_ERROR(x) ((x) << S_ARB0_PAR_WRQUEUE_ERROR)
+#define F_ARB0_PAR_WRQUEUE_ERROR V_ARB0_PAR_WRQUEUE_ERROR(1U)
+
+#define S_ARB3_PAR_RDQUEUE_ERROR 25
+#define V_ARB3_PAR_RDQUEUE_ERROR(x) ((x) << S_ARB3_PAR_RDQUEUE_ERROR)
+#define F_ARB3_PAR_RDQUEUE_ERROR V_ARB3_PAR_RDQUEUE_ERROR(1U)
+
+#define S_ARB2_PAR_RDQUEUE_ERROR 24
+#define V_ARB2_PAR_RDQUEUE_ERROR(x) ((x) << S_ARB2_PAR_RDQUEUE_ERROR)
+#define F_ARB2_PAR_RDQUEUE_ERROR V_ARB2_PAR_RDQUEUE_ERROR(1U)
+
+#define S_ARB1_PAR_RDQUEUE_ERROR 23
+#define V_ARB1_PAR_RDQUEUE_ERROR(x) ((x) << S_ARB1_PAR_RDQUEUE_ERROR)
+#define F_ARB1_PAR_RDQUEUE_ERROR V_ARB1_PAR_RDQUEUE_ERROR(1U)
+
+#define S_ARB0_PAR_RDQUEUE_ERROR 22
+#define V_ARB0_PAR_RDQUEUE_ERROR(x) ((x) << S_ARB0_PAR_RDQUEUE_ERROR)
+#define F_ARB0_PAR_RDQUEUE_ERROR V_ARB0_PAR_RDQUEUE_ERROR(1U)
+
+#define S_CL10_PAR_WRQUEUE_ERROR 21
+#define V_CL10_PAR_WRQUEUE_ERROR(x) ((x) << S_CL10_PAR_WRQUEUE_ERROR)
+#define F_CL10_PAR_WRQUEUE_ERROR V_CL10_PAR_WRQUEUE_ERROR(1U)
+
+#define S_CL9_PAR_WRQUEUE_ERROR 20
+#define V_CL9_PAR_WRQUEUE_ERROR(x) ((x) << S_CL9_PAR_WRQUEUE_ERROR)
+#define F_CL9_PAR_WRQUEUE_ERROR V_CL9_PAR_WRQUEUE_ERROR(1U)
+
+#define S_CL8_PAR_WRQUEUE_ERROR 19
+#define V_CL8_PAR_WRQUEUE_ERROR(x) ((x) << S_CL8_PAR_WRQUEUE_ERROR)
+#define F_CL8_PAR_WRQUEUE_ERROR V_CL8_PAR_WRQUEUE_ERROR(1U)
+
+#define S_CL7_PAR_WRQUEUE_ERROR 18
+#define V_CL7_PAR_WRQUEUE_ERROR(x) ((x) << S_CL7_PAR_WRQUEUE_ERROR)
+#define F_CL7_PAR_WRQUEUE_ERROR V_CL7_PAR_WRQUEUE_ERROR(1U)
+
+#define S_CL6_PAR_WRQUEUE_ERROR 17
+#define V_CL6_PAR_WRQUEUE_ERROR(x) ((x) << S_CL6_PAR_WRQUEUE_ERROR)
+#define F_CL6_PAR_WRQUEUE_ERROR V_CL6_PAR_WRQUEUE_ERROR(1U)
+
+#define S_CL5_PAR_WRQUEUE_ERROR 16
+#define V_CL5_PAR_WRQUEUE_ERROR(x) ((x) << S_CL5_PAR_WRQUEUE_ERROR)
+#define F_CL5_PAR_WRQUEUE_ERROR V_CL5_PAR_WRQUEUE_ERROR(1U)
+
+#define S_CL4_PAR_WRQUEUE_ERROR 15
+#define V_CL4_PAR_WRQUEUE_ERROR(x) ((x) << S_CL4_PAR_WRQUEUE_ERROR)
+#define F_CL4_PAR_WRQUEUE_ERROR V_CL4_PAR_WRQUEUE_ERROR(1U)
+
+#define S_CL3_PAR_WRQUEUE_ERROR 14
+#define V_CL3_PAR_WRQUEUE_ERROR(x) ((x) << S_CL3_PAR_WRQUEUE_ERROR)
+#define F_CL3_PAR_WRQUEUE_ERROR V_CL3_PAR_WRQUEUE_ERROR(1U)
+
+#define S_CL2_PAR_WRQUEUE_ERROR 13
+#define V_CL2_PAR_WRQUEUE_ERROR(x) ((x) << S_CL2_PAR_WRQUEUE_ERROR)
+#define F_CL2_PAR_WRQUEUE_ERROR V_CL2_PAR_WRQUEUE_ERROR(1U)
+
+#define S_CL1_PAR_WRQUEUE_ERROR 12
+#define V_CL1_PAR_WRQUEUE_ERROR(x) ((x) << S_CL1_PAR_WRQUEUE_ERROR)
+#define F_CL1_PAR_WRQUEUE_ERROR V_CL1_PAR_WRQUEUE_ERROR(1U)
+
+#define S_CL0_PAR_WRQUEUE_ERROR 11
+#define V_CL0_PAR_WRQUEUE_ERROR(x) ((x) << S_CL0_PAR_WRQUEUE_ERROR)
+#define F_CL0_PAR_WRQUEUE_ERROR V_CL0_PAR_WRQUEUE_ERROR(1U)
+
+#define S_CL10_PAR_RDQUEUE_ERROR 10
+#define V_CL10_PAR_RDQUEUE_ERROR(x) ((x) << S_CL10_PAR_RDQUEUE_ERROR)
+#define F_CL10_PAR_RDQUEUE_ERROR V_CL10_PAR_RDQUEUE_ERROR(1U)
+
+#define S_CL9_PAR_RDQUEUE_ERROR 9
+#define V_CL9_PAR_RDQUEUE_ERROR(x) ((x) << S_CL9_PAR_RDQUEUE_ERROR)
+#define F_CL9_PAR_RDQUEUE_ERROR V_CL9_PAR_RDQUEUE_ERROR(1U)
+
+#define S_CL8_PAR_RDQUEUE_ERROR 8
+#define V_CL8_PAR_RDQUEUE_ERROR(x) ((x) << S_CL8_PAR_RDQUEUE_ERROR)
+#define F_CL8_PAR_RDQUEUE_ERROR V_CL8_PAR_RDQUEUE_ERROR(1U)
+
+#define S_CL7_PAR_RDQUEUE_ERROR 7
+#define V_CL7_PAR_RDQUEUE_ERROR(x) ((x) << S_CL7_PAR_RDQUEUE_ERROR)
+#define F_CL7_PAR_RDQUEUE_ERROR V_CL7_PAR_RDQUEUE_ERROR(1U)
+
+#define S_CL6_PAR_RDQUEUE_ERROR 6
+#define V_CL6_PAR_RDQUEUE_ERROR(x) ((x) << S_CL6_PAR_RDQUEUE_ERROR)
+#define F_CL6_PAR_RDQUEUE_ERROR V_CL6_PAR_RDQUEUE_ERROR(1U)
+
+#define S_CL5_PAR_RDQUEUE_ERROR 5
+#define V_CL5_PAR_RDQUEUE_ERROR(x) ((x) << S_CL5_PAR_RDQUEUE_ERROR)
+#define F_CL5_PAR_RDQUEUE_ERROR V_CL5_PAR_RDQUEUE_ERROR(1U)
+
+#define S_CL4_PAR_RDQUEUE_ERROR 4
+#define V_CL4_PAR_RDQUEUE_ERROR(x) ((x) << S_CL4_PAR_RDQUEUE_ERROR)
+#define F_CL4_PAR_RDQUEUE_ERROR V_CL4_PAR_RDQUEUE_ERROR(1U)
+
+#define S_CL3_PAR_RDQUEUE_ERROR 3
+#define V_CL3_PAR_RDQUEUE_ERROR(x) ((x) << S_CL3_PAR_RDQUEUE_ERROR)
+#define F_CL3_PAR_RDQUEUE_ERROR V_CL3_PAR_RDQUEUE_ERROR(1U)
+
+#define S_CL2_PAR_RDQUEUE_ERROR 2
+#define V_CL2_PAR_RDQUEUE_ERROR(x) ((x) << S_CL2_PAR_RDQUEUE_ERROR)
+#define F_CL2_PAR_RDQUEUE_ERROR V_CL2_PAR_RDQUEUE_ERROR(1U)
+
+#define S_CL1_PAR_RDQUEUE_ERROR 1
+#define V_CL1_PAR_RDQUEUE_ERROR(x) ((x) << S_CL1_PAR_RDQUEUE_ERROR)
+#define F_CL1_PAR_RDQUEUE_ERROR V_CL1_PAR_RDQUEUE_ERROR(1U)
+
+#define S_CL0_PAR_RDQUEUE_ERROR 0
+#define V_CL0_PAR_RDQUEUE_ERROR(x) ((x) << S_CL0_PAR_RDQUEUE_ERROR)
+#define F_CL0_PAR_RDQUEUE_ERROR V_CL0_PAR_RDQUEUE_ERROR(1U)
+
+#define A_MA_SGE_PCIE_COHERANCY_CTRL 0x77f8
+
+#define S_BONUS_REG 6
+#define M_BONUS_REG 0x3ffffffU
+#define V_BONUS_REG(x) ((x) << S_BONUS_REG)
+#define G_BONUS_REG(x) (((x) >> S_BONUS_REG) & M_BONUS_REG)
+
+#define S_COHERANCY_CMD_TYPE 4
+#define M_COHERANCY_CMD_TYPE 0x3U
+#define V_COHERANCY_CMD_TYPE(x) ((x) << S_COHERANCY_CMD_TYPE)
+#define G_COHERANCY_CMD_TYPE(x) (((x) >> S_COHERANCY_CMD_TYPE) & M_COHERANCY_CMD_TYPE)
+
+#define S_COHERANCY_THREAD_NUM 1
+#define M_COHERANCY_THREAD_NUM 0x7U
+#define V_COHERANCY_THREAD_NUM(x) ((x) << S_COHERANCY_THREAD_NUM)
+#define G_COHERANCY_THREAD_NUM(x) (((x) >> S_COHERANCY_THREAD_NUM) & M_COHERANCY_THREAD_NUM)
+
+#define S_COHERANCY_ENABLE 0
+#define V_COHERANCY_ENABLE(x) ((x) << S_COHERANCY_ENABLE)
+#define F_COHERANCY_ENABLE V_COHERANCY_ENABLE(1U)
+
+#define A_MA_ERROR_ENABLE 0x77fc
+
+#define S_UE_ENABLE 0
+#define V_UE_ENABLE(x) ((x) << S_UE_ENABLE)
+#define F_UE_ENABLE V_UE_ENABLE(1U)
+
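/*
 * Editor's note (illustrative sketch, not part of this commit): every
 * register in this header uses the same accessor family -- S_<FIELD> is the
 * bit offset, M_<FIELD> the unshifted mask, V_<FIELD>(x) places a value into
 * the field, G_<FIELD>(x) extracts it, and F_<FIELD> is the single-bit flag
 * form.  A minimal sketch using the A_MA_SGE_PCIE_COHERANCY_CTRL fields
 * defined above; the function and variable names are made up for
 * illustration:
 */
static inline uint32_t
coherancy_ctrl_example(void)
{
	uint32_t v;

	v = V_COHERANCY_THREAD_NUM(3) | F_COHERANCY_ENABLE;	/* build a value */
	return (G_COHERANCY_THREAD_NUM(v));			/* extract: yields 3 */
}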
+/* registers for module EDC_0 */
+#define EDC_0_BASE_ADDR 0x7900
+
+#define A_EDC_REF 0x7900
+
+#define S_EDC_INST_NUM 18
+#define V_EDC_INST_NUM(x) ((x) << S_EDC_INST_NUM)
+#define F_EDC_INST_NUM V_EDC_INST_NUM(1U)
+
+#define S_ENABLE_PERF 17
+#define V_ENABLE_PERF(x) ((x) << S_ENABLE_PERF)
+#define F_ENABLE_PERF V_ENABLE_PERF(1U)
+
+#define S_ECC_BYPASS 16
+#define V_ECC_BYPASS(x) ((x) << S_ECC_BYPASS)
+#define F_ECC_BYPASS V_ECC_BYPASS(1U)
+
+#define S_REFFREQ 0
+#define M_REFFREQ 0xffffU
+#define V_REFFREQ(x) ((x) << S_REFFREQ)
+#define G_REFFREQ(x) (((x) >> S_REFFREQ) & M_REFFREQ)
+
+#define A_EDC_BIST_CMD 0x7904
+#define A_EDC_BIST_CMD_ADDR 0x7908
+#define A_EDC_BIST_CMD_LEN 0x790c
+#define A_EDC_BIST_DATA_PATTERN 0x7910
+#define A_EDC_BIST_USER_WDATA0 0x7914
+#define A_EDC_BIST_USER_WDATA1 0x7918
+#define A_EDC_BIST_USER_WDATA2 0x791c
+#define A_EDC_BIST_NUM_ERR 0x7920
+#define A_EDC_BIST_ERR_FIRST_ADDR 0x7924
+#define A_EDC_BIST_STATUS_RDATA 0x7928
+#define A_EDC_PAR_ENABLE 0x7970
+
+#define S_ECC_UE 2
+#define V_ECC_UE(x) ((x) << S_ECC_UE)
+#define F_ECC_UE V_ECC_UE(1U)
+
+#define S_ECC_CE 1
+#define V_ECC_CE(x) ((x) << S_ECC_CE)
+#define F_ECC_CE V_ECC_CE(1U)
+
+#define A_EDC_INT_ENABLE 0x7974
+#define A_EDC_INT_CAUSE 0x7978
+
+#define S_ECC_UE_PAR 5
+#define V_ECC_UE_PAR(x) ((x) << S_ECC_UE_PAR)
+#define F_ECC_UE_PAR V_ECC_UE_PAR(1U)
+
+#define S_ECC_CE_PAR 4
+#define V_ECC_CE_PAR(x) ((x) << S_ECC_CE_PAR)
+#define F_ECC_CE_PAR V_ECC_CE_PAR(1U)
+
+#define S_PERR_PAR_CAUSE 3
+#define V_PERR_PAR_CAUSE(x) ((x) << S_PERR_PAR_CAUSE)
+#define F_PERR_PAR_CAUSE V_PERR_PAR_CAUSE(1U)
+
+#define A_EDC_ECC_STATUS 0x797c
+
+/* registers for module EDC_1 */
+#define EDC_1_BASE_ADDR 0x7980
+
+/* registers for module HMA */
+#define HMA_BASE_ADDR 0x7a00
+
+/* registers for module CIM */
+#define CIM_BASE_ADDR 0x7b00
+
+#define A_CIM_VF_EXT_MAILBOX_CTRL 0x0
+
+#define S_VFMBGENERIC 4
+#define M_VFMBGENERIC 0xfU
+#define V_VFMBGENERIC(x) ((x) << S_VFMBGENERIC)
+#define G_VFMBGENERIC(x) (((x) >> S_VFMBGENERIC) & M_VFMBGENERIC)
+
+#define A_CIM_VF_EXT_MAILBOX_STATUS 0x4
+
+#define S_MBVFREADY 0
+#define V_MBVFREADY(x) ((x) << S_MBVFREADY)
+#define F_MBVFREADY V_MBVFREADY(1U)
+
+#define A_CIM_PF_MAILBOX_DATA 0x240
+#define A_CIM_PF_MAILBOX_CTRL 0x280
+
+#define S_MBGENERIC 4
+#define M_MBGENERIC 0xfffffffU
+#define V_MBGENERIC(x) ((x) << S_MBGENERIC)
+#define G_MBGENERIC(x) (((x) >> S_MBGENERIC) & M_MBGENERIC)
+
+#define S_MBMSGVALID 3
+#define V_MBMSGVALID(x) ((x) << S_MBMSGVALID)
+#define F_MBMSGVALID V_MBMSGVALID(1U)
+
+#define S_MBINTREQ 2
+#define V_MBINTREQ(x) ((x) << S_MBINTREQ)
+#define F_MBINTREQ V_MBINTREQ(1U)
+
+#define S_MBOWNER 0
+#define M_MBOWNER 0x3U
+#define V_MBOWNER(x) ((x) << S_MBOWNER)
+#define G_MBOWNER(x) (((x) >> S_MBOWNER) & M_MBOWNER)
+
+#define A_CIM_PF_MAILBOX_ACC_STATUS 0x284
+
+#define S_MBWRBUSY 31
+#define V_MBWRBUSY(x) ((x) << S_MBWRBUSY)
+#define F_MBWRBUSY V_MBWRBUSY(1U)
+
+#define A_CIM_PF_HOST_INT_ENABLE 0x288
+
+#define S_MBMSGRDYINTEN 19
+#define V_MBMSGRDYINTEN(x) ((x) << S_MBMSGRDYINTEN)
+#define F_MBMSGRDYINTEN V_MBMSGRDYINTEN(1U)
+
+#define A_CIM_PF_HOST_INT_CAUSE 0x28c
+
+#define S_MBMSGRDYINT 19
+#define V_MBMSGRDYINT(x) ((x) << S_MBMSGRDYINT)
+#define F_MBMSGRDYINT V_MBMSGRDYINT(1U)
+
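/*
 * Editor's note (simplified sketch, not code from this commit): the PF
 * mailbox fields above are what the firmware-command path exercises.
 * t4_read_reg()/t4_write_reg() are assumed to be the driver's 32-bit
 * register accessors; the real handshake, with delays, retries and
 * timeouts, lives in t4_wr_mbox() and friends in t4_hw.c.
 */
static void
mailbox_post_example(struct adapter *adap)
{
	uint32_t ctl = t4_read_reg(adap, A_CIM_PF_MAILBOX_CTRL);

	if (G_MBOWNER(ctl) != 0 && !(ctl & F_MBMSGVALID)) {
		/*
		 * Mailbox owned and idle: the command would be copied into
		 * A_CIM_PF_MAILBOX_DATA here, then handed back with the
		 * message-valid bit set.
		 */
		t4_write_reg(adap, A_CIM_PF_MAILBOX_CTRL,
		    V_MBOWNER(1) | F_MBMSGVALID);
	}
}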
+#define A_CIM_BOOT_CFG 0x7b00
+
+#define S_BOOTADDR 8
+#define M_BOOTADDR 0xffffffU
+#define V_BOOTADDR(x) ((x) << S_BOOTADDR)
+#define G_BOOTADDR(x) (((x) >> S_BOOTADDR) & M_BOOTADDR)
+
+#define S_UPGEN 2
+#define M_UPGEN 0x3fU
+#define V_UPGEN(x) ((x) << S_UPGEN)
+#define G_UPGEN(x) (((x) >> S_UPGEN) & M_UPGEN)
+
+#define S_BOOTSDRAM 1
+#define V_BOOTSDRAM(x) ((x) << S_BOOTSDRAM)
+#define F_BOOTSDRAM V_BOOTSDRAM(1U)
+
+#define S_UPCRST 0
+#define V_UPCRST(x) ((x) << S_UPCRST)
+#define F_UPCRST V_UPCRST(1U)
+
+#define A_CIM_FLASH_BASE_ADDR 0x7b04
+
+#define S_FLASHBASEADDR 6
+#define M_FLASHBASEADDR 0x3ffffU
+#define V_FLASHBASEADDR(x) ((x) << S_FLASHBASEADDR)
+#define G_FLASHBASEADDR(x) (((x) >> S_FLASHBASEADDR) & M_FLASHBASEADDR)
+
+#define A_CIM_FLASH_ADDR_SIZE 0x7b08
+
+#define S_FLASHADDRSIZE 4
+#define M_FLASHADDRSIZE 0xfffffU
+#define V_FLASHADDRSIZE(x) ((x) << S_FLASHADDRSIZE)
+#define G_FLASHADDRSIZE(x) (((x) >> S_FLASHADDRSIZE) & M_FLASHADDRSIZE)
+
+#define A_CIM_EEPROM_BASE_ADDR 0x7b0c
+
+#define S_EEPROMBASEADDR 6
+#define M_EEPROMBASEADDR 0x3ffffU
+#define V_EEPROMBASEADDR(x) ((x) << S_EEPROMBASEADDR)
+#define G_EEPROMBASEADDR(x) (((x) >> S_EEPROMBASEADDR) & M_EEPROMBASEADDR)
+
+#define A_CIM_EEPROM_ADDR_SIZE 0x7b10
+
+#define S_EEPROMADDRSIZE 4
+#define M_EEPROMADDRSIZE 0xfffffU
+#define V_EEPROMADDRSIZE(x) ((x) << S_EEPROMADDRSIZE)
+#define G_EEPROMADDRSIZE(x) (((x) >> S_EEPROMADDRSIZE) & M_EEPROMADDRSIZE)
+
+#define A_CIM_SDRAM_BASE_ADDR 0x7b14
+
+#define S_SDRAMBASEADDR 6
+#define M_SDRAMBASEADDR 0x3ffffffU
+#define V_SDRAMBASEADDR(x) ((x) << S_SDRAMBASEADDR)
+#define G_SDRAMBASEADDR(x) (((x) >> S_SDRAMBASEADDR) & M_SDRAMBASEADDR)
+
+#define A_CIM_SDRAM_ADDR_SIZE 0x7b18
+
+#define S_SDRAMADDRSIZE 4
+#define M_SDRAMADDRSIZE 0xfffffffU
+#define V_SDRAMADDRSIZE(x) ((x) << S_SDRAMADDRSIZE)
+#define G_SDRAMADDRSIZE(x) (((x) >> S_SDRAMADDRSIZE) & M_SDRAMADDRSIZE)
+
+#define A_CIM_EXTMEM2_BASE_ADDR 0x7b1c
+
+#define S_EXTMEM2BASEADDR 6
+#define M_EXTMEM2BASEADDR 0x3ffffffU
+#define V_EXTMEM2BASEADDR(x) ((x) << S_EXTMEM2BASEADDR)
+#define G_EXTMEM2BASEADDR(x) (((x) >> S_EXTMEM2BASEADDR) & M_EXTMEM2BASEADDR)
+
+#define A_CIM_EXTMEM2_ADDR_SIZE 0x7b20
+
+#define S_EXTMEM2ADDRSIZE 4
+#define M_EXTMEM2ADDRSIZE 0xfffffffU
+#define V_EXTMEM2ADDRSIZE(x) ((x) << S_EXTMEM2ADDRSIZE)
+#define G_EXTMEM2ADDRSIZE(x) (((x) >> S_EXTMEM2ADDRSIZE) & M_EXTMEM2ADDRSIZE)
+
+#define A_CIM_UP_SPARE_INT 0x7b24
+
+#define S_TDEBUGINT 4
+#define V_TDEBUGINT(x) ((x) << S_TDEBUGINT)
+#define F_TDEBUGINT V_TDEBUGINT(1U)
+
+#define S_BOOTVECSEL 3
+#define V_BOOTVECSEL(x) ((x) << S_BOOTVECSEL)
+#define F_BOOTVECSEL V_BOOTVECSEL(1U)
+
+#define S_UPSPAREINT 0
+#define M_UPSPAREINT 0x7U
+#define V_UPSPAREINT(x) ((x) << S_UPSPAREINT)
+#define G_UPSPAREINT(x) (((x) >> S_UPSPAREINT) & M_UPSPAREINT)
+
+#define A_CIM_HOST_INT_ENABLE 0x7b28
+
+#define S_TIEQOUTPARERRINTEN 20
+#define V_TIEQOUTPARERRINTEN(x) ((x) << S_TIEQOUTPARERRINTEN)
+#define F_TIEQOUTPARERRINTEN V_TIEQOUTPARERRINTEN(1U)
+
+#define S_TIEQINPARERRINTEN 19
+#define V_TIEQINPARERRINTEN(x) ((x) << S_TIEQINPARERRINTEN)
+#define F_TIEQINPARERRINTEN V_TIEQINPARERRINTEN(1U)
+
+#define S_MBHOSTPARERR 18
+#define V_MBHOSTPARERR(x) ((x) << S_MBHOSTPARERR)
+#define F_MBHOSTPARERR V_MBHOSTPARERR(1U)
+
+#define S_MBUPPARERR 17
+#define V_MBUPPARERR(x) ((x) << S_MBUPPARERR)
+#define F_MBUPPARERR V_MBUPPARERR(1U)
+
+#define S_IBQTP0PARERR 16
+#define V_IBQTP0PARERR(x) ((x) << S_IBQTP0PARERR)
+#define F_IBQTP0PARERR V_IBQTP0PARERR(1U)
+
+#define S_IBQTP1PARERR 15
+#define V_IBQTP1PARERR(x) ((x) << S_IBQTP1PARERR)
+#define F_IBQTP1PARERR V_IBQTP1PARERR(1U)
+
+#define S_IBQULPPARERR 14
+#define V_IBQULPPARERR(x) ((x) << S_IBQULPPARERR)
+#define F_IBQULPPARERR V_IBQULPPARERR(1U)
+
+#define S_IBQSGELOPARERR 13
+#define V_IBQSGELOPARERR(x) ((x) << S_IBQSGELOPARERR)
+#define F_IBQSGELOPARERR V_IBQSGELOPARERR(1U)
+
+#define S_IBQSGEHIPARERR 12
+#define V_IBQSGEHIPARERR(x) ((x) << S_IBQSGEHIPARERR)
+#define F_IBQSGEHIPARERR V_IBQSGEHIPARERR(1U)
+
+#define S_IBQNCSIPARERR 11
+#define V_IBQNCSIPARERR(x) ((x) << S_IBQNCSIPARERR)
+#define F_IBQNCSIPARERR V_IBQNCSIPARERR(1U)
+
+#define S_OBQULP0PARERR 10
+#define V_OBQULP0PARERR(x) ((x) << S_OBQULP0PARERR)
+#define F_OBQULP0PARERR V_OBQULP0PARERR(1U)
+
+#define S_OBQULP1PARERR 9
+#define V_OBQULP1PARERR(x) ((x) << S_OBQULP1PARERR)
+#define F_OBQULP1PARERR V_OBQULP1PARERR(1U)
+
+#define S_OBQULP2PARERR 8
+#define V_OBQULP2PARERR(x) ((x) << S_OBQULP2PARERR)
+#define F_OBQULP2PARERR V_OBQULP2PARERR(1U)
+
+#define S_OBQULP3PARERR 7
+#define V_OBQULP3PARERR(x) ((x) << S_OBQULP3PARERR)
+#define F_OBQULP3PARERR V_OBQULP3PARERR(1U)
+
+#define S_OBQSGEPARERR 6
+#define V_OBQSGEPARERR(x) ((x) << S_OBQSGEPARERR)
+#define F_OBQSGEPARERR V_OBQSGEPARERR(1U)
+
+#define S_OBQNCSIPARERR 5
+#define V_OBQNCSIPARERR(x) ((x) << S_OBQNCSIPARERR)
+#define F_OBQNCSIPARERR V_OBQNCSIPARERR(1U)
+
+#define S_TIMER1INTEN 3
+#define V_TIMER1INTEN(x) ((x) << S_TIMER1INTEN)
+#define F_TIMER1INTEN V_TIMER1INTEN(1U)
+
+#define S_TIMER0INTEN 2
+#define V_TIMER0INTEN(x) ((x) << S_TIMER0INTEN)
+#define F_TIMER0INTEN V_TIMER0INTEN(1U)
+
+#define S_PREFDROPINTEN 1
+#define V_PREFDROPINTEN(x) ((x) << S_PREFDROPINTEN)
+#define F_PREFDROPINTEN V_PREFDROPINTEN(1U)
+
+#define A_CIM_HOST_INT_CAUSE 0x7b2c
+
+#define S_TIEQOUTPARERRINT 20
+#define V_TIEQOUTPARERRINT(x) ((x) << S_TIEQOUTPARERRINT)
+#define F_TIEQOUTPARERRINT V_TIEQOUTPARERRINT(1U)
+
+#define S_TIEQINPARERRINT 19
+#define V_TIEQINPARERRINT(x) ((x) << S_TIEQINPARERRINT)
+#define F_TIEQINPARERRINT V_TIEQINPARERRINT(1U)
+
+#define S_TIMER1INT 3
+#define V_TIMER1INT(x) ((x) << S_TIMER1INT)
+#define F_TIMER1INT V_TIMER1INT(1U)
+
+#define S_TIMER0INT 2
+#define V_TIMER0INT(x) ((x) << S_TIMER0INT)
+#define F_TIMER0INT V_TIMER0INT(1U)
+
+#define S_PREFDROPINT 1
+#define V_PREFDROPINT(x) ((x) << S_PREFDROPINT)
+#define F_PREFDROPINT V_PREFDROPINT(1U)
+
+#define S_UPACCNONZERO 0
+#define V_UPACCNONZERO(x) ((x) << S_UPACCNONZERO)
+#define F_UPACCNONZERO V_UPACCNONZERO(1U)
+
+#define A_CIM_HOST_UPACC_INT_ENABLE 0x7b30
+
+#define S_EEPROMWRINTEN 30
+#define V_EEPROMWRINTEN(x) ((x) << S_EEPROMWRINTEN)
+#define F_EEPROMWRINTEN V_EEPROMWRINTEN(1U)
+
+#define S_TIMEOUTMAINTEN 29
+#define V_TIMEOUTMAINTEN(x) ((x) << S_TIMEOUTMAINTEN)
+#define F_TIMEOUTMAINTEN V_TIMEOUTMAINTEN(1U)
+
+#define S_TIMEOUTINTEN 28
+#define V_TIMEOUTINTEN(x) ((x) << S_TIMEOUTINTEN)
+#define F_TIMEOUTINTEN V_TIMEOUTINTEN(1U)
+
+#define S_RSPOVRLOOKUPINTEN 27
+#define V_RSPOVRLOOKUPINTEN(x) ((x) << S_RSPOVRLOOKUPINTEN)
+#define F_RSPOVRLOOKUPINTEN V_RSPOVRLOOKUPINTEN(1U)
+
+#define S_REQOVRLOOKUPINTEN 26
+#define V_REQOVRLOOKUPINTEN(x) ((x) << S_REQOVRLOOKUPINTEN)
+#define F_REQOVRLOOKUPINTEN V_REQOVRLOOKUPINTEN(1U)
+
+#define S_BLKWRPLINTEN 25
+#define V_BLKWRPLINTEN(x) ((x) << S_BLKWRPLINTEN)
+#define F_BLKWRPLINTEN V_BLKWRPLINTEN(1U)
+
+#define S_BLKRDPLINTEN 24
+#define V_BLKRDPLINTEN(x) ((x) << S_BLKRDPLINTEN)
+#define F_BLKRDPLINTEN V_BLKRDPLINTEN(1U)
+
+#define S_SGLWRPLINTEN 23
+#define V_SGLWRPLINTEN(x) ((x) << S_SGLWRPLINTEN)
+#define F_SGLWRPLINTEN V_SGLWRPLINTEN(1U)
+
+#define S_SGLRDPLINTEN 22
+#define V_SGLRDPLINTEN(x) ((x) << S_SGLRDPLINTEN)
+#define F_SGLRDPLINTEN V_SGLRDPLINTEN(1U)
+
+#define S_BLKWRCTLINTEN 21
+#define V_BLKWRCTLINTEN(x) ((x) << S_BLKWRCTLINTEN)
+#define F_BLKWRCTLINTEN V_BLKWRCTLINTEN(1U)
+
+#define S_BLKRDCTLINTEN 20
+#define V_BLKRDCTLINTEN(x) ((x) << S_BLKRDCTLINTEN)
+#define F_BLKRDCTLINTEN V_BLKRDCTLINTEN(1U)
+
+#define S_SGLWRCTLINTEN 19
+#define V_SGLWRCTLINTEN(x) ((x) << S_SGLWRCTLINTEN)
+#define F_SGLWRCTLINTEN V_SGLWRCTLINTEN(1U)
+
+#define S_SGLRDCTLINTEN 18
+#define V_SGLRDCTLINTEN(x) ((x) << S_SGLRDCTLINTEN)
+#define F_SGLRDCTLINTEN V_SGLRDCTLINTEN(1U)
+
+#define S_BLKWREEPROMINTEN 17
+#define V_BLKWREEPROMINTEN(x) ((x) << S_BLKWREEPROMINTEN)
+#define F_BLKWREEPROMINTEN V_BLKWREEPROMINTEN(1U)
+
+#define S_BLKRDEEPROMINTEN 16
+#define V_BLKRDEEPROMINTEN(x) ((x) << S_BLKRDEEPROMINTEN)
+#define F_BLKRDEEPROMINTEN V_BLKRDEEPROMINTEN(1U)
+
+#define S_SGLWREEPROMINTEN 15
+#define V_SGLWREEPROMINTEN(x) ((x) << S_SGLWREEPROMINTEN)
+#define F_SGLWREEPROMINTEN V_SGLWREEPROMINTEN(1U)
+
+#define S_SGLRDEEPROMINTEN 14
+#define V_SGLRDEEPROMINTEN(x) ((x) << S_SGLRDEEPROMINTEN)
+#define F_SGLRDEEPROMINTEN V_SGLRDEEPROMINTEN(1U)
+
+#define S_BLKWRFLASHINTEN 13
+#define V_BLKWRFLASHINTEN(x) ((x) << S_BLKWRFLASHINTEN)
+#define F_BLKWRFLASHINTEN V_BLKWRFLASHINTEN(1U)
+
+#define S_BLKRDFLASHINTEN 12
+#define V_BLKRDFLASHINTEN(x) ((x) << S_BLKRDFLASHINTEN)
+#define F_BLKRDFLASHINTEN V_BLKRDFLASHINTEN(1U)
+
+#define S_SGLWRFLASHINTEN 11
+#define V_SGLWRFLASHINTEN(x) ((x) << S_SGLWRFLASHINTEN)
+#define F_SGLWRFLASHINTEN V_SGLWRFLASHINTEN(1U)
+
+#define S_SGLRDFLASHINTEN 10
+#define V_SGLRDFLASHINTEN(x) ((x) << S_SGLRDFLASHINTEN)
+#define F_SGLRDFLASHINTEN V_SGLRDFLASHINTEN(1U)
+
+#define S_BLKWRBOOTINTEN 9
+#define V_BLKWRBOOTINTEN(x) ((x) << S_BLKWRBOOTINTEN)
+#define F_BLKWRBOOTINTEN V_BLKWRBOOTINTEN(1U)
+
+#define S_BLKRDBOOTINTEN 8
+#define V_BLKRDBOOTINTEN(x) ((x) << S_BLKRDBOOTINTEN)
+#define F_BLKRDBOOTINTEN V_BLKRDBOOTINTEN(1U)
+
+#define S_SGLWRBOOTINTEN 7
+#define V_SGLWRBOOTINTEN(x) ((x) << S_SGLWRBOOTINTEN)
+#define F_SGLWRBOOTINTEN V_SGLWRBOOTINTEN(1U)
+
+#define S_SGLRDBOOTINTEN 6
+#define V_SGLRDBOOTINTEN(x) ((x) << S_SGLRDBOOTINTEN)
+#define F_SGLRDBOOTINTEN V_SGLRDBOOTINTEN(1U)
+
+#define S_ILLWRBEINTEN 5
+#define V_ILLWRBEINTEN(x) ((x) << S_ILLWRBEINTEN)
+#define F_ILLWRBEINTEN V_ILLWRBEINTEN(1U)
+
+#define S_ILLRDBEINTEN 4
+#define V_ILLRDBEINTEN(x) ((x) << S_ILLRDBEINTEN)
+#define F_ILLRDBEINTEN V_ILLRDBEINTEN(1U)
+
+#define S_ILLRDINTEN 3
+#define V_ILLRDINTEN(x) ((x) << S_ILLRDINTEN)
+#define F_ILLRDINTEN V_ILLRDINTEN(1U)
+
+#define S_ILLWRINTEN 2
+#define V_ILLWRINTEN(x) ((x) << S_ILLWRINTEN)
+#define F_ILLWRINTEN V_ILLWRINTEN(1U)
+
+#define S_ILLTRANSINTEN 1
+#define V_ILLTRANSINTEN(x) ((x) << S_ILLTRANSINTEN)
+#define F_ILLTRANSINTEN V_ILLTRANSINTEN(1U)
+
+#define S_RSVDSPACEINTEN 0
+#define V_RSVDSPACEINTEN(x) ((x) << S_RSVDSPACEINTEN)
+#define F_RSVDSPACEINTEN V_RSVDSPACEINTEN(1U)
+
+#define A_CIM_HOST_UPACC_INT_CAUSE 0x7b34
+
+#define S_EEPROMWRINT 30
+#define V_EEPROMWRINT(x) ((x) << S_EEPROMWRINT)
+#define F_EEPROMWRINT V_EEPROMWRINT(1U)
+
+#define S_TIMEOUTMAINT 29
+#define V_TIMEOUTMAINT(x) ((x) << S_TIMEOUTMAINT)
+#define F_TIMEOUTMAINT V_TIMEOUTMAINT(1U)
+
+#define S_TIMEOUTINT 28
+#define V_TIMEOUTINT(x) ((x) << S_TIMEOUTINT)
+#define F_TIMEOUTINT V_TIMEOUTINT(1U)
+
+#define S_RSPOVRLOOKUPINT 27
+#define V_RSPOVRLOOKUPINT(x) ((x) << S_RSPOVRLOOKUPINT)
+#define F_RSPOVRLOOKUPINT V_RSPOVRLOOKUPINT(1U)
+
+#define S_REQOVRLOOKUPINT 26
+#define V_REQOVRLOOKUPINT(x) ((x) << S_REQOVRLOOKUPINT)
+#define F_REQOVRLOOKUPINT V_REQOVRLOOKUPINT(1U)
+
+#define S_BLKWRPLINT 25
+#define V_BLKWRPLINT(x) ((x) << S_BLKWRPLINT)
+#define F_BLKWRPLINT V_BLKWRPLINT(1U)
+
+#define S_BLKRDPLINT 24
+#define V_BLKRDPLINT(x) ((x) << S_BLKRDPLINT)
+#define F_BLKRDPLINT V_BLKRDPLINT(1U)
+
+#define S_SGLWRPLINT 23
+#define V_SGLWRPLINT(x) ((x) << S_SGLWRPLINT)
+#define F_SGLWRPLINT V_SGLWRPLINT(1U)
+
+#define S_SGLRDPLINT 22
+#define V_SGLRDPLINT(x) ((x) << S_SGLRDPLINT)
+#define F_SGLRDPLINT V_SGLRDPLINT(1U)
+
+#define S_BLKWRCTLINT 21
+#define V_BLKWRCTLINT(x) ((x) << S_BLKWRCTLINT)
+#define F_BLKWRCTLINT V_BLKWRCTLINT(1U)
+
+#define S_BLKRDCTLINT 20
+#define V_BLKRDCTLINT(x) ((x) << S_BLKRDCTLINT)
+#define F_BLKRDCTLINT V_BLKRDCTLINT(1U)
+
+#define S_SGLWRCTLINT 19
+#define V_SGLWRCTLINT(x) ((x) << S_SGLWRCTLINT)
+#define F_SGLWRCTLINT V_SGLWRCTLINT(1U)
+
+#define S_SGLRDCTLINT 18
+#define V_SGLRDCTLINT(x) ((x) << S_SGLRDCTLINT)
+#define F_SGLRDCTLINT V_SGLRDCTLINT(1U)
+
+#define S_BLKWREEPROMINT 17
+#define V_BLKWREEPROMINT(x) ((x) << S_BLKWREEPROMINT)
+#define F_BLKWREEPROMINT V_BLKWREEPROMINT(1U)
+
+#define S_BLKRDEEPROMINT 16
+#define V_BLKRDEEPROMINT(x) ((x) << S_BLKRDEEPROMINT)
+#define F_BLKRDEEPROMINT V_BLKRDEEPROMINT(1U)
+
+#define S_SGLWREEPROMINT 15
+#define V_SGLWREEPROMINT(x) ((x) << S_SGLWREEPROMINT)
+#define F_SGLWREEPROMINT V_SGLWREEPROMINT(1U)
+
+#define S_SGLRDEEPROMINT 14
+#define V_SGLRDEEPROMINT(x) ((x) << S_SGLRDEEPROMINT)
+#define F_SGLRDEEPROMINT V_SGLRDEEPROMINT(1U)
+
+#define S_BLKWRFLASHINT 13
+#define V_BLKWRFLASHINT(x) ((x) << S_BLKWRFLASHINT)
+#define F_BLKWRFLASHINT V_BLKWRFLASHINT(1U)
+
+#define S_BLKRDFLASHINT 12
+#define V_BLKRDFLASHINT(x) ((x) << S_BLKRDFLASHINT)
+#define F_BLKRDFLASHINT V_BLKRDFLASHINT(1U)
+
+#define S_SGLWRFLASHINT 11
+#define V_SGLWRFLASHINT(x) ((x) << S_SGLWRFLASHINT)
+#define F_SGLWRFLASHINT V_SGLWRFLASHINT(1U)
+
+#define S_SGLRDFLASHINT 10
+#define V_SGLRDFLASHINT(x) ((x) << S_SGLRDFLASHINT)
+#define F_SGLRDFLASHINT V_SGLRDFLASHINT(1U)
+
+#define S_BLKWRBOOTINT 9
+#define V_BLKWRBOOTINT(x) ((x) << S_BLKWRBOOTINT)
+#define F_BLKWRBOOTINT V_BLKWRBOOTINT(1U)
+
+#define S_BLKRDBOOTINT 8
+#define V_BLKRDBOOTINT(x) ((x) << S_BLKRDBOOTINT)
+#define F_BLKRDBOOTINT V_BLKRDBOOTINT(1U)
+
+#define S_SGLWRBOOTINT 7
+#define V_SGLWRBOOTINT(x) ((x) << S_SGLWRBOOTINT)
+#define F_SGLWRBOOTINT V_SGLWRBOOTINT(1U)
+
+#define S_SGLRDBOOTINT 6
+#define V_SGLRDBOOTINT(x) ((x) << S_SGLRDBOOTINT)
+#define F_SGLRDBOOTINT V_SGLRDBOOTINT(1U)
+
+#define S_ILLWRBEINT 5
+#define V_ILLWRBEINT(x) ((x) << S_ILLWRBEINT)
+#define F_ILLWRBEINT V_ILLWRBEINT(1U)
+
+#define S_ILLRDBEINT 4
+#define V_ILLRDBEINT(x) ((x) << S_ILLRDBEINT)
+#define F_ILLRDBEINT V_ILLRDBEINT(1U)
+
+#define S_ILLRDINT 3
+#define V_ILLRDINT(x) ((x) << S_ILLRDINT)
+#define F_ILLRDINT V_ILLRDINT(1U)
+
+#define S_ILLWRINT 2
+#define V_ILLWRINT(x) ((x) << S_ILLWRINT)
+#define F_ILLWRINT V_ILLWRINT(1U)
+
+#define S_ILLTRANSINT 1
+#define V_ILLTRANSINT(x) ((x) << S_ILLTRANSINT)
+#define F_ILLTRANSINT V_ILLTRANSINT(1U)
+
+#define S_RSVDSPACEINT 0
+#define V_RSVDSPACEINT(x) ((x) << S_RSVDSPACEINT)
+#define F_RSVDSPACEINT V_RSVDSPACEINT(1U)
+
+#define A_CIM_UP_INT_ENABLE 0x7b38
+
+#define S_MSTPLINTEN 4
+#define V_MSTPLINTEN(x) ((x) << S_MSTPLINTEN)
+#define F_MSTPLINTEN V_MSTPLINTEN(1U)
+
+#define A_CIM_UP_INT_CAUSE 0x7b3c
+
+#define S_MSTPLINT 4
+#define V_MSTPLINT(x) ((x) << S_MSTPLINT)
+#define F_MSTPLINT V_MSTPLINT(1U)
+
+#define A_CIM_UP_ACC_INT_ENABLE 0x7b40
+#define A_CIM_UP_ACC_INT_CAUSE 0x7b44
+#define A_CIM_QUEUE_CONFIG_REF 0x7b48
+
+#define S_OBQSELECT 4
+#define V_OBQSELECT(x) ((x) << S_OBQSELECT)
+#define F_OBQSELECT V_OBQSELECT(1U)
+
+#define S_IBQSELECT 3
+#define V_IBQSELECT(x) ((x) << S_IBQSELECT)
+#define F_IBQSELECT V_IBQSELECT(1U)
+
+#define S_QUENUMSELECT 0
+#define M_QUENUMSELECT 0x7U
+#define V_QUENUMSELECT(x) ((x) << S_QUENUMSELECT)
+#define G_QUENUMSELECT(x) (((x) >> S_QUENUMSELECT) & M_QUENUMSELECT)
+
+#define A_CIM_QUEUE_CONFIG_CTRL 0x7b4c
+
+#define S_CIMQSIZE 24
+#define M_CIMQSIZE 0x3fU
+#define V_CIMQSIZE(x) ((x) << S_CIMQSIZE)
+#define G_CIMQSIZE(x) (((x) >> S_CIMQSIZE) & M_CIMQSIZE)
+
+#define S_CIMQBASE 16
+#define M_CIMQBASE 0x3fU
+#define V_CIMQBASE(x) ((x) << S_CIMQBASE)
+#define G_CIMQBASE(x) (((x) >> S_CIMQBASE) & M_CIMQBASE)
+
+#define S_CIMQDBG8BEN 9
+#define V_CIMQDBG8BEN(x) ((x) << S_CIMQDBG8BEN)
+#define F_CIMQDBG8BEN V_CIMQDBG8BEN(1U)
+
+#define S_QUEFULLTHRSH 0
+#define M_QUEFULLTHRSH 0x1ffU
+#define V_QUEFULLTHRSH(x) ((x) << S_QUEFULLTHRSH)
+#define G_QUEFULLTHRSH(x) (((x) >> S_QUEFULLTHRSH) & M_QUEFULLTHRSH)
+
+#define A_CIM_HOST_ACC_CTRL 0x7b50
+
+#define S_HOSTBUSY 17
+#define V_HOSTBUSY(x) ((x) << S_HOSTBUSY)
+#define F_HOSTBUSY V_HOSTBUSY(1U)
+
+#define S_HOSTWRITE 16
+#define V_HOSTWRITE(x) ((x) << S_HOSTWRITE)
+#define F_HOSTWRITE V_HOSTWRITE(1U)
+
+#define S_HOSTADDR 0
+#define M_HOSTADDR 0xffffU
+#define V_HOSTADDR(x) ((x) << S_HOSTADDR)
+#define G_HOSTADDR(x) (((x) >> S_HOSTADDR) & M_HOSTADDR)
+
+#define A_CIM_HOST_ACC_DATA 0x7b54
+#define A_CIM_CDEBUGDATA 0x7b58
+
+#define S_CDEBUGDATAH 16
+#define M_CDEBUGDATAH 0xffffU
+#define V_CDEBUGDATAH(x) ((x) << S_CDEBUGDATAH)
+#define G_CDEBUGDATAH(x) (((x) >> S_CDEBUGDATAH) & M_CDEBUGDATAH)
+
+#define S_CDEBUGDATAL 0
+#define M_CDEBUGDATAL 0xffffU
+#define V_CDEBUGDATAL(x) ((x) << S_CDEBUGDATAL)
+#define G_CDEBUGDATAL(x) (((x) >> S_CDEBUGDATAL) & M_CDEBUGDATAL)
+
+#define A_CIM_IBQ_DBG_CFG 0x7b60
+
+#define S_IBQDBGADDR 16
+#define M_IBQDBGADDR 0xfffU
+#define V_IBQDBGADDR(x) ((x) << S_IBQDBGADDR)
+#define G_IBQDBGADDR(x) (((x) >> S_IBQDBGADDR) & M_IBQDBGADDR)
+
+#define S_IBQDBGWR 2
+#define V_IBQDBGWR(x) ((x) << S_IBQDBGWR)
+#define F_IBQDBGWR V_IBQDBGWR(1U)
+
+#define S_IBQDBGBUSY 1
+#define V_IBQDBGBUSY(x) ((x) << S_IBQDBGBUSY)
+#define F_IBQDBGBUSY V_IBQDBGBUSY(1U)
+
+#define S_IBQDBGEN 0
+#define V_IBQDBGEN(x) ((x) << S_IBQDBGEN)
+#define F_IBQDBGEN V_IBQDBGEN(1U)
+
+#define A_CIM_OBQ_DBG_CFG 0x7b64
+
+#define S_OBQDBGADDR 16
+#define M_OBQDBGADDR 0xfffU
+#define V_OBQDBGADDR(x) ((x) << S_OBQDBGADDR)
+#define G_OBQDBGADDR(x) (((x) >> S_OBQDBGADDR) & M_OBQDBGADDR)
+
+#define S_OBQDBGWR 2
+#define V_OBQDBGWR(x) ((x) << S_OBQDBGWR)
+#define F_OBQDBGWR V_OBQDBGWR(1U)
+
+#define S_OBQDBGBUSY 1
+#define V_OBQDBGBUSY(x) ((x) << S_OBQDBGBUSY)
+#define F_OBQDBGBUSY V_OBQDBGBUSY(1U)
+
+#define S_OBQDBGEN 0
+#define V_OBQDBGEN(x) ((x) << S_OBQDBGEN)
+#define F_OBQDBGEN V_OBQDBGEN(1U)
+
+#define A_CIM_IBQ_DBG_DATA 0x7b68
+#define A_CIM_OBQ_DBG_DATA 0x7b6c
+#define A_CIM_DEBUGCFG 0x7b70
+
+#define S_POLADBGRDPTR 23
+#define M_POLADBGRDPTR 0x1ffU
+#define V_POLADBGRDPTR(x) ((x) << S_POLADBGRDPTR)
+#define G_POLADBGRDPTR(x) (((x) >> S_POLADBGRDPTR) & M_POLADBGRDPTR)
+
+#define S_PILADBGRDPTR 14
+#define M_PILADBGRDPTR 0x1ffU
+#define V_PILADBGRDPTR(x) ((x) << S_PILADBGRDPTR)
+#define G_PILADBGRDPTR(x) (((x) >> S_PILADBGRDPTR) & M_PILADBGRDPTR)
+
+#define S_LAMASKTRIG 13
+#define V_LAMASKTRIG(x) ((x) << S_LAMASKTRIG)
+#define F_LAMASKTRIG V_LAMASKTRIG(1U)
+
+#define S_LADBGEN 12
+#define V_LADBGEN(x) ((x) << S_LADBGEN)
+#define F_LADBGEN V_LADBGEN(1U)
+
+#define S_LAFILLONCE 11
+#define V_LAFILLONCE(x) ((x) << S_LAFILLONCE)
+#define F_LAFILLONCE V_LAFILLONCE(1U)
+
+#define S_LAMASKSTOP 10
+#define V_LAMASKSTOP(x) ((x) << S_LAMASKSTOP)
+#define F_LAMASKSTOP V_LAMASKSTOP(1U)
+
+#define S_DEBUGSELH 5
+#define M_DEBUGSELH 0x1fU
+#define V_DEBUGSELH(x) ((x) << S_DEBUGSELH)
+#define G_DEBUGSELH(x) (((x) >> S_DEBUGSELH) & M_DEBUGSELH)
+
+#define S_DEBUGSELL 0
+#define M_DEBUGSELL 0x1fU
+#define V_DEBUGSELL(x) ((x) << S_DEBUGSELL)
+#define G_DEBUGSELL(x) (((x) >> S_DEBUGSELL) & M_DEBUGSELL)
+
+#define A_CIM_DEBUGSTS 0x7b74
+
+#define S_LARESET 31
+#define V_LARESET(x) ((x) << S_LARESET)
+#define F_LARESET V_LARESET(1U)
+
+#define S_POLADBGWRPTR 16
+#define M_POLADBGWRPTR 0x1ffU
+#define V_POLADBGWRPTR(x) ((x) << S_POLADBGWRPTR)
+#define G_POLADBGWRPTR(x) (((x) >> S_POLADBGWRPTR) & M_POLADBGWRPTR)
+
+#define S_PILADBGWRPTR 0
+#define M_PILADBGWRPTR 0x1ffU
+#define V_PILADBGWRPTR(x) ((x) << S_PILADBGWRPTR)
+#define G_PILADBGWRPTR(x) (((x) >> S_PILADBGWRPTR) & M_PILADBGWRPTR)
+
+#define A_CIM_PO_LA_DEBUGDATA 0x7b78
+#define A_CIM_PI_LA_DEBUGDATA 0x7b7c
+#define A_CIM_PO_LA_MADEBUGDATA 0x7b80
+#define A_CIM_PI_LA_MADEBUGDATA 0x7b84
+#define A_CIM_PO_LA_PIFSMDEBUGDATA 0x7b8c
+#define A_CIM_MEM_ZONE0_VA 0x7b90
+
+#define S_MEM_ZONE_VA 4
+#define M_MEM_ZONE_VA 0xfffffffU
+#define V_MEM_ZONE_VA(x) ((x) << S_MEM_ZONE_VA)
+#define G_MEM_ZONE_VA(x) (((x) >> S_MEM_ZONE_VA) & M_MEM_ZONE_VA)
+
+#define A_CIM_MEM_ZONE0_BA 0x7b94
+
+#define S_MEM_ZONE_BA 6
+#define M_MEM_ZONE_BA 0x3ffffffU
+#define V_MEM_ZONE_BA(x) ((x) << S_MEM_ZONE_BA)
+#define G_MEM_ZONE_BA(x) (((x) >> S_MEM_ZONE_BA) & M_MEM_ZONE_BA)
+
+#define S_PBT_ENABLE 5
+#define V_PBT_ENABLE(x) ((x) << S_PBT_ENABLE)
+#define F_PBT_ENABLE V_PBT_ENABLE(1U)
+
+#define S_ZONE_DST 0
+#define M_ZONE_DST 0x3U
+#define V_ZONE_DST(x) ((x) << S_ZONE_DST)
+#define G_ZONE_DST(x) (((x) >> S_ZONE_DST) & M_ZONE_DST)
+
+#define A_CIM_MEM_ZONE0_LEN 0x7b98
+
+#define S_MEM_ZONE_LEN 4
+#define M_MEM_ZONE_LEN 0xfffffffU
+#define V_MEM_ZONE_LEN(x) ((x) << S_MEM_ZONE_LEN)
+#define G_MEM_ZONE_LEN(x) (((x) >> S_MEM_ZONE_LEN) & M_MEM_ZONE_LEN)
+
+#define A_CIM_MEM_ZONE1_VA 0x7b9c
+#define A_CIM_MEM_ZONE1_BA 0x7ba0
+#define A_CIM_MEM_ZONE1_LEN 0x7ba4
+#define A_CIM_MEM_ZONE2_VA 0x7ba8
+#define A_CIM_MEM_ZONE2_BA 0x7bac
+#define A_CIM_MEM_ZONE2_LEN 0x7bb0
+#define A_CIM_MEM_ZONE3_VA 0x7bb4
+#define A_CIM_MEM_ZONE3_BA 0x7bb8
+#define A_CIM_MEM_ZONE3_LEN 0x7bbc
+#define A_CIM_MEM_ZONE4_VA 0x7bc0
+#define A_CIM_MEM_ZONE4_BA 0x7bc4
+#define A_CIM_MEM_ZONE4_LEN 0x7bc8
+#define A_CIM_MEM_ZONE5_VA 0x7bcc
+#define A_CIM_MEM_ZONE5_BA 0x7bd0
+#define A_CIM_MEM_ZONE5_LEN 0x7bd4
+#define A_CIM_MEM_ZONE6_VA 0x7bd8
+#define A_CIM_MEM_ZONE6_BA 0x7bdc
+#define A_CIM_MEM_ZONE6_LEN 0x7be0
+#define A_CIM_MEM_ZONE7_VA 0x7be4
+#define A_CIM_MEM_ZONE7_BA 0x7be8
+#define A_CIM_MEM_ZONE7_LEN 0x7bec
+#define A_CIM_BOOT_LEN 0x7bf0
+
+#define S_BOOTLEN 4
+#define M_BOOTLEN 0xfffffffU
+#define V_BOOTLEN(x) ((x) << S_BOOTLEN)
+#define G_BOOTLEN(x) (((x) >> S_BOOTLEN) & M_BOOTLEN)
+
+#define A_CIM_GLB_TIMER_CTL 0x7bf4
+
+#define S_TIMER1EN 4
+#define V_TIMER1EN(x) ((x) << S_TIMER1EN)
+#define F_TIMER1EN V_TIMER1EN(1U)
+
+#define S_TIMER0EN 3
+#define V_TIMER0EN(x) ((x) << S_TIMER0EN)
+#define F_TIMER0EN V_TIMER0EN(1U)
+
+#define S_TIMEREN 1
+#define V_TIMEREN(x) ((x) << S_TIMEREN)
+#define F_TIMEREN V_TIMEREN(1U)
+
+#define A_CIM_GLB_TIMER 0x7bf8
+#define A_CIM_GLB_TIMER_TICK 0x7bfc
+
+#define S_GLBLTTICK 0
+#define M_GLBLTTICK 0xffffU
+#define V_GLBLTTICK(x) ((x) << S_GLBLTTICK)
+#define G_GLBLTTICK(x) (((x) >> S_GLBLTTICK) & M_GLBLTTICK)
+
+#define A_CIM_TIMER0 0x7c00
+#define A_CIM_TIMER1 0x7c04
+#define A_CIM_DEBUG_ADDR_TIMEOUT 0x7c08
+
+#define S_DADDRTIMEOUT 2
+#define M_DADDRTIMEOUT 0x3fffffffU
+#define V_DADDRTIMEOUT(x) ((x) << S_DADDRTIMEOUT)
+#define G_DADDRTIMEOUT(x) (((x) >> S_DADDRTIMEOUT) & M_DADDRTIMEOUT)
+
+#define A_CIM_DEBUG_ADDR_ILLEGAL 0x7c0c
+
+#define S_DADDRILLEGAL 2
+#define M_DADDRILLEGAL 0x3fffffffU
+#define V_DADDRILLEGAL(x) ((x) << S_DADDRILLEGAL)
+#define G_DADDRILLEGAL(x) (((x) >> S_DADDRILLEGAL) & M_DADDRILLEGAL)
+
+#define A_CIM_DEBUG_PIF_CAUSE_MASK 0x7c10
+
+#define S_DPIFHOSTMASK 0
+#define M_DPIFHOSTMASK 0x1fffffU
+#define V_DPIFHOSTMASK(x) ((x) << S_DPIFHOSTMASK)
+#define G_DPIFHOSTMASK(x) (((x) >> S_DPIFHOSTMASK) & M_DPIFHOSTMASK)
+
+#define A_CIM_DEBUG_PIF_UPACC_CAUSE_MASK 0x7c14
+
+#define S_DPIFHUPAMASK 0
+#define M_DPIFHUPAMASK 0x7fffffffU
+#define V_DPIFHUPAMASK(x) ((x) << S_DPIFHUPAMASK)
+#define G_DPIFHUPAMASK(x) (((x) >> S_DPIFHUPAMASK) & M_DPIFHUPAMASK)
+
+#define A_CIM_DEBUG_UP_CAUSE_MASK 0x7c18
+
+#define S_DUPMASK 0
+#define M_DUPMASK 0x1fffffU
+#define V_DUPMASK(x) ((x) << S_DUPMASK)
+#define G_DUPMASK(x) (((x) >> S_DUPMASK) & M_DUPMASK)
+
+#define A_CIM_DEBUG_UP_UPACC_CAUSE_MASK 0x7c1c
+
+#define S_DUPUACCMASK 0
+#define M_DUPUACCMASK 0x7fffffffU
+#define V_DUPUACCMASK(x) ((x) << S_DUPUACCMASK)
+#define G_DUPUACCMASK(x) (((x) >> S_DUPUACCMASK) & M_DUPUACCMASK)
+
+#define A_CIM_PERR_INJECT 0x7c20
+#define A_CIM_PERR_ENABLE 0x7c24
+
+#define S_PERREN 0
+#define M_PERREN 0x1fffffU
+#define V_PERREN(x) ((x) << S_PERREN)
+#define G_PERREN(x) (((x) >> S_PERREN) & M_PERREN)
+
+#define A_CIM_EEPROM_BUSY_BIT 0x7c28
+
+#define S_EEPROMBUSY 0
+#define V_EEPROMBUSY(x) ((x) << S_EEPROMBUSY)
+#define F_EEPROMBUSY V_EEPROMBUSY(1U)
+
+#define A_CIM_MA_TIMER_EN 0x7c2c
+
+#define S_MA_TIMER_ENABLE 0
+#define V_MA_TIMER_ENABLE(x) ((x) << S_MA_TIMER_ENABLE)
+#define F_MA_TIMER_ENABLE V_MA_TIMER_ENABLE(1U)
+
+#define A_CIM_UP_PO_SINGLE_OUTSTANDING 0x7c30
+
+#define S_UP_PO_SINGLE_OUTSTANDING 0
+#define V_UP_PO_SINGLE_OUTSTANDING(x) ((x) << S_UP_PO_SINGLE_OUTSTANDING)
+#define F_UP_PO_SINGLE_OUTSTANDING V_UP_PO_SINGLE_OUTSTANDING(1U)
+
+#define A_CIM_CIM_DEBUG_SPARE 0x7c34
+#define A_CIM_UP_OPERATION_FREQ 0x7c38
+
+/* registers for module TP */
+#define TP_BASE_ADDR 0x7d00
+
+#define A_TP_IN_CONFIG 0x7d00
+
+#define S_TCPOPTPARSERDISCH3 27
+#define V_TCPOPTPARSERDISCH3(x) ((x) << S_TCPOPTPARSERDISCH3)
+#define F_TCPOPTPARSERDISCH3 V_TCPOPTPARSERDISCH3(1U)
+
+#define S_TCPOPTPARSERDISCH2 26
+#define V_TCPOPTPARSERDISCH2(x) ((x) << S_TCPOPTPARSERDISCH2)
+#define F_TCPOPTPARSERDISCH2 V_TCPOPTPARSERDISCH2(1U)
+
+#define S_TCPOPTPARSERDISCH1 25
+#define V_TCPOPTPARSERDISCH1(x) ((x) << S_TCPOPTPARSERDISCH1)
+#define F_TCPOPTPARSERDISCH1 V_TCPOPTPARSERDISCH1(1U)
+
+#define S_TCPOPTPARSERDISCH0 24
+#define V_TCPOPTPARSERDISCH0(x) ((x) << S_TCPOPTPARSERDISCH0)
+#define F_TCPOPTPARSERDISCH0 V_TCPOPTPARSERDISCH0(1U)
+
+#define S_CRCPASSPRT3 23
+#define V_CRCPASSPRT3(x) ((x) << S_CRCPASSPRT3)
+#define F_CRCPASSPRT3 V_CRCPASSPRT3(1U)
+
+#define S_CRCPASSPRT2 22
+#define V_CRCPASSPRT2(x) ((x) << S_CRCPASSPRT2)
+#define F_CRCPASSPRT2 V_CRCPASSPRT2(1U)
+
+#define S_CRCPASSPRT1 21
+#define V_CRCPASSPRT1(x) ((x) << S_CRCPASSPRT1)
+#define F_CRCPASSPRT1 V_CRCPASSPRT1(1U)
+
+#define S_CRCPASSPRT0 20
+#define V_CRCPASSPRT0(x) ((x) << S_CRCPASSPRT0)
+#define F_CRCPASSPRT0 V_CRCPASSPRT0(1U)
+
+#define S_VEPAMODE 19
+#define V_VEPAMODE(x) ((x) << S_VEPAMODE)
+#define F_VEPAMODE V_VEPAMODE(1U)
+
+#define S_FIPUPEN 18
+#define V_FIPUPEN(x) ((x) << S_FIPUPEN)
+#define F_FIPUPEN V_FIPUPEN(1U)
+
+#define S_FCOEUPEN 17
+#define V_FCOEUPEN(x) ((x) << S_FCOEUPEN)
+#define F_FCOEUPEN V_FCOEUPEN(1U)
+
+#define S_FCOEENABLE 16
+#define V_FCOEENABLE(x) ((x) << S_FCOEENABLE)
+#define F_FCOEENABLE V_FCOEENABLE(1U)
+
+#define S_IPV6ENABLE 15
+#define V_IPV6ENABLE(x) ((x) << S_IPV6ENABLE)
+#define F_IPV6ENABLE V_IPV6ENABLE(1U)
+
+#define S_NICMODE 14
+#define V_NICMODE(x) ((x) << S_NICMODE)
+#define F_NICMODE V_NICMODE(1U)
+
+#define S_ECHECKSUMCHECKTCP 13
+#define V_ECHECKSUMCHECKTCP(x) ((x) << S_ECHECKSUMCHECKTCP)
+#define F_ECHECKSUMCHECKTCP V_ECHECKSUMCHECKTCP(1U)
+
+#define S_ECHECKSUMCHECKIP 12
+#define V_ECHECKSUMCHECKIP(x) ((x) << S_ECHECKSUMCHECKIP)
+#define F_ECHECKSUMCHECKIP V_ECHECKSUMCHECKIP(1U)
+
+#define S_EREPORTUDPHDRLEN 11
+#define V_EREPORTUDPHDRLEN(x) ((x) << S_EREPORTUDPHDRLEN)
+#define F_EREPORTUDPHDRLEN V_EREPORTUDPHDRLEN(1U)
+
+#define S_IN_ECPL 10
+#define V_IN_ECPL(x) ((x) << S_IN_ECPL)
+#define F_IN_ECPL V_IN_ECPL(1U)
+
+#define S_VNTAGENABLE 9
+#define V_VNTAGENABLE(x) ((x) << S_VNTAGENABLE)
+#define F_VNTAGENABLE V_VNTAGENABLE(1U)
+
+#define S_IN_EETH 8
+#define V_IN_EETH(x) ((x) << S_IN_EETH)
+#define F_IN_EETH V_IN_EETH(1U)
+
+#define S_CCHECKSUMCHECKTCP 6
+#define V_CCHECKSUMCHECKTCP(x) ((x) << S_CCHECKSUMCHECKTCP)
+#define F_CCHECKSUMCHECKTCP V_CCHECKSUMCHECKTCP(1U)
+
+#define S_CCHECKSUMCHECKIP 5
+#define V_CCHECKSUMCHECKIP(x) ((x) << S_CCHECKSUMCHECKIP)
+#define F_CCHECKSUMCHECKIP V_CCHECKSUMCHECKIP(1U)
+
+#define S_CTAG 4
+#define V_CTAG(x) ((x) << S_CTAG)
+#define F_CTAG V_CTAG(1U)
+
+#define S_IN_CCPL 3
+#define V_IN_CCPL(x) ((x) << S_IN_CCPL)
+#define F_IN_CCPL V_IN_CCPL(1U)
+
+#define S_IN_CETH 1
+#define V_IN_CETH(x) ((x) << S_IN_CETH)
+#define F_IN_CETH V_IN_CETH(1U)
+
+#define S_CTUNNEL 0
+#define V_CTUNNEL(x) ((x) << S_CTUNNEL)
+#define F_CTUNNEL V_CTUNNEL(1U)
+
+#define A_TP_OUT_CONFIG 0x7d04
+
+#define S_PORTQFCEN 28
+#define M_PORTQFCEN 0xfU
+#define V_PORTQFCEN(x) ((x) << S_PORTQFCEN)
+#define G_PORTQFCEN(x) (((x) >> S_PORTQFCEN) & M_PORTQFCEN)
+
+#define S_EPKTDISTCHN3 23
+#define V_EPKTDISTCHN3(x) ((x) << S_EPKTDISTCHN3)
+#define F_EPKTDISTCHN3 V_EPKTDISTCHN3(1U)
+
+#define S_EPKTDISTCHN2 22
+#define V_EPKTDISTCHN2(x) ((x) << S_EPKTDISTCHN2)
+#define F_EPKTDISTCHN2 V_EPKTDISTCHN2(1U)
+
+#define S_EPKTDISTCHN1 21
+#define V_EPKTDISTCHN1(x) ((x) << S_EPKTDISTCHN1)
+#define F_EPKTDISTCHN1 V_EPKTDISTCHN1(1U)
+
+#define S_EPKTDISTCHN0 20
+#define V_EPKTDISTCHN0(x) ((x) << S_EPKTDISTCHN0)
+#define F_EPKTDISTCHN0 V_EPKTDISTCHN0(1U)
+
+#define S_TTLMODE 19
+#define V_TTLMODE(x) ((x) << S_TTLMODE)
+#define F_TTLMODE V_TTLMODE(1U)
+
+#define S_EQFCDMAC 18
+#define V_EQFCDMAC(x) ((x) << S_EQFCDMAC)
+#define F_EQFCDMAC V_EQFCDMAC(1U)
+
+#define S_ELPBKINCMPSSTAT 17
+#define V_ELPBKINCMPSSTAT(x) ((x) << S_ELPBKINCMPSSTAT)
+#define F_ELPBKINCMPSSTAT V_ELPBKINCMPSSTAT(1U)
+
+#define S_IPIDSPLITMODE 16
+#define V_IPIDSPLITMODE(x) ((x) << S_IPIDSPLITMODE)
+#define F_IPIDSPLITMODE V_IPIDSPLITMODE(1U)
+
+#define S_VLANEXTENABLEPORT3 15
+#define V_VLANEXTENABLEPORT3(x) ((x) << S_VLANEXTENABLEPORT3)
+#define F_VLANEXTENABLEPORT3 V_VLANEXTENABLEPORT3(1U)
+
+#define S_VLANEXTENABLEPORT2 14
+#define V_VLANEXTENABLEPORT2(x) ((x) << S_VLANEXTENABLEPORT2)
+#define F_VLANEXTENABLEPORT2 V_VLANEXTENABLEPORT2(1U)
+
+#define S_VLANEXTENABLEPORT1 13
+#define V_VLANEXTENABLEPORT1(x) ((x) << S_VLANEXTENABLEPORT1)
+#define F_VLANEXTENABLEPORT1 V_VLANEXTENABLEPORT1(1U)
+
+#define S_VLANEXTENABLEPORT0 12
+#define V_VLANEXTENABLEPORT0(x) ((x) << S_VLANEXTENABLEPORT0)
+#define F_VLANEXTENABLEPORT0 V_VLANEXTENABLEPORT0(1U)
+
+#define S_ECHECKSUMINSERTTCP 11
+#define V_ECHECKSUMINSERTTCP(x) ((x) << S_ECHECKSUMINSERTTCP)
+#define F_ECHECKSUMINSERTTCP V_ECHECKSUMINSERTTCP(1U)
+
+#define S_ECHECKSUMINSERTIP 10
+#define V_ECHECKSUMINSERTIP(x) ((x) << S_ECHECKSUMINSERTIP)
+#define F_ECHECKSUMINSERTIP V_ECHECKSUMINSERTIP(1U)
+
+#define S_ECPL 8
+#define V_ECPL(x) ((x) << S_ECPL)
+#define F_ECPL V_ECPL(1U)
+
+#define S_EPRIORITY 7
+#define V_EPRIORITY(x) ((x) << S_EPRIORITY)
+#define F_EPRIORITY V_EPRIORITY(1U)
+
+#define S_EETHERNET 6
+#define V_EETHERNET(x) ((x) << S_EETHERNET)
+#define F_EETHERNET V_EETHERNET(1U)
+
+#define S_CCHECKSUMINSERTTCP 5
+#define V_CCHECKSUMINSERTTCP(x) ((x) << S_CCHECKSUMINSERTTCP)
+#define F_CCHECKSUMINSERTTCP V_CCHECKSUMINSERTTCP(1U)
+
+#define S_CCHECKSUMINSERTIP 4
+#define V_CCHECKSUMINSERTIP(x) ((x) << S_CCHECKSUMINSERTIP)
+#define F_CCHECKSUMINSERTIP V_CCHECKSUMINSERTIP(1U)
+
+#define S_CCPL 2
+#define V_CCPL(x) ((x) << S_CCPL)
+#define F_CCPL V_CCPL(1U)
+
+#define S_CETHERNET 0
+#define V_CETHERNET(x) ((x) << S_CETHERNET)
+#define F_CETHERNET V_CETHERNET(1U)
+
+#define A_TP_GLOBAL_CONFIG 0x7d08
+
+#define S_SYNCOOKIEPARAMS 26
+#define M_SYNCOOKIEPARAMS 0x3fU
+#define V_SYNCOOKIEPARAMS(x) ((x) << S_SYNCOOKIEPARAMS)
+#define G_SYNCOOKIEPARAMS(x) (((x) >> S_SYNCOOKIEPARAMS) & M_SYNCOOKIEPARAMS)
+
+#define S_RXFLOWCONTROLDISABLE 25
+#define V_RXFLOWCONTROLDISABLE(x) ((x) << S_RXFLOWCONTROLDISABLE)
+#define F_RXFLOWCONTROLDISABLE V_RXFLOWCONTROLDISABLE(1U)
+
+#define S_TXPACINGENABLE 24
+#define V_TXPACINGENABLE(x) ((x) << S_TXPACINGENABLE)
+#define F_TXPACINGENABLE V_TXPACINGENABLE(1U)
+
+#define S_ATTACKFILTERENABLE 23
+#define V_ATTACKFILTERENABLE(x) ((x) << S_ATTACKFILTERENABLE)
+#define F_ATTACKFILTERENABLE V_ATTACKFILTERENABLE(1U)
+
+#define S_SYNCOOKIENOOPTIONS 22
+#define V_SYNCOOKIENOOPTIONS(x) ((x) << S_SYNCOOKIENOOPTIONS)
+#define F_SYNCOOKIENOOPTIONS V_SYNCOOKIENOOPTIONS(1U)
+
+#define S_PROTECTEDMODE 21
+#define V_PROTECTEDMODE(x) ((x) << S_PROTECTEDMODE)
+#define F_PROTECTEDMODE V_PROTECTEDMODE(1U)
+
+#define S_PINGDROP 20
+#define V_PINGDROP(x) ((x) << S_PINGDROP)
+#define F_PINGDROP V_PINGDROP(1U)
+
+#define S_FRAGMENTDROP 19
+#define V_FRAGMENTDROP(x) ((x) << S_FRAGMENTDROP)
+#define F_FRAGMENTDROP V_FRAGMENTDROP(1U)
+
+#define S_FIVETUPLELOOKUP 17
+#define M_FIVETUPLELOOKUP 0x3U
+#define V_FIVETUPLELOOKUP(x) ((x) << S_FIVETUPLELOOKUP)
+#define G_FIVETUPLELOOKUP(x) (((x) >> S_FIVETUPLELOOKUP) & M_FIVETUPLELOOKUP)
+
+#define S_OFDMPSSTATS 16
+#define V_OFDMPSSTATS(x) ((x) << S_OFDMPSSTATS)
+#define F_OFDMPSSTATS V_OFDMPSSTATS(1U)
+
+#define S_DONTFRAGMENT 15
+#define V_DONTFRAGMENT(x) ((x) << S_DONTFRAGMENT)
+#define F_DONTFRAGMENT V_DONTFRAGMENT(1U)
+
+#define S_IPIDENTSPLIT 14
+#define V_IPIDENTSPLIT(x) ((x) << S_IPIDENTSPLIT)
+#define F_IPIDENTSPLIT V_IPIDENTSPLIT(1U)
+
+#define S_IPCHECKSUMOFFLOAD 13
+#define V_IPCHECKSUMOFFLOAD(x) ((x) << S_IPCHECKSUMOFFLOAD)
+#define F_IPCHECKSUMOFFLOAD V_IPCHECKSUMOFFLOAD(1U)
+
+#define S_UDPCHECKSUMOFFLOAD 12
+#define V_UDPCHECKSUMOFFLOAD(x) ((x) << S_UDPCHECKSUMOFFLOAD)
+#define F_UDPCHECKSUMOFFLOAD V_UDPCHECKSUMOFFLOAD(1U)
+
+#define S_TCPCHECKSUMOFFLOAD 11
+#define V_TCPCHECKSUMOFFLOAD(x) ((x) << S_TCPCHECKSUMOFFLOAD)
+#define F_TCPCHECKSUMOFFLOAD V_TCPCHECKSUMOFFLOAD(1U)
+
+#define S_RSSLOOPBACKENABLE 10
+#define V_RSSLOOPBACKENABLE(x) ((x) << S_RSSLOOPBACKENABLE)
+#define F_RSSLOOPBACKENABLE V_RSSLOOPBACKENABLE(1U)
+
+#define S_TCAMSERVERUSE 8
+#define M_TCAMSERVERUSE 0x3U
+#define V_TCAMSERVERUSE(x) ((x) << S_TCAMSERVERUSE)
+#define G_TCAMSERVERUSE(x) (((x) >> S_TCAMSERVERUSE) & M_TCAMSERVERUSE)
+
+#define S_IPTTL 0
+#define M_IPTTL 0xffU
+#define V_IPTTL(x) ((x) << S_IPTTL)
+#define G_IPTTL(x) (((x) >> S_IPTTL) & M_IPTTL)
+
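/*
 * Editor's note (illustrative sketch, not part of this commit): turning on
 * the TP checksum-offload bits defined above with a read-modify-write.
 * t4_read_reg()/t4_write_reg() are assumed driver accessors; in practice an
 * update like this would normally go through a helper such as
 * t4_set_reg_field() in t4_hw.c.
 */
static void
tp_csum_offload_example(struct adapter *adap)
{
	uint32_t v = t4_read_reg(adap, A_TP_GLOBAL_CONFIG);

	v |= F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD | F_TCPCHECKSUMOFFLOAD;
	v = (v & ~V_IPTTL(M_IPTTL)) | V_IPTTL(64);	/* default IP TTL of 64 */
	t4_write_reg(adap, A_TP_GLOBAL_CONFIG, v);
}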
+#define A_TP_DB_CONFIG 0x7d0c
+
+#define S_DBMAXOPCNT 24
+#define M_DBMAXOPCNT 0xffU
+#define V_DBMAXOPCNT(x) ((x) << S_DBMAXOPCNT)
+#define G_DBMAXOPCNT(x) (((x) >> S_DBMAXOPCNT) & M_DBMAXOPCNT)
+
+#define S_CXMAXOPCNTDISABLE 23
+#define V_CXMAXOPCNTDISABLE(x) ((x) << S_CXMAXOPCNTDISABLE)
+#define F_CXMAXOPCNTDISABLE V_CXMAXOPCNTDISABLE(1U)
+
+#define S_CXMAXOPCNT 16
+#define M_CXMAXOPCNT 0x7fU
+#define V_CXMAXOPCNT(x) ((x) << S_CXMAXOPCNT)
+#define G_CXMAXOPCNT(x) (((x) >> S_CXMAXOPCNT) & M_CXMAXOPCNT)
+
+#define S_TXMAXOPCNTDISABLE 15
+#define V_TXMAXOPCNTDISABLE(x) ((x) << S_TXMAXOPCNTDISABLE)
+#define F_TXMAXOPCNTDISABLE V_TXMAXOPCNTDISABLE(1U)
+
+#define S_TXMAXOPCNT 8
+#define M_TXMAXOPCNT 0x7fU
+#define V_TXMAXOPCNT(x) ((x) << S_TXMAXOPCNT)
+#define G_TXMAXOPCNT(x) (((x) >> S_TXMAXOPCNT) & M_TXMAXOPCNT)
+
+#define S_RXMAXOPCNTDISABLE 7
+#define V_RXMAXOPCNTDISABLE(x) ((x) << S_RXMAXOPCNTDISABLE)
+#define F_RXMAXOPCNTDISABLE V_RXMAXOPCNTDISABLE(1U)
+
+#define S_RXMAXOPCNT 0
+#define M_RXMAXOPCNT 0x7fU
+#define V_RXMAXOPCNT(x) ((x) << S_RXMAXOPCNT)
+#define G_RXMAXOPCNT(x) (((x) >> S_RXMAXOPCNT) & M_RXMAXOPCNT)
+
+#define A_TP_CMM_TCB_BASE 0x7d10
+#define A_TP_CMM_MM_BASE 0x7d14
+#define A_TP_CMM_TIMER_BASE 0x7d18
+#define A_TP_CMM_MM_FLST_SIZE 0x7d1c
+
+#define S_RXPOOLSIZE 16
+#define M_RXPOOLSIZE 0xffffU
+#define V_RXPOOLSIZE(x) ((x) << S_RXPOOLSIZE)
+#define G_RXPOOLSIZE(x) (((x) >> S_RXPOOLSIZE) & M_RXPOOLSIZE)
+
+#define S_TXPOOLSIZE 0
+#define M_TXPOOLSIZE 0xffffU
+#define V_TXPOOLSIZE(x) ((x) << S_TXPOOLSIZE)
+#define G_TXPOOLSIZE(x) (((x) >> S_TXPOOLSIZE) & M_TXPOOLSIZE)
+
+#define A_TP_PMM_TX_BASE 0x7d20
+#define A_TP_PMM_DEFRAG_BASE 0x7d24
+#define A_TP_PMM_RX_BASE 0x7d28
+#define A_TP_PMM_RX_PAGE_SIZE 0x7d2c
+#define A_TP_PMM_RX_MAX_PAGE 0x7d30
+
+#define S_PMRXNUMCHN 31
+#define V_PMRXNUMCHN(x) ((x) << S_PMRXNUMCHN)
+#define F_PMRXNUMCHN V_PMRXNUMCHN(1U)
+
+#define S_PMRXMAXPAGE 0
+#define M_PMRXMAXPAGE 0x1fffffU
+#define V_PMRXMAXPAGE(x) ((x) << S_PMRXMAXPAGE)
+#define G_PMRXMAXPAGE(x) (((x) >> S_PMRXMAXPAGE) & M_PMRXMAXPAGE)
+
+#define A_TP_PMM_TX_PAGE_SIZE 0x7d34
+#define A_TP_PMM_TX_MAX_PAGE 0x7d38
+
+#define S_PMTXNUMCHN 30
+#define M_PMTXNUMCHN 0x3U
+#define V_PMTXNUMCHN(x) ((x) << S_PMTXNUMCHN)
+#define G_PMTXNUMCHN(x) (((x) >> S_PMTXNUMCHN) & M_PMTXNUMCHN)
+
+#define S_PMTXMAXPAGE 0
+#define M_PMTXMAXPAGE 0x1fffffU
+#define V_PMTXMAXPAGE(x) ((x) << S_PMTXMAXPAGE)
+#define G_PMTXMAXPAGE(x) (((x) >> S_PMTXMAXPAGE) & M_PMTXMAXPAGE)
+
+#define A_TP_TCP_OPTIONS 0x7d40
+
+#define S_MTUDEFAULT 16
+#define M_MTUDEFAULT 0xffffU
+#define V_MTUDEFAULT(x) ((x) << S_MTUDEFAULT)
+#define G_MTUDEFAULT(x) (((x) >> S_MTUDEFAULT) & M_MTUDEFAULT)
+
+#define S_MTUENABLE 10
+#define V_MTUENABLE(x) ((x) << S_MTUENABLE)
+#define F_MTUENABLE V_MTUENABLE(1U)
+
+#define S_SACKTX 9
+#define V_SACKTX(x) ((x) << S_SACKTX)
+#define F_SACKTX V_SACKTX(1U)
+
+#define S_SACKRX 8
+#define V_SACKRX(x) ((x) << S_SACKRX)
+#define F_SACKRX V_SACKRX(1U)
+
+#define S_SACKMODE 4
+#define M_SACKMODE 0x3U
+#define V_SACKMODE(x) ((x) << S_SACKMODE)
+#define G_SACKMODE(x) (((x) >> S_SACKMODE) & M_SACKMODE)
+
+#define S_WINDOWSCALEMODE 2
+#define M_WINDOWSCALEMODE 0x3U
+#define V_WINDOWSCALEMODE(x) ((x) << S_WINDOWSCALEMODE)
+#define G_WINDOWSCALEMODE(x) (((x) >> S_WINDOWSCALEMODE) & M_WINDOWSCALEMODE)
+
+#define S_TIMESTAMPSMODE 0
+#define M_TIMESTAMPSMODE 0x3U
+#define V_TIMESTAMPSMODE(x) ((x) << S_TIMESTAMPSMODE)
+#define G_TIMESTAMPSMODE(x) (((x) >> S_TIMESTAMPSMODE) & M_TIMESTAMPSMODE)
+
+#define A_TP_DACK_CONFIG 0x7d44
+
+#define S_AUTOSTATE3 30
+#define M_AUTOSTATE3 0x3U
+#define V_AUTOSTATE3(x) ((x) << S_AUTOSTATE3)
+#define G_AUTOSTATE3(x) (((x) >> S_AUTOSTATE3) & M_AUTOSTATE3)
+
+#define S_AUTOSTATE2 28
+#define M_AUTOSTATE2 0x3U
+#define V_AUTOSTATE2(x) ((x) << S_AUTOSTATE2)
+#define G_AUTOSTATE2(x) (((x) >> S_AUTOSTATE2) & M_AUTOSTATE2)
+
+#define S_AUTOSTATE1 26
+#define M_AUTOSTATE1 0x3U
+#define V_AUTOSTATE1(x) ((x) << S_AUTOSTATE1)
+#define G_AUTOSTATE1(x) (((x) >> S_AUTOSTATE1) & M_AUTOSTATE1)
+
+#define S_BYTETHRESHOLD 8
+#define M_BYTETHRESHOLD 0x3ffffU
+#define V_BYTETHRESHOLD(x) ((x) << S_BYTETHRESHOLD)
+#define G_BYTETHRESHOLD(x) (((x) >> S_BYTETHRESHOLD) & M_BYTETHRESHOLD)
+
+#define S_MSSTHRESHOLD 4
+#define M_MSSTHRESHOLD 0x7U
+#define V_MSSTHRESHOLD(x) ((x) << S_MSSTHRESHOLD)
+#define G_MSSTHRESHOLD(x) (((x) >> S_MSSTHRESHOLD) & M_MSSTHRESHOLD)
+
+#define S_AUTOCAREFUL 2
+#define V_AUTOCAREFUL(x) ((x) << S_AUTOCAREFUL)
+#define F_AUTOCAREFUL V_AUTOCAREFUL(1U)
+
+#define S_AUTOENABLE 1
+#define V_AUTOENABLE(x) ((x) << S_AUTOENABLE)
+#define F_AUTOENABLE V_AUTOENABLE(1U)
+
+#define S_MODE 0
+#define V_MODE(x) ((x) << S_MODE)
+#define F_MODE V_MODE(1U)
+
+#define A_TP_PC_CONFIG 0x7d48
+
+#define S_CMCACHEDISABLE 31
+#define V_CMCACHEDISABLE(x) ((x) << S_CMCACHEDISABLE)
+#define F_CMCACHEDISABLE V_CMCACHEDISABLE(1U)
+
+#define S_ENABLEOCSPIFULL 30
+#define V_ENABLEOCSPIFULL(x) ((x) << S_ENABLEOCSPIFULL)
+#define F_ENABLEOCSPIFULL V_ENABLEOCSPIFULL(1U)
+
+#define S_ENABLEFLMERRORDDP 29
+#define V_ENABLEFLMERRORDDP(x) ((x) << S_ENABLEFLMERRORDDP)
+#define F_ENABLEFLMERRORDDP V_ENABLEFLMERRORDDP(1U)
+
+#define S_LOCKTID 28
+#define V_LOCKTID(x) ((x) << S_LOCKTID)
+#define F_LOCKTID V_LOCKTID(1U)
+
+#define S_DISABLEINVPEND 27
+#define V_DISABLEINVPEND(x) ((x) << S_DISABLEINVPEND)
+#define F_DISABLEINVPEND V_DISABLEINVPEND(1U)
+
+#define S_ENABLEFILTERCOUNT 26
+#define V_ENABLEFILTERCOUNT(x) ((x) << S_ENABLEFILTERCOUNT)
+#define F_ENABLEFILTERCOUNT V_ENABLEFILTERCOUNT(1U)
+
+#define S_RDDPCONGEN 25
+#define V_RDDPCONGEN(x) ((x) << S_RDDPCONGEN)
+#define F_RDDPCONGEN V_RDDPCONGEN(1U)
+
+#define S_ENABLEONFLYPDU 24
+#define V_ENABLEONFLYPDU(x) ((x) << S_ENABLEONFLYPDU)
+#define F_ENABLEONFLYPDU V_ENABLEONFLYPDU(1U)
+
+#define S_ENABLEMINRCVWND 23
+#define V_ENABLEMINRCVWND(x) ((x) << S_ENABLEMINRCVWND)
+#define F_ENABLEMINRCVWND V_ENABLEMINRCVWND(1U)
+
+#define S_ENABLEMAXRCVWND 22
+#define V_ENABLEMAXRCVWND(x) ((x) << S_ENABLEMAXRCVWND)
+#define F_ENABLEMAXRCVWND V_ENABLEMAXRCVWND(1U)
+
+#define S_TXDATAACKRATEENABLE 21
+#define V_TXDATAACKRATEENABLE(x) ((x) << S_TXDATAACKRATEENABLE)
+#define F_TXDATAACKRATEENABLE V_TXDATAACKRATEENABLE(1U)
+
+#define S_TXDEFERENABLE 20
+#define V_TXDEFERENABLE(x) ((x) << S_TXDEFERENABLE)
+#define F_TXDEFERENABLE V_TXDEFERENABLE(1U)
+
+#define S_RXCONGESTIONMODE 19
+#define V_RXCONGESTIONMODE(x) ((x) << S_RXCONGESTIONMODE)
+#define F_RXCONGESTIONMODE V_RXCONGESTIONMODE(1U)
+
+#define S_HEARBEATONCEDACK 18
+#define V_HEARBEATONCEDACK(x) ((x) << S_HEARBEATONCEDACK)
+#define F_HEARBEATONCEDACK V_HEARBEATONCEDACK(1U)
+
+#define S_HEARBEATONCEHEAP 17
+#define V_HEARBEATONCEHEAP(x) ((x) << S_HEARBEATONCEHEAP)
+#define F_HEARBEATONCEHEAP V_HEARBEATONCEHEAP(1U)
+
+#define S_HEARBEATDACK 16
+#define V_HEARBEATDACK(x) ((x) << S_HEARBEATDACK)
+#define F_HEARBEATDACK V_HEARBEATDACK(1U)
+
+#define S_TXCONGESTIONMODE 15
+#define V_TXCONGESTIONMODE(x) ((x) << S_TXCONGESTIONMODE)
+#define F_TXCONGESTIONMODE V_TXCONGESTIONMODE(1U)
+
+#define S_ACCEPTLATESTRCVADV 14
+#define V_ACCEPTLATESTRCVADV(x) ((x) << S_ACCEPTLATESTRCVADV)
+#define F_ACCEPTLATESTRCVADV V_ACCEPTLATESTRCVADV(1U)
+
+#define S_DISABLESYNDATA 13
+#define V_DISABLESYNDATA(x) ((x) << S_DISABLESYNDATA)
+#define F_DISABLESYNDATA V_DISABLESYNDATA(1U)
+
+#define S_DISABLEWINDOWPSH 12
+#define V_DISABLEWINDOWPSH(x) ((x) << S_DISABLEWINDOWPSH)
+#define F_DISABLEWINDOWPSH V_DISABLEWINDOWPSH(1U)
+
+#define S_DISABLEFINOLDDATA 11
+#define V_DISABLEFINOLDDATA(x) ((x) << S_DISABLEFINOLDDATA)
+#define F_DISABLEFINOLDDATA V_DISABLEFINOLDDATA(1U)
+
+#define S_ENABLEFLMERROR 10
+#define V_ENABLEFLMERROR(x) ((x) << S_ENABLEFLMERROR)
+#define F_ENABLEFLMERROR V_ENABLEFLMERROR(1U)
+
+#define S_ENABLEOPTMTU 9
+#define V_ENABLEOPTMTU(x) ((x) << S_ENABLEOPTMTU)
+#define F_ENABLEOPTMTU V_ENABLEOPTMTU(1U)
+
+#define S_FILTERPEERFIN 8
+#define V_FILTERPEERFIN(x) ((x) << S_FILTERPEERFIN)
+#define F_FILTERPEERFIN V_FILTERPEERFIN(1U)
+
+#define S_ENABLEFEEDBACKSEND 7
+#define V_ENABLEFEEDBACKSEND(x) ((x) << S_ENABLEFEEDBACKSEND)
+#define F_ENABLEFEEDBACKSEND V_ENABLEFEEDBACKSEND(1U)
+
+#define S_ENABLERDMAERROR 6
+#define V_ENABLERDMAERROR(x) ((x) << S_ENABLERDMAERROR)
+#define F_ENABLERDMAERROR V_ENABLERDMAERROR(1U)
+
+#define S_ENABLEDDPFLOWCONTROL 5
+#define V_ENABLEDDPFLOWCONTROL(x) ((x) << S_ENABLEDDPFLOWCONTROL)
+#define F_ENABLEDDPFLOWCONTROL V_ENABLEDDPFLOWCONTROL(1U)
+
+#define S_DISABLEHELDFIN 4
+#define V_DISABLEHELDFIN(x) ((x) << S_DISABLEHELDFIN)
+#define F_DISABLEHELDFIN V_DISABLEHELDFIN(1U)
+
+#define S_ENABLEOFDOVLAN 3
+#define V_ENABLEOFDOVLAN(x) ((x) << S_ENABLEOFDOVLAN)
+#define F_ENABLEOFDOVLAN V_ENABLEOFDOVLAN(1U)
+
+#define S_DISABLETIMEWAIT 2
+#define V_DISABLETIMEWAIT(x) ((x) << S_DISABLETIMEWAIT)
+#define F_DISABLETIMEWAIT V_DISABLETIMEWAIT(1U)
+
+#define S_ENABLEVLANCHECK 1
+#define V_ENABLEVLANCHECK(x) ((x) << S_ENABLEVLANCHECK)
+#define F_ENABLEVLANCHECK V_ENABLEVLANCHECK(1U)
+
+#define S_TXDATAACKPAGEENABLE 0
+#define V_TXDATAACKPAGEENABLE(x) ((x) << S_TXDATAACKPAGEENABLE)
+#define F_TXDATAACKPAGEENABLE V_TXDATAACKPAGEENABLE(1U)
+
+#define A_TP_PC_CONFIG2 0x7d4c
+
+#define S_ENABLEMTUVFMODE 31
+#define V_ENABLEMTUVFMODE(x) ((x) << S_ENABLEMTUVFMODE)
+#define F_ENABLEMTUVFMODE V_ENABLEMTUVFMODE(1U)
+
+#define S_ENABLEMIBVFMODE 30
+#define V_ENABLEMIBVFMODE(x) ((x) << S_ENABLEMIBVFMODE)
+#define F_ENABLEMIBVFMODE V_ENABLEMIBVFMODE(1U)
+
+#define S_DISABLELBKCHECK 29
+#define V_DISABLELBKCHECK(x) ((x) << S_DISABLELBKCHECK)
+#define F_DISABLELBKCHECK V_DISABLELBKCHECK(1U)
+
+#define S_ENABLEURGDDPOFF 28
+#define V_ENABLEURGDDPOFF(x) ((x) << S_ENABLEURGDDPOFF)
+#define F_ENABLEURGDDPOFF V_ENABLEURGDDPOFF(1U)
+
+#define S_ENABLEFILTERLPBK 27
+#define V_ENABLEFILTERLPBK(x) ((x) << S_ENABLEFILTERLPBK)
+#define F_ENABLEFILTERLPBK V_ENABLEFILTERLPBK(1U)
+
+#define S_DISABLETBLMMGR 26
+#define V_DISABLETBLMMGR(x) ((x) << S_DISABLETBLMMGR)
+#define F_DISABLETBLMMGR V_DISABLETBLMMGR(1U)
+
+#define S_CNGRECSNDNXT 25
+#define V_CNGRECSNDNXT(x) ((x) << S_CNGRECSNDNXT)
+#define F_CNGRECSNDNXT V_CNGRECSNDNXT(1U)
+
+#define S_ENABLELBKCHN 24
+#define V_ENABLELBKCHN(x) ((x) << S_ENABLELBKCHN)
+#define F_ENABLELBKCHN V_ENABLELBKCHN(1U)
+
+#define S_ENABLELROECN 23
+#define V_ENABLELROECN(x) ((x) << S_ENABLELROECN)
+#define F_ENABLELROECN V_ENABLELROECN(1U)
+
+#define S_ENABLEPCMDCHECK 22
+#define V_ENABLEPCMDCHECK(x) ((x) << S_ENABLEPCMDCHECK)
+#define F_ENABLEPCMDCHECK V_ENABLEPCMDCHECK(1U)
+
+#define S_ENABLEELBKAFULL 21
+#define V_ENABLEELBKAFULL(x) ((x) << S_ENABLEELBKAFULL)
+#define F_ENABLEELBKAFULL V_ENABLEELBKAFULL(1U)
+
+#define S_ENABLECLBKAFULL 20
+#define V_ENABLECLBKAFULL(x) ((x) << S_ENABLECLBKAFULL)
+#define F_ENABLECLBKAFULL V_ENABLECLBKAFULL(1U)
+
+#define S_ENABLEOESPIFULL 19
+#define V_ENABLEOESPIFULL(x) ((x) << S_ENABLEOESPIFULL)
+#define F_ENABLEOESPIFULL V_ENABLEOESPIFULL(1U)
+
+#define S_DISABLEHITCHECK 18
+#define V_DISABLEHITCHECK(x) ((x) << S_DISABLEHITCHECK)
+#define F_DISABLEHITCHECK V_DISABLEHITCHECK(1U)
+
+#define S_ENABLERSSERRCHECK 17
+#define V_ENABLERSSERRCHECK(x) ((x) << S_ENABLERSSERRCHECK)
+#define F_ENABLERSSERRCHECK V_ENABLERSSERRCHECK(1U)
+
+#define S_DISABLENEWPSHFLAG 16
+#define V_DISABLENEWPSHFLAG(x) ((x) << S_DISABLENEWPSHFLAG)
+#define F_DISABLENEWPSHFLAG V_DISABLENEWPSHFLAG(1U)
+
+#define S_ENABLERDDPRCVADVCLR 15
+#define V_ENABLERDDPRCVADVCLR(x) ((x) << S_ENABLERDDPRCVADVCLR)
+#define F_ENABLERDDPRCVADVCLR V_ENABLERDDPRCVADVCLR(1U)
+
+#define S_ENABLETXDATAARPMISS 14
+#define V_ENABLETXDATAARPMISS(x) ((x) << S_ENABLETXDATAARPMISS)
+#define F_ENABLETXDATAARPMISS V_ENABLETXDATAARPMISS(1U)
+
+#define S_ENABLEARPMISS 13
+#define V_ENABLEARPMISS(x) ((x) << S_ENABLEARPMISS)
+#define F_ENABLEARPMISS V_ENABLEARPMISS(1U)
+
+#define S_ENABLERSTPAWS 12
+#define V_ENABLERSTPAWS(x) ((x) << S_ENABLERSTPAWS)
+#define F_ENABLERSTPAWS V_ENABLERSTPAWS(1U)
+
+#define S_ENABLEIPV6RSS 11
+#define V_ENABLEIPV6RSS(x) ((x) << S_ENABLEIPV6RSS)
+#define F_ENABLEIPV6RSS V_ENABLEIPV6RSS(1U)
+
+#define S_ENABLENONOFDHYBRSS 10
+#define V_ENABLENONOFDHYBRSS(x) ((x) << S_ENABLENONOFDHYBRSS)
+#define F_ENABLENONOFDHYBRSS V_ENABLENONOFDHYBRSS(1U)
+
+#define S_ENABLEUDP4TUPRSS 9
+#define V_ENABLEUDP4TUPRSS(x) ((x) << S_ENABLEUDP4TUPRSS)
+#define F_ENABLEUDP4TUPRSS V_ENABLEUDP4TUPRSS(1U)
+
+#define S_ENABLERXPKTTMSTPRSS 8
+#define V_ENABLERXPKTTMSTPRSS(x) ((x) << S_ENABLERXPKTTMSTPRSS)
+#define F_ENABLERXPKTTMSTPRSS V_ENABLERXPKTTMSTPRSS(1U)
+
+#define S_ENABLEEPCMDAFULL 7
+#define V_ENABLEEPCMDAFULL(x) ((x) << S_ENABLEEPCMDAFULL)
+#define F_ENABLEEPCMDAFULL V_ENABLEEPCMDAFULL(1U)
+
+#define S_ENABLECPCMDAFULL 6
+#define V_ENABLECPCMDAFULL(x) ((x) << S_ENABLECPCMDAFULL)
+#define F_ENABLECPCMDAFULL V_ENABLECPCMDAFULL(1U)
+
+#define S_ENABLEEHDRAFULL 5
+#define V_ENABLEEHDRAFULL(x) ((x) << S_ENABLEEHDRAFULL)
+#define F_ENABLEEHDRAFULL V_ENABLEEHDRAFULL(1U)
+
+#define S_ENABLECHDRAFULL 4
+#define V_ENABLECHDRAFULL(x) ((x) << S_ENABLECHDRAFULL)
+#define F_ENABLECHDRAFULL V_ENABLECHDRAFULL(1U)
+
+#define S_ENABLEEMACAFULL 3
+#define V_ENABLEEMACAFULL(x) ((x) << S_ENABLEEMACAFULL)
+#define F_ENABLEEMACAFULL V_ENABLEEMACAFULL(1U)
+
+#define S_ENABLENONOFDTIDRSS 2
+#define V_ENABLENONOFDTIDRSS(x) ((x) << S_ENABLENONOFDTIDRSS)
+#define F_ENABLENONOFDTIDRSS V_ENABLENONOFDTIDRSS(1U)
+
+#define S_ENABLENONOFDTCBRSS 1
+#define V_ENABLENONOFDTCBRSS(x) ((x) << S_ENABLENONOFDTCBRSS)
+#define F_ENABLENONOFDTCBRSS V_ENABLENONOFDTCBRSS(1U)
+
+#define S_ENABLETNLOFDCLOSED 0
+#define V_ENABLETNLOFDCLOSED(x) ((x) << S_ENABLETNLOFDCLOSED)
+#define F_ENABLETNLOFDCLOSED V_ENABLETNLOFDCLOSED(1U)
+
+#define A_TP_TCP_BACKOFF_REG0 0x7d50
+
+#define S_TIMERBACKOFFINDEX3 24
+#define M_TIMERBACKOFFINDEX3 0xffU
+#define V_TIMERBACKOFFINDEX3(x) ((x) << S_TIMERBACKOFFINDEX3)
+#define G_TIMERBACKOFFINDEX3(x) (((x) >> S_TIMERBACKOFFINDEX3) & M_TIMERBACKOFFINDEX3)
+
+#define S_TIMERBACKOFFINDEX2 16
+#define M_TIMERBACKOFFINDEX2 0xffU
+#define V_TIMERBACKOFFINDEX2(x) ((x) << S_TIMERBACKOFFINDEX2)
+#define G_TIMERBACKOFFINDEX2(x) (((x) >> S_TIMERBACKOFFINDEX2) & M_TIMERBACKOFFINDEX2)
+
+#define S_TIMERBACKOFFINDEX1 8
+#define M_TIMERBACKOFFINDEX1 0xffU
+#define V_TIMERBACKOFFINDEX1(x) ((x) << S_TIMERBACKOFFINDEX1)
+#define G_TIMERBACKOFFINDEX1(x) (((x) >> S_TIMERBACKOFFINDEX1) & M_TIMERBACKOFFINDEX1)
+
+#define S_TIMERBACKOFFINDEX0 0
+#define M_TIMERBACKOFFINDEX0 0xffU
+#define V_TIMERBACKOFFINDEX0(x) ((x) << S_TIMERBACKOFFINDEX0)
+#define G_TIMERBACKOFFINDEX0(x) (((x) >> S_TIMERBACKOFFINDEX0) & M_TIMERBACKOFFINDEX0)
+
+#define A_TP_TCP_BACKOFF_REG1 0x7d54
+
+#define S_TIMERBACKOFFINDEX7 24
+#define M_TIMERBACKOFFINDEX7 0xffU
+#define V_TIMERBACKOFFINDEX7(x) ((x) << S_TIMERBACKOFFINDEX7)
+#define G_TIMERBACKOFFINDEX7(x) (((x) >> S_TIMERBACKOFFINDEX7) & M_TIMERBACKOFFINDEX7)
+
+#define S_TIMERBACKOFFINDEX6 16
+#define M_TIMERBACKOFFINDEX6 0xffU
+#define V_TIMERBACKOFFINDEX6(x) ((x) << S_TIMERBACKOFFINDEX6)
+#define G_TIMERBACKOFFINDEX6(x) (((x) >> S_TIMERBACKOFFINDEX6) & M_TIMERBACKOFFINDEX6)
+
+#define S_TIMERBACKOFFINDEX5 8
+#define M_TIMERBACKOFFINDEX5 0xffU
+#define V_TIMERBACKOFFINDEX5(x) ((x) << S_TIMERBACKOFFINDEX5)
+#define G_TIMERBACKOFFINDEX5(x) (((x) >> S_TIMERBACKOFFINDEX5) & M_TIMERBACKOFFINDEX5)
+
+#define S_TIMERBACKOFFINDEX4 0
+#define M_TIMERBACKOFFINDEX4 0xffU
+#define V_TIMERBACKOFFINDEX4(x) ((x) << S_TIMERBACKOFFINDEX4)
+#define G_TIMERBACKOFFINDEX4(x) (((x) >> S_TIMERBACKOFFINDEX4) & M_TIMERBACKOFFINDEX4)
+
+#define A_TP_TCP_BACKOFF_REG2 0x7d58
+
+#define S_TIMERBACKOFFINDEX11 24
+#define M_TIMERBACKOFFINDEX11 0xffU
+#define V_TIMERBACKOFFINDEX11(x) ((x) << S_TIMERBACKOFFINDEX11)
+#define G_TIMERBACKOFFINDEX11(x) (((x) >> S_TIMERBACKOFFINDEX11) & M_TIMERBACKOFFINDEX11)
+
+#define S_TIMERBACKOFFINDEX10 16
+#define M_TIMERBACKOFFINDEX10 0xffU
+#define V_TIMERBACKOFFINDEX10(x) ((x) << S_TIMERBACKOFFINDEX10)
+#define G_TIMERBACKOFFINDEX10(x) (((x) >> S_TIMERBACKOFFINDEX10) & M_TIMERBACKOFFINDEX10)
+
+#define S_TIMERBACKOFFINDEX9 8
+#define M_TIMERBACKOFFINDEX9 0xffU
+#define V_TIMERBACKOFFINDEX9(x) ((x) << S_TIMERBACKOFFINDEX9)
+#define G_TIMERBACKOFFINDEX9(x) (((x) >> S_TIMERBACKOFFINDEX9) & M_TIMERBACKOFFINDEX9)
+
+#define S_TIMERBACKOFFINDEX8 0
+#define M_TIMERBACKOFFINDEX8 0xffU
+#define V_TIMERBACKOFFINDEX8(x) ((x) << S_TIMERBACKOFFINDEX8)
+#define G_TIMERBACKOFFINDEX8(x) (((x) >> S_TIMERBACKOFFINDEX8) & M_TIMERBACKOFFINDEX8)
+
+#define A_TP_TCP_BACKOFF_REG3 0x7d5c
+
+#define S_TIMERBACKOFFINDEX15 24
+#define M_TIMERBACKOFFINDEX15 0xffU
+#define V_TIMERBACKOFFINDEX15(x) ((x) << S_TIMERBACKOFFINDEX15)
+#define G_TIMERBACKOFFINDEX15(x) (((x) >> S_TIMERBACKOFFINDEX15) & M_TIMERBACKOFFINDEX15)
+
+#define S_TIMERBACKOFFINDEX14 16
+#define M_TIMERBACKOFFINDEX14 0xffU
+#define V_TIMERBACKOFFINDEX14(x) ((x) << S_TIMERBACKOFFINDEX14)
+#define G_TIMERBACKOFFINDEX14(x) (((x) >> S_TIMERBACKOFFINDEX14) & M_TIMERBACKOFFINDEX14)
+
+#define S_TIMERBACKOFFINDEX13 8
+#define M_TIMERBACKOFFINDEX13 0xffU
+#define V_TIMERBACKOFFINDEX13(x) ((x) << S_TIMERBACKOFFINDEX13)
+#define G_TIMERBACKOFFINDEX13(x) (((x) >> S_TIMERBACKOFFINDEX13) & M_TIMERBACKOFFINDEX13)
+
+#define S_TIMERBACKOFFINDEX12 0
+#define M_TIMERBACKOFFINDEX12 0xffU
+#define V_TIMERBACKOFFINDEX12(x) ((x) << S_TIMERBACKOFFINDEX12)
+#define G_TIMERBACKOFFINDEX12(x) (((x) >> S_TIMERBACKOFFINDEX12) & M_TIMERBACKOFFINDEX12)
+
+#define A_TP_PARA_REG0 0x7d60
+
+#define S_INITCWNDIDLE 27
+#define V_INITCWNDIDLE(x) ((x) << S_INITCWNDIDLE)
+#define F_INITCWNDIDLE V_INITCWNDIDLE(1U)
+
+#define S_INITCWND 24
+#define M_INITCWND 0x7U
+#define V_INITCWND(x) ((x) << S_INITCWND)
+#define G_INITCWND(x) (((x) >> S_INITCWND) & M_INITCWND)
+
+#define S_DUPACKTHRESH 20
+#define M_DUPACKTHRESH 0xfU
+#define V_DUPACKTHRESH(x) ((x) << S_DUPACKTHRESH)
+#define G_DUPACKTHRESH(x) (((x) >> S_DUPACKTHRESH) & M_DUPACKTHRESH)
+
+#define S_CPLERRENABLE 12
+#define V_CPLERRENABLE(x) ((x) << S_CPLERRENABLE)
+#define F_CPLERRENABLE V_CPLERRENABLE(1U)
+
+#define S_FASTTNLCNT 11
+#define V_FASTTNLCNT(x) ((x) << S_FASTTNLCNT)
+#define F_FASTTNLCNT V_FASTTNLCNT(1U)
+
+#define S_FASTTBLCNT 10
+#define V_FASTTBLCNT(x) ((x) << S_FASTTBLCNT)
+#define F_FASTTBLCNT V_FASTTBLCNT(1U)
+
+#define S_TPTCAMKEY 9
+#define V_TPTCAMKEY(x) ((x) << S_TPTCAMKEY)
+#define F_TPTCAMKEY V_TPTCAMKEY(1U)
+
+#define S_SWSMODE 8
+#define V_SWSMODE(x) ((x) << S_SWSMODE)
+#define F_SWSMODE V_SWSMODE(1U)
+
+#define S_TSMPMODE 6
+#define M_TSMPMODE 0x3U
+#define V_TSMPMODE(x) ((x) << S_TSMPMODE)
+#define G_TSMPMODE(x) (((x) >> S_TSMPMODE) & M_TSMPMODE)
+
+#define S_BYTECOUNTLIMIT 4
+#define M_BYTECOUNTLIMIT 0x3U
+#define V_BYTECOUNTLIMIT(x) ((x) << S_BYTECOUNTLIMIT)
+#define G_BYTECOUNTLIMIT(x) (((x) >> S_BYTECOUNTLIMIT) & M_BYTECOUNTLIMIT)
+
+#define S_SWSSHOVE 3
+#define V_SWSSHOVE(x) ((x) << S_SWSSHOVE)
+#define F_SWSSHOVE V_SWSSHOVE(1U)
+
+#define S_TBLTIMER 2
+#define V_TBLTIMER(x) ((x) << S_TBLTIMER)
+#define F_TBLTIMER V_TBLTIMER(1U)
+
+#define S_RXTPACE 1
+#define V_RXTPACE(x) ((x) << S_RXTPACE)
+#define F_RXTPACE V_RXTPACE(1U)
+
+#define S_SWSTIMER 0
+#define V_SWSTIMER(x) ((x) << S_SWSTIMER)
+#define F_SWSTIMER V_SWSTIMER(1U)
+
+#define A_TP_PARA_REG1 0x7d64
+
+#define S_INITRWND 16
+#define M_INITRWND 0xffffU
+#define V_INITRWND(x) ((x) << S_INITRWND)
+#define G_INITRWND(x) (((x) >> S_INITRWND) & M_INITRWND)
+
+#define S_INITIALSSTHRESH 0
+#define M_INITIALSSTHRESH 0xffffU
+#define V_INITIALSSTHRESH(x) ((x) << S_INITIALSSTHRESH)
+#define G_INITIALSSTHRESH(x) (((x) >> S_INITIALSSTHRESH) & M_INITIALSSTHRESH)
+
+#define A_TP_PARA_REG2 0x7d68
+
+#define S_MAXRXDATA 16
+#define M_MAXRXDATA 0xffffU
+#define V_MAXRXDATA(x) ((x) << S_MAXRXDATA)
+#define G_MAXRXDATA(x) (((x) >> S_MAXRXDATA) & M_MAXRXDATA)
+
+#define S_RXCOALESCESIZE 0
+#define M_RXCOALESCESIZE 0xffffU
+#define V_RXCOALESCESIZE(x) ((x) << S_RXCOALESCESIZE)
+#define G_RXCOALESCESIZE(x) (((x) >> S_RXCOALESCESIZE) & M_RXCOALESCESIZE)
+
+#define A_TP_PARA_REG3 0x7d6c
+
+#define S_ENABLETNLCNGLPBK 31
+#define V_ENABLETNLCNGLPBK(x) ((x) << S_ENABLETNLCNGLPBK)
+#define F_ENABLETNLCNGLPBK V_ENABLETNLCNGLPBK(1U)
+
+#define S_ENABLETNLCNGFIFO 30
+#define V_ENABLETNLCNGFIFO(x) ((x) << S_ENABLETNLCNGFIFO)
+#define F_ENABLETNLCNGFIFO V_ENABLETNLCNGFIFO(1U)
+
+#define S_ENABLETNLCNGHDR 29
+#define V_ENABLETNLCNGHDR(x) ((x) << S_ENABLETNLCNGHDR)
+#define F_ENABLETNLCNGHDR V_ENABLETNLCNGHDR(1U)
+
+#define S_ENABLETNLCNGSGE 28
+#define V_ENABLETNLCNGSGE(x) ((x) << S_ENABLETNLCNGSGE)
+#define F_ENABLETNLCNGSGE V_ENABLETNLCNGSGE(1U)
+
+#define S_RXMACCHECK 27
+#define V_RXMACCHECK(x) ((x) << S_RXMACCHECK)
+#define F_RXMACCHECK V_RXMACCHECK(1U)
+
+#define S_RXSYNFILTER 26
+#define V_RXSYNFILTER(x) ((x) << S_RXSYNFILTER)
+#define F_RXSYNFILTER V_RXSYNFILTER(1U)
+
+#define S_CNGCTRLECN 25
+#define V_CNGCTRLECN(x) ((x) << S_CNGCTRLECN)
+#define F_CNGCTRLECN V_CNGCTRLECN(1U)
+
+#define S_RXDDPOFFINIT 24
+#define V_RXDDPOFFINIT(x) ((x) << S_RXDDPOFFINIT)
+#define F_RXDDPOFFINIT V_RXDDPOFFINIT(1U)
+
+#define S_TUNNELCNGDROP3 23
+#define V_TUNNELCNGDROP3(x) ((x) << S_TUNNELCNGDROP3)
+#define F_TUNNELCNGDROP3 V_TUNNELCNGDROP3(1U)
+
+#define S_TUNNELCNGDROP2 22
+#define V_TUNNELCNGDROP2(x) ((x) << S_TUNNELCNGDROP2)
+#define F_TUNNELCNGDROP2 V_TUNNELCNGDROP2(1U)
+
+#define S_TUNNELCNGDROP1 21
+#define V_TUNNELCNGDROP1(x) ((x) << S_TUNNELCNGDROP1)
+#define F_TUNNELCNGDROP1 V_TUNNELCNGDROP1(1U)
+
+#define S_TUNNELCNGDROP0 20
+#define V_TUNNELCNGDROP0(x) ((x) << S_TUNNELCNGDROP0)
+#define F_TUNNELCNGDROP0 V_TUNNELCNGDROP0(1U)
+
+#define S_TXDATAACKIDX 16
+#define M_TXDATAACKIDX 0xfU
+#define V_TXDATAACKIDX(x) ((x) << S_TXDATAACKIDX)
+#define G_TXDATAACKIDX(x) (((x) >> S_TXDATAACKIDX) & M_TXDATAACKIDX)
+
+#define S_RXFRAGENABLE 12
+#define M_RXFRAGENABLE 0x7U
+#define V_RXFRAGENABLE(x) ((x) << S_RXFRAGENABLE)
+#define G_RXFRAGENABLE(x) (((x) >> S_RXFRAGENABLE) & M_RXFRAGENABLE)
+
+#define S_TXPACEFIXEDSTRICT 11
+#define V_TXPACEFIXEDSTRICT(x) ((x) << S_TXPACEFIXEDSTRICT)
+#define F_TXPACEFIXEDSTRICT V_TXPACEFIXEDSTRICT(1U)
+
+#define S_TXPACEAUTOSTRICT 10
+#define V_TXPACEAUTOSTRICT(x) ((x) << S_TXPACEAUTOSTRICT)
+#define F_TXPACEAUTOSTRICT V_TXPACEAUTOSTRICT(1U)
+
+#define S_TXPACEFIXED 9
+#define V_TXPACEFIXED(x) ((x) << S_TXPACEFIXED)
+#define F_TXPACEFIXED V_TXPACEFIXED(1U)
+
+#define S_TXPACEAUTO 8
+#define V_TXPACEAUTO(x) ((x) << S_TXPACEAUTO)
+#define F_TXPACEAUTO V_TXPACEAUTO(1U)
+
+#define S_RXCHNTUNNEL 7
+#define V_RXCHNTUNNEL(x) ((x) << S_RXCHNTUNNEL)
+#define F_RXCHNTUNNEL V_RXCHNTUNNEL(1U)
+
+#define S_RXURGTUNNEL 6
+#define V_RXURGTUNNEL(x) ((x) << S_RXURGTUNNEL)
+#define F_RXURGTUNNEL V_RXURGTUNNEL(1U)
+
+#define S_RXURGMODE 5
+#define V_RXURGMODE(x) ((x) << S_RXURGMODE)
+#define F_RXURGMODE V_RXURGMODE(1U)
+
+#define S_TXURGMODE 4
+#define V_TXURGMODE(x) ((x) << S_TXURGMODE)
+#define F_TXURGMODE V_TXURGMODE(1U)
+
+#define S_CNGCTRLMODE 2
+#define M_CNGCTRLMODE 0x3U
+#define V_CNGCTRLMODE(x) ((x) << S_CNGCTRLMODE)
+#define G_CNGCTRLMODE(x) (((x) >> S_CNGCTRLMODE) & M_CNGCTRLMODE)
+
+#define S_RXCOALESCEENABLE 1
+#define V_RXCOALESCEENABLE(x) ((x) << S_RXCOALESCEENABLE)
+#define F_RXCOALESCEENABLE V_RXCOALESCEENABLE(1U)
+
+#define S_RXCOALESCEPSHEN 0
+#define V_RXCOALESCEPSHEN(x) ((x) << S_RXCOALESCEPSHEN)
+#define F_RXCOALESCEPSHEN V_RXCOALESCEPSHEN(1U)
+
+#define A_TP_PARA_REG4 0x7d70
+
+#define S_HIGHSPEEDCFG 24
+#define M_HIGHSPEEDCFG 0xffU
+#define V_HIGHSPEEDCFG(x) ((x) << S_HIGHSPEEDCFG)
+#define G_HIGHSPEEDCFG(x) (((x) >> S_HIGHSPEEDCFG) & M_HIGHSPEEDCFG)
+
+#define S_NEWRENOCFG 16
+#define M_NEWRENOCFG 0xffU
+#define V_NEWRENOCFG(x) ((x) << S_NEWRENOCFG)
+#define G_NEWRENOCFG(x) (((x) >> S_NEWRENOCFG) & M_NEWRENOCFG)
+
+#define S_TAHOECFG 8
+#define M_TAHOECFG 0xffU
+#define V_TAHOECFG(x) ((x) << S_TAHOECFG)
+#define G_TAHOECFG(x) (((x) >> S_TAHOECFG) & M_TAHOECFG)
+
+#define S_RENOCFG 0
+#define M_RENOCFG 0xffU
+#define V_RENOCFG(x) ((x) << S_RENOCFG)
+#define G_RENOCFG(x) (((x) >> S_RENOCFG) & M_RENOCFG)
+
+#define A_TP_PARA_REG5 0x7d74
+
+#define S_INDICATESIZE 16
+#define M_INDICATESIZE 0xffffU
+#define V_INDICATESIZE(x) ((x) << S_INDICATESIZE)
+#define G_INDICATESIZE(x) (((x) >> S_INDICATESIZE) & M_INDICATESIZE)
+
+#define S_MAXPROXYSIZE 12
+#define M_MAXPROXYSIZE 0xfU
+#define V_MAXPROXYSIZE(x) ((x) << S_MAXPROXYSIZE)
+#define G_MAXPROXYSIZE(x) (((x) >> S_MAXPROXYSIZE) & M_MAXPROXYSIZE)
+
+#define S_ENABLEREADPDU 11
+#define V_ENABLEREADPDU(x) ((x) << S_ENABLEREADPDU)
+#define F_ENABLEREADPDU V_ENABLEREADPDU(1U)
+
+#define S_RXREADAHEAD 10
+#define V_RXREADAHEAD(x) ((x) << S_RXREADAHEAD)
+#define F_RXREADAHEAD V_RXREADAHEAD(1U)
+
+#define S_EMPTYRQENABLE 9
+#define V_EMPTYRQENABLE(x) ((x) << S_EMPTYRQENABLE)
+#define F_EMPTYRQENABLE V_EMPTYRQENABLE(1U)
+
+#define S_SCHDENABLE 8
+#define V_SCHDENABLE(x) ((x) << S_SCHDENABLE)
+#define F_SCHDENABLE V_SCHDENABLE(1U)
+
+#define S_REARMDDPOFFSET 4
+#define V_REARMDDPOFFSET(x) ((x) << S_REARMDDPOFFSET)
+#define F_REARMDDPOFFSET V_REARMDDPOFFSET(1U)
+
+#define S_RESETDDPOFFSET 3
+#define V_RESETDDPOFFSET(x) ((x) << S_RESETDDPOFFSET)
+#define F_RESETDDPOFFSET V_RESETDDPOFFSET(1U)
+
+#define S_ONFLYDDPENABLE 2
+#define V_ONFLYDDPENABLE(x) ((x) << S_ONFLYDDPENABLE)
+#define F_ONFLYDDPENABLE V_ONFLYDDPENABLE(1U)
+
+#define S_DACKTIMERSPIN 1
+#define V_DACKTIMERSPIN(x) ((x) << S_DACKTIMERSPIN)
+#define F_DACKTIMERSPIN V_DACKTIMERSPIN(1U)
+
+#define S_PUSHTIMERENABLE 0
+#define V_PUSHTIMERENABLE(x) ((x) << S_PUSHTIMERENABLE)
+#define F_PUSHTIMERENABLE V_PUSHTIMERENABLE(1U)
+
+#define A_TP_PARA_REG6 0x7d78
+
+#define S_TXPDUSIZEADJ 24
+#define M_TXPDUSIZEADJ 0xffU
+#define V_TXPDUSIZEADJ(x) ((x) << S_TXPDUSIZEADJ)
+#define G_TXPDUSIZEADJ(x) (((x) >> S_TXPDUSIZEADJ) & M_TXPDUSIZEADJ)
+
+#define S_LIMITEDTRANSMIT 20
+#define M_LIMITEDTRANSMIT 0xfU
+#define V_LIMITEDTRANSMIT(x) ((x) << S_LIMITEDTRANSMIT)
+#define G_LIMITEDTRANSMIT(x) (((x) >> S_LIMITEDTRANSMIT) & M_LIMITEDTRANSMIT)
+
+#define S_ENABLECSAV 19
+#define V_ENABLECSAV(x) ((x) << S_ENABLECSAV)
+#define F_ENABLECSAV V_ENABLECSAV(1U)
+
+#define S_ENABLEDEFERPDU 18
+#define V_ENABLEDEFERPDU(x) ((x) << S_ENABLEDEFERPDU)
+#define F_ENABLEDEFERPDU V_ENABLEDEFERPDU(1U)
+
+#define S_ENABLEFLUSH 17
+#define V_ENABLEFLUSH(x) ((x) << S_ENABLEFLUSH)
+#define F_ENABLEFLUSH V_ENABLEFLUSH(1U)
+
+#define S_ENABLEBYTEPERSIST 16
+#define V_ENABLEBYTEPERSIST(x) ((x) << S_ENABLEBYTEPERSIST)
+#define F_ENABLEBYTEPERSIST V_ENABLEBYTEPERSIST(1U)
+
+#define S_DISABLETMOCNG 15
+#define V_DISABLETMOCNG(x) ((x) << S_DISABLETMOCNG)
+#define F_DISABLETMOCNG V_DISABLETMOCNG(1U)
+
+#define S_TXREADAHEAD 14
+#define V_TXREADAHEAD(x) ((x) << S_TXREADAHEAD)
+#define F_TXREADAHEAD V_TXREADAHEAD(1U)
+
+#define S_ALLOWEXEPTION 13
+#define V_ALLOWEXEPTION(x) ((x) << S_ALLOWEXEPTION)
+#define F_ALLOWEXEPTION V_ALLOWEXEPTION(1U)
+
+#define S_ENABLEDEFERACK 12
+#define V_ENABLEDEFERACK(x) ((x) << S_ENABLEDEFERACK)
+#define F_ENABLEDEFERACK V_ENABLEDEFERACK(1U)
+
+#define S_ENABLEESND 11
+#define V_ENABLEESND(x) ((x) << S_ENABLEESND)
+#define F_ENABLEESND V_ENABLEESND(1U)
+
+#define S_ENABLECSND 10
+#define V_ENABLECSND(x) ((x) << S_ENABLECSND)
+#define F_ENABLECSND V_ENABLECSND(1U)
+
+#define S_ENABLEPDUE 9
+#define V_ENABLEPDUE(x) ((x) << S_ENABLEPDUE)
+#define F_ENABLEPDUE V_ENABLEPDUE(1U)
+
+#define S_ENABLEPDUC 8
+#define V_ENABLEPDUC(x) ((x) << S_ENABLEPDUC)
+#define F_ENABLEPDUC V_ENABLEPDUC(1U)
+
+#define S_ENABLEBUFI 7
+#define V_ENABLEBUFI(x) ((x) << S_ENABLEBUFI)
+#define F_ENABLEBUFI V_ENABLEBUFI(1U)
+
+#define S_ENABLEBUFE 6
+#define V_ENABLEBUFE(x) ((x) << S_ENABLEBUFE)
+#define F_ENABLEBUFE V_ENABLEBUFE(1U)
+
+#define S_ENABLEDEFER 5
+#define V_ENABLEDEFER(x) ((x) << S_ENABLEDEFER)
+#define F_ENABLEDEFER V_ENABLEDEFER(1U)
+
+#define S_ENABLECLEARRXMTOOS 4
+#define V_ENABLECLEARRXMTOOS(x) ((x) << S_ENABLECLEARRXMTOOS)
+#define F_ENABLECLEARRXMTOOS V_ENABLECLEARRXMTOOS(1U)
+
+#define S_DISABLEPDUCNG 3
+#define V_DISABLEPDUCNG(x) ((x) << S_DISABLEPDUCNG)
+#define F_DISABLEPDUCNG V_DISABLEPDUCNG(1U)
+
+#define S_DISABLEPDUTIMEOUT 2
+#define V_DISABLEPDUTIMEOUT(x) ((x) << S_DISABLEPDUTIMEOUT)
+#define F_DISABLEPDUTIMEOUT V_DISABLEPDUTIMEOUT(1U)
+
+#define S_DISABLEPDURXMT 1
+#define V_DISABLEPDURXMT(x) ((x) << S_DISABLEPDURXMT)
+#define F_DISABLEPDURXMT V_DISABLEPDURXMT(1U)
+
+#define S_DISABLEPDUXMT 0
+#define V_DISABLEPDUXMT(x) ((x) << S_DISABLEPDUXMT)
+#define F_DISABLEPDUXMT V_DISABLEPDUXMT(1U)
+
+#define A_TP_PARA_REG7 0x7d7c
+
+#define S_PMMAXXFERLEN1 16
+#define M_PMMAXXFERLEN1 0xffffU
+#define V_PMMAXXFERLEN1(x) ((x) << S_PMMAXXFERLEN1)
+#define G_PMMAXXFERLEN1(x) (((x) >> S_PMMAXXFERLEN1) & M_PMMAXXFERLEN1)
+
+#define S_PMMAXXFERLEN0 0
+#define M_PMMAXXFERLEN0 0xffffU
+#define V_PMMAXXFERLEN0(x) ((x) << S_PMMAXXFERLEN0)
+#define G_PMMAXXFERLEN0(x) (((x) >> S_PMMAXXFERLEN0) & M_PMMAXXFERLEN0)
+
+#define A_TP_ENG_CONFIG 0x7d80
+
+#define S_TABLELATENCYDONE 28
+#define M_TABLELATENCYDONE 0xfU
+#define V_TABLELATENCYDONE(x) ((x) << S_TABLELATENCYDONE)
+#define G_TABLELATENCYDONE(x) (((x) >> S_TABLELATENCYDONE) & M_TABLELATENCYDONE)
+
+#define S_TABLELATENCYSTART 24
+#define M_TABLELATENCYSTART 0xfU
+#define V_TABLELATENCYSTART(x) ((x) << S_TABLELATENCYSTART)
+#define G_TABLELATENCYSTART(x) (((x) >> S_TABLELATENCYSTART) & M_TABLELATENCYSTART)
+
+#define S_ENGINELATENCYDELTA 16
+#define M_ENGINELATENCYDELTA 0xfU
+#define V_ENGINELATENCYDELTA(x) ((x) << S_ENGINELATENCYDELTA)
+#define G_ENGINELATENCYDELTA(x) (((x) >> S_ENGINELATENCYDELTA) & M_ENGINELATENCYDELTA)
+
+#define S_ENGINELATENCYMMGR 12
+#define M_ENGINELATENCYMMGR 0xfU
+#define V_ENGINELATENCYMMGR(x) ((x) << S_ENGINELATENCYMMGR)
+#define G_ENGINELATENCYMMGR(x) (((x) >> S_ENGINELATENCYMMGR) & M_ENGINELATENCYMMGR)
+
+#define S_ENGINELATENCYWIREIP6 8
+#define M_ENGINELATENCYWIREIP6 0xfU
+#define V_ENGINELATENCYWIREIP6(x) ((x) << S_ENGINELATENCYWIREIP6)
+#define G_ENGINELATENCYWIREIP6(x) (((x) >> S_ENGINELATENCYWIREIP6) & M_ENGINELATENCYWIREIP6)
+
+#define S_ENGINELATENCYWIRE 4
+#define M_ENGINELATENCYWIRE 0xfU
+#define V_ENGINELATENCYWIRE(x) ((x) << S_ENGINELATENCYWIRE)
+#define G_ENGINELATENCYWIRE(x) (((x) >> S_ENGINELATENCYWIRE) & M_ENGINELATENCYWIRE)
+
+#define S_ENGINELATENCYBASE 0
+#define M_ENGINELATENCYBASE 0xfU
+#define V_ENGINELATENCYBASE(x) ((x) << S_ENGINELATENCYBASE)
+#define G_ENGINELATENCYBASE(x) (((x) >> S_ENGINELATENCYBASE) & M_ENGINELATENCYBASE)
+
+#define A_TP_ERR_CONFIG 0x7d8c
+
+#define S_TNLERRORPING 30
+#define V_TNLERRORPING(x) ((x) << S_TNLERRORPING)
+#define F_TNLERRORPING V_TNLERRORPING(1U)
+
+#define S_TNLERRORCSUM 29
+#define V_TNLERRORCSUM(x) ((x) << S_TNLERRORCSUM)
+#define F_TNLERRORCSUM V_TNLERRORCSUM(1U)
+
+#define S_TNLERRORCSUMIP 28
+#define V_TNLERRORCSUMIP(x) ((x) << S_TNLERRORCSUMIP)
+#define F_TNLERRORCSUMIP V_TNLERRORCSUMIP(1U)
+
+#define S_TNLERRORTCPOPT 25
+#define V_TNLERRORTCPOPT(x) ((x) << S_TNLERRORTCPOPT)
+#define F_TNLERRORTCPOPT V_TNLERRORTCPOPT(1U)
+
+#define S_TNLERRORPKTLEN 24
+#define V_TNLERRORPKTLEN(x) ((x) << S_TNLERRORPKTLEN)
+#define F_TNLERRORPKTLEN V_TNLERRORPKTLEN(1U)
+
+#define S_TNLERRORTCPHDRLEN 23
+#define V_TNLERRORTCPHDRLEN(x) ((x) << S_TNLERRORTCPHDRLEN)
+#define F_TNLERRORTCPHDRLEN V_TNLERRORTCPHDRLEN(1U)
+
+#define S_TNLERRORIPHDRLEN 22
+#define V_TNLERRORIPHDRLEN(x) ((x) << S_TNLERRORIPHDRLEN)
+#define F_TNLERRORIPHDRLEN V_TNLERRORIPHDRLEN(1U)
+
+#define S_TNLERRORETHHDRLEN 21
+#define V_TNLERRORETHHDRLEN(x) ((x) << S_TNLERRORETHHDRLEN)
+#define F_TNLERRORETHHDRLEN V_TNLERRORETHHDRLEN(1U)
+
+#define S_TNLERRORATTACK 20
+#define V_TNLERRORATTACK(x) ((x) << S_TNLERRORATTACK)
+#define F_TNLERRORATTACK V_TNLERRORATTACK(1U)
+
+#define S_TNLERRORFRAG 19
+#define V_TNLERRORFRAG(x) ((x) << S_TNLERRORFRAG)
+#define F_TNLERRORFRAG V_TNLERRORFRAG(1U)
+
+#define S_TNLERRORIPVER 18
+#define V_TNLERRORIPVER(x) ((x) << S_TNLERRORIPVER)
+#define F_TNLERRORIPVER V_TNLERRORIPVER(1U)
+
+#define S_TNLERRORMAC 17
+#define V_TNLERRORMAC(x) ((x) << S_TNLERRORMAC)
+#define F_TNLERRORMAC V_TNLERRORMAC(1U)
+
+#define S_TNLERRORANY 16
+#define V_TNLERRORANY(x) ((x) << S_TNLERRORANY)
+#define F_TNLERRORANY V_TNLERRORANY(1U)
+
+#define S_DROPERRORPING 14
+#define V_DROPERRORPING(x) ((x) << S_DROPERRORPING)
+#define F_DROPERRORPING V_DROPERRORPING(1U)
+
+#define S_DROPERRORCSUM 13
+#define V_DROPERRORCSUM(x) ((x) << S_DROPERRORCSUM)
+#define F_DROPERRORCSUM V_DROPERRORCSUM(1U)
+
+#define S_DROPERRORCSUMIP 12
+#define V_DROPERRORCSUMIP(x) ((x) << S_DROPERRORCSUMIP)
+#define F_DROPERRORCSUMIP V_DROPERRORCSUMIP(1U)
+
+#define S_DROPERRORTCPOPT 9
+#define V_DROPERRORTCPOPT(x) ((x) << S_DROPERRORTCPOPT)
+#define F_DROPERRORTCPOPT V_DROPERRORTCPOPT(1U)
+
+#define S_DROPERRORPKTLEN 8
+#define V_DROPERRORPKTLEN(x) ((x) << S_DROPERRORPKTLEN)
+#define F_DROPERRORPKTLEN V_DROPERRORPKTLEN(1U)
+
+#define S_DROPERRORTCPHDRLEN 7
+#define V_DROPERRORTCPHDRLEN(x) ((x) << S_DROPERRORTCPHDRLEN)
+#define F_DROPERRORTCPHDRLEN V_DROPERRORTCPHDRLEN(1U)
+
+#define S_DROPERRORIPHDRLEN 6
+#define V_DROPERRORIPHDRLEN(x) ((x) << S_DROPERRORIPHDRLEN)
+#define F_DROPERRORIPHDRLEN V_DROPERRORIPHDRLEN(1U)
+
+#define S_DROPERRORETHHDRLEN 5
+#define V_DROPERRORETHHDRLEN(x) ((x) << S_DROPERRORETHHDRLEN)
+#define F_DROPERRORETHHDRLEN V_DROPERRORETHHDRLEN(1U)
+
+#define S_DROPERRORATTACK 4
+#define V_DROPERRORATTACK(x) ((x) << S_DROPERRORATTACK)
+#define F_DROPERRORATTACK V_DROPERRORATTACK(1U)
+
+#define S_DROPERRORFRAG 3
+#define V_DROPERRORFRAG(x) ((x) << S_DROPERRORFRAG)
+#define F_DROPERRORFRAG V_DROPERRORFRAG(1U)
+
+#define S_DROPERRORIPVER 2
+#define V_DROPERRORIPVER(x) ((x) << S_DROPERRORIPVER)
+#define F_DROPERRORIPVER V_DROPERRORIPVER(1U)
+
+#define S_DROPERRORMAC 1
+#define V_DROPERRORMAC(x) ((x) << S_DROPERRORMAC)
+#define F_DROPERRORMAC V_DROPERRORMAC(1U)
+
+#define S_DROPERRORANY 0
+#define V_DROPERRORANY(x) ((x) << S_DROPERRORANY)
+#define F_DROPERRORANY V_DROPERRORANY(1U)
+
+#define A_TP_TIMER_RESOLUTION 0x7d90
+
+#define S_TIMERRESOLUTION 16
+#define M_TIMERRESOLUTION 0xffU
+#define V_TIMERRESOLUTION(x) ((x) << S_TIMERRESOLUTION)
+#define G_TIMERRESOLUTION(x) (((x) >> S_TIMERRESOLUTION) & M_TIMERRESOLUTION)
+
+#define S_TIMESTAMPRESOLUTION 8
+#define M_TIMESTAMPRESOLUTION 0xffU
+#define V_TIMESTAMPRESOLUTION(x) ((x) << S_TIMESTAMPRESOLUTION)
+#define G_TIMESTAMPRESOLUTION(x) (((x) >> S_TIMESTAMPRESOLUTION) & M_TIMESTAMPRESOLUTION)
+
+#define S_DELAYEDACKRESOLUTION 0
+#define M_DELAYEDACKRESOLUTION 0xffU
+#define V_DELAYEDACKRESOLUTION(x) ((x) << S_DELAYEDACKRESOLUTION)
+#define G_DELAYEDACKRESOLUTION(x) (((x) >> S_DELAYEDACKRESOLUTION) & M_DELAYEDACKRESOLUTION)
+
+#define A_TP_MSL 0x7d94
+
+#define S_MSL 0
+#define M_MSL 0x3fffffffU
+#define V_MSL(x) ((x) << S_MSL)
+#define G_MSL(x) (((x) >> S_MSL) & M_MSL)
+
+#define A_TP_RXT_MIN 0x7d98
+
+#define S_RXTMIN 0
+#define M_RXTMIN 0x3fffffffU
+#define V_RXTMIN(x) ((x) << S_RXTMIN)
+#define G_RXTMIN(x) (((x) >> S_RXTMIN) & M_RXTMIN)
+
+#define A_TP_RXT_MAX 0x7d9c
+
+#define S_RXTMAX 0
+#define M_RXTMAX 0x3fffffffU
+#define V_RXTMAX(x) ((x) << S_RXTMAX)
+#define G_RXTMAX(x) (((x) >> S_RXTMAX) & M_RXTMAX)
+
+#define A_TP_PERS_MIN 0x7da0
+
+#define S_PERSMIN 0
+#define M_PERSMIN 0x3fffffffU
+#define V_PERSMIN(x) ((x) << S_PERSMIN)
+#define G_PERSMIN(x) (((x) >> S_PERSMIN) & M_PERSMIN)
+
+#define A_TP_PERS_MAX 0x7da4
+
+#define S_PERSMAX 0
+#define M_PERSMAX 0x3fffffffU
+#define V_PERSMAX(x) ((x) << S_PERSMAX)
+#define G_PERSMAX(x) (((x) >> S_PERSMAX) & M_PERSMAX)
+
+#define A_TP_KEEP_IDLE 0x7da8
+
+#define S_KEEPALIVEIDLE 0
+#define M_KEEPALIVEIDLE 0x3fffffffU
+#define V_KEEPALIVEIDLE(x) ((x) << S_KEEPALIVEIDLE)
+#define G_KEEPALIVEIDLE(x) (((x) >> S_KEEPALIVEIDLE) & M_KEEPALIVEIDLE)
+
+#define A_TP_KEEP_INTVL 0x7dac
+
+#define S_KEEPALIVEINTVL 0
+#define M_KEEPALIVEINTVL 0x3fffffffU
+#define V_KEEPALIVEINTVL(x) ((x) << S_KEEPALIVEINTVL)
+#define G_KEEPALIVEINTVL(x) (((x) >> S_KEEPALIVEINTVL) & M_KEEPALIVEINTVL)
+
+#define A_TP_INIT_SRTT 0x7db0
+
+#define S_MAXRTT 16
+#define M_MAXRTT 0xffffU
+#define V_MAXRTT(x) ((x) << S_MAXRTT)
+#define G_MAXRTT(x) (((x) >> S_MAXRTT) & M_MAXRTT)
+
+#define S_INITSRTT 0
+#define M_INITSRTT 0xffffU
+#define V_INITSRTT(x) ((x) << S_INITSRTT)
+#define G_INITSRTT(x) (((x) >> S_INITSRTT) & M_INITSRTT)
+
+#define A_TP_DACK_TIMER 0x7db4
+
+#define S_DACKTIME 0
+#define M_DACKTIME 0xfffU
+#define V_DACKTIME(x) ((x) << S_DACKTIME)
+#define G_DACKTIME(x) (((x) >> S_DACKTIME) & M_DACKTIME)
+
+#define A_TP_FINWAIT2_TIMER 0x7db8
+
+#define S_FINWAIT2TIME 0
+#define M_FINWAIT2TIME 0x3fffffffU
+#define V_FINWAIT2TIME(x) ((x) << S_FINWAIT2TIME)
+#define G_FINWAIT2TIME(x) (((x) >> S_FINWAIT2TIME) & M_FINWAIT2TIME)
+
+#define A_TP_FAST_FINWAIT2_TIMER 0x7dbc
+
+#define S_FASTFINWAIT2TIME 0
+#define M_FASTFINWAIT2TIME 0x3fffffffU
+#define V_FASTFINWAIT2TIME(x) ((x) << S_FASTFINWAIT2TIME)
+#define G_FASTFINWAIT2TIME(x) (((x) >> S_FASTFINWAIT2TIME) & M_FASTFINWAIT2TIME)
+
+#define A_TP_SHIFT_CNT 0x7dc0
+
+#define S_SYNSHIFTMAX 24
+#define M_SYNSHIFTMAX 0xffU
+#define V_SYNSHIFTMAX(x) ((x) << S_SYNSHIFTMAX)
+#define G_SYNSHIFTMAX(x) (((x) >> S_SYNSHIFTMAX) & M_SYNSHIFTMAX)
+
+#define S_RXTSHIFTMAXR1 20
+#define M_RXTSHIFTMAXR1 0xfU
+#define V_RXTSHIFTMAXR1(x) ((x) << S_RXTSHIFTMAXR1)
+#define G_RXTSHIFTMAXR1(x) (((x) >> S_RXTSHIFTMAXR1) & M_RXTSHIFTMAXR1)
+
+#define S_RXTSHIFTMAXR2 16
+#define M_RXTSHIFTMAXR2 0xfU
+#define V_RXTSHIFTMAXR2(x) ((x) << S_RXTSHIFTMAXR2)
+#define G_RXTSHIFTMAXR2(x) (((x) >> S_RXTSHIFTMAXR2) & M_RXTSHIFTMAXR2)
+
+#define S_PERSHIFTBACKOFFMAX 12
+#define M_PERSHIFTBACKOFFMAX 0xfU
+#define V_PERSHIFTBACKOFFMAX(x) ((x) << S_PERSHIFTBACKOFFMAX)
+#define G_PERSHIFTBACKOFFMAX(x) (((x) >> S_PERSHIFTBACKOFFMAX) & M_PERSHIFTBACKOFFMAX)
+
+#define S_PERSHIFTMAX 8
+#define M_PERSHIFTMAX 0xfU
+#define V_PERSHIFTMAX(x) ((x) << S_PERSHIFTMAX)
+#define G_PERSHIFTMAX(x) (((x) >> S_PERSHIFTMAX) & M_PERSHIFTMAX)
+
+#define S_KEEPALIVEMAXR1 4
+#define M_KEEPALIVEMAXR1 0xfU
+#define V_KEEPALIVEMAXR1(x) ((x) << S_KEEPALIVEMAXR1)
+#define G_KEEPALIVEMAXR1(x) (((x) >> S_KEEPALIVEMAXR1) & M_KEEPALIVEMAXR1)
+
+#define S_KEEPALIVEMAXR2 0
+#define M_KEEPALIVEMAXR2 0xfU
+#define V_KEEPALIVEMAXR2(x) ((x) << S_KEEPALIVEMAXR2)
+#define G_KEEPALIVEMAXR2(x) (((x) >> S_KEEPALIVEMAXR2) & M_KEEPALIVEMAXR2)
+
+#define A_TP_TM_CONFIG 0x7dc4
+
+#define S_CMTIMERMAXNUM 0
+#define M_CMTIMERMAXNUM 0x7U
+#define V_CMTIMERMAXNUM(x) ((x) << S_CMTIMERMAXNUM)
+#define G_CMTIMERMAXNUM(x) (((x) >> S_CMTIMERMAXNUM) & M_CMTIMERMAXNUM)
+
+#define A_TP_TIME_LO 0x7dc8
+#define A_TP_TIME_HI 0x7dcc
+#define A_TP_PORT_MTU_0 0x7dd0
+
+#define S_PORT1MTUVALUE 16
+#define M_PORT1MTUVALUE 0xffffU
+#define V_PORT1MTUVALUE(x) ((x) << S_PORT1MTUVALUE)
+#define G_PORT1MTUVALUE(x) (((x) >> S_PORT1MTUVALUE) & M_PORT1MTUVALUE)
+
+#define S_PORT0MTUVALUE 0
+#define M_PORT0MTUVALUE 0xffffU
+#define V_PORT0MTUVALUE(x) ((x) << S_PORT0MTUVALUE)
+#define G_PORT0MTUVALUE(x) (((x) >> S_PORT0MTUVALUE) & M_PORT0MTUVALUE)
+
+#define A_TP_PORT_MTU_1 0x7dd4
+
+#define S_PORT3MTUVALUE 16
+#define M_PORT3MTUVALUE 0xffffU
+#define V_PORT3MTUVALUE(x) ((x) << S_PORT3MTUVALUE)
+#define G_PORT3MTUVALUE(x) (((x) >> S_PORT3MTUVALUE) & M_PORT3MTUVALUE)
+
+#define S_PORT2MTUVALUE 0
+#define M_PORT2MTUVALUE 0xffffU
+#define V_PORT2MTUVALUE(x) ((x) << S_PORT2MTUVALUE)
+#define G_PORT2MTUVALUE(x) (((x) >> S_PORT2MTUVALUE) & M_PORT2MTUVALUE)
+
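+/*
+ * Illustrative (hypothetical) use of the V_ helpers, assuming the
+ * t4_read_reg()/t4_write_reg() accessors from the common code: set the
+ * port 0 MTU while preserving port 1's value.
+ *
+ *	u32 v = t4_read_reg(adap, A_TP_PORT_MTU_0);
+ *	v = (v & ~V_PORT0MTUVALUE(M_PORT0MTUVALUE)) | V_PORT0MTUVALUE(1500);
+ *	t4_write_reg(adap, A_TP_PORT_MTU_0, v);
+ */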
+#define A_TP_PACE_TABLE 0x7dd8
+#define A_TP_CCTRL_TABLE 0x7ddc
+
+#define S_ROWINDEX 16
+#define M_ROWINDEX 0xffffU
+#define V_ROWINDEX(x) ((x) << S_ROWINDEX)
+#define G_ROWINDEX(x) (((x) >> S_ROWINDEX) & M_ROWINDEX)
+
+#define S_ROWVALUE 0
+#define M_ROWVALUE 0xffffU
+#define V_ROWVALUE(x) ((x) << S_ROWVALUE)
+#define G_ROWVALUE(x) (((x) >> S_ROWVALUE) & M_ROWVALUE)
+
+#define A_TP_MTU_TABLE 0x7de4
+
+#define S_MTUINDEX 24
+#define M_MTUINDEX 0xffU
+#define V_MTUINDEX(x) ((x) << S_MTUINDEX)
+#define G_MTUINDEX(x) (((x) >> S_MTUINDEX) & M_MTUINDEX)
+
+#define S_MTUWIDTH 16
+#define M_MTUWIDTH 0xfU
+#define V_MTUWIDTH(x) ((x) << S_MTUWIDTH)
+#define G_MTUWIDTH(x) (((x) >> S_MTUWIDTH) & M_MTUWIDTH)
+
+#define S_MTUVALUE 0
+#define M_MTUVALUE 0x3fffU
+#define V_MTUVALUE(x) ((x) << S_MTUVALUE)
+#define G_MTUVALUE(x) (((x) >> S_MTUVALUE) & M_MTUVALUE)
+
+#define A_TP_ULP_TABLE 0x7de8
+
+#define S_ULPTYPE7FIELD 28
+#define M_ULPTYPE7FIELD 0xfU
+#define V_ULPTYPE7FIELD(x) ((x) << S_ULPTYPE7FIELD)
+#define G_ULPTYPE7FIELD(x) (((x) >> S_ULPTYPE7FIELD) & M_ULPTYPE7FIELD)
+
+#define S_ULPTYPE6FIELD 24
+#define M_ULPTYPE6FIELD 0xfU
+#define V_ULPTYPE6FIELD(x) ((x) << S_ULPTYPE6FIELD)
+#define G_ULPTYPE6FIELD(x) (((x) >> S_ULPTYPE6FIELD) & M_ULPTYPE6FIELD)
+
+#define S_ULPTYPE5FIELD 20
+#define M_ULPTYPE5FIELD 0xfU
+#define V_ULPTYPE5FIELD(x) ((x) << S_ULPTYPE5FIELD)
+#define G_ULPTYPE5FIELD(x) (((x) >> S_ULPTYPE5FIELD) & M_ULPTYPE5FIELD)
+
+#define S_ULPTYPE4FIELD 16
+#define M_ULPTYPE4FIELD 0xfU
+#define V_ULPTYPE4FIELD(x) ((x) << S_ULPTYPE4FIELD)
+#define G_ULPTYPE4FIELD(x) (((x) >> S_ULPTYPE4FIELD) & M_ULPTYPE4FIELD)
+
+#define S_ULPTYPE3FIELD 12
+#define M_ULPTYPE3FIELD 0xfU
+#define V_ULPTYPE3FIELD(x) ((x) << S_ULPTYPE3FIELD)
+#define G_ULPTYPE3FIELD(x) (((x) >> S_ULPTYPE3FIELD) & M_ULPTYPE3FIELD)
+
+#define S_ULPTYPE2FIELD 8
+#define M_ULPTYPE2FIELD 0xfU
+#define V_ULPTYPE2FIELD(x) ((x) << S_ULPTYPE2FIELD)
+#define G_ULPTYPE2FIELD(x) (((x) >> S_ULPTYPE2FIELD) & M_ULPTYPE2FIELD)
+
+#define S_ULPTYPE1FIELD 4
+#define M_ULPTYPE1FIELD 0xfU
+#define V_ULPTYPE1FIELD(x) ((x) << S_ULPTYPE1FIELD)
+#define G_ULPTYPE1FIELD(x) (((x) >> S_ULPTYPE1FIELD) & M_ULPTYPE1FIELD)
+
+#define S_ULPTYPE0FIELD 0
+#define M_ULPTYPE0FIELD 0xfU
+#define V_ULPTYPE0FIELD(x) ((x) << S_ULPTYPE0FIELD)
+#define G_ULPTYPE0FIELD(x) (((x) >> S_ULPTYPE0FIELD) & M_ULPTYPE0FIELD)
+
+#define A_TP_RSS_LKP_TABLE 0x7dec
+
+#define S_LKPTBLROWVLD 31
+#define V_LKPTBLROWVLD(x) ((x) << S_LKPTBLROWVLD)
+#define F_LKPTBLROWVLD V_LKPTBLROWVLD(1U)
+
+#define S_LKPTBLROWIDX 20
+#define M_LKPTBLROWIDX 0x3ffU
+#define V_LKPTBLROWIDX(x) ((x) << S_LKPTBLROWIDX)
+#define G_LKPTBLROWIDX(x) (((x) >> S_LKPTBLROWIDX) & M_LKPTBLROWIDX)
+
+#define S_LKPTBLQUEUE1 10
+#define M_LKPTBLQUEUE1 0x3ffU
+#define V_LKPTBLQUEUE1(x) ((x) << S_LKPTBLQUEUE1)
+#define G_LKPTBLQUEUE1(x) (((x) >> S_LKPTBLQUEUE1) & M_LKPTBLQUEUE1)
+
+#define S_LKPTBLQUEUE0 0
+#define M_LKPTBLQUEUE0 0x3ffU
+#define V_LKPTBLQUEUE0(x) ((x) << S_LKPTBLQUEUE0)
+#define G_LKPTBLQUEUE0(x) (((x) >> S_LKPTBLQUEUE0) & M_LKPTBLQUEUE0)
+
+#define A_TP_RSS_CONFIG 0x7df0
+
+#define S_TNL4TUPENIPV6 31
+#define V_TNL4TUPENIPV6(x) ((x) << S_TNL4TUPENIPV6)
+#define F_TNL4TUPENIPV6 V_TNL4TUPENIPV6(1U)
+
+#define S_TNL2TUPENIPV6 30
+#define V_TNL2TUPENIPV6(x) ((x) << S_TNL2TUPENIPV6)
+#define F_TNL2TUPENIPV6 V_TNL2TUPENIPV6(1U)
+
+#define S_TNL4TUPENIPV4 29
+#define V_TNL4TUPENIPV4(x) ((x) << S_TNL4TUPENIPV4)
+#define F_TNL4TUPENIPV4 V_TNL4TUPENIPV4(1U)
+
+#define S_TNL2TUPENIPV4 28
+#define V_TNL2TUPENIPV4(x) ((x) << S_TNL2TUPENIPV4)
+#define F_TNL2TUPENIPV4 V_TNL2TUPENIPV4(1U)
+
+#define S_TNLTCPSEL 27
+#define V_TNLTCPSEL(x) ((x) << S_TNLTCPSEL)
+#define F_TNLTCPSEL V_TNLTCPSEL(1U)
+
+#define S_TNLIP6SEL 26
+#define V_TNLIP6SEL(x) ((x) << S_TNLIP6SEL)
+#define F_TNLIP6SEL V_TNLIP6SEL(1U)
+
+#define S_TNLVRTSEL 25
+#define V_TNLVRTSEL(x) ((x) << S_TNLVRTSEL)
+#define F_TNLVRTSEL V_TNLVRTSEL(1U)
+
+#define S_TNLMAPEN 24
+#define V_TNLMAPEN(x) ((x) << S_TNLMAPEN)
+#define F_TNLMAPEN V_TNLMAPEN(1U)
+
+#define S_OFDHASHSAVE 19
+#define V_OFDHASHSAVE(x) ((x) << S_OFDHASHSAVE)
+#define F_OFDHASHSAVE V_OFDHASHSAVE(1U)
+
+#define S_OFDVRTSEL 18
+#define V_OFDVRTSEL(x) ((x) << S_OFDVRTSEL)
+#define F_OFDVRTSEL V_OFDVRTSEL(1U)
+
+#define S_OFDMAPEN 17
+#define V_OFDMAPEN(x) ((x) << S_OFDMAPEN)
+#define F_OFDMAPEN V_OFDMAPEN(1U)
+
+#define S_OFDLKPEN 16
+#define V_OFDLKPEN(x) ((x) << S_OFDLKPEN)
+#define F_OFDLKPEN V_OFDLKPEN(1U)
+
+#define S_SYN4TUPENIPV6 15
+#define V_SYN4TUPENIPV6(x) ((x) << S_SYN4TUPENIPV6)
+#define F_SYN4TUPENIPV6 V_SYN4TUPENIPV6(1U)
+
+#define S_SYN2TUPENIPV6 14
+#define V_SYN2TUPENIPV6(x) ((x) << S_SYN2TUPENIPV6)
+#define F_SYN2TUPENIPV6 V_SYN2TUPENIPV6(1U)
+
+#define S_SYN4TUPENIPV4 13
+#define V_SYN4TUPENIPV4(x) ((x) << S_SYN4TUPENIPV4)
+#define F_SYN4TUPENIPV4 V_SYN4TUPENIPV4(1U)
+
+#define S_SYN2TUPENIPV4 12
+#define V_SYN2TUPENIPV4(x) ((x) << S_SYN2TUPENIPV4)
+#define F_SYN2TUPENIPV4 V_SYN2TUPENIPV4(1U)
+
+#define S_SYNIP6SEL 11
+#define V_SYNIP6SEL(x) ((x) << S_SYNIP6SEL)
+#define F_SYNIP6SEL V_SYNIP6SEL(1U)
+
+#define S_SYNVRTSEL 10
+#define V_SYNVRTSEL(x) ((x) << S_SYNVRTSEL)
+#define F_SYNVRTSEL V_SYNVRTSEL(1U)
+
+#define S_SYNMAPEN 9
+#define V_SYNMAPEN(x) ((x) << S_SYNMAPEN)
+#define F_SYNMAPEN V_SYNMAPEN(1U)
+
+#define S_SYNLKPEN 8
+#define V_SYNLKPEN(x) ((x) << S_SYNLKPEN)
+#define F_SYNLKPEN V_SYNLKPEN(1U)
+
+#define S_CHANNELENABLE 7
+#define V_CHANNELENABLE(x) ((x) << S_CHANNELENABLE)
+#define F_CHANNELENABLE V_CHANNELENABLE(1U)
+
+#define S_PORTENABLE 6
+#define V_PORTENABLE(x) ((x) << S_PORTENABLE)
+#define F_PORTENABLE V_PORTENABLE(1U)
+
+#define S_TNLALLLOOKUP 5
+#define V_TNLALLLOOKUP(x) ((x) << S_TNLALLLOOKUP)
+#define F_TNLALLLOOKUP V_TNLALLLOOKUP(1U)
+
+#define S_VIRTENABLE 4
+#define V_VIRTENABLE(x) ((x) << S_VIRTENABLE)
+#define F_VIRTENABLE V_VIRTENABLE(1U)
+
+#define S_CONGESTIONENABLE 3
+#define V_CONGESTIONENABLE(x) ((x) << S_CONGESTIONENABLE)
+#define F_CONGESTIONENABLE V_CONGESTIONENABLE(1U)
+
+#define S_HASHTOEPLITZ 2
+#define V_HASHTOEPLITZ(x) ((x) << S_HASHTOEPLITZ)
+#define F_HASHTOEPLITZ V_HASHTOEPLITZ(1U)
+
+#define S_UDPENABLE 1
+#define V_UDPENABLE(x) ((x) << S_UDPENABLE)
+#define F_UDPENABLE V_UDPENABLE(1U)
+
+#define S_DISABLE 0
+#define V_DISABLE(x) ((x) << S_DISABLE)
+#define F_DISABLE V_DISABLE(1U)
+
+#define A_TP_RSS_CONFIG_TNL 0x7df4
+
+#define S_MASKSIZE 28
+#define M_MASKSIZE 0xfU
+#define V_MASKSIZE(x) ((x) << S_MASKSIZE)
+#define G_MASKSIZE(x) (((x) >> S_MASKSIZE) & M_MASKSIZE)
+
+#define S_MASKFILTER 16
+#define M_MASKFILTER 0x7ffU
+#define V_MASKFILTER(x) ((x) << S_MASKFILTER)
+#define G_MASKFILTER(x) (((x) >> S_MASKFILTER) & M_MASKFILTER)
+
+#define S_USEWIRECH 0
+#define V_USEWIRECH(x) ((x) << S_USEWIRECH)
+#define F_USEWIRECH V_USEWIRECH(1U)
+
+#define A_TP_RSS_CONFIG_OFD 0x7df8
+
+#define S_RRCPLMAPEN 20
+#define V_RRCPLMAPEN(x) ((x) << S_RRCPLMAPEN)
+#define F_RRCPLMAPEN V_RRCPLMAPEN(1U)
+
+#define S_RRCPLQUEWIDTH 16
+#define M_RRCPLQUEWIDTH 0xfU
+#define V_RRCPLQUEWIDTH(x) ((x) << S_RRCPLQUEWIDTH)
+#define G_RRCPLQUEWIDTH(x) (((x) >> S_RRCPLQUEWIDTH) & M_RRCPLQUEWIDTH)
+
+#define A_TP_RSS_CONFIG_SYN 0x7dfc
+#define A_TP_RSS_CONFIG_VRT 0x7e00
+
+#define S_VFRDRG 25
+#define V_VFRDRG(x) ((x) << S_VFRDRG)
+#define F_VFRDRG V_VFRDRG(1U)
+
+#define S_VFRDEN 24
+#define V_VFRDEN(x) ((x) << S_VFRDEN)
+#define F_VFRDEN V_VFRDEN(1U)
+
+#define S_VFPERREN 23
+#define V_VFPERREN(x) ((x) << S_VFPERREN)
+#define F_VFPERREN V_VFPERREN(1U)
+
+#define S_KEYPERREN 22
+#define V_KEYPERREN(x) ((x) << S_KEYPERREN)
+#define F_KEYPERREN V_KEYPERREN(1U)
+
+#define S_DISABLEVLAN 21
+#define V_DISABLEVLAN(x) ((x) << S_DISABLEVLAN)
+#define F_DISABLEVLAN V_DISABLEVLAN(1U)
+
+#define S_ENABLEUP0 20
+#define V_ENABLEUP0(x) ((x) << S_ENABLEUP0)
+#define F_ENABLEUP0 V_ENABLEUP0(1U)
+
+#define S_HASHDELAY 16
+#define M_HASHDELAY 0xfU
+#define V_HASHDELAY(x) ((x) << S_HASHDELAY)
+#define G_HASHDELAY(x) (((x) >> S_HASHDELAY) & M_HASHDELAY)
+
+#define S_VFWRADDR 8
+#define M_VFWRADDR 0x7fU
+#define V_VFWRADDR(x) ((x) << S_VFWRADDR)
+#define G_VFWRADDR(x) (((x) >> S_VFWRADDR) & M_VFWRADDR)
+
+#define S_KEYMODE 6
+#define M_KEYMODE 0x3U
+#define V_KEYMODE(x) ((x) << S_KEYMODE)
+#define G_KEYMODE(x) (((x) >> S_KEYMODE) & M_KEYMODE)
+
+#define S_VFWREN 5
+#define V_VFWREN(x) ((x) << S_VFWREN)
+#define F_VFWREN V_VFWREN(1U)
+
+#define S_KEYWREN 4
+#define V_KEYWREN(x) ((x) << S_KEYWREN)
+#define F_KEYWREN V_KEYWREN(1U)
+
+#define S_KEYWRADDR 0
+#define M_KEYWRADDR 0xfU
+#define V_KEYWRADDR(x) ((x) << S_KEYWRADDR)
+#define G_KEYWRADDR(x) (((x) >> S_KEYWRADDR) & M_KEYWRADDR)
+
+#define A_TP_RSS_CONFIG_CNG 0x7e04
+
+#define S_CHNCOUNT3 31
+#define V_CHNCOUNT3(x) ((x) << S_CHNCOUNT3)
+#define F_CHNCOUNT3 V_CHNCOUNT3(1U)
+
+#define S_CHNCOUNT2 30
+#define V_CHNCOUNT2(x) ((x) << S_CHNCOUNT2)
+#define F_CHNCOUNT2 V_CHNCOUNT2(1U)
+
+#define S_CHNCOUNT1 29
+#define V_CHNCOUNT1(x) ((x) << S_CHNCOUNT1)
+#define F_CHNCOUNT1 V_CHNCOUNT1(1U)
+
+#define S_CHNCOUNT0 28
+#define V_CHNCOUNT0(x) ((x) << S_CHNCOUNT0)
+#define F_CHNCOUNT0 V_CHNCOUNT0(1U)
+
+#define S_CHNUNDFLOW3 27
+#define V_CHNUNDFLOW3(x) ((x) << S_CHNUNDFLOW3)
+#define F_CHNUNDFLOW3 V_CHNUNDFLOW3(1U)
+
+#define S_CHNUNDFLOW2 26
+#define V_CHNUNDFLOW2(x) ((x) << S_CHNUNDFLOW2)
+#define F_CHNUNDFLOW2 V_CHNUNDFLOW2(1U)
+
+#define S_CHNUNDFLOW1 25
+#define V_CHNUNDFLOW1(x) ((x) << S_CHNUNDFLOW1)
+#define F_CHNUNDFLOW1 V_CHNUNDFLOW1(1U)
+
+#define S_CHNUNDFLOW0 24
+#define V_CHNUNDFLOW0(x) ((x) << S_CHNUNDFLOW0)
+#define F_CHNUNDFLOW0 V_CHNUNDFLOW0(1U)
+
+#define S_CHNOVRFLOW3 23
+#define V_CHNOVRFLOW3(x) ((x) << S_CHNOVRFLOW3)
+#define F_CHNOVRFLOW3 V_CHNOVRFLOW3(1U)
+
+#define S_CHNOVRFLOW2 22
+#define V_CHNOVRFLOW2(x) ((x) << S_CHNOVRFLOW2)
+#define F_CHNOVRFLOW2 V_CHNOVRFLOW2(1U)
+
+#define S_CHNOVRFLOW1 21
+#define V_CHNOVRFLOW1(x) ((x) << S_CHNOVRFLOW1)
+#define F_CHNOVRFLOW1 V_CHNOVRFLOW1(1U)
+
+#define S_CHNOVRFLOW0 20
+#define V_CHNOVRFLOW0(x) ((x) << S_CHNOVRFLOW0)
+#define F_CHNOVRFLOW0 V_CHNOVRFLOW0(1U)
+
+#define S_RSTCHN3 19
+#define V_RSTCHN3(x) ((x) << S_RSTCHN3)
+#define F_RSTCHN3 V_RSTCHN3(1U)
+
+#define S_RSTCHN2 18
+#define V_RSTCHN2(x) ((x) << S_RSTCHN2)
+#define F_RSTCHN2 V_RSTCHN2(1U)
+
+#define S_RSTCHN1 17
+#define V_RSTCHN1(x) ((x) << S_RSTCHN1)
+#define F_RSTCHN1 V_RSTCHN1(1U)
+
+#define S_RSTCHN0 16
+#define V_RSTCHN0(x) ((x) << S_RSTCHN0)
+#define F_RSTCHN0 V_RSTCHN0(1U)
+
+#define S_UPDVLD 15
+#define V_UPDVLD(x) ((x) << S_UPDVLD)
+#define F_UPDVLD V_UPDVLD(1U)
+
+#define S_XOFF 14
+#define V_XOFF(x) ((x) << S_XOFF)
+#define F_XOFF V_XOFF(1U)
+
+#define S_UPDCHN3 13
+#define V_UPDCHN3(x) ((x) << S_UPDCHN3)
+#define F_UPDCHN3 V_UPDCHN3(1U)
+
+#define S_UPDCHN2 12
+#define V_UPDCHN2(x) ((x) << S_UPDCHN2)
+#define F_UPDCHN2 V_UPDCHN2(1U)
+
+#define S_UPDCHN1 11
+#define V_UPDCHN1(x) ((x) << S_UPDCHN1)
+#define F_UPDCHN1 V_UPDCHN1(1U)
+
+#define S_UPDCHN0 10
+#define V_UPDCHN0(x) ((x) << S_UPDCHN0)
+#define F_UPDCHN0 V_UPDCHN0(1U)
+
+#define S_QUEUE 0
+#define M_QUEUE 0x3ffU
+#define V_QUEUE(x) ((x) << S_QUEUE)
+#define G_QUEUE(x) (((x) >> S_QUEUE) & M_QUEUE)
+
+#define A_TP_LA_TABLE_0 0x7e10
+
+#define S_VIRTPORT1TABLE 16
+#define M_VIRTPORT1TABLE 0xffffU
+#define V_VIRTPORT1TABLE(x) ((x) << S_VIRTPORT1TABLE)
+#define G_VIRTPORT1TABLE(x) (((x) >> S_VIRTPORT1TABLE) & M_VIRTPORT1TABLE)
+
+#define S_VIRTPORT0TABLE 0
+#define M_VIRTPORT0TABLE 0xffffU
+#define V_VIRTPORT0TABLE(x) ((x) << S_VIRTPORT0TABLE)
+#define G_VIRTPORT0TABLE(x) (((x) >> S_VIRTPORT0TABLE) & M_VIRTPORT0TABLE)
+
+#define A_TP_LA_TABLE_1 0x7e14
+
+#define S_VIRTPORT3TABLE 16
+#define M_VIRTPORT3TABLE 0xffffU
+#define V_VIRTPORT3TABLE(x) ((x) << S_VIRTPORT3TABLE)
+#define G_VIRTPORT3TABLE(x) (((x) >> S_VIRTPORT3TABLE) & M_VIRTPORT3TABLE)
+
+#define S_VIRTPORT2TABLE 0
+#define M_VIRTPORT2TABLE 0xffffU
+#define V_VIRTPORT2TABLE(x) ((x) << S_VIRTPORT2TABLE)
+#define G_VIRTPORT2TABLE(x) (((x) >> S_VIRTPORT2TABLE) & M_VIRTPORT2TABLE)
+
+#define A_TP_TM_PIO_ADDR 0x7e18
+#define A_TP_TM_PIO_DATA 0x7e1c
+#define A_TP_MOD_CONFIG 0x7e24
+
+#define S_RXCHANNELWEIGHT1 24
+#define M_RXCHANNELWEIGHT1 0xffU
+#define V_RXCHANNELWEIGHT1(x) ((x) << S_RXCHANNELWEIGHT1)
+#define G_RXCHANNELWEIGHT1(x) (((x) >> S_RXCHANNELWEIGHT1) & M_RXCHANNELWEIGHT1)
+
+#define S_RXCHANNELWEIGHT0 16
+#define M_RXCHANNELWEIGHT0 0xffU
+#define V_RXCHANNELWEIGHT0(x) ((x) << S_RXCHANNELWEIGHT0)
+#define G_RXCHANNELWEIGHT0(x) (((x) >> S_RXCHANNELWEIGHT0) & M_RXCHANNELWEIGHT0)
+
+#define S_TIMERMODE 8
+#define M_TIMERMODE 0xffU
+#define V_TIMERMODE(x) ((x) << S_TIMERMODE)
+#define G_TIMERMODE(x) (((x) >> S_TIMERMODE) & M_TIMERMODE)
+
+#define S_TXCHANNELXOFFEN 0
+#define M_TXCHANNELXOFFEN 0xfU
+#define V_TXCHANNELXOFFEN(x) ((x) << S_TXCHANNELXOFFEN)
+#define G_TXCHANNELXOFFEN(x) (((x) >> S_TXCHANNELXOFFEN) & M_TXCHANNELXOFFEN)
+
+#define A_TP_TX_MOD_QUEUE_REQ_MAP 0x7e28
+
+#define S_RX_MOD_WEIGHT 24
+#define M_RX_MOD_WEIGHT 0xffU
+#define V_RX_MOD_WEIGHT(x) ((x) << S_RX_MOD_WEIGHT)
+#define G_RX_MOD_WEIGHT(x) (((x) >> S_RX_MOD_WEIGHT) & M_RX_MOD_WEIGHT)
+
+#define S_TX_MOD_WEIGHT 16
+#define M_TX_MOD_WEIGHT 0xffU
+#define V_TX_MOD_WEIGHT(x) ((x) << S_TX_MOD_WEIGHT)
+#define G_TX_MOD_WEIGHT(x) (((x) >> S_TX_MOD_WEIGHT) & M_TX_MOD_WEIGHT)
+
+#define S_TX_MOD_QUEUE_REQ_MAP 0
+#define M_TX_MOD_QUEUE_REQ_MAP 0xffffU
+#define V_TX_MOD_QUEUE_REQ_MAP(x) ((x) << S_TX_MOD_QUEUE_REQ_MAP)
+#define G_TX_MOD_QUEUE_REQ_MAP(x) (((x) >> S_TX_MOD_QUEUE_REQ_MAP) & M_TX_MOD_QUEUE_REQ_MAP)
+
+#define A_TP_TX_MOD_QUEUE_WEIGHT1 0x7e2c
+
+#define S_TX_MODQ_WEIGHT7 24
+#define M_TX_MODQ_WEIGHT7 0xffU
+#define V_TX_MODQ_WEIGHT7(x) ((x) << S_TX_MODQ_WEIGHT7)
+#define G_TX_MODQ_WEIGHT7(x) (((x) >> S_TX_MODQ_WEIGHT7) & M_TX_MODQ_WEIGHT7)
+
+#define S_TX_MODQ_WEIGHT6 16
+#define M_TX_MODQ_WEIGHT6 0xffU
+#define V_TX_MODQ_WEIGHT6(x) ((x) << S_TX_MODQ_WEIGHT6)
+#define G_TX_MODQ_WEIGHT6(x) (((x) >> S_TX_MODQ_WEIGHT6) & M_TX_MODQ_WEIGHT6)
+
+#define S_TX_MODQ_WEIGHT5 8
+#define M_TX_MODQ_WEIGHT5 0xffU
+#define V_TX_MODQ_WEIGHT5(x) ((x) << S_TX_MODQ_WEIGHT5)
+#define G_TX_MODQ_WEIGHT5(x) (((x) >> S_TX_MODQ_WEIGHT5) & M_TX_MODQ_WEIGHT5)
+
+#define S_TX_MODQ_WEIGHT4 0
+#define M_TX_MODQ_WEIGHT4 0xffU
+#define V_TX_MODQ_WEIGHT4(x) ((x) << S_TX_MODQ_WEIGHT4)
+#define G_TX_MODQ_WEIGHT4(x) (((x) >> S_TX_MODQ_WEIGHT4) & M_TX_MODQ_WEIGHT4)
+
+#define A_TP_TX_MOD_QUEUE_WEIGHT0 0x7e30
+
+#define S_TX_MODQ_WEIGHT3 24
+#define M_TX_MODQ_WEIGHT3 0xffU
+#define V_TX_MODQ_WEIGHT3(x) ((x) << S_TX_MODQ_WEIGHT3)
+#define G_TX_MODQ_WEIGHT3(x) (((x) >> S_TX_MODQ_WEIGHT3) & M_TX_MODQ_WEIGHT3)
+
+#define S_TX_MODQ_WEIGHT2 16
+#define M_TX_MODQ_WEIGHT2 0xffU
+#define V_TX_MODQ_WEIGHT2(x) ((x) << S_TX_MODQ_WEIGHT2)
+#define G_TX_MODQ_WEIGHT2(x) (((x) >> S_TX_MODQ_WEIGHT2) & M_TX_MODQ_WEIGHT2)
+
+#define S_TX_MODQ_WEIGHT1 8
+#define M_TX_MODQ_WEIGHT1 0xffU
+#define V_TX_MODQ_WEIGHT1(x) ((x) << S_TX_MODQ_WEIGHT1)
+#define G_TX_MODQ_WEIGHT1(x) (((x) >> S_TX_MODQ_WEIGHT1) & M_TX_MODQ_WEIGHT1)
+
+#define S_TX_MODQ_WEIGHT0 0
+#define M_TX_MODQ_WEIGHT0 0xffU
+#define V_TX_MODQ_WEIGHT0(x) ((x) << S_TX_MODQ_WEIGHT0)
+#define G_TX_MODQ_WEIGHT0(x) (((x) >> S_TX_MODQ_WEIGHT0) & M_TX_MODQ_WEIGHT0)
+
+#define A_TP_TX_MOD_CHANNEL_WEIGHT 0x7e34
+#define A_TP_MOD_RATE_LIMIT 0x7e38
+
+#define S_RX_MOD_RATE_LIMIT_INC 24
+#define M_RX_MOD_RATE_LIMIT_INC 0xffU
+#define V_RX_MOD_RATE_LIMIT_INC(x) ((x) << S_RX_MOD_RATE_LIMIT_INC)
+#define G_RX_MOD_RATE_LIMIT_INC(x) (((x) >> S_RX_MOD_RATE_LIMIT_INC) & M_RX_MOD_RATE_LIMIT_INC)
+
+#define S_RX_MOD_RATE_LIMIT_TICK 16
+#define M_RX_MOD_RATE_LIMIT_TICK 0xffU
+#define V_RX_MOD_RATE_LIMIT_TICK(x) ((x) << S_RX_MOD_RATE_LIMIT_TICK)
+#define G_RX_MOD_RATE_LIMIT_TICK(x) (((x) >> S_RX_MOD_RATE_LIMIT_TICK) & M_RX_MOD_RATE_LIMIT_TICK)
+
+#define S_TX_MOD_RATE_LIMIT_INC 8
+#define M_TX_MOD_RATE_LIMIT_INC 0xffU
+#define V_TX_MOD_RATE_LIMIT_INC(x) ((x) << S_TX_MOD_RATE_LIMIT_INC)
+#define G_TX_MOD_RATE_LIMIT_INC(x) (((x) >> S_TX_MOD_RATE_LIMIT_INC) & M_TX_MOD_RATE_LIMIT_INC)
+
+#define S_TX_MOD_RATE_LIMIT_TICK 0
+#define M_TX_MOD_RATE_LIMIT_TICK 0xffU
+#define V_TX_MOD_RATE_LIMIT_TICK(x) ((x) << S_TX_MOD_RATE_LIMIT_TICK)
+#define G_TX_MOD_RATE_LIMIT_TICK(x) (((x) >> S_TX_MOD_RATE_LIMIT_TICK) & M_TX_MOD_RATE_LIMIT_TICK)
+
+#define A_TP_PIO_ADDR 0x7e40
+#define A_TP_PIO_DATA 0x7e44
+#define A_TP_RESET 0x7e4c
+
+#define S_FLSTINITENABLE 1
+#define V_FLSTINITENABLE(x) ((x) << S_FLSTINITENABLE)
+#define F_FLSTINITENABLE V_FLSTINITENABLE(1U)
+
+#define S_TPRESET 0
+#define V_TPRESET(x) ((x) << S_TPRESET)
+#define F_TPRESET V_TPRESET(1U)
+
+#define A_TP_MIB_INDEX 0x7e50
+#define A_TP_MIB_DATA 0x7e54
+#define A_TP_SYNC_TIME_HI 0x7e58
+#define A_TP_SYNC_TIME_LO 0x7e5c
+#define A_TP_CMM_MM_RX_FLST_BASE 0x7e60
+#define A_TP_CMM_MM_TX_FLST_BASE 0x7e64
+#define A_TP_CMM_MM_PS_FLST_BASE 0x7e68
+#define A_TP_CMM_MM_MAX_PSTRUCT 0x7e6c
+
+#define S_CMMAXPSTRUCT 0
+#define M_CMMAXPSTRUCT 0x1fffffU
+#define V_CMMAXPSTRUCT(x) ((x) << S_CMMAXPSTRUCT)
+#define G_CMMAXPSTRUCT(x) (((x) >> S_CMMAXPSTRUCT) & M_CMMAXPSTRUCT)
+
+#define A_TP_INT_ENABLE 0x7e70
+
+#define S_FLMTXFLSTEMPTY 30
+#define V_FLMTXFLSTEMPTY(x) ((x) << S_FLMTXFLSTEMPTY)
+#define F_FLMTXFLSTEMPTY V_FLMTXFLSTEMPTY(1U)
+
+#define S_RSSLKPPERR 29
+#define V_RSSLKPPERR(x) ((x) << S_RSSLKPPERR)
+#define F_RSSLKPPERR V_RSSLKPPERR(1U)
+
+#define S_FLMPERRSET 28
+#define V_FLMPERRSET(x) ((x) << S_FLMPERRSET)
+#define F_FLMPERRSET V_FLMPERRSET(1U)
+
+#define S_PROTOCOLSRAMPERR 27
+#define V_PROTOCOLSRAMPERR(x) ((x) << S_PROTOCOLSRAMPERR)
+#define F_PROTOCOLSRAMPERR V_PROTOCOLSRAMPERR(1U)
+
+#define S_ARPLUTPERR 26
+#define V_ARPLUTPERR(x) ((x) << S_ARPLUTPERR)
+#define F_ARPLUTPERR V_ARPLUTPERR(1U)
+
+#define S_CMRCFOPPERR 25
+#define V_CMRCFOPPERR(x) ((x) << S_CMRCFOPPERR)
+#define F_CMRCFOPPERR V_CMRCFOPPERR(1U)
+
+#define S_CMCACHEPERR 24
+#define V_CMCACHEPERR(x) ((x) << S_CMCACHEPERR)
+#define F_CMCACHEPERR V_CMCACHEPERR(1U)
+
+#define S_CMRCFDATAPERR 23
+#define V_CMRCFDATAPERR(x) ((x) << S_CMRCFDATAPERR)
+#define F_CMRCFDATAPERR V_CMRCFDATAPERR(1U)
+
+#define S_DBL2TLUTPERR 22
+#define V_DBL2TLUTPERR(x) ((x) << S_DBL2TLUTPERR)
+#define F_DBL2TLUTPERR V_DBL2TLUTPERR(1U)
+
+#define S_DBTXTIDPERR 21
+#define V_DBTXTIDPERR(x) ((x) << S_DBTXTIDPERR)
+#define F_DBTXTIDPERR V_DBTXTIDPERR(1U)
+
+#define S_DBEXTPERR 20
+#define V_DBEXTPERR(x) ((x) << S_DBEXTPERR)
+#define F_DBEXTPERR V_DBEXTPERR(1U)
+
+#define S_DBOPPERR 19
+#define V_DBOPPERR(x) ((x) << S_DBOPPERR)
+#define F_DBOPPERR V_DBOPPERR(1U)
+
+#define S_TMCACHEPERR 18
+#define V_TMCACHEPERR(x) ((x) << S_TMCACHEPERR)
+#define F_TMCACHEPERR V_TMCACHEPERR(1U)
+
+#define S_ETPOUTCPLFIFOPERR 17
+#define V_ETPOUTCPLFIFOPERR(x) ((x) << S_ETPOUTCPLFIFOPERR)
+#define F_ETPOUTCPLFIFOPERR V_ETPOUTCPLFIFOPERR(1U)
+
+#define S_ETPOUTTCPFIFOPERR 16
+#define V_ETPOUTTCPFIFOPERR(x) ((x) << S_ETPOUTTCPFIFOPERR)
+#define F_ETPOUTTCPFIFOPERR V_ETPOUTTCPFIFOPERR(1U)
+
+#define S_ETPOUTIPFIFOPERR 15
+#define V_ETPOUTIPFIFOPERR(x) ((x) << S_ETPOUTIPFIFOPERR)
+#define F_ETPOUTIPFIFOPERR V_ETPOUTIPFIFOPERR(1U)
+
+#define S_ETPOUTETHFIFOPERR 14
+#define V_ETPOUTETHFIFOPERR(x) ((x) << S_ETPOUTETHFIFOPERR)
+#define F_ETPOUTETHFIFOPERR V_ETPOUTETHFIFOPERR(1U)
+
+#define S_ETPINCPLFIFOPERR 13
+#define V_ETPINCPLFIFOPERR(x) ((x) << S_ETPINCPLFIFOPERR)
+#define F_ETPINCPLFIFOPERR V_ETPINCPLFIFOPERR(1U)
+
+#define S_ETPINTCPOPTFIFOPERR 12
+#define V_ETPINTCPOPTFIFOPERR(x) ((x) << S_ETPINTCPOPTFIFOPERR)
+#define F_ETPINTCPOPTFIFOPERR V_ETPINTCPOPTFIFOPERR(1U)
+
+#define S_ETPINTCPFIFOPERR 11
+#define V_ETPINTCPFIFOPERR(x) ((x) << S_ETPINTCPFIFOPERR)
+#define F_ETPINTCPFIFOPERR V_ETPINTCPFIFOPERR(1U)
+
+#define S_ETPINIPFIFOPERR 10
+#define V_ETPINIPFIFOPERR(x) ((x) << S_ETPINIPFIFOPERR)
+#define F_ETPINIPFIFOPERR V_ETPINIPFIFOPERR(1U)
+
+#define S_ETPINETHFIFOPERR 9
+#define V_ETPINETHFIFOPERR(x) ((x) << S_ETPINETHFIFOPERR)
+#define F_ETPINETHFIFOPERR V_ETPINETHFIFOPERR(1U)
+
+#define S_CTPOUTCPLFIFOPERR 8
+#define V_CTPOUTCPLFIFOPERR(x) ((x) << S_CTPOUTCPLFIFOPERR)
+#define F_CTPOUTCPLFIFOPERR V_CTPOUTCPLFIFOPERR(1U)
+
+#define S_CTPOUTTCPFIFOPERR 7
+#define V_CTPOUTTCPFIFOPERR(x) ((x) << S_CTPOUTTCPFIFOPERR)
+#define F_CTPOUTTCPFIFOPERR V_CTPOUTTCPFIFOPERR(1U)
+
+#define S_CTPOUTIPFIFOPERR 6
+#define V_CTPOUTIPFIFOPERR(x) ((x) << S_CTPOUTIPFIFOPERR)
+#define F_CTPOUTIPFIFOPERR V_CTPOUTIPFIFOPERR(1U)
+
+#define S_CTPOUTETHFIFOPERR 5
+#define V_CTPOUTETHFIFOPERR(x) ((x) << S_CTPOUTETHFIFOPERR)
+#define F_CTPOUTETHFIFOPERR V_CTPOUTETHFIFOPERR(1U)
+
+#define S_CTPINCPLFIFOPERR 4
+#define V_CTPINCPLFIFOPERR(x) ((x) << S_CTPINCPLFIFOPERR)
+#define F_CTPINCPLFIFOPERR V_CTPINCPLFIFOPERR(1U)
+
+#define S_CTPINTCPOPFIFOPERR 3
+#define V_CTPINTCPOPFIFOPERR(x) ((x) << S_CTPINTCPOPFIFOPERR)
+#define F_CTPINTCPOPFIFOPERR V_CTPINTCPOPFIFOPERR(1U)
+
+#define S_PDUFBKFIFOPERR 2
+#define V_PDUFBKFIFOPERR(x) ((x) << S_PDUFBKFIFOPERR)
+#define F_PDUFBKFIFOPERR V_PDUFBKFIFOPERR(1U)
+
+#define S_CMOPEXTFIFOPERR 1
+#define V_CMOPEXTFIFOPERR(x) ((x) << S_CMOPEXTFIFOPERR)
+#define F_CMOPEXTFIFOPERR V_CMOPEXTFIFOPERR(1U)
+
+#define S_DELINVFIFOPERR 0
+#define V_DELINVFIFOPERR(x) ((x) << S_DELINVFIFOPERR)
+#define F_DELINVFIFOPERR V_DELINVFIFOPERR(1U)
+
+#define A_TP_INT_CAUSE 0x7e74
+#define A_TP_PER_ENABLE 0x7e78
+#define A_TP_FLM_FREE_PS_CNT 0x7e80
+
+#define S_FREEPSTRUCTCOUNT 0
+#define M_FREEPSTRUCTCOUNT 0x1fffffU
+#define V_FREEPSTRUCTCOUNT(x) ((x) << S_FREEPSTRUCTCOUNT)
+#define G_FREEPSTRUCTCOUNT(x) (((x) >> S_FREEPSTRUCTCOUNT) & M_FREEPSTRUCTCOUNT)
+
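+/*
+ * Hypothetical sketch of the matching G_ read path, under the same
+ * assumptions as above:
+ *
+ *	u32 free = G_FREEPSTRUCTCOUNT(t4_read_reg(adap, A_TP_FLM_FREE_PS_CNT));
+ */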
+#define A_TP_FLM_FREE_RX_CNT 0x7e84
+
+#define S_FREERXPAGECHN 28
+#define V_FREERXPAGECHN(x) ((x) << S_FREERXPAGECHN)
+#define F_FREERXPAGECHN V_FREERXPAGECHN(1U)
+
+#define S_FREERXPAGECOUNT 0
+#define M_FREERXPAGECOUNT 0x1fffffU
+#define V_FREERXPAGECOUNT(x) ((x) << S_FREERXPAGECOUNT)
+#define G_FREERXPAGECOUNT(x) (((x) >> S_FREERXPAGECOUNT) & M_FREERXPAGECOUNT)
+
+#define A_TP_FLM_FREE_TX_CNT 0x7e88
+
+#define S_FREETXPAGECHN 28
+#define M_FREETXPAGECHN 0x3U
+#define V_FREETXPAGECHN(x) ((x) << S_FREETXPAGECHN)
+#define G_FREETXPAGECHN(x) (((x) >> S_FREETXPAGECHN) & M_FREETXPAGECHN)
+
+#define S_FREETXPAGECOUNT 0
+#define M_FREETXPAGECOUNT 0x1fffffU
+#define V_FREETXPAGECOUNT(x) ((x) << S_FREETXPAGECOUNT)
+#define G_FREETXPAGECOUNT(x) (((x) >> S_FREETXPAGECOUNT) & M_FREETXPAGECOUNT)
+
+#define A_TP_TM_HEAP_PUSH_CNT 0x7e8c
+#define A_TP_TM_HEAP_POP_CNT 0x7e90
+#define A_TP_TM_DACK_PUSH_CNT 0x7e94
+#define A_TP_TM_DACK_POP_CNT 0x7e98
+#define A_TP_TM_MOD_PUSH_CNT 0x7e9c
+#define A_TP_MOD_POP_CNT 0x7ea0
+#define A_TP_TIMER_SEPARATOR 0x7ea4
+
+#define S_TIMERSEPARATOR 16
+#define M_TIMERSEPARATOR 0xffffU
+#define V_TIMERSEPARATOR(x) ((x) << S_TIMERSEPARATOR)
+#define G_TIMERSEPARATOR(x) (((x) >> S_TIMERSEPARATOR) & M_TIMERSEPARATOR)
+
+#define S_DISABLETIMEFREEZE 0
+#define V_DISABLETIMEFREEZE(x) ((x) << S_DISABLETIMEFREEZE)
+#define F_DISABLETIMEFREEZE V_DISABLETIMEFREEZE(1U)
+
+#define A_TP_DEBUG_FLAGS 0x7eac
+
+#define S_RXTIMERDACKFIRST 26
+#define V_RXTIMERDACKFIRST(x) ((x) << S_RXTIMERDACKFIRST)
+#define F_RXTIMERDACKFIRST V_RXTIMERDACKFIRST(1U)
+
+#define S_RXTIMERDACK 25
+#define V_RXTIMERDACK(x) ((x) << S_RXTIMERDACK)
+#define F_RXTIMERDACK V_RXTIMERDACK(1U)
+
+#define S_RXTIMERHEARTBEAT 24
+#define V_RXTIMERHEARTBEAT(x) ((x) << S_RXTIMERHEARTBEAT)
+#define F_RXTIMERHEARTBEAT V_RXTIMERHEARTBEAT(1U)
+
+#define S_RXPAWSDROP 23
+#define V_RXPAWSDROP(x) ((x) << S_RXPAWSDROP)
+#define F_RXPAWSDROP V_RXPAWSDROP(1U)
+
+#define S_RXURGDATADROP 22
+#define V_RXURGDATADROP(x) ((x) << S_RXURGDATADROP)
+#define F_RXURGDATADROP V_RXURGDATADROP(1U)
+
+#define S_RXFUTUREDATA 21
+#define V_RXFUTUREDATA(x) ((x) << S_RXFUTUREDATA)
+#define F_RXFUTUREDATA V_RXFUTUREDATA(1U)
+
+#define S_RXRCVRXMDATA 20
+#define V_RXRCVRXMDATA(x) ((x) << S_RXRCVRXMDATA)
+#define F_RXRCVRXMDATA V_RXRCVRXMDATA(1U)
+
+#define S_RXRCVOOODATAFIN 19
+#define V_RXRCVOOODATAFIN(x) ((x) << S_RXRCVOOODATAFIN)
+#define F_RXRCVOOODATAFIN V_RXRCVOOODATAFIN(1U)
+
+#define S_RXRCVOOODATA 18
+#define V_RXRCVOOODATA(x) ((x) << S_RXRCVOOODATA)
+#define F_RXRCVOOODATA V_RXRCVOOODATA(1U)
+
+#define S_RXRCVWNDZERO 17
+#define V_RXRCVWNDZERO(x) ((x) << S_RXRCVWNDZERO)
+#define F_RXRCVWNDZERO V_RXRCVWNDZERO(1U)
+
+#define S_RXRCVWNDLTMSS 16
+#define V_RXRCVWNDLTMSS(x) ((x) << S_RXRCVWNDLTMSS)
+#define F_RXRCVWNDLTMSS V_RXRCVWNDLTMSS(1U)
+
+#define S_TXDUPACKINC 11
+#define V_TXDUPACKINC(x) ((x) << S_TXDUPACKINC)
+#define F_TXDUPACKINC V_TXDUPACKINC(1U)
+
+#define S_TXRXMURG 10
+#define V_TXRXMURG(x) ((x) << S_TXRXMURG)
+#define F_TXRXMURG V_TXRXMURG(1U)
+
+#define S_TXRXMFIN 9
+#define V_TXRXMFIN(x) ((x) << S_TXRXMFIN)
+#define F_TXRXMFIN V_TXRXMFIN(1U)
+
+#define S_TXRXMSYN 8
+#define V_TXRXMSYN(x) ((x) << S_TXRXMSYN)
+#define F_TXRXMSYN V_TXRXMSYN(1U)
+
+#define S_TXRXMNEWRENO 7
+#define V_TXRXMNEWRENO(x) ((x) << S_TXRXMNEWRENO)
+#define F_TXRXMNEWRENO V_TXRXMNEWRENO(1U)
+
+#define S_TXRXMFAST 6
+#define V_TXRXMFAST(x) ((x) << S_TXRXMFAST)
+#define F_TXRXMFAST V_TXRXMFAST(1U)
+
+#define S_TXRXMTIMER 5
+#define V_TXRXMTIMER(x) ((x) << S_TXRXMTIMER)
+#define F_TXRXMTIMER V_TXRXMTIMER(1U)
+
+#define S_TXRXMTIMERKEEPALIVE 4
+#define V_TXRXMTIMERKEEPALIVE(x) ((x) << S_TXRXMTIMERKEEPALIVE)
+#define F_TXRXMTIMERKEEPALIVE V_TXRXMTIMERKEEPALIVE(1U)
+
+#define S_TXRXMTIMERPERSIST 3
+#define V_TXRXMTIMERPERSIST(x) ((x) << S_TXRXMTIMERPERSIST)
+#define F_TXRXMTIMERPERSIST V_TXRXMTIMERPERSIST(1U)
+
+#define S_TXRCVADVSHRUNK 2
+#define V_TXRCVADVSHRUNK(x) ((x) << S_TXRCVADVSHRUNK)
+#define F_TXRCVADVSHRUNK V_TXRCVADVSHRUNK(1U)
+
+#define S_TXRCVADVZERO 1
+#define V_TXRCVADVZERO(x) ((x) << S_TXRCVADVZERO)
+#define F_TXRCVADVZERO V_TXRCVADVZERO(1U)
+
+#define S_TXRCVADVLTMSS 0
+#define V_TXRCVADVLTMSS(x) ((x) << S_TXRCVADVLTMSS)
+#define F_TXRCVADVLTMSS V_TXRCVADVLTMSS(1U)
+
+#define A_TP_RX_SCHED 0x7eb0
+
+#define S_RXCOMMITRESET1 31
+#define V_RXCOMMITRESET1(x) ((x) << S_RXCOMMITRESET1)
+#define F_RXCOMMITRESET1 V_RXCOMMITRESET1(1U)
+
+#define S_RXCOMMITRESET0 30
+#define V_RXCOMMITRESET0(x) ((x) << S_RXCOMMITRESET0)
+#define F_RXCOMMITRESET0 V_RXCOMMITRESET0(1U)
+
+#define S_RXFORCECONG1 29
+#define V_RXFORCECONG1(x) ((x) << S_RXFORCECONG1)
+#define F_RXFORCECONG1 V_RXFORCECONG1(1U)
+
+#define S_RXFORCECONG0 28
+#define V_RXFORCECONG0(x) ((x) << S_RXFORCECONG0)
+#define F_RXFORCECONG0 V_RXFORCECONG0(1U)
+
+#define S_ENABLELPBKFULL1 26
+#define M_ENABLELPBKFULL1 0x3U
+#define V_ENABLELPBKFULL1(x) ((x) << S_ENABLELPBKFULL1)
+#define G_ENABLELPBKFULL1(x) (((x) >> S_ENABLELPBKFULL1) & M_ENABLELPBKFULL1)
+
+#define S_ENABLELPBKFULL0 24
+#define M_ENABLELPBKFULL0 0x3U
+#define V_ENABLELPBKFULL0(x) ((x) << S_ENABLELPBKFULL0)
+#define G_ENABLELPBKFULL0(x) (((x) >> S_ENABLELPBKFULL0) & M_ENABLELPBKFULL0)
+
+#define S_ENABLEFIFOFULL1 22
+#define M_ENABLEFIFOFULL1 0x3U
+#define V_ENABLEFIFOFULL1(x) ((x) << S_ENABLEFIFOFULL1)
+#define G_ENABLEFIFOFULL1(x) (((x) >> S_ENABLEFIFOFULL1) & M_ENABLEFIFOFULL1)
+
+#define S_ENABLEPCMDFULL1 20
+#define M_ENABLEPCMDFULL1 0x3U
+#define V_ENABLEPCMDFULL1(x) ((x) << S_ENABLEPCMDFULL1)
+#define G_ENABLEPCMDFULL1(x) (((x) >> S_ENABLEPCMDFULL1) & M_ENABLEPCMDFULL1)
+
+#define S_ENABLEHDRFULL1 18
+#define M_ENABLEHDRFULL1 0x3U
+#define V_ENABLEHDRFULL1(x) ((x) << S_ENABLEHDRFULL1)
+#define G_ENABLEHDRFULL1(x) (((x) >> S_ENABLEHDRFULL1) & M_ENABLEHDRFULL1)
+
+#define S_ENABLEFIFOFULL0 16
+#define M_ENABLEFIFOFULL0 0x3U
+#define V_ENABLEFIFOFULL0(x) ((x) << S_ENABLEFIFOFULL0)
+#define G_ENABLEFIFOFULL0(x) (((x) >> S_ENABLEFIFOFULL0) & M_ENABLEFIFOFULL0)
+
+#define S_ENABLEPCMDFULL0 14
+#define M_ENABLEPCMDFULL0 0x3U
+#define V_ENABLEPCMDFULL0(x) ((x) << S_ENABLEPCMDFULL0)
+#define G_ENABLEPCMDFULL0(x) (((x) >> S_ENABLEPCMDFULL0) & M_ENABLEPCMDFULL0)
+
+#define S_ENABLEHDRFULL0 12
+#define M_ENABLEHDRFULL0 0x3U
+#define V_ENABLEHDRFULL0(x) ((x) << S_ENABLEHDRFULL0)
+#define G_ENABLEHDRFULL0(x) (((x) >> S_ENABLEHDRFULL0) & M_ENABLEHDRFULL0)
+
+#define S_COMMITLIMIT1 6
+#define M_COMMITLIMIT1 0x3fU
+#define V_COMMITLIMIT1(x) ((x) << S_COMMITLIMIT1)
+#define G_COMMITLIMIT1(x) (((x) >> S_COMMITLIMIT1) & M_COMMITLIMIT1)
+
+#define S_COMMITLIMIT0 0
+#define M_COMMITLIMIT0 0x3fU
+#define V_COMMITLIMIT0(x) ((x) << S_COMMITLIMIT0)
+#define G_COMMITLIMIT0(x) (((x) >> S_COMMITLIMIT0) & M_COMMITLIMIT0)
+
+#define A_TP_TX_SCHED 0x7eb4
+
+#define S_COMMITRESET3 31
+#define V_COMMITRESET3(x) ((x) << S_COMMITRESET3)
+#define F_COMMITRESET3 V_COMMITRESET3(1U)
+
+#define S_COMMITRESET2 30
+#define V_COMMITRESET2(x) ((x) << S_COMMITRESET2)
+#define F_COMMITRESET2 V_COMMITRESET2(1U)
+
+#define S_COMMITRESET1 29
+#define V_COMMITRESET1(x) ((x) << S_COMMITRESET1)
+#define F_COMMITRESET1 V_COMMITRESET1(1U)
+
+#define S_COMMITRESET0 28
+#define V_COMMITRESET0(x) ((x) << S_COMMITRESET0)
+#define F_COMMITRESET0 V_COMMITRESET0(1U)
+
+#define S_FORCECONG3 27
+#define V_FORCECONG3(x) ((x) << S_FORCECONG3)
+#define F_FORCECONG3 V_FORCECONG3(1U)
+
+#define S_FORCECONG2 26
+#define V_FORCECONG2(x) ((x) << S_FORCECONG2)
+#define F_FORCECONG2 V_FORCECONG2(1U)
+
+#define S_FORCECONG1 25
+#define V_FORCECONG1(x) ((x) << S_FORCECONG1)
+#define F_FORCECONG1 V_FORCECONG1(1U)
+
+#define S_FORCECONG0 24
+#define V_FORCECONG0(x) ((x) << S_FORCECONG0)
+#define F_FORCECONG0 V_FORCECONG0(1U)
+
+#define S_COMMITLIMIT3 18
+#define M_COMMITLIMIT3 0x3fU
+#define V_COMMITLIMIT3(x) ((x) << S_COMMITLIMIT3)
+#define G_COMMITLIMIT3(x) (((x) >> S_COMMITLIMIT3) & M_COMMITLIMIT3)
+
+#define S_COMMITLIMIT2 12
+#define M_COMMITLIMIT2 0x3fU
+#define V_COMMITLIMIT2(x) ((x) << S_COMMITLIMIT2)
+#define G_COMMITLIMIT2(x) (((x) >> S_COMMITLIMIT2) & M_COMMITLIMIT2)
+
+#define A_TP_FX_SCHED 0x7eb8
+
+#define S_TXCHNXOFF3 19
+#define V_TXCHNXOFF3(x) ((x) << S_TXCHNXOFF3)
+#define F_TXCHNXOFF3 V_TXCHNXOFF3(1U)
+
+#define S_TXCHNXOFF2 18
+#define V_TXCHNXOFF2(x) ((x) << S_TXCHNXOFF2)
+#define F_TXCHNXOFF2 V_TXCHNXOFF2(1U)
+
+#define S_TXCHNXOFF1 17
+#define V_TXCHNXOFF1(x) ((x) << S_TXCHNXOFF1)
+#define F_TXCHNXOFF1 V_TXCHNXOFF1(1U)
+
+#define S_TXCHNXOFF0 16
+#define V_TXCHNXOFF0(x) ((x) << S_TXCHNXOFF0)
+#define F_TXCHNXOFF0 V_TXCHNXOFF0(1U)
+
+#define S_TXMODXOFF7 15
+#define V_TXMODXOFF7(x) ((x) << S_TXMODXOFF7)
+#define F_TXMODXOFF7 V_TXMODXOFF7(1U)
+
+#define S_TXMODXOFF6 14
+#define V_TXMODXOFF6(x) ((x) << S_TXMODXOFF6)
+#define F_TXMODXOFF6 V_TXMODXOFF6(1U)
+
+#define S_TXMODXOFF5 13
+#define V_TXMODXOFF5(x) ((x) << S_TXMODXOFF5)
+#define F_TXMODXOFF5 V_TXMODXOFF5(1U)
+
+#define S_TXMODXOFF4 12
+#define V_TXMODXOFF4(x) ((x) << S_TXMODXOFF4)
+#define F_TXMODXOFF4 V_TXMODXOFF4(1U)
+
+#define S_TXMODXOFF3 11
+#define V_TXMODXOFF3(x) ((x) << S_TXMODXOFF3)
+#define F_TXMODXOFF3 V_TXMODXOFF3(1U)
+
+#define S_TXMODXOFF2 10
+#define V_TXMODXOFF2(x) ((x) << S_TXMODXOFF2)
+#define F_TXMODXOFF2 V_TXMODXOFF2(1U)
+
+#define S_TXMODXOFF1 9
+#define V_TXMODXOFF1(x) ((x) << S_TXMODXOFF1)
+#define F_TXMODXOFF1 V_TXMODXOFF1(1U)
+
+#define S_TXMODXOFF0 8
+#define V_TXMODXOFF0(x) ((x) << S_TXMODXOFF0)
+#define F_TXMODXOFF0 V_TXMODXOFF0(1U)
+
+#define S_RXCHNXOFF3 7
+#define V_RXCHNXOFF3(x) ((x) << S_RXCHNXOFF3)
+#define F_RXCHNXOFF3 V_RXCHNXOFF3(1U)
+
+#define S_RXCHNXOFF2 6
+#define V_RXCHNXOFF2(x) ((x) << S_RXCHNXOFF2)
+#define F_RXCHNXOFF2 V_RXCHNXOFF2(1U)
+
+#define S_RXCHNXOFF1 5
+#define V_RXCHNXOFF1(x) ((x) << S_RXCHNXOFF1)
+#define F_RXCHNXOFF1 V_RXCHNXOFF1(1U)
+
+#define S_RXCHNXOFF0 4
+#define V_RXCHNXOFF0(x) ((x) << S_RXCHNXOFF0)
+#define F_RXCHNXOFF0 V_RXCHNXOFF0(1U)
+
+#define S_RXMODXOFF1 1
+#define V_RXMODXOFF1(x) ((x) << S_RXMODXOFF1)
+#define F_RXMODXOFF1 V_RXMODXOFF1(1U)
+
+#define S_RXMODXOFF0 0
+#define V_RXMODXOFF0(x) ((x) << S_RXMODXOFF0)
+#define F_RXMODXOFF0 V_RXMODXOFF0(1U)
+
+#define A_TP_TX_ORATE 0x7ebc
+
+#define S_OFDRATE3 24
+#define M_OFDRATE3 0xffU
+#define V_OFDRATE3(x) ((x) << S_OFDRATE3)
+#define G_OFDRATE3(x) (((x) >> S_OFDRATE3) & M_OFDRATE3)
+
+#define S_OFDRATE2 16
+#define M_OFDRATE2 0xffU
+#define V_OFDRATE2(x) ((x) << S_OFDRATE2)
+#define G_OFDRATE2(x) (((x) >> S_OFDRATE2) & M_OFDRATE2)
+
+#define S_OFDRATE1 8
+#define M_OFDRATE1 0xffU
+#define V_OFDRATE1(x) ((x) << S_OFDRATE1)
+#define G_OFDRATE1(x) (((x) >> S_OFDRATE1) & M_OFDRATE1)
+
+#define S_OFDRATE0 0
+#define M_OFDRATE0 0xffU
+#define V_OFDRATE0(x) ((x) << S_OFDRATE0)
+#define G_OFDRATE0(x) (((x) >> S_OFDRATE0) & M_OFDRATE0)
+
+#define A_TP_IX_SCHED0 0x7ec0
+#define A_TP_IX_SCHED1 0x7ec4
+#define A_TP_IX_SCHED2 0x7ec8
+#define A_TP_IX_SCHED3 0x7ecc
+#define A_TP_TX_TRATE 0x7ed0
+
+#define S_TNLRATE3 24
+#define M_TNLRATE3 0xffU
+#define V_TNLRATE3(x) ((x) << S_TNLRATE3)
+#define G_TNLRATE3(x) (((x) >> S_TNLRATE3) & M_TNLRATE3)
+
+#define S_TNLRATE2 16
+#define M_TNLRATE2 0xffU
+#define V_TNLRATE2(x) ((x) << S_TNLRATE2)
+#define G_TNLRATE2(x) (((x) >> S_TNLRATE2) & M_TNLRATE2)
+
+#define S_TNLRATE1 8
+#define M_TNLRATE1 0xffU
+#define V_TNLRATE1(x) ((x) << S_TNLRATE1)
+#define G_TNLRATE1(x) (((x) >> S_TNLRATE1) & M_TNLRATE1)
+
+#define S_TNLRATE0 0
+#define M_TNLRATE0 0xffU
+#define V_TNLRATE0(x) ((x) << S_TNLRATE0)
+#define G_TNLRATE0(x) (((x) >> S_TNLRATE0) & M_TNLRATE0)
+
+#define A_TP_DBG_LA_CONFIG 0x7ed4
+
+#define S_DBGLAOPCENABLE 24
+#define M_DBGLAOPCENABLE 0xffU
+#define V_DBGLAOPCENABLE(x) ((x) << S_DBGLAOPCENABLE)
+#define G_DBGLAOPCENABLE(x) (((x) >> S_DBGLAOPCENABLE) & M_DBGLAOPCENABLE)
+
+#define S_DBGLAWHLF 23
+#define V_DBGLAWHLF(x) ((x) << S_DBGLAWHLF)
+#define F_DBGLAWHLF V_DBGLAWHLF(1U)
+
+#define S_DBGLAWPTR 16
+#define M_DBGLAWPTR 0x7fU
+#define V_DBGLAWPTR(x) ((x) << S_DBGLAWPTR)
+#define G_DBGLAWPTR(x) (((x) >> S_DBGLAWPTR) & M_DBGLAWPTR)
+
+#define S_DBGLAMODE 14
+#define M_DBGLAMODE 0x3U
+#define V_DBGLAMODE(x) ((x) << S_DBGLAMODE)
+#define G_DBGLAMODE(x) (((x) >> S_DBGLAMODE) & M_DBGLAMODE)
+
+#define S_DBGLAFATALFREEZE 13
+#define V_DBGLAFATALFREEZE(x) ((x) << S_DBGLAFATALFREEZE)
+#define F_DBGLAFATALFREEZE V_DBGLAFATALFREEZE(1U)
+
+#define S_DBGLAENABLE 12
+#define V_DBGLAENABLE(x) ((x) << S_DBGLAENABLE)
+#define F_DBGLAENABLE V_DBGLAENABLE(1U)
+
+#define S_DBGLARPTR 0
+#define M_DBGLARPTR 0x7fU
+#define V_DBGLARPTR(x) ((x) << S_DBGLARPTR)
+#define G_DBGLARPTR(x) (((x) >> S_DBGLARPTR) & M_DBGLARPTR)
+
+#define A_TP_DBG_LA_DATAL 0x7ed8
+#define A_TP_DBG_LA_DATAH 0x7edc
+#define A_TP_PROTOCOL_CNTRL 0x7ee8
+
+#define S_WRITEENABLE 31
+#define V_WRITEENABLE(x) ((x) << S_WRITEENABLE)
+#define F_WRITEENABLE V_WRITEENABLE(1U)
+
+#define S_TCAMENABLE 10
+#define V_TCAMENABLE(x) ((x) << S_TCAMENABLE)
+#define F_TCAMENABLE V_TCAMENABLE(1U)
+
+#define S_BLOCKSELECT 8
+#define M_BLOCKSELECT 0x3U
+#define V_BLOCKSELECT(x) ((x) << S_BLOCKSELECT)
+#define G_BLOCKSELECT(x) (((x) >> S_BLOCKSELECT) & M_BLOCKSELECT)
+
+#define S_LINEADDRESS 1
+#define M_LINEADDRESS 0x7fU
+#define V_LINEADDRESS(x) ((x) << S_LINEADDRESS)
+#define G_LINEADDRESS(x) (((x) >> S_LINEADDRESS) & M_LINEADDRESS)
+
+#define S_REQUESTDONE 0
+#define V_REQUESTDONE(x) ((x) << S_REQUESTDONE)
+#define F_REQUESTDONE V_REQUESTDONE(1U)
+
+#define A_TP_PROTOCOL_DATA0 0x7eec
+#define A_TP_PROTOCOL_DATA1 0x7ef0
+#define A_TP_PROTOCOL_DATA2 0x7ef4
+#define A_TP_PROTOCOL_DATA3 0x7ef8
+#define A_TP_PROTOCOL_DATA4 0x7efc
+
+#define S_PROTOCOLDATAFIELD 0
+#define M_PROTOCOLDATAFIELD 0xfU
+#define V_PROTOCOLDATAFIELD(x) ((x) << S_PROTOCOLDATAFIELD)
+#define G_PROTOCOLDATAFIELD(x) (((x) >> S_PROTOCOLDATAFIELD) & M_PROTOCOLDATAFIELD)
+
+#define A_TP_TX_MOD_Q7_Q6_TIMER_SEPARATOR 0x0
+
+#define S_TXTIMERSEPQ7 16
+#define M_TXTIMERSEPQ7 0xffffU
+#define V_TXTIMERSEPQ7(x) ((x) << S_TXTIMERSEPQ7)
+#define G_TXTIMERSEPQ7(x) (((x) >> S_TXTIMERSEPQ7) & M_TXTIMERSEPQ7)
+
+#define S_TXTIMERSEPQ6 0
+#define M_TXTIMERSEPQ6 0xffffU
+#define V_TXTIMERSEPQ6(x) ((x) << S_TXTIMERSEPQ6)
+#define G_TXTIMERSEPQ6(x) (((x) >> S_TXTIMERSEPQ6) & M_TXTIMERSEPQ6)
+
+#define A_TP_TX_MOD_Q5_Q4_TIMER_SEPARATOR 0x1
+
+#define S_TXTIMERSEPQ5 16
+#define M_TXTIMERSEPQ5 0xffffU
+#define V_TXTIMERSEPQ5(x) ((x) << S_TXTIMERSEPQ5)
+#define G_TXTIMERSEPQ5(x) (((x) >> S_TXTIMERSEPQ5) & M_TXTIMERSEPQ5)
+
+#define S_TXTIMERSEPQ4 0
+#define M_TXTIMERSEPQ4 0xffffU
+#define V_TXTIMERSEPQ4(x) ((x) << S_TXTIMERSEPQ4)
+#define G_TXTIMERSEPQ4(x) (((x) >> S_TXTIMERSEPQ4) & M_TXTIMERSEPQ4)
+
+#define A_TP_TX_MOD_Q3_Q2_TIMER_SEPARATOR 0x2
+
+#define S_TXTIMERSEPQ3 16
+#define M_TXTIMERSEPQ3 0xffffU
+#define V_TXTIMERSEPQ3(x) ((x) << S_TXTIMERSEPQ3)
+#define G_TXTIMERSEPQ3(x) (((x) >> S_TXTIMERSEPQ3) & M_TXTIMERSEPQ3)
+
+#define S_TXTIMERSEPQ2 0
+#define M_TXTIMERSEPQ2 0xffffU
+#define V_TXTIMERSEPQ2(x) ((x) << S_TXTIMERSEPQ2)
+#define G_TXTIMERSEPQ2(x) (((x) >> S_TXTIMERSEPQ2) & M_TXTIMERSEPQ2)
+
+#define A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR 0x3
+
+#define S_TXTIMERSEPQ1 16
+#define M_TXTIMERSEPQ1 0xffffU
+#define V_TXTIMERSEPQ1(x) ((x) << S_TXTIMERSEPQ1)
+#define G_TXTIMERSEPQ1(x) (((x) >> S_TXTIMERSEPQ1) & M_TXTIMERSEPQ1)
+
+#define S_TXTIMERSEPQ0 0
+#define M_TXTIMERSEPQ0 0xffffU
+#define V_TXTIMERSEPQ0(x) ((x) << S_TXTIMERSEPQ0)
+#define G_TXTIMERSEPQ0(x) (((x) >> S_TXTIMERSEPQ0) & M_TXTIMERSEPQ0)
+
+#define A_TP_RX_MOD_Q1_Q0_TIMER_SEPARATOR 0x4
+
+#define S_RXTIMERSEPQ1 16
+#define M_RXTIMERSEPQ1 0xffffU
+#define V_RXTIMERSEPQ1(x) ((x) << S_RXTIMERSEPQ1)
+#define G_RXTIMERSEPQ1(x) (((x) >> S_RXTIMERSEPQ1) & M_RXTIMERSEPQ1)
+
+#define S_RXTIMERSEPQ0 0
+#define M_RXTIMERSEPQ0 0xffffU
+#define V_RXTIMERSEPQ0(x) ((x) << S_RXTIMERSEPQ0)
+#define G_RXTIMERSEPQ0(x) (((x) >> S_RXTIMERSEPQ0) & M_RXTIMERSEPQ0)
+
+#define A_TP_TX_MOD_Q7_Q6_RATE_LIMIT 0x5
+
+#define S_TXRATEINCQ7 24
+#define M_TXRATEINCQ7 0xffU
+#define V_TXRATEINCQ7(x) ((x) << S_TXRATEINCQ7)
+#define G_TXRATEINCQ7(x) (((x) >> S_TXRATEINCQ7) & M_TXRATEINCQ7)
+
+#define S_TXRATETCKQ7 16
+#define M_TXRATETCKQ7 0xffU
+#define V_TXRATETCKQ7(x) ((x) << S_TXRATETCKQ7)
+#define G_TXRATETCKQ7(x) (((x) >> S_TXRATETCKQ7) & M_TXRATETCKQ7)
+
+#define S_TXRATEINCQ6 8
+#define M_TXRATEINCQ6 0xffU
+#define V_TXRATEINCQ6(x) ((x) << S_TXRATEINCQ6)
+#define G_TXRATEINCQ6(x) (((x) >> S_TXRATEINCQ6) & M_TXRATEINCQ6)
+
+#define S_TXRATETCKQ6 0
+#define M_TXRATETCKQ6 0xffU
+#define V_TXRATETCKQ6(x) ((x) << S_TXRATETCKQ6)
+#define G_TXRATETCKQ6(x) (((x) >> S_TXRATETCKQ6) & M_TXRATETCKQ6)
+
+#define A_TP_TX_MOD_Q5_Q4_RATE_LIMIT 0x6
+
+#define S_TXRATEINCQ5 24
+#define M_TXRATEINCQ5 0xffU
+#define V_TXRATEINCQ5(x) ((x) << S_TXRATEINCQ5)
+#define G_TXRATEINCQ5(x) (((x) >> S_TXRATEINCQ5) & M_TXRATEINCQ5)
+
+#define S_TXRATETCKQ5 16
+#define M_TXRATETCKQ5 0xffU
+#define V_TXRATETCKQ5(x) ((x) << S_TXRATETCKQ5)
+#define G_TXRATETCKQ5(x) (((x) >> S_TXRATETCKQ5) & M_TXRATETCKQ5)
+
+#define S_TXRATEINCQ4 8
+#define M_TXRATEINCQ4 0xffU
+#define V_TXRATEINCQ4(x) ((x) << S_TXRATEINCQ4)
+#define G_TXRATEINCQ4(x) (((x) >> S_TXRATEINCQ4) & M_TXRATEINCQ4)
+
+#define S_TXRATETCKQ4 0
+#define M_TXRATETCKQ4 0xffU
+#define V_TXRATETCKQ4(x) ((x) << S_TXRATETCKQ4)
+#define G_TXRATETCKQ4(x) (((x) >> S_TXRATETCKQ4) & M_TXRATETCKQ4)
+
+#define A_TP_TX_MOD_Q3_Q2_RATE_LIMIT 0x7
+
+#define S_TXRATEINCQ3 24
+#define M_TXRATEINCQ3 0xffU
+#define V_TXRATEINCQ3(x) ((x) << S_TXRATEINCQ3)
+#define G_TXRATEINCQ3(x) (((x) >> S_TXRATEINCQ3) & M_TXRATEINCQ3)
+
+#define S_TXRATETCKQ3 16
+#define M_TXRATETCKQ3 0xffU
+#define V_TXRATETCKQ3(x) ((x) << S_TXRATETCKQ3)
+#define G_TXRATETCKQ3(x) (((x) >> S_TXRATETCKQ3) & M_TXRATETCKQ3)
+
+#define S_TXRATEINCQ2 8
+#define M_TXRATEINCQ2 0xffU
+#define V_TXRATEINCQ2(x) ((x) << S_TXRATEINCQ2)
+#define G_TXRATEINCQ2(x) (((x) >> S_TXRATEINCQ2) & M_TXRATEINCQ2)
+
+#define S_TXRATETCKQ2 0
+#define M_TXRATETCKQ2 0xffU
+#define V_TXRATETCKQ2(x) ((x) << S_TXRATETCKQ2)
+#define G_TXRATETCKQ2(x) (((x) >> S_TXRATETCKQ2) & M_TXRATETCKQ2)
+
+#define A_TP_TX_MOD_Q1_Q0_RATE_LIMIT 0x8
+
+#define S_TXRATEINCQ1 24
+#define M_TXRATEINCQ1 0xffU
+#define V_TXRATEINCQ1(x) ((x) << S_TXRATEINCQ1)
+#define G_TXRATEINCQ1(x) (((x) >> S_TXRATEINCQ1) & M_TXRATEINCQ1)
+
+#define S_TXRATETCKQ1 16
+#define M_TXRATETCKQ1 0xffU
+#define V_TXRATETCKQ1(x) ((x) << S_TXRATETCKQ1)
+#define G_TXRATETCKQ1(x) (((x) >> S_TXRATETCKQ1) & M_TXRATETCKQ1)
+
+#define S_TXRATEINCQ0 8
+#define M_TXRATEINCQ0 0xffU
+#define V_TXRATEINCQ0(x) ((x) << S_TXRATEINCQ0)
+#define G_TXRATEINCQ0(x) (((x) >> S_TXRATEINCQ0) & M_TXRATEINCQ0)
+
+#define S_TXRATETCKQ0 0
+#define M_TXRATETCKQ0 0xffU
+#define V_TXRATETCKQ0(x) ((x) << S_TXRATETCKQ0)
+#define G_TXRATETCKQ0(x) (((x) >> S_TXRATETCKQ0) & M_TXRATETCKQ0)
+
+#define A_TP_RX_MOD_Q1_Q0_RATE_LIMIT 0x9
+
+#define S_RXRATEINCQ1 24
+#define M_RXRATEINCQ1 0xffU
+#define V_RXRATEINCQ1(x) ((x) << S_RXRATEINCQ1)
+#define G_RXRATEINCQ1(x) (((x) >> S_RXRATEINCQ1) & M_RXRATEINCQ1)
+
+#define S_RXRATETCKQ1 16
+#define M_RXRATETCKQ1 0xffU
+#define V_RXRATETCKQ1(x) ((x) << S_RXRATETCKQ1)
+#define G_RXRATETCKQ1(x) (((x) >> S_RXRATETCKQ1) & M_RXRATETCKQ1)
+
+#define S_RXRATEINCQ0 8
+#define M_RXRATEINCQ0 0xffU
+#define V_RXRATEINCQ0(x) ((x) << S_RXRATEINCQ0)
+#define G_RXRATEINCQ0(x) (((x) >> S_RXRATEINCQ0) & M_RXRATEINCQ0)
+
+#define S_RXRATETCKQ0 0
+#define M_RXRATETCKQ0 0xffU
+#define V_RXRATETCKQ0(x) ((x) << S_RXRATETCKQ0)
+#define G_RXRATETCKQ0(x) (((x) >> S_RXRATETCKQ0) & M_RXRATETCKQ0)
+
+#define A_TP_TX_MOD_C3_C2_RATE_LIMIT 0xa
+#define A_TP_TX_MOD_C1_C0_RATE_LIMIT 0xb
+#define A_TP_RX_SCHED_MAP 0x20
+
+#define S_RXMAPCHANNEL3 24
+#define M_RXMAPCHANNEL3 0xffU
+#define V_RXMAPCHANNEL3(x) ((x) << S_RXMAPCHANNEL3)
+#define G_RXMAPCHANNEL3(x) (((x) >> S_RXMAPCHANNEL3) & M_RXMAPCHANNEL3)
+
+#define S_RXMAPCHANNEL2 16
+#define M_RXMAPCHANNEL2 0xffU
+#define V_RXMAPCHANNEL2(x) ((x) << S_RXMAPCHANNEL2)
+#define G_RXMAPCHANNEL2(x) (((x) >> S_RXMAPCHANNEL2) & M_RXMAPCHANNEL2)
+
+#define S_RXMAPCHANNEL1 8
+#define M_RXMAPCHANNEL1 0xffU
+#define V_RXMAPCHANNEL1(x) ((x) << S_RXMAPCHANNEL1)
+#define G_RXMAPCHANNEL1(x) (((x) >> S_RXMAPCHANNEL1) & M_RXMAPCHANNEL1)
+
+#define S_RXMAPCHANNEL0 0
+#define M_RXMAPCHANNEL0 0xffU
+#define V_RXMAPCHANNEL0(x) ((x) << S_RXMAPCHANNEL0)
+#define G_RXMAPCHANNEL0(x) (((x) >> S_RXMAPCHANNEL0) & M_RXMAPCHANNEL0)
+
+#define A_TP_RX_SCHED_SGE 0x21
+
+#define S_RXSGEMOD1 12
+#define M_RXSGEMOD1 0xfU
+#define V_RXSGEMOD1(x) ((x) << S_RXSGEMOD1)
+#define G_RXSGEMOD1(x) (((x) >> S_RXSGEMOD1) & M_RXSGEMOD1)
+
+#define S_RXSGEMOD0 8
+#define M_RXSGEMOD0 0xfU
+#define V_RXSGEMOD0(x) ((x) << S_RXSGEMOD0)
+#define G_RXSGEMOD0(x) (((x) >> S_RXSGEMOD0) & M_RXSGEMOD0)
+
+#define S_RXSGECHANNEL3 3
+#define V_RXSGECHANNEL3(x) ((x) << S_RXSGECHANNEL3)
+#define F_RXSGECHANNEL3 V_RXSGECHANNEL3(1U)
+
+#define S_RXSGECHANNEL2 2
+#define V_RXSGECHANNEL2(x) ((x) << S_RXSGECHANNEL2)
+#define F_RXSGECHANNEL2 V_RXSGECHANNEL2(1U)
+
+#define S_RXSGECHANNEL1 1
+#define V_RXSGECHANNEL1(x) ((x) << S_RXSGECHANNEL1)
+#define F_RXSGECHANNEL1 V_RXSGECHANNEL1(1U)
+
+#define S_RXSGECHANNEL0 0
+#define V_RXSGECHANNEL0(x) ((x) << S_RXSGECHANNEL0)
+#define F_RXSGECHANNEL0 V_RXSGECHANNEL0(1U)
+
+#define A_TP_TX_SCHED_MAP 0x22
+
+#define S_TXMAPCHANNEL3 12
+#define M_TXMAPCHANNEL3 0xfU
+#define V_TXMAPCHANNEL3(x) ((x) << S_TXMAPCHANNEL3)
+#define G_TXMAPCHANNEL3(x) (((x) >> S_TXMAPCHANNEL3) & M_TXMAPCHANNEL3)
+
+#define S_TXMAPCHANNEL2 8
+#define M_TXMAPCHANNEL2 0xfU
+#define V_TXMAPCHANNEL2(x) ((x) << S_TXMAPCHANNEL2)
+#define G_TXMAPCHANNEL2(x) (((x) >> S_TXMAPCHANNEL2) & M_TXMAPCHANNEL2)
+
+#define S_TXMAPCHANNEL1 4
+#define M_TXMAPCHANNEL1 0xfU
+#define V_TXMAPCHANNEL1(x) ((x) << S_TXMAPCHANNEL1)
+#define G_TXMAPCHANNEL1(x) (((x) >> S_TXMAPCHANNEL1) & M_TXMAPCHANNEL1)
+
+#define S_TXMAPCHANNEL0 0
+#define M_TXMAPCHANNEL0 0xfU
+#define V_TXMAPCHANNEL0(x) ((x) << S_TXMAPCHANNEL0)
+#define G_TXMAPCHANNEL0(x) (((x) >> S_TXMAPCHANNEL0) & M_TXMAPCHANNEL0)
+
+#define A_TP_TX_SCHED_HDR 0x23
+
+#define S_TXMAPHDRCHANNEL7 28
+#define M_TXMAPHDRCHANNEL7 0xfU
+#define V_TXMAPHDRCHANNEL7(x) ((x) << S_TXMAPHDRCHANNEL7)
+#define G_TXMAPHDRCHANNEL7(x) (((x) >> S_TXMAPHDRCHANNEL7) & M_TXMAPHDRCHANNEL7)
+
+#define S_TXMAPHDRCHANNEL6 24
+#define M_TXMAPHDRCHANNEL6 0xfU
+#define V_TXMAPHDRCHANNEL6(x) ((x) << S_TXMAPHDRCHANNEL6)
+#define G_TXMAPHDRCHANNEL6(x) (((x) >> S_TXMAPHDRCHANNEL6) & M_TXMAPHDRCHANNEL6)
+
+#define S_TXMAPHDRCHANNEL5 20
+#define M_TXMAPHDRCHANNEL5 0xfU
+#define V_TXMAPHDRCHANNEL5(x) ((x) << S_TXMAPHDRCHANNEL5)
+#define G_TXMAPHDRCHANNEL5(x) (((x) >> S_TXMAPHDRCHANNEL5) & M_TXMAPHDRCHANNEL5)
+
+#define S_TXMAPHDRCHANNEL4 16
+#define M_TXMAPHDRCHANNEL4 0xfU
+#define V_TXMAPHDRCHANNEL4(x) ((x) << S_TXMAPHDRCHANNEL4)
+#define G_TXMAPHDRCHANNEL4(x) (((x) >> S_TXMAPHDRCHANNEL4) & M_TXMAPHDRCHANNEL4)
+
+#define S_TXMAPHDRCHANNEL3 12
+#define M_TXMAPHDRCHANNEL3 0xfU
+#define V_TXMAPHDRCHANNEL3(x) ((x) << S_TXMAPHDRCHANNEL3)
+#define G_TXMAPHDRCHANNEL3(x) (((x) >> S_TXMAPHDRCHANNEL3) & M_TXMAPHDRCHANNEL3)
+
+#define S_TXMAPHDRCHANNEL2 8
+#define M_TXMAPHDRCHANNEL2 0xfU
+#define V_TXMAPHDRCHANNEL2(x) ((x) << S_TXMAPHDRCHANNEL2)
+#define G_TXMAPHDRCHANNEL2(x) (((x) >> S_TXMAPHDRCHANNEL2) & M_TXMAPHDRCHANNEL2)
+
+#define S_TXMAPHDRCHANNEL1 4
+#define M_TXMAPHDRCHANNEL1 0xfU
+#define V_TXMAPHDRCHANNEL1(x) ((x) << S_TXMAPHDRCHANNEL1)
+#define G_TXMAPHDRCHANNEL1(x) (((x) >> S_TXMAPHDRCHANNEL1) & M_TXMAPHDRCHANNEL1)
+
+#define S_TXMAPHDRCHANNEL0 0
+#define M_TXMAPHDRCHANNEL0 0xfU
+#define V_TXMAPHDRCHANNEL0(x) ((x) << S_TXMAPHDRCHANNEL0)
+#define G_TXMAPHDRCHANNEL0(x) (((x) >> S_TXMAPHDRCHANNEL0) & M_TXMAPHDRCHANNEL0)
+
+#define A_TP_TX_SCHED_FIFO 0x24
+
+#define S_TXMAPFIFOCHANNEL7 28
+#define M_TXMAPFIFOCHANNEL7 0xfU
+#define V_TXMAPFIFOCHANNEL7(x) ((x) << S_TXMAPFIFOCHANNEL7)
+#define G_TXMAPFIFOCHANNEL7(x) (((x) >> S_TXMAPFIFOCHANNEL7) & M_TXMAPFIFOCHANNEL7)
+
+#define S_TXMAPFIFOCHANNEL6 24
+#define M_TXMAPFIFOCHANNEL6 0xfU
+#define V_TXMAPFIFOCHANNEL6(x) ((x) << S_TXMAPFIFOCHANNEL6)
+#define G_TXMAPFIFOCHANNEL6(x) (((x) >> S_TXMAPFIFOCHANNEL6) & M_TXMAPFIFOCHANNEL6)
+
+#define S_TXMAPFIFOCHANNEL5 20
+#define M_TXMAPFIFOCHANNEL5 0xfU
+#define V_TXMAPFIFOCHANNEL5(x) ((x) << S_TXMAPFIFOCHANNEL5)
+#define G_TXMAPFIFOCHANNEL5(x) (((x) >> S_TXMAPFIFOCHANNEL5) & M_TXMAPFIFOCHANNEL5)
+
+#define S_TXMAPFIFOCHANNEL4 16
+#define M_TXMAPFIFOCHANNEL4 0xfU
+#define V_TXMAPFIFOCHANNEL4(x) ((x) << S_TXMAPFIFOCHANNEL4)
+#define G_TXMAPFIFOCHANNEL4(x) (((x) >> S_TXMAPFIFOCHANNEL4) & M_TXMAPFIFOCHANNEL4)
+
+#define S_TXMAPFIFOCHANNEL3 12
+#define M_TXMAPFIFOCHANNEL3 0xfU
+#define V_TXMAPFIFOCHANNEL3(x) ((x) << S_TXMAPFIFOCHANNEL3)
+#define G_TXMAPFIFOCHANNEL3(x) (((x) >> S_TXMAPFIFOCHANNEL3) & M_TXMAPFIFOCHANNEL3)
+
+#define S_TXMAPFIFOCHANNEL2 8
+#define M_TXMAPFIFOCHANNEL2 0xfU
+#define V_TXMAPFIFOCHANNEL2(x) ((x) << S_TXMAPFIFOCHANNEL2)
+#define G_TXMAPFIFOCHANNEL2(x) (((x) >> S_TXMAPFIFOCHANNEL2) & M_TXMAPFIFOCHANNEL2)
+
+#define S_TXMAPFIFOCHANNEL1 4
+#define M_TXMAPFIFOCHANNEL1 0xfU
+#define V_TXMAPFIFOCHANNEL1(x) ((x) << S_TXMAPFIFOCHANNEL1)
+#define G_TXMAPFIFOCHANNEL1(x) (((x) >> S_TXMAPFIFOCHANNEL1) & M_TXMAPFIFOCHANNEL1)
+
+#define S_TXMAPFIFOCHANNEL0 0
+#define M_TXMAPFIFOCHANNEL0 0xfU
+#define V_TXMAPFIFOCHANNEL0(x) ((x) << S_TXMAPFIFOCHANNEL0)
+#define G_TXMAPFIFOCHANNEL0(x) (((x) >> S_TXMAPFIFOCHANNEL0) & M_TXMAPFIFOCHANNEL0)
+
+#define A_TP_TX_SCHED_PCMD 0x25
+
+#define S_TXMAPPCMDCHANNEL7 28
+#define M_TXMAPPCMDCHANNEL7 0xfU
+#define V_TXMAPPCMDCHANNEL7(x) ((x) << S_TXMAPPCMDCHANNEL7)
+#define G_TXMAPPCMDCHANNEL7(x) (((x) >> S_TXMAPPCMDCHANNEL7) & M_TXMAPPCMDCHANNEL7)
+
+#define S_TXMAPPCMDCHANNEL6 24
+#define M_TXMAPPCMDCHANNEL6 0xfU
+#define V_TXMAPPCMDCHANNEL6(x) ((x) << S_TXMAPPCMDCHANNEL6)
+#define G_TXMAPPCMDCHANNEL6(x) (((x) >> S_TXMAPPCMDCHANNEL6) & M_TXMAPPCMDCHANNEL6)
+
+#define S_TXMAPPCMDCHANNEL5 20
+#define M_TXMAPPCMDCHANNEL5 0xfU
+#define V_TXMAPPCMDCHANNEL5(x) ((x) << S_TXMAPPCMDCHANNEL5)
+#define G_TXMAPPCMDCHANNEL5(x) (((x) >> S_TXMAPPCMDCHANNEL5) & M_TXMAPPCMDCHANNEL5)
+
+#define S_TXMAPPCMDCHANNEL4 16
+#define M_TXMAPPCMDCHANNEL4 0xfU
+#define V_TXMAPPCMDCHANNEL4(x) ((x) << S_TXMAPPCMDCHANNEL4)
+#define G_TXMAPPCMDCHANNEL4(x) (((x) >> S_TXMAPPCMDCHANNEL4) & M_TXMAPPCMDCHANNEL4)
+
+#define S_TXMAPPCMDCHANNEL3 12
+#define M_TXMAPPCMDCHANNEL3 0xfU
+#define V_TXMAPPCMDCHANNEL3(x) ((x) << S_TXMAPPCMDCHANNEL3)
+#define G_TXMAPPCMDCHANNEL3(x) (((x) >> S_TXMAPPCMDCHANNEL3) & M_TXMAPPCMDCHANNEL3)
+
+#define S_TXMAPPCMDCHANNEL2 8
+#define M_TXMAPPCMDCHANNEL2 0xfU
+#define V_TXMAPPCMDCHANNEL2(x) ((x) << S_TXMAPPCMDCHANNEL2)
+#define G_TXMAPPCMDCHANNEL2(x) (((x) >> S_TXMAPPCMDCHANNEL2) & M_TXMAPPCMDCHANNEL2)
+
+#define S_TXMAPPCMDCHANNEL1 4
+#define M_TXMAPPCMDCHANNEL1 0xfU
+#define V_TXMAPPCMDCHANNEL1(x) ((x) << S_TXMAPPCMDCHANNEL1)
+#define G_TXMAPPCMDCHANNEL1(x) (((x) >> S_TXMAPPCMDCHANNEL1) & M_TXMAPPCMDCHANNEL1)
+
+#define S_TXMAPPCMDCHANNEL0 0
+#define M_TXMAPPCMDCHANNEL0 0xfU
+#define V_TXMAPPCMDCHANNEL0(x) ((x) << S_TXMAPPCMDCHANNEL0)
+#define G_TXMAPPCMDCHANNEL0(x) (((x) >> S_TXMAPPCMDCHANNEL0) & M_TXMAPPCMDCHANNEL0)
+
+#define A_TP_TX_SCHED_LPBK 0x26
+
+#define S_TXMAPLPBKCHANNEL7 28
+#define M_TXMAPLPBKCHANNEL7 0xfU
+#define V_TXMAPLPBKCHANNEL7(x) ((x) << S_TXMAPLPBKCHANNEL7)
+#define G_TXMAPLPBKCHANNEL7(x) (((x) >> S_TXMAPLPBKCHANNEL7) & M_TXMAPLPBKCHANNEL7)
+
+#define S_TXMAPLPBKCHANNEL6 24
+#define M_TXMAPLPBKCHANNEL6 0xfU
+#define V_TXMAPLPBKCHANNEL6(x) ((x) << S_TXMAPLPBKCHANNEL6)
+#define G_TXMAPLPBKCHANNEL6(x) (((x) >> S_TXMAPLPBKCHANNEL6) & M_TXMAPLPBKCHANNEL6)
+
+#define S_TXMAPLPBKCHANNEL5 20
+#define M_TXMAPLPBKCHANNEL5 0xfU
+#define V_TXMAPLPBKCHANNEL5(x) ((x) << S_TXMAPLPBKCHANNEL5)
+#define G_TXMAPLPBKCHANNEL5(x) (((x) >> S_TXMAPLPBKCHANNEL5) & M_TXMAPLPBKCHANNEL5)
+
+#define S_TXMAPLPBKCHANNEL4 16
+#define M_TXMAPLPBKCHANNEL4 0xfU
+#define V_TXMAPLPBKCHANNEL4(x) ((x) << S_TXMAPLPBKCHANNEL4)
+#define G_TXMAPLPBKCHANNEL4(x) (((x) >> S_TXMAPLPBKCHANNEL4) & M_TXMAPLPBKCHANNEL4)
+
+#define S_TXMAPLPBKCHANNEL3 12
+#define M_TXMAPLPBKCHANNEL3 0xfU
+#define V_TXMAPLPBKCHANNEL3(x) ((x) << S_TXMAPLPBKCHANNEL3)
+#define G_TXMAPLPBKCHANNEL3(x) (((x) >> S_TXMAPLPBKCHANNEL3) & M_TXMAPLPBKCHANNEL3)
+
+#define S_TXMAPLPBKCHANNEL2 8
+#define M_TXMAPLPBKCHANNEL2 0xfU
+#define V_TXMAPLPBKCHANNEL2(x) ((x) << S_TXMAPLPBKCHANNEL2)
+#define G_TXMAPLPBKCHANNEL2(x) (((x) >> S_TXMAPLPBKCHANNEL2) & M_TXMAPLPBKCHANNEL2)
+
+#define S_TXMAPLPBKCHANNEL1 4
+#define M_TXMAPLPBKCHANNEL1 0xfU
+#define V_TXMAPLPBKCHANNEL1(x) ((x) << S_TXMAPLPBKCHANNEL1)
+#define G_TXMAPLPBKCHANNEL1(x) (((x) >> S_TXMAPLPBKCHANNEL1) & M_TXMAPLPBKCHANNEL1)
+
+#define S_TXMAPLPBKCHANNEL0 0
+#define M_TXMAPLPBKCHANNEL0 0xfU
+#define V_TXMAPLPBKCHANNEL0(x) ((x) << S_TXMAPLPBKCHANNEL0)
+#define G_TXMAPLPBKCHANNEL0(x) (((x) >> S_TXMAPLPBKCHANNEL0) & M_TXMAPLPBKCHANNEL0)
+
+#define A_TP_CHANNEL_MAP 0x27
+
+#define S_RXMAPCHANNELELN 16
+#define M_RXMAPCHANNELELN 0xfU
+#define V_RXMAPCHANNELELN(x) ((x) << S_RXMAPCHANNELELN)
+#define G_RXMAPCHANNELELN(x) (((x) >> S_RXMAPCHANNELELN) & M_RXMAPCHANNELELN)
+
+#define S_RXMAPE2LCHANNEL3 14
+#define M_RXMAPE2LCHANNEL3 0x3U
+#define V_RXMAPE2LCHANNEL3(x) ((x) << S_RXMAPE2LCHANNEL3)
+#define G_RXMAPE2LCHANNEL3(x) (((x) >> S_RXMAPE2LCHANNEL3) & M_RXMAPE2LCHANNEL3)
+
+#define S_RXMAPE2LCHANNEL2 12
+#define M_RXMAPE2LCHANNEL2 0x3U
+#define V_RXMAPE2LCHANNEL2(x) ((x) << S_RXMAPE2LCHANNEL2)
+#define G_RXMAPE2LCHANNEL2(x) (((x) >> S_RXMAPE2LCHANNEL2) & M_RXMAPE2LCHANNEL2)
+
+#define S_RXMAPE2LCHANNEL1 10
+#define M_RXMAPE2LCHANNEL1 0x3U
+#define V_RXMAPE2LCHANNEL1(x) ((x) << S_RXMAPE2LCHANNEL1)
+#define G_RXMAPE2LCHANNEL1(x) (((x) >> S_RXMAPE2LCHANNEL1) & M_RXMAPE2LCHANNEL1)
+
+#define S_RXMAPE2LCHANNEL0 8
+#define M_RXMAPE2LCHANNEL0 0x3U
+#define V_RXMAPE2LCHANNEL0(x) ((x) << S_RXMAPE2LCHANNEL0)
+#define G_RXMAPE2LCHANNEL0(x) (((x) >> S_RXMAPE2LCHANNEL0) & M_RXMAPE2LCHANNEL0)
+
+#define S_RXMAPC2CCHANNEL3 7
+#define V_RXMAPC2CCHANNEL3(x) ((x) << S_RXMAPC2CCHANNEL3)
+#define F_RXMAPC2CCHANNEL3 V_RXMAPC2CCHANNEL3(1U)
+
+#define S_RXMAPC2CCHANNEL2 6
+#define V_RXMAPC2CCHANNEL2(x) ((x) << S_RXMAPC2CCHANNEL2)
+#define F_RXMAPC2CCHANNEL2 V_RXMAPC2CCHANNEL2(1U)
+
+#define S_RXMAPC2CCHANNEL1 5
+#define V_RXMAPC2CCHANNEL1(x) ((x) << S_RXMAPC2CCHANNEL1)
+#define F_RXMAPC2CCHANNEL1 V_RXMAPC2CCHANNEL1(1U)
+
+#define S_RXMAPC2CCHANNEL0 4
+#define V_RXMAPC2CCHANNEL0(x) ((x) << S_RXMAPC2CCHANNEL0)
+#define F_RXMAPC2CCHANNEL0 V_RXMAPC2CCHANNEL0(1U)
+
+#define S_RXMAPE2CCHANNEL3 3
+#define V_RXMAPE2CCHANNEL3(x) ((x) << S_RXMAPE2CCHANNEL3)
+#define F_RXMAPE2CCHANNEL3 V_RXMAPE2CCHANNEL3(1U)
+
+#define S_RXMAPE2CCHANNEL2 2
+#define V_RXMAPE2CCHANNEL2(x) ((x) << S_RXMAPE2CCHANNEL2)
+#define F_RXMAPE2CCHANNEL2 V_RXMAPE2CCHANNEL2(1U)
+
+#define S_RXMAPE2CCHANNEL1 1
+#define V_RXMAPE2CCHANNEL1(x) ((x) << S_RXMAPE2CCHANNEL1)
+#define F_RXMAPE2CCHANNEL1 V_RXMAPE2CCHANNEL1(1U)
+
+#define S_RXMAPE2CCHANNEL0 0
+#define V_RXMAPE2CCHANNEL0(x) ((x) << S_RXMAPE2CCHANNEL0)
+#define F_RXMAPE2CCHANNEL0 V_RXMAPE2CCHANNEL0(1U)
+
+#define A_TP_RX_LPBK 0x28
+#define A_TP_TX_LPBK 0x29
+#define A_TP_TX_SCHED_PPP 0x2a
+
+#define S_TXPPPENPORT3 24
+#define M_TXPPPENPORT3 0xffU
+#define V_TXPPPENPORT3(x) ((x) << S_TXPPPENPORT3)
+#define G_TXPPPENPORT3(x) (((x) >> S_TXPPPENPORT3) & M_TXPPPENPORT3)
+
+#define S_TXPPPENPORT2 16
+#define M_TXPPPENPORT2 0xffU
+#define V_TXPPPENPORT2(x) ((x) << S_TXPPPENPORT2)
+#define G_TXPPPENPORT2(x) (((x) >> S_TXPPPENPORT2) & M_TXPPPENPORT2)
+
+#define S_TXPPPENPORT1 8
+#define M_TXPPPENPORT1 0xffU
+#define V_TXPPPENPORT1(x) ((x) << S_TXPPPENPORT1)
+#define G_TXPPPENPORT1(x) (((x) >> S_TXPPPENPORT1) & M_TXPPPENPORT1)
+
+#define S_TXPPPENPORT0 0
+#define M_TXPPPENPORT0 0xffU
+#define V_TXPPPENPORT0(x) ((x) << S_TXPPPENPORT0)
+#define G_TXPPPENPORT0(x) (((x) >> S_TXPPPENPORT0) & M_TXPPPENPORT0)
+
+#define A_TP_IPMI_CFG1 0x2e
+
+#define S_VLANENABLE 31
+#define V_VLANENABLE(x) ((x) << S_VLANENABLE)
+#define F_VLANENABLE V_VLANENABLE(1U)
+
+#define S_PRIMARYPORTENABLE 30
+#define V_PRIMARYPORTENABLE(x) ((x) << S_PRIMARYPORTENABLE)
+#define F_PRIMARYPORTENABLE V_PRIMARYPORTENABLE(1U)
+
+#define S_SECUREPORTENABLE 29
+#define V_SECUREPORTENABLE(x) ((x) << S_SECUREPORTENABLE)
+#define F_SECUREPORTENABLE V_SECUREPORTENABLE(1U)
+
+#define S_ARPENABLE 28
+#define V_ARPENABLE(x) ((x) << S_ARPENABLE)
+#define F_ARPENABLE V_ARPENABLE(1U)
+
+#define S_IPMI_VLAN 0
+#define M_IPMI_VLAN 0xffffU
+#define V_IPMI_VLAN(x) ((x) << S_IPMI_VLAN)
+#define G_IPMI_VLAN(x) (((x) >> S_IPMI_VLAN) & M_IPMI_VLAN)
+
+#define A_TP_IPMI_CFG2 0x2f
+
+#define S_SECUREPORT 16
+#define M_SECUREPORT 0xffffU
+#define V_SECUREPORT(x) ((x) << S_SECUREPORT)
+#define G_SECUREPORT(x) (((x) >> S_SECUREPORT) & M_SECUREPORT)
+
+#define S_PRIMARYPORT 0
+#define M_PRIMARYPORT 0xffffU
+#define V_PRIMARYPORT(x) ((x) << S_PRIMARYPORT)
+#define G_PRIMARYPORT(x) (((x) >> S_PRIMARYPORT) & M_PRIMARYPORT)
+
+#define A_TP_RSS_PF0_CONFIG 0x30
+
+#define S_MAPENABLE 31
+#define V_MAPENABLE(x) ((x) << S_MAPENABLE)
+#define F_MAPENABLE V_MAPENABLE(1U)
+
+#define S_CHNENABLE 30
+#define V_CHNENABLE(x) ((x) << S_CHNENABLE)
+#define F_CHNENABLE V_CHNENABLE(1U)
+
+#define S_PRTENABLE 29
+#define V_PRTENABLE(x) ((x) << S_PRTENABLE)
+#define F_PRTENABLE V_PRTENABLE(1U)
+
+#define S_UDPFOURTUPEN 28
+#define V_UDPFOURTUPEN(x) ((x) << S_UDPFOURTUPEN)
+#define F_UDPFOURTUPEN V_UDPFOURTUPEN(1U)
+
+#define S_IP6FOURTUPEN 27
+#define V_IP6FOURTUPEN(x) ((x) << S_IP6FOURTUPEN)
+#define F_IP6FOURTUPEN V_IP6FOURTUPEN(1U)
+
+#define S_IP6TWOTUPEN 26
+#define V_IP6TWOTUPEN(x) ((x) << S_IP6TWOTUPEN)
+#define F_IP6TWOTUPEN V_IP6TWOTUPEN(1U)
+
+#define S_IP4FOURTUPEN 25
+#define V_IP4FOURTUPEN(x) ((x) << S_IP4FOURTUPEN)
+#define F_IP4FOURTUPEN V_IP4FOURTUPEN(1U)
+
+#define S_IP4TWOTUPEN 24
+#define V_IP4TWOTUPEN(x) ((x) << S_IP4TWOTUPEN)
+#define F_IP4TWOTUPEN V_IP4TWOTUPEN(1U)
+
+#define S_IVFWIDTH 20
+#define M_IVFWIDTH 0xfU
+#define V_IVFWIDTH(x) ((x) << S_IVFWIDTH)
+#define G_IVFWIDTH(x) (((x) >> S_IVFWIDTH) & M_IVFWIDTH)
+
+#define S_CH1DEFAULTQUEUE 10
+#define M_CH1DEFAULTQUEUE 0x3ffU
+#define V_CH1DEFAULTQUEUE(x) ((x) << S_CH1DEFAULTQUEUE)
+#define G_CH1DEFAULTQUEUE(x) (((x) >> S_CH1DEFAULTQUEUE) & M_CH1DEFAULTQUEUE)
+
+#define S_CH0DEFAULTQUEUE 0
+#define M_CH0DEFAULTQUEUE 0x3ffU
+#define V_CH0DEFAULTQUEUE(x) ((x) << S_CH0DEFAULTQUEUE)
+#define G_CH0DEFAULTQUEUE(x) (((x) >> S_CH0DEFAULTQUEUE) & M_CH0DEFAULTQUEUE)
+
+#define A_TP_RSS_PF1_CONFIG 0x31
+#define A_TP_RSS_PF2_CONFIG 0x32
+#define A_TP_RSS_PF3_CONFIG 0x33
+#define A_TP_RSS_PF4_CONFIG 0x34
+#define A_TP_RSS_PF5_CONFIG 0x35
+#define A_TP_RSS_PF6_CONFIG 0x36
+#define A_TP_RSS_PF7_CONFIG 0x37
+#define A_TP_RSS_PF_MAP 0x38
+
+#define S_LKPIDXSIZE 24
+#define M_LKPIDXSIZE 0x3U
+#define V_LKPIDXSIZE(x) ((x) << S_LKPIDXSIZE)
+#define G_LKPIDXSIZE(x) (((x) >> S_LKPIDXSIZE) & M_LKPIDXSIZE)
+
+#define S_PF7LKPIDX 21
+#define M_PF7LKPIDX 0x7U
+#define V_PF7LKPIDX(x) ((x) << S_PF7LKPIDX)
+#define G_PF7LKPIDX(x) (((x) >> S_PF7LKPIDX) & M_PF7LKPIDX)
+
+#define S_PF6LKPIDX 18
+#define M_PF6LKPIDX 0x7U
+#define V_PF6LKPIDX(x) ((x) << S_PF6LKPIDX)
+#define G_PF6LKPIDX(x) (((x) >> S_PF6LKPIDX) & M_PF6LKPIDX)
+
+#define S_PF5LKPIDX 15
+#define M_PF5LKPIDX 0x7U
+#define V_PF5LKPIDX(x) ((x) << S_PF5LKPIDX)
+#define G_PF5LKPIDX(x) (((x) >> S_PF5LKPIDX) & M_PF5LKPIDX)
+
+#define S_PF4LKPIDX 12
+#define M_PF4LKPIDX 0x7U
+#define V_PF4LKPIDX(x) ((x) << S_PF4LKPIDX)
+#define G_PF4LKPIDX(x) (((x) >> S_PF4LKPIDX) & M_PF4LKPIDX)
+
+#define S_PF3LKPIDX 9
+#define M_PF3LKPIDX 0x7U
+#define V_PF3LKPIDX(x) ((x) << S_PF3LKPIDX)
+#define G_PF3LKPIDX(x) (((x) >> S_PF3LKPIDX) & M_PF3LKPIDX)
+
+#define S_PF2LKPIDX 6
+#define M_PF2LKPIDX 0x7U
+#define V_PF2LKPIDX(x) ((x) << S_PF2LKPIDX)
+#define G_PF2LKPIDX(x) (((x) >> S_PF2LKPIDX) & M_PF2LKPIDX)
+
+#define S_PF1LKPIDX 3
+#define M_PF1LKPIDX 0x7U
+#define V_PF1LKPIDX(x) ((x) << S_PF1LKPIDX)
+#define G_PF1LKPIDX(x) (((x) >> S_PF1LKPIDX) & M_PF1LKPIDX)
+
+#define S_PF0LKPIDX 0
+#define M_PF0LKPIDX 0x7U
+#define V_PF0LKPIDX(x) ((x) << S_PF0LKPIDX)
+#define G_PF0LKPIDX(x) (((x) >> S_PF0LKPIDX) & M_PF0LKPIDX)
+
+#define A_TP_RSS_PF_MSK 0x39
+
+#define S_PF7MSKSIZE 28
+#define M_PF7MSKSIZE 0xfU
+#define V_PF7MSKSIZE(x) ((x) << S_PF7MSKSIZE)
+#define G_PF7MSKSIZE(x) (((x) >> S_PF7MSKSIZE) & M_PF7MSKSIZE)
+
+#define S_PF6MSKSIZE 24
+#define M_PF6MSKSIZE 0xfU
+#define V_PF6MSKSIZE(x) ((x) << S_PF6MSKSIZE)
+#define G_PF6MSKSIZE(x) (((x) >> S_PF6MSKSIZE) & M_PF6MSKSIZE)
+
+#define S_PF5MSKSIZE 20
+#define M_PF5MSKSIZE 0xfU
+#define V_PF5MSKSIZE(x) ((x) << S_PF5MSKSIZE)
+#define G_PF5MSKSIZE(x) (((x) >> S_PF5MSKSIZE) & M_PF5MSKSIZE)
+
+#define S_PF4MSKSIZE 16
+#define M_PF4MSKSIZE 0xfU
+#define V_PF4MSKSIZE(x) ((x) << S_PF4MSKSIZE)
+#define G_PF4MSKSIZE(x) (((x) >> S_PF4MSKSIZE) & M_PF4MSKSIZE)
+
+#define S_PF3MSKSIZE 12
+#define M_PF3MSKSIZE 0xfU
+#define V_PF3MSKSIZE(x) ((x) << S_PF3MSKSIZE)
+#define G_PF3MSKSIZE(x) (((x) >> S_PF3MSKSIZE) & M_PF3MSKSIZE)
+
+#define S_PF2MSKSIZE 8
+#define M_PF2MSKSIZE 0xfU
+#define V_PF2MSKSIZE(x) ((x) << S_PF2MSKSIZE)
+#define G_PF2MSKSIZE(x) (((x) >> S_PF2MSKSIZE) & M_PF2MSKSIZE)
+
+#define S_PF1MSKSIZE 4
+#define M_PF1MSKSIZE 0xfU
+#define V_PF1MSKSIZE(x) ((x) << S_PF1MSKSIZE)
+#define G_PF1MSKSIZE(x) (((x) >> S_PF1MSKSIZE) & M_PF1MSKSIZE)
+
+#define S_PF0MSKSIZE 0
+#define M_PF0MSKSIZE 0xfU
+#define V_PF0MSKSIZE(x) ((x) << S_PF0MSKSIZE)
+#define G_PF0MSKSIZE(x) (((x) >> S_PF0MSKSIZE) & M_PF0MSKSIZE)
+
+#define A_TP_RSS_VFL_CONFIG 0x3a
+#define A_TP_RSS_VFH_CONFIG 0x3b
+
+#define S_ENABLEUDPHASH 31
+#define V_ENABLEUDPHASH(x) ((x) << S_ENABLEUDPHASH)
+#define F_ENABLEUDPHASH V_ENABLEUDPHASH(1U)
+
+#define S_VFUPEN 30
+#define V_VFUPEN(x) ((x) << S_VFUPEN)
+#define F_VFUPEN V_VFUPEN(1U)
+
+#define S_VFVLNEX 28
+#define V_VFVLNEX(x) ((x) << S_VFVLNEX)
+#define F_VFVLNEX V_VFVLNEX(1U)
+
+#define S_VFPRTEN 27
+#define V_VFPRTEN(x) ((x) << S_VFPRTEN)
+#define F_VFPRTEN V_VFPRTEN(1U)
+
+#define S_VFCHNEN 26
+#define V_VFCHNEN(x) ((x) << S_VFCHNEN)
+#define F_VFCHNEN V_VFCHNEN(1U)
+
+#define S_DEFAULTQUEUE 16
+#define M_DEFAULTQUEUE 0x3ffU
+#define V_DEFAULTQUEUE(x) ((x) << S_DEFAULTQUEUE)
+#define G_DEFAULTQUEUE(x) (((x) >> S_DEFAULTQUEUE) & M_DEFAULTQUEUE)
+
+#define S_VFLKPIDX 8
+#define M_VFLKPIDX 0xffU
+#define V_VFLKPIDX(x) ((x) << S_VFLKPIDX)
+#define G_VFLKPIDX(x) (((x) >> S_VFLKPIDX) & M_VFLKPIDX)
+
+#define S_VFIP6FOURTUPEN 7
+#define V_VFIP6FOURTUPEN(x) ((x) << S_VFIP6FOURTUPEN)
+#define F_VFIP6FOURTUPEN V_VFIP6FOURTUPEN(1U)
+
+#define S_VFIP6TWOTUPEN 6
+#define V_VFIP6TWOTUPEN(x) ((x) << S_VFIP6TWOTUPEN)
+#define F_VFIP6TWOTUPEN V_VFIP6TWOTUPEN(1U)
+
+#define S_VFIP4FOURTUPEN 5
+#define V_VFIP4FOURTUPEN(x) ((x) << S_VFIP4FOURTUPEN)
+#define F_VFIP4FOURTUPEN V_VFIP4FOURTUPEN(1U)
+
+#define S_VFIP4TWOTUPEN 4
+#define V_VFIP4TWOTUPEN(x) ((x) << S_VFIP4TWOTUPEN)
+#define F_VFIP4TWOTUPEN V_VFIP4TWOTUPEN(1U)
+
+#define S_KEYINDEX 0
+#define M_KEYINDEX 0xfU
+#define V_KEYINDEX(x) ((x) << S_KEYINDEX)
+#define G_KEYINDEX(x) (((x) >> S_KEYINDEX) & M_KEYINDEX)
+
+#define A_TP_RSS_SECRET_KEY0 0x40
+#define A_TP_RSS_SECRET_KEY1 0x41
+#define A_TP_RSS_SECRET_KEY2 0x42
+#define A_TP_RSS_SECRET_KEY3 0x43
+#define A_TP_RSS_SECRET_KEY4 0x44
+#define A_TP_RSS_SECRET_KEY5 0x45
+#define A_TP_RSS_SECRET_KEY6 0x46
+#define A_TP_RSS_SECRET_KEY7 0x47
+#define A_TP_RSS_SECRET_KEY8 0x48
+#define A_TP_RSS_SECRET_KEY9 0x49
+#define A_TP_ETHER_TYPE_VL 0x50
+
+#define S_CQFCTYPE 16
+#define M_CQFCTYPE 0xffffU
+#define V_CQFCTYPE(x) ((x) << S_CQFCTYPE)
+#define G_CQFCTYPE(x) (((x) >> S_CQFCTYPE) & M_CQFCTYPE)
+
+#define S_VLANTYPE 0
+#define M_VLANTYPE 0xffffU
+#define V_VLANTYPE(x) ((x) << S_VLANTYPE)
+#define G_VLANTYPE(x) (((x) >> S_VLANTYPE) & M_VLANTYPE)
+
+#define A_TP_ETHER_TYPE_IP 0x51
+
+#define S_IPV6TYPE 16
+#define M_IPV6TYPE 0xffffU
+#define V_IPV6TYPE(x) ((x) << S_IPV6TYPE)
+#define G_IPV6TYPE(x) (((x) >> S_IPV6TYPE) & M_IPV6TYPE)
+
+#define S_IPV4TYPE 0
+#define M_IPV4TYPE 0xffffU
+#define V_IPV4TYPE(x) ((x) << S_IPV4TYPE)
+#define G_IPV4TYPE(x) (((x) >> S_IPV4TYPE) & M_IPV4TYPE)
+
+#define A_TP_DBG_CLEAR 0x60
+#define A_TP_DBG_CORE_HDR0 0x61
+
+#define S_E_TCP_OP_SRDY 16
+#define V_E_TCP_OP_SRDY(x) ((x) << S_E_TCP_OP_SRDY)
+#define F_E_TCP_OP_SRDY V_E_TCP_OP_SRDY(1U)
+
+#define S_E_PLD_TXZEROP_SRDY 15
+#define V_E_PLD_TXZEROP_SRDY(x) ((x) << S_E_PLD_TXZEROP_SRDY)
+#define F_E_PLD_TXZEROP_SRDY V_E_PLD_TXZEROP_SRDY(1U)
+
+#define S_E_PLD_RX_SRDY 14
+#define V_E_PLD_RX_SRDY(x) ((x) << S_E_PLD_RX_SRDY)
+#define F_E_PLD_RX_SRDY V_E_PLD_RX_SRDY(1U)
+
+#define S_E_RX_ERROR_SRDY 13
+#define V_E_RX_ERROR_SRDY(x) ((x) << S_E_RX_ERROR_SRDY)
+#define F_E_RX_ERROR_SRDY V_E_RX_ERROR_SRDY(1U)
+
+#define S_E_RX_ISS_SRDY 12
+#define V_E_RX_ISS_SRDY(x) ((x) << S_E_RX_ISS_SRDY)
+#define F_E_RX_ISS_SRDY V_E_RX_ISS_SRDY(1U)
+
+#define S_C_TCP_OP_SRDY 11
+#define V_C_TCP_OP_SRDY(x) ((x) << S_C_TCP_OP_SRDY)
+#define F_C_TCP_OP_SRDY V_C_TCP_OP_SRDY(1U)
+
+#define S_C_PLD_TXZEROP_SRDY 10
+#define V_C_PLD_TXZEROP_SRDY(x) ((x) << S_C_PLD_TXZEROP_SRDY)
+#define F_C_PLD_TXZEROP_SRDY V_C_PLD_TXZEROP_SRDY(1U)
+
+#define S_C_PLD_RX_SRDY 9
+#define V_C_PLD_RX_SRDY(x) ((x) << S_C_PLD_RX_SRDY)
+#define F_C_PLD_RX_SRDY V_C_PLD_RX_SRDY(1U)
+
+#define S_C_RX_ERROR_SRDY 8
+#define V_C_RX_ERROR_SRDY(x) ((x) << S_C_RX_ERROR_SRDY)
+#define F_C_RX_ERROR_SRDY V_C_RX_ERROR_SRDY(1U)
+
+#define S_C_RX_ISS_SRDY 7
+#define V_C_RX_ISS_SRDY(x) ((x) << S_C_RX_ISS_SRDY)
+#define F_C_RX_ISS_SRDY V_C_RX_ISS_SRDY(1U)
+
+#define S_E_CPL5_TXVALID 6
+#define V_E_CPL5_TXVALID(x) ((x) << S_E_CPL5_TXVALID)
+#define F_E_CPL5_TXVALID V_E_CPL5_TXVALID(1U)
+
+#define S_E_ETH_TXVALID 5
+#define V_E_ETH_TXVALID(x) ((x) << S_E_ETH_TXVALID)
+#define F_E_ETH_TXVALID V_E_ETH_TXVALID(1U)
+
+#define S_E_IP_TXVALID 4
+#define V_E_IP_TXVALID(x) ((x) << S_E_IP_TXVALID)
+#define F_E_IP_TXVALID V_E_IP_TXVALID(1U)
+
+#define S_E_TCP_TXVALID 3
+#define V_E_TCP_TXVALID(x) ((x) << S_E_TCP_TXVALID)
+#define F_E_TCP_TXVALID V_E_TCP_TXVALID(1U)
+
+#define S_C_CPL5_RXVALID 2
+#define V_C_CPL5_RXVALID(x) ((x) << S_C_CPL5_RXVALID)
+#define F_C_CPL5_RXVALID V_C_CPL5_RXVALID(1U)
+
+#define S_C_CPL5_TXVALID 1
+#define V_C_CPL5_TXVALID(x) ((x) << S_C_CPL5_TXVALID)
+#define F_C_CPL5_TXVALID V_C_CPL5_TXVALID(1U)
+
+#define S_E_TCP_OPT_RXVALID 0
+#define V_E_TCP_OPT_RXVALID(x) ((x) << S_E_TCP_OPT_RXVALID)
+#define F_E_TCP_OPT_RXVALID V_E_TCP_OPT_RXVALID(1U)
+
+#define A_TP_DBG_CORE_HDR1 0x62
+
+#define S_E_CPL5_TXFULL 6
+#define V_E_CPL5_TXFULL(x) ((x) << S_E_CPL5_TXFULL)
+#define F_E_CPL5_TXFULL V_E_CPL5_TXFULL(1U)
+
+#define S_E_ETH_TXFULL 5
+#define V_E_ETH_TXFULL(x) ((x) << S_E_ETH_TXFULL)
+#define F_E_ETH_TXFULL V_E_ETH_TXFULL(1U)
+
+#define S_E_IP_TXFULL 4
+#define V_E_IP_TXFULL(x) ((x) << S_E_IP_TXFULL)
+#define F_E_IP_TXFULL V_E_IP_TXFULL(1U)
+
+#define S_E_TCP_TXFULL 3
+#define V_E_TCP_TXFULL(x) ((x) << S_E_TCP_TXFULL)
+#define F_E_TCP_TXFULL V_E_TCP_TXFULL(1U)
+
+#define S_C_CPL5_RXFULL 2
+#define V_C_CPL5_RXFULL(x) ((x) << S_C_CPL5_RXFULL)
+#define F_C_CPL5_RXFULL V_C_CPL5_RXFULL(1U)
+
+#define S_C_CPL5_TXFULL 1
+#define V_C_CPL5_TXFULL(x) ((x) << S_C_CPL5_TXFULL)
+#define F_C_CPL5_TXFULL V_C_CPL5_TXFULL(1U)
+
+#define S_E_TCP_OPT_RXFULL 0
+#define V_E_TCP_OPT_RXFULL(x) ((x) << S_E_TCP_OPT_RXFULL)
+#define F_E_TCP_OPT_RXFULL V_E_TCP_OPT_RXFULL(1U)
+
+#define A_TP_DBG_CORE_FATAL 0x63
+
+#define S_EMSGFATAL 31
+#define V_EMSGFATAL(x) ((x) << S_EMSGFATAL)
+#define F_EMSGFATAL V_EMSGFATAL(1U)
+
+#define S_CMSGFATAL 30
+#define V_CMSGFATAL(x) ((x) << S_CMSGFATAL)
+#define F_CMSGFATAL V_CMSGFATAL(1U)
+
+#define S_PAWSFATAL 29
+#define V_PAWSFATAL(x) ((x) << S_PAWSFATAL)
+#define F_PAWSFATAL V_PAWSFATAL(1U)
+
+#define S_SRAMFATAL 28
+#define V_SRAMFATAL(x) ((x) << S_SRAMFATAL)
+#define F_SRAMFATAL V_SRAMFATAL(1U)
+
+#define S_EPCMDCONG 24
+#define M_EPCMDCONG 0xfU
+#define V_EPCMDCONG(x) ((x) << S_EPCMDCONG)
+#define G_EPCMDCONG(x) (((x) >> S_EPCMDCONG) & M_EPCMDCONG)
+
+#define S_CPCMDCONG 22
+#define M_CPCMDCONG 0x3U
+#define V_CPCMDCONG(x) ((x) << S_CPCMDCONG)
+#define G_CPCMDCONG(x) (((x) >> S_CPCMDCONG) & M_CPCMDCONG)
+
+#define S_CPCMDLENFATAL 21
+#define V_CPCMDLENFATAL(x) ((x) << S_CPCMDLENFATAL)
+#define F_CPCMDLENFATAL V_CPCMDLENFATAL(1U)
+
+#define S_EPCMDLENFATAL 20
+#define V_EPCMDLENFATAL(x) ((x) << S_EPCMDLENFATAL)
+#define F_EPCMDLENFATAL V_EPCMDLENFATAL(1U)
+
+#define S_CPCMDVALID 16
+#define M_CPCMDVALID 0xfU
+#define V_CPCMDVALID(x) ((x) << S_CPCMDVALID)
+#define G_CPCMDVALID(x) (((x) >> S_CPCMDVALID) & M_CPCMDVALID)
+
+#define S_CPCMDAFULL 12
+#define M_CPCMDAFULL 0xfU
+#define V_CPCMDAFULL(x) ((x) << S_CPCMDAFULL)
+#define G_CPCMDAFULL(x) (((x) >> S_CPCMDAFULL) & M_CPCMDAFULL)
+
+#define S_EPCMDVALID 10
+#define M_EPCMDVALID 0x3U
+#define V_EPCMDVALID(x) ((x) << S_EPCMDVALID)
+#define G_EPCMDVALID(x) (((x) >> S_EPCMDVALID) & M_EPCMDVALID)
+
+#define S_EPCMDAFULL 8
+#define M_EPCMDAFULL 0x3U
+#define V_EPCMDAFULL(x) ((x) << S_EPCMDAFULL)
+#define G_EPCMDAFULL(x) (((x) >> S_EPCMDAFULL) & M_EPCMDAFULL)
+
+#define S_CPCMDEOIFATAL 7
+#define V_CPCMDEOIFATAL(x) ((x) << S_CPCMDEOIFATAL)
+#define F_CPCMDEOIFATAL V_CPCMDEOIFATAL(1U)
+
+#define S_CMDBRQFATAL 4
+#define V_CMDBRQFATAL(x) ((x) << S_CMDBRQFATAL)
+#define F_CMDBRQFATAL V_CMDBRQFATAL(1U)
+
+#define S_CNONZEROPPOPCNT 2
+#define M_CNONZEROPPOPCNT 0x3U
+#define V_CNONZEROPPOPCNT(x) ((x) << S_CNONZEROPPOPCNT)
+#define G_CNONZEROPPOPCNT(x) (((x) >> S_CNONZEROPPOPCNT) & M_CNONZEROPPOPCNT)
+
+#define S_CPCMDEOICNT 0
+#define M_CPCMDEOICNT 0x3U
+#define V_CPCMDEOICNT(x) ((x) << S_CPCMDEOICNT)
+#define G_CPCMDEOICNT(x) (((x) >> S_CPCMDEOICNT) & M_CPCMDEOICNT)
+
+#define A_TP_DBG_CORE_OUT 0x64
+
+#define S_CCPLENC 26
+#define V_CCPLENC(x) ((x) << S_CCPLENC)
+#define F_CCPLENC V_CCPLENC(1U)
+
+#define S_CWRCPLPKT 25
+#define V_CWRCPLPKT(x) ((x) << S_CWRCPLPKT)
+#define F_CWRCPLPKT V_CWRCPLPKT(1U)
+
+#define S_CWRETHPKT 24
+#define V_CWRETHPKT(x) ((x) << S_CWRETHPKT)
+#define F_CWRETHPKT V_CWRETHPKT(1U)
+
+#define S_CWRIPPKT 23
+#define V_CWRIPPKT(x) ((x) << S_CWRIPPKT)
+#define F_CWRIPPKT V_CWRIPPKT(1U)
+
+#define S_CWRTCPPKT 22
+#define V_CWRTCPPKT(x) ((x) << S_CWRTCPPKT)
+#define F_CWRTCPPKT V_CWRTCPPKT(1U)
+
+#define S_CWRZEROP 21
+#define V_CWRZEROP(x) ((x) << S_CWRZEROP)
+#define F_CWRZEROP V_CWRZEROP(1U)
+
+#define S_CCPLTXFULL 20
+#define V_CCPLTXFULL(x) ((x) << S_CCPLTXFULL)
+#define F_CCPLTXFULL V_CCPLTXFULL(1U)
+
+#define S_CETHTXFULL 19
+#define V_CETHTXFULL(x) ((x) << S_CETHTXFULL)
+#define F_CETHTXFULL V_CETHTXFULL(1U)
+
+#define S_CIPTXFULL 18
+#define V_CIPTXFULL(x) ((x) << S_CIPTXFULL)
+#define F_CIPTXFULL V_CIPTXFULL(1U)
+
+#define S_CTCPTXFULL 17
+#define V_CTCPTXFULL(x) ((x) << S_CTCPTXFULL)
+#define F_CTCPTXFULL V_CTCPTXFULL(1U)
+
+#define S_CPLDTXZEROPDRDY 16
+#define V_CPLDTXZEROPDRDY(x) ((x) << S_CPLDTXZEROPDRDY)
+#define F_CPLDTXZEROPDRDY V_CPLDTXZEROPDRDY(1U)
+
+#define S_ECPLENC 10
+#define V_ECPLENC(x) ((x) << S_ECPLENC)
+#define F_ECPLENC V_ECPLENC(1U)
+
+#define S_EWRCPLPKT 9
+#define V_EWRCPLPKT(x) ((x) << S_EWRCPLPKT)
+#define F_EWRCPLPKT V_EWRCPLPKT(1U)
+
+#define S_EWRETHPKT 8
+#define V_EWRETHPKT(x) ((x) << S_EWRETHPKT)
+#define F_EWRETHPKT V_EWRETHPKT(1U)
+
+#define S_EWRIPPKT 7
+#define V_EWRIPPKT(x) ((x) << S_EWRIPPKT)
+#define F_EWRIPPKT V_EWRIPPKT(1U)
+
+#define S_EWRTCPPKT 6
+#define V_EWRTCPPKT(x) ((x) << S_EWRTCPPKT)
+#define F_EWRTCPPKT V_EWRTCPPKT(1U)
+
+#define S_EWRZEROP 5
+#define V_EWRZEROP(x) ((x) << S_EWRZEROP)
+#define F_EWRZEROP V_EWRZEROP(1U)
+
+#define S_ECPLTXFULL 4
+#define V_ECPLTXFULL(x) ((x) << S_ECPLTXFULL)
+#define F_ECPLTXFULL V_ECPLTXFULL(1U)
+
+#define S_EETHTXFULL 3
+#define V_EETHTXFULL(x) ((x) << S_EETHTXFULL)
+#define F_EETHTXFULL V_EETHTXFULL(1U)
+
+#define S_EIPTXFULL 2
+#define V_EIPTXFULL(x) ((x) << S_EIPTXFULL)
+#define F_EIPTXFULL V_EIPTXFULL(1U)
+
+#define S_ETCPTXFULL 1
+#define V_ETCPTXFULL(x) ((x) << S_ETCPTXFULL)
+#define F_ETCPTXFULL V_ETCPTXFULL(1U)
+
+#define S_EPLDTXZEROPDRDY 0
+#define V_EPLDTXZEROPDRDY(x) ((x) << S_EPLDTXZEROPDRDY)
+#define F_EPLDTXZEROPDRDY V_EPLDTXZEROPDRDY(1U)
+
+#define A_TP_DBG_CORE_TID 0x65
+
+#define S_LINENUMBER 24
+#define M_LINENUMBER 0x7fU
+#define V_LINENUMBER(x) ((x) << S_LINENUMBER)
+#define G_LINENUMBER(x) (((x) >> S_LINENUMBER) & M_LINENUMBER)
+
+#define S_SPURIOUSMSG 23
+#define V_SPURIOUSMSG(x) ((x) << S_SPURIOUSMSG)
+#define F_SPURIOUSMSG V_SPURIOUSMSG(1U)
+
+#define S_SYNLEARNED 20
+#define V_SYNLEARNED(x) ((x) << S_SYNLEARNED)
+#define F_SYNLEARNED V_SYNLEARNED(1U)
+
+#define S_TIDVALUE 0
+#define M_TIDVALUE 0xfffffU
+#define V_TIDVALUE(x) ((x) << S_TIDVALUE)
+#define G_TIDVALUE(x) (((x) >> S_TIDVALUE) & M_TIDVALUE)
+
+#define A_TP_DBG_ENG_RES0 0x66
+
+#define S_RESOURCESREADY 31
+#define V_RESOURCESREADY(x) ((x) << S_RESOURCESREADY)
+#define F_RESOURCESREADY V_RESOURCESREADY(1U)
+
+#define S_RCFOPCODEOUTSRDY 30
+#define V_RCFOPCODEOUTSRDY(x) ((x) << S_RCFOPCODEOUTSRDY)
+#define F_RCFOPCODEOUTSRDY V_RCFOPCODEOUTSRDY(1U)
+
+#define S_RCFDATAOUTSRDY 29
+#define V_RCFDATAOUTSRDY(x) ((x) << S_RCFDATAOUTSRDY)
+#define F_RCFDATAOUTSRDY V_RCFDATAOUTSRDY(1U)
+
+#define S_FLUSHINPUTMSG 28
+#define V_FLUSHINPUTMSG(x) ((x) << S_FLUSHINPUTMSG)
+#define F_FLUSHINPUTMSG V_FLUSHINPUTMSG(1U)
+
+#define S_RCFOPSRCOUT 26
+#define M_RCFOPSRCOUT 0x3U
+#define V_RCFOPSRCOUT(x) ((x) << S_RCFOPSRCOUT)
+#define G_RCFOPSRCOUT(x) (((x) >> S_RCFOPSRCOUT) & M_RCFOPSRCOUT)
+
+#define S_C_MSG 25
+#define V_C_MSG(x) ((x) << S_C_MSG)
+#define F_C_MSG V_C_MSG(1U)
+
+#define S_E_MSG 24
+#define V_E_MSG(x) ((x) << S_E_MSG)
+#define F_E_MSG V_E_MSG(1U)
+
+#define S_RCFOPCODEOUT 20
+#define M_RCFOPCODEOUT 0xfU
+#define V_RCFOPCODEOUT(x) ((x) << S_RCFOPCODEOUT)
+#define G_RCFOPCODEOUT(x) (((x) >> S_RCFOPCODEOUT) & M_RCFOPCODEOUT)
+
+#define S_EFFRCFOPCODEOUT 16
+#define M_EFFRCFOPCODEOUT 0xfU
+#define V_EFFRCFOPCODEOUT(x) ((x) << S_EFFRCFOPCODEOUT)
+#define G_EFFRCFOPCODEOUT(x) (((x) >> S_EFFRCFOPCODEOUT) & M_EFFRCFOPCODEOUT)
+
+#define S_SEENRESOURCESREADY 15
+#define V_SEENRESOURCESREADY(x) ((x) << S_SEENRESOURCESREADY)
+#define F_SEENRESOURCESREADY V_SEENRESOURCESREADY(1U)
+
+#define S_RESOURCESREADYCOPY 14
+#define V_RESOURCESREADYCOPY(x) ((x) << S_RESOURCESREADYCOPY)
+#define F_RESOURCESREADYCOPY V_RESOURCESREADYCOPY(1U)
+
+#define S_OPCODEWAITSFORDATA 13
+#define V_OPCODEWAITSFORDATA(x) ((x) << S_OPCODEWAITSFORDATA)
+#define F_OPCODEWAITSFORDATA V_OPCODEWAITSFORDATA(1U)
+
+#define S_CPLDRXSRDY 12
+#define V_CPLDRXSRDY(x) ((x) << S_CPLDRXSRDY)
+#define F_CPLDRXSRDY V_CPLDRXSRDY(1U)
+
+#define S_CPLDRXZEROPSRDY 11
+#define V_CPLDRXZEROPSRDY(x) ((x) << S_CPLDRXZEROPSRDY)
+#define F_CPLDRXZEROPSRDY V_CPLDRXZEROPSRDY(1U)
+
+#define S_EPLDRXZEROPSRDY 10
+#define V_EPLDRXZEROPSRDY(x) ((x) << S_EPLDRXZEROPSRDY)
+#define F_EPLDRXZEROPSRDY V_EPLDRXZEROPSRDY(1U)
+
+#define S_ERXERRORSRDY 9
+#define V_ERXERRORSRDY(x) ((x) << S_ERXERRORSRDY)
+#define F_ERXERRORSRDY V_ERXERRORSRDY(1U)
+
+#define S_EPLDRXSRDY 8
+#define V_EPLDRXSRDY(x) ((x) << S_EPLDRXSRDY)
+#define F_EPLDRXSRDY V_EPLDRXSRDY(1U)
+
+#define S_CRXBUSY 7
+#define V_CRXBUSY(x) ((x) << S_CRXBUSY)
+#define F_CRXBUSY V_CRXBUSY(1U)
+
+#define S_ERXBUSY 6
+#define V_ERXBUSY(x) ((x) << S_ERXBUSY)
+#define F_ERXBUSY V_ERXBUSY(1U)
+
+#define S_TIMERINSERTBUSY 5
+#define V_TIMERINSERTBUSY(x) ((x) << S_TIMERINSERTBUSY)
+#define F_TIMERINSERTBUSY V_TIMERINSERTBUSY(1U)
+
+#define S_WCFBUSY 4
+#define V_WCFBUSY(x) ((x) << S_WCFBUSY)
+#define F_WCFBUSY V_WCFBUSY(1U)
+
+#define S_CTXBUSY 3
+#define V_CTXBUSY(x) ((x) << S_CTXBUSY)
+#define F_CTXBUSY V_CTXBUSY(1U)
+
+#define S_CPCMDBUSY 2
+#define V_CPCMDBUSY(x) ((x) << S_CPCMDBUSY)
+#define F_CPCMDBUSY V_CPCMDBUSY(1U)
+
+#define S_ETXBUSY 1
+#define V_ETXBUSY(x) ((x) << S_ETXBUSY)
+#define F_ETXBUSY V_ETXBUSY(1U)
+
+#define S_EPCMDBUSY 0
+#define V_EPCMDBUSY(x) ((x) << S_EPCMDBUSY)
+#define F_EPCMDBUSY V_EPCMDBUSY(1U)
+
+#define A_TP_DBG_ENG_RES1 0x67
+
+#define S_RXCPLSRDY 31
+#define V_RXCPLSRDY(x) ((x) << S_RXCPLSRDY)
+#define F_RXCPLSRDY V_RXCPLSRDY(1U)
+
+#define S_RXOPTSRDY 30
+#define V_RXOPTSRDY(x) ((x) << S_RXOPTSRDY)
+#define F_RXOPTSRDY V_RXOPTSRDY(1U)
+
+#define S_RXPLDLENSRDY 29
+#define V_RXPLDLENSRDY(x) ((x) << S_RXPLDLENSRDY)
+#define F_RXPLDLENSRDY V_RXPLDLENSRDY(1U)
+
+#define S_RXNOTBUSY 28
+#define V_RXNOTBUSY(x) ((x) << S_RXNOTBUSY)
+#define F_RXNOTBUSY V_RXNOTBUSY(1U)
+
+#define S_CPLCMDIN 20
+#define M_CPLCMDIN 0xffU
+#define V_CPLCMDIN(x) ((x) << S_CPLCMDIN)
+#define G_CPLCMDIN(x) (((x) >> S_CPLCMDIN) & M_CPLCMDIN)
+
+#define S_RCFPTIDSRDY 19
+#define V_RCFPTIDSRDY(x) ((x) << S_RCFPTIDSRDY)
+#define F_RCFPTIDSRDY V_RCFPTIDSRDY(1U)
+
+#define S_EPDUHDRSRDY 18
+#define V_EPDUHDRSRDY(x) ((x) << S_EPDUHDRSRDY)
+#define F_EPDUHDRSRDY V_EPDUHDRSRDY(1U)
+
+#define S_TUNNELPKTREG 17
+#define V_TUNNELPKTREG(x) ((x) << S_TUNNELPKTREG)
+#define F_TUNNELPKTREG V_TUNNELPKTREG(1U)
+
+#define S_TXPKTCSUMSRDY 16
+#define V_TXPKTCSUMSRDY(x) ((x) << S_TXPKTCSUMSRDY)
+#define F_TXPKTCSUMSRDY V_TXPKTCSUMSRDY(1U)
+
+#define S_TABLEACCESSLATENCY 12
+#define M_TABLEACCESSLATENCY 0xfU
+#define V_TABLEACCESSLATENCY(x) ((x) << S_TABLEACCESSLATENCY)
+#define G_TABLEACCESSLATENCY(x) (((x) >> S_TABLEACCESSLATENCY) & M_TABLEACCESSLATENCY)
+
+#define S_MMGRDONE 11
+#define V_MMGRDONE(x) ((x) << S_MMGRDONE)
+#define F_MMGRDONE V_MMGRDONE(1U)
+
+#define S_SEENMMGRDONE 10
+#define V_SEENMMGRDONE(x) ((x) << S_SEENMMGRDONE)
+#define F_SEENMMGRDONE V_SEENMMGRDONE(1U)
+
+#define S_RXERRORSRDY 9
+#define V_RXERRORSRDY(x) ((x) << S_RXERRORSRDY)
+#define F_RXERRORSRDY V_RXERRORSRDY(1U)
+
+#define S_RCFOPTIONSTCPSRDY 8
+#define V_RCFOPTIONSTCPSRDY(x) ((x) << S_RCFOPTIONSTCPSRDY)
+#define F_RCFOPTIONSTCPSRDY V_RCFOPTIONSTCPSRDY(1U)
+
+#define S_ENGINESTATE 6
+#define M_ENGINESTATE 0x3U
+#define V_ENGINESTATE(x) ((x) << S_ENGINESTATE)
+#define G_ENGINESTATE(x) (((x) >> S_ENGINESTATE) & M_ENGINESTATE)
+
+#define S_TABLEACCESINCREMENT 5
+#define V_TABLEACCESINCREMENT(x) ((x) << S_TABLEACCESINCREMENT)
+#define F_TABLEACCESINCREMENT V_TABLEACCESINCREMENT(1U)
+
+#define S_TABLEACCESCOMPLETE 4
+#define V_TABLEACCESCOMPLETE(x) ((x) << S_TABLEACCESCOMPLETE)
+#define F_TABLEACCESCOMPLETE V_TABLEACCESCOMPLETE(1U)
+
+#define S_RCFOPCODEOUTUSABLE 3
+#define V_RCFOPCODEOUTUSABLE(x) ((x) << S_RCFOPCODEOUTUSABLE)
+#define F_RCFOPCODEOUTUSABLE V_RCFOPCODEOUTUSABLE(1U)
+
+#define S_RCFDATAOUTUSABLE 2
+#define V_RCFDATAOUTUSABLE(x) ((x) << S_RCFDATAOUTUSABLE)
+#define F_RCFDATAOUTUSABLE V_RCFDATAOUTUSABLE(1U)
+
+#define S_RCFDATAWAITAFTERRD 1
+#define V_RCFDATAWAITAFTERRD(x) ((x) << S_RCFDATAWAITAFTERRD)
+#define F_RCFDATAWAITAFTERRD V_RCFDATAWAITAFTERRD(1U)
+
+#define S_RCFDATACMRDY 0
+#define V_RCFDATACMRDY(x) ((x) << S_RCFDATACMRDY)
+#define F_RCFDATACMRDY V_RCFDATACMRDY(1U)
+
+#define A_TP_DBG_ENG_RES2 0x68
+
+#define S_CPLCMDRAW 24
+#define M_CPLCMDRAW 0xffU
+#define V_CPLCMDRAW(x) ((x) << S_CPLCMDRAW)
+#define G_CPLCMDRAW(x) (((x) >> S_CPLCMDRAW) & M_CPLCMDRAW)
+
+#define S_RXMACPORT 20
+#define M_RXMACPORT 0xfU
+#define V_RXMACPORT(x) ((x) << S_RXMACPORT)
+#define G_RXMACPORT(x) (((x) >> S_RXMACPORT) & M_RXMACPORT)
+
+#define S_TXECHANNEL 18
+#define M_TXECHANNEL 0x3U
+#define V_TXECHANNEL(x) ((x) << S_TXECHANNEL)
+#define G_TXECHANNEL(x) (((x) >> S_TXECHANNEL) & M_TXECHANNEL)
+
+#define S_RXECHANNEL 16
+#define M_RXECHANNEL 0x3U
+#define V_RXECHANNEL(x) ((x) << S_RXECHANNEL)
+#define G_RXECHANNEL(x) (((x) >> S_RXECHANNEL) & M_RXECHANNEL)
+
+#define S_CDATAOUT 15
+#define V_CDATAOUT(x) ((x) << S_CDATAOUT)
+#define F_CDATAOUT V_CDATAOUT(1U)
+
+#define S_CREADPDU 14
+#define V_CREADPDU(x) ((x) << S_CREADPDU)
+#define F_CREADPDU V_CREADPDU(1U)
+
+#define S_EDATAOUT 13
+#define V_EDATAOUT(x) ((x) << S_EDATAOUT)
+#define F_EDATAOUT V_EDATAOUT(1U)
+
+#define S_EREADPDU 12
+#define V_EREADPDU(x) ((x) << S_EREADPDU)
+#define F_EREADPDU V_EREADPDU(1U)
+
+#define S_ETCPOPSRDY 11
+#define V_ETCPOPSRDY(x) ((x) << S_ETCPOPSRDY)
+#define F_ETCPOPSRDY V_ETCPOPSRDY(1U)
+
+#define S_CTCPOPSRDY 10
+#define V_CTCPOPSRDY(x) ((x) << S_CTCPOPSRDY)
+#define F_CTCPOPSRDY V_CTCPOPSRDY(1U)
+
+#define S_CPKTOUT 9
+#define V_CPKTOUT(x) ((x) << S_CPKTOUT)
+#define F_CPKTOUT V_CPKTOUT(1U)
+
+#define S_CMDBRSPSRDY 8
+#define V_CMDBRSPSRDY(x) ((x) << S_CMDBRSPSRDY)
+#define F_CMDBRSPSRDY V_CMDBRSPSRDY(1U)
+
+#define S_RXPSTRUCTSFULL 6
+#define M_RXPSTRUCTSFULL 0x3U
+#define V_RXPSTRUCTSFULL(x) ((x) << S_RXPSTRUCTSFULL)
+#define G_RXPSTRUCTSFULL(x) (((x) >> S_RXPSTRUCTSFULL) & M_RXPSTRUCTSFULL)
+
+#define S_RXPAGEPOOLFULL 4
+#define M_RXPAGEPOOLFULL 0x3U
+#define V_RXPAGEPOOLFULL(x) ((x) << S_RXPAGEPOOLFULL)
+#define G_RXPAGEPOOLFULL(x) (((x) >> S_RXPAGEPOOLFULL) & M_RXPAGEPOOLFULL)
+
+#define S_RCFREASONOUT 0
+#define M_RCFREASONOUT 0xfU
+#define V_RCFREASONOUT(x) ((x) << S_RCFREASONOUT)
+#define G_RCFREASONOUT(x) (((x) >> S_RCFREASONOUT) & M_RCFREASONOUT)
+
+#define A_TP_DBG_CORE_PCMD 0x69
+
+#define S_CPCMDEOPCNT 30
+#define M_CPCMDEOPCNT 0x3U
+#define V_CPCMDEOPCNT(x) ((x) << S_CPCMDEOPCNT)
+#define G_CPCMDEOPCNT(x) (((x) >> S_CPCMDEOPCNT) & M_CPCMDEOPCNT)
+
+#define S_CPCMDLENSAVE 16
+#define M_CPCMDLENSAVE 0x3fffU
+#define V_CPCMDLENSAVE(x) ((x) << S_CPCMDLENSAVE)
+#define G_CPCMDLENSAVE(x) (((x) >> S_CPCMDLENSAVE) & M_CPCMDLENSAVE)
+
+#define S_EPCMDEOPCNT 14
+#define M_EPCMDEOPCNT 0x3U
+#define V_EPCMDEOPCNT(x) ((x) << S_EPCMDEOPCNT)
+#define G_EPCMDEOPCNT(x) (((x) >> S_EPCMDEOPCNT) & M_EPCMDEOPCNT)
+
+#define S_EPCMDLENSAVE 0
+#define M_EPCMDLENSAVE 0x3fffU
+#define V_EPCMDLENSAVE(x) ((x) << S_EPCMDLENSAVE)
+#define G_EPCMDLENSAVE(x) (((x) >> S_EPCMDLENSAVE) & M_EPCMDLENSAVE)
+
+#define A_TP_DBG_SCHED_TX 0x6a
+
+#define S_TXCHNXOFF 28
+#define M_TXCHNXOFF 0xfU
+#define V_TXCHNXOFF(x) ((x) << S_TXCHNXOFF)
+#define G_TXCHNXOFF(x) (((x) >> S_TXCHNXOFF) & M_TXCHNXOFF)
+
+#define S_TXFIFOCNG 24
+#define M_TXFIFOCNG 0xfU
+#define V_TXFIFOCNG(x) ((x) << S_TXFIFOCNG)
+#define G_TXFIFOCNG(x) (((x) >> S_TXFIFOCNG) & M_TXFIFOCNG)
+
+#define S_TXPCMDCNG 20
+#define M_TXPCMDCNG 0xfU
+#define V_TXPCMDCNG(x) ((x) << S_TXPCMDCNG)
+#define G_TXPCMDCNG(x) (((x) >> S_TXPCMDCNG) & M_TXPCMDCNG)
+
+#define S_TXLPBKCNG 16
+#define M_TXLPBKCNG 0xfU
+#define V_TXLPBKCNG(x) ((x) << S_TXLPBKCNG)
+#define G_TXLPBKCNG(x) (((x) >> S_TXLPBKCNG) & M_TXLPBKCNG)
+
+#define S_TXHDRCNG 8
+#define M_TXHDRCNG 0xffU
+#define V_TXHDRCNG(x) ((x) << S_TXHDRCNG)
+#define G_TXHDRCNG(x) (((x) >> S_TXHDRCNG) & M_TXHDRCNG)
+
+#define S_TXMODXOFF 0
+#define M_TXMODXOFF 0xffU
+#define V_TXMODXOFF(x) ((x) << S_TXMODXOFF)
+#define G_TXMODXOFF(x) (((x) >> S_TXMODXOFF) & M_TXMODXOFF)
+
+#define A_TP_DBG_SCHED_RX 0x6b
+
+#define S_RXCHNXOFF 28
+#define M_RXCHNXOFF 0xfU
+#define V_RXCHNXOFF(x) ((x) << S_RXCHNXOFF)
+#define G_RXCHNXOFF(x) (((x) >> S_RXCHNXOFF) & M_RXCHNXOFF)
+
+#define S_RXSGECNG 24
+#define M_RXSGECNG 0xfU
+#define V_RXSGECNG(x) ((x) << S_RXSGECNG)
+#define G_RXSGECNG(x) (((x) >> S_RXSGECNG) & M_RXSGECNG)
+
+#define S_RXFIFOCNG 22
+#define M_RXFIFOCNG 0x3U
+#define V_RXFIFOCNG(x) ((x) << S_RXFIFOCNG)
+#define G_RXFIFOCNG(x) (((x) >> S_RXFIFOCNG) & M_RXFIFOCNG)
+
+#define S_RXPCMDCNG 20
+#define M_RXPCMDCNG 0x3U
+#define V_RXPCMDCNG(x) ((x) << S_RXPCMDCNG)
+#define G_RXPCMDCNG(x) (((x) >> S_RXPCMDCNG) & M_RXPCMDCNG)
+
+#define S_RXLPBKCNG 16
+#define M_RXLPBKCNG 0xfU
+#define V_RXLPBKCNG(x) ((x) << S_RXLPBKCNG)
+#define G_RXLPBKCNG(x) (((x) >> S_RXLPBKCNG) & M_RXLPBKCNG)
+
+#define S_RXHDRCNG 8
+#define M_RXHDRCNG 0xfU
+#define V_RXHDRCNG(x) ((x) << S_RXHDRCNG)
+#define G_RXHDRCNG(x) (((x) >> S_RXHDRCNG) & M_RXHDRCNG)
+
+#define S_RXMODXOFF 0
+#define M_RXMODXOFF 0x3U
+#define V_RXMODXOFF(x) ((x) << S_RXMODXOFF)
+#define G_RXMODXOFF(x) (((x) >> S_RXMODXOFF) & M_RXMODXOFF)
+
+#define A_TP_TX_DROP_CFG_CH0 0x12b
+
+#define S_TIMERENABLED 31
+#define V_TIMERENABLED(x) ((x) << S_TIMERENABLED)
+#define F_TIMERENABLED V_TIMERENABLED(1U)
+
+#define S_TIMERERRORENABLE 30
+#define V_TIMERERRORENABLE(x) ((x) << S_TIMERERRORENABLE)
+#define F_TIMERERRORENABLE V_TIMERERRORENABLE(1U)
+
+#define S_TIMERTHRESHOLD 4
+#define M_TIMERTHRESHOLD 0x3ffffffU
+#define V_TIMERTHRESHOLD(x) ((x) << S_TIMERTHRESHOLD)
+#define G_TIMERTHRESHOLD(x) (((x) >> S_TIMERTHRESHOLD) & M_TIMERTHRESHOLD)
+
+#define S_PACKETDROPS 0
+#define M_PACKETDROPS 0xfU
+#define V_PACKETDROPS(x) ((x) << S_PACKETDROPS)
+#define G_PACKETDROPS(x) (((x) >> S_PACKETDROPS) & M_PACKETDROPS)
+
+#define A_TP_TX_DROP_CFG_CH1 0x12c
+#define A_TP_TX_DROP_CNT_CH0 0x12d
+
+#define S_TXDROPCNTCH0SENT 16
+#define M_TXDROPCNTCH0SENT 0xffffU
+#define V_TXDROPCNTCH0SENT(x) ((x) << S_TXDROPCNTCH0SENT)
+#define G_TXDROPCNTCH0SENT(x) (((x) >> S_TXDROPCNTCH0SENT) & M_TXDROPCNTCH0SENT)
+
+#define S_TXDROPCNTCH0RCVD 0
+#define M_TXDROPCNTCH0RCVD 0xffffU
+#define V_TXDROPCNTCH0RCVD(x) ((x) << S_TXDROPCNTCH0RCVD)
+#define G_TXDROPCNTCH0RCVD(x) (((x) >> S_TXDROPCNTCH0RCVD) & M_TXDROPCNTCH0RCVD)
+
+#define A_TP_TX_DROP_CNT_CH1 0x12e
+
+#define S_TXDROPCNTCH1SENT 16
+#define M_TXDROPCNTCH1SENT 0xffffU
+#define V_TXDROPCNTCH1SENT(x) ((x) << S_TXDROPCNTCH1SENT)
+#define G_TXDROPCNTCH1SENT(x) (((x) >> S_TXDROPCNTCH1SENT) & M_TXDROPCNTCH1SENT)
+
+#define S_TXDROPCNTCH1RCVD 0
+#define M_TXDROPCNTCH1RCVD 0xffffU
+#define V_TXDROPCNTCH1RCVD(x) ((x) << S_TXDROPCNTCH1RCVD)
+#define G_TXDROPCNTCH1RCVD(x) (((x) >> S_TXDROPCNTCH1RCVD) & M_TXDROPCNTCH1RCVD)
+
+#define A_TP_TX_DROP_MODE 0x12f
+
+#define S_TXDROPMODECH3 3
+#define V_TXDROPMODECH3(x) ((x) << S_TXDROPMODECH3)
+#define F_TXDROPMODECH3 V_TXDROPMODECH3(1U)
+
+#define S_TXDROPMODECH2 2
+#define V_TXDROPMODECH2(x) ((x) << S_TXDROPMODECH2)
+#define F_TXDROPMODECH2 V_TXDROPMODECH2(1U)
+
+#define S_TXDROPMODECH1 1
+#define V_TXDROPMODECH1(x) ((x) << S_TXDROPMODECH1)
+#define F_TXDROPMODECH1 V_TXDROPMODECH1(1U)
+
+#define S_TXDROPMODECH0 0
+#define V_TXDROPMODECH0(x) ((x) << S_TXDROPMODECH0)
+#define F_TXDROPMODECH0 V_TXDROPMODECH0(1U)
+
+#define A_TP_DBG_ESIDE_PKT0 0x130
+
+#define S_ETXSOPCNT 28
+#define M_ETXSOPCNT 0xfU
+#define V_ETXSOPCNT(x) ((x) << S_ETXSOPCNT)
+#define G_ETXSOPCNT(x) (((x) >> S_ETXSOPCNT) & M_ETXSOPCNT)
+
+#define S_ETXEOPCNT 24
+#define M_ETXEOPCNT 0xfU
+#define V_ETXEOPCNT(x) ((x) << S_ETXEOPCNT)
+#define G_ETXEOPCNT(x) (((x) >> S_ETXEOPCNT) & M_ETXEOPCNT)
+
+#define S_ETXPLDSOPCNT 20
+#define M_ETXPLDSOPCNT 0xfU
+#define V_ETXPLDSOPCNT(x) ((x) << S_ETXPLDSOPCNT)
+#define G_ETXPLDSOPCNT(x) (((x) >> S_ETXPLDSOPCNT) & M_ETXPLDSOPCNT)
+
+#define S_ETXPLDEOPCNT 16
+#define M_ETXPLDEOPCNT 0xfU
+#define V_ETXPLDEOPCNT(x) ((x) << S_ETXPLDEOPCNT)
+#define G_ETXPLDEOPCNT(x) (((x) >> S_ETXPLDEOPCNT) & M_ETXPLDEOPCNT)
+
+#define S_ERXSOPCNT 12
+#define M_ERXSOPCNT 0xfU
+#define V_ERXSOPCNT(x) ((x) << S_ERXSOPCNT)
+#define G_ERXSOPCNT(x) (((x) >> S_ERXSOPCNT) & M_ERXSOPCNT)
+
+#define S_ERXEOPCNT 8
+#define M_ERXEOPCNT 0xfU
+#define V_ERXEOPCNT(x) ((x) << S_ERXEOPCNT)
+#define G_ERXEOPCNT(x) (((x) >> S_ERXEOPCNT) & M_ERXEOPCNT)
+
+#define S_ERXPLDSOPCNT 4
+#define M_ERXPLDSOPCNT 0xfU
+#define V_ERXPLDSOPCNT(x) ((x) << S_ERXPLDSOPCNT)
+#define G_ERXPLDSOPCNT(x) (((x) >> S_ERXPLDSOPCNT) & M_ERXPLDSOPCNT)
+
+#define S_ERXPLDEOPCNT 0
+#define M_ERXPLDEOPCNT 0xfU
+#define V_ERXPLDEOPCNT(x) ((x) << S_ERXPLDEOPCNT)
+#define G_ERXPLDEOPCNT(x) (((x) >> S_ERXPLDEOPCNT) & M_ERXPLDEOPCNT)
+
+#define A_TP_DBG_ESIDE_PKT1 0x131
+#define A_TP_DBG_ESIDE_PKT2 0x132
+#define A_TP_DBG_ESIDE_PKT3 0x133
+#define A_TP_DBG_ESIDE_FIFO0 0x134
+
+#define S_PLDRXCSUMVALID1 31
+#define V_PLDRXCSUMVALID1(x) ((x) << S_PLDRXCSUMVALID1)
+#define F_PLDRXCSUMVALID1 V_PLDRXCSUMVALID1(1U)
+
+#define S_PLDRXZEROPSRDY1 30
+#define V_PLDRXZEROPSRDY1(x) ((x) << S_PLDRXZEROPSRDY1)
+#define F_PLDRXZEROPSRDY1 V_PLDRXZEROPSRDY1(1U)
+
+#define S_PLDRXVALID1 29
+#define V_PLDRXVALID1(x) ((x) << S_PLDRXVALID1)
+#define F_PLDRXVALID1 V_PLDRXVALID1(1U)
+
+#define S_TCPRXVALID1 28
+#define V_TCPRXVALID1(x) ((x) << S_TCPRXVALID1)
+#define F_TCPRXVALID1 V_TCPRXVALID1(1U)
+
+#define S_IPRXVALID1 27
+#define V_IPRXVALID1(x) ((x) << S_IPRXVALID1)
+#define F_IPRXVALID1 V_IPRXVALID1(1U)
+
+#define S_ETHRXVALID1 26
+#define V_ETHRXVALID1(x) ((x) << S_ETHRXVALID1)
+#define F_ETHRXVALID1 V_ETHRXVALID1(1U)
+
+#define S_CPLRXVALID1 25
+#define V_CPLRXVALID1(x) ((x) << S_CPLRXVALID1)
+#define F_CPLRXVALID1 V_CPLRXVALID1(1U)
+
+#define S_FSTATIC1 24
+#define V_FSTATIC1(x) ((x) << S_FSTATIC1)
+#define F_FSTATIC1 V_FSTATIC1(1U)
+
+#define S_ERRORSRDY1 23
+#define V_ERRORSRDY1(x) ((x) << S_ERRORSRDY1)
+#define F_ERRORSRDY1 V_ERRORSRDY1(1U)
+
+#define S_PLDTXSRDY1 22
+#define V_PLDTXSRDY1(x) ((x) << S_PLDTXSRDY1)
+#define F_PLDTXSRDY1 V_PLDTXSRDY1(1U)
+
+#define S_DBVLD1 21
+#define V_DBVLD1(x) ((x) << S_DBVLD1)
+#define F_DBVLD1 V_DBVLD1(1U)
+
+#define S_PLDTXVALID1 20
+#define V_PLDTXVALID1(x) ((x) << S_PLDTXVALID1)
+#define F_PLDTXVALID1 V_PLDTXVALID1(1U)
+
+#define S_ETXVALID1 19
+#define V_ETXVALID1(x) ((x) << S_ETXVALID1)
+#define F_ETXVALID1 V_ETXVALID1(1U)
+
+#define S_ETXFULL1 18
+#define V_ETXFULL1(x) ((x) << S_ETXFULL1)
+#define F_ETXFULL1 V_ETXFULL1(1U)
+
+#define S_ERXVALID1 17
+#define V_ERXVALID1(x) ((x) << S_ERXVALID1)
+#define F_ERXVALID1 V_ERXVALID1(1U)
+
+#define S_ERXFULL1 16
+#define V_ERXFULL1(x) ((x) << S_ERXFULL1)
+#define F_ERXFULL1 V_ERXFULL1(1U)
+
+#define S_PLDRXCSUMVALID0 15
+#define V_PLDRXCSUMVALID0(x) ((x) << S_PLDRXCSUMVALID0)
+#define F_PLDRXCSUMVALID0 V_PLDRXCSUMVALID0(1U)
+
+#define S_PLDRXZEROPSRDY0 14
+#define V_PLDRXZEROPSRDY0(x) ((x) << S_PLDRXZEROPSRDY0)
+#define F_PLDRXZEROPSRDY0 V_PLDRXZEROPSRDY0(1U)
+
+#define S_PLDRXVALID0 13
+#define V_PLDRXVALID0(x) ((x) << S_PLDRXVALID0)
+#define F_PLDRXVALID0 V_PLDRXVALID0(1U)
+
+#define S_TCPRXVALID0 12
+#define V_TCPRXVALID0(x) ((x) << S_TCPRXVALID0)
+#define F_TCPRXVALID0 V_TCPRXVALID0(1U)
+
+#define S_IPRXVALID0 11
+#define V_IPRXVALID0(x) ((x) << S_IPRXVALID0)
+#define F_IPRXVALID0 V_IPRXVALID0(1U)
+
+#define S_ETHRXVALID0 10
+#define V_ETHRXVALID0(x) ((x) << S_ETHRXVALID0)
+#define F_ETHRXVALID0 V_ETHRXVALID0(1U)
+
+#define S_CPLRXVALID0 9
+#define V_CPLRXVALID0(x) ((x) << S_CPLRXVALID0)
+#define F_CPLRXVALID0 V_CPLRXVALID0(1U)
+
+#define S_FSTATIC0 8
+#define V_FSTATIC0(x) ((x) << S_FSTATIC0)
+#define F_FSTATIC0 V_FSTATIC0(1U)
+
+#define S_ERRORSRDY0 7
+#define V_ERRORSRDY0(x) ((x) << S_ERRORSRDY0)
+#define F_ERRORSRDY0 V_ERRORSRDY0(1U)
+
+#define S_PLDTXSRDY0 6
+#define V_PLDTXSRDY0(x) ((x) << S_PLDTXSRDY0)
+#define F_PLDTXSRDY0 V_PLDTXSRDY0(1U)
+
+#define S_DBVLD0 5
+#define V_DBVLD0(x) ((x) << S_DBVLD0)
+#define F_DBVLD0 V_DBVLD0(1U)
+
+#define S_PLDTXVALID0 4
+#define V_PLDTXVALID0(x) ((x) << S_PLDTXVALID0)
+#define F_PLDTXVALID0 V_PLDTXVALID0(1U)
+
+#define S_ETXVALID0 3
+#define V_ETXVALID0(x) ((x) << S_ETXVALID0)
+#define F_ETXVALID0 V_ETXVALID0(1U)
+
+#define S_ETXFULL0 2
+#define V_ETXFULL0(x) ((x) << S_ETXFULL0)
+#define F_ETXFULL0 V_ETXFULL0(1U)
+
+#define S_ERXVALID0 1
+#define V_ERXVALID0(x) ((x) << S_ERXVALID0)
+#define F_ERXVALID0 V_ERXVALID0(1U)
+
+#define S_ERXFULL0 0
+#define V_ERXFULL0(x) ((x) << S_ERXFULL0)
+#define F_ERXFULL0 V_ERXFULL0(1U)
+
+#define A_TP_DBG_ESIDE_FIFO1 0x135
+
+#define S_PLDRXCSUMVALID3 31
+#define V_PLDRXCSUMVALID3(x) ((x) << S_PLDRXCSUMVALID3)
+#define F_PLDRXCSUMVALID3 V_PLDRXCSUMVALID3(1U)
+
+#define S_PLDRXZEROPSRDY3 30
+#define V_PLDRXZEROPSRDY3(x) ((x) << S_PLDRXZEROPSRDY3)
+#define F_PLDRXZEROPSRDY3 V_PLDRXZEROPSRDY3(1U)
+
+#define S_PLDRXVALID3 29
+#define V_PLDRXVALID3(x) ((x) << S_PLDRXVALID3)
+#define F_PLDRXVALID3 V_PLDRXVALID3(1U)
+
+#define S_TCPRXVALID3 28
+#define V_TCPRXVALID3(x) ((x) << S_TCPRXVALID3)
+#define F_TCPRXVALID3 V_TCPRXVALID3(1U)
+
+#define S_IPRXVALID3 27
+#define V_IPRXVALID3(x) ((x) << S_IPRXVALID3)
+#define F_IPRXVALID3 V_IPRXVALID3(1U)
+
+#define S_ETHRXVALID3 26
+#define V_ETHRXVALID3(x) ((x) << S_ETHRXVALID3)
+#define F_ETHRXVALID3 V_ETHRXVALID3(1U)
+
+#define S_CPLRXVALID3 25
+#define V_CPLRXVALID3(x) ((x) << S_CPLRXVALID3)
+#define F_CPLRXVALID3 V_CPLRXVALID3(1U)
+
+#define S_FSTATIC3 24
+#define V_FSTATIC3(x) ((x) << S_FSTATIC3)
+#define F_FSTATIC3 V_FSTATIC3(1U)
+
+#define S_ERRORSRDY3 23
+#define V_ERRORSRDY3(x) ((x) << S_ERRORSRDY3)
+#define F_ERRORSRDY3 V_ERRORSRDY3(1U)
+
+#define S_PLDTXSRDY3 22
+#define V_PLDTXSRDY3(x) ((x) << S_PLDTXSRDY3)
+#define F_PLDTXSRDY3 V_PLDTXSRDY3(1U)
+
+#define S_DBVLD3 21
+#define V_DBVLD3(x) ((x) << S_DBVLD3)
+#define F_DBVLD3 V_DBVLD3(1U)
+
+#define S_PLDTXVALID3 20
+#define V_PLDTXVALID3(x) ((x) << S_PLDTXVALID3)
+#define F_PLDTXVALID3 V_PLDTXVALID3(1U)
+
+#define S_ETXVALID3 19
+#define V_ETXVALID3(x) ((x) << S_ETXVALID3)
+#define F_ETXVALID3 V_ETXVALID3(1U)
+
+#define S_ETXFULL3 18
+#define V_ETXFULL3(x) ((x) << S_ETXFULL3)
+#define F_ETXFULL3 V_ETXFULL3(1U)
+
+#define S_ERXVALID3 17
+#define V_ERXVALID3(x) ((x) << S_ERXVALID3)
+#define F_ERXVALID3 V_ERXVALID3(1U)
+
+#define S_ERXFULL3 16
+#define V_ERXFULL3(x) ((x) << S_ERXFULL3)
+#define F_ERXFULL3 V_ERXFULL3(1U)
+
+#define S_PLDRXCSUMVALID2 15
+#define V_PLDRXCSUMVALID2(x) ((x) << S_PLDRXCSUMVALID2)
+#define F_PLDRXCSUMVALID2 V_PLDRXCSUMVALID2(1U)
+
+#define S_PLDRXZEROPSRDY2 14
+#define V_PLDRXZEROPSRDY2(x) ((x) << S_PLDRXZEROPSRDY2)
+#define F_PLDRXZEROPSRDY2 V_PLDRXZEROPSRDY2(1U)
+
+#define S_PLDRXVALID2 13
+#define V_PLDRXVALID2(x) ((x) << S_PLDRXVALID2)
+#define F_PLDRXVALID2 V_PLDRXVALID2(1U)
+
+#define S_TCPRXVALID2 12
+#define V_TCPRXVALID2(x) ((x) << S_TCPRXVALID2)
+#define F_TCPRXVALID2 V_TCPRXVALID2(1U)
+
+#define S_IPRXVALID2 11
+#define V_IPRXVALID2(x) ((x) << S_IPRXVALID2)
+#define F_IPRXVALID2 V_IPRXVALID2(1U)
+
+#define S_ETHRXVALID2 10
+#define V_ETHRXVALID2(x) ((x) << S_ETHRXVALID2)
+#define F_ETHRXVALID2 V_ETHRXVALID2(1U)
+
+#define S_CPLRXVALID2 9
+#define V_CPLRXVALID2(x) ((x) << S_CPLRXVALID2)
+#define F_CPLRXVALID2 V_CPLRXVALID2(1U)
+
+#define S_FSTATIC2 8
+#define V_FSTATIC2(x) ((x) << S_FSTATIC2)
+#define F_FSTATIC2 V_FSTATIC2(1U)
+
+#define S_ERRORSRDY2 7
+#define V_ERRORSRDY2(x) ((x) << S_ERRORSRDY2)
+#define F_ERRORSRDY2 V_ERRORSRDY2(1U)
+
+#define S_PLDTXSRDY2 6
+#define V_PLDTXSRDY2(x) ((x) << S_PLDTXSRDY2)
+#define F_PLDTXSRDY2 V_PLDTXSRDY2(1U)
+
+#define S_DBVLD2 5
+#define V_DBVLD2(x) ((x) << S_DBVLD2)
+#define F_DBVLD2 V_DBVLD2(1U)
+
+#define S_PLDTXVALID2 4
+#define V_PLDTXVALID2(x) ((x) << S_PLDTXVALID2)
+#define F_PLDTXVALID2 V_PLDTXVALID2(1U)
+
+#define S_ETXVALID2 3
+#define V_ETXVALID2(x) ((x) << S_ETXVALID2)
+#define F_ETXVALID2 V_ETXVALID2(1U)
+
+#define S_ETXFULL2 2
+#define V_ETXFULL2(x) ((x) << S_ETXFULL2)
+#define F_ETXFULL2 V_ETXFULL2(1U)
+
+#define S_ERXVALID2 1
+#define V_ERXVALID2(x) ((x) << S_ERXVALID2)
+#define F_ERXVALID2 V_ERXVALID2(1U)
+
+#define S_ERXFULL2 0
+#define V_ERXFULL2(x) ((x) << S_ERXFULL2)
+#define F_ERXFULL2 V_ERXFULL2(1U)
+
+#define A_TP_DBG_ESIDE_DISP0 0x136
+
+#define S_RESRDY 31
+#define V_RESRDY(x) ((x) << S_RESRDY)
+#define F_RESRDY V_RESRDY(1U)
+
+#define S_STATE 28
+#define M_STATE 0x7U
+#define V_STATE(x) ((x) << S_STATE)
+#define G_STATE(x) (((x) >> S_STATE) & M_STATE)
+
+#define S_FIFOCPL5RXVALID 27
+#define V_FIFOCPL5RXVALID(x) ((x) << S_FIFOCPL5RXVALID)
+#define F_FIFOCPL5RXVALID V_FIFOCPL5RXVALID(1U)
+
+#define S_FIFOETHRXVALID 26
+#define V_FIFOETHRXVALID(x) ((x) << S_FIFOETHRXVALID)
+#define F_FIFOETHRXVALID V_FIFOETHRXVALID(1U)
+
+#define S_FIFOETHRXSOCP 25
+#define V_FIFOETHRXSOCP(x) ((x) << S_FIFOETHRXSOCP)
+#define F_FIFOETHRXSOCP V_FIFOETHRXSOCP(1U)
+
+#define S_FIFOPLDRXZEROP 24
+#define V_FIFOPLDRXZEROP(x) ((x) << S_FIFOPLDRXZEROP)
+#define F_FIFOPLDRXZEROP V_FIFOPLDRXZEROP(1U)
+
+#define S_PLDRXVALID 23
+#define V_PLDRXVALID(x) ((x) << S_PLDRXVALID)
+#define F_PLDRXVALID V_PLDRXVALID(1U)
+
+#define S_FIFOPLDRXZEROP_SRDY 22
+#define V_FIFOPLDRXZEROP_SRDY(x) ((x) << S_FIFOPLDRXZEROP_SRDY)
+#define F_FIFOPLDRXZEROP_SRDY V_FIFOPLDRXZEROP_SRDY(1U)
+
+#define S_FIFOIPRXVALID 21
+#define V_FIFOIPRXVALID(x) ((x) << S_FIFOIPRXVALID)
+#define F_FIFOIPRXVALID V_FIFOIPRXVALID(1U)
+
+#define S_FIFOTCPRXVALID 20
+#define V_FIFOTCPRXVALID(x) ((x) << S_FIFOTCPRXVALID)
+#define F_FIFOTCPRXVALID V_FIFOTCPRXVALID(1U)
+
+#define S_PLDRXCSUMVALID 19
+#define V_PLDRXCSUMVALID(x) ((x) << S_PLDRXCSUMVALID)
+#define F_PLDRXCSUMVALID V_PLDRXCSUMVALID(1U)
+
+#define S_FIFOIPCSUMSRDY 18
+#define V_FIFOIPCSUMSRDY(x) ((x) << S_FIFOIPCSUMSRDY)
+#define F_FIFOIPCSUMSRDY V_FIFOIPCSUMSRDY(1U)
+
+#define S_FIFOIPPSEUDOCSUMSRDY 17
+#define V_FIFOIPPSEUDOCSUMSRDY(x) ((x) << S_FIFOIPPSEUDOCSUMSRDY)
+#define F_FIFOIPPSEUDOCSUMSRDY V_FIFOIPPSEUDOCSUMSRDY(1U)
+
+#define S_FIFOTCPCSUMSRDY 16
+#define V_FIFOTCPCSUMSRDY(x) ((x) << S_FIFOTCPCSUMSRDY)
+#define F_FIFOTCPCSUMSRDY V_FIFOTCPCSUMSRDY(1U)
+
+#define S_ESTATIC4 12
+#define M_ESTATIC4 0xfU
+#define V_ESTATIC4(x) ((x) << S_ESTATIC4)
+#define G_ESTATIC4(x) (((x) >> S_ESTATIC4) & M_ESTATIC4)
+
+#define S_FIFOCPLSOCPCNT 10
+#define M_FIFOCPLSOCPCNT 0x3U
+#define V_FIFOCPLSOCPCNT(x) ((x) << S_FIFOCPLSOCPCNT)
+#define G_FIFOCPLSOCPCNT(x) (((x) >> S_FIFOCPLSOCPCNT) & M_FIFOCPLSOCPCNT)
+
+#define S_FIFOETHSOCPCNT 8
+#define M_FIFOETHSOCPCNT 0x3U
+#define V_FIFOETHSOCPCNT(x) ((x) << S_FIFOETHSOCPCNT)
+#define G_FIFOETHSOCPCNT(x) (((x) >> S_FIFOETHSOCPCNT) & M_FIFOETHSOCPCNT)
+
+#define S_FIFOIPSOCPCNT 6
+#define M_FIFOIPSOCPCNT 0x3U
+#define V_FIFOIPSOCPCNT(x) ((x) << S_FIFOIPSOCPCNT)
+#define G_FIFOIPSOCPCNT(x) (((x) >> S_FIFOIPSOCPCNT) & M_FIFOIPSOCPCNT)
+
+#define S_FIFOTCPSOCPCNT 4
+#define M_FIFOTCPSOCPCNT 0x3U
+#define V_FIFOTCPSOCPCNT(x) ((x) << S_FIFOTCPSOCPCNT)
+#define G_FIFOTCPSOCPCNT(x) (((x) >> S_FIFOTCPSOCPCNT) & M_FIFOTCPSOCPCNT)
+
+#define S_PLD_RXZEROP_CNT 2
+#define M_PLD_RXZEROP_CNT 0x3U
+#define V_PLD_RXZEROP_CNT(x) ((x) << S_PLD_RXZEROP_CNT)
+#define G_PLD_RXZEROP_CNT(x) (((x) >> S_PLD_RXZEROP_CNT) & M_PLD_RXZEROP_CNT)
+
+#define S_ESTATIC6 1
+#define V_ESTATIC6(x) ((x) << S_ESTATIC6)
+#define F_ESTATIC6 V_ESTATIC6(1U)
+
+#define S_TXFULL 0
+#define V_TXFULL(x) ((x) << S_TXFULL)
+#define F_TXFULL V_TXFULL(1U)
+
+#define A_TP_DBG_ESIDE_DISP1 0x137
+#define A_TP_MAC_MATCH_MAP0 0x138
+
+#define S_MAPVALUEWR 16
+#define M_MAPVALUEWR 0xffU
+#define V_MAPVALUEWR(x) ((x) << S_MAPVALUEWR)
+#define G_MAPVALUEWR(x) (((x) >> S_MAPVALUEWR) & M_MAPVALUEWR)
+
+#define S_MAPINDEX 2
+#define M_MAPINDEX 0x1ffU
+#define V_MAPINDEX(x) ((x) << S_MAPINDEX)
+#define G_MAPINDEX(x) (((x) >> S_MAPINDEX) & M_MAPINDEX)
+
+#define S_MAPREAD 1
+#define V_MAPREAD(x) ((x) << S_MAPREAD)
+#define F_MAPREAD V_MAPREAD(1U)
+
+#define S_MAPWRITE 0
+#define V_MAPWRITE(x) ((x) << S_MAPWRITE)
+#define F_MAPWRITE V_MAPWRITE(1U)
+
+#define A_TP_MAC_MATCH_MAP1 0x139
+
+#define S_MAPVALUERD 0
+#define M_MAPVALUERD 0x1ffU
+#define V_MAPVALUERD(x) ((x) << S_MAPVALUERD)
+#define G_MAPVALUERD(x) (((x) >> S_MAPVALUERD) & M_MAPVALUERD)
+
+#define A_TP_DBG_ESIDE_DISP2 0x13a
+#define A_TP_DBG_ESIDE_DISP3 0x13b
+#define A_TP_DBG_ESIDE_HDR0 0x13c
+
+#define S_TCPSOPCNT 28
+#define M_TCPSOPCNT 0xfU
+#define V_TCPSOPCNT(x) ((x) << S_TCPSOPCNT)
+#define G_TCPSOPCNT(x) (((x) >> S_TCPSOPCNT) & M_TCPSOPCNT)
+
+#define S_TCPEOPCNT 24
+#define M_TCPEOPCNT 0xfU
+#define V_TCPEOPCNT(x) ((x) << S_TCPEOPCNT)
+#define G_TCPEOPCNT(x) (((x) >> S_TCPEOPCNT) & M_TCPEOPCNT)
+
+#define S_IPSOPCNT 20
+#define M_IPSOPCNT 0xfU
+#define V_IPSOPCNT(x) ((x) << S_IPSOPCNT)
+#define G_IPSOPCNT(x) (((x) >> S_IPSOPCNT) & M_IPSOPCNT)
+
+#define S_IPEOPCNT 16
+#define M_IPEOPCNT 0xfU
+#define V_IPEOPCNT(x) ((x) << S_IPEOPCNT)
+#define G_IPEOPCNT(x) (((x) >> S_IPEOPCNT) & M_IPEOPCNT)
+
+#define S_ETHSOPCNT 12
+#define M_ETHSOPCNT 0xfU
+#define V_ETHSOPCNT(x) ((x) << S_ETHSOPCNT)
+#define G_ETHSOPCNT(x) (((x) >> S_ETHSOPCNT) & M_ETHSOPCNT)
+
+#define S_ETHEOPCNT 8
+#define M_ETHEOPCNT 0xfU
+#define V_ETHEOPCNT(x) ((x) << S_ETHEOPCNT)
+#define G_ETHEOPCNT(x) (((x) >> S_ETHEOPCNT) & M_ETHEOPCNT)
+
+#define S_CPLSOPCNT 4
+#define M_CPLSOPCNT 0xfU
+#define V_CPLSOPCNT(x) ((x) << S_CPLSOPCNT)
+#define G_CPLSOPCNT(x) (((x) >> S_CPLSOPCNT) & M_CPLSOPCNT)
+
+#define S_CPLEOPCNT 0
+#define M_CPLEOPCNT 0xfU
+#define V_CPLEOPCNT(x) ((x) << S_CPLEOPCNT)
+#define G_CPLEOPCNT(x) (((x) >> S_CPLEOPCNT) & M_CPLEOPCNT)
+
+#define A_TP_DBG_ESIDE_HDR1 0x13d
+#define A_TP_DBG_ESIDE_HDR2 0x13e
+#define A_TP_DBG_ESIDE_HDR3 0x13f
+#define A_TP_VLAN_PRI_MAP 0x140
+
+#define S_FRAGMENTATION 9
+#define V_FRAGMENTATION(x) ((x) << S_FRAGMENTATION)
+#define F_FRAGMENTATION V_FRAGMENTATION(1U)
+
+#define S_MPSHITTYPE 8
+#define V_MPSHITTYPE(x) ((x) << S_MPSHITTYPE)
+#define F_MPSHITTYPE V_MPSHITTYPE(1U)
+
+#define S_MACMATCH 7
+#define V_MACMATCH(x) ((x) << S_MACMATCH)
+#define F_MACMATCH V_MACMATCH(1U)
+
+#define S_ETHERTYPE 6
+#define V_ETHERTYPE(x) ((x) << S_ETHERTYPE)
+#define F_ETHERTYPE V_ETHERTYPE(1U)
+
+#define S_PROTOCOL 5
+#define V_PROTOCOL(x) ((x) << S_PROTOCOL)
+#define F_PROTOCOL V_PROTOCOL(1U)
+
+#define S_TOS 4
+#define V_TOS(x) ((x) << S_TOS)
+#define F_TOS V_TOS(1U)
+
+#define S_VLAN 3
+#define V_VLAN(x) ((x) << S_VLAN)
+#define F_VLAN V_VLAN(1U)
+
+#define S_VNIC_ID 2
+#define V_VNIC_ID(x) ((x) << S_VNIC_ID)
+#define F_VNIC_ID V_VNIC_ID(1U)
+
+#define S_PORT 1
+#define V_PORT(x) ((x) << S_PORT)
+#define F_PORT V_PORT(1U)
+
+#define S_FCOE 0
+#define V_FCOE(x) ((x) << S_FCOE)
+#define F_FCOE V_FCOE(1U)
+
+#define A_TP_INGRESS_CONFIG 0x141
+
+#define S_OPAQUE_TYPE 16
+#define M_OPAQUE_TYPE 0xffffU
+#define V_OPAQUE_TYPE(x) ((x) << S_OPAQUE_TYPE)
+#define G_OPAQUE_TYPE(x) (((x) >> S_OPAQUE_TYPE) & M_OPAQUE_TYPE)
+
+#define S_OPAQUE_RM 15
+#define V_OPAQUE_RM(x) ((x) << S_OPAQUE_RM)
+#define F_OPAQUE_RM V_OPAQUE_RM(1U)
+
+#define S_OPAQUE_HDR_SIZE 14
+#define V_OPAQUE_HDR_SIZE(x) ((x) << S_OPAQUE_HDR_SIZE)
+#define F_OPAQUE_HDR_SIZE V_OPAQUE_HDR_SIZE(1U)
+
+#define S_OPAQUE_RM_MAC_IN_MAC 13
+#define V_OPAQUE_RM_MAC_IN_MAC(x) ((x) << S_OPAQUE_RM_MAC_IN_MAC)
+#define F_OPAQUE_RM_MAC_IN_MAC V_OPAQUE_RM_MAC_IN_MAC(1U)
+
+#define S_FCOE_TARGET 12
+#define V_FCOE_TARGET(x) ((x) << S_FCOE_TARGET)
+#define F_FCOE_TARGET V_FCOE_TARGET(1U)
+
+#define S_VNIC 11
+#define V_VNIC(x) ((x) << S_VNIC)
+#define F_VNIC V_VNIC(1U)
+
+#define S_CSUM_HAS_PSEUDO_HDR 10
+#define V_CSUM_HAS_PSEUDO_HDR(x) ((x) << S_CSUM_HAS_PSEUDO_HDR)
+#define F_CSUM_HAS_PSEUDO_HDR V_CSUM_HAS_PSEUDO_HDR(1U)
+
+#define S_RM_OVLAN 9
+#define V_RM_OVLAN(x) ((x) << S_RM_OVLAN)
+#define F_RM_OVLAN V_RM_OVLAN(1U)
+
+#define S_LOOKUPEVERYPKT 8
+#define V_LOOKUPEVERYPKT(x) ((x) << S_LOOKUPEVERYPKT)
+#define F_LOOKUPEVERYPKT V_LOOKUPEVERYPKT(1U)
+
+#define S_IPV6_EXT_HDR_SKIP 0
+#define M_IPV6_EXT_HDR_SKIP 0xffU
+#define V_IPV6_EXT_HDR_SKIP(x) ((x) << S_IPV6_EXT_HDR_SKIP)
+#define G_IPV6_EXT_HDR_SKIP(x) (((x) >> S_IPV6_EXT_HDR_SKIP) & M_IPV6_EXT_HDR_SKIP)
+
+#define A_TP_TX_DROP_CFG_CH2 0x142
+#define A_TP_TX_DROP_CFG_CH3 0x143
+#define A_TP_EGRESS_CONFIG 0x145
+
+#define S_REWRITEFORCETOSIZE 0
+#define V_REWRITEFORCETOSIZE(x) ((x) << S_REWRITEFORCETOSIZE)
+#define F_REWRITEFORCETOSIZE V_REWRITEFORCETOSIZE(1U)
+
+#define A_TP_EHDR_CONFIG_LO 0x146
+
+#define S_CPLLIMIT 24
+#define M_CPLLIMIT 0xffU
+#define V_CPLLIMIT(x) ((x) << S_CPLLIMIT)
+#define G_CPLLIMIT(x) (((x) >> S_CPLLIMIT) & M_CPLLIMIT)
+
+#define S_ETHLIMIT 16
+#define M_ETHLIMIT 0xffU
+#define V_ETHLIMIT(x) ((x) << S_ETHLIMIT)
+#define G_ETHLIMIT(x) (((x) >> S_ETHLIMIT) & M_ETHLIMIT)
+
+#define S_IPLIMIT 8
+#define M_IPLIMIT 0xffU
+#define V_IPLIMIT(x) ((x) << S_IPLIMIT)
+#define G_IPLIMIT(x) (((x) >> S_IPLIMIT) & M_IPLIMIT)
+
+#define S_TCPLIMIT 0
+#define M_TCPLIMIT 0xffU
+#define V_TCPLIMIT(x) ((x) << S_TCPLIMIT)
+#define G_TCPLIMIT(x) (((x) >> S_TCPLIMIT) & M_TCPLIMIT)
+
+#define A_TP_EHDR_CONFIG_HI 0x147
+#define A_TP_DBG_ESIDE_INT 0x148
+
+#define S_ERXSOP2X 28
+#define M_ERXSOP2X 0xfU
+#define V_ERXSOP2X(x) ((x) << S_ERXSOP2X)
+#define G_ERXSOP2X(x) (((x) >> S_ERXSOP2X) & M_ERXSOP2X)
+
+#define S_ERXEOP2X 24
+#define M_ERXEOP2X 0xfU
+#define V_ERXEOP2X(x) ((x) << S_ERXEOP2X)
+#define G_ERXEOP2X(x) (((x) >> S_ERXEOP2X) & M_ERXEOP2X)
+
+#define S_ERXVALID2X 20
+#define M_ERXVALID2X 0xfU
+#define V_ERXVALID2X(x) ((x) << S_ERXVALID2X)
+#define G_ERXVALID2X(x) (((x) >> S_ERXVALID2X) & M_ERXVALID2X)
+
+#define S_ERXAFULL2X 16
+#define M_ERXAFULL2X 0xfU
+#define V_ERXAFULL2X(x) ((x) << S_ERXAFULL2X)
+#define G_ERXAFULL2X(x) (((x) >> S_ERXAFULL2X) & M_ERXAFULL2X)
+
+#define S_PLD2XTXVALID 12
+#define M_PLD2XTXVALID 0xfU
+#define V_PLD2XTXVALID(x) ((x) << S_PLD2XTXVALID)
+#define G_PLD2XTXVALID(x) (((x) >> S_PLD2XTXVALID) & M_PLD2XTXVALID)
+
+#define S_PLD2XTXAFULL 8
+#define M_PLD2XTXAFULL 0xfU
+#define V_PLD2XTXAFULL(x) ((x) << S_PLD2XTXAFULL)
+#define G_PLD2XTXAFULL(x) (((x) >> S_PLD2XTXAFULL) & M_PLD2XTXAFULL)
+
+#define S_ERRORSRDY 7
+#define V_ERRORSRDY(x) ((x) << S_ERRORSRDY)
+#define F_ERRORSRDY V_ERRORSRDY(1U)
+
+#define S_ERRORDRDY 6
+#define V_ERRORDRDY(x) ((x) << S_ERRORDRDY)
+#define F_ERRORDRDY V_ERRORDRDY(1U)
+
+#define S_TCPOPSRDY 5
+#define V_TCPOPSRDY(x) ((x) << S_TCPOPSRDY)
+#define F_TCPOPSRDY V_TCPOPSRDY(1U)
+
+#define S_TCPOPDRDY 4
+#define V_TCPOPDRDY(x) ((x) << S_TCPOPDRDY)
+#define F_TCPOPDRDY V_TCPOPDRDY(1U)
+
+#define S_PLDTXSRDY 3
+#define V_PLDTXSRDY(x) ((x) << S_PLDTXSRDY)
+#define F_PLDTXSRDY V_PLDTXSRDY(1U)
+
+#define S_PLDTXDRDY 2
+#define V_PLDTXDRDY(x) ((x) << S_PLDTXDRDY)
+#define F_PLDTXDRDY V_PLDTXDRDY(1U)
+
+#define S_TCPOPTTXVALID 1
+#define V_TCPOPTTXVALID(x) ((x) << S_TCPOPTTXVALID)
+#define F_TCPOPTTXVALID V_TCPOPTTXVALID(1U)
+
+#define S_TCPOPTTXFULL 0
+#define V_TCPOPTTXFULL(x) ((x) << S_TCPOPTTXFULL)
+#define F_TCPOPTTXFULL V_TCPOPTTXFULL(1U)
+
+#define A_TP_DBG_ESIDE_DEMUX 0x149
+
+#define S_EALLDONE 28
+#define M_EALLDONE 0xfU
+#define V_EALLDONE(x) ((x) << S_EALLDONE)
+#define G_EALLDONE(x) (((x) >> S_EALLDONE) & M_EALLDONE)
+
+#define S_EFIFOPLDDONE 24
+#define M_EFIFOPLDDONE 0xfU
+#define V_EFIFOPLDDONE(x) ((x) << S_EFIFOPLDDONE)
+#define G_EFIFOPLDDONE(x) (((x) >> S_EFIFOPLDDONE) & M_EFIFOPLDDONE)
+
+#define S_EDBDONE 20
+#define M_EDBDONE 0xfU
+#define V_EDBDONE(x) ((x) << S_EDBDONE)
+#define G_EDBDONE(x) (((x) >> S_EDBDONE) & M_EDBDONE)
+
+#define S_EISSFIFODONE 16
+#define M_EISSFIFODONE 0xfU
+#define V_EISSFIFODONE(x) ((x) << S_EISSFIFODONE)
+#define G_EISSFIFODONE(x) (((x) >> S_EISSFIFODONE) & M_EISSFIFODONE)
+
+#define S_EACKERRFIFODONE 12
+#define M_EACKERRFIFODONE 0xfU
+#define V_EACKERRFIFODONE(x) ((x) << S_EACKERRFIFODONE)
+#define G_EACKERRFIFODONE(x) (((x) >> S_EACKERRFIFODONE) & M_EACKERRFIFODONE)
+
+#define S_EFIFOERRORDONE 8
+#define M_EFIFOERRORDONE 0xfU
+#define V_EFIFOERRORDONE(x) ((x) << S_EFIFOERRORDONE)
+#define G_EFIFOERRORDONE(x) (((x) >> S_EFIFOERRORDONE) & M_EFIFOERRORDONE)
+
+#define S_ERXPKTATTRFIFOFDONE 4
+#define M_ERXPKTATTRFIFOFDONE 0xfU
+#define V_ERXPKTATTRFIFOFDONE(x) ((x) << S_ERXPKTATTRFIFOFDONE)
+#define G_ERXPKTATTRFIFOFDONE(x) (((x) >> S_ERXPKTATTRFIFOFDONE) & M_ERXPKTATTRFIFOFDONE)
+
+#define S_ETCPOPDONE 0
+#define M_ETCPOPDONE 0xfU
+#define V_ETCPOPDONE(x) ((x) << S_ETCPOPDONE)
+#define G_ETCPOPDONE(x) (((x) >> S_ETCPOPDONE) & M_ETCPOPDONE)
+
+#define A_TP_DBG_ESIDE_IN0 0x14a
+
+#define S_RXVALID 31
+#define V_RXVALID(x) ((x) << S_RXVALID)
+#define F_RXVALID V_RXVALID(1U)
+
+#define S_RXFULL 30
+#define V_RXFULL(x) ((x) << S_RXFULL)
+#define F_RXFULL V_RXFULL(1U)
+
+#define S_RXSOCP 29
+#define V_RXSOCP(x) ((x) << S_RXSOCP)
+#define F_RXSOCP V_RXSOCP(1U)
+
+#define S_RXEOP 28
+#define V_RXEOP(x) ((x) << S_RXEOP)
+#define F_RXEOP V_RXEOP(1U)
+
+#define S_RXVALID_I 27
+#define V_RXVALID_I(x) ((x) << S_RXVALID_I)
+#define F_RXVALID_I V_RXVALID_I(1U)
+
+#define S_RXFULL_I 26
+#define V_RXFULL_I(x) ((x) << S_RXFULL_I)
+#define F_RXFULL_I V_RXFULL_I(1U)
+
+#define S_RXSOCP_I 25
+#define V_RXSOCP_I(x) ((x) << S_RXSOCP_I)
+#define F_RXSOCP_I V_RXSOCP_I(1U)
+
+#define S_RXEOP_I 24
+#define V_RXEOP_I(x) ((x) << S_RXEOP_I)
+#define F_RXEOP_I V_RXEOP_I(1U)
+
+#define S_RXVALID_I2 23
+#define V_RXVALID_I2(x) ((x) << S_RXVALID_I2)
+#define F_RXVALID_I2 V_RXVALID_I2(1U)
+
+#define S_RXFULL_I2 22
+#define V_RXFULL_I2(x) ((x) << S_RXFULL_I2)
+#define F_RXFULL_I2 V_RXFULL_I2(1U)
+
+#define S_RXSOCP_I2 21
+#define V_RXSOCP_I2(x) ((x) << S_RXSOCP_I2)
+#define F_RXSOCP_I2 V_RXSOCP_I2(1U)
+
+#define S_RXEOP_I2 20
+#define V_RXEOP_I2(x) ((x) << S_RXEOP_I2)
+#define F_RXEOP_I2 V_RXEOP_I2(1U)
+
+#define S_CT_MPA_TXVALID_FIFO 19
+#define V_CT_MPA_TXVALID_FIFO(x) ((x) << S_CT_MPA_TXVALID_FIFO)
+#define F_CT_MPA_TXVALID_FIFO V_CT_MPA_TXVALID_FIFO(1U)
+
+#define S_CT_MPA_TXFULL_FIFO 18
+#define V_CT_MPA_TXFULL_FIFO(x) ((x) << S_CT_MPA_TXFULL_FIFO)
+#define F_CT_MPA_TXFULL_FIFO V_CT_MPA_TXFULL_FIFO(1U)
+
+#define S_CT_MPA_TXVALID 17
+#define V_CT_MPA_TXVALID(x) ((x) << S_CT_MPA_TXVALID)
+#define F_CT_MPA_TXVALID V_CT_MPA_TXVALID(1U)
+
+#define S_CT_MPA_TXFULL 16
+#define V_CT_MPA_TXFULL(x) ((x) << S_CT_MPA_TXFULL)
+#define F_CT_MPA_TXFULL V_CT_MPA_TXFULL(1U)
+
+#define S_RXVALID_BUF 15
+#define V_RXVALID_BUF(x) ((x) << S_RXVALID_BUF)
+#define F_RXVALID_BUF V_RXVALID_BUF(1U)
+
+#define S_RXFULL_BUF 14
+#define V_RXFULL_BUF(x) ((x) << S_RXFULL_BUF)
+#define F_RXFULL_BUF V_RXFULL_BUF(1U)
+
+#define S_PLD_TXVALID 13
+#define V_PLD_TXVALID(x) ((x) << S_PLD_TXVALID)
+#define F_PLD_TXVALID V_PLD_TXVALID(1U)
+
+#define S_PLD_TXFULL 12
+#define V_PLD_TXFULL(x) ((x) << S_PLD_TXFULL)
+#define F_PLD_TXFULL V_PLD_TXFULL(1U)
+
+#define S_ISS_FIFO_SRDY 11
+#define V_ISS_FIFO_SRDY(x) ((x) << S_ISS_FIFO_SRDY)
+#define F_ISS_FIFO_SRDY V_ISS_FIFO_SRDY(1U)
+
+#define S_ISS_FIFO_DRDY 10
+#define V_ISS_FIFO_DRDY(x) ((x) << S_ISS_FIFO_DRDY)
+#define F_ISS_FIFO_DRDY V_ISS_FIFO_DRDY(1U)
+
+#define S_CT_TCP_OP_ISS_SRDY 9
+#define V_CT_TCP_OP_ISS_SRDY(x) ((x) << S_CT_TCP_OP_ISS_SRDY)
+#define F_CT_TCP_OP_ISS_SRDY V_CT_TCP_OP_ISS_SRDY(1U)
+
+#define S_CT_TCP_OP_ISS_DRDY 8
+#define V_CT_TCP_OP_ISS_DRDY(x) ((x) << S_CT_TCP_OP_ISS_DRDY)
+#define F_CT_TCP_OP_ISS_DRDY V_CT_TCP_OP_ISS_DRDY(1U)
+
+#define S_P2CSUMERROR_SRDY 7
+#define V_P2CSUMERROR_SRDY(x) ((x) << S_P2CSUMERROR_SRDY)
+#define F_P2CSUMERROR_SRDY V_P2CSUMERROR_SRDY(1U)
+
+#define S_P2CSUMERROR_DRDY 6
+#define V_P2CSUMERROR_DRDY(x) ((x) << S_P2CSUMERROR_DRDY)
+#define F_P2CSUMERROR_DRDY V_P2CSUMERROR_DRDY(1U)
+
+#define S_FIFO_ERROR_SRDY 5
+#define V_FIFO_ERROR_SRDY(x) ((x) << S_FIFO_ERROR_SRDY)
+#define F_FIFO_ERROR_SRDY V_FIFO_ERROR_SRDY(1U)
+
+#define S_FIFO_ERROR_DRDY 4
+#define V_FIFO_ERROR_DRDY(x) ((x) << S_FIFO_ERROR_DRDY)
+#define F_FIFO_ERROR_DRDY V_FIFO_ERROR_DRDY(1U)
+
+#define S_PLD_SRDY 3
+#define V_PLD_SRDY(x) ((x) << S_PLD_SRDY)
+#define F_PLD_SRDY V_PLD_SRDY(1U)
+
+#define S_PLD_DRDY 2
+#define V_PLD_DRDY(x) ((x) << S_PLD_DRDY)
+#define F_PLD_DRDY V_PLD_DRDY(1U)
+
+#define S_RX_PKT_ATTR_SRDY 1
+#define V_RX_PKT_ATTR_SRDY(x) ((x) << S_RX_PKT_ATTR_SRDY)
+#define F_RX_PKT_ATTR_SRDY V_RX_PKT_ATTR_SRDY(1U)
+
+#define S_RX_PKT_ATTR_DRDY 0
+#define V_RX_PKT_ATTR_DRDY(x) ((x) << S_RX_PKT_ATTR_DRDY)
+#define F_RX_PKT_ATTR_DRDY V_RX_PKT_ATTR_DRDY(1U)
+
+#define A_TP_DBG_ESIDE_IN1 0x14b
+#define A_TP_DBG_ESIDE_IN2 0x14c
+#define A_TP_DBG_ESIDE_IN3 0x14d
+#define A_TP_DBG_ESIDE_FRM 0x14e
+
+#define S_ERX2XERROR 28
+#define M_ERX2XERROR 0xfU
+#define V_ERX2XERROR(x) ((x) << S_ERX2XERROR)
+#define G_ERX2XERROR(x) (((x) >> S_ERX2XERROR) & M_ERX2XERROR)
+
+#define S_EPLDTX2XERROR 24
+#define M_EPLDTX2XERROR 0xfU
+#define V_EPLDTX2XERROR(x) ((x) << S_EPLDTX2XERROR)
+#define G_EPLDTX2XERROR(x) (((x) >> S_EPLDTX2XERROR) & M_EPLDTX2XERROR)
+
+#define S_ETXERROR 20
+#define M_ETXERROR 0xfU
+#define V_ETXERROR(x) ((x) << S_ETXERROR)
+#define G_ETXERROR(x) (((x) >> S_ETXERROR) & M_ETXERROR)
+
+#define S_EPLDRXERROR 16
+#define M_EPLDRXERROR 0xfU
+#define V_EPLDRXERROR(x) ((x) << S_EPLDRXERROR)
+#define G_EPLDRXERROR(x) (((x) >> S_EPLDRXERROR) & M_EPLDRXERROR)
+
+#define S_ERXSIZEERROR3 12
+#define M_ERXSIZEERROR3 0xfU
+#define V_ERXSIZEERROR3(x) ((x) << S_ERXSIZEERROR3)
+#define G_ERXSIZEERROR3(x) (((x) >> S_ERXSIZEERROR3) & M_ERXSIZEERROR3)
+
+#define S_ERXSIZEERROR2 8
+#define M_ERXSIZEERROR2 0xfU
+#define V_ERXSIZEERROR2(x) ((x) << S_ERXSIZEERROR2)
+#define G_ERXSIZEERROR2(x) (((x) >> S_ERXSIZEERROR2) & M_ERXSIZEERROR2)
+
+#define S_ERXSIZEERROR1 4
+#define M_ERXSIZEERROR1 0xfU
+#define V_ERXSIZEERROR1(x) ((x) << S_ERXSIZEERROR1)
+#define G_ERXSIZEERROR1(x) (((x) >> S_ERXSIZEERROR1) & M_ERXSIZEERROR1)
+
+#define S_ERXSIZEERROR0 0
+#define M_ERXSIZEERROR0 0xfU
+#define V_ERXSIZEERROR0(x) ((x) << S_ERXSIZEERROR0)
+#define G_ERXSIZEERROR0(x) (((x) >> S_ERXSIZEERROR0) & M_ERXSIZEERROR0)
+
+#define A_TP_DBG_ESIDE_DRP 0x14f
+
+#define S_RXDROP3 24
+#define M_RXDROP3 0xffU
+#define V_RXDROP3(x) ((x) << S_RXDROP3)
+#define G_RXDROP3(x) (((x) >> S_RXDROP3) & M_RXDROP3)
+
+#define S_RXDROP2 16
+#define M_RXDROP2 0xffU
+#define V_RXDROP2(x) ((x) << S_RXDROP2)
+#define G_RXDROP2(x) (((x) >> S_RXDROP2) & M_RXDROP2)
+
+#define S_RXDROP1 8
+#define M_RXDROP1 0xffU
+#define V_RXDROP1(x) ((x) << S_RXDROP1)
+#define G_RXDROP1(x) (((x) >> S_RXDROP1) & M_RXDROP1)
+
+#define S_RXDROP0 0
+#define M_RXDROP0 0xffU
+#define V_RXDROP0(x) ((x) << S_RXDROP0)
+#define G_RXDROP0(x) (((x) >> S_RXDROP0) & M_RXDROP0)
+
+#define A_TP_DBG_ESIDE_TX 0x150
+
+#define S_ETXVALID 4
+#define M_ETXVALID 0xfU
+#define V_ETXVALID(x) ((x) << S_ETXVALID)
+#define G_ETXVALID(x) (((x) >> S_ETXVALID) & M_ETXVALID)
+
+#define S_ETXFULL 0
+#define M_ETXFULL 0xfU
+#define V_ETXFULL(x) ((x) << S_ETXFULL)
+#define G_ETXFULL(x) (((x) >> S_ETXFULL) & M_ETXFULL)
+
+#define A_TP_ESIDE_SVID_MASK 0x151
+#define A_TP_ESIDE_DVID_MASK 0x152
+#define A_TP_ESIDE_ALIGN_MASK 0x153
+
+#define S_USE_LOOP_BIT 24
+#define V_USE_LOOP_BIT(x) ((x) << S_USE_LOOP_BIT)
+#define F_USE_LOOP_BIT V_USE_LOOP_BIT(1U)
+
+#define S_LOOP_OFFSET 16
+#define M_LOOP_OFFSET 0xffU
+#define V_LOOP_OFFSET(x) ((x) << S_LOOP_OFFSET)
+#define G_LOOP_OFFSET(x) (((x) >> S_LOOP_OFFSET) & M_LOOP_OFFSET)
+
+#define S_DVID_ID_OFFSET 8
+#define M_DVID_ID_OFFSET 0xffU
+#define V_DVID_ID_OFFSET(x) ((x) << S_DVID_ID_OFFSET)
+#define G_DVID_ID_OFFSET(x) (((x) >> S_DVID_ID_OFFSET) & M_DVID_ID_OFFSET)
+
+#define S_SVID_ID_OFFSET 0
+#define M_SVID_ID_OFFSET 0xffU
+#define V_SVID_ID_OFFSET(x) ((x) << S_SVID_ID_OFFSET)
+#define G_SVID_ID_OFFSET(x) (((x) >> S_SVID_ID_OFFSET) & M_SVID_ID_OFFSET)
+
+#define A_TP_DBG_CSIDE_RX0 0x230
+
+#define S_CRXSOPCNT 28
+#define M_CRXSOPCNT 0xfU
+#define V_CRXSOPCNT(x) ((x) << S_CRXSOPCNT)
+#define G_CRXSOPCNT(x) (((x) >> S_CRXSOPCNT) & M_CRXSOPCNT)
+
+#define S_CRXEOPCNT 24
+#define M_CRXEOPCNT 0xfU
+#define V_CRXEOPCNT(x) ((x) << S_CRXEOPCNT)
+#define G_CRXEOPCNT(x) (((x) >> S_CRXEOPCNT) & M_CRXEOPCNT)
+
+#define S_CRXPLDSOPCNT 20
+#define M_CRXPLDSOPCNT 0xfU
+#define V_CRXPLDSOPCNT(x) ((x) << S_CRXPLDSOPCNT)
+#define G_CRXPLDSOPCNT(x) (((x) >> S_CRXPLDSOPCNT) & M_CRXPLDSOPCNT)
+
+#define S_CRXPLDEOPCNT 16
+#define M_CRXPLDEOPCNT 0xfU
+#define V_CRXPLDEOPCNT(x) ((x) << S_CRXPLDEOPCNT)
+#define G_CRXPLDEOPCNT(x) (((x) >> S_CRXPLDEOPCNT) & M_CRXPLDEOPCNT)
+
+#define S_CRXARBSOPCNT 12
+#define M_CRXARBSOPCNT 0xfU
+#define V_CRXARBSOPCNT(x) ((x) << S_CRXARBSOPCNT)
+#define G_CRXARBSOPCNT(x) (((x) >> S_CRXARBSOPCNT) & M_CRXARBSOPCNT)
+
+#define S_CRXARBEOPCNT 8
+#define M_CRXARBEOPCNT 0xfU
+#define V_CRXARBEOPCNT(x) ((x) << S_CRXARBEOPCNT)
+#define G_CRXARBEOPCNT(x) (((x) >> S_CRXARBEOPCNT) & M_CRXARBEOPCNT)
+
+#define S_CRXCPLSOPCNT 4
+#define M_CRXCPLSOPCNT 0xfU
+#define V_CRXCPLSOPCNT(x) ((x) << S_CRXCPLSOPCNT)
+#define G_CRXCPLSOPCNT(x) (((x) >> S_CRXCPLSOPCNT) & M_CRXCPLSOPCNT)
+
+#define S_CRXCPLEOPCNT 0
+#define M_CRXCPLEOPCNT 0xfU
+#define V_CRXCPLEOPCNT(x) ((x) << S_CRXCPLEOPCNT)
+#define G_CRXCPLEOPCNT(x) (((x) >> S_CRXCPLEOPCNT) & M_CRXCPLEOPCNT)
+
+#define A_TP_DBG_CSIDE_RX1 0x231
+#define A_TP_DBG_CSIDE_RX2 0x232
+#define A_TP_DBG_CSIDE_RX3 0x233
+#define A_TP_DBG_CSIDE_TX0 0x234
+
+#define S_TXSOPCNT 28
+#define M_TXSOPCNT 0xfU
+#define V_TXSOPCNT(x) ((x) << S_TXSOPCNT)
+#define G_TXSOPCNT(x) (((x) >> S_TXSOPCNT) & M_TXSOPCNT)
+
+#define S_TXEOPCNT 24
+#define M_TXEOPCNT 0xfU
+#define V_TXEOPCNT(x) ((x) << S_TXEOPCNT)
+#define G_TXEOPCNT(x) (((x) >> S_TXEOPCNT) & M_TXEOPCNT)
+
+#define S_TXPLDSOPCNT 20
+#define M_TXPLDSOPCNT 0xfU
+#define V_TXPLDSOPCNT(x) ((x) << S_TXPLDSOPCNT)
+#define G_TXPLDSOPCNT(x) (((x) >> S_TXPLDSOPCNT) & M_TXPLDSOPCNT)
+
+#define S_TXPLDEOPCNT 16
+#define M_TXPLDEOPCNT 0xfU
+#define V_TXPLDEOPCNT(x) ((x) << S_TXPLDEOPCNT)
+#define G_TXPLDEOPCNT(x) (((x) >> S_TXPLDEOPCNT) & M_TXPLDEOPCNT)
+
+#define S_TXARBSOPCNT 12
+#define M_TXARBSOPCNT 0xfU
+#define V_TXARBSOPCNT(x) ((x) << S_TXARBSOPCNT)
+#define G_TXARBSOPCNT(x) (((x) >> S_TXARBSOPCNT) & M_TXARBSOPCNT)
+
+#define S_TXARBEOPCNT 8
+#define M_TXARBEOPCNT 0xfU
+#define V_TXARBEOPCNT(x) ((x) << S_TXARBEOPCNT)
+#define G_TXARBEOPCNT(x) (((x) >> S_TXARBEOPCNT) & M_TXARBEOPCNT)
+
+#define S_TXCPLSOPCNT 4
+#define M_TXCPLSOPCNT 0xfU
+#define V_TXCPLSOPCNT(x) ((x) << S_TXCPLSOPCNT)
+#define G_TXCPLSOPCNT(x) (((x) >> S_TXCPLSOPCNT) & M_TXCPLSOPCNT)
+
+#define S_TXCPLEOPCNT 0
+#define M_TXCPLEOPCNT 0xfU
+#define V_TXCPLEOPCNT(x) ((x) << S_TXCPLEOPCNT)
+#define G_TXCPLEOPCNT(x) (((x) >> S_TXCPLEOPCNT) & M_TXCPLEOPCNT)
+
+#define A_TP_DBG_CSIDE_TX1 0x235
+#define A_TP_DBG_CSIDE_TX2 0x236
+#define A_TP_DBG_CSIDE_TX3 0x237
+#define A_TP_DBG_CSIDE_FIFO0 0x238
+
+#define S_PLD_RXZEROP_SRDY1 31
+#define V_PLD_RXZEROP_SRDY1(x) ((x) << S_PLD_RXZEROP_SRDY1)
+#define F_PLD_RXZEROP_SRDY1 V_PLD_RXZEROP_SRDY1(1U)
+
+#define S_PLD_RXZEROP_DRDY1 30
+#define V_PLD_RXZEROP_DRDY1(x) ((x) << S_PLD_RXZEROP_DRDY1)
+#define F_PLD_RXZEROP_DRDY1 V_PLD_RXZEROP_DRDY1(1U)
+
+#define S_PLD_TXZEROP_SRDY1 29
+#define V_PLD_TXZEROP_SRDY1(x) ((x) << S_PLD_TXZEROP_SRDY1)
+#define F_PLD_TXZEROP_SRDY1 V_PLD_TXZEROP_SRDY1(1U)
+
+#define S_PLD_TXZEROP_DRDY1 28
+#define V_PLD_TXZEROP_DRDY1(x) ((x) << S_PLD_TXZEROP_DRDY1)
+#define F_PLD_TXZEROP_DRDY1 V_PLD_TXZEROP_DRDY1(1U)
+
+#define S_PLD_TX_SRDY1 27
+#define V_PLD_TX_SRDY1(x) ((x) << S_PLD_TX_SRDY1)
+#define F_PLD_TX_SRDY1 V_PLD_TX_SRDY1(1U)
+
+#define S_PLD_TX_DRDY1 26
+#define V_PLD_TX_DRDY1(x) ((x) << S_PLD_TX_DRDY1)
+#define F_PLD_TX_DRDY1 V_PLD_TX_DRDY1(1U)
+
+#define S_ERROR_SRDY1 25
+#define V_ERROR_SRDY1(x) ((x) << S_ERROR_SRDY1)
+#define F_ERROR_SRDY1 V_ERROR_SRDY1(1U)
+
+#define S_ERROR_DRDY1 24
+#define V_ERROR_DRDY1(x) ((x) << S_ERROR_DRDY1)
+#define F_ERROR_DRDY1 V_ERROR_DRDY1(1U)
+
+#define S_DB_VLD1 23
+#define V_DB_VLD1(x) ((x) << S_DB_VLD1)
+#define F_DB_VLD1 V_DB_VLD1(1U)
+
+#define S_DB_GT1 22
+#define V_DB_GT1(x) ((x) << S_DB_GT1)
+#define F_DB_GT1 V_DB_GT1(1U)
+
+#define S_TXVALID1 21
+#define V_TXVALID1(x) ((x) << S_TXVALID1)
+#define F_TXVALID1 V_TXVALID1(1U)
+
+#define S_TXFULL1 20
+#define V_TXFULL1(x) ((x) << S_TXFULL1)
+#define F_TXFULL1 V_TXFULL1(1U)
+
+#define S_PLD_TXVALID1 19
+#define V_PLD_TXVALID1(x) ((x) << S_PLD_TXVALID1)
+#define F_PLD_TXVALID1 V_PLD_TXVALID1(1U)
+
+#define S_PLD_TXFULL1 18
+#define V_PLD_TXFULL1(x) ((x) << S_PLD_TXFULL1)
+#define F_PLD_TXFULL1 V_PLD_TXFULL1(1U)
+
+#define S_CPL5_TXVALID1 17
+#define V_CPL5_TXVALID1(x) ((x) << S_CPL5_TXVALID1)
+#define F_CPL5_TXVALID1 V_CPL5_TXVALID1(1U)
+
+#define S_CPL5_TXFULL1 16
+#define V_CPL5_TXFULL1(x) ((x) << S_CPL5_TXFULL1)
+#define F_CPL5_TXFULL1 V_CPL5_TXFULL1(1U)
+
+#define S_PLD_RXZEROP_SRDY0 15
+#define V_PLD_RXZEROP_SRDY0(x) ((x) << S_PLD_RXZEROP_SRDY0)
+#define F_PLD_RXZEROP_SRDY0 V_PLD_RXZEROP_SRDY0(1U)
+
+#define S_PLD_RXZEROP_DRDY0 14
+#define V_PLD_RXZEROP_DRDY0(x) ((x) << S_PLD_RXZEROP_DRDY0)
+#define F_PLD_RXZEROP_DRDY0 V_PLD_RXZEROP_DRDY0(1U)
+
+#define S_PLD_TXZEROP_SRDY0 13
+#define V_PLD_TXZEROP_SRDY0(x) ((x) << S_PLD_TXZEROP_SRDY0)
+#define F_PLD_TXZEROP_SRDY0 V_PLD_TXZEROP_SRDY0(1U)
+
+#define S_PLD_TXZEROP_DRDY0 12
+#define V_PLD_TXZEROP_DRDY0(x) ((x) << S_PLD_TXZEROP_DRDY0)
+#define F_PLD_TXZEROP_DRDY0 V_PLD_TXZEROP_DRDY0(1U)
+
+#define S_PLD_TX_SRDY0 11
+#define V_PLD_TX_SRDY0(x) ((x) << S_PLD_TX_SRDY0)
+#define F_PLD_TX_SRDY0 V_PLD_TX_SRDY0(1U)
+
+#define S_PLD_TX_DRDY0 10
+#define V_PLD_TX_DRDY0(x) ((x) << S_PLD_TX_DRDY0)
+#define F_PLD_TX_DRDY0 V_PLD_TX_DRDY0(1U)
+
+#define S_ERROR_SRDY0 9
+#define V_ERROR_SRDY0(x) ((x) << S_ERROR_SRDY0)
+#define F_ERROR_SRDY0 V_ERROR_SRDY0(1U)
+
+#define S_ERROR_DRDY0 8
+#define V_ERROR_DRDY0(x) ((x) << S_ERROR_DRDY0)
+#define F_ERROR_DRDY0 V_ERROR_DRDY0(1U)
+
+#define S_DB_VLD0 7
+#define V_DB_VLD0(x) ((x) << S_DB_VLD0)
+#define F_DB_VLD0 V_DB_VLD0(1U)
+
+#define S_DB_GT0 6
+#define V_DB_GT0(x) ((x) << S_DB_GT0)
+#define F_DB_GT0 V_DB_GT0(1U)
+
+#define S_TXVALID0 5
+#define V_TXVALID0(x) ((x) << S_TXVALID0)
+#define F_TXVALID0 V_TXVALID0(1U)
+
+#define S_TXFULL0 4
+#define V_TXFULL0(x) ((x) << S_TXFULL0)
+#define F_TXFULL0 V_TXFULL0(1U)
+
+#define S_PLD_TXVALID0 3
+#define V_PLD_TXVALID0(x) ((x) << S_PLD_TXVALID0)
+#define F_PLD_TXVALID0 V_PLD_TXVALID0(1U)
+
+#define S_PLD_TXFULL0 2
+#define V_PLD_TXFULL0(x) ((x) << S_PLD_TXFULL0)
+#define F_PLD_TXFULL0 V_PLD_TXFULL0(1U)
+
+#define S_CPL5_TXVALID0 1
+#define V_CPL5_TXVALID0(x) ((x) << S_CPL5_TXVALID0)
+#define F_CPL5_TXVALID0 V_CPL5_TXVALID0(1U)
+
+#define S_CPL5_TXFULL0 0
+#define V_CPL5_TXFULL0(x) ((x) << S_CPL5_TXFULL0)
+#define F_CPL5_TXFULL0 V_CPL5_TXFULL0(1U)
+
+#define A_TP_DBG_CSIDE_FIFO1 0x239
+
+#define S_PLD_RXZEROP_SRDY3 31
+#define V_PLD_RXZEROP_SRDY3(x) ((x) << S_PLD_RXZEROP_SRDY3)
+#define F_PLD_RXZEROP_SRDY3 V_PLD_RXZEROP_SRDY3(1U)
+
+#define S_PLD_RXZEROP_DRDY3 30
+#define V_PLD_RXZEROP_DRDY3(x) ((x) << S_PLD_RXZEROP_DRDY3)
+#define F_PLD_RXZEROP_DRDY3 V_PLD_RXZEROP_DRDY3(1U)
+
+#define S_PLD_TXZEROP_SRDY3 29
+#define V_PLD_TXZEROP_SRDY3(x) ((x) << S_PLD_TXZEROP_SRDY3)
+#define F_PLD_TXZEROP_SRDY3 V_PLD_TXZEROP_SRDY3(1U)
+
+#define S_PLD_TXZEROP_DRDY3 28
+#define V_PLD_TXZEROP_DRDY3(x) ((x) << S_PLD_TXZEROP_DRDY3)
+#define F_PLD_TXZEROP_DRDY3 V_PLD_TXZEROP_DRDY3(1U)
+
+#define S_PLD_TX_SRDY3 27
+#define V_PLD_TX_SRDY3(x) ((x) << S_PLD_TX_SRDY3)
+#define F_PLD_TX_SRDY3 V_PLD_TX_SRDY3(1U)
+
+#define S_PLD_TX_DRDY3 26
+#define V_PLD_TX_DRDY3(x) ((x) << S_PLD_TX_DRDY3)
+#define F_PLD_TX_DRDY3 V_PLD_TX_DRDY3(1U)
+
+#define S_ERROR_SRDY3 25
+#define V_ERROR_SRDY3(x) ((x) << S_ERROR_SRDY3)
+#define F_ERROR_SRDY3 V_ERROR_SRDY3(1U)
+
+#define S_ERROR_DRDY3 24
+#define V_ERROR_DRDY3(x) ((x) << S_ERROR_DRDY3)
+#define F_ERROR_DRDY3 V_ERROR_DRDY3(1U)
+
+#define S_DB_VLD3 23
+#define V_DB_VLD3(x) ((x) << S_DB_VLD3)
+#define F_DB_VLD3 V_DB_VLD3(1U)
+
+#define S_DB_GT3 22
+#define V_DB_GT3(x) ((x) << S_DB_GT3)
+#define F_DB_GT3 V_DB_GT3(1U)
+
+#define S_TXVALID3 21
+#define V_TXVALID3(x) ((x) << S_TXVALID3)
+#define F_TXVALID3 V_TXVALID3(1U)
+
+#define S_TXFULL3 20
+#define V_TXFULL3(x) ((x) << S_TXFULL3)
+#define F_TXFULL3 V_TXFULL3(1U)
+
+#define S_PLD_TXVALID3 19
+#define V_PLD_TXVALID3(x) ((x) << S_PLD_TXVALID3)
+#define F_PLD_TXVALID3 V_PLD_TXVALID3(1U)
+
+#define S_PLD_TXFULL3 18
+#define V_PLD_TXFULL3(x) ((x) << S_PLD_TXFULL3)
+#define F_PLD_TXFULL3 V_PLD_TXFULL3(1U)
+
+#define S_CPL5_TXVALID3 17
+#define V_CPL5_TXVALID3(x) ((x) << S_CPL5_TXVALID3)
+#define F_CPL5_TXVALID3 V_CPL5_TXVALID3(1U)
+
+#define S_CPL5_TXFULL3 16
+#define V_CPL5_TXFULL3(x) ((x) << S_CPL5_TXFULL3)
+#define F_CPL5_TXFULL3 V_CPL5_TXFULL3(1U)
+
+#define S_PLD_RXZEROP_SRDY2 15
+#define V_PLD_RXZEROP_SRDY2(x) ((x) << S_PLD_RXZEROP_SRDY2)
+#define F_PLD_RXZEROP_SRDY2 V_PLD_RXZEROP_SRDY2(1U)
+
+#define S_PLD_RXZEROP_DRDY2 14
+#define V_PLD_RXZEROP_DRDY2(x) ((x) << S_PLD_RXZEROP_DRDY2)
+#define F_PLD_RXZEROP_DRDY2 V_PLD_RXZEROP_DRDY2(1U)
+
+#define S_PLD_TXZEROP_SRDY2 13
+#define V_PLD_TXZEROP_SRDY2(x) ((x) << S_PLD_TXZEROP_SRDY2)
+#define F_PLD_TXZEROP_SRDY2 V_PLD_TXZEROP_SRDY2(1U)
+
+#define S_PLD_TXZEROP_DRDY2 12
+#define V_PLD_TXZEROP_DRDY2(x) ((x) << S_PLD_TXZEROP_DRDY2)
+#define F_PLD_TXZEROP_DRDY2 V_PLD_TXZEROP_DRDY2(1U)
+
+#define S_PLD_TX_SRDY2 11
+#define V_PLD_TX_SRDY2(x) ((x) << S_PLD_TX_SRDY2)
+#define F_PLD_TX_SRDY2 V_PLD_TX_SRDY2(1U)
+
+#define S_PLD_TX_DRDY2 10
+#define V_PLD_TX_DRDY2(x) ((x) << S_PLD_TX_DRDY2)
+#define F_PLD_TX_DRDY2 V_PLD_TX_DRDY2(1U)
+
+#define S_ERROR_SRDY2 9
+#define V_ERROR_SRDY2(x) ((x) << S_ERROR_SRDY2)
+#define F_ERROR_SRDY2 V_ERROR_SRDY2(1U)
+
+#define S_ERROR_DRDY2 8
+#define V_ERROR_DRDY2(x) ((x) << S_ERROR_DRDY2)
+#define F_ERROR_DRDY2 V_ERROR_DRDY2(1U)
+
+#define S_DB_VLD2 7
+#define V_DB_VLD2(x) ((x) << S_DB_VLD2)
+#define F_DB_VLD2 V_DB_VLD2(1U)
+
+#define S_DB_GT2 6
+#define V_DB_GT2(x) ((x) << S_DB_GT2)
+#define F_DB_GT2 V_DB_GT2(1U)
+
+#define S_TXVALID2 5
+#define V_TXVALID2(x) ((x) << S_TXVALID2)
+#define F_TXVALID2 V_TXVALID2(1U)
+
+#define S_TXFULL2 4
+#define V_TXFULL2(x) ((x) << S_TXFULL2)
+#define F_TXFULL2 V_TXFULL2(1U)
+
+#define S_PLD_TXVALID2 3
+#define V_PLD_TXVALID2(x) ((x) << S_PLD_TXVALID2)
+#define F_PLD_TXVALID2 V_PLD_TXVALID2(1U)
+
+#define S_PLD_TXFULL2 2
+#define V_PLD_TXFULL2(x) ((x) << S_PLD_TXFULL2)
+#define F_PLD_TXFULL2 V_PLD_TXFULL2(1U)
+
+#define S_CPL5_TXVALID2 1
+#define V_CPL5_TXVALID2(x) ((x) << S_CPL5_TXVALID2)
+#define F_CPL5_TXVALID2 V_CPL5_TXVALID2(1U)
+
+#define S_CPL5_TXFULL2 0
+#define V_CPL5_TXFULL2(x) ((x) << S_CPL5_TXFULL2)
+#define F_CPL5_TXFULL2 V_CPL5_TXFULL2(1U)
+
+#define A_TP_DBG_CSIDE_DISP0 0x23a
+
+#define S_CPL5RXVALID 27
+#define V_CPL5RXVALID(x) ((x) << S_CPL5RXVALID)
+#define F_CPL5RXVALID V_CPL5RXVALID(1U)
+
+#define S_CSTATIC1 26
+#define V_CSTATIC1(x) ((x) << S_CSTATIC1)
+#define F_CSTATIC1 V_CSTATIC1(1U)
+
+#define S_CSTATIC2 25
+#define V_CSTATIC2(x) ((x) << S_CSTATIC2)
+#define F_CSTATIC2 V_CSTATIC2(1U)
+
+#define S_PLD_RXZEROP 24
+#define V_PLD_RXZEROP(x) ((x) << S_PLD_RXZEROP)
+#define F_PLD_RXZEROP V_PLD_RXZEROP(1U)
+
+#define S_DDP_IN_PROGRESS 23
+#define V_DDP_IN_PROGRESS(x) ((x) << S_DDP_IN_PROGRESS)
+#define F_DDP_IN_PROGRESS V_DDP_IN_PROGRESS(1U)
+
+#define S_PLD_RXZEROP_SRDY 22
+#define V_PLD_RXZEROP_SRDY(x) ((x) << S_PLD_RXZEROP_SRDY)
+#define F_PLD_RXZEROP_SRDY V_PLD_RXZEROP_SRDY(1U)
+
+#define S_CSTATIC3 21
+#define V_CSTATIC3(x) ((x) << S_CSTATIC3)
+#define F_CSTATIC3 V_CSTATIC3(1U)
+
+#define S_DDP_DRDY 20
+#define V_DDP_DRDY(x) ((x) << S_DDP_DRDY)
+#define F_DDP_DRDY V_DDP_DRDY(1U)
+
+#define S_DDP_PRE_STATE 17
+#define M_DDP_PRE_STATE 0x7U
+#define V_DDP_PRE_STATE(x) ((x) << S_DDP_PRE_STATE)
+#define G_DDP_PRE_STATE(x) (((x) >> S_DDP_PRE_STATE) & M_DDP_PRE_STATE)
+
+#define S_DDP_SRDY 16
+#define V_DDP_SRDY(x) ((x) << S_DDP_SRDY)
+#define F_DDP_SRDY V_DDP_SRDY(1U)
+
+#define S_DDP_MSG_CODE 12
+#define M_DDP_MSG_CODE 0xfU
+#define V_DDP_MSG_CODE(x) ((x) << S_DDP_MSG_CODE)
+#define G_DDP_MSG_CODE(x) (((x) >> S_DDP_MSG_CODE) & M_DDP_MSG_CODE)
+
+#define S_CPL5_SOCP_CNT 10
+#define M_CPL5_SOCP_CNT 0x3U
+#define V_CPL5_SOCP_CNT(x) ((x) << S_CPL5_SOCP_CNT)
+#define G_CPL5_SOCP_CNT(x) (((x) >> S_CPL5_SOCP_CNT) & M_CPL5_SOCP_CNT)
+
+#define S_CSTATIC4 4
+#define M_CSTATIC4 0x3fU
+#define V_CSTATIC4(x) ((x) << S_CSTATIC4)
+#define G_CSTATIC4(x) (((x) >> S_CSTATIC4) & M_CSTATIC4)
+
+#define S_CMD_SEL 1
+#define V_CMD_SEL(x) ((x) << S_CMD_SEL)
+#define F_CMD_SEL V_CMD_SEL(1U)
+
+#define A_TP_DBG_CSIDE_DISP1 0x23b
+#define A_TP_DBG_CSIDE_DDP0 0x23c
+
+#define S_DDPMSGLATEST7 28
+#define M_DDPMSGLATEST7 0xfU
+#define V_DDPMSGLATEST7(x) ((x) << S_DDPMSGLATEST7)
+#define G_DDPMSGLATEST7(x) (((x) >> S_DDPMSGLATEST7) & M_DDPMSGLATEST7)
+
+#define S_DDPMSGLATEST6 24
+#define M_DDPMSGLATEST6 0xfU
+#define V_DDPMSGLATEST6(x) ((x) << S_DDPMSGLATEST6)
+#define G_DDPMSGLATEST6(x) (((x) >> S_DDPMSGLATEST6) & M_DDPMSGLATEST6)
+
+#define S_DDPMSGLATEST5 20
+#define M_DDPMSGLATEST5 0xfU
+#define V_DDPMSGLATEST5(x) ((x) << S_DDPMSGLATEST5)
+#define G_DDPMSGLATEST5(x) (((x) >> S_DDPMSGLATEST5) & M_DDPMSGLATEST5)
+
+#define S_DDPMSGLATEST4 16
+#define M_DDPMSGLATEST4 0xfU
+#define V_DDPMSGLATEST4(x) ((x) << S_DDPMSGLATEST4)
+#define G_DDPMSGLATEST4(x) (((x) >> S_DDPMSGLATEST4) & M_DDPMSGLATEST4)
+
+#define S_DDPMSGLATEST3 12
+#define M_DDPMSGLATEST3 0xfU
+#define V_DDPMSGLATEST3(x) ((x) << S_DDPMSGLATEST3)
+#define G_DDPMSGLATEST3(x) (((x) >> S_DDPMSGLATEST3) & M_DDPMSGLATEST3)
+
+#define S_DDPMSGLATEST2 8
+#define M_DDPMSGLATEST2 0xfU
+#define V_DDPMSGLATEST2(x) ((x) << S_DDPMSGLATEST2)
+#define G_DDPMSGLATEST2(x) (((x) >> S_DDPMSGLATEST2) & M_DDPMSGLATEST2)
+
+#define S_DDPMSGLATEST1 4
+#define M_DDPMSGLATEST1 0xfU
+#define V_DDPMSGLATEST1(x) ((x) << S_DDPMSGLATEST1)
+#define G_DDPMSGLATEST1(x) (((x) >> S_DDPMSGLATEST1) & M_DDPMSGLATEST1)
+
+#define S_DDPMSGLATEST0 0
+#define M_DDPMSGLATEST0 0xfU
+#define V_DDPMSGLATEST0(x) ((x) << S_DDPMSGLATEST0)
+#define G_DDPMSGLATEST0(x) (((x) >> S_DDPMSGLATEST0) & M_DDPMSGLATEST0)
+
+#define A_TP_DBG_CSIDE_DDP1 0x23d
+#define A_TP_DBG_CSIDE_FRM 0x23e
+
+#define S_CRX2XERROR 28
+#define M_CRX2XERROR 0xfU
+#define V_CRX2XERROR(x) ((x) << S_CRX2XERROR)
+#define G_CRX2XERROR(x) (((x) >> S_CRX2XERROR) & M_CRX2XERROR)
+
+#define S_CPLDTX2XERROR 24
+#define M_CPLDTX2XERROR 0xfU
+#define V_CPLDTX2XERROR(x) ((x) << S_CPLDTX2XERROR)
+#define G_CPLDTX2XERROR(x) (((x) >> S_CPLDTX2XERROR) & M_CPLDTX2XERROR)
+
+#define S_CTXERROR 22
+#define M_CTXERROR 0x3U
+#define V_CTXERROR(x) ((x) << S_CTXERROR)
+#define G_CTXERROR(x) (((x) >> S_CTXERROR) & M_CTXERROR)
+
+#define S_CPLDRXERROR 20
+#define M_CPLDRXERROR 0x3U
+#define V_CPLDRXERROR(x) ((x) << S_CPLDRXERROR)
+#define G_CPLDRXERROR(x) (((x) >> S_CPLDRXERROR) & M_CPLDRXERROR)
+
+#define S_CPLRXERROR 18
+#define M_CPLRXERROR 0x3U
+#define V_CPLRXERROR(x) ((x) << S_CPLRXERROR)
+#define G_CPLRXERROR(x) (((x) >> S_CPLRXERROR) & M_CPLRXERROR)
+
+#define S_CPLTXERROR 16
+#define M_CPLTXERROR 0x3U
+#define V_CPLTXERROR(x) ((x) << S_CPLTXERROR)
+#define G_CPLTXERROR(x) (((x) >> S_CPLTXERROR) & M_CPLTXERROR)
+
+#define S_CPRSERROR 0
+#define M_CPRSERROR 0xfU
+#define V_CPRSERROR(x) ((x) << S_CPRSERROR)
+#define G_CPRSERROR(x) (((x) >> S_CPRSERROR) & M_CPRSERROR)
+
+#define A_TP_DBG_CSIDE_INT 0x23f
+
+#define S_CRXVALID2X 28
+#define M_CRXVALID2X 0xfU
+#define V_CRXVALID2X(x) ((x) << S_CRXVALID2X)
+#define G_CRXVALID2X(x) (((x) >> S_CRXVALID2X) & M_CRXVALID2X)
+
+#define S_CRXAFULL2X 24
+#define M_CRXAFULL2X 0xfU
+#define V_CRXAFULL2X(x) ((x) << S_CRXAFULL2X)
+#define G_CRXAFULL2X(x) (((x) >> S_CRXAFULL2X) & M_CRXAFULL2X)
+
+#define S_CTXVALID2X 22
+#define M_CTXVALID2X 0x3U
+#define V_CTXVALID2X(x) ((x) << S_CTXVALID2X)
+#define G_CTXVALID2X(x) (((x) >> S_CTXVALID2X) & M_CTXVALID2X)
+
+#define S_CTXAFULL2X 20
+#define M_CTXAFULL2X 0x3U
+#define V_CTXAFULL2X(x) ((x) << S_CTXAFULL2X)
+#define G_CTXAFULL2X(x) (((x) >> S_CTXAFULL2X) & M_CTXAFULL2X)
+
+#define S_PLD2X_RXVALID 18
+#define M_PLD2X_RXVALID 0x3U
+#define V_PLD2X_RXVALID(x) ((x) << S_PLD2X_RXVALID)
+#define G_PLD2X_RXVALID(x) (((x) >> S_PLD2X_RXVALID) & M_PLD2X_RXVALID)
+
+#define S_PLD2X_RXAFULL 16
+#define M_PLD2X_RXAFULL 0x3U
+#define V_PLD2X_RXAFULL(x) ((x) << S_PLD2X_RXAFULL)
+#define G_PLD2X_RXAFULL(x) (((x) >> S_PLD2X_RXAFULL) & M_PLD2X_RXAFULL)
+
+#define S_CSIDE_DDP_VALID 14
+#define M_CSIDE_DDP_VALID 0x3U
+#define V_CSIDE_DDP_VALID(x) ((x) << S_CSIDE_DDP_VALID)
+#define G_CSIDE_DDP_VALID(x) (((x) >> S_CSIDE_DDP_VALID) & M_CSIDE_DDP_VALID)
+
+#define S_DDP_AFULL 12
+#define M_DDP_AFULL 0x3U
+#define V_DDP_AFULL(x) ((x) << S_DDP_AFULL)
+#define G_DDP_AFULL(x) (((x) >> S_DDP_AFULL) & M_DDP_AFULL)
+
+#define S_TRC_RXVALID 11
+#define V_TRC_RXVALID(x) ((x) << S_TRC_RXVALID)
+#define F_TRC_RXVALID V_TRC_RXVALID(1U)
+
+#define S_TRC_RXFULL 10
+#define V_TRC_RXFULL(x) ((x) << S_TRC_RXFULL)
+#define F_TRC_RXFULL V_TRC_RXFULL(1U)
+
+#define S_CPL5_TXVALID 9
+#define V_CPL5_TXVALID(x) ((x) << S_CPL5_TXVALID)
+#define F_CPL5_TXVALID V_CPL5_TXVALID(1U)
+
+#define S_CPL5_TXFULL 8
+#define V_CPL5_TXFULL(x) ((x) << S_CPL5_TXFULL)
+#define F_CPL5_TXFULL V_CPL5_TXFULL(1U)
+
+#define S_PLD2X_TXVALID 4
+#define M_PLD2X_TXVALID 0xfU
+#define V_PLD2X_TXVALID(x) ((x) << S_PLD2X_TXVALID)
+#define G_PLD2X_TXVALID(x) (((x) >> S_PLD2X_TXVALID) & M_PLD2X_TXVALID)
+
+#define S_PLD2X_TXAFULL 0
+#define M_PLD2X_TXAFULL 0xfU
+#define V_PLD2X_TXAFULL(x) ((x) << S_PLD2X_TXAFULL)
+#define G_PLD2X_TXAFULL(x) (((x) >> S_PLD2X_TXAFULL) & M_PLD2X_TXAFULL)
+
+#define A_TP_CHDR_CONFIG 0x240
+
+#define S_CH1HIGH 24
+#define M_CH1HIGH 0xffU
+#define V_CH1HIGH(x) ((x) << S_CH1HIGH)
+#define G_CH1HIGH(x) (((x) >> S_CH1HIGH) & M_CH1HIGH)
+
+#define S_CH1LOW 16
+#define M_CH1LOW 0xffU
+#define V_CH1LOW(x) ((x) << S_CH1LOW)
+#define G_CH1LOW(x) (((x) >> S_CH1LOW) & M_CH1LOW)
+
+#define S_CH0HIGH 8
+#define M_CH0HIGH 0xffU
+#define V_CH0HIGH(x) ((x) << S_CH0HIGH)
+#define G_CH0HIGH(x) (((x) >> S_CH0HIGH) & M_CH0HIGH)
+
+#define S_CH0LOW 0
+#define M_CH0LOW 0xffU
+#define V_CH0LOW(x) ((x) << S_CH0LOW)
+#define G_CH0LOW(x) (((x) >> S_CH0LOW) & M_CH0LOW)
+
+#define A_TP_UTRN_CONFIG 0x241
+
+#define S_CH2FIFOLIMIT 16
+#define M_CH2FIFOLIMIT 0xffU
+#define V_CH2FIFOLIMIT(x) ((x) << S_CH2FIFOLIMIT)
+#define G_CH2FIFOLIMIT(x) (((x) >> S_CH2FIFOLIMIT) & M_CH2FIFOLIMIT)
+
+#define S_CH1FIFOLIMIT 8
+#define M_CH1FIFOLIMIT 0xffU
+#define V_CH1FIFOLIMIT(x) ((x) << S_CH1FIFOLIMIT)
+#define G_CH1FIFOLIMIT(x) (((x) >> S_CH1FIFOLIMIT) & M_CH1FIFOLIMIT)
+
+#define S_CH0FIFOLIMIT 0
+#define M_CH0FIFOLIMIT 0xffU
+#define V_CH0FIFOLIMIT(x) ((x) << S_CH0FIFOLIMIT)
+#define G_CH0FIFOLIMIT(x) (((x) >> S_CH0FIFOLIMIT) & M_CH0FIFOLIMIT)
+
+#define A_TP_CDSP_CONFIG 0x242
+
+#define S_WRITEZEROEN 4
+#define V_WRITEZEROEN(x) ((x) << S_WRITEZEROEN)
+#define F_WRITEZEROEN V_WRITEZEROEN(1U)
+
+#define S_WRITEZEROOP 0
+#define M_WRITEZEROOP 0xfU
+#define V_WRITEZEROOP(x) ((x) << S_WRITEZEROOP)
+#define G_WRITEZEROOP(x) (((x) >> S_WRITEZEROOP) & M_WRITEZEROOP)
+
+#define A_TP_TRC_CONFIG 0x244
+
+#define S_TRCRR 1
+#define V_TRCRR(x) ((x) << S_TRCRR)
+#define F_TRCRR V_TRCRR(1U)
+
+#define S_TRCCH 0
+#define V_TRCCH(x) ((x) << S_TRCCH)
+#define F_TRCCH V_TRCCH(1U)
+
+#define A_TP_TAG_CONFIG 0x245
+
+#define S_ETAGTYPE 16
+#define M_ETAGTYPE 0xffffU
+#define V_ETAGTYPE(x) ((x) << S_ETAGTYPE)
+#define G_ETAGTYPE(x) (((x) >> S_ETAGTYPE) & M_ETAGTYPE)
+
+#define A_TP_DBG_CSIDE_PRS 0x246
+
+#define S_CPRSSTATE3 24
+#define M_CPRSSTATE3 0x7U
+#define V_CPRSSTATE3(x) ((x) << S_CPRSSTATE3)
+#define G_CPRSSTATE3(x) (((x) >> S_CPRSSTATE3) & M_CPRSSTATE3)
+
+#define S_CPRSSTATE2 16
+#define M_CPRSSTATE2 0x7U
+#define V_CPRSSTATE2(x) ((x) << S_CPRSSTATE2)
+#define G_CPRSSTATE2(x) (((x) >> S_CPRSSTATE2) & M_CPRSSTATE2)
+
+#define S_CPRSSTATE1 8
+#define M_CPRSSTATE1 0x7U
+#define V_CPRSSTATE1(x) ((x) << S_CPRSSTATE1)
+#define G_CPRSSTATE1(x) (((x) >> S_CPRSSTATE1) & M_CPRSSTATE1)
+
+#define S_CPRSSTATE0 0
+#define M_CPRSSTATE0 0x7U
+#define V_CPRSSTATE0(x) ((x) << S_CPRSSTATE0)
+#define G_CPRSSTATE0(x) (((x) >> S_CPRSSTATE0) & M_CPRSSTATE0)
+
+#define A_TP_DBG_CSIDE_DEMUX 0x247
+
+#define S_CALLDONE 28
+#define M_CALLDONE 0xfU
+#define V_CALLDONE(x) ((x) << S_CALLDONE)
+#define G_CALLDONE(x) (((x) >> S_CALLDONE) & M_CALLDONE)
+
+#define S_CTCPL5DONE 24
+#define M_CTCPL5DONE 0xfU
+#define V_CTCPL5DONE(x) ((x) << S_CTCPL5DONE)
+#define G_CTCPL5DONE(x) (((x) >> S_CTCPL5DONE) & M_CTCPL5DONE)
+
+#define S_CTXZEROPDONE 20
+#define M_CTXZEROPDONE 0xfU
+#define V_CTXZEROPDONE(x) ((x) << S_CTXZEROPDONE)
+#define G_CTXZEROPDONE(x) (((x) >> S_CTXZEROPDONE) & M_CTXZEROPDONE)
+
+#define S_CPLDDONE 16
+#define M_CPLDDONE 0xfU
+#define V_CPLDDONE(x) ((x) << S_CPLDDONE)
+#define G_CPLDDONE(x) (((x) >> S_CPLDDONE) & M_CPLDDONE)
+
+#define S_CTTCPOPDONE 12
+#define M_CTTCPOPDONE 0xfU
+#define V_CTTCPOPDONE(x) ((x) << S_CTTCPOPDONE)
+#define G_CTTCPOPDONE(x) (((x) >> S_CTTCPOPDONE) & M_CTTCPOPDONE)
+
+#define S_CDBDONE 8
+#define M_CDBDONE 0xfU
+#define V_CDBDONE(x) ((x) << S_CDBDONE)
+#define G_CDBDONE(x) (((x) >> S_CDBDONE) & M_CDBDONE)
+
+#define S_CISSFIFODONE 4
+#define M_CISSFIFODONE 0xfU
+#define V_CISSFIFODONE(x) ((x) << S_CISSFIFODONE)
+#define G_CISSFIFODONE(x) (((x) >> S_CISSFIFODONE) & M_CISSFIFODONE)
+
+#define S_CTXPKTCSUMDONE 0
+#define M_CTXPKTCSUMDONE 0xfU
+#define V_CTXPKTCSUMDONE(x) ((x) << S_CTXPKTCSUMDONE)
+#define G_CTXPKTCSUMDONE(x) (((x) >> S_CTXPKTCSUMDONE) & M_CTXPKTCSUMDONE)
+
+#define A_TP_FIFO_CONFIG 0x8c0
+
+#define S_CH1_OUTPUT 27
+#define M_CH1_OUTPUT 0x1fU
+#define V_CH1_OUTPUT(x) ((x) << S_CH1_OUTPUT)
+#define G_CH1_OUTPUT(x) (((x) >> S_CH1_OUTPUT) & M_CH1_OUTPUT)
+
+#define S_CH2_OUTPUT 22
+#define M_CH2_OUTPUT 0x1fU
+#define V_CH2_OUTPUT(x) ((x) << S_CH2_OUTPUT)
+#define G_CH2_OUTPUT(x) (((x) >> S_CH2_OUTPUT) & M_CH2_OUTPUT)
+
+#define S_STROBE1 16
+#define V_STROBE1(x) ((x) << S_STROBE1)
+#define F_STROBE1 V_STROBE1(1U)
+
+#define S_CH1_INPUT 11
+#define M_CH1_INPUT 0x1fU
+#define V_CH1_INPUT(x) ((x) << S_CH1_INPUT)
+#define G_CH1_INPUT(x) (((x) >> S_CH1_INPUT) & M_CH1_INPUT)
+
+#define S_CH2_INPUT 6
+#define M_CH2_INPUT 0x1fU
+#define V_CH2_INPUT(x) ((x) << S_CH2_INPUT)
+#define G_CH2_INPUT(x) (((x) >> S_CH2_INPUT) & M_CH2_INPUT)
+
+#define S_CH3_INPUT 1
+#define M_CH3_INPUT 0x1fU
+#define V_CH3_INPUT(x) ((x) << S_CH3_INPUT)
+#define G_CH3_INPUT(x) (((x) >> S_CH3_INPUT) & M_CH3_INPUT)
+
+#define S_STROBE0 0
+#define V_STROBE0(x) ((x) << S_STROBE0)
+#define F_STROBE0 V_STROBE0(1U)
+
+#define A_TP_MIB_MAC_IN_ERR_0 0x0
+#define A_TP_MIB_MAC_IN_ERR_1 0x1
+#define A_TP_MIB_MAC_IN_ERR_2 0x2
+#define A_TP_MIB_MAC_IN_ERR_3 0x3
+#define A_TP_MIB_HDR_IN_ERR_0 0x4
+#define A_TP_MIB_HDR_IN_ERR_1 0x5
+#define A_TP_MIB_HDR_IN_ERR_2 0x6
+#define A_TP_MIB_HDR_IN_ERR_3 0x7
+#define A_TP_MIB_TCP_IN_ERR_0 0x8
+#define A_TP_MIB_TCP_IN_ERR_1 0x9
+#define A_TP_MIB_TCP_IN_ERR_2 0xa
+#define A_TP_MIB_TCP_IN_ERR_3 0xb
+#define A_TP_MIB_TCP_OUT_RST 0xc
+#define A_TP_MIB_TCP_IN_SEG_HI 0x10
+#define A_TP_MIB_TCP_IN_SEG_LO 0x11
+#define A_TP_MIB_TCP_OUT_SEG_HI 0x12
+#define A_TP_MIB_TCP_OUT_SEG_LO 0x13
+#define A_TP_MIB_TCP_RXT_SEG_HI 0x14
+#define A_TP_MIB_TCP_RXT_SEG_LO 0x15
+#define A_TP_MIB_TNL_CNG_DROP_0 0x18
+#define A_TP_MIB_TNL_CNG_DROP_1 0x19
+#define A_TP_MIB_TNL_CNG_DROP_2 0x1a
+#define A_TP_MIB_TNL_CNG_DROP_3 0x1b
+#define A_TP_MIB_OFD_CHN_DROP_0 0x1c
+#define A_TP_MIB_OFD_CHN_DROP_1 0x1d
+#define A_TP_MIB_OFD_CHN_DROP_2 0x1e
+#define A_TP_MIB_OFD_CHN_DROP_3 0x1f
+#define A_TP_MIB_TNL_OUT_PKT_0 0x20
+#define A_TP_MIB_TNL_OUT_PKT_1 0x21
+#define A_TP_MIB_TNL_OUT_PKT_2 0x22
+#define A_TP_MIB_TNL_OUT_PKT_3 0x23
+#define A_TP_MIB_TNL_IN_PKT_0 0x24
+#define A_TP_MIB_TNL_IN_PKT_1 0x25
+#define A_TP_MIB_TNL_IN_PKT_2 0x26
+#define A_TP_MIB_TNL_IN_PKT_3 0x27
+#define A_TP_MIB_TCP_V6IN_ERR_0 0x28
+#define A_TP_MIB_TCP_V6IN_ERR_1 0x29
+#define A_TP_MIB_TCP_V6IN_ERR_2 0x2a
+#define A_TP_MIB_TCP_V6IN_ERR_3 0x2b
+#define A_TP_MIB_TCP_V6OUT_RST 0x2c
+#define A_TP_MIB_TCP_V6IN_SEG_HI 0x30
+#define A_TP_MIB_TCP_V6IN_SEG_LO 0x31
+#define A_TP_MIB_TCP_V6OUT_SEG_HI 0x32
+#define A_TP_MIB_TCP_V6OUT_SEG_LO 0x33
+#define A_TP_MIB_TCP_V6RXT_SEG_HI 0x34
+#define A_TP_MIB_TCP_V6RXT_SEG_LO 0x35
+#define A_TP_MIB_OFD_ARP_DROP 0x36
+#define A_TP_MIB_OFD_DFR_DROP 0x37
+#define A_TP_MIB_CPL_IN_REQ_0 0x38
+#define A_TP_MIB_CPL_IN_REQ_1 0x39
+#define A_TP_MIB_CPL_IN_REQ_2 0x3a
+#define A_TP_MIB_CPL_IN_REQ_3 0x3b
+#define A_TP_MIB_CPL_OUT_RSP_0 0x3c
+#define A_TP_MIB_CPL_OUT_RSP_1 0x3d
+#define A_TP_MIB_CPL_OUT_RSP_2 0x3e
+#define A_TP_MIB_CPL_OUT_RSP_3 0x3f
+#define A_TP_MIB_TNL_LPBK_0 0x40
+#define A_TP_MIB_TNL_LPBK_1 0x41
+#define A_TP_MIB_TNL_LPBK_2 0x42
+#define A_TP_MIB_TNL_LPBK_3 0x43
+#define A_TP_MIB_TNL_DROP_0 0x44
+#define A_TP_MIB_TNL_DROP_1 0x45
+#define A_TP_MIB_TNL_DROP_2 0x46
+#define A_TP_MIB_TNL_DROP_3 0x47
+#define A_TP_MIB_FCOE_DDP_0 0x48
+#define A_TP_MIB_FCOE_DDP_1 0x49
+#define A_TP_MIB_FCOE_DDP_2 0x4a
+#define A_TP_MIB_FCOE_DDP_3 0x4b
+#define A_TP_MIB_FCOE_DROP_0 0x4c
+#define A_TP_MIB_FCOE_DROP_1 0x4d
+#define A_TP_MIB_FCOE_DROP_2 0x4e
+#define A_TP_MIB_FCOE_DROP_3 0x4f
+#define A_TP_MIB_FCOE_BYTE_0_HI 0x50
+#define A_TP_MIB_FCOE_BYTE_0_LO 0x51
+#define A_TP_MIB_FCOE_BYTE_1_HI 0x52
+#define A_TP_MIB_FCOE_BYTE_1_LO 0x53
+#define A_TP_MIB_FCOE_BYTE_2_HI 0x54
+#define A_TP_MIB_FCOE_BYTE_2_LO 0x55
+#define A_TP_MIB_FCOE_BYTE_3_HI 0x56
+#define A_TP_MIB_FCOE_BYTE_3_LO 0x57
+#define A_TP_MIB_OFD_VLN_DROP_0 0x58
+#define A_TP_MIB_OFD_VLN_DROP_1 0x59
+#define A_TP_MIB_OFD_VLN_DROP_2 0x5a
+#define A_TP_MIB_OFD_VLN_DROP_3 0x5b
+#define A_TP_MIB_USM_PKTS 0x5c
+#define A_TP_MIB_USM_DROP 0x5d
+#define A_TP_MIB_USM_BYTES_HI 0x5e
+#define A_TP_MIB_USM_BYTES_LO 0x5f
+#define A_TP_MIB_TID_DEL 0x60
+#define A_TP_MIB_TID_INV 0x61
+#define A_TP_MIB_TID_ACT 0x62
+#define A_TP_MIB_TID_PAS 0x63
+#define A_TP_MIB_RQE_DFR_MOD 0x64
+#define A_TP_MIB_RQE_DFR_PKT 0x65
+#define A_TP_MIB_CPL_OUT_ERR_0 0x68
+#define A_TP_MIB_CPL_OUT_ERR_1 0x69
+#define A_TP_MIB_CPL_OUT_ERR_2 0x6a
+#define A_TP_MIB_CPL_OUT_ERR_3 0x6b
+
+/* registers for module ULP_TX */
+#define ULP_TX_BASE_ADDR 0x8dc0
+
+#define A_ULP_TX_CONFIG 0x8dc0
+
+#define S_STAG_MIX_ENABLE 2
+#define V_STAG_MIX_ENABLE(x) ((x) << S_STAG_MIX_ENABLE)
+#define F_STAG_MIX_ENABLE V_STAG_MIX_ENABLE(1U)
+
+#define S_STAGF_FIX_DISABLE 1
+#define V_STAGF_FIX_DISABLE(x) ((x) << S_STAGF_FIX_DISABLE)
+#define F_STAGF_FIX_DISABLE V_STAGF_FIX_DISABLE(1U)
+
+#define S_EXTRA_TAG_INSERTION_ENABLE 0
+#define V_EXTRA_TAG_INSERTION_ENABLE(x) ((x) << S_EXTRA_TAG_INSERTION_ENABLE)
+#define F_EXTRA_TAG_INSERTION_ENABLE V_EXTRA_TAG_INSERTION_ENABLE(1U)
+
+#define A_ULP_TX_PERR_INJECT 0x8dc4
+#define A_ULP_TX_INT_ENABLE 0x8dc8
+
+#define S_PBL_BOUND_ERR_CH3 31
+#define V_PBL_BOUND_ERR_CH3(x) ((x) << S_PBL_BOUND_ERR_CH3)
+#define F_PBL_BOUND_ERR_CH3 V_PBL_BOUND_ERR_CH3(1U)
+
+#define S_PBL_BOUND_ERR_CH2 30
+#define V_PBL_BOUND_ERR_CH2(x) ((x) << S_PBL_BOUND_ERR_CH2)
+#define F_PBL_BOUND_ERR_CH2 V_PBL_BOUND_ERR_CH2(1U)
+
+#define S_PBL_BOUND_ERR_CH1 29
+#define V_PBL_BOUND_ERR_CH1(x) ((x) << S_PBL_BOUND_ERR_CH1)
+#define F_PBL_BOUND_ERR_CH1 V_PBL_BOUND_ERR_CH1(1U)
+
+#define S_PBL_BOUND_ERR_CH0 28
+#define V_PBL_BOUND_ERR_CH0(x) ((x) << S_PBL_BOUND_ERR_CH0)
+#define F_PBL_BOUND_ERR_CH0 V_PBL_BOUND_ERR_CH0(1U)
+
+#define S_SGE2ULP_FIFO_PERR_SET3 27
+#define V_SGE2ULP_FIFO_PERR_SET3(x) ((x) << S_SGE2ULP_FIFO_PERR_SET3)
+#define F_SGE2ULP_FIFO_PERR_SET3 V_SGE2ULP_FIFO_PERR_SET3(1U)
+
+#define S_SGE2ULP_FIFO_PERR_SET2 26
+#define V_SGE2ULP_FIFO_PERR_SET2(x) ((x) << S_SGE2ULP_FIFO_PERR_SET2)
+#define F_SGE2ULP_FIFO_PERR_SET2 V_SGE2ULP_FIFO_PERR_SET2(1U)
+
+#define S_SGE2ULP_FIFO_PERR_SET1 25
+#define V_SGE2ULP_FIFO_PERR_SET1(x) ((x) << S_SGE2ULP_FIFO_PERR_SET1)
+#define F_SGE2ULP_FIFO_PERR_SET1 V_SGE2ULP_FIFO_PERR_SET1(1U)
+
+#define S_SGE2ULP_FIFO_PERR_SET0 24
+#define V_SGE2ULP_FIFO_PERR_SET0(x) ((x) << S_SGE2ULP_FIFO_PERR_SET0)
+#define F_SGE2ULP_FIFO_PERR_SET0 V_SGE2ULP_FIFO_PERR_SET0(1U)
+
+#define S_CIM2ULP_FIFO_PERR_SET3 23
+#define V_CIM2ULP_FIFO_PERR_SET3(x) ((x) << S_CIM2ULP_FIFO_PERR_SET3)
+#define F_CIM2ULP_FIFO_PERR_SET3 V_CIM2ULP_FIFO_PERR_SET3(1U)
+
+#define S_CIM2ULP_FIFO_PERR_SET2 22
+#define V_CIM2ULP_FIFO_PERR_SET2(x) ((x) << S_CIM2ULP_FIFO_PERR_SET2)
+#define F_CIM2ULP_FIFO_PERR_SET2 V_CIM2ULP_FIFO_PERR_SET2(1U)
+
+#define S_CIM2ULP_FIFO_PERR_SET1 21
+#define V_CIM2ULP_FIFO_PERR_SET1(x) ((x) << S_CIM2ULP_FIFO_PERR_SET1)
+#define F_CIM2ULP_FIFO_PERR_SET1 V_CIM2ULP_FIFO_PERR_SET1(1U)
+
+#define S_CIM2ULP_FIFO_PERR_SET0 20
+#define V_CIM2ULP_FIFO_PERR_SET0(x) ((x) << S_CIM2ULP_FIFO_PERR_SET0)
+#define F_CIM2ULP_FIFO_PERR_SET0 V_CIM2ULP_FIFO_PERR_SET0(1U)
+
+#define S_CQE_FIFO_PERR_SET3 19
+#define V_CQE_FIFO_PERR_SET3(x) ((x) << S_CQE_FIFO_PERR_SET3)
+#define F_CQE_FIFO_PERR_SET3 V_CQE_FIFO_PERR_SET3(1U)
+
+#define S_CQE_FIFO_PERR_SET2 18
+#define V_CQE_FIFO_PERR_SET2(x) ((x) << S_CQE_FIFO_PERR_SET2)
+#define F_CQE_FIFO_PERR_SET2 V_CQE_FIFO_PERR_SET2(1U)
+
+#define S_CQE_FIFO_PERR_SET1 17
+#define V_CQE_FIFO_PERR_SET1(x) ((x) << S_CQE_FIFO_PERR_SET1)
+#define F_CQE_FIFO_PERR_SET1 V_CQE_FIFO_PERR_SET1(1U)
+
+#define S_CQE_FIFO_PERR_SET0 16
+#define V_CQE_FIFO_PERR_SET0(x) ((x) << S_CQE_FIFO_PERR_SET0)
+#define F_CQE_FIFO_PERR_SET0 V_CQE_FIFO_PERR_SET0(1U)
+
+#define S_PBL_FIFO_PERR_SET3 15
+#define V_PBL_FIFO_PERR_SET3(x) ((x) << S_PBL_FIFO_PERR_SET3)
+#define F_PBL_FIFO_PERR_SET3 V_PBL_FIFO_PERR_SET3(1U)
+
+#define S_PBL_FIFO_PERR_SET2 14
+#define V_PBL_FIFO_PERR_SET2(x) ((x) << S_PBL_FIFO_PERR_SET2)
+#define F_PBL_FIFO_PERR_SET2 V_PBL_FIFO_PERR_SET2(1U)
+
+#define S_PBL_FIFO_PERR_SET1 13
+#define V_PBL_FIFO_PERR_SET1(x) ((x) << S_PBL_FIFO_PERR_SET1)
+#define F_PBL_FIFO_PERR_SET1 V_PBL_FIFO_PERR_SET1(1U)
+
+#define S_PBL_FIFO_PERR_SET0 12
+#define V_PBL_FIFO_PERR_SET0(x) ((x) << S_PBL_FIFO_PERR_SET0)
+#define F_PBL_FIFO_PERR_SET0 V_PBL_FIFO_PERR_SET0(1U)
+
+#define S_CMD_FIFO_PERR_SET3 11
+#define V_CMD_FIFO_PERR_SET3(x) ((x) << S_CMD_FIFO_PERR_SET3)
+#define F_CMD_FIFO_PERR_SET3 V_CMD_FIFO_PERR_SET3(1U)
+
+#define S_CMD_FIFO_PERR_SET2 10
+#define V_CMD_FIFO_PERR_SET2(x) ((x) << S_CMD_FIFO_PERR_SET2)
+#define F_CMD_FIFO_PERR_SET2 V_CMD_FIFO_PERR_SET2(1U)
+
+#define S_CMD_FIFO_PERR_SET1 9
+#define V_CMD_FIFO_PERR_SET1(x) ((x) << S_CMD_FIFO_PERR_SET1)
+#define F_CMD_FIFO_PERR_SET1 V_CMD_FIFO_PERR_SET1(1U)
+
+#define S_CMD_FIFO_PERR_SET0 8
+#define V_CMD_FIFO_PERR_SET0(x) ((x) << S_CMD_FIFO_PERR_SET0)
+#define F_CMD_FIFO_PERR_SET0 V_CMD_FIFO_PERR_SET0(1U)
+
+#define S_LSO_HDR_SRAM_PERR_SET3 7
+#define V_LSO_HDR_SRAM_PERR_SET3(x) ((x) << S_LSO_HDR_SRAM_PERR_SET3)
+#define F_LSO_HDR_SRAM_PERR_SET3 V_LSO_HDR_SRAM_PERR_SET3(1U)
+
+#define S_LSO_HDR_SRAM_PERR_SET2 6
+#define V_LSO_HDR_SRAM_PERR_SET2(x) ((x) << S_LSO_HDR_SRAM_PERR_SET2)
+#define F_LSO_HDR_SRAM_PERR_SET2 V_LSO_HDR_SRAM_PERR_SET2(1U)
+
+#define S_LSO_HDR_SRAM_PERR_SET1 5
+#define V_LSO_HDR_SRAM_PERR_SET1(x) ((x) << S_LSO_HDR_SRAM_PERR_SET1)
+#define F_LSO_HDR_SRAM_PERR_SET1 V_LSO_HDR_SRAM_PERR_SET1(1U)
+
+#define S_LSO_HDR_SRAM_PERR_SET0 4
+#define V_LSO_HDR_SRAM_PERR_SET0(x) ((x) << S_LSO_HDR_SRAM_PERR_SET0)
+#define F_LSO_HDR_SRAM_PERR_SET0 V_LSO_HDR_SRAM_PERR_SET0(1U)
+
+#define S_IMM_DATA_PERR_SET_CH3 3
+#define V_IMM_DATA_PERR_SET_CH3(x) ((x) << S_IMM_DATA_PERR_SET_CH3)
+#define F_IMM_DATA_PERR_SET_CH3 V_IMM_DATA_PERR_SET_CH3(1U)
+
+#define S_IMM_DATA_PERR_SET_CH2 2
+#define V_IMM_DATA_PERR_SET_CH2(x) ((x) << S_IMM_DATA_PERR_SET_CH2)
+#define F_IMM_DATA_PERR_SET_CH2 V_IMM_DATA_PERR_SET_CH2(1U)
+
+#define S_IMM_DATA_PERR_SET_CH1 1
+#define V_IMM_DATA_PERR_SET_CH1(x) ((x) << S_IMM_DATA_PERR_SET_CH1)
+#define F_IMM_DATA_PERR_SET_CH1 V_IMM_DATA_PERR_SET_CH1(1U)
+
+#define S_IMM_DATA_PERR_SET_CH0 0
+#define V_IMM_DATA_PERR_SET_CH0(x) ((x) << S_IMM_DATA_PERR_SET_CH0)
+#define F_IMM_DATA_PERR_SET_CH0 V_IMM_DATA_PERR_SET_CH0(1U)
+
+#define A_ULP_TX_INT_CAUSE 0x8dcc
+#define A_ULP_TX_PERR_ENABLE 0x8dd0
+#define A_ULP_TX_TPT_LLIMIT 0x8dd4
+#define A_ULP_TX_TPT_ULIMIT 0x8dd8
+#define A_ULP_TX_PBL_LLIMIT 0x8ddc
+#define A_ULP_TX_PBL_ULIMIT 0x8de0
+#define A_ULP_TX_CPL_ERR_OFFSET 0x8de4
+#define A_ULP_TX_CPL_ERR_MASK_L 0x8de8
+#define A_ULP_TX_CPL_ERR_MASK_H 0x8dec
+#define A_ULP_TX_CPL_ERR_VALUE_L 0x8df0
+#define A_ULP_TX_CPL_ERR_VALUE_H 0x8df4
+#define A_ULP_TX_CPL_PACK_SIZE1 0x8df8
+
+#define S_CH3SIZE1 24
+#define M_CH3SIZE1 0xffU
+#define V_CH3SIZE1(x) ((x) << S_CH3SIZE1)
+#define G_CH3SIZE1(x) (((x) >> S_CH3SIZE1) & M_CH3SIZE1)
+
+#define S_CH2SIZE1 16
+#define M_CH2SIZE1 0xffU
+#define V_CH2SIZE1(x) ((x) << S_CH2SIZE1)
+#define G_CH2SIZE1(x) (((x) >> S_CH2SIZE1) & M_CH2SIZE1)
+
+#define S_CH1SIZE1 8
+#define M_CH1SIZE1 0xffU
+#define V_CH1SIZE1(x) ((x) << S_CH1SIZE1)
+#define G_CH1SIZE1(x) (((x) >> S_CH1SIZE1) & M_CH1SIZE1)
+
+#define S_CH0SIZE1 0
+#define M_CH0SIZE1 0xffU
+#define V_CH0SIZE1(x) ((x) << S_CH0SIZE1)
+#define G_CH0SIZE1(x) (((x) >> S_CH0SIZE1) & M_CH0SIZE1)
+
+#define A_ULP_TX_CPL_PACK_SIZE2 0x8dfc
+
+#define S_CH3SIZE2 24
+#define M_CH3SIZE2 0xffU
+#define V_CH3SIZE2(x) ((x) << S_CH3SIZE2)
+#define G_CH3SIZE2(x) (((x) >> S_CH3SIZE2) & M_CH3SIZE2)
+
+#define S_CH2SIZE2 16
+#define M_CH2SIZE2 0xffU
+#define V_CH2SIZE2(x) ((x) << S_CH2SIZE2)
+#define G_CH2SIZE2(x) (((x) >> S_CH2SIZE2) & M_CH2SIZE2)
+
+#define S_CH1SIZE2 8
+#define M_CH1SIZE2 0xffU
+#define V_CH1SIZE2(x) ((x) << S_CH1SIZE2)
+#define G_CH1SIZE2(x) (((x) >> S_CH1SIZE2) & M_CH1SIZE2)
+
+#define S_CH0SIZE2 0
+#define M_CH0SIZE2 0xffU
+#define V_CH0SIZE2(x) ((x) << S_CH0SIZE2)
+#define G_CH0SIZE2(x) (((x) >> S_CH0SIZE2) & M_CH0SIZE2)
+
+#define A_ULP_TX_ERR_MSG2CIM 0x8e00
+#define A_ULP_TX_ERR_TABLE_BASE 0x8e04
+#define A_ULP_TX_ERR_CNT_CH0 0x8e10
+
+#define S_ERR_CNT0 0
+#define M_ERR_CNT0 0xfffffU
+#define V_ERR_CNT0(x) ((x) << S_ERR_CNT0)
+#define G_ERR_CNT0(x) (((x) >> S_ERR_CNT0) & M_ERR_CNT0)
+
+#define A_ULP_TX_ERR_CNT_CH1 0x8e14
+
+#define S_ERR_CNT1 0
+#define M_ERR_CNT1 0xfffffU
+#define V_ERR_CNT1(x) ((x) << S_ERR_CNT1)
+#define G_ERR_CNT1(x) (((x) >> S_ERR_CNT1) & M_ERR_CNT1)
+
+#define A_ULP_TX_ERR_CNT_CH2 0x8e18
+
+#define S_ERR_CNT2 0
+#define M_ERR_CNT2 0xfffffU
+#define V_ERR_CNT2(x) ((x) << S_ERR_CNT2)
+#define G_ERR_CNT2(x) (((x) >> S_ERR_CNT2) & M_ERR_CNT2)
+
+#define A_ULP_TX_ERR_CNT_CH3 0x8e1c
+
+#define S_ERR_CNT3 0
+#define M_ERR_CNT3 0xfffffU
+#define V_ERR_CNT3(x) ((x) << S_ERR_CNT3)
+#define G_ERR_CNT3(x) (((x) >> S_ERR_CNT3) & M_ERR_CNT3)
+
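+/*
+ * Illustrative note, not part of the original change: the S_/M_/V_/G_/F_
+ * macros in this file follow the usual Chelsio convention.  S_FOO is the
+ * field's bit offset, M_FOO its mask, V_FOO(x) shifts a value into place,
+ * G_FOO(reg) extracts the field from a register word, and F_FOO is the
+ * single-bit flag form.  A hypothetical read of the channel-0 ULP TX error
+ * count, assuming a t4_read_reg()-style accessor and an adapter handle sc:
+ *
+ *	uint32_t v = t4_read_reg(sc, A_ULP_TX_ERR_CNT_CH0);
+ *	uint32_t errs = G_ERR_CNT0(v);
+ */
+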
+#define A_ULP_TX_ULP2TP_BIST_CMD 0x8e30
+#define A_ULP_TX_ULP2TP_BIST_ERROR_CNT 0x8e34
+#define A_ULP_TX_FPGA_CMD_CTRL 0x8e38
+#define A_ULP_TX_FPGA_CMD_0 0x8e3c
+#define A_ULP_TX_FPGA_CMD_1 0x8e40
+#define A_ULP_TX_FPGA_CMD_2 0x8e44
+#define A_ULP_TX_FPGA_CMD_3 0x8e48
+#define A_ULP_TX_FPGA_CMD_4 0x8e4c
+#define A_ULP_TX_FPGA_CMD_5 0x8e50
+#define A_ULP_TX_FPGA_CMD_6 0x8e54
+#define A_ULP_TX_FPGA_CMD_7 0x8e58
+#define A_ULP_TX_FPGA_CMD_8 0x8e5c
+#define A_ULP_TX_FPGA_CMD_9 0x8e60
+#define A_ULP_TX_FPGA_CMD_10 0x8e64
+#define A_ULP_TX_FPGA_CMD_11 0x8e68
+#define A_ULP_TX_FPGA_CMD_12 0x8e6c
+#define A_ULP_TX_FPGA_CMD_13 0x8e70
+#define A_ULP_TX_FPGA_CMD_14 0x8e74
+#define A_ULP_TX_FPGA_CMD_15 0x8e78
+#define A_ULP_TX_SE_CNT_ERR 0x8ea0
+
+#define S_ERR_CH3 12
+#define M_ERR_CH3 0xfU
+#define V_ERR_CH3(x) ((x) << S_ERR_CH3)
+#define G_ERR_CH3(x) (((x) >> S_ERR_CH3) & M_ERR_CH3)
+
+#define S_ERR_CH2 8
+#define M_ERR_CH2 0xfU
+#define V_ERR_CH2(x) ((x) << S_ERR_CH2)
+#define G_ERR_CH2(x) (((x) >> S_ERR_CH2) & M_ERR_CH2)
+
+#define S_ERR_CH1 4
+#define M_ERR_CH1 0xfU
+#define V_ERR_CH1(x) ((x) << S_ERR_CH1)
+#define G_ERR_CH1(x) (((x) >> S_ERR_CH1) & M_ERR_CH1)
+
+#define S_ERR_CH0 0
+#define M_ERR_CH0 0xfU
+#define V_ERR_CH0(x) ((x) << S_ERR_CH0)
+#define G_ERR_CH0(x) (((x) >> S_ERR_CH0) & M_ERR_CH0)
+
+#define A_ULP_TX_SE_CNT_CLR 0x8ea4
+
+#define S_CLR_DROP 16
+#define M_CLR_DROP 0xfU
+#define V_CLR_DROP(x) ((x) << S_CLR_DROP)
+#define G_CLR_DROP(x) (((x) >> S_CLR_DROP) & M_CLR_DROP)
+
+#define S_CLR_CH3 12
+#define M_CLR_CH3 0xfU
+#define V_CLR_CH3(x) ((x) << S_CLR_CH3)
+#define G_CLR_CH3(x) (((x) >> S_CLR_CH3) & M_CLR_CH3)
+
+#define S_CLR_CH2 8
+#define M_CLR_CH2 0xfU
+#define V_CLR_CH2(x) ((x) << S_CLR_CH2)
+#define G_CLR_CH2(x) (((x) >> S_CLR_CH2) & M_CLR_CH2)
+
+#define S_CLR_CH1 4
+#define M_CLR_CH1 0xfU
+#define V_CLR_CH1(x) ((x) << S_CLR_CH1)
+#define G_CLR_CH1(x) (((x) >> S_CLR_CH1) & M_CLR_CH1)
+
+#define S_CLR_CH0 0
+#define M_CLR_CH0 0xfU
+#define V_CLR_CH0(x) ((x) << S_CLR_CH0)
+#define G_CLR_CH0(x) (((x) >> S_CLR_CH0) & M_CLR_CH0)
+
+#define A_ULP_TX_SE_CNT_CH0 0x8ea8
+
+#define S_SOP_CNT_ULP2TP 28
+#define M_SOP_CNT_ULP2TP 0xfU
+#define V_SOP_CNT_ULP2TP(x) ((x) << S_SOP_CNT_ULP2TP)
+#define G_SOP_CNT_ULP2TP(x) (((x) >> S_SOP_CNT_ULP2TP) & M_SOP_CNT_ULP2TP)
+
+#define S_EOP_CNT_ULP2TP 24
+#define M_EOP_CNT_ULP2TP 0xfU
+#define V_EOP_CNT_ULP2TP(x) ((x) << S_EOP_CNT_ULP2TP)
+#define G_EOP_CNT_ULP2TP(x) (((x) >> S_EOP_CNT_ULP2TP) & M_EOP_CNT_ULP2TP)
+
+#define S_SOP_CNT_LSO_IN 20
+#define M_SOP_CNT_LSO_IN 0xfU
+#define V_SOP_CNT_LSO_IN(x) ((x) << S_SOP_CNT_LSO_IN)
+#define G_SOP_CNT_LSO_IN(x) (((x) >> S_SOP_CNT_LSO_IN) & M_SOP_CNT_LSO_IN)
+
+#define S_EOP_CNT_LSO_IN 16
+#define M_EOP_CNT_LSO_IN 0xfU
+#define V_EOP_CNT_LSO_IN(x) ((x) << S_EOP_CNT_LSO_IN)
+#define G_EOP_CNT_LSO_IN(x) (((x) >> S_EOP_CNT_LSO_IN) & M_EOP_CNT_LSO_IN)
+
+#define S_SOP_CNT_ALG_IN 12
+#define M_SOP_CNT_ALG_IN 0xfU
+#define V_SOP_CNT_ALG_IN(x) ((x) << S_SOP_CNT_ALG_IN)
+#define G_SOP_CNT_ALG_IN(x) (((x) >> S_SOP_CNT_ALG_IN) & M_SOP_CNT_ALG_IN)
+
+#define S_EOP_CNT_ALG_IN 8
+#define M_EOP_CNT_ALG_IN 0xfU
+#define V_EOP_CNT_ALG_IN(x) ((x) << S_EOP_CNT_ALG_IN)
+#define G_EOP_CNT_ALG_IN(x) (((x) >> S_EOP_CNT_ALG_IN) & M_EOP_CNT_ALG_IN)
+
+#define S_SOP_CNT_CIM2ULP 4
+#define M_SOP_CNT_CIM2ULP 0xfU
+#define V_SOP_CNT_CIM2ULP(x) ((x) << S_SOP_CNT_CIM2ULP)
+#define G_SOP_CNT_CIM2ULP(x) (((x) >> S_SOP_CNT_CIM2ULP) & M_SOP_CNT_CIM2ULP)
+
+#define S_EOP_CNT_CIM2ULP 0
+#define M_EOP_CNT_CIM2ULP 0xfU
+#define V_EOP_CNT_CIM2ULP(x) ((x) << S_EOP_CNT_CIM2ULP)
+#define G_EOP_CNT_CIM2ULP(x) (((x) >> S_EOP_CNT_CIM2ULP) & M_EOP_CNT_CIM2ULP)
+
+#define A_ULP_TX_SE_CNT_CH1 0x8eac
+#define A_ULP_TX_SE_CNT_CH2 0x8eb0
+#define A_ULP_TX_SE_CNT_CH3 0x8eb4
+#define A_ULP_TX_DROP_CNT 0x8eb8
+
+#define S_DROP_CH3 12
+#define M_DROP_CH3 0xfU
+#define V_DROP_CH3(x) ((x) << S_DROP_CH3)
+#define G_DROP_CH3(x) (((x) >> S_DROP_CH3) & M_DROP_CH3)
+
+#define S_DROP_CH2 8
+#define M_DROP_CH2 0xfU
+#define V_DROP_CH2(x) ((x) << S_DROP_CH2)
+#define G_DROP_CH2(x) (((x) >> S_DROP_CH2) & M_DROP_CH2)
+
+#define S_DROP_CH1 4
+#define M_DROP_CH1 0xfU
+#define V_DROP_CH1(x) ((x) << S_DROP_CH1)
+#define G_DROP_CH1(x) (((x) >> S_DROP_CH1) & M_DROP_CH1)
+
+#define S_DROP_CH0 0
+#define M_DROP_CH0 0xfU
+#define V_DROP_CH0(x) ((x) << S_DROP_CH0)
+#define G_DROP_CH0(x) (((x) >> S_DROP_CH0) & M_DROP_CH0)
+
+#define A_ULP_TX_LA_RDPTR_0 0x8ec0
+#define A_ULP_TX_LA_RDDATA_0 0x8ec4
+#define A_ULP_TX_LA_WRPTR_0 0x8ec8
+#define A_ULP_TX_LA_RESERVED_0 0x8ecc
+#define A_ULP_TX_LA_RDPTR_1 0x8ed0
+#define A_ULP_TX_LA_RDDATA_1 0x8ed4
+#define A_ULP_TX_LA_WRPTR_1 0x8ed8
+#define A_ULP_TX_LA_RESERVED_1 0x8edc
+#define A_ULP_TX_LA_RDPTR_2 0x8ee0
+#define A_ULP_TX_LA_RDDATA_2 0x8ee4
+#define A_ULP_TX_LA_WRPTR_2 0x8ee8
+#define A_ULP_TX_LA_RESERVED_2 0x8eec
+#define A_ULP_TX_LA_RDPTR_3 0x8ef0
+#define A_ULP_TX_LA_RDDATA_3 0x8ef4
+#define A_ULP_TX_LA_WRPTR_3 0x8ef8
+#define A_ULP_TX_LA_RESERVED_3 0x8efc
+#define A_ULP_TX_LA_RDPTR_4 0x8f00
+#define A_ULP_TX_LA_RDDATA_4 0x8f04
+#define A_ULP_TX_LA_WRPTR_4 0x8f08
+#define A_ULP_TX_LA_RESERVED_4 0x8f0c
+#define A_ULP_TX_LA_RDPTR_5 0x8f10
+#define A_ULP_TX_LA_RDDATA_5 0x8f14
+#define A_ULP_TX_LA_WRPTR_5 0x8f18
+#define A_ULP_TX_LA_RESERVED_5 0x8f1c
+#define A_ULP_TX_LA_RDPTR_6 0x8f20
+#define A_ULP_TX_LA_RDDATA_6 0x8f24
+#define A_ULP_TX_LA_WRPTR_6 0x8f28
+#define A_ULP_TX_LA_RESERVED_6 0x8f2c
+#define A_ULP_TX_LA_RDPTR_7 0x8f30
+#define A_ULP_TX_LA_RDDATA_7 0x8f34
+#define A_ULP_TX_LA_WRPTR_7 0x8f38
+#define A_ULP_TX_LA_RESERVED_7 0x8f3c
+#define A_ULP_TX_LA_RDPTR_8 0x8f40
+#define A_ULP_TX_LA_RDDATA_8 0x8f44
+#define A_ULP_TX_LA_WRPTR_8 0x8f48
+#define A_ULP_TX_LA_RESERVED_8 0x8f4c
+#define A_ULP_TX_LA_RDPTR_9 0x8f50
+#define A_ULP_TX_LA_RDDATA_9 0x8f54
+#define A_ULP_TX_LA_WRPTR_9 0x8f58
+#define A_ULP_TX_LA_RESERVED_9 0x8f5c
+#define A_ULP_TX_LA_RDPTR_10 0x8f60
+#define A_ULP_TX_LA_RDDATA_10 0x8f64
+#define A_ULP_TX_LA_WRPTR_10 0x8f68
+#define A_ULP_TX_LA_RESERVED_10 0x8f6c
+
+/* registers for module PM_RX */
+#define PM_RX_BASE_ADDR 0x8fc0
+
+#define A_PM_RX_CFG 0x8fc0
+#define A_PM_RX_MODE 0x8fc4
+
+#define S_RX_USE_BUNDLE_LEN 4
+#define V_RX_USE_BUNDLE_LEN(x) ((x) << S_RX_USE_BUNDLE_LEN)
+#define F_RX_USE_BUNDLE_LEN V_RX_USE_BUNDLE_LEN(1U)
+
+#define S_STAT_TO_CH 3
+#define V_STAT_TO_CH(x) ((x) << S_STAT_TO_CH)
+#define F_STAT_TO_CH V_STAT_TO_CH(1U)
+
+#define S_STAT_FROM_CH 1
+#define M_STAT_FROM_CH 0x3U
+#define V_STAT_FROM_CH(x) ((x) << S_STAT_FROM_CH)
+#define G_STAT_FROM_CH(x) (((x) >> S_STAT_FROM_CH) & M_STAT_FROM_CH)
+
+#define S_PREFETCH_ENABLE 0
+#define V_PREFETCH_ENABLE(x) ((x) << S_PREFETCH_ENABLE)
+#define F_PREFETCH_ENABLE V_PREFETCH_ENABLE(1U)
+
+#define A_PM_RX_STAT_CONFIG 0x8fc8
+#define A_PM_RX_STAT_COUNT 0x8fcc
+#define A_PM_RX_STAT_LSB 0x8fd0
+#define A_PM_RX_STAT_MSB 0x8fd4
+#define A_PM_RX_INT_ENABLE 0x8fd8
+
+#define S_ZERO_E_CMD_ERROR 22
+#define V_ZERO_E_CMD_ERROR(x) ((x) << S_ZERO_E_CMD_ERROR)
+#define F_ZERO_E_CMD_ERROR V_ZERO_E_CMD_ERROR(1U)
+
+#define S_IESPI0_FIFO2X_RX_FRAMING_ERROR 21
+#define V_IESPI0_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_IESPI0_FIFO2X_RX_FRAMING_ERROR)
+#define F_IESPI0_FIFO2X_RX_FRAMING_ERROR V_IESPI0_FIFO2X_RX_FRAMING_ERROR(1U)
+
+#define S_IESPI1_FIFO2X_RX_FRAMING_ERROR 20
+#define V_IESPI1_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_IESPI1_FIFO2X_RX_FRAMING_ERROR)
+#define F_IESPI1_FIFO2X_RX_FRAMING_ERROR V_IESPI1_FIFO2X_RX_FRAMING_ERROR(1U)
+
+#define S_IESPI2_FIFO2X_RX_FRAMING_ERROR 19
+#define V_IESPI2_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_IESPI2_FIFO2X_RX_FRAMING_ERROR)
+#define F_IESPI2_FIFO2X_RX_FRAMING_ERROR V_IESPI2_FIFO2X_RX_FRAMING_ERROR(1U)
+
+#define S_IESPI3_FIFO2X_RX_FRAMING_ERROR 18
+#define V_IESPI3_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_IESPI3_FIFO2X_RX_FRAMING_ERROR)
+#define F_IESPI3_FIFO2X_RX_FRAMING_ERROR V_IESPI3_FIFO2X_RX_FRAMING_ERROR(1U)
+
+#define S_IESPI0_RX_FRAMING_ERROR 17
+#define V_IESPI0_RX_FRAMING_ERROR(x) ((x) << S_IESPI0_RX_FRAMING_ERROR)
+#define F_IESPI0_RX_FRAMING_ERROR V_IESPI0_RX_FRAMING_ERROR(1U)
+
+#define S_IESPI1_RX_FRAMING_ERROR 16
+#define V_IESPI1_RX_FRAMING_ERROR(x) ((x) << S_IESPI1_RX_FRAMING_ERROR)
+#define F_IESPI1_RX_FRAMING_ERROR V_IESPI1_RX_FRAMING_ERROR(1U)
+
+#define S_IESPI2_RX_FRAMING_ERROR 15
+#define V_IESPI2_RX_FRAMING_ERROR(x) ((x) << S_IESPI2_RX_FRAMING_ERROR)
+#define F_IESPI2_RX_FRAMING_ERROR V_IESPI2_RX_FRAMING_ERROR(1U)
+
+#define S_IESPI3_RX_FRAMING_ERROR 14
+#define V_IESPI3_RX_FRAMING_ERROR(x) ((x) << S_IESPI3_RX_FRAMING_ERROR)
+#define F_IESPI3_RX_FRAMING_ERROR V_IESPI3_RX_FRAMING_ERROR(1U)
+
+#define S_IESPI0_TX_FRAMING_ERROR 13
+#define V_IESPI0_TX_FRAMING_ERROR(x) ((x) << S_IESPI0_TX_FRAMING_ERROR)
+#define F_IESPI0_TX_FRAMING_ERROR V_IESPI0_TX_FRAMING_ERROR(1U)
+
+#define S_IESPI1_TX_FRAMING_ERROR 12
+#define V_IESPI1_TX_FRAMING_ERROR(x) ((x) << S_IESPI1_TX_FRAMING_ERROR)
+#define F_IESPI1_TX_FRAMING_ERROR V_IESPI1_TX_FRAMING_ERROR(1U)
+
+#define S_IESPI2_TX_FRAMING_ERROR 11
+#define V_IESPI2_TX_FRAMING_ERROR(x) ((x) << S_IESPI2_TX_FRAMING_ERROR)
+#define F_IESPI2_TX_FRAMING_ERROR V_IESPI2_TX_FRAMING_ERROR(1U)
+
+#define S_IESPI3_TX_FRAMING_ERROR 10
+#define V_IESPI3_TX_FRAMING_ERROR(x) ((x) << S_IESPI3_TX_FRAMING_ERROR)
+#define F_IESPI3_TX_FRAMING_ERROR V_IESPI3_TX_FRAMING_ERROR(1U)
+
+#define S_OCSPI0_RX_FRAMING_ERROR 9
+#define V_OCSPI0_RX_FRAMING_ERROR(x) ((x) << S_OCSPI0_RX_FRAMING_ERROR)
+#define F_OCSPI0_RX_FRAMING_ERROR V_OCSPI0_RX_FRAMING_ERROR(1U)
+
+#define S_OCSPI1_RX_FRAMING_ERROR 8
+#define V_OCSPI1_RX_FRAMING_ERROR(x) ((x) << S_OCSPI1_RX_FRAMING_ERROR)
+#define F_OCSPI1_RX_FRAMING_ERROR V_OCSPI1_RX_FRAMING_ERROR(1U)
+
+#define S_OCSPI0_TX_FRAMING_ERROR 7
+#define V_OCSPI0_TX_FRAMING_ERROR(x) ((x) << S_OCSPI0_TX_FRAMING_ERROR)
+#define F_OCSPI0_TX_FRAMING_ERROR V_OCSPI0_TX_FRAMING_ERROR(1U)
+
+#define S_OCSPI1_TX_FRAMING_ERROR 6
+#define V_OCSPI1_TX_FRAMING_ERROR(x) ((x) << S_OCSPI1_TX_FRAMING_ERROR)
+#define F_OCSPI1_TX_FRAMING_ERROR V_OCSPI1_TX_FRAMING_ERROR(1U)
+
+#define S_OCSPI0_OFIFO2X_TX_FRAMING_ERROR 5
+#define V_OCSPI0_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OCSPI0_OFIFO2X_TX_FRAMING_ERROR)
+#define F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR V_OCSPI0_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define S_OCSPI1_OFIFO2X_TX_FRAMING_ERROR 4
+#define V_OCSPI1_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
+#define F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR V_OCSPI1_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define S_OCSPI_PAR_ERROR 3
+#define V_OCSPI_PAR_ERROR(x) ((x) << S_OCSPI_PAR_ERROR)
+#define F_OCSPI_PAR_ERROR V_OCSPI_PAR_ERROR(1U)
+
+#define S_DB_OPTIONS_PAR_ERROR 2
+#define V_DB_OPTIONS_PAR_ERROR(x) ((x) << S_DB_OPTIONS_PAR_ERROR)
+#define F_DB_OPTIONS_PAR_ERROR V_DB_OPTIONS_PAR_ERROR(1U)
+
+#define S_IESPI_PAR_ERROR 1
+#define V_IESPI_PAR_ERROR(x) ((x) << S_IESPI_PAR_ERROR)
+#define F_IESPI_PAR_ERROR V_IESPI_PAR_ERROR(1U)
+
+#define S_E_PCMD_PAR_ERROR 0
+#define V_E_PCMD_PAR_ERROR(x) ((x) << S_E_PCMD_PAR_ERROR)
+#define F_E_PCMD_PAR_ERROR V_E_PCMD_PAR_ERROR(1U)
+
+#define A_PM_RX_INT_CAUSE 0x8fdc
+
+/* registers for module PM_TX */
+#define PM_TX_BASE_ADDR 0x8fe0
+
+#define A_PM_TX_CFG 0x8fe0
+
+#define S_CH3_OUTPUT 17
+#define M_CH3_OUTPUT 0x1fU
+#define V_CH3_OUTPUT(x) ((x) << S_CH3_OUTPUT)
+#define G_CH3_OUTPUT(x) (((x) >> S_CH3_OUTPUT) & M_CH3_OUTPUT)
+
+#define A_PM_TX_MODE 0x8fe4
+
+#define S_CONG_THRESH3 25
+#define M_CONG_THRESH3 0x7fU
+#define V_CONG_THRESH3(x) ((x) << S_CONG_THRESH3)
+#define G_CONG_THRESH3(x) (((x) >> S_CONG_THRESH3) & M_CONG_THRESH3)
+
+#define S_CONG_THRESH2 18
+#define M_CONG_THRESH2 0x7fU
+#define V_CONG_THRESH2(x) ((x) << S_CONG_THRESH2)
+#define G_CONG_THRESH2(x) (((x) >> S_CONG_THRESH2) & M_CONG_THRESH2)
+
+#define S_CONG_THRESH1 11
+#define M_CONG_THRESH1 0x7fU
+#define V_CONG_THRESH1(x) ((x) << S_CONG_THRESH1)
+#define G_CONG_THRESH1(x) (((x) >> S_CONG_THRESH1) & M_CONG_THRESH1)
+
+#define S_CONG_THRESH0 4
+#define M_CONG_THRESH0 0x7fU
+#define V_CONG_THRESH0(x) ((x) << S_CONG_THRESH0)
+#define G_CONG_THRESH0(x) (((x) >> S_CONG_THRESH0) & M_CONG_THRESH0)
+
+#define S_TX_USE_BUNDLE_LEN 3
+#define V_TX_USE_BUNDLE_LEN(x) ((x) << S_TX_USE_BUNDLE_LEN)
+#define F_TX_USE_BUNDLE_LEN V_TX_USE_BUNDLE_LEN(1U)
+
+#define S_STAT_CHANNEL 1
+#define M_STAT_CHANNEL 0x3U
+#define V_STAT_CHANNEL(x) ((x) << S_STAT_CHANNEL)
+#define G_STAT_CHANNEL(x) (((x) >> S_STAT_CHANNEL) & M_STAT_CHANNEL)
+
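+/*
+ * Illustrative note, not part of the original change: multi-bit fields such
+ * as CONG_THRESH0..3 above are normally updated read-modify-write, clearing
+ * the field with its V_/M_ pair before inserting the new value.  A sketch,
+ * assuming t4_read_reg()/t4_write_reg()-style accessors:
+ *
+ *	uint32_t v = t4_read_reg(sc, A_PM_TX_MODE);
+ *	v = (v & ~V_CONG_THRESH0(M_CONG_THRESH0)) | V_CONG_THRESH0(thresh);
+ *	t4_write_reg(sc, A_PM_TX_MODE, v);
+ */
+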
+#define A_PM_TX_STAT_CONFIG 0x8fe8
+#define A_PM_TX_STAT_COUNT 0x8fec
+#define A_PM_TX_STAT_LSB 0x8ff0
+#define A_PM_TX_STAT_MSB 0x8ff4
+#define A_PM_TX_INT_ENABLE 0x8ff8
+
+#define S_PCMD_LEN_OVFL0 31
+#define V_PCMD_LEN_OVFL0(x) ((x) << S_PCMD_LEN_OVFL0)
+#define F_PCMD_LEN_OVFL0 V_PCMD_LEN_OVFL0(1U)
+
+#define S_PCMD_LEN_OVFL1 30
+#define V_PCMD_LEN_OVFL1(x) ((x) << S_PCMD_LEN_OVFL1)
+#define F_PCMD_LEN_OVFL1 V_PCMD_LEN_OVFL1(1U)
+
+#define S_PCMD_LEN_OVFL2 29
+#define V_PCMD_LEN_OVFL2(x) ((x) << S_PCMD_LEN_OVFL2)
+#define F_PCMD_LEN_OVFL2 V_PCMD_LEN_OVFL2(1U)
+
+#define S_ZERO_C_CMD_ERRO 28
+#define V_ZERO_C_CMD_ERRO(x) ((x) << S_ZERO_C_CMD_ERRO)
+#define F_ZERO_C_CMD_ERRO V_ZERO_C_CMD_ERRO(1U)
+
+#define S_ICSPI0_FIFO2X_RX_FRAMING_ERROR 27
+#define V_ICSPI0_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_ICSPI0_FIFO2X_RX_FRAMING_ERROR)
+#define F_ICSPI0_FIFO2X_RX_FRAMING_ERROR V_ICSPI0_FIFO2X_RX_FRAMING_ERROR(1U)
+
+#define S_ICSPI1_FIFO2X_RX_FRAMING_ERROR 26
+#define V_ICSPI1_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_ICSPI1_FIFO2X_RX_FRAMING_ERROR)
+#define F_ICSPI1_FIFO2X_RX_FRAMING_ERROR V_ICSPI1_FIFO2X_RX_FRAMING_ERROR(1U)
+
+#define S_ICSPI2_FIFO2X_RX_FRAMING_ERROR 25
+#define V_ICSPI2_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_ICSPI2_FIFO2X_RX_FRAMING_ERROR)
+#define F_ICSPI2_FIFO2X_RX_FRAMING_ERROR V_ICSPI2_FIFO2X_RX_FRAMING_ERROR(1U)
+
+#define S_ICSPI3_FIFO2X_RX_FRAMING_ERROR 24
+#define V_ICSPI3_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_ICSPI3_FIFO2X_RX_FRAMING_ERROR)
+#define F_ICSPI3_FIFO2X_RX_FRAMING_ERROR V_ICSPI3_FIFO2X_RX_FRAMING_ERROR(1U)
+
+#define S_ICSPI0_RX_FRAMING_ERROR 23
+#define V_ICSPI0_RX_FRAMING_ERROR(x) ((x) << S_ICSPI0_RX_FRAMING_ERROR)
+#define F_ICSPI0_RX_FRAMING_ERROR V_ICSPI0_RX_FRAMING_ERROR(1U)
+
+#define S_ICSPI1_RX_FRAMING_ERROR 22
+#define V_ICSPI1_RX_FRAMING_ERROR(x) ((x) << S_ICSPI1_RX_FRAMING_ERROR)
+#define F_ICSPI1_RX_FRAMING_ERROR V_ICSPI1_RX_FRAMING_ERROR(1U)
+
+#define S_ICSPI2_RX_FRAMING_ERROR 21
+#define V_ICSPI2_RX_FRAMING_ERROR(x) ((x) << S_ICSPI2_RX_FRAMING_ERROR)
+#define F_ICSPI2_RX_FRAMING_ERROR V_ICSPI2_RX_FRAMING_ERROR(1U)
+
+#define S_ICSPI3_RX_FRAMING_ERROR 20
+#define V_ICSPI3_RX_FRAMING_ERROR(x) ((x) << S_ICSPI3_RX_FRAMING_ERROR)
+#define F_ICSPI3_RX_FRAMING_ERROR V_ICSPI3_RX_FRAMING_ERROR(1U)
+
+#define S_ICSPI0_TX_FRAMING_ERROR 19
+#define V_ICSPI0_TX_FRAMING_ERROR(x) ((x) << S_ICSPI0_TX_FRAMING_ERROR)
+#define F_ICSPI0_TX_FRAMING_ERROR V_ICSPI0_TX_FRAMING_ERROR(1U)
+
+#define S_ICSPI1_TX_FRAMING_ERROR 18
+#define V_ICSPI1_TX_FRAMING_ERROR(x) ((x) << S_ICSPI1_TX_FRAMING_ERROR)
+#define F_ICSPI1_TX_FRAMING_ERROR V_ICSPI1_TX_FRAMING_ERROR(1U)
+
+#define S_ICSPI2_TX_FRAMING_ERROR 17
+#define V_ICSPI2_TX_FRAMING_ERROR(x) ((x) << S_ICSPI2_TX_FRAMING_ERROR)
+#define F_ICSPI2_TX_FRAMING_ERROR V_ICSPI2_TX_FRAMING_ERROR(1U)
+
+#define S_ICSPI3_TX_FRAMING_ERROR 16
+#define V_ICSPI3_TX_FRAMING_ERROR(x) ((x) << S_ICSPI3_TX_FRAMING_ERROR)
+#define F_ICSPI3_TX_FRAMING_ERROR V_ICSPI3_TX_FRAMING_ERROR(1U)
+
+#define S_OESPI0_RX_FRAMING_ERROR 15
+#define V_OESPI0_RX_FRAMING_ERROR(x) ((x) << S_OESPI0_RX_FRAMING_ERROR)
+#define F_OESPI0_RX_FRAMING_ERROR V_OESPI0_RX_FRAMING_ERROR(1U)
+
+#define S_OESPI1_RX_FRAMING_ERROR 14
+#define V_OESPI1_RX_FRAMING_ERROR(x) ((x) << S_OESPI1_RX_FRAMING_ERROR)
+#define F_OESPI1_RX_FRAMING_ERROR V_OESPI1_RX_FRAMING_ERROR(1U)
+
+#define S_OESPI2_RX_FRAMING_ERROR 13
+#define V_OESPI2_RX_FRAMING_ERROR(x) ((x) << S_OESPI2_RX_FRAMING_ERROR)
+#define F_OESPI2_RX_FRAMING_ERROR V_OESPI2_RX_FRAMING_ERROR(1U)
+
+#define S_OESPI3_RX_FRAMING_ERROR 12
+#define V_OESPI3_RX_FRAMING_ERROR(x) ((x) << S_OESPI3_RX_FRAMING_ERROR)
+#define F_OESPI3_RX_FRAMING_ERROR V_OESPI3_RX_FRAMING_ERROR(1U)
+
+#define S_OESPI0_TX_FRAMING_ERROR 11
+#define V_OESPI0_TX_FRAMING_ERROR(x) ((x) << S_OESPI0_TX_FRAMING_ERROR)
+#define F_OESPI0_TX_FRAMING_ERROR V_OESPI0_TX_FRAMING_ERROR(1U)
+
+#define S_OESPI1_TX_FRAMING_ERROR 10
+#define V_OESPI1_TX_FRAMING_ERROR(x) ((x) << S_OESPI1_TX_FRAMING_ERROR)
+#define F_OESPI1_TX_FRAMING_ERROR V_OESPI1_TX_FRAMING_ERROR(1U)
+
+#define S_OESPI2_TX_FRAMING_ERROR 9
+#define V_OESPI2_TX_FRAMING_ERROR(x) ((x) << S_OESPI2_TX_FRAMING_ERROR)
+#define F_OESPI2_TX_FRAMING_ERROR V_OESPI2_TX_FRAMING_ERROR(1U)
+
+#define S_OESPI3_TX_FRAMING_ERROR 8
+#define V_OESPI3_TX_FRAMING_ERROR(x) ((x) << S_OESPI3_TX_FRAMING_ERROR)
+#define F_OESPI3_TX_FRAMING_ERROR V_OESPI3_TX_FRAMING_ERROR(1U)
+
+#define S_OESPI0_OFIFO2X_TX_FRAMING_ERROR 7
+#define V_OESPI0_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OESPI0_OFIFO2X_TX_FRAMING_ERROR)
+#define F_OESPI0_OFIFO2X_TX_FRAMING_ERROR V_OESPI0_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define S_OESPI1_OFIFO2X_TX_FRAMING_ERROR 6
+#define V_OESPI1_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
+#define F_OESPI1_OFIFO2X_TX_FRAMING_ERROR V_OESPI1_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define S_OESPI2_OFIFO2X_TX_FRAMING_ERROR 5
+#define V_OESPI2_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OESPI2_OFIFO2X_TX_FRAMING_ERROR)
+#define F_OESPI2_OFIFO2X_TX_FRAMING_ERROR V_OESPI2_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define S_OESPI3_OFIFO2X_TX_FRAMING_ERROR 4
+#define V_OESPI3_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OESPI3_OFIFO2X_TX_FRAMING_ERROR)
+#define F_OESPI3_OFIFO2X_TX_FRAMING_ERROR V_OESPI3_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define S_OESPI_PAR_ERROR 3
+#define V_OESPI_PAR_ERROR(x) ((x) << S_OESPI_PAR_ERROR)
+#define F_OESPI_PAR_ERROR V_OESPI_PAR_ERROR(1U)
+
+#define S_ICSPI_PAR_ERROR 1
+#define V_ICSPI_PAR_ERROR(x) ((x) << S_ICSPI_PAR_ERROR)
+#define F_ICSPI_PAR_ERROR V_ICSPI_PAR_ERROR(1U)
+
+#define S_C_PCMD_PAR_ERROR 0
+#define V_C_PCMD_PAR_ERROR(x) ((x) << S_C_PCMD_PAR_ERROR)
+#define F_C_PCMD_PAR_ERROR V_C_PCMD_PAR_ERROR(1U)
+
+#define A_PM_TX_INT_CAUSE 0x8ffc
+
+#define S_ZERO_C_CMD_ERROR 28
+#define V_ZERO_C_CMD_ERROR(x) ((x) << S_ZERO_C_CMD_ERROR)
+#define F_ZERO_C_CMD_ERROR V_ZERO_C_CMD_ERROR(1U)
+
+/* registers for module MPS */
+#define MPS_BASE_ADDR 0x9000
+
+#define A_MPS_PORT_CTL 0x0
+
+#define S_LPBKEN 31
+#define V_LPBKEN(x) ((x) << S_LPBKEN)
+#define F_LPBKEN V_LPBKEN(1U)
+
+#define S_PORTTXEN 30
+#define V_PORTTXEN(x) ((x) << S_PORTTXEN)
+#define F_PORTTXEN V_PORTTXEN(1U)
+
+#define S_PORTRXEN 29
+#define V_PORTRXEN(x) ((x) << S_PORTRXEN)
+#define F_PORTRXEN V_PORTRXEN(1U)
+
+#define S_PPPEN 28
+#define V_PPPEN(x) ((x) << S_PPPEN)
+#define F_PPPEN V_PPPEN(1U)
+
+#define S_FCSSTRIPEN 27
+#define V_FCSSTRIPEN(x) ((x) << S_FCSSTRIPEN)
+#define F_FCSSTRIPEN V_FCSSTRIPEN(1U)
+
+#define S_PPPANDPAUSE 26
+#define V_PPPANDPAUSE(x) ((x) << S_PPPANDPAUSE)
+#define F_PPPANDPAUSE V_PPPANDPAUSE(1U)
+
+#define S_PRIOPPPENMAP 16
+#define M_PRIOPPPENMAP 0xffU
+#define V_PRIOPPPENMAP(x) ((x) << S_PRIOPPPENMAP)
+#define G_PRIOPPPENMAP(x) (((x) >> S_PRIOPPPENMAP) & M_PRIOPPPENMAP)
+
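+/*
+ * Illustrative note, not part of the original change: the MPS "PORT"
+ * registers above are offsets within a per-port instance of the MPS block
+ * rather than absolute addresses, so a driver adds a per-port base before
+ * accessing them.  A hypothetical sketch, where mps_port_base() is an
+ * assumed helper returning that base:
+ *
+ *	uint32_t reg = mps_port_base(port) + A_MPS_PORT_CTL;
+ *	uint32_t v = t4_read_reg(sc, reg);
+ *	t4_write_reg(sc, reg, v | F_PORTTXEN | F_PORTRXEN);
+ */
+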
+#define A_MPS_VF_CTL 0x0
+#define A_MPS_PORT_PAUSE_CTL 0x4
+
+#define S_TIMEUNIT 0
+#define M_TIMEUNIT 0xffffU
+#define V_TIMEUNIT(x) ((x) << S_TIMEUNIT)
+#define G_TIMEUNIT(x) (((x) >> S_TIMEUNIT) & M_TIMEUNIT)
+
+#define A_MPS_PORT_TX_PAUSE_CTL 0x8
+
+#define S_REGSENDOFF 24
+#define M_REGSENDOFF 0xffU
+#define V_REGSENDOFF(x) ((x) << S_REGSENDOFF)
+#define G_REGSENDOFF(x) (((x) >> S_REGSENDOFF) & M_REGSENDOFF)
+
+#define S_REGSENDON 16
+#define M_REGSENDON 0xffU
+#define V_REGSENDON(x) ((x) << S_REGSENDON)
+#define G_REGSENDON(x) (((x) >> S_REGSENDON) & M_REGSENDON)
+
+#define S_SGESENDEN 8
+#define M_SGESENDEN 0xffU
+#define V_SGESENDEN(x) ((x) << S_SGESENDEN)
+#define G_SGESENDEN(x) (((x) >> S_SGESENDEN) & M_SGESENDEN)
+
+#define S_RXSENDEN 0
+#define M_RXSENDEN 0xffU
+#define V_RXSENDEN(x) ((x) << S_RXSENDEN)
+#define G_RXSENDEN(x) (((x) >> S_RXSENDEN) & M_RXSENDEN)
+
+#define A_MPS_PORT_TX_PAUSE_CTL2 0xc
+
+#define S_XOFFDISABLE 0
+#define V_XOFFDISABLE(x) ((x) << S_XOFFDISABLE)
+#define F_XOFFDISABLE V_XOFFDISABLE(1U)
+
+#define A_MPS_PORT_RX_PAUSE_CTL 0x10
+
+#define S_REGHALTON 8
+#define M_REGHALTON 0xffU
+#define V_REGHALTON(x) ((x) << S_REGHALTON)
+#define G_REGHALTON(x) (((x) >> S_REGHALTON) & M_REGHALTON)
+
+#define S_RXHALTEN 0
+#define M_RXHALTEN 0xffU
+#define V_RXHALTEN(x) ((x) << S_RXHALTEN)
+#define G_RXHALTEN(x) (((x) >> S_RXHALTEN) & M_RXHALTEN)
+
+#define A_MPS_PORT_TX_PAUSE_STATUS 0x14
+
+#define S_REGSENDING 16
+#define M_REGSENDING 0xffU
+#define V_REGSENDING(x) ((x) << S_REGSENDING)
+#define G_REGSENDING(x) (((x) >> S_REGSENDING) & M_REGSENDING)
+
+#define S_SGESENDING 8
+#define M_SGESENDING 0xffU
+#define V_SGESENDING(x) ((x) << S_SGESENDING)
+#define G_SGESENDING(x) (((x) >> S_SGESENDING) & M_SGESENDING)
+
+#define S_RXSENDING 0
+#define M_RXSENDING 0xffU
+#define V_RXSENDING(x) ((x) << S_RXSENDING)
+#define G_RXSENDING(x) (((x) >> S_RXSENDING) & M_RXSENDING)
+
+#define A_MPS_PORT_RX_PAUSE_STATUS 0x18
+
+#define S_REGHALTED 8
+#define M_REGHALTED 0xffU
+#define V_REGHALTED(x) ((x) << S_REGHALTED)
+#define G_REGHALTED(x) (((x) >> S_REGHALTED) & M_REGHALTED)
+
+#define S_RXHALTED 0
+#define M_RXHALTED 0xffU
+#define V_RXHALTED(x) ((x) << S_RXHALTED)
+#define G_RXHALTED(x) (((x) >> S_RXHALTED) & M_RXHALTED)
+
+#define A_MPS_PORT_TX_PAUSE_DEST_L 0x1c
+#define A_MPS_PORT_TX_PAUSE_DEST_H 0x20
+
+#define S_ADDR 0
+#define M_ADDR 0xffffU
+#define V_ADDR(x) ((x) << S_ADDR)
+#define G_ADDR(x) (((x) >> S_ADDR) & M_ADDR)
+
+#define A_MPS_PORT_TX_PAUSE_SOURCE_L 0x24
+#define A_MPS_PORT_TX_PAUSE_SOURCE_H 0x28
+#define A_MPS_PORT_PRTY_BUFFER_GROUP_MAP 0x2c
+
+#define S_PRTY7 14
+#define M_PRTY7 0x3U
+#define V_PRTY7(x) ((x) << S_PRTY7)
+#define G_PRTY7(x) (((x) >> S_PRTY7) & M_PRTY7)
+
+#define S_PRTY6 12
+#define M_PRTY6 0x3U
+#define V_PRTY6(x) ((x) << S_PRTY6)
+#define G_PRTY6(x) (((x) >> S_PRTY6) & M_PRTY6)
+
+#define S_PRTY5 10
+#define M_PRTY5 0x3U
+#define V_PRTY5(x) ((x) << S_PRTY5)
+#define G_PRTY5(x) (((x) >> S_PRTY5) & M_PRTY5)
+
+#define S_PRTY4 8
+#define M_PRTY4 0x3U
+#define V_PRTY4(x) ((x) << S_PRTY4)
+#define G_PRTY4(x) (((x) >> S_PRTY4) & M_PRTY4)
+
+#define S_PRTY3 6
+#define M_PRTY3 0x3U
+#define V_PRTY3(x) ((x) << S_PRTY3)
+#define G_PRTY3(x) (((x) >> S_PRTY3) & M_PRTY3)
+
+#define S_PRTY2 4
+#define M_PRTY2 0x3U
+#define V_PRTY2(x) ((x) << S_PRTY2)
+#define G_PRTY2(x) (((x) >> S_PRTY2) & M_PRTY2)
+
+#define S_PRTY1 2
+#define M_PRTY1 0x3U
+#define V_PRTY1(x) ((x) << S_PRTY1)
+#define G_PRTY1(x) (((x) >> S_PRTY1) & M_PRTY1)
+
+#define S_PRTY0 0
+#define M_PRTY0 0x3U
+#define V_PRTY0(x) ((x) << S_PRTY0)
+#define G_PRTY0(x) (((x) >> S_PRTY0) & M_PRTY0)
+
+#define A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L 0x80
+#define A_MPS_VF_STAT_TX_VF_BCAST_BYTES_H 0x84
+#define A_MPS_VF_STAT_TX_VF_BCAST_FRAMES_L 0x88
+#define A_MPS_VF_STAT_TX_VF_BCAST_FRAMES_H 0x8c
+#define A_MPS_VF_STAT_TX_VF_MCAST_BYTES_L 0x90
+#define A_MPS_VF_STAT_TX_VF_MCAST_BYTES_H 0x94
+#define A_MPS_VF_STAT_TX_VF_MCAST_FRAMES_L 0x98
+#define A_MPS_VF_STAT_TX_VF_MCAST_FRAMES_H 0x9c
+#define A_MPS_VF_STAT_TX_VF_UCAST_BYTES_L 0xa0
+#define A_MPS_VF_STAT_TX_VF_UCAST_BYTES_H 0xa4
+#define A_MPS_VF_STAT_TX_VF_UCAST_FRAMES_L 0xa8
+#define A_MPS_VF_STAT_TX_VF_UCAST_FRAMES_H 0xac
+#define A_MPS_VF_STAT_TX_VF_DROP_FRAMES_L 0xb0
+#define A_MPS_VF_STAT_TX_VF_DROP_FRAMES_H 0xb4
+#define A_MPS_VF_STAT_TX_VF_OFFLOAD_BYTES_L 0xb8
+#define A_MPS_VF_STAT_TX_VF_OFFLOAD_BYTES_H 0xbc
+#define A_MPS_VF_STAT_TX_VF_OFFLOAD_FRAMES_L 0xc0
+#define A_MPS_VF_STAT_TX_VF_OFFLOAD_FRAMES_H 0xc4
+#define A_MPS_VF_STAT_RX_VF_BCAST_BYTES_L 0xc8
+#define A_MPS_VF_STAT_RX_VF_BCAST_BYTES_H 0xcc
+#define A_MPS_VF_STAT_RX_VF_BCAST_FRAMES_L 0xd0
+#define A_MPS_VF_STAT_RX_VF_BCAST_FRAMES_H 0xd4
+#define A_MPS_VF_STAT_RX_VF_MCAST_BYTES_L 0xd8
+#define A_MPS_VF_STAT_RX_VF_MCAST_BYTES_H 0xdc
+#define A_MPS_VF_STAT_RX_VF_MCAST_FRAMES_L 0xe0
+#define A_MPS_VF_STAT_RX_VF_MCAST_FRAMES_H 0xe4
+#define A_MPS_VF_STAT_RX_VF_UCAST_BYTES_L 0xe8
+#define A_MPS_VF_STAT_RX_VF_UCAST_BYTES_H 0xec
+#define A_MPS_VF_STAT_RX_VF_UCAST_FRAMES_L 0xf0
+#define A_MPS_VF_STAT_RX_VF_UCAST_FRAMES_H 0xf4
+#define A_MPS_VF_STAT_RX_VF_ERR_FRAMES_L 0xf8
+#define A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H 0xfc
+#define A_MPS_PORT_RX_CTL 0x100
+
+#define S_NO_RPLCT_M 20
+#define V_NO_RPLCT_M(x) ((x) << S_NO_RPLCT_M)
+#define F_NO_RPLCT_M V_NO_RPLCT_M(1U)
+
+#define S_RPLCT_SEL_L 18
+#define M_RPLCT_SEL_L 0x3U
+#define V_RPLCT_SEL_L(x) ((x) << S_RPLCT_SEL_L)
+#define G_RPLCT_SEL_L(x) (((x) >> S_RPLCT_SEL_L) & M_RPLCT_SEL_L)
+
+#define S_FLTR_VLAN_SEL 17
+#define V_FLTR_VLAN_SEL(x) ((x) << S_FLTR_VLAN_SEL)
+#define F_FLTR_VLAN_SEL V_FLTR_VLAN_SEL(1U)
+
+#define S_PRIO_VLAN_SEL 16
+#define V_PRIO_VLAN_SEL(x) ((x) << S_PRIO_VLAN_SEL)
+#define F_PRIO_VLAN_SEL V_PRIO_VLAN_SEL(1U)
+
+#define S_CHK_8023_LEN_M 15
+#define V_CHK_8023_LEN_M(x) ((x) << S_CHK_8023_LEN_M)
+#define F_CHK_8023_LEN_M V_CHK_8023_LEN_M(1U)
+
+#define S_CHK_8023_LEN_L 14
+#define V_CHK_8023_LEN_L(x) ((x) << S_CHK_8023_LEN_L)
+#define F_CHK_8023_LEN_L V_CHK_8023_LEN_L(1U)
+
+#define S_NIV_DROP 13
+#define V_NIV_DROP(x) ((x) << S_NIV_DROP)
+#define F_NIV_DROP V_NIV_DROP(1U)
+
+#define S_NOV_DROP 12
+#define V_NOV_DROP(x) ((x) << S_NOV_DROP)
+#define F_NOV_DROP V_NOV_DROP(1U)
+
+#define S_CLS_PRT 11
+#define V_CLS_PRT(x) ((x) << S_CLS_PRT)
+#define F_CLS_PRT V_CLS_PRT(1U)
+
+#define S_RX_QFC_EN 10
+#define V_RX_QFC_EN(x) ((x) << S_RX_QFC_EN)
+#define F_RX_QFC_EN V_RX_QFC_EN(1U)
+
+#define S_QFC_FWD_UP 9
+#define V_QFC_FWD_UP(x) ((x) << S_QFC_FWD_UP)
+#define F_QFC_FWD_UP V_QFC_FWD_UP(1U)
+
+#define S_PPP_FWD_UP 8
+#define V_PPP_FWD_UP(x) ((x) << S_PPP_FWD_UP)
+#define F_PPP_FWD_UP V_PPP_FWD_UP(1U)
+
+#define S_PAUSE_FWD_UP 7
+#define V_PAUSE_FWD_UP(x) ((x) << S_PAUSE_FWD_UP)
+#define F_PAUSE_FWD_UP V_PAUSE_FWD_UP(1U)
+
+#define S_LPBK_BP 6
+#define V_LPBK_BP(x) ((x) << S_LPBK_BP)
+#define F_LPBK_BP V_LPBK_BP(1U)
+
+#define S_PASS_NO_MATCH 5
+#define V_PASS_NO_MATCH(x) ((x) << S_PASS_NO_MATCH)
+#define F_PASS_NO_MATCH V_PASS_NO_MATCH(1U)
+
+#define S_IVLAN_EN 4
+#define V_IVLAN_EN(x) ((x) << S_IVLAN_EN)
+#define F_IVLAN_EN V_IVLAN_EN(1U)
+
+#define S_OVLAN_EN3 3
+#define V_OVLAN_EN3(x) ((x) << S_OVLAN_EN3)
+#define F_OVLAN_EN3 V_OVLAN_EN3(1U)
+
+#define S_OVLAN_EN2 2
+#define V_OVLAN_EN2(x) ((x) << S_OVLAN_EN2)
+#define F_OVLAN_EN2 V_OVLAN_EN2(1U)
+
+#define S_OVLAN_EN1 1
+#define V_OVLAN_EN1(x) ((x) << S_OVLAN_EN1)
+#define F_OVLAN_EN1 V_OVLAN_EN1(1U)
+
+#define S_OVLAN_EN0 0
+#define V_OVLAN_EN0(x) ((x) << S_OVLAN_EN0)
+#define F_OVLAN_EN0 V_OVLAN_EN0(1U)
+
+#define A_MPS_PORT_RX_MTU 0x104
+#define A_MPS_PORT_RX_PF_MAP 0x108
+#define A_MPS_PORT_RX_VF_MAP0 0x10c
+#define A_MPS_PORT_RX_VF_MAP1 0x110
+#define A_MPS_PORT_RX_VF_MAP2 0x114
+#define A_MPS_PORT_RX_VF_MAP3 0x118
+#define A_MPS_PORT_RX_IVLAN 0x11c
+
+#define S_IVLAN_ETYPE 0
+#define M_IVLAN_ETYPE 0xffffU
+#define V_IVLAN_ETYPE(x) ((x) << S_IVLAN_ETYPE)
+#define G_IVLAN_ETYPE(x) (((x) >> S_IVLAN_ETYPE) & M_IVLAN_ETYPE)
+
+#define A_MPS_PORT_RX_OVLAN0 0x120
+
+#define S_OVLAN_MASK 16
+#define M_OVLAN_MASK 0xffffU
+#define V_OVLAN_MASK(x) ((x) << S_OVLAN_MASK)
+#define G_OVLAN_MASK(x) (((x) >> S_OVLAN_MASK) & M_OVLAN_MASK)
+
+#define S_OVLAN_ETYPE 0
+#define M_OVLAN_ETYPE 0xffffU
+#define V_OVLAN_ETYPE(x) ((x) << S_OVLAN_ETYPE)
+#define G_OVLAN_ETYPE(x) (((x) >> S_OVLAN_ETYPE) & M_OVLAN_ETYPE)
+
+#define A_MPS_PORT_RX_OVLAN1 0x124
+#define A_MPS_PORT_RX_OVLAN2 0x128
+#define A_MPS_PORT_RX_OVLAN3 0x12c
+#define A_MPS_PORT_RX_RSS_HASH 0x130
+#define A_MPS_PORT_RX_RSS_CONTROL 0x134
+
+#define S_RSS_CTRL 16
+#define M_RSS_CTRL 0xffU
+#define V_RSS_CTRL(x) ((x) << S_RSS_CTRL)
+#define G_RSS_CTRL(x) (((x) >> S_RSS_CTRL) & M_RSS_CTRL)
+
+#define S_QUE_NUM 0
+#define M_QUE_NUM 0xffffU
+#define V_QUE_NUM(x) ((x) << S_QUE_NUM)
+#define G_QUE_NUM(x) (((x) >> S_QUE_NUM) & M_QUE_NUM)
+
+#define A_MPS_PORT_RX_CTL1 0x138
+
+#define S_FIXED_PFVF_MAC 13
+#define V_FIXED_PFVF_MAC(x) ((x) << S_FIXED_PFVF_MAC)
+#define F_FIXED_PFVF_MAC V_FIXED_PFVF_MAC(1U)
+
+#define S_FIXED_PFVF_LPBK 12
+#define V_FIXED_PFVF_LPBK(x) ((x) << S_FIXED_PFVF_LPBK)
+#define F_FIXED_PFVF_LPBK V_FIXED_PFVF_LPBK(1U)
+
+#define S_FIXED_PFVF_LPBK_OV 11
+#define V_FIXED_PFVF_LPBK_OV(x) ((x) << S_FIXED_PFVF_LPBK_OV)
+#define F_FIXED_PFVF_LPBK_OV V_FIXED_PFVF_LPBK_OV(1U)
+
+#define S_FIXED_PF 8
+#define M_FIXED_PF 0x7U
+#define V_FIXED_PF(x) ((x) << S_FIXED_PF)
+#define G_FIXED_PF(x) (((x) >> S_FIXED_PF) & M_FIXED_PF)
+
+#define S_FIXED_VF_VLD 7
+#define V_FIXED_VF_VLD(x) ((x) << S_FIXED_VF_VLD)
+#define F_FIXED_VF_VLD V_FIXED_VF_VLD(1U)
+
+#define S_FIXED_VF 0
+#define M_FIXED_VF 0x7fU
+#define V_FIXED_VF(x) ((x) << S_FIXED_VF)
+#define G_FIXED_VF(x) (((x) >> S_FIXED_VF) & M_FIXED_VF)
+
+#define A_MPS_PORT_RX_SPARE 0x13c
+#define A_MPS_PORT_TX_MAC_RELOAD_CH0 0x190
+
+#define S_CREDIT 0
+#define M_CREDIT 0xffffU
+#define V_CREDIT(x) ((x) << S_CREDIT)
+#define G_CREDIT(x) (((x) >> S_CREDIT) & M_CREDIT)
+
+#define A_MPS_PORT_TX_MAC_RELOAD_CH1 0x194
+#define A_MPS_PORT_TX_MAC_RELOAD_CH2 0x198
+#define A_MPS_PORT_TX_MAC_RELOAD_CH3 0x19c
+#define A_MPS_PORT_TX_MAC_RELOAD_CH4 0x1a0
+#define A_MPS_PORT_TX_LPBK_RELOAD_CH0 0x1a8
+#define A_MPS_PORT_TX_LPBK_RELOAD_CH1 0x1ac
+#define A_MPS_PORT_TX_LPBK_RELOAD_CH2 0x1b0
+#define A_MPS_PORT_TX_LPBK_RELOAD_CH3 0x1b4
+#define A_MPS_PORT_TX_LPBK_RELOAD_CH4 0x1b8
+#define A_MPS_PORT_TX_FIFO_CTL 0x1c4
+
+#define S_FIFOTH 5
+#define M_FIFOTH 0x1ffU
+#define V_FIFOTH(x) ((x) << S_FIFOTH)
+#define G_FIFOTH(x) (((x) >> S_FIFOTH) & M_FIFOTH)
+
+#define S_FIFOEN 4
+#define V_FIFOEN(x) ((x) << S_FIFOEN)
+#define F_FIFOEN V_FIFOEN(1U)
+
+#define S_MAXPKTCNT 0
+#define M_MAXPKTCNT 0xfU
+#define V_MAXPKTCNT(x) ((x) << S_MAXPKTCNT)
+#define G_MAXPKTCNT(x) (((x) >> S_MAXPKTCNT) & M_MAXPKTCNT)
+
+#define A_MPS_PORT_FPGA_PAUSE_CTL 0x1c8
+#define A_MPS_PORT_CLS_HASH_SRAM 0x200
+
+#define S_VALID 20
+#define V_VALID(x) ((x) << S_VALID)
+#define F_VALID V_VALID(1U)
+
+#define S_HASHPORTMAP 16
+#define M_HASHPORTMAP 0xfU
+#define V_HASHPORTMAP(x) ((x) << S_HASHPORTMAP)
+#define G_HASHPORTMAP(x) (((x) >> S_HASHPORTMAP) & M_HASHPORTMAP)
+
+#define S_MULTILISTEN 15
+#define V_MULTILISTEN(x) ((x) << S_MULTILISTEN)
+#define F_MULTILISTEN V_MULTILISTEN(1U)
+
+#define S_PRIORITY 12
+#define M_PRIORITY 0x7U
+#define V_PRIORITY(x) ((x) << S_PRIORITY)
+#define G_PRIORITY(x) (((x) >> S_PRIORITY) & M_PRIORITY)
+
+#define S_REPLICATE 11
+#define V_REPLICATE(x) ((x) << S_REPLICATE)
+#define F_REPLICATE V_REPLICATE(1U)
+
+#define S_PF 8
+#define M_PF 0x7U
+#define V_PF(x) ((x) << S_PF)
+#define G_PF(x) (((x) >> S_PF) & M_PF)
+
+#define S_VF_VALID 7
+#define V_VF_VALID(x) ((x) << S_VF_VALID)
+#define F_VF_VALID V_VF_VALID(1U)
+
+#define S_VF 0
+#define M_VF 0x7fU
+#define V_VF(x) ((x) << S_VF)
+#define G_VF(x) (((x) >> S_VF) & M_VF)
+
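+/*
+ * Illustrative note, not part of the original change: a classifier hash SRAM
+ * entry is composed from the fields above with the V_/F_ macros.  A
+ * hypothetical entry that is valid, replicated, and owned by PF 4:
+ *
+ *	uint32_t entry = F_VALID | F_REPLICATE | V_PF(4);
+ */
+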
+#define A_MPS_PF_CTL 0x2c0
+
+#define S_TXEN 1
+#define V_TXEN(x) ((x) << S_TXEN)
+#define F_TXEN V_TXEN(1U)
+
+#define S_RXEN 0
+#define V_RXEN(x) ((x) << S_RXEN)
+#define F_RXEN V_RXEN(1U)
+
+#define A_MPS_PF_TX_QINQ_VLAN 0x2e0
+
+#define S_PROTOCOLID 16
+#define M_PROTOCOLID 0xffffU
+#define V_PROTOCOLID(x) ((x) << S_PROTOCOLID)
+#define G_PROTOCOLID(x) (((x) >> S_PROTOCOLID) & M_PROTOCOLID)
+
+#define S_VLAN_PRIO 13
+#define M_VLAN_PRIO 0x7U
+#define V_VLAN_PRIO(x) ((x) << S_VLAN_PRIO)
+#define G_VLAN_PRIO(x) (((x) >> S_VLAN_PRIO) & M_VLAN_PRIO)
+
+#define S_CFI 12
+#define V_CFI(x) ((x) << S_CFI)
+#define F_CFI V_CFI(1U)
+
+#define S_TAG 0
+#define M_TAG 0xfffU
+#define V_TAG(x) ((x) << S_TAG)
+#define G_TAG(x) (((x) >> S_TAG) & M_TAG)
+
+#define A_MPS_PF_STAT_TX_PF_BCAST_BYTES_L 0x300
+#define A_MPS_PF_STAT_TX_PF_BCAST_BYTES_H 0x304
+#define A_MPS_PORT_CLS_HASH_CTL 0x304
+
+#define S_UNICASTENABLE 31
+#define V_UNICASTENABLE(x) ((x) << S_UNICASTENABLE)
+#define F_UNICASTENABLE V_UNICASTENABLE(1U)
+
+#define A_MPS_PF_STAT_TX_PF_BCAST_FRAMES_L 0x308
+#define A_MPS_PORT_CLS_PROMISCUOUS_CTL 0x308
+
+#define S_PROMISCEN 31
+#define V_PROMISCEN(x) ((x) << S_PROMISCEN)
+#define F_PROMISCEN V_PROMISCEN(1U)
+
+#define A_MPS_PF_STAT_TX_PF_BCAST_FRAMES_H 0x30c
+#define A_MPS_PORT_CLS_BMC_MAC_ADDR_L 0x30c
+#define A_MPS_PF_STAT_TX_PF_MCAST_BYTES_L 0x310
+#define A_MPS_PORT_CLS_BMC_MAC_ADDR_H 0x310
+
+#define S_MATCHBOTH 17
+#define V_MATCHBOTH(x) ((x) << S_MATCHBOTH)
+#define F_MATCHBOTH V_MATCHBOTH(1U)
+
+#define S_BMC_VLD 16
+#define V_BMC_VLD(x) ((x) << S_BMC_VLD)
+#define F_BMC_VLD V_BMC_VLD(1U)
+
+#define A_MPS_PF_STAT_TX_PF_MCAST_BYTES_H 0x314
+#define A_MPS_PORT_CLS_BMC_VLAN 0x314
+
+#define S_BMC_VLAN_SEL 13
+#define V_BMC_VLAN_SEL(x) ((x) << S_BMC_VLAN_SEL)
+#define F_BMC_VLAN_SEL V_BMC_VLAN_SEL(1U)
+
+#define S_VLAN_VLD 12
+#define V_VLAN_VLD(x) ((x) << S_VLAN_VLD)
+#define F_VLAN_VLD V_VLAN_VLD(1U)
+
+#define A_MPS_PF_STAT_TX_PF_MCAST_FRAMES_L 0x318
+#define A_MPS_PORT_CLS_CTL 0x318
+
+#define S_PF_VLAN_SEL 0
+#define V_PF_VLAN_SEL(x) ((x) << S_PF_VLAN_SEL)
+#define F_PF_VLAN_SEL V_PF_VLAN_SEL(1U)
+
+#define A_MPS_PF_STAT_TX_PF_MCAST_FRAMES_H 0x31c
+#define A_MPS_PF_STAT_TX_PF_UCAST_BYTES_L 0x320
+#define A_MPS_PF_STAT_TX_PF_UCAST_BYTES_H 0x324
+#define A_MPS_PF_STAT_TX_PF_UCAST_FRAMES_L 0x328
+#define A_MPS_PF_STAT_TX_PF_UCAST_FRAMES_H 0x32c
+#define A_MPS_PF_STAT_TX_PF_OFFLOAD_BYTES_L 0x330
+#define A_MPS_PF_STAT_TX_PF_OFFLOAD_BYTES_H 0x334
+#define A_MPS_PF_STAT_TX_PF_OFFLOAD_FRAMES_L 0x338
+#define A_MPS_PF_STAT_TX_PF_OFFLOAD_FRAMES_H 0x33c
+#define A_MPS_PF_STAT_RX_PF_BYTES_L 0x340
+#define A_MPS_PF_STAT_RX_PF_BYTES_H 0x344
+#define A_MPS_PF_STAT_RX_PF_FRAMES_L 0x348
+#define A_MPS_PF_STAT_RX_PF_FRAMES_H 0x34c
+#define A_MPS_PF_STAT_RX_PF_BCAST_BYTES_L 0x350
+#define A_MPS_PF_STAT_RX_PF_BCAST_BYTES_H 0x354
+#define A_MPS_PF_STAT_RX_PF_BCAST_FRAMES_L 0x358
+#define A_MPS_PF_STAT_RX_PF_BCAST_FRAMES_H 0x35c
+#define A_MPS_PF_STAT_RX_PF_MCAST_BYTES_L 0x360
+#define A_MPS_PF_STAT_RX_PF_MCAST_BYTES_H 0x364
+#define A_MPS_PF_STAT_RX_PF_MCAST_FRAMES_L 0x368
+#define A_MPS_PF_STAT_RX_PF_MCAST_FRAMES_H 0x36c
+#define A_MPS_PF_STAT_RX_PF_UCAST_BYTES_L 0x370
+#define A_MPS_PF_STAT_RX_PF_UCAST_BYTES_H 0x374
+#define A_MPS_PF_STAT_RX_PF_UCAST_FRAMES_L 0x378
+#define A_MPS_PF_STAT_RX_PF_UCAST_FRAMES_H 0x37c
+#define A_MPS_PF_STAT_RX_PF_ERR_FRAMES_L 0x380
+#define A_MPS_PF_STAT_RX_PF_ERR_FRAMES_H 0x384
+#define A_MPS_PORT_STAT_TX_PORT_BYTES_L 0x400
+#define A_MPS_PORT_STAT_TX_PORT_BYTES_H 0x404
+#define A_MPS_PORT_STAT_TX_PORT_FRAMES_L 0x408
+#define A_MPS_PORT_STAT_TX_PORT_FRAMES_H 0x40c
+#define A_MPS_PORT_STAT_TX_PORT_BCAST_L 0x410
+#define A_MPS_PORT_STAT_TX_PORT_BCAST_H 0x414
+#define A_MPS_PORT_STAT_TX_PORT_MCAST_L 0x418
+#define A_MPS_PORT_STAT_TX_PORT_MCAST_H 0x41c
+#define A_MPS_PORT_STAT_TX_PORT_UCAST_L 0x420
+#define A_MPS_PORT_STAT_TX_PORT_UCAST_H 0x424
+#define A_MPS_PORT_STAT_TX_PORT_ERROR_L 0x428
+#define A_MPS_PORT_STAT_TX_PORT_ERROR_H 0x42c
+#define A_MPS_PORT_STAT_TX_PORT_64B_L 0x430
+#define A_MPS_PORT_STAT_TX_PORT_64B_H 0x434
+#define A_MPS_PORT_STAT_TX_PORT_65B_127B_L 0x438
+#define A_MPS_PORT_STAT_TX_PORT_65B_127B_H 0x43c
+#define A_MPS_PORT_STAT_TX_PORT_128B_255B_L 0x440
+#define A_MPS_PORT_STAT_TX_PORT_128B_255B_H 0x444
+#define A_MPS_PORT_STAT_TX_PORT_256B_511B_L 0x448
+#define A_MPS_PORT_STAT_TX_PORT_256B_511B_H 0x44c
+#define A_MPS_PORT_STAT_TX_PORT_512B_1023B_L 0x450
+#define A_MPS_PORT_STAT_TX_PORT_512B_1023B_H 0x454
+#define A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L 0x458
+#define A_MPS_PORT_STAT_TX_PORT_1024B_1518B_H 0x45c
+#define A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L 0x460
+#define A_MPS_PORT_STAT_TX_PORT_1519B_MAX_H 0x464
+#define A_MPS_PORT_STAT_TX_PORT_DROP_L 0x468
+#define A_MPS_PORT_STAT_TX_PORT_DROP_H 0x46c
+#define A_MPS_PORT_STAT_TX_PORT_PAUSE_L 0x470
+#define A_MPS_PORT_STAT_TX_PORT_PAUSE_H 0x474
+#define A_MPS_PORT_STAT_TX_PORT_PPP0_L 0x478
+#define A_MPS_PORT_STAT_TX_PORT_PPP0_H 0x47c
+#define A_MPS_PORT_STAT_TX_PORT_PPP1_L 0x480
+#define A_MPS_PORT_STAT_TX_PORT_PPP1_H 0x484
+#define A_MPS_PORT_STAT_TX_PORT_PPP2_L 0x488
+#define A_MPS_PORT_STAT_TX_PORT_PPP2_H 0x48c
+#define A_MPS_PORT_STAT_TX_PORT_PPP3_L 0x490
+#define A_MPS_PORT_STAT_TX_PORT_PPP3_H 0x494
+#define A_MPS_PORT_STAT_TX_PORT_PPP4_L 0x498
+#define A_MPS_PORT_STAT_TX_PORT_PPP4_H 0x49c
+#define A_MPS_PORT_STAT_TX_PORT_PPP5_L 0x4a0
+#define A_MPS_PORT_STAT_TX_PORT_PPP5_H 0x4a4
+#define A_MPS_PORT_STAT_TX_PORT_PPP6_L 0x4a8
+#define A_MPS_PORT_STAT_TX_PORT_PPP6_H 0x4ac
+#define A_MPS_PORT_STAT_TX_PORT_PPP7_L 0x4b0
+#define A_MPS_PORT_STAT_TX_PORT_PPP7_H 0x4b4
+#define A_MPS_PORT_STAT_LB_PORT_BYTES_L 0x4c0
+#define A_MPS_PORT_STAT_LB_PORT_BYTES_H 0x4c4
+#define A_MPS_PORT_STAT_LB_PORT_FRAMES_L 0x4c8
+#define A_MPS_PORT_STAT_LB_PORT_FRAMES_H 0x4cc
+#define A_MPS_PORT_STAT_LB_PORT_BCAST_L 0x4d0
+#define A_MPS_PORT_STAT_LB_PORT_BCAST_H 0x4d4
+#define A_MPS_PORT_STAT_LB_PORT_MCAST_L 0x4d8
+#define A_MPS_PORT_STAT_LB_PORT_MCAST_H 0x4dc
+#define A_MPS_PORT_STAT_LB_PORT_UCAST_L 0x4e0
+#define A_MPS_PORT_STAT_LB_PORT_UCAST_H 0x4e4
+#define A_MPS_PORT_STAT_LB_PORT_ERROR_L 0x4e8
+#define A_MPS_PORT_STAT_LB_PORT_ERROR_H 0x4ec
+#define A_MPS_PORT_STAT_LB_PORT_64B_L 0x4f0
+#define A_MPS_PORT_STAT_LB_PORT_64B_H 0x4f4
+#define A_MPS_PORT_STAT_LB_PORT_65B_127B_L 0x4f8
+#define A_MPS_PORT_STAT_LB_PORT_65B_127B_H 0x4fc
+#define A_MPS_PORT_STAT_LB_PORT_128B_255B_L 0x500
+#define A_MPS_PORT_STAT_LB_PORT_128B_255B_H 0x504
+#define A_MPS_PORT_STAT_LB_PORT_256B_511B_L 0x508
+#define A_MPS_PORT_STAT_LB_PORT_256B_511B_H 0x50c
+#define A_MPS_PORT_STAT_LB_PORT_512B_1023B_L 0x510
+#define A_MPS_PORT_STAT_LB_PORT_512B_1023B_H 0x514
+#define A_MPS_PORT_STAT_LB_PORT_1024B_1518B_L 0x518
+#define A_MPS_PORT_STAT_LB_PORT_1024B_1518B_H 0x51c
+#define A_MPS_PORT_STAT_LB_PORT_1519B_MAX_L 0x520
+#define A_MPS_PORT_STAT_LB_PORT_1519B_MAX_H 0x524
+#define A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES 0x528
+#define A_MPS_PORT_STAT_RX_PORT_BYTES_L 0x540
+#define A_MPS_PORT_STAT_RX_PORT_BYTES_H 0x544
+#define A_MPS_PORT_STAT_RX_PORT_FRAMES_L 0x548
+#define A_MPS_PORT_STAT_RX_PORT_FRAMES_H 0x54c
+#define A_MPS_PORT_STAT_RX_PORT_BCAST_L 0x550
+#define A_MPS_PORT_STAT_RX_PORT_BCAST_H 0x554
+#define A_MPS_PORT_STAT_RX_PORT_MCAST_L 0x558
+#define A_MPS_PORT_STAT_RX_PORT_MCAST_H 0x55c
+#define A_MPS_PORT_STAT_RX_PORT_UCAST_L 0x560
+#define A_MPS_PORT_STAT_RX_PORT_UCAST_H 0x564
+#define A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L 0x568
+#define A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_H 0x56c
+#define A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L 0x570
+#define A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_H 0x574
+#define A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L 0x578
+#define A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_H 0x57c
+#define A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L 0x580
+#define A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_H 0x584
+#define A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L 0x588
+#define A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_H 0x58c
+#define A_MPS_PORT_STAT_RX_PORT_64B_L 0x590
+#define A_MPS_PORT_STAT_RX_PORT_64B_H 0x594
+#define A_MPS_PORT_STAT_RX_PORT_65B_127B_L 0x598
+#define A_MPS_PORT_STAT_RX_PORT_65B_127B_H 0x59c
+#define A_MPS_PORT_STAT_RX_PORT_128B_255B_L 0x5a0
+#define A_MPS_PORT_STAT_RX_PORT_128B_255B_H 0x5a4
+#define A_MPS_PORT_STAT_RX_PORT_256B_511B_L 0x5a8
+#define A_MPS_PORT_STAT_RX_PORT_256B_511B_H 0x5ac
+#define A_MPS_PORT_STAT_RX_PORT_512B_1023B_L 0x5b0
+#define A_MPS_PORT_STAT_RX_PORT_512B_1023B_H 0x5b4
+#define A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L 0x5b8
+#define A_MPS_PORT_STAT_RX_PORT_1024B_1518B_H 0x5bc
+#define A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L 0x5c0
+#define A_MPS_PORT_STAT_RX_PORT_1519B_MAX_H 0x5c4
+#define A_MPS_PORT_STAT_RX_PORT_PAUSE_L 0x5c8
+#define A_MPS_PORT_STAT_RX_PORT_PAUSE_H 0x5cc
+#define A_MPS_PORT_STAT_RX_PORT_PPP0_L 0x5d0
+#define A_MPS_PORT_STAT_RX_PORT_PPP0_H 0x5d4
+#define A_MPS_PORT_STAT_RX_PORT_PPP1_L 0x5d8
+#define A_MPS_PORT_STAT_RX_PORT_PPP1_H 0x5dc
+#define A_MPS_PORT_STAT_RX_PORT_PPP2_L 0x5e0
+#define A_MPS_PORT_STAT_RX_PORT_PPP2_H 0x5e4
+#define A_MPS_PORT_STAT_RX_PORT_PPP3_L 0x5e8
+#define A_MPS_PORT_STAT_RX_PORT_PPP3_H 0x5ec
+#define A_MPS_PORT_STAT_RX_PORT_PPP4_L 0x5f0
+#define A_MPS_PORT_STAT_RX_PORT_PPP4_H 0x5f4
+#define A_MPS_PORT_STAT_RX_PORT_PPP5_L 0x5f8
+#define A_MPS_PORT_STAT_RX_PORT_PPP5_H 0x5fc
+#define A_MPS_PORT_STAT_RX_PORT_PPP6_L 0x600
+#define A_MPS_PORT_STAT_RX_PORT_PPP6_H 0x604
+#define A_MPS_PORT_STAT_RX_PORT_PPP7_L 0x608
+#define A_MPS_PORT_STAT_RX_PORT_PPP7_H 0x60c
+#define A_MPS_PORT_STAT_RX_PORT_LESS_64B_L 0x610
+#define A_MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614
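+
+/*
+ * Illustrative note, not part of the original change: the _L/_H register
+ * pairs above are the low and high halves of 64-bit statistics counters.
+ * One possible composition from two 32-bit reads, again relative to the
+ * per-port base and assuming a t4_read_reg()-style accessor (a 64-bit read
+ * helper could be used instead):
+ *
+ *	uint64_t bytes;
+ *	bytes = t4_read_reg(sc, base + A_MPS_PORT_STAT_RX_PORT_BYTES_L);
+ *	bytes |= (uint64_t)t4_read_reg(sc, base + A_MPS_PORT_STAT_RX_PORT_BYTES_H) << 32;
+ */
+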
+#define A_MPS_CMN_CTL 0x9000
+
+#define S_DETECT8023 3
+#define V_DETECT8023(x) ((x) << S_DETECT8023)
+#define F_DETECT8023 V_DETECT8023(1U)
+
+#define S_VFDIRECTACCESS 2
+#define V_VFDIRECTACCESS(x) ((x) << S_VFDIRECTACCESS)
+#define F_VFDIRECTACCESS V_VFDIRECTACCESS(1U)
+
+#define S_NUMPORTS 0
+#define M_NUMPORTS 0x3U
+#define V_NUMPORTS(x) ((x) << S_NUMPORTS)
+#define G_NUMPORTS(x) (((x) >> S_NUMPORTS) & M_NUMPORTS)
+
+#define A_MPS_INT_ENABLE 0x9004
+
+#define S_STATINTENB 5
+#define V_STATINTENB(x) ((x) << S_STATINTENB)
+#define F_STATINTENB V_STATINTENB(1U)
+
+#define S_TXINTENB 4
+#define V_TXINTENB(x) ((x) << S_TXINTENB)
+#define F_TXINTENB V_TXINTENB(1U)
+
+#define S_RXINTENB 3
+#define V_RXINTENB(x) ((x) << S_RXINTENB)
+#define F_RXINTENB V_RXINTENB(1U)
+
+#define S_TRCINTENB 2
+#define V_TRCINTENB(x) ((x) << S_TRCINTENB)
+#define F_TRCINTENB V_TRCINTENB(1U)
+
+#define S_CLSINTENB 1
+#define V_CLSINTENB(x) ((x) << S_CLSINTENB)
+#define F_CLSINTENB V_CLSINTENB(1U)
+
+#define S_PLINTENB 0
+#define V_PLINTENB(x) ((x) << S_PLINTENB)
+#define F_PLINTENB V_PLINTENB(1U)
+
+#define A_MPS_INT_CAUSE 0x9008
+
+#define S_STATINT 5
+#define V_STATINT(x) ((x) << S_STATINT)
+#define F_STATINT V_STATINT(1U)
+
+#define S_TXINT 4
+#define V_TXINT(x) ((x) << S_TXINT)
+#define F_TXINT V_TXINT(1U)
+
+#define S_RXINT 3
+#define V_RXINT(x) ((x) << S_RXINT)
+#define F_RXINT V_RXINT(1U)
+
+#define S_TRCINT 2
+#define V_TRCINT(x) ((x) << S_TRCINT)
+#define F_TRCINT V_TRCINT(1U)
+
+#define S_CLSINT 1
+#define V_CLSINT(x) ((x) << S_CLSINT)
+#define F_CLSINT V_CLSINT(1U)
+
+#define S_PLINT 0
+#define V_PLINT(x) ((x) << S_PLINT)
+#define F_PLINT V_PLINT(1U)
+
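+/*
+ * Illustrative note, not part of the original change: ENABLE/CAUSE pairs such
+ * as A_MPS_INT_ENABLE and A_MPS_INT_CAUSE are typically handled by enabling
+ * the sources of interest and, in the interrupt handler, writing the latched
+ * cause bits back to acknowledge them.  A sketch, assuming
+ * t4_read_reg()/t4_write_reg()-style accessors:
+ *
+ *	t4_write_reg(sc, A_MPS_INT_ENABLE, F_STATINTENB | F_TXINTENB | F_RXINTENB);
+ *	...
+ *	uint32_t cause = t4_read_reg(sc, A_MPS_INT_CAUSE);
+ *	t4_write_reg(sc, A_MPS_INT_CAUSE, cause);
+ */
+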
+#define A_MPS_VF_TX_CTL_31_0 0x9010
+#define A_MPS_VF_TX_CTL_63_32 0x9014
+#define A_MPS_VF_TX_CTL_95_64 0x9018
+#define A_MPS_VF_TX_CTL_127_96 0x901c
+#define A_MPS_VF_RX_CTL_31_0 0x9020
+#define A_MPS_VF_RX_CTL_63_32 0x9024
+#define A_MPS_VF_RX_CTL_95_64 0x9028
+#define A_MPS_VF_RX_CTL_127_96 0x902c
+#define A_MPS_TX_PAUSE_DURATION_BUF_GRP0 0x9030
+
+#define S_VALUE 0
+#define M_VALUE 0xffffU
+#define V_VALUE(x) ((x) << S_VALUE)
+#define G_VALUE(x) (((x) >> S_VALUE) & M_VALUE)
+
+#define A_MPS_TX_PAUSE_DURATION_BUF_GRP1 0x9034
+#define A_MPS_TX_PAUSE_DURATION_BUF_GRP2 0x9038
+#define A_MPS_TX_PAUSE_DURATION_BUF_GRP3 0x903c
+#define A_MPS_TX_PAUSE_RETRANS_BUF_GRP0 0x9040
+#define A_MPS_TX_PAUSE_RETRANS_BUF_GRP1 0x9044
+#define A_MPS_TX_PAUSE_RETRANS_BUF_GRP2 0x9048
+#define A_MPS_TX_PAUSE_RETRANS_BUF_GRP3 0x904c
+#define A_MPS_TP_CSIDE_MUX_CTL_P0 0x9050
+
+#define S_WEIGHT 0
+#define M_WEIGHT 0xfffU
+#define V_WEIGHT(x) ((x) << S_WEIGHT)
+#define G_WEIGHT(x) (((x) >> S_WEIGHT) & M_WEIGHT)
+
+#define A_MPS_TP_CSIDE_MUX_CTL_P1 0x9054
+#define A_MPS_WOL_CTL_MODE 0x9058
+
+#define S_WOL_MODE 0
+#define V_WOL_MODE(x) ((x) << S_WOL_MODE)
+#define F_WOL_MODE V_WOL_MODE(1U)
+
+#define A_MPS_FPGA_DEBUG 0x9060
+
+#define S_LPBK_EN 8
+#define V_LPBK_EN(x) ((x) << S_LPBK_EN)
+#define F_LPBK_EN V_LPBK_EN(1U)
+
+#define S_CH_MAP3 6
+#define M_CH_MAP3 0x3U
+#define V_CH_MAP3(x) ((x) << S_CH_MAP3)
+#define G_CH_MAP3(x) (((x) >> S_CH_MAP3) & M_CH_MAP3)
+
+#define S_CH_MAP2 4
+#define M_CH_MAP2 0x3U
+#define V_CH_MAP2(x) ((x) << S_CH_MAP2)
+#define G_CH_MAP2(x) (((x) >> S_CH_MAP2) & M_CH_MAP2)
+
+#define S_CH_MAP1 2
+#define M_CH_MAP1 0x3U
+#define V_CH_MAP1(x) ((x) << S_CH_MAP1)
+#define G_CH_MAP1(x) (((x) >> S_CH_MAP1) & M_CH_MAP1)
+
+#define S_CH_MAP0 0
+#define M_CH_MAP0 0x3U
+#define V_CH_MAP0(x) ((x) << S_CH_MAP0)
+#define G_CH_MAP0(x) (((x) >> S_CH_MAP0) & M_CH_MAP0)
+
+#define A_MPS_DEBUG_CTL 0x9068
+
+#define S_DBGMODECTL_H 11
+#define V_DBGMODECTL_H(x) ((x) << S_DBGMODECTL_H)
+#define F_DBGMODECTL_H V_DBGMODECTL_H(1U)
+
+#define S_DBGSEL_H 6
+#define M_DBGSEL_H 0x1fU
+#define V_DBGSEL_H(x) ((x) << S_DBGSEL_H)
+#define G_DBGSEL_H(x) (((x) >> S_DBGSEL_H) & M_DBGSEL_H)
+
+#define S_DBGMODECTL_L 5
+#define V_DBGMODECTL_L(x) ((x) << S_DBGMODECTL_L)
+#define F_DBGMODECTL_L V_DBGMODECTL_L(1U)
+
+#define S_DBGSEL_L 0
+#define M_DBGSEL_L 0x1fU
+#define V_DBGSEL_L(x) ((x) << S_DBGSEL_L)
+#define G_DBGSEL_L(x) (((x) >> S_DBGSEL_L) & M_DBGSEL_L)
+
+#define A_MPS_DEBUG_DATA_REG_L 0x906c
+#define A_MPS_DEBUG_DATA_REG_H 0x9070
+#define A_MPS_TOP_SPARE 0x9074
+
+#define S_TOPSPARE 12
+#define M_TOPSPARE 0xfffffU
+#define V_TOPSPARE(x) ((x) << S_TOPSPARE)
+#define G_TOPSPARE(x) (((x) >> S_TOPSPARE) & M_TOPSPARE)
+
+#define S_CHIKN_14463 8
+#define M_CHIKN_14463 0xfU
+#define V_CHIKN_14463(x) ((x) << S_CHIKN_14463)
+#define G_CHIKN_14463(x) (((x) >> S_CHIKN_14463) & M_CHIKN_14463)
+
+#define S_OVLANSELLPBK3 7
+#define V_OVLANSELLPBK3(x) ((x) << S_OVLANSELLPBK3)
+#define F_OVLANSELLPBK3 V_OVLANSELLPBK3(1U)
+
+#define S_OVLANSELLPBK2 6
+#define V_OVLANSELLPBK2(x) ((x) << S_OVLANSELLPBK2)
+#define F_OVLANSELLPBK2 V_OVLANSELLPBK2(1U)
+
+#define S_OVLANSELLPBK1 5
+#define V_OVLANSELLPBK1(x) ((x) << S_OVLANSELLPBK1)
+#define F_OVLANSELLPBK1 V_OVLANSELLPBK1(1U)
+
+#define S_OVLANSELLPBK0 4
+#define V_OVLANSELLPBK0(x) ((x) << S_OVLANSELLPBK0)
+#define F_OVLANSELLPBK0 V_OVLANSELLPBK0(1U)
+
+#define S_OVLANSELMAC3 3
+#define V_OVLANSELMAC3(x) ((x) << S_OVLANSELMAC3)
+#define F_OVLANSELMAC3 V_OVLANSELMAC3(1U)
+
+#define S_OVLANSELMAC2 2
+#define V_OVLANSELMAC2(x) ((x) << S_OVLANSELMAC2)
+#define F_OVLANSELMAC2 V_OVLANSELMAC2(1U)
+
+#define S_OVLANSELMAC1 1
+#define V_OVLANSELMAC1(x) ((x) << S_OVLANSELMAC1)
+#define F_OVLANSELMAC1 V_OVLANSELMAC1(1U)
+
+#define S_OVLANSELMAC0 0
+#define V_OVLANSELMAC0(x) ((x) << S_OVLANSELMAC0)
+#define F_OVLANSELMAC0 V_OVLANSELMAC0(1U)
+
+#define A_MPS_BUILD_REVISION 0x90fc
+#define A_MPS_TX_PRTY_SEL 0x9400
+
+#define S_CH4_PRTY 20
+#define M_CH4_PRTY 0x7U
+#define V_CH4_PRTY(x) ((x) << S_CH4_PRTY)
+#define G_CH4_PRTY(x) (((x) >> S_CH4_PRTY) & M_CH4_PRTY)
+
+#define S_CH3_PRTY 16
+#define M_CH3_PRTY 0x7U
+#define V_CH3_PRTY(x) ((x) << S_CH3_PRTY)
+#define G_CH3_PRTY(x) (((x) >> S_CH3_PRTY) & M_CH3_PRTY)
+
+#define S_CH2_PRTY 12
+#define M_CH2_PRTY 0x7U
+#define V_CH2_PRTY(x) ((x) << S_CH2_PRTY)
+#define G_CH2_PRTY(x) (((x) >> S_CH2_PRTY) & M_CH2_PRTY)
+
+#define S_CH1_PRTY 8
+#define M_CH1_PRTY 0x7U
+#define V_CH1_PRTY(x) ((x) << S_CH1_PRTY)
+#define G_CH1_PRTY(x) (((x) >> S_CH1_PRTY) & M_CH1_PRTY)
+
+#define S_CH0_PRTY 4
+#define M_CH0_PRTY 0x7U
+#define V_CH0_PRTY(x) ((x) << S_CH0_PRTY)
+#define G_CH0_PRTY(x) (((x) >> S_CH0_PRTY) & M_CH0_PRTY)
+
+#define S_TP_SOURCE 2
+#define M_TP_SOURCE 0x3U
+#define V_TP_SOURCE(x) ((x) << S_TP_SOURCE)
+#define G_TP_SOURCE(x) (((x) >> S_TP_SOURCE) & M_TP_SOURCE)
+
+#define S_NCSI_SOURCE 0
+#define M_NCSI_SOURCE 0x3U
+#define V_NCSI_SOURCE(x) ((x) << S_NCSI_SOURCE)
+#define G_NCSI_SOURCE(x) (((x) >> S_NCSI_SOURCE) & M_NCSI_SOURCE)
+
+#define A_MPS_TX_INT_ENABLE 0x9404
+
+#define S_PORTERR 16
+#define V_PORTERR(x) ((x) << S_PORTERR)
+#define F_PORTERR V_PORTERR(1U)
+
+#define S_FRMERR 15
+#define V_FRMERR(x) ((x) << S_FRMERR)
+#define F_FRMERR V_FRMERR(1U)
+
+#define S_SECNTERR 14
+#define V_SECNTERR(x) ((x) << S_SECNTERR)
+#define F_SECNTERR V_SECNTERR(1U)
+
+#define S_BUBBLE 13
+#define V_BUBBLE(x) ((x) << S_BUBBLE)
+#define F_BUBBLE V_BUBBLE(1U)
+
+#define S_TXDESCFIFO 9
+#define M_TXDESCFIFO 0xfU
+#define V_TXDESCFIFO(x) ((x) << S_TXDESCFIFO)
+#define G_TXDESCFIFO(x) (((x) >> S_TXDESCFIFO) & M_TXDESCFIFO)
+
+#define S_TXDATAFIFO 5
+#define M_TXDATAFIFO 0xfU
+#define V_TXDATAFIFO(x) ((x) << S_TXDATAFIFO)
+#define G_TXDATAFIFO(x) (((x) >> S_TXDATAFIFO) & M_TXDATAFIFO)
+
+#define S_NCSIFIFO 4
+#define V_NCSIFIFO(x) ((x) << S_NCSIFIFO)
+#define F_NCSIFIFO V_NCSIFIFO(1U)
+
+#define S_TPFIFO 0
+#define M_TPFIFO 0xfU
+#define V_TPFIFO(x) ((x) << S_TPFIFO)
+#define G_TPFIFO(x) (((x) >> S_TPFIFO) & M_TPFIFO)
+
+#define A_MPS_TX_INT_CAUSE 0x9408
+#define A_MPS_TX_PERR_ENABLE 0x9410
+#define A_MPS_TX_PERR_INJECT 0x9414
+
+#define S_MPSTXMEMSEL 1
+#define M_MPSTXMEMSEL 0x1fU
+#define V_MPSTXMEMSEL(x) ((x) << S_MPSTXMEMSEL)
+#define G_MPSTXMEMSEL(x) (((x) >> S_MPSTXMEMSEL) & M_MPSTXMEMSEL)
+
+#define A_MPS_TX_SE_CNT_TP01 0x9418
+#define A_MPS_TX_SE_CNT_TP23 0x941c
+#define A_MPS_TX_SE_CNT_MAC01 0x9420
+#define A_MPS_TX_SE_CNT_MAC23 0x9424
+#define A_MPS_TX_SECNT_SPI_BUBBLE_ERR 0x9428
+
+#define S_BUBBLEERR 16
+#define M_BUBBLEERR 0xffU
+#define V_BUBBLEERR(x) ((x) << S_BUBBLEERR)
+#define G_BUBBLEERR(x) (((x) >> S_BUBBLEERR) & M_BUBBLEERR)
+
+#define S_SPI 8
+#define M_SPI 0xffU
+#define V_SPI(x) ((x) << S_SPI)
+#define G_SPI(x) (((x) >> S_SPI) & M_SPI)
+
+#define S_SECNT 0
+#define M_SECNT 0xffU
+#define V_SECNT(x) ((x) << S_SECNT)
+#define G_SECNT(x) (((x) >> S_SECNT) & M_SECNT)
+
+#define A_MPS_TX_SECNT_BUBBLE_CLR 0x942c
+
+#define S_BUBBLECLR 8
+#define M_BUBBLECLR 0xffU
+#define V_BUBBLECLR(x) ((x) << S_BUBBLECLR)
+#define G_BUBBLECLR(x) (((x) >> S_BUBBLECLR) & M_BUBBLECLR)
+
+#define A_MPS_TX_PORT_ERR 0x9430
+
+#define S_LPBKPT3 7
+#define V_LPBKPT3(x) ((x) << S_LPBKPT3)
+#define F_LPBKPT3 V_LPBKPT3(1U)
+
+#define S_LPBKPT2 6
+#define V_LPBKPT2(x) ((x) << S_LPBKPT2)
+#define F_LPBKPT2 V_LPBKPT2(1U)
+
+#define S_LPBKPT1 5
+#define V_LPBKPT1(x) ((x) << S_LPBKPT1)
+#define F_LPBKPT1 V_LPBKPT1(1U)
+
+#define S_LPBKPT0 4
+#define V_LPBKPT0(x) ((x) << S_LPBKPT0)
+#define F_LPBKPT0 V_LPBKPT0(1U)
+
+#define S_PT3 3
+#define V_PT3(x) ((x) << S_PT3)
+#define F_PT3 V_PT3(1U)
+
+#define S_PT2 2
+#define V_PT2(x) ((x) << S_PT2)
+#define F_PT2 V_PT2(1U)
+
+#define S_PT1 1
+#define V_PT1(x) ((x) << S_PT1)
+#define F_PT1 V_PT1(1U)
+
+#define S_PT0 0
+#define V_PT0(x) ((x) << S_PT0)
+#define F_PT0 V_PT0(1U)
+
+#define A_MPS_TX_LPBK_DROP_BP_CTL_CH0 0x9434
+
+#define S_BPEN 1
+#define V_BPEN(x) ((x) << S_BPEN)
+#define F_BPEN V_BPEN(1U)
+
+#define S_DROPEN 0
+#define V_DROPEN(x) ((x) << S_DROPEN)
+#define F_DROPEN V_DROPEN(1U)
+
+#define A_MPS_TX_LPBK_DROP_BP_CTL_CH1 0x9438
+#define A_MPS_TX_LPBK_DROP_BP_CTL_CH2 0x943c
+#define A_MPS_TX_LPBK_DROP_BP_CTL_CH3 0x9440
+#define A_MPS_TX_DEBUG_REG_TP2TX_10 0x9444
+
+#define S_SOPCH1 31
+#define V_SOPCH1(x) ((x) << S_SOPCH1)
+#define F_SOPCH1 V_SOPCH1(1U)
+
+#define S_EOPCH1 30
+#define V_EOPCH1(x) ((x) << S_EOPCH1)
+#define F_EOPCH1 V_EOPCH1(1U)
+
+#define S_SIZECH1 27
+#define M_SIZECH1 0x7U
+#define V_SIZECH1(x) ((x) << S_SIZECH1)
+#define G_SIZECH1(x) (((x) >> S_SIZECH1) & M_SIZECH1)
+
+#define S_ERRCH1 26
+#define V_ERRCH1(x) ((x) << S_ERRCH1)
+#define F_ERRCH1 V_ERRCH1(1U)
+
+#define S_FULLCH1 25
+#define V_FULLCH1(x) ((x) << S_FULLCH1)
+#define F_FULLCH1 V_FULLCH1(1U)
+
+#define S_VALIDCH1 24
+#define V_VALIDCH1(x) ((x) << S_VALIDCH1)
+#define F_VALIDCH1 V_VALIDCH1(1U)
+
+#define S_DATACH1 16
+#define M_DATACH1 0xffU
+#define V_DATACH1(x) ((x) << S_DATACH1)
+#define G_DATACH1(x) (((x) >> S_DATACH1) & M_DATACH1)
+
+#define S_SOPCH0 15
+#define V_SOPCH0(x) ((x) << S_SOPCH0)
+#define F_SOPCH0 V_SOPCH0(1U)
+
+#define S_EOPCH0 14
+#define V_EOPCH0(x) ((x) << S_EOPCH0)
+#define F_EOPCH0 V_EOPCH0(1U)
+
+#define S_SIZECH0 11
+#define M_SIZECH0 0x7U
+#define V_SIZECH0(x) ((x) << S_SIZECH0)
+#define G_SIZECH0(x) (((x) >> S_SIZECH0) & M_SIZECH0)
+
+#define S_ERRCH0 10
+#define V_ERRCH0(x) ((x) << S_ERRCH0)
+#define F_ERRCH0 V_ERRCH0(1U)
+
+#define S_FULLCH0 9
+#define V_FULLCH0(x) ((x) << S_FULLCH0)
+#define F_FULLCH0 V_FULLCH0(1U)
+
+#define S_VALIDCH0 8
+#define V_VALIDCH0(x) ((x) << S_VALIDCH0)
+#define F_VALIDCH0 V_VALIDCH0(1U)
+
+#define S_DATACH0 0
+#define M_DATACH0 0xffU
+#define V_DATACH0(x) ((x) << S_DATACH0)
+#define G_DATACH0(x) (((x) >> S_DATACH0) & M_DATACH0)
+
+#define A_MPS_TX_DEBUG_REG_TP2TX_32 0x9448
+
+#define S_SOPCH3 31
+#define V_SOPCH3(x) ((x) << S_SOPCH3)
+#define F_SOPCH3 V_SOPCH3(1U)
+
+#define S_EOPCH3 30
+#define V_EOPCH3(x) ((x) << S_EOPCH3)
+#define F_EOPCH3 V_EOPCH3(1U)
+
+#define S_SIZECH3 27
+#define M_SIZECH3 0x7U
+#define V_SIZECH3(x) ((x) << S_SIZECH3)
+#define G_SIZECH3(x) (((x) >> S_SIZECH3) & M_SIZECH3)
+
+#define S_ERRCH3 26
+#define V_ERRCH3(x) ((x) << S_ERRCH3)
+#define F_ERRCH3 V_ERRCH3(1U)
+
+#define S_FULLCH3 25
+#define V_FULLCH3(x) ((x) << S_FULLCH3)
+#define F_FULLCH3 V_FULLCH3(1U)
+
+#define S_VALIDCH3 24
+#define V_VALIDCH3(x) ((x) << S_VALIDCH3)
+#define F_VALIDCH3 V_VALIDCH3(1U)
+
+#define S_DATACH3 16
+#define M_DATACH3 0xffU
+#define V_DATACH3(x) ((x) << S_DATACH3)
+#define G_DATACH3(x) (((x) >> S_DATACH3) & M_DATACH3)
+
+#define S_SOPCH2 15
+#define V_SOPCH2(x) ((x) << S_SOPCH2)
+#define F_SOPCH2 V_SOPCH2(1U)
+
+#define S_EOPCH2 14
+#define V_EOPCH2(x) ((x) << S_EOPCH2)
+#define F_EOPCH2 V_EOPCH2(1U)
+
+#define S_SIZECH2 11
+#define M_SIZECH2 0x7U
+#define V_SIZECH2(x) ((x) << S_SIZECH2)
+#define G_SIZECH2(x) (((x) >> S_SIZECH2) & M_SIZECH2)
+
+#define S_ERRCH2 10
+#define V_ERRCH2(x) ((x) << S_ERRCH2)
+#define F_ERRCH2 V_ERRCH2(1U)
+
+#define S_FULLCH2 9
+#define V_FULLCH2(x) ((x) << S_FULLCH2)
+#define F_FULLCH2 V_FULLCH2(1U)
+
+#define S_VALIDCH2 8
+#define V_VALIDCH2(x) ((x) << S_VALIDCH2)
+#define F_VALIDCH2 V_VALIDCH2(1U)
+
+#define S_DATACH2 0
+#define M_DATACH2 0xffU
+#define V_DATACH2(x) ((x) << S_DATACH2)
+#define G_DATACH2(x) (((x) >> S_DATACH2) & M_DATACH2)
+
+#define A_MPS_TX_DEBUG_REG_TX2MAC_10 0x944c
+
+#define S_SOPPT1 31
+#define V_SOPPT1(x) ((x) << S_SOPPT1)
+#define F_SOPPT1 V_SOPPT1(1U)
+
+#define S_EOPPT1 30
+#define V_EOPPT1(x) ((x) << S_EOPPT1)
+#define F_EOPPT1 V_EOPPT1(1U)
+
+#define S_SIZEPT1 27
+#define M_SIZEPT1 0x7U
+#define V_SIZEPT1(x) ((x) << S_SIZEPT1)
+#define G_SIZEPT1(x) (((x) >> S_SIZEPT1) & M_SIZEPT1)
+
+#define S_ERRPT1 26
+#define V_ERRPT1(x) ((x) << S_ERRPT1)
+#define F_ERRPT1 V_ERRPT1(1U)
+
+#define S_FULLPT1 25
+#define V_FULLPT1(x) ((x) << S_FULLPT1)
+#define F_FULLPT1 V_FULLPT1(1U)
+
+#define S_VALIDPT1 24
+#define V_VALIDPT1(x) ((x) << S_VALIDPT1)
+#define F_VALIDPT1 V_VALIDPT1(1U)
+
+#define S_DATAPT1 16
+#define M_DATAPT1 0xffU
+#define V_DATAPT1(x) ((x) << S_DATAPT1)
+#define G_DATAPT1(x) (((x) >> S_DATAPT1) & M_DATAPT1)
+
+#define S_SOPPT0 15
+#define V_SOPPT0(x) ((x) << S_SOPPT0)
+#define F_SOPPT0 V_SOPPT0(1U)
+
+#define S_EOPPT0 14
+#define V_EOPPT0(x) ((x) << S_EOPPT0)
+#define F_EOPPT0 V_EOPPT0(1U)
+
+#define S_SIZEPT0 11
+#define M_SIZEPT0 0x7U
+#define V_SIZEPT0(x) ((x) << S_SIZEPT0)
+#define G_SIZEPT0(x) (((x) >> S_SIZEPT0) & M_SIZEPT0)
+
+#define S_ERRPT0 10
+#define V_ERRPT0(x) ((x) << S_ERRPT0)
+#define F_ERRPT0 V_ERRPT0(1U)
+
+#define S_FULLPT0 9
+#define V_FULLPT0(x) ((x) << S_FULLPT0)
+#define F_FULLPT0 V_FULLPT0(1U)
+
+#define S_VALIDPT0 8
+#define V_VALIDPT0(x) ((x) << S_VALIDPT0)
+#define F_VALIDPT0 V_VALIDPT0(1U)
+
+#define S_DATAPT0 0
+#define M_DATAPT0 0xffU
+#define V_DATAPT0(x) ((x) << S_DATAPT0)
+#define G_DATAPT0(x) (((x) >> S_DATAPT0) & M_DATAPT0)
+
+#define A_MPS_TX_DEBUG_REG_TX2MAC_32 0x9450
+
+#define S_SOPPT3 31
+#define V_SOPPT3(x) ((x) << S_SOPPT3)
+#define F_SOPPT3 V_SOPPT3(1U)
+
+#define S_EOPPT3 30
+#define V_EOPPT3(x) ((x) << S_EOPPT3)
+#define F_EOPPT3 V_EOPPT3(1U)
+
+#define S_SIZEPT3 27
+#define M_SIZEPT3 0x7U
+#define V_SIZEPT3(x) ((x) << S_SIZEPT3)
+#define G_SIZEPT3(x) (((x) >> S_SIZEPT3) & M_SIZEPT3)
+
+#define S_ERRPT3 26
+#define V_ERRPT3(x) ((x) << S_ERRPT3)
+#define F_ERRPT3 V_ERRPT3(1U)
+
+#define S_FULLPT3 25
+#define V_FULLPT3(x) ((x) << S_FULLPT3)
+#define F_FULLPT3 V_FULLPT3(1U)
+
+#define S_VALIDPT3 24
+#define V_VALIDPT3(x) ((x) << S_VALIDPT3)
+#define F_VALIDPT3 V_VALIDPT3(1U)
+
+#define S_DATAPT3 16
+#define M_DATAPT3 0xffU
+#define V_DATAPT3(x) ((x) << S_DATAPT3)
+#define G_DATAPT3(x) (((x) >> S_DATAPT3) & M_DATAPT3)
+
+#define S_SOPPT2 15
+#define V_SOPPT2(x) ((x) << S_SOPPT2)
+#define F_SOPPT2 V_SOPPT2(1U)
+
+#define S_EOPPT2 14
+#define V_EOPPT2(x) ((x) << S_EOPPT2)
+#define F_EOPPT2 V_EOPPT2(1U)
+
+#define S_SIZEPT2 11
+#define M_SIZEPT2 0x7U
+#define V_SIZEPT2(x) ((x) << S_SIZEPT2)
+#define G_SIZEPT2(x) (((x) >> S_SIZEPT2) & M_SIZEPT2)
+
+#define S_ERRPT2 10
+#define V_ERRPT2(x) ((x) << S_ERRPT2)
+#define F_ERRPT2 V_ERRPT2(1U)
+
+#define S_FULLPT2 9
+#define V_FULLPT2(x) ((x) << S_FULLPT2)
+#define F_FULLPT2 V_FULLPT2(1U)
+
+#define S_VALIDPT2 8
+#define V_VALIDPT2(x) ((x) << S_VALIDPT2)
+#define F_VALIDPT2 V_VALIDPT2(1U)
+
+#define S_DATAPT2 0
+#define M_DATAPT2 0xffU
+#define V_DATAPT2(x) ((x) << S_DATAPT2)
+#define G_DATAPT2(x) (((x) >> S_DATAPT2) & M_DATAPT2)
+
+#define A_MPS_TX_SGE_CH_PAUSE_IGNR 0x9454
+
+#define S_SGEPAUSEIGNR 0
+#define M_SGEPAUSEIGNR 0xfU
+#define V_SGEPAUSEIGNR(x) ((x) << S_SGEPAUSEIGNR)
+#define G_SGEPAUSEIGNR(x) (((x) >> S_SGEPAUSEIGNR) & M_SGEPAUSEIGNR)
+
+#define A_MPS_TX_DEBUG_SUBPART_SEL 0x9458
+
+#define S_SUBPRTH 11
+#define M_SUBPRTH 0x1fU
+#define V_SUBPRTH(x) ((x) << S_SUBPRTH)
+#define G_SUBPRTH(x) (((x) >> S_SUBPRTH) & M_SUBPRTH)
+
+#define S_PORTH 8
+#define M_PORTH 0x7U
+#define V_PORTH(x) ((x) << S_PORTH)
+#define G_PORTH(x) (((x) >> S_PORTH) & M_PORTH)
+
+#define S_SUBPRTL 3
+#define M_SUBPRTL 0x1fU
+#define V_SUBPRTL(x) ((x) << S_SUBPRTL)
+#define G_SUBPRTL(x) (((x) >> S_SUBPRTL) & M_SUBPRTL)
+
+#define S_PORTL 0
+#define M_PORTL 0x7U
+#define V_PORTL(x) ((x) << S_PORTL)
+#define G_PORTL(x) (((x) >> S_PORTL) & M_PORTL)
+
+#define A_MPS_STAT_CTL 0x9600
+
+#define S_COUNTVFINPF 1
+#define V_COUNTVFINPF(x) ((x) << S_COUNTVFINPF)
+#define F_COUNTVFINPF V_COUNTVFINPF(1U)
+
+#define S_LPBKERRSTAT 0
+#define V_LPBKERRSTAT(x) ((x) << S_LPBKERRSTAT)
+#define F_LPBKERRSTAT V_LPBKERRSTAT(1U)
+
+#define A_MPS_STAT_INT_ENABLE 0x9608
+
+#define S_PLREADSYNCERR 0
+#define V_PLREADSYNCERR(x) ((x) << S_PLREADSYNCERR)
+#define F_PLREADSYNCERR V_PLREADSYNCERR(1U)
+
+#define A_MPS_STAT_INT_CAUSE 0x960c
+#define A_MPS_STAT_PERR_INT_ENABLE_SRAM 0x9610
+
+#define S_RXBG 20
+#define V_RXBG(x) ((x) << S_RXBG)
+#define F_RXBG V_RXBG(1U)
+
+#define S_RXVF 18
+#define M_RXVF 0x3U
+#define V_RXVF(x) ((x) << S_RXVF)
+#define G_RXVF(x) (((x) >> S_RXVF) & M_RXVF)
+
+#define S_TXVF 16
+#define M_TXVF 0x3U
+#define V_TXVF(x) ((x) << S_TXVF)
+#define G_TXVF(x) (((x) >> S_TXVF) & M_TXVF)
+
+#define S_RXPF 13
+#define M_RXPF 0x7U
+#define V_RXPF(x) ((x) << S_RXPF)
+#define G_RXPF(x) (((x) >> S_RXPF) & M_RXPF)
+
+#define S_TXPF 11
+#define M_TXPF 0x3U
+#define V_TXPF(x) ((x) << S_TXPF)
+#define G_TXPF(x) (((x) >> S_TXPF) & M_TXPF)
+
+#define S_RXPORT 7
+#define M_RXPORT 0xfU
+#define V_RXPORT(x) ((x) << S_RXPORT)
+#define G_RXPORT(x) (((x) >> S_RXPORT) & M_RXPORT)
+
+#define S_LBPORT 4
+#define M_LBPORT 0x7U
+#define V_LBPORT(x) ((x) << S_LBPORT)
+#define G_LBPORT(x) (((x) >> S_LBPORT) & M_LBPORT)
+
+#define S_TXPORT 0
+#define M_TXPORT 0xfU
+#define V_TXPORT(x) ((x) << S_TXPORT)
+#define G_TXPORT(x) (((x) >> S_TXPORT) & M_TXPORT)
+
+#define A_MPS_STAT_PERR_INT_CAUSE_SRAM 0x9614
+#define A_MPS_STAT_PERR_ENABLE_SRAM 0x9618
+#define A_MPS_STAT_PERR_INT_ENABLE_TX_FIFO 0x961c
+
+#define S_TX 12
+#define M_TX 0xffU
+#define V_TX(x) ((x) << S_TX)
+#define G_TX(x) (((x) >> S_TX) & M_TX)
+
+#define S_TXPAUSEFIFO 8
+#define M_TXPAUSEFIFO 0xfU
+#define V_TXPAUSEFIFO(x) ((x) << S_TXPAUSEFIFO)
+#define G_TXPAUSEFIFO(x) (((x) >> S_TXPAUSEFIFO) & M_TXPAUSEFIFO)
+
+#define S_DROP 0
+#define M_DROP 0xffU
+#define V_DROP(x) ((x) << S_DROP)
+#define G_DROP(x) (((x) >> S_DROP) & M_DROP)
+
+#define A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO 0x9620
+#define A_MPS_STAT_PERR_ENABLE_TX_FIFO 0x9624
+#define A_MPS_STAT_PERR_INT_ENABLE_RX_FIFO 0x9628
+
+#define S_PAUSEFIFO 20
+#define M_PAUSEFIFO 0xfU
+#define V_PAUSEFIFO(x) ((x) << S_PAUSEFIFO)
+#define G_PAUSEFIFO(x) (((x) >> S_PAUSEFIFO) & M_PAUSEFIFO)
+
+#define S_LPBK 16
+#define M_LPBK 0xfU
+#define V_LPBK(x) ((x) << S_LPBK)
+#define G_LPBK(x) (((x) >> S_LPBK) & M_LPBK)
+
+#define S_NQ 8
+#define M_NQ 0xffU
+#define V_NQ(x) ((x) << S_NQ)
+#define G_NQ(x) (((x) >> S_NQ) & M_NQ)
+
+#define S_PV 4
+#define M_PV 0xfU
+#define V_PV(x) ((x) << S_PV)
+#define G_PV(x) (((x) >> S_PV) & M_PV)
+
+#define S_MAC 0
+#define M_MAC 0xfU
+#define V_MAC(x) ((x) << S_MAC)
+#define G_MAC(x) (((x) >> S_MAC) & M_MAC)
+
+#define A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO 0x962c
+#define A_MPS_STAT_PERR_ENABLE_RX_FIFO 0x9630
+#define A_MPS_STAT_PERR_INJECT 0x9634
+
+#define S_STATMEMSEL 1
+#define M_STATMEMSEL 0x7fU
+#define V_STATMEMSEL(x) ((x) << S_STATMEMSEL)
+#define G_STATMEMSEL(x) (((x) >> S_STATMEMSEL) & M_STATMEMSEL)
+
+#define A_MPS_STAT_DEBUG_SUB_SEL 0x9638
+#define A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L 0x9640
+#define A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_H 0x9644
+#define A_MPS_STAT_RX_BG_1_MAC_DROP_FRAME_L 0x9648
+#define A_MPS_STAT_RX_BG_1_MAC_DROP_FRAME_H 0x964c
+#define A_MPS_STAT_RX_BG_2_MAC_DROP_FRAME_L 0x9650
+#define A_MPS_STAT_RX_BG_2_MAC_DROP_FRAME_H 0x9654
+#define A_MPS_STAT_RX_BG_3_MAC_DROP_FRAME_L 0x9658
+#define A_MPS_STAT_RX_BG_3_MAC_DROP_FRAME_H 0x965c
+#define A_MPS_STAT_RX_BG_0_LB_DROP_FRAME_L 0x9660
+#define A_MPS_STAT_RX_BG_0_LB_DROP_FRAME_H 0x9664
+#define A_MPS_STAT_RX_BG_1_LB_DROP_FRAME_L 0x9668
+#define A_MPS_STAT_RX_BG_1_LB_DROP_FRAME_H 0x966c
+#define A_MPS_STAT_RX_BG_2_LB_DROP_FRAME_L 0x9670
+#define A_MPS_STAT_RX_BG_2_LB_DROP_FRAME_H 0x9674
+#define A_MPS_STAT_RX_BG_3_LB_DROP_FRAME_L 0x9678
+#define A_MPS_STAT_RX_BG_3_LB_DROP_FRAME_H 0x967c
+#define A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L 0x9680
+#define A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_H 0x9684
+#define A_MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_L 0x9688
+#define A_MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_H 0x968c
+#define A_MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_L 0x9690
+#define A_MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_H 0x9694
+#define A_MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_L 0x9698
+#define A_MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_H 0x969c
+#define A_MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_L 0x96a0
+#define A_MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_H 0x96a4
+#define A_MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_L 0x96a8
+#define A_MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_H 0x96ac
+#define A_MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_L 0x96b0
+#define A_MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_H 0x96b4
+#define A_MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_L 0x96b8
+#define A_MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_H 0x96bc
+#define A_MPS_TRC_CFG 0x9800
+
+#define S_TRCFIFOEMPTY 4
+#define V_TRCFIFOEMPTY(x) ((x) << S_TRCFIFOEMPTY)
+#define F_TRCFIFOEMPTY V_TRCFIFOEMPTY(1U)
+
+#define S_TRCIGNOREDROPINPUT 3
+#define V_TRCIGNOREDROPINPUT(x) ((x) << S_TRCIGNOREDROPINPUT)
+#define F_TRCIGNOREDROPINPUT V_TRCIGNOREDROPINPUT(1U)
+
+#define S_TRCKEEPDUPLICATES 2
+#define V_TRCKEEPDUPLICATES(x) ((x) << S_TRCKEEPDUPLICATES)
+#define F_TRCKEEPDUPLICATES V_TRCKEEPDUPLICATES(1U)
+
+#define S_TRCEN 1
+#define V_TRCEN(x) ((x) << S_TRCEN)
+#define F_TRCEN V_TRCEN(1U)
+
+#define S_TRCMULTIFILTER 0
+#define V_TRCMULTIFILTER(x) ((x) << S_TRCMULTIFILTER)
+#define F_TRCMULTIFILTER V_TRCMULTIFILTER(1U)
+
+#define A_MPS_TRC_RSS_HASH 0x9804
+#define A_MPS_TRC_RSS_CONTROL 0x9808
+
+#define S_RSSCONTROL 16
+#define M_RSSCONTROL 0xffU
+#define V_RSSCONTROL(x) ((x) << S_RSSCONTROL)
+#define G_RSSCONTROL(x) (((x) >> S_RSSCONTROL) & M_RSSCONTROL)
+
+#define S_QUEUENUMBER 0
+#define M_QUEUENUMBER 0xffffU
+#define V_QUEUENUMBER(x) ((x) << S_QUEUENUMBER)
+#define G_QUEUENUMBER(x) (((x) >> S_QUEUENUMBER) & M_QUEUENUMBER)
+
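+/*
+ * Illustrative use of the helpers above (a sketch only, not driver code):
+ * a trace RSS destination could be composed as
+ *     v = V_RSSCONTROL(ch) | V_QUEUENUMBER(qid);
+ * before being written to A_MPS_TRC_RSS_CONTROL, and decoded from a read
+ * of that register with G_RSSCONTROL(v) and G_QUEUENUMBER(v).  The names
+ * ch and qid are placeholders for a channel and queue id.
+ */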
+#define A_MPS_TRC_FILTER_MATCH_CTL_A 0x9810
+
+#define S_TFINVERTMATCH 24
+#define V_TFINVERTMATCH(x) ((x) << S_TFINVERTMATCH)
+#define F_TFINVERTMATCH V_TFINVERTMATCH(1U)
+
+#define S_TFPKTTOOLARGE 23
+#define V_TFPKTTOOLARGE(x) ((x) << S_TFPKTTOOLARGE)
+#define F_TFPKTTOOLARGE V_TFPKTTOOLARGE(1U)
+
+#define S_TFEN 22
+#define V_TFEN(x) ((x) << S_TFEN)
+#define F_TFEN V_TFEN(1U)
+
+#define S_TFPORT 18
+#define M_TFPORT 0xfU
+#define V_TFPORT(x) ((x) << S_TFPORT)
+#define G_TFPORT(x) (((x) >> S_TFPORT) & M_TFPORT)
+
+#define S_TFDROP 17
+#define V_TFDROP(x) ((x) << S_TFDROP)
+#define F_TFDROP V_TFDROP(1U)
+
+#define S_TFSOPEOPERR 16
+#define V_TFSOPEOPERR(x) ((x) << S_TFSOPEOPERR)
+#define F_TFSOPEOPERR V_TFSOPEOPERR(1U)
+
+#define S_TFLENGTH 8
+#define M_TFLENGTH 0x1fU
+#define V_TFLENGTH(x) ((x) << S_TFLENGTH)
+#define G_TFLENGTH(x) (((x) >> S_TFLENGTH) & M_TFLENGTH)
+
+#define S_TFOFFSET 0
+#define M_TFOFFSET 0x1fU
+#define V_TFOFFSET(x) ((x) << S_TFOFFSET)
+#define G_TFOFFSET(x) (((x) >> S_TFOFFSET) & M_TFOFFSET)
+
+#define A_MPS_TRC_FILTER_MATCH_CTL_B 0x9820
+
+#define S_TFMINPKTSIZE 16
+#define M_TFMINPKTSIZE 0x1ffU
+#define V_TFMINPKTSIZE(x) ((x) << S_TFMINPKTSIZE)
+#define G_TFMINPKTSIZE(x) (((x) >> S_TFMINPKTSIZE) & M_TFMINPKTSIZE)
+
+#define S_TFCAPTUREMAX 0
+#define M_TFCAPTUREMAX 0x3fffU
+#define V_TFCAPTUREMAX(x) ((x) << S_TFCAPTUREMAX)
+#define G_TFCAPTUREMAX(x) (((x) >> S_TFCAPTUREMAX) & M_TFCAPTUREMAX)
+
+#define A_MPS_TRC_FILTER_RUNT_CTL 0x9830
+
+#define S_TFRUNTSIZE 0
+#define M_TFRUNTSIZE 0x3fU
+#define V_TFRUNTSIZE(x) ((x) << S_TFRUNTSIZE)
+#define G_TFRUNTSIZE(x) (((x) >> S_TFRUNTSIZE) & M_TFRUNTSIZE)
+
+#define A_MPS_TRC_FILTER_DROP 0x9840
+
+#define S_TFDROPINPCOUNT 16
+#define M_TFDROPINPCOUNT 0xffffU
+#define V_TFDROPINPCOUNT(x) ((x) << S_TFDROPINPCOUNT)
+#define G_TFDROPINPCOUNT(x) (((x) >> S_TFDROPINPCOUNT) & M_TFDROPINPCOUNT)
+
+#define S_TFDROPBUFFERCOUNT 0
+#define M_TFDROPBUFFERCOUNT 0xffffU
+#define V_TFDROPBUFFERCOUNT(x) ((x) << S_TFDROPBUFFERCOUNT)
+#define G_TFDROPBUFFERCOUNT(x) (((x) >> S_TFDROPBUFFERCOUNT) & M_TFDROPBUFFERCOUNT)
+
+#define A_MPS_TRC_PERR_INJECT 0x9850
+
+#define S_TRCMEMSEL 1
+#define M_TRCMEMSEL 0xfU
+#define V_TRCMEMSEL(x) ((x) << S_TRCMEMSEL)
+#define G_TRCMEMSEL(x) (((x) >> S_TRCMEMSEL) & M_TRCMEMSEL)
+
+#define A_MPS_TRC_PERR_ENABLE 0x9854
+
+#define S_MISCPERR 8
+#define V_MISCPERR(x) ((x) << S_MISCPERR)
+#define F_MISCPERR V_MISCPERR(1U)
+
+#define S_PKTFIFO 4
+#define M_PKTFIFO 0xfU
+#define V_PKTFIFO(x) ((x) << S_PKTFIFO)
+#define G_PKTFIFO(x) (((x) >> S_PKTFIFO) & M_PKTFIFO)
+
+#define S_FILTMEM 0
+#define M_FILTMEM 0xfU
+#define V_FILTMEM(x) ((x) << S_FILTMEM)
+#define G_FILTMEM(x) (((x) >> S_FILTMEM) & M_FILTMEM)
+
+#define A_MPS_TRC_INT_ENABLE 0x9858
+
+#define S_TRCPLERRENB 9
+#define V_TRCPLERRENB(x) ((x) << S_TRCPLERRENB)
+#define F_TRCPLERRENB V_TRCPLERRENB(1U)
+
+#define A_MPS_TRC_INT_CAUSE 0x985c
+#define A_MPS_TRC_TIMESTAMP_L 0x9860
+#define A_MPS_TRC_TIMESTAMP_H 0x9864
+#define A_MPS_TRC_FILTER0_MATCH 0x9c00
+#define A_MPS_TRC_FILTER0_DONT_CARE 0x9c80
+#define A_MPS_TRC_FILTER1_MATCH 0x9d00
+#define A_MPS_TRC_FILTER1_DONT_CARE 0x9d80
+#define A_MPS_TRC_FILTER2_MATCH 0x9e00
+#define A_MPS_TRC_FILTER2_DONT_CARE 0x9e80
+#define A_MPS_TRC_FILTER3_MATCH 0x9f00
+#define A_MPS_TRC_FILTER3_DONT_CARE 0x9f80
+#define A_MPS_CLS_CTL 0xd000
+
+#define S_MEMWRITEFAULT 4
+#define V_MEMWRITEFAULT(x) ((x) << S_MEMWRITEFAULT)
+#define F_MEMWRITEFAULT V_MEMWRITEFAULT(1U)
+
+#define S_MEMWRITEWAITING 3
+#define V_MEMWRITEWAITING(x) ((x) << S_MEMWRITEWAITING)
+#define F_MEMWRITEWAITING V_MEMWRITEWAITING(1U)
+
+#define S_CIMNOPROMISCUOUS 2
+#define V_CIMNOPROMISCUOUS(x) ((x) << S_CIMNOPROMISCUOUS)
+#define F_CIMNOPROMISCUOUS V_CIMNOPROMISCUOUS(1U)
+
+#define S_HYPERVISORONLY 1
+#define V_HYPERVISORONLY(x) ((x) << S_HYPERVISORONLY)
+#define F_HYPERVISORONLY V_HYPERVISORONLY(1U)
+
+#define S_VLANCLSEN 0
+#define V_VLANCLSEN(x) ((x) << S_VLANCLSEN)
+#define F_VLANCLSEN V_VLANCLSEN(1U)
+
+#define A_MPS_CLS_ARB_WEIGHT 0xd004
+
+#define S_PLWEIGHT 16
+#define M_PLWEIGHT 0x1fU
+#define V_PLWEIGHT(x) ((x) << S_PLWEIGHT)
+#define G_PLWEIGHT(x) (((x) >> S_PLWEIGHT) & M_PLWEIGHT)
+
+#define S_CIMWEIGHT 8
+#define M_CIMWEIGHT 0x1fU
+#define V_CIMWEIGHT(x) ((x) << S_CIMWEIGHT)
+#define G_CIMWEIGHT(x) (((x) >> S_CIMWEIGHT) & M_CIMWEIGHT)
+
+#define S_LPBKWEIGHT 0
+#define M_LPBKWEIGHT 0x1fU
+#define V_LPBKWEIGHT(x) ((x) << S_LPBKWEIGHT)
+#define G_LPBKWEIGHT(x) (((x) >> S_LPBKWEIGHT) & M_LPBKWEIGHT)
+
+#define A_MPS_CLS_BMC_MAC_ADDR_L 0xd010
+#define A_MPS_CLS_BMC_MAC_ADDR_H 0xd014
+#define A_MPS_CLS_BMC_VLAN 0xd018
+#define A_MPS_CLS_PERR_INJECT 0xd01c
+
+#define S_CLS_MEMSEL 1
+#define M_CLS_MEMSEL 0x3U
+#define V_CLS_MEMSEL(x) ((x) << S_CLS_MEMSEL)
+#define G_CLS_MEMSEL(x) (((x) >> S_CLS_MEMSEL) & M_CLS_MEMSEL)
+
+#define A_MPS_CLS_PERR_ENABLE 0xd020
+
+#define S_HASHSRAM 2
+#define V_HASHSRAM(x) ((x) << S_HASHSRAM)
+#define F_HASHSRAM V_HASHSRAM(1U)
+
+#define S_MATCHTCAM 1
+#define V_MATCHTCAM(x) ((x) << S_MATCHTCAM)
+#define F_MATCHTCAM V_MATCHTCAM(1U)
+
+#define S_MATCHSRAM 0
+#define V_MATCHSRAM(x) ((x) << S_MATCHSRAM)
+#define F_MATCHSRAM V_MATCHSRAM(1U)
+
+#define A_MPS_CLS_INT_ENABLE 0xd024
+
+#define S_PLERRENB 3
+#define V_PLERRENB(x) ((x) << S_PLERRENB)
+#define F_PLERRENB V_PLERRENB(1U)
+
+#define A_MPS_CLS_INT_CAUSE 0xd028
+#define A_MPS_CLS_PL_TEST_DATA_L 0xd02c
+#define A_MPS_CLS_PL_TEST_DATA_H 0xd030
+#define A_MPS_CLS_PL_TEST_RES_DATA 0xd034
+
+#define S_CLS_PRIORITY 24
+#define M_CLS_PRIORITY 0x7U
+#define V_CLS_PRIORITY(x) ((x) << S_CLS_PRIORITY)
+#define G_CLS_PRIORITY(x) (((x) >> S_CLS_PRIORITY) & M_CLS_PRIORITY)
+
+#define S_CLS_REPLICATE 23
+#define V_CLS_REPLICATE(x) ((x) << S_CLS_REPLICATE)
+#define F_CLS_REPLICATE V_CLS_REPLICATE(1U)
+
+#define S_CLS_INDEX 14
+#define M_CLS_INDEX 0x1ffU
+#define V_CLS_INDEX(x) ((x) << S_CLS_INDEX)
+#define G_CLS_INDEX(x) (((x) >> S_CLS_INDEX) & M_CLS_INDEX)
+
+#define S_CLS_VF 7
+#define M_CLS_VF 0x7fU
+#define V_CLS_VF(x) ((x) << S_CLS_VF)
+#define G_CLS_VF(x) (((x) >> S_CLS_VF) & M_CLS_VF)
+
+#define S_CLS_VF_VLD 6
+#define V_CLS_VF_VLD(x) ((x) << S_CLS_VF_VLD)
+#define F_CLS_VF_VLD V_CLS_VF_VLD(1U)
+
+#define S_CLS_PF 3
+#define M_CLS_PF 0x7U
+#define V_CLS_PF(x) ((x) << S_CLS_PF)
+#define G_CLS_PF(x) (((x) >> S_CLS_PF) & M_CLS_PF)
+
+#define S_CLS_MATCH 0
+#define M_CLS_MATCH 0x7U
+#define V_CLS_MATCH(x) ((x) << S_CLS_MATCH)
+#define G_CLS_MATCH(x) (((x) >> S_CLS_MATCH) & M_CLS_MATCH)
+
+#define A_MPS_CLS_PL_TEST_CTL 0xd038
+
+#define S_PLTESTCTL 0
+#define V_PLTESTCTL(x) ((x) << S_PLTESTCTL)
+#define F_PLTESTCTL V_PLTESTCTL(1U)
+
+#define A_MPS_CLS_PORT_BMC_CTL 0xd03c
+
+#define S_PRTBMCCTL 0
+#define V_PRTBMCCTL(x) ((x) << S_PRTBMCCTL)
+#define F_PRTBMCCTL V_PRTBMCCTL(1U)
+
+#define A_MPS_CLS_VLAN_TABLE 0xdfc0
+
+#define S_VLAN_MASK 16
+#define M_VLAN_MASK 0xfffU
+#define V_VLAN_MASK(x) ((x) << S_VLAN_MASK)
+#define G_VLAN_MASK(x) (((x) >> S_VLAN_MASK) & M_VLAN_MASK)
+
+#define S_VLANPF 13
+#define M_VLANPF 0x7U
+#define V_VLANPF(x) ((x) << S_VLANPF)
+#define G_VLANPF(x) (((x) >> S_VLANPF) & M_VLANPF)
+
+#define S_VLAN_VALID 12
+#define V_VLAN_VALID(x) ((x) << S_VLAN_VALID)
+#define F_VLAN_VALID V_VLAN_VALID(1U)
+
+#define A_MPS_CLS_SRAM_L 0xe000
+
+#define S_MULTILISTEN3 28
+#define V_MULTILISTEN3(x) ((x) << S_MULTILISTEN3)
+#define F_MULTILISTEN3 V_MULTILISTEN3(1U)
+
+#define S_MULTILISTEN2 27
+#define V_MULTILISTEN2(x) ((x) << S_MULTILISTEN2)
+#define F_MULTILISTEN2 V_MULTILISTEN2(1U)
+
+#define S_MULTILISTEN1 26
+#define V_MULTILISTEN1(x) ((x) << S_MULTILISTEN1)
+#define F_MULTILISTEN1 V_MULTILISTEN1(1U)
+
+#define S_MULTILISTEN0 25
+#define V_MULTILISTEN0(x) ((x) << S_MULTILISTEN0)
+#define F_MULTILISTEN0 V_MULTILISTEN0(1U)
+
+#define S_SRAM_PRIO3 22
+#define M_SRAM_PRIO3 0x7U
+#define V_SRAM_PRIO3(x) ((x) << S_SRAM_PRIO3)
+#define G_SRAM_PRIO3(x) (((x) >> S_SRAM_PRIO3) & M_SRAM_PRIO3)
+
+#define S_SRAM_PRIO2 19
+#define M_SRAM_PRIO2 0x7U
+#define V_SRAM_PRIO2(x) ((x) << S_SRAM_PRIO2)
+#define G_SRAM_PRIO2(x) (((x) >> S_SRAM_PRIO2) & M_SRAM_PRIO2)
+
+#define S_SRAM_PRIO1 16
+#define M_SRAM_PRIO1 0x7U
+#define V_SRAM_PRIO1(x) ((x) << S_SRAM_PRIO1)
+#define G_SRAM_PRIO1(x) (((x) >> S_SRAM_PRIO1) & M_SRAM_PRIO1)
+
+#define S_SRAM_PRIO0 13
+#define M_SRAM_PRIO0 0x7U
+#define V_SRAM_PRIO0(x) ((x) << S_SRAM_PRIO0)
+#define G_SRAM_PRIO0(x) (((x) >> S_SRAM_PRIO0) & M_SRAM_PRIO0)
+
+#define S_SRAM_VLD 12
+#define V_SRAM_VLD(x) ((x) << S_SRAM_VLD)
+#define F_SRAM_VLD V_SRAM_VLD(1U)
+
+#define A_MPS_CLS_SRAM_H 0xe004
+
+#define S_MACPARITY1 9
+#define V_MACPARITY1(x) ((x) << S_MACPARITY1)
+#define F_MACPARITY1 V_MACPARITY1(1U)
+
+#define S_MACPARITY0 8
+#define V_MACPARITY0(x) ((x) << S_MACPARITY0)
+#define F_MACPARITY0 V_MACPARITY0(1U)
+
+#define S_MACPARITYMASKSIZE 4
+#define M_MACPARITYMASKSIZE 0xfU
+#define V_MACPARITYMASKSIZE(x) ((x) << S_MACPARITYMASKSIZE)
+#define G_MACPARITYMASKSIZE(x) (((x) >> S_MACPARITYMASKSIZE) & M_MACPARITYMASKSIZE)
+
+#define S_PORTMAP 0
+#define M_PORTMAP 0xfU
+#define V_PORTMAP(x) ((x) << S_PORTMAP)
+#define G_PORTMAP(x) (((x) >> S_PORTMAP) & M_PORTMAP)
+
+#define A_MPS_CLS_TCAM_Y_L 0xf000
+#define A_MPS_CLS_TCAM_Y_H 0xf004
+
+#define S_TCAMYH 0
+#define M_TCAMYH 0xffffU
+#define V_TCAMYH(x) ((x) << S_TCAMYH)
+#define G_TCAMYH(x) (((x) >> S_TCAMYH) & M_TCAMYH)
+
+#define A_MPS_CLS_TCAM_X_L 0xf008
+#define A_MPS_CLS_TCAM_X_H 0xf00c
+
+#define S_TCAMXH 0
+#define M_TCAMXH 0xffffU
+#define V_TCAMXH(x) ((x) << S_TCAMXH)
+#define G_TCAMXH(x) (((x) >> S_TCAMXH) & M_TCAMXH)
+
+#define A_MPS_RX_CTL 0x11000
+
+#define S_FILT_VLAN_SEL 17
+#define V_FILT_VLAN_SEL(x) ((x) << S_FILT_VLAN_SEL)
+#define F_FILT_VLAN_SEL V_FILT_VLAN_SEL(1U)
+
+#define S_CBA_EN 16
+#define V_CBA_EN(x) ((x) << S_CBA_EN)
+#define F_CBA_EN V_CBA_EN(1U)
+
+#define S_BLK_SNDR 12
+#define M_BLK_SNDR 0xfU
+#define V_BLK_SNDR(x) ((x) << S_BLK_SNDR)
+#define G_BLK_SNDR(x) (((x) >> S_BLK_SNDR) & M_BLK_SNDR)
+
+#define S_CMPRS 8
+#define M_CMPRS 0xfU
+#define V_CMPRS(x) ((x) << S_CMPRS)
+#define G_CMPRS(x) (((x) >> S_CMPRS) & M_CMPRS)
+
+#define S_SNF 0
+#define M_SNF 0xffU
+#define V_SNF(x) ((x) << S_SNF)
+#define G_SNF(x) (((x) >> S_SNF) & M_SNF)
+
+#define A_MPS_RX_PORT_MUX_CTL 0x11004
+
+#define S_CTL_P3 12
+#define M_CTL_P3 0xfU
+#define V_CTL_P3(x) ((x) << S_CTL_P3)
+#define G_CTL_P3(x) (((x) >> S_CTL_P3) & M_CTL_P3)
+
+#define S_CTL_P2 8
+#define M_CTL_P2 0xfU
+#define V_CTL_P2(x) ((x) << S_CTL_P2)
+#define G_CTL_P2(x) (((x) >> S_CTL_P2) & M_CTL_P2)
+
+#define S_CTL_P1 4
+#define M_CTL_P1 0xfU
+#define V_CTL_P1(x) ((x) << S_CTL_P1)
+#define G_CTL_P1(x) (((x) >> S_CTL_P1) & M_CTL_P1)
+
+#define S_CTL_P0 0
+#define M_CTL_P0 0xfU
+#define V_CTL_P0(x) ((x) << S_CTL_P0)
+#define G_CTL_P0(x) (((x) >> S_CTL_P0) & M_CTL_P0)
+
+#define A_MPS_RX_PG_FL 0x11008
+
+#define S_RST 16
+#define V_RST(x) ((x) << S_RST)
+#define F_RST V_RST(1U)
+
+#define S_CNT 0
+#define M_CNT 0xffffU
+#define V_CNT(x) ((x) << S_CNT)
+#define G_CNT(x) (((x) >> S_CNT) & M_CNT)
+
+#define A_MPS_RX_PKT_FL 0x1100c
+#define A_MPS_RX_PG_RSV0 0x11010
+
+#define S_CLR_INTR 31
+#define V_CLR_INTR(x) ((x) << S_CLR_INTR)
+#define F_CLR_INTR V_CLR_INTR(1U)
+
+#define S_SET_INTR 30
+#define V_SET_INTR(x) ((x) << S_SET_INTR)
+#define F_SET_INTR V_SET_INTR(1U)
+
+#define S_USED 16
+#define M_USED 0x7ffU
+#define V_USED(x) ((x) << S_USED)
+#define G_USED(x) (((x) >> S_USED) & M_USED)
+
+#define S_ALLOC 0
+#define M_ALLOC 0x7ffU
+#define V_ALLOC(x) ((x) << S_ALLOC)
+#define G_ALLOC(x) (((x) >> S_ALLOC) & M_ALLOC)
+
+#define A_MPS_RX_PG_RSV1 0x11014
+#define A_MPS_RX_PG_RSV2 0x11018
+#define A_MPS_RX_PG_RSV3 0x1101c
+#define A_MPS_RX_PG_RSV4 0x11020
+#define A_MPS_RX_PG_RSV5 0x11024
+#define A_MPS_RX_PG_RSV6 0x11028
+#define A_MPS_RX_PG_RSV7 0x1102c
+#define A_MPS_RX_PG_SHR_BG0 0x11030
+
+#define S_EN 31
+#define V_EN(x) ((x) << S_EN)
+#define F_EN V_EN(1U)
+
+#define S_SEL 30
+#define V_SEL(x) ((x) << S_SEL)
+#define F_SEL V_SEL(1U)
+
+#define S_MAX 16
+#define M_MAX 0x7ffU
+#define V_MAX(x) ((x) << S_MAX)
+#define G_MAX(x) (((x) >> S_MAX) & M_MAX)
+
+#define S_BORW 0
+#define M_BORW 0x7ffU
+#define V_BORW(x) ((x) << S_BORW)
+#define G_BORW(x) (((x) >> S_BORW) & M_BORW)
+
+#define A_MPS_RX_PG_SHR_BG1 0x11034
+#define A_MPS_RX_PG_SHR_BG2 0x11038
+#define A_MPS_RX_PG_SHR_BG3 0x1103c
+#define A_MPS_RX_PG_SHR0 0x11040
+
+#define S_QUOTA 16
+#define M_QUOTA 0x7ffU
+#define V_QUOTA(x) ((x) << S_QUOTA)
+#define G_QUOTA(x) (((x) >> S_QUOTA) & M_QUOTA)
+
+#define S_SHR_USED 0
+#define M_SHR_USED 0x7ffU
+#define V_SHR_USED(x) ((x) << S_SHR_USED)
+#define G_SHR_USED(x) (((x) >> S_SHR_USED) & M_SHR_USED)
+
+#define A_MPS_RX_PG_SHR1 0x11044
+#define A_MPS_RX_PG_HYST_BG0 0x11048
+
+#define S_TH 0
+#define M_TH 0x7ffU
+#define V_TH(x) ((x) << S_TH)
+#define G_TH(x) (((x) >> S_TH) & M_TH)
+
+#define A_MPS_RX_PG_HYST_BG1 0x1104c
+#define A_MPS_RX_PG_HYST_BG2 0x11050
+#define A_MPS_RX_PG_HYST_BG3 0x11054
+#define A_MPS_RX_OCH_CTL 0x11058
+
+#define S_DROP_WT 27
+#define M_DROP_WT 0x1fU
+#define V_DROP_WT(x) ((x) << S_DROP_WT)
+#define G_DROP_WT(x) (((x) >> S_DROP_WT) & M_DROP_WT)
+
+#define S_TRUNC_WT 22
+#define M_TRUNC_WT 0x1fU
+#define V_TRUNC_WT(x) ((x) << S_TRUNC_WT)
+#define G_TRUNC_WT(x) (((x) >> S_TRUNC_WT) & M_TRUNC_WT)
+
+#define S_OCH_DRAIN 13
+#define M_OCH_DRAIN 0x1fU
+#define V_OCH_DRAIN(x) ((x) << S_OCH_DRAIN)
+#define G_OCH_DRAIN(x) (((x) >> S_OCH_DRAIN) & M_OCH_DRAIN)
+
+#define S_OCH_DROP 8
+#define M_OCH_DROP 0x1fU
+#define V_OCH_DROP(x) ((x) << S_OCH_DROP)
+#define G_OCH_DROP(x) (((x) >> S_OCH_DROP) & M_OCH_DROP)
+
+#define S_STOP 0
+#define M_STOP 0x1fU
+#define V_STOP(x) ((x) << S_STOP)
+#define G_STOP(x) (((x) >> S_STOP) & M_STOP)
+
+#define A_MPS_RX_LPBK_BP0 0x1105c
+
+#define S_THRESH 0
+#define M_THRESH 0x7ffU
+#define V_THRESH(x) ((x) << S_THRESH)
+#define G_THRESH(x) (((x) >> S_THRESH) & M_THRESH)
+
+#define A_MPS_RX_LPBK_BP1 0x11060
+#define A_MPS_RX_LPBK_BP2 0x11064
+#define A_MPS_RX_LPBK_BP3 0x11068
+#define A_MPS_RX_PORT_GAP 0x1106c
+
+#define S_GAP 0
+#define M_GAP 0xfffffU
+#define V_GAP(x) ((x) << S_GAP)
+#define G_GAP(x) (((x) >> S_GAP) & M_GAP)
+
+#define A_MPS_RX_CHMN_CNT 0x11070
+#define A_MPS_RX_PERR_INT_CAUSE 0x11074
+
+#define S_FF 23
+#define V_FF(x) ((x) << S_FF)
+#define F_FF V_FF(1U)
+
+#define S_PGMO 22
+#define V_PGMO(x) ((x) << S_PGMO)
+#define F_PGMO V_PGMO(1U)
+
+#define S_PGME 21
+#define V_PGME(x) ((x) << S_PGME)
+#define F_PGME V_PGME(1U)
+
+#define S_CHMN 20
+#define V_CHMN(x) ((x) << S_CHMN)
+#define F_CHMN V_CHMN(1U)
+
+#define S_RPLC 19
+#define V_RPLC(x) ((x) << S_RPLC)
+#define F_RPLC V_RPLC(1U)
+
+#define S_ATRB 18
+#define V_ATRB(x) ((x) << S_ATRB)
+#define F_ATRB V_ATRB(1U)
+
+#define S_PSMX 17
+#define V_PSMX(x) ((x) << S_PSMX)
+#define F_PSMX V_PSMX(1U)
+
+#define S_PGLL 16
+#define V_PGLL(x) ((x) << S_PGLL)
+#define F_PGLL V_PGLL(1U)
+
+#define S_PGFL 15
+#define V_PGFL(x) ((x) << S_PGFL)
+#define F_PGFL V_PGFL(1U)
+
+#define S_PKTQ 14
+#define V_PKTQ(x) ((x) << S_PKTQ)
+#define F_PKTQ V_PKTQ(1U)
+
+#define S_PKFL 13
+#define V_PKFL(x) ((x) << S_PKFL)
+#define F_PKFL V_PKFL(1U)
+
+#define S_PPM3 12
+#define V_PPM3(x) ((x) << S_PPM3)
+#define F_PPM3 V_PPM3(1U)
+
+#define S_PPM2 11
+#define V_PPM2(x) ((x) << S_PPM2)
+#define F_PPM2 V_PPM2(1U)
+
+#define S_PPM1 10
+#define V_PPM1(x) ((x) << S_PPM1)
+#define F_PPM1 V_PPM1(1U)
+
+#define S_PPM0 9
+#define V_PPM0(x) ((x) << S_PPM0)
+#define F_PPM0 V_PPM0(1U)
+
+#define S_SPMX 8
+#define V_SPMX(x) ((x) << S_SPMX)
+#define F_SPMX V_SPMX(1U)
+
+#define S_CDL3 7
+#define V_CDL3(x) ((x) << S_CDL3)
+#define F_CDL3 V_CDL3(1U)
+
+#define S_CDL2 6
+#define V_CDL2(x) ((x) << S_CDL2)
+#define F_CDL2 V_CDL2(1U)
+
+#define S_CDL1 5
+#define V_CDL1(x) ((x) << S_CDL1)
+#define F_CDL1 V_CDL1(1U)
+
+#define S_CDL0 4
+#define V_CDL0(x) ((x) << S_CDL0)
+#define F_CDL0 V_CDL0(1U)
+
+#define S_CDM3 3
+#define V_CDM3(x) ((x) << S_CDM3)
+#define F_CDM3 V_CDM3(1U)
+
+#define S_CDM2 2
+#define V_CDM2(x) ((x) << S_CDM2)
+#define F_CDM2 V_CDM2(1U)
+
+#define S_CDM1 1
+#define V_CDM1(x) ((x) << S_CDM1)
+#define F_CDM1 V_CDM1(1U)
+
+#define S_CDM0 0
+#define V_CDM0(x) ((x) << S_CDM0)
+#define F_CDM0 V_CDM0(1U)
+
+#define A_MPS_RX_PERR_INT_ENABLE 0x11078
+#define A_MPS_RX_PERR_ENABLE 0x1107c
+#define A_MPS_RX_PERR_INJECT 0x11080
+#define A_MPS_RX_FUNC_INT_CAUSE 0x11084
+
+#define S_INT_ERR_INT 8
+#define M_INT_ERR_INT 0x1fU
+#define V_INT_ERR_INT(x) ((x) << S_INT_ERR_INT)
+#define G_INT_ERR_INT(x) (((x) >> S_INT_ERR_INT) & M_INT_ERR_INT)
+
+#define S_PG_TH_INT7 7
+#define V_PG_TH_INT7(x) ((x) << S_PG_TH_INT7)
+#define F_PG_TH_INT7 V_PG_TH_INT7(1U)
+
+#define S_PG_TH_INT6 6
+#define V_PG_TH_INT6(x) ((x) << S_PG_TH_INT6)
+#define F_PG_TH_INT6 V_PG_TH_INT6(1U)
+
+#define S_PG_TH_INT5 5
+#define V_PG_TH_INT5(x) ((x) << S_PG_TH_INT5)
+#define F_PG_TH_INT5 V_PG_TH_INT5(1U)
+
+#define S_PG_TH_INT4 4
+#define V_PG_TH_INT4(x) ((x) << S_PG_TH_INT4)
+#define F_PG_TH_INT4 V_PG_TH_INT4(1U)
+
+#define S_PG_TH_INT3 3
+#define V_PG_TH_INT3(x) ((x) << S_PG_TH_INT3)
+#define F_PG_TH_INT3 V_PG_TH_INT3(1U)
+
+#define S_PG_TH_INT2 2
+#define V_PG_TH_INT2(x) ((x) << S_PG_TH_INT2)
+#define F_PG_TH_INT2 V_PG_TH_INT2(1U)
+
+#define S_PG_TH_INT1 1
+#define V_PG_TH_INT1(x) ((x) << S_PG_TH_INT1)
+#define F_PG_TH_INT1 V_PG_TH_INT1(1U)
+
+#define S_PG_TH_INT0 0
+#define V_PG_TH_INT0(x) ((x) << S_PG_TH_INT0)
+#define F_PG_TH_INT0 V_PG_TH_INT0(1U)
+
+#define A_MPS_RX_FUNC_INT_ENABLE 0x11088
+#define A_MPS_RX_PAUSE_GEN_TH_0 0x1108c
+
+#define S_TH_HIGH 16
+#define M_TH_HIGH 0xffffU
+#define V_TH_HIGH(x) ((x) << S_TH_HIGH)
+#define G_TH_HIGH(x) (((x) >> S_TH_HIGH) & M_TH_HIGH)
+
+#define S_TH_LOW 0
+#define M_TH_LOW 0xffffU
+#define V_TH_LOW(x) ((x) << S_TH_LOW)
+#define G_TH_LOW(x) (((x) >> S_TH_LOW) & M_TH_LOW)
+
+#define A_MPS_RX_PAUSE_GEN_TH_1 0x11090
+#define A_MPS_RX_PAUSE_GEN_TH_2 0x11094
+#define A_MPS_RX_PAUSE_GEN_TH_3 0x11098
+#define A_MPS_RX_PPP_ATRB 0x1109c
+
+#define S_ETYPE 16
+#define M_ETYPE 0xffffU
+#define V_ETYPE(x) ((x) << S_ETYPE)
+#define G_ETYPE(x) (((x) >> S_ETYPE) & M_ETYPE)
+
+#define S_OPCODE 0
+#define M_OPCODE 0xffffU
+#define V_OPCODE(x) ((x) << S_OPCODE)
+#define G_OPCODE(x) (((x) >> S_OPCODE) & M_OPCODE)
+
+#define A_MPS_RX_QFC0_ATRB 0x110a0
+
+#define S_DA 0
+#define M_DA 0xffffU
+#define V_DA(x) ((x) << S_DA)
+#define G_DA(x) (((x) >> S_DA) & M_DA)
+
+#define A_MPS_RX_QFC1_ATRB 0x110a4
+#define A_MPS_RX_PT_ARB0 0x110a8
+
+#define S_LPBK_WT 16
+#define M_LPBK_WT 0x3fffU
+#define V_LPBK_WT(x) ((x) << S_LPBK_WT)
+#define G_LPBK_WT(x) (((x) >> S_LPBK_WT) & M_LPBK_WT)
+
+#define S_MAC_WT 0
+#define M_MAC_WT 0x3fffU
+#define V_MAC_WT(x) ((x) << S_MAC_WT)
+#define G_MAC_WT(x) (((x) >> S_MAC_WT) & M_MAC_WT)
+
+#define A_MPS_RX_PT_ARB1 0x110ac
+#define A_MPS_RX_PT_ARB2 0x110b0
+#define A_MPS_RX_PT_ARB3 0x110b4
+#define A_MPS_RX_PT_ARB4 0x110b8
+#define A_MPS_PF_OUT_EN 0x110bc
+
+#define S_OUTEN 0
+#define M_OUTEN 0xffU
+#define V_OUTEN(x) ((x) << S_OUTEN)
+#define G_OUTEN(x) (((x) >> S_OUTEN) & M_OUTEN)
+
+#define A_MPS_BMC_MTU 0x110c0
+
+#define S_MTU 0
+#define M_MTU 0x3fffU
+#define V_MTU(x) ((x) << S_MTU)
+#define G_MTU(x) (((x) >> S_MTU) & M_MTU)
+
+#define A_MPS_BMC_PKT_CNT 0x110c4
+#define A_MPS_BMC_BYTE_CNT 0x110c8
+#define A_MPS_PFVF_ATRB_CTL 0x110cc
+
+#define S_RD_WRN 31
+#define V_RD_WRN(x) ((x) << S_RD_WRN)
+#define F_RD_WRN V_RD_WRN(1U)
+
+#define S_PFVF 0
+#define M_PFVF 0xffU
+#define V_PFVF(x) ((x) << S_PFVF)
+#define G_PFVF(x) (((x) >> S_PFVF) & M_PFVF)
+
+#define A_MPS_PFVF_ATRB 0x110d0
+
+#define S_ATTR_PF 28
+#define M_ATTR_PF 0x7U
+#define V_ATTR_PF(x) ((x) << S_ATTR_PF)
+#define G_ATTR_PF(x) (((x) >> S_ATTR_PF) & M_ATTR_PF)
+
+#define S_OFF 18
+#define V_OFF(x) ((x) << S_OFF)
+#define F_OFF V_OFF(1U)
+
+#define S_NV_DROP 17
+#define V_NV_DROP(x) ((x) << S_NV_DROP)
+#define F_NV_DROP V_NV_DROP(1U)
+
+#define S_ATTR_MODE 16
+#define V_ATTR_MODE(x) ((x) << S_ATTR_MODE)
+#define F_ATTR_MODE V_ATTR_MODE(1U)
+
+#define A_MPS_PFVF_ATRB_FLTR0 0x110d4
+
+#define S_VLAN_EN 16
+#define V_VLAN_EN(x) ((x) << S_VLAN_EN)
+#define F_VLAN_EN V_VLAN_EN(1U)
+
+#define S_VLAN_ID 0
+#define M_VLAN_ID 0xfffU
+#define V_VLAN_ID(x) ((x) << S_VLAN_ID)
+#define G_VLAN_ID(x) (((x) >> S_VLAN_ID) & M_VLAN_ID)
+
+#define A_MPS_PFVF_ATRB_FLTR1 0x110d8
+#define A_MPS_PFVF_ATRB_FLTR2 0x110dc
+#define A_MPS_PFVF_ATRB_FLTR3 0x110e0
+#define A_MPS_PFVF_ATRB_FLTR4 0x110e4
+#define A_MPS_PFVF_ATRB_FLTR5 0x110e8
+#define A_MPS_PFVF_ATRB_FLTR6 0x110ec
+#define A_MPS_PFVF_ATRB_FLTR7 0x110f0
+#define A_MPS_PFVF_ATRB_FLTR8 0x110f4
+#define A_MPS_PFVF_ATRB_FLTR9 0x110f8
+#define A_MPS_PFVF_ATRB_FLTR10 0x110fc
+#define A_MPS_PFVF_ATRB_FLTR11 0x11100
+#define A_MPS_PFVF_ATRB_FLTR12 0x11104
+#define A_MPS_PFVF_ATRB_FLTR13 0x11108
+#define A_MPS_PFVF_ATRB_FLTR14 0x1110c
+#define A_MPS_PFVF_ATRB_FLTR15 0x11110
+#define A_MPS_RPLC_MAP_CTL 0x11114
+
+#define S_RPLC_MAP_ADDR 0
+#define M_RPLC_MAP_ADDR 0x3ffU
+#define V_RPLC_MAP_ADDR(x) ((x) << S_RPLC_MAP_ADDR)
+#define G_RPLC_MAP_ADDR(x) (((x) >> S_RPLC_MAP_ADDR) & M_RPLC_MAP_ADDR)
+
+#define A_MPS_PF_RPLCT_MAP 0x11118
+
+#define S_PF_EN 0
+#define M_PF_EN 0xffU
+#define V_PF_EN(x) ((x) << S_PF_EN)
+#define G_PF_EN(x) (((x) >> S_PF_EN) & M_PF_EN)
+
+#define A_MPS_VF_RPLCT_MAP0 0x1111c
+#define A_MPS_VF_RPLCT_MAP1 0x11120
+#define A_MPS_VF_RPLCT_MAP2 0x11124
+#define A_MPS_VF_RPLCT_MAP3 0x11128
+#define A_MPS_MEM_DBG_CTL 0x1112c
+
+#define S_PKD 17
+#define V_PKD(x) ((x) << S_PKD)
+#define F_PKD V_PKD(1U)
+
+#define S_PGD 16
+#define V_PGD(x) ((x) << S_PGD)
+#define F_PGD V_PGD(1U)
+
+#define A_MPS_PKD_MEM_DATA0 0x11130
+#define A_MPS_PKD_MEM_DATA1 0x11134
+#define A_MPS_PKD_MEM_DATA2 0x11138
+#define A_MPS_PGD_MEM_DATA 0x1113c
+#define A_MPS_RX_SE_CNT_ERR 0x11140
+
+#define S_RX_SE_ERRMAP 0
+#define M_RX_SE_ERRMAP 0xfffffU
+#define V_RX_SE_ERRMAP(x) ((x) << S_RX_SE_ERRMAP)
+#define G_RX_SE_ERRMAP(x) (((x) >> S_RX_SE_ERRMAP) & M_RX_SE_ERRMAP)
+
+#define A_MPS_RX_SE_CNT_CLR 0x11144
+#define A_MPS_RX_SE_CNT_IN0 0x11148
+
+#define S_SOP_CNT_PM 24
+#define M_SOP_CNT_PM 0xffU
+#define V_SOP_CNT_PM(x) ((x) << S_SOP_CNT_PM)
+#define G_SOP_CNT_PM(x) (((x) >> S_SOP_CNT_PM) & M_SOP_CNT_PM)
+
+#define S_EOP_CNT_PM 16
+#define M_EOP_CNT_PM 0xffU
+#define V_EOP_CNT_PM(x) ((x) << S_EOP_CNT_PM)
+#define G_EOP_CNT_PM(x) (((x) >> S_EOP_CNT_PM) & M_EOP_CNT_PM)
+
+#define S_SOP_CNT_IN 8
+#define M_SOP_CNT_IN 0xffU
+#define V_SOP_CNT_IN(x) ((x) << S_SOP_CNT_IN)
+#define G_SOP_CNT_IN(x) (((x) >> S_SOP_CNT_IN) & M_SOP_CNT_IN)
+
+#define S_EOP_CNT_IN 0
+#define M_EOP_CNT_IN 0xffU
+#define V_EOP_CNT_IN(x) ((x) << S_EOP_CNT_IN)
+#define G_EOP_CNT_IN(x) (((x) >> S_EOP_CNT_IN) & M_EOP_CNT_IN)
+
+#define A_MPS_RX_SE_CNT_IN1 0x1114c
+#define A_MPS_RX_SE_CNT_IN2 0x11150
+#define A_MPS_RX_SE_CNT_IN3 0x11154
+#define A_MPS_RX_SE_CNT_IN4 0x11158
+#define A_MPS_RX_SE_CNT_IN5 0x1115c
+#define A_MPS_RX_SE_CNT_IN6 0x11160
+#define A_MPS_RX_SE_CNT_IN7 0x11164
+#define A_MPS_RX_SE_CNT_OUT01 0x11168
+
+#define S_SOP_CNT_1 24
+#define M_SOP_CNT_1 0xffU
+#define V_SOP_CNT_1(x) ((x) << S_SOP_CNT_1)
+#define G_SOP_CNT_1(x) (((x) >> S_SOP_CNT_1) & M_SOP_CNT_1)
+
+#define S_EOP_CNT_1 16
+#define M_EOP_CNT_1 0xffU
+#define V_EOP_CNT_1(x) ((x) << S_EOP_CNT_1)
+#define G_EOP_CNT_1(x) (((x) >> S_EOP_CNT_1) & M_EOP_CNT_1)
+
+#define S_SOP_CNT_0 8
+#define M_SOP_CNT_0 0xffU
+#define V_SOP_CNT_0(x) ((x) << S_SOP_CNT_0)
+#define G_SOP_CNT_0(x) (((x) >> S_SOP_CNT_0) & M_SOP_CNT_0)
+
+#define S_EOP_CNT_0 0
+#define M_EOP_CNT_0 0xffU
+#define V_EOP_CNT_0(x) ((x) << S_EOP_CNT_0)
+#define G_EOP_CNT_0(x) (((x) >> S_EOP_CNT_0) & M_EOP_CNT_0)
+
+#define A_MPS_RX_SE_CNT_OUT23 0x1116c
+
+#define S_SOP_CNT_3 24
+#define M_SOP_CNT_3 0xffU
+#define V_SOP_CNT_3(x) ((x) << S_SOP_CNT_3)
+#define G_SOP_CNT_3(x) (((x) >> S_SOP_CNT_3) & M_SOP_CNT_3)
+
+#define S_EOP_CNT_3 16
+#define M_EOP_CNT_3 0xffU
+#define V_EOP_CNT_3(x) ((x) << S_EOP_CNT_3)
+#define G_EOP_CNT_3(x) (((x) >> S_EOP_CNT_3) & M_EOP_CNT_3)
+
+#define S_SOP_CNT_2 8
+#define M_SOP_CNT_2 0xffU
+#define V_SOP_CNT_2(x) ((x) << S_SOP_CNT_2)
+#define G_SOP_CNT_2(x) (((x) >> S_SOP_CNT_2) & M_SOP_CNT_2)
+
+#define S_EOP_CNT_2 0
+#define M_EOP_CNT_2 0xffU
+#define V_EOP_CNT_2(x) ((x) << S_EOP_CNT_2)
+#define G_EOP_CNT_2(x) (((x) >> S_EOP_CNT_2) & M_EOP_CNT_2)
+
+#define A_MPS_RX_SPI_ERR 0x11170
+
+#define S_LENERR 21
+#define M_LENERR 0xfU
+#define V_LENERR(x) ((x) << S_LENERR)
+#define G_LENERR(x) (((x) >> S_LENERR) & M_LENERR)
+
+#define S_SPIERR 0
+#define M_SPIERR 0x1fffffU
+#define V_SPIERR(x) ((x) << S_SPIERR)
+#define G_SPIERR(x) (((x) >> S_SPIERR) & M_SPIERR)
+
+#define A_MPS_RX_IN_BUS_STATE 0x11174
+
+#define S_ST3 24
+#define M_ST3 0xffU
+#define V_ST3(x) ((x) << S_ST3)
+#define G_ST3(x) (((x) >> S_ST3) & M_ST3)
+
+#define S_ST2 16
+#define M_ST2 0xffU
+#define V_ST2(x) ((x) << S_ST2)
+#define G_ST2(x) (((x) >> S_ST2) & M_ST2)
+
+#define S_ST1 8
+#define M_ST1 0xffU
+#define V_ST1(x) ((x) << S_ST1)
+#define G_ST1(x) (((x) >> S_ST1) & M_ST1)
+
+#define S_ST0 0
+#define M_ST0 0xffU
+#define V_ST0(x) ((x) << S_ST0)
+#define G_ST0(x) (((x) >> S_ST0) & M_ST0)
+
+#define A_MPS_RX_OUT_BUS_STATE 0x11178
+
+#define S_ST_NCSI 23
+#define M_ST_NCSI 0x1ffU
+#define V_ST_NCSI(x) ((x) << S_ST_NCSI)
+#define G_ST_NCSI(x) (((x) >> S_ST_NCSI) & M_ST_NCSI)
+
+#define S_ST_TP 0
+#define M_ST_TP 0x7fffffU
+#define V_ST_TP(x) ((x) << S_ST_TP)
+#define G_ST_TP(x) (((x) >> S_ST_TP) & M_ST_TP)
+
+#define A_MPS_RX_DBG_CTL 0x1117c
+
+#define S_OUT_DBG_CHNL 8
+#define M_OUT_DBG_CHNL 0x7U
+#define V_OUT_DBG_CHNL(x) ((x) << S_OUT_DBG_CHNL)
+#define G_OUT_DBG_CHNL(x) (((x) >> S_OUT_DBG_CHNL) & M_OUT_DBG_CHNL)
+
+#define S_DBG_PKD_QSEL 7
+#define V_DBG_PKD_QSEL(x) ((x) << S_DBG_PKD_QSEL)
+#define F_DBG_PKD_QSEL V_DBG_PKD_QSEL(1U)
+
+#define S_DBG_CDS_INV 6
+#define V_DBG_CDS_INV(x) ((x) << S_DBG_CDS_INV)
+#define F_DBG_CDS_INV V_DBG_CDS_INV(1U)
+
+#define S_IN_DBG_PORT 3
+#define M_IN_DBG_PORT 0x7U
+#define V_IN_DBG_PORT(x) ((x) << S_IN_DBG_PORT)
+#define G_IN_DBG_PORT(x) (((x) >> S_IN_DBG_PORT) & M_IN_DBG_PORT)
+
+#define S_IN_DBG_CHNL 0
+#define M_IN_DBG_CHNL 0x7U
+#define V_IN_DBG_CHNL(x) ((x) << S_IN_DBG_CHNL)
+#define G_IN_DBG_CHNL(x) (((x) >> S_IN_DBG_CHNL) & M_IN_DBG_CHNL)
+
+#define A_MPS_RX_CLS_DROP_CNT0 0x11180
+
+#define S_LPBK_CNT0 16
+#define M_LPBK_CNT0 0xffffU
+#define V_LPBK_CNT0(x) ((x) << S_LPBK_CNT0)
+#define G_LPBK_CNT0(x) (((x) >> S_LPBK_CNT0) & M_LPBK_CNT0)
+
+#define S_MAC_CNT0 0
+#define M_MAC_CNT0 0xffffU
+#define V_MAC_CNT0(x) ((x) << S_MAC_CNT0)
+#define G_MAC_CNT0(x) (((x) >> S_MAC_CNT0) & M_MAC_CNT0)
+
+#define A_MPS_RX_CLS_DROP_CNT1 0x11184
+
+#define S_LPBK_CNT1 16
+#define M_LPBK_CNT1 0xffffU
+#define V_LPBK_CNT1(x) ((x) << S_LPBK_CNT1)
+#define G_LPBK_CNT1(x) (((x) >> S_LPBK_CNT1) & M_LPBK_CNT1)
+
+#define S_MAC_CNT1 0
+#define M_MAC_CNT1 0xffffU
+#define V_MAC_CNT1(x) ((x) << S_MAC_CNT1)
+#define G_MAC_CNT1(x) (((x) >> S_MAC_CNT1) & M_MAC_CNT1)
+
+#define A_MPS_RX_CLS_DROP_CNT2 0x11188
+
+#define S_LPBK_CNT2 16
+#define M_LPBK_CNT2 0xffffU
+#define V_LPBK_CNT2(x) ((x) << S_LPBK_CNT2)
+#define G_LPBK_CNT2(x) (((x) >> S_LPBK_CNT2) & M_LPBK_CNT2)
+
+#define S_MAC_CNT2 0
+#define M_MAC_CNT2 0xffffU
+#define V_MAC_CNT2(x) ((x) << S_MAC_CNT2)
+#define G_MAC_CNT2(x) (((x) >> S_MAC_CNT2) & M_MAC_CNT2)
+
+#define A_MPS_RX_CLS_DROP_CNT3 0x1118c
+
+#define S_LPBK_CNT3 16
+#define M_LPBK_CNT3 0xffffU
+#define V_LPBK_CNT3(x) ((x) << S_LPBK_CNT3)
+#define G_LPBK_CNT3(x) (((x) >> S_LPBK_CNT3) & M_LPBK_CNT3)
+
+#define S_MAC_CNT3 0
+#define M_MAC_CNT3 0xffffU
+#define V_MAC_CNT3(x) ((x) << S_MAC_CNT3)
+#define G_MAC_CNT3(x) (((x) >> S_MAC_CNT3) & M_MAC_CNT3)
+
+#define A_MPS_RX_SPARE 0x11190
+
+/* registers for module CPL_SWITCH */
+#define CPL_SWITCH_BASE_ADDR 0x19040
+
+#define A_CPL_SWITCH_CNTRL 0x19040
+
+#define S_CPL_PKT_TID 8
+#define M_CPL_PKT_TID 0xffffffU
+#define V_CPL_PKT_TID(x) ((x) << S_CPL_PKT_TID)
+#define G_CPL_PKT_TID(x) (((x) >> S_CPL_PKT_TID) & M_CPL_PKT_TID)
+
+#define S_CIM_TRUNCATE_ENABLE 5
+#define V_CIM_TRUNCATE_ENABLE(x) ((x) << S_CIM_TRUNCATE_ENABLE)
+#define F_CIM_TRUNCATE_ENABLE V_CIM_TRUNCATE_ENABLE(1U)
+
+#define S_CIM_TO_UP_FULL_SIZE 4
+#define V_CIM_TO_UP_FULL_SIZE(x) ((x) << S_CIM_TO_UP_FULL_SIZE)
+#define F_CIM_TO_UP_FULL_SIZE V_CIM_TO_UP_FULL_SIZE(1U)
+
+#define S_CPU_NO_ENABLE 3
+#define V_CPU_NO_ENABLE(x) ((x) << S_CPU_NO_ENABLE)
+#define F_CPU_NO_ENABLE V_CPU_NO_ENABLE(1U)
+
+#define S_SWITCH_TABLE_ENABLE 2
+#define V_SWITCH_TABLE_ENABLE(x) ((x) << S_SWITCH_TABLE_ENABLE)
+#define F_SWITCH_TABLE_ENABLE V_SWITCH_TABLE_ENABLE(1U)
+
+#define S_SGE_ENABLE 1
+#define V_SGE_ENABLE(x) ((x) << S_SGE_ENABLE)
+#define F_SGE_ENABLE V_SGE_ENABLE(1U)
+
+#define S_CIM_ENABLE 0
+#define V_CIM_ENABLE(x) ((x) << S_CIM_ENABLE)
+#define F_CIM_ENABLE V_CIM_ENABLE(1U)
+
+#define A_CPL_SWITCH_TBL_IDX 0x19044
+
+#define S_SWITCH_TBL_IDX 0
+#define M_SWITCH_TBL_IDX 0xfU
+#define V_SWITCH_TBL_IDX(x) ((x) << S_SWITCH_TBL_IDX)
+#define G_SWITCH_TBL_IDX(x) (((x) >> S_SWITCH_TBL_IDX) & M_SWITCH_TBL_IDX)
+
+#define A_CPL_SWITCH_TBL_DATA 0x19048
+#define A_CPL_SWITCH_ZERO_ERROR 0x1904c
+
+#define S_ZERO_CMD_CH1 8
+#define M_ZERO_CMD_CH1 0xffU
+#define V_ZERO_CMD_CH1(x) ((x) << S_ZERO_CMD_CH1)
+#define G_ZERO_CMD_CH1(x) (((x) >> S_ZERO_CMD_CH1) & M_ZERO_CMD_CH1)
+
+#define S_ZERO_CMD_CH0 0
+#define M_ZERO_CMD_CH0 0xffU
+#define V_ZERO_CMD_CH0(x) ((x) << S_ZERO_CMD_CH0)
+#define G_ZERO_CMD_CH0(x) (((x) >> S_ZERO_CMD_CH0) & M_ZERO_CMD_CH0)
+
+#define A_CPL_INTR_ENABLE 0x19050
+
+#define S_CIM_OP_MAP_PERR 5
+#define V_CIM_OP_MAP_PERR(x) ((x) << S_CIM_OP_MAP_PERR)
+#define F_CIM_OP_MAP_PERR V_CIM_OP_MAP_PERR(1U)
+
+#define S_CIM_OVFL_ERROR 4
+#define V_CIM_OVFL_ERROR(x) ((x) << S_CIM_OVFL_ERROR)
+#define F_CIM_OVFL_ERROR V_CIM_OVFL_ERROR(1U)
+
+#define S_TP_FRAMING_ERROR 3
+#define V_TP_FRAMING_ERROR(x) ((x) << S_TP_FRAMING_ERROR)
+#define F_TP_FRAMING_ERROR V_TP_FRAMING_ERROR(1U)
+
+#define S_SGE_FRAMING_ERROR 2
+#define V_SGE_FRAMING_ERROR(x) ((x) << S_SGE_FRAMING_ERROR)
+#define F_SGE_FRAMING_ERROR V_SGE_FRAMING_ERROR(1U)
+
+#define S_CIM_FRAMING_ERROR 1
+#define V_CIM_FRAMING_ERROR(x) ((x) << S_CIM_FRAMING_ERROR)
+#define F_CIM_FRAMING_ERROR V_CIM_FRAMING_ERROR(1U)
+
+#define S_ZERO_SWITCH_ERROR 0
+#define V_ZERO_SWITCH_ERROR(x) ((x) << S_ZERO_SWITCH_ERROR)
+#define F_ZERO_SWITCH_ERROR V_ZERO_SWITCH_ERROR(1U)
+
+#define A_CPL_INTR_CAUSE 0x19054
+#define A_CPL_MAP_TBL_IDX 0x19058
+
+#define S_MAP_TBL_IDX 0
+#define M_MAP_TBL_IDX 0xffU
+#define V_MAP_TBL_IDX(x) ((x) << S_MAP_TBL_IDX)
+#define G_MAP_TBL_IDX(x) (((x) >> S_MAP_TBL_IDX) & M_MAP_TBL_IDX)
+
+#define A_CPL_MAP_TBL_DATA 0x1905c
+
+#define S_MAP_TBL_DATA 0
+#define M_MAP_TBL_DATA 0xffU
+#define V_MAP_TBL_DATA(x) ((x) << S_MAP_TBL_DATA)
+#define G_MAP_TBL_DATA(x) (((x) >> S_MAP_TBL_DATA) & M_MAP_TBL_DATA)
+
+/* registers for module SMB */
+#define SMB_BASE_ADDR 0x19060
+
+#define A_SMB_GLOBAL_TIME_CFG 0x19060
+
+#define S_MACROCNTCFG 8
+#define M_MACROCNTCFG 0x1fU
+#define V_MACROCNTCFG(x) ((x) << S_MACROCNTCFG)
+#define G_MACROCNTCFG(x) (((x) >> S_MACROCNTCFG) & M_MACROCNTCFG)
+
+#define S_MICROCNTCFG 0
+#define M_MICROCNTCFG 0xffU
+#define V_MICROCNTCFG(x) ((x) << S_MICROCNTCFG)
+#define G_MICROCNTCFG(x) (((x) >> S_MICROCNTCFG) & M_MICROCNTCFG)
+
+#define A_SMB_MST_TIMEOUT_CFG 0x19064
+
+#define S_MSTTIMEOUTCFG 0
+#define M_MSTTIMEOUTCFG 0xffffffU
+#define V_MSTTIMEOUTCFG(x) ((x) << S_MSTTIMEOUTCFG)
+#define G_MSTTIMEOUTCFG(x) (((x) >> S_MSTTIMEOUTCFG) & M_MSTTIMEOUTCFG)
+
+#define A_SMB_MST_CTL_CFG 0x19068
+
+#define S_MSTFIFODBG 31
+#define V_MSTFIFODBG(x) ((x) << S_MSTFIFODBG)
+#define F_MSTFIFODBG V_MSTFIFODBG(1U)
+
+#define S_MSTFIFODBGCLR 30
+#define V_MSTFIFODBGCLR(x) ((x) << S_MSTFIFODBGCLR)
+#define F_MSTFIFODBGCLR V_MSTFIFODBGCLR(1U)
+
+#define S_MSTRXBYTECFG 12
+#define M_MSTRXBYTECFG 0x3fU
+#define V_MSTRXBYTECFG(x) ((x) << S_MSTRXBYTECFG)
+#define G_MSTRXBYTECFG(x) (((x) >> S_MSTRXBYTECFG) & M_MSTRXBYTECFG)
+
+#define S_MSTTXBYTECFG 6
+#define M_MSTTXBYTECFG 0x3fU
+#define V_MSTTXBYTECFG(x) ((x) << S_MSTTXBYTECFG)
+#define G_MSTTXBYTECFG(x) (((x) >> S_MSTTXBYTECFG) & M_MSTTXBYTECFG)
+
+#define S_MSTRESET 1
+#define V_MSTRESET(x) ((x) << S_MSTRESET)
+#define F_MSTRESET V_MSTRESET(1U)
+
+#define S_MSTCTLEN 0
+#define V_MSTCTLEN(x) ((x) << S_MSTCTLEN)
+#define F_MSTCTLEN V_MSTCTLEN(1U)
+
+#define A_SMB_MST_CTL_STS 0x1906c
+
+#define S_MSTRXBYTECNT 12
+#define M_MSTRXBYTECNT 0x3fU
+#define V_MSTRXBYTECNT(x) ((x) << S_MSTRXBYTECNT)
+#define G_MSTRXBYTECNT(x) (((x) >> S_MSTRXBYTECNT) & M_MSTRXBYTECNT)
+
+#define S_MSTTXBYTECNT 6
+#define M_MSTTXBYTECNT 0x3fU
+#define V_MSTTXBYTECNT(x) ((x) << S_MSTTXBYTECNT)
+#define G_MSTTXBYTECNT(x) (((x) >> S_MSTTXBYTECNT) & M_MSTTXBYTECNT)
+
+#define S_MSTBUSYSTS 0
+#define V_MSTBUSYSTS(x) ((x) << S_MSTBUSYSTS)
+#define F_MSTBUSYSTS V_MSTBUSYSTS(1U)
+
+#define A_SMB_MST_TX_FIFO_RDWR 0x19070
+#define A_SMB_MST_RX_FIFO_RDWR 0x19074
+#define A_SMB_SLV_TIMEOUT_CFG 0x19078
+
+#define S_SLVTIMEOUTCFG 0
+#define M_SLVTIMEOUTCFG 0xffffffU
+#define V_SLVTIMEOUTCFG(x) ((x) << S_SLVTIMEOUTCFG)
+#define G_SLVTIMEOUTCFG(x) (((x) >> S_SLVTIMEOUTCFG) & M_SLVTIMEOUTCFG)
+
+#define A_SMB_SLV_CTL_CFG 0x1907c
+
+#define S_SLVFIFODBG 31
+#define V_SLVFIFODBG(x) ((x) << S_SLVFIFODBG)
+#define F_SLVFIFODBG V_SLVFIFODBG(1U)
+
+#define S_SLVFIFODBGCLR 30
+#define V_SLVFIFODBGCLR(x) ((x) << S_SLVFIFODBGCLR)
+#define F_SLVFIFODBGCLR V_SLVFIFODBGCLR(1U)
+
+#define S_SLVCRCOUTBITINV 21
+#define V_SLVCRCOUTBITINV(x) ((x) << S_SLVCRCOUTBITINV)
+#define F_SLVCRCOUTBITINV V_SLVCRCOUTBITINV(1U)
+
+#define S_SLVCRCOUTBITREV 20
+#define V_SLVCRCOUTBITREV(x) ((x) << S_SLVCRCOUTBITREV)
+#define F_SLVCRCOUTBITREV V_SLVCRCOUTBITREV(1U)
+
+#define S_SLVCRCINBITREV 19
+#define V_SLVCRCINBITREV(x) ((x) << S_SLVCRCINBITREV)
+#define F_SLVCRCINBITREV V_SLVCRCINBITREV(1U)
+
+#define S_SLVCRCPRESET 11
+#define M_SLVCRCPRESET 0xffU
+#define V_SLVCRCPRESET(x) ((x) << S_SLVCRCPRESET)
+#define G_SLVCRCPRESET(x) (((x) >> S_SLVCRCPRESET) & M_SLVCRCPRESET)
+
+#define S_SLVADDRCFG 4
+#define M_SLVADDRCFG 0x7fU
+#define V_SLVADDRCFG(x) ((x) << S_SLVADDRCFG)
+#define G_SLVADDRCFG(x) (((x) >> S_SLVADDRCFG) & M_SLVADDRCFG)
+
+#define S_SLVALRTSET 2
+#define V_SLVALRTSET(x) ((x) << S_SLVALRTSET)
+#define F_SLVALRTSET V_SLVALRTSET(1U)
+
+#define S_SLVRESET 1
+#define V_SLVRESET(x) ((x) << S_SLVRESET)
+#define F_SLVRESET V_SLVRESET(1U)
+
+#define S_SLVCTLEN 0
+#define V_SLVCTLEN(x) ((x) << S_SLVCTLEN)
+#define F_SLVCTLEN V_SLVCTLEN(1U)
+
+#define A_SMB_SLV_CTL_STS 0x19080
+
+#define S_SLVFIFOTXCNT 12
+#define M_SLVFIFOTXCNT 0x3fU
+#define V_SLVFIFOTXCNT(x) ((x) << S_SLVFIFOTXCNT)
+#define G_SLVFIFOTXCNT(x) (((x) >> S_SLVFIFOTXCNT) & M_SLVFIFOTXCNT)
+
+#define S_SLVFIFOCNT 6
+#define M_SLVFIFOCNT 0x3fU
+#define V_SLVFIFOCNT(x) ((x) << S_SLVFIFOCNT)
+#define G_SLVFIFOCNT(x) (((x) >> S_SLVFIFOCNT) & M_SLVFIFOCNT)
+
+#define S_SLVALRTSTS 2
+#define V_SLVALRTSTS(x) ((x) << S_SLVALRTSTS)
+#define F_SLVALRTSTS V_SLVALRTSTS(1U)
+
+#define S_SLVBUSYSTS 0
+#define V_SLVBUSYSTS(x) ((x) << S_SLVBUSYSTS)
+#define F_SLVBUSYSTS V_SLVBUSYSTS(1U)
+
+#define A_SMB_SLV_FIFO_RDWR 0x19084
+#define A_SMB_INT_ENABLE 0x1908c
+
+#define S_MSTTXFIFOPAREN 21
+#define V_MSTTXFIFOPAREN(x) ((x) << S_MSTTXFIFOPAREN)
+#define F_MSTTXFIFOPAREN V_MSTTXFIFOPAREN(1U)
+
+#define S_MSTRXFIFOPAREN 20
+#define V_MSTRXFIFOPAREN(x) ((x) << S_MSTRXFIFOPAREN)
+#define F_MSTRXFIFOPAREN V_MSTRXFIFOPAREN(1U)
+
+#define S_SLVFIFOPAREN 19
+#define V_SLVFIFOPAREN(x) ((x) << S_SLVFIFOPAREN)
+#define F_SLVFIFOPAREN V_SLVFIFOPAREN(1U)
+
+#define S_SLVUNEXPBUSSTOPEN 18
+#define V_SLVUNEXPBUSSTOPEN(x) ((x) << S_SLVUNEXPBUSSTOPEN)
+#define F_SLVUNEXPBUSSTOPEN V_SLVUNEXPBUSSTOPEN(1U)
+
+#define S_SLVUNEXPBUSSTARTEN 17
+#define V_SLVUNEXPBUSSTARTEN(x) ((x) << S_SLVUNEXPBUSSTARTEN)
+#define F_SLVUNEXPBUSSTARTEN V_SLVUNEXPBUSSTARTEN(1U)
+
+#define S_SLVCOMMANDCODEINVEN 16
+#define V_SLVCOMMANDCODEINVEN(x) ((x) << S_SLVCOMMANDCODEINVEN)
+#define F_SLVCOMMANDCODEINVEN V_SLVCOMMANDCODEINVEN(1U)
+
+#define S_SLVBYTECNTERREN 15
+#define V_SLVBYTECNTERREN(x) ((x) << S_SLVBYTECNTERREN)
+#define F_SLVBYTECNTERREN V_SLVBYTECNTERREN(1U)
+
+#define S_SLVUNEXPACKMSTEN 14
+#define V_SLVUNEXPACKMSTEN(x) ((x) << S_SLVUNEXPACKMSTEN)
+#define F_SLVUNEXPACKMSTEN V_SLVUNEXPACKMSTEN(1U)
+
+#define S_SLVUNEXPNACKMSTEN 13
+#define V_SLVUNEXPNACKMSTEN(x) ((x) << S_SLVUNEXPNACKMSTEN)
+#define F_SLVUNEXPNACKMSTEN V_SLVUNEXPNACKMSTEN(1U)
+
+#define S_SLVNOBUSSTOPEN 12
+#define V_SLVNOBUSSTOPEN(x) ((x) << S_SLVNOBUSSTOPEN)
+#define F_SLVNOBUSSTOPEN V_SLVNOBUSSTOPEN(1U)
+
+#define S_SLVNOREPSTARTEN 11
+#define V_SLVNOREPSTARTEN(x) ((x) << S_SLVNOREPSTARTEN)
+#define F_SLVNOREPSTARTEN V_SLVNOREPSTARTEN(1U)
+
+#define S_SLVRXADDRINTEN 10
+#define V_SLVRXADDRINTEN(x) ((x) << S_SLVRXADDRINTEN)
+#define F_SLVRXADDRINTEN V_SLVRXADDRINTEN(1U)
+
+#define S_SLVRXPECERRINTEN 9
+#define V_SLVRXPECERRINTEN(x) ((x) << S_SLVRXPECERRINTEN)
+#define F_SLVRXPECERRINTEN V_SLVRXPECERRINTEN(1U)
+
+#define S_SLVPREPTOARPINTEN 8
+#define V_SLVPREPTOARPINTEN(x) ((x) << S_SLVPREPTOARPINTEN)
+#define F_SLVPREPTOARPINTEN V_SLVPREPTOARPINTEN(1U)
+
+#define S_SLVTIMEOUTINTEN 7
+#define V_SLVTIMEOUTINTEN(x) ((x) << S_SLVTIMEOUTINTEN)
+#define F_SLVTIMEOUTINTEN V_SLVTIMEOUTINTEN(1U)
+
+#define S_SLVERRINTEN 6
+#define V_SLVERRINTEN(x) ((x) << S_SLVERRINTEN)
+#define F_SLVERRINTEN V_SLVERRINTEN(1U)
+
+#define S_SLVDONEINTEN 5
+#define V_SLVDONEINTEN(x) ((x) << S_SLVDONEINTEN)
+#define F_SLVDONEINTEN V_SLVDONEINTEN(1U)
+
+#define S_SLVRXRDYINTEN 4
+#define V_SLVRXRDYINTEN(x) ((x) << S_SLVRXRDYINTEN)
+#define F_SLVRXRDYINTEN V_SLVRXRDYINTEN(1U)
+
+#define S_MSTTIMEOUTINTEN 3
+#define V_MSTTIMEOUTINTEN(x) ((x) << S_MSTTIMEOUTINTEN)
+#define F_MSTTIMEOUTINTEN V_MSTTIMEOUTINTEN(1U)
+
+#define S_MSTNACKINTEN 2
+#define V_MSTNACKINTEN(x) ((x) << S_MSTNACKINTEN)
+#define F_MSTNACKINTEN V_MSTNACKINTEN(1U)
+
+#define S_MSTLOSTARBINTEN 1
+#define V_MSTLOSTARBINTEN(x) ((x) << S_MSTLOSTARBINTEN)
+#define F_MSTLOSTARBINTEN V_MSTLOSTARBINTEN(1U)
+
+#define S_MSTDONEINTEN 0
+#define V_MSTDONEINTEN(x) ((x) << S_MSTDONEINTEN)
+#define F_MSTDONEINTEN V_MSTDONEINTEN(1U)
+
+#define A_SMB_INT_CAUSE 0x19090
+
+#define S_MSTTXFIFOPARINT 21
+#define V_MSTTXFIFOPARINT(x) ((x) << S_MSTTXFIFOPARINT)
+#define F_MSTTXFIFOPARINT V_MSTTXFIFOPARINT(1U)
+
+#define S_MSTRXFIFOPARINT 20
+#define V_MSTRXFIFOPARINT(x) ((x) << S_MSTRXFIFOPARINT)
+#define F_MSTRXFIFOPARINT V_MSTRXFIFOPARINT(1U)
+
+#define S_SLVFIFOPARINT 19
+#define V_SLVFIFOPARINT(x) ((x) << S_SLVFIFOPARINT)
+#define F_SLVFIFOPARINT V_SLVFIFOPARINT(1U)
+
+#define S_SLVUNEXPBUSSTOPINT 18
+#define V_SLVUNEXPBUSSTOPINT(x) ((x) << S_SLVUNEXPBUSSTOPINT)
+#define F_SLVUNEXPBUSSTOPINT V_SLVUNEXPBUSSTOPINT(1U)
+
+#define S_SLVUNEXPBUSSTARTINT 17
+#define V_SLVUNEXPBUSSTARTINT(x) ((x) << S_SLVUNEXPBUSSTARTINT)
+#define F_SLVUNEXPBUSSTARTINT V_SLVUNEXPBUSSTARTINT(1U)
+
+#define S_SLVCOMMANDCODEINVINT 16
+#define V_SLVCOMMANDCODEINVINT(x) ((x) << S_SLVCOMMANDCODEINVINT)
+#define F_SLVCOMMANDCODEINVINT V_SLVCOMMANDCODEINVINT(1U)
+
+#define S_SLVBYTECNTERRINT 15
+#define V_SLVBYTECNTERRINT(x) ((x) << S_SLVBYTECNTERRINT)
+#define F_SLVBYTECNTERRINT V_SLVBYTECNTERRINT(1U)
+
+#define S_SLVUNEXPACKMSTINT 14
+#define V_SLVUNEXPACKMSTINT(x) ((x) << S_SLVUNEXPACKMSTINT)
+#define F_SLVUNEXPACKMSTINT V_SLVUNEXPACKMSTINT(1U)
+
+#define S_SLVUNEXPNACKMSTINT 13
+#define V_SLVUNEXPNACKMSTINT(x) ((x) << S_SLVUNEXPNACKMSTINT)
+#define F_SLVUNEXPNACKMSTINT V_SLVUNEXPNACKMSTINT(1U)
+
+#define S_SLVNOBUSSTOPINT 12
+#define V_SLVNOBUSSTOPINT(x) ((x) << S_SLVNOBUSSTOPINT)
+#define F_SLVNOBUSSTOPINT V_SLVNOBUSSTOPINT(1U)
+
+#define S_SLVNOREPSTARTINT 11
+#define V_SLVNOREPSTARTINT(x) ((x) << S_SLVNOREPSTARTINT)
+#define F_SLVNOREPSTARTINT V_SLVNOREPSTARTINT(1U)
+
+#define S_SLVRXADDRINT 10
+#define V_SLVRXADDRINT(x) ((x) << S_SLVRXADDRINT)
+#define F_SLVRXADDRINT V_SLVRXADDRINT(1U)
+
+#define S_SLVRXPECERRINT 9
+#define V_SLVRXPECERRINT(x) ((x) << S_SLVRXPECERRINT)
+#define F_SLVRXPECERRINT V_SLVRXPECERRINT(1U)
+
+#define S_SLVPREPTOARPINT 8
+#define V_SLVPREPTOARPINT(x) ((x) << S_SLVPREPTOARPINT)
+#define F_SLVPREPTOARPINT V_SLVPREPTOARPINT(1U)
+
+#define S_SLVTIMEOUTINT 7
+#define V_SLVTIMEOUTINT(x) ((x) << S_SLVTIMEOUTINT)
+#define F_SLVTIMEOUTINT V_SLVTIMEOUTINT(1U)
+
+#define S_SLVERRINT 6
+#define V_SLVERRINT(x) ((x) << S_SLVERRINT)
+#define F_SLVERRINT V_SLVERRINT(1U)
+
+#define S_SLVDONEINT 5
+#define V_SLVDONEINT(x) ((x) << S_SLVDONEINT)
+#define F_SLVDONEINT V_SLVDONEINT(1U)
+
+#define S_SLVRXRDYINT 4
+#define V_SLVRXRDYINT(x) ((x) << S_SLVRXRDYINT)
+#define F_SLVRXRDYINT V_SLVRXRDYINT(1U)
+
+#define S_MSTTIMEOUTINT 3
+#define V_MSTTIMEOUTINT(x) ((x) << S_MSTTIMEOUTINT)
+#define F_MSTTIMEOUTINT V_MSTTIMEOUTINT(1U)
+
+#define S_MSTNACKINT 2
+#define V_MSTNACKINT(x) ((x) << S_MSTNACKINT)
+#define F_MSTNACKINT V_MSTNACKINT(1U)
+
+#define S_MSTLOSTARBINT 1
+#define V_MSTLOSTARBINT(x) ((x) << S_MSTLOSTARBINT)
+#define F_MSTLOSTARBINT V_MSTLOSTARBINT(1U)
+
+#define S_MSTDONEINT 0
+#define V_MSTDONEINT(x) ((x) << S_MSTDONEINT)
+#define F_MSTDONEINT V_MSTDONEINT(1U)
+
+#define A_SMB_DEBUG_DATA 0x19094
+
+#define S_DEBUGDATAH 16
+#define M_DEBUGDATAH 0xffffU
+#define V_DEBUGDATAH(x) ((x) << S_DEBUGDATAH)
+#define G_DEBUGDATAH(x) (((x) >> S_DEBUGDATAH) & M_DEBUGDATAH)
+
+#define S_DEBUGDATAL 0
+#define M_DEBUGDATAL 0xffffU
+#define V_DEBUGDATAL(x) ((x) << S_DEBUGDATAL)
+#define G_DEBUGDATAL(x) (((x) >> S_DEBUGDATAL) & M_DEBUGDATAL)
+
+#define A_SMB_PERR_EN 0x19098
+
+#define S_MSTTXFIFOPERREN 2
+#define V_MSTTXFIFOPERREN(x) ((x) << S_MSTTXFIFOPERREN)
+#define F_MSTTXFIFOPERREN V_MSTTXFIFOPERREN(1U)
+
+#define S_MSTRXFIFOPERREN 1
+#define V_MSTRXFIFOPERREN(x) ((x) << S_MSTRXFIFOPERREN)
+#define F_MSTRXFIFOPERREN V_MSTRXFIFOPERREN(1U)
+
+#define S_SLVFIFOPERREN 0
+#define V_SLVFIFOPERREN(x) ((x) << S_SLVFIFOPERREN)
+#define F_SLVFIFOPERREN V_SLVFIFOPERREN(1U)
+
+#define A_SMB_PERR_INJ 0x1909c
+
+#define S_MSTTXINJDATAERR 3
+#define V_MSTTXINJDATAERR(x) ((x) << S_MSTTXINJDATAERR)
+#define F_MSTTXINJDATAERR V_MSTTXINJDATAERR(1U)
+
+#define S_MSTRXINJDATAERR 2
+#define V_MSTRXINJDATAERR(x) ((x) << S_MSTRXINJDATAERR)
+#define F_MSTRXINJDATAERR V_MSTRXINJDATAERR(1U)
+
+#define S_SLVINJDATAERR 1
+#define V_SLVINJDATAERR(x) ((x) << S_SLVINJDATAERR)
+#define F_SLVINJDATAERR V_SLVINJDATAERR(1U)
+
+#define S_FIFOINJDATAERREN 0
+#define V_FIFOINJDATAERREN(x) ((x) << S_FIFOINJDATAERREN)
+#define F_FIFOINJDATAERREN V_FIFOINJDATAERREN(1U)
+
+#define A_SMB_SLV_ARP_CTL 0x190a0
+
+#define S_ARPCOMMANDCODE 2
+#define M_ARPCOMMANDCODE 0xffU
+#define V_ARPCOMMANDCODE(x) ((x) << S_ARPCOMMANDCODE)
+#define G_ARPCOMMANDCODE(x) (((x) >> S_ARPCOMMANDCODE) & M_ARPCOMMANDCODE)
+
+#define S_ARPADDRRES 1
+#define V_ARPADDRRES(x) ((x) << S_ARPADDRRES)
+#define F_ARPADDRRES V_ARPADDRRES(1U)
+
+#define S_ARPADDRVAL 0
+#define V_ARPADDRVAL(x) ((x) << S_ARPADDRVAL)
+#define F_ARPADDRVAL V_ARPADDRVAL(1U)
+
+#define A_SMB_ARP_UDID0 0x190a4
+#define A_SMB_ARP_UDID1 0x190a8
+
+#define S_SUBSYSTEMVENDORID 16
+#define M_SUBSYSTEMVENDORID 0xffffU
+#define V_SUBSYSTEMVENDORID(x) ((x) << S_SUBSYSTEMVENDORID)
+#define G_SUBSYSTEMVENDORID(x) (((x) >> S_SUBSYSTEMVENDORID) & M_SUBSYSTEMVENDORID)
+
+#define S_SUBSYSTEMDEVICEID 0
+#define M_SUBSYSTEMDEVICEID 0xffffU
+#define V_SUBSYSTEMDEVICEID(x) ((x) << S_SUBSYSTEMDEVICEID)
+#define G_SUBSYSTEMDEVICEID(x) (((x) >> S_SUBSYSTEMDEVICEID) & M_SUBSYSTEMDEVICEID)
+
+#define A_SMB_ARP_UDID2 0x190ac
+
+#define S_DEVICEID 16
+#define M_DEVICEID 0xffffU
+#define V_DEVICEID(x) ((x) << S_DEVICEID)
+#define G_DEVICEID(x) (((x) >> S_DEVICEID) & M_DEVICEID)
+
+#define S_INTERFACE 0
+#define M_INTERFACE 0xffffU
+#define V_INTERFACE(x) ((x) << S_INTERFACE)
+#define G_INTERFACE(x) (((x) >> S_INTERFACE) & M_INTERFACE)
+
+#define A_SMB_ARP_UDID3 0x190b0
+
+#define S_DEVICECAP 24
+#define M_DEVICECAP 0xffU
+#define V_DEVICECAP(x) ((x) << S_DEVICECAP)
+#define G_DEVICECAP(x) (((x) >> S_DEVICECAP) & M_DEVICECAP)
+
+#define S_VERSIONID 16
+#define M_VERSIONID 0xffU
+#define V_VERSIONID(x) ((x) << S_VERSIONID)
+#define G_VERSIONID(x) (((x) >> S_VERSIONID) & M_VERSIONID)
+
+#define S_VENDORID 0
+#define M_VENDORID 0xffffU
+#define V_VENDORID(x) ((x) << S_VENDORID)
+#define G_VENDORID(x) (((x) >> S_VENDORID) & M_VENDORID)
+
+#define A_SMB_SLV_AUX_ADDR0 0x190b4
+
+#define S_AUXADDR0VAL 6
+#define V_AUXADDR0VAL(x) ((x) << S_AUXADDR0VAL)
+#define F_AUXADDR0VAL V_AUXADDR0VAL(1U)
+
+#define S_AUXADDR0 0
+#define M_AUXADDR0 0x3fU
+#define V_AUXADDR0(x) ((x) << S_AUXADDR0)
+#define G_AUXADDR0(x) (((x) >> S_AUXADDR0) & M_AUXADDR0)
+
+#define A_SMB_SLV_AUX_ADDR1 0x190b8
+
+#define S_AUXADDR1VAL 6
+#define V_AUXADDR1VAL(x) ((x) << S_AUXADDR1VAL)
+#define F_AUXADDR1VAL V_AUXADDR1VAL(1U)
+
+#define S_AUXADDR1 0
+#define M_AUXADDR1 0x3fU
+#define V_AUXADDR1(x) ((x) << S_AUXADDR1)
+#define G_AUXADDR1(x) (((x) >> S_AUXADDR1) & M_AUXADDR1)
+
+#define A_SMB_SLV_AUX_ADDR2 0x190bc
+
+#define S_AUXADDR2VAL 6
+#define V_AUXADDR2VAL(x) ((x) << S_AUXADDR2VAL)
+#define F_AUXADDR2VAL V_AUXADDR2VAL(1U)
+
+#define S_AUXADDR2 0
+#define M_AUXADDR2 0x3fU
+#define V_AUXADDR2(x) ((x) << S_AUXADDR2)
+#define G_AUXADDR2(x) (((x) >> S_AUXADDR2) & M_AUXADDR2)
+
+#define A_SMB_SLV_AUX_ADDR3 0x190c0
+
+#define S_AUXADDR3VAL 6
+#define V_AUXADDR3VAL(x) ((x) << S_AUXADDR3VAL)
+#define F_AUXADDR3VAL V_AUXADDR3VAL(1U)
+
+#define S_AUXADDR3 0
+#define M_AUXADDR3 0x3fU
+#define V_AUXADDR3(x) ((x) << S_AUXADDR3)
+#define G_AUXADDR3(x) (((x) >> S_AUXADDR3) & M_AUXADDR3)
+
+#define A_SMB_COMMAND_CODE0 0x190c4
+
+#define S_SMBUSCOMMANDCODE0 0
+#define M_SMBUSCOMMANDCODE0 0xffU
+#define V_SMBUSCOMMANDCODE0(x) ((x) << S_SMBUSCOMMANDCODE0)
+#define G_SMBUSCOMMANDCODE0(x) (((x) >> S_SMBUSCOMMANDCODE0) & M_SMBUSCOMMANDCODE0)
+
+#define A_SMB_COMMAND_CODE1 0x190c8
+
+#define S_SMBUSCOMMANDCODE1 0
+#define M_SMBUSCOMMANDCODE1 0xffU
+#define V_SMBUSCOMMANDCODE1(x) ((x) << S_SMBUSCOMMANDCODE1)
+#define G_SMBUSCOMMANDCODE1(x) (((x) >> S_SMBUSCOMMANDCODE1) & M_SMBUSCOMMANDCODE1)
+
+#define A_SMB_COMMAND_CODE2 0x190cc
+
+#define S_SMBUSCOMMANDCODE2 0
+#define M_SMBUSCOMMANDCODE2 0xffU
+#define V_SMBUSCOMMANDCODE2(x) ((x) << S_SMBUSCOMMANDCODE2)
+#define G_SMBUSCOMMANDCODE2(x) (((x) >> S_SMBUSCOMMANDCODE2) & M_SMBUSCOMMANDCODE2)
+
+#define A_SMB_COMMAND_CODE3 0x190d0
+
+#define S_SMBUSCOMMANDCODE3 0
+#define M_SMBUSCOMMANDCODE3 0xffU
+#define V_SMBUSCOMMANDCODE3(x) ((x) << S_SMBUSCOMMANDCODE3)
+#define G_SMBUSCOMMANDCODE3(x) (((x) >> S_SMBUSCOMMANDCODE3) & M_SMBUSCOMMANDCODE3)
+
+#define A_SMB_COMMAND_CODE4 0x190d4
+
+#define S_SMBUSCOMMANDCODE4 0
+#define M_SMBUSCOMMANDCODE4 0xffU
+#define V_SMBUSCOMMANDCODE4(x) ((x) << S_SMBUSCOMMANDCODE4)
+#define G_SMBUSCOMMANDCODE4(x) (((x) >> S_SMBUSCOMMANDCODE4) & M_SMBUSCOMMANDCODE4)
+
+#define A_SMB_COMMAND_CODE5 0x190d8
+
+#define S_SMBUSCOMMANDCODE5 0
+#define M_SMBUSCOMMANDCODE5 0xffU
+#define V_SMBUSCOMMANDCODE5(x) ((x) << S_SMBUSCOMMANDCODE5)
+#define G_SMBUSCOMMANDCODE5(x) (((x) >> S_SMBUSCOMMANDCODE5) & M_SMBUSCOMMANDCODE5)
+
+#define A_SMB_COMMAND_CODE6 0x190dc
+
+#define S_SMBUSCOMMANDCODE6 0
+#define M_SMBUSCOMMANDCODE6 0xffU
+#define V_SMBUSCOMMANDCODE6(x) ((x) << S_SMBUSCOMMANDCODE6)
+#define G_SMBUSCOMMANDCODE6(x) (((x) >> S_SMBUSCOMMANDCODE6) & M_SMBUSCOMMANDCODE6)
+
+#define A_SMB_COMMAND_CODE7 0x190e0
+
+#define S_SMBUSCOMMANDCODE7 0
+#define M_SMBUSCOMMANDCODE7 0xffU
+#define V_SMBUSCOMMANDCODE7(x) ((x) << S_SMBUSCOMMANDCODE7)
+#define G_SMBUSCOMMANDCODE7(x) (((x) >> S_SMBUSCOMMANDCODE7) & M_SMBUSCOMMANDCODE7)
+
+#define A_SMB_MICRO_CNT_CLK_CFG 0x190e4
+
+#define S_MACROCNTCLKCFG 8
+#define M_MACROCNTCLKCFG 0x1fU
+#define V_MACROCNTCLKCFG(x) ((x) << S_MACROCNTCLKCFG)
+#define G_MACROCNTCLKCFG(x) (((x) >> S_MACROCNTCLKCFG) & M_MACROCNTCLKCFG)
+
+#define S_MICROCNTCLKCFG 0
+#define M_MICROCNTCLKCFG 0xffU
+#define V_MICROCNTCLKCFG(x) ((x) << S_MICROCNTCLKCFG)
+#define G_MICROCNTCLKCFG(x) (((x) >> S_MICROCNTCLKCFG) & M_MICROCNTCLKCFG)
+
+/* registers for module I2CM */
+#define I2CM_BASE_ADDR 0x190f0
+
+#define A_I2CM_CFG 0x190f0
+
+#define S_I2C_CLKDIV 0
+#define M_I2C_CLKDIV 0xfffU
+#define V_I2C_CLKDIV(x) ((x) << S_I2C_CLKDIV)
+#define G_I2C_CLKDIV(x) (((x) >> S_I2C_CLKDIV) & M_I2C_CLKDIV)
+
+#define A_I2CM_DATA 0x190f4
+
+#define S_I2C_DATA 0
+#define M_I2C_DATA 0xffU
+#define V_I2C_DATA(x) ((x) << S_I2C_DATA)
+#define G_I2C_DATA(x) (((x) >> S_I2C_DATA) & M_I2C_DATA)
+
+#define A_I2CM_OP 0x190f8
+
+#define S_I2C_ACK 30
+#define V_I2C_ACK(x) ((x) << S_I2C_ACK)
+#define F_I2C_ACK V_I2C_ACK(1U)
+
+#define S_I2C_CONT 1
+#define V_I2C_CONT(x) ((x) << S_I2C_CONT)
+#define F_I2C_CONT V_I2C_CONT(1U)
+
+#define S_OP 0
+#define V_OP(x) ((x) << S_OP)
+#define F_OP V_OP(1U)
+
+/* registers for module MI */
+#define MI_BASE_ADDR 0x19100
+
+#define A_MI_CFG 0x19100
+
+#define S_T4_ST 14
+#define V_T4_ST(x) ((x) << S_T4_ST)
+#define F_T4_ST V_T4_ST(1U)
+
+#define S_CLKDIV 5
+#define M_CLKDIV 0xffU
+#define V_CLKDIV(x) ((x) << S_CLKDIV)
+#define G_CLKDIV(x) (((x) >> S_CLKDIV) & M_CLKDIV)
+
+#define S_ST 3
+#define M_ST 0x3U
+#define V_ST(x) ((x) << S_ST)
+#define G_ST(x) (((x) >> S_ST) & M_ST)
+
+#define S_PREEN 2
+#define V_PREEN(x) ((x) << S_PREEN)
+#define F_PREEN V_PREEN(1U)
+
+#define S_MDIINV 1
+#define V_MDIINV(x) ((x) << S_MDIINV)
+#define F_MDIINV V_MDIINV(1U)
+
+#define S_MDIO_1P2V_SEL 0
+#define V_MDIO_1P2V_SEL(x) ((x) << S_MDIO_1P2V_SEL)
+#define F_MDIO_1P2V_SEL V_MDIO_1P2V_SEL(1U)
+
+#define A_MI_ADDR 0x19104
+
+#define S_PHYADDR 5
+#define M_PHYADDR 0x1fU
+#define V_PHYADDR(x) ((x) << S_PHYADDR)
+#define G_PHYADDR(x) (((x) >> S_PHYADDR) & M_PHYADDR)
+
+#define S_REGADDR 0
+#define M_REGADDR 0x1fU
+#define V_REGADDR(x) ((x) << S_REGADDR)
+#define G_REGADDR(x) (((x) >> S_REGADDR) & M_REGADDR)
+
+#define A_MI_DATA 0x19108
+
+#define S_MDIDATA 0
+#define M_MDIDATA 0xffffU
+#define V_MDIDATA(x) ((x) << S_MDIDATA)
+#define G_MDIDATA(x) (((x) >> S_MDIDATA) & M_MDIDATA)
+
+#define A_MI_OP 0x1910c
+
+#define S_INC 2
+#define V_INC(x) ((x) << S_INC)
+#define F_INC V_INC(1U)
+
+#define S_MDIOP 0
+#define M_MDIOP 0x3U
+#define V_MDIOP(x) ((x) << S_MDIOP)
+#define G_MDIOP(x) (((x) >> S_MDIOP) & M_MDIOP)
+
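+/*
+ * Editor's note (illustrative sketch, not part of the generated header):
+ * every field in this file follows the same naming pattern -- S_<FIELD> is
+ * the bit offset, M_<FIELD> the mask after shifting, V_<FIELD>() places a
+ * value into the field, G_<FIELD>() extracts it from a register word, and
+ * F_<FIELD> is the single-bit form.  The compiled-out example below only
+ * demonstrates that pattern with the MI (MDIO) address register defined
+ * above, and assumes register accessors along the lines of the driver's
+ * t4_read_reg()/t4_write_reg().
+ */
+#if 0	/* example only */
+static inline u32
+mi_addr_example(struct adapter *adap, u32 phy, u32 reg)
+{
+	/* Compose the two A_MI_ADDR fields, then extract one of them back. */
+	t4_write_reg(adap, A_MI_ADDR, V_PHYADDR(phy) | V_REGADDR(reg));
+	return (G_PHYADDR(t4_read_reg(adap, A_MI_ADDR)));
+}
+#endif
+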
+/* registers for module UART */
+#define UART_BASE_ADDR 0x19110
+
+#define A_UART_CONFIG 0x19110
+
+#define S_STOPBITS 22
+#define M_STOPBITS 0x3U
+#define V_STOPBITS(x) ((x) << S_STOPBITS)
+#define G_STOPBITS(x) (((x) >> S_STOPBITS) & M_STOPBITS)
+
+#define S_PARITY 20
+#define M_PARITY 0x3U
+#define V_PARITY(x) ((x) << S_PARITY)
+#define G_PARITY(x) (((x) >> S_PARITY) & M_PARITY)
+
+#define S_DATABITS 16
+#define M_DATABITS 0xfU
+#define V_DATABITS(x) ((x) << S_DATABITS)
+#define G_DATABITS(x) (((x) >> S_DATABITS) & M_DATABITS)
+
+#define S_UART_CLKDIV 0
+#define M_UART_CLKDIV 0xfffU
+#define V_UART_CLKDIV(x) ((x) << S_UART_CLKDIV)
+#define G_UART_CLKDIV(x) (((x) >> S_UART_CLKDIV) & M_UART_CLKDIV)
+
+/* registers for module PMU */
+#define PMU_BASE_ADDR 0x19120
+
+#define A_PMU_PART_CG_PWRMODE 0x19120
+
+#define S_TPPARTCGEN 14
+#define V_TPPARTCGEN(x) ((x) << S_TPPARTCGEN)
+#define F_TPPARTCGEN V_TPPARTCGEN(1U)
+
+#define S_PDPPARTCGEN 13
+#define V_PDPPARTCGEN(x) ((x) << S_PDPPARTCGEN)
+#define F_PDPPARTCGEN V_PDPPARTCGEN(1U)
+
+#define S_PCIEPARTCGEN 12
+#define V_PCIEPARTCGEN(x) ((x) << S_PCIEPARTCGEN)
+#define F_PCIEPARTCGEN V_PCIEPARTCGEN(1U)
+
+#define S_EDC1PARTCGEN 11
+#define V_EDC1PARTCGEN(x) ((x) << S_EDC1PARTCGEN)
+#define F_EDC1PARTCGEN V_EDC1PARTCGEN(1U)
+
+#define S_MCPARTCGEN 10
+#define V_MCPARTCGEN(x) ((x) << S_MCPARTCGEN)
+#define F_MCPARTCGEN V_MCPARTCGEN(1U)
+
+#define S_EDC0PARTCGEN 9
+#define V_EDC0PARTCGEN(x) ((x) << S_EDC0PARTCGEN)
+#define F_EDC0PARTCGEN V_EDC0PARTCGEN(1U)
+
+#define S_LEPARTCGEN 8
+#define V_LEPARTCGEN(x) ((x) << S_LEPARTCGEN)
+#define F_LEPARTCGEN V_LEPARTCGEN(1U)
+
+#define S_INITPOWERMODE 0
+#define M_INITPOWERMODE 0x3U
+#define V_INITPOWERMODE(x) ((x) << S_INITPOWERMODE)
+#define G_INITPOWERMODE(x) (((x) >> S_INITPOWERMODE) & M_INITPOWERMODE)
+
+#define A_PMU_SLEEPMODE_WAKEUP 0x19124
+
+#define S_HWWAKEUPEN 5
+#define V_HWWAKEUPEN(x) ((x) << S_HWWAKEUPEN)
+#define F_HWWAKEUPEN V_HWWAKEUPEN(1U)
+
+#define S_PORT3SLEEPMODE 4
+#define V_PORT3SLEEPMODE(x) ((x) << S_PORT3SLEEPMODE)
+#define F_PORT3SLEEPMODE V_PORT3SLEEPMODE(1U)
+
+#define S_PORT2SLEEPMODE 3
+#define V_PORT2SLEEPMODE(x) ((x) << S_PORT2SLEEPMODE)
+#define F_PORT2SLEEPMODE V_PORT2SLEEPMODE(1U)
+
+#define S_PORT1SLEEPMODE 2
+#define V_PORT1SLEEPMODE(x) ((x) << S_PORT1SLEEPMODE)
+#define F_PORT1SLEEPMODE V_PORT1SLEEPMODE(1U)
+
+#define S_PORT0SLEEPMODE 1
+#define V_PORT0SLEEPMODE(x) ((x) << S_PORT0SLEEPMODE)
+#define F_PORT0SLEEPMODE V_PORT0SLEEPMODE(1U)
+
+#define S_WAKEUP 0
+#define V_WAKEUP(x) ((x) << S_WAKEUP)
+#define F_WAKEUP V_WAKEUP(1U)
+
+/* registers for module ULP_RX */
+#define ULP_RX_BASE_ADDR 0x19150
+
+#define A_ULP_RX_CTL 0x19150
+
+#define S_PCMD1THRESHOLD 24
+#define M_PCMD1THRESHOLD 0xffU
+#define V_PCMD1THRESHOLD(x) ((x) << S_PCMD1THRESHOLD)
+#define G_PCMD1THRESHOLD(x) (((x) >> S_PCMD1THRESHOLD) & M_PCMD1THRESHOLD)
+
+#define S_PCMD0THRESHOLD 16
+#define M_PCMD0THRESHOLD 0xffU
+#define V_PCMD0THRESHOLD(x) ((x) << S_PCMD0THRESHOLD)
+#define G_PCMD0THRESHOLD(x) (((x) >> S_PCMD0THRESHOLD) & M_PCMD0THRESHOLD)
+
+#define S_DISABLE_0B_STAG_ERR 14
+#define V_DISABLE_0B_STAG_ERR(x) ((x) << S_DISABLE_0B_STAG_ERR)
+#define F_DISABLE_0B_STAG_ERR V_DISABLE_0B_STAG_ERR(1U)
+
+#define S_RDMA_0B_WR_OPCODE 10
+#define M_RDMA_0B_WR_OPCODE 0xfU
+#define V_RDMA_0B_WR_OPCODE(x) ((x) << S_RDMA_0B_WR_OPCODE)
+#define G_RDMA_0B_WR_OPCODE(x) (((x) >> S_RDMA_0B_WR_OPCODE) & M_RDMA_0B_WR_OPCODE)
+
+#define S_RDMA_0B_WR_PASS 9
+#define V_RDMA_0B_WR_PASS(x) ((x) << S_RDMA_0B_WR_PASS)
+#define F_RDMA_0B_WR_PASS V_RDMA_0B_WR_PASS(1U)
+
+#define S_STAG_RQE 8
+#define V_STAG_RQE(x) ((x) << S_STAG_RQE)
+#define F_STAG_RQE V_STAG_RQE(1U)
+
+#define S_RDMA_STATE_EN 7
+#define V_RDMA_STATE_EN(x) ((x) << S_RDMA_STATE_EN)
+#define F_RDMA_STATE_EN V_RDMA_STATE_EN(1U)
+
+#define S_CRC1_EN 6
+#define V_CRC1_EN(x) ((x) << S_CRC1_EN)
+#define F_CRC1_EN V_CRC1_EN(1U)
+
+#define S_RDMA_0B_WR_CQE 5
+#define V_RDMA_0B_WR_CQE(x) ((x) << S_RDMA_0B_WR_CQE)
+#define F_RDMA_0B_WR_CQE V_RDMA_0B_WR_CQE(1U)
+
+#define S_PCIE_ATRB_EN 4
+#define V_PCIE_ATRB_EN(x) ((x) << S_PCIE_ATRB_EN)
+#define F_PCIE_ATRB_EN V_PCIE_ATRB_EN(1U)
+
+#define S_RDMA_PERMISSIVE_MODE 3
+#define V_RDMA_PERMISSIVE_MODE(x) ((x) << S_RDMA_PERMISSIVE_MODE)
+#define F_RDMA_PERMISSIVE_MODE V_RDMA_PERMISSIVE_MODE(1U)
+
+#define S_PAGEPODME 2
+#define V_PAGEPODME(x) ((x) << S_PAGEPODME)
+#define F_PAGEPODME V_PAGEPODME(1U)
+
+#define S_ISCSITAGTCB 1
+#define V_ISCSITAGTCB(x) ((x) << S_ISCSITAGTCB)
+#define F_ISCSITAGTCB V_ISCSITAGTCB(1U)
+
+#define S_TDDPTAGTCB 0
+#define V_TDDPTAGTCB(x) ((x) << S_TDDPTAGTCB)
+#define F_TDDPTAGTCB V_TDDPTAGTCB(1U)
+
+#define A_ULP_RX_INT_ENABLE 0x19154
+
+#define S_ENABLE_CTX_1 24
+#define V_ENABLE_CTX_1(x) ((x) << S_ENABLE_CTX_1)
+#define F_ENABLE_CTX_1 V_ENABLE_CTX_1(1U)
+
+#define S_ENABLE_CTX_0 23
+#define V_ENABLE_CTX_0(x) ((x) << S_ENABLE_CTX_0)
+#define F_ENABLE_CTX_0 V_ENABLE_CTX_0(1U)
+
+#define S_ENABLE_FF 22
+#define V_ENABLE_FF(x) ((x) << S_ENABLE_FF)
+#define F_ENABLE_FF V_ENABLE_FF(1U)
+
+#define S_ENABLE_APF_1 21
+#define V_ENABLE_APF_1(x) ((x) << S_ENABLE_APF_1)
+#define F_ENABLE_APF_1 V_ENABLE_APF_1(1U)
+
+#define S_ENABLE_APF_0 20
+#define V_ENABLE_APF_0(x) ((x) << S_ENABLE_APF_0)
+#define F_ENABLE_APF_0 V_ENABLE_APF_0(1U)
+
+#define S_ENABLE_AF_1 19
+#define V_ENABLE_AF_1(x) ((x) << S_ENABLE_AF_1)
+#define F_ENABLE_AF_1 V_ENABLE_AF_1(1U)
+
+#define S_ENABLE_AF_0 18
+#define V_ENABLE_AF_0(x) ((x) << S_ENABLE_AF_0)
+#define F_ENABLE_AF_0 V_ENABLE_AF_0(1U)
+
+#define S_ENABLE_PCMDF_1 17
+#define V_ENABLE_PCMDF_1(x) ((x) << S_ENABLE_PCMDF_1)
+#define F_ENABLE_PCMDF_1 V_ENABLE_PCMDF_1(1U)
+
+#define S_ENABLE_MPARC_1 16
+#define V_ENABLE_MPARC_1(x) ((x) << S_ENABLE_MPARC_1)
+#define F_ENABLE_MPARC_1 V_ENABLE_MPARC_1(1U)
+
+#define S_ENABLE_MPARF_1 15
+#define V_ENABLE_MPARF_1(x) ((x) << S_ENABLE_MPARF_1)
+#define F_ENABLE_MPARF_1 V_ENABLE_MPARF_1(1U)
+
+#define S_ENABLE_DDPCF_1 14
+#define V_ENABLE_DDPCF_1(x) ((x) << S_ENABLE_DDPCF_1)
+#define F_ENABLE_DDPCF_1 V_ENABLE_DDPCF_1(1U)
+
+#define S_ENABLE_TPTCF_1 13
+#define V_ENABLE_TPTCF_1(x) ((x) << S_ENABLE_TPTCF_1)
+#define F_ENABLE_TPTCF_1 V_ENABLE_TPTCF_1(1U)
+
+#define S_ENABLE_PCMDF_0 12
+#define V_ENABLE_PCMDF_0(x) ((x) << S_ENABLE_PCMDF_0)
+#define F_ENABLE_PCMDF_0 V_ENABLE_PCMDF_0(1U)
+
+#define S_ENABLE_MPARC_0 11
+#define V_ENABLE_MPARC_0(x) ((x) << S_ENABLE_MPARC_0)
+#define F_ENABLE_MPARC_0 V_ENABLE_MPARC_0(1U)
+
+#define S_ENABLE_MPARF_0 10
+#define V_ENABLE_MPARF_0(x) ((x) << S_ENABLE_MPARF_0)
+#define F_ENABLE_MPARF_0 V_ENABLE_MPARF_0(1U)
+
+#define S_ENABLE_DDPCF_0 9
+#define V_ENABLE_DDPCF_0(x) ((x) << S_ENABLE_DDPCF_0)
+#define F_ENABLE_DDPCF_0 V_ENABLE_DDPCF_0(1U)
+
+#define S_ENABLE_TPTCF_0 8
+#define V_ENABLE_TPTCF_0(x) ((x) << S_ENABLE_TPTCF_0)
+#define F_ENABLE_TPTCF_0 V_ENABLE_TPTCF_0(1U)
+
+#define S_ENABLE_DDPDF_1 7
+#define V_ENABLE_DDPDF_1(x) ((x) << S_ENABLE_DDPDF_1)
+#define F_ENABLE_DDPDF_1 V_ENABLE_DDPDF_1(1U)
+
+#define S_ENABLE_DDPMF_1 6
+#define V_ENABLE_DDPMF_1(x) ((x) << S_ENABLE_DDPMF_1)
+#define F_ENABLE_DDPMF_1 V_ENABLE_DDPMF_1(1U)
+
+#define S_ENABLE_MEMRF_1 5
+#define V_ENABLE_MEMRF_1(x) ((x) << S_ENABLE_MEMRF_1)
+#define F_ENABLE_MEMRF_1 V_ENABLE_MEMRF_1(1U)
+
+#define S_ENABLE_PRSDF_1 4
+#define V_ENABLE_PRSDF_1(x) ((x) << S_ENABLE_PRSDF_1)
+#define F_ENABLE_PRSDF_1 V_ENABLE_PRSDF_1(1U)
+
+#define S_ENABLE_DDPDF_0 3
+#define V_ENABLE_DDPDF_0(x) ((x) << S_ENABLE_DDPDF_0)
+#define F_ENABLE_DDPDF_0 V_ENABLE_DDPDF_0(1U)
+
+#define S_ENABLE_DDPMF_0 2
+#define V_ENABLE_DDPMF_0(x) ((x) << S_ENABLE_DDPMF_0)
+#define F_ENABLE_DDPMF_0 V_ENABLE_DDPMF_0(1U)
+
+#define S_ENABLE_MEMRF_0 1
+#define V_ENABLE_MEMRF_0(x) ((x) << S_ENABLE_MEMRF_0)
+#define F_ENABLE_MEMRF_0 V_ENABLE_MEMRF_0(1U)
+
+#define S_ENABLE_PRSDF_0 0
+#define V_ENABLE_PRSDF_0(x) ((x) << S_ENABLE_PRSDF_0)
+#define F_ENABLE_PRSDF_0 V_ENABLE_PRSDF_0(1U)
+
+#define A_ULP_RX_INT_CAUSE 0x19158
+
+#define S_CAUSE_CTX_1 24
+#define V_CAUSE_CTX_1(x) ((x) << S_CAUSE_CTX_1)
+#define F_CAUSE_CTX_1 V_CAUSE_CTX_1(1U)
+
+#define S_CAUSE_CTX_0 23
+#define V_CAUSE_CTX_0(x) ((x) << S_CAUSE_CTX_0)
+#define F_CAUSE_CTX_0 V_CAUSE_CTX_0(1U)
+
+#define S_CAUSE_FF 22
+#define V_CAUSE_FF(x) ((x) << S_CAUSE_FF)
+#define F_CAUSE_FF V_CAUSE_FF(1U)
+
+#define S_CAUSE_APF_1 21
+#define V_CAUSE_APF_1(x) ((x) << S_CAUSE_APF_1)
+#define F_CAUSE_APF_1 V_CAUSE_APF_1(1U)
+
+#define S_CAUSE_APF_0 20
+#define V_CAUSE_APF_0(x) ((x) << S_CAUSE_APF_0)
+#define F_CAUSE_APF_0 V_CAUSE_APF_0(1U)
+
+#define S_CAUSE_AF_1 19
+#define V_CAUSE_AF_1(x) ((x) << S_CAUSE_AF_1)
+#define F_CAUSE_AF_1 V_CAUSE_AF_1(1U)
+
+#define S_CAUSE_AF_0 18
+#define V_CAUSE_AF_0(x) ((x) << S_CAUSE_AF_0)
+#define F_CAUSE_AF_0 V_CAUSE_AF_0(1U)
+
+#define S_CAUSE_PCMDF_1 17
+#define V_CAUSE_PCMDF_1(x) ((x) << S_CAUSE_PCMDF_1)
+#define F_CAUSE_PCMDF_1 V_CAUSE_PCMDF_1(1U)
+
+#define S_CAUSE_MPARC_1 16
+#define V_CAUSE_MPARC_1(x) ((x) << S_CAUSE_MPARC_1)
+#define F_CAUSE_MPARC_1 V_CAUSE_MPARC_1(1U)
+
+#define S_CAUSE_MPARF_1 15
+#define V_CAUSE_MPARF_1(x) ((x) << S_CAUSE_MPARF_1)
+#define F_CAUSE_MPARF_1 V_CAUSE_MPARF_1(1U)
+
+#define S_CAUSE_DDPCF_1 14
+#define V_CAUSE_DDPCF_1(x) ((x) << S_CAUSE_DDPCF_1)
+#define F_CAUSE_DDPCF_1 V_CAUSE_DDPCF_1(1U)
+
+#define S_CAUSE_TPTCF_1 13
+#define V_CAUSE_TPTCF_1(x) ((x) << S_CAUSE_TPTCF_1)
+#define F_CAUSE_TPTCF_1 V_CAUSE_TPTCF_1(1U)
+
+#define S_CAUSE_PCMDF_0 12
+#define V_CAUSE_PCMDF_0(x) ((x) << S_CAUSE_PCMDF_0)
+#define F_CAUSE_PCMDF_0 V_CAUSE_PCMDF_0(1U)
+
+#define S_CAUSE_MPARC_0 11
+#define V_CAUSE_MPARC_0(x) ((x) << S_CAUSE_MPARC_0)
+#define F_CAUSE_MPARC_0 V_CAUSE_MPARC_0(1U)
+
+#define S_CAUSE_MPARF_0 10
+#define V_CAUSE_MPARF_0(x) ((x) << S_CAUSE_MPARF_0)
+#define F_CAUSE_MPARF_0 V_CAUSE_MPARF_0(1U)
+
+#define S_CAUSE_DDPCF_0 9
+#define V_CAUSE_DDPCF_0(x) ((x) << S_CAUSE_DDPCF_0)
+#define F_CAUSE_DDPCF_0 V_CAUSE_DDPCF_0(1U)
+
+#define S_CAUSE_TPTCF_0 8
+#define V_CAUSE_TPTCF_0(x) ((x) << S_CAUSE_TPTCF_0)
+#define F_CAUSE_TPTCF_0 V_CAUSE_TPTCF_0(1U)
+
+#define S_CAUSE_DDPDF_1 7
+#define V_CAUSE_DDPDF_1(x) ((x) << S_CAUSE_DDPDF_1)
+#define F_CAUSE_DDPDF_1 V_CAUSE_DDPDF_1(1U)
+
+#define S_CAUSE_DDPMF_1 6
+#define V_CAUSE_DDPMF_1(x) ((x) << S_CAUSE_DDPMF_1)
+#define F_CAUSE_DDPMF_1 V_CAUSE_DDPMF_1(1U)
+
+#define S_CAUSE_MEMRF_1 5
+#define V_CAUSE_MEMRF_1(x) ((x) << S_CAUSE_MEMRF_1)
+#define F_CAUSE_MEMRF_1 V_CAUSE_MEMRF_1(1U)
+
+#define S_CAUSE_PRSDF_1 4
+#define V_CAUSE_PRSDF_1(x) ((x) << S_CAUSE_PRSDF_1)
+#define F_CAUSE_PRSDF_1 V_CAUSE_PRSDF_1(1U)
+
+#define S_CAUSE_DDPDF_0 3
+#define V_CAUSE_DDPDF_0(x) ((x) << S_CAUSE_DDPDF_0)
+#define F_CAUSE_DDPDF_0 V_CAUSE_DDPDF_0(1U)
+
+#define S_CAUSE_DDPMF_0 2
+#define V_CAUSE_DDPMF_0(x) ((x) << S_CAUSE_DDPMF_0)
+#define F_CAUSE_DDPMF_0 V_CAUSE_DDPMF_0(1U)
+
+#define S_CAUSE_MEMRF_0 1
+#define V_CAUSE_MEMRF_0(x) ((x) << S_CAUSE_MEMRF_0)
+#define F_CAUSE_MEMRF_0 V_CAUSE_MEMRF_0(1U)
+
+#define S_CAUSE_PRSDF_0 0
+#define V_CAUSE_PRSDF_0(x) ((x) << S_CAUSE_PRSDF_0)
+#define F_CAUSE_PRSDF_0 V_CAUSE_PRSDF_0(1U)
+
+#define A_ULP_RX_ISCSI_LLIMIT 0x1915c
+
+#define S_ISCSILLIMIT 6
+#define M_ISCSILLIMIT 0x3ffffffU
+#define V_ISCSILLIMIT(x) ((x) << S_ISCSILLIMIT)
+#define G_ISCSILLIMIT(x) (((x) >> S_ISCSILLIMIT) & M_ISCSILLIMIT)
+
+#define A_ULP_RX_ISCSI_ULIMIT 0x19160
+
+#define S_ISCSIULIMIT 6
+#define M_ISCSIULIMIT 0x3ffffffU
+#define V_ISCSIULIMIT(x) ((x) << S_ISCSIULIMIT)
+#define G_ISCSIULIMIT(x) (((x) >> S_ISCSIULIMIT) & M_ISCSIULIMIT)
+
+#define A_ULP_RX_ISCSI_TAGMASK 0x19164
+
+#define S_ISCSITAGMASK 6
+#define M_ISCSITAGMASK 0x3ffffffU
+#define V_ISCSITAGMASK(x) ((x) << S_ISCSITAGMASK)
+#define G_ISCSITAGMASK(x) (((x) >> S_ISCSITAGMASK) & M_ISCSITAGMASK)
+
+#define A_ULP_RX_ISCSI_PSZ 0x19168
+
+#define S_HPZ3 24
+#define M_HPZ3 0xfU
+#define V_HPZ3(x) ((x) << S_HPZ3)
+#define G_HPZ3(x) (((x) >> S_HPZ3) & M_HPZ3)
+
+#define S_HPZ2 16
+#define M_HPZ2 0xfU
+#define V_HPZ2(x) ((x) << S_HPZ2)
+#define G_HPZ2(x) (((x) >> S_HPZ2) & M_HPZ2)
+
+#define S_HPZ1 8
+#define M_HPZ1 0xfU
+#define V_HPZ1(x) ((x) << S_HPZ1)
+#define G_HPZ1(x) (((x) >> S_HPZ1) & M_HPZ1)
+
+#define S_HPZ0 0
+#define M_HPZ0 0xfU
+#define V_HPZ0(x) ((x) << S_HPZ0)
+#define G_HPZ0(x) (((x) >> S_HPZ0) & M_HPZ0)
+
+#define A_ULP_RX_TDDP_LLIMIT 0x1916c
+
+#define S_TDDPLLIMIT 6
+#define M_TDDPLLIMIT 0x3ffffffU
+#define V_TDDPLLIMIT(x) ((x) << S_TDDPLLIMIT)
+#define G_TDDPLLIMIT(x) (((x) >> S_TDDPLLIMIT) & M_TDDPLLIMIT)
+
+#define A_ULP_RX_TDDP_ULIMIT 0x19170
+
+#define S_TDDPULIMIT 6
+#define M_TDDPULIMIT 0x3ffffffU
+#define V_TDDPULIMIT(x) ((x) << S_TDDPULIMIT)
+#define G_TDDPULIMIT(x) (((x) >> S_TDDPULIMIT) & M_TDDPULIMIT)
+
+#define A_ULP_RX_TDDP_TAGMASK 0x19174
+
+#define S_TDDPTAGMASK 6
+#define M_TDDPTAGMASK 0x3ffffffU
+#define V_TDDPTAGMASK(x) ((x) << S_TDDPTAGMASK)
+#define G_TDDPTAGMASK(x) (((x) >> S_TDDPTAGMASK) & M_TDDPTAGMASK)
+
+#define A_ULP_RX_TDDP_PSZ 0x19178
+#define A_ULP_RX_STAG_LLIMIT 0x1917c
+#define A_ULP_RX_STAG_ULIMIT 0x19180
+#define A_ULP_RX_RQ_LLIMIT 0x19184
+#define A_ULP_RX_RQ_ULIMIT 0x19188
+#define A_ULP_RX_PBL_LLIMIT 0x1918c
+#define A_ULP_RX_PBL_ULIMIT 0x19190
+#define A_ULP_RX_CTX_BASE 0x19194
+#define A_ULP_RX_PERR_ENABLE 0x1919c
+#define A_ULP_RX_PERR_INJECT 0x191a0
+#define A_ULP_RX_RQUDP_LLIMIT 0x191a4
+#define A_ULP_RX_RQUDP_ULIMIT 0x191a8
+#define A_ULP_RX_CTX_ACC_CH0 0x191ac
+
+#define S_REQ 21
+#define V_REQ(x) ((x) << S_REQ)
+#define F_REQ V_REQ(1U)
+
+#define S_WB 20
+#define V_WB(x) ((x) << S_WB)
+#define F_WB V_WB(1U)
+
+#define S_ULPRX_TID 0
+#define M_ULPRX_TID 0xfffffU
+#define V_ULPRX_TID(x) ((x) << S_ULPRX_TID)
+#define G_ULPRX_TID(x) (((x) >> S_ULPRX_TID) & M_ULPRX_TID)
+
+#define A_ULP_RX_CTX_ACC_CH1 0x191b0
+#define A_ULP_RX_SE_CNT_ERR 0x191d0
+#define A_ULP_RX_SE_CNT_CLR 0x191d4
+
+#define S_CLRCHAN0 4
+#define M_CLRCHAN0 0xfU
+#define V_CLRCHAN0(x) ((x) << S_CLRCHAN0)
+#define G_CLRCHAN0(x) (((x) >> S_CLRCHAN0) & M_CLRCHAN0)
+
+#define S_CLRCHAN1 0
+#define M_CLRCHAN1 0xfU
+#define V_CLRCHAN1(x) ((x) << S_CLRCHAN1)
+#define G_CLRCHAN1(x) (((x) >> S_CLRCHAN1) & M_CLRCHAN1)
+
+#define A_ULP_RX_SE_CNT_CH0 0x191d8
+
+#define S_SOP_CNT_OUT0 28
+#define M_SOP_CNT_OUT0 0xfU
+#define V_SOP_CNT_OUT0(x) ((x) << S_SOP_CNT_OUT0)
+#define G_SOP_CNT_OUT0(x) (((x) >> S_SOP_CNT_OUT0) & M_SOP_CNT_OUT0)
+
+#define S_EOP_CNT_OUT0 24
+#define M_EOP_CNT_OUT0 0xfU
+#define V_EOP_CNT_OUT0(x) ((x) << S_EOP_CNT_OUT0)
+#define G_EOP_CNT_OUT0(x) (((x) >> S_EOP_CNT_OUT0) & M_EOP_CNT_OUT0)
+
+#define S_SOP_CNT_AL0 20
+#define M_SOP_CNT_AL0 0xfU
+#define V_SOP_CNT_AL0(x) ((x) << S_SOP_CNT_AL0)
+#define G_SOP_CNT_AL0(x) (((x) >> S_SOP_CNT_AL0) & M_SOP_CNT_AL0)
+
+#define S_EOP_CNT_AL0 16
+#define M_EOP_CNT_AL0 0xfU
+#define V_EOP_CNT_AL0(x) ((x) << S_EOP_CNT_AL0)
+#define G_EOP_CNT_AL0(x) (((x) >> S_EOP_CNT_AL0) & M_EOP_CNT_AL0)
+
+#define S_SOP_CNT_MR0 12
+#define M_SOP_CNT_MR0 0xfU
+#define V_SOP_CNT_MR0(x) ((x) << S_SOP_CNT_MR0)
+#define G_SOP_CNT_MR0(x) (((x) >> S_SOP_CNT_MR0) & M_SOP_CNT_MR0)
+
+#define S_EOP_CNT_MR0 8
+#define M_EOP_CNT_MR0 0xfU
+#define V_EOP_CNT_MR0(x) ((x) << S_EOP_CNT_MR0)
+#define G_EOP_CNT_MR0(x) (((x) >> S_EOP_CNT_MR0) & M_EOP_CNT_MR0)
+
+#define S_SOP_CNT_IN0 4
+#define M_SOP_CNT_IN0 0xfU
+#define V_SOP_CNT_IN0(x) ((x) << S_SOP_CNT_IN0)
+#define G_SOP_CNT_IN0(x) (((x) >> S_SOP_CNT_IN0) & M_SOP_CNT_IN0)
+
+#define S_EOP_CNT_IN0 0
+#define M_EOP_CNT_IN0 0xfU
+#define V_EOP_CNT_IN0(x) ((x) << S_EOP_CNT_IN0)
+#define G_EOP_CNT_IN0(x) (((x) >> S_EOP_CNT_IN0) & M_EOP_CNT_IN0)
+
+#define A_ULP_RX_SE_CNT_CH1 0x191dc
+
+#define S_SOP_CNT_OUT1 28
+#define M_SOP_CNT_OUT1 0xfU
+#define V_SOP_CNT_OUT1(x) ((x) << S_SOP_CNT_OUT1)
+#define G_SOP_CNT_OUT1(x) (((x) >> S_SOP_CNT_OUT1) & M_SOP_CNT_OUT1)
+
+#define S_EOP_CNT_OUT1 24
+#define M_EOP_CNT_OUT1 0xfU
+#define V_EOP_CNT_OUT1(x) ((x) << S_EOP_CNT_OUT1)
+#define G_EOP_CNT_OUT1(x) (((x) >> S_EOP_CNT_OUT1) & M_EOP_CNT_OUT1)
+
+#define S_SOP_CNT_AL1 20
+#define M_SOP_CNT_AL1 0xfU
+#define V_SOP_CNT_AL1(x) ((x) << S_SOP_CNT_AL1)
+#define G_SOP_CNT_AL1(x) (((x) >> S_SOP_CNT_AL1) & M_SOP_CNT_AL1)
+
+#define S_EOP_CNT_AL1 16
+#define M_EOP_CNT_AL1 0xfU
+#define V_EOP_CNT_AL1(x) ((x) << S_EOP_CNT_AL1)
+#define G_EOP_CNT_AL1(x) (((x) >> S_EOP_CNT_AL1) & M_EOP_CNT_AL1)
+
+#define S_SOP_CNT_MR1 12
+#define M_SOP_CNT_MR1 0xfU
+#define V_SOP_CNT_MR1(x) ((x) << S_SOP_CNT_MR1)
+#define G_SOP_CNT_MR1(x) (((x) >> S_SOP_CNT_MR1) & M_SOP_CNT_MR1)
+
+#define S_EOP_CNT_MR1 8
+#define M_EOP_CNT_MR1 0xfU
+#define V_EOP_CNT_MR1(x) ((x) << S_EOP_CNT_MR1)
+#define G_EOP_CNT_MR1(x) (((x) >> S_EOP_CNT_MR1) & M_EOP_CNT_MR1)
+
+#define S_SOP_CNT_IN1 4
+#define M_SOP_CNT_IN1 0xfU
+#define V_SOP_CNT_IN1(x) ((x) << S_SOP_CNT_IN1)
+#define G_SOP_CNT_IN1(x) (((x) >> S_SOP_CNT_IN1) & M_SOP_CNT_IN1)
+
+#define S_EOP_CNT_IN1 0
+#define M_EOP_CNT_IN1 0xfU
+#define V_EOP_CNT_IN1(x) ((x) << S_EOP_CNT_IN1)
+#define G_EOP_CNT_IN1(x) (((x) >> S_EOP_CNT_IN1) & M_EOP_CNT_IN1)
+
+#define A_ULP_RX_DBG_CTL 0x191e0
+
+#define S_EN_DBG_H 17
+#define V_EN_DBG_H(x) ((x) << S_EN_DBG_H)
+#define F_EN_DBG_H V_EN_DBG_H(1U)
+
+#define S_EN_DBG_L 16
+#define V_EN_DBG_L(x) ((x) << S_EN_DBG_L)
+#define F_EN_DBG_L V_EN_DBG_L(1U)
+
+#define S_SEL_H 8
+#define M_SEL_H 0xffU
+#define V_SEL_H(x) ((x) << S_SEL_H)
+#define G_SEL_H(x) (((x) >> S_SEL_H) & M_SEL_H)
+
+#define S_SEL_L 0
+#define M_SEL_L 0xffU
+#define V_SEL_L(x) ((x) << S_SEL_L)
+#define G_SEL_L(x) (((x) >> S_SEL_L) & M_SEL_L)
+
+#define A_ULP_RX_DBG_DATAH 0x191e4
+#define A_ULP_RX_DBG_DATAL 0x191e8
+#define A_ULP_RX_LA_CHNL 0x19238
+
+#define S_CHNL_SEL 0
+#define V_CHNL_SEL(x) ((x) << S_CHNL_SEL)
+#define F_CHNL_SEL V_CHNL_SEL(1U)
+
+#define A_ULP_RX_LA_CTL 0x1923c
+
+#define S_TRC_SEL 0
+#define V_TRC_SEL(x) ((x) << S_TRC_SEL)
+#define F_TRC_SEL V_TRC_SEL(1U)
+
+#define A_ULP_RX_LA_RDPTR 0x19240
+
+#define S_RD_PTR 0
+#define M_RD_PTR 0x1ffU
+#define V_RD_PTR(x) ((x) << S_RD_PTR)
+#define G_RD_PTR(x) (((x) >> S_RD_PTR) & M_RD_PTR)
+
+#define A_ULP_RX_LA_RDDATA 0x19244
+#define A_ULP_RX_LA_WRPTR 0x19248
+
+#define S_WR_PTR 0
+#define M_WR_PTR 0x1ffU
+#define V_WR_PTR(x) ((x) << S_WR_PTR)
+#define G_WR_PTR(x) (((x) >> S_WR_PTR) & M_WR_PTR)
+
+#define A_ULP_RX_LA_RESERVED 0x1924c
+
+/* registers for module SF */
+#define SF_BASE_ADDR 0x193f8
+
+#define A_SF_DATA 0x193f8
+#define A_SF_OP 0x193fc
+
+#define S_SF_LOCK 4
+#define V_SF_LOCK(x) ((x) << S_SF_LOCK)
+#define F_SF_LOCK V_SF_LOCK(1U)
+
+#define S_CONT 3
+#define V_CONT(x) ((x) << S_CONT)
+#define F_CONT V_CONT(1U)
+
+#define S_BYTECNT 1
+#define M_BYTECNT 0x3U
+#define V_BYTECNT(x) ((x) << S_BYTECNT)
+#define G_BYTECNT(x) (((x) >> S_BYTECNT) & M_BYTECNT)
+
+/* registers for module PL */
+#define PL_BASE_ADDR 0x19400
+
+#define A_PL_VF_WHOAMI 0x0
+
+#define S_PORTXMAP 24
+#define M_PORTXMAP 0x7U
+#define V_PORTXMAP(x) ((x) << S_PORTXMAP)
+#define G_PORTXMAP(x) (((x) >> S_PORTXMAP) & M_PORTXMAP)
+
+#define S_SOURCEBUS 16
+#define M_SOURCEBUS 0x3U
+#define V_SOURCEBUS(x) ((x) << S_SOURCEBUS)
+#define G_SOURCEBUS(x) (((x) >> S_SOURCEBUS) & M_SOURCEBUS)
+
+#define S_SOURCEPF 8
+#define M_SOURCEPF 0x7U
+#define V_SOURCEPF(x) ((x) << S_SOURCEPF)
+#define G_SOURCEPF(x) (((x) >> S_SOURCEPF) & M_SOURCEPF)
+
+#define S_ISVF 7
+#define V_ISVF(x) ((x) << S_ISVF)
+#define F_ISVF V_ISVF(1U)
+
+#define S_VFID 0
+#define M_VFID 0x7fU
+#define V_VFID(x) ((x) << S_VFID)
+#define G_VFID(x) (((x) >> S_VFID) & M_VFID)
+
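+/*
+ * Editor's note (illustrative sketch, not part of the generated header):
+ * a hedged, compiled-out example of decoding a WHOAMI-style register with
+ * the G_*() and F_* helpers defined above.  It assumes a t4_read_reg()
+ * accessor from the driver's common code.
+ */
+#if 0	/* example only */
+static inline int
+whoami_example(struct adapter *adap)
+{
+	u32 whoami = t4_read_reg(adap, A_PL_VF_WHOAMI);
+
+	/* ISVF is a single bit; SOURCEPF and VFID are multi-bit fields. */
+	if (whoami & F_ISVF)
+		return (G_VFID(whoami));
+	return (G_SOURCEPF(whoami));
+}
+#endif
+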
+#define A_PL_PF_INT_CAUSE 0x3c0
+
+#define S_PFSW 3
+#define V_PFSW(x) ((x) << S_PFSW)
+#define F_PFSW V_PFSW(1U)
+
+#define S_PFSGE 2
+#define V_PFSGE(x) ((x) << S_PFSGE)
+#define F_PFSGE V_PFSGE(1U)
+
+#define S_PFCIM 1
+#define V_PFCIM(x) ((x) << S_PFCIM)
+#define F_PFCIM V_PFCIM(1U)
+
+#define S_PFMPS 0
+#define V_PFMPS(x) ((x) << S_PFMPS)
+#define F_PFMPS V_PFMPS(1U)
+
+#define A_PL_PF_INT_ENABLE 0x3c4
+#define A_PL_PF_CTL 0x3c8
+
+#define S_SWINT 0
+#define V_SWINT(x) ((x) << S_SWINT)
+#define F_SWINT V_SWINT(1U)
+
+#define A_PL_WHOAMI 0x19400
+#define A_PL_PERR_CAUSE 0x19404
+
+#define S_UART 28
+#define V_UART(x) ((x) << S_UART)
+#define F_UART V_UART(1U)
+
+#define S_ULP_TX 27
+#define V_ULP_TX(x) ((x) << S_ULP_TX)
+#define F_ULP_TX V_ULP_TX(1U)
+
+#define S_SGE 26
+#define V_SGE(x) ((x) << S_SGE)
+#define F_SGE V_SGE(1U)
+
+#define S_HMA 25
+#define V_HMA(x) ((x) << S_HMA)
+#define F_HMA V_HMA(1U)
+
+#define S_CPL_SWITCH 24
+#define V_CPL_SWITCH(x) ((x) << S_CPL_SWITCH)
+#define F_CPL_SWITCH V_CPL_SWITCH(1U)
+
+#define S_ULP_RX 23
+#define V_ULP_RX(x) ((x) << S_ULP_RX)
+#define F_ULP_RX V_ULP_RX(1U)
+
+#define S_PM_RX 22
+#define V_PM_RX(x) ((x) << S_PM_RX)
+#define F_PM_RX V_PM_RX(1U)
+
+#define S_PM_TX 21
+#define V_PM_TX(x) ((x) << S_PM_TX)
+#define F_PM_TX V_PM_TX(1U)
+
+#define S_MA 20
+#define V_MA(x) ((x) << S_MA)
+#define F_MA V_MA(1U)
+
+#define S_TP 19
+#define V_TP(x) ((x) << S_TP)
+#define F_TP V_TP(1U)
+
+#define S_LE 18
+#define V_LE(x) ((x) << S_LE)
+#define F_LE V_LE(1U)
+
+#define S_EDC1 17
+#define V_EDC1(x) ((x) << S_EDC1)
+#define F_EDC1 V_EDC1(1U)
+
+#define S_EDC0 16
+#define V_EDC0(x) ((x) << S_EDC0)
+#define F_EDC0 V_EDC0(1U)
+
+#define S_MC 15
+#define V_MC(x) ((x) << S_MC)
+#define F_MC V_MC(1U)
+
+#define S_PCIE 14
+#define V_PCIE(x) ((x) << S_PCIE)
+#define F_PCIE V_PCIE(1U)
+
+#define S_PMU 13
+#define V_PMU(x) ((x) << S_PMU)
+#define F_PMU V_PMU(1U)
+
+#define S_XGMAC_KR1 12
+#define V_XGMAC_KR1(x) ((x) << S_XGMAC_KR1)
+#define F_XGMAC_KR1 V_XGMAC_KR1(1U)
+
+#define S_XGMAC_KR0 11
+#define V_XGMAC_KR0(x) ((x) << S_XGMAC_KR0)
+#define F_XGMAC_KR0 V_XGMAC_KR0(1U)
+
+#define S_XGMAC1 10
+#define V_XGMAC1(x) ((x) << S_XGMAC1)
+#define F_XGMAC1 V_XGMAC1(1U)
+
+#define S_XGMAC0 9
+#define V_XGMAC0(x) ((x) << S_XGMAC0)
+#define F_XGMAC0 V_XGMAC0(1U)
+
+#define S_SMB 8
+#define V_SMB(x) ((x) << S_SMB)
+#define F_SMB V_SMB(1U)
+
+#define S_SF 7
+#define V_SF(x) ((x) << S_SF)
+#define F_SF V_SF(1U)
+
+#define S_PL 6
+#define V_PL(x) ((x) << S_PL)
+#define F_PL V_PL(1U)
+
+#define S_NCSI 5
+#define V_NCSI(x) ((x) << S_NCSI)
+#define F_NCSI V_NCSI(1U)
+
+#define S_MPS 4
+#define V_MPS(x) ((x) << S_MPS)
+#define F_MPS V_MPS(1U)
+
+#define S_MI 3
+#define V_MI(x) ((x) << S_MI)
+#define F_MI V_MI(1U)
+
+#define S_DBG 2
+#define V_DBG(x) ((x) << S_DBG)
+#define F_DBG V_DBG(1U)
+
+#define S_I2CM 1
+#define V_I2CM(x) ((x) << S_I2CM)
+#define F_I2CM V_I2CM(1U)
+
+#define S_CIM 0
+#define V_CIM(x) ((x) << S_CIM)
+#define F_CIM V_CIM(1U)
+
+#define A_PL_PERR_ENABLE 0x19408
+#define A_PL_INT_CAUSE 0x1940c
+
+#define S_FLR 30
+#define V_FLR(x) ((x) << S_FLR)
+#define F_FLR V_FLR(1U)
+
+#define S_SW_CIM 29
+#define V_SW_CIM(x) ((x) << S_SW_CIM)
+#define F_SW_CIM V_SW_CIM(1U)
+
+#define A_PL_INT_ENABLE 0x19410
+#define A_PL_INT_MAP0 0x19414
+
+#define S_MAPNCSI 16
+#define M_MAPNCSI 0x1ffU
+#define V_MAPNCSI(x) ((x) << S_MAPNCSI)
+#define G_MAPNCSI(x) (((x) >> S_MAPNCSI) & M_MAPNCSI)
+
+#define S_MAPDEFAULT 0
+#define M_MAPDEFAULT 0x1ffU
+#define V_MAPDEFAULT(x) ((x) << S_MAPDEFAULT)
+#define G_MAPDEFAULT(x) (((x) >> S_MAPDEFAULT) & M_MAPDEFAULT)
+
+#define A_PL_INT_MAP1 0x19418
+
+#define S_MAPXGMAC1 16
+#define M_MAPXGMAC1 0x1ffU
+#define V_MAPXGMAC1(x) ((x) << S_MAPXGMAC1)
+#define G_MAPXGMAC1(x) (((x) >> S_MAPXGMAC1) & M_MAPXGMAC1)
+
+#define S_MAPXGMAC0 0
+#define M_MAPXGMAC0 0x1ffU
+#define V_MAPXGMAC0(x) ((x) << S_MAPXGMAC0)
+#define G_MAPXGMAC0(x) (((x) >> S_MAPXGMAC0) & M_MAPXGMAC0)
+
+#define A_PL_INT_MAP2 0x1941c
+
+#define S_MAPXGMAC_KR1 16
+#define M_MAPXGMAC_KR1 0x1ffU
+#define V_MAPXGMAC_KR1(x) ((x) << S_MAPXGMAC_KR1)
+#define G_MAPXGMAC_KR1(x) (((x) >> S_MAPXGMAC_KR1) & M_MAPXGMAC_KR1)
+
+#define S_MAPXGMAC_KR0 0
+#define M_MAPXGMAC_KR0 0x1ffU
+#define V_MAPXGMAC_KR0(x) ((x) << S_MAPXGMAC_KR0)
+#define G_MAPXGMAC_KR0(x) (((x) >> S_MAPXGMAC_KR0) & M_MAPXGMAC_KR0)
+
+#define A_PL_INT_MAP3 0x19420
+
+#define S_MAPMI 16
+#define M_MAPMI 0x1ffU
+#define V_MAPMI(x) ((x) << S_MAPMI)
+#define G_MAPMI(x) (((x) >> S_MAPMI) & M_MAPMI)
+
+#define S_MAPSMB 0
+#define M_MAPSMB 0x1ffU
+#define V_MAPSMB(x) ((x) << S_MAPSMB)
+#define G_MAPSMB(x) (((x) >> S_MAPSMB) & M_MAPSMB)
+
+#define A_PL_INT_MAP4 0x19424
+
+#define S_MAPDBG 16
+#define M_MAPDBG 0x1ffU
+#define V_MAPDBG(x) ((x) << S_MAPDBG)
+#define G_MAPDBG(x) (((x) >> S_MAPDBG) & M_MAPDBG)
+
+#define S_MAPI2CM 0
+#define M_MAPI2CM 0x1ffU
+#define V_MAPI2CM(x) ((x) << S_MAPI2CM)
+#define G_MAPI2CM(x) (((x) >> S_MAPI2CM) & M_MAPI2CM)
+
+#define A_PL_RST 0x19428
+
+#define S_FATALPERREN 3
+#define V_FATALPERREN(x) ((x) << S_FATALPERREN)
+#define F_FATALPERREN V_FATALPERREN(1U)
+
+#define S_SWINTCIM 2
+#define V_SWINTCIM(x) ((x) << S_SWINTCIM)
+#define F_SWINTCIM V_SWINTCIM(1U)
+
+#define S_PIORST 1
+#define V_PIORST(x) ((x) << S_PIORST)
+#define F_PIORST V_PIORST(1U)
+
+#define S_PIORSTMODE 0
+#define V_PIORSTMODE(x) ((x) << S_PIORSTMODE)
+#define F_PIORSTMODE V_PIORSTMODE(1U)
+
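+/*
+ * Editor's note (illustrative sketch, not part of the generated header):
+ * the F_* macros are simply V_*(1U), so single-bit controls such as the
+ * A_PL_RST bits above are typically OR-ed together in one write.  The
+ * compiled-out example assumes a t4_write_reg() accessor and does not claim
+ * to be the driver's actual reset sequence.
+ */
+#if 0	/* example only */
+static inline void
+pl_rst_example(struct adapter *adap)
+{
+	/* Set both single-bit flags of A_PL_RST in a single write. */
+	t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
+}
+#endif
+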
+#define A_PL_PL_PERR_INJECT 0x1942c
+
+#define S_PL_MEMSEL 1
+#define V_PL_MEMSEL(x) ((x) << S_PL_MEMSEL)
+#define F_PL_MEMSEL V_PL_MEMSEL(1U)
+
+#define A_PL_PL_INT_CAUSE 0x19430
+
+#define S_PF_ENABLEERR 5
+#define V_PF_ENABLEERR(x) ((x) << S_PF_ENABLEERR)
+#define F_PF_ENABLEERR V_PF_ENABLEERR(1U)
+
+#define S_FATALPERR 4
+#define V_FATALPERR(x) ((x) << S_FATALPERR)
+#define F_FATALPERR V_FATALPERR(1U)
+
+#define S_INVALIDACCESS 3
+#define V_INVALIDACCESS(x) ((x) << S_INVALIDACCESS)
+#define F_INVALIDACCESS V_INVALIDACCESS(1U)
+
+#define S_TIMEOUT 2
+#define V_TIMEOUT(x) ((x) << S_TIMEOUT)
+#define F_TIMEOUT V_TIMEOUT(1U)
+
+#define S_PLERR 1
+#define V_PLERR(x) ((x) << S_PLERR)
+#define F_PLERR V_PLERR(1U)
+
+#define S_PERRVFID 0
+#define V_PERRVFID(x) ((x) << S_PERRVFID)
+#define F_PERRVFID V_PERRVFID(1U)
+
+#define A_PL_PL_INT_ENABLE 0x19434
+#define A_PL_PL_PERR_ENABLE 0x19438
+#define A_PL_REV 0x1943c
+
+#define S_REV 0
+#define M_REV 0xfU
+#define V_REV(x) ((x) << S_REV)
+#define G_REV(x) (((x) >> S_REV) & M_REV)
+
+#define A_PL_SEMAPHORE_CTL 0x1944c
+
+#define S_LOCKSTATUS 16
+#define M_LOCKSTATUS 0xffU
+#define V_LOCKSTATUS(x) ((x) << S_LOCKSTATUS)
+#define G_LOCKSTATUS(x) (((x) >> S_LOCKSTATUS) & M_LOCKSTATUS)
+
+#define S_OWNEROVERRIDE 8
+#define V_OWNEROVERRIDE(x) ((x) << S_OWNEROVERRIDE)
+#define F_OWNEROVERRIDE V_OWNEROVERRIDE(1U)
+
+#define S_ENABLEPF 0
+#define M_ENABLEPF 0xffU
+#define V_ENABLEPF(x) ((x) << S_ENABLEPF)
+#define G_ENABLEPF(x) (((x) >> S_ENABLEPF) & M_ENABLEPF)
+
+#define A_PL_SEMAPHORE_LOCK 0x19450
+
+#define S_SEMLOCK 31
+#define V_SEMLOCK(x) ((x) << S_SEMLOCK)
+#define F_SEMLOCK V_SEMLOCK(1U)
+
+#define S_SEMSRCBUS 3
+#define M_SEMSRCBUS 0x3U
+#define V_SEMSRCBUS(x) ((x) << S_SEMSRCBUS)
+#define G_SEMSRCBUS(x) (((x) >> S_SEMSRCBUS) & M_SEMSRCBUS)
+
+#define S_SEMSRCPF 0
+#define M_SEMSRCPF 0x7U
+#define V_SEMSRCPF(x) ((x) << S_SEMSRCPF)
+#define G_SEMSRCPF(x) (((x) >> S_SEMSRCPF) & M_SEMSRCPF)
+
+#define A_PL_PF_ENABLE 0x19470
+
+#define S_PF_ENABLE 0
+#define M_PF_ENABLE 0xffU
+#define V_PF_ENABLE(x) ((x) << S_PF_ENABLE)
+#define G_PF_ENABLE(x) (((x) >> S_PF_ENABLE) & M_PF_ENABLE)
+
+#define A_PL_PORTX_MAP 0x19474
+
+#define S_MAP7 28
+#define M_MAP7 0x7U
+#define V_MAP7(x) ((x) << S_MAP7)
+#define G_MAP7(x) (((x) >> S_MAP7) & M_MAP7)
+
+#define S_MAP6 24
+#define M_MAP6 0x7U
+#define V_MAP6(x) ((x) << S_MAP6)
+#define G_MAP6(x) (((x) >> S_MAP6) & M_MAP6)
+
+#define S_MAP5 20
+#define M_MAP5 0x7U
+#define V_MAP5(x) ((x) << S_MAP5)
+#define G_MAP5(x) (((x) >> S_MAP5) & M_MAP5)
+
+#define S_MAP4 16
+#define M_MAP4 0x7U
+#define V_MAP4(x) ((x) << S_MAP4)
+#define G_MAP4(x) (((x) >> S_MAP4) & M_MAP4)
+
+#define S_MAP3 12
+#define M_MAP3 0x7U
+#define V_MAP3(x) ((x) << S_MAP3)
+#define G_MAP3(x) (((x) >> S_MAP3) & M_MAP3)
+
+#define S_MAP2 8
+#define M_MAP2 0x7U
+#define V_MAP2(x) ((x) << S_MAP2)
+#define G_MAP2(x) (((x) >> S_MAP2) & M_MAP2)
+
+#define S_MAP1 4
+#define M_MAP1 0x7U
+#define V_MAP1(x) ((x) << S_MAP1)
+#define G_MAP1(x) (((x) >> S_MAP1) & M_MAP1)
+
+#define S_MAP0 0
+#define M_MAP0 0x7U
+#define V_MAP0(x) ((x) << S_MAP0)
+#define G_MAP0(x) (((x) >> S_MAP0) & M_MAP0)
+
+#define A_PL_VF_SLICE_L 0x19490
+
+#define S_LIMITADDR 16
+#define M_LIMITADDR 0x3ffU
+#define V_LIMITADDR(x) ((x) << S_LIMITADDR)
+#define G_LIMITADDR(x) (((x) >> S_LIMITADDR) & M_LIMITADDR)
+
+#define S_SLICEBASEADDR 0
+#define M_SLICEBASEADDR 0x3ffU
+#define V_SLICEBASEADDR(x) ((x) << S_SLICEBASEADDR)
+#define G_SLICEBASEADDR(x) (((x) >> S_SLICEBASEADDR) & M_SLICEBASEADDR)
+
+#define A_PL_VF_SLICE_H 0x19494
+
+#define S_MODINDX 16
+#define M_MODINDX 0x7U
+#define V_MODINDX(x) ((x) << S_MODINDX)
+#define G_MODINDX(x) (((x) >> S_MODINDX) & M_MODINDX)
+
+#define S_MODOFFSET 0
+#define M_MODOFFSET 0x3ffU
+#define V_MODOFFSET(x) ((x) << S_MODOFFSET)
+#define G_MODOFFSET(x) (((x) >> S_MODOFFSET) & M_MODOFFSET)
+
+#define A_PL_FLR_VF_STATUS 0x194d0
+#define A_PL_FLR_PF_STATUS 0x194e0
+
+#define S_FLR_PF 0
+#define M_FLR_PF 0xffU
+#define V_FLR_PF(x) ((x) << S_FLR_PF)
+#define G_FLR_PF(x) (((x) >> S_FLR_PF) & M_FLR_PF)
+
+#define A_PL_TIMEOUT_CTL 0x194f0
+
+#define S_PL_TIMEOUT 0
+#define M_PL_TIMEOUT 0xffffU
+#define V_PL_TIMEOUT(x) ((x) << S_PL_TIMEOUT)
+#define G_PL_TIMEOUT(x) (((x) >> S_PL_TIMEOUT) & M_PL_TIMEOUT)
+
+#define A_PL_TIMEOUT_STATUS0 0x194f4
+
+#define S_PL_TOADDR 2
+#define M_PL_TOADDR 0xfffffffU
+#define V_PL_TOADDR(x) ((x) << S_PL_TOADDR)
+#define G_PL_TOADDR(x) (((x) >> S_PL_TOADDR) & M_PL_TOADDR)
+
+#define A_PL_TIMEOUT_STATUS1 0x194f8
+
+#define S_PL_TOVALID 31
+#define V_PL_TOVALID(x) ((x) << S_PL_TOVALID)
+#define F_PL_TOVALID V_PL_TOVALID(1U)
+
+#define S_WRITE 22
+#define V_WRITE(x) ((x) << S_WRITE)
+#define F_WRITE V_WRITE(1U)
+
+#define S_PL_TOBUS 20
+#define M_PL_TOBUS 0x3U
+#define V_PL_TOBUS(x) ((x) << S_PL_TOBUS)
+#define G_PL_TOBUS(x) (((x) >> S_PL_TOBUS) & M_PL_TOBUS)
+
+#define S_RGN 19
+#define V_RGN(x) ((x) << S_RGN)
+#define F_RGN V_RGN(1U)
+
+#define S_PL_TOPF 16
+#define M_PL_TOPF 0x7U
+#define V_PL_TOPF(x) ((x) << S_PL_TOPF)
+#define G_PL_TOPF(x) (((x) >> S_PL_TOPF) & M_PL_TOPF)
+
+#define S_PL_TORID 0
+#define M_PL_TORID 0xffffU
+#define V_PL_TORID(x) ((x) << S_PL_TORID)
+#define G_PL_TORID(x) (((x) >> S_PL_TORID) & M_PL_TORID)
+
+#define A_PL_VFID_MAP 0x19800
+
+#define S_VFID_VLD 7
+#define V_VFID_VLD(x) ((x) << S_VFID_VLD)
+#define F_VFID_VLD V_VFID_VLD(1U)
+
+/* registers for module LE */
+#define LE_BASE_ADDR 0x19c00
+
+#define A_LE_BUF_CONFIG 0x19c00
+#define A_LE_DB_CONFIG 0x19c04
+
+#define S_TCAMCMDOVLAPEN 21
+#define V_TCAMCMDOVLAPEN(x) ((x) << S_TCAMCMDOVLAPEN)
+#define F_TCAMCMDOVLAPEN V_TCAMCMDOVLAPEN(1U)
+
+#define S_HASHEN 20
+#define V_HASHEN(x) ((x) << S_HASHEN)
+#define F_HASHEN V_HASHEN(1U)
+
+#define S_ASBOTHSRCHEN 18
+#define V_ASBOTHSRCHEN(x) ((x) << S_ASBOTHSRCHEN)
+#define F_ASBOTHSRCHEN V_ASBOTHSRCHEN(1U)
+
+#define S_ASLIPCOMPEN 17
+#define V_ASLIPCOMPEN(x) ((x) << S_ASLIPCOMPEN)
+#define F_ASLIPCOMPEN V_ASLIPCOMPEN(1U)
+
+#define S_BUILD 16
+#define V_BUILD(x) ((x) << S_BUILD)
+#define F_BUILD V_BUILD(1U)
+
+#define S_FILTEREN 11
+#define V_FILTEREN(x) ((x) << S_FILTEREN)
+#define F_FILTEREN V_FILTEREN(1U)
+
+#define S_SYNMODE 7
+#define M_SYNMODE 0x3U
+#define V_SYNMODE(x) ((x) << S_SYNMODE)
+#define G_SYNMODE(x) (((x) >> S_SYNMODE) & M_SYNMODE)
+
+#define S_LEBUSEN 5
+#define V_LEBUSEN(x) ((x) << S_LEBUSEN)
+#define F_LEBUSEN V_LEBUSEN(1U)
+
+#define S_ELOOKDUMEN 4
+#define V_ELOOKDUMEN(x) ((x) << S_ELOOKDUMEN)
+#define F_ELOOKDUMEN V_ELOOKDUMEN(1U)
+
+#define S_IPV4ONLYEN 3
+#define V_IPV4ONLYEN(x) ((x) << S_IPV4ONLYEN)
+#define F_IPV4ONLYEN V_IPV4ONLYEN(1U)
+
+#define S_MOSTCMDOEN 2
+#define V_MOSTCMDOEN(x) ((x) << S_MOSTCMDOEN)
+#define F_MOSTCMDOEN V_MOSTCMDOEN(1U)
+
+#define S_DELACTSYNOEN 1
+#define V_DELACTSYNOEN(x) ((x) << S_DELACTSYNOEN)
+#define F_DELACTSYNOEN V_DELACTSYNOEN(1U)
+
+#define S_CMDOVERLAPDIS 0
+#define V_CMDOVERLAPDIS(x) ((x) << S_CMDOVERLAPDIS)
+#define F_CMDOVERLAPDIS V_CMDOVERLAPDIS(1U)
+
+#define A_LE_MISC 0x19c08
+
+#define S_CMPUNVAIL 0
+#define M_CMPUNVAIL 0xfU
+#define V_CMPUNVAIL(x) ((x) << S_CMPUNVAIL)
+#define G_CMPUNVAIL(x) (((x) >> S_CMPUNVAIL) & M_CMPUNVAIL)
+
+#define A_LE_DB_ROUTING_TABLE_INDEX 0x19c10
+
+#define S_RTINDX 7
+#define M_RTINDX 0x3fU
+#define V_RTINDX(x) ((x) << S_RTINDX)
+#define G_RTINDX(x) (((x) >> S_RTINDX) & M_RTINDX)
+
+#define A_LE_DB_FILTER_TABLE_INDEX 0x19c14
+
+#define S_FTINDX 7
+#define M_FTINDX 0x3fU
+#define V_FTINDX(x) ((x) << S_FTINDX)
+#define G_FTINDX(x) (((x) >> S_FTINDX) & M_FTINDX)
+
+#define A_LE_DB_SERVER_INDEX 0x19c18
+
+#define S_SRINDX 7
+#define M_SRINDX 0x3fU
+#define V_SRINDX(x) ((x) << S_SRINDX)
+#define G_SRINDX(x) (((x) >> S_SRINDX) & M_SRINDX)
+
+#define A_LE_DB_CLIP_TABLE_INDEX 0x19c1c
+
+#define S_CLIPTINDX 7
+#define M_CLIPTINDX 0x3fU
+#define V_CLIPTINDX(x) ((x) << S_CLIPTINDX)
+#define G_CLIPTINDX(x) (((x) >> S_CLIPTINDX) & M_CLIPTINDX)
+
+#define A_LE_DB_ACT_CNT_IPV4 0x19c20
+
+#define S_ACTCNTIPV4 0
+#define M_ACTCNTIPV4 0xfffffU
+#define V_ACTCNTIPV4(x) ((x) << S_ACTCNTIPV4)
+#define G_ACTCNTIPV4(x) (((x) >> S_ACTCNTIPV4) & M_ACTCNTIPV4)
+
+#define A_LE_DB_ACT_CNT_IPV6 0x19c24
+
+#define S_ACTCNTIPV6 0
+#define M_ACTCNTIPV6 0xfffffU
+#define V_ACTCNTIPV6(x) ((x) << S_ACTCNTIPV6)
+#define G_ACTCNTIPV6(x) (((x) >> S_ACTCNTIPV6) & M_ACTCNTIPV6)
+
+#define A_LE_DB_HASH_CONFIG 0x19c28
+
+#define S_HASHTIDSIZE 16
+#define M_HASHTIDSIZE 0x3fU
+#define V_HASHTIDSIZE(x) ((x) << S_HASHTIDSIZE)
+#define G_HASHTIDSIZE(x) (((x) >> S_HASHTIDSIZE) & M_HASHTIDSIZE)
+
+#define S_HASHSIZE 0
+#define M_HASHSIZE 0x3fU
+#define V_HASHSIZE(x) ((x) << S_HASHSIZE)
+#define G_HASHSIZE(x) (((x) >> S_HASHSIZE) & M_HASHSIZE)
+
+#define A_LE_DB_HASH_TABLE_BASE 0x19c2c
+#define A_LE_DB_HASH_TID_BASE 0x19c30
+#define A_LE_DB_SIZE 0x19c34
+#define A_LE_DB_INT_ENABLE 0x19c38
+
+#define S_MSGSEL 27
+#define M_MSGSEL 0x1fU
+#define V_MSGSEL(x) ((x) << S_MSGSEL)
+#define G_MSGSEL(x) (((x) >> S_MSGSEL) & M_MSGSEL)
+
+#define S_REQQPARERR 16
+#define V_REQQPARERR(x) ((x) << S_REQQPARERR)
+#define F_REQQPARERR V_REQQPARERR(1U)
+
+#define S_UNKNOWNCMD 15
+#define V_UNKNOWNCMD(x) ((x) << S_UNKNOWNCMD)
+#define F_UNKNOWNCMD V_UNKNOWNCMD(1U)
+
+#define S_DROPFILTERHIT 13
+#define V_DROPFILTERHIT(x) ((x) << S_DROPFILTERHIT)
+#define F_DROPFILTERHIT V_DROPFILTERHIT(1U)
+
+#define S_FILTERHIT 12
+#define V_FILTERHIT(x) ((x) << S_FILTERHIT)
+#define F_FILTERHIT V_FILTERHIT(1U)
+
+#define S_SYNCOOKIEOFF 11
+#define V_SYNCOOKIEOFF(x) ((x) << S_SYNCOOKIEOFF)
+#define F_SYNCOOKIEOFF V_SYNCOOKIEOFF(1U)
+
+#define S_SYNCOOKIEBAD 10
+#define V_SYNCOOKIEBAD(x) ((x) << S_SYNCOOKIEBAD)
+#define F_SYNCOOKIEBAD V_SYNCOOKIEBAD(1U)
+
+#define S_SYNCOOKIE 9
+#define V_SYNCOOKIE(x) ((x) << S_SYNCOOKIE)
+#define F_SYNCOOKIE V_SYNCOOKIE(1U)
+
+#define S_NFASRCHFAIL 8
+#define V_NFASRCHFAIL(x) ((x) << S_NFASRCHFAIL)
+#define F_NFASRCHFAIL V_NFASRCHFAIL(1U)
+
+#define S_ACTRGNFULL 7
+#define V_ACTRGNFULL(x) ((x) << S_ACTRGNFULL)
+#define F_ACTRGNFULL V_ACTRGNFULL(1U)
+
+#define S_PARITYERR 6
+#define V_PARITYERR(x) ((x) << S_PARITYERR)
+#define F_PARITYERR V_PARITYERR(1U)
+
+#define S_LIPMISS 5
+#define V_LIPMISS(x) ((x) << S_LIPMISS)
+#define F_LIPMISS V_LIPMISS(1U)
+
+#define S_LIP0 4
+#define V_LIP0(x) ((x) << S_LIP0)
+#define F_LIP0 V_LIP0(1U)
+
+#define S_MISS 3
+#define V_MISS(x) ((x) << S_MISS)
+#define F_MISS V_MISS(1U)
+
+#define S_ROUTINGHIT 2
+#define V_ROUTINGHIT(x) ((x) << S_ROUTINGHIT)
+#define F_ROUTINGHIT V_ROUTINGHIT(1U)
+
+#define S_ACTIVEHIT 1
+#define V_ACTIVEHIT(x) ((x) << S_ACTIVEHIT)
+#define F_ACTIVEHIT V_ACTIVEHIT(1U)
+
+#define S_SERVERHIT 0
+#define V_SERVERHIT(x) ((x) << S_SERVERHIT)
+#define F_SERVERHIT V_SERVERHIT(1U)
+
+#define A_LE_DB_INT_CAUSE 0x19c3c
+#define A_LE_DB_INT_TID 0x19c40
+
+#define S_INTTID 0
+#define M_INTTID 0xfffffU
+#define V_INTTID(x) ((x) << S_INTTID)
+#define G_INTTID(x) (((x) >> S_INTTID) & M_INTTID)
+
+#define A_LE_DB_INT_PTID 0x19c44
+
+#define S_INTPTID 0
+#define M_INTPTID 0xfffffU
+#define V_INTPTID(x) ((x) << S_INTPTID)
+#define G_INTPTID(x) (((x) >> S_INTPTID) & M_INTPTID)
+
+#define A_LE_DB_INT_INDEX 0x19c48
+
+#define S_INTINDEX 0
+#define M_INTINDEX 0xfffffU
+#define V_INTINDEX(x) ((x) << S_INTINDEX)
+#define G_INTINDEX(x) (((x) >> S_INTINDEX) & M_INTINDEX)
+
+#define A_LE_DB_INT_CMD 0x19c4c
+
+#define S_INTCMD 0
+#define M_INTCMD 0xfU
+#define V_INTCMD(x) ((x) << S_INTCMD)
+#define G_INTCMD(x) (((x) >> S_INTCMD) & M_INTCMD)
+
+#define A_LE_DB_MASK_IPV4 0x19c50
+#define A_LE_DB_MASK_IPV6 0x19ca0
+#define A_LE_DB_REQ_RSP_CNT 0x19ce4
+#define A_LE_DB_DBGI_CONFIG 0x19cf0
+
+#define S_DBGICMDPERR 31
+#define V_DBGICMDPERR(x) ((x) << S_DBGICMDPERR)
+#define F_DBGICMDPERR V_DBGICMDPERR(1U)
+
+#define S_DBGICMDRANGE 22
+#define M_DBGICMDRANGE 0x7U
+#define V_DBGICMDRANGE(x) ((x) << S_DBGICMDRANGE)
+#define G_DBGICMDRANGE(x) (((x) >> S_DBGICMDRANGE) & M_DBGICMDRANGE)
+
+#define S_DBGICMDMSKTYPE 21
+#define V_DBGICMDMSKTYPE(x) ((x) << S_DBGICMDMSKTYPE)
+#define F_DBGICMDMSKTYPE V_DBGICMDMSKTYPE(1U)
+
+#define S_DBGICMDSEARCH 20
+#define V_DBGICMDSEARCH(x) ((x) << S_DBGICMDSEARCH)
+#define F_DBGICMDSEARCH V_DBGICMDSEARCH(1U)
+
+#define S_DBGICMDREAD 19
+#define V_DBGICMDREAD(x) ((x) << S_DBGICMDREAD)
+#define F_DBGICMDREAD V_DBGICMDREAD(1U)
+
+#define S_DBGICMDLEARN 18
+#define V_DBGICMDLEARN(x) ((x) << S_DBGICMDLEARN)
+#define F_DBGICMDLEARN V_DBGICMDLEARN(1U)
+
+#define S_DBGICMDERASE 17
+#define V_DBGICMDERASE(x) ((x) << S_DBGICMDERASE)
+#define F_DBGICMDERASE V_DBGICMDERASE(1U)
+
+#define S_DBGICMDIPV6 16
+#define V_DBGICMDIPV6(x) ((x) << S_DBGICMDIPV6)
+#define F_DBGICMDIPV6 V_DBGICMDIPV6(1U)
+
+#define S_DBGICMDTYPE 13
+#define M_DBGICMDTYPE 0x7U
+#define V_DBGICMDTYPE(x) ((x) << S_DBGICMDTYPE)
+#define G_DBGICMDTYPE(x) (((x) >> S_DBGICMDTYPE) & M_DBGICMDTYPE)
+
+#define S_DBGICMDACKERR 12
+#define V_DBGICMDACKERR(x) ((x) << S_DBGICMDACKERR)
+#define F_DBGICMDACKERR V_DBGICMDACKERR(1U)
+
+#define S_DBGICMDBUSY 3
+#define V_DBGICMDBUSY(x) ((x) << S_DBGICMDBUSY)
+#define F_DBGICMDBUSY V_DBGICMDBUSY(1U)
+
+#define S_DBGICMDSTRT 2
+#define V_DBGICMDSTRT(x) ((x) << S_DBGICMDSTRT)
+#define F_DBGICMDSTRT V_DBGICMDSTRT(1U)
+
+#define S_DBGICMDMODE 0
+#define M_DBGICMDMODE 0x3U
+#define V_DBGICMDMODE(x) ((x) << S_DBGICMDMODE)
+#define G_DBGICMDMODE(x) (((x) >> S_DBGICMDMODE) & M_DBGICMDMODE)
+
+#define A_LE_DB_DBGI_REQ_TCAM_CMD 0x19cf4
+
+#define S_DBGICMD 20
+#define M_DBGICMD 0xfU
+#define V_DBGICMD(x) ((x) << S_DBGICMD)
+#define G_DBGICMD(x) (((x) >> S_DBGICMD) & M_DBGICMD)
+
+#define S_DBGITINDEX 0
+#define M_DBGITINDEX 0xfffffU
+#define V_DBGITINDEX(x) ((x) << S_DBGITINDEX)
+#define G_DBGITINDEX(x) (((x) >> S_DBGITINDEX) & M_DBGITINDEX)
+
+#define A_LE_PERR_ENABLE 0x19cf8
+
+#define S_REQQUEUE 1
+#define V_REQQUEUE(x) ((x) << S_REQQUEUE)
+#define F_REQQUEUE V_REQQUEUE(1U)
+
+#define S_TCAM 0
+#define V_TCAM(x) ((x) << S_TCAM)
+#define F_TCAM V_TCAM(1U)
+
+#define A_LE_SPARE 0x19cfc
+#define A_LE_DB_DBGI_REQ_DATA 0x19d00
+#define A_LE_DB_DBGI_REQ_MASK 0x19d50
+#define A_LE_DB_DBGI_RSP_STATUS 0x19d94
+
+#define S_DBGIRSPINDEX 12
+#define M_DBGIRSPINDEX 0xfffffU
+#define V_DBGIRSPINDEX(x) ((x) << S_DBGIRSPINDEX)
+#define G_DBGIRSPINDEX(x) (((x) >> S_DBGIRSPINDEX) & M_DBGIRSPINDEX)
+
+#define S_DBGIRSPMSG 8
+#define M_DBGIRSPMSG 0xfU
+#define V_DBGIRSPMSG(x) ((x) << S_DBGIRSPMSG)
+#define G_DBGIRSPMSG(x) (((x) >> S_DBGIRSPMSG) & M_DBGIRSPMSG)
+
+#define S_DBGIRSPMSGVLD 7
+#define V_DBGIRSPMSGVLD(x) ((x) << S_DBGIRSPMSGVLD)
+#define F_DBGIRSPMSGVLD V_DBGIRSPMSGVLD(1U)
+
+#define S_DBGIRSPMHIT 2
+#define V_DBGIRSPMHIT(x) ((x) << S_DBGIRSPMHIT)
+#define F_DBGIRSPMHIT V_DBGIRSPMHIT(1U)
+
+#define S_DBGIRSPHIT 1
+#define V_DBGIRSPHIT(x) ((x) << S_DBGIRSPHIT)
+#define F_DBGIRSPHIT V_DBGIRSPHIT(1U)
+
+#define S_DBGIRSPVALID 0
+#define V_DBGIRSPVALID(x) ((x) << S_DBGIRSPVALID)
+#define F_DBGIRSPVALID V_DBGIRSPVALID(1U)
+
+#define A_LE_DB_DBGI_RSP_DATA 0x19da0
+#define A_LE_DB_DBGI_RSP_LAST_CMD 0x19de4
+
+#define S_LASTCMDB 16
+#define M_LASTCMDB 0x7ffU
+#define V_LASTCMDB(x) ((x) << S_LASTCMDB)
+#define G_LASTCMDB(x) (((x) >> S_LASTCMDB) & M_LASTCMDB)
+
+#define S_LASTCMDA 0
+#define M_LASTCMDA 0x7ffU
+#define V_LASTCMDA(x) ((x) << S_LASTCMDA)
+#define G_LASTCMDA(x) (((x) >> S_LASTCMDA) & M_LASTCMDA)
+
+#define A_LE_DB_DROP_FILTER_ENTRY 0x19de8
+
+#define S_DROPFILTEREN 31
+#define V_DROPFILTEREN(x) ((x) << S_DROPFILTEREN)
+#define F_DROPFILTEREN V_DROPFILTEREN(1U)
+
+#define S_DROPFILTERCLEAR 17
+#define V_DROPFILTERCLEAR(x) ((x) << S_DROPFILTERCLEAR)
+#define F_DROPFILTERCLEAR V_DROPFILTERCLEAR(1U)
+
+#define S_DROPFILTERSET 16
+#define V_DROPFILTERSET(x) ((x) << S_DROPFILTERSET)
+#define F_DROPFILTERSET V_DROPFILTERSET(1U)
+
+#define S_DROPFILTERFIDX 0
+#define M_DROPFILTERFIDX 0x1fffU
+#define V_DROPFILTERFIDX(x) ((x) << S_DROPFILTERFIDX)
+#define G_DROPFILTERFIDX(x) (((x) >> S_DROPFILTERFIDX) & M_DROPFILTERFIDX)
+
+#define A_LE_DB_PTID_SVRBASE 0x19df0
+
+#define S_SVRBASE_ADDR 2
+#define M_SVRBASE_ADDR 0x3ffffU
+#define V_SVRBASE_ADDR(x) ((x) << S_SVRBASE_ADDR)
+#define G_SVRBASE_ADDR(x) (((x) >> S_SVRBASE_ADDR) & M_SVRBASE_ADDR)
+
+#define A_LE_DB_FTID_FLTRBASE 0x19df4
+
+#define S_FLTRBASE_ADDR 2
+#define M_FLTRBASE_ADDR 0x3ffffU
+#define V_FLTRBASE_ADDR(x) ((x) << S_FLTRBASE_ADDR)
+#define G_FLTRBASE_ADDR(x) (((x) >> S_FLTRBASE_ADDR) & M_FLTRBASE_ADDR)
+
+#define A_LE_DB_TID_HASHBASE 0x19df8
+
+#define S_HASHBASE_ADDR 2
+#define M_HASHBASE_ADDR 0xfffffU
+#define V_HASHBASE_ADDR(x) ((x) << S_HASHBASE_ADDR)
+#define G_HASHBASE_ADDR(x) (((x) >> S_HASHBASE_ADDR) & M_HASHBASE_ADDR)
+
+#define A_LE_PERR_INJECT 0x19dfc
+
+#define S_LEMEMSEL 1
+#define M_LEMEMSEL 0x7U
+#define V_LEMEMSEL(x) ((x) << S_LEMEMSEL)
+#define G_LEMEMSEL(x) (((x) >> S_LEMEMSEL) & M_LEMEMSEL)
+
+#define A_LE_DB_ACTIVE_MASK_IPV4 0x19e00
+#define A_LE_DB_ACTIVE_MASK_IPV6 0x19e50
+#define A_LE_HASH_MASK_GEN_IPV4 0x19ea0
+#define A_LE_HASH_MASK_GEN_IPV6 0x19eb0
+#define A_LE_HASH_MASK_CMP_IPV4 0x19ee0
+#define A_LE_HASH_MASK_CMP_IPV6 0x19ef0
+#define A_LE_DEBUG_LA_CONFIG 0x19f20
+#define A_LE_REQ_DEBUG_LA_DATA 0x19f24
+#define A_LE_REQ_DEBUG_LA_WRPTR 0x19f28
+#define A_LE_RSP_DEBUG_LA_DATA 0x19f2c
+#define A_LE_RSP_DEBUG_LA_WRPTR 0x19f30
+
+/* registers for module NCSI */
+#define NCSI_BASE_ADDR 0x1a000
+
+#define A_NCSI_PORT_CFGREG 0x1a000
+
+#define S_WIREEN 28
+#define M_WIREEN 0xfU
+#define V_WIREEN(x) ((x) << S_WIREEN)
+#define G_WIREEN(x) (((x) >> S_WIREEN) & M_WIREEN)
+
+#define S_STRP_CRC 24
+#define M_STRP_CRC 0xfU
+#define V_STRP_CRC(x) ((x) << S_STRP_CRC)
+#define G_STRP_CRC(x) (((x) >> S_STRP_CRC) & M_STRP_CRC)
+
+#define S_RX_HALT 22
+#define V_RX_HALT(x) ((x) << S_RX_HALT)
+#define F_RX_HALT V_RX_HALT(1U)
+
+#define S_FLUSH_RX_FIFO 21
+#define V_FLUSH_RX_FIFO(x) ((x) << S_FLUSH_RX_FIFO)
+#define F_FLUSH_RX_FIFO V_FLUSH_RX_FIFO(1U)
+
+#define S_HW_ARB_EN 20
+#define V_HW_ARB_EN(x) ((x) << S_HW_ARB_EN)
+#define F_HW_ARB_EN V_HW_ARB_EN(1U)
+
+#define S_SOFT_PKG_SEL 19
+#define V_SOFT_PKG_SEL(x) ((x) << S_SOFT_PKG_SEL)
+#define F_SOFT_PKG_SEL V_SOFT_PKG_SEL(1U)
+
+#define S_ERR_DISCARD_EN 18
+#define V_ERR_DISCARD_EN(x) ((x) << S_ERR_DISCARD_EN)
+#define F_ERR_DISCARD_EN V_ERR_DISCARD_EN(1U)
+
+#define S_MAX_PKT_SIZE 4
+#define M_MAX_PKT_SIZE 0x3fffU
+#define V_MAX_PKT_SIZE(x) ((x) << S_MAX_PKT_SIZE)
+#define G_MAX_PKT_SIZE(x) (((x) >> S_MAX_PKT_SIZE) & M_MAX_PKT_SIZE)
+
+#define S_RX_BYTE_SWAP 3
+#define V_RX_BYTE_SWAP(x) ((x) << S_RX_BYTE_SWAP)
+#define F_RX_BYTE_SWAP V_RX_BYTE_SWAP(1U)
+
+#define S_TX_BYTE_SWAP 2
+#define V_TX_BYTE_SWAP(x) ((x) << S_TX_BYTE_SWAP)
+#define F_TX_BYTE_SWAP V_TX_BYTE_SWAP(1U)
+
+#define A_NCSI_RST_CTRL 0x1a004
+
+#define S_MAC_REF_RST 2
+#define V_MAC_REF_RST(x) ((x) << S_MAC_REF_RST)
+#define F_MAC_REF_RST V_MAC_REF_RST(1U)
+
+#define S_MAC_RX_RST 1
+#define V_MAC_RX_RST(x) ((x) << S_MAC_RX_RST)
+#define F_MAC_RX_RST V_MAC_RX_RST(1U)
+
+#define S_MAC_TX_RST 0
+#define V_MAC_TX_RST(x) ((x) << S_MAC_TX_RST)
+#define F_MAC_TX_RST V_MAC_TX_RST(1U)
+
+#define A_NCSI_CH0_SADDR_LOW 0x1a010
+#define A_NCSI_CH0_SADDR_HIGH 0x1a014
+
+#define S_CHO_SADDR_EN 31
+#define V_CHO_SADDR_EN(x) ((x) << S_CHO_SADDR_EN)
+#define F_CHO_SADDR_EN V_CHO_SADDR_EN(1U)
+
+#define S_CH0_SADDR_HIGH 0
+#define M_CH0_SADDR_HIGH 0xffffU
+#define V_CH0_SADDR_HIGH(x) ((x) << S_CH0_SADDR_HIGH)
+#define G_CH0_SADDR_HIGH(x) (((x) >> S_CH0_SADDR_HIGH) & M_CH0_SADDR_HIGH)
+
+#define A_NCSI_CH1_SADDR_LOW 0x1a018
+#define A_NCSI_CH1_SADDR_HIGH 0x1a01c
+
+#define S_CH1_SADDR_EN 31
+#define V_CH1_SADDR_EN(x) ((x) << S_CH1_SADDR_EN)
+#define F_CH1_SADDR_EN V_CH1_SADDR_EN(1U)
+
+#define S_CH1_SADDR_HIGH 0
+#define M_CH1_SADDR_HIGH 0xffffU
+#define V_CH1_SADDR_HIGH(x) ((x) << S_CH1_SADDR_HIGH)
+#define G_CH1_SADDR_HIGH(x) (((x) >> S_CH1_SADDR_HIGH) & M_CH1_SADDR_HIGH)
+
+#define A_NCSI_CH2_SADDR_LOW 0x1a020
+#define A_NCSI_CH2_SADDR_HIGH 0x1a024
+
+#define S_CH2_SADDR_EN 31
+#define V_CH2_SADDR_EN(x) ((x) << S_CH2_SADDR_EN)
+#define F_CH2_SADDR_EN V_CH2_SADDR_EN(1U)
+
+#define S_CH2_SADDR_HIGH 0
+#define M_CH2_SADDR_HIGH 0xffffU
+#define V_CH2_SADDR_HIGH(x) ((x) << S_CH2_SADDR_HIGH)
+#define G_CH2_SADDR_HIGH(x) (((x) >> S_CH2_SADDR_HIGH) & M_CH2_SADDR_HIGH)
+
+#define A_NCSI_CH3_SADDR_LOW 0x1a028
+#define A_NCSI_CH3_SADDR_HIGH 0x1a02c
+
+#define S_CH3_SADDR_EN 31
+#define V_CH3_SADDR_EN(x) ((x) << S_CH3_SADDR_EN)
+#define F_CH3_SADDR_EN V_CH3_SADDR_EN(1U)
+
+#define S_CH3_SADDR_HIGH 0
+#define M_CH3_SADDR_HIGH 0xffffU
+#define V_CH3_SADDR_HIGH(x) ((x) << S_CH3_SADDR_HIGH)
+#define G_CH3_SADDR_HIGH(x) (((x) >> S_CH3_SADDR_HIGH) & M_CH3_SADDR_HIGH)
+
+#define A_NCSI_WORK_REQHDR_0 0x1a030
+#define A_NCSI_WORK_REQHDR_1 0x1a034
+#define A_NCSI_WORK_REQHDR_2 0x1a038
+#define A_NCSI_WORK_REQHDR_3 0x1a03c
+#define A_NCSI_MPS_HDR_LO 0x1a040
+#define A_NCSI_MPS_HDR_HI 0x1a044
+#define A_NCSI_CTL 0x1a048
+
+#define S_STRIP_OVLAN 3
+#define V_STRIP_OVLAN(x) ((x) << S_STRIP_OVLAN)
+#define F_STRIP_OVLAN V_STRIP_OVLAN(1U)
+
+#define S_BMC_DROP_NON_BC 2
+#define V_BMC_DROP_NON_BC(x) ((x) << S_BMC_DROP_NON_BC)
+#define F_BMC_DROP_NON_BC V_BMC_DROP_NON_BC(1U)
+
+#define S_BMC_RX_FWD_ALL 1
+#define V_BMC_RX_FWD_ALL(x) ((x) << S_BMC_RX_FWD_ALL)
+#define F_BMC_RX_FWD_ALL V_BMC_RX_FWD_ALL(1U)
+
+#define S_FWD_BMC 0
+#define V_FWD_BMC(x) ((x) << S_FWD_BMC)
+#define F_FWD_BMC V_FWD_BMC(1U)
+
+#define A_NCSI_NCSI_ETYPE 0x1a04c
+
+#define S_NCSI_ETHERTYPE 0
+#define M_NCSI_ETHERTYPE 0xffffU
+#define V_NCSI_ETHERTYPE(x) ((x) << S_NCSI_ETHERTYPE)
+#define G_NCSI_ETHERTYPE(x) (((x) >> S_NCSI_ETHERTYPE) & M_NCSI_ETHERTYPE)
+
+#define A_NCSI_RX_FIFO_CNT 0x1a050
+
+#define S_NCSI_RXFIFO_CNT 0
+#define M_NCSI_RXFIFO_CNT 0x7ffU
+#define V_NCSI_RXFIFO_CNT(x) ((x) << S_NCSI_RXFIFO_CNT)
+#define G_NCSI_RXFIFO_CNT(x) (((x) >> S_NCSI_RXFIFO_CNT) & M_NCSI_RXFIFO_CNT)
+
+#define A_NCSI_RX_ERR_CNT 0x1a054
+#define A_NCSI_RX_OF_CNT 0x1a058
+#define A_NCSI_RX_MS_CNT 0x1a05c
+#define A_NCSI_RX_IE_CNT 0x1a060
+#define A_NCSI_MPS_DEMUX_CNT 0x1a064
+
+#define S_MPS2CIM_CNT 16
+#define M_MPS2CIM_CNT 0x1ffU
+#define V_MPS2CIM_CNT(x) ((x) << S_MPS2CIM_CNT)
+#define G_MPS2CIM_CNT(x) (((x) >> S_MPS2CIM_CNT) & M_MPS2CIM_CNT)
+
+#define S_MPS2BMC_CNT 0
+#define M_MPS2BMC_CNT 0x1ffU
+#define V_MPS2BMC_CNT(x) ((x) << S_MPS2BMC_CNT)
+#define G_MPS2BMC_CNT(x) (((x) >> S_MPS2BMC_CNT) & M_MPS2BMC_CNT)
+
+#define A_NCSI_CIM_DEMUX_CNT 0x1a068
+
+#define S_CIM2MPS_CNT 16
+#define M_CIM2MPS_CNT 0x1ffU
+#define V_CIM2MPS_CNT(x) ((x) << S_CIM2MPS_CNT)
+#define G_CIM2MPS_CNT(x) (((x) >> S_CIM2MPS_CNT) & M_CIM2MPS_CNT)
+
+#define S_CIM2BMC_CNT 0
+#define M_CIM2BMC_CNT 0x1ffU
+#define V_CIM2BMC_CNT(x) ((x) << S_CIM2BMC_CNT)
+#define G_CIM2BMC_CNT(x) (((x) >> S_CIM2BMC_CNT) & M_CIM2BMC_CNT)
+
+#define A_NCSI_TX_FIFO_CNT 0x1a06c
+
+#define S_TX_FIFO_CNT 0
+#define M_TX_FIFO_CNT 0x3ffU
+#define V_TX_FIFO_CNT(x) ((x) << S_TX_FIFO_CNT)
+#define G_TX_FIFO_CNT(x) (((x) >> S_TX_FIFO_CNT) & M_TX_FIFO_CNT)
+
+#define A_NCSI_SE_CNT_CTL 0x1a0b0
+
+#define S_SE_CNT_CLR 0
+#define M_SE_CNT_CLR 0xfU
+#define V_SE_CNT_CLR(x) ((x) << S_SE_CNT_CLR)
+#define G_SE_CNT_CLR(x) (((x) >> S_SE_CNT_CLR) & M_SE_CNT_CLR)
+
+#define A_NCSI_SE_CNT_MPS 0x1a0b4
+
+#define S_NC2MPS_SOP_CNT 24
+#define M_NC2MPS_SOP_CNT 0xffU
+#define V_NC2MPS_SOP_CNT(x) ((x) << S_NC2MPS_SOP_CNT)
+#define G_NC2MPS_SOP_CNT(x) (((x) >> S_NC2MPS_SOP_CNT) & M_NC2MPS_SOP_CNT)
+
+#define S_NC2MPS_EOP_CNT 16
+#define M_NC2MPS_EOP_CNT 0x3fU
+#define V_NC2MPS_EOP_CNT(x) ((x) << S_NC2MPS_EOP_CNT)
+#define G_NC2MPS_EOP_CNT(x) (((x) >> S_NC2MPS_EOP_CNT) & M_NC2MPS_EOP_CNT)
+
+#define S_MPS2NC_SOP_CNT 8
+#define M_MPS2NC_SOP_CNT 0xffU
+#define V_MPS2NC_SOP_CNT(x) ((x) << S_MPS2NC_SOP_CNT)
+#define G_MPS2NC_SOP_CNT(x) (((x) >> S_MPS2NC_SOP_CNT) & M_MPS2NC_SOP_CNT)
+
+#define S_MPS2NC_EOP_CNT 0
+#define M_MPS2NC_EOP_CNT 0xffU
+#define V_MPS2NC_EOP_CNT(x) ((x) << S_MPS2NC_EOP_CNT)
+#define G_MPS2NC_EOP_CNT(x) (((x) >> S_MPS2NC_EOP_CNT) & M_MPS2NC_EOP_CNT)
+
+#define A_NCSI_SE_CNT_CIM 0x1a0b8
+
+#define S_NC2CIM_SOP_CNT 24
+#define M_NC2CIM_SOP_CNT 0xffU
+#define V_NC2CIM_SOP_CNT(x) ((x) << S_NC2CIM_SOP_CNT)
+#define G_NC2CIM_SOP_CNT(x) (((x) >> S_NC2CIM_SOP_CNT) & M_NC2CIM_SOP_CNT)
+
+#define S_NC2CIM_EOP_CNT 16
+#define M_NC2CIM_EOP_CNT 0x3fU
+#define V_NC2CIM_EOP_CNT(x) ((x) << S_NC2CIM_EOP_CNT)
+#define G_NC2CIM_EOP_CNT(x) (((x) >> S_NC2CIM_EOP_CNT) & M_NC2CIM_EOP_CNT)
+
+#define S_CIM2NC_SOP_CNT 8
+#define M_CIM2NC_SOP_CNT 0xffU
+#define V_CIM2NC_SOP_CNT(x) ((x) << S_CIM2NC_SOP_CNT)
+#define G_CIM2NC_SOP_CNT(x) (((x) >> S_CIM2NC_SOP_CNT) & M_CIM2NC_SOP_CNT)
+
+#define S_CIM2NC_EOP_CNT 0
+#define M_CIM2NC_EOP_CNT 0xffU
+#define V_CIM2NC_EOP_CNT(x) ((x) << S_CIM2NC_EOP_CNT)
+#define G_CIM2NC_EOP_CNT(x) (((x) >> S_CIM2NC_EOP_CNT) & M_CIM2NC_EOP_CNT)
+
+#define A_NCSI_BUS_DEBUG 0x1a0bc
+
+#define S_SOP_CNT_ERR 12
+#define M_SOP_CNT_ERR 0xfU
+#define V_SOP_CNT_ERR(x) ((x) << S_SOP_CNT_ERR)
+#define G_SOP_CNT_ERR(x) (((x) >> S_SOP_CNT_ERR) & M_SOP_CNT_ERR)
+
+#define S_BUS_STATE_MPS_OUT 6
+#define M_BUS_STATE_MPS_OUT 0x3U
+#define V_BUS_STATE_MPS_OUT(x) ((x) << S_BUS_STATE_MPS_OUT)
+#define G_BUS_STATE_MPS_OUT(x) (((x) >> S_BUS_STATE_MPS_OUT) & M_BUS_STATE_MPS_OUT)
+
+#define S_BUS_STATE_MPS_IN 4
+#define M_BUS_STATE_MPS_IN 0x3U
+#define V_BUS_STATE_MPS_IN(x) ((x) << S_BUS_STATE_MPS_IN)
+#define G_BUS_STATE_MPS_IN(x) (((x) >> S_BUS_STATE_MPS_IN) & M_BUS_STATE_MPS_IN)
+
+#define S_BUS_STATE_CIM_OUT 2
+#define M_BUS_STATE_CIM_OUT 0x3U
+#define V_BUS_STATE_CIM_OUT(x) ((x) << S_BUS_STATE_CIM_OUT)
+#define G_BUS_STATE_CIM_OUT(x) (((x) >> S_BUS_STATE_CIM_OUT) & M_BUS_STATE_CIM_OUT)
+
+#define S_BUS_STATE_CIM_IN 0
+#define M_BUS_STATE_CIM_IN 0x3U
+#define V_BUS_STATE_CIM_IN(x) ((x) << S_BUS_STATE_CIM_IN)
+#define G_BUS_STATE_CIM_IN(x) (((x) >> S_BUS_STATE_CIM_IN) & M_BUS_STATE_CIM_IN)
+
+#define A_NCSI_LA_RDPTR 0x1a0c0
+#define A_NCSI_LA_RDDATA 0x1a0c4
+#define A_NCSI_LA_WRPTR 0x1a0c8
+#define A_NCSI_LA_RESERVED 0x1a0cc
+#define A_NCSI_LA_CTL 0x1a0d0
+#define A_NCSI_INT_ENABLE 0x1a0d4
+
+#define S_CIM_DM_PRTY_ERR 8
+#define V_CIM_DM_PRTY_ERR(x) ((x) << S_CIM_DM_PRTY_ERR)
+#define F_CIM_DM_PRTY_ERR V_CIM_DM_PRTY_ERR(1U)
+
+#define S_MPS_DM_PRTY_ERR 7
+#define V_MPS_DM_PRTY_ERR(x) ((x) << S_MPS_DM_PRTY_ERR)
+#define F_MPS_DM_PRTY_ERR V_MPS_DM_PRTY_ERR(1U)
+
+#define S_TOKEN 6
+#define V_TOKEN(x) ((x) << S_TOKEN)
+#define F_TOKEN V_TOKEN(1U)
+
+#define S_ARB_DONE 5
+#define V_ARB_DONE(x) ((x) << S_ARB_DONE)
+#define F_ARB_DONE V_ARB_DONE(1U)
+
+#define S_ARB_STARTED 4
+#define V_ARB_STARTED(x) ((x) << S_ARB_STARTED)
+#define F_ARB_STARTED V_ARB_STARTED(1U)
+
+#define S_WOL 3
+#define V_WOL(x) ((x) << S_WOL)
+#define F_WOL V_WOL(1U)
+
+#define S_MACINT 2
+#define V_MACINT(x) ((x) << S_MACINT)
+#define F_MACINT V_MACINT(1U)
+
+#define S_TXFIFO_PRTY_ERR 1
+#define V_TXFIFO_PRTY_ERR(x) ((x) << S_TXFIFO_PRTY_ERR)
+#define F_TXFIFO_PRTY_ERR V_TXFIFO_PRTY_ERR(1U)
+
+#define S_RXFIFO_PRTY_ERR 0
+#define V_RXFIFO_PRTY_ERR(x) ((x) << S_RXFIFO_PRTY_ERR)
+#define F_RXFIFO_PRTY_ERR V_RXFIFO_PRTY_ERR(1U)
+
+#define A_NCSI_INT_CAUSE 0x1a0d8
+#define A_NCSI_STATUS 0x1a0dc
+
+#define S_MASTER 1
+#define V_MASTER(x) ((x) << S_MASTER)
+#define F_MASTER V_MASTER(1U)
+
+#define S_ARB_STATUS 0
+#define V_ARB_STATUS(x) ((x) << S_ARB_STATUS)
+#define F_ARB_STATUS V_ARB_STATUS(1U)
+
+#define A_NCSI_PAUSE_CTRL 0x1a0e0
+
+#define S_FORCEPAUSE 0
+#define V_FORCEPAUSE(x) ((x) << S_FORCEPAUSE)
+#define F_FORCEPAUSE V_FORCEPAUSE(1U)
+
+#define A_NCSI_PAUSE_TIMEOUT 0x1a0e4
+#define A_NCSI_PAUSE_WM 0x1a0ec
+
+#define S_PAUSEHWM 16
+#define M_PAUSEHWM 0x7ffU
+#define V_PAUSEHWM(x) ((x) << S_PAUSEHWM)
+#define G_PAUSEHWM(x) (((x) >> S_PAUSEHWM) & M_PAUSEHWM)
+
+#define S_PAUSELWM 0
+#define M_PAUSELWM 0x7ffU
+#define V_PAUSELWM(x) ((x) << S_PAUSELWM)
+#define G_PAUSELWM(x) (((x) >> S_PAUSELWM) & M_PAUSELWM)
+
+#define A_NCSI_DEBUG 0x1a0f0
+
+#define S_DEBUGSEL 0
+#define M_DEBUGSEL 0x3fU
+#define V_DEBUGSEL(x) ((x) << S_DEBUGSEL)
+#define G_DEBUGSEL(x) (((x) >> S_DEBUGSEL) & M_DEBUGSEL)
+
+#define A_NCSI_PERR_INJECT 0x1a0f4
+
+#define S_MCSIMELSEL 1
+#define V_MCSIMELSEL(x) ((x) << S_MCSIMELSEL)
+#define F_MCSIMELSEL V_MCSIMELSEL(1U)
+
+#define A_NCSI_MACB_NETWORK_CTRL 0x1a100
+
+#define S_TXSNDZEROPAUSE 12
+#define V_TXSNDZEROPAUSE(x) ((x) << S_TXSNDZEROPAUSE)
+#define F_TXSNDZEROPAUSE V_TXSNDZEROPAUSE(1U)
+
+#define S_TXSNDPAUSE 11
+#define V_TXSNDPAUSE(x) ((x) << S_TXSNDPAUSE)
+#define F_TXSNDPAUSE V_TXSNDPAUSE(1U)
+
+#define S_TXSTOP 10
+#define V_TXSTOP(x) ((x) << S_TXSTOP)
+#define F_TXSTOP V_TXSTOP(1U)
+
+#define S_TXSTART 9
+#define V_TXSTART(x) ((x) << S_TXSTART)
+#define F_TXSTART V_TXSTART(1U)
+
+#define S_BACKPRESS 8
+#define V_BACKPRESS(x) ((x) << S_BACKPRESS)
+#define F_BACKPRESS V_BACKPRESS(1U)
+
+#define S_STATWREN 7
+#define V_STATWREN(x) ((x) << S_STATWREN)
+#define F_STATWREN V_STATWREN(1U)
+
+#define S_INCRSTAT 6
+#define V_INCRSTAT(x) ((x) << S_INCRSTAT)
+#define F_INCRSTAT V_INCRSTAT(1U)
+
+#define S_CLEARSTAT 5
+#define V_CLEARSTAT(x) ((x) << S_CLEARSTAT)
+#define F_CLEARSTAT V_CLEARSTAT(1U)
+
+#define S_ENMGMTPORT 4
+#define V_ENMGMTPORT(x) ((x) << S_ENMGMTPORT)
+#define F_ENMGMTPORT V_ENMGMTPORT(1U)
+
+#define S_NCSITXEN 3
+#define V_NCSITXEN(x) ((x) << S_NCSITXEN)
+#define F_NCSITXEN V_NCSITXEN(1U)
+
+#define S_NCSIRXEN 2
+#define V_NCSIRXEN(x) ((x) << S_NCSIRXEN)
+#define F_NCSIRXEN V_NCSIRXEN(1U)
+
+#define S_LOOPLOCAL 1
+#define V_LOOPLOCAL(x) ((x) << S_LOOPLOCAL)
+#define F_LOOPLOCAL V_LOOPLOCAL(1U)
+
+#define S_LOOPPHY 0
+#define V_LOOPPHY(x) ((x) << S_LOOPPHY)
+#define F_LOOPPHY V_LOOPPHY(1U)
+
+#define A_NCSI_MACB_NETWORK_CFG 0x1a104
+
+#define S_PCLKDIV128 22
+#define V_PCLKDIV128(x) ((x) << S_PCLKDIV128)
+#define F_PCLKDIV128 V_PCLKDIV128(1U)
+
+#define S_COPYPAUSE 21
+#define V_COPYPAUSE(x) ((x) << S_COPYPAUSE)
+#define F_COPYPAUSE V_COPYPAUSE(1U)
+
+#define S_NONSTDPREOK 20
+#define V_NONSTDPREOK(x) ((x) << S_NONSTDPREOK)
+#define F_NONSTDPREOK V_NONSTDPREOK(1U)
+
+#define S_NOFCS 19
+#define V_NOFCS(x) ((x) << S_NOFCS)
+#define F_NOFCS V_NOFCS(1U)
+
+#define S_RXENHALFDUP 18
+#define V_RXENHALFDUP(x) ((x) << S_RXENHALFDUP)
+#define F_RXENHALFDUP V_RXENHALFDUP(1U)
+
+#define S_NOCOPYFCS 17
+#define V_NOCOPYFCS(x) ((x) << S_NOCOPYFCS)
+#define F_NOCOPYFCS V_NOCOPYFCS(1U)
+
+#define S_LENCHKEN 16
+#define V_LENCHKEN(x) ((x) << S_LENCHKEN)
+#define F_LENCHKEN V_LENCHKEN(1U)
+
+#define S_RXBUFOFFSET 14
+#define M_RXBUFOFFSET 0x3U
+#define V_RXBUFOFFSET(x) ((x) << S_RXBUFOFFSET)
+#define G_RXBUFOFFSET(x) (((x) >> S_RXBUFOFFSET) & M_RXBUFOFFSET)
+
+#define S_PAUSEEN 13
+#define V_PAUSEEN(x) ((x) << S_PAUSEEN)
+#define F_PAUSEEN V_PAUSEEN(1U)
+
+#define S_RETRYTEST 12
+#define V_RETRYTEST(x) ((x) << S_RETRYTEST)
+#define F_RETRYTEST V_RETRYTEST(1U)
+
+#define S_PCLKDIV 10
+#define M_PCLKDIV 0x3U
+#define V_PCLKDIV(x) ((x) << S_PCLKDIV)
+#define G_PCLKDIV(x) (((x) >> S_PCLKDIV) & M_PCLKDIV)
+
+#define S_EXTCLASS 9
+#define V_EXTCLASS(x) ((x) << S_EXTCLASS)
+#define F_EXTCLASS V_EXTCLASS(1U)
+
+#define S_EN1536FRAME 8
+#define V_EN1536FRAME(x) ((x) << S_EN1536FRAME)
+#define F_EN1536FRAME V_EN1536FRAME(1U)
+
+#define S_UCASTHASHEN 7
+#define V_UCASTHASHEN(x) ((x) << S_UCASTHASHEN)
+#define F_UCASTHASHEN V_UCASTHASHEN(1U)
+
+#define S_MCASTHASHEN 6
+#define V_MCASTHASHEN(x) ((x) << S_MCASTHASHEN)
+#define F_MCASTHASHEN V_MCASTHASHEN(1U)
+
+#define S_RXBCASTDIS 5
+#define V_RXBCASTDIS(x) ((x) << S_RXBCASTDIS)
+#define F_RXBCASTDIS V_RXBCASTDIS(1U)
+
+#define S_NCSICOPYALLFRAMES 4
+#define V_NCSICOPYALLFRAMES(x) ((x) << S_NCSICOPYALLFRAMES)
+#define F_NCSICOPYALLFRAMES V_NCSICOPYALLFRAMES(1U)
+
+#define S_JUMBOEN 3
+#define V_JUMBOEN(x) ((x) << S_JUMBOEN)
+#define F_JUMBOEN V_JUMBOEN(1U)
+
+#define S_SEREN 2
+#define V_SEREN(x) ((x) << S_SEREN)
+#define F_SEREN V_SEREN(1U)
+
+#define S_FULLDUPLEX 1
+#define V_FULLDUPLEX(x) ((x) << S_FULLDUPLEX)
+#define F_FULLDUPLEX V_FULLDUPLEX(1U)
+
+#define S_SPEED 0
+#define V_SPEED(x) ((x) << S_SPEED)
+#define F_SPEED V_SPEED(1U)
+
+#define A_NCSI_MACB_NETWORK_STATUS 0x1a108
+
+#define S_PHYMGMTSTATUS 2
+#define V_PHYMGMTSTATUS(x) ((x) << S_PHYMGMTSTATUS)
+#define F_PHYMGMTSTATUS V_PHYMGMTSTATUS(1U)
+
+#define S_MDISTATUS 1
+#define V_MDISTATUS(x) ((x) << S_MDISTATUS)
+#define F_MDISTATUS V_MDISTATUS(1U)
+
+#define S_LINKSTATUS 0
+#define V_LINKSTATUS(x) ((x) << S_LINKSTATUS)
+#define F_LINKSTATUS V_LINKSTATUS(1U)
+
+#define A_NCSI_MACB_TX_STATUS 0x1a114
+
+#define S_UNDERRUNERR 6
+#define V_UNDERRUNERR(x) ((x) << S_UNDERRUNERR)
+#define F_UNDERRUNERR V_UNDERRUNERR(1U)
+
+#define S_TXCOMPLETE 5
+#define V_TXCOMPLETE(x) ((x) << S_TXCOMPLETE)
+#define F_TXCOMPLETE V_TXCOMPLETE(1U)
+
+#define S_BUFFEREXHAUSTED 4
+#define V_BUFFEREXHAUSTED(x) ((x) << S_BUFFEREXHAUSTED)
+#define F_BUFFEREXHAUSTED V_BUFFEREXHAUSTED(1U)
+
+#define S_TXPROGRESS 3
+#define V_TXPROGRESS(x) ((x) << S_TXPROGRESS)
+#define F_TXPROGRESS V_TXPROGRESS(1U)
+
+#define S_RETRYLIMIT 2
+#define V_RETRYLIMIT(x) ((x) << S_RETRYLIMIT)
+#define F_RETRYLIMIT V_RETRYLIMIT(1U)
+
+#define S_COLEVENT 1
+#define V_COLEVENT(x) ((x) << S_COLEVENT)
+#define F_COLEVENT V_COLEVENT(1U)
+
+#define S_USEDBITREAD 0
+#define V_USEDBITREAD(x) ((x) << S_USEDBITREAD)
+#define F_USEDBITREAD V_USEDBITREAD(1U)
+
+#define A_NCSI_MACB_RX_BUF_QPTR 0x1a118
+
+#define S_RXBUFQPTR 2
+#define M_RXBUFQPTR 0x3fffffffU
+#define V_RXBUFQPTR(x) ((x) << S_RXBUFQPTR)
+#define G_RXBUFQPTR(x) (((x) >> S_RXBUFQPTR) & M_RXBUFQPTR)
+
+#define A_NCSI_MACB_TX_BUF_QPTR 0x1a11c
+
+#define S_TXBUFQPTR 2
+#define M_TXBUFQPTR 0x3fffffffU
+#define V_TXBUFQPTR(x) ((x) << S_TXBUFQPTR)
+#define G_TXBUFQPTR(x) (((x) >> S_TXBUFQPTR) & M_TXBUFQPTR)
+
+#define A_NCSI_MACB_RX_STATUS 0x1a120
+
+#define S_RXOVERRUNERR 2
+#define V_RXOVERRUNERR(x) ((x) << S_RXOVERRUNERR)
+#define F_RXOVERRUNERR V_RXOVERRUNERR(1U)
+
+#define S_MACB_FRAMERCVD 1
+#define V_MACB_FRAMERCVD(x) ((x) << S_MACB_FRAMERCVD)
+#define F_MACB_FRAMERCVD V_MACB_FRAMERCVD(1U)
+
+#define S_NORXBUF 0
+#define V_NORXBUF(x) ((x) << S_NORXBUF)
+#define F_NORXBUF V_NORXBUF(1U)
+
+#define A_NCSI_MACB_INT_STATUS 0x1a124
+
+#define S_PAUSETIMEZERO 13
+#define V_PAUSETIMEZERO(x) ((x) << S_PAUSETIMEZERO)
+#define F_PAUSETIMEZERO V_PAUSETIMEZERO(1U)
+
+#define S_PAUSERCVD 12
+#define V_PAUSERCVD(x) ((x) << S_PAUSERCVD)
+#define F_PAUSERCVD V_PAUSERCVD(1U)
+
+#define S_HRESPNOTOK 11
+#define V_HRESPNOTOK(x) ((x) << S_HRESPNOTOK)
+#define F_HRESPNOTOK V_HRESPNOTOK(1U)
+
+#define S_RXOVERRUN 10
+#define V_RXOVERRUN(x) ((x) << S_RXOVERRUN)
+#define F_RXOVERRUN V_RXOVERRUN(1U)
+
+#define S_LINKCHANGE 9
+#define V_LINKCHANGE(x) ((x) << S_LINKCHANGE)
+#define F_LINKCHANGE V_LINKCHANGE(1U)
+
+#define S_INT_TXCOMPLETE 7
+#define V_INT_TXCOMPLETE(x) ((x) << S_INT_TXCOMPLETE)
+#define F_INT_TXCOMPLETE V_INT_TXCOMPLETE(1U)
+
+#define S_TXBUFERR 6
+#define V_TXBUFERR(x) ((x) << S_TXBUFERR)
+#define F_TXBUFERR V_TXBUFERR(1U)
+
+#define S_RETRYLIMITERR 5
+#define V_RETRYLIMITERR(x) ((x) << S_RETRYLIMITERR)
+#define F_RETRYLIMITERR V_RETRYLIMITERR(1U)
+
+#define S_TXBUFUNDERRUN 4
+#define V_TXBUFUNDERRUN(x) ((x) << S_TXBUFUNDERRUN)
+#define F_TXBUFUNDERRUN V_TXBUFUNDERRUN(1U)
+
+#define S_TXUSEDBITREAD 3
+#define V_TXUSEDBITREAD(x) ((x) << S_TXUSEDBITREAD)
+#define F_TXUSEDBITREAD V_TXUSEDBITREAD(1U)
+
+#define S_RXUSEDBITREAD 2
+#define V_RXUSEDBITREAD(x) ((x) << S_RXUSEDBITREAD)
+#define F_RXUSEDBITREAD V_RXUSEDBITREAD(1U)
+
+#define S_RXCOMPLETE 1
+#define V_RXCOMPLETE(x) ((x) << S_RXCOMPLETE)
+#define F_RXCOMPLETE V_RXCOMPLETE(1U)
+
+#define S_MGMTFRAMESENT 0
+#define V_MGMTFRAMESENT(x) ((x) << S_MGMTFRAMESENT)
+#define F_MGMTFRAMESENT V_MGMTFRAMESENT(1U)
+
+#define A_NCSI_MACB_INT_EN 0x1a128
+#define A_NCSI_MACB_INT_DIS 0x1a12c
+#define A_NCSI_MACB_INT_MASK 0x1a130
+#define A_NCSI_MACB_PAUSE_TIME 0x1a138
+
+#define S_PAUSETIME 0
+#define M_PAUSETIME 0xffffU
+#define V_PAUSETIME(x) ((x) << S_PAUSETIME)
+#define G_PAUSETIME(x) (((x) >> S_PAUSETIME) & M_PAUSETIME)
+
+#define A_NCSI_MACB_PAUSE_FRAMES_RCVD 0x1a13c
+
+#define S_PAUSEFRRCVD 0
+#define M_PAUSEFRRCVD 0xffffU
+#define V_PAUSEFRRCVD(x) ((x) << S_PAUSEFRRCVD)
+#define G_PAUSEFRRCVD(x) (((x) >> S_PAUSEFRRCVD) & M_PAUSEFRRCVD)
+
+#define A_NCSI_MACB_TX_FRAMES_OK 0x1a140
+
+#define S_TXFRAMESOK 0
+#define M_TXFRAMESOK 0xffffffU
+#define V_TXFRAMESOK(x) ((x) << S_TXFRAMESOK)
+#define G_TXFRAMESOK(x) (((x) >> S_TXFRAMESOK) & M_TXFRAMESOK)
+
+#define A_NCSI_MACB_SINGLE_COL_FRAMES 0x1a144
+
+#define S_SINGLECOLTXFRAMES 0
+#define M_SINGLECOLTXFRAMES 0xffffU
+#define V_SINGLECOLTXFRAMES(x) ((x) << S_SINGLECOLTXFRAMES)
+#define G_SINGLECOLTXFRAMES(x) (((x) >> S_SINGLECOLTXFRAMES) & M_SINGLECOLTXFRAMES)
+
+#define A_NCSI_MACB_MUL_COL_FRAMES 0x1a148
+
+#define S_MULCOLTXFRAMES 0
+#define M_MULCOLTXFRAMES 0xffffU
+#define V_MULCOLTXFRAMES(x) ((x) << S_MULCOLTXFRAMES)
+#define G_MULCOLTXFRAMES(x) (((x) >> S_MULCOLTXFRAMES) & M_MULCOLTXFRAMES)
+
+#define A_NCSI_MACB_RX_FRAMES_OK 0x1a14c
+
+#define S_RXFRAMESOK 0
+#define M_RXFRAMESOK 0xffffffU
+#define V_RXFRAMESOK(x) ((x) << S_RXFRAMESOK)
+#define G_RXFRAMESOK(x) (((x) >> S_RXFRAMESOK) & M_RXFRAMESOK)
+
+#define A_NCSI_MACB_FCS_ERR 0x1a150
+
+#define S_RXFCSERR 0
+#define M_RXFCSERR 0xffU
+#define V_RXFCSERR(x) ((x) << S_RXFCSERR)
+#define G_RXFCSERR(x) (((x) >> S_RXFCSERR) & M_RXFCSERR)
+
+#define A_NCSI_MACB_ALIGN_ERR 0x1a154
+
+#define S_RXALIGNERR 0
+#define M_RXALIGNERR 0xffU
+#define V_RXALIGNERR(x) ((x) << S_RXALIGNERR)
+#define G_RXALIGNERR(x) (((x) >> S_RXALIGNERR) & M_RXALIGNERR)
+
+#define A_NCSI_MACB_DEF_TX_FRAMES 0x1a158
+
+#define S_TXDEFERREDFRAMES 0
+#define M_TXDEFERREDFRAMES 0xffffU
+#define V_TXDEFERREDFRAMES(x) ((x) << S_TXDEFERREDFRAMES)
+#define G_TXDEFERREDFRAMES(x) (((x) >> S_TXDEFERREDFRAMES) & M_TXDEFERREDFRAMES)
+
+#define A_NCSI_MACB_LATE_COL 0x1a15c
+
+#define S_LATECOLLISIONS 0
+#define M_LATECOLLISIONS 0xffffU
+#define V_LATECOLLISIONS(x) ((x) << S_LATECOLLISIONS)
+#define G_LATECOLLISIONS(x) (((x) >> S_LATECOLLISIONS) & M_LATECOLLISIONS)
+
+#define A_NCSI_MACB_EXCESSIVE_COL 0x1a160
+
+#define S_EXCESSIVECOLLISIONS 0
+#define M_EXCESSIVECOLLISIONS 0xffU
+#define V_EXCESSIVECOLLISIONS(x) ((x) << S_EXCESSIVECOLLISIONS)
+#define G_EXCESSIVECOLLISIONS(x) (((x) >> S_EXCESSIVECOLLISIONS) & M_EXCESSIVECOLLISIONS)
+
+#define A_NCSI_MACB_TX_UNDERRUN_ERR 0x1a164
+
+#define S_TXUNDERRUNERR 0
+#define M_TXUNDERRUNERR 0xffU
+#define V_TXUNDERRUNERR(x) ((x) << S_TXUNDERRUNERR)
+#define G_TXUNDERRUNERR(x) (((x) >> S_TXUNDERRUNERR) & M_TXUNDERRUNERR)
+
+#define A_NCSI_MACB_CARRIER_SENSE_ERR 0x1a168
+
+#define S_CARRIERSENSEERRS 0
+#define M_CARRIERSENSEERRS 0xffU
+#define V_CARRIERSENSEERRS(x) ((x) << S_CARRIERSENSEERRS)
+#define G_CARRIERSENSEERRS(x) (((x) >> S_CARRIERSENSEERRS) & M_CARRIERSENSEERRS)
+
+#define A_NCSI_MACB_RX_RESOURCE_ERR 0x1a16c
+
+#define S_RXRESOURCEERR 0
+#define M_RXRESOURCEERR 0xffffU
+#define V_RXRESOURCEERR(x) ((x) << S_RXRESOURCEERR)
+#define G_RXRESOURCEERR(x) (((x) >> S_RXRESOURCEERR) & M_RXRESOURCEERR)
+
+#define A_NCSI_MACB_RX_OVERRUN_ERR 0x1a170
+
+#define S_RXOVERRUNERRCNT 0
+#define M_RXOVERRUNERRCNT 0xffU
+#define V_RXOVERRUNERRCNT(x) ((x) << S_RXOVERRUNERRCNT)
+#define G_RXOVERRUNERRCNT(x) (((x) >> S_RXOVERRUNERRCNT) & M_RXOVERRUNERRCNT)
+
+#define A_NCSI_MACB_RX_SYMBOL_ERR 0x1a174
+
+#define S_RXSYMBOLERR 0
+#define M_RXSYMBOLERR 0xffU
+#define V_RXSYMBOLERR(x) ((x) << S_RXSYMBOLERR)
+#define G_RXSYMBOLERR(x) (((x) >> S_RXSYMBOLERR) & M_RXSYMBOLERR)
+
+#define A_NCSI_MACB_RX_OVERSIZE_FRAME 0x1a178
+
+#define S_RXOVERSIZEERR 0
+#define M_RXOVERSIZEERR 0xffU
+#define V_RXOVERSIZEERR(x) ((x) << S_RXOVERSIZEERR)
+#define G_RXOVERSIZEERR(x) (((x) >> S_RXOVERSIZEERR) & M_RXOVERSIZEERR)
+
+#define A_NCSI_MACB_RX_JABBER_ERR 0x1a17c
+
+#define S_RXJABBERERR 0
+#define M_RXJABBERERR 0xffU
+#define V_RXJABBERERR(x) ((x) << S_RXJABBERERR)
+#define G_RXJABBERERR(x) (((x) >> S_RXJABBERERR) & M_RXJABBERERR)
+
+#define A_NCSI_MACB_RX_UNDERSIZE_FRAME 0x1a180
+
+#define S_RXUNDERSIZEFR 0
+#define M_RXUNDERSIZEFR 0xffU
+#define V_RXUNDERSIZEFR(x) ((x) << S_RXUNDERSIZEFR)
+#define G_RXUNDERSIZEFR(x) (((x) >> S_RXUNDERSIZEFR) & M_RXUNDERSIZEFR)
+
+#define A_NCSI_MACB_SQE_TEST_ERR 0x1a184
+
+#define S_SQETESTERR 0
+#define M_SQETESTERR 0xffU
+#define V_SQETESTERR(x) ((x) << S_SQETESTERR)
+#define G_SQETESTERR(x) (((x) >> S_SQETESTERR) & M_SQETESTERR)
+
+#define A_NCSI_MACB_LENGTH_ERR 0x1a188
+
+#define S_LENGTHERR 0
+#define M_LENGTHERR 0xffU
+#define V_LENGTHERR(x) ((x) << S_LENGTHERR)
+#define G_LENGTHERR(x) (((x) >> S_LENGTHERR) & M_LENGTHERR)
+
+#define A_NCSI_MACB_TX_PAUSE_FRAMES 0x1a18c
+
+#define S_TXPAUSEFRAMES 0
+#define M_TXPAUSEFRAMES 0xffffU
+#define V_TXPAUSEFRAMES(x) ((x) << S_TXPAUSEFRAMES)
+#define G_TXPAUSEFRAMES(x) (((x) >> S_TXPAUSEFRAMES) & M_TXPAUSEFRAMES)
+
+#define A_NCSI_MACB_HASH_LOW 0x1a190
+#define A_NCSI_MACB_HASH_HIGH 0x1a194
+#define A_NCSI_MACB_SPECIFIC_1_LOW 0x1a198
+#define A_NCSI_MACB_SPECIFIC_1_HIGH 0x1a19c
+
+#define S_MATCHHIGH 0
+#define M_MATCHHIGH 0xffffU
+#define V_MATCHHIGH(x) ((x) << S_MATCHHIGH)
+#define G_MATCHHIGH(x) (((x) >> S_MATCHHIGH) & M_MATCHHIGH)
+
+#define A_NCSI_MACB_SPECIFIC_2_LOW 0x1a1a0
+#define A_NCSI_MACB_SPECIFIC_2_HIGH 0x1a1a4
+#define A_NCSI_MACB_SPECIFIC_3_LOW 0x1a1a8
+#define A_NCSI_MACB_SPECIFIC_3_HIGH 0x1a1ac
+#define A_NCSI_MACB_SPECIFIC_4_LOW 0x1a1b0
+#define A_NCSI_MACB_SPECIFIC_4_HIGH 0x1a1b4
+#define A_NCSI_MACB_TYPE_ID 0x1a1b8
+
+#define S_TYPEID 0
+#define M_TYPEID 0xffffU
+#define V_TYPEID(x) ((x) << S_TYPEID)
+#define G_TYPEID(x) (((x) >> S_TYPEID) & M_TYPEID)
+
+#define A_NCSI_MACB_TX_PAUSE_QUANTUM 0x1a1bc
+
+#define S_TXPAUSEQUANTUM 0
+#define M_TXPAUSEQUANTUM 0xffffU
+#define V_TXPAUSEQUANTUM(x) ((x) << S_TXPAUSEQUANTUM)
+#define G_TXPAUSEQUANTUM(x) (((x) >> S_TXPAUSEQUANTUM) & M_TXPAUSEQUANTUM)
+
+#define A_NCSI_MACB_USER_IO 0x1a1c0
+
+#define S_USERPROGINPUT 16
+#define M_USERPROGINPUT 0xffffU
+#define V_USERPROGINPUT(x) ((x) << S_USERPROGINPUT)
+#define G_USERPROGINPUT(x) (((x) >> S_USERPROGINPUT) & M_USERPROGINPUT)
+
+#define S_USERPROGOUTPUT 0
+#define M_USERPROGOUTPUT 0xffffU
+#define V_USERPROGOUTPUT(x) ((x) << S_USERPROGOUTPUT)
+#define G_USERPROGOUTPUT(x) (((x) >> S_USERPROGOUTPUT) & M_USERPROGOUTPUT)
+
+#define A_NCSI_MACB_WOL_CFG 0x1a1c4
+
+#define S_MCHASHEN 19
+#define V_MCHASHEN(x) ((x) << S_MCHASHEN)
+#define F_MCHASHEN V_MCHASHEN(1U)
+
+#define S_SPECIFIC1EN 18
+#define V_SPECIFIC1EN(x) ((x) << S_SPECIFIC1EN)
+#define F_SPECIFIC1EN V_SPECIFIC1EN(1U)
+
+#define S_ARPEN 17
+#define V_ARPEN(x) ((x) << S_ARPEN)
+#define F_ARPEN V_ARPEN(1U)
+
+#define S_MAGICPKTEN 16
+#define V_MAGICPKTEN(x) ((x) << S_MAGICPKTEN)
+#define F_MAGICPKTEN V_MAGICPKTEN(1U)
+
+#define S_ARPIPADDR 0
+#define M_ARPIPADDR 0xffffU
+#define V_ARPIPADDR(x) ((x) << S_ARPIPADDR)
+#define G_ARPIPADDR(x) (((x) >> S_ARPIPADDR) & M_ARPIPADDR)
+
+#define A_NCSI_MACB_REV_STATUS 0x1a1fc
+
+#define S_PARTREF 16
+#define M_PARTREF 0xffffU
+#define V_PARTREF(x) ((x) << S_PARTREF)
+#define G_PARTREF(x) (((x) >> S_PARTREF) & M_PARTREF)
+
+#define S_DESREV 0
+#define M_DESREV 0xffffU
+#define V_DESREV(x) ((x) << S_DESREV)
+#define G_DESREV(x) (((x) >> S_DESREV) & M_DESREV)
+
+/* registers for module XGMAC */
+#define XGMAC_BASE_ADDR 0x0
+
+#define A_XGMAC_PORT_CFG 0x1000
+
+#define S_XGMII_CLK_SEL 29
+#define M_XGMII_CLK_SEL 0x7U
+#define V_XGMII_CLK_SEL(x) ((x) << S_XGMII_CLK_SEL)
+#define G_XGMII_CLK_SEL(x) (((x) >> S_XGMII_CLK_SEL) & M_XGMII_CLK_SEL)
+
+#define S_SINKTX 27
+#define V_SINKTX(x) ((x) << S_SINKTX)
+#define F_SINKTX V_SINKTX(1U)
+
+#define S_SINKTXONLINKDOWN 26
+#define V_SINKTXONLINKDOWN(x) ((x) << S_SINKTXONLINKDOWN)
+#define F_SINKTXONLINKDOWN V_SINKTXONLINKDOWN(1U)
+
+#define S_XG2G_SPEED_MODE 25
+#define V_XG2G_SPEED_MODE(x) ((x) << S_XG2G_SPEED_MODE)
+#define F_XG2G_SPEED_MODE V_XG2G_SPEED_MODE(1U)
+
+#define S_LOOPNOFWD 24
+#define V_LOOPNOFWD(x) ((x) << S_LOOPNOFWD)
+#define F_LOOPNOFWD V_LOOPNOFWD(1U)
+
+#define S_XGM_TX_PAUSE_SIZE 23
+#define V_XGM_TX_PAUSE_SIZE(x) ((x) << S_XGM_TX_PAUSE_SIZE)
+#define F_XGM_TX_PAUSE_SIZE V_XGM_TX_PAUSE_SIZE(1U)
+
+#define S_XGM_TX_PAUSE_FRAME 22
+#define V_XGM_TX_PAUSE_FRAME(x) ((x) << S_XGM_TX_PAUSE_FRAME)
+#define F_XGM_TX_PAUSE_FRAME V_XGM_TX_PAUSE_FRAME(1U)
+
+#define S_XGM_TX_DISABLE_PRE 21
+#define V_XGM_TX_DISABLE_PRE(x) ((x) << S_XGM_TX_DISABLE_PRE)
+#define F_XGM_TX_DISABLE_PRE V_XGM_TX_DISABLE_PRE(1U)
+
+#define S_XGM_TX_DISABLE_CRC 20
+#define V_XGM_TX_DISABLE_CRC(x) ((x) << S_XGM_TX_DISABLE_CRC)
+#define F_XGM_TX_DISABLE_CRC V_XGM_TX_DISABLE_CRC(1U)
+
+#define S_SMUX_RX_LOOP 19
+#define V_SMUX_RX_LOOP(x) ((x) << S_SMUX_RX_LOOP)
+#define F_SMUX_RX_LOOP V_SMUX_RX_LOOP(1U)
+
+#define S_RX_LANE_SWAP 18
+#define V_RX_LANE_SWAP(x) ((x) << S_RX_LANE_SWAP)
+#define F_RX_LANE_SWAP V_RX_LANE_SWAP(1U)
+
+#define S_TX_LANE_SWAP 17
+#define V_TX_LANE_SWAP(x) ((x) << S_TX_LANE_SWAP)
+#define F_TX_LANE_SWAP V_TX_LANE_SWAP(1U)
+
+#define S_SIGNAL_DET 14
+#define V_SIGNAL_DET(x) ((x) << S_SIGNAL_DET)
+#define F_SIGNAL_DET V_SIGNAL_DET(1U)
+
+#define S_PMUX_RX_LOOP 13
+#define V_PMUX_RX_LOOP(x) ((x) << S_PMUX_RX_LOOP)
+#define F_PMUX_RX_LOOP V_PMUX_RX_LOOP(1U)
+
+#define S_PMUX_TX_LOOP 12
+#define V_PMUX_TX_LOOP(x) ((x) << S_PMUX_TX_LOOP)
+#define F_PMUX_TX_LOOP V_PMUX_TX_LOOP(1U)
+
+#define S_XGM_RX_SEL 10
+#define M_XGM_RX_SEL 0x3U
+#define V_XGM_RX_SEL(x) ((x) << S_XGM_RX_SEL)
+#define G_XGM_RX_SEL(x) (((x) >> S_XGM_RX_SEL) & M_XGM_RX_SEL)
+
+#define S_PCS_TX_SEL 8
+#define M_PCS_TX_SEL 0x3U
+#define V_PCS_TX_SEL(x) ((x) << S_PCS_TX_SEL)
+#define G_PCS_TX_SEL(x) (((x) >> S_PCS_TX_SEL) & M_PCS_TX_SEL)
+
+#define S_XAUI20_REM_PRE 5
+#define V_XAUI20_REM_PRE(x) ((x) << S_XAUI20_REM_PRE)
+#define F_XAUI20_REM_PRE V_XAUI20_REM_PRE(1U)
+
+#define S_XAUI20_XGMII_SEL 4
+#define V_XAUI20_XGMII_SEL(x) ((x) << S_XAUI20_XGMII_SEL)
+#define F_XAUI20_XGMII_SEL V_XAUI20_XGMII_SEL(1U)
+
+#define S_PORT_SEL 0
+#define V_PORT_SEL(x) ((x) << S_PORT_SEL)
+#define F_PORT_SEL V_PORT_SEL(1U)
+
+#define A_XGMAC_PORT_RESET_CTRL 0x1004
+
+#define S_AUXEXT_RESET 10
+#define V_AUXEXT_RESET(x) ((x) << S_AUXEXT_RESET)
+#define F_AUXEXT_RESET V_AUXEXT_RESET(1U)
+
+#define S_TXFIFO_RESET 9
+#define V_TXFIFO_RESET(x) ((x) << S_TXFIFO_RESET)
+#define F_TXFIFO_RESET V_TXFIFO_RESET(1U)
+
+#define S_RXFIFO_RESET 8
+#define V_RXFIFO_RESET(x) ((x) << S_RXFIFO_RESET)
+#define F_RXFIFO_RESET V_RXFIFO_RESET(1U)
+
+#define S_BEAN_RESET 7
+#define V_BEAN_RESET(x) ((x) << S_BEAN_RESET)
+#define F_BEAN_RESET V_BEAN_RESET(1U)
+
+#define S_XAUI_RESET 6
+#define V_XAUI_RESET(x) ((x) << S_XAUI_RESET)
+#define F_XAUI_RESET V_XAUI_RESET(1U)
+
+#define S_AE_RESET 5
+#define V_AE_RESET(x) ((x) << S_AE_RESET)
+#define F_AE_RESET V_AE_RESET(1U)
+
+#define S_XGM_RESET 4
+#define V_XGM_RESET(x) ((x) << S_XGM_RESET)
+#define F_XGM_RESET V_XGM_RESET(1U)
+
+#define S_XG2G_RESET 3
+#define V_XG2G_RESET(x) ((x) << S_XG2G_RESET)
+#define F_XG2G_RESET V_XG2G_RESET(1U)
+
+#define S_WOL_RESET 2
+#define V_WOL_RESET(x) ((x) << S_WOL_RESET)
+#define F_WOL_RESET V_WOL_RESET(1U)
+
+#define S_XFI_PCS_RESET 1
+#define V_XFI_PCS_RESET(x) ((x) << S_XFI_PCS_RESET)
+#define F_XFI_PCS_RESET V_XFI_PCS_RESET(1U)
+
+#define S_HSS_RESET 0
+#define V_HSS_RESET(x) ((x) << S_HSS_RESET)
+#define F_HSS_RESET V_HSS_RESET(1U)
+
+#define A_XGMAC_PORT_LED_CFG 0x1008
+
+#define S_LED1_CFG 5
+#define M_LED1_CFG 0x7U
+#define V_LED1_CFG(x) ((x) << S_LED1_CFG)
+#define G_LED1_CFG(x) (((x) >> S_LED1_CFG) & M_LED1_CFG)
+
+#define S_LED1_POLARITY_INV 4
+#define V_LED1_POLARITY_INV(x) ((x) << S_LED1_POLARITY_INV)
+#define F_LED1_POLARITY_INV V_LED1_POLARITY_INV(1U)
+
+#define S_LED0_CFG 1
+#define M_LED0_CFG 0x7U
+#define V_LED0_CFG(x) ((x) << S_LED0_CFG)
+#define G_LED0_CFG(x) (((x) >> S_LED0_CFG) & M_LED0_CFG)
+
+#define S_LED0_POLARITY_INV 0
+#define V_LED0_POLARITY_INV(x) ((x) << S_LED0_POLARITY_INV)
+#define F_LED0_POLARITY_INV V_LED0_POLARITY_INV(1U)
+
+#define A_XGMAC_PORT_LED_COUNTHI 0x100c
+
+#define S_LED_COUNT_HI 0
+#define M_LED_COUNT_HI 0x1ffffffU
+#define V_LED_COUNT_HI(x) ((x) << S_LED_COUNT_HI)
+#define G_LED_COUNT_HI(x) (((x) >> S_LED_COUNT_HI) & M_LED_COUNT_HI)
+
+#define A_XGMAC_PORT_LED_COUNTLO 0x1010
+
+#define S_LED_COUNT_LO 0
+#define M_LED_COUNT_LO 0x1ffffffU
+#define V_LED_COUNT_LO(x) ((x) << S_LED_COUNT_LO)
+#define G_LED_COUNT_LO(x) (((x) >> S_LED_COUNT_LO) & M_LED_COUNT_LO)
+
+#define A_XGMAC_PORT_DEBUG_CFG 0x1014
+
+#define S_TESTCLK_SEL 0
+#define M_TESTCLK_SEL 0xfU
+#define V_TESTCLK_SEL(x) ((x) << S_TESTCLK_SEL)
+#define G_TESTCLK_SEL(x) (((x) >> S_TESTCLK_SEL) & M_TESTCLK_SEL)
+
+#define A_XGMAC_PORT_CFG2 0x1018
+
+#define S_RX_POLARITY_INV 28
+#define M_RX_POLARITY_INV 0xfU
+#define V_RX_POLARITY_INV(x) ((x) << S_RX_POLARITY_INV)
+#define G_RX_POLARITY_INV(x) (((x) >> S_RX_POLARITY_INV) & M_RX_POLARITY_INV)
+
+#define S_TX_POLARITY_INV 24
+#define M_TX_POLARITY_INV 0xfU
+#define V_TX_POLARITY_INV(x) ((x) << S_TX_POLARITY_INV)
+#define G_TX_POLARITY_INV(x) (((x) >> S_TX_POLARITY_INV) & M_TX_POLARITY_INV)
+
+#define S_INSTANCENUM 22
+#define M_INSTANCENUM 0x3U
+#define V_INSTANCENUM(x) ((x) << S_INSTANCENUM)
+#define G_INSTANCENUM(x) (((x) >> S_INSTANCENUM) & M_INSTANCENUM)
+
+#define S_STOPONPERR 21
+#define V_STOPONPERR(x) ((x) << S_STOPONPERR)
+#define F_STOPONPERR V_STOPONPERR(1U)
+
+#define S_MACTXEN 20
+#define V_MACTXEN(x) ((x) << S_MACTXEN)
+#define F_MACTXEN V_MACTXEN(1U)
+
+#define S_MACRXEN 19
+#define V_MACRXEN(x) ((x) << S_MACRXEN)
+#define F_MACRXEN V_MACRXEN(1U)
+
+#define S_PATEN 18
+#define V_PATEN(x) ((x) << S_PATEN)
+#define F_PATEN V_PATEN(1U)
+
+#define S_MAGICEN 17
+#define V_MAGICEN(x) ((x) << S_MAGICEN)
+#define F_MAGICEN V_MAGICEN(1U)
+
+#define S_TX_IPG 4
+#define M_TX_IPG 0x1fffU
+#define V_TX_IPG(x) ((x) << S_TX_IPG)
+#define G_TX_IPG(x) (((x) >> S_TX_IPG) & M_TX_IPG)
+
+#define S_AEC_PMA_TX_READY 1
+#define V_AEC_PMA_TX_READY(x) ((x) << S_AEC_PMA_TX_READY)
+#define F_AEC_PMA_TX_READY V_AEC_PMA_TX_READY(1U)
+
+#define S_AEC_PMA_RX_READY 0
+#define V_AEC_PMA_RX_READY(x) ((x) << S_AEC_PMA_RX_READY)
+#define F_AEC_PMA_RX_READY V_AEC_PMA_RX_READY(1U)
+
+#define A_XGMAC_PORT_PKT_COUNT 0x101c
+
+#define S_TX_SOP_COUNT 24
+#define M_TX_SOP_COUNT 0xffU
+#define V_TX_SOP_COUNT(x) ((x) << S_TX_SOP_COUNT)
+#define G_TX_SOP_COUNT(x) (((x) >> S_TX_SOP_COUNT) & M_TX_SOP_COUNT)
+
+#define S_TX_EOP_COUNT 16
+#define M_TX_EOP_COUNT 0xffU
+#define V_TX_EOP_COUNT(x) ((x) << S_TX_EOP_COUNT)
+#define G_TX_EOP_COUNT(x) (((x) >> S_TX_EOP_COUNT) & M_TX_EOP_COUNT)
+
+#define S_RX_SOP_COUNT 8
+#define M_RX_SOP_COUNT 0xffU
+#define V_RX_SOP_COUNT(x) ((x) << S_RX_SOP_COUNT)
+#define G_RX_SOP_COUNT(x) (((x) >> S_RX_SOP_COUNT) & M_RX_SOP_COUNT)
+
+#define S_RX_EOP_COUNT 0
+#define M_RX_EOP_COUNT 0xffU
+#define V_RX_EOP_COUNT(x) ((x) << S_RX_EOP_COUNT)
+#define G_RX_EOP_COUNT(x) (((x) >> S_RX_EOP_COUNT) & M_RX_EOP_COUNT)
+
+#define A_XGMAC_PORT_PERR_INJECT 0x1020
+
+#define S_XGMMEMSEL 1
+#define V_XGMMEMSEL(x) ((x) << S_XGMMEMSEL)
+#define F_XGMMEMSEL V_XGMMEMSEL(1U)
+
+#define A_XGMAC_PORT_MAGIC_MACID_LO 0x1024
+#define A_XGMAC_PORT_MAGIC_MACID_HI 0x1028
+
+#define S_MAC_WOL_DA 0
+#define M_MAC_WOL_DA 0xffffU
+#define V_MAC_WOL_DA(x) ((x) << S_MAC_WOL_DA)
+#define G_MAC_WOL_DA(x) (((x) >> S_MAC_WOL_DA) & M_MAC_WOL_DA)
+
+#define A_XGMAC_PORT_BUILD_REVISION 0x102c
+#define A_XGMAC_PORT_XGMII_SE_COUNT 0x1030
+
+#define S_TXSOP 24
+#define M_TXSOP 0xffU
+#define V_TXSOP(x) ((x) << S_TXSOP)
+#define G_TXSOP(x) (((x) >> S_TXSOP) & M_TXSOP)
+
+#define S_TXEOP 16
+#define M_TXEOP 0xffU
+#define V_TXEOP(x) ((x) << S_TXEOP)
+#define G_TXEOP(x) (((x) >> S_TXEOP) & M_TXEOP)
+
+#define S_RXSOP 8
+#define M_RXSOP 0xffU
+#define V_RXSOP(x) ((x) << S_RXSOP)
+#define G_RXSOP(x) (((x) >> S_RXSOP) & M_RXSOP)
+
+#define A_XGMAC_PORT_LINK_STATUS 0x1034
+
+#define S_REMFLT 3
+#define V_REMFLT(x) ((x) << S_REMFLT)
+#define F_REMFLT V_REMFLT(1U)
+
+#define S_LOCFLT 2
+#define V_LOCFLT(x) ((x) << S_LOCFLT)
+#define F_LOCFLT V_LOCFLT(1U)
+
+#define S_LINKUP 1
+#define V_LINKUP(x) ((x) << S_LINKUP)
+#define F_LINKUP V_LINKUP(1U)
+
+#define S_LINKDN 0
+#define V_LINKDN(x) ((x) << S_LINKDN)
+#define F_LINKDN V_LINKDN(1U)
+
+#define A_XGMAC_PORT_CHECKIN 0x1038
+
+#define S_PREAMBLE 1
+#define V_PREAMBLE(x) ((x) << S_PREAMBLE)
+#define F_PREAMBLE V_PREAMBLE(1U)
+
+#define S_CHECKIN 0
+#define V_CHECKIN(x) ((x) << S_CHECKIN)
+#define F_CHECKIN V_CHECKIN(1U)
+
+#define A_XGMAC_PORT_FAULT_TEST 0x103c
+
+#define S_FLTTYPE 1
+#define V_FLTTYPE(x) ((x) << S_FLTTYPE)
+#define F_FLTTYPE V_FLTTYPE(1U)
+
+#define S_FLTCTRL 0
+#define V_FLTCTRL(x) ((x) << S_FLTCTRL)
+#define F_FLTCTRL V_FLTCTRL(1U)
+
+#define A_XGMAC_PORT_SPARE 0x1040
+#define A_XGMAC_PORT_HSS_SIGDET_STATUS 0x1044
+
+#define S_SIGNALDETECT 0
+#define M_SIGNALDETECT 0xfU
+#define V_SIGNALDETECT(x) ((x) << S_SIGNALDETECT)
+#define G_SIGNALDETECT(x) (((x) >> S_SIGNALDETECT) & M_SIGNALDETECT)
+
+#define A_XGMAC_PORT_EXT_LOS_STATUS 0x1048
+#define A_XGMAC_PORT_EXT_LOS_CTRL 0x104c
+
+#define S_CTRL 0
+#define M_CTRL 0xfU
+#define V_CTRL(x) ((x) << S_CTRL)
+#define G_CTRL(x) (((x) >> S_CTRL) & M_CTRL)
+
+#define A_XGMAC_PORT_FPGA_PAUSE_CTL 0x1050
+
+#define S_CTL 31
+#define V_CTL(x) ((x) << S_CTL)
+#define F_CTL V_CTL(1U)
+
+#define S_HWM 13
+#define M_HWM 0x1fffU
+#define V_HWM(x) ((x) << S_HWM)
+#define G_HWM(x) (((x) >> S_HWM) & M_HWM)
+
+#define S_LWM 0
+#define M_LWM 0x1fffU
+#define V_LWM(x) ((x) << S_LWM)
+#define G_LWM(x) (((x) >> S_LWM) & M_LWM)
+
+#define A_XGMAC_PORT_FPGA_ERRPKT_CNT 0x1054
+#define A_XGMAC_PORT_LA_TX_0 0x1058
+#define A_XGMAC_PORT_LA_RX_0 0x105c
+#define A_XGMAC_PORT_FPGA_LA_CTL 0x1060
+
+#define S_RXRST 5
+#define V_RXRST(x) ((x) << S_RXRST)
+#define F_RXRST V_RXRST(1U)
+
+#define S_TXRST 4
+#define V_TXRST(x) ((x) << S_TXRST)
+#define F_TXRST V_TXRST(1U)
+
+#define S_XGMII 3
+#define V_XGMII(x) ((x) << S_XGMII)
+#define F_XGMII V_XGMII(1U)
+
+#define S_LAPAUSE 2
+#define V_LAPAUSE(x) ((x) << S_LAPAUSE)
+#define F_LAPAUSE V_LAPAUSE(1U)
+
+#define S_STOPERR 1
+#define V_STOPERR(x) ((x) << S_STOPERR)
+#define F_STOPERR V_STOPERR(1U)
+
+#define S_LASTOP 0
+#define V_LASTOP(x) ((x) << S_LASTOP)
+#define F_LASTOP V_LASTOP(1U)
+
+#define A_XGMAC_PORT_EPIO_DATA0 0x10c0
+#define A_XGMAC_PORT_EPIO_DATA1 0x10c4
+#define A_XGMAC_PORT_EPIO_DATA2 0x10c8
+#define A_XGMAC_PORT_EPIO_DATA3 0x10cc
+#define A_XGMAC_PORT_EPIO_OP 0x10d0
+
+#define S_EPIOWR 8
+#define V_EPIOWR(x) ((x) << S_EPIOWR)
+#define F_EPIOWR V_EPIOWR(1U)
+
+#define S_ADDRESS 0
+#define M_ADDRESS 0xffU
+#define V_ADDRESS(x) ((x) << S_ADDRESS)
+#define G_ADDRESS(x) (((x) >> S_ADDRESS) & M_ADDRESS)
+
+#define A_XGMAC_PORT_WOL_STATUS 0x10d4
+
+#define S_MAGICDETECTED 31
+#define V_MAGICDETECTED(x) ((x) << S_MAGICDETECTED)
+#define F_MAGICDETECTED V_MAGICDETECTED(1U)
+
+#define S_PATDETECTED 30
+#define V_PATDETECTED(x) ((x) << S_PATDETECTED)
+#define F_PATDETECTED V_PATDETECTED(1U)
+
+#define S_CLEARMAGIC 4
+#define V_CLEARMAGIC(x) ((x) << S_CLEARMAGIC)
+#define F_CLEARMAGIC V_CLEARMAGIC(1U)
+
+#define S_CLEARMATCH 3
+#define V_CLEARMATCH(x) ((x) << S_CLEARMATCH)
+#define F_CLEARMATCH V_CLEARMATCH(1U)
+
+#define S_MATCHEDFILTER 0
+#define M_MATCHEDFILTER 0x7U
+#define V_MATCHEDFILTER(x) ((x) << S_MATCHEDFILTER)
+#define G_MATCHEDFILTER(x) (((x) >> S_MATCHEDFILTER) & M_MATCHEDFILTER)
+
+#define A_XGMAC_PORT_INT_EN 0x10d8
+
+#define S_EXT_LOS 28
+#define V_EXT_LOS(x) ((x) << S_EXT_LOS)
+#define F_EXT_LOS V_EXT_LOS(1U)
+
+#define S_INCMPTBL_LINK 27
+#define V_INCMPTBL_LINK(x) ((x) << S_INCMPTBL_LINK)
+#define F_INCMPTBL_LINK V_INCMPTBL_LINK(1U)
+
+#define S_PATDETWAKE 26
+#define V_PATDETWAKE(x) ((x) << S_PATDETWAKE)
+#define F_PATDETWAKE V_PATDETWAKE(1U)
+
+#define S_MAGICWAKE 25
+#define V_MAGICWAKE(x) ((x) << S_MAGICWAKE)
+#define F_MAGICWAKE V_MAGICWAKE(1U)
+
+#define S_SIGDETCHG 24
+#define V_SIGDETCHG(x) ((x) << S_SIGDETCHG)
+#define F_SIGDETCHG V_SIGDETCHG(1U)
+
+#define S_PCSR_FEC_CORR 23
+#define V_PCSR_FEC_CORR(x) ((x) << S_PCSR_FEC_CORR)
+#define F_PCSR_FEC_CORR V_PCSR_FEC_CORR(1U)
+
+#define S_AE_TRAIN_LOCAL 22
+#define V_AE_TRAIN_LOCAL(x) ((x) << S_AE_TRAIN_LOCAL)
+#define F_AE_TRAIN_LOCAL V_AE_TRAIN_LOCAL(1U)
+
+#define S_HSSPLL_LOCK 21
+#define V_HSSPLL_LOCK(x) ((x) << S_HSSPLL_LOCK)
+#define F_HSSPLL_LOCK V_HSSPLL_LOCK(1U)
+
+#define S_HSSPRT_READY 20
+#define V_HSSPRT_READY(x) ((x) << S_HSSPRT_READY)
+#define F_HSSPRT_READY V_HSSPRT_READY(1U)
+
+#define S_AUTONEG_DONE 19
+#define V_AUTONEG_DONE(x) ((x) << S_AUTONEG_DONE)
+#define F_AUTONEG_DONE V_AUTONEG_DONE(1U)
+
+#define S_PCSR_HI_BER 18
+#define V_PCSR_HI_BER(x) ((x) << S_PCSR_HI_BER)
+#define F_PCSR_HI_BER V_PCSR_HI_BER(1U)
+
+#define S_PCSR_FEC_ERROR 17
+#define V_PCSR_FEC_ERROR(x) ((x) << S_PCSR_FEC_ERROR)
+#define F_PCSR_FEC_ERROR V_PCSR_FEC_ERROR(1U)
+
+#define S_PCSR_LINK_FAIL 16
+#define V_PCSR_LINK_FAIL(x) ((x) << S_PCSR_LINK_FAIL)
+#define F_PCSR_LINK_FAIL V_PCSR_LINK_FAIL(1U)
+
+#define S_XAUI_DEC_ERROR 15
+#define V_XAUI_DEC_ERROR(x) ((x) << S_XAUI_DEC_ERROR)
+#define F_XAUI_DEC_ERROR V_XAUI_DEC_ERROR(1U)
+
+#define S_XAUI_LINK_FAIL 14
+#define V_XAUI_LINK_FAIL(x) ((x) << S_XAUI_LINK_FAIL)
+#define F_XAUI_LINK_FAIL V_XAUI_LINK_FAIL(1U)
+
+#define S_PCS_CTC_ERROR 13
+#define V_PCS_CTC_ERROR(x) ((x) << S_PCS_CTC_ERROR)
+#define F_PCS_CTC_ERROR V_PCS_CTC_ERROR(1U)
+
+#define S_PCS_LINK_GOOD 12
+#define V_PCS_LINK_GOOD(x) ((x) << S_PCS_LINK_GOOD)
+#define F_PCS_LINK_GOOD V_PCS_LINK_GOOD(1U)
+
+#define S_PCS_LINK_FAIL 11
+#define V_PCS_LINK_FAIL(x) ((x) << S_PCS_LINK_FAIL)
+#define F_PCS_LINK_FAIL V_PCS_LINK_FAIL(1U)
+
+#define S_RXFIFOOVERFLOW 10
+#define V_RXFIFOOVERFLOW(x) ((x) << S_RXFIFOOVERFLOW)
+#define F_RXFIFOOVERFLOW V_RXFIFOOVERFLOW(1U)
+
+#define S_HSSPRBSERR 9
+#define V_HSSPRBSERR(x) ((x) << S_HSSPRBSERR)
+#define F_HSSPRBSERR V_HSSPRBSERR(1U)
+
+#define S_HSSEYEQUAL 8
+#define V_HSSEYEQUAL(x) ((x) << S_HSSEYEQUAL)
+#define F_HSSEYEQUAL V_HSSEYEQUAL(1U)
+
+#define S_REMOTEFAULT 7
+#define V_REMOTEFAULT(x) ((x) << S_REMOTEFAULT)
+#define F_REMOTEFAULT V_REMOTEFAULT(1U)
+
+#define S_LOCALFAULT 6
+#define V_LOCALFAULT(x) ((x) << S_LOCALFAULT)
+#define F_LOCALFAULT V_LOCALFAULT(1U)
+
+#define S_MAC_LINK_DOWN 5
+#define V_MAC_LINK_DOWN(x) ((x) << S_MAC_LINK_DOWN)
+#define F_MAC_LINK_DOWN V_MAC_LINK_DOWN(1U)
+
+#define S_MAC_LINK_UP 4
+#define V_MAC_LINK_UP(x) ((x) << S_MAC_LINK_UP)
+#define F_MAC_LINK_UP V_MAC_LINK_UP(1U)
+
+#define S_BEAN_INT 3
+#define V_BEAN_INT(x) ((x) << S_BEAN_INT)
+#define F_BEAN_INT V_BEAN_INT(1U)
+
+#define S_XGM_INT 2
+#define V_XGM_INT(x) ((x) << S_XGM_INT)
+#define F_XGM_INT V_XGM_INT(1U)
+
+#define A_XGMAC_PORT_INT_CAUSE 0x10dc
+#define A_XGMAC_PORT_HSS_CFG0 0x10e0
+
+#define S_TXDTS 31
+#define V_TXDTS(x) ((x) << S_TXDTS)
+#define F_TXDTS V_TXDTS(1U)
+
+#define S_TXCTS 30
+#define V_TXCTS(x) ((x) << S_TXCTS)
+#define F_TXCTS V_TXCTS(1U)
+
+#define S_TXBTS 29
+#define V_TXBTS(x) ((x) << S_TXBTS)
+#define F_TXBTS V_TXBTS(1U)
+
+#define S_TXATS 28
+#define V_TXATS(x) ((x) << S_TXATS)
+#define F_TXATS V_TXATS(1U)
+
+#define S_TXDOBS 27
+#define V_TXDOBS(x) ((x) << S_TXDOBS)
+#define F_TXDOBS V_TXDOBS(1U)
+
+#define S_TXCOBS 26
+#define V_TXCOBS(x) ((x) << S_TXCOBS)
+#define F_TXCOBS V_TXCOBS(1U)
+
+#define S_TXBOBS 25
+#define V_TXBOBS(x) ((x) << S_TXBOBS)
+#define F_TXBOBS V_TXBOBS(1U)
+
+#define S_TXAOBS 24
+#define V_TXAOBS(x) ((x) << S_TXAOBS)
+#define F_TXAOBS V_TXAOBS(1U)
+
+#define S_HSSREFCLKSEL 20
+#define V_HSSREFCLKSEL(x) ((x) << S_HSSREFCLKSEL)
+#define F_HSSREFCLKSEL V_HSSREFCLKSEL(1U)
+
+#define S_HSSAVDHI 17
+#define V_HSSAVDHI(x) ((x) << S_HSSAVDHI)
+#define F_HSSAVDHI V_HSSAVDHI(1U)
+
+#define S_HSSRXTS 16
+#define V_HSSRXTS(x) ((x) << S_HSSRXTS)
+#define F_HSSRXTS V_HSSRXTS(1U)
+
+#define S_HSSTXACMODE 15
+#define V_HSSTXACMODE(x) ((x) << S_HSSTXACMODE)
+#define F_HSSTXACMODE V_HSSTXACMODE(1U)
+
+#define S_HSSRXACMODE 14
+#define V_HSSRXACMODE(x) ((x) << S_HSSRXACMODE)
+#define F_HSSRXACMODE V_HSSRXACMODE(1U)
+
+#define S_HSSRESYNC 13
+#define V_HSSRESYNC(x) ((x) << S_HSSRESYNC)
+#define F_HSSRESYNC V_HSSRESYNC(1U)
+
+#define S_HSSRECCAL 12
+#define V_HSSRECCAL(x) ((x) << S_HSSRECCAL)
+#define F_HSSRECCAL V_HSSRECCAL(1U)
+
+#define S_HSSPDWNPLL 11
+#define V_HSSPDWNPLL(x) ((x) << S_HSSPDWNPLL)
+#define F_HSSPDWNPLL V_HSSPDWNPLL(1U)
+
+#define S_HSSDIVSEL 9
+#define M_HSSDIVSEL 0x3U
+#define V_HSSDIVSEL(x) ((x) << S_HSSDIVSEL)
+#define G_HSSDIVSEL(x) (((x) >> S_HSSDIVSEL) & M_HSSDIVSEL)
+
+#define S_HSSREFDIV 8
+#define V_HSSREFDIV(x) ((x) << S_HSSREFDIV)
+#define F_HSSREFDIV V_HSSREFDIV(1U)
+
+#define S_HSSPLLBYP 7
+#define V_HSSPLLBYP(x) ((x) << S_HSSPLLBYP)
+#define F_HSSPLLBYP V_HSSPLLBYP(1U)
+
+#define S_HSSLOFREQPLL 6
+#define V_HSSLOFREQPLL(x) ((x) << S_HSSLOFREQPLL)
+#define F_HSSLOFREQPLL V_HSSLOFREQPLL(1U)
+
+#define S_HSSLOFREQ2PLL 5
+#define V_HSSLOFREQ2PLL(x) ((x) << S_HSSLOFREQ2PLL)
+#define F_HSSLOFREQ2PLL V_HSSLOFREQ2PLL(1U)
+
+#define S_HSSEXTC16SEL 4
+#define V_HSSEXTC16SEL(x) ((x) << S_HSSEXTC16SEL)
+#define F_HSSEXTC16SEL V_HSSEXTC16SEL(1U)
+
+#define S_HSSRSTCONFIG 1
+#define M_HSSRSTCONFIG 0x7U
+#define V_HSSRSTCONFIG(x) ((x) << S_HSSRSTCONFIG)
+#define G_HSSRSTCONFIG(x) (((x) >> S_HSSRSTCONFIG) & M_HSSRSTCONFIG)
+
+#define S_HSSPRBSEN 0
+#define V_HSSPRBSEN(x) ((x) << S_HSSPRBSEN)
+#define F_HSSPRBSEN V_HSSPRBSEN(1U)
+
+#define A_XGMAC_PORT_HSS_CFG1 0x10e4
+
+#define S_RXDPRBSRST 28
+#define V_RXDPRBSRST(x) ((x) << S_RXDPRBSRST)
+#define F_RXDPRBSRST V_RXDPRBSRST(1U)
+
+#define S_RXDPRBSEN 27
+#define V_RXDPRBSEN(x) ((x) << S_RXDPRBSEN)
+#define F_RXDPRBSEN V_RXDPRBSEN(1U)
+
+#define S_RXDPRBSFRCERR 26
+#define V_RXDPRBSFRCERR(x) ((x) << S_RXDPRBSFRCERR)
+#define F_RXDPRBSFRCERR V_RXDPRBSFRCERR(1U)
+
+#define S_TXDPRBSRST 25
+#define V_TXDPRBSRST(x) ((x) << S_TXDPRBSRST)
+#define F_TXDPRBSRST V_TXDPRBSRST(1U)
+
+#define S_TXDPRBSEN 24
+#define V_TXDPRBSEN(x) ((x) << S_TXDPRBSEN)
+#define F_TXDPRBSEN V_TXDPRBSEN(1U)
+
+#define S_RXCPRBSRST 20
+#define V_RXCPRBSRST(x) ((x) << S_RXCPRBSRST)
+#define F_RXCPRBSRST V_RXCPRBSRST(1U)
+
+#define S_RXCPRBSEN 19
+#define V_RXCPRBSEN(x) ((x) << S_RXCPRBSEN)
+#define F_RXCPRBSEN V_RXCPRBSEN(1U)
+
+#define S_RXCPRBSFRCERR 18
+#define V_RXCPRBSFRCERR(x) ((x) << S_RXCPRBSFRCERR)
+#define F_RXCPRBSFRCERR V_RXCPRBSFRCERR(1U)
+
+#define S_TXCPRBSRST 17
+#define V_TXCPRBSRST(x) ((x) << S_TXCPRBSRST)
+#define F_TXCPRBSRST V_TXCPRBSRST(1U)
+
+#define S_TXCPRBSEN 16
+#define V_TXCPRBSEN(x) ((x) << S_TXCPRBSEN)
+#define F_TXCPRBSEN V_TXCPRBSEN(1U)
+
+#define S_RXBPRBSRST 12
+#define V_RXBPRBSRST(x) ((x) << S_RXBPRBSRST)
+#define F_RXBPRBSRST V_RXBPRBSRST(1U)
+
+#define S_RXBPRBSEN 11
+#define V_RXBPRBSEN(x) ((x) << S_RXBPRBSEN)
+#define F_RXBPRBSEN V_RXBPRBSEN(1U)
+
+#define S_RXBPRBSFRCERR 10
+#define V_RXBPRBSFRCERR(x) ((x) << S_RXBPRBSFRCERR)
+#define F_RXBPRBSFRCERR V_RXBPRBSFRCERR(1U)
+
+#define S_TXBPRBSRST 9
+#define V_TXBPRBSRST(x) ((x) << S_TXBPRBSRST)
+#define F_TXBPRBSRST V_TXBPRBSRST(1U)
+
+#define S_TXBPRBSEN 8
+#define V_TXBPRBSEN(x) ((x) << S_TXBPRBSEN)
+#define F_TXBPRBSEN V_TXBPRBSEN(1U)
+
+#define S_RXAPRBSRST 4
+#define V_RXAPRBSRST(x) ((x) << S_RXAPRBSRST)
+#define F_RXAPRBSRST V_RXAPRBSRST(1U)
+
+#define S_RXAPRBSEN 3
+#define V_RXAPRBSEN(x) ((x) << S_RXAPRBSEN)
+#define F_RXAPRBSEN V_RXAPRBSEN(1U)
+
+#define S_RXAPRBSFRCERR 2
+#define V_RXAPRBSFRCERR(x) ((x) << S_RXAPRBSFRCERR)
+#define F_RXAPRBSFRCERR V_RXAPRBSFRCERR(1U)
+
+#define S_TXAPRBSRST 1
+#define V_TXAPRBSRST(x) ((x) << S_TXAPRBSRST)
+#define F_TXAPRBSRST V_TXAPRBSRST(1U)
+
+#define S_TXAPRBSEN 0
+#define V_TXAPRBSEN(x) ((x) << S_TXAPRBSEN)
+#define F_TXAPRBSEN V_TXAPRBSEN(1U)
+
+#define A_XGMAC_PORT_HSS_CFG2 0x10e8
+
+#define S_RXDDATASYNC 23
+#define V_RXDDATASYNC(x) ((x) << S_RXDDATASYNC)
+#define F_RXDDATASYNC V_RXDDATASYNC(1U)
+
+#define S_RXCDATASYNC 22
+#define V_RXCDATASYNC(x) ((x) << S_RXCDATASYNC)
+#define F_RXCDATASYNC V_RXCDATASYNC(1U)
+
+#define S_RXBDATASYNC 21
+#define V_RXBDATASYNC(x) ((x) << S_RXBDATASYNC)
+#define F_RXBDATASYNC V_RXBDATASYNC(1U)
+
+#define S_RXADATASYNC 20
+#define V_RXADATASYNC(x) ((x) << S_RXADATASYNC)
+#define F_RXADATASYNC V_RXADATASYNC(1U)
+
+#define S_RXDEARLYIN 19
+#define V_RXDEARLYIN(x) ((x) << S_RXDEARLYIN)
+#define F_RXDEARLYIN V_RXDEARLYIN(1U)
+
+#define S_RXDLATEIN 18
+#define V_RXDLATEIN(x) ((x) << S_RXDLATEIN)
+#define F_RXDLATEIN V_RXDLATEIN(1U)
+
+#define S_RXDPHSLOCK 17
+#define V_RXDPHSLOCK(x) ((x) << S_RXDPHSLOCK)
+#define F_RXDPHSLOCK V_RXDPHSLOCK(1U)
+
+#define S_RXDPHSDNIN 16
+#define V_RXDPHSDNIN(x) ((x) << S_RXDPHSDNIN)
+#define F_RXDPHSDNIN V_RXDPHSDNIN(1U)
+
+#define S_RXDPHSUPIN 15
+#define V_RXDPHSUPIN(x) ((x) << S_RXDPHSUPIN)
+#define F_RXDPHSUPIN V_RXDPHSUPIN(1U)
+
+#define S_RXCEARLYIN 14
+#define V_RXCEARLYIN(x) ((x) << S_RXCEARLYIN)
+#define F_RXCEARLYIN V_RXCEARLYIN(1U)
+
+#define S_RXCLATEIN 13
+#define V_RXCLATEIN(x) ((x) << S_RXCLATEIN)
+#define F_RXCLATEIN V_RXCLATEIN(1U)
+
+#define S_RXCPHSLOCK 12
+#define V_RXCPHSLOCK(x) ((x) << S_RXCPHSLOCK)
+#define F_RXCPHSLOCK V_RXCPHSLOCK(1U)
+
+#define S_RXCPHSDNIN 11
+#define V_RXCPHSDNIN(x) ((x) << S_RXCPHSDNIN)
+#define F_RXCPHSDNIN V_RXCPHSDNIN(1U)
+
+#define S_RXCPHSUPIN 10
+#define V_RXCPHSUPIN(x) ((x) << S_RXCPHSUPIN)
+#define F_RXCPHSUPIN V_RXCPHSUPIN(1U)
+
+#define S_RXBEARLYIN 9
+#define V_RXBEARLYIN(x) ((x) << S_RXBEARLYIN)
+#define F_RXBEARLYIN V_RXBEARLYIN(1U)
+
+#define S_RXBLATEIN 8
+#define V_RXBLATEIN(x) ((x) << S_RXBLATEIN)
+#define F_RXBLATEIN V_RXBLATEIN(1U)
+
+#define S_RXBPHSLOCK 7
+#define V_RXBPHSLOCK(x) ((x) << S_RXBPHSLOCK)
+#define F_RXBPHSLOCK V_RXBPHSLOCK(1U)
+
+#define S_RXBPHSDNIN 6
+#define V_RXBPHSDNIN(x) ((x) << S_RXBPHSDNIN)
+#define F_RXBPHSDNIN V_RXBPHSDNIN(1U)
+
+#define S_RXBPHSUPIN 5
+#define V_RXBPHSUPIN(x) ((x) << S_RXBPHSUPIN)
+#define F_RXBPHSUPIN V_RXBPHSUPIN(1U)
+
+#define S_RXAEARLYIN 4
+#define V_RXAEARLYIN(x) ((x) << S_RXAEARLYIN)
+#define F_RXAEARLYIN V_RXAEARLYIN(1U)
+
+#define S_RXALATEIN 3
+#define V_RXALATEIN(x) ((x) << S_RXALATEIN)
+#define F_RXALATEIN V_RXALATEIN(1U)
+
+#define S_RXAPHSLOCK 2
+#define V_RXAPHSLOCK(x) ((x) << S_RXAPHSLOCK)
+#define F_RXAPHSLOCK V_RXAPHSLOCK(1U)
+
+#define S_RXAPHSDNIN 1
+#define V_RXAPHSDNIN(x) ((x) << S_RXAPHSDNIN)
+#define F_RXAPHSDNIN V_RXAPHSDNIN(1U)
+
+#define S_RXAPHSUPIN 0
+#define V_RXAPHSUPIN(x) ((x) << S_RXAPHSUPIN)
+#define F_RXAPHSUPIN V_RXAPHSUPIN(1U)
+
+#define A_XGMAC_PORT_HSS_STATUS 0x10ec
+
+#define S_RXDPRBSSYNC 15
+#define V_RXDPRBSSYNC(x) ((x) << S_RXDPRBSSYNC)
+#define F_RXDPRBSSYNC V_RXDPRBSSYNC(1U)
+
+#define S_RXCPRBSSYNC 14
+#define V_RXCPRBSSYNC(x) ((x) << S_RXCPRBSSYNC)
+#define F_RXCPRBSSYNC V_RXCPRBSSYNC(1U)
+
+#define S_RXBPRBSSYNC 13
+#define V_RXBPRBSSYNC(x) ((x) << S_RXBPRBSSYNC)
+#define F_RXBPRBSSYNC V_RXBPRBSSYNC(1U)
+
+#define S_RXAPRBSSYNC 12
+#define V_RXAPRBSSYNC(x) ((x) << S_RXAPRBSSYNC)
+#define F_RXAPRBSSYNC V_RXAPRBSSYNC(1U)
+
+#define S_RXDPRBSERR 11
+#define V_RXDPRBSERR(x) ((x) << S_RXDPRBSERR)
+#define F_RXDPRBSERR V_RXDPRBSERR(1U)
+
+#define S_RXCPRBSERR 10
+#define V_RXCPRBSERR(x) ((x) << S_RXCPRBSERR)
+#define F_RXCPRBSERR V_RXCPRBSERR(1U)
+
+#define S_RXBPRBSERR 9
+#define V_RXBPRBSERR(x) ((x) << S_RXBPRBSERR)
+#define F_RXBPRBSERR V_RXBPRBSERR(1U)
+
+#define S_RXAPRBSERR 8
+#define V_RXAPRBSERR(x) ((x) << S_RXAPRBSERR)
+#define F_RXAPRBSERR V_RXAPRBSERR(1U)
+
+#define S_RXDSIGDET 7
+#define V_RXDSIGDET(x) ((x) << S_RXDSIGDET)
+#define F_RXDSIGDET V_RXDSIGDET(1U)
+
+#define S_RXCSIGDET 6
+#define V_RXCSIGDET(x) ((x) << S_RXCSIGDET)
+#define F_RXCSIGDET V_RXCSIGDET(1U)
+
+#define S_RXBSIGDET 5
+#define V_RXBSIGDET(x) ((x) << S_RXBSIGDET)
+#define F_RXBSIGDET V_RXBSIGDET(1U)
+
+#define S_RXASIGDET 4
+#define V_RXASIGDET(x) ((x) << S_RXASIGDET)
+#define F_RXASIGDET V_RXASIGDET(1U)
+
+#define S_HSSPLLLOCK 1
+#define V_HSSPLLLOCK(x) ((x) << S_HSSPLLLOCK)
+#define F_HSSPLLLOCK V_HSSPLLLOCK(1U)
+
+#define S_HSSPRTREADY 0
+#define V_HSSPRTREADY(x) ((x) << S_HSSPRTREADY)
+#define F_HSSPRTREADY V_HSSPRTREADY(1U)
+
+#define A_XGMAC_PORT_XGM_TX_CTRL 0x1200
+
+#define S_SENDPAUSE 2
+#define V_SENDPAUSE(x) ((x) << S_SENDPAUSE)
+#define F_SENDPAUSE V_SENDPAUSE(1U)
+
+#define S_SENDZEROPAUSE 1
+#define V_SENDZEROPAUSE(x) ((x) << S_SENDZEROPAUSE)
+#define F_SENDZEROPAUSE V_SENDZEROPAUSE(1U)
+
+#define S_XGM_TXEN 0
+#define V_XGM_TXEN(x) ((x) << S_XGM_TXEN)
+#define F_XGM_TXEN V_XGM_TXEN(1U)
+
+#define A_XGMAC_PORT_XGM_TX_CFG 0x1204
+
+#define S_CRCCAL 8
+#define M_CRCCAL 0x3U
+#define V_CRCCAL(x) ((x) << S_CRCCAL)
+#define G_CRCCAL(x) (((x) >> S_CRCCAL) & M_CRCCAL)
+
+#define S_DISDEFIDLECNT 7
+#define V_DISDEFIDLECNT(x) ((x) << S_DISDEFIDLECNT)
+#define F_DISDEFIDLECNT V_DISDEFIDLECNT(1U)
+
+#define S_DECAVGTXIPG 6
+#define V_DECAVGTXIPG(x) ((x) << S_DECAVGTXIPG)
+#define F_DECAVGTXIPG V_DECAVGTXIPG(1U)
+
+#define S_UNIDIRTXEN 5
+#define V_UNIDIRTXEN(x) ((x) << S_UNIDIRTXEN)
+#define F_UNIDIRTXEN V_UNIDIRTXEN(1U)
+
+#define S_CFGCLKSPEED 2
+#define M_CFGCLKSPEED 0x7U
+#define V_CFGCLKSPEED(x) ((x) << S_CFGCLKSPEED)
+#define G_CFGCLKSPEED(x) (((x) >> S_CFGCLKSPEED) & M_CFGCLKSPEED)
+
+#define S_STRETCHMODE 1
+#define V_STRETCHMODE(x) ((x) << S_STRETCHMODE)
+#define F_STRETCHMODE V_STRETCHMODE(1U)
+
+#define S_TXPAUSEEN 0
+#define V_TXPAUSEEN(x) ((x) << S_TXPAUSEEN)
+#define F_TXPAUSEEN V_TXPAUSEEN(1U)
+
+#define A_XGMAC_PORT_XGM_TX_PAUSE_QUANTA 0x1208
+
+#define S_TXPAUSEQUANTA 0
+#define M_TXPAUSEQUANTA 0xffffU
+#define V_TXPAUSEQUANTA(x) ((x) << S_TXPAUSEQUANTA)
+#define G_TXPAUSEQUANTA(x) (((x) >> S_TXPAUSEQUANTA) & M_TXPAUSEQUANTA)
+
+#define A_XGMAC_PORT_XGM_RX_CTRL 0x120c
+#define A_XGMAC_PORT_XGM_RX_CFG 0x1210
+
+#define S_RXCRCCAL 16
+#define M_RXCRCCAL 0x3U
+#define V_RXCRCCAL(x) ((x) << S_RXCRCCAL)
+#define G_RXCRCCAL(x) (((x) >> S_RXCRCCAL) & M_RXCRCCAL)
+
+#define S_STATLOCALFAULT 15
+#define V_STATLOCALFAULT(x) ((x) << S_STATLOCALFAULT)
+#define F_STATLOCALFAULT V_STATLOCALFAULT(1U)
+
+#define S_STATREMOTEFAULT 14
+#define V_STATREMOTEFAULT(x) ((x) << S_STATREMOTEFAULT)
+#define F_STATREMOTEFAULT V_STATREMOTEFAULT(1U)
+
+#define S_LENERRFRAMEDIS 13
+#define V_LENERRFRAMEDIS(x) ((x) << S_LENERRFRAMEDIS)
+#define F_LENERRFRAMEDIS V_LENERRFRAMEDIS(1U)
+
+#define S_CON802_3PREAMBLE 12
+#define V_CON802_3PREAMBLE(x) ((x) << S_CON802_3PREAMBLE)
+#define F_CON802_3PREAMBLE V_CON802_3PREAMBLE(1U)
+
+#define S_ENNON802_3PREAMBLE 11
+#define V_ENNON802_3PREAMBLE(x) ((x) << S_ENNON802_3PREAMBLE)
+#define F_ENNON802_3PREAMBLE V_ENNON802_3PREAMBLE(1U)
+
+#define S_COPYPREAMBLE 10
+#define V_COPYPREAMBLE(x) ((x) << S_COPYPREAMBLE)
+#define F_COPYPREAMBLE V_COPYPREAMBLE(1U)
+
+#define S_DISPAUSEFRAMES 9
+#define V_DISPAUSEFRAMES(x) ((x) << S_DISPAUSEFRAMES)
+#define F_DISPAUSEFRAMES V_DISPAUSEFRAMES(1U)
+
+#define S_EN1536BFRAMES 8
+#define V_EN1536BFRAMES(x) ((x) << S_EN1536BFRAMES)
+#define F_EN1536BFRAMES V_EN1536BFRAMES(1U)
+
+#define S_ENJUMBO 7
+#define V_ENJUMBO(x) ((x) << S_ENJUMBO)
+#define F_ENJUMBO V_ENJUMBO(1U)
+
+#define S_RMFCS 6
+#define V_RMFCS(x) ((x) << S_RMFCS)
+#define F_RMFCS V_RMFCS(1U)
+
+#define S_DISNONVLAN 5
+#define V_DISNONVLAN(x) ((x) << S_DISNONVLAN)
+#define F_DISNONVLAN V_DISNONVLAN(1U)
+
+#define S_ENEXTMATCH 4
+#define V_ENEXTMATCH(x) ((x) << S_ENEXTMATCH)
+#define F_ENEXTMATCH V_ENEXTMATCH(1U)
+
+#define S_ENHASHUCAST 3
+#define V_ENHASHUCAST(x) ((x) << S_ENHASHUCAST)
+#define F_ENHASHUCAST V_ENHASHUCAST(1U)
+
+#define S_ENHASHMCAST 2
+#define V_ENHASHMCAST(x) ((x) << S_ENHASHMCAST)
+#define F_ENHASHMCAST V_ENHASHMCAST(1U)
+
+#define S_DISBCAST 1
+#define V_DISBCAST(x) ((x) << S_DISBCAST)
+#define F_DISBCAST V_DISBCAST(1U)
+
+#define S_COPYALLFRAMES 0
+#define V_COPYALLFRAMES(x) ((x) << S_COPYALLFRAMES)
+#define F_COPYALLFRAMES V_COPYALLFRAMES(1U)
+
+#define A_XGMAC_PORT_XGM_RX_HASH_LOW 0x1214
+#define A_XGMAC_PORT_XGM_RX_HASH_HIGH 0x1218
+#define A_XGMAC_PORT_XGM_RX_EXACT_MATCH_LOW_1 0x121c
+#define A_XGMAC_PORT_XGM_RX_EXACT_MATCH_HIGH_1 0x1220
+
+#define S_ADDRESS_HIGH 0
+#define M_ADDRESS_HIGH 0xffffU
+#define V_ADDRESS_HIGH(x) ((x) << S_ADDRESS_HIGH)
+#define G_ADDRESS_HIGH(x) (((x) >> S_ADDRESS_HIGH) & M_ADDRESS_HIGH)
+
+#define A_XGMAC_PORT_XGM_RX_EXACT_MATCH_LOW_2 0x1224
+#define A_XGMAC_PORT_XGM_RX_EXACT_MATCH_HIGH_2 0x1228
+#define A_XGMAC_PORT_XGM_RX_EXACT_MATCH_LOW_3 0x122c
+#define A_XGMAC_PORT_XGM_RX_EXACT_MATCH_HIGH_3 0x1230
+#define A_XGMAC_PORT_XGM_RX_EXACT_MATCH_LOW_4 0x1234
+#define A_XGMAC_PORT_XGM_RX_EXACT_MATCH_HIGH_4 0x1238
+#define A_XGMAC_PORT_XGM_RX_EXACT_MATCH_LOW_5 0x123c
+#define A_XGMAC_PORT_XGM_RX_EXACT_MATCH_HIGH_5 0x1240
+#define A_XGMAC_PORT_XGM_RX_EXACT_MATCH_LOW_6 0x1244
+#define A_XGMAC_PORT_XGM_RX_EXACT_MATCH_HIGH_6 0x1248
+#define A_XGMAC_PORT_XGM_RX_EXACT_MATCH_LOW_7 0x124c
+#define A_XGMAC_PORT_XGM_RX_EXACT_MATCH_HIGH_7 0x1250
+#define A_XGMAC_PORT_XGM_RX_EXACT_MATCH_LOW_8 0x1254
+#define A_XGMAC_PORT_XGM_RX_EXACT_MATCH_HIGH_8 0x1258
+#define A_XGMAC_PORT_XGM_RX_TYPE_MATCH_1 0x125c
+
+#define S_ENTYPEMATCH 31
+#define V_ENTYPEMATCH(x) ((x) << S_ENTYPEMATCH)
+#define F_ENTYPEMATCH V_ENTYPEMATCH(1U)
+
+#define S_TYPE 0
+#define M_TYPE 0xffffU
+#define V_TYPE(x) ((x) << S_TYPE)
+#define G_TYPE(x) (((x) >> S_TYPE) & M_TYPE)
+
+#define A_XGMAC_PORT_XGM_RX_TYPE_MATCH_2 0x1260
+#define A_XGMAC_PORT_XGM_RX_TYPE_MATCH_3 0x1264
+#define A_XGMAC_PORT_XGM_RX_TYPE_MATCH_4 0x1268
+#define A_XGMAC_PORT_XGM_INT_STATUS 0x126c
+
+#define S_XGMIIEXTINT 10
+#define V_XGMIIEXTINT(x) ((x) << S_XGMIIEXTINT)
+#define F_XGMIIEXTINT V_XGMIIEXTINT(1U)
+
+#define S_LINKFAULTCHANGE 9
+#define V_LINKFAULTCHANGE(x) ((x) << S_LINKFAULTCHANGE)
+#define F_LINKFAULTCHANGE V_LINKFAULTCHANGE(1U)
+
+#define S_PHYFRAMECOMPLETE 8
+#define V_PHYFRAMECOMPLETE(x) ((x) << S_PHYFRAMECOMPLETE)
+#define F_PHYFRAMECOMPLETE V_PHYFRAMECOMPLETE(1U)
+
+#define S_PAUSEFRAMETXMT 7
+#define V_PAUSEFRAMETXMT(x) ((x) << S_PAUSEFRAMETXMT)
+#define F_PAUSEFRAMETXMT V_PAUSEFRAMETXMT(1U)
+
+#define S_PAUSECNTRTIMEOUT 6
+#define V_PAUSECNTRTIMEOUT(x) ((x) << S_PAUSECNTRTIMEOUT)
+#define F_PAUSECNTRTIMEOUT V_PAUSECNTRTIMEOUT(1U)
+
+#define S_NON0PAUSERCVD 5
+#define V_NON0PAUSERCVD(x) ((x) << S_NON0PAUSERCVD)
+#define F_NON0PAUSERCVD V_NON0PAUSERCVD(1U)
+
+#define S_STATOFLOW 4
+#define V_STATOFLOW(x) ((x) << S_STATOFLOW)
+#define F_STATOFLOW V_STATOFLOW(1U)
+
+#define S_TXERRFIFO 3
+#define V_TXERRFIFO(x) ((x) << S_TXERRFIFO)
+#define F_TXERRFIFO V_TXERRFIFO(1U)
+
+#define S_TXUFLOW 2
+#define V_TXUFLOW(x) ((x) << S_TXUFLOW)
+#define F_TXUFLOW V_TXUFLOW(1U)
+
+#define S_FRAMETXMT 1
+#define V_FRAMETXMT(x) ((x) << S_FRAMETXMT)
+#define F_FRAMETXMT V_FRAMETXMT(1U)
+
+#define S_FRAMERCVD 0
+#define V_FRAMERCVD(x) ((x) << S_FRAMERCVD)
+#define F_FRAMERCVD V_FRAMERCVD(1U)
+
+#define A_XGMAC_PORT_XGM_INT_MASK 0x1270
+#define A_XGMAC_PORT_XGM_INT_EN 0x1274
+#define A_XGMAC_PORT_XGM_INT_DISABLE 0x1278
+#define A_XGMAC_PORT_XGM_TX_PAUSE_TIMER 0x127c
+
+#define S_CURPAUSETIMER 0
+#define M_CURPAUSETIMER 0xffffU
+#define V_CURPAUSETIMER(x) ((x) << S_CURPAUSETIMER)
+#define G_CURPAUSETIMER(x) (((x) >> S_CURPAUSETIMER) & M_CURPAUSETIMER)
+
+#define A_XGMAC_PORT_XGM_STAT_CTRL 0x1280
+
+#define S_READSNPSHOT 4
+#define V_READSNPSHOT(x) ((x) << S_READSNPSHOT)
+#define F_READSNPSHOT V_READSNPSHOT(1U)
+
+#define S_TAKESNPSHOT 3
+#define V_TAKESNPSHOT(x) ((x) << S_TAKESNPSHOT)
+#define F_TAKESNPSHOT V_TAKESNPSHOT(1U)
+
+#define S_CLRSTATS 2
+#define V_CLRSTATS(x) ((x) << S_CLRSTATS)
+#define F_CLRSTATS V_CLRSTATS(1U)
+
+#define S_INCRSTATS 1
+#define V_INCRSTATS(x) ((x) << S_INCRSTATS)
+#define F_INCRSTATS V_INCRSTATS(1U)
+
+#define S_ENTESTMODEWR 0
+#define V_ENTESTMODEWR(x) ((x) << S_ENTESTMODEWR)
+#define F_ENTESTMODEWR V_ENTESTMODEWR(1U)
+
+#define A_XGMAC_PORT_XGM_MDIO_CTRL 0x1284
+
+#define S_FRAMETYPE 30
+#define M_FRAMETYPE 0x3U
+#define V_FRAMETYPE(x) ((x) << S_FRAMETYPE)
+#define G_FRAMETYPE(x) (((x) >> S_FRAMETYPE) & M_FRAMETYPE)
+
+#define S_OPERATION 28
+#define M_OPERATION 0x3U
+#define V_OPERATION(x) ((x) << S_OPERATION)
+#define G_OPERATION(x) (((x) >> S_OPERATION) & M_OPERATION)
+
+#define S_PORTADDR 23
+#define M_PORTADDR 0x1fU
+#define V_PORTADDR(x) ((x) << S_PORTADDR)
+#define G_PORTADDR(x) (((x) >> S_PORTADDR) & M_PORTADDR)
+
+#define S_DEVADDR 18
+#define M_DEVADDR 0x1fU
+#define V_DEVADDR(x) ((x) << S_DEVADDR)
+#define G_DEVADDR(x) (((x) >> S_DEVADDR) & M_DEVADDR)
+
+#define S_RESRV 16
+#define M_RESRV 0x3U
+#define V_RESRV(x) ((x) << S_RESRV)
+#define G_RESRV(x) (((x) >> S_RESRV) & M_RESRV)
+
+#define S_DATA 0
+#define M_DATA 0xffffU
+#define V_DATA(x) ((x) << S_DATA)
+#define G_DATA(x) (((x) >> S_DATA) & M_DATA)
+
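+/*
+ * Minimal sketch of composing an MDIO control word from the fields above.
+ * The frame-type and operation encodings are left to the caller (per the
+ * relevant MDIO clause); t4_write_reg() and struct adapter are assumed from
+ * the rest of the driver, and any per-port register base offset is omitted.
+ */
+#if 0	/* illustration only, not compiled */
+static void
+write_mdio_ctrl(struct adapter *adap, u_int frametype, u_int op,
+    u_int phy_addr, u_int mmd, u_int val)
+{
+	/* Pack the frame type, operation, addresses and data into one word. */
+	uint32_t mdio = V_FRAMETYPE(frametype) | V_OPERATION(op) |
+	    V_PORTADDR(phy_addr) | V_DEVADDR(mmd) | V_DATA(val);
+
+	t4_write_reg(adap, A_XGMAC_PORT_XGM_MDIO_CTRL, mdio);
+}
+#endif
+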
+#define A_XGMAC_PORT_XGM_MODULE_ID 0x12fc
+
+#define S_MODULEID 16
+#define M_MODULEID 0xffffU
+#define V_MODULEID(x) ((x) << S_MODULEID)
+#define G_MODULEID(x) (((x) >> S_MODULEID) & M_MODULEID)
+
+#define S_MODULEREV 0
+#define M_MODULEREV 0xffffU
+#define V_MODULEREV(x) ((x) << S_MODULEREV)
+#define G_MODULEREV(x) (((x) >> S_MODULEREV) & M_MODULEREV)
+
+#define A_XGMAC_PORT_XGM_STAT_TX_BYTE_LOW 0x1300
+#define A_XGMAC_PORT_XGM_STAT_TX_BYTE_HIGH 0x1304
+
+#define S_TXBYTES_HIGH 0
+#define M_TXBYTES_HIGH 0x1fffU
+#define V_TXBYTES_HIGH(x) ((x) << S_TXBYTES_HIGH)
+#define G_TXBYTES_HIGH(x) (((x) >> S_TXBYTES_HIGH) & M_TXBYTES_HIGH)
+
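+/*
+ * The XGM byte and frame counters are split into a full 32-bit _LOW register
+ * and a narrow _HIGH register holding the upper bits.  A minimal sketch of
+ * reassembling the TX byte count follows; it assumes the driver's
+ * t4_read_reg() accessor and omits any per-port register base offset.
+ */
+#if 0	/* illustration only, not compiled */
+static uint64_t
+read_tx_bytes(struct adapter *adap)
+{
+	uint64_t lo = t4_read_reg(adap, A_XGMAC_PORT_XGM_STAT_TX_BYTE_LOW);
+	uint64_t hi = G_TXBYTES_HIGH(t4_read_reg(adap,
+	    A_XGMAC_PORT_XGM_STAT_TX_BYTE_HIGH));
+
+	/* Combine the high bits with the low 32 bits of the counter. */
+	return ((hi << 32) | lo);
+}
+#endif
+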
+#define A_XGMAC_PORT_XGM_STAT_TX_FRAME_LOW 0x1308
+#define A_XGMAC_PORT_XGM_STAT_TX_FRAME_HIGH 0x130c
+
+#define S_TXFRAMES_HIGH 0
+#define M_TXFRAMES_HIGH 0xfU
+#define V_TXFRAMES_HIGH(x) ((x) << S_TXFRAMES_HIGH)
+#define G_TXFRAMES_HIGH(x) (((x) >> S_TXFRAMES_HIGH) & M_TXFRAMES_HIGH)
+
+#define A_XGMAC_PORT_XGM_STAT_TX_BCAST 0x1310
+#define A_XGMAC_PORT_XGM_STAT_TX_MCAST 0x1314
+#define A_XGMAC_PORT_XGM_STAT_TX_PAUSE 0x1318
+#define A_XGMAC_PORT_XGM_STAT_TX_64B_FRAMES 0x131c
+#define A_XGMAC_PORT_XGM_STAT_TX_65_127B_FRAMES 0x1320
+#define A_XGMAC_PORT_XGM_STAT_TX_128_255B_FRAMES 0x1324
+#define A_XGMAC_PORT_XGM_STAT_TX_256_511B_FRAMES 0x1328
+#define A_XGMAC_PORT_XGM_STAT_TX_512_1023B_FRAMES 0x132c
+#define A_XGMAC_PORT_XGM_STAT_TX_1024_1518B_FRAMES 0x1330
+#define A_XGMAC_PORT_XGM_STAT_TX_1519_MAXB_FRAMES 0x1334
+#define A_XGMAC_PORT_XGM_STAT_TX_ERR_FRAMES 0x1338
+#define A_XGMAC_PORT_XGM_STAT_RX_BYTES_LOW 0x133c
+#define A_XGMAC_PORT_XGM_STAT_RX_BYTES_HIGH 0x1340
+
+#define S_RXBYTES_HIGH 0
+#define M_RXBYTES_HIGH 0x1fffU
+#define V_RXBYTES_HIGH(x) ((x) << S_RXBYTES_HIGH)
+#define G_RXBYTES_HIGH(x) (((x) >> S_RXBYTES_HIGH) & M_RXBYTES_HIGH)
+
+#define A_XGMAC_PORT_XGM_STAT_RX_FRAMES_LOW 0x1344
+#define A_XGMAC_PORT_XGM_STAT_RX_FRAMES_HIGH 0x1348
+
+#define S_RXFRAMES_HIGH 0
+#define M_RXFRAMES_HIGH 0xfU
+#define V_RXFRAMES_HIGH(x) ((x) << S_RXFRAMES_HIGH)
+#define G_RXFRAMES_HIGH(x) (((x) >> S_RXFRAMES_HIGH) & M_RXFRAMES_HIGH)
+
+#define A_XGMAC_PORT_XGM_STAT_RX_BCAST_FRAMES 0x134c
+#define A_XGMAC_PORT_XGM_STAT_RX_MCAST_FRAMES 0x1350
+#define A_XGMAC_PORT_XGM_STAT_RX_PAUSE_FRAMES 0x1354
+
+#define S_RXPAUSEFRAMES 0
+#define M_RXPAUSEFRAMES 0xffffU
+#define V_RXPAUSEFRAMES(x) ((x) << S_RXPAUSEFRAMES)
+#define G_RXPAUSEFRAMES(x) (((x) >> S_RXPAUSEFRAMES) & M_RXPAUSEFRAMES)
+
+#define A_XGMAC_PORT_XGM_STAT_RX_64B_FRAMES 0x1358
+#define A_XGMAC_PORT_XGM_STAT_RX_65_127B_FRAMES 0x135c
+#define A_XGMAC_PORT_XGM_STAT_RX_128_255B_FRAMES 0x1360
+#define A_XGMAC_PORT_XGM_STAT_RX_256_511B_FRAMES 0x1364
+#define A_XGMAC_PORT_XGM_STAT_RX_512_1023B_FRAMES 0x1368
+#define A_XGMAC_PORT_XGM_STAT_RX_1024_1518B_FRAMES 0x136c
+#define A_XGMAC_PORT_XGM_STAT_RX_1519_MAXB_FRAMES 0x1370
+#define A_XGMAC_PORT_XGM_STAT_RX_SHORT_FRAMES 0x1374
+
+#define S_RXSHORTFRAMES 0
+#define M_RXSHORTFRAMES 0xffffU
+#define V_RXSHORTFRAMES(x) ((x) << S_RXSHORTFRAMES)
+#define G_RXSHORTFRAMES(x) (((x) >> S_RXSHORTFRAMES) & M_RXSHORTFRAMES)
+
+#define A_XGMAC_PORT_XGM_STAT_RX_OVERSIZE_FRAMES 0x1378
+
+#define S_RXOVERSIZEFRAMES 0
+#define M_RXOVERSIZEFRAMES 0xffffU
+#define V_RXOVERSIZEFRAMES(x) ((x) << S_RXOVERSIZEFRAMES)
+#define G_RXOVERSIZEFRAMES(x) (((x) >> S_RXOVERSIZEFRAMES) & M_RXOVERSIZEFRAMES)
+
+#define A_XGMAC_PORT_XGM_STAT_RX_JABBER_FRAMES 0x137c
+
+#define S_RXJABBERFRAMES 0
+#define M_RXJABBERFRAMES 0xffffU
+#define V_RXJABBERFRAMES(x) ((x) << S_RXJABBERFRAMES)
+#define G_RXJABBERFRAMES(x) (((x) >> S_RXJABBERFRAMES) & M_RXJABBERFRAMES)
+
+#define A_XGMAC_PORT_XGM_STAT_RX_CRC_ERR_FRAMES 0x1380
+
+#define S_RXCRCERRFRAMES 0
+#define M_RXCRCERRFRAMES 0xffffU
+#define V_RXCRCERRFRAMES(x) ((x) << S_RXCRCERRFRAMES)
+#define G_RXCRCERRFRAMES(x) (((x) >> S_RXCRCERRFRAMES) & M_RXCRCERRFRAMES)
+
+#define A_XGMAC_PORT_XGM_STAT_RX_LENGTH_ERR_FRAMES 0x1384
+
+#define S_RXLENGTHERRFRAMES 0
+#define M_RXLENGTHERRFRAMES 0xffffU
+#define V_RXLENGTHERRFRAMES(x) ((x) << S_RXLENGTHERRFRAMES)
+#define G_RXLENGTHERRFRAMES(x) (((x) >> S_RXLENGTHERRFRAMES) & M_RXLENGTHERRFRAMES)
+
+#define A_XGMAC_PORT_XGM_STAT_RX_SYM_CODE_ERR_FRAMES 0x1388
+
+#define S_RXSYMCODEERRFRAMES 0
+#define M_RXSYMCODEERRFRAMES 0xffffU
+#define V_RXSYMCODEERRFRAMES(x) ((x) << S_RXSYMCODEERRFRAMES)
+#define G_RXSYMCODEERRFRAMES(x) (((x) >> S_RXSYMCODEERRFRAMES) & M_RXSYMCODEERRFRAMES)
+
+#define A_XGMAC_PORT_XAUI_CTRL 0x1400
+
+#define S_POLARITY_INV_RX 8
+#define M_POLARITY_INV_RX 0xfU
+#define V_POLARITY_INV_RX(x) ((x) << S_POLARITY_INV_RX)
+#define G_POLARITY_INV_RX(x) (((x) >> S_POLARITY_INV_RX) & M_POLARITY_INV_RX)
+
+#define S_POLARITY_INV_TX 4
+#define M_POLARITY_INV_TX 0xfU
+#define V_POLARITY_INV_TX(x) ((x) << S_POLARITY_INV_TX)
+#define G_POLARITY_INV_TX(x) (((x) >> S_POLARITY_INV_TX) & M_POLARITY_INV_TX)
+
+#define S_TEST_SEL 2
+#define M_TEST_SEL 0x3U
+#define V_TEST_SEL(x) ((x) << S_TEST_SEL)
+#define G_TEST_SEL(x) (((x) >> S_TEST_SEL) & M_TEST_SEL)
+
+#define S_TEST_EN 0
+#define V_TEST_EN(x) ((x) << S_TEST_EN)
+#define F_TEST_EN V_TEST_EN(1U)
+
+#define A_XGMAC_PORT_XAUI_STATUS 0x1404
+
+#define S_DECODE_ERROR 12
+#define M_DECODE_ERROR 0xffU
+#define V_DECODE_ERROR(x) ((x) << S_DECODE_ERROR)
+#define G_DECODE_ERROR(x) (((x) >> S_DECODE_ERROR) & M_DECODE_ERROR)
+
+#define S_LANE3_CTC_STATUS 11
+#define V_LANE3_CTC_STATUS(x) ((x) << S_LANE3_CTC_STATUS)
+#define F_LANE3_CTC_STATUS V_LANE3_CTC_STATUS(1U)
+
+#define S_LANE2_CTC_STATUS 10
+#define V_LANE2_CTC_STATUS(x) ((x) << S_LANE2_CTC_STATUS)
+#define F_LANE2_CTC_STATUS V_LANE2_CTC_STATUS(1U)
+
+#define S_LANE1_CTC_STATUS 9
+#define V_LANE1_CTC_STATUS(x) ((x) << S_LANE1_CTC_STATUS)
+#define F_LANE1_CTC_STATUS V_LANE1_CTC_STATUS(1U)
+
+#define S_LANE0_CTC_STATUS 8
+#define V_LANE0_CTC_STATUS(x) ((x) << S_LANE0_CTC_STATUS)
+#define F_LANE0_CTC_STATUS V_LANE0_CTC_STATUS(1U)
+
+#define S_ALIGN_STATUS 4
+#define V_ALIGN_STATUS(x) ((x) << S_ALIGN_STATUS)
+#define F_ALIGN_STATUS V_ALIGN_STATUS(1U)
+
+#define S_LANE3_SYNC_STATUS 3
+#define V_LANE3_SYNC_STATUS(x) ((x) << S_LANE3_SYNC_STATUS)
+#define F_LANE3_SYNC_STATUS V_LANE3_SYNC_STATUS(1U)
+
+#define S_LANE2_SYNC_STATUS 2
+#define V_LANE2_SYNC_STATUS(x) ((x) << S_LANE2_SYNC_STATUS)
+#define F_LANE2_SYNC_STATUS V_LANE2_SYNC_STATUS(1U)
+
+#define S_LANE1_SYNC_STATUS 1
+#define V_LANE1_SYNC_STATUS(x) ((x) << S_LANE1_SYNC_STATUS)
+#define F_LANE1_SYNC_STATUS V_LANE1_SYNC_STATUS(1U)
+
+#define S_LANE0_SYNC_STATUS 0
+#define V_LANE0_SYNC_STATUS(x) ((x) << S_LANE0_SYNC_STATUS)
+#define F_LANE0_SYNC_STATUS V_LANE0_SYNC_STATUS(1U)
+
+#define A_XGMAC_PORT_PCSR_CTRL 0x1500
+
+#define S_RX_CLK_SPEED 7
+#define V_RX_CLK_SPEED(x) ((x) << S_RX_CLK_SPEED)
+#define F_RX_CLK_SPEED V_RX_CLK_SPEED(1U)
+
+#define S_SCRBYPASS 6
+#define V_SCRBYPASS(x) ((x) << S_SCRBYPASS)
+#define F_SCRBYPASS V_SCRBYPASS(1U)
+
+#define S_FECERRINDEN 5
+#define V_FECERRINDEN(x) ((x) << S_FECERRINDEN)
+#define F_FECERRINDEN V_FECERRINDEN(1U)
+
+#define S_FECEN 4
+#define V_FECEN(x) ((x) << S_FECEN)
+#define F_FECEN V_FECEN(1U)
+
+#define S_TESTSEL 2
+#define M_TESTSEL 0x3U
+#define V_TESTSEL(x) ((x) << S_TESTSEL)
+#define G_TESTSEL(x) (((x) >> S_TESTSEL) & M_TESTSEL)
+
+#define S_SCRLOOPEN 1
+#define V_SCRLOOPEN(x) ((x) << S_SCRLOOPEN)
+#define F_SCRLOOPEN V_SCRLOOPEN(1U)
+
+#define S_XGMIILOOPEN 0
+#define V_XGMIILOOPEN(x) ((x) << S_XGMIILOOPEN)
+#define F_XGMIILOOPEN V_XGMIILOOPEN(1U)
+
+#define A_XGMAC_PORT_PCSR_TXTEST_CTRL 0x1510
+
+#define S_TX_PRBS9_EN 4
+#define V_TX_PRBS9_EN(x) ((x) << S_TX_PRBS9_EN)
+#define F_TX_PRBS9_EN V_TX_PRBS9_EN(1U)
+
+#define S_TX_PRBS31_EN 3
+#define V_TX_PRBS31_EN(x) ((x) << S_TX_PRBS31_EN)
+#define F_TX_PRBS31_EN V_TX_PRBS31_EN(1U)
+
+#define S_TX_TST_DAT_SEL 2
+#define V_TX_TST_DAT_SEL(x) ((x) << S_TX_TST_DAT_SEL)
+#define F_TX_TST_DAT_SEL V_TX_TST_DAT_SEL(1U)
+
+#define S_TX_TST_SEL 1
+#define V_TX_TST_SEL(x) ((x) << S_TX_TST_SEL)
+#define F_TX_TST_SEL V_TX_TST_SEL(1U)
+
+#define S_TX_TST_EN 0
+#define V_TX_TST_EN(x) ((x) << S_TX_TST_EN)
+#define F_TX_TST_EN V_TX_TST_EN(1U)
+
+#define A_XGMAC_PORT_PCSR_TXTEST_SEEDA_LOWER 0x1514
+#define A_XGMAC_PORT_PCSR_TXTEST_SEEDA_UPPER 0x1518
+
+#define S_SEEDA_UPPER 0
+#define M_SEEDA_UPPER 0x3ffffffU
+#define V_SEEDA_UPPER(x) ((x) << S_SEEDA_UPPER)
+#define G_SEEDA_UPPER(x) (((x) >> S_SEEDA_UPPER) & M_SEEDA_UPPER)
+
+#define A_XGMAC_PORT_PCSR_TXTEST_SEEDB_LOWER 0x152c
+#define A_XGMAC_PORT_PCSR_TXTEST_SEEDB_UPPER 0x1530
+
+#define S_SEEDB_UPPER 0
+#define M_SEEDB_UPPER 0x3ffffffU
+#define V_SEEDB_UPPER(x) ((x) << S_SEEDB_UPPER)
+#define G_SEEDB_UPPER(x) (((x) >> S_SEEDB_UPPER) & M_SEEDB_UPPER)
+
+#define A_XGMAC_PORT_PCSR_RXTEST_CTRL 0x153c
+
+#define S_TPTER_CNT_RST 7
+#define V_TPTER_CNT_RST(x) ((x) << S_TPTER_CNT_RST)
+#define F_TPTER_CNT_RST V_TPTER_CNT_RST(1U)
+
+#define S_TEST_CNT_125US 6
+#define V_TEST_CNT_125US(x) ((x) << S_TEST_CNT_125US)
+#define F_TEST_CNT_125US V_TEST_CNT_125US(1U)
+
+#define S_TEST_CNT_PRE 5
+#define V_TEST_CNT_PRE(x) ((x) << S_TEST_CNT_PRE)
+#define F_TEST_CNT_PRE V_TEST_CNT_PRE(1U)
+
+#define S_BER_CNT_RST 4
+#define V_BER_CNT_RST(x) ((x) << S_BER_CNT_RST)
+#define F_BER_CNT_RST V_BER_CNT_RST(1U)
+
+#define S_ERR_BLK_CNT_RST 3
+#define V_ERR_BLK_CNT_RST(x) ((x) << S_ERR_BLK_CNT_RST)
+#define F_ERR_BLK_CNT_RST V_ERR_BLK_CNT_RST(1U)
+
+#define S_RX_PRBS31_EN 2
+#define V_RX_PRBS31_EN(x) ((x) << S_RX_PRBS31_EN)
+#define F_RX_PRBS31_EN V_RX_PRBS31_EN(1U)
+
+#define S_RX_TST_DAT_SEL 1
+#define V_RX_TST_DAT_SEL(x) ((x) << S_RX_TST_DAT_SEL)
+#define F_RX_TST_DAT_SEL V_RX_TST_DAT_SEL(1U)
+
+#define S_RX_TST_EN 0
+#define V_RX_TST_EN(x) ((x) << S_RX_TST_EN)
+#define F_RX_TST_EN V_RX_TST_EN(1U)
+
+#define A_XGMAC_PORT_PCSR_STATUS 0x1550
+
+#define S_ERR_BLK_CNT 16
+#define M_ERR_BLK_CNT 0xffU
+#define V_ERR_BLK_CNT(x) ((x) << S_ERR_BLK_CNT)
+#define G_ERR_BLK_CNT(x) (((x) >> S_ERR_BLK_CNT) & M_ERR_BLK_CNT)
+
+#define S_BER_COUNT 8
+#define M_BER_COUNT 0x3fU
+#define V_BER_COUNT(x) ((x) << S_BER_COUNT)
+#define G_BER_COUNT(x) (((x) >> S_BER_COUNT) & M_BER_COUNT)
+
+#define S_HI_BER 2
+#define V_HI_BER(x) ((x) << S_HI_BER)
+#define F_HI_BER V_HI_BER(1U)
+
+#define S_RX_FAULT 1
+#define V_RX_FAULT(x) ((x) << S_RX_FAULT)
+#define F_RX_FAULT V_RX_FAULT(1U)
+
+#define S_TX_FAULT 0
+#define V_TX_FAULT(x) ((x) << S_TX_FAULT)
+#define F_TX_FAULT V_TX_FAULT(1U)
+
+#define A_XGMAC_PORT_PCSR_TEST_STATUS 0x1554
+
+#define S_TPT_ERR_CNT 0
+#define M_TPT_ERR_CNT 0xffffU
+#define V_TPT_ERR_CNT(x) ((x) << S_TPT_ERR_CNT)
+#define G_TPT_ERR_CNT(x) (((x) >> S_TPT_ERR_CNT) & M_TPT_ERR_CNT)
+
+#define A_XGMAC_PORT_AN_CONTROL 0x1600
+
+#define S_SOFT_RESET 15
+#define V_SOFT_RESET(x) ((x) << S_SOFT_RESET)
+#define F_SOFT_RESET V_SOFT_RESET(1U)
+
+#define S_AN_ENABLE 12
+#define V_AN_ENABLE(x) ((x) << S_AN_ENABLE)
+#define F_AN_ENABLE V_AN_ENABLE(1U)
+
+#define S_RESTART_AN 9
+#define V_RESTART_AN(x) ((x) << S_RESTART_AN)
+#define F_RESTART_AN V_RESTART_AN(1U)
+
+#define A_XGMAC_PORT_AN_STATUS 0x1604
+
+#define S_NONCER_MATCH 31
+#define V_NONCER_MATCH(x) ((x) << S_NONCER_MATCH)
+#define F_NONCER_MATCH V_NONCER_MATCH(1U)
+
+#define S_PARALLEL_DET_FAULT 9
+#define V_PARALLEL_DET_FAULT(x) ((x) << S_PARALLEL_DET_FAULT)
+#define F_PARALLEL_DET_FAULT V_PARALLEL_DET_FAULT(1U)
+
+#define S_PAGE_RECEIVED 6
+#define V_PAGE_RECEIVED(x) ((x) << S_PAGE_RECEIVED)
+#define F_PAGE_RECEIVED V_PAGE_RECEIVED(1U)
+
+#define S_AN_COMPLETE 5
+#define V_AN_COMPLETE(x) ((x) << S_AN_COMPLETE)
+#define F_AN_COMPLETE V_AN_COMPLETE(1U)
+
+#define S_STAT_REMFAULT 4
+#define V_STAT_REMFAULT(x) ((x) << S_STAT_REMFAULT)
+#define F_STAT_REMFAULT V_STAT_REMFAULT(1U)
+
+#define S_AN_ABILITY 3
+#define V_AN_ABILITY(x) ((x) << S_AN_ABILITY)
+#define F_AN_ABILITY V_AN_ABILITY(1U)
+
+#define S_LINK_STATUS 2
+#define V_LINK_STATUS(x) ((x) << S_LINK_STATUS)
+#define F_LINK_STATUS V_LINK_STATUS(1U)
+
+#define S_PARTNER_AN_ABILITY 0
+#define V_PARTNER_AN_ABILITY(x) ((x) << S_PARTNER_AN_ABILITY)
+#define F_PARTNER_AN_ABILITY V_PARTNER_AN_ABILITY(1U)
+
+#define A_XGMAC_PORT_AN_ADVERTISEMENT 0x1608
+
+#define S_FEC_ENABLE 31
+#define V_FEC_ENABLE(x) ((x) << S_FEC_ENABLE)
+#define F_FEC_ENABLE V_FEC_ENABLE(1U)
+
+#define S_FEC_ABILITY 30
+#define V_FEC_ABILITY(x) ((x) << S_FEC_ABILITY)
+#define F_FEC_ABILITY V_FEC_ABILITY(1U)
+
+#define S_10GBASE_KR_CAPABLE 23
+#define V_10GBASE_KR_CAPABLE(x) ((x) << S_10GBASE_KR_CAPABLE)
+#define F_10GBASE_KR_CAPABLE V_10GBASE_KR_CAPABLE(1U)
+
+#define S_10GBASE_KX4_CAPABLE 22
+#define V_10GBASE_KX4_CAPABLE(x) ((x) << S_10GBASE_KX4_CAPABLE)
+#define F_10GBASE_KX4_CAPABLE V_10GBASE_KX4_CAPABLE(1U)
+
+#define S_1000BASE_KX_CAPABLE 21
+#define V_1000BASE_KX_CAPABLE(x) ((x) << S_1000BASE_KX_CAPABLE)
+#define F_1000BASE_KX_CAPABLE V_1000BASE_KX_CAPABLE(1U)
+
+#define S_TRANSMITTED_NONCE 16
+#define M_TRANSMITTED_NONCE 0x1fU
+#define V_TRANSMITTED_NONCE(x) ((x) << S_TRANSMITTED_NONCE)
+#define G_TRANSMITTED_NONCE(x) (((x) >> S_TRANSMITTED_NONCE) & M_TRANSMITTED_NONCE)
+
+#define S_NP 15
+#define V_NP(x) ((x) << S_NP)
+#define F_NP V_NP(1U)
+
+#define S_ACK 14
+#define V_ACK(x) ((x) << S_ACK)
+#define F_ACK V_ACK(1U)
+
+#define S_REMOTE_FAULT 13
+#define V_REMOTE_FAULT(x) ((x) << S_REMOTE_FAULT)
+#define F_REMOTE_FAULT V_REMOTE_FAULT(1U)
+
+#define S_ASM_DIR 11
+#define V_ASM_DIR(x) ((x) << S_ASM_DIR)
+#define F_ASM_DIR V_ASM_DIR(1U)
+
+#define S_PAUSE 10
+#define V_PAUSE(x) ((x) << S_PAUSE)
+#define F_PAUSE V_PAUSE(1U)
+
+#define S_ECHOED_NONCE 5
+#define M_ECHOED_NONCE 0x1fU
+#define V_ECHOED_NONCE(x) ((x) << S_ECHOED_NONCE)
+#define G_ECHOED_NONCE(x) (((x) >> S_ECHOED_NONCE) & M_ECHOED_NONCE)
+
+#define A_XGMAC_PORT_AN_LINK_PARTNER_ABILITY 0x160c
+
+#define S_SELECTOR_FIELD 0
+#define M_SELECTOR_FIELD 0x1fU
+#define V_SELECTOR_FIELD(x) ((x) << S_SELECTOR_FIELD)
+#define G_SELECTOR_FIELD(x) (((x) >> S_SELECTOR_FIELD) & M_SELECTOR_FIELD)
+
+#define A_XGMAC_PORT_AN_NP_LOWER_TRANSMIT 0x1610
+
+#define S_NP_INFO 16
+#define M_NP_INFO 0xffffU
+#define V_NP_INFO(x) ((x) << S_NP_INFO)
+#define G_NP_INFO(x) (((x) >> S_NP_INFO) & M_NP_INFO)
+
+#define S_NP_INDICATION 15
+#define V_NP_INDICATION(x) ((x) << S_NP_INDICATION)
+#define F_NP_INDICATION V_NP_INDICATION(1U)
+
+#define S_MESSAGE_PAGE 13
+#define V_MESSAGE_PAGE(x) ((x) << S_MESSAGE_PAGE)
+#define F_MESSAGE_PAGE V_MESSAGE_PAGE(1U)
+
+#define S_ACK_2 12
+#define V_ACK_2(x) ((x) << S_ACK_2)
+#define F_ACK_2 V_ACK_2(1U)
+
+#define S_TOGGLE 11
+#define V_TOGGLE(x) ((x) << S_TOGGLE)
+#define F_TOGGLE V_TOGGLE(1U)
+
+#define A_XGMAC_PORT_AN_NP_UPPER_TRANSMIT 0x1614
+
+#define S_NP_INFO_HI 0
+#define M_NP_INFO_HI 0xffffU
+#define V_NP_INFO_HI(x) ((x) << S_NP_INFO_HI)
+#define G_NP_INFO_HI(x) (((x) >> S_NP_INFO_HI) & M_NP_INFO_HI)
+
+#define A_XGMAC_PORT_AN_LP_NP_LOWER 0x1618
+#define A_XGMAC_PORT_AN_LP_NP_UPPER 0x161c
+#define A_XGMAC_PORT_AN_BACKPLANE_ETHERNET_STATUS 0x1624
+
+#define S_TX_PAUSE_OKAY 6
+#define V_TX_PAUSE_OKAY(x) ((x) << S_TX_PAUSE_OKAY)
+#define F_TX_PAUSE_OKAY V_TX_PAUSE_OKAY(1U)
+
+#define S_RX_PAUSE_OKAY 5
+#define V_RX_PAUSE_OKAY(x) ((x) << S_RX_PAUSE_OKAY)
+#define F_RX_PAUSE_OKAY V_RX_PAUSE_OKAY(1U)
+
+#define S_10GBASE_KR_FEC_NEG 4
+#define V_10GBASE_KR_FEC_NEG(x) ((x) << S_10GBASE_KR_FEC_NEG)
+#define F_10GBASE_KR_FEC_NEG V_10GBASE_KR_FEC_NEG(1U)
+
+#define S_10GBASE_KR_NEG 3
+#define V_10GBASE_KR_NEG(x) ((x) << S_10GBASE_KR_NEG)
+#define F_10GBASE_KR_NEG V_10GBASE_KR_NEG(1U)
+
+#define S_10GBASE_KX4_NEG 2
+#define V_10GBASE_KX4_NEG(x) ((x) << S_10GBASE_KX4_NEG)
+#define F_10GBASE_KX4_NEG V_10GBASE_KX4_NEG(1U)
+
+#define S_1000BASE_KX_NEG 1
+#define V_1000BASE_KX_NEG(x) ((x) << S_1000BASE_KX_NEG)
+#define F_1000BASE_KX_NEG V_1000BASE_KX_NEG(1U)
+
+#define S_BP_AN_ABILITY 0
+#define V_BP_AN_ABILITY(x) ((x) << S_BP_AN_ABILITY)
+#define F_BP_AN_ABILITY V_BP_AN_ABILITY(1U)
+
+#define A_XGMAC_PORT_AN_TX_NONCE_CONTROL 0x1628
+
+#define S_BYPASS_LFSR 15
+#define V_BYPASS_LFSR(x) ((x) << S_BYPASS_LFSR)
+#define F_BYPASS_LFSR V_BYPASS_LFSR(1U)
+
+#define S_LFSR_INIT 0
+#define M_LFSR_INIT 0x7fffU
+#define V_LFSR_INIT(x) ((x) << S_LFSR_INIT)
+#define G_LFSR_INIT(x) (((x) >> S_LFSR_INIT) & M_LFSR_INIT)
+
+#define A_XGMAC_PORT_AN_INTERRUPT_STATUS 0x162c
+
+#define S_NP_FROM_LP 3
+#define V_NP_FROM_LP(x) ((x) << S_NP_FROM_LP)
+#define F_NP_FROM_LP V_NP_FROM_LP(1U)
+
+#define S_PARALLELDETFAULTINT 2
+#define V_PARALLELDETFAULTINT(x) ((x) << S_PARALLELDETFAULTINT)
+#define F_PARALLELDETFAULTINT V_PARALLELDETFAULTINT(1U)
+
+#define S_BP_FROM_LP 1
+#define V_BP_FROM_LP(x) ((x) << S_BP_FROM_LP)
+#define F_BP_FROM_LP V_BP_FROM_LP(1U)
+
+#define S_PCS_AN_COMPLETE 0
+#define V_PCS_AN_COMPLETE(x) ((x) << S_PCS_AN_COMPLETE)
+#define F_PCS_AN_COMPLETE V_PCS_AN_COMPLETE(1U)
+
+#define A_XGMAC_PORT_AN_GENERIC_TIMER_TIMEOUT 0x1630
+
+#define S_GENERIC_TIMEOUT 0
+#define M_GENERIC_TIMEOUT 0x7fffffU
+#define V_GENERIC_TIMEOUT(x) ((x) << S_GENERIC_TIMEOUT)
+#define G_GENERIC_TIMEOUT(x) (((x) >> S_GENERIC_TIMEOUT) & M_GENERIC_TIMEOUT)
+
+#define A_XGMAC_PORT_AN_BREAK_LINK_TIMEOUT 0x1634
+
+#define S_BREAK_LINK_TIMEOUT 0
+#define M_BREAK_LINK_TIMEOUT 0xffffffU
+#define V_BREAK_LINK_TIMEOUT(x) ((x) << S_BREAK_LINK_TIMEOUT)
+#define G_BREAK_LINK_TIMEOUT(x) (((x) >> S_BREAK_LINK_TIMEOUT) & M_BREAK_LINK_TIMEOUT)
+
+#define A_XGMAC_PORT_AN_MODULE_ID 0x163c
+
+#define S_MODULE_ID 16
+#define M_MODULE_ID 0xffffU
+#define V_MODULE_ID(x) ((x) << S_MODULE_ID)
+#define G_MODULE_ID(x) (((x) >> S_MODULE_ID) & M_MODULE_ID)
+
+#define S_MODULE_REVISION 0
+#define M_MODULE_REVISION 0xffffU
+#define V_MODULE_REVISION(x) ((x) << S_MODULE_REVISION)
+#define G_MODULE_REVISION(x) (((x) >> S_MODULE_REVISION) & M_MODULE_REVISION)
+
+#define A_XGMAC_PORT_AE_RX_COEF_REQ 0x1700
+
+#define S_RXREQ_CPRE 13
+#define V_RXREQ_CPRE(x) ((x) << S_RXREQ_CPRE)
+#define F_RXREQ_CPRE V_RXREQ_CPRE(1U)
+
+#define S_RXREQ_CINIT 12
+#define V_RXREQ_CINIT(x) ((x) << S_RXREQ_CINIT)
+#define F_RXREQ_CINIT V_RXREQ_CINIT(1U)
+
+#define S_RXREQ_C0 4
+#define M_RXREQ_C0 0x3U
+#define V_RXREQ_C0(x) ((x) << S_RXREQ_C0)
+#define G_RXREQ_C0(x) (((x) >> S_RXREQ_C0) & M_RXREQ_C0)
+
+#define S_RXREQ_C1 2
+#define M_RXREQ_C1 0x3U
+#define V_RXREQ_C1(x) ((x) << S_RXREQ_C1)
+#define G_RXREQ_C1(x) (((x) >> S_RXREQ_C1) & M_RXREQ_C1)
+
+#define S_RXREQ_C2 0
+#define M_RXREQ_C2 0x3U
+#define V_RXREQ_C2(x) ((x) << S_RXREQ_C2)
+#define G_RXREQ_C2(x) (((x) >> S_RXREQ_C2) & M_RXREQ_C2)
+
+#define A_XGMAC_PORT_AE_RX_COEF_STAT 0x1704
+
+#define S_RXSTAT_RDY 15
+#define V_RXSTAT_RDY(x) ((x) << S_RXSTAT_RDY)
+#define F_RXSTAT_RDY V_RXSTAT_RDY(1U)
+
+#define S_RXSTAT_C0 4
+#define M_RXSTAT_C0 0x3U
+#define V_RXSTAT_C0(x) ((x) << S_RXSTAT_C0)
+#define G_RXSTAT_C0(x) (((x) >> S_RXSTAT_C0) & M_RXSTAT_C0)
+
+#define S_RXSTAT_C1 2
+#define M_RXSTAT_C1 0x3U
+#define V_RXSTAT_C1(x) ((x) << S_RXSTAT_C1)
+#define G_RXSTAT_C1(x) (((x) >> S_RXSTAT_C1) & M_RXSTAT_C1)
+
+#define S_RXSTAT_C2 0
+#define M_RXSTAT_C2 0x3U
+#define V_RXSTAT_C2(x) ((x) << S_RXSTAT_C2)
+#define G_RXSTAT_C2(x) (((x) >> S_RXSTAT_C2) & M_RXSTAT_C2)
+
+#define A_XGMAC_PORT_AE_TX_COEF_REQ 0x1708
+
+#define S_TXREQ_CPRE 13
+#define V_TXREQ_CPRE(x) ((x) << S_TXREQ_CPRE)
+#define F_TXREQ_CPRE V_TXREQ_CPRE(1U)
+
+#define S_TXREQ_CINIT 12
+#define V_TXREQ_CINIT(x) ((x) << S_TXREQ_CINIT)
+#define F_TXREQ_CINIT V_TXREQ_CINIT(1U)
+
+#define S_TXREQ_C0 4
+#define M_TXREQ_C0 0x3U
+#define V_TXREQ_C0(x) ((x) << S_TXREQ_C0)
+#define G_TXREQ_C0(x) (((x) >> S_TXREQ_C0) & M_TXREQ_C0)
+
+#define S_TXREQ_C1 2
+#define M_TXREQ_C1 0x3U
+#define V_TXREQ_C1(x) ((x) << S_TXREQ_C1)
+#define G_TXREQ_C1(x) (((x) >> S_TXREQ_C1) & M_TXREQ_C1)
+
+#define S_TXREQ_C2 0
+#define M_TXREQ_C2 0x3U
+#define V_TXREQ_C2(x) ((x) << S_TXREQ_C2)
+#define G_TXREQ_C2(x) (((x) >> S_TXREQ_C2) & M_TXREQ_C2)
+
+#define A_XGMAC_PORT_AE_TX_COEF_STAT 0x170c
+
+#define S_TXSTAT_RDY 15
+#define V_TXSTAT_RDY(x) ((x) << S_TXSTAT_RDY)
+#define F_TXSTAT_RDY V_TXSTAT_RDY(1U)
+
+#define S_TXSTAT_C0 4
+#define M_TXSTAT_C0 0x3U
+#define V_TXSTAT_C0(x) ((x) << S_TXSTAT_C0)
+#define G_TXSTAT_C0(x) (((x) >> S_TXSTAT_C0) & M_TXSTAT_C0)
+
+#define S_TXSTAT_C1 2
+#define M_TXSTAT_C1 0x3U
+#define V_TXSTAT_C1(x) ((x) << S_TXSTAT_C1)
+#define G_TXSTAT_C1(x) (((x) >> S_TXSTAT_C1) & M_TXSTAT_C1)
+
+#define S_TXSTAT_C2 0
+#define M_TXSTAT_C2 0x3U
+#define V_TXSTAT_C2(x) ((x) << S_TXSTAT_C2)
+#define G_TXSTAT_C2(x) (((x) >> S_TXSTAT_C2) & M_TXSTAT_C2)
+
+#define A_XGMAC_PORT_AE_REG_MODE 0x1710
+
+#define S_MAN_DEC 4
+#define M_MAN_DEC 0x3U
+#define V_MAN_DEC(x) ((x) << S_MAN_DEC)
+#define G_MAN_DEC(x) (((x) >> S_MAN_DEC) & M_MAN_DEC)
+
+#define S_MANUAL_RDY 3
+#define V_MANUAL_RDY(x) ((x) << S_MANUAL_RDY)
+#define F_MANUAL_RDY V_MANUAL_RDY(1U)
+
+#define S_MWT_DISABLE 2
+#define V_MWT_DISABLE(x) ((x) << S_MWT_DISABLE)
+#define F_MWT_DISABLE V_MWT_DISABLE(1U)
+
+#define S_MDIO_OVR 1
+#define V_MDIO_OVR(x) ((x) << S_MDIO_OVR)
+#define F_MDIO_OVR V_MDIO_OVR(1U)
+
+#define S_STICKY_MODE 0
+#define V_STICKY_MODE(x) ((x) << S_STICKY_MODE)
+#define F_STICKY_MODE V_STICKY_MODE(1U)
+
+#define A_XGMAC_PORT_AE_PRBS_CTL 0x1714
+
+#define S_PRBS_CHK_ERRCNT 8
+#define M_PRBS_CHK_ERRCNT 0xffU
+#define V_PRBS_CHK_ERRCNT(x) ((x) << S_PRBS_CHK_ERRCNT)
+#define G_PRBS_CHK_ERRCNT(x) (((x) >> S_PRBS_CHK_ERRCNT) & M_PRBS_CHK_ERRCNT)
+
+#define S_PRBS_SYNCCNT 5
+#define M_PRBS_SYNCCNT 0x7U
+#define V_PRBS_SYNCCNT(x) ((x) << S_PRBS_SYNCCNT)
+#define G_PRBS_SYNCCNT(x) (((x) >> S_PRBS_SYNCCNT) & M_PRBS_SYNCCNT)
+
+#define S_PRBS_CHK_SYNC 4
+#define V_PRBS_CHK_SYNC(x) ((x) << S_PRBS_CHK_SYNC)
+#define F_PRBS_CHK_SYNC V_PRBS_CHK_SYNC(1U)
+
+#define S_PRBS_CHK_RST 3
+#define V_PRBS_CHK_RST(x) ((x) << S_PRBS_CHK_RST)
+#define F_PRBS_CHK_RST V_PRBS_CHK_RST(1U)
+
+#define S_PRBS_CHK_OFF 2
+#define V_PRBS_CHK_OFF(x) ((x) << S_PRBS_CHK_OFF)
+#define F_PRBS_CHK_OFF V_PRBS_CHK_OFF(1U)
+
+#define S_PRBS_GEN_FRCERR 1
+#define V_PRBS_GEN_FRCERR(x) ((x) << S_PRBS_GEN_FRCERR)
+#define F_PRBS_GEN_FRCERR V_PRBS_GEN_FRCERR(1U)
+
+#define S_PRBS_GEN_OFF 0
+#define V_PRBS_GEN_OFF(x) ((x) << S_PRBS_GEN_OFF)
+#define F_PRBS_GEN_OFF V_PRBS_GEN_OFF(1U)
+
+#define A_XGMAC_PORT_AE_FSM_CTL 0x1718
+
+#define S_FSM_TR_LCL 14
+#define V_FSM_TR_LCL(x) ((x) << S_FSM_TR_LCL)
+#define F_FSM_TR_LCL V_FSM_TR_LCL(1U)
+
+#define S_FSM_GDMRK 11
+#define M_FSM_GDMRK 0x7U
+#define V_FSM_GDMRK(x) ((x) << S_FSM_GDMRK)
+#define G_FSM_GDMRK(x) (((x) >> S_FSM_GDMRK) & M_FSM_GDMRK)
+
+#define S_FSM_BADMRK 8
+#define M_FSM_BADMRK 0x7U
+#define V_FSM_BADMRK(x) ((x) << S_FSM_BADMRK)
+#define G_FSM_BADMRK(x) (((x) >> S_FSM_BADMRK) & M_FSM_BADMRK)
+
+#define S_FSM_TR_FAIL 7
+#define V_FSM_TR_FAIL(x) ((x) << S_FSM_TR_FAIL)
+#define F_FSM_TR_FAIL V_FSM_TR_FAIL(1U)
+
+#define S_FSM_TR_ACT 6
+#define V_FSM_TR_ACT(x) ((x) << S_FSM_TR_ACT)
+#define F_FSM_TR_ACT V_FSM_TR_ACT(1U)
+
+#define S_FSM_FRM_LCK 5
+#define V_FSM_FRM_LCK(x) ((x) << S_FSM_FRM_LCK)
+#define F_FSM_FRM_LCK V_FSM_FRM_LCK(1U)
+
+#define S_FSM_TR_COMP 4
+#define V_FSM_TR_COMP(x) ((x) << S_FSM_TR_COMP)
+#define F_FSM_TR_COMP V_FSM_TR_COMP(1U)
+
+#define S_MC_RX_RDY 3
+#define V_MC_RX_RDY(x) ((x) << S_MC_RX_RDY)
+#define F_MC_RX_RDY V_MC_RX_RDY(1U)
+
+#define S_FSM_CU_DIS 2
+#define V_FSM_CU_DIS(x) ((x) << S_FSM_CU_DIS)
+#define F_FSM_CU_DIS V_FSM_CU_DIS(1U)
+
+#define S_FSM_TR_RST 1
+#define V_FSM_TR_RST(x) ((x) << S_FSM_TR_RST)
+#define F_FSM_TR_RST V_FSM_TR_RST(1U)
+
+#define S_FSM_TR_EN 0
+#define V_FSM_TR_EN(x) ((x) << S_FSM_TR_EN)
+#define F_FSM_TR_EN V_FSM_TR_EN(1U)
+
+#define A_XGMAC_PORT_AE_FSM_STATE 0x171c
+
+#define S_CC2FSM_STATE 13
+#define M_CC2FSM_STATE 0x7U
+#define V_CC2FSM_STATE(x) ((x) << S_CC2FSM_STATE)
+#define G_CC2FSM_STATE(x) (((x) >> S_CC2FSM_STATE) & M_CC2FSM_STATE)
+
+#define S_CC1FSM_STATE 10
+#define M_CC1FSM_STATE 0x7U
+#define V_CC1FSM_STATE(x) ((x) << S_CC1FSM_STATE)
+#define G_CC1FSM_STATE(x) (((x) >> S_CC1FSM_STATE) & M_CC1FSM_STATE)
+
+#define S_CC0FSM_STATE 7
+#define M_CC0FSM_STATE 0x7U
+#define V_CC0FSM_STATE(x) ((x) << S_CC0FSM_STATE)
+#define G_CC0FSM_STATE(x) (((x) >> S_CC0FSM_STATE) & M_CC0FSM_STATE)
+
+#define S_FLFSM_STATE 4
+#define M_FLFSM_STATE 0x7U
+#define V_FLFSM_STATE(x) ((x) << S_FLFSM_STATE)
+#define G_FLFSM_STATE(x) (((x) >> S_FLFSM_STATE) & M_FLFSM_STATE)
+
+#define S_TFSM_STATE 0
+#define M_TFSM_STATE 0x7U
+#define V_TFSM_STATE(x) ((x) << S_TFSM_STATE)
+#define G_TFSM_STATE(x) (((x) >> S_TFSM_STATE) & M_TFSM_STATE)
+
+#define A_XGMAC_PORT_AE_TX_DIS 0x1780
+
+#define S_PMD_TX_DIS 0
+#define V_PMD_TX_DIS(x) ((x) << S_PMD_TX_DIS)
+#define F_PMD_TX_DIS V_PMD_TX_DIS(1U)
+
+#define A_XGMAC_PORT_AE_KR_CTRL 0x1784
+
+#define S_TRAINING_ENABLE 1
+#define V_TRAINING_ENABLE(x) ((x) << S_TRAINING_ENABLE)
+#define F_TRAINING_ENABLE V_TRAINING_ENABLE(1U)
+
+#define S_RESTART_TRAINING 0
+#define V_RESTART_TRAINING(x) ((x) << S_RESTART_TRAINING)
+#define F_RESTART_TRAINING V_RESTART_TRAINING(1U)
+
+#define A_XGMAC_PORT_AE_RX_SIGDET 0x1788
+
+#define S_PMD_SIGDET 0
+#define V_PMD_SIGDET(x) ((x) << S_PMD_SIGDET)
+#define F_PMD_SIGDET V_PMD_SIGDET(1U)
+
+#define A_XGMAC_PORT_AE_KR_STATUS 0x178c
+
+#define S_TRAINING_FAILURE 3
+#define V_TRAINING_FAILURE(x) ((x) << S_TRAINING_FAILURE)
+#define F_TRAINING_FAILURE V_TRAINING_FAILURE(1U)
+
+#define S_TRAINING 2
+#define V_TRAINING(x) ((x) << S_TRAINING)
+#define F_TRAINING V_TRAINING(1U)
+
+#define S_FRAME_LOCK 1
+#define V_FRAME_LOCK(x) ((x) << S_FRAME_LOCK)
+#define F_FRAME_LOCK V_FRAME_LOCK(1U)
+
+#define S_RX_TRAINED 0
+#define V_RX_TRAINED(x) ((x) << S_RX_TRAINED)
+#define F_RX_TRAINED V_RX_TRAINED(1U)
+
+#define A_XGMAC_PORT_HSS_TXA_MODE_CFG 0x1800
+
+#define S_BWSEL 2
+#define M_BWSEL 0x3U
+#define V_BWSEL(x) ((x) << S_BWSEL)
+#define G_BWSEL(x) (((x) >> S_BWSEL) & M_BWSEL)
+
+#define S_RTSEL 0
+#define M_RTSEL 0x3U
+#define V_RTSEL(x) ((x) << S_RTSEL)
+#define G_RTSEL(x) (((x) >> S_RTSEL) & M_RTSEL)
+
+#define A_XGMAC_PORT_HSS_TXA_TEST_CTRL 0x1804
+
+#define S_TWDP 5
+#define V_TWDP(x) ((x) << S_TWDP)
+#define F_TWDP V_TWDP(1U)
+
+#define S_TPGRST 4
+#define V_TPGRST(x) ((x) << S_TPGRST)
+#define F_TPGRST V_TPGRST(1U)
+
+#define S_TPGEN 3
+#define V_TPGEN(x) ((x) << S_TPGEN)
+#define F_TPGEN V_TPGEN(1U)
+
+#define S_TPSEL 0
+#define M_TPSEL 0x7U
+#define V_TPSEL(x) ((x) << S_TPSEL)
+#define G_TPSEL(x) (((x) >> S_TPSEL) & M_TPSEL)
+
+#define A_XGMAC_PORT_HSS_TXA_COEFF_CTRL 0x1808
+
+#define S_AEINVPOL 6
+#define V_AEINVPOL(x) ((x) << S_AEINVPOL)
+#define F_AEINVPOL V_AEINVPOL(1U)
+
+#define S_AESOURCE 5
+#define V_AESOURCE(x) ((x) << S_AESOURCE)
+#define F_AESOURCE V_AESOURCE(1U)
+
+#define S_EQMODE 4
+#define V_EQMODE(x) ((x) << S_EQMODE)
+#define F_EQMODE V_EQMODE(1U)
+
+#define S_OCOEF 3
+#define V_OCOEF(x) ((x) << S_OCOEF)
+#define F_OCOEF V_OCOEF(1U)
+
+#define S_COEFRST 2
+#define V_COEFRST(x) ((x) << S_COEFRST)
+#define F_COEFRST V_COEFRST(1U)
+
+#define S_SPEN 1
+#define V_SPEN(x) ((x) << S_SPEN)
+#define F_SPEN V_SPEN(1U)
+
+#define S_ALOAD 0
+#define V_ALOAD(x) ((x) << S_ALOAD)
+#define F_ALOAD V_ALOAD(1U)
+
+#define A_XGMAC_PORT_HSS_TXA_DRIVER_MODE 0x180c
+
+#define S_DRVOFFT 5
+#define V_DRVOFFT(x) ((x) << S_DRVOFFT)
+#define F_DRVOFFT V_DRVOFFT(1U)
+
+#define S_SLEW 2
+#define M_SLEW 0x7U
+#define V_SLEW(x) ((x) << S_SLEW)
+#define G_SLEW(x) (((x) >> S_SLEW) & M_SLEW)
+
+#define S_FFE 0
+#define M_FFE 0x3U
+#define V_FFE(x) ((x) << S_FFE)
+#define G_FFE(x) (((x) >> S_FFE) & M_FFE)
+
+#define A_XGMAC_PORT_HSS_TXA_DRIVER_OVR_CTRL 0x1810
+
+#define S_VLINC 7
+#define V_VLINC(x) ((x) << S_VLINC)
+#define F_VLINC V_VLINC(1U)
+
+#define S_VLDEC 6
+#define V_VLDEC(x) ((x) << S_VLDEC)
+#define F_VLDEC V_VLDEC(1U)
+
+#define S_LOPWR 5
+#define V_LOPWR(x) ((x) << S_LOPWR)
+#define F_LOPWR V_LOPWR(1U)
+
+#define S_TDMEN 4
+#define V_TDMEN(x) ((x) << S_TDMEN)
+#define F_TDMEN V_TDMEN(1U)
+
+#define S_DCCEN 3
+#define V_DCCEN(x) ((x) << S_DCCEN)
+#define F_DCCEN V_DCCEN(1U)
+
+#define S_VHSEL 2
+#define V_VHSEL(x) ((x) << S_VHSEL)
+#define F_VHSEL V_VHSEL(1U)
+
+#define S_IDAC 0
+#define M_IDAC 0x3U
+#define V_IDAC(x) ((x) << S_IDAC)
+#define G_IDAC(x) (((x) >> S_IDAC) & M_IDAC)
+
+#define A_XGMAC_PORT_HSS_TXA_TDM_BIASGEN_STANDBY_TIMER 0x1814
+
+#define S_STBY 0
+#define M_STBY 0xffffU
+#define V_STBY(x) ((x) << S_STBY)
+#define G_STBY(x) (((x) >> S_STBY) & M_STBY)
+
+#define A_XGMAC_PORT_HSS_TXA_TDM_BIASGEN_PWRON_TIMER 0x1818
+
+#define S_PON 0
+#define M_PON 0xffffU
+#define V_PON(x) ((x) << S_PON)
+#define G_PON(x) (((x) >> S_PON) & M_PON)
+
+#define A_XGMAC_PORT_HSS_TXA_TAP0_COEFF 0x1820
+
+#define S_NXTT0 0
+#define M_NXTT0 0xfU
+#define V_NXTT0(x) ((x) << S_NXTT0)
+#define G_NXTT0(x) (((x) >> S_NXTT0) & M_NXTT0)
+
+#define A_XGMAC_PORT_HSS_TXA_TAP1_COEFF 0x1824
+
+#define S_NXTT1 0
+#define M_NXTT1 0x3fU
+#define V_NXTT1(x) ((x) << S_NXTT1)
+#define G_NXTT1(x) (((x) >> S_NXTT1) & M_NXTT1)
+
+#define A_XGMAC_PORT_HSS_TXA_TAP2_COEFF 0x1828
+
+#define S_NXTT2 0
+#define M_NXTT2 0x1fU
+#define V_NXTT2(x) ((x) << S_NXTT2)
+#define G_NXTT2(x) (((x) >> S_NXTT2) & M_NXTT2)
+
+#define A_XGMAC_PORT_HSS_TXA_PWR 0x1830
+
+#define S_TXPWR 0
+#define M_TXPWR 0x7fU
+#define V_TXPWR(x) ((x) << S_TXPWR)
+#define G_TXPWR(x) (((x) >> S_TXPWR) & M_TXPWR)
+
+#define A_XGMAC_PORT_HSS_TXA_POLARITY 0x1834
+
+#define S_TXPOL 4
+#define M_TXPOL 0x7U
+#define V_TXPOL(x) ((x) << S_TXPOL)
+#define G_TXPOL(x) (((x) >> S_TXPOL) & M_TXPOL)
+
+#define S_NTXPOL 0
+#define M_NTXPOL 0x7U
+#define V_NTXPOL(x) ((x) << S_NTXPOL)
+#define G_NTXPOL(x) (((x) >> S_NTXPOL) & M_NTXPOL)
+
+#define A_XGMAC_PORT_HSS_TXA_8023AP_AE_CMD 0x1838
+
+#define S_CXPRESET 13
+#define V_CXPRESET(x) ((x) << S_CXPRESET)
+#define F_CXPRESET V_CXPRESET(1U)
+
+#define S_CXINIT 12
+#define V_CXINIT(x) ((x) << S_CXINIT)
+#define F_CXINIT V_CXINIT(1U)
+
+#define S_C2UPDT 4
+#define M_C2UPDT 0x3U
+#define V_C2UPDT(x) ((x) << S_C2UPDT)
+#define G_C2UPDT(x) (((x) >> S_C2UPDT) & M_C2UPDT)
+
+#define S_C1UPDT 2
+#define M_C1UPDT 0x3U
+#define V_C1UPDT(x) ((x) << S_C1UPDT)
+#define G_C1UPDT(x) (((x) >> S_C1UPDT) & M_C1UPDT)
+
+#define S_C0UPDT 0
+#define M_C0UPDT 0x3U
+#define V_C0UPDT(x) ((x) << S_C0UPDT)
+#define G_C0UPDT(x) (((x) >> S_C0UPDT) & M_C0UPDT)
+
+#define A_XGMAC_PORT_HSS_TXA_8023AP_AE_STATUS 0x183c
+
+#define S_C2STAT 4
+#define M_C2STAT 0x3U
+#define V_C2STAT(x) ((x) << S_C2STAT)
+#define G_C2STAT(x) (((x) >> S_C2STAT) & M_C2STAT)
+
+#define S_C1STAT 2
+#define M_C1STAT 0x3U
+#define V_C1STAT(x) ((x) << S_C1STAT)
+#define G_C1STAT(x) (((x) >> S_C1STAT) & M_C1STAT)
+
+#define S_C0STAT 0
+#define M_C0STAT 0x3U
+#define V_C0STAT(x) ((x) << S_C0STAT)
+#define G_C0STAT(x) (((x) >> S_C0STAT) & M_C0STAT)
+
+#define A_XGMAC_PORT_HSS_TXA_TAP0_IDAC_OVR 0x1840
+
+#define S_NIDAC0 0
+#define M_NIDAC0 0x1fU
+#define V_NIDAC0(x) ((x) << S_NIDAC0)
+#define G_NIDAC0(x) (((x) >> S_NIDAC0) & M_NIDAC0)
+
+#define A_XGMAC_PORT_HSS_TXA_TAP1_IDAC_OVR 0x1844
+
+#define S_NIDAC1 0
+#define M_NIDAC1 0x7fU
+#define V_NIDAC1(x) ((x) << S_NIDAC1)
+#define G_NIDAC1(x) (((x) >> S_NIDAC1) & M_NIDAC1)
+
+#define A_XGMAC_PORT_HSS_TXA_TAP2_IDAC_OVR 0x1848
+
+#define S_NIDAC2 0
+#define M_NIDAC2 0x3fU
+#define V_NIDAC2(x) ((x) << S_NIDAC2)
+#define G_NIDAC2(x) (((x) >> S_NIDAC2) & M_NIDAC2)
+
+#define A_XGMAC_PORT_HSS_TXA_PWR_DAC_OVR 0x1850
+
+#define S_OPEN 7
+#define V_OPEN(x) ((x) << S_OPEN)
+#define F_OPEN V_OPEN(1U)
+
+#define S_OPVAL 0
+#define M_OPVAL 0x1fU
+#define V_OPVAL(x) ((x) << S_OPVAL)
+#define G_OPVAL(x) (((x) >> S_OPVAL) & M_OPVAL)
+
+#define A_XGMAC_PORT_HSS_TXA_PWR_DAC 0x1854
+
+#define S_PDAC 0
+#define M_PDAC 0x1fU
+#define V_PDAC(x) ((x) << S_PDAC)
+#define G_PDAC(x) (((x) >> S_PDAC) & M_PDAC)
+
+#define A_XGMAC_PORT_HSS_TXA_TAP0_IDAC_APP 0x1860
+
+#define S_AIDAC0 0
+#define M_AIDAC0 0x1fU
+#define V_AIDAC0(x) ((x) << S_AIDAC0)
+#define G_AIDAC0(x) (((x) >> S_AIDAC0) & M_AIDAC0)
+
+#define A_XGMAC_PORT_HSS_TXA_TAP1_IDAC_APP 0x1864
+
+#define S_AIDAC1 0
+#define M_AIDAC1 0x1fU
+#define V_AIDAC1(x) ((x) << S_AIDAC1)
+#define G_AIDAC1(x) (((x) >> S_AIDAC1) & M_AIDAC1)
+
+#define A_XGMAC_PORT_HSS_TXA_TAP2_IDAC_APP 0x1868
+
+#define S_TXA_AIDAC2 0
+#define M_TXA_AIDAC2 0x1fU
+#define V_TXA_AIDAC2(x) ((x) << S_TXA_AIDAC2)
+#define G_TXA_AIDAC2(x) (((x) >> S_TXA_AIDAC2) & M_TXA_AIDAC2)
+
+#define A_XGMAC_PORT_HSS_TXA_SEG_DIS_APP 0x1870
+
+#define S_CURSD 0
+#define M_CURSD 0x7fU
+#define V_CURSD(x) ((x) << S_CURSD)
+#define G_CURSD(x) (((x) >> S_CURSD) & M_CURSD)
+
+#define A_XGMAC_PORT_HSS_TXA_EXT_ADDR_DATA 0x1878
+
+#define S_XDATA 0
+#define M_XDATA 0xffffU
+#define V_XDATA(x) ((x) << S_XDATA)
+#define G_XDATA(x) (((x) >> S_XDATA) & M_XDATA)
+
+#define A_XGMAC_PORT_HSS_TXA_EXT_ADDR 0x187c
+
+#define S_EXTADDR 1
+#define M_EXTADDR 0x1fU
+#define V_EXTADDR(x) ((x) << S_EXTADDR)
+#define G_EXTADDR(x) (((x) >> S_EXTADDR) & M_EXTADDR)
+
+#define S_XWR 0
+#define V_XWR(x) ((x) << S_XWR)
+#define F_XWR V_XWR(1U)
+
+#define A_XGMAC_PORT_HSS_TXB_MODE_CFG 0x1880
+#define A_XGMAC_PORT_HSS_TXB_TEST_CTRL 0x1884
+#define A_XGMAC_PORT_HSS_TXB_COEFF_CTRL 0x1888
+#define A_XGMAC_PORT_HSS_TXB_DRIVER_MODE 0x188c
+#define A_XGMAC_PORT_HSS_TXB_DRIVER_OVR_CTRL 0x1890
+#define A_XGMAC_PORT_HSS_TXB_TDM_BIASGEN_STANDBY_TIMER 0x1894
+#define A_XGMAC_PORT_HSS_TXB_TDM_BIASGEN_PWRON_TIMER 0x1898
+#define A_XGMAC_PORT_HSS_TXB_TAP0_COEFF 0x18a0
+#define A_XGMAC_PORT_HSS_TXB_TAP1_COEFF 0x18a4
+#define A_XGMAC_PORT_HSS_TXB_TAP2_COEFF 0x18a8
+#define A_XGMAC_PORT_HSS_TXB_PWR 0x18b0
+#define A_XGMAC_PORT_HSS_TXB_POLARITY 0x18b4
+#define A_XGMAC_PORT_HSS_TXB_8023AP_AE_CMD 0x18b8
+#define A_XGMAC_PORT_HSS_TXB_8023AP_AE_STATUS 0x18bc
+#define A_XGMAC_PORT_HSS_TXB_TAP0_IDAC_OVR 0x18c0
+#define A_XGMAC_PORT_HSS_TXB_TAP1_IDAC_OVR 0x18c4
+#define A_XGMAC_PORT_HSS_TXB_TAP2_IDAC_OVR 0x18c8
+#define A_XGMAC_PORT_HSS_TXB_PWR_DAC_OVR 0x18d0
+#define A_XGMAC_PORT_HSS_TXB_PWR_DAC 0x18d4
+#define A_XGMAC_PORT_HSS_TXB_TAP0_IDAC_APP 0x18e0
+#define A_XGMAC_PORT_HSS_TXB_TAP1_IDAC_APP 0x18e4
+#define A_XGMAC_PORT_HSS_TXB_TAP2_IDAC_APP 0x18e8
+
+#define S_AIDAC2 0
+#define M_AIDAC2 0x3fU
+#define V_AIDAC2(x) ((x) << S_AIDAC2)
+#define G_AIDAC2(x) (((x) >> S_AIDAC2) & M_AIDAC2)
+
+#define A_XGMAC_PORT_HSS_TXB_SEG_DIS_APP 0x18f0
+#define A_XGMAC_PORT_HSS_TXB_EXT_ADDR_DATA 0x18f8
+#define A_XGMAC_PORT_HSS_TXB_EXT_ADDR 0x18fc
+
+#define S_XADDR 2
+#define M_XADDR 0xfU
+#define V_XADDR(x) ((x) << S_XADDR)
+#define G_XADDR(x) (((x) >> S_XADDR) & M_XADDR)
+
+#define A_XGMAC_PORT_HSS_RXA_CFG_MODE 0x1900
+
+#define S_BW810 8
+#define V_BW810(x) ((x) << S_BW810)
+#define F_BW810 V_BW810(1U)
+
+#define S_AUXCLK 7
+#define V_AUXCLK(x) ((x) << S_AUXCLK)
+#define F_AUXCLK V_AUXCLK(1U)
+
+#define S_DMSEL 4
+#define M_DMSEL 0x7U
+#define V_DMSEL(x) ((x) << S_DMSEL)
+#define G_DMSEL(x) (((x) >> S_DMSEL) & M_DMSEL)
+
+#define A_XGMAC_PORT_HSS_RXA_TEST_CTRL 0x1904
+
+#define S_RCLKEN 15
+#define V_RCLKEN(x) ((x) << S_RCLKEN)
+#define F_RCLKEN V_RCLKEN(1U)
+
+#define S_RRATE 13
+#define M_RRATE 0x3U
+#define V_RRATE(x) ((x) << S_RRATE)
+#define G_RRATE(x) (((x) >> S_RRATE) & M_RRATE)
+
+#define S_LBFRCERROR 10
+#define V_LBFRCERROR(x) ((x) << S_LBFRCERROR)
+#define F_LBFRCERROR V_LBFRCERROR(1U)
+
+#define S_LBERROR 9
+#define V_LBERROR(x) ((x) << S_LBERROR)
+#define F_LBERROR V_LBERROR(1U)
+
+#define S_LBSYNC 8
+#define V_LBSYNC(x) ((x) << S_LBSYNC)
+#define F_LBSYNC V_LBSYNC(1U)
+
+#define S_FDWRAPCLK 7
+#define V_FDWRAPCLK(x) ((x) << S_FDWRAPCLK)
+#define F_FDWRAPCLK V_FDWRAPCLK(1U)
+
+#define S_FDWRAP 6
+#define V_FDWRAP(x) ((x) << S_FDWRAP)
+#define F_FDWRAP V_FDWRAP(1U)
+
+#define S_PRST 4
+#define V_PRST(x) ((x) << S_PRST)
+#define F_PRST V_PRST(1U)
+
+#define S_PCHKEN 3
+#define V_PCHKEN(x) ((x) << S_PCHKEN)
+#define F_PCHKEN V_PCHKEN(1U)
+
+#define S_PRBSSEL 0
+#define M_PRBSSEL 0x7U
+#define V_PRBSSEL(x) ((x) << S_PRBSSEL)
+#define G_PRBSSEL(x) (((x) >> S_PRBSSEL) & M_PRBSSEL)
+
+#define A_XGMAC_PORT_HSS_RXA_PH_ROTATOR_CTRL 0x1908
+
+#define S_FTHROT 12
+#define M_FTHROT 0xfU
+#define V_FTHROT(x) ((x) << S_FTHROT)
+#define G_FTHROT(x) (((x) >> S_FTHROT) & M_FTHROT)
+
+#define S_RTHROT 11
+#define V_RTHROT(x) ((x) << S_RTHROT)
+#define F_RTHROT V_RTHROT(1U)
+
+#define S_FILTCTL 7
+#define M_FILTCTL 0xfU
+#define V_FILTCTL(x) ((x) << S_FILTCTL)
+#define G_FILTCTL(x) (((x) >> S_FILTCTL) & M_FILTCTL)
+
+#define S_RSRVO 5
+#define M_RSRVO 0x3U
+#define V_RSRVO(x) ((x) << S_RSRVO)
+#define G_RSRVO(x) (((x) >> S_RSRVO) & M_RSRVO)
+
+#define S_EXTEL 4
+#define V_EXTEL(x) ((x) << S_EXTEL)
+#define F_EXTEL V_EXTEL(1U)
+
+#define S_RSTONSTUCK 3
+#define V_RSTONSTUCK(x) ((x) << S_RSTONSTUCK)
+#define F_RSTONSTUCK V_RSTONSTUCK(1U)
+
+#define S_FREEZEFW 2
+#define V_FREEZEFW(x) ((x) << S_FREEZEFW)
+#define F_FREEZEFW V_FREEZEFW(1U)
+
+#define S_RESETFW 1
+#define V_RESETFW(x) ((x) << S_RESETFW)
+#define F_RESETFW V_RESETFW(1U)
+
+#define S_SSCENABLE 0
+#define V_SSCENABLE(x) ((x) << S_SSCENABLE)
+#define F_SSCENABLE V_SSCENABLE(1U)
+
+#define A_XGMAC_PORT_HSS_RXA_PH_ROTATOR_OFFSET_CTRL 0x190c
+
+#define S_RSNP 11
+#define V_RSNP(x) ((x) << S_RSNP)
+#define F_RSNP V_RSNP(1U)
+
+#define S_TSOEN 10
+#define V_TSOEN(x) ((x) << S_TSOEN)
+#define F_TSOEN V_TSOEN(1U)
+
+#define S_OFFEN 9
+#define V_OFFEN(x) ((x) << S_OFFEN)
+#define F_OFFEN V_OFFEN(1U)
+
+#define S_TMSCAL 7
+#define M_TMSCAL 0x3U
+#define V_TMSCAL(x) ((x) << S_TMSCAL)
+#define G_TMSCAL(x) (((x) >> S_TMSCAL) & M_TMSCAL)
+
+#define S_APADJ 6
+#define V_APADJ(x) ((x) << S_APADJ)
+#define F_APADJ V_APADJ(1U)
+
+#define S_RSEL 5
+#define V_RSEL(x) ((x) << S_RSEL)
+#define F_RSEL V_RSEL(1U)
+
+#define S_PHOFFS 0
+#define M_PHOFFS 0x1fU
+#define V_PHOFFS(x) ((x) << S_PHOFFS)
+#define G_PHOFFS(x) (((x) >> S_PHOFFS) & M_PHOFFS)
+
+#define A_XGMAC_PORT_HSS_RXA_PH_ROTATOR_POSITION1 0x1910
+
+#define S_ROT0A 8
+#define M_ROT0A 0x3fU
+#define V_ROT0A(x) ((x) << S_ROT0A)
+#define G_ROT0A(x) (((x) >> S_ROT0A) & M_ROT0A)
+
+#define S_RTSEL_SNAPSHOT 0
+#define M_RTSEL_SNAPSHOT 0x3fU
+#define V_RTSEL_SNAPSHOT(x) ((x) << S_RTSEL_SNAPSHOT)
+#define G_RTSEL_SNAPSHOT(x) (((x) >> S_RTSEL_SNAPSHOT) & M_RTSEL_SNAPSHOT)
+
+#define A_XGMAC_PORT_HSS_RXA_PH_ROTATOR_POSITION2 0x1914
+
+#define S_ROT90 0
+#define M_ROT90 0x3fU
+#define V_ROT90(x) ((x) << S_ROT90)
+#define G_ROT90(x) (((x) >> S_ROT90) & M_ROT90)
+
+#define A_XGMAC_PORT_HSS_RXA_PH_ROTATOR_STATIC_PH_OFFSET 0x1918
+
+#define S_RCALER 15
+#define V_RCALER(x) ((x) << S_RCALER)
+#define F_RCALER V_RCALER(1U)
+
+#define S_RAOOFF 10
+#define M_RAOOFF 0x1fU
+#define V_RAOOFF(x) ((x) << S_RAOOFF)
+#define G_RAOOFF(x) (((x) >> S_RAOOFF) & M_RAOOFF)
+
+#define S_RAEOFF 5
+#define M_RAEOFF 0x1fU
+#define V_RAEOFF(x) ((x) << S_RAEOFF)
+#define G_RAEOFF(x) (((x) >> S_RAEOFF) & M_RAEOFF)
+
+#define S_RDOFF 0
+#define M_RDOFF 0x1fU
+#define V_RDOFF(x) ((x) << S_RDOFF)
+#define G_RDOFF(x) (((x) >> S_RDOFF) & M_RDOFF)
+
+#define A_XGMAC_PORT_HSS_RXA_SIGDET_CTRL 0x191c
+
+#define S_SIGNSD 13
+#define M_SIGNSD 0x3U
+#define V_SIGNSD(x) ((x) << S_SIGNSD)
+#define G_SIGNSD(x) (((x) >> S_SIGNSD) & M_SIGNSD)
+
+#define S_DACSD 8
+#define M_DACSD 0x1fU
+#define V_DACSD(x) ((x) << S_DACSD)
+#define G_DACSD(x) (((x) >> S_DACSD) & M_DACSD)
+
+#define S_SDPDN 6
+#define V_SDPDN(x) ((x) << S_SDPDN)
+#define F_SDPDN V_SDPDN(1U)
+
+#define S_SIGDET 5
+#define V_SIGDET(x) ((x) << S_SIGDET)
+#define F_SIGDET V_SIGDET(1U)
+
+#define S_SDLVL 0
+#define M_SDLVL 0x1fU
+#define V_SDLVL(x) ((x) << S_SDLVL)
+#define G_SDLVL(x) (((x) >> S_SDLVL) & M_SDLVL)
+
+#define A_XGMAC_PORT_HSS_RXA_DFE_CTRL 0x1920
+
+#define S_REQCMP 15
+#define V_REQCMP(x) ((x) << S_REQCMP)
+#define F_REQCMP V_REQCMP(1U)
+
+#define S_DFEREQ 14
+#define V_DFEREQ(x) ((x) << S_DFEREQ)
+#define F_DFEREQ V_DFEREQ(1U)
+
+#define S_SPCEN 13
+#define V_SPCEN(x) ((x) << S_SPCEN)
+#define F_SPCEN V_SPCEN(1U)
+
+#define S_GATEEN 12
+#define V_GATEEN(x) ((x) << S_GATEEN)
+#define F_GATEEN V_GATEEN(1U)
+
+#define S_SPIFMT 9
+#define M_SPIFMT 0x7U
+#define V_SPIFMT(x) ((x) << S_SPIFMT)
+#define G_SPIFMT(x) (((x) >> S_SPIFMT) & M_SPIFMT)
+
+#define S_DFEPWR 6
+#define M_DFEPWR 0x7U
+#define V_DFEPWR(x) ((x) << S_DFEPWR)
+#define G_DFEPWR(x) (((x) >> S_DFEPWR) & M_DFEPWR)
+
+#define S_STNDBY 5
+#define V_STNDBY(x) ((x) << S_STNDBY)
+#define F_STNDBY V_STNDBY(1U)
+
+#define S_FRCH 4
+#define V_FRCH(x) ((x) << S_FRCH)
+#define F_FRCH V_FRCH(1U)
+
+#define S_NONRND 3
+#define V_NONRND(x) ((x) << S_NONRND)
+#define F_NONRND V_NONRND(1U)
+
+#define S_NONRNF 2
+#define V_NONRNF(x) ((x) << S_NONRNF)
+#define F_NONRNF V_NONRNF(1U)
+
+#define S_FSTLCK 1
+#define V_FSTLCK(x) ((x) << S_FSTLCK)
+#define F_FSTLCK V_FSTLCK(1U)
+
+#define S_DFERST 0
+#define V_DFERST(x) ((x) << S_DFERST)
+#define F_DFERST V_DFERST(1U)
+
+#define A_XGMAC_PORT_HSS_RXA_DFE_DATA_EDGE_SAMPLE 0x1924
+
+#define S_ESAMP 8
+#define M_ESAMP 0xffU
+#define V_ESAMP(x) ((x) << S_ESAMP)
+#define G_ESAMP(x) (((x) >> S_ESAMP) & M_ESAMP)
+
+#define S_DSAMP 0
+#define M_DSAMP 0xffU
+#define V_DSAMP(x) ((x) << S_DSAMP)
+#define G_DSAMP(x) (((x) >> S_DSAMP) & M_DSAMP)
+
+#define A_XGMAC_PORT_HSS_RXA_DFE_AMP_SAMPLE 0x1928
+
+#define S_SMODE 8
+#define M_SMODE 0xfU
+#define V_SMODE(x) ((x) << S_SMODE)
+#define G_SMODE(x) (((x) >> S_SMODE) & M_SMODE)
+
+#define S_ADCORR 7
+#define V_ADCORR(x) ((x) << S_ADCORR)
+#define F_ADCORR V_ADCORR(1U)
+
+#define S_TRAINEN 6
+#define V_TRAINEN(x) ((x) << S_TRAINEN)
+#define F_TRAINEN V_TRAINEN(1U)
+
+#define S_ASAMPQ 3
+#define M_ASAMPQ 0x7U
+#define V_ASAMPQ(x) ((x) << S_ASAMPQ)
+#define G_ASAMPQ(x) (((x) >> S_ASAMPQ) & M_ASAMPQ)
+
+#define S_ASAMP 0
+#define M_ASAMP 0x7U
+#define V_ASAMP(x) ((x) << S_ASAMP)
+#define G_ASAMP(x) (((x) >> S_ASAMP) & M_ASAMP)
+
+#define A_XGMAC_PORT_HSS_RXA_VGA_CTRL1 0x192c
+
+#define S_POLE 12
+#define M_POLE 0x3U
+#define V_POLE(x) ((x) << S_POLE)
+#define G_POLE(x) (((x) >> S_POLE) & M_POLE)
+
+#define S_PEAK 8
+#define M_PEAK 0x7U
+#define V_PEAK(x) ((x) << S_PEAK)
+#define G_PEAK(x) (((x) >> S_PEAK) & M_PEAK)
+
+#define S_VOFFSN 6
+#define M_VOFFSN 0x3U
+#define V_VOFFSN(x) ((x) << S_VOFFSN)
+#define G_VOFFSN(x) (((x) >> S_VOFFSN) & M_VOFFSN)
+
+#define S_VOFFA 0
+#define M_VOFFA 0x3fU
+#define V_VOFFA(x) ((x) << S_VOFFA)
+#define G_VOFFA(x) (((x) >> S_VOFFA) & M_VOFFA)
+
+#define A_XGMAC_PORT_HSS_RXA_VGA_CTRL2 0x1930
+
+#define S_SHORTV 10
+#define V_SHORTV(x) ((x) << S_SHORTV)
+#define F_SHORTV V_SHORTV(1U)
+
+#define S_VGAIN 0
+#define M_VGAIN 0xfU
+#define V_VGAIN(x) ((x) << S_VGAIN)
+#define G_VGAIN(x) (((x) >> S_VGAIN) & M_VGAIN)
+
+#define A_XGMAC_PORT_HSS_RXA_VGA_CTRL3 0x1934
+
+#define S_HBND1 10
+#define V_HBND1(x) ((x) << S_HBND1)
+#define F_HBND1 V_HBND1(1U)
+
+#define S_HBND0 9
+#define V_HBND0(x) ((x) << S_HBND0)
+#define F_HBND0 V_HBND0(1U)
+
+#define S_VLCKD 8
+#define V_VLCKD(x) ((x) << S_VLCKD)
+#define F_VLCKD V_VLCKD(1U)
+
+#define S_VLCKDF 7
+#define V_VLCKDF(x) ((x) << S_VLCKDF)
+#define F_VLCKDF V_VLCKDF(1U)
+
+#define S_AMAXT 0
+#define M_AMAXT 0x7fU
+#define V_AMAXT(x) ((x) << S_AMAXT)
+#define G_AMAXT(x) (((x) >> S_AMAXT) & M_AMAXT)
+
+#define A_XGMAC_PORT_HSS_RXA_DFE_D00_D01_OFFSET 0x1938
+
+#define S_D01SN 13
+#define M_D01SN 0x3U
+#define V_D01SN(x) ((x) << S_D01SN)
+#define G_D01SN(x) (((x) >> S_D01SN) & M_D01SN)
+
+#define S_D01AMP 8
+#define M_D01AMP 0x1fU
+#define V_D01AMP(x) ((x) << S_D01AMP)
+#define G_D01AMP(x) (((x) >> S_D01AMP) & M_D01AMP)
+
+#define S_D00SN 5
+#define M_D00SN 0x3U
+#define V_D00SN(x) ((x) << S_D00SN)
+#define G_D00SN(x) (((x) >> S_D00SN) & M_D00SN)
+
+#define S_D00AMP 0
+#define M_D00AMP 0x1fU
+#define V_D00AMP(x) ((x) << S_D00AMP)
+#define G_D00AMP(x) (((x) >> S_D00AMP) & M_D00AMP)
+
+#define A_XGMAC_PORT_HSS_RXA_DFE_D10_D11_OFFSET 0x193c
+
+#define S_D11SN 13
+#define M_D11SN 0x3U
+#define V_D11SN(x) ((x) << S_D11SN)
+#define G_D11SN(x) (((x) >> S_D11SN) & M_D11SN)
+
+#define S_D11AMP 8
+#define M_D11AMP 0x1fU
+#define V_D11AMP(x) ((x) << S_D11AMP)
+#define G_D11AMP(x) (((x) >> S_D11AMP) & M_D11AMP)
+
+#define S_D10SN 5
+#define M_D10SN 0x3U
+#define V_D10SN(x) ((x) << S_D10SN)
+#define G_D10SN(x) (((x) >> S_D10SN) & M_D10SN)
+
+#define S_D10AMP 0
+#define M_D10AMP 0x1fU
+#define V_D10AMP(x) ((x) << S_D10AMP)
+#define G_D10AMP(x) (((x) >> S_D10AMP) & M_D10AMP)
+
+#define A_XGMAC_PORT_HSS_RXA_DFE_E0_E1_OFFSET 0x1940
+
+#define S_E1SN 13
+#define M_E1SN 0x3U
+#define V_E1SN(x) ((x) << S_E1SN)
+#define G_E1SN(x) (((x) >> S_E1SN) & M_E1SN)
+
+#define S_E1AMP 8
+#define M_E1AMP 0x1fU
+#define V_E1AMP(x) ((x) << S_E1AMP)
+#define G_E1AMP(x) (((x) >> S_E1AMP) & M_E1AMP)
+
+#define S_E0SN 5
+#define M_E0SN 0x3U
+#define V_E0SN(x) ((x) << S_E0SN)
+#define G_E0SN(x) (((x) >> S_E0SN) & M_E0SN)
+
+#define S_E0AMP 0
+#define M_E0AMP 0x1fU
+#define V_E0AMP(x) ((x) << S_E0AMP)
+#define G_E0AMP(x) (((x) >> S_E0AMP) & M_E0AMP)
+
+#define A_XGMAC_PORT_HSS_RXA_DACA_OFFSET 0x1944
+
+#define S_AOFFO 8
+#define M_AOFFO 0x3fU
+#define V_AOFFO(x) ((x) << S_AOFFO)
+#define G_AOFFO(x) (((x) >> S_AOFFO) & M_AOFFO)
+
+#define S_AOFFE 0
+#define M_AOFFE 0x3fU
+#define V_AOFFE(x) ((x) << S_AOFFE)
+#define G_AOFFE(x) (((x) >> S_AOFFE) & M_AOFFE)
+
+#define A_XGMAC_PORT_HSS_RXA_DACAP_DAC_AN_OFFSET 0x1948
+
+#define S_DACAN 8
+#define M_DACAN 0xffU
+#define V_DACAN(x) ((x) << S_DACAN)
+#define G_DACAN(x) (((x) >> S_DACAN) & M_DACAN)
+
+#define S_DACAP 0
+#define M_DACAP 0xffU
+#define V_DACAP(x) ((x) << S_DACAP)
+#define G_DACAP(x) (((x) >> S_DACAP) & M_DACAP)
+
+#define A_XGMAC_PORT_HSS_RXA_DACA_MIN 0x194c
+
+#define S_DACAZ 8
+#define M_DACAZ 0xffU
+#define V_DACAZ(x) ((x) << S_DACAZ)
+#define G_DACAZ(x) (((x) >> S_DACAZ) & M_DACAZ)
+
+#define S_DACAM 0
+#define M_DACAM 0xffU
+#define V_DACAM(x) ((x) << S_DACAM)
+#define G_DACAM(x) (((x) >> S_DACAM) & M_DACAM)
+
+#define A_XGMAC_PORT_HSS_RXA_ADAC_CTRL 0x1950
+
+#define S_ADSN 7
+#define M_ADSN 0x3U
+#define V_ADSN(x) ((x) << S_ADSN)
+#define G_ADSN(x) (((x) >> S_ADSN) & M_ADSN)
+
+#define S_ADMAG 0
+#define M_ADMAG 0x7fU
+#define V_ADMAG(x) ((x) << S_ADMAG)
+#define G_ADMAG(x) (((x) >> S_ADMAG) & M_ADMAG)
+
+#define A_XGMAC_PORT_HSS_RXA_DIGITAL_EYE_CTRL 0x1954
+
+#define S_BLKAZ 15
+#define V_BLKAZ(x) ((x) << S_BLKAZ)
+#define F_BLKAZ V_BLKAZ(1U)
+
+#define S_WIDTH 10
+#define M_WIDTH 0x1fU
+#define V_WIDTH(x) ((x) << S_WIDTH)
+#define G_WIDTH(x) (((x) >> S_WIDTH) & M_WIDTH)
+
+#define S_MINWIDTH 5
+#define M_MINWIDTH 0x1fU
+#define V_MINWIDTH(x) ((x) << S_MINWIDTH)
+#define G_MINWIDTH(x) (((x) >> S_MINWIDTH) & M_MINWIDTH)
+
+#define S_MINAMP 0
+#define M_MINAMP 0x1fU
+#define V_MINAMP(x) ((x) << S_MINAMP)
+#define G_MINAMP(x) (((x) >> S_MINAMP) & M_MINAMP)
+
+#define A_XGMAC_PORT_HSS_RXA_DIGITAL_EYE_METRICS 0x1958
+
+#define S_EMBRDY 10
+#define V_EMBRDY(x) ((x) << S_EMBRDY)
+#define F_EMBRDY V_EMBRDY(1U)
+
+#define S_EMBUMP 7
+#define V_EMBUMP(x) ((x) << S_EMBUMP)
+#define F_EMBUMP V_EMBUMP(1U)
+
+#define S_EMMD 5
+#define M_EMMD 0x3U
+#define V_EMMD(x) ((x) << S_EMMD)
+#define G_EMMD(x) (((x) >> S_EMMD) & M_EMMD)
+
+#define S_EMPAT 1
+#define V_EMPAT(x) ((x) << S_EMPAT)
+#define F_EMPAT V_EMPAT(1U)
+
+#define S_EMEN 0
+#define V_EMEN(x) ((x) << S_EMEN)
+#define F_EMEN V_EMEN(1U)
+
+#define A_XGMAC_PORT_HSS_RXA_DFE_H1 0x195c
+
+#define S_H1OSN 14
+#define M_H1OSN 0x3U
+#define V_H1OSN(x) ((x) << S_H1OSN)
+#define G_H1OSN(x) (((x) >> S_H1OSN) & M_H1OSN)
+
+#define S_H1OMAG 8
+#define M_H1OMAG 0x3fU
+#define V_H1OMAG(x) ((x) << S_H1OMAG)
+#define G_H1OMAG(x) (((x) >> S_H1OMAG) & M_H1OMAG)
+
+#define S_H1ESN 6
+#define M_H1ESN 0x3U
+#define V_H1ESN(x) ((x) << S_H1ESN)
+#define G_H1ESN(x) (((x) >> S_H1ESN) & M_H1ESN)
+
+#define S_H1EMAG 0
+#define M_H1EMAG 0x3fU
+#define V_H1EMAG(x) ((x) << S_H1EMAG)
+#define G_H1EMAG(x) (((x) >> S_H1EMAG) & M_H1EMAG)
+
+#define A_XGMAC_PORT_HSS_RXA_DFE_H2 0x1960
+
+#define S_H2OSN 13
+#define M_H2OSN 0x3U
+#define V_H2OSN(x) ((x) << S_H2OSN)
+#define G_H2OSN(x) (((x) >> S_H2OSN) & M_H2OSN)
+
+#define S_H2OMAG 8
+#define M_H2OMAG 0x1fU
+#define V_H2OMAG(x) ((x) << S_H2OMAG)
+#define G_H2OMAG(x) (((x) >> S_H2OMAG) & M_H2OMAG)
+
+#define S_H2ESN 5
+#define M_H2ESN 0x3U
+#define V_H2ESN(x) ((x) << S_H2ESN)
+#define G_H2ESN(x) (((x) >> S_H2ESN) & M_H2ESN)
+
+#define S_H2EMAG 0
+#define M_H2EMAG 0x1fU
+#define V_H2EMAG(x) ((x) << S_H2EMAG)
+#define G_H2EMAG(x) (((x) >> S_H2EMAG) & M_H2EMAG)
+
+#define A_XGMAC_PORT_HSS_RXA_DFE_H3 0x1964
+
+#define S_H3OSN 12
+#define M_H3OSN 0x3U
+#define V_H3OSN(x) ((x) << S_H3OSN)
+#define G_H3OSN(x) (((x) >> S_H3OSN) & M_H3OSN)
+
+#define S_H3OMAG 8
+#define M_H3OMAG 0xfU
+#define V_H3OMAG(x) ((x) << S_H3OMAG)
+#define G_H3OMAG(x) (((x) >> S_H3OMAG) & M_H3OMAG)
+
+#define S_H3ESN 4
+#define M_H3ESN 0x3U
+#define V_H3ESN(x) ((x) << S_H3ESN)
+#define G_H3ESN(x) (((x) >> S_H3ESN) & M_H3ESN)
+
+#define S_H3EMAG 0
+#define M_H3EMAG 0xfU
+#define V_H3EMAG(x) ((x) << S_H3EMAG)
+#define G_H3EMAG(x) (((x) >> S_H3EMAG) & M_H3EMAG)
+
+#define A_XGMAC_PORT_HSS_RXA_DFE_H4 0x1968
+
+#define S_H4OSN 12
+#define M_H4OSN 0x3U
+#define V_H4OSN(x) ((x) << S_H4OSN)
+#define G_H4OSN(x) (((x) >> S_H4OSN) & M_H4OSN)
+
+#define S_H4OMAG 8
+#define M_H4OMAG 0xfU
+#define V_H4OMAG(x) ((x) << S_H4OMAG)
+#define G_H4OMAG(x) (((x) >> S_H4OMAG) & M_H4OMAG)
+
+#define S_H4ESN 4
+#define M_H4ESN 0x3U
+#define V_H4ESN(x) ((x) << S_H4ESN)
+#define G_H4ESN(x) (((x) >> S_H4ESN) & M_H4ESN)
+
+#define S_H4EMAG 0
+#define M_H4EMAG 0xfU
+#define V_H4EMAG(x) ((x) << S_H4EMAG)
+#define G_H4EMAG(x) (((x) >> S_H4EMAG) & M_H4EMAG)
+
+#define A_XGMAC_PORT_HSS_RXA_DFE_H5 0x196c
+
+#define S_H5OSN 12
+#define M_H5OSN 0x3U
+#define V_H5OSN(x) ((x) << S_H5OSN)
+#define G_H5OSN(x) (((x) >> S_H5OSN) & M_H5OSN)
+
+#define S_H5OMAG 8
+#define M_H5OMAG 0xfU
+#define V_H5OMAG(x) ((x) << S_H5OMAG)
+#define G_H5OMAG(x) (((x) >> S_H5OMAG) & M_H5OMAG)
+
+#define S_H5ESN 4
+#define M_H5ESN 0x3U
+#define V_H5ESN(x) ((x) << S_H5ESN)
+#define G_H5ESN(x) (((x) >> S_H5ESN) & M_H5ESN)
+
+#define S_H5EMAG 0
+#define M_H5EMAG 0xfU
+#define V_H5EMAG(x) ((x) << S_H5EMAG)
+#define G_H5EMAG(x) (((x) >> S_H5EMAG) & M_H5EMAG)
+
+#define A_XGMAC_PORT_HSS_RXA_DAC_DPC 0x1970
+
+#define S_DPCCVG 13
+#define V_DPCCVG(x) ((x) << S_DPCCVG)
+#define F_DPCCVG V_DPCCVG(1U)
+
+#define S_DACCVG 12
+#define V_DACCVG(x) ((x) << S_DACCVG)
+#define F_DACCVG V_DACCVG(1U)
+
+#define S_DPCTGT 9
+#define M_DPCTGT 0x7U
+#define V_DPCTGT(x) ((x) << S_DPCTGT)
+#define G_DPCTGT(x) (((x) >> S_DPCTGT) & M_DPCTGT)
+
+#define S_BLKH1T 8
+#define V_BLKH1T(x) ((x) << S_BLKH1T)
+#define F_BLKH1T V_BLKH1T(1U)
+
+#define S_BLKOAE 7
+#define V_BLKOAE(x) ((x) << S_BLKOAE)
+#define F_BLKOAE V_BLKOAE(1U)
+
+#define S_H1TGT 4
+#define M_H1TGT 0x7U
+#define V_H1TGT(x) ((x) << S_H1TGT)
+#define G_H1TGT(x) (((x) >> S_H1TGT) & M_H1TGT)
+
+#define S_OAE 0
+#define M_OAE 0xfU
+#define V_OAE(x) ((x) << S_OAE)
+#define G_OAE(x) (((x) >> S_OAE) & M_OAE)
+
+#define A_XGMAC_PORT_HSS_RXA_DDC 0x1974
+
+#define S_OLS 11
+#define M_OLS 0x1fU
+#define V_OLS(x) ((x) << S_OLS)
+#define G_OLS(x) (((x) >> S_OLS) & M_OLS)
+
+#define S_OES 6
+#define M_OES 0x1fU
+#define V_OES(x) ((x) << S_OES)
+#define G_OES(x) (((x) >> S_OES) & M_OES)
+
+#define S_BLKODEC 5
+#define V_BLKODEC(x) ((x) << S_BLKODEC)
+#define F_BLKODEC V_BLKODEC(1U)
+
+#define S_ODEC 0
+#define M_ODEC 0x1fU
+#define V_ODEC(x) ((x) << S_ODEC)
+#define G_ODEC(x) (((x) >> S_ODEC) & M_ODEC)
+
+#define A_XGMAC_PORT_HSS_RXA_INTERNAL_STATUS 0x1978
+
+#define S_BER6 15
+#define V_BER6(x) ((x) << S_BER6)
+#define F_BER6 V_BER6(1U)
+
+#define S_BER6VAL 14
+#define V_BER6VAL(x) ((x) << S_BER6VAL)
+#define F_BER6VAL V_BER6VAL(1U)
+
+#define S_BER3VAL 13
+#define V_BER3VAL(x) ((x) << S_BER3VAL)
+#define F_BER3VAL V_BER3VAL(1U)
+
+#define S_DPCCMP 9
+#define V_DPCCMP(x) ((x) << S_DPCCMP)
+#define F_DPCCMP V_DPCCMP(1U)
+
+#define S_DACCMP 8
+#define V_DACCMP(x) ((x) << S_DACCMP)
+#define F_DACCMP V_DACCMP(1U)
+
+#define S_DDCCMP 7
+#define V_DDCCMP(x) ((x) << S_DDCCMP)
+#define F_DDCCMP V_DDCCMP(1U)
+
+#define S_AERRFLG 6
+#define V_AERRFLG(x) ((x) << S_AERRFLG)
+#define F_AERRFLG V_AERRFLG(1U)
+
+#define S_WERRFLG 5
+#define V_WERRFLG(x) ((x) << S_WERRFLG)
+#define F_WERRFLG V_WERRFLG(1U)
+
+#define S_TRCMP 4
+#define V_TRCMP(x) ((x) << S_TRCMP)
+#define F_TRCMP V_TRCMP(1U)
+
+#define S_VLCKF 3
+#define V_VLCKF(x) ((x) << S_VLCKF)
+#define F_VLCKF V_VLCKF(1U)
+
+#define S_ROCADJ 2
+#define V_ROCADJ(x) ((x) << S_ROCADJ)
+#define F_ROCADJ V_ROCADJ(1U)
+
+#define S_ROCCMP 1
+#define V_ROCCMP(x) ((x) << S_ROCCMP)
+#define F_ROCCMP V_ROCCMP(1U)
+
+#define S_OCCMP 0
+#define V_OCCMP(x) ((x) << S_OCCMP)
+#define F_OCCMP V_OCCMP(1U)
+
+#define A_XGMAC_PORT_HSS_RXA_DFE_FUNC_CTRL 0x197c
+
+#define S_FDPC 15
+#define V_FDPC(x) ((x) << S_FDPC)
+#define F_FDPC V_FDPC(1U)
+
+#define S_FDAC 14
+#define V_FDAC(x) ((x) << S_FDAC)
+#define F_FDAC V_FDAC(1U)
+
+#define S_FDDC 13
+#define V_FDDC(x) ((x) << S_FDDC)
+#define F_FDDC V_FDDC(1U)
+
+#define S_FNRND 12
+#define V_FNRND(x) ((x) << S_FNRND)
+#define F_FNRND V_FNRND(1U)
+
+#define S_FVGAIN 11
+#define V_FVGAIN(x) ((x) << S_FVGAIN)
+#define F_FVGAIN V_FVGAIN(1U)
+
+#define S_FVOFF 10
+#define V_FVOFF(x) ((x) << S_FVOFF)
+#define F_FVOFF V_FVOFF(1U)
+
+#define S_FSDET 9
+#define V_FSDET(x) ((x) << S_FSDET)
+#define F_FSDET V_FSDET(1U)
+
+#define S_FBER6 8
+#define V_FBER6(x) ((x) << S_FBER6)
+#define F_FBER6 V_FBER6(1U)
+
+#define S_FROTO 7
+#define V_FROTO(x) ((x) << S_FROTO)
+#define F_FROTO V_FROTO(1U)
+
+#define S_FH4H5 6
+#define V_FH4H5(x) ((x) << S_FH4H5)
+#define F_FH4H5 V_FH4H5(1U)
+
+#define S_FH2H3 5
+#define V_FH2H3(x) ((x) << S_FH2H3)
+#define F_FH2H3 V_FH2H3(1U)
+
+#define S_FH1 4
+#define V_FH1(x) ((x) << S_FH1)
+#define F_FH1 V_FH1(1U)
+
+#define S_FH1SN 3
+#define V_FH1SN(x) ((x) << S_FH1SN)
+#define F_FH1SN V_FH1SN(1U)
+
+#define S_FNRDF 2
+#define V_FNRDF(x) ((x) << S_FNRDF)
+#define F_FNRDF V_FNRDF(1U)
+
+#define S_FADAC 0
+#define V_FADAC(x) ((x) << S_FADAC)
+#define F_FADAC V_FADAC(1U)
+
+#define A_XGMAC_PORT_HSS_RXB_CFG_MODE 0x1980
+#define A_XGMAC_PORT_HSS_RXB_TEST_CTRL 0x1984
+#define A_XGMAC_PORT_HSS_RXB_PH_ROTATOR_CTRL 0x1988
+#define A_XGMAC_PORT_HSS_RXB_PH_ROTATOR_OFFSET_CTRL 0x198c
+#define A_XGMAC_PORT_HSS_RXB_PH_ROTATOR_POSITION1 0x1990
+#define A_XGMAC_PORT_HSS_RXB_PH_ROTATOR_POSITION2 0x1994
+#define A_XGMAC_PORT_HSS_RXB_PH_ROTATOR_STATIC_PH_OFFSET 0x1998
+#define A_XGMAC_PORT_HSS_RXB_SIGDET_CTRL 0x199c
+#define A_XGMAC_PORT_HSS_RXB_DFE_CTRL 0x19a0
+#define A_XGMAC_PORT_HSS_RXB_DFE_DATA_EDGE_SAMPLE 0x19a4
+#define A_XGMAC_PORT_HSS_RXB_DFE_AMP_SAMPLE 0x19a8
+#define A_XGMAC_PORT_HSS_RXB_VGA_CTRL1 0x19ac
+#define A_XGMAC_PORT_HSS_RXB_VGA_CTRL2 0x19b0
+#define A_XGMAC_PORT_HSS_RXB_VGA_CTRL3 0x19b4
+#define A_XGMAC_PORT_HSS_RXB_DFE_D00_D01_OFFSET 0x19b8
+#define A_XGMAC_PORT_HSS_RXB_DFE_D10_D11_OFFSET 0x19bc
+#define A_XGMAC_PORT_HSS_RXB_DFE_E0_E1_OFFSET 0x19c0
+#define A_XGMAC_PORT_HSS_RXB_DACA_OFFSET 0x19c4
+#define A_XGMAC_PORT_HSS_RXB_DACAP_DAC_AN_OFFSET 0x19c8
+#define A_XGMAC_PORT_HSS_RXB_DACA_MIN 0x19cc
+#define A_XGMAC_PORT_HSS_RXB_ADAC_CTRL 0x19d0
+#define A_XGMAC_PORT_HSS_RXB_DIGITAL_EYE_CTRL 0x19d4
+#define A_XGMAC_PORT_HSS_RXB_DIGITAL_EYE_METRICS 0x19d8
+#define A_XGMAC_PORT_HSS_RXB_DFE_H1 0x19dc
+#define A_XGMAC_PORT_HSS_RXB_DFE_H2 0x19e0
+#define A_XGMAC_PORT_HSS_RXB_DFE_H3 0x19e4
+#define A_XGMAC_PORT_HSS_RXB_DFE_H4 0x19e8
+#define A_XGMAC_PORT_HSS_RXB_DFE_H5 0x19ec
+#define A_XGMAC_PORT_HSS_RXB_DAC_DPC 0x19f0
+#define A_XGMAC_PORT_HSS_RXB_DDC 0x19f4
+#define A_XGMAC_PORT_HSS_RXB_INTERNAL_STATUS 0x19f8
+#define A_XGMAC_PORT_HSS_RXB_DFE_FUNC_CTRL 0x19fc
+#define A_XGMAC_PORT_HSS_TXC_MODE_CFG 0x1a00
+#define A_XGMAC_PORT_HSS_TXC_TEST_CTRL 0x1a04
+#define A_XGMAC_PORT_HSS_TXC_COEFF_CTRL 0x1a08
+#define A_XGMAC_PORT_HSS_TXC_DRIVER_MODE 0x1a0c
+#define A_XGMAC_PORT_HSS_TXC_DRIVER_OVR_CTRL 0x1a10
+#define A_XGMAC_PORT_HSS_TXC_TDM_BIASGEN_STANDBY_TIMER 0x1a14
+#define A_XGMAC_PORT_HSS_TXC_TDM_BIASGEN_PWRON_TIMER 0x1a18
+#define A_XGMAC_PORT_HSS_TXC_TAP0_COEFF 0x1a20
+#define A_XGMAC_PORT_HSS_TXC_TAP1_COEFF 0x1a24
+#define A_XGMAC_PORT_HSS_TXC_TAP2_COEFF 0x1a28
+#define A_XGMAC_PORT_HSS_TXC_PWR 0x1a30
+#define A_XGMAC_PORT_HSS_TXC_POLARITY 0x1a34
+#define A_XGMAC_PORT_HSS_TXC_8023AP_AE_CMD 0x1a38
+#define A_XGMAC_PORT_HSS_TXC_8023AP_AE_STATUS 0x1a3c
+#define A_XGMAC_PORT_HSS_TXC_TAP0_IDAC_OVR 0x1a40
+#define A_XGMAC_PORT_HSS_TXC_TAP1_IDAC_OVR 0x1a44
+#define A_XGMAC_PORT_HSS_TXC_TAP2_IDAC_OVR 0x1a48
+#define A_XGMAC_PORT_HSS_TXC_PWR_DAC_OVR 0x1a50
+#define A_XGMAC_PORT_HSS_TXC_PWR_DAC 0x1a54
+#define A_XGMAC_PORT_HSS_TXC_TAP0_IDAC_APP 0x1a60
+#define A_XGMAC_PORT_HSS_TXC_TAP1_IDAC_APP 0x1a64
+#define A_XGMAC_PORT_HSS_TXC_TAP2_IDAC_APP 0x1a68
+#define A_XGMAC_PORT_HSS_TXC_SEG_DIS_APP 0x1a70
+#define A_XGMAC_PORT_HSS_TXC_EXT_ADDR_DATA 0x1a78
+#define A_XGMAC_PORT_HSS_TXC_EXT_ADDR 0x1a7c
+#define A_XGMAC_PORT_HSS_TXD_MODE_CFG 0x1a80
+#define A_XGMAC_PORT_HSS_TXD_TEST_CTRL 0x1a84
+#define A_XGMAC_PORT_HSS_TXD_COEFF_CTRL 0x1a88
+#define A_XGMAC_PORT_HSS_TXD_DRIVER_MODE 0x1a8c
+#define A_XGMAC_PORT_HSS_TXD_DRIVER_OVR_CTRL 0x1a90
+#define A_XGMAC_PORT_HSS_TXD_TDM_BIASGEN_STANDBY_TIMER 0x1a94
+#define A_XGMAC_PORT_HSS_TXD_TDM_BIASGEN_PWRON_TIMER 0x1a98
+#define A_XGMAC_PORT_HSS_TXD_TAP0_COEFF 0x1aa0
+#define A_XGMAC_PORT_HSS_TXD_TAP1_COEFF 0x1aa4
+#define A_XGMAC_PORT_HSS_TXD_TAP2_COEFF 0x1aa8
+#define A_XGMAC_PORT_HSS_TXD_PWR 0x1ab0
+#define A_XGMAC_PORT_HSS_TXD_POLARITY 0x1ab4
+#define A_XGMAC_PORT_HSS_TXD_8023AP_AE_CMD 0x1ab8
+#define A_XGMAC_PORT_HSS_TXD_8023AP_AE_STATUS 0x1abc
+#define A_XGMAC_PORT_HSS_TXD_TAP0_IDAC_OVR 0x1ac0
+#define A_XGMAC_PORT_HSS_TXD_TAP1_IDAC_OVR 0x1ac4
+#define A_XGMAC_PORT_HSS_TXD_TAP2_IDAC_OVR 0x1ac8
+#define A_XGMAC_PORT_HSS_TXD_PWR_DAC_OVR 0x1ad0
+#define A_XGMAC_PORT_HSS_TXD_PWR_DAC 0x1ad4
+#define A_XGMAC_PORT_HSS_TXD_TAP0_IDAC_APP 0x1ae0
+#define A_XGMAC_PORT_HSS_TXD_TAP1_IDAC_APP 0x1ae4
+#define A_XGMAC_PORT_HSS_TXD_TAP2_IDAC_APP 0x1ae8
+#define A_XGMAC_PORT_HSS_TXD_SEG_DIS_APP 0x1af0
+#define A_XGMAC_PORT_HSS_TXD_EXT_ADDR_DATA 0x1af8
+#define A_XGMAC_PORT_HSS_TXD_EXT_ADDR 0x1afc
+#define A_XGMAC_PORT_HSS_RXC_CFG_MODE 0x1b00
+#define A_XGMAC_PORT_HSS_RXC_TEST_CTRL 0x1b04
+#define A_XGMAC_PORT_HSS_RXC_PH_ROTATOR_CTRL 0x1b08
+#define A_XGMAC_PORT_HSS_RXC_PH_ROTATOR_OFFSET_CTRL 0x1b0c
+#define A_XGMAC_PORT_HSS_RXC_PH_ROTATOR_POSITION1 0x1b10
+#define A_XGMAC_PORT_HSS_RXC_PH_ROTATOR_POSITION2 0x1b14
+#define A_XGMAC_PORT_HSS_RXC_PH_ROTATOR_STATIC_PH_OFFSET 0x1b18
+#define A_XGMAC_PORT_HSS_RXC_SIGDET_CTRL 0x1b1c
+#define A_XGMAC_PORT_HSS_RXC_DFE_CTRL 0x1b20
+#define A_XGMAC_PORT_HSS_RXC_DFE_DATA_EDGE_SAMPLE 0x1b24
+#define A_XGMAC_PORT_HSS_RXC_DFE_AMP_SAMPLE 0x1b28
+#define A_XGMAC_PORT_HSS_RXC_VGA_CTRL1 0x1b2c
+#define A_XGMAC_PORT_HSS_RXC_VGA_CTRL2 0x1b30
+#define A_XGMAC_PORT_HSS_RXC_VGA_CTRL3 0x1b34
+#define A_XGMAC_PORT_HSS_RXC_DFE_D00_D01_OFFSET 0x1b38
+#define A_XGMAC_PORT_HSS_RXC_DFE_D10_D11_OFFSET 0x1b3c
+#define A_XGMAC_PORT_HSS_RXC_DFE_E0_E1_OFFSET 0x1b40
+#define A_XGMAC_PORT_HSS_RXC_DACA_OFFSET 0x1b44
+#define A_XGMAC_PORT_HSS_RXC_DACAP_DAC_AN_OFFSET 0x1b48
+#define A_XGMAC_PORT_HSS_RXC_DACA_MIN 0x1b4c
+#define A_XGMAC_PORT_HSS_RXC_ADAC_CTRL 0x1b50
+#define A_XGMAC_PORT_HSS_RXC_DIGITAL_EYE_CTRL 0x1b54
+#define A_XGMAC_PORT_HSS_RXC_DIGITAL_EYE_METRICS 0x1b58
+#define A_XGMAC_PORT_HSS_RXC_DFE_H1 0x1b5c
+#define A_XGMAC_PORT_HSS_RXC_DFE_H2 0x1b60
+#define A_XGMAC_PORT_HSS_RXC_DFE_H3 0x1b64
+#define A_XGMAC_PORT_HSS_RXC_DFE_H4 0x1b68
+#define A_XGMAC_PORT_HSS_RXC_DFE_H5 0x1b6c
+#define A_XGMAC_PORT_HSS_RXC_DAC_DPC 0x1b70
+#define A_XGMAC_PORT_HSS_RXC_DDC 0x1b74
+#define A_XGMAC_PORT_HSS_RXC_INTERNAL_STATUS 0x1b78
+#define A_XGMAC_PORT_HSS_RXC_DFE_FUNC_CTRL 0x1b7c
+#define A_XGMAC_PORT_HSS_RXD_CFG_MODE 0x1b80
+#define A_XGMAC_PORT_HSS_RXD_TEST_CTRL 0x1b84
+#define A_XGMAC_PORT_HSS_RXD_PH_ROTATOR_CTRL 0x1b88
+#define A_XGMAC_PORT_HSS_RXD_PH_ROTATOR_OFFSET_CTRL 0x1b8c
+#define A_XGMAC_PORT_HSS_RXD_PH_ROTATOR_POSITION1 0x1b90
+#define A_XGMAC_PORT_HSS_RXD_PH_ROTATOR_POSITION2 0x1b94
+#define A_XGMAC_PORT_HSS_RXD_PH_ROTATOR_STATIC_PH_OFFSET 0x1b98
+#define A_XGMAC_PORT_HSS_RXD_SIGDET_CTRL 0x1b9c
+#define A_XGMAC_PORT_HSS_RXD_DFE_CTRL 0x1ba0
+#define A_XGMAC_PORT_HSS_RXD_DFE_DATA_EDGE_SAMPLE 0x1ba4
+#define A_XGMAC_PORT_HSS_RXD_DFE_AMP_SAMPLE 0x1ba8
+#define A_XGMAC_PORT_HSS_RXD_VGA_CTRL1 0x1bac
+#define A_XGMAC_PORT_HSS_RXD_VGA_CTRL2 0x1bb0
+#define A_XGMAC_PORT_HSS_RXD_VGA_CTRL3 0x1bb4
+#define A_XGMAC_PORT_HSS_RXD_DFE_D00_D01_OFFSET 0x1bb8
+#define A_XGMAC_PORT_HSS_RXD_DFE_D10_D11_OFFSET 0x1bbc
+#define A_XGMAC_PORT_HSS_RXD_DFE_E0_E1_OFFSET 0x1bc0
+#define A_XGMAC_PORT_HSS_RXD_DACA_OFFSET 0x1bc4
+#define A_XGMAC_PORT_HSS_RXD_DACAP_DAC_AN_OFFSET 0x1bc8
+#define A_XGMAC_PORT_HSS_RXD_DACA_MIN 0x1bcc
+#define A_XGMAC_PORT_HSS_RXD_ADAC_CTRL 0x1bd0
+#define A_XGMAC_PORT_HSS_RXD_DIGITAL_EYE_CTRL 0x1bd4
+#define A_XGMAC_PORT_HSS_RXD_DIGITAL_EYE_METRICS 0x1bd8
+#define A_XGMAC_PORT_HSS_RXD_DFE_H1 0x1bdc
+#define A_XGMAC_PORT_HSS_RXD_DFE_H2 0x1be0
+#define A_XGMAC_PORT_HSS_RXD_DFE_H3 0x1be4
+#define A_XGMAC_PORT_HSS_RXD_DFE_H4 0x1be8
+#define A_XGMAC_PORT_HSS_RXD_DFE_H5 0x1bec
+#define A_XGMAC_PORT_HSS_RXD_DAC_DPC 0x1bf0
+#define A_XGMAC_PORT_HSS_RXD_DDC 0x1bf4
+#define A_XGMAC_PORT_HSS_RXD_INTERNAL_STATUS 0x1bf8
+#define A_XGMAC_PORT_HSS_RXD_DFE_FUNC_CTRL 0x1bfc
+#define A_XGMAC_PORT_HSS_VCO_COARSE_CALIBRATION_0 0x1c00
+
+#define S_BSELO 0
+#define M_BSELO 0xfU
+#define V_BSELO(x) ((x) << S_BSELO)
+#define G_BSELO(x) (((x) >> S_BSELO) & M_BSELO)
+
+#define A_XGMAC_PORT_HSS_VCO_COARSE_CALIBRATION_1 0x1c04
+
+#define S_LDET 4
+#define V_LDET(x) ((x) << S_LDET)
+#define F_LDET V_LDET(1U)
+
+#define S_CCERR 3
+#define V_CCERR(x) ((x) << S_CCERR)
+#define F_CCERR V_CCERR(1U)
+
+#define S_CCCMP 2
+#define V_CCCMP(x) ((x) << S_CCCMP)
+#define F_CCCMP V_CCCMP(1U)
+
+#define A_XGMAC_PORT_HSS_VCO_COARSE_CALIBRATION_2 0x1c08
+
+#define S_BSELI 0
+#define M_BSELI 0xfU
+#define V_BSELI(x) ((x) << S_BSELI)
+#define G_BSELI(x) (((x) >> S_BSELI) & M_BSELI)
+
+#define A_XGMAC_PORT_HSS_VCO_COARSE_CALIBRATION_3 0x1c0c
+
+#define S_VISEL 4
+#define V_VISEL(x) ((x) << S_VISEL)
+#define F_VISEL V_VISEL(1U)
+
+#define S_FMIN 3
+#define V_FMIN(x) ((x) << S_FMIN)
+#define F_FMIN V_FMIN(1U)
+
+#define S_FMAX 2
+#define V_FMAX(x) ((x) << S_FMAX)
+#define F_FMAX V_FMAX(1U)
+
+#define S_CVHOLD 1
+#define V_CVHOLD(x) ((x) << S_CVHOLD)
+#define F_CVHOLD V_CVHOLD(1U)
+
+#define S_TCDIS 0
+#define V_TCDIS(x) ((x) << S_TCDIS)
+#define F_TCDIS V_TCDIS(1U)
+
+#define A_XGMAC_PORT_HSS_VCO_COARSE_CALIBRATION_4 0x1c10
+
+#define S_CMETH 2
+#define V_CMETH(x) ((x) << S_CMETH)
+#define F_CMETH V_CMETH(1U)
+
+#define S_RECAL 1
+#define V_RECAL(x) ((x) << S_RECAL)
+#define F_RECAL V_RECAL(1U)
+
+#define S_CCLD 0
+#define V_CCLD(x) ((x) << S_CCLD)
+#define F_CCLD V_CCLD(1U)
+
+#define A_XGMAC_PORT_HSS_ANALOG_TEST_MUX 0x1c14
+
+#define S_ATST 0
+#define M_ATST 0x1fU
+#define V_ATST(x) ((x) << S_ATST)
+#define G_ATST(x) (((x) >> S_ATST) & M_ATST)
+
+#define A_XGMAC_PORT_HSS_PORT_EN_0 0x1c18
+
+#define S_RXDEN 7
+#define V_RXDEN(x) ((x) << S_RXDEN)
+#define F_RXDEN V_RXDEN(1U)
+
+#define S_RXCEN 6
+#define V_RXCEN(x) ((x) << S_RXCEN)
+#define F_RXCEN V_RXCEN(1U)
+
+#define S_TXDEN 5
+#define V_TXDEN(x) ((x) << S_TXDEN)
+#define F_TXDEN V_TXDEN(1U)
+
+#define S_TXCEN 4
+#define V_TXCEN(x) ((x) << S_TXCEN)
+#define F_TXCEN V_TXCEN(1U)
+
+#define S_RXBEN 3
+#define V_RXBEN(x) ((x) << S_RXBEN)
+#define F_RXBEN V_RXBEN(1U)
+
+#define S_RXAEN 2
+#define V_RXAEN(x) ((x) << S_RXAEN)
+#define F_RXAEN V_RXAEN(1U)
+
+#define S_TXBEN 1
+#define V_TXBEN(x) ((x) << S_TXBEN)
+#define F_TXBEN V_TXBEN(1U)
+
+#define S_TXAEN 0
+#define V_TXAEN(x) ((x) << S_TXAEN)
+#define F_TXAEN V_TXAEN(1U)
+
+#define A_XGMAC_PORT_HSS_PORT_RESET_0 0x1c20
+
+#define S_RXDRST 7
+#define V_RXDRST(x) ((x) << S_RXDRST)
+#define F_RXDRST V_RXDRST(1U)
+
+#define S_RXCRST 6
+#define V_RXCRST(x) ((x) << S_RXCRST)
+#define F_RXCRST V_RXCRST(1U)
+
+#define S_TXDRST 5
+#define V_TXDRST(x) ((x) << S_TXDRST)
+#define F_TXDRST V_TXDRST(1U)
+
+#define S_TXCRST 4
+#define V_TXCRST(x) ((x) << S_TXCRST)
+#define F_TXCRST V_TXCRST(1U)
+
+#define S_RXBRST 3
+#define V_RXBRST(x) ((x) << S_RXBRST)
+#define F_RXBRST V_RXBRST(1U)
+
+#define S_RXARST 2
+#define V_RXARST(x) ((x) << S_RXARST)
+#define F_RXARST V_RXARST(1U)
+
+#define S_TXBRST 1
+#define V_TXBRST(x) ((x) << S_TXBRST)
+#define F_TXBRST V_TXBRST(1U)
+
+#define S_TXARST 0
+#define V_TXARST(x) ((x) << S_TXARST)
+#define F_TXARST V_TXARST(1U)
+
+#define A_XGMAC_PORT_HSS_CHARGE_PUMP_CTRL 0x1c28
+
+#define S_ENCPIS 2
+#define V_ENCPIS(x) ((x) << S_ENCPIS)
+#define F_ENCPIS V_ENCPIS(1U)
+
+#define S_CPISEL 0
+#define M_CPISEL 0x3U
+#define V_CPISEL(x) ((x) << S_CPISEL)
+#define G_CPISEL(x) (((x) >> S_CPISEL) & M_CPISEL)
+
+#define A_XGMAC_PORT_HSS_BAND_GAP_CTRL 0x1c2c
+
+#define S_BGCTL 0
+#define M_BGCTL 0x1fU
+#define V_BGCTL(x) ((x) << S_BGCTL)
+#define G_BGCTL(x) (((x) >> S_BGCTL) & M_BGCTL)
+
+#define A_XGMAC_PORT_HSS_LOFREQ_OVR 0x1c30
+
+#define S_LFREQ2 3
+#define V_LFREQ2(x) ((x) << S_LFREQ2)
+#define F_LFREQ2 V_LFREQ2(1U)
+
+#define S_LFREQ1 2
+#define V_LFREQ1(x) ((x) << S_LFREQ1)
+#define F_LFREQ1 V_LFREQ1(1U)
+
+#define S_LFREQO 1
+#define V_LFREQO(x) ((x) << S_LFREQO)
+#define F_LFREQO V_LFREQO(1U)
+
+#define S_LFSEL 0
+#define V_LFSEL(x) ((x) << S_LFSEL)
+#define F_LFSEL V_LFSEL(1U)
+
+#define A_XGMAC_PORT_HSS_VOLTAGE_BOOST_CTRL 0x1c38
+
+#define S_PFVAL 2
+#define V_PFVAL(x) ((x) << S_PFVAL)
+#define F_PFVAL V_PFVAL(1U)
+
+#define S_PFEN 1
+#define V_PFEN(x) ((x) << S_PFEN)
+#define F_PFEN V_PFEN(1U)
+
+#define S_VBADJ 0
+#define V_VBADJ(x) ((x) << S_VBADJ)
+#define F_VBADJ V_VBADJ(1U)
+
+#define A_XGMAC_PORT_HSS_TX_MODE_CFG 0x1c80
+#define A_XGMAC_PORT_HSS_TXTEST_CTRL 0x1c84
+#define A_XGMAC_PORT_HSS_TX_COEFF_CTRL 0x1c88
+#define A_XGMAC_PORT_HSS_TX_DRIVER_MODE 0x1c8c
+#define A_XGMAC_PORT_HSS_TX_DRIVER_OVR_CTRL 0x1c90
+#define A_XGMAC_PORT_HSS_TX_TDM_BIASGEN_STANDBY_TIMER 0x1c94
+#define A_XGMAC_PORT_HSS_TX_TDM_BIASGEN_PWRON_TIMER 0x1c98
+#define A_XGMAC_PORT_HSS_TX_TAP0_COEFF 0x1ca0
+#define A_XGMAC_PORT_HSS_TX_TAP1_COEFF 0x1ca4
+#define A_XGMAC_PORT_HSS_TX_TAP2_COEFF 0x1ca8
+#define A_XGMAC_PORT_HSS_TX_PWR 0x1cb0
+#define A_XGMAC_PORT_HSS_TX_POLARITY 0x1cb4
+#define A_XGMAC_PORT_HSS_TX_8023AP_AE_CMD 0x1cb8
+#define A_XGMAC_PORT_HSS_TX_8023AP_AE_STATUS 0x1cbc
+#define A_XGMAC_PORT_HSS_TX_TAP0_IDAC_OVR 0x1cc0
+#define A_XGMAC_PORT_HSS_TX_TAP1_IDAC_OVR 0x1cc4
+#define A_XGMAC_PORT_HSS_TX_TAP2_IDAC_OVR 0x1cc8
+#define A_XGMAC_PORT_HSS_TX_PWR_DAC_OVR 0x1cd0
+#define A_XGMAC_PORT_HSS_TX_PWR_DAC 0x1cd4
+#define A_XGMAC_PORT_HSS_TX_TAP0_IDAC_APP 0x1ce0
+#define A_XGMAC_PORT_HSS_TX_TAP1_IDAC_APP 0x1ce4
+#define A_XGMAC_PORT_HSS_TX_TAP2_IDAC_APP 0x1ce8
+#define A_XGMAC_PORT_HSS_TX_SEG_DIS_APP 0x1cf0
+#define A_XGMAC_PORT_HSS_TX_EXT_ADDR_DATA 0x1cf8
+#define A_XGMAC_PORT_HSS_TX_EXT_ADDR 0x1cfc
+#define A_XGMAC_PORT_HSS_RX_CFG_MODE 0x1d00
+#define A_XGMAC_PORT_HSS_RXTEST_CTRL 0x1d04
+#define A_XGMAC_PORT_HSS_RX_PH_ROTATOR_CTRL 0x1d08
+#define A_XGMAC_PORT_HSS_RX_PH_ROTATOR_OFFSET_CTRL 0x1d0c
+#define A_XGMAC_PORT_HSS_RX_PH_ROTATOR_POSITION1 0x1d10
+#define A_XGMAC_PORT_HSS_RX_PH_ROTATOR_POSITION2 0x1d14
+#define A_XGMAC_PORT_HSS_RX_PH_ROTATOR_STATIC_PH_OFFSET 0x1d18
+#define A_XGMAC_PORT_HSS_RX_SIGDET_CTRL 0x1d1c
+#define A_XGMAC_PORT_HSS_RX_DFE_CTRL 0x1d20
+#define A_XGMAC_PORT_HSS_RX_DFE_DATA_EDGE_SAMPLE 0x1d24
+#define A_XGMAC_PORT_HSS_RX_DFE_AMP_SAMPLE 0x1d28
+#define A_XGMAC_PORT_HSS_RX_VGA_CTRL1 0x1d2c
+#define A_XGMAC_PORT_HSS_RX_VGA_CTRL2 0x1d30
+#define A_XGMAC_PORT_HSS_RX_VGA_CTRL3 0x1d34
+#define A_XGMAC_PORT_HSS_RX_DFE_D00_D01_OFFSET 0x1d38
+#define A_XGMAC_PORT_HSS_RX_DFE_D10_D11_OFFSET 0x1d3c
+#define A_XGMAC_PORT_HSS_RX_DFE_E0_E1_OFFSET 0x1d40
+#define A_XGMAC_PORT_HSS_RX_DACA_OFFSET 0x1d44
+#define A_XGMAC_PORT_HSS_RX_DACAP_DAC_AN_OFFSET 0x1d48
+#define A_XGMAC_PORT_HSS_RX_DACA_MIN 0x1d4c
+#define A_XGMAC_PORT_HSS_RX_ADAC_CTRL 0x1d50
+#define A_XGMAC_PORT_HSS_RX_DIGITAL_EYE_CTRL 0x1d54
+#define A_XGMAC_PORT_HSS_RX_DIGITAL_EYE_METRICS 0x1d58
+#define A_XGMAC_PORT_HSS_RX_DFE_H1 0x1d5c
+#define A_XGMAC_PORT_HSS_RX_DFE_H2 0x1d60
+#define A_XGMAC_PORT_HSS_RX_DFE_H3 0x1d64
+#define A_XGMAC_PORT_HSS_RX_DFE_H4 0x1d68
+#define A_XGMAC_PORT_HSS_RX_DFE_H5 0x1d6c
+#define A_XGMAC_PORT_HSS_RX_DAC_DPC 0x1d70
+#define A_XGMAC_PORT_HSS_RX_DDC 0x1d74
+#define A_XGMAC_PORT_HSS_RX_INTERNAL_STATUS 0x1d78
+#define A_XGMAC_PORT_HSS_RX_DFE_FUNC_CTRL 0x1d7c
+#define A_XGMAC_PORT_HSS_TXRX_CFG_MODE 0x1e00
+#define A_XGMAC_PORT_HSS_TXRXTEST_CTRL 0x1e04
+
+/* registers for module UP */
+#define UP_BASE_ADDR 0x0
+
+#define A_UP_IBQ_CONFIG 0x0
+
+#define S_IBQGEN2 2
+#define M_IBQGEN2 0x3fffffffU
+#define V_IBQGEN2(x) ((x) << S_IBQGEN2)
+#define G_IBQGEN2(x) (((x) >> S_IBQGEN2) & M_IBQGEN2)
+
+#define S_IBQBUSY 1
+#define V_IBQBUSY(x) ((x) << S_IBQBUSY)
+#define F_IBQBUSY V_IBQBUSY(1U)
+
+#define S_IBQEN 0
+#define V_IBQEN(x) ((x) << S_IBQEN)
+#define F_IBQEN V_IBQEN(1U)
+
+#define A_UP_OBQ_CONFIG 0x4
+
+#define S_OBQGEN2 2
+#define M_OBQGEN2 0x3fffffffU
+#define V_OBQGEN2(x) ((x) << S_OBQGEN2)
+#define G_OBQGEN2(x) (((x) >> S_OBQGEN2) & M_OBQGEN2)
+
+#define S_OBQBUSY 1
+#define V_OBQBUSY(x) ((x) << S_OBQBUSY)
+#define F_OBQBUSY V_OBQBUSY(1U)
+
+#define S_OBQEN 0
+#define V_OBQEN(x) ((x) << S_OBQEN)
+#define F_OBQEN V_OBQEN(1U)
+
+#define A_UP_IBQ_GEN 0x8
+
+#define S_IBQGEN0 22
+#define M_IBQGEN0 0x3ffU
+#define V_IBQGEN0(x) ((x) << S_IBQGEN0)
+#define G_IBQGEN0(x) (((x) >> S_IBQGEN0) & M_IBQGEN0)
+
+#define S_IBQTSCHCHNLRDY 18
+#define M_IBQTSCHCHNLRDY 0xfU
+#define V_IBQTSCHCHNLRDY(x) ((x) << S_IBQTSCHCHNLRDY)
+#define G_IBQTSCHCHNLRDY(x) (((x) >> S_IBQTSCHCHNLRDY) & M_IBQTSCHCHNLRDY)
+
+#define S_IBQMBVFSTATUS 17
+#define V_IBQMBVFSTATUS(x) ((x) << S_IBQMBVFSTATUS)
+#define F_IBQMBVFSTATUS V_IBQMBVFSTATUS(1U)
+
+#define S_IBQMBSTATUS 16
+#define V_IBQMBSTATUS(x) ((x) << S_IBQMBSTATUS)
+#define F_IBQMBSTATUS V_IBQMBSTATUS(1U)
+
+#define S_IBQGEN1 6
+#define M_IBQGEN1 0x3ffU
+#define V_IBQGEN1(x) ((x) << S_IBQGEN1)
+#define G_IBQGEN1(x) (((x) >> S_IBQGEN1) & M_IBQGEN1)
+
+#define S_IBQEMPTY 0
+#define M_IBQEMPTY 0x3fU
+#define V_IBQEMPTY(x) ((x) << S_IBQEMPTY)
+#define G_IBQEMPTY(x) (((x) >> S_IBQEMPTY) & M_IBQEMPTY)
+
+#define A_UP_OBQ_GEN 0xc
+
+#define S_OBQGEN 6
+#define M_OBQGEN 0x3ffffffU
+#define V_OBQGEN(x) ((x) << S_OBQGEN)
+#define G_OBQGEN(x) (((x) >> S_OBQGEN) & M_OBQGEN)
+
+#define S_OBQFULL 0
+#define M_OBQFULL 0x3fU
+#define V_OBQFULL(x) ((x) << S_OBQFULL)
+#define G_OBQFULL(x) (((x) >> S_OBQFULL) & M_OBQFULL)
+
+#define A_UP_IBQ_0_RDADDR 0x10
+
+#define S_QUEID 13
+#define M_QUEID 0x7ffffU
+#define V_QUEID(x) ((x) << S_QUEID)
+#define G_QUEID(x) (((x) >> S_QUEID) & M_QUEID)
+
+#define S_IBQRDADDR 0
+#define M_IBQRDADDR 0x1fffU
+#define V_IBQRDADDR(x) ((x) << S_IBQRDADDR)
+#define G_IBQRDADDR(x) (((x) >> S_IBQRDADDR) & M_IBQRDADDR)
+
+#define A_UP_IBQ_0_WRADDR 0x14
+
+#define S_IBQWRADDR 0
+#define M_IBQWRADDR 0x1fffU
+#define V_IBQWRADDR(x) ((x) << S_IBQWRADDR)
+#define G_IBQWRADDR(x) (((x) >> S_IBQWRADDR) & M_IBQWRADDR)
+
+#define A_UP_IBQ_0_STATUS 0x18
+
+#define S_QUEERRFRAME 31
+#define V_QUEERRFRAME(x) ((x) << S_QUEERRFRAME)
+#define F_QUEERRFRAME V_QUEERRFRAME(1U)
+
+#define S_QUEREMFLITS 0
+#define M_QUEREMFLITS 0x7ffU
+#define V_QUEREMFLITS(x) ((x) << S_QUEREMFLITS)
+#define G_QUEREMFLITS(x) (((x) >> S_QUEREMFLITS) & M_QUEREMFLITS)
+
+#define A_UP_IBQ_0_PKTCNT 0x1c
+
+#define S_QUEEOPCNT 16
+#define M_QUEEOPCNT 0xfffU
+#define V_QUEEOPCNT(x) ((x) << S_QUEEOPCNT)
+#define G_QUEEOPCNT(x) (((x) >> S_QUEEOPCNT) & M_QUEEOPCNT)
+
+#define S_QUESOPCNT 0
+#define M_QUESOPCNT 0xfffU
+#define V_QUESOPCNT(x) ((x) << S_QUESOPCNT)
+#define G_QUESOPCNT(x) (((x) >> S_QUESOPCNT) & M_QUESOPCNT)
+
+#define A_UP_IBQ_1_RDADDR 0x20
+#define A_UP_IBQ_1_WRADDR 0x24
+#define A_UP_IBQ_1_STATUS 0x28
+#define A_UP_IBQ_1_PKTCNT 0x2c
+#define A_UP_IBQ_2_RDADDR 0x30
+#define A_UP_IBQ_2_WRADDR 0x34
+#define A_UP_IBQ_2_STATUS 0x38
+#define A_UP_IBQ_2_PKTCNT 0x3c
+#define A_UP_IBQ_3_RDADDR 0x40
+#define A_UP_IBQ_3_WRADDR 0x44
+#define A_UP_IBQ_3_STATUS 0x48
+#define A_UP_IBQ_3_PKTCNT 0x4c
+#define A_UP_IBQ_4_RDADDR 0x50
+#define A_UP_IBQ_4_WRADDR 0x54
+#define A_UP_IBQ_4_STATUS 0x58
+#define A_UP_IBQ_4_PKTCNT 0x5c
+#define A_UP_IBQ_5_RDADDR 0x60
+#define A_UP_IBQ_5_WRADDR 0x64
+#define A_UP_IBQ_5_STATUS 0x68
+#define A_UP_IBQ_5_PKTCNT 0x6c
+#define A_UP_OBQ_0_RDADDR 0x70
+
+#define S_OBQID 15
+#define M_OBQID 0x1ffffU
+#define V_OBQID(x) ((x) << S_OBQID)
+#define G_OBQID(x) (((x) >> S_OBQID) & M_OBQID)
+
+#define S_QUERDADDR 0
+#define M_QUERDADDR 0x7fffU
+#define V_QUERDADDR(x) ((x) << S_QUERDADDR)
+#define G_QUERDADDR(x) (((x) >> S_QUERDADDR) & M_QUERDADDR)
+
+#define A_UP_OBQ_0_WRADDR 0x74
+
+#define S_QUEWRADDR 0
+#define M_QUEWRADDR 0x7fffU
+#define V_QUEWRADDR(x) ((x) << S_QUEWRADDR)
+#define G_QUEWRADDR(x) (((x) >> S_QUEWRADDR) & M_QUEWRADDR)
+
+#define A_UP_OBQ_0_STATUS 0x78
+#define A_UP_OBQ_0_PKTCNT 0x7c
+#define A_UP_OBQ_1_RDADDR 0x80
+#define A_UP_OBQ_1_WRADDR 0x84
+#define A_UP_OBQ_1_STATUS 0x88
+#define A_UP_OBQ_1_PKTCNT 0x8c
+#define A_UP_OBQ_2_RDADDR 0x90
+#define A_UP_OBQ_2_WRADDR 0x94
+#define A_UP_OBQ_2_STATUS 0x98
+#define A_UP_OBQ_2_PKTCNT 0x9c
+#define A_UP_OBQ_3_RDADDR 0xa0
+#define A_UP_OBQ_3_WRADDR 0xa4
+#define A_UP_OBQ_3_STATUS 0xa8
+#define A_UP_OBQ_3_PKTCNT 0xac
+#define A_UP_OBQ_4_RDADDR 0xb0
+#define A_UP_OBQ_4_WRADDR 0xb4
+#define A_UP_OBQ_4_STATUS 0xb8
+#define A_UP_OBQ_4_PKTCNT 0xbc
+#define A_UP_OBQ_5_RDADDR 0xc0
+#define A_UP_OBQ_5_WRADDR 0xc4
+#define A_UP_OBQ_5_STATUS 0xc8
+#define A_UP_OBQ_5_PKTCNT 0xcc
+#define A_UP_IBQ_0_CONFIG 0xd0
+
+#define S_QUESIZE 26
+#define M_QUESIZE 0x3fU
+#define V_QUESIZE(x) ((x) << S_QUESIZE)
+#define G_QUESIZE(x) (((x) >> S_QUESIZE) & M_QUESIZE)
+
+#define S_QUEBASE 8
+#define M_QUEBASE 0x3fU
+#define V_QUEBASE(x) ((x) << S_QUEBASE)
+#define G_QUEBASE(x) (((x) >> S_QUEBASE) & M_QUEBASE)
+
+#define S_QUEDBG8BEN 7
+#define V_QUEDBG8BEN(x) ((x) << S_QUEDBG8BEN)
+#define F_QUEDBG8BEN V_QUEDBG8BEN(1U)
+
+#define S_QUEBAREADDR 0
+#define V_QUEBAREADDR(x) ((x) << S_QUEBAREADDR)
+#define F_QUEBAREADDR V_QUEBAREADDR(1U)
+
+#define A_UP_IBQ_0_REALADDR 0xd4
+
+#define S_QUERDADDRWRAP 31
+#define V_QUERDADDRWRAP(x) ((x) << S_QUERDADDRWRAP)
+#define F_QUERDADDRWRAP V_QUERDADDRWRAP(1U)
+
+#define S_QUEWRADDRWRAP 30
+#define V_QUEWRADDRWRAP(x) ((x) << S_QUEWRADDRWRAP)
+#define F_QUEWRADDRWRAP V_QUEWRADDRWRAP(1U)
+
+#define S_QUEMEMADDR 3
+#define M_QUEMEMADDR 0x7ffU
+#define V_QUEMEMADDR(x) ((x) << S_QUEMEMADDR)
+#define G_QUEMEMADDR(x) (((x) >> S_QUEMEMADDR) & M_QUEMEMADDR)
+
+#define A_UP_IBQ_1_CONFIG 0xd8
+#define A_UP_IBQ_1_REALADDR 0xdc
+#define A_UP_IBQ_2_CONFIG 0xe0
+#define A_UP_IBQ_2_REALADDR 0xe4
+#define A_UP_IBQ_3_CONFIG 0xe8
+#define A_UP_IBQ_3_REALADDR 0xec
+#define A_UP_IBQ_4_CONFIG 0xf0
+#define A_UP_IBQ_4_REALADDR 0xf4
+#define A_UP_IBQ_5_CONFIG 0xf8
+#define A_UP_IBQ_5_REALADDR 0xfc
+#define A_UP_OBQ_0_CONFIG 0x100
+#define A_UP_OBQ_0_REALADDR 0x104
+#define A_UP_OBQ_1_CONFIG 0x108
+#define A_UP_OBQ_1_REALADDR 0x10c
+#define A_UP_OBQ_2_CONFIG 0x110
+#define A_UP_OBQ_2_REALADDR 0x114
+#define A_UP_OBQ_3_CONFIG 0x118
+#define A_UP_OBQ_3_REALADDR 0x11c
+#define A_UP_OBQ_4_CONFIG 0x120
+#define A_UP_OBQ_4_REALADDR 0x124
+#define A_UP_OBQ_5_CONFIG 0x128
+#define A_UP_OBQ_5_REALADDR 0x12c
+#define A_UP_MAILBOX_STATUS 0x130
+
+#define S_MBGEN0 20
+#define M_MBGEN0 0xfffU
+#define V_MBGEN0(x) ((x) << S_MBGEN0)
+#define G_MBGEN0(x) (((x) >> S_MBGEN0) & M_MBGEN0)
+
+#define S_GENTIMERTRIGGER 16
+#define M_GENTIMERTRIGGER 0xfU
+#define V_GENTIMERTRIGGER(x) ((x) << S_GENTIMERTRIGGER)
+#define G_GENTIMERTRIGGER(x) (((x) >> S_GENTIMERTRIGGER) & M_GENTIMERTRIGGER)
+
+#define S_MBGEN1 8
+#define M_MBGEN1 0xffU
+#define V_MBGEN1(x) ((x) << S_MBGEN1)
+#define G_MBGEN1(x) (((x) >> S_MBGEN1) & M_MBGEN1)
+
+#define S_MBPFINT 0
+#define M_MBPFINT 0xffU
+#define V_MBPFINT(x) ((x) << S_MBPFINT)
+#define G_MBPFINT(x) (((x) >> S_MBPFINT) & M_MBPFINT)
+
+#define A_UP_UP_DBG_LA_CFG 0x140
+
+#define S_UPDBGLACAPTBUB 31
+#define V_UPDBGLACAPTBUB(x) ((x) << S_UPDBGLACAPTBUB)
+#define F_UPDBGLACAPTBUB V_UPDBGLACAPTBUB(1U)
+
+#define S_UPDBGLACAPTPCONLY 30
+#define V_UPDBGLACAPTPCONLY(x) ((x) << S_UPDBGLACAPTPCONLY)
+#define F_UPDBGLACAPTPCONLY V_UPDBGLACAPTPCONLY(1U)
+
+#define S_UPDBGLAMASKSTOP 29
+#define V_UPDBGLAMASKSTOP(x) ((x) << S_UPDBGLAMASKSTOP)
+#define F_UPDBGLAMASKSTOP V_UPDBGLAMASKSTOP(1U)
+
+#define S_UPDBGLAMASKTRIG 28
+#define V_UPDBGLAMASKTRIG(x) ((x) << S_UPDBGLAMASKTRIG)
+#define F_UPDBGLAMASKTRIG V_UPDBGLAMASKTRIG(1U)
+
+#define S_UPDBGLAWRPTR 16
+#define M_UPDBGLAWRPTR 0xfffU
+#define V_UPDBGLAWRPTR(x) ((x) << S_UPDBGLAWRPTR)
+#define G_UPDBGLAWRPTR(x) (((x) >> S_UPDBGLAWRPTR) & M_UPDBGLAWRPTR)
+
+#define S_UPDBGLARDPTR 2
+#define M_UPDBGLARDPTR 0xfffU
+#define V_UPDBGLARDPTR(x) ((x) << S_UPDBGLARDPTR)
+#define G_UPDBGLARDPTR(x) (((x) >> S_UPDBGLARDPTR) & M_UPDBGLARDPTR)
+
+#define S_UPDBGLARDEN 1
+#define V_UPDBGLARDEN(x) ((x) << S_UPDBGLARDEN)
+#define F_UPDBGLARDEN V_UPDBGLARDEN(1U)
+
+#define S_UPDBGLAEN 0
+#define V_UPDBGLAEN(x) ((x) << S_UPDBGLAEN)
+#define F_UPDBGLAEN V_UPDBGLAEN(1U)
+
+#define A_UP_UP_DBG_LA_DATA 0x144
+#define A_UP_PIO_MST_CONFIG 0x148
+
+#define S_FLSRC 24
+#define M_FLSRC 0x7U
+#define V_FLSRC(x) ((x) << S_FLSRC)
+#define G_FLSRC(x) (((x) >> S_FLSRC) & M_FLSRC)
+
+#define S_SEPROT 23
+#define V_SEPROT(x) ((x) << S_SEPROT)
+#define F_SEPROT V_SEPROT(1U)
+
+#define S_SESRC 20
+#define M_SESRC 0x7U
+#define V_SESRC(x) ((x) << S_SESRC)
+#define G_SESRC(x) (((x) >> S_SESRC) & M_SESRC)
+
+#define S_UPRGN 19
+#define V_UPRGN(x) ((x) << S_UPRGN)
+#define F_UPRGN V_UPRGN(1U)
+
+#define S_UPPF 16
+#define M_UPPF 0x7U
+#define V_UPPF(x) ((x) << S_UPPF)
+#define G_UPPF(x) (((x) >> S_UPPF) & M_UPPF)
+
+#define S_UPRID 0
+#define M_UPRID 0xffffU
+#define V_UPRID(x) ((x) << S_UPRID)
+#define G_UPRID(x) (((x) >> S_UPRID) & M_UPRID)
+
+#define A_UP_UP_SELF_CONTROL 0x14c
+
+#define S_UPSELFRESET 0
+#define V_UPSELFRESET(x) ((x) << S_UPSELFRESET)
+#define F_UPSELFRESET V_UPSELFRESET(1U)
+
+#define A_UP_MAILBOX_PF0_CTL 0x180
+#define A_UP_MAILBOX_PF1_CTL 0x190
+#define A_UP_MAILBOX_PF2_CTL 0x1a0
+#define A_UP_MAILBOX_PF3_CTL 0x1b0
+#define A_UP_MAILBOX_PF4_CTL 0x1c0
+#define A_UP_MAILBOX_PF5_CTL 0x1d0
+#define A_UP_MAILBOX_PF6_CTL 0x1e0
+#define A_UP_MAILBOX_PF7_CTL 0x1f0
+#define A_UP_TSCH_CHNLN_CLASS_RDY 0x200
+#define A_UP_TSCH_CHNLN_CLASS_WATCH_RDY 0x204
+
+#define S_TSCHWRRLIMIT 16
+#define M_TSCHWRRLIMIT 0xffffU
+#define V_TSCHWRRLIMIT(x) ((x) << S_TSCHWRRLIMIT)
+#define G_TSCHWRRLIMIT(x) (((x) >> S_TSCHWRRLIMIT) & M_TSCHWRRLIMIT)
+
+#define S_TSCHCHNLCWRDY 0
+#define M_TSCHCHNLCWRDY 0xffffU
+#define V_TSCHCHNLCWRDY(x) ((x) << S_TSCHCHNLCWRDY)
+#define G_TSCHCHNLCWRDY(x) (((x) >> S_TSCHCHNLCWRDY) & M_TSCHCHNLCWRDY)
+
+#define A_UP_TSCH_CHNLN_CLASS_WATCH_LIST 0x208
+
+#define S_TSCHWRRRELOAD 16
+#define M_TSCHWRRRELOAD 0xffffU
+#define V_TSCHWRRRELOAD(x) ((x) << S_TSCHWRRRELOAD)
+#define G_TSCHWRRRELOAD(x) (((x) >> S_TSCHWRRRELOAD) & M_TSCHWRRRELOAD)
+
+#define S_TSCHCHNLCWATCH 0
+#define M_TSCHCHNLCWATCH 0xffffU
+#define V_TSCHCHNLCWATCH(x) ((x) << S_TSCHCHNLCWATCH)
+#define G_TSCHCHNLCWATCH(x) (((x) >> S_TSCHCHNLCWATCH) & M_TSCHCHNLCWATCH)
+
+#define A_UP_TSCH_CHNLN_CLASS_TAKE 0x20c
+
+#define S_TSCHCHNLCNUM 24
+#define M_TSCHCHNLCNUM 0x1fU
+#define V_TSCHCHNLCNUM(x) ((x) << S_TSCHCHNLCNUM)
+#define G_TSCHCHNLCNUM(x) (((x) >> S_TSCHCHNLCNUM) & M_TSCHCHNLCNUM)
+
+#define S_TSCHCHNLCCNT 0
+#define M_TSCHCHNLCCNT 0xffffffU
+#define V_TSCHCHNLCCNT(x) ((x) << S_TSCHCHNLCCNT)
+#define G_TSCHCHNLCCNT(x) (((x) >> S_TSCHCHNLCCNT) & M_TSCHCHNLCCNT)
+
+#define A_UP_UPLADBGPCCHKDATA_0 0x240
+#define A_UP_UPLADBGPCCHKMASK_0 0x244
+#define A_UP_UPLADBGPCCHKDATA_1 0x250
+#define A_UP_UPLADBGPCCHKMASK_1 0x254
+#define A_UP_UPLADBGPCCHKDATA_2 0x260
+#define A_UP_UPLADBGPCCHKMASK_2 0x264
+#define A_UP_UPLADBGPCCHKDATA_3 0x270
+#define A_UP_UPLADBGPCCHKMASK_3 0x274
+
+/* registers for module CIM_CTL */
+#define CIM_CTL_BASE_ADDR 0x0
+
+#define A_CIM_CTL_CONFIG 0x0
+
+#define S_AUTOPREFLOC 17
+#define M_AUTOPREFLOC 0x1fU
+#define V_AUTOPREFLOC(x) ((x) << S_AUTOPREFLOC)
+#define G_AUTOPREFLOC(x) (((x) >> S_AUTOPREFLOC) & M_AUTOPREFLOC)
+
+#define S_AUTOPREFEN 16
+#define V_AUTOPREFEN(x) ((x) << S_AUTOPREFEN)
+#define F_AUTOPREFEN V_AUTOPREFEN(1U)
+
+#define S_DISMATIMEOUT 15
+#define V_DISMATIMEOUT(x) ((x) << S_DISMATIMEOUT)
+#define F_DISMATIMEOUT V_DISMATIMEOUT(1U)
+
+#define S_PIFMULTICMD 8
+#define V_PIFMULTICMD(x) ((x) << S_PIFMULTICMD)
+#define F_PIFMULTICMD V_PIFMULTICMD(1U)
+
+#define S_UPSELFRESETTOUT 7
+#define V_UPSELFRESETTOUT(x) ((x) << S_UPSELFRESETTOUT)
+#define F_UPSELFRESETTOUT V_UPSELFRESETTOUT(1U)
+
+#define S_PLSWAPDISWR 6
+#define V_PLSWAPDISWR(x) ((x) << S_PLSWAPDISWR)
+#define F_PLSWAPDISWR V_PLSWAPDISWR(1U)
+
+#define S_PLSWAPDISRD 5
+#define V_PLSWAPDISRD(x) ((x) << S_PLSWAPDISRD)
+#define F_PLSWAPDISRD V_PLSWAPDISRD(1U)
+
+#define S_PREFEN 0
+#define V_PREFEN(x) ((x) << S_PREFEN)
+#define F_PREFEN V_PREFEN(1U)
+
+#define A_CIM_CTL_PREFADDR 0x4
+#define A_CIM_CTL_ALLOCADDR 0x8
+#define A_CIM_CTL_INVLDTADDR 0xc
+#define A_CIM_CTL_STATIC_PREFADDR0 0x10
+#define A_CIM_CTL_STATIC_PREFADDR1 0x14
+#define A_CIM_CTL_STATIC_PREFADDR2 0x18
+#define A_CIM_CTL_STATIC_PREFADDR3 0x1c
+#define A_CIM_CTL_STATIC_PREFADDR4 0x20
+#define A_CIM_CTL_STATIC_PREFADDR5 0x24
+#define A_CIM_CTL_STATIC_PREFADDR6 0x28
+#define A_CIM_CTL_STATIC_PREFADDR7 0x2c
+#define A_CIM_CTL_STATIC_PREFADDR8 0x30
+#define A_CIM_CTL_STATIC_PREFADDR9 0x34
+#define A_CIM_CTL_STATIC_PREFADDR10 0x38
+#define A_CIM_CTL_STATIC_PREFADDR11 0x3c
+#define A_CIM_CTL_STATIC_PREFADDR12 0x40
+#define A_CIM_CTL_STATIC_PREFADDR13 0x44
+#define A_CIM_CTL_STATIC_PREFADDR14 0x48
+#define A_CIM_CTL_STATIC_PREFADDR15 0x4c
+#define A_CIM_CTL_STATIC_ALLOCADDR0 0x50
+#define A_CIM_CTL_STATIC_ALLOCADDR1 0x54
+#define A_CIM_CTL_STATIC_ALLOCADDR2 0x58
+#define A_CIM_CTL_STATIC_ALLOCADDR3 0x5c
+#define A_CIM_CTL_STATIC_ALLOCADDR4 0x60
+#define A_CIM_CTL_STATIC_ALLOCADDR5 0x64
+#define A_CIM_CTL_STATIC_ALLOCADDR6 0x68
+#define A_CIM_CTL_STATIC_ALLOCADDR7 0x6c
+#define A_CIM_CTL_STATIC_ALLOCADDR8 0x70
+#define A_CIM_CTL_STATIC_ALLOCADDR9 0x74
+#define A_CIM_CTL_STATIC_ALLOCADDR10 0x78
+#define A_CIM_CTL_STATIC_ALLOCADDR11 0x7c
+#define A_CIM_CTL_STATIC_ALLOCADDR12 0x80
+#define A_CIM_CTL_STATIC_ALLOCADDR13 0x84
+#define A_CIM_CTL_STATIC_ALLOCADDR14 0x88
+#define A_CIM_CTL_STATIC_ALLOCADDR15 0x8c
+#define A_CIM_CTL_FIFO_CNT 0x90
+
+#define S_CTLFIFOCNT 0
+#define M_CTLFIFOCNT 0xfU
+#define V_CTLFIFOCNT(x) ((x) << S_CTLFIFOCNT)
+#define G_CTLFIFOCNT(x) (((x) >> S_CTLFIFOCNT) & M_CTLFIFOCNT)
+
+#define A_CIM_CTL_GLB_TIMER 0x94
+#define A_CIM_CTL_TIMER0 0x98
+#define A_CIM_CTL_TIMER1 0x9c
+#define A_CIM_CTL_GEN0 0xa0
+#define A_CIM_CTL_GEN1 0xa4
+#define A_CIM_CTL_GEN2 0xa8
+#define A_CIM_CTL_GEN3 0xac
+#define A_CIM_CTL_GLB_TIMER_TICK 0xb0
+#define A_CIM_CTL_GEN_TIMER0_CTL 0xb4
+
+#define S_GENTIMERRUN 7
+#define V_GENTIMERRUN(x) ((x) << S_GENTIMERRUN)
+#define F_GENTIMERRUN V_GENTIMERRUN(1U)
+
+#define S_GENTIMERTRIG 6
+#define V_GENTIMERTRIG(x) ((x) << S_GENTIMERTRIG)
+#define F_GENTIMERTRIG V_GENTIMERTRIG(1U)
+
+#define S_GENTIMERACT 4
+#define M_GENTIMERACT 0x3U
+#define V_GENTIMERACT(x) ((x) << S_GENTIMERACT)
+#define G_GENTIMERACT(x) (((x) >> S_GENTIMERACT) & M_GENTIMERACT)
+
+#define S_GENTIMERCFG 2
+#define M_GENTIMERCFG 0x3U
+#define V_GENTIMERCFG(x) ((x) << S_GENTIMERCFG)
+#define G_GENTIMERCFG(x) (((x) >> S_GENTIMERCFG) & M_GENTIMERCFG)
+
+#define S_GENTIMERSTOP 1
+#define V_GENTIMERSTOP(x) ((x) << S_GENTIMERSTOP)
+#define F_GENTIMERSTOP V_GENTIMERSTOP(1U)
+
+#define S_GENTIMERSTRT 0
+#define V_GENTIMERSTRT(x) ((x) << S_GENTIMERSTRT)
+#define F_GENTIMERSTRT V_GENTIMERSTRT(1U)
+
+#define A_CIM_CTL_GEN_TIMER0 0xb8
+#define A_CIM_CTL_GEN_TIMER1_CTL 0xbc
+#define A_CIM_CTL_GEN_TIMER1 0xc0
+#define A_CIM_CTL_GEN_TIMER2_CTL 0xc4
+#define A_CIM_CTL_GEN_TIMER2 0xc8
+#define A_CIM_CTL_GEN_TIMER3_CTL 0xcc
+#define A_CIM_CTL_GEN_TIMER3 0xd0
+#define A_CIM_CTL_MAILBOX_VF_STATUS 0xe0
+#define A_CIM_CTL_MAILBOX_VFN_CTL 0x100
+#define A_CIM_CTL_TSCH_CHNLN_CTL 0x900
+
+#define S_TSCHNLEN 31
+#define V_TSCHNLEN(x) ((x) << S_TSCHNLEN)
+#define F_TSCHNLEN V_TSCHNLEN(1U)
+
+#define S_TSCHNRESET 30
+#define V_TSCHNRESET(x) ((x) << S_TSCHNRESET)
+#define F_TSCHNRESET V_TSCHNRESET(1U)
+
+#define A_CIM_CTL_TSCH_CHNLN_TICK 0x904
+
+#define S_TSCHNLTICK 0
+#define M_TSCHNLTICK 0xffffU
+#define V_TSCHNLTICK(x) ((x) << S_TSCHNLTICK)
+#define G_TSCHNLTICK(x) (((x) >> S_TSCHNLTICK) & M_TSCHNLTICK)
+
+#define A_CIM_CTL_TSCH_CHNLN_CLASS_ENABLE_A 0x908
+
+#define S_TSC15WRREN 31
+#define V_TSC15WRREN(x) ((x) << S_TSC15WRREN)
+#define F_TSC15WRREN V_TSC15WRREN(1U)
+
+#define S_TSC15RATEEN 30
+#define V_TSC15RATEEN(x) ((x) << S_TSC15RATEEN)
+#define F_TSC15RATEEN V_TSC15RATEEN(1U)
+
+#define S_TSC14WRREN 29
+#define V_TSC14WRREN(x) ((x) << S_TSC14WRREN)
+#define F_TSC14WRREN V_TSC14WRREN(1U)
+
+#define S_TSC14RATEEN 28
+#define V_TSC14RATEEN(x) ((x) << S_TSC14RATEEN)
+#define F_TSC14RATEEN V_TSC14RATEEN(1U)
+
+#define S_TSC13WRREN 27
+#define V_TSC13WRREN(x) ((x) << S_TSC13WRREN)
+#define F_TSC13WRREN V_TSC13WRREN(1U)
+
+#define S_TSC13RATEEN 26
+#define V_TSC13RATEEN(x) ((x) << S_TSC13RATEEN)
+#define F_TSC13RATEEN V_TSC13RATEEN(1U)
+
+#define S_TSC12WRREN 25
+#define V_TSC12WRREN(x) ((x) << S_TSC12WRREN)
+#define F_TSC12WRREN V_TSC12WRREN(1U)
+
+#define S_TSC12RATEEN 24
+#define V_TSC12RATEEN(x) ((x) << S_TSC12RATEEN)
+#define F_TSC12RATEEN V_TSC12RATEEN(1U)
+
+#define S_TSC11WRREN 23
+#define V_TSC11WRREN(x) ((x) << S_TSC11WRREN)
+#define F_TSC11WRREN V_TSC11WRREN(1U)
+
+#define S_TSC11RATEEN 22
+#define V_TSC11RATEEN(x) ((x) << S_TSC11RATEEN)
+#define F_TSC11RATEEN V_TSC11RATEEN(1U)
+
+#define S_TSC10WRREN 21
+#define V_TSC10WRREN(x) ((x) << S_TSC10WRREN)
+#define F_TSC10WRREN V_TSC10WRREN(1U)
+
+#define S_TSC10RATEEN 20
+#define V_TSC10RATEEN(x) ((x) << S_TSC10RATEEN)
+#define F_TSC10RATEEN V_TSC10RATEEN(1U)
+
+#define S_TSC9WRREN 19
+#define V_TSC9WRREN(x) ((x) << S_TSC9WRREN)
+#define F_TSC9WRREN V_TSC9WRREN(1U)
+
+#define S_TSC9RATEEN 18
+#define V_TSC9RATEEN(x) ((x) << S_TSC9RATEEN)
+#define F_TSC9RATEEN V_TSC9RATEEN(1U)
+
+#define S_TSC8WRREN 17
+#define V_TSC8WRREN(x) ((x) << S_TSC8WRREN)
+#define F_TSC8WRREN V_TSC8WRREN(1U)
+
+#define S_TSC8RATEEN 16
+#define V_TSC8RATEEN(x) ((x) << S_TSC8RATEEN)
+#define F_TSC8RATEEN V_TSC8RATEEN(1U)
+
+#define S_TSC7WRREN 15
+#define V_TSC7WRREN(x) ((x) << S_TSC7WRREN)
+#define F_TSC7WRREN V_TSC7WRREN(1U)
+
+#define S_TSC7RATEEN 14
+#define V_TSC7RATEEN(x) ((x) << S_TSC7RATEEN)
+#define F_TSC7RATEEN V_TSC7RATEEN(1U)
+
+#define S_TSC6WRREN 13
+#define V_TSC6WRREN(x) ((x) << S_TSC6WRREN)
+#define F_TSC6WRREN V_TSC6WRREN(1U)
+
+#define S_TSC6RATEEN 12
+#define V_TSC6RATEEN(x) ((x) << S_TSC6RATEEN)
+#define F_TSC6RATEEN V_TSC6RATEEN(1U)
+
+#define S_TSC5WRREN 11
+#define V_TSC5WRREN(x) ((x) << S_TSC5WRREN)
+#define F_TSC5WRREN V_TSC5WRREN(1U)
+
+#define S_TSC5RATEEN 10
+#define V_TSC5RATEEN(x) ((x) << S_TSC5RATEEN)
+#define F_TSC5RATEEN V_TSC5RATEEN(1U)
+
+#define S_TSC4WRREN 9
+#define V_TSC4WRREN(x) ((x) << S_TSC4WRREN)
+#define F_TSC4WRREN V_TSC4WRREN(1U)
+
+#define S_TSC4RATEEN 8
+#define V_TSC4RATEEN(x) ((x) << S_TSC4RATEEN)
+#define F_TSC4RATEEN V_TSC4RATEEN(1U)
+
+#define S_TSC3WRREN 7
+#define V_TSC3WRREN(x) ((x) << S_TSC3WRREN)
+#define F_TSC3WRREN V_TSC3WRREN(1U)
+
+#define S_TSC3RATEEN 6
+#define V_TSC3RATEEN(x) ((x) << S_TSC3RATEEN)
+#define F_TSC3RATEEN V_TSC3RATEEN(1U)
+
+#define S_TSC2WRREN 5
+#define V_TSC2WRREN(x) ((x) << S_TSC2WRREN)
+#define F_TSC2WRREN V_TSC2WRREN(1U)
+
+#define S_TSC2RATEEN 4
+#define V_TSC2RATEEN(x) ((x) << S_TSC2RATEEN)
+#define F_TSC2RATEEN V_TSC2RATEEN(1U)
+
+#define S_TSC1WRREN 3
+#define V_TSC1WRREN(x) ((x) << S_TSC1WRREN)
+#define F_TSC1WRREN V_TSC1WRREN(1U)
+
+#define S_TSC1RATEEN 2
+#define V_TSC1RATEEN(x) ((x) << S_TSC1RATEEN)
+#define F_TSC1RATEEN V_TSC1RATEEN(1U)
+
+#define S_TSC0WRREN 1
+#define V_TSC0WRREN(x) ((x) << S_TSC0WRREN)
+#define F_TSC0WRREN V_TSC0WRREN(1U)
+
+#define S_TSC0RATEEN 0
+#define V_TSC0RATEEN(x) ((x) << S_TSC0RATEEN)
+#define F_TSC0RATEEN V_TSC0RATEEN(1U)
+
+#define A_CIM_CTL_TSCH_MIN_MAX_EN 0x90c
+
+#define S_MIN_MAX_EN 0
+#define V_MIN_MAX_EN(x) ((x) << S_MIN_MAX_EN)
+#define F_MIN_MAX_EN V_MIN_MAX_EN(1U)
+
+#define A_CIM_CTL_TSCH_CHNLN_RATE_LIMITER 0x910
+
+#define S_TSCHNLRATENEG 31
+#define V_TSCHNLRATENEG(x) ((x) << S_TSCHNLRATENEG)
+#define F_TSCHNLRATENEG V_TSCHNLRATENEG(1U)
+
+#define S_TSCHNLRATEL 0
+#define M_TSCHNLRATEL 0x7fffffffU
+#define V_TSCHNLRATEL(x) ((x) << S_TSCHNLRATEL)
+#define G_TSCHNLRATEL(x) (((x) >> S_TSCHNLRATEL) & M_TSCHNLRATEL)
+
+#define A_CIM_CTL_TSCH_CHNLN_RATE_PROPERTIES 0x914
+
+#define S_TSCHNLRMAX 16
+#define M_TSCHNLRMAX 0xffffU
+#define V_TSCHNLRMAX(x) ((x) << S_TSCHNLRMAX)
+#define G_TSCHNLRMAX(x) (((x) >> S_TSCHNLRMAX) & M_TSCHNLRMAX)
+
+#define S_TSCHNLRINCR 0
+#define M_TSCHNLRINCR 0xffffU
+#define V_TSCHNLRINCR(x) ((x) << S_TSCHNLRINCR)
+#define G_TSCHNLRINCR(x) (((x) >> S_TSCHNLRINCR) & M_TSCHNLRINCR)
+
+#define A_CIM_CTL_TSCH_CHNLN_WRR 0x918
+#define A_CIM_CTL_TSCH_CHNLN_WEIGHT 0x91c
+
+#define S_TSCHNLWEIGHT 0
+#define M_TSCHNLWEIGHT 0x3fffffU
+#define V_TSCHNLWEIGHT(x) ((x) << S_TSCHNLWEIGHT)
+#define G_TSCHNLWEIGHT(x) (((x) >> S_TSCHNLWEIGHT) & M_TSCHNLWEIGHT)
+
+#define A_CIM_CTL_TSCH_CHNLN_CLASSM_RATE_PROPERTIES 0x924
+
+#define S_TSCCLRMAX 16
+#define M_TSCCLRMAX 0xffffU
+#define V_TSCCLRMAX(x) ((x) << S_TSCCLRMAX)
+#define G_TSCCLRMAX(x) (((x) >> S_TSCCLRMAX) & M_TSCCLRMAX)
+
+#define S_TSCCLRINCR 0
+#define M_TSCCLRINCR 0xffffU
+#define V_TSCCLRINCR(x) ((x) << S_TSCCLRINCR)
+#define G_TSCCLRINCR(x) (((x) >> S_TSCCLRINCR) & M_TSCCLRINCR)
+
+#define A_CIM_CTL_TSCH_CHNLN_CLASSM_WRR 0x928
+
+#define S_TSCCLWRRNEG 31
+#define V_TSCCLWRRNEG(x) ((x) << S_TSCCLWRRNEG)
+#define F_TSCCLWRRNEG V_TSCCLWRRNEG(1U)
+
+#define S_TSCCLWRR 0
+#define M_TSCCLWRR 0x3ffffffU
+#define V_TSCCLWRR(x) ((x) << S_TSCCLWRR)
+#define G_TSCCLWRR(x) (((x) >> S_TSCCLWRR) & M_TSCCLWRR)
+
+#define A_CIM_CTL_TSCH_CHNLN_CLASSM_WEIGHT 0x92c
+
+#define S_TSCCLWEIGHT 0
+#define M_TSCCLWEIGHT 0xffffU
+#define V_TSCCLWEIGHT(x) ((x) << S_TSCCLWEIGHT)
+#define G_TSCCLWEIGHT(x) (((x) >> S_TSCCLWEIGHT) & M_TSCCLWEIGHT)
diff --git a/sys/dev/cxgbe/common/t4_regs_values.h b/sys/dev/cxgbe/common/t4_regs_values.h
new file mode 100644
index 0000000..aacda81
--- /dev/null
+++ b/sys/dev/cxgbe/common/t4_regs_values.h
@@ -0,0 +1,192 @@
+/*-
+ * Copyright (c) 2011 Chelsio Communications, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef __T4_REGS_VALUES_H__
+#define __T4_REGS_VALUES_H__
+
+/*
+ * This file contains definitions for various T4 register value hardware
+ * constants. The types of values encoded here are predominantly those for
+ * register fields which control "modal" behavior. For the most part, we do
+ * not include definitions for register fields which are simple numeric
+ * metrics, etc.
+ *
+ * These new "modal values" use a naming convention which matches the
+ * currently existing macros in t4_regs.h. For register field FOO which would
+ * have S_FOO, M_FOO, V_FOO() and G_FOO() macros, we introduce X_FOO_{MODE}
+ * definitions. These can be used as V_FOO(X_FOO_MODE) or as (G_FOO(x) ==
+ * X_FOO_MODE).
+ *
+ * Note that this should all be part of t4_regs.h but the toolset used to
+ * generate that file doesn't [yet] have the capability of collecting these
+ * constants.
+ */
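+
+/*
+ * Illustrative use of the convention above (a sketch only, not part of the
+ * generated constants; FOO stands for any modal register field whose
+ * S_/M_/V_/G_ macros are defined in t4_regs.h, and reg/val are hypothetical
+ * variables):
+ *
+ *	val = V_FOO(X_FOO_MODE);		set field FOO to a mode
+ *	if (G_FOO(reg) == X_FOO_MODE)		test field FOO for a mode
+ *		...
+ */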
+
+/*
+ * SGE definitions.
+ * ================
+ */
+
+/*
+ * SGE register field values.
+ */
+
+/* CONTROL register */
+#define X_FLSPLITMODE_FLSPLITMIN 0
+#define X_FLSPLITMODE_ETHHDR 1
+#define X_FLSPLITMODE_IPHDR 2
+#define X_FLSPLITMODE_TCPHDR 3
+
+#define X_DCASYSTYPE_FSB 0
+#define X_DCASYSTYPE_CSI 1
+
+#define X_EGSTATPAGESIZE_64B 0
+#define X_EGSTATPAGESIZE_128B 1
+
+#define X_RXPKTCPLMODE_DATA 0
+#define X_RXPKTCPLMODE_SPLIT 1
+
+#define X_INGPCIEBOUNDARY_SHIFT 5
+#define X_INGPCIEBOUNDARY_32B 0
+#define X_INGPCIEBOUNDARY_64B 1
+#define X_INGPCIEBOUNDARY_128B 2
+#define X_INGPCIEBOUNDARY_256B 3
+#define X_INGPCIEBOUNDARY_512B 4
+#define X_INGPCIEBOUNDARY_1024B 5
+#define X_INGPCIEBOUNDARY_2048B 6
+#define X_INGPCIEBOUNDARY_4096B 7
+
+#define X_INGPADBOUNDARY_SHIFT 5
+#define X_INGPADBOUNDARY_32B 0
+#define X_INGPADBOUNDARY_64B 1
+#define X_INGPADBOUNDARY_128B 2
+#define X_INGPADBOUNDARY_256B 3
+#define X_INGPADBOUNDARY_512B 4
+#define X_INGPADBOUNDARY_1024B 5
+#define X_INGPADBOUNDARY_2048B 6
+#define X_INGPADBOUNDARY_4096B 7
+
+#define X_EGRPCIEBOUNDARY_SHIFT 5
+#define X_EGRPCIEBOUNDARY_32B 0
+#define X_EGRPCIEBOUNDARY_64B 1
+#define X_EGRPCIEBOUNDARY_128B 2
+#define X_EGRPCIEBOUNDARY_256B 3
+#define X_EGRPCIEBOUNDARY_512B 4
+#define X_EGRPCIEBOUNDARY_1024B 5
+#define X_EGRPCIEBOUNDARY_2048B 6
+#define X_EGRPCIEBOUNDARY_4096B 7
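+
+/*
+ * Note (inferred from the encodings above, not part of the generated
+ * constants): a *BOUNDARY encoding N selects a boundary of 1 << (N + SHIFT)
+ * bytes, e.g. X_INGPADBOUNDARY_32B (0) with X_INGPADBOUNDARY_SHIFT (5) is
+ * 1 << 5 = 32 bytes, and X_INGPADBOUNDARY_4096B (7) is 1 << 12 = 4096 bytes.
+ */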
+
+/* GTS register */
+#define SGE_TIMERREGS 6
+#define X_TIMERREG_COUNTER0 0
+#define X_TIMERREG_COUNTER1 1
+#define X_TIMERREG_COUNTER2 2
+#define X_TIMERREG_COUNTER3 3
+#define X_TIMERREG_COUNTER4 4
+#define X_TIMERREG_COUNTER5 5
+#define X_TIMERREG_RESTART_COUNTER 6
+#define X_TIMERREG_UPDATE_CIDX 7
+
+/*
+ * Egress Context field values
+ */
+#define EC_WR_UNITS 16
+
+#define X_FETCHBURSTMIN_SHIFT 4
+#define X_FETCHBURSTMIN_16B 0
+#define X_FETCHBURSTMIN_32B 1
+#define X_FETCHBURSTMIN_64B 2
+#define X_FETCHBURSTMIN_128B 3
+
+#define X_FETCHBURSTMAX_SHIFT 6
+#define X_FETCHBURSTMAX_64B 0
+#define X_FETCHBURSTMAX_128B 1
+#define X_FETCHBURSTMAX_256B 2
+#define X_FETCHBURSTMAX_512B 3
+
+#define X_HOSTFCMODE_NONE 0
+#define X_HOSTFCMODE_INGRESS_QUEUE 1
+#define X_HOSTFCMODE_STATUS_PAGE 2
+#define X_HOSTFCMODE_BOTH 3
+
+#define X_HOSTFCOWNER_UP 0
+#define X_HOSTFCOWNER_SGE 1
+
+#define X_CIDXFLUSHTHRESH_1 0
+#define X_CIDXFLUSHTHRESH_2 1
+#define X_CIDXFLUSHTHRESH_4 2
+#define X_CIDXFLUSHTHRESH_8 3
+#define X_CIDXFLUSHTHRESH_16 4
+#define X_CIDXFLUSHTHRESH_32 5
+#define X_CIDXFLUSHTHRESH_64 6
+#define X_CIDXFLUSHTHRESH_128 7
+
+#define X_IDXSIZE_UNIT 64
+
+#define X_BASEADDRESS_ALIGN 512
+
+/*
+ * Ingress Context field values
+ */
+#define X_UPDATESCHEDULING_TIMER 0
+#define X_UPDATESCHEDULING_COUNTER_OPTTIMER 1
+
+#define X_UPDATEDELIVERY_NONE 0
+#define X_UPDATEDELIVERY_INTERRUPT 1
+#define X_UPDATEDELIVERY_STATUS_PAGE 2
+#define X_UPDATEDELIVERY_BOTH 3
+
+#define X_INTERRUPTDESTINATION_PCIE 0
+#define X_INTERRUPTDESTINATION_IQ 1
+
+#define X_QUEUEENTRYSIZE_16B 0
+#define X_QUEUEENTRYSIZE_32B 1
+#define X_QUEUEENTRYSIZE_64B 2
+#define X_QUEUEENTRYSIZE_128B 3
+
+#define IC_SIZE_UNIT 16
+#define IC_BASEADDRESS_ALIGN 512
+
+#define X_RSPD_TYPE_FLBUF 0
+#define X_RSPD_TYPE_CPL 1
+#define X_RSPD_TYPE_INTR 2
+
+/*
+ * CIM definitions.
+ * ================
+ */
+
+/*
+ * CIM register field values.
+ */
+#define X_MBOWNER_NONE 0
+#define X_MBOWNER_FW 1
+#define X_MBOWNER_PL 2
+
+#endif /* __T4_REGS_VALUES_H__ */
diff --git a/sys/dev/cxgbe/common/t4_tcb.h b/sys/dev/cxgbe/common/t4_tcb.h
new file mode 100644
index 0000000..774b058
--- /dev/null
+++ b/sys/dev/cxgbe/common/t4_tcb.h
@@ -0,0 +1,753 @@
+/*-
+ * Copyright (c) 2011 Chelsio Communications, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+/* This file is automatically generated --- changes will be lost */
+
+#ifndef _T4_TCB_DEFS_H
+#define _T4_TCB_DEFS_H
+
+/* 3:0 */
+#define W_TCB_ULP_TYPE 0
+#define S_TCB_ULP_TYPE 0
+#define M_TCB_ULP_TYPE 0xfULL
+#define V_TCB_ULP_TYPE(x) ((x) << S_TCB_ULP_TYPE)
+
+/* 11:4 */
+#define W_TCB_ULP_RAW 0
+#define S_TCB_ULP_RAW 4
+#define M_TCB_ULP_RAW 0xffULL
+#define V_TCB_ULP_RAW(x) ((x) << S_TCB_ULP_RAW)
+
+/* 23:12 */
+#define W_TCB_L2T_IX 0
+#define S_TCB_L2T_IX 12
+#define M_TCB_L2T_IX 0xfffULL
+#define V_TCB_L2T_IX(x) ((x) << S_TCB_L2T_IX)
+
+/* 31:24 */
+#define W_TCB_SMAC_SEL 0
+#define S_TCB_SMAC_SEL 24
+#define M_TCB_SMAC_SEL 0xffULL
+#define V_TCB_SMAC_SEL(x) ((x) << S_TCB_SMAC_SEL)
+
+/* 95:32 */
+#define W_TCB_T_FLAGS 1
+#define S_TCB_T_FLAGS 0
+#define M_TCB_T_FLAGS 0xffffffffffffffffULL
+#define V_TCB_T_FLAGS(x) ((__u64)(x) << S_TCB_T_FLAGS)
+
+/* 105:96 */
+#define W_TCB_RSS_INFO 3
+#define S_TCB_RSS_INFO 0
+#define M_TCB_RSS_INFO 0x3ffULL
+#define V_TCB_RSS_INFO(x) ((x) << S_TCB_RSS_INFO)
+
+/* 111:106 */
+#define W_TCB_TOS 3
+#define S_TCB_TOS 10
+#define M_TCB_TOS 0x3fULL
+#define V_TCB_TOS(x) ((x) << S_TCB_TOS)
+
+/* 115:112 */
+#define W_TCB_T_STATE 3
+#define S_TCB_T_STATE 16
+#define M_TCB_T_STATE 0xfULL
+#define V_TCB_T_STATE(x) ((x) << S_TCB_T_STATE)
+
+/* 119:116 */
+#define W_TCB_MAX_RT 3
+#define S_TCB_MAX_RT 20
+#define M_TCB_MAX_RT 0xfULL
+#define V_TCB_MAX_RT(x) ((x) << S_TCB_MAX_RT)
+
+/* 123:120 */
+#define W_TCB_T_MAXSEG 3
+#define S_TCB_T_MAXSEG 24
+#define M_TCB_T_MAXSEG 0xfULL
+#define V_TCB_T_MAXSEG(x) ((x) << S_TCB_T_MAXSEG)
+
+/* 127:124 */
+#define W_TCB_SND_SCALE 3
+#define S_TCB_SND_SCALE 28
+#define M_TCB_SND_SCALE 0xfULL
+#define V_TCB_SND_SCALE(x) ((x) << S_TCB_SND_SCALE)
+
+/* 131:128 */
+#define W_TCB_RCV_SCALE 4
+#define S_TCB_RCV_SCALE 0
+#define M_TCB_RCV_SCALE 0xfULL
+#define V_TCB_RCV_SCALE(x) ((x) << S_TCB_RCV_SCALE)
+
+/* 135:132 */
+#define W_TCB_T_RXTSHIFT 4
+#define S_TCB_T_RXTSHIFT 4
+#define M_TCB_T_RXTSHIFT 0xfULL
+#define V_TCB_T_RXTSHIFT(x) ((x) << S_TCB_T_RXTSHIFT)
+
+/* 139:136 */
+#define W_TCB_T_DUPACKS 4
+#define S_TCB_T_DUPACKS 8
+#define M_TCB_T_DUPACKS 0xfULL
+#define V_TCB_T_DUPACKS(x) ((x) << S_TCB_T_DUPACKS)
+
+/* 143:140 */
+#define W_TCB_TIMESTAMP_OFFSET 4
+#define S_TCB_TIMESTAMP_OFFSET 12
+#define M_TCB_TIMESTAMP_OFFSET 0xfULL
+#define V_TCB_TIMESTAMP_OFFSET(x) ((x) << S_TCB_TIMESTAMP_OFFSET)
+
+/* 159:144 */
+#define W_TCB_RCV_ADV 4
+#define S_TCB_RCV_ADV 16
+#define M_TCB_RCV_ADV 0xffffULL
+#define V_TCB_RCV_ADV(x) ((x) << S_TCB_RCV_ADV)
+
+/* 191:160 */
+#define W_TCB_TIMESTAMP 5
+#define S_TCB_TIMESTAMP 0
+#define M_TCB_TIMESTAMP 0xffffffffULL
+#define V_TCB_TIMESTAMP(x) ((x) << S_TCB_TIMESTAMP)
+
+/* 223:192 */
+#define W_TCB_T_RTT_TS_RECENT_AGE 6
+#define S_TCB_T_RTT_TS_RECENT_AGE 0
+#define M_TCB_T_RTT_TS_RECENT_AGE 0xffffffffULL
+#define V_TCB_T_RTT_TS_RECENT_AGE(x) ((x) << S_TCB_T_RTT_TS_RECENT_AGE)
+
+/* 255:224 */
+#define W_TCB_T_RTSEQ_RECENT 7
+#define S_TCB_T_RTSEQ_RECENT 0
+#define M_TCB_T_RTSEQ_RECENT 0xffffffffULL
+#define V_TCB_T_RTSEQ_RECENT(x) ((x) << S_TCB_T_RTSEQ_RECENT)
+
+/* 271:256 */
+#define W_TCB_T_SRTT 8
+#define S_TCB_T_SRTT 0
+#define M_TCB_T_SRTT 0xffffULL
+#define V_TCB_T_SRTT(x) ((x) << S_TCB_T_SRTT)
+
+/* 287:272 */
+#define W_TCB_T_RTTVAR 8
+#define S_TCB_T_RTTVAR 16
+#define M_TCB_T_RTTVAR 0xffffULL
+#define V_TCB_T_RTTVAR(x) ((x) << S_TCB_T_RTTVAR)
+
+/* 319:288 */
+#define W_TCB_TX_MAX 9
+#define S_TCB_TX_MAX 0
+#define M_TCB_TX_MAX 0xffffffffULL
+#define V_TCB_TX_MAX(x) ((x) << S_TCB_TX_MAX)
+
+/* 347:320 */
+#define W_TCB_SND_UNA_RAW 10
+#define S_TCB_SND_UNA_RAW 0
+#define M_TCB_SND_UNA_RAW 0xfffffffULL
+#define V_TCB_SND_UNA_RAW(x) ((x) << S_TCB_SND_UNA_RAW)
+
+/* 375:348 */
+#define W_TCB_SND_NXT_RAW 10
+#define S_TCB_SND_NXT_RAW 28
+#define M_TCB_SND_NXT_RAW 0xfffffffULL
+#define V_TCB_SND_NXT_RAW(x) ((__u64)(x) << S_TCB_SND_NXT_RAW)
+
+/* 403:376 */
+#define W_TCB_SND_MAX_RAW 11
+#define S_TCB_SND_MAX_RAW 24
+#define M_TCB_SND_MAX_RAW 0xfffffffULL
+#define V_TCB_SND_MAX_RAW(x) ((__u64)(x) << S_TCB_SND_MAX_RAW)
+
+/* 431:404 */
+#define W_TCB_SND_REC_RAW 12
+#define S_TCB_SND_REC_RAW 20
+#define M_TCB_SND_REC_RAW 0xfffffffULL
+#define V_TCB_SND_REC_RAW(x) ((__u64)(x) << S_TCB_SND_REC_RAW)
+
+/* 459:432 */
+#define W_TCB_SND_CWND 13
+#define S_TCB_SND_CWND 16
+#define M_TCB_SND_CWND 0xfffffffULL
+#define V_TCB_SND_CWND(x) ((__u64)(x) << S_TCB_SND_CWND)
+
+/* 487:460 */
+#define W_TCB_SND_SSTHRESH 14
+#define S_TCB_SND_SSTHRESH 12
+#define M_TCB_SND_SSTHRESH 0xfffffffULL
+#define V_TCB_SND_SSTHRESH(x) ((__u64)(x) << S_TCB_SND_SSTHRESH)
+
+/* 504:488 */
+#define W_TCB_TX_HDR_PTR_RAW 15
+#define S_TCB_TX_HDR_PTR_RAW 8
+#define M_TCB_TX_HDR_PTR_RAW 0x1ffffULL
+#define V_TCB_TX_HDR_PTR_RAW(x) ((x) << S_TCB_TX_HDR_PTR_RAW)
+
+/* 521:505 */
+#define W_TCB_TX_LAST_PTR_RAW 15
+#define S_TCB_TX_LAST_PTR_RAW 25
+#define M_TCB_TX_LAST_PTR_RAW 0x1ffffULL
+#define V_TCB_TX_LAST_PTR_RAW(x) ((__u64)(x) << S_TCB_TX_LAST_PTR_RAW)
+
+/* 553:522 */
+#define W_TCB_RCV_NXT 16
+#define S_TCB_RCV_NXT 10
+#define M_TCB_RCV_NXT 0xffffffffULL
+#define V_TCB_RCV_NXT(x) ((__u64)(x) << S_TCB_RCV_NXT)
+
+/* 581:554 */
+#define W_TCB_RCV_WND 17
+#define S_TCB_RCV_WND 10
+#define M_TCB_RCV_WND 0xfffffffULL
+#define V_TCB_RCV_WND(x) ((__u64)(x) << S_TCB_RCV_WND)
+
+/* 609:582 */
+#define W_TCB_RX_HDR_OFFSET 18
+#define S_TCB_RX_HDR_OFFSET 6
+#define M_TCB_RX_HDR_OFFSET 0xfffffffULL
+#define V_TCB_RX_HDR_OFFSET(x) ((__u64)(x) << S_TCB_RX_HDR_OFFSET)
+
+/* 637:610 */
+#define W_TCB_TS_LAST_ACK_SENT_RAW 19
+#define S_TCB_TS_LAST_ACK_SENT_RAW 2
+#define M_TCB_TS_LAST_ACK_SENT_RAW 0xfffffffULL
+#define V_TCB_TS_LAST_ACK_SENT_RAW(x) ((x) << S_TCB_TS_LAST_ACK_SENT_RAW)
+
+/* 665:638 */
+#define W_TCB_RX_FRAG0_START_IDX_RAW 19
+#define S_TCB_RX_FRAG0_START_IDX_RAW 30
+#define M_TCB_RX_FRAG0_START_IDX_RAW 0xfffffffULL
+#define V_TCB_RX_FRAG0_START_IDX_RAW(x) ((__u64)(x) << S_TCB_RX_FRAG0_START_IDX_RAW)
+
+/* 693:666 */
+#define W_TCB_RX_FRAG1_START_IDX_OFFSET 20
+#define S_TCB_RX_FRAG1_START_IDX_OFFSET 26
+#define M_TCB_RX_FRAG1_START_IDX_OFFSET 0xfffffffULL
+#define V_TCB_RX_FRAG1_START_IDX_OFFSET(x) ((__u64)(x) << S_TCB_RX_FRAG1_START_IDX_OFFSET)
+
+/* 721:694 */
+#define W_TCB_RX_FRAG0_LEN 21
+#define S_TCB_RX_FRAG0_LEN 22
+#define M_TCB_RX_FRAG0_LEN 0xfffffffULL
+#define V_TCB_RX_FRAG0_LEN(x) ((__u64)(x) << S_TCB_RX_FRAG0_LEN)
+
+/* 749:722 */
+#define W_TCB_RX_FRAG1_LEN 22
+#define S_TCB_RX_FRAG1_LEN 18
+#define M_TCB_RX_FRAG1_LEN 0xfffffffULL
+#define V_TCB_RX_FRAG1_LEN(x) ((__u64)(x) << S_TCB_RX_FRAG1_LEN)
+
+/* 765:750 */
+#define W_TCB_PDU_LEN 23
+#define S_TCB_PDU_LEN 14
+#define M_TCB_PDU_LEN 0xffffULL
+#define V_TCB_PDU_LEN(x) ((x) << S_TCB_PDU_LEN)
+
+/* 782:766 */
+#define W_TCB_RX_PTR_RAW 23
+#define S_TCB_RX_PTR_RAW 30
+#define M_TCB_RX_PTR_RAW 0x1ffffULL
+#define V_TCB_RX_PTR_RAW(x) ((__u64)(x) << S_TCB_RX_PTR_RAW)
+
+/* 799:783 */
+#define W_TCB_RX_FRAG1_PTR_RAW 24
+#define S_TCB_RX_FRAG1_PTR_RAW 15
+#define M_TCB_RX_FRAG1_PTR_RAW 0x1ffffULL
+#define V_TCB_RX_FRAG1_PTR_RAW(x) ((x) << S_TCB_RX_FRAG1_PTR_RAW)
+
+/* 831:800 */
+#define W_TCB_MAIN_SLUSH 25
+#define S_TCB_MAIN_SLUSH 0
+#define M_TCB_MAIN_SLUSH 0xffffffffULL
+#define V_TCB_MAIN_SLUSH(x) ((x) << S_TCB_MAIN_SLUSH)
+
+/* 846:832 */
+#define W_TCB_AUX1_SLUSH0 26
+#define S_TCB_AUX1_SLUSH0 0
+#define M_TCB_AUX1_SLUSH0 0x7fffULL
+#define V_TCB_AUX1_SLUSH0(x) ((x) << S_TCB_AUX1_SLUSH0)
+
+/* 874:847 */
+#define W_TCB_RX_FRAG2_START_IDX_OFFSET_RAW 26
+#define S_TCB_RX_FRAG2_START_IDX_OFFSET_RAW 15
+#define M_TCB_RX_FRAG2_START_IDX_OFFSET_RAW 0xfffffffULL
+#define V_TCB_RX_FRAG2_START_IDX_OFFSET_RAW(x) ((__u64)(x) << S_TCB_RX_FRAG2_START_IDX_OFFSET_RAW)
+
+/* 891:875 */
+#define W_TCB_RX_FRAG2_PTR_RAW 27
+#define S_TCB_RX_FRAG2_PTR_RAW 11
+#define M_TCB_RX_FRAG2_PTR_RAW 0x1ffffULL
+#define V_TCB_RX_FRAG2_PTR_RAW(x) ((x) << S_TCB_RX_FRAG2_PTR_RAW)
+
+/* 919:892 */
+#define W_TCB_RX_FRAG2_LEN_RAW 27
+#define S_TCB_RX_FRAG2_LEN_RAW 28
+#define M_TCB_RX_FRAG2_LEN_RAW 0xfffffffULL
+#define V_TCB_RX_FRAG2_LEN_RAW(x) ((__u64)(x) << S_TCB_RX_FRAG2_LEN_RAW)
+
+/* 936:920 */
+#define W_TCB_RX_FRAG3_PTR_RAW 28
+#define S_TCB_RX_FRAG3_PTR_RAW 24
+#define M_TCB_RX_FRAG3_PTR_RAW 0x1ffffULL
+#define V_TCB_RX_FRAG3_PTR_RAW(x) ((__u64)(x) << S_TCB_RX_FRAG3_PTR_RAW)
+
+/* 964:937 */
+#define W_TCB_RX_FRAG3_LEN_RAW 29
+#define S_TCB_RX_FRAG3_LEN_RAW 9
+#define M_TCB_RX_FRAG3_LEN_RAW 0xfffffffULL
+#define V_TCB_RX_FRAG3_LEN_RAW(x) ((__u64)(x) << S_TCB_RX_FRAG3_LEN_RAW)
+
+/* 992:965 */
+#define W_TCB_RX_FRAG3_START_IDX_OFFSET_RAW 30
+#define S_TCB_RX_FRAG3_START_IDX_OFFSET_RAW 5
+#define M_TCB_RX_FRAG3_START_IDX_OFFSET_RAW 0xfffffffULL
+#define V_TCB_RX_FRAG3_START_IDX_OFFSET_RAW(x) ((__u64)(x) << S_TCB_RX_FRAG3_START_IDX_OFFSET_RAW)
+
+/* 1000:993 */
+#define W_TCB_PDU_HDR_LEN 31
+#define S_TCB_PDU_HDR_LEN 1
+#define M_TCB_PDU_HDR_LEN 0xffULL
+#define V_TCB_PDU_HDR_LEN(x) ((x) << S_TCB_PDU_HDR_LEN)
+
+/* 1023:1001 */
+#define W_TCB_AUX1_SLUSH1 31
+#define S_TCB_AUX1_SLUSH1 9
+#define M_TCB_AUX1_SLUSH1 0x7fffffULL
+#define V_TCB_AUX1_SLUSH1(x) ((x) << S_TCB_AUX1_SLUSH1)
+
+/* 840:832 */
+#define W_TCB_IRS_ULP 26
+#define S_TCB_IRS_ULP 0
+#define M_TCB_IRS_ULP 0x1ffULL
+#define V_TCB_IRS_ULP(x) ((x) << S_TCB_IRS_ULP)
+
+/* 849:841 */
+#define W_TCB_ISS_ULP 26
+#define S_TCB_ISS_ULP 9
+#define M_TCB_ISS_ULP 0x1ffULL
+#define V_TCB_ISS_ULP(x) ((x) << S_TCB_ISS_ULP)
+
+/* 863:850 */
+#define W_TCB_TX_PDU_LEN 26
+#define S_TCB_TX_PDU_LEN 18
+#define M_TCB_TX_PDU_LEN 0x3fffULL
+#define V_TCB_TX_PDU_LEN(x) ((x) << S_TCB_TX_PDU_LEN)
+
+/* 879:864 */
+#define W_TCB_CQ_IDX_SQ 27
+#define S_TCB_CQ_IDX_SQ 0
+#define M_TCB_CQ_IDX_SQ 0xffffULL
+#define V_TCB_CQ_IDX_SQ(x) ((x) << S_TCB_CQ_IDX_SQ)
+
+/* 895:880 */
+#define W_TCB_CQ_IDX_RQ 27
+#define S_TCB_CQ_IDX_RQ 16
+#define M_TCB_CQ_IDX_RQ 0xffffULL
+#define V_TCB_CQ_IDX_RQ(x) ((x) << S_TCB_CQ_IDX_RQ)
+
+/* 911:896 */
+#define W_TCB_QP_ID 28
+#define S_TCB_QP_ID 0
+#define M_TCB_QP_ID 0xffffULL
+#define V_TCB_QP_ID(x) ((x) << S_TCB_QP_ID)
+
+/* 927:912 */
+#define W_TCB_PD_ID 28
+#define S_TCB_PD_ID 16
+#define M_TCB_PD_ID 0xffffULL
+#define V_TCB_PD_ID(x) ((x) << S_TCB_PD_ID)
+
+/* 959:928 */
+#define W_TCB_STAG 29
+#define S_TCB_STAG 0
+#define M_TCB_STAG 0xffffffffULL
+#define V_TCB_STAG(x) ((x) << S_TCB_STAG)
+
+/* 985:960 */
+#define W_TCB_RQ_START 30
+#define S_TCB_RQ_START 0
+#define M_TCB_RQ_START 0x3ffffffULL
+#define V_TCB_RQ_START(x) ((x) << S_TCB_RQ_START)
+
+/* 998:986 */
+#define W_TCB_RQ_MSN 30
+#define S_TCB_RQ_MSN 26
+#define M_TCB_RQ_MSN 0x1fffULL
+#define V_TCB_RQ_MSN(x) ((__u64)(x) << S_TCB_RQ_MSN)
+
+/* 1002:999 */
+#define W_TCB_RQ_MAX_OFFSET 31
+#define S_TCB_RQ_MAX_OFFSET 7
+#define M_TCB_RQ_MAX_OFFSET 0xfULL
+#define V_TCB_RQ_MAX_OFFSET(x) ((x) << S_TCB_RQ_MAX_OFFSET)
+
+/* 1015:1003 */
+#define W_TCB_RQ_WRITE_PTR 31
+#define S_TCB_RQ_WRITE_PTR 11
+#define M_TCB_RQ_WRITE_PTR 0x1fffULL
+#define V_TCB_RQ_WRITE_PTR(x) ((x) << S_TCB_RQ_WRITE_PTR)
+
+/* 1019:1016 */
+#define W_TCB_RDMAP_OPCODE 31
+#define S_TCB_RDMAP_OPCODE 24
+#define M_TCB_RDMAP_OPCODE 0xfULL
+#define V_TCB_RDMAP_OPCODE(x) ((x) << S_TCB_RDMAP_OPCODE)
+
+/* 1020:1020 */
+#define W_TCB_ORD_L_BIT_VLD 31
+#define S_TCB_ORD_L_BIT_VLD 28
+#define M_TCB_ORD_L_BIT_VLD 0x1ULL
+#define V_TCB_ORD_L_BIT_VLD(x) ((x) << S_TCB_ORD_L_BIT_VLD)
+
+/* 1021:1021 */
+#define W_TCB_TX_FLUSH 31
+#define S_TCB_TX_FLUSH 29
+#define M_TCB_TX_FLUSH 0x1ULL
+#define V_TCB_TX_FLUSH(x) ((x) << S_TCB_TX_FLUSH)
+
+/* 1022:1022 */
+#define W_TCB_TX_OOS_RXMT 31
+#define S_TCB_TX_OOS_RXMT 30
+#define M_TCB_TX_OOS_RXMT 0x1ULL
+#define V_TCB_TX_OOS_RXMT(x) ((x) << S_TCB_TX_OOS_RXMT)
+
+/* 1023:1023 */
+#define W_TCB_TX_OOS_TXMT 31
+#define S_TCB_TX_OOS_TXMT 31
+#define M_TCB_TX_OOS_TXMT 0x1ULL
+#define V_TCB_TX_OOS_TXMT(x) ((x) << S_TCB_TX_OOS_TXMT)
+
+/* 855:832 */
+#define W_TCB_RX_DDP_BUF0_OFFSET 26
+#define S_TCB_RX_DDP_BUF0_OFFSET 0
+#define M_TCB_RX_DDP_BUF0_OFFSET 0xffffffULL
+#define V_TCB_RX_DDP_BUF0_OFFSET(x) ((x) << S_TCB_RX_DDP_BUF0_OFFSET)
+
+/* 879:856 */
+#define W_TCB_RX_DDP_BUF0_LEN 26
+#define S_TCB_RX_DDP_BUF0_LEN 24
+#define M_TCB_RX_DDP_BUF0_LEN 0xffffffULL
+#define V_TCB_RX_DDP_BUF0_LEN(x) ((__u64)(x) << S_TCB_RX_DDP_BUF0_LEN)
+
+/* 903:880 */
+#define W_TCB_RX_DDP_FLAGS 27
+#define S_TCB_RX_DDP_FLAGS 16
+#define M_TCB_RX_DDP_FLAGS 0xffffffULL
+#define V_TCB_RX_DDP_FLAGS(x) ((__u64)(x) << S_TCB_RX_DDP_FLAGS)
+
+/* 927:904 */
+#define W_TCB_RX_DDP_BUF1_OFFSET 28
+#define S_TCB_RX_DDP_BUF1_OFFSET 8
+#define M_TCB_RX_DDP_BUF1_OFFSET 0xffffffULL
+#define V_TCB_RX_DDP_BUF1_OFFSET(x) ((x) << S_TCB_RX_DDP_BUF1_OFFSET)
+
+/* 951:928 */
+#define W_TCB_RX_DDP_BUF1_LEN 29
+#define S_TCB_RX_DDP_BUF1_LEN 0
+#define M_TCB_RX_DDP_BUF1_LEN 0xffffffULL
+#define V_TCB_RX_DDP_BUF1_LEN(x) ((x) << S_TCB_RX_DDP_BUF1_LEN)
+
+/* 959:952 */
+#define W_TCB_AUX3_SLUSH 29
+#define S_TCB_AUX3_SLUSH 24
+#define M_TCB_AUX3_SLUSH 0xffULL
+#define V_TCB_AUX3_SLUSH(x) ((x) << S_TCB_AUX3_SLUSH)
+
+/* 991:960 */
+#define W_TCB_RX_DDP_BUF0_TAG 30
+#define S_TCB_RX_DDP_BUF0_TAG 0
+#define M_TCB_RX_DDP_BUF0_TAG 0xffffffffULL
+#define V_TCB_RX_DDP_BUF0_TAG(x) ((x) << S_TCB_RX_DDP_BUF0_TAG)
+
+/* 1023:992 */
+#define W_TCB_RX_DDP_BUF1_TAG 31
+#define S_TCB_RX_DDP_BUF1_TAG 0
+#define M_TCB_RX_DDP_BUF1_TAG 0xffffffffULL
+#define V_TCB_RX_DDP_BUF1_TAG(x) ((x) << S_TCB_RX_DDP_BUF1_TAG)
+
+#define S_TF_MIGRATING 0
+#define V_TF_MIGRATING(x) ((x) << S_TF_MIGRATING)
+
+#define S_TF_NON_OFFLOAD 1
+#define V_TF_NON_OFFLOAD(x) ((x) << S_TF_NON_OFFLOAD)
+
+#define S_TF_LOCK_TID 2
+#define V_TF_LOCK_TID(x) ((x) << S_TF_LOCK_TID)
+
+#define S_TF_KEEPALIVE 3
+#define V_TF_KEEPALIVE(x) ((x) << S_TF_KEEPALIVE)
+
+#define S_TF_DACK 4
+#define V_TF_DACK(x) ((x) << S_TF_DACK)
+
+#define S_TF_DACK_MSS 5
+#define V_TF_DACK_MSS(x) ((x) << S_TF_DACK_MSS)
+
+#define S_TF_DACK_NOT_ACKED 6
+#define V_TF_DACK_NOT_ACKED(x) ((x) << S_TF_DACK_NOT_ACKED)
+
+#define S_TF_NAGLE 7
+#define V_TF_NAGLE(x) ((x) << S_TF_NAGLE)
+
+#define S_TF_SSWS_DISABLED 8
+#define V_TF_SSWS_DISABLED(x) ((x) << S_TF_SSWS_DISABLED)
+
+#define S_TF_RX_FLOW_CONTROL_DDP 9
+#define V_TF_RX_FLOW_CONTROL_DDP(x) ((x) << S_TF_RX_FLOW_CONTROL_DDP)
+
+#define S_TF_RX_FLOW_CONTROL_DISABLE 10
+#define V_TF_RX_FLOW_CONTROL_DISABLE(x) ((x) << S_TF_RX_FLOW_CONTROL_DISABLE)
+
+#define S_TF_RX_CHANNEL 11
+#define V_TF_RX_CHANNEL(x) ((x) << S_TF_RX_CHANNEL)
+
+#define S_TF_TX_CHANNEL0 12
+#define V_TF_TX_CHANNEL0(x) ((x) << S_TF_TX_CHANNEL0)
+
+#define S_TF_TX_CHANNEL1 13
+#define V_TF_TX_CHANNEL1(x) ((x) << S_TF_TX_CHANNEL1)
+
+#define S_TF_TX_QUIESCE 14
+#define V_TF_TX_QUIESCE(x) ((x) << S_TF_TX_QUIESCE)
+
+#define S_TF_RX_QUIESCE 15
+#define V_TF_RX_QUIESCE(x) ((x) << S_TF_RX_QUIESCE)
+
+#define S_TF_TX_PACE_AUTO 16
+#define V_TF_TX_PACE_AUTO(x) ((x) << S_TF_TX_PACE_AUTO)
+
+#define S_TF_MASK_HASH 16
+#define V_TF_MASK_HASH(x) ((x) << S_TF_MASK_HASH)
+
+#define S_TF_TX_PACE_FIXED 17
+#define V_TF_TX_PACE_FIXED(x) ((x) << S_TF_TX_PACE_FIXED)
+
+#define S_TF_DIRECT_STEER_HASH 17
+#define V_TF_DIRECT_STEER_HASH(x) ((x) << S_TF_DIRECT_STEER_HASH)
+
+#define S_TF_TX_QUEUE 18
+#define M_TF_TX_QUEUE 0x7ULL
+#define V_TF_TX_QUEUE(x) ((x) << S_TF_TX_QUEUE)
+
+#define S_TF_TURBO 21
+#define V_TF_TURBO(x) ((x) << S_TF_TURBO)
+
+#define S_TF_REPORT_TID 21
+#define V_TF_REPORT_TID(x) ((x) << S_TF_REPORT_TID)
+
+#define S_TF_CCTRL_SEL0 22
+#define V_TF_CCTRL_SEL0(x) ((x) << S_TF_CCTRL_SEL0)
+
+#define S_TF_DROP 22
+#define V_TF_DROP(x) ((x) << S_TF_DROP)
+
+#define S_TF_CCTRL_SEL1 23
+#define V_TF_CCTRL_SEL1(x) ((x) << S_TF_CCTRL_SEL1)
+
+#define S_TF_DIRECT_STEER 23
+#define V_TF_DIRECT_STEER(x) ((x) << S_TF_DIRECT_STEER)
+
+#define S_TF_CORE_FIN 24
+#define V_TF_CORE_FIN(x) ((x) << S_TF_CORE_FIN)
+
+#define S_TF_CORE_URG 25
+#define V_TF_CORE_URG(x) ((x) << S_TF_CORE_URG)
+
+#define S_TF_CORE_MORE 26
+#define V_TF_CORE_MORE(x) ((x) << S_TF_CORE_MORE)
+
+#define S_TF_CORE_PUSH 27
+#define V_TF_CORE_PUSH(x) ((x) << S_TF_CORE_PUSH)
+
+#define S_TF_CORE_FLUSH 28
+#define V_TF_CORE_FLUSH(x) ((x) << S_TF_CORE_FLUSH)
+
+#define S_TF_RCV_COALESCE_ENABLE 29
+#define V_TF_RCV_COALESCE_ENABLE(x) ((x) << S_TF_RCV_COALESCE_ENABLE)
+
+#define S_TF_RCV_COALESCE_PUSH 30
+#define V_TF_RCV_COALESCE_PUSH(x) ((x) << S_TF_RCV_COALESCE_PUSH)
+
+#define S_TF_RCV_COALESCE_LAST_PSH 31
+#define V_TF_RCV_COALESCE_LAST_PSH(x) ((x) << S_TF_RCV_COALESCE_LAST_PSH)
+
+#define S_TF_RCV_COALESCE_HEARTBEAT 32
+#define V_TF_RCV_COALESCE_HEARTBEAT(x) ((__u64)(x) << S_TF_RCV_COALESCE_HEARTBEAT)
+
+#define S_TF_INIT 33
+#define V_TF_INIT(x) ((__u64)(x) << S_TF_INIT)
+
+#define S_TF_ACTIVE_OPEN 34
+#define V_TF_ACTIVE_OPEN(x) ((__u64)(x) << S_TF_ACTIVE_OPEN)
+
+#define S_TF_ASK_MODE 35
+#define V_TF_ASK_MODE(x) ((__u64)(x) << S_TF_ASK_MODE)
+
+#define S_TF_MOD_SCHD_REASON0 36
+#define V_TF_MOD_SCHD_REASON0(x) ((__u64)(x) << S_TF_MOD_SCHD_REASON0)
+
+#define S_TF_MOD_SCHD_REASON1 37
+#define V_TF_MOD_SCHD_REASON1(x) ((__u64)(x) << S_TF_MOD_SCHD_REASON1)
+
+#define S_TF_MOD_SCHD_REASON2 38
+#define V_TF_MOD_SCHD_REASON2(x) ((__u64)(x) << S_TF_MOD_SCHD_REASON2)
+
+#define S_TF_MOD_SCHD_TX 39
+#define V_TF_MOD_SCHD_TX(x) ((__u64)(x) << S_TF_MOD_SCHD_TX)
+
+#define S_TF_MOD_SCHD_RX 40
+#define V_TF_MOD_SCHD_RX(x) ((__u64)(x) << S_TF_MOD_SCHD_RX)
+
+#define S_TF_TIMER 41
+#define V_TF_TIMER(x) ((__u64)(x) << S_TF_TIMER)
+
+#define S_TF_DACK_TIMER 42
+#define V_TF_DACK_TIMER(x) ((__u64)(x) << S_TF_DACK_TIMER)
+
+#define S_TF_PEER_FIN 43
+#define V_TF_PEER_FIN(x) ((__u64)(x) << S_TF_PEER_FIN)
+
+#define S_TF_TX_COMPACT 44
+#define V_TF_TX_COMPACT(x) ((__u64)(x) << S_TF_TX_COMPACT)
+
+#define S_TF_RX_COMPACT 45
+#define V_TF_RX_COMPACT(x) ((__u64)(x) << S_TF_RX_COMPACT)
+
+#define S_TF_RDMA_ERROR 46
+#define V_TF_RDMA_ERROR(x) ((__u64)(x) << S_TF_RDMA_ERROR)
+
+#define S_TF_RDMA_FLM_ERROR 47
+#define V_TF_RDMA_FLM_ERROR(x) ((__u64)(x) << S_TF_RDMA_FLM_ERROR)
+
+#define S_TF_TX_PDU_OUT 48
+#define V_TF_TX_PDU_OUT(x) ((__u64)(x) << S_TF_TX_PDU_OUT)
+
+#define S_TF_RX_PDU_OUT 49
+#define V_TF_RX_PDU_OUT(x) ((__u64)(x) << S_TF_RX_PDU_OUT)
+
+#define S_TF_DUPACK_COUNT_ODD 50
+#define V_TF_DUPACK_COUNT_ODD(x) ((__u64)(x) << S_TF_DUPACK_COUNT_ODD)
+
+#define S_TF_FAST_RECOVERY 51
+#define V_TF_FAST_RECOVERY(x) ((__u64)(x) << S_TF_FAST_RECOVERY)
+
+#define S_TF_RECV_SCALE 52
+#define V_TF_RECV_SCALE(x) ((__u64)(x) << S_TF_RECV_SCALE)
+
+#define S_TF_RECV_TSTMP 53
+#define V_TF_RECV_TSTMP(x) ((__u64)(x) << S_TF_RECV_TSTMP)
+
+#define S_TF_RECV_SACK 54
+#define V_TF_RECV_SACK(x) ((__u64)(x) << S_TF_RECV_SACK)
+
+#define S_TF_PEND_CTL0 55
+#define V_TF_PEND_CTL0(x) ((__u64)(x) << S_TF_PEND_CTL0)
+
+#define S_TF_PEND_CTL1 56
+#define V_TF_PEND_CTL1(x) ((__u64)(x) << S_TF_PEND_CTL1)
+
+#define S_TF_PEND_CTL2 57
+#define V_TF_PEND_CTL2(x) ((__u64)(x) << S_TF_PEND_CTL2)
+
+#define S_TF_IP_VERSION 58
+#define V_TF_IP_VERSION(x) ((__u64)(x) << S_TF_IP_VERSION)
+
+#define S_TF_CCTRL_ECN 59
+#define V_TF_CCTRL_ECN(x) ((__u64)(x) << S_TF_CCTRL_ECN)
+
+#define S_TF_LPBK 59
+#define V_TF_LPBK(x) ((__u64)(x) << S_TF_LPBK)
+
+#define S_TF_CCTRL_ECE 60
+#define V_TF_CCTRL_ECE(x) ((__u64)(x) << S_TF_CCTRL_ECE)
+
+#define S_TF_REWRITE_DMAC 60
+#define V_TF_REWRITE_DMAC(x) ((__u64)(x) << S_TF_REWRITE_DMAC)
+
+#define S_TF_CCTRL_CWR 61
+#define V_TF_CCTRL_CWR(x) ((__u64)(x) << S_TF_CCTRL_CWR)
+
+#define S_TF_REWRITE_SMAC 61
+#define V_TF_REWRITE_SMAC(x) ((__u64)(x) << S_TF_REWRITE_SMAC)
+
+#define S_TF_CCTRL_RFR 62
+#define V_TF_CCTRL_RFR(x) ((__u64)(x) << S_TF_CCTRL_RFR)
+
+#define S_TF_DDP_INDICATE_OUT 16
+#define V_TF_DDP_INDICATE_OUT(x) ((x) << S_TF_DDP_INDICATE_OUT)
+
+#define S_TF_DDP_ACTIVE_BUF 17
+#define V_TF_DDP_ACTIVE_BUF(x) ((x) << S_TF_DDP_ACTIVE_BUF)
+
+#define S_TF_DDP_OFF 18
+#define V_TF_DDP_OFF(x) ((x) << S_TF_DDP_OFF)
+
+#define S_TF_DDP_WAIT_FRAG 19
+#define V_TF_DDP_WAIT_FRAG(x) ((x) << S_TF_DDP_WAIT_FRAG)
+
+#define S_TF_DDP_BUF_INF 20
+#define V_TF_DDP_BUF_INF(x) ((x) << S_TF_DDP_BUF_INF)
+
+#define S_TF_DDP_RX2TX 21
+#define V_TF_DDP_RX2TX(x) ((x) << S_TF_DDP_RX2TX)
+
+#define S_TF_DDP_BUF0_VALID 24
+#define V_TF_DDP_BUF0_VALID(x) ((x) << S_TF_DDP_BUF0_VALID)
+
+#define S_TF_DDP_BUF0_INDICATE 25
+#define V_TF_DDP_BUF0_INDICATE(x) ((x) << S_TF_DDP_BUF0_INDICATE)
+
+#define S_TF_DDP_BUF0_FLUSH 26
+#define V_TF_DDP_BUF0_FLUSH(x) ((x) << S_TF_DDP_BUF0_FLUSH)
+
+#define S_TF_DDP_PSHF_ENABLE_0 27
+#define V_TF_DDP_PSHF_ENABLE_0(x) ((x) << S_TF_DDP_PSHF_ENABLE_0)
+
+#define S_TF_DDP_PUSH_DISABLE_0 28
+#define V_TF_DDP_PUSH_DISABLE_0(x) ((x) << S_TF_DDP_PUSH_DISABLE_0)
+
+#define S_TF_DDP_PSH_NO_INVALIDATE0 29
+#define V_TF_DDP_PSH_NO_INVALIDATE0(x) ((x) << S_TF_DDP_PSH_NO_INVALIDATE0)
+
+#define S_TF_DDP_BUF1_VALID 32
+#define V_TF_DDP_BUF1_VALID(x) ((__u64)(x) << S_TF_DDP_BUF1_VALID)
+
+#define S_TF_DDP_BUF1_INDICATE 33
+#define V_TF_DDP_BUF1_INDICATE(x) ((__u64)(x) << S_TF_DDP_BUF1_INDICATE)
+
+#define S_TF_DDP_BUF1_FLUSH 34
+#define V_TF_DDP_BUF1_FLUSH(x) ((__u64)(x) << S_TF_DDP_BUF1_FLUSH)
+
+#define S_TF_DDP_PSHF_ENABLE_1 35
+#define V_TF_DDP_PSHF_ENABLE_1(x) ((__u64)(x) << S_TF_DDP_PSHF_ENABLE_1)
+
+#define S_TF_DDP_PUSH_DISABLE_1 36
+#define V_TF_DDP_PUSH_DISABLE_1(x) ((__u64)(x) << S_TF_DDP_PUSH_DISABLE_1)
+
+#define S_TF_DDP_PSH_NO_INVALIDATE1 37
+#define V_TF_DDP_PSH_NO_INVALIDATE1(x) ((__u64)(x) << S_TF_DDP_PSH_NO_INVALIDATE1)
+
+#endif /* _T4_TCB_DEFS_H */
diff --git a/sys/dev/cxgbe/common/t4fw_interface.h b/sys/dev/cxgbe/common/t4fw_interface.h
new file mode 100644
index 0000000..5fac13c
--- /dev/null
+++ b/sys/dev/cxgbe/common/t4fw_interface.h
@@ -0,0 +1,5392 @@
+/*-
+ * Copyright (c) 2011 Chelsio Communications, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef _T4FW_INTERFACE_H_
+#define _T4FW_INTERFACE_H_
+
+/******************************************************************************
+ * R E T U R N V A L U E S
+ ********************************/
+
+enum fw_retval {
+ FW_SUCCESS = 0, /* completed successfully */
+ FW_EPERM = 1, /* operation not permitted */
+ FW_EIO = 5, /* input/output error; hw bad */
+ FW_ENOEXEC = 8, /* Exec format error; inv microcode */
+ FW_EAGAIN = 11, /* try again */
+ FW_ENOMEM = 12, /* out of memory */
+ FW_EFAULT = 14, /* bad address; fw bad */
+ FW_EBUSY = 16, /* resource busy */
+ FW_EINVAL = 22, /* invalid argument */
+ FW_ENOSYS = 38, /* functionality not implemented */
+ FW_EPROTO = 71, /* protocol error */
+ FW_ETIMEDOUT = 110, /* timeout */
+ FW_TIMEDOUT = 110, /* timeout */
+ FW_SCSI_ABORT_REQUESTED = 128, /* */
+ FW_SCSI_ABORT_TIMEDOUT = 129, /* */
+ FW_SCSI_ABORTED = 130, /* */
+ FW_SCSI_CLOSE_REQUESTED = 131, /* */
+ FW_ERR_LINK_DOWN = 132, /* */
+ FW_RDEV_NOT_READY = 133, /* */
+ FW_ERR_RDEV_LOST = 134, /* */
+ FW_ERR_RDEV_LOGO = 135, /* */
+ FW_FCOE_NO_XCHG = 136, /* */
+ FW_SCSI_RSP_ERR = 137, /* */
+ FW_ERR_RDEV_IMPL_LOGO = 138, /* */
+};
+
+/******************************************************************************
+ * W O R K R E Q U E S T s
+ ********************************/
+
+enum fw_wr_opcodes {
+ FW_FILTER_WR = 0x02,
+ FW_ULPTX_WR = 0x04,
+ FW_TP_WR = 0x05,
+ FW_ETH_TX_PKT_WR = 0x08,
+ FW_ETH_TX_PKTS_WR = 0x09,
+ FW_EQ_FLUSH_WR = 0x1b,
+ FW_FLOWC_WR = 0x0a,
+ FW_OFLD_TX_DATA_WR = 0x0b,
+ FW_CMD_WR = 0x10,
+ FW_ETH_TX_PKT_VM_WR = 0x11,
+ FW_RI_RES_WR = 0x0c,
+ FW_RI_RDMA_WRITE_WR = 0x14,
+ FW_RI_SEND_WR = 0x15,
+ FW_RI_RDMA_READ_WR = 0x16,
+ FW_RI_RECV_WR = 0x17,
+ FW_RI_BIND_MW_WR = 0x18,
+ FW_RI_FR_NSMR_WR = 0x19,
+ FW_RI_INV_LSTAG_WR = 0x1a,
+ FW_RI_WR = 0x0d,
+ FW_LASTC2E_WR = 0x4a
+};
+
+/*
+ * Generic work request header flit0
+ */
+struct fw_wr_hdr {
+ __be32 hi;
+ __be32 lo;
+};
+
+/* work request opcode (hi)
+ */
+#define S_FW_WR_OP 24
+#define M_FW_WR_OP 0xff
+#define V_FW_WR_OP(x) ((x) << S_FW_WR_OP)
+#define G_FW_WR_OP(x) (((x) >> S_FW_WR_OP) & M_FW_WR_OP)
+
+/* atomic flag (hi) - firmware encapsulates CPLs in CPL_BARRIER
+ */
+#define S_FW_WR_ATOMIC 23
+#define M_FW_WR_ATOMIC 0x1
+#define V_FW_WR_ATOMIC(x) ((x) << S_FW_WR_ATOMIC)
+#define G_FW_WR_ATOMIC(x) \
+ (((x) >> S_FW_WR_ATOMIC) & M_FW_WR_ATOMIC)
+#define F_FW_WR_ATOMIC V_FW_WR_ATOMIC(1U)
+
+/* flush flag (hi) - firmware flushes flushable work requests buffered
+ * in the flow context.
+ */
+#define S_FW_WR_FLUSH 22
+#define M_FW_WR_FLUSH 0x1
+#define V_FW_WR_FLUSH(x) ((x) << S_FW_WR_FLUSH)
+#define G_FW_WR_FLUSH(x) \
+ (((x) >> S_FW_WR_FLUSH) & M_FW_WR_FLUSH)
+#define F_FW_WR_FLUSH V_FW_WR_FLUSH(1U)
+
+/* completion flag (hi) - firmware generates a cpl_fw6_ack
+ */
+#define S_FW_WR_COMPL 21
+#define M_FW_WR_COMPL 0x1
+#define V_FW_WR_COMPL(x) ((x) << S_FW_WR_COMPL)
+#define G_FW_WR_COMPL(x) \
+ (((x) >> S_FW_WR_COMPL) & M_FW_WR_COMPL)
+#define F_FW_WR_COMPL V_FW_WR_COMPL(1U)
+
+
+/* work request immediate data length (hi)
+ */
+#define S_FW_WR_IMMDLEN 0
+#define M_FW_WR_IMMDLEN 0xff
+#define V_FW_WR_IMMDLEN(x) ((x) << S_FW_WR_IMMDLEN)
+#define G_FW_WR_IMMDLEN(x) \
+ (((x) >> S_FW_WR_IMMDLEN) & M_FW_WR_IMMDLEN)
+
+/* egress queue status update to associated ingress queue entry (lo)
+ */
+#define S_FW_WR_EQUIQ 31
+#define M_FW_WR_EQUIQ 0x1
+#define V_FW_WR_EQUIQ(x) ((x) << S_FW_WR_EQUIQ)
+#define G_FW_WR_EQUIQ(x) (((x) >> S_FW_WR_EQUIQ) & M_FW_WR_EQUIQ)
+#define F_FW_WR_EQUIQ V_FW_WR_EQUIQ(1U)
+
+/* egress queue status update to egress queue status entry (lo)
+ */
+#define S_FW_WR_EQUEQ 30
+#define M_FW_WR_EQUEQ 0x1
+#define V_FW_WR_EQUEQ(x) ((x) << S_FW_WR_EQUEQ)
+#define G_FW_WR_EQUEQ(x) (((x) >> S_FW_WR_EQUEQ) & M_FW_WR_EQUEQ)
+#define F_FW_WR_EQUEQ V_FW_WR_EQUEQ(1U)
+
+/* flow context identifier (lo)
+ */
+#define S_FW_WR_FLOWID 8
+#define M_FW_WR_FLOWID 0xfffff
+#define V_FW_WR_FLOWID(x) ((x) << S_FW_WR_FLOWID)
+#define G_FW_WR_FLOWID(x) (((x) >> S_FW_WR_FLOWID) & M_FW_WR_FLOWID)
+
+/* length in units of 16-bytes (lo)
+ */
+#define S_FW_WR_LEN16 0
+#define M_FW_WR_LEN16 0xff
+#define V_FW_WR_LEN16(x) ((x) << S_FW_WR_LEN16)
+#define G_FW_WR_LEN16(x) (((x) >> S_FW_WR_LEN16) & M_FW_WR_LEN16)
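+
+/* Illustrative sketch (not taken verbatim from the driver) of how the two
+ * header flits are typically assembled from the macros above; real work
+ * requests embed this header at the start of their specific structures, and
+ * imm_len, flowid and len16 are hypothetical variables:
+ *
+ *	struct fw_wr_hdr *hdr;
+ *
+ *	hdr->hi = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
+ *	    V_FW_WR_IMMDLEN(imm_len));
+ *	hdr->lo = htobe32(F_FW_WR_EQUEQ | V_FW_WR_FLOWID(flowid) |
+ *	    V_FW_WR_LEN16(len16));
+ */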
+
+/* valid filter configurations for compressed tuple
+ * Encodings: TPL - Compressed TUPLE for filter in addition to 4-tuple
+ * FR - FRAGMENT, FC - FCoE, MT - MPS MATCH TYPE, M - MPS MATCH,
+ * E - Ethertype, P - Port, PR - Protocol, T - TOS, IV - Inner VLAN,
+ * OV - Outer VLAN/VNIC_ID,
+ */
+#define HW_TPL_FR_MT_M_E_P_FC 0x3C3
+#define HW_TPL_FR_MT_M_PR_T_FC 0x3B3
+#define HW_TPL_FR_MT_M_IV_P_FC 0x38B
+#define HW_TPL_FR_MT_M_OV_P_FC 0x387
+#define HW_TPL_FR_MT_E_PR_T 0x370
+#define HW_TPL_FR_MT_E_PR_P_FC		0x363
+#define HW_TPL_FR_MT_E_T_P_FC		0x353
+#define HW_TPL_FR_MT_PR_IV_P_FC		0x32B
+#define HW_TPL_FR_MT_PR_OV_P_FC		0x327
+#define HW_TPL_FR_MT_T_IV_P_FC		0x31B
+#define HW_TPL_FR_MT_T_OV_P_FC		0x317
+#define HW_TPL_FR_M_E_PR_FC		0x2E1
+#define HW_TPL_FR_M_E_T_FC		0x2D1
+#define HW_TPL_FR_M_PR_IV_FC		0x2A9
+#define HW_TPL_FR_M_PR_OV_FC		0x2A5
+#define HW_TPL_FR_M_T_IV_FC		0x299
+#define HW_TPL_FR_M_T_OV_FC		0x295
+#define HW_TPL_FR_E_PR_T_P		0x272
+#define HW_TPL_FR_E_PR_T_FC		0x271
+#define HW_TPL_FR_E_IV_FC		0x249
+#define HW_TPL_FR_E_OV_FC		0x245
+#define HW_TPL_FR_PR_T_IV_FC		0x239
+#define HW_TPL_FR_PR_T_OV_FC		0x235
+#define HW_TPL_FR_IV_OV_FC		0x20D
+#define HW_TPL_MT_M_E_PR		0x1E0
+#define HW_TPL_MT_M_E_T			0x1D0
+#define HW_TPL_MT_E_PR_T_FC		0x171
+#define HW_TPL_MT_E_IV			0x148
+#define HW_TPL_MT_E_OV			0x144
+#define HW_TPL_MT_PR_T_IV		0x138
+#define HW_TPL_MT_PR_T_OV		0x134
+#define HW_TPL_M_E_PR_P			0x0E2
+#define HW_TPL_M_E_T_P			0x0D2
+#define HW_TPL_E_PR_T_P_FC		0x073
+#define HW_TPL_E_IV_P			0x04A
+#define HW_TPL_E_OV_P			0x046
+#define HW_TPL_PR_T_IV_P		0x03A
+#define HW_TPL_PR_T_OV_P		0x036
+
+/* filter wr reply code in cookie in CPL_SET_TCB_RPL */
+enum fw_filter_wr_cookie {
+ FW_FILTER_WR_SUCCESS,
+ FW_FILTER_WR_FLT_ADDED,
+ FW_FILTER_WR_FLT_DELETED,
+ FW_FILTER_WR_SMT_TBL_FULL,
+ FW_FILTER_WR_EINVAL,
+};
+
+struct fw_filter_wr {
+ __be32 op_pkd;
+ __be32 len16_pkd;
+ __be64 r3;
+ __be32 tid_to_iq;
+ __be32 del_filter_to_l2tix;
+ __be16 ethtype;
+ __be16 ethtypem;
+ __u8 frag_to_ovlan_vldm;
+ __u8 smac_sel;
+ __be16 rx_chan_rx_rpl_iq;
+ __be32 maci_to_matchtypem;
+ __u8 ptcl;
+ __u8 ptclm;
+ __u8 ttyp;
+ __u8 ttypm;
+ __be16 ivlan;
+ __be16 ivlanm;
+ __be16 ovlan;
+ __be16 ovlanm;
+ __u8 lip[16];
+ __u8 lipm[16];
+ __u8 fip[16];
+ __u8 fipm[16];
+ __be16 lp;
+ __be16 lpm;
+ __be16 fp;
+ __be16 fpm;
+ __be16 r7;
+ __u8 sma[6];
+};
+
+#define S_FW_FILTER_WR_TID 12
+#define M_FW_FILTER_WR_TID 0xfffff
+#define V_FW_FILTER_WR_TID(x) ((x) << S_FW_FILTER_WR_TID)
+#define G_FW_FILTER_WR_TID(x) \
+ (((x) >> S_FW_FILTER_WR_TID) & M_FW_FILTER_WR_TID)
+
+#define S_FW_FILTER_WR_RQTYPE 11
+#define M_FW_FILTER_WR_RQTYPE 0x1
+#define V_FW_FILTER_WR_RQTYPE(x) ((x) << S_FW_FILTER_WR_RQTYPE)
+#define G_FW_FILTER_WR_RQTYPE(x) \
+ (((x) >> S_FW_FILTER_WR_RQTYPE) & M_FW_FILTER_WR_RQTYPE)
+#define F_FW_FILTER_WR_RQTYPE V_FW_FILTER_WR_RQTYPE(1U)
+
+#define S_FW_FILTER_WR_NOREPLY 10
+#define M_FW_FILTER_WR_NOREPLY 0x1
+#define V_FW_FILTER_WR_NOREPLY(x) ((x) << S_FW_FILTER_WR_NOREPLY)
+#define G_FW_FILTER_WR_NOREPLY(x) \
+ (((x) >> S_FW_FILTER_WR_NOREPLY) & M_FW_FILTER_WR_NOREPLY)
+#define F_FW_FILTER_WR_NOREPLY V_FW_FILTER_WR_NOREPLY(1U)
+
+#define S_FW_FILTER_WR_IQ 0
+#define M_FW_FILTER_WR_IQ 0x3ff
+#define V_FW_FILTER_WR_IQ(x) ((x) << S_FW_FILTER_WR_IQ)
+#define G_FW_FILTER_WR_IQ(x) \
+ (((x) >> S_FW_FILTER_WR_IQ) & M_FW_FILTER_WR_IQ)
+
+#define S_FW_FILTER_WR_DEL_FILTER 31
+#define M_FW_FILTER_WR_DEL_FILTER 0x1
+#define V_FW_FILTER_WR_DEL_FILTER(x) ((x) << S_FW_FILTER_WR_DEL_FILTER)
+#define G_FW_FILTER_WR_DEL_FILTER(x) \
+ (((x) >> S_FW_FILTER_WR_DEL_FILTER) & M_FW_FILTER_WR_DEL_FILTER)
+#define F_FW_FILTER_WR_DEL_FILTER V_FW_FILTER_WR_DEL_FILTER(1U)
+
+#define S_FW_FILTER_WR_RPTTID 25
+#define M_FW_FILTER_WR_RPTTID 0x1
+#define V_FW_FILTER_WR_RPTTID(x) ((x) << S_FW_FILTER_WR_RPTTID)
+#define G_FW_FILTER_WR_RPTTID(x) \
+ (((x) >> S_FW_FILTER_WR_RPTTID) & M_FW_FILTER_WR_RPTTID)
+#define F_FW_FILTER_WR_RPTTID V_FW_FILTER_WR_RPTTID(1U)
+
+#define S_FW_FILTER_WR_DROP 24
+#define M_FW_FILTER_WR_DROP 0x1
+#define V_FW_FILTER_WR_DROP(x) ((x) << S_FW_FILTER_WR_DROP)
+#define G_FW_FILTER_WR_DROP(x) \
+ (((x) >> S_FW_FILTER_WR_DROP) & M_FW_FILTER_WR_DROP)
+#define F_FW_FILTER_WR_DROP V_FW_FILTER_WR_DROP(1U)
+
+#define S_FW_FILTER_WR_DIRSTEER 23
+#define M_FW_FILTER_WR_DIRSTEER 0x1
+#define V_FW_FILTER_WR_DIRSTEER(x) ((x) << S_FW_FILTER_WR_DIRSTEER)
+#define G_FW_FILTER_WR_DIRSTEER(x) \
+ (((x) >> S_FW_FILTER_WR_DIRSTEER) & M_FW_FILTER_WR_DIRSTEER)
+#define F_FW_FILTER_WR_DIRSTEER V_FW_FILTER_WR_DIRSTEER(1U)
+
+#define S_FW_FILTER_WR_MASKHASH 22
+#define M_FW_FILTER_WR_MASKHASH 0x1
+#define V_FW_FILTER_WR_MASKHASH(x) ((x) << S_FW_FILTER_WR_MASKHASH)
+#define G_FW_FILTER_WR_MASKHASH(x) \
+ (((x) >> S_FW_FILTER_WR_MASKHASH) & M_FW_FILTER_WR_MASKHASH)
+#define F_FW_FILTER_WR_MASKHASH V_FW_FILTER_WR_MASKHASH(1U)
+
+#define S_FW_FILTER_WR_DIRSTEERHASH 21
+#define M_FW_FILTER_WR_DIRSTEERHASH 0x1
+#define V_FW_FILTER_WR_DIRSTEERHASH(x) ((x) << S_FW_FILTER_WR_DIRSTEERHASH)
+#define G_FW_FILTER_WR_DIRSTEERHASH(x) \
+ (((x) >> S_FW_FILTER_WR_DIRSTEERHASH) & M_FW_FILTER_WR_DIRSTEERHASH)
+#define F_FW_FILTER_WR_DIRSTEERHASH V_FW_FILTER_WR_DIRSTEERHASH(1U)
+
+#define S_FW_FILTER_WR_LPBK 20
+#define M_FW_FILTER_WR_LPBK 0x1
+#define V_FW_FILTER_WR_LPBK(x) ((x) << S_FW_FILTER_WR_LPBK)
+#define G_FW_FILTER_WR_LPBK(x) \
+ (((x) >> S_FW_FILTER_WR_LPBK) & M_FW_FILTER_WR_LPBK)
+#define F_FW_FILTER_WR_LPBK V_FW_FILTER_WR_LPBK(1U)
+
+#define S_FW_FILTER_WR_DMAC 19
+#define M_FW_FILTER_WR_DMAC 0x1
+#define V_FW_FILTER_WR_DMAC(x) ((x) << S_FW_FILTER_WR_DMAC)
+#define G_FW_FILTER_WR_DMAC(x) \
+ (((x) >> S_FW_FILTER_WR_DMAC) & M_FW_FILTER_WR_DMAC)
+#define F_FW_FILTER_WR_DMAC V_FW_FILTER_WR_DMAC(1U)
+
+#define S_FW_FILTER_WR_SMAC 18
+#define M_FW_FILTER_WR_SMAC 0x1
+#define V_FW_FILTER_WR_SMAC(x) ((x) << S_FW_FILTER_WR_SMAC)
+#define G_FW_FILTER_WR_SMAC(x) \
+ (((x) >> S_FW_FILTER_WR_SMAC) & M_FW_FILTER_WR_SMAC)
+#define F_FW_FILTER_WR_SMAC V_FW_FILTER_WR_SMAC(1U)
+
+#define S_FW_FILTER_WR_INSVLAN 17
+#define M_FW_FILTER_WR_INSVLAN 0x1
+#define V_FW_FILTER_WR_INSVLAN(x) ((x) << S_FW_FILTER_WR_INSVLAN)
+#define G_FW_FILTER_WR_INSVLAN(x) \
+ (((x) >> S_FW_FILTER_WR_INSVLAN) & M_FW_FILTER_WR_INSVLAN)
+#define F_FW_FILTER_WR_INSVLAN V_FW_FILTER_WR_INSVLAN(1U)
+
+#define S_FW_FILTER_WR_RMVLAN 16
+#define M_FW_FILTER_WR_RMVLAN 0x1
+#define V_FW_FILTER_WR_RMVLAN(x) ((x) << S_FW_FILTER_WR_RMVLAN)
+#define G_FW_FILTER_WR_RMVLAN(x) \
+ (((x) >> S_FW_FILTER_WR_RMVLAN) & M_FW_FILTER_WR_RMVLAN)
+#define F_FW_FILTER_WR_RMVLAN V_FW_FILTER_WR_RMVLAN(1U)
+
+#define S_FW_FILTER_WR_HITCNTS 15
+#define M_FW_FILTER_WR_HITCNTS 0x1
+#define V_FW_FILTER_WR_HITCNTS(x) ((x) << S_FW_FILTER_WR_HITCNTS)
+#define G_FW_FILTER_WR_HITCNTS(x) \
+ (((x) >> S_FW_FILTER_WR_HITCNTS) & M_FW_FILTER_WR_HITCNTS)
+#define F_FW_FILTER_WR_HITCNTS V_FW_FILTER_WR_HITCNTS(1U)
+
+#define S_FW_FILTER_WR_TXCHAN 13
+#define M_FW_FILTER_WR_TXCHAN 0x3
+#define V_FW_FILTER_WR_TXCHAN(x) ((x) << S_FW_FILTER_WR_TXCHAN)
+#define G_FW_FILTER_WR_TXCHAN(x) \
+ (((x) >> S_FW_FILTER_WR_TXCHAN) & M_FW_FILTER_WR_TXCHAN)
+
+#define S_FW_FILTER_WR_PRIO 12
+#define M_FW_FILTER_WR_PRIO 0x1
+#define V_FW_FILTER_WR_PRIO(x) ((x) << S_FW_FILTER_WR_PRIO)
+#define G_FW_FILTER_WR_PRIO(x) \
+ (((x) >> S_FW_FILTER_WR_PRIO) & M_FW_FILTER_WR_PRIO)
+#define F_FW_FILTER_WR_PRIO V_FW_FILTER_WR_PRIO(1U)
+
+#define S_FW_FILTER_WR_L2TIX 0
+#define M_FW_FILTER_WR_L2TIX 0xfff
+#define V_FW_FILTER_WR_L2TIX(x) ((x) << S_FW_FILTER_WR_L2TIX)
+#define G_FW_FILTER_WR_L2TIX(x) \
+ (((x) >> S_FW_FILTER_WR_L2TIX) & M_FW_FILTER_WR_L2TIX)
+
+#define S_FW_FILTER_WR_FRAG 7
+#define M_FW_FILTER_WR_FRAG 0x1
+#define V_FW_FILTER_WR_FRAG(x) ((x) << S_FW_FILTER_WR_FRAG)
+#define G_FW_FILTER_WR_FRAG(x) \
+ (((x) >> S_FW_FILTER_WR_FRAG) & M_FW_FILTER_WR_FRAG)
+#define F_FW_FILTER_WR_FRAG V_FW_FILTER_WR_FRAG(1U)
+
+#define S_FW_FILTER_WR_FRAGM 6
+#define M_FW_FILTER_WR_FRAGM 0x1
+#define V_FW_FILTER_WR_FRAGM(x) ((x) << S_FW_FILTER_WR_FRAGM)
+#define G_FW_FILTER_WR_FRAGM(x) \
+ (((x) >> S_FW_FILTER_WR_FRAGM) & M_FW_FILTER_WR_FRAGM)
+#define F_FW_FILTER_WR_FRAGM V_FW_FILTER_WR_FRAGM(1U)
+
+#define S_FW_FILTER_WR_IVLAN_VLD 5
+#define M_FW_FILTER_WR_IVLAN_VLD 0x1
+#define V_FW_FILTER_WR_IVLAN_VLD(x) ((x) << S_FW_FILTER_WR_IVLAN_VLD)
+#define G_FW_FILTER_WR_IVLAN_VLD(x) \
+ (((x) >> S_FW_FILTER_WR_IVLAN_VLD) & M_FW_FILTER_WR_IVLAN_VLD)
+#define F_FW_FILTER_WR_IVLAN_VLD V_FW_FILTER_WR_IVLAN_VLD(1U)
+
+#define S_FW_FILTER_WR_OVLAN_VLD 4
+#define M_FW_FILTER_WR_OVLAN_VLD 0x1
+#define V_FW_FILTER_WR_OVLAN_VLD(x) ((x) << S_FW_FILTER_WR_OVLAN_VLD)
+#define G_FW_FILTER_WR_OVLAN_VLD(x) \
+ (((x) >> S_FW_FILTER_WR_OVLAN_VLD) & M_FW_FILTER_WR_OVLAN_VLD)
+#define F_FW_FILTER_WR_OVLAN_VLD V_FW_FILTER_WR_OVLAN_VLD(1U)
+
+#define S_FW_FILTER_WR_IVLAN_VLDM 3
+#define M_FW_FILTER_WR_IVLAN_VLDM 0x1
+#define V_FW_FILTER_WR_IVLAN_VLDM(x) ((x) << S_FW_FILTER_WR_IVLAN_VLDM)
+#define G_FW_FILTER_WR_IVLAN_VLDM(x) \
+ (((x) >> S_FW_FILTER_WR_IVLAN_VLDM) & M_FW_FILTER_WR_IVLAN_VLDM)
+#define F_FW_FILTER_WR_IVLAN_VLDM V_FW_FILTER_WR_IVLAN_VLDM(1U)
+
+#define S_FW_FILTER_WR_OVLAN_VLDM 2
+#define M_FW_FILTER_WR_OVLAN_VLDM 0x1
+#define V_FW_FILTER_WR_OVLAN_VLDM(x) ((x) << S_FW_FILTER_WR_OVLAN_VLDM)
+#define G_FW_FILTER_WR_OVLAN_VLDM(x) \
+ (((x) >> S_FW_FILTER_WR_OVLAN_VLDM) & M_FW_FILTER_WR_OVLAN_VLDM)
+#define F_FW_FILTER_WR_OVLAN_VLDM V_FW_FILTER_WR_OVLAN_VLDM(1U)
+
+#define S_FW_FILTER_WR_RX_CHAN 15
+#define M_FW_FILTER_WR_RX_CHAN 0x1
+#define V_FW_FILTER_WR_RX_CHAN(x) ((x) << S_FW_FILTER_WR_RX_CHAN)
+#define G_FW_FILTER_WR_RX_CHAN(x) \
+ (((x) >> S_FW_FILTER_WR_RX_CHAN) & M_FW_FILTER_WR_RX_CHAN)
+#define F_FW_FILTER_WR_RX_CHAN V_FW_FILTER_WR_RX_CHAN(1U)
+
+#define S_FW_FILTER_WR_RX_RPL_IQ 0
+#define M_FW_FILTER_WR_RX_RPL_IQ 0x3ff
+#define V_FW_FILTER_WR_RX_RPL_IQ(x) ((x) << S_FW_FILTER_WR_RX_RPL_IQ)
+#define G_FW_FILTER_WR_RX_RPL_IQ(x) \
+ (((x) >> S_FW_FILTER_WR_RX_RPL_IQ) & M_FW_FILTER_WR_RX_RPL_IQ)
+
+#define S_FW_FILTER_WR_MACI 23
+#define M_FW_FILTER_WR_MACI 0x1ff
+#define V_FW_FILTER_WR_MACI(x) ((x) << S_FW_FILTER_WR_MACI)
+#define G_FW_FILTER_WR_MACI(x) \
+ (((x) >> S_FW_FILTER_WR_MACI) & M_FW_FILTER_WR_MACI)
+
+#define S_FW_FILTER_WR_MACIM 14
+#define M_FW_FILTER_WR_MACIM 0x1ff
+#define V_FW_FILTER_WR_MACIM(x) ((x) << S_FW_FILTER_WR_MACIM)
+#define G_FW_FILTER_WR_MACIM(x) \
+ (((x) >> S_FW_FILTER_WR_MACIM) & M_FW_FILTER_WR_MACIM)
+
+#define S_FW_FILTER_WR_FCOE 13
+#define M_FW_FILTER_WR_FCOE 0x1
+#define V_FW_FILTER_WR_FCOE(x) ((x) << S_FW_FILTER_WR_FCOE)
+#define G_FW_FILTER_WR_FCOE(x) \
+ (((x) >> S_FW_FILTER_WR_FCOE) & M_FW_FILTER_WR_FCOE)
+#define F_FW_FILTER_WR_FCOE V_FW_FILTER_WR_FCOE(1U)
+
+#define S_FW_FILTER_WR_FCOEM 12
+#define M_FW_FILTER_WR_FCOEM 0x1
+#define V_FW_FILTER_WR_FCOEM(x) ((x) << S_FW_FILTER_WR_FCOEM)
+#define G_FW_FILTER_WR_FCOEM(x) \
+ (((x) >> S_FW_FILTER_WR_FCOEM) & M_FW_FILTER_WR_FCOEM)
+#define F_FW_FILTER_WR_FCOEM V_FW_FILTER_WR_FCOEM(1U)
+
+#define S_FW_FILTER_WR_PORT 9
+#define M_FW_FILTER_WR_PORT 0x7
+#define V_FW_FILTER_WR_PORT(x) ((x) << S_FW_FILTER_WR_PORT)
+#define G_FW_FILTER_WR_PORT(x) \
+ (((x) >> S_FW_FILTER_WR_PORT) & M_FW_FILTER_WR_PORT)
+
+#define S_FW_FILTER_WR_PORTM 6
+#define M_FW_FILTER_WR_PORTM 0x7
+#define V_FW_FILTER_WR_PORTM(x) ((x) << S_FW_FILTER_WR_PORTM)
+#define G_FW_FILTER_WR_PORTM(x) \
+ (((x) >> S_FW_FILTER_WR_PORTM) & M_FW_FILTER_WR_PORTM)
+
+#define S_FW_FILTER_WR_MATCHTYPE 3
+#define M_FW_FILTER_WR_MATCHTYPE 0x7
+#define V_FW_FILTER_WR_MATCHTYPE(x) ((x) << S_FW_FILTER_WR_MATCHTYPE)
+#define G_FW_FILTER_WR_MATCHTYPE(x) \
+ (((x) >> S_FW_FILTER_WR_MATCHTYPE) & M_FW_FILTER_WR_MATCHTYPE)
+
+#define S_FW_FILTER_WR_MATCHTYPEM 0
+#define M_FW_FILTER_WR_MATCHTYPEM 0x7
+#define V_FW_FILTER_WR_MATCHTYPEM(x) ((x) << S_FW_FILTER_WR_MATCHTYPEM)
+#define G_FW_FILTER_WR_MATCHTYPEM(x) \
+ (((x) >> S_FW_FILTER_WR_MATCHTYPEM) & M_FW_FILTER_WR_MATCHTYPEM)
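+
+/*
+ * Illustrative sketch only: a hypothetical caller might fill the leading
+ * words of a filter work request with the macros above ("f" stands in for
+ * some local filter description and is an assumption of this example):
+ *
+ *	fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
+ *	fwr->len16_pkd = htobe32(V_FW_WR_LEN16(sizeof(*fwr) / 16));
+ *	fwr->tid_to_iq = htobe32(V_FW_FILTER_WR_TID(f->tid) |
+ *	    V_FW_FILTER_WR_NOREPLY(0) | V_FW_FILTER_WR_IQ(f->iq));
+ */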
+
+struct fw_ulptx_wr {
+ __be32 op_to_compl;
+ __be32 flowid_len16;
+ __u64 cookie;
+};
+
+struct fw_tp_wr {
+ __be32 op_to_immdlen;
+ __be32 flowid_len16;
+ __u64 cookie;
+};
+
+struct fw_eth_tx_pkt_wr {
+ __be32 op_immdlen;
+ __be32 equiq_to_len16;
+ __be64 r3;
+};
+
+struct fw_eth_tx_pkts_wr {
+ __be32 op_immdlen;
+ __be32 equiq_to_len16;
+ __be32 r3;
+ __be16 plen;
+ __u8 npkt;
+ __u8 r4;
+};
+
+struct fw_eq_flush_wr {
+ __u8 opcode;
+ __u8 r1[3];
+ __be32 equiq_to_len16;
+ __be64 r3;
+};
+
+enum fw_flowc_mnem {
+ FW_FLOWC_MNEM_PFNVFN, /* PFN [15:8] VFN [7:0] */
+ FW_FLOWC_MNEM_CH,
+ FW_FLOWC_MNEM_PORT,
+ FW_FLOWC_MNEM_IQID,
+ FW_FLOWC_MNEM_SNDNXT,
+ FW_FLOWC_MNEM_RCVNXT,
+ FW_FLOWC_MNEM_SNDBUF,
+ FW_FLOWC_MNEM_MSS,
+	FW_FLOWC_MNEM_TXDATAPLEN_MAX,
+};
+
+struct fw_flowc_mnemval {
+ __u8 mnemonic;
+ __u8 r4[3];
+ __be32 val;
+};
+
+struct fw_flowc_wr {
+ __be32 op_to_nparams;
+ __be32 flowid_len16;
+#ifndef C99_NOT_SUPPORTED
+ struct fw_flowc_mnemval mnemval[0];
+#endif
+};
+
+#define S_FW_FLOWC_WR_NPARAMS 0
+#define M_FW_FLOWC_WR_NPARAMS 0xff
+#define V_FW_FLOWC_WR_NPARAMS(x) ((x) << S_FW_FLOWC_WR_NPARAMS)
+#define G_FW_FLOWC_WR_NPARAMS(x) \
+ (((x) >> S_FW_FLOWC_WR_NPARAMS) & M_FW_FLOWC_WR_NPARAMS)
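+
+/*
+ * Illustrative sketch: a FLOWC work request is followed by "nparams"
+ * (mnemonic, value) pairs, so its length in 16-byte units depends on the
+ * parameter count.  A hypothetical caller could size and tag it as:
+ *
+ *	flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval);
+ *	flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
+ *	    V_FW_FLOWC_WR_NPARAMS(nparams));
+ *	flowc->flowid_len16 = htobe32(V_FW_WR_FLOWID(flowid) |
+ *	    V_FW_WR_LEN16(howmany(flowclen, 16)));
+ */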
+
+struct fw_ofld_tx_data_wr {
+ __be32 op_to_immdlen;
+ __be32 flowid_len16;
+ __be32 plen;
+ __be32 tunnel_to_proxy;
+};
+
+#define S_FW_OFLD_TX_DATA_WR_TUNNEL 19
+#define M_FW_OFLD_TX_DATA_WR_TUNNEL 0x1
+#define V_FW_OFLD_TX_DATA_WR_TUNNEL(x) ((x) << S_FW_OFLD_TX_DATA_WR_TUNNEL)
+#define G_FW_OFLD_TX_DATA_WR_TUNNEL(x) \
+ (((x) >> S_FW_OFLD_TX_DATA_WR_TUNNEL) & M_FW_OFLD_TX_DATA_WR_TUNNEL)
+#define F_FW_OFLD_TX_DATA_WR_TUNNEL V_FW_OFLD_TX_DATA_WR_TUNNEL(1U)
+
+#define S_FW_OFLD_TX_DATA_WR_SAVE 18
+#define M_FW_OFLD_TX_DATA_WR_SAVE 0x1
+#define V_FW_OFLD_TX_DATA_WR_SAVE(x) ((x) << S_FW_OFLD_TX_DATA_WR_SAVE)
+#define G_FW_OFLD_TX_DATA_WR_SAVE(x) \
+ (((x) >> S_FW_OFLD_TX_DATA_WR_SAVE) & M_FW_OFLD_TX_DATA_WR_SAVE)
+#define F_FW_OFLD_TX_DATA_WR_SAVE V_FW_OFLD_TX_DATA_WR_SAVE(1U)
+
+#define S_FW_OFLD_TX_DATA_WR_FLUSH 17
+#define M_FW_OFLD_TX_DATA_WR_FLUSH 0x1
+#define V_FW_OFLD_TX_DATA_WR_FLUSH(x) ((x) << S_FW_OFLD_TX_DATA_WR_FLUSH)
+#define G_FW_OFLD_TX_DATA_WR_FLUSH(x) \
+ (((x) >> S_FW_OFLD_TX_DATA_WR_FLUSH) & M_FW_OFLD_TX_DATA_WR_FLUSH)
+#define F_FW_OFLD_TX_DATA_WR_FLUSH V_FW_OFLD_TX_DATA_WR_FLUSH(1U)
+
+#define S_FW_OFLD_TX_DATA_WR_URGENT 16
+#define M_FW_OFLD_TX_DATA_WR_URGENT 0x1
+#define V_FW_OFLD_TX_DATA_WR_URGENT(x) ((x) << S_FW_OFLD_TX_DATA_WR_URGENT)
+#define G_FW_OFLD_TX_DATA_WR_URGENT(x) \
+ (((x) >> S_FW_OFLD_TX_DATA_WR_URGENT) & M_FW_OFLD_TX_DATA_WR_URGENT)
+#define F_FW_OFLD_TX_DATA_WR_URGENT V_FW_OFLD_TX_DATA_WR_URGENT(1U)
+
+#define S_FW_OFLD_TX_DATA_WR_MORE 15
+#define M_FW_OFLD_TX_DATA_WR_MORE 0x1
+#define V_FW_OFLD_TX_DATA_WR_MORE(x) ((x) << S_FW_OFLD_TX_DATA_WR_MORE)
+#define G_FW_OFLD_TX_DATA_WR_MORE(x) \
+ (((x) >> S_FW_OFLD_TX_DATA_WR_MORE) & M_FW_OFLD_TX_DATA_WR_MORE)
+#define F_FW_OFLD_TX_DATA_WR_MORE V_FW_OFLD_TX_DATA_WR_MORE(1U)
+
+#define S_FW_OFLD_TX_DATA_WR_SHOVE 14
+#define M_FW_OFLD_TX_DATA_WR_SHOVE 0x1
+#define V_FW_OFLD_TX_DATA_WR_SHOVE(x) ((x) << S_FW_OFLD_TX_DATA_WR_SHOVE)
+#define G_FW_OFLD_TX_DATA_WR_SHOVE(x) \
+ (((x) >> S_FW_OFLD_TX_DATA_WR_SHOVE) & M_FW_OFLD_TX_DATA_WR_SHOVE)
+#define F_FW_OFLD_TX_DATA_WR_SHOVE V_FW_OFLD_TX_DATA_WR_SHOVE(1U)
+
+#define S_FW_OFLD_TX_DATA_WR_ULPMODE 10
+#define M_FW_OFLD_TX_DATA_WR_ULPMODE 0xf
+#define V_FW_OFLD_TX_DATA_WR_ULPMODE(x) ((x) << S_FW_OFLD_TX_DATA_WR_ULPMODE)
+#define G_FW_OFLD_TX_DATA_WR_ULPMODE(x) \
+ (((x) >> S_FW_OFLD_TX_DATA_WR_ULPMODE) & M_FW_OFLD_TX_DATA_WR_ULPMODE)
+
+#define S_FW_OFLD_TX_DATA_WR_ULPSUBMODE 6
+#define M_FW_OFLD_TX_DATA_WR_ULPSUBMODE 0xf
+#define V_FW_OFLD_TX_DATA_WR_ULPSUBMODE(x) \
+ ((x) << S_FW_OFLD_TX_DATA_WR_ULPSUBMODE)
+#define G_FW_OFLD_TX_DATA_WR_ULPSUBMODE(x) \
+ (((x) >> S_FW_OFLD_TX_DATA_WR_ULPSUBMODE) & \
+ M_FW_OFLD_TX_DATA_WR_ULPSUBMODE)
+
+#define S_FW_OFLD_TX_DATA_WR_PROXY 5
+#define M_FW_OFLD_TX_DATA_WR_PROXY 0x1
+#define V_FW_OFLD_TX_DATA_WR_PROXY(x) ((x) << S_FW_OFLD_TX_DATA_WR_PROXY)
+#define G_FW_OFLD_TX_DATA_WR_PROXY(x) \
+ (((x) >> S_FW_OFLD_TX_DATA_WR_PROXY) & M_FW_OFLD_TX_DATA_WR_PROXY)
+#define F_FW_OFLD_TX_DATA_WR_PROXY V_FW_OFLD_TX_DATA_WR_PROXY(1U)
+
+struct fw_cmd_wr {
+ __be32 op_dma;
+ __be32 len16_pkd;
+ __be64 cookie_daddr;
+};
+
+#define S_FW_CMD_WR_DMA 17
+#define M_FW_CMD_WR_DMA 0x1
+#define V_FW_CMD_WR_DMA(x) ((x) << S_FW_CMD_WR_DMA)
+#define G_FW_CMD_WR_DMA(x) (((x) >> S_FW_CMD_WR_DMA) & M_FW_CMD_WR_DMA)
+#define F_FW_CMD_WR_DMA V_FW_CMD_WR_DMA(1U)
+
+struct fw_eth_tx_pkt_vm_wr {
+ __be32 op_immdlen;
+ __be32 equiq_to_len16;
+ __be32 r3[2];
+ __u8 ethmacdst[6];
+ __u8 ethmacsrc[6];
+ __be16 ethtype;
+ __be16 vlantci;
+};
+
+/******************************************************************************
+ * R I W O R K R E Q U E S T s
+ **************************************/
+
+enum fw_ri_wr_opcode {
+ FW_RI_RDMA_WRITE = 0x0, /* IETF RDMAP v1.0 ... */
+ FW_RI_READ_REQ = 0x1,
+ FW_RI_READ_RESP = 0x2,
+ FW_RI_SEND = 0x3,
+ FW_RI_SEND_WITH_INV = 0x4,
+ FW_RI_SEND_WITH_SE = 0x5,
+ FW_RI_SEND_WITH_SE_INV = 0x6,
+ FW_RI_TERMINATE = 0x7,
+ FW_RI_RDMA_INIT = 0x8, /* CHELSIO RI specific ... */
+ FW_RI_BIND_MW = 0x9,
+ FW_RI_FAST_REGISTER = 0xa,
+ FW_RI_LOCAL_INV = 0xb,
+ FW_RI_QP_MODIFY = 0xc,
+ FW_RI_BYPASS = 0xd,
+ FW_RI_RECEIVE = 0xe,
+
+ FW_RI_SGE_EC_CR_RETURN = 0xf
+};
+
+enum fw_ri_wr_flags {
+ FW_RI_COMPLETION_FLAG = 0x01,
+ FW_RI_NOTIFICATION_FLAG = 0x02,
+ FW_RI_SOLICITED_EVENT_FLAG = 0x04,
+ FW_RI_READ_FENCE_FLAG = 0x08,
+ FW_RI_LOCAL_FENCE_FLAG = 0x10,
+ FW_RI_RDMA_READ_INVALIDATE = 0x20
+};
+
+enum fw_ri_mpa_attrs {
+ FW_RI_MPA_RX_MARKER_ENABLE = 0x01,
+ FW_RI_MPA_TX_MARKER_ENABLE = 0x02,
+ FW_RI_MPA_CRC_ENABLE = 0x04,
+ FW_RI_MPA_IETF_ENABLE = 0x08
+};
+
+enum fw_ri_qp_caps {
+ FW_RI_QP_RDMA_READ_ENABLE = 0x01,
+ FW_RI_QP_RDMA_WRITE_ENABLE = 0x02,
+ FW_RI_QP_BIND_ENABLE = 0x04,
+ FW_RI_QP_FAST_REGISTER_ENABLE = 0x08,
+ FW_RI_QP_STAG0_ENABLE = 0x10,
+ FW_RI_QP_RDMA_READ_REQ_0B_ENABLE= 0x80,
+};
+
+enum fw_ri_addr_type {
+ FW_RI_ZERO_BASED_TO = 0x00,
+ FW_RI_VA_BASED_TO = 0x01
+};
+
+enum fw_ri_mem_perms {
+ FW_RI_MEM_ACCESS_REM_WRITE = 0x01,
+ FW_RI_MEM_ACCESS_REM_READ = 0x02,
+ FW_RI_MEM_ACCESS_REM = 0x03,
+ FW_RI_MEM_ACCESS_LOCAL_WRITE = 0x04,
+ FW_RI_MEM_ACCESS_LOCAL_READ = 0x08,
+ FW_RI_MEM_ACCESS_LOCAL = 0x0C
+};
+
+enum fw_ri_stag_type {
+ FW_RI_STAG_NSMR = 0x00,
+ FW_RI_STAG_SMR = 0x01,
+ FW_RI_STAG_MW = 0x02,
+ FW_RI_STAG_MW_RELAXED = 0x03
+};
+
+enum fw_ri_data_op {
+ FW_RI_DATA_IMMD = 0x81,
+ FW_RI_DATA_DSGL = 0x82,
+ FW_RI_DATA_ISGL = 0x83
+};
+
+enum fw_ri_sgl_depth {
+ FW_RI_SGL_DEPTH_MAX_SQ = 16,
+ FW_RI_SGL_DEPTH_MAX_RQ = 4
+};
+
+enum fw_ri_cqe_err {
+ FW_RI_CQE_ERR_SUCCESS = 0x00, /* success, no error detected */
+ FW_RI_CQE_ERR_STAG = 0x01, /* STAG invalid */
+ FW_RI_CQE_ERR_PDID = 0x02, /* PDID mismatch */
+ FW_RI_CQE_ERR_QPID = 0x03, /* QPID mismatch */
+ FW_RI_CQE_ERR_ACCESS = 0x04, /* Invalid access right */
+ FW_RI_CQE_ERR_WRAP = 0x05, /* Wrap error */
+ FW_RI_CQE_ERR_BOUND = 0x06, /* base and bounds violation */
+ FW_RI_CQE_ERR_INVALIDATE_SHARED_MR = 0x07, /* attempt to invalidate a SMR */
+ FW_RI_CQE_ERR_INVALIDATE_MR_WITH_MW_BOUND = 0x08, /* attempt to invalidate a MR w MW */
+ FW_RI_CQE_ERR_ECC = 0x09, /* ECC error detected */
+ FW_RI_CQE_ERR_ECC_PSTAG = 0x0A, /* ECC error detected when reading the PSTAG for a MW Invalidate */
+ FW_RI_CQE_ERR_PBL_ADDR_BOUND = 0x0B, /* pbl address out of bound : software error */
+ FW_RI_CQE_ERR_CRC = 0x10, /* CRC error */
+ FW_RI_CQE_ERR_MARKER = 0x11, /* Marker error */
+ FW_RI_CQE_ERR_PDU_LEN_ERR = 0x12, /* invalid PDU length */
+ FW_RI_CQE_ERR_OUT_OF_RQE = 0x13, /* out of RQE */
+ FW_RI_CQE_ERR_DDP_VERSION = 0x14, /* wrong DDP version */
+ FW_RI_CQE_ERR_RDMA_VERSION = 0x15, /* wrong RDMA version */
+ FW_RI_CQE_ERR_OPCODE = 0x16, /* invalid rdma opcode */
+ FW_RI_CQE_ERR_DDP_QUEUE_NUM = 0x17, /* invalid ddp queue number */
+ FW_RI_CQE_ERR_MSN = 0x18, /* MSN error */
+ FW_RI_CQE_ERR_TBIT = 0x19, /* tag bit not set correctly */
+ FW_RI_CQE_ERR_MO = 0x1A, /* MO not zero for TERMINATE or READ_REQ */
+ FW_RI_CQE_ERR_MSN_GAP = 0x1B, /* */
+ FW_RI_CQE_ERR_MSN_RANGE = 0x1C, /* */
+ FW_RI_CQE_ERR_IRD_OVERFLOW = 0x1D, /* */
+ FW_RI_CQE_ERR_RQE_ADDR_BOUND = 0x1E, /* RQE address out of bound : software error */
+	FW_RI_CQE_ERR_INTERNAL_ERR = 0x1F /* internal error (opcode mismatch) */
+
+};
+
+struct fw_ri_dsge_pair {
+ __be32 len[2];
+ __be64 addr[2];
+};
+
+struct fw_ri_dsgl {
+ __u8 op;
+ __u8 r1;
+ __be16 nsge;
+ __be32 len0;
+ __be64 addr0;
+#ifndef C99_NOT_SUPPORTED
+ struct fw_ri_dsge_pair sge[0];
+#endif
+};
+
+struct fw_ri_sge {
+ __be32 stag;
+ __be32 len;
+ __be64 to;
+};
+
+struct fw_ri_isgl {
+ __u8 op;
+ __u8 r1;
+ __be16 nsge;
+ __be32 r2;
+#ifndef C99_NOT_SUPPORTED
+ struct fw_ri_sge sge[0];
+#endif
+};
+
+struct fw_ri_immd {
+ __u8 op;
+ __u8 r1;
+ __be16 r2;
+ __be32 immdlen;
+#ifndef C99_NOT_SUPPORTED
+ __u8 data[0];
+#endif
+};
+
+struct fw_ri_tpte {
+ __be32 valid_to_pdid;
+ __be32 locread_to_qpid;
+ __be32 nosnoop_pbladdr;
+ __be32 len_lo;
+ __be32 va_hi;
+ __be32 va_lo_fbo;
+ __be32 dca_mwbcnt_pstag;
+ __be32 len_hi;
+};
+
+#define S_FW_RI_TPTE_VALID 31
+#define M_FW_RI_TPTE_VALID 0x1
+#define V_FW_RI_TPTE_VALID(x) ((x) << S_FW_RI_TPTE_VALID)
+#define G_FW_RI_TPTE_VALID(x) \
+ (((x) >> S_FW_RI_TPTE_VALID) & M_FW_RI_TPTE_VALID)
+#define F_FW_RI_TPTE_VALID V_FW_RI_TPTE_VALID(1U)
+
+#define S_FW_RI_TPTE_STAGKEY 23
+#define M_FW_RI_TPTE_STAGKEY 0xff
+#define V_FW_RI_TPTE_STAGKEY(x) ((x) << S_FW_RI_TPTE_STAGKEY)
+#define G_FW_RI_TPTE_STAGKEY(x) \
+ (((x) >> S_FW_RI_TPTE_STAGKEY) & M_FW_RI_TPTE_STAGKEY)
+
+#define S_FW_RI_TPTE_STAGSTATE 22
+#define M_FW_RI_TPTE_STAGSTATE 0x1
+#define V_FW_RI_TPTE_STAGSTATE(x) ((x) << S_FW_RI_TPTE_STAGSTATE)
+#define G_FW_RI_TPTE_STAGSTATE(x) \
+ (((x) >> S_FW_RI_TPTE_STAGSTATE) & M_FW_RI_TPTE_STAGSTATE)
+#define F_FW_RI_TPTE_STAGSTATE V_FW_RI_TPTE_STAGSTATE(1U)
+
+#define S_FW_RI_TPTE_STAGTYPE 20
+#define M_FW_RI_TPTE_STAGTYPE 0x3
+#define V_FW_RI_TPTE_STAGTYPE(x) ((x) << S_FW_RI_TPTE_STAGTYPE)
+#define G_FW_RI_TPTE_STAGTYPE(x) \
+ (((x) >> S_FW_RI_TPTE_STAGTYPE) & M_FW_RI_TPTE_STAGTYPE)
+
+#define S_FW_RI_TPTE_PDID 0
+#define M_FW_RI_TPTE_PDID 0xfffff
+#define V_FW_RI_TPTE_PDID(x) ((x) << S_FW_RI_TPTE_PDID)
+#define G_FW_RI_TPTE_PDID(x) \
+ (((x) >> S_FW_RI_TPTE_PDID) & M_FW_RI_TPTE_PDID)
+
+#define S_FW_RI_TPTE_PERM 28
+#define M_FW_RI_TPTE_PERM 0xf
+#define V_FW_RI_TPTE_PERM(x) ((x) << S_FW_RI_TPTE_PERM)
+#define G_FW_RI_TPTE_PERM(x) \
+ (((x) >> S_FW_RI_TPTE_PERM) & M_FW_RI_TPTE_PERM)
+
+#define S_FW_RI_TPTE_REMINVDIS 27
+#define M_FW_RI_TPTE_REMINVDIS 0x1
+#define V_FW_RI_TPTE_REMINVDIS(x) ((x) << S_FW_RI_TPTE_REMINVDIS)
+#define G_FW_RI_TPTE_REMINVDIS(x) \
+ (((x) >> S_FW_RI_TPTE_REMINVDIS) & M_FW_RI_TPTE_REMINVDIS)
+#define F_FW_RI_TPTE_REMINVDIS V_FW_RI_TPTE_REMINVDIS(1U)
+
+#define S_FW_RI_TPTE_ADDRTYPE 26
+#define M_FW_RI_TPTE_ADDRTYPE		0x1
+#define V_FW_RI_TPTE_ADDRTYPE(x) ((x) << S_FW_RI_TPTE_ADDRTYPE)
+#define G_FW_RI_TPTE_ADDRTYPE(x) \
+ (((x) >> S_FW_RI_TPTE_ADDRTYPE) & M_FW_RI_TPTE_ADDRTYPE)
+#define F_FW_RI_TPTE_ADDRTYPE V_FW_RI_TPTE_ADDRTYPE(1U)
+
+#define S_FW_RI_TPTE_MWBINDEN 25
+#define M_FW_RI_TPTE_MWBINDEN 0x1
+#define V_FW_RI_TPTE_MWBINDEN(x) ((x) << S_FW_RI_TPTE_MWBINDEN)
+#define G_FW_RI_TPTE_MWBINDEN(x) \
+ (((x) >> S_FW_RI_TPTE_MWBINDEN) & M_FW_RI_TPTE_MWBINDEN)
+#define F_FW_RI_TPTE_MWBINDEN V_FW_RI_TPTE_MWBINDEN(1U)
+
+#define S_FW_RI_TPTE_PS 20
+#define M_FW_RI_TPTE_PS 0x1f
+#define V_FW_RI_TPTE_PS(x) ((x) << S_FW_RI_TPTE_PS)
+#define G_FW_RI_TPTE_PS(x) \
+ (((x) >> S_FW_RI_TPTE_PS) & M_FW_RI_TPTE_PS)
+
+#define S_FW_RI_TPTE_QPID 0
+#define M_FW_RI_TPTE_QPID 0xfffff
+#define V_FW_RI_TPTE_QPID(x) ((x) << S_FW_RI_TPTE_QPID)
+#define G_FW_RI_TPTE_QPID(x) \
+ (((x) >> S_FW_RI_TPTE_QPID) & M_FW_RI_TPTE_QPID)
+
+#define S_FW_RI_TPTE_NOSNOOP 31
+#define M_FW_RI_TPTE_NOSNOOP 0x1
+#define V_FW_RI_TPTE_NOSNOOP(x) ((x) << S_FW_RI_TPTE_NOSNOOP)
+#define G_FW_RI_TPTE_NOSNOOP(x) \
+ (((x) >> S_FW_RI_TPTE_NOSNOOP) & M_FW_RI_TPTE_NOSNOOP)
+#define F_FW_RI_TPTE_NOSNOOP V_FW_RI_TPTE_NOSNOOP(1U)
+
+#define S_FW_RI_TPTE_PBLADDR 0
+#define M_FW_RI_TPTE_PBLADDR 0x1fffffff
+#define V_FW_RI_TPTE_PBLADDR(x) ((x) << S_FW_RI_TPTE_PBLADDR)
+#define G_FW_RI_TPTE_PBLADDR(x) \
+ (((x) >> S_FW_RI_TPTE_PBLADDR) & M_FW_RI_TPTE_PBLADDR)
+
+#define S_FW_RI_TPTE_DCA 24
+#define M_FW_RI_TPTE_DCA 0x1f
+#define V_FW_RI_TPTE_DCA(x) ((x) << S_FW_RI_TPTE_DCA)
+#define G_FW_RI_TPTE_DCA(x) \
+ (((x) >> S_FW_RI_TPTE_DCA) & M_FW_RI_TPTE_DCA)
+
+#define S_FW_RI_TPTE_MWBCNT_PSTAG 0
+#define M_FW_RI_TPTE_MWBCNT_PSTAG 0xffffff
+#define V_FW_RI_TPTE_MWBCNT_PSTAG(x)	\
+ ((x) << S_FW_RI_TPTE_MWBCNT_PSTAG)
+#define G_FW_RI_TPTE_MWBCNT_PSTAG(x) \
+ (((x) >> S_FW_RI_TPTE_MWBCNT_PSTAG) & M_FW_RI_TPTE_MWBCNT_PSTAG)
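+
+/*
+ * Note that the shift/mask values above repeat because successive groups
+ * of macros target successive 32-bit words of struct fw_ri_tpte:
+ * VALID..PDID pack into valid_to_pdid, PERM..QPID into locread_to_qpid,
+ * NOSNOOP/PBLADDR into nosnoop_pbladdr, and DCA/MWBCNT_PSTAG into
+ * dca_mwbcnt_pstag.
+ */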
+
+enum fw_ri_cqe_rxtx {
+ FW_RI_CQE_RXTX_RX = 0x0,
+ FW_RI_CQE_RXTX_TX = 0x1,
+};
+
+struct fw_ri_cqe {
+ union fw_ri_rxtx {
+ struct fw_ri_scqe {
+ __be32 qpid_n_stat_rxtx_type;
+ __be32 plen;
+ __be32 reserved;
+ __be32 wrid;
+ } scqe;
+ struct fw_ri_rcqe {
+ __be32 qpid_n_stat_rxtx_type;
+ __be32 plen;
+ __be32 stag;
+ __be32 msn;
+ } rcqe;
+ } u;
+};
+
+#define S_FW_RI_CQE_QPID 12
+#define M_FW_RI_CQE_QPID 0xfffff
+#define V_FW_RI_CQE_QPID(x) ((x) << S_FW_RI_CQE_QPID)
+#define G_FW_RI_CQE_QPID(x) \
+ (((x) >> S_FW_RI_CQE_QPID) & M_FW_RI_CQE_QPID)
+
+#define S_FW_RI_CQE_NOTIFY 10
+#define M_FW_RI_CQE_NOTIFY 0x1
+#define V_FW_RI_CQE_NOTIFY(x) ((x) << S_FW_RI_CQE_NOTIFY)
+#define G_FW_RI_CQE_NOTIFY(x) \
+ (((x) >> S_FW_RI_CQE_NOTIFY) & M_FW_RI_CQE_NOTIFY)
+
+#define S_FW_RI_CQE_STATUS 5
+#define M_FW_RI_CQE_STATUS 0x1f
+#define V_FW_RI_CQE_STATUS(x) ((x) << S_FW_RI_CQE_STATUS)
+#define G_FW_RI_CQE_STATUS(x) \
+ (((x) >> S_FW_RI_CQE_STATUS) & M_FW_RI_CQE_STATUS)
+
+
+#define S_FW_RI_CQE_RXTX 4
+#define M_FW_RI_CQE_RXTX 0x1
+#define V_FW_RI_CQE_RXTX(x) ((x) << S_FW_RI_CQE_RXTX)
+#define G_FW_RI_CQE_RXTX(x) \
+ (((x) >> S_FW_RI_CQE_RXTX) & M_FW_RI_CQE_RXTX)
+
+#define S_FW_RI_CQE_TYPE 0
+#define M_FW_RI_CQE_TYPE 0xf
+#define V_FW_RI_CQE_TYPE(x) ((x) << S_FW_RI_CQE_TYPE)
+#define G_FW_RI_CQE_TYPE(x) \
+ (((x) >> S_FW_RI_CQE_TYPE) & M_FW_RI_CQE_TYPE)
+
+enum fw_ri_res_type {
+ FW_RI_RES_TYPE_SQ,
+ FW_RI_RES_TYPE_RQ,
+ FW_RI_RES_TYPE_CQ,
+};
+
+enum fw_ri_res_op {
+ FW_RI_RES_OP_WRITE,
+ FW_RI_RES_OP_RESET,
+};
+
+struct fw_ri_res {
+ union fw_ri_restype {
+ struct fw_ri_res_sqrq {
+ __u8 restype;
+ __u8 op;
+ __be16 r3;
+ __be32 eqid;
+ __be32 r4[2];
+ __be32 fetchszm_to_iqid;
+ __be32 dcaen_to_eqsize;
+ __be64 eqaddr;
+ } sqrq;
+ struct fw_ri_res_cq {
+ __u8 restype;
+ __u8 op;
+ __be16 r3;
+ __be32 iqid;
+ __be32 r4[2];
+ __be32 iqandst_to_iqandstindex;
+ __be16 iqdroprss_to_iqesize;
+ __be16 iqsize;
+ __be64 iqaddr;
+ __be32 iqns_iqro;
+ __be32 r6_lo;
+ __be64 r7;
+ } cq;
+ } u;
+};
+
+struct fw_ri_res_wr {
+ __be32 op_nres;
+ __be32 len16_pkd;
+ __u64 cookie;
+#ifndef C99_NOT_SUPPORTED
+ struct fw_ri_res res[0];
+#endif
+};
+
+#define S_FW_RI_RES_WR_NRES 0
+#define M_FW_RI_RES_WR_NRES 0xff
+#define V_FW_RI_RES_WR_NRES(x) ((x) << S_FW_RI_RES_WR_NRES)
+#define G_FW_RI_RES_WR_NRES(x) \
+ (((x) >> S_FW_RI_RES_WR_NRES) & M_FW_RI_RES_WR_NRES)
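+
+/*
+ * Illustrative sketch: a resource work request carries "nres" fw_ri_res
+ * entries immediately after the fixed header, so a hypothetical caller
+ * would size it before setting NRES and LEN16:
+ *
+ *	wr_len = sizeof(*res_wr) + nres * sizeof(struct fw_ri_res);
+ *	res_wr->op_nres = htobe32(V_FW_WR_OP(FW_RI_RES_WR) |
+ *	    V_FW_RI_RES_WR_NRES(nres) | F_FW_WR_COMPL);
+ *	res_wr->len16_pkd = htobe32(V_FW_WR_LEN16(howmany(wr_len, 16)));
+ */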
+
+#define S_FW_RI_RES_WR_FETCHSZM 26
+#define M_FW_RI_RES_WR_FETCHSZM 0x1
+#define V_FW_RI_RES_WR_FETCHSZM(x) ((x) << S_FW_RI_RES_WR_FETCHSZM)
+#define G_FW_RI_RES_WR_FETCHSZM(x) \
+ (((x) >> S_FW_RI_RES_WR_FETCHSZM) & M_FW_RI_RES_WR_FETCHSZM)
+#define F_FW_RI_RES_WR_FETCHSZM V_FW_RI_RES_WR_FETCHSZM(1U)
+
+#define S_FW_RI_RES_WR_STATUSPGNS 25
+#define M_FW_RI_RES_WR_STATUSPGNS 0x1
+#define V_FW_RI_RES_WR_STATUSPGNS(x) ((x) << S_FW_RI_RES_WR_STATUSPGNS)
+#define G_FW_RI_RES_WR_STATUSPGNS(x) \
+ (((x) >> S_FW_RI_RES_WR_STATUSPGNS) & M_FW_RI_RES_WR_STATUSPGNS)
+#define F_FW_RI_RES_WR_STATUSPGNS V_FW_RI_RES_WR_STATUSPGNS(1U)
+
+#define S_FW_RI_RES_WR_STATUSPGRO 24
+#define M_FW_RI_RES_WR_STATUSPGRO 0x1
+#define V_FW_RI_RES_WR_STATUSPGRO(x) ((x) << S_FW_RI_RES_WR_STATUSPGRO)
+#define G_FW_RI_RES_WR_STATUSPGRO(x) \
+ (((x) >> S_FW_RI_RES_WR_STATUSPGRO) & M_FW_RI_RES_WR_STATUSPGRO)
+#define F_FW_RI_RES_WR_STATUSPGRO V_FW_RI_RES_WR_STATUSPGRO(1U)
+
+#define S_FW_RI_RES_WR_FETCHNS 23
+#define M_FW_RI_RES_WR_FETCHNS 0x1
+#define V_FW_RI_RES_WR_FETCHNS(x) ((x) << S_FW_RI_RES_WR_FETCHNS)
+#define G_FW_RI_RES_WR_FETCHNS(x) \
+ (((x) >> S_FW_RI_RES_WR_FETCHNS) & M_FW_RI_RES_WR_FETCHNS)
+#define F_FW_RI_RES_WR_FETCHNS V_FW_RI_RES_WR_FETCHNS(1U)
+
+#define S_FW_RI_RES_WR_FETCHRO 22
+#define M_FW_RI_RES_WR_FETCHRO 0x1
+#define V_FW_RI_RES_WR_FETCHRO(x) ((x) << S_FW_RI_RES_WR_FETCHRO)
+#define G_FW_RI_RES_WR_FETCHRO(x) \
+ (((x) >> S_FW_RI_RES_WR_FETCHRO) & M_FW_RI_RES_WR_FETCHRO)
+#define F_FW_RI_RES_WR_FETCHRO V_FW_RI_RES_WR_FETCHRO(1U)
+
+#define S_FW_RI_RES_WR_HOSTFCMODE 20
+#define M_FW_RI_RES_WR_HOSTFCMODE 0x3
+#define V_FW_RI_RES_WR_HOSTFCMODE(x) ((x) << S_FW_RI_RES_WR_HOSTFCMODE)
+#define G_FW_RI_RES_WR_HOSTFCMODE(x) \
+ (((x) >> S_FW_RI_RES_WR_HOSTFCMODE) & M_FW_RI_RES_WR_HOSTFCMODE)
+
+#define S_FW_RI_RES_WR_CPRIO 19
+#define M_FW_RI_RES_WR_CPRIO 0x1
+#define V_FW_RI_RES_WR_CPRIO(x) ((x) << S_FW_RI_RES_WR_CPRIO)
+#define G_FW_RI_RES_WR_CPRIO(x) \
+ (((x) >> S_FW_RI_RES_WR_CPRIO) & M_FW_RI_RES_WR_CPRIO)
+#define F_FW_RI_RES_WR_CPRIO V_FW_RI_RES_WR_CPRIO(1U)
+
+#define S_FW_RI_RES_WR_ONCHIP 18
+#define M_FW_RI_RES_WR_ONCHIP 0x1
+#define V_FW_RI_RES_WR_ONCHIP(x) ((x) << S_FW_RI_RES_WR_ONCHIP)
+#define G_FW_RI_RES_WR_ONCHIP(x) \
+ (((x) >> S_FW_RI_RES_WR_ONCHIP) & M_FW_RI_RES_WR_ONCHIP)
+#define F_FW_RI_RES_WR_ONCHIP V_FW_RI_RES_WR_ONCHIP(1U)
+
+#define S_FW_RI_RES_WR_PCIECHN 16
+#define M_FW_RI_RES_WR_PCIECHN 0x3
+#define V_FW_RI_RES_WR_PCIECHN(x) ((x) << S_FW_RI_RES_WR_PCIECHN)
+#define G_FW_RI_RES_WR_PCIECHN(x) \
+ (((x) >> S_FW_RI_RES_WR_PCIECHN) & M_FW_RI_RES_WR_PCIECHN)
+
+#define S_FW_RI_RES_WR_IQID 0
+#define M_FW_RI_RES_WR_IQID 0xffff
+#define V_FW_RI_RES_WR_IQID(x) ((x) << S_FW_RI_RES_WR_IQID)
+#define G_FW_RI_RES_WR_IQID(x) \
+ (((x) >> S_FW_RI_RES_WR_IQID) & M_FW_RI_RES_WR_IQID)
+
+#define S_FW_RI_RES_WR_DCAEN 31
+#define M_FW_RI_RES_WR_DCAEN 0x1
+#define V_FW_RI_RES_WR_DCAEN(x) ((x) << S_FW_RI_RES_WR_DCAEN)
+#define G_FW_RI_RES_WR_DCAEN(x) \
+ (((x) >> S_FW_RI_RES_WR_DCAEN) & M_FW_RI_RES_WR_DCAEN)
+#define F_FW_RI_RES_WR_DCAEN V_FW_RI_RES_WR_DCAEN(1U)
+
+#define S_FW_RI_RES_WR_DCACPU 26
+#define M_FW_RI_RES_WR_DCACPU 0x1f
+#define V_FW_RI_RES_WR_DCACPU(x) ((x) << S_FW_RI_RES_WR_DCACPU)
+#define G_FW_RI_RES_WR_DCACPU(x) \
+ (((x) >> S_FW_RI_RES_WR_DCACPU) & M_FW_RI_RES_WR_DCACPU)
+
+#define S_FW_RI_RES_WR_FBMIN 23
+#define M_FW_RI_RES_WR_FBMIN 0x7
+#define V_FW_RI_RES_WR_FBMIN(x) ((x) << S_FW_RI_RES_WR_FBMIN)
+#define G_FW_RI_RES_WR_FBMIN(x) \
+ (((x) >> S_FW_RI_RES_WR_FBMIN) & M_FW_RI_RES_WR_FBMIN)
+
+#define S_FW_RI_RES_WR_FBMAX 20
+#define M_FW_RI_RES_WR_FBMAX 0x7
+#define V_FW_RI_RES_WR_FBMAX(x) ((x) << S_FW_RI_RES_WR_FBMAX)
+#define G_FW_RI_RES_WR_FBMAX(x) \
+ (((x) >> S_FW_RI_RES_WR_FBMAX) & M_FW_RI_RES_WR_FBMAX)
+
+#define S_FW_RI_RES_WR_CIDXFTHRESHO 19
+#define M_FW_RI_RES_WR_CIDXFTHRESHO 0x1
+#define V_FW_RI_RES_WR_CIDXFTHRESHO(x) ((x) << S_FW_RI_RES_WR_CIDXFTHRESHO)
+#define G_FW_RI_RES_WR_CIDXFTHRESHO(x) \
+ (((x) >> S_FW_RI_RES_WR_CIDXFTHRESHO) & M_FW_RI_RES_WR_CIDXFTHRESHO)
+#define F_FW_RI_RES_WR_CIDXFTHRESHO V_FW_RI_RES_WR_CIDXFTHRESHO(1U)
+
+#define S_FW_RI_RES_WR_CIDXFTHRESH 16
+#define M_FW_RI_RES_WR_CIDXFTHRESH 0x7
+#define V_FW_RI_RES_WR_CIDXFTHRESH(x) ((x) << S_FW_RI_RES_WR_CIDXFTHRESH)
+#define G_FW_RI_RES_WR_CIDXFTHRESH(x) \
+ (((x) >> S_FW_RI_RES_WR_CIDXFTHRESH) & M_FW_RI_RES_WR_CIDXFTHRESH)
+
+#define S_FW_RI_RES_WR_EQSIZE 0
+#define M_FW_RI_RES_WR_EQSIZE 0xffff
+#define V_FW_RI_RES_WR_EQSIZE(x) ((x) << S_FW_RI_RES_WR_EQSIZE)
+#define G_FW_RI_RES_WR_EQSIZE(x) \
+ (((x) >> S_FW_RI_RES_WR_EQSIZE) & M_FW_RI_RES_WR_EQSIZE)
+
+#define S_FW_RI_RES_WR_IQANDST 15
+#define M_FW_RI_RES_WR_IQANDST 0x1
+#define V_FW_RI_RES_WR_IQANDST(x) ((x) << S_FW_RI_RES_WR_IQANDST)
+#define G_FW_RI_RES_WR_IQANDST(x) \
+ (((x) >> S_FW_RI_RES_WR_IQANDST) & M_FW_RI_RES_WR_IQANDST)
+#define F_FW_RI_RES_WR_IQANDST V_FW_RI_RES_WR_IQANDST(1U)
+
+#define S_FW_RI_RES_WR_IQANUS 14
+#define M_FW_RI_RES_WR_IQANUS 0x1
+#define V_FW_RI_RES_WR_IQANUS(x) ((x) << S_FW_RI_RES_WR_IQANUS)
+#define G_FW_RI_RES_WR_IQANUS(x) \
+ (((x) >> S_FW_RI_RES_WR_IQANUS) & M_FW_RI_RES_WR_IQANUS)
+#define F_FW_RI_RES_WR_IQANUS V_FW_RI_RES_WR_IQANUS(1U)
+
+#define S_FW_RI_RES_WR_IQANUD 12
+#define M_FW_RI_RES_WR_IQANUD 0x3
+#define V_FW_RI_RES_WR_IQANUD(x) ((x) << S_FW_RI_RES_WR_IQANUD)
+#define G_FW_RI_RES_WR_IQANUD(x) \
+ (((x) >> S_FW_RI_RES_WR_IQANUD) & M_FW_RI_RES_WR_IQANUD)
+
+#define S_FW_RI_RES_WR_IQANDSTINDEX 0
+#define M_FW_RI_RES_WR_IQANDSTINDEX 0xfff
+#define V_FW_RI_RES_WR_IQANDSTINDEX(x) ((x) << S_FW_RI_RES_WR_IQANDSTINDEX)
+#define G_FW_RI_RES_WR_IQANDSTINDEX(x) \
+ (((x) >> S_FW_RI_RES_WR_IQANDSTINDEX) & M_FW_RI_RES_WR_IQANDSTINDEX)
+
+#define S_FW_RI_RES_WR_IQDROPRSS 15
+#define M_FW_RI_RES_WR_IQDROPRSS 0x1
+#define V_FW_RI_RES_WR_IQDROPRSS(x) ((x) << S_FW_RI_RES_WR_IQDROPRSS)
+#define G_FW_RI_RES_WR_IQDROPRSS(x) \
+ (((x) >> S_FW_RI_RES_WR_IQDROPRSS) & M_FW_RI_RES_WR_IQDROPRSS)
+#define F_FW_RI_RES_WR_IQDROPRSS V_FW_RI_RES_WR_IQDROPRSS(1U)
+
+#define S_FW_RI_RES_WR_IQGTSMODE 14
+#define M_FW_RI_RES_WR_IQGTSMODE 0x1
+#define V_FW_RI_RES_WR_IQGTSMODE(x) ((x) << S_FW_RI_RES_WR_IQGTSMODE)
+#define G_FW_RI_RES_WR_IQGTSMODE(x) \
+ (((x) >> S_FW_RI_RES_WR_IQGTSMODE) & M_FW_RI_RES_WR_IQGTSMODE)
+#define F_FW_RI_RES_WR_IQGTSMODE V_FW_RI_RES_WR_IQGTSMODE(1U)
+
+#define S_FW_RI_RES_WR_IQPCIECH 12
+#define M_FW_RI_RES_WR_IQPCIECH 0x3
+#define V_FW_RI_RES_WR_IQPCIECH(x) ((x) << S_FW_RI_RES_WR_IQPCIECH)
+#define G_FW_RI_RES_WR_IQPCIECH(x) \
+ (((x) >> S_FW_RI_RES_WR_IQPCIECH) & M_FW_RI_RES_WR_IQPCIECH)
+
+#define S_FW_RI_RES_WR_IQDCAEN 11
+#define M_FW_RI_RES_WR_IQDCAEN 0x1
+#define V_FW_RI_RES_WR_IQDCAEN(x) ((x) << S_FW_RI_RES_WR_IQDCAEN)
+#define G_FW_RI_RES_WR_IQDCAEN(x) \
+ (((x) >> S_FW_RI_RES_WR_IQDCAEN) & M_FW_RI_RES_WR_IQDCAEN)
+#define F_FW_RI_RES_WR_IQDCAEN V_FW_RI_RES_WR_IQDCAEN(1U)
+
+#define S_FW_RI_RES_WR_IQDCACPU 6
+#define M_FW_RI_RES_WR_IQDCACPU 0x1f
+#define V_FW_RI_RES_WR_IQDCACPU(x) ((x) << S_FW_RI_RES_WR_IQDCACPU)
+#define G_FW_RI_RES_WR_IQDCACPU(x) \
+ (((x) >> S_FW_RI_RES_WR_IQDCACPU) & M_FW_RI_RES_WR_IQDCACPU)
+
+#define S_FW_RI_RES_WR_IQINTCNTTHRESH 4
+#define M_FW_RI_RES_WR_IQINTCNTTHRESH 0x3
+#define V_FW_RI_RES_WR_IQINTCNTTHRESH(x) \
+ ((x) << S_FW_RI_RES_WR_IQINTCNTTHRESH)
+#define G_FW_RI_RES_WR_IQINTCNTTHRESH(x) \
+ (((x) >> S_FW_RI_RES_WR_IQINTCNTTHRESH) & M_FW_RI_RES_WR_IQINTCNTTHRESH)
+
+#define S_FW_RI_RES_WR_IQO 3
+#define M_FW_RI_RES_WR_IQO 0x1
+#define V_FW_RI_RES_WR_IQO(x) ((x) << S_FW_RI_RES_WR_IQO)
+#define G_FW_RI_RES_WR_IQO(x) \
+ (((x) >> S_FW_RI_RES_WR_IQO) & M_FW_RI_RES_WR_IQO)
+#define F_FW_RI_RES_WR_IQO V_FW_RI_RES_WR_IQO(1U)
+
+#define S_FW_RI_RES_WR_IQCPRIO 2
+#define M_FW_RI_RES_WR_IQCPRIO 0x1
+#define V_FW_RI_RES_WR_IQCPRIO(x) ((x) << S_FW_RI_RES_WR_IQCPRIO)
+#define G_FW_RI_RES_WR_IQCPRIO(x) \
+ (((x) >> S_FW_RI_RES_WR_IQCPRIO) & M_FW_RI_RES_WR_IQCPRIO)
+#define F_FW_RI_RES_WR_IQCPRIO V_FW_RI_RES_WR_IQCPRIO(1U)
+
+#define S_FW_RI_RES_WR_IQESIZE 0
+#define M_FW_RI_RES_WR_IQESIZE 0x3
+#define V_FW_RI_RES_WR_IQESIZE(x) ((x) << S_FW_RI_RES_WR_IQESIZE)
+#define G_FW_RI_RES_WR_IQESIZE(x) \
+ (((x) >> S_FW_RI_RES_WR_IQESIZE) & M_FW_RI_RES_WR_IQESIZE)
+
+#define S_FW_RI_RES_WR_IQNS 31
+#define M_FW_RI_RES_WR_IQNS 0x1
+#define V_FW_RI_RES_WR_IQNS(x) ((x) << S_FW_RI_RES_WR_IQNS)
+#define G_FW_RI_RES_WR_IQNS(x) \
+ (((x) >> S_FW_RI_RES_WR_IQNS) & M_FW_RI_RES_WR_IQNS)
+#define F_FW_RI_RES_WR_IQNS V_FW_RI_RES_WR_IQNS(1U)
+
+#define S_FW_RI_RES_WR_IQRO 30
+#define M_FW_RI_RES_WR_IQRO 0x1
+#define V_FW_RI_RES_WR_IQRO(x) ((x) << S_FW_RI_RES_WR_IQRO)
+#define G_FW_RI_RES_WR_IQRO(x) \
+ (((x) >> S_FW_RI_RES_WR_IQRO) & M_FW_RI_RES_WR_IQRO)
+#define F_FW_RI_RES_WR_IQRO V_FW_RI_RES_WR_IQRO(1U)
+
+struct fw_ri_rdma_write_wr {
+ __u8 opcode;
+ __u8 flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __be64 r2;
+ __be32 plen;
+ __be32 stag_sink;
+ __be64 to_sink;
+#ifndef C99_NOT_SUPPORTED
+ union {
+ struct fw_ri_immd immd_src[0];
+ struct fw_ri_isgl isgl_src[0];
+ } u;
+#endif
+};
+
+struct fw_ri_send_wr {
+ __u8 opcode;
+ __u8 flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __be32 sendop_pkd;
+ __be32 stag_inv;
+ __be32 plen;
+ __be32 r3;
+ __be64 r4;
+#ifndef C99_NOT_SUPPORTED
+ union {
+ struct fw_ri_immd immd_src[0];
+ struct fw_ri_isgl isgl_src[0];
+ } u;
+#endif
+};
+
+#define S_FW_RI_SEND_WR_SENDOP 0
+#define M_FW_RI_SEND_WR_SENDOP 0xf
+#define V_FW_RI_SEND_WR_SENDOP(x) ((x) << S_FW_RI_SEND_WR_SENDOP)
+#define G_FW_RI_SEND_WR_SENDOP(x) \
+ (((x) >> S_FW_RI_SEND_WR_SENDOP) & M_FW_RI_SEND_WR_SENDOP)
+
+struct fw_ri_rdma_read_wr {
+ __u8 opcode;
+ __u8 flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __be64 r2;
+ __be32 stag_sink;
+ __be32 to_sink_hi;
+ __be32 to_sink_lo;
+ __be32 plen;
+ __be32 stag_src;
+ __be32 to_src_hi;
+ __be32 to_src_lo;
+ __be32 r5;
+};
+
+struct fw_ri_recv_wr {
+ __u8 opcode;
+ __u8 r1;
+ __u16 wrid;
+ __u8 r2[3];
+ __u8 len16;
+ struct fw_ri_isgl isgl;
+};
+
+struct fw_ri_bind_mw_wr {
+ __u8 opcode;
+ __u8 flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __u8 qpbinde_to_dcacpu;
+ __u8 pgsz_shift;
+ __u8 addr_type;
+ __u8 mem_perms;
+ __be32 stag_mr;
+ __be32 stag_mw;
+ __be32 r3;
+ __be64 len_mw;
+ __be64 va_fbo;
+ __be64 r4;
+};
+
+#define S_FW_RI_BIND_MW_WR_QPBINDE 6
+#define M_FW_RI_BIND_MW_WR_QPBINDE 0x1
+#define V_FW_RI_BIND_MW_WR_QPBINDE(x) ((x) << S_FW_RI_BIND_MW_WR_QPBINDE)
+#define G_FW_RI_BIND_MW_WR_QPBINDE(x) \
+ (((x) >> S_FW_RI_BIND_MW_WR_QPBINDE) & M_FW_RI_BIND_MW_WR_QPBINDE)
+#define F_FW_RI_BIND_MW_WR_QPBINDE V_FW_RI_BIND_MW_WR_QPBINDE(1U)
+
+#define S_FW_RI_BIND_MW_WR_NS 5
+#define M_FW_RI_BIND_MW_WR_NS 0x1
+#define V_FW_RI_BIND_MW_WR_NS(x) ((x) << S_FW_RI_BIND_MW_WR_NS)
+#define G_FW_RI_BIND_MW_WR_NS(x) \
+ (((x) >> S_FW_RI_BIND_MW_WR_NS) & M_FW_RI_BIND_MW_WR_NS)
+#define F_FW_RI_BIND_MW_WR_NS V_FW_RI_BIND_MW_WR_NS(1U)
+
+#define S_FW_RI_BIND_MW_WR_DCACPU 0
+#define M_FW_RI_BIND_MW_WR_DCACPU 0x1f
+#define V_FW_RI_BIND_MW_WR_DCACPU(x) ((x) << S_FW_RI_BIND_MW_WR_DCACPU)
+#define G_FW_RI_BIND_MW_WR_DCACPU(x) \
+ (((x) >> S_FW_RI_BIND_MW_WR_DCACPU) & M_FW_RI_BIND_MW_WR_DCACPU)
+
+struct fw_ri_fr_nsmr_wr {
+ __u8 opcode;
+ __u8 flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __u8 qpbinde_to_dcacpu;
+ __u8 pgsz_shift;
+ __u8 addr_type;
+ __u8 mem_perms;
+ __be32 stag;
+ __be32 len_hi;
+ __be32 len_lo;
+ __be32 va_hi;
+ __be32 va_lo_fbo;
+};
+
+#define S_FW_RI_FR_NSMR_WR_QPBINDE 6
+#define M_FW_RI_FR_NSMR_WR_QPBINDE 0x1
+#define V_FW_RI_FR_NSMR_WR_QPBINDE(x) ((x) << S_FW_RI_FR_NSMR_WR_QPBINDE)
+#define G_FW_RI_FR_NSMR_WR_QPBINDE(x) \
+ (((x) >> S_FW_RI_FR_NSMR_WR_QPBINDE) & M_FW_RI_FR_NSMR_WR_QPBINDE)
+#define F_FW_RI_FR_NSMR_WR_QPBINDE V_FW_RI_FR_NSMR_WR_QPBINDE(1U)
+
+#define S_FW_RI_FR_NSMR_WR_NS 5
+#define M_FW_RI_FR_NSMR_WR_NS 0x1
+#define V_FW_RI_FR_NSMR_WR_NS(x) ((x) << S_FW_RI_FR_NSMR_WR_NS)
+#define G_FW_RI_FR_NSMR_WR_NS(x) \
+ (((x) >> S_FW_RI_FR_NSMR_WR_NS) & M_FW_RI_FR_NSMR_WR_NS)
+#define F_FW_RI_FR_NSMR_WR_NS V_FW_RI_FR_NSMR_WR_NS(1U)
+
+#define S_FW_RI_FR_NSMR_WR_DCACPU 0
+#define M_FW_RI_FR_NSMR_WR_DCACPU 0x1f
+#define V_FW_RI_FR_NSMR_WR_DCACPU(x) ((x) << S_FW_RI_FR_NSMR_WR_DCACPU)
+#define G_FW_RI_FR_NSMR_WR_DCACPU(x) \
+ (((x) >> S_FW_RI_FR_NSMR_WR_DCACPU) & M_FW_RI_FR_NSMR_WR_DCACPU)
+
+struct fw_ri_inv_lstag_wr {
+ __u8 opcode;
+ __u8 flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __be32 r2;
+ __be32 stag_inv;
+};
+
+enum fw_ri_type {
+ FW_RI_TYPE_INIT,
+ FW_RI_TYPE_FINI,
+ FW_RI_TYPE_TERMINATE
+};
+
+enum fw_ri_init_p2ptype {
+ FW_RI_INIT_P2PTYPE_RDMA_WRITE = FW_RI_RDMA_WRITE,
+ FW_RI_INIT_P2PTYPE_READ_REQ = FW_RI_READ_REQ,
+ FW_RI_INIT_P2PTYPE_SEND = FW_RI_SEND,
+ FW_RI_INIT_P2PTYPE_SEND_WITH_INV = FW_RI_SEND_WITH_INV,
+ FW_RI_INIT_P2PTYPE_SEND_WITH_SE = FW_RI_SEND_WITH_SE,
+ FW_RI_INIT_P2PTYPE_SEND_WITH_SE_INV = FW_RI_SEND_WITH_SE_INV,
+ FW_RI_INIT_P2PTYPE_DISABLED = 0xf,
+};
+
+struct fw_ri_wr {
+ __be32 op_compl;
+ __be32 flowid_len16;
+ __u64 cookie;
+ union fw_ri {
+ struct fw_ri_init {
+ __u8 type;
+ __u8 mpareqbit_p2ptype;
+ __u8 r4[2];
+ __u8 mpa_attrs;
+ __u8 qp_caps;
+ __be16 nrqe;
+ __be32 pdid;
+ __be32 qpid;
+ __be32 sq_eqid;
+ __be32 rq_eqid;
+ __be32 scqid;
+ __be32 rcqid;
+ __be32 ord_max;
+ __be32 ird_max;
+ __be32 iss;
+ __be32 irs;
+ __be32 hwrqsize;
+ __be32 hwrqaddr;
+ __be64 r5;
+ union fw_ri_init_p2p {
+ struct fw_ri_rdma_write_wr write;
+ struct fw_ri_rdma_read_wr read;
+ struct fw_ri_send_wr send;
+ } u;
+ } init;
+ struct fw_ri_fini {
+ __u8 type;
+ __u8 r3[7];
+ __be64 r4;
+ } fini;
+ struct fw_ri_terminate {
+ __u8 type;
+ __u8 r3[3];
+ __be32 immdlen;
+ __u8 termmsg[40];
+ } terminate;
+ } u;
+};
+
+#define S_FW_RI_WR_MPAREQBIT 7
+#define M_FW_RI_WR_MPAREQBIT 0x1
+#define V_FW_RI_WR_MPAREQBIT(x) ((x) << S_FW_RI_WR_MPAREQBIT)
+#define G_FW_RI_WR_MPAREQBIT(x) \
+ (((x) >> S_FW_RI_WR_MPAREQBIT) & M_FW_RI_WR_MPAREQBIT)
+#define F_FW_RI_WR_MPAREQBIT V_FW_RI_WR_MPAREQBIT(1U)
+
+#define S_FW_RI_WR_0BRRBIT 6
+#define M_FW_RI_WR_0BRRBIT 0x1
+#define V_FW_RI_WR_0BRRBIT(x) ((x) << S_FW_RI_WR_0BRRBIT)
+#define G_FW_RI_WR_0BRRBIT(x) \
+ (((x) >> S_FW_RI_WR_0BRRBIT) & M_FW_RI_WR_0BRRBIT)
+#define F_FW_RI_WR_0BRRBIT V_FW_RI_WR_0BRRBIT(1U)
+
+#define S_FW_RI_WR_P2PTYPE 0
+#define M_FW_RI_WR_P2PTYPE 0xf
+#define V_FW_RI_WR_P2PTYPE(x) ((x) << S_FW_RI_WR_P2PTYPE)
+#define G_FW_RI_WR_P2PTYPE(x) \
+ (((x) >> S_FW_RI_WR_P2PTYPE) & M_FW_RI_WR_P2PTYPE)
+
+
+/******************************************************************************
+ * C O M M A N D s
+ *********************/
+
+/*
+ * The maximum length of time, in milliseconds, that we expect any firmware
+ * command to take to execute and return a reply to the host. The RESET
+ * and INITIALIZE commands can take a fair amount of time to execute but
+ * most execute in far less time than this maximum. This constant is used
+ * by host software to determine how long to wait for a firmware command
+ * reply before declaring the firmware as dead/unreachable ...
+ */
+#define FW_CMD_MAX_TIMEOUT 10000
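+
+/*
+ * Illustrative sketch: FW_CMD_MAX_TIMEOUT bounds how long the host polls
+ * for a mailbox reply.  mbox_reply_ready() and delay_ms() below are
+ * hypothetical helpers used only for this example:
+ *
+ *	for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ms += 5) {
+ *		if (mbox_reply_ready(adapter))
+ *			break;
+ *		delay_ms(5);
+ *	}
+ *	if (ms >= FW_CMD_MAX_TIMEOUT)
+ *		return (-ETIMEDOUT);
+ */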
+
+enum fw_cmd_opcodes {
+ FW_LDST_CMD = 0x01,
+ FW_RESET_CMD = 0x03,
+ FW_HELLO_CMD = 0x04,
+ FW_BYE_CMD = 0x05,
+ FW_INITIALIZE_CMD = 0x06,
+ FW_CAPS_CONFIG_CMD = 0x07,
+ FW_PARAMS_CMD = 0x08,
+ FW_PFVF_CMD = 0x09,
+ FW_IQ_CMD = 0x10,
+ FW_EQ_MNGT_CMD = 0x11,
+ FW_EQ_ETH_CMD = 0x12,
+ FW_EQ_CTRL_CMD = 0x13,
+ FW_EQ_OFLD_CMD = 0x21,
+ FW_VI_CMD = 0x14,
+ FW_VI_MAC_CMD = 0x15,
+ FW_VI_RXMODE_CMD = 0x16,
+ FW_VI_ENABLE_CMD = 0x17,
+ FW_VI_STATS_CMD = 0x1a,
+ FW_ACL_MAC_CMD = 0x18,
+ FW_ACL_VLAN_CMD = 0x19,
+ FW_PORT_CMD = 0x1b,
+ FW_PORT_STATS_CMD = 0x1c,
+ FW_PORT_LB_STATS_CMD = 0x1d,
+ FW_PORT_TRACE_CMD = 0x1e,
+ FW_PORT_TRACE_MMAP_CMD = 0x1f,
+ FW_RSS_IND_TBL_CMD = 0x20,
+ FW_RSS_GLB_CONFIG_CMD = 0x22,
+ FW_RSS_VI_CONFIG_CMD = 0x23,
+ FW_SCHED_CMD = 0x24,
+ FW_DEVLOG_CMD = 0x25,
+ FW_LASTC2E_CMD = 0x40,
+ FW_ERROR_CMD = 0x80,
+ FW_DEBUG_CMD = 0x81,
+
+};
+
+enum fw_cmd_cap {
+ FW_CMD_CAP_PF = 0x01,
+ FW_CMD_CAP_DMAQ = 0x02,
+ FW_CMD_CAP_PORT = 0x04,
+ FW_CMD_CAP_PORTPROMISC = 0x08,
+ FW_CMD_CAP_PORTSTATS = 0x10,
+ FW_CMD_CAP_VF = 0x80,
+};
+
+/*
+ * Generic command header flit0
+ */
+struct fw_cmd_hdr {
+ __be32 hi;
+ __be32 lo;
+};
+
+#define S_FW_CMD_OP 24
+#define M_FW_CMD_OP 0xff
+#define V_FW_CMD_OP(x) ((x) << S_FW_CMD_OP)
+#define G_FW_CMD_OP(x) (((x) >> S_FW_CMD_OP) & M_FW_CMD_OP)
+
+#define S_FW_CMD_REQUEST 23
+#define M_FW_CMD_REQUEST 0x1
+#define V_FW_CMD_REQUEST(x) ((x) << S_FW_CMD_REQUEST)
+#define G_FW_CMD_REQUEST(x) (((x) >> S_FW_CMD_REQUEST) & M_FW_CMD_REQUEST)
+#define F_FW_CMD_REQUEST V_FW_CMD_REQUEST(1U)
+
+#define S_FW_CMD_READ 22
+#define M_FW_CMD_READ 0x1
+#define V_FW_CMD_READ(x) ((x) << S_FW_CMD_READ)
+#define G_FW_CMD_READ(x) (((x) >> S_FW_CMD_READ) & M_FW_CMD_READ)
+#define F_FW_CMD_READ V_FW_CMD_READ(1U)
+
+#define S_FW_CMD_WRITE 21
+#define M_FW_CMD_WRITE 0x1
+#define V_FW_CMD_WRITE(x) ((x) << S_FW_CMD_WRITE)
+#define G_FW_CMD_WRITE(x) (((x) >> S_FW_CMD_WRITE) & M_FW_CMD_WRITE)
+#define F_FW_CMD_WRITE V_FW_CMD_WRITE(1U)
+
+#define S_FW_CMD_EXEC 20
+#define M_FW_CMD_EXEC 0x1
+#define V_FW_CMD_EXEC(x) ((x) << S_FW_CMD_EXEC)
+#define G_FW_CMD_EXEC(x) (((x) >> S_FW_CMD_EXEC) & M_FW_CMD_EXEC)
+#define F_FW_CMD_EXEC V_FW_CMD_EXEC(1U)
+
+#define S_FW_CMD_RAMASK 20
+#define M_FW_CMD_RAMASK 0xf
+#define V_FW_CMD_RAMASK(x) ((x) << S_FW_CMD_RAMASK)
+#define G_FW_CMD_RAMASK(x) (((x) >> S_FW_CMD_RAMASK) & M_FW_CMD_RAMASK)
+
+#define S_FW_CMD_RETVAL 8
+#define M_FW_CMD_RETVAL 0xff
+#define V_FW_CMD_RETVAL(x) ((x) << S_FW_CMD_RETVAL)
+#define G_FW_CMD_RETVAL(x) (((x) >> S_FW_CMD_RETVAL) & M_FW_CMD_RETVAL)
+
+#define S_FW_CMD_LEN16 0
+#define M_FW_CMD_LEN16 0xff
+#define V_FW_CMD_LEN16(x) ((x) << S_FW_CMD_LEN16)
+#define G_FW_CMD_LEN16(x) (((x) >> S_FW_CMD_LEN16) & M_FW_CMD_LEN16)
+
+#define FW_LEN16(fw_struct) V_FW_CMD_LEN16(sizeof(fw_struct) / 16)
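+
+/*
+ * Illustrative sketch: command headers compose the same way as work
+ * request headers.  A hypothetical RESET command could be prepared as
+ * follows (htobe32() assumed from the surrounding environment):
+ *
+ *	struct fw_reset_cmd c;
+ *
+ *	memset(&c, 0, sizeof(c));
+ *	c.op_to_write = htobe32(V_FW_CMD_OP(FW_RESET_CMD) |
+ *	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
+ *	c.retval_len16 = htobe32(FW_LEN16(c));
+ */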
+
+/*
+ * address spaces
+ */
+enum fw_ldst_addrspc {
+ FW_LDST_ADDRSPC_FIRMWARE = 0x0001,
+ FW_LDST_ADDRSPC_SGE_EGRC = 0x0008,
+ FW_LDST_ADDRSPC_SGE_INGC = 0x0009,
+ FW_LDST_ADDRSPC_SGE_FLMC = 0x000a,
+ FW_LDST_ADDRSPC_SGE_CONMC = 0x000b,
+ FW_LDST_ADDRSPC_TP_PIO = 0x0010,
+ FW_LDST_ADDRSPC_TP_TM_PIO = 0x0011,
+ FW_LDST_ADDRSPC_TP_MIB = 0x0012,
+ FW_LDST_ADDRSPC_MDIO = 0x0018,
+ FW_LDST_ADDRSPC_MPS = 0x0020,
+ FW_LDST_ADDRSPC_FUNC = 0x0028,
+ FW_LDST_ADDRSPC_FUNC_PCIE = 0x0029,
+ FW_LDST_ADDRSPC_FUNC_I2C = 0x002A,
+};
+
+/*
+ * MDIO VSC8634 register access control field
+ */
+enum fw_ldst_mdio_vsc8634_aid {
+ FW_LDST_MDIO_VS_STANDARD,
+ FW_LDST_MDIO_VS_EXTENDED,
+ FW_LDST_MDIO_VS_GPIO
+};
+
+enum fw_ldst_mps_fid {
+ FW_LDST_MPS_ATRB,
+ FW_LDST_MPS_RPLC
+};
+
+enum fw_ldst_func_access_ctl {
+ FW_LDST_FUNC_ACC_CTL_VIID,
+ FW_LDST_FUNC_ACC_CTL_FID
+};
+
+enum fw_ldst_func_mod_index {
+ FW_LDST_FUNC_MPS
+};
+
+struct fw_ldst_cmd {
+ __be32 op_to_addrspace;
+ __be32 cycles_to_len16;
+ union fw_ldst {
+ struct fw_ldst_addrval {
+ __be32 addr;
+ __be32 val;
+ } addrval;
+ struct fw_ldst_idctxt {
+ __be32 physid;
+ __be32 msg_pkd;
+ __be32 ctxt_data7;
+ __be32 ctxt_data6;
+ __be32 ctxt_data5;
+ __be32 ctxt_data4;
+ __be32 ctxt_data3;
+ __be32 ctxt_data2;
+ __be32 ctxt_data1;
+ __be32 ctxt_data0;
+ } idctxt;
+ struct fw_ldst_mdio {
+ __be16 paddr_mmd;
+ __be16 raddr;
+ __be16 vctl;
+ __be16 rval;
+ } mdio;
+ struct fw_ldst_mps {
+ __be16 fid_ctl;
+ __be16 rplcpf_pkd;
+ __be32 rplc127_96;
+ __be32 rplc95_64;
+ __be32 rplc63_32;
+ __be32 rplc31_0;
+ __be32 atrb;
+ __be16 vlan[16];
+ } mps;
+ struct fw_ldst_func {
+ __u8 access_ctl;
+ __u8 mod_index;
+ __be16 ctl_id;
+ __be32 offset;
+ __be64 data0;
+ __be64 data1;
+ } func;
+ struct fw_ldst_pcie {
+ __u8 ctrl_to_fn;
+ __u8 bnum;
+ __u8 r;
+ __u8 ext_r;
+ __u8 select_naccess;
+ __u8 pcie_fn;
+ __be16 nset_pkd;
+ __be32 data[12];
+ } pcie;
+ struct fw_ldst_i2c {
+ __u8 pid_pkd;
+ __u8 base;
+ __u8 boffset;
+ __u8 data;
+ __be32 r9;
+ } i2c;
+ } u;
+};
+
+#define S_FW_LDST_CMD_ADDRSPACE 0
+#define M_FW_LDST_CMD_ADDRSPACE 0xff
+#define V_FW_LDST_CMD_ADDRSPACE(x) ((x) << S_FW_LDST_CMD_ADDRSPACE)
+#define G_FW_LDST_CMD_ADDRSPACE(x) \
+ (((x) >> S_FW_LDST_CMD_ADDRSPACE) & M_FW_LDST_CMD_ADDRSPACE)
+
+#define S_FW_LDST_CMD_CYCLES 16
+#define M_FW_LDST_CMD_CYCLES 0xffff
+#define V_FW_LDST_CMD_CYCLES(x) ((x) << S_FW_LDST_CMD_CYCLES)
+#define G_FW_LDST_CMD_CYCLES(x) \
+ (((x) >> S_FW_LDST_CMD_CYCLES) & M_FW_LDST_CMD_CYCLES)
+
+#define S_FW_LDST_CMD_MSG 31
+#define M_FW_LDST_CMD_MSG 0x1
+#define V_FW_LDST_CMD_MSG(x) ((x) << S_FW_LDST_CMD_MSG)
+#define G_FW_LDST_CMD_MSG(x) \
+ (((x) >> S_FW_LDST_CMD_MSG) & M_FW_LDST_CMD_MSG)
+#define F_FW_LDST_CMD_MSG V_FW_LDST_CMD_MSG(1U)
+
+#define S_FW_LDST_CMD_PADDR 8
+#define M_FW_LDST_CMD_PADDR 0x1f
+#define V_FW_LDST_CMD_PADDR(x) ((x) << S_FW_LDST_CMD_PADDR)
+#define G_FW_LDST_CMD_PADDR(x) \
+ (((x) >> S_FW_LDST_CMD_PADDR) & M_FW_LDST_CMD_PADDR)
+
+#define S_FW_LDST_CMD_MMD 0
+#define M_FW_LDST_CMD_MMD 0x1f
+#define V_FW_LDST_CMD_MMD(x) ((x) << S_FW_LDST_CMD_MMD)
+#define G_FW_LDST_CMD_MMD(x) \
+ (((x) >> S_FW_LDST_CMD_MMD) & M_FW_LDST_CMD_MMD)
+
+#define S_FW_LDST_CMD_FID 15
+#define M_FW_LDST_CMD_FID 0x1
+#define V_FW_LDST_CMD_FID(x) ((x) << S_FW_LDST_CMD_FID)
+#define G_FW_LDST_CMD_FID(x) \
+ (((x) >> S_FW_LDST_CMD_FID) & M_FW_LDST_CMD_FID)
+#define F_FW_LDST_CMD_FID V_FW_LDST_CMD_FID(1U)
+
+#define S_FW_LDST_CMD_CTL 0
+#define M_FW_LDST_CMD_CTL 0x7fff
+#define V_FW_LDST_CMD_CTL(x) ((x) << S_FW_LDST_CMD_CTL)
+#define G_FW_LDST_CMD_CTL(x) \
+ (((x) >> S_FW_LDST_CMD_CTL) & M_FW_LDST_CMD_CTL)
+
+#define S_FW_LDST_CMD_RPLCPF 0
+#define M_FW_LDST_CMD_RPLCPF 0xff
+#define V_FW_LDST_CMD_RPLCPF(x) ((x) << S_FW_LDST_CMD_RPLCPF)
+#define G_FW_LDST_CMD_RPLCPF(x) \
+ (((x) >> S_FW_LDST_CMD_RPLCPF) & M_FW_LDST_CMD_RPLCPF)
+
+#define S_FW_LDST_CMD_CTRL 7
+#define M_FW_LDST_CMD_CTRL 0x1
+#define V_FW_LDST_CMD_CTRL(x) ((x) << S_FW_LDST_CMD_CTRL)
+#define G_FW_LDST_CMD_CTRL(x) \
+ (((x) >> S_FW_LDST_CMD_CTRL) & M_FW_LDST_CMD_CTRL)
+#define F_FW_LDST_CMD_CTRL V_FW_LDST_CMD_CTRL(1U)
+
+#define S_FW_LDST_CMD_LC 4
+#define M_FW_LDST_CMD_LC 0x1
+#define V_FW_LDST_CMD_LC(x) ((x) << S_FW_LDST_CMD_LC)
+#define G_FW_LDST_CMD_LC(x) (((x) >> S_FW_LDST_CMD_LC) & M_FW_LDST_CMD_LC)
+#define F_FW_LDST_CMD_LC V_FW_LDST_CMD_LC(1U)
+
+#define S_FW_LDST_CMD_AI 3
+#define M_FW_LDST_CMD_AI 0x1
+#define V_FW_LDST_CMD_AI(x) ((x) << S_FW_LDST_CMD_AI)
+#define G_FW_LDST_CMD_AI(x) (((x) >> S_FW_LDST_CMD_AI) & M_FW_LDST_CMD_AI)
+#define F_FW_LDST_CMD_AI V_FW_LDST_CMD_AI(1U)
+
+#define S_FW_LDST_CMD_FN 0
+#define M_FW_LDST_CMD_FN 0x7
+#define V_FW_LDST_CMD_FN(x) ((x) << S_FW_LDST_CMD_FN)
+#define G_FW_LDST_CMD_FN(x) (((x) >> S_FW_LDST_CMD_FN) & M_FW_LDST_CMD_FN)
+
+#define S_FW_LDST_CMD_SELECT 4
+#define M_FW_LDST_CMD_SELECT 0xf
+#define V_FW_LDST_CMD_SELECT(x) ((x) << S_FW_LDST_CMD_SELECT)
+#define G_FW_LDST_CMD_SELECT(x) \
+ (((x) >> S_FW_LDST_CMD_SELECT) & M_FW_LDST_CMD_SELECT)
+
+#define S_FW_LDST_CMD_NACCESS 0
+#define M_FW_LDST_CMD_NACCESS 0xf
+#define V_FW_LDST_CMD_NACCESS(x) ((x) << S_FW_LDST_CMD_NACCESS)
+#define G_FW_LDST_CMD_NACCESS(x) \
+ (((x) >> S_FW_LDST_CMD_NACCESS) & M_FW_LDST_CMD_NACCESS)
+
+#define S_FW_LDST_CMD_NSET 14
+#define M_FW_LDST_CMD_NSET 0x3
+#define V_FW_LDST_CMD_NSET(x) ((x) << S_FW_LDST_CMD_NSET)
+#define G_FW_LDST_CMD_NSET(x) \
+ (((x) >> S_FW_LDST_CMD_NSET) & M_FW_LDST_CMD_NSET)
+
+#define S_FW_LDST_CMD_PID 6
+#define M_FW_LDST_CMD_PID 0x3
+#define V_FW_LDST_CMD_PID(x) ((x) << S_FW_LDST_CMD_PID)
+#define G_FW_LDST_CMD_PID(x) \
+ (((x) >> S_FW_LDST_CMD_PID) & M_FW_LDST_CMD_PID)
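+
+/*
+ * Illustrative sketch: an MDIO register read via the LDST command selects
+ * the MDIO address space and fills the mdio member of the union; phy_addr,
+ * mmd and reg are caller-supplied values in this example and the mailbox
+ * transport itself is omitted:
+ *
+ *	c.op_to_addrspace = htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
+ *	    F_FW_CMD_REQUEST | F_FW_CMD_READ |
+ *	    V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
+ *	c.cycles_to_len16 = htobe32(FW_LEN16(c));
+ *	c.u.mdio.paddr_mmd = htobe16(V_FW_LDST_CMD_PADDR(phy_addr) |
+ *	    V_FW_LDST_CMD_MMD(mmd));
+ *	c.u.mdio.raddr = htobe16(reg);
+ */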
+
+struct fw_reset_cmd {
+ __be32 op_to_write;
+ __be32 retval_len16;
+ __be32 val;
+ __be32 r3;
+};
+
+struct fw_hello_cmd {
+ __be32 op_to_write;
+ __be32 retval_len16;
+ __be32 err_to_mbasyncnot;
+ __be32 fwrev;
+};
+
+#define S_FW_HELLO_CMD_ERR 31
+#define M_FW_HELLO_CMD_ERR 0x1
+#define V_FW_HELLO_CMD_ERR(x) ((x) << S_FW_HELLO_CMD_ERR)
+#define G_FW_HELLO_CMD_ERR(x) \
+ (((x) >> S_FW_HELLO_CMD_ERR) & M_FW_HELLO_CMD_ERR)
+#define F_FW_HELLO_CMD_ERR V_FW_HELLO_CMD_ERR(1U)
+
+#define S_FW_HELLO_CMD_INIT 30
+#define M_FW_HELLO_CMD_INIT 0x1
+#define V_FW_HELLO_CMD_INIT(x) ((x) << S_FW_HELLO_CMD_INIT)
+#define G_FW_HELLO_CMD_INIT(x) \
+ (((x) >> S_FW_HELLO_CMD_INIT) & M_FW_HELLO_CMD_INIT)
+#define F_FW_HELLO_CMD_INIT V_FW_HELLO_CMD_INIT(1U)
+
+#define S_FW_HELLO_CMD_MASTERDIS 29
+#define M_FW_HELLO_CMD_MASTERDIS 0x1
+#define V_FW_HELLO_CMD_MASTERDIS(x) ((x) << S_FW_HELLO_CMD_MASTERDIS)
+#define G_FW_HELLO_CMD_MASTERDIS(x) \
+ (((x) >> S_FW_HELLO_CMD_MASTERDIS) & M_FW_HELLO_CMD_MASTERDIS)
+#define F_FW_HELLO_CMD_MASTERDIS V_FW_HELLO_CMD_MASTERDIS(1U)
+
+#define S_FW_HELLO_CMD_MASTERFORCE 28
+#define M_FW_HELLO_CMD_MASTERFORCE 0x1
+#define V_FW_HELLO_CMD_MASTERFORCE(x) ((x) << S_FW_HELLO_CMD_MASTERFORCE)
+#define G_FW_HELLO_CMD_MASTERFORCE(x) \
+ (((x) >> S_FW_HELLO_CMD_MASTERFORCE) & M_FW_HELLO_CMD_MASTERFORCE)
+#define F_FW_HELLO_CMD_MASTERFORCE V_FW_HELLO_CMD_MASTERFORCE(1U)
+
+#define S_FW_HELLO_CMD_MBMASTER 24
+#define M_FW_HELLO_CMD_MBMASTER 0xf
+#define V_FW_HELLO_CMD_MBMASTER(x) ((x) << S_FW_HELLO_CMD_MBMASTER)
+#define G_FW_HELLO_CMD_MBMASTER(x) \
+ (((x) >> S_FW_HELLO_CMD_MBMASTER) & M_FW_HELLO_CMD_MBMASTER)
+
+#define S_FW_HELLO_CMD_MBASYNCNOTINT 23
+#define M_FW_HELLO_CMD_MBASYNCNOTINT 0x1
+#define V_FW_HELLO_CMD_MBASYNCNOTINT(x) ((x) << S_FW_HELLO_CMD_MBASYNCNOTINT)
+#define G_FW_HELLO_CMD_MBASYNCNOTINT(x) \
+ (((x) >> S_FW_HELLO_CMD_MBASYNCNOTINT) & M_FW_HELLO_CMD_MBASYNCNOTINT)
+#define F_FW_HELLO_CMD_MBASYNCNOTINT V_FW_HELLO_CMD_MBASYNCNOTINT(1U)
+
+#define S_FW_HELLO_CMD_MBASYNCNOT 20
+#define M_FW_HELLO_CMD_MBASYNCNOT 0x7
+#define V_FW_HELLO_CMD_MBASYNCNOT(x) ((x) << S_FW_HELLO_CMD_MBASYNCNOT)
+#define G_FW_HELLO_CMD_MBASYNCNOT(x) \
+ (((x) >> S_FW_HELLO_CMD_MBASYNCNOT) & M_FW_HELLO_CMD_MBASYNCNOT)
+
+struct fw_bye_cmd {
+ __be32 op_to_write;
+ __be32 retval_len16;
+ __be64 r3;
+};
+
+struct fw_initialize_cmd {
+ __be32 op_to_write;
+ __be32 retval_len16;
+ __be64 r3;
+};
+
+enum fw_caps_config_hm {
+ FW_CAPS_CONFIG_HM_PCIE = 0x00000001,
+ FW_CAPS_CONFIG_HM_PL = 0x00000002,
+ FW_CAPS_CONFIG_HM_SGE = 0x00000004,
+ FW_CAPS_CONFIG_HM_CIM = 0x00000008,
+ FW_CAPS_CONFIG_HM_ULPTX = 0x00000010,
+ FW_CAPS_CONFIG_HM_TP = 0x00000020,
+ FW_CAPS_CONFIG_HM_ULPRX = 0x00000040,
+ FW_CAPS_CONFIG_HM_PMRX = 0x00000080,
+ FW_CAPS_CONFIG_HM_PMTX = 0x00000100,
+ FW_CAPS_CONFIG_HM_MC = 0x00000200,
+ FW_CAPS_CONFIG_HM_LE = 0x00000400,
+ FW_CAPS_CONFIG_HM_MPS = 0x00000800,
+ FW_CAPS_CONFIG_HM_XGMAC = 0x00001000,
+ FW_CAPS_CONFIG_HM_CPLSWITCH = 0x00002000,
+ FW_CAPS_CONFIG_HM_T4DBG = 0x00004000,
+ FW_CAPS_CONFIG_HM_MI = 0x00008000,
+ FW_CAPS_CONFIG_HM_I2CM = 0x00010000,
+ FW_CAPS_CONFIG_HM_NCSI = 0x00020000,
+ FW_CAPS_CONFIG_HM_SMB = 0x00040000,
+ FW_CAPS_CONFIG_HM_MA = 0x00080000,
+ FW_CAPS_CONFIG_HM_EDRAM = 0x00100000,
+ FW_CAPS_CONFIG_HM_PMU = 0x00200000,
+ FW_CAPS_CONFIG_HM_UART = 0x00400000,
+ FW_CAPS_CONFIG_HM_SF = 0x00800000,
+};
+
+/*
+ * The VF Register Map.
+ *
+ * The Scatter Gather Engine (SGE), Multiport Support module (MPS), PIO Local
+ * bus module (PL) and CPU Interface Module (CIM) components are mapped via
+ * the Slice to Module Map Table (see below) in the Physical Function Register
+ * Map. The Mail Box Data (MBDATA) range is mapped via the PCI-E Mailbox Base
+ * and Offset registers in the PF Register Map. The MBDATA base address is
+ * quite constrained as it determines the Mailbox Data addresses for both PFs
+ * and VFs, and therefore must fit in both the VF and PF Register Maps without
+ * overlapping other registers.
+ */
+#define FW_T4VF_SGE_BASE_ADDR 0x0000
+#define FW_T4VF_MPS_BASE_ADDR 0x0100
+#define FW_T4VF_PL_BASE_ADDR 0x0200
+#define FW_T4VF_MBDATA_BASE_ADDR 0x0240
+#define FW_T4VF_CIM_BASE_ADDR 0x0300
+
+#define FW_T4VF_REGMAP_START 0x0000
+#define FW_T4VF_REGMAP_SIZE 0x0400
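+
+/*
+ * Illustrative sanity check (an observation of this note, not required by
+ * the interface itself): the MBDATA window starts at 0x240 and must end
+ * before the CIM base at 0x300, leaving at most 0xc0 bytes, and the CIM
+ * range must stay inside the 0x400-byte VF register map:
+ *
+ *	CTASSERT(FW_T4VF_MBDATA_BASE_ADDR + 0xc0 <= FW_T4VF_CIM_BASE_ADDR);
+ *	CTASSERT(FW_T4VF_CIM_BASE_ADDR < FW_T4VF_REGMAP_START +
+ *	    FW_T4VF_REGMAP_SIZE);
+ */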
+
+enum fw_caps_config_nbm {
+ FW_CAPS_CONFIG_NBM_IPMI = 0x00000001,
+ FW_CAPS_CONFIG_NBM_NCSI = 0x00000002,
+};
+
+enum fw_caps_config_link {
+ FW_CAPS_CONFIG_LINK_PPP = 0x00000001,
+ FW_CAPS_CONFIG_LINK_QFC = 0x00000002,
+ FW_CAPS_CONFIG_LINK_DCBX = 0x00000004,
+};
+
+enum fw_caps_config_switch {
+ FW_CAPS_CONFIG_SWITCH_INGRESS = 0x00000001,
+ FW_CAPS_CONFIG_SWITCH_EGRESS = 0x00000002,
+};
+
+enum fw_caps_config_nic {
+ FW_CAPS_CONFIG_NIC = 0x00000001,
+ FW_CAPS_CONFIG_NIC_VM = 0x00000002,
+ FW_CAPS_CONFIG_NIC_IDS = 0x00000004,
+};
+
+enum fw_caps_config_toe {
+ FW_CAPS_CONFIG_TOE = 0x00000001,
+};
+
+enum fw_caps_config_rdma {
+ FW_CAPS_CONFIG_RDMA_RDDP = 0x00000001,
+ FW_CAPS_CONFIG_RDMA_RDMAC = 0x00000002,
+};
+
+enum fw_caps_config_iscsi {
+ FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU = 0x00000001,
+ FW_CAPS_CONFIG_ISCSI_TARGET_PDU = 0x00000002,
+ FW_CAPS_CONFIG_ISCSI_INITIATOR_CNXOFLD = 0x00000004,
+ FW_CAPS_CONFIG_ISCSI_TARGET_CNXOFLD = 0x00000008,
+};
+
+enum fw_caps_config_fcoe {
+ FW_CAPS_CONFIG_FCOE_INITIATOR = 0x00000001,
+ FW_CAPS_CONFIG_FCOE_TARGET = 0x00000002,
+ FW_CAPS_CONFIG_FCOE_CTRL_OFLD = 0x00000004,
+};
+
+struct fw_caps_config_cmd {
+ __be32 op_to_write;
+ __be32 retval_len16;
+ __be32 r2;
+ __be32 hwmbitmap;
+ __be16 nbmcaps;
+ __be16 linkcaps;
+ __be16 switchcaps;
+ __be16 r3;
+ __be16 niccaps;
+ __be16 toecaps;
+ __be16 rdmacaps;
+ __be16 r4;
+ __be16 iscsicaps;
+ __be16 fcoecaps;
+ __be32 r5;
+ __be64 r6;
+};
+
+/*
+ * params command mnemonics
+ */
+enum fw_params_mnem {
+ FW_PARAMS_MNEM_DEV = 1, /* device params */
+ FW_PARAMS_MNEM_PFVF = 2, /* function params */
+ FW_PARAMS_MNEM_REG = 3, /* limited register access */
+ FW_PARAMS_MNEM_DMAQ = 4, /* dma queue params */
+ FW_PARAMS_MNEM_LAST
+};
+
+/*
+ * device parameters
+ */
+enum fw_params_param_dev {
+ FW_PARAMS_PARAM_DEV_CCLK = 0x00, /* chip core clock in khz */
+ FW_PARAMS_PARAM_DEV_PORTVEC = 0x01, /* the port vector */
+ FW_PARAMS_PARAM_DEV_NTID = 0x02, /* reads the number of TIDs
+ * allocated by the device's
+ * Lookup Engine
+ */
+ FW_PARAMS_PARAM_DEV_FLOWC_BUFFIFO_SZ = 0x03,
+ FW_PARAMS_PARAM_DEV_INTVER_NIC = 0x04,
+ FW_PARAMS_PARAM_DEV_INTVER_VNIC = 0x05,
+ FW_PARAMS_PARAM_DEV_INTVER_OFLD = 0x06,
+ FW_PARAMS_PARAM_DEV_INTVER_RI = 0x07,
+ FW_PARAMS_PARAM_DEV_INTVER_ISCSIPDU = 0x08,
+ FW_PARAMS_PARAM_DEV_INTVER_ISCSI = 0x09,
+ FW_PARAMS_PARAM_DEV_INTVER_FCOE = 0x0A,
+ FW_PARAMS_PARAM_DEV_FWREV = 0x0B,
+ FW_PARAMS_PARAM_DEV_TPREV = 0x0C,
+};
+
+/*
+ * physical and virtual function parameters
+ */
+enum fw_params_param_pfvf {
+ FW_PARAMS_PARAM_PFVF_RWXCAPS = 0x00,
+ FW_PARAMS_PARAM_PFVF_ROUTE_START = 0x01,
+ FW_PARAMS_PARAM_PFVF_ROUTE_END = 0x02,
+ FW_PARAMS_PARAM_PFVF_CLIP_START = 0x03,
+ FW_PARAMS_PARAM_PFVF_CLIP_END = 0x04,
+ FW_PARAMS_PARAM_PFVF_FILTER_START = 0x05,
+ FW_PARAMS_PARAM_PFVF_FILTER_END = 0x06,
+ FW_PARAMS_PARAM_PFVF_SERVER_START = 0x07,
+ FW_PARAMS_PARAM_PFVF_SERVER_END = 0x08,
+ FW_PARAMS_PARAM_PFVF_TDDP_START = 0x09,
+ FW_PARAMS_PARAM_PFVF_TDDP_END = 0x0A,
+ FW_PARAMS_PARAM_PFVF_ISCSI_START = 0x0B,
+ FW_PARAMS_PARAM_PFVF_ISCSI_END = 0x0C,
+ FW_PARAMS_PARAM_PFVF_STAG_START = 0x0D,
+ FW_PARAMS_PARAM_PFVF_STAG_END = 0x0E,
+ FW_PARAMS_PARAM_PFVF_RQ_START = 0x1F,
+ FW_PARAMS_PARAM_PFVF_RQ_END = 0x10,
+ FW_PARAMS_PARAM_PFVF_PBL_START = 0x11,
+ FW_PARAMS_PARAM_PFVF_PBL_END = 0x12,
+ FW_PARAMS_PARAM_PFVF_L2T_START = 0x13,
+ FW_PARAMS_PARAM_PFVF_L2T_END = 0x14,
+ FW_PARAMS_PARAM_PFVF_SQRQ_START = 0x15,
+ FW_PARAMS_PARAM_PFVF_SQRQ_END = 0x16,
+ FW_PARAMS_PARAM_PFVF_CQ_START = 0x17,
+ FW_PARAMS_PARAM_PFVF_CQ_END = 0x18,
+ FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH = 0x20,
+ FW_PARAMS_PARAM_PFVF_VIID = 0x24,
+ FW_PARAMS_PARAM_PFVF_CPMASK = 0x25,
+ FW_PARAMS_PARAM_PFVF_OCQ_START = 0x26,
+ FW_PARAMS_PARAM_PFVF_OCQ_END = 0x27,
+ FW_PARAMS_PARAM_PFVF_CONM_MAP = 0x28,
+ FW_PARAMS_PARAM_PFVF_IQFLINT_START = 0x29,
+ FW_PARAMS_PARAM_PFVF_IQFLINT_END = 0x2A,
+ FW_PARAMS_PARAM_PFVF_EQ_START = 0x2B,
+ FW_PARAMS_PARAM_PFVF_EQ_END = 0x2C
+};
+
+/*
+ * dma queue parameters
+ */
+enum fw_params_param_dmaq {
+ FW_PARAMS_PARAM_DMAQ_IQ_DCAEN_DCACPU = 0x00,
+ FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH = 0x01,
+ FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_MNGT = 0x10,
+ FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL = 0x11,
+ FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH = 0x12,
+};
+
+#define S_FW_PARAMS_MNEM 24
+#define M_FW_PARAMS_MNEM 0xff
+#define V_FW_PARAMS_MNEM(x) ((x) << S_FW_PARAMS_MNEM)
+#define G_FW_PARAMS_MNEM(x) \
+ (((x) >> S_FW_PARAMS_MNEM) & M_FW_PARAMS_MNEM)
+
+#define S_FW_PARAMS_PARAM_X 16
+#define M_FW_PARAMS_PARAM_X 0xff
+#define V_FW_PARAMS_PARAM_X(x) ((x) << S_FW_PARAMS_PARAM_X)
+#define G_FW_PARAMS_PARAM_X(x) \
+ (((x) >> S_FW_PARAMS_PARAM_X) & M_FW_PARAMS_PARAM_X)
+
+#define S_FW_PARAMS_PARAM_Y 8
+#define M_FW_PARAMS_PARAM_Y 0xff
+#define V_FW_PARAMS_PARAM_Y(x) ((x) << S_FW_PARAMS_PARAM_Y)
+#define G_FW_PARAMS_PARAM_Y(x) \
+ (((x) >> S_FW_PARAMS_PARAM_Y) & M_FW_PARAMS_PARAM_Y)
+
+#define S_FW_PARAMS_PARAM_Z 0
+#define M_FW_PARAMS_PARAM_Z 0xff
+#define V_FW_PARAMS_PARAM_Z(x) ((x) << S_FW_PARAMS_PARAM_Z)
+#define G_FW_PARAMS_PARAM_Z(x) \
+ (((x) >> S_FW_PARAMS_PARAM_Z) & M_FW_PARAMS_PARAM_Z)
+
+#define S_FW_PARAMS_PARAM_XYZ 0
+#define M_FW_PARAMS_PARAM_XYZ 0xffffff
+#define V_FW_PARAMS_PARAM_XYZ(x) ((x) << S_FW_PARAMS_PARAM_XYZ)
+#define G_FW_PARAMS_PARAM_XYZ(x) \
+ (((x) >> S_FW_PARAMS_PARAM_XYZ) & M_FW_PARAMS_PARAM_XYZ)
+
+#define S_FW_PARAMS_PARAM_YZ 0
+#define M_FW_PARAMS_PARAM_YZ 0xffff
+#define V_FW_PARAMS_PARAM_YZ(x) ((x) << S_FW_PARAMS_PARAM_YZ)
+#define G_FW_PARAMS_PARAM_YZ(x) \
+ (((x) >> S_FW_PARAMS_PARAM_YZ) & M_FW_PARAMS_PARAM_YZ)
+
+struct fw_params_cmd {
+ __be32 op_to_vfn;
+ __be32 retval_len16;
+ struct fw_params_param {
+ __be32 mnem;
+ __be32 val;
+ } param[7];
+};
+
+#define S_FW_PARAMS_CMD_PFN 8
+#define M_FW_PARAMS_CMD_PFN 0x7
+#define V_FW_PARAMS_CMD_PFN(x) ((x) << S_FW_PARAMS_CMD_PFN)
+#define G_FW_PARAMS_CMD_PFN(x) \
+ (((x) >> S_FW_PARAMS_CMD_PFN) & M_FW_PARAMS_CMD_PFN)
+
+#define S_FW_PARAMS_CMD_VFN 0
+#define M_FW_PARAMS_CMD_VFN 0xff
+#define V_FW_PARAMS_CMD_VFN(x) ((x) << S_FW_PARAMS_CMD_VFN)
+#define G_FW_PARAMS_CMD_VFN(x) \
+ (((x) >> S_FW_PARAMS_CMD_VFN) & M_FW_PARAMS_CMD_VFN)
+
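The S_/M_/V_/G_ prefixes used throughout this header follow one convention: S_ is the field's bit offset, M_ its mask, V_() places a value into the field, G_() extracts it, and F_ names a single-bit flag. A minimal sketch (not part of the patch) of composing one parameter mnemonic for this command follows; memset and htobe32 are the usual kernel helpers, and the remaining header fields would be filled with the opcode macros defined earlier in this file before the command is sent.

    struct fw_params_cmd c;

    memset(&c, 0, sizeof(c));
    /* ask for the device's port vector: mnemonic class in bits 31:24, parameter in 23:16 */
    c.param[0].mnem = htobe32(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
        V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC));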
+struct fw_pfvf_cmd {
+ __be32 op_to_vfn;
+ __be32 retval_len16;
+ __be32 niqflint_niq;
+ __be32 type_to_neq;
+ __be32 tc_to_nexactf;
+ __be32 r_caps_to_nethctrl;
+ __be16 nricq;
+ __be16 nriqp;
+ __be32 r4;
+};
+
+#define S_FW_PFVF_CMD_PFN 8
+#define M_FW_PFVF_CMD_PFN 0x7
+#define V_FW_PFVF_CMD_PFN(x) ((x) << S_FW_PFVF_CMD_PFN)
+#define G_FW_PFVF_CMD_PFN(x) \
+ (((x) >> S_FW_PFVF_CMD_PFN) & M_FW_PFVF_CMD_PFN)
+
+#define S_FW_PFVF_CMD_VFN 0
+#define M_FW_PFVF_CMD_VFN 0xff
+#define V_FW_PFVF_CMD_VFN(x) ((x) << S_FW_PFVF_CMD_VFN)
+#define G_FW_PFVF_CMD_VFN(x) \
+ (((x) >> S_FW_PFVF_CMD_VFN) & M_FW_PFVF_CMD_VFN)
+
+#define S_FW_PFVF_CMD_NIQFLINT 20
+#define M_FW_PFVF_CMD_NIQFLINT 0xfff
+#define V_FW_PFVF_CMD_NIQFLINT(x) ((x) << S_FW_PFVF_CMD_NIQFLINT)
+#define G_FW_PFVF_CMD_NIQFLINT(x) \
+ (((x) >> S_FW_PFVF_CMD_NIQFLINT) & M_FW_PFVF_CMD_NIQFLINT)
+
+#define S_FW_PFVF_CMD_NIQ 0
+#define M_FW_PFVF_CMD_NIQ 0xfffff
+#define V_FW_PFVF_CMD_NIQ(x) ((x) << S_FW_PFVF_CMD_NIQ)
+#define G_FW_PFVF_CMD_NIQ(x) \
+ (((x) >> S_FW_PFVF_CMD_NIQ) & M_FW_PFVF_CMD_NIQ)
+
+#define S_FW_PFVF_CMD_TYPE 31
+#define M_FW_PFVF_CMD_TYPE 0x1
+#define V_FW_PFVF_CMD_TYPE(x) ((x) << S_FW_PFVF_CMD_TYPE)
+#define G_FW_PFVF_CMD_TYPE(x) \
+ (((x) >> S_FW_PFVF_CMD_TYPE) & M_FW_PFVF_CMD_TYPE)
+#define F_FW_PFVF_CMD_TYPE V_FW_PFVF_CMD_TYPE(1U)
+
+#define S_FW_PFVF_CMD_CMASK 24
+#define M_FW_PFVF_CMD_CMASK 0xf
+#define V_FW_PFVF_CMD_CMASK(x) ((x) << S_FW_PFVF_CMD_CMASK)
+#define G_FW_PFVF_CMD_CMASK(x) \
+ (((x) >> S_FW_PFVF_CMD_CMASK) & M_FW_PFVF_CMD_CMASK)
+
+#define S_FW_PFVF_CMD_PMASK 20
+#define M_FW_PFVF_CMD_PMASK 0xf
+#define V_FW_PFVF_CMD_PMASK(x) ((x) << S_FW_PFVF_CMD_PMASK)
+#define G_FW_PFVF_CMD_PMASK(x) \
+ (((x) >> S_FW_PFVF_CMD_PMASK) & M_FW_PFVF_CMD_PMASK)
+
+#define S_FW_PFVF_CMD_NEQ 0
+#define M_FW_PFVF_CMD_NEQ 0xfffff
+#define V_FW_PFVF_CMD_NEQ(x) ((x) << S_FW_PFVF_CMD_NEQ)
+#define G_FW_PFVF_CMD_NEQ(x) \
+ (((x) >> S_FW_PFVF_CMD_NEQ) & M_FW_PFVF_CMD_NEQ)
+
+#define S_FW_PFVF_CMD_TC 24
+#define M_FW_PFVF_CMD_TC 0xff
+#define V_FW_PFVF_CMD_TC(x) ((x) << S_FW_PFVF_CMD_TC)
+#define G_FW_PFVF_CMD_TC(x) (((x) >> S_FW_PFVF_CMD_TC) & M_FW_PFVF_CMD_TC)
+
+#define S_FW_PFVF_CMD_NVI 16
+#define M_FW_PFVF_CMD_NVI 0xff
+#define V_FW_PFVF_CMD_NVI(x) ((x) << S_FW_PFVF_CMD_NVI)
+#define G_FW_PFVF_CMD_NVI(x) \
+ (((x) >> S_FW_PFVF_CMD_NVI) & M_FW_PFVF_CMD_NVI)
+
+#define S_FW_PFVF_CMD_NEXACTF 0
+#define M_FW_PFVF_CMD_NEXACTF 0xffff
+#define V_FW_PFVF_CMD_NEXACTF(x) ((x) << S_FW_PFVF_CMD_NEXACTF)
+#define G_FW_PFVF_CMD_NEXACTF(x) \
+ (((x) >> S_FW_PFVF_CMD_NEXACTF) & M_FW_PFVF_CMD_NEXACTF)
+
+#define S_FW_PFVF_CMD_R_CAPS 24
+#define M_FW_PFVF_CMD_R_CAPS 0xff
+#define V_FW_PFVF_CMD_R_CAPS(x) ((x) << S_FW_PFVF_CMD_R_CAPS)
+#define G_FW_PFVF_CMD_R_CAPS(x) \
+ (((x) >> S_FW_PFVF_CMD_R_CAPS) & M_FW_PFVF_CMD_R_CAPS)
+
+#define S_FW_PFVF_CMD_WX_CAPS 16
+#define M_FW_PFVF_CMD_WX_CAPS 0xff
+#define V_FW_PFVF_CMD_WX_CAPS(x) ((x) << S_FW_PFVF_CMD_WX_CAPS)
+#define G_FW_PFVF_CMD_WX_CAPS(x) \
+ (((x) >> S_FW_PFVF_CMD_WX_CAPS) & M_FW_PFVF_CMD_WX_CAPS)
+
+#define S_FW_PFVF_CMD_NETHCTRL 0
+#define M_FW_PFVF_CMD_NETHCTRL 0xffff
+#define V_FW_PFVF_CMD_NETHCTRL(x) ((x) << S_FW_PFVF_CMD_NETHCTRL)
+#define G_FW_PFVF_CMD_NETHCTRL(x) \
+ (((x) >> S_FW_PFVF_CMD_NETHCTRL) & M_FW_PFVF_CMD_NETHCTRL)
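A minimal sketch (not part of the patch) of decoding a PF/VF provisioning reply with the G_ (get) macros above; rpl stands in for a struct fw_pfvf_cmd that the firmware has filled in, and be32toh is the usual FreeBSD byte-order helper.

    unsigned int niqflint, neq, nvi;

    niqflint = G_FW_PFVF_CMD_NIQFLINT(be32toh(rpl.niqflint_niq)); /* ingress queues with free lists/interrupts */
    neq = G_FW_PFVF_CMD_NEQ(be32toh(rpl.type_to_neq));            /* egress queues */
    nvi = G_FW_PFVF_CMD_NVI(be32toh(rpl.tc_to_nexactf));          /* virtual interfaces */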
+
+/*
+ * ingress queue type; the first 1K ingress queues can have 0, 1 or 2
+ * associated free lists and an interrupt, all other ingress queues lack
+ * these capabilities
+ */
+enum fw_iq_type {
+ FW_IQ_TYPE_FL_INT_CAP,
+ FW_IQ_TYPE_NO_FL_INT_CAP
+};
+
+struct fw_iq_cmd {
+ __be32 op_to_vfn;
+ __be32 alloc_to_len16;
+ __be16 physiqid;
+ __be16 iqid;
+ __be16 fl0id;
+ __be16 fl1id;
+ __be32 type_to_iqandstindex;
+ __be16 iqdroprss_to_iqesize;
+ __be16 iqsize;
+ __be64 iqaddr;
+ __be32 iqns_to_fl0congen;
+ __be16 fl0dcaen_to_fl0cidxfthresh;
+ __be16 fl0size;
+ __be64 fl0addr;
+ __be32 fl1cngchmap_to_fl1congen;
+ __be16 fl1dcaen_to_fl1cidxfthresh;
+ __be16 fl1size;
+ __be64 fl1addr;
+};
+
+#define S_FW_IQ_CMD_PFN 8
+#define M_FW_IQ_CMD_PFN 0x7
+#define V_FW_IQ_CMD_PFN(x) ((x) << S_FW_IQ_CMD_PFN)
+#define G_FW_IQ_CMD_PFN(x) (((x) >> S_FW_IQ_CMD_PFN) & M_FW_IQ_CMD_PFN)
+
+#define S_FW_IQ_CMD_VFN 0
+#define M_FW_IQ_CMD_VFN 0xff
+#define V_FW_IQ_CMD_VFN(x) ((x) << S_FW_IQ_CMD_VFN)
+#define G_FW_IQ_CMD_VFN(x) (((x) >> S_FW_IQ_CMD_VFN) & M_FW_IQ_CMD_VFN)
+
+#define S_FW_IQ_CMD_ALLOC 31
+#define M_FW_IQ_CMD_ALLOC 0x1
+#define V_FW_IQ_CMD_ALLOC(x) ((x) << S_FW_IQ_CMD_ALLOC)
+#define G_FW_IQ_CMD_ALLOC(x) \
+ (((x) >> S_FW_IQ_CMD_ALLOC) & M_FW_IQ_CMD_ALLOC)
+#define F_FW_IQ_CMD_ALLOC V_FW_IQ_CMD_ALLOC(1U)
+
+#define S_FW_IQ_CMD_FREE 30
+#define M_FW_IQ_CMD_FREE 0x1
+#define V_FW_IQ_CMD_FREE(x) ((x) << S_FW_IQ_CMD_FREE)
+#define G_FW_IQ_CMD_FREE(x) (((x) >> S_FW_IQ_CMD_FREE) & M_FW_IQ_CMD_FREE)
+#define F_FW_IQ_CMD_FREE V_FW_IQ_CMD_FREE(1U)
+
+#define S_FW_IQ_CMD_MODIFY 29
+#define M_FW_IQ_CMD_MODIFY 0x1
+#define V_FW_IQ_CMD_MODIFY(x) ((x) << S_FW_IQ_CMD_MODIFY)
+#define G_FW_IQ_CMD_MODIFY(x) \
+ (((x) >> S_FW_IQ_CMD_MODIFY) & M_FW_IQ_CMD_MODIFY)
+#define F_FW_IQ_CMD_MODIFY V_FW_IQ_CMD_MODIFY(1U)
+
+#define S_FW_IQ_CMD_IQSTART 28
+#define M_FW_IQ_CMD_IQSTART 0x1
+#define V_FW_IQ_CMD_IQSTART(x) ((x) << S_FW_IQ_CMD_IQSTART)
+#define G_FW_IQ_CMD_IQSTART(x) \
+ (((x) >> S_FW_IQ_CMD_IQSTART) & M_FW_IQ_CMD_IQSTART)
+#define F_FW_IQ_CMD_IQSTART V_FW_IQ_CMD_IQSTART(1U)
+
+#define S_FW_IQ_CMD_IQSTOP 27
+#define M_FW_IQ_CMD_IQSTOP 0x1
+#define V_FW_IQ_CMD_IQSTOP(x) ((x) << S_FW_IQ_CMD_IQSTOP)
+#define G_FW_IQ_CMD_IQSTOP(x) \
+ (((x) >> S_FW_IQ_CMD_IQSTOP) & M_FW_IQ_CMD_IQSTOP)
+#define F_FW_IQ_CMD_IQSTOP V_FW_IQ_CMD_IQSTOP(1U)
+
+#define S_FW_IQ_CMD_TYPE 29
+#define M_FW_IQ_CMD_TYPE 0x7
+#define V_FW_IQ_CMD_TYPE(x) ((x) << S_FW_IQ_CMD_TYPE)
+#define G_FW_IQ_CMD_TYPE(x) (((x) >> S_FW_IQ_CMD_TYPE) & M_FW_IQ_CMD_TYPE)
+
+#define S_FW_IQ_CMD_IQASYNCH 28
+#define M_FW_IQ_CMD_IQASYNCH 0x1
+#define V_FW_IQ_CMD_IQASYNCH(x) ((x) << S_FW_IQ_CMD_IQASYNCH)
+#define G_FW_IQ_CMD_IQASYNCH(x) \
+ (((x) >> S_FW_IQ_CMD_IQASYNCH) & M_FW_IQ_CMD_IQASYNCH)
+#define F_FW_IQ_CMD_IQASYNCH V_FW_IQ_CMD_IQASYNCH(1U)
+
+#define S_FW_IQ_CMD_VIID 16
+#define M_FW_IQ_CMD_VIID 0xfff
+#define V_FW_IQ_CMD_VIID(x) ((x) << S_FW_IQ_CMD_VIID)
+#define G_FW_IQ_CMD_VIID(x) (((x) >> S_FW_IQ_CMD_VIID) & M_FW_IQ_CMD_VIID)
+
+#define S_FW_IQ_CMD_IQANDST 15
+#define M_FW_IQ_CMD_IQANDST 0x1
+#define V_FW_IQ_CMD_IQANDST(x) ((x) << S_FW_IQ_CMD_IQANDST)
+#define G_FW_IQ_CMD_IQANDST(x) \
+ (((x) >> S_FW_IQ_CMD_IQANDST) & M_FW_IQ_CMD_IQANDST)
+#define F_FW_IQ_CMD_IQANDST V_FW_IQ_CMD_IQANDST(1U)
+
+#define S_FW_IQ_CMD_IQANUS 14
+#define M_FW_IQ_CMD_IQANUS 0x1
+#define V_FW_IQ_CMD_IQANUS(x) ((x) << S_FW_IQ_CMD_IQANUS)
+#define G_FW_IQ_CMD_IQANUS(x) \
+ (((x) >> S_FW_IQ_CMD_IQANUS) & M_FW_IQ_CMD_IQANUS)
+#define F_FW_IQ_CMD_IQANUS V_FW_IQ_CMD_IQANUS(1U)
+
+#define S_FW_IQ_CMD_IQANUD 12
+#define M_FW_IQ_CMD_IQANUD 0x3
+#define V_FW_IQ_CMD_IQANUD(x) ((x) << S_FW_IQ_CMD_IQANUD)
+#define G_FW_IQ_CMD_IQANUD(x) \
+ (((x) >> S_FW_IQ_CMD_IQANUD) & M_FW_IQ_CMD_IQANUD)
+
+#define S_FW_IQ_CMD_IQANDSTINDEX 0
+#define M_FW_IQ_CMD_IQANDSTINDEX 0xfff
+#define V_FW_IQ_CMD_IQANDSTINDEX(x) ((x) << S_FW_IQ_CMD_IQANDSTINDEX)
+#define G_FW_IQ_CMD_IQANDSTINDEX(x) \
+ (((x) >> S_FW_IQ_CMD_IQANDSTINDEX) & M_FW_IQ_CMD_IQANDSTINDEX)
+
+#define S_FW_IQ_CMD_IQDROPRSS 15
+#define M_FW_IQ_CMD_IQDROPRSS 0x1
+#define V_FW_IQ_CMD_IQDROPRSS(x) ((x) << S_FW_IQ_CMD_IQDROPRSS)
+#define G_FW_IQ_CMD_IQDROPRSS(x) \
+ (((x) >> S_FW_IQ_CMD_IQDROPRSS) & M_FW_IQ_CMD_IQDROPRSS)
+#define F_FW_IQ_CMD_IQDROPRSS V_FW_IQ_CMD_IQDROPRSS(1U)
+
+#define S_FW_IQ_CMD_IQGTSMODE 14
+#define M_FW_IQ_CMD_IQGTSMODE 0x1
+#define V_FW_IQ_CMD_IQGTSMODE(x) ((x) << S_FW_IQ_CMD_IQGTSMODE)
+#define G_FW_IQ_CMD_IQGTSMODE(x) \
+ (((x) >> S_FW_IQ_CMD_IQGTSMODE) & M_FW_IQ_CMD_IQGTSMODE)
+#define F_FW_IQ_CMD_IQGTSMODE V_FW_IQ_CMD_IQGTSMODE(1U)
+
+#define S_FW_IQ_CMD_IQPCIECH 12
+#define M_FW_IQ_CMD_IQPCIECH 0x3
+#define V_FW_IQ_CMD_IQPCIECH(x) ((x) << S_FW_IQ_CMD_IQPCIECH)
+#define G_FW_IQ_CMD_IQPCIECH(x) \
+ (((x) >> S_FW_IQ_CMD_IQPCIECH) & M_FW_IQ_CMD_IQPCIECH)
+
+#define S_FW_IQ_CMD_IQDCAEN 11
+#define M_FW_IQ_CMD_IQDCAEN 0x1
+#define V_FW_IQ_CMD_IQDCAEN(x) ((x) << S_FW_IQ_CMD_IQDCAEN)
+#define G_FW_IQ_CMD_IQDCAEN(x) \
+ (((x) >> S_FW_IQ_CMD_IQDCAEN) & M_FW_IQ_CMD_IQDCAEN)
+#define F_FW_IQ_CMD_IQDCAEN V_FW_IQ_CMD_IQDCAEN(1U)
+
+#define S_FW_IQ_CMD_IQDCACPU 6
+#define M_FW_IQ_CMD_IQDCACPU 0x1f
+#define V_FW_IQ_CMD_IQDCACPU(x) ((x) << S_FW_IQ_CMD_IQDCACPU)
+#define G_FW_IQ_CMD_IQDCACPU(x) \
+ (((x) >> S_FW_IQ_CMD_IQDCACPU) & M_FW_IQ_CMD_IQDCACPU)
+
+#define S_FW_IQ_CMD_IQINTCNTTHRESH 4
+#define M_FW_IQ_CMD_IQINTCNTTHRESH 0x3
+#define V_FW_IQ_CMD_IQINTCNTTHRESH(x) ((x) << S_FW_IQ_CMD_IQINTCNTTHRESH)
+#define G_FW_IQ_CMD_IQINTCNTTHRESH(x) \
+ (((x) >> S_FW_IQ_CMD_IQINTCNTTHRESH) & M_FW_IQ_CMD_IQINTCNTTHRESH)
+
+#define S_FW_IQ_CMD_IQO 3
+#define M_FW_IQ_CMD_IQO 0x1
+#define V_FW_IQ_CMD_IQO(x) ((x) << S_FW_IQ_CMD_IQO)
+#define G_FW_IQ_CMD_IQO(x) (((x) >> S_FW_IQ_CMD_IQO) & M_FW_IQ_CMD_IQO)
+#define F_FW_IQ_CMD_IQO V_FW_IQ_CMD_IQO(1U)
+
+#define S_FW_IQ_CMD_IQCPRIO 2
+#define M_FW_IQ_CMD_IQCPRIO 0x1
+#define V_FW_IQ_CMD_IQCPRIO(x) ((x) << S_FW_IQ_CMD_IQCPRIO)
+#define G_FW_IQ_CMD_IQCPRIO(x) \
+ (((x) >> S_FW_IQ_CMD_IQCPRIO) & M_FW_IQ_CMD_IQCPRIO)
+#define F_FW_IQ_CMD_IQCPRIO V_FW_IQ_CMD_IQCPRIO(1U)
+
+#define S_FW_IQ_CMD_IQESIZE 0
+#define M_FW_IQ_CMD_IQESIZE 0x3
+#define V_FW_IQ_CMD_IQESIZE(x) ((x) << S_FW_IQ_CMD_IQESIZE)
+#define G_FW_IQ_CMD_IQESIZE(x) \
+ (((x) >> S_FW_IQ_CMD_IQESIZE) & M_FW_IQ_CMD_IQESIZE)
+
+#define S_FW_IQ_CMD_IQNS 31
+#define M_FW_IQ_CMD_IQNS 0x1
+#define V_FW_IQ_CMD_IQNS(x) ((x) << S_FW_IQ_CMD_IQNS)
+#define G_FW_IQ_CMD_IQNS(x) (((x) >> S_FW_IQ_CMD_IQNS) & M_FW_IQ_CMD_IQNS)
+#define F_FW_IQ_CMD_IQNS V_FW_IQ_CMD_IQNS(1U)
+
+#define S_FW_IQ_CMD_IQRO 30
+#define M_FW_IQ_CMD_IQRO 0x1
+#define V_FW_IQ_CMD_IQRO(x) ((x) << S_FW_IQ_CMD_IQRO)
+#define G_FW_IQ_CMD_IQRO(x) (((x) >> S_FW_IQ_CMD_IQRO) & M_FW_IQ_CMD_IQRO)
+#define F_FW_IQ_CMD_IQRO V_FW_IQ_CMD_IQRO(1U)
+
+#define S_FW_IQ_CMD_IQFLINTIQHSEN 28
+#define M_FW_IQ_CMD_IQFLINTIQHSEN 0x3
+#define V_FW_IQ_CMD_IQFLINTIQHSEN(x) ((x) << S_FW_IQ_CMD_IQFLINTIQHSEN)
+#define G_FW_IQ_CMD_IQFLINTIQHSEN(x) \
+ (((x) >> S_FW_IQ_CMD_IQFLINTIQHSEN) & M_FW_IQ_CMD_IQFLINTIQHSEN)
+
+#define S_FW_IQ_CMD_IQFLINTCONGEN 27
+#define M_FW_IQ_CMD_IQFLINTCONGEN 0x1
+#define V_FW_IQ_CMD_IQFLINTCONGEN(x) ((x) << S_FW_IQ_CMD_IQFLINTCONGEN)
+#define G_FW_IQ_CMD_IQFLINTCONGEN(x) \
+ (((x) >> S_FW_IQ_CMD_IQFLINTCONGEN) & M_FW_IQ_CMD_IQFLINTCONGEN)
+#define F_FW_IQ_CMD_IQFLINTCONGEN V_FW_IQ_CMD_IQFLINTCONGEN(1U)
+
+#define S_FW_IQ_CMD_IQFLINTISCSIC 26
+#define M_FW_IQ_CMD_IQFLINTISCSIC 0x1
+#define V_FW_IQ_CMD_IQFLINTISCSIC(x) ((x) << S_FW_IQ_CMD_IQFLINTISCSIC)
+#define G_FW_IQ_CMD_IQFLINTISCSIC(x) \
+ (((x) >> S_FW_IQ_CMD_IQFLINTISCSIC) & M_FW_IQ_CMD_IQFLINTISCSIC)
+#define F_FW_IQ_CMD_IQFLINTISCSIC V_FW_IQ_CMD_IQFLINTISCSIC(1U)
+
+#define S_FW_IQ_CMD_FL0CNGCHMAP 20
+#define M_FW_IQ_CMD_FL0CNGCHMAP 0xf
+#define V_FW_IQ_CMD_FL0CNGCHMAP(x) ((x) << S_FW_IQ_CMD_FL0CNGCHMAP)
+#define G_FW_IQ_CMD_FL0CNGCHMAP(x) \
+ (((x) >> S_FW_IQ_CMD_FL0CNGCHMAP) & M_FW_IQ_CMD_FL0CNGCHMAP)
+
+#define S_FW_IQ_CMD_FL0CACHELOCK 15
+#define M_FW_IQ_CMD_FL0CACHELOCK 0x1
+#define V_FW_IQ_CMD_FL0CACHELOCK(x) ((x) << S_FW_IQ_CMD_FL0CACHELOCK)
+#define G_FW_IQ_CMD_FL0CACHELOCK(x) \
+ (((x) >> S_FW_IQ_CMD_FL0CACHELOCK) & M_FW_IQ_CMD_FL0CACHELOCK)
+#define F_FW_IQ_CMD_FL0CACHELOCK V_FW_IQ_CMD_FL0CACHELOCK(1U)
+
+#define S_FW_IQ_CMD_FL0DBP 14
+#define M_FW_IQ_CMD_FL0DBP 0x1
+#define V_FW_IQ_CMD_FL0DBP(x) ((x) << S_FW_IQ_CMD_FL0DBP)
+#define G_FW_IQ_CMD_FL0DBP(x) \
+ (((x) >> S_FW_IQ_CMD_FL0DBP) & M_FW_IQ_CMD_FL0DBP)
+#define F_FW_IQ_CMD_FL0DBP V_FW_IQ_CMD_FL0DBP(1U)
+
+#define S_FW_IQ_CMD_FL0DATANS 13
+#define M_FW_IQ_CMD_FL0DATANS 0x1
+#define V_FW_IQ_CMD_FL0DATANS(x) ((x) << S_FW_IQ_CMD_FL0DATANS)
+#define G_FW_IQ_CMD_FL0DATANS(x) \
+ (((x) >> S_FW_IQ_CMD_FL0DATANS) & M_FW_IQ_CMD_FL0DATANS)
+#define F_FW_IQ_CMD_FL0DATANS V_FW_IQ_CMD_FL0DATANS(1U)
+
+#define S_FW_IQ_CMD_FL0DATARO 12
+#define M_FW_IQ_CMD_FL0DATARO 0x1
+#define V_FW_IQ_CMD_FL0DATARO(x) ((x) << S_FW_IQ_CMD_FL0DATARO)
+#define G_FW_IQ_CMD_FL0DATARO(x) \
+ (((x) >> S_FW_IQ_CMD_FL0DATARO) & M_FW_IQ_CMD_FL0DATARO)
+#define F_FW_IQ_CMD_FL0DATARO V_FW_IQ_CMD_FL0DATARO(1U)
+
+#define S_FW_IQ_CMD_FL0CONGCIF 11
+#define M_FW_IQ_CMD_FL0CONGCIF 0x1
+#define V_FW_IQ_CMD_FL0CONGCIF(x) ((x) << S_FW_IQ_CMD_FL0CONGCIF)
+#define G_FW_IQ_CMD_FL0CONGCIF(x) \
+ (((x) >> S_FW_IQ_CMD_FL0CONGCIF) & M_FW_IQ_CMD_FL0CONGCIF)
+#define F_FW_IQ_CMD_FL0CONGCIF V_FW_IQ_CMD_FL0CONGCIF(1U)
+
+#define S_FW_IQ_CMD_FL0ONCHIP 10
+#define M_FW_IQ_CMD_FL0ONCHIP 0x1
+#define V_FW_IQ_CMD_FL0ONCHIP(x) ((x) << S_FW_IQ_CMD_FL0ONCHIP)
+#define G_FW_IQ_CMD_FL0ONCHIP(x) \
+ (((x) >> S_FW_IQ_CMD_FL0ONCHIP) & M_FW_IQ_CMD_FL0ONCHIP)
+#define F_FW_IQ_CMD_FL0ONCHIP V_FW_IQ_CMD_FL0ONCHIP(1U)
+
+#define S_FW_IQ_CMD_FL0STATUSPGNS 9
+#define M_FW_IQ_CMD_FL0STATUSPGNS 0x1
+#define V_FW_IQ_CMD_FL0STATUSPGNS(x) ((x) << S_FW_IQ_CMD_FL0STATUSPGNS)
+#define G_FW_IQ_CMD_FL0STATUSPGNS(x) \
+ (((x) >> S_FW_IQ_CMD_FL0STATUSPGNS) & M_FW_IQ_CMD_FL0STATUSPGNS)
+#define F_FW_IQ_CMD_FL0STATUSPGNS V_FW_IQ_CMD_FL0STATUSPGNS(1U)
+
+#define S_FW_IQ_CMD_FL0STATUSPGRO 8
+#define M_FW_IQ_CMD_FL0STATUSPGRO 0x1
+#define V_FW_IQ_CMD_FL0STATUSPGRO(x) ((x) << S_FW_IQ_CMD_FL0STATUSPGRO)
+#define G_FW_IQ_CMD_FL0STATUSPGRO(x) \
+ (((x) >> S_FW_IQ_CMD_FL0STATUSPGRO) & M_FW_IQ_CMD_FL0STATUSPGRO)
+#define F_FW_IQ_CMD_FL0STATUSPGRO V_FW_IQ_CMD_FL0STATUSPGRO(1U)
+
+#define S_FW_IQ_CMD_FL0FETCHNS 7
+#define M_FW_IQ_CMD_FL0FETCHNS 0x1
+#define V_FW_IQ_CMD_FL0FETCHNS(x) ((x) << S_FW_IQ_CMD_FL0FETCHNS)
+#define G_FW_IQ_CMD_FL0FETCHNS(x) \
+ (((x) >> S_FW_IQ_CMD_FL0FETCHNS) & M_FW_IQ_CMD_FL0FETCHNS)
+#define F_FW_IQ_CMD_FL0FETCHNS V_FW_IQ_CMD_FL0FETCHNS(1U)
+
+#define S_FW_IQ_CMD_FL0FETCHRO 6
+#define M_FW_IQ_CMD_FL0FETCHRO 0x1
+#define V_FW_IQ_CMD_FL0FETCHRO(x) ((x) << S_FW_IQ_CMD_FL0FETCHRO)
+#define G_FW_IQ_CMD_FL0FETCHRO(x) \
+ (((x) >> S_FW_IQ_CMD_FL0FETCHRO) & M_FW_IQ_CMD_FL0FETCHRO)
+#define F_FW_IQ_CMD_FL0FETCHRO V_FW_IQ_CMD_FL0FETCHRO(1U)
+
+#define S_FW_IQ_CMD_FL0HOSTFCMODE 4
+#define M_FW_IQ_CMD_FL0HOSTFCMODE 0x3
+#define V_FW_IQ_CMD_FL0HOSTFCMODE(x) ((x) << S_FW_IQ_CMD_FL0HOSTFCMODE)
+#define G_FW_IQ_CMD_FL0HOSTFCMODE(x) \
+ (((x) >> S_FW_IQ_CMD_FL0HOSTFCMODE) & M_FW_IQ_CMD_FL0HOSTFCMODE)
+
+#define S_FW_IQ_CMD_FL0CPRIO 3
+#define M_FW_IQ_CMD_FL0CPRIO 0x1
+#define V_FW_IQ_CMD_FL0CPRIO(x) ((x) << S_FW_IQ_CMD_FL0CPRIO)
+#define G_FW_IQ_CMD_FL0CPRIO(x) \
+ (((x) >> S_FW_IQ_CMD_FL0CPRIO) & M_FW_IQ_CMD_FL0CPRIO)
+#define F_FW_IQ_CMD_FL0CPRIO V_FW_IQ_CMD_FL0CPRIO(1U)
+
+#define S_FW_IQ_CMD_FL0PADEN 2
+#define M_FW_IQ_CMD_FL0PADEN 0x1
+#define V_FW_IQ_CMD_FL0PADEN(x) ((x) << S_FW_IQ_CMD_FL0PADEN)
+#define G_FW_IQ_CMD_FL0PADEN(x) \
+ (((x) >> S_FW_IQ_CMD_FL0PADEN) & M_FW_IQ_CMD_FL0PADEN)
+#define F_FW_IQ_CMD_FL0PADEN V_FW_IQ_CMD_FL0PADEN(1U)
+
+#define S_FW_IQ_CMD_FL0PACKEN 1
+#define M_FW_IQ_CMD_FL0PACKEN 0x1
+#define V_FW_IQ_CMD_FL0PACKEN(x) ((x) << S_FW_IQ_CMD_FL0PACKEN)
+#define G_FW_IQ_CMD_FL0PACKEN(x) \
+ (((x) >> S_FW_IQ_CMD_FL0PACKEN) & M_FW_IQ_CMD_FL0PACKEN)
+#define F_FW_IQ_CMD_FL0PACKEN V_FW_IQ_CMD_FL0PACKEN(1U)
+
+#define S_FW_IQ_CMD_FL0CONGEN 0
+#define M_FW_IQ_CMD_FL0CONGEN 0x1
+#define V_FW_IQ_CMD_FL0CONGEN(x) ((x) << S_FW_IQ_CMD_FL0CONGEN)
+#define G_FW_IQ_CMD_FL0CONGEN(x) \
+ (((x) >> S_FW_IQ_CMD_FL0CONGEN) & M_FW_IQ_CMD_FL0CONGEN)
+#define F_FW_IQ_CMD_FL0CONGEN V_FW_IQ_CMD_FL0CONGEN(1U)
+
+#define S_FW_IQ_CMD_FL0DCAEN 15
+#define M_FW_IQ_CMD_FL0DCAEN 0x1
+#define V_FW_IQ_CMD_FL0DCAEN(x) ((x) << S_FW_IQ_CMD_FL0DCAEN)
+#define G_FW_IQ_CMD_FL0DCAEN(x) \
+ (((x) >> S_FW_IQ_CMD_FL0DCAEN) & M_FW_IQ_CMD_FL0DCAEN)
+#define F_FW_IQ_CMD_FL0DCAEN V_FW_IQ_CMD_FL0DCAEN(1U)
+
+#define S_FW_IQ_CMD_FL0DCACPU 10
+#define M_FW_IQ_CMD_FL0DCACPU 0x1f
+#define V_FW_IQ_CMD_FL0DCACPU(x) ((x) << S_FW_IQ_CMD_FL0DCACPU)
+#define G_FW_IQ_CMD_FL0DCACPU(x) \
+ (((x) >> S_FW_IQ_CMD_FL0DCACPU) & M_FW_IQ_CMD_FL0DCACPU)
+
+#define S_FW_IQ_CMD_FL0FBMIN 7
+#define M_FW_IQ_CMD_FL0FBMIN 0x7
+#define V_FW_IQ_CMD_FL0FBMIN(x) ((x) << S_FW_IQ_CMD_FL0FBMIN)
+#define G_FW_IQ_CMD_FL0FBMIN(x) \
+ (((x) >> S_FW_IQ_CMD_FL0FBMIN) & M_FW_IQ_CMD_FL0FBMIN)
+
+#define S_FW_IQ_CMD_FL0FBMAX 4
+#define M_FW_IQ_CMD_FL0FBMAX 0x7
+#define V_FW_IQ_CMD_FL0FBMAX(x) ((x) << S_FW_IQ_CMD_FL0FBMAX)
+#define G_FW_IQ_CMD_FL0FBMAX(x) \
+ (((x) >> S_FW_IQ_CMD_FL0FBMAX) & M_FW_IQ_CMD_FL0FBMAX)
+
+#define S_FW_IQ_CMD_FL0CIDXFTHRESHO 3
+#define M_FW_IQ_CMD_FL0CIDXFTHRESHO 0x1
+#define V_FW_IQ_CMD_FL0CIDXFTHRESHO(x) ((x) << S_FW_IQ_CMD_FL0CIDXFTHRESHO)
+#define G_FW_IQ_CMD_FL0CIDXFTHRESHO(x) \
+ (((x) >> S_FW_IQ_CMD_FL0CIDXFTHRESHO) & M_FW_IQ_CMD_FL0CIDXFTHRESHO)
+#define F_FW_IQ_CMD_FL0CIDXFTHRESHO V_FW_IQ_CMD_FL0CIDXFTHRESHO(1U)
+
+#define S_FW_IQ_CMD_FL0CIDXFTHRESH 0
+#define M_FW_IQ_CMD_FL0CIDXFTHRESH 0x7
+#define V_FW_IQ_CMD_FL0CIDXFTHRESH(x) ((x) << S_FW_IQ_CMD_FL0CIDXFTHRESH)
+#define G_FW_IQ_CMD_FL0CIDXFTHRESH(x) \
+ (((x) >> S_FW_IQ_CMD_FL0CIDXFTHRESH) & M_FW_IQ_CMD_FL0CIDXFTHRESH)
+
+#define S_FW_IQ_CMD_FL1CNGCHMAP 20
+#define M_FW_IQ_CMD_FL1CNGCHMAP 0xf
+#define V_FW_IQ_CMD_FL1CNGCHMAP(x) ((x) << S_FW_IQ_CMD_FL1CNGCHMAP)
+#define G_FW_IQ_CMD_FL1CNGCHMAP(x) \
+ (((x) >> S_FW_IQ_CMD_FL1CNGCHMAP) & M_FW_IQ_CMD_FL1CNGCHMAP)
+
+#define S_FW_IQ_CMD_FL1CACHELOCK 15
+#define M_FW_IQ_CMD_FL1CACHELOCK 0x1
+#define V_FW_IQ_CMD_FL1CACHELOCK(x) ((x) << S_FW_IQ_CMD_FL1CACHELOCK)
+#define G_FW_IQ_CMD_FL1CACHELOCK(x) \
+ (((x) >> S_FW_IQ_CMD_FL1CACHELOCK) & M_FW_IQ_CMD_FL1CACHELOCK)
+#define F_FW_IQ_CMD_FL1CACHELOCK V_FW_IQ_CMD_FL1CACHELOCK(1U)
+
+#define S_FW_IQ_CMD_FL1DBP 14
+#define M_FW_IQ_CMD_FL1DBP 0x1
+#define V_FW_IQ_CMD_FL1DBP(x) ((x) << S_FW_IQ_CMD_FL1DBP)
+#define G_FW_IQ_CMD_FL1DBP(x) \
+ (((x) >> S_FW_IQ_CMD_FL1DBP) & M_FW_IQ_CMD_FL1DBP)
+#define F_FW_IQ_CMD_FL1DBP V_FW_IQ_CMD_FL1DBP(1U)
+
+#define S_FW_IQ_CMD_FL1DATANS 13
+#define M_FW_IQ_CMD_FL1DATANS 0x1
+#define V_FW_IQ_CMD_FL1DATANS(x) ((x) << S_FW_IQ_CMD_FL1DATANS)
+#define G_FW_IQ_CMD_FL1DATANS(x) \
+ (((x) >> S_FW_IQ_CMD_FL1DATANS) & M_FW_IQ_CMD_FL1DATANS)
+#define F_FW_IQ_CMD_FL1DATANS V_FW_IQ_CMD_FL1DATANS(1U)
+
+#define S_FW_IQ_CMD_FL1DATARO 12
+#define M_FW_IQ_CMD_FL1DATARO 0x1
+#define V_FW_IQ_CMD_FL1DATARO(x) ((x) << S_FW_IQ_CMD_FL1DATARO)
+#define G_FW_IQ_CMD_FL1DATARO(x) \
+ (((x) >> S_FW_IQ_CMD_FL1DATARO) & M_FW_IQ_CMD_FL1DATARO)
+#define F_FW_IQ_CMD_FL1DATARO V_FW_IQ_CMD_FL1DATARO(1U)
+
+#define S_FW_IQ_CMD_FL1CONGCIF 11
+#define M_FW_IQ_CMD_FL1CONGCIF 0x1
+#define V_FW_IQ_CMD_FL1CONGCIF(x) ((x) << S_FW_IQ_CMD_FL1CONGCIF)
+#define G_FW_IQ_CMD_FL1CONGCIF(x) \
+ (((x) >> S_FW_IQ_CMD_FL1CONGCIF) & M_FW_IQ_CMD_FL1CONGCIF)
+#define F_FW_IQ_CMD_FL1CONGCIF V_FW_IQ_CMD_FL1CONGCIF(1U)
+
+#define S_FW_IQ_CMD_FL1ONCHIP 10
+#define M_FW_IQ_CMD_FL1ONCHIP 0x1
+#define V_FW_IQ_CMD_FL1ONCHIP(x) ((x) << S_FW_IQ_CMD_FL1ONCHIP)
+#define G_FW_IQ_CMD_FL1ONCHIP(x) \
+ (((x) >> S_FW_IQ_CMD_FL1ONCHIP) & M_FW_IQ_CMD_FL1ONCHIP)
+#define F_FW_IQ_CMD_FL1ONCHIP V_FW_IQ_CMD_FL1ONCHIP(1U)
+
+#define S_FW_IQ_CMD_FL1STATUSPGNS 9
+#define M_FW_IQ_CMD_FL1STATUSPGNS 0x1
+#define V_FW_IQ_CMD_FL1STATUSPGNS(x) ((x) << S_FW_IQ_CMD_FL1STATUSPGNS)
+#define G_FW_IQ_CMD_FL1STATUSPGNS(x) \
+ (((x) >> S_FW_IQ_CMD_FL1STATUSPGNS) & M_FW_IQ_CMD_FL1STATUSPGNS)
+#define F_FW_IQ_CMD_FL1STATUSPGNS V_FW_IQ_CMD_FL1STATUSPGNS(1U)
+
+#define S_FW_IQ_CMD_FL1STATUSPGRO 8
+#define M_FW_IQ_CMD_FL1STATUSPGRO 0x1
+#define V_FW_IQ_CMD_FL1STATUSPGRO(x) ((x) << S_FW_IQ_CMD_FL1STATUSPGRO)
+#define G_FW_IQ_CMD_FL1STATUSPGRO(x) \
+ (((x) >> S_FW_IQ_CMD_FL1STATUSPGRO) & M_FW_IQ_CMD_FL1STATUSPGRO)
+#define F_FW_IQ_CMD_FL1STATUSPGRO V_FW_IQ_CMD_FL1STATUSPGRO(1U)
+
+#define S_FW_IQ_CMD_FL1FETCHNS 7
+#define M_FW_IQ_CMD_FL1FETCHNS 0x1
+#define V_FW_IQ_CMD_FL1FETCHNS(x) ((x) << S_FW_IQ_CMD_FL1FETCHNS)
+#define G_FW_IQ_CMD_FL1FETCHNS(x) \
+ (((x) >> S_FW_IQ_CMD_FL1FETCHNS) & M_FW_IQ_CMD_FL1FETCHNS)
+#define F_FW_IQ_CMD_FL1FETCHNS V_FW_IQ_CMD_FL1FETCHNS(1U)
+
+#define S_FW_IQ_CMD_FL1FETCHRO 6
+#define M_FW_IQ_CMD_FL1FETCHRO 0x1
+#define V_FW_IQ_CMD_FL1FETCHRO(x) ((x) << S_FW_IQ_CMD_FL1FETCHRO)
+#define G_FW_IQ_CMD_FL1FETCHRO(x) \
+ (((x) >> S_FW_IQ_CMD_FL1FETCHRO) & M_FW_IQ_CMD_FL1FETCHRO)
+#define F_FW_IQ_CMD_FL1FETCHRO V_FW_IQ_CMD_FL1FETCHRO(1U)
+
+#define S_FW_IQ_CMD_FL1HOSTFCMODE 4
+#define M_FW_IQ_CMD_FL1HOSTFCMODE 0x3
+#define V_FW_IQ_CMD_FL1HOSTFCMODE(x) ((x) << S_FW_IQ_CMD_FL1HOSTFCMODE)
+#define G_FW_IQ_CMD_FL1HOSTFCMODE(x) \
+ (((x) >> S_FW_IQ_CMD_FL1HOSTFCMODE) & M_FW_IQ_CMD_FL1HOSTFCMODE)
+
+#define S_FW_IQ_CMD_FL1CPRIO 3
+#define M_FW_IQ_CMD_FL1CPRIO 0x1
+#define V_FW_IQ_CMD_FL1CPRIO(x) ((x) << S_FW_IQ_CMD_FL1CPRIO)
+#define G_FW_IQ_CMD_FL1CPRIO(x) \
+ (((x) >> S_FW_IQ_CMD_FL1CPRIO) & M_FW_IQ_CMD_FL1CPRIO)
+#define F_FW_IQ_CMD_FL1CPRIO V_FW_IQ_CMD_FL1CPRIO(1U)
+
+#define S_FW_IQ_CMD_FL1PADEN 2
+#define M_FW_IQ_CMD_FL1PADEN 0x1
+#define V_FW_IQ_CMD_FL1PADEN(x) ((x) << S_FW_IQ_CMD_FL1PADEN)
+#define G_FW_IQ_CMD_FL1PADEN(x) \
+ (((x) >> S_FW_IQ_CMD_FL1PADEN) & M_FW_IQ_CMD_FL1PADEN)
+#define F_FW_IQ_CMD_FL1PADEN V_FW_IQ_CMD_FL1PADEN(1U)
+
+#define S_FW_IQ_CMD_FL1PACKEN 1
+#define M_FW_IQ_CMD_FL1PACKEN 0x1
+#define V_FW_IQ_CMD_FL1PACKEN(x) ((x) << S_FW_IQ_CMD_FL1PACKEN)
+#define G_FW_IQ_CMD_FL1PACKEN(x) \
+ (((x) >> S_FW_IQ_CMD_FL1PACKEN) & M_FW_IQ_CMD_FL1PACKEN)
+#define F_FW_IQ_CMD_FL1PACKEN V_FW_IQ_CMD_FL1PACKEN(1U)
+
+#define S_FW_IQ_CMD_FL1CONGEN 0
+#define M_FW_IQ_CMD_FL1CONGEN 0x1
+#define V_FW_IQ_CMD_FL1CONGEN(x) ((x) << S_FW_IQ_CMD_FL1CONGEN)
+#define G_FW_IQ_CMD_FL1CONGEN(x) \
+ (((x) >> S_FW_IQ_CMD_FL1CONGEN) & M_FW_IQ_CMD_FL1CONGEN)
+#define F_FW_IQ_CMD_FL1CONGEN V_FW_IQ_CMD_FL1CONGEN(1U)
+
+#define S_FW_IQ_CMD_FL1DCAEN 15
+#define M_FW_IQ_CMD_FL1DCAEN 0x1
+#define V_FW_IQ_CMD_FL1DCAEN(x) ((x) << S_FW_IQ_CMD_FL1DCAEN)
+#define G_FW_IQ_CMD_FL1DCAEN(x) \
+ (((x) >> S_FW_IQ_CMD_FL1DCAEN) & M_FW_IQ_CMD_FL1DCAEN)
+#define F_FW_IQ_CMD_FL1DCAEN V_FW_IQ_CMD_FL1DCAEN(1U)
+
+#define S_FW_IQ_CMD_FL1DCACPU 10
+#define M_FW_IQ_CMD_FL1DCACPU 0x1f
+#define V_FW_IQ_CMD_FL1DCACPU(x) ((x) << S_FW_IQ_CMD_FL1DCACPU)
+#define G_FW_IQ_CMD_FL1DCACPU(x) \
+ (((x) >> S_FW_IQ_CMD_FL1DCACPU) & M_FW_IQ_CMD_FL1DCACPU)
+
+#define S_FW_IQ_CMD_FL1FBMIN 7
+#define M_FW_IQ_CMD_FL1FBMIN 0x7
+#define V_FW_IQ_CMD_FL1FBMIN(x) ((x) << S_FW_IQ_CMD_FL1FBMIN)
+#define G_FW_IQ_CMD_FL1FBMIN(x) \
+ (((x) >> S_FW_IQ_CMD_FL1FBMIN) & M_FW_IQ_CMD_FL1FBMIN)
+
+#define S_FW_IQ_CMD_FL1FBMAX 4
+#define M_FW_IQ_CMD_FL1FBMAX 0x7
+#define V_FW_IQ_CMD_FL1FBMAX(x) ((x) << S_FW_IQ_CMD_FL1FBMAX)
+#define G_FW_IQ_CMD_FL1FBMAX(x) \
+ (((x) >> S_FW_IQ_CMD_FL1FBMAX) & M_FW_IQ_CMD_FL1FBMAX)
+
+#define S_FW_IQ_CMD_FL1CIDXFTHRESHO 3
+#define M_FW_IQ_CMD_FL1CIDXFTHRESHO 0x1
+#define V_FW_IQ_CMD_FL1CIDXFTHRESHO(x) ((x) << S_FW_IQ_CMD_FL1CIDXFTHRESHO)
+#define G_FW_IQ_CMD_FL1CIDXFTHRESHO(x) \
+ (((x) >> S_FW_IQ_CMD_FL1CIDXFTHRESHO) & M_FW_IQ_CMD_FL1CIDXFTHRESHO)
+#define F_FW_IQ_CMD_FL1CIDXFTHRESHO V_FW_IQ_CMD_FL1CIDXFTHRESHO(1U)
+
+#define S_FW_IQ_CMD_FL1CIDXFTHRESH 0
+#define M_FW_IQ_CMD_FL1CIDXFTHRESH 0x7
+#define V_FW_IQ_CMD_FL1CIDXFTHRESH(x) ((x) << S_FW_IQ_CMD_FL1CIDXFTHRESH)
+#define G_FW_IQ_CMD_FL1CIDXFTHRESH(x) \
+ (((x) >> S_FW_IQ_CMD_FL1CIDXFTHRESH) & M_FW_IQ_CMD_FL1CIDXFTHRESH)
+
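A minimal sketch (not part of the patch) of allocating and starting an ingress queue with this command; pf, qsize and busaddr are placeholders, and the opcode and request macros (V_FW_CMD_OP, F_FW_CMD_REQUEST, FW_LEN16 and friends) are assumed to be the ones defined earlier in this header.

    struct fw_iq_cmd c;

    memset(&c, 0, sizeof(c));
    c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
        F_FW_CMD_WRITE | F_FW_CMD_EXEC |
        V_FW_IQ_CMD_PFN(pf) | V_FW_IQ_CMD_VFN(0));
    c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
        FW_LEN16(c));
    c.iqsize = htobe16(qsize);          /* number of entries */
    c.iqaddr = htobe64(busaddr);        /* bus address of the queue memory */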
+struct fw_eq_mngt_cmd {
+ __be32 op_to_vfn;
+ __be32 alloc_to_len16;
+ __be32 cmpliqid_eqid;
+ __be32 physeqid_pkd;
+ __be32 fetchszm_to_iqid;
+ __be32 dcaen_to_eqsize;
+ __be64 eqaddr;
+};
+
+#define S_FW_EQ_MNGT_CMD_PFN 8
+#define M_FW_EQ_MNGT_CMD_PFN 0x7
+#define V_FW_EQ_MNGT_CMD_PFN(x) ((x) << S_FW_EQ_MNGT_CMD_PFN)
+#define G_FW_EQ_MNGT_CMD_PFN(x) \
+ (((x) >> S_FW_EQ_MNGT_CMD_PFN) & M_FW_EQ_MNGT_CMD_PFN)
+
+#define S_FW_EQ_MNGT_CMD_VFN 0
+#define M_FW_EQ_MNGT_CMD_VFN 0xff
+#define V_FW_EQ_MNGT_CMD_VFN(x) ((x) << S_FW_EQ_MNGT_CMD_VFN)
+#define G_FW_EQ_MNGT_CMD_VFN(x) \
+ (((x) >> S_FW_EQ_MNGT_CMD_VFN) & M_FW_EQ_MNGT_CMD_VFN)
+
+#define S_FW_EQ_MNGT_CMD_ALLOC 31
+#define M_FW_EQ_MNGT_CMD_ALLOC 0x1
+#define V_FW_EQ_MNGT_CMD_ALLOC(x) ((x) << S_FW_EQ_MNGT_CMD_ALLOC)
+#define G_FW_EQ_MNGT_CMD_ALLOC(x) \
+ (((x) >> S_FW_EQ_MNGT_CMD_ALLOC) & M_FW_EQ_MNGT_CMD_ALLOC)
+#define F_FW_EQ_MNGT_CMD_ALLOC V_FW_EQ_MNGT_CMD_ALLOC(1U)
+
+#define S_FW_EQ_MNGT_CMD_FREE 30
+#define M_FW_EQ_MNGT_CMD_FREE 0x1
+#define V_FW_EQ_MNGT_CMD_FREE(x) ((x) << S_FW_EQ_MNGT_CMD_FREE)
+#define G_FW_EQ_MNGT_CMD_FREE(x) \
+ (((x) >> S_FW_EQ_MNGT_CMD_FREE) & M_FW_EQ_MNGT_CMD_FREE)
+#define F_FW_EQ_MNGT_CMD_FREE V_FW_EQ_MNGT_CMD_FREE(1U)
+
+#define S_FW_EQ_MNGT_CMD_MODIFY 29
+#define M_FW_EQ_MNGT_CMD_MODIFY 0x1
+#define V_FW_EQ_MNGT_CMD_MODIFY(x) ((x) << S_FW_EQ_MNGT_CMD_MODIFY)
+#define G_FW_EQ_MNGT_CMD_MODIFY(x) \
+ (((x) >> S_FW_EQ_MNGT_CMD_MODIFY) & M_FW_EQ_MNGT_CMD_MODIFY)
+#define F_FW_EQ_MNGT_CMD_MODIFY V_FW_EQ_MNGT_CMD_MODIFY(1U)
+
+#define S_FW_EQ_MNGT_CMD_EQSTART 28
+#define M_FW_EQ_MNGT_CMD_EQSTART 0x1
+#define V_FW_EQ_MNGT_CMD_EQSTART(x) ((x) << S_FW_EQ_MNGT_CMD_EQSTART)
+#define G_FW_EQ_MNGT_CMD_EQSTART(x) \
+ (((x) >> S_FW_EQ_MNGT_CMD_EQSTART) & M_FW_EQ_MNGT_CMD_EQSTART)
+#define F_FW_EQ_MNGT_CMD_EQSTART V_FW_EQ_MNGT_CMD_EQSTART(1U)
+
+#define S_FW_EQ_MNGT_CMD_EQSTOP 27
+#define M_FW_EQ_MNGT_CMD_EQSTOP 0x1
+#define V_FW_EQ_MNGT_CMD_EQSTOP(x) ((x) << S_FW_EQ_MNGT_CMD_EQSTOP)
+#define G_FW_EQ_MNGT_CMD_EQSTOP(x) \
+ (((x) >> S_FW_EQ_MNGT_CMD_EQSTOP) & M_FW_EQ_MNGT_CMD_EQSTOP)
+#define F_FW_EQ_MNGT_CMD_EQSTOP V_FW_EQ_MNGT_CMD_EQSTOP(1U)
+
+#define S_FW_EQ_MNGT_CMD_CMPLIQID 20
+#define M_FW_EQ_MNGT_CMD_CMPLIQID 0xfff
+#define V_FW_EQ_MNGT_CMD_CMPLIQID(x) ((x) << S_FW_EQ_MNGT_CMD_CMPLIQID)
+#define G_FW_EQ_MNGT_CMD_CMPLIQID(x) \
+ (((x) >> S_FW_EQ_MNGT_CMD_CMPLIQID) & M_FW_EQ_MNGT_CMD_CMPLIQID)
+
+#define S_FW_EQ_MNGT_CMD_EQID 0
+#define M_FW_EQ_MNGT_CMD_EQID 0xfffff
+#define V_FW_EQ_MNGT_CMD_EQID(x) ((x) << S_FW_EQ_MNGT_CMD_EQID)
+#define G_FW_EQ_MNGT_CMD_EQID(x) \
+ (((x) >> S_FW_EQ_MNGT_CMD_EQID) & M_FW_EQ_MNGT_CMD_EQID)
+
+#define S_FW_EQ_MNGT_CMD_PHYSEQID 0
+#define M_FW_EQ_MNGT_CMD_PHYSEQID 0xfffff
+#define V_FW_EQ_MNGT_CMD_PHYSEQID(x) ((x) << S_FW_EQ_MNGT_CMD_PHYSEQID)
+#define G_FW_EQ_MNGT_CMD_PHYSEQID(x) \
+ (((x) >> S_FW_EQ_MNGT_CMD_PHYSEQID) & M_FW_EQ_MNGT_CMD_PHYSEQID)
+
+#define S_FW_EQ_MNGT_CMD_FETCHSZM 26
+#define M_FW_EQ_MNGT_CMD_FETCHSZM 0x1
+#define V_FW_EQ_MNGT_CMD_FETCHSZM(x) ((x) << S_FW_EQ_MNGT_CMD_FETCHSZM)
+#define G_FW_EQ_MNGT_CMD_FETCHSZM(x) \
+ (((x) >> S_FW_EQ_MNGT_CMD_FETCHSZM) & M_FW_EQ_MNGT_CMD_FETCHSZM)
+#define F_FW_EQ_MNGT_CMD_FETCHSZM V_FW_EQ_MNGT_CMD_FETCHSZM(1U)
+
+#define S_FW_EQ_MNGT_CMD_STATUSPGNS 25
+#define M_FW_EQ_MNGT_CMD_STATUSPGNS 0x1
+#define V_FW_EQ_MNGT_CMD_STATUSPGNS(x) ((x) << S_FW_EQ_MNGT_CMD_STATUSPGNS)
+#define G_FW_EQ_MNGT_CMD_STATUSPGNS(x) \
+ (((x) >> S_FW_EQ_MNGT_CMD_STATUSPGNS) & M_FW_EQ_MNGT_CMD_STATUSPGNS)
+#define F_FW_EQ_MNGT_CMD_STATUSPGNS V_FW_EQ_MNGT_CMD_STATUSPGNS(1U)
+
+#define S_FW_EQ_MNGT_CMD_STATUSPGRO 24
+#define M_FW_EQ_MNGT_CMD_STATUSPGRO 0x1
+#define V_FW_EQ_MNGT_CMD_STATUSPGRO(x) ((x) << S_FW_EQ_MNGT_CMD_STATUSPGRO)
+#define G_FW_EQ_MNGT_CMD_STATUSPGRO(x) \
+ (((x) >> S_FW_EQ_MNGT_CMD_STATUSPGRO) & M_FW_EQ_MNGT_CMD_STATUSPGRO)
+#define F_FW_EQ_MNGT_CMD_STATUSPGRO V_FW_EQ_MNGT_CMD_STATUSPGRO(1U)
+
+#define S_FW_EQ_MNGT_CMD_FETCHNS 23
+#define M_FW_EQ_MNGT_CMD_FETCHNS 0x1
+#define V_FW_EQ_MNGT_CMD_FETCHNS(x) ((x) << S_FW_EQ_MNGT_CMD_FETCHNS)
+#define G_FW_EQ_MNGT_CMD_FETCHNS(x) \
+ (((x) >> S_FW_EQ_MNGT_CMD_FETCHNS) & M_FW_EQ_MNGT_CMD_FETCHNS)
+#define F_FW_EQ_MNGT_CMD_FETCHNS V_FW_EQ_MNGT_CMD_FETCHNS(1U)
+
+#define S_FW_EQ_MNGT_CMD_FETCHRO 22
+#define M_FW_EQ_MNGT_CMD_FETCHRO 0x1
+#define V_FW_EQ_MNGT_CMD_FETCHRO(x) ((x) << S_FW_EQ_MNGT_CMD_FETCHRO)
+#define G_FW_EQ_MNGT_CMD_FETCHRO(x) \
+ (((x) >> S_FW_EQ_MNGT_CMD_FETCHRO) & M_FW_EQ_MNGT_CMD_FETCHRO)
+#define F_FW_EQ_MNGT_CMD_FETCHRO V_FW_EQ_MNGT_CMD_FETCHRO(1U)
+
+#define S_FW_EQ_MNGT_CMD_HOSTFCMODE 20
+#define M_FW_EQ_MNGT_CMD_HOSTFCMODE 0x3
+#define V_FW_EQ_MNGT_CMD_HOSTFCMODE(x) ((x) << S_FW_EQ_MNGT_CMD_HOSTFCMODE)
+#define G_FW_EQ_MNGT_CMD_HOSTFCMODE(x) \
+ (((x) >> S_FW_EQ_MNGT_CMD_HOSTFCMODE) & M_FW_EQ_MNGT_CMD_HOSTFCMODE)
+
+#define S_FW_EQ_MNGT_CMD_CPRIO 19
+#define M_FW_EQ_MNGT_CMD_CPRIO 0x1
+#define V_FW_EQ_MNGT_CMD_CPRIO(x) ((x) << S_FW_EQ_MNGT_CMD_CPRIO)
+#define G_FW_EQ_MNGT_CMD_CPRIO(x) \
+ (((x) >> S_FW_EQ_MNGT_CMD_CPRIO) & M_FW_EQ_MNGT_CMD_CPRIO)
+#define F_FW_EQ_MNGT_CMD_CPRIO V_FW_EQ_MNGT_CMD_CPRIO(1U)
+
+#define S_FW_EQ_MNGT_CMD_ONCHIP 18
+#define M_FW_EQ_MNGT_CMD_ONCHIP 0x1
+#define V_FW_EQ_MNGT_CMD_ONCHIP(x) ((x) << S_FW_EQ_MNGT_CMD_ONCHIP)
+#define G_FW_EQ_MNGT_CMD_ONCHIP(x) \
+ (((x) >> S_FW_EQ_MNGT_CMD_ONCHIP) & M_FW_EQ_MNGT_CMD_ONCHIP)
+#define F_FW_EQ_MNGT_CMD_ONCHIP V_FW_EQ_MNGT_CMD_ONCHIP(1U)
+
+#define S_FW_EQ_MNGT_CMD_PCIECHN 16
+#define M_FW_EQ_MNGT_CMD_PCIECHN 0x3
+#define V_FW_EQ_MNGT_CMD_PCIECHN(x) ((x) << S_FW_EQ_MNGT_CMD_PCIECHN)
+#define G_FW_EQ_MNGT_CMD_PCIECHN(x) \
+ (((x) >> S_FW_EQ_MNGT_CMD_PCIECHN) & M_FW_EQ_MNGT_CMD_PCIECHN)
+
+#define S_FW_EQ_MNGT_CMD_IQID 0
+#define M_FW_EQ_MNGT_CMD_IQID 0xffff
+#define V_FW_EQ_MNGT_CMD_IQID(x) ((x) << S_FW_EQ_MNGT_CMD_IQID)
+#define G_FW_EQ_MNGT_CMD_IQID(x) \
+ (((x) >> S_FW_EQ_MNGT_CMD_IQID) & M_FW_EQ_MNGT_CMD_IQID)
+
+#define S_FW_EQ_MNGT_CMD_DCAEN 31
+#define M_FW_EQ_MNGT_CMD_DCAEN 0x1
+#define V_FW_EQ_MNGT_CMD_DCAEN(x) ((x) << S_FW_EQ_MNGT_CMD_DCAEN)
+#define G_FW_EQ_MNGT_CMD_DCAEN(x) \
+ (((x) >> S_FW_EQ_MNGT_CMD_DCAEN) & M_FW_EQ_MNGT_CMD_DCAEN)
+#define F_FW_EQ_MNGT_CMD_DCAEN V_FW_EQ_MNGT_CMD_DCAEN(1U)
+
+#define S_FW_EQ_MNGT_CMD_DCACPU 26
+#define M_FW_EQ_MNGT_CMD_DCACPU 0x1f
+#define V_FW_EQ_MNGT_CMD_DCACPU(x) ((x) << S_FW_EQ_MNGT_CMD_DCACPU)
+#define G_FW_EQ_MNGT_CMD_DCACPU(x) \
+ (((x) >> S_FW_EQ_MNGT_CMD_DCACPU) & M_FW_EQ_MNGT_CMD_DCACPU)
+
+#define S_FW_EQ_MNGT_CMD_FBMIN 23
+#define M_FW_EQ_MNGT_CMD_FBMIN 0x7
+#define V_FW_EQ_MNGT_CMD_FBMIN(x) ((x) << S_FW_EQ_MNGT_CMD_FBMIN)
+#define G_FW_EQ_MNGT_CMD_FBMIN(x) \
+ (((x) >> S_FW_EQ_MNGT_CMD_FBMIN) & M_FW_EQ_MNGT_CMD_FBMIN)
+
+#define S_FW_EQ_MNGT_CMD_FBMAX 20
+#define M_FW_EQ_MNGT_CMD_FBMAX 0x7
+#define V_FW_EQ_MNGT_CMD_FBMAX(x) ((x) << S_FW_EQ_MNGT_CMD_FBMAX)
+#define G_FW_EQ_MNGT_CMD_FBMAX(x) \
+ (((x) >> S_FW_EQ_MNGT_CMD_FBMAX) & M_FW_EQ_MNGT_CMD_FBMAX)
+
+#define S_FW_EQ_MNGT_CMD_CIDXFTHRESHO 19
+#define M_FW_EQ_MNGT_CMD_CIDXFTHRESHO 0x1
+#define V_FW_EQ_MNGT_CMD_CIDXFTHRESHO(x) \
+ ((x) << S_FW_EQ_MNGT_CMD_CIDXFTHRESHO)
+#define G_FW_EQ_MNGT_CMD_CIDXFTHRESHO(x) \
+ (((x) >> S_FW_EQ_MNGT_CMD_CIDXFTHRESHO) & M_FW_EQ_MNGT_CMD_CIDXFTHRESHO)
+#define F_FW_EQ_MNGT_CMD_CIDXFTHRESHO V_FW_EQ_MNGT_CMD_CIDXFTHRESHO(1U)
+
+#define S_FW_EQ_MNGT_CMD_CIDXFTHRESH 16
+#define M_FW_EQ_MNGT_CMD_CIDXFTHRESH 0x7
+#define V_FW_EQ_MNGT_CMD_CIDXFTHRESH(x) ((x) << S_FW_EQ_MNGT_CMD_CIDXFTHRESH)
+#define G_FW_EQ_MNGT_CMD_CIDXFTHRESH(x) \
+ (((x) >> S_FW_EQ_MNGT_CMD_CIDXFTHRESH) & M_FW_EQ_MNGT_CMD_CIDXFTHRESH)
+
+#define S_FW_EQ_MNGT_CMD_EQSIZE 0
+#define M_FW_EQ_MNGT_CMD_EQSIZE 0xffff
+#define V_FW_EQ_MNGT_CMD_EQSIZE(x) ((x) << S_FW_EQ_MNGT_CMD_EQSIZE)
+#define G_FW_EQ_MNGT_CMD_EQSIZE(x) \
+ (((x) >> S_FW_EQ_MNGT_CMD_EQSIZE) & M_FW_EQ_MNGT_CMD_EQSIZE)
+
+struct fw_eq_eth_cmd {
+ __be32 op_to_vfn;
+ __be32 alloc_to_len16;
+ __be32 eqid_pkd;
+ __be32 physeqid_pkd;
+ __be32 fetchszm_to_iqid;
+ __be32 dcaen_to_eqsize;
+ __be64 eqaddr;
+ __be32 viid_pkd;
+ __be32 r8_lo;
+ __be64 r9;
+};
+
+#define S_FW_EQ_ETH_CMD_PFN 8
+#define M_FW_EQ_ETH_CMD_PFN 0x7
+#define V_FW_EQ_ETH_CMD_PFN(x) ((x) << S_FW_EQ_ETH_CMD_PFN)
+#define G_FW_EQ_ETH_CMD_PFN(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_PFN) & M_FW_EQ_ETH_CMD_PFN)
+
+#define S_FW_EQ_ETH_CMD_VFN 0
+#define M_FW_EQ_ETH_CMD_VFN 0xff
+#define V_FW_EQ_ETH_CMD_VFN(x) ((x) << S_FW_EQ_ETH_CMD_VFN)
+#define G_FW_EQ_ETH_CMD_VFN(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_VFN) & M_FW_EQ_ETH_CMD_VFN)
+
+#define S_FW_EQ_ETH_CMD_ALLOC 31
+#define M_FW_EQ_ETH_CMD_ALLOC 0x1
+#define V_FW_EQ_ETH_CMD_ALLOC(x) ((x) << S_FW_EQ_ETH_CMD_ALLOC)
+#define G_FW_EQ_ETH_CMD_ALLOC(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_ALLOC) & M_FW_EQ_ETH_CMD_ALLOC)
+#define F_FW_EQ_ETH_CMD_ALLOC V_FW_EQ_ETH_CMD_ALLOC(1U)
+
+#define S_FW_EQ_ETH_CMD_FREE 30
+#define M_FW_EQ_ETH_CMD_FREE 0x1
+#define V_FW_EQ_ETH_CMD_FREE(x) ((x) << S_FW_EQ_ETH_CMD_FREE)
+#define G_FW_EQ_ETH_CMD_FREE(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_FREE) & M_FW_EQ_ETH_CMD_FREE)
+#define F_FW_EQ_ETH_CMD_FREE V_FW_EQ_ETH_CMD_FREE(1U)
+
+#define S_FW_EQ_ETH_CMD_MODIFY 29
+#define M_FW_EQ_ETH_CMD_MODIFY 0x1
+#define V_FW_EQ_ETH_CMD_MODIFY(x) ((x) << S_FW_EQ_ETH_CMD_MODIFY)
+#define G_FW_EQ_ETH_CMD_MODIFY(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_MODIFY) & M_FW_EQ_ETH_CMD_MODIFY)
+#define F_FW_EQ_ETH_CMD_MODIFY V_FW_EQ_ETH_CMD_MODIFY(1U)
+
+#define S_FW_EQ_ETH_CMD_EQSTART 28
+#define M_FW_EQ_ETH_CMD_EQSTART 0x1
+#define V_FW_EQ_ETH_CMD_EQSTART(x) ((x) << S_FW_EQ_ETH_CMD_EQSTART)
+#define G_FW_EQ_ETH_CMD_EQSTART(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_EQSTART) & M_FW_EQ_ETH_CMD_EQSTART)
+#define F_FW_EQ_ETH_CMD_EQSTART V_FW_EQ_ETH_CMD_EQSTART(1U)
+
+#define S_FW_EQ_ETH_CMD_EQSTOP 27
+#define M_FW_EQ_ETH_CMD_EQSTOP 0x1
+#define V_FW_EQ_ETH_CMD_EQSTOP(x) ((x) << S_FW_EQ_ETH_CMD_EQSTOP)
+#define G_FW_EQ_ETH_CMD_EQSTOP(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_EQSTOP) & M_FW_EQ_ETH_CMD_EQSTOP)
+#define F_FW_EQ_ETH_CMD_EQSTOP V_FW_EQ_ETH_CMD_EQSTOP(1U)
+
+#define S_FW_EQ_ETH_CMD_EQID 0
+#define M_FW_EQ_ETH_CMD_EQID 0xfffff
+#define V_FW_EQ_ETH_CMD_EQID(x) ((x) << S_FW_EQ_ETH_CMD_EQID)
+#define G_FW_EQ_ETH_CMD_EQID(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_EQID) & M_FW_EQ_ETH_CMD_EQID)
+
+#define S_FW_EQ_ETH_CMD_PHYSEQID 0
+#define M_FW_EQ_ETH_CMD_PHYSEQID 0xfffff
+#define V_FW_EQ_ETH_CMD_PHYSEQID(x) ((x) << S_FW_EQ_ETH_CMD_PHYSEQID)
+#define G_FW_EQ_ETH_CMD_PHYSEQID(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_PHYSEQID) & M_FW_EQ_ETH_CMD_PHYSEQID)
+
+#define S_FW_EQ_ETH_CMD_FETCHSZM 26
+#define M_FW_EQ_ETH_CMD_FETCHSZM 0x1
+#define V_FW_EQ_ETH_CMD_FETCHSZM(x) ((x) << S_FW_EQ_ETH_CMD_FETCHSZM)
+#define G_FW_EQ_ETH_CMD_FETCHSZM(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_FETCHSZM) & M_FW_EQ_ETH_CMD_FETCHSZM)
+#define F_FW_EQ_ETH_CMD_FETCHSZM V_FW_EQ_ETH_CMD_FETCHSZM(1U)
+
+#define S_FW_EQ_ETH_CMD_STATUSPGNS 25
+#define M_FW_EQ_ETH_CMD_STATUSPGNS 0x1
+#define V_FW_EQ_ETH_CMD_STATUSPGNS(x) ((x) << S_FW_EQ_ETH_CMD_STATUSPGNS)
+#define G_FW_EQ_ETH_CMD_STATUSPGNS(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_STATUSPGNS) & M_FW_EQ_ETH_CMD_STATUSPGNS)
+#define F_FW_EQ_ETH_CMD_STATUSPGNS V_FW_EQ_ETH_CMD_STATUSPGNS(1U)
+
+#define S_FW_EQ_ETH_CMD_STATUSPGRO 24
+#define M_FW_EQ_ETH_CMD_STATUSPGRO 0x1
+#define V_FW_EQ_ETH_CMD_STATUSPGRO(x) ((x) << S_FW_EQ_ETH_CMD_STATUSPGRO)
+#define G_FW_EQ_ETH_CMD_STATUSPGRO(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_STATUSPGRO) & M_FW_EQ_ETH_CMD_STATUSPGRO)
+#define F_FW_EQ_ETH_CMD_STATUSPGRO V_FW_EQ_ETH_CMD_STATUSPGRO(1U)
+
+#define S_FW_EQ_ETH_CMD_FETCHNS 23
+#define M_FW_EQ_ETH_CMD_FETCHNS 0x1
+#define V_FW_EQ_ETH_CMD_FETCHNS(x) ((x) << S_FW_EQ_ETH_CMD_FETCHNS)
+#define G_FW_EQ_ETH_CMD_FETCHNS(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_FETCHNS) & M_FW_EQ_ETH_CMD_FETCHNS)
+#define F_FW_EQ_ETH_CMD_FETCHNS V_FW_EQ_ETH_CMD_FETCHNS(1U)
+
+#define S_FW_EQ_ETH_CMD_FETCHRO 22
+#define M_FW_EQ_ETH_CMD_FETCHRO 0x1
+#define V_FW_EQ_ETH_CMD_FETCHRO(x) ((x) << S_FW_EQ_ETH_CMD_FETCHRO)
+#define G_FW_EQ_ETH_CMD_FETCHRO(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_FETCHRO) & M_FW_EQ_ETH_CMD_FETCHRO)
+#define F_FW_EQ_ETH_CMD_FETCHRO V_FW_EQ_ETH_CMD_FETCHRO(1U)
+
+#define S_FW_EQ_ETH_CMD_HOSTFCMODE 20
+#define M_FW_EQ_ETH_CMD_HOSTFCMODE 0x3
+#define V_FW_EQ_ETH_CMD_HOSTFCMODE(x) ((x) << S_FW_EQ_ETH_CMD_HOSTFCMODE)
+#define G_FW_EQ_ETH_CMD_HOSTFCMODE(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_HOSTFCMODE) & M_FW_EQ_ETH_CMD_HOSTFCMODE)
+
+#define S_FW_EQ_ETH_CMD_CPRIO 19
+#define M_FW_EQ_ETH_CMD_CPRIO 0x1
+#define V_FW_EQ_ETH_CMD_CPRIO(x) ((x) << S_FW_EQ_ETH_CMD_CPRIO)
+#define G_FW_EQ_ETH_CMD_CPRIO(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_CPRIO) & M_FW_EQ_ETH_CMD_CPRIO)
+#define F_FW_EQ_ETH_CMD_CPRIO V_FW_EQ_ETH_CMD_CPRIO(1U)
+
+#define S_FW_EQ_ETH_CMD_ONCHIP 18
+#define M_FW_EQ_ETH_CMD_ONCHIP 0x1
+#define V_FW_EQ_ETH_CMD_ONCHIP(x) ((x) << S_FW_EQ_ETH_CMD_ONCHIP)
+#define G_FW_EQ_ETH_CMD_ONCHIP(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_ONCHIP) & M_FW_EQ_ETH_CMD_ONCHIP)
+#define F_FW_EQ_ETH_CMD_ONCHIP V_FW_EQ_ETH_CMD_ONCHIP(1U)
+
+#define S_FW_EQ_ETH_CMD_PCIECHN 16
+#define M_FW_EQ_ETH_CMD_PCIECHN 0x3
+#define V_FW_EQ_ETH_CMD_PCIECHN(x) ((x) << S_FW_EQ_ETH_CMD_PCIECHN)
+#define G_FW_EQ_ETH_CMD_PCIECHN(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_PCIECHN) & M_FW_EQ_ETH_CMD_PCIECHN)
+
+#define S_FW_EQ_ETH_CMD_IQID 0
+#define M_FW_EQ_ETH_CMD_IQID 0xffff
+#define V_FW_EQ_ETH_CMD_IQID(x) ((x) << S_FW_EQ_ETH_CMD_IQID)
+#define G_FW_EQ_ETH_CMD_IQID(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_IQID) & M_FW_EQ_ETH_CMD_IQID)
+
+#define S_FW_EQ_ETH_CMD_DCAEN 31
+#define M_FW_EQ_ETH_CMD_DCAEN 0x1
+#define V_FW_EQ_ETH_CMD_DCAEN(x) ((x) << S_FW_EQ_ETH_CMD_DCAEN)
+#define G_FW_EQ_ETH_CMD_DCAEN(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_DCAEN) & M_FW_EQ_ETH_CMD_DCAEN)
+#define F_FW_EQ_ETH_CMD_DCAEN V_FW_EQ_ETH_CMD_DCAEN(1U)
+
+#define S_FW_EQ_ETH_CMD_DCACPU 26
+#define M_FW_EQ_ETH_CMD_DCACPU 0x1f
+#define V_FW_EQ_ETH_CMD_DCACPU(x) ((x) << S_FW_EQ_ETH_CMD_DCACPU)
+#define G_FW_EQ_ETH_CMD_DCACPU(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_DCACPU) & M_FW_EQ_ETH_CMD_DCACPU)
+
+#define S_FW_EQ_ETH_CMD_FBMIN 23
+#define M_FW_EQ_ETH_CMD_FBMIN 0x7
+#define V_FW_EQ_ETH_CMD_FBMIN(x) ((x) << S_FW_EQ_ETH_CMD_FBMIN)
+#define G_FW_EQ_ETH_CMD_FBMIN(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_FBMIN) & M_FW_EQ_ETH_CMD_FBMIN)
+
+#define S_FW_EQ_ETH_CMD_FBMAX 20
+#define M_FW_EQ_ETH_CMD_FBMAX 0x7
+#define V_FW_EQ_ETH_CMD_FBMAX(x) ((x) << S_FW_EQ_ETH_CMD_FBMAX)
+#define G_FW_EQ_ETH_CMD_FBMAX(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_FBMAX) & M_FW_EQ_ETH_CMD_FBMAX)
+
+#define S_FW_EQ_ETH_CMD_CIDXFTHRESHO 19
+#define M_FW_EQ_ETH_CMD_CIDXFTHRESHO 0x1
+#define V_FW_EQ_ETH_CMD_CIDXFTHRESHO(x) ((x) << S_FW_EQ_ETH_CMD_CIDXFTHRESHO)
+#define G_FW_EQ_ETH_CMD_CIDXFTHRESHO(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_CIDXFTHRESHO) & M_FW_EQ_ETH_CMD_CIDXFTHRESHO)
+#define F_FW_EQ_ETH_CMD_CIDXFTHRESHO V_FW_EQ_ETH_CMD_CIDXFTHRESHO(1U)
+
+#define S_FW_EQ_ETH_CMD_CIDXFTHRESH 16
+#define M_FW_EQ_ETH_CMD_CIDXFTHRESH 0x7
+#define V_FW_EQ_ETH_CMD_CIDXFTHRESH(x) ((x) << S_FW_EQ_ETH_CMD_CIDXFTHRESH)
+#define G_FW_EQ_ETH_CMD_CIDXFTHRESH(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_CIDXFTHRESH) & M_FW_EQ_ETH_CMD_CIDXFTHRESH)
+
+#define S_FW_EQ_ETH_CMD_EQSIZE 0
+#define M_FW_EQ_ETH_CMD_EQSIZE 0xffff
+#define V_FW_EQ_ETH_CMD_EQSIZE(x) ((x) << S_FW_EQ_ETH_CMD_EQSIZE)
+#define G_FW_EQ_ETH_CMD_EQSIZE(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_EQSIZE) & M_FW_EQ_ETH_CMD_EQSIZE)
+
+#define S_FW_EQ_ETH_CMD_VIID 16
+#define M_FW_EQ_ETH_CMD_VIID 0xfff
+#define V_FW_EQ_ETH_CMD_VIID(x) ((x) << S_FW_EQ_ETH_CMD_VIID)
+#define G_FW_EQ_ETH_CMD_VIID(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_VIID) & M_FW_EQ_ETH_CMD_VIID)
+
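A minimal sketch (not part of the patch) of allocating an Ethernet egress queue and binding it to an ingress queue and a virtual interface; iqid, viid, pf, qsize and busaddr are placeholders, and the opcode and request macros are again assumed from earlier in this header.

    struct fw_eq_eth_cmd c;

    memset(&c, 0, sizeof(c));
    c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
        F_FW_CMD_WRITE | F_FW_CMD_EXEC |
        V_FW_EQ_ETH_CMD_PFN(pf) | V_FW_EQ_ETH_CMD_VFN(0));
    c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC |
        F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
    c.fetchszm_to_iqid = htobe32(V_FW_EQ_ETH_CMD_IQID(iqid));   /* completions go to this ingress queue */
    c.dcaen_to_eqsize = htobe32(V_FW_EQ_ETH_CMD_EQSIZE(qsize));
    c.eqaddr = htobe64(busaddr);
    c.viid_pkd = htobe32(V_FW_EQ_ETH_CMD_VIID(viid));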
+struct fw_eq_ctrl_cmd {
+ __be32 op_to_vfn;
+ __be32 alloc_to_len16;
+ __be32 cmpliqid_eqid;
+ __be32 physeqid_pkd;
+ __be32 fetchszm_to_iqid;
+ __be32 dcaen_to_eqsize;
+ __be64 eqaddr;
+};
+
+#define S_FW_EQ_CTRL_CMD_PFN 8
+#define M_FW_EQ_CTRL_CMD_PFN 0x7
+#define V_FW_EQ_CTRL_CMD_PFN(x) ((x) << S_FW_EQ_CTRL_CMD_PFN)
+#define G_FW_EQ_CTRL_CMD_PFN(x) \
+ (((x) >> S_FW_EQ_CTRL_CMD_PFN) & M_FW_EQ_CTRL_CMD_PFN)
+
+#define S_FW_EQ_CTRL_CMD_VFN 0
+#define M_FW_EQ_CTRL_CMD_VFN 0xff
+#define V_FW_EQ_CTRL_CMD_VFN(x) ((x) << S_FW_EQ_CTRL_CMD_VFN)
+#define G_FW_EQ_CTRL_CMD_VFN(x) \
+ (((x) >> S_FW_EQ_CTRL_CMD_VFN) & M_FW_EQ_CTRL_CMD_VFN)
+
+#define S_FW_EQ_CTRL_CMD_ALLOC 31
+#define M_FW_EQ_CTRL_CMD_ALLOC 0x1
+#define V_FW_EQ_CTRL_CMD_ALLOC(x) ((x) << S_FW_EQ_CTRL_CMD_ALLOC)
+#define G_FW_EQ_CTRL_CMD_ALLOC(x) \
+ (((x) >> S_FW_EQ_CTRL_CMD_ALLOC) & M_FW_EQ_CTRL_CMD_ALLOC)
+#define F_FW_EQ_CTRL_CMD_ALLOC V_FW_EQ_CTRL_CMD_ALLOC(1U)
+
+#define S_FW_EQ_CTRL_CMD_FREE 30
+#define M_FW_EQ_CTRL_CMD_FREE 0x1
+#define V_FW_EQ_CTRL_CMD_FREE(x) ((x) << S_FW_EQ_CTRL_CMD_FREE)
+#define G_FW_EQ_CTRL_CMD_FREE(x) \
+ (((x) >> S_FW_EQ_CTRL_CMD_FREE) & M_FW_EQ_CTRL_CMD_FREE)
+#define F_FW_EQ_CTRL_CMD_FREE V_FW_EQ_CTRL_CMD_FREE(1U)
+
+#define S_FW_EQ_CTRL_CMD_MODIFY 29
+#define M_FW_EQ_CTRL_CMD_MODIFY 0x1
+#define V_FW_EQ_CTRL_CMD_MODIFY(x) ((x) << S_FW_EQ_CTRL_CMD_MODIFY)
+#define G_FW_EQ_CTRL_CMD_MODIFY(x) \
+ (((x) >> S_FW_EQ_CTRL_CMD_MODIFY) & M_FW_EQ_CTRL_CMD_MODIFY)
+#define F_FW_EQ_CTRL_CMD_MODIFY V_FW_EQ_CTRL_CMD_MODIFY(1U)
+
+#define S_FW_EQ_CTRL_CMD_EQSTART 28
+#define M_FW_EQ_CTRL_CMD_EQSTART 0x1
+#define V_FW_EQ_CTRL_CMD_EQSTART(x) ((x) << S_FW_EQ_CTRL_CMD_EQSTART)
+#define G_FW_EQ_CTRL_CMD_EQSTART(x) \
+ (((x) >> S_FW_EQ_CTRL_CMD_EQSTART) & M_FW_EQ_CTRL_CMD_EQSTART)
+#define F_FW_EQ_CTRL_CMD_EQSTART V_FW_EQ_CTRL_CMD_EQSTART(1U)
+
+#define S_FW_EQ_CTRL_CMD_EQSTOP 27
+#define M_FW_EQ_CTRL_CMD_EQSTOP 0x1
+#define V_FW_EQ_CTRL_CMD_EQSTOP(x) ((x) << S_FW_EQ_CTRL_CMD_EQSTOP)
+#define G_FW_EQ_CTRL_CMD_EQSTOP(x) \
+ (((x) >> S_FW_EQ_CTRL_CMD_EQSTOP) & M_FW_EQ_CTRL_CMD_EQSTOP)
+#define F_FW_EQ_CTRL_CMD_EQSTOP V_FW_EQ_CTRL_CMD_EQSTOP(1U)
+
+#define S_FW_EQ_CTRL_CMD_CMPLIQID 20
+#define M_FW_EQ_CTRL_CMD_CMPLIQID 0xfff
+#define V_FW_EQ_CTRL_CMD_CMPLIQID(x) ((x) << S_FW_EQ_CTRL_CMD_CMPLIQID)
+#define G_FW_EQ_CTRL_CMD_CMPLIQID(x) \
+ (((x) >> S_FW_EQ_CTRL_CMD_CMPLIQID) & M_FW_EQ_CTRL_CMD_CMPLIQID)
+
+#define S_FW_EQ_CTRL_CMD_EQID 0
+#define M_FW_EQ_CTRL_CMD_EQID 0xfffff
+#define V_FW_EQ_CTRL_CMD_EQID(x) ((x) << S_FW_EQ_CTRL_CMD_EQID)
+#define G_FW_EQ_CTRL_CMD_EQID(x) \
+ (((x) >> S_FW_EQ_CTRL_CMD_EQID) & M_FW_EQ_CTRL_CMD_EQID)
+
+#define S_FW_EQ_CTRL_CMD_PHYSEQID 0
+#define M_FW_EQ_CTRL_CMD_PHYSEQID 0xfffff
+#define V_FW_EQ_CTRL_CMD_PHYSEQID(x) ((x) << S_FW_EQ_CTRL_CMD_PHYSEQID)
+#define G_FW_EQ_CTRL_CMD_PHYSEQID(x) \
+ (((x) >> S_FW_EQ_CTRL_CMD_PHYSEQID) & M_FW_EQ_CTRL_CMD_PHYSEQID)
+
+#define S_FW_EQ_CTRL_CMD_FETCHSZM 26
+#define M_FW_EQ_CTRL_CMD_FETCHSZM 0x1
+#define V_FW_EQ_CTRL_CMD_FETCHSZM(x) ((x) << S_FW_EQ_CTRL_CMD_FETCHSZM)
+#define G_FW_EQ_CTRL_CMD_FETCHSZM(x) \
+ (((x) >> S_FW_EQ_CTRL_CMD_FETCHSZM) & M_FW_EQ_CTRL_CMD_FETCHSZM)
+#define F_FW_EQ_CTRL_CMD_FETCHSZM V_FW_EQ_CTRL_CMD_FETCHSZM(1U)
+
+#define S_FW_EQ_CTRL_CMD_STATUSPGNS 25
+#define M_FW_EQ_CTRL_CMD_STATUSPGNS 0x1
+#define V_FW_EQ_CTRL_CMD_STATUSPGNS(x) ((x) << S_FW_EQ_CTRL_CMD_STATUSPGNS)
+#define G_FW_EQ_CTRL_CMD_STATUSPGNS(x) \
+ (((x) >> S_FW_EQ_CTRL_CMD_STATUSPGNS) & M_FW_EQ_CTRL_CMD_STATUSPGNS)
+#define F_FW_EQ_CTRL_CMD_STATUSPGNS V_FW_EQ_CTRL_CMD_STATUSPGNS(1U)
+
+#define S_FW_EQ_CTRL_CMD_STATUSPGRO 24
+#define M_FW_EQ_CTRL_CMD_STATUSPGRO 0x1
+#define V_FW_EQ_CTRL_CMD_STATUSPGRO(x) ((x) << S_FW_EQ_CTRL_CMD_STATUSPGRO)
+#define G_FW_EQ_CTRL_CMD_STATUSPGRO(x) \
+ (((x) >> S_FW_EQ_CTRL_CMD_STATUSPGRO) & M_FW_EQ_CTRL_CMD_STATUSPGRO)
+#define F_FW_EQ_CTRL_CMD_STATUSPGRO V_FW_EQ_CTRL_CMD_STATUSPGRO(1U)
+
+#define S_FW_EQ_CTRL_CMD_FETCHNS 23
+#define M_FW_EQ_CTRL_CMD_FETCHNS 0x1
+#define V_FW_EQ_CTRL_CMD_FETCHNS(x) ((x) << S_FW_EQ_CTRL_CMD_FETCHNS)
+#define G_FW_EQ_CTRL_CMD_FETCHNS(x) \
+ (((x) >> S_FW_EQ_CTRL_CMD_FETCHNS) & M_FW_EQ_CTRL_CMD_FETCHNS)
+#define F_FW_EQ_CTRL_CMD_FETCHNS V_FW_EQ_CTRL_CMD_FETCHNS(1U)
+
+#define S_FW_EQ_CTRL_CMD_FETCHRO 22
+#define M_FW_EQ_CTRL_CMD_FETCHRO 0x1
+#define V_FW_EQ_CTRL_CMD_FETCHRO(x) ((x) << S_FW_EQ_CTRL_CMD_FETCHRO)
+#define G_FW_EQ_CTRL_CMD_FETCHRO(x) \
+ (((x) >> S_FW_EQ_CTRL_CMD_FETCHRO) & M_FW_EQ_CTRL_CMD_FETCHRO)
+#define F_FW_EQ_CTRL_CMD_FETCHRO V_FW_EQ_CTRL_CMD_FETCHRO(1U)
+
+#define S_FW_EQ_CTRL_CMD_HOSTFCMODE 20
+#define M_FW_EQ_CTRL_CMD_HOSTFCMODE 0x3
+#define V_FW_EQ_CTRL_CMD_HOSTFCMODE(x) ((x) << S_FW_EQ_CTRL_CMD_HOSTFCMODE)
+#define G_FW_EQ_CTRL_CMD_HOSTFCMODE(x) \
+ (((x) >> S_FW_EQ_CTRL_CMD_HOSTFCMODE) & M_FW_EQ_CTRL_CMD_HOSTFCMODE)
+
+#define S_FW_EQ_CTRL_CMD_CPRIO 19
+#define M_FW_EQ_CTRL_CMD_CPRIO 0x1
+#define V_FW_EQ_CTRL_CMD_CPRIO(x) ((x) << S_FW_EQ_CTRL_CMD_CPRIO)
+#define G_FW_EQ_CTRL_CMD_CPRIO(x) \
+ (((x) >> S_FW_EQ_CTRL_CMD_CPRIO) & M_FW_EQ_CTRL_CMD_CPRIO)
+#define F_FW_EQ_CTRL_CMD_CPRIO V_FW_EQ_CTRL_CMD_CPRIO(1U)
+
+#define S_FW_EQ_CTRL_CMD_ONCHIP 18
+#define M_FW_EQ_CTRL_CMD_ONCHIP 0x1
+#define V_FW_EQ_CTRL_CMD_ONCHIP(x) ((x) << S_FW_EQ_CTRL_CMD_ONCHIP)
+#define G_FW_EQ_CTRL_CMD_ONCHIP(x) \
+ (((x) >> S_FW_EQ_CTRL_CMD_ONCHIP) & M_FW_EQ_CTRL_CMD_ONCHIP)
+#define F_FW_EQ_CTRL_CMD_ONCHIP V_FW_EQ_CTRL_CMD_ONCHIP(1U)
+
+#define S_FW_EQ_CTRL_CMD_PCIECHN 16
+#define M_FW_EQ_CTRL_CMD_PCIECHN 0x3
+#define V_FW_EQ_CTRL_CMD_PCIECHN(x) ((x) << S_FW_EQ_CTRL_CMD_PCIECHN)
+#define G_FW_EQ_CTRL_CMD_PCIECHN(x) \
+ (((x) >> S_FW_EQ_CTRL_CMD_PCIECHN) & M_FW_EQ_CTRL_CMD_PCIECHN)
+
+#define S_FW_EQ_CTRL_CMD_IQID 0
+#define M_FW_EQ_CTRL_CMD_IQID 0xffff
+#define V_FW_EQ_CTRL_CMD_IQID(x) ((x) << S_FW_EQ_CTRL_CMD_IQID)
+#define G_FW_EQ_CTRL_CMD_IQID(x) \
+ (((x) >> S_FW_EQ_CTRL_CMD_IQID) & M_FW_EQ_CTRL_CMD_IQID)
+
+#define S_FW_EQ_CTRL_CMD_DCAEN 31
+#define M_FW_EQ_CTRL_CMD_DCAEN 0x1
+#define V_FW_EQ_CTRL_CMD_DCAEN(x) ((x) << S_FW_EQ_CTRL_CMD_DCAEN)
+#define G_FW_EQ_CTRL_CMD_DCAEN(x) \
+ (((x) >> S_FW_EQ_CTRL_CMD_DCAEN) & M_FW_EQ_CTRL_CMD_DCAEN)
+#define F_FW_EQ_CTRL_CMD_DCAEN V_FW_EQ_CTRL_CMD_DCAEN(1U)
+
+#define S_FW_EQ_CTRL_CMD_DCACPU 26
+#define M_FW_EQ_CTRL_CMD_DCACPU 0x1f
+#define V_FW_EQ_CTRL_CMD_DCACPU(x) ((x) << S_FW_EQ_CTRL_CMD_DCACPU)
+#define G_FW_EQ_CTRL_CMD_DCACPU(x) \
+ (((x) >> S_FW_EQ_CTRL_CMD_DCACPU) & M_FW_EQ_CTRL_CMD_DCACPU)
+
+#define S_FW_EQ_CTRL_CMD_FBMIN 23
+#define M_FW_EQ_CTRL_CMD_FBMIN 0x7
+#define V_FW_EQ_CTRL_CMD_FBMIN(x) ((x) << S_FW_EQ_CTRL_CMD_FBMIN)
+#define G_FW_EQ_CTRL_CMD_FBMIN(x) \
+ (((x) >> S_FW_EQ_CTRL_CMD_FBMIN) & M_FW_EQ_CTRL_CMD_FBMIN)
+
+#define S_FW_EQ_CTRL_CMD_FBMAX 20
+#define M_FW_EQ_CTRL_CMD_FBMAX 0x7
+#define V_FW_EQ_CTRL_CMD_FBMAX(x) ((x) << S_FW_EQ_CTRL_CMD_FBMAX)
+#define G_FW_EQ_CTRL_CMD_FBMAX(x) \
+ (((x) >> S_FW_EQ_CTRL_CMD_FBMAX) & M_FW_EQ_CTRL_CMD_FBMAX)
+
+#define S_FW_EQ_CTRL_CMD_CIDXFTHRESHO 19
+#define M_FW_EQ_CTRL_CMD_CIDXFTHRESHO 0x1
+#define V_FW_EQ_CTRL_CMD_CIDXFTHRESHO(x) \
+ ((x) << S_FW_EQ_CTRL_CMD_CIDXFTHRESHO)
+#define G_FW_EQ_CTRL_CMD_CIDXFTHRESHO(x) \
+ (((x) >> S_FW_EQ_CTRL_CMD_CIDXFTHRESHO) & M_FW_EQ_CTRL_CMD_CIDXFTHRESHO)
+#define F_FW_EQ_CTRL_CMD_CIDXFTHRESHO V_FW_EQ_CTRL_CMD_CIDXFTHRESHO(1U)
+
+#define S_FW_EQ_CTRL_CMD_CIDXFTHRESH 16
+#define M_FW_EQ_CTRL_CMD_CIDXFTHRESH 0x7
+#define V_FW_EQ_CTRL_CMD_CIDXFTHRESH(x) ((x) << S_FW_EQ_CTRL_CMD_CIDXFTHRESH)
+#define G_FW_EQ_CTRL_CMD_CIDXFTHRESH(x) \
+ (((x) >> S_FW_EQ_CTRL_CMD_CIDXFTHRESH) & M_FW_EQ_CTRL_CMD_CIDXFTHRESH)
+
+#define S_FW_EQ_CTRL_CMD_EQSIZE 0
+#define M_FW_EQ_CTRL_CMD_EQSIZE 0xffff
+#define V_FW_EQ_CTRL_CMD_EQSIZE(x) ((x) << S_FW_EQ_CTRL_CMD_EQSIZE)
+#define G_FW_EQ_CTRL_CMD_EQSIZE(x) \
+ (((x) >> S_FW_EQ_CTRL_CMD_EQSIZE) & M_FW_EQ_CTRL_CMD_EQSIZE)
+
+struct fw_eq_ofld_cmd {
+ __be32 op_to_vfn;
+ __be32 alloc_to_len16;
+ __be32 eqid_pkd;
+ __be32 physeqid_pkd;
+ __be32 fetchszm_to_iqid;
+ __be32 dcaen_to_eqsize;
+ __be64 eqaddr;
+};
+
+#define S_FW_EQ_OFLD_CMD_PFN 8
+#define M_FW_EQ_OFLD_CMD_PFN 0x7
+#define V_FW_EQ_OFLD_CMD_PFN(x) ((x) << S_FW_EQ_OFLD_CMD_PFN)
+#define G_FW_EQ_OFLD_CMD_PFN(x) \
+ (((x) >> S_FW_EQ_OFLD_CMD_PFN) & M_FW_EQ_OFLD_CMD_PFN)
+
+#define S_FW_EQ_OFLD_CMD_VFN 0
+#define M_FW_EQ_OFLD_CMD_VFN 0xff
+#define V_FW_EQ_OFLD_CMD_VFN(x) ((x) << S_FW_EQ_OFLD_CMD_VFN)
+#define G_FW_EQ_OFLD_CMD_VFN(x) \
+ (((x) >> S_FW_EQ_OFLD_CMD_VFN) & M_FW_EQ_OFLD_CMD_VFN)
+
+#define S_FW_EQ_OFLD_CMD_ALLOC 31
+#define M_FW_EQ_OFLD_CMD_ALLOC 0x1
+#define V_FW_EQ_OFLD_CMD_ALLOC(x) ((x) << S_FW_EQ_OFLD_CMD_ALLOC)
+#define G_FW_EQ_OFLD_CMD_ALLOC(x) \
+ (((x) >> S_FW_EQ_OFLD_CMD_ALLOC) & M_FW_EQ_OFLD_CMD_ALLOC)
+#define F_FW_EQ_OFLD_CMD_ALLOC V_FW_EQ_OFLD_CMD_ALLOC(1U)
+
+#define S_FW_EQ_OFLD_CMD_FREE 30
+#define M_FW_EQ_OFLD_CMD_FREE 0x1
+#define V_FW_EQ_OFLD_CMD_FREE(x) ((x) << S_FW_EQ_OFLD_CMD_FREE)
+#define G_FW_EQ_OFLD_CMD_FREE(x) \
+ (((x) >> S_FW_EQ_OFLD_CMD_FREE) & M_FW_EQ_OFLD_CMD_FREE)
+#define F_FW_EQ_OFLD_CMD_FREE V_FW_EQ_OFLD_CMD_FREE(1U)
+
+#define S_FW_EQ_OFLD_CMD_MODIFY 29
+#define M_FW_EQ_OFLD_CMD_MODIFY 0x1
+#define V_FW_EQ_OFLD_CMD_MODIFY(x) ((x) << S_FW_EQ_OFLD_CMD_MODIFY)
+#define G_FW_EQ_OFLD_CMD_MODIFY(x) \
+ (((x) >> S_FW_EQ_OFLD_CMD_MODIFY) & M_FW_EQ_OFLD_CMD_MODIFY)
+#define F_FW_EQ_OFLD_CMD_MODIFY V_FW_EQ_OFLD_CMD_MODIFY(1U)
+
+#define S_FW_EQ_OFLD_CMD_EQSTART 28
+#define M_FW_EQ_OFLD_CMD_EQSTART 0x1
+#define V_FW_EQ_OFLD_CMD_EQSTART(x) ((x) << S_FW_EQ_OFLD_CMD_EQSTART)
+#define G_FW_EQ_OFLD_CMD_EQSTART(x) \
+ (((x) >> S_FW_EQ_OFLD_CMD_EQSTART) & M_FW_EQ_OFLD_CMD_EQSTART)
+#define F_FW_EQ_OFLD_CMD_EQSTART V_FW_EQ_OFLD_CMD_EQSTART(1U)
+
+#define S_FW_EQ_OFLD_CMD_EQSTOP 27
+#define M_FW_EQ_OFLD_CMD_EQSTOP 0x1
+#define V_FW_EQ_OFLD_CMD_EQSTOP(x) ((x) << S_FW_EQ_OFLD_CMD_EQSTOP)
+#define G_FW_EQ_OFLD_CMD_EQSTOP(x) \
+ (((x) >> S_FW_EQ_OFLD_CMD_EQSTOP) & M_FW_EQ_OFLD_CMD_EQSTOP)
+#define F_FW_EQ_OFLD_CMD_EQSTOP V_FW_EQ_OFLD_CMD_EQSTOP(1U)
+
+#define S_FW_EQ_OFLD_CMD_EQID 0
+#define M_FW_EQ_OFLD_CMD_EQID 0xfffff
+#define V_FW_EQ_OFLD_CMD_EQID(x) ((x) << S_FW_EQ_OFLD_CMD_EQID)
+#define G_FW_EQ_OFLD_CMD_EQID(x) \
+ (((x) >> S_FW_EQ_OFLD_CMD_EQID) & M_FW_EQ_OFLD_CMD_EQID)
+
+#define S_FW_EQ_OFLD_CMD_PHYSEQID 0
+#define M_FW_EQ_OFLD_CMD_PHYSEQID 0xfffff
+#define V_FW_EQ_OFLD_CMD_PHYSEQID(x) ((x) << S_FW_EQ_OFLD_CMD_PHYSEQID)
+#define G_FW_EQ_OFLD_CMD_PHYSEQID(x) \
+ (((x) >> S_FW_EQ_OFLD_CMD_PHYSEQID) & M_FW_EQ_OFLD_CMD_PHYSEQID)
+
+#define S_FW_EQ_OFLD_CMD_FETCHSZM 26
+#define M_FW_EQ_OFLD_CMD_FETCHSZM 0x1
+#define V_FW_EQ_OFLD_CMD_FETCHSZM(x) ((x) << S_FW_EQ_OFLD_CMD_FETCHSZM)
+#define G_FW_EQ_OFLD_CMD_FETCHSZM(x) \
+ (((x) >> S_FW_EQ_OFLD_CMD_FETCHSZM) & M_FW_EQ_OFLD_CMD_FETCHSZM)
+#define F_FW_EQ_OFLD_CMD_FETCHSZM V_FW_EQ_OFLD_CMD_FETCHSZM(1U)
+
+#define S_FW_EQ_OFLD_CMD_STATUSPGNS 25
+#define M_FW_EQ_OFLD_CMD_STATUSPGNS 0x1
+#define V_FW_EQ_OFLD_CMD_STATUSPGNS(x) ((x) << S_FW_EQ_OFLD_CMD_STATUSPGNS)
+#define G_FW_EQ_OFLD_CMD_STATUSPGNS(x) \
+ (((x) >> S_FW_EQ_OFLD_CMD_STATUSPGNS) & M_FW_EQ_OFLD_CMD_STATUSPGNS)
+#define F_FW_EQ_OFLD_CMD_STATUSPGNS V_FW_EQ_OFLD_CMD_STATUSPGNS(1U)
+
+#define S_FW_EQ_OFLD_CMD_STATUSPGRO 24
+#define M_FW_EQ_OFLD_CMD_STATUSPGRO 0x1
+#define V_FW_EQ_OFLD_CMD_STATUSPGRO(x) ((x) << S_FW_EQ_OFLD_CMD_STATUSPGRO)
+#define G_FW_EQ_OFLD_CMD_STATUSPGRO(x) \
+ (((x) >> S_FW_EQ_OFLD_CMD_STATUSPGRO) & M_FW_EQ_OFLD_CMD_STATUSPGRO)
+#define F_FW_EQ_OFLD_CMD_STATUSPGRO V_FW_EQ_OFLD_CMD_STATUSPGRO(1U)
+
+#define S_FW_EQ_OFLD_CMD_FETCHNS 23
+#define M_FW_EQ_OFLD_CMD_FETCHNS 0x1
+#define V_FW_EQ_OFLD_CMD_FETCHNS(x) ((x) << S_FW_EQ_OFLD_CMD_FETCHNS)
+#define G_FW_EQ_OFLD_CMD_FETCHNS(x) \
+ (((x) >> S_FW_EQ_OFLD_CMD_FETCHNS) & M_FW_EQ_OFLD_CMD_FETCHNS)
+#define F_FW_EQ_OFLD_CMD_FETCHNS V_FW_EQ_OFLD_CMD_FETCHNS(1U)
+
+#define S_FW_EQ_OFLD_CMD_FETCHRO 22
+#define M_FW_EQ_OFLD_CMD_FETCHRO 0x1
+#define V_FW_EQ_OFLD_CMD_FETCHRO(x) ((x) << S_FW_EQ_OFLD_CMD_FETCHRO)
+#define G_FW_EQ_OFLD_CMD_FETCHRO(x) \
+ (((x) >> S_FW_EQ_OFLD_CMD_FETCHRO) & M_FW_EQ_OFLD_CMD_FETCHRO)
+#define F_FW_EQ_OFLD_CMD_FETCHRO V_FW_EQ_OFLD_CMD_FETCHRO(1U)
+
+#define S_FW_EQ_OFLD_CMD_HOSTFCMODE 20
+#define M_FW_EQ_OFLD_CMD_HOSTFCMODE 0x3
+#define V_FW_EQ_OFLD_CMD_HOSTFCMODE(x) ((x) << S_FW_EQ_OFLD_CMD_HOSTFCMODE)
+#define G_FW_EQ_OFLD_CMD_HOSTFCMODE(x) \
+ (((x) >> S_FW_EQ_OFLD_CMD_HOSTFCMODE) & M_FW_EQ_OFLD_CMD_HOSTFCMODE)
+
+#define S_FW_EQ_OFLD_CMD_CPRIO 19
+#define M_FW_EQ_OFLD_CMD_CPRIO 0x1
+#define V_FW_EQ_OFLD_CMD_CPRIO(x) ((x) << S_FW_EQ_OFLD_CMD_CPRIO)
+#define G_FW_EQ_OFLD_CMD_CPRIO(x) \
+ (((x) >> S_FW_EQ_OFLD_CMD_CPRIO) & M_FW_EQ_OFLD_CMD_CPRIO)
+#define F_FW_EQ_OFLD_CMD_CPRIO V_FW_EQ_OFLD_CMD_CPRIO(1U)
+
+#define S_FW_EQ_OFLD_CMD_ONCHIP 18
+#define M_FW_EQ_OFLD_CMD_ONCHIP 0x1
+#define V_FW_EQ_OFLD_CMD_ONCHIP(x) ((x) << S_FW_EQ_OFLD_CMD_ONCHIP)
+#define G_FW_EQ_OFLD_CMD_ONCHIP(x) \
+ (((x) >> S_FW_EQ_OFLD_CMD_ONCHIP) & M_FW_EQ_OFLD_CMD_ONCHIP)
+#define F_FW_EQ_OFLD_CMD_ONCHIP V_FW_EQ_OFLD_CMD_ONCHIP(1U)
+
+#define S_FW_EQ_OFLD_CMD_PCIECHN 16
+#define M_FW_EQ_OFLD_CMD_PCIECHN 0x3
+#define V_FW_EQ_OFLD_CMD_PCIECHN(x) ((x) << S_FW_EQ_OFLD_CMD_PCIECHN)
+#define G_FW_EQ_OFLD_CMD_PCIECHN(x) \
+ (((x) >> S_FW_EQ_OFLD_CMD_PCIECHN) & M_FW_EQ_OFLD_CMD_PCIECHN)
+
+#define S_FW_EQ_OFLD_CMD_IQID 0
+#define M_FW_EQ_OFLD_CMD_IQID 0xffff
+#define V_FW_EQ_OFLD_CMD_IQID(x) ((x) << S_FW_EQ_OFLD_CMD_IQID)
+#define G_FW_EQ_OFLD_CMD_IQID(x) \
+ (((x) >> S_FW_EQ_OFLD_CMD_IQID) & M_FW_EQ_OFLD_CMD_IQID)
+
+#define S_FW_EQ_OFLD_CMD_DCAEN 31
+#define M_FW_EQ_OFLD_CMD_DCAEN 0x1
+#define V_FW_EQ_OFLD_CMD_DCAEN(x) ((x) << S_FW_EQ_OFLD_CMD_DCAEN)
+#define G_FW_EQ_OFLD_CMD_DCAEN(x) \
+ (((x) >> S_FW_EQ_OFLD_CMD_DCAEN) & M_FW_EQ_OFLD_CMD_DCAEN)
+#define F_FW_EQ_OFLD_CMD_DCAEN V_FW_EQ_OFLD_CMD_DCAEN(1U)
+
+#define S_FW_EQ_OFLD_CMD_DCACPU 26
+#define M_FW_EQ_OFLD_CMD_DCACPU 0x1f
+#define V_FW_EQ_OFLD_CMD_DCACPU(x) ((x) << S_FW_EQ_OFLD_CMD_DCACPU)
+#define G_FW_EQ_OFLD_CMD_DCACPU(x) \
+ (((x) >> S_FW_EQ_OFLD_CMD_DCACPU) & M_FW_EQ_OFLD_CMD_DCACPU)
+
+#define S_FW_EQ_OFLD_CMD_FBMIN 23
+#define M_FW_EQ_OFLD_CMD_FBMIN 0x7
+#define V_FW_EQ_OFLD_CMD_FBMIN(x) ((x) << S_FW_EQ_OFLD_CMD_FBMIN)
+#define G_FW_EQ_OFLD_CMD_FBMIN(x) \
+ (((x) >> S_FW_EQ_OFLD_CMD_FBMIN) & M_FW_EQ_OFLD_CMD_FBMIN)
+
+#define S_FW_EQ_OFLD_CMD_FBMAX 20
+#define M_FW_EQ_OFLD_CMD_FBMAX 0x7
+#define V_FW_EQ_OFLD_CMD_FBMAX(x) ((x) << S_FW_EQ_OFLD_CMD_FBMAX)
+#define G_FW_EQ_OFLD_CMD_FBMAX(x) \
+ (((x) >> S_FW_EQ_OFLD_CMD_FBMAX) & M_FW_EQ_OFLD_CMD_FBMAX)
+
+#define S_FW_EQ_OFLD_CMD_CIDXFTHRESHO 19
+#define M_FW_EQ_OFLD_CMD_CIDXFTHRESHO 0x1
+#define V_FW_EQ_OFLD_CMD_CIDXFTHRESHO(x) \
+ ((x) << S_FW_EQ_OFLD_CMD_CIDXFTHRESHO)
+#define G_FW_EQ_OFLD_CMD_CIDXFTHRESHO(x) \
+ (((x) >> S_FW_EQ_OFLD_CMD_CIDXFTHRESHO) & M_FW_EQ_OFLD_CMD_CIDXFTHRESHO)
+#define F_FW_EQ_OFLD_CMD_CIDXFTHRESHO V_FW_EQ_OFLD_CMD_CIDXFTHRESHO(1U)
+
+#define S_FW_EQ_OFLD_CMD_CIDXFTHRESH 16
+#define M_FW_EQ_OFLD_CMD_CIDXFTHRESH 0x7
+#define V_FW_EQ_OFLD_CMD_CIDXFTHRESH(x) ((x) << S_FW_EQ_OFLD_CMD_CIDXFTHRESH)
+#define G_FW_EQ_OFLD_CMD_CIDXFTHRESH(x) \
+ (((x) >> S_FW_EQ_OFLD_CMD_CIDXFTHRESH) & M_FW_EQ_OFLD_CMD_CIDXFTHRESH)
+
+#define S_FW_EQ_OFLD_CMD_EQSIZE 0
+#define M_FW_EQ_OFLD_CMD_EQSIZE 0xffff
+#define V_FW_EQ_OFLD_CMD_EQSIZE(x) ((x) << S_FW_EQ_OFLD_CMD_EQSIZE)
+#define G_FW_EQ_OFLD_CMD_EQSIZE(x) \
+ (((x) >> S_FW_EQ_OFLD_CMD_EQSIZE) & M_FW_EQ_OFLD_CMD_EQSIZE)
+
+/*
+ * Macros for VIID parsing:
+ * VIID - [10:8] PFN, [7] VI Valid, [6:0] VI number
+ */
+#define S_FW_VIID_PFN 8
+#define M_FW_VIID_PFN 0x7
+#define V_FW_VIID_PFN(x) ((x) << S_FW_VIID_PFN)
+#define G_FW_VIID_PFN(x) (((x) >> S_FW_VIID_PFN) & M_FW_VIID_PFN)
+
+#define S_FW_VIID_VIVLD 7
+#define M_FW_VIID_VIVLD 0x1
+#define V_FW_VIID_VIVLD(x) ((x) << S_FW_VIID_VIVLD)
+#define G_FW_VIID_VIVLD(x) (((x) >> S_FW_VIID_VIVLD) & M_FW_VIID_VIVLD)
+
+#define S_FW_VIID_VIN 0
+#define M_FW_VIID_VIN 0x7F
+#define V_FW_VIID_VIN(x) ((x) << S_FW_VIID_VIN)
+#define G_FW_VIID_VIN(x) (((x) >> S_FW_VIID_VIN) & M_FW_VIID_VIN)
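+
+/*
+ * Illustrative sketch only (not part of the firmware interface): the
+ * S_/M_/V_/G_ macros above compose and decompose a VIID, e.g. for PF 4,
+ * VI-valid set, VI number 3:
+ *
+ *	viid = V_FW_VIID_PFN(4) | V_FW_VIID_VIVLD(1) | V_FW_VIID_VIN(3);
+ *	pfn = G_FW_VIID_PFN(viid);	(yields 4)
+ *	vin = G_FW_VIID_VIN(viid);	(yields 3)
+ */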
+
+enum fw_vi_func {
+ FW_VI_FUNC_ETH,
+ FW_VI_FUNC_OFLD,
+ FW_VI_FUNC_IWARP,
+ FW_VI_FUNC_OPENISCSI,
+ FW_VI_FUNC_OPENFCOE,
+ FW_VI_FUNC_FOISCSI,
+ FW_VI_FUNC_FOFCOE,
+ FW_VI_FUNC_FW,
+};
+
+struct fw_vi_cmd {
+ __be32 op_to_vfn;
+ __be32 alloc_to_len16;
+ __be16 type_to_viid;
+ __u8 mac[6];
+ __u8 portid_pkd;
+ __u8 nmac;
+ __u8 nmac0[6];
+ __be16 rsssize_pkd;
+ __u8 nmac1[6];
+ __be16 idsiiq_pkd;
+ __u8 nmac2[6];
+ __be16 idseiq_pkd;
+ __u8 nmac3[6];
+ __be64 r9;
+ __be64 r10;
+};
+
+#define S_FW_VI_CMD_PFN 8
+#define M_FW_VI_CMD_PFN 0x7
+#define V_FW_VI_CMD_PFN(x) ((x) << S_FW_VI_CMD_PFN)
+#define G_FW_VI_CMD_PFN(x) (((x) >> S_FW_VI_CMD_PFN) & M_FW_VI_CMD_PFN)
+
+#define S_FW_VI_CMD_VFN 0
+#define M_FW_VI_CMD_VFN 0xff
+#define V_FW_VI_CMD_VFN(x) ((x) << S_FW_VI_CMD_VFN)
+#define G_FW_VI_CMD_VFN(x) (((x) >> S_FW_VI_CMD_VFN) & M_FW_VI_CMD_VFN)
+
+#define S_FW_VI_CMD_ALLOC 31
+#define M_FW_VI_CMD_ALLOC 0x1
+#define V_FW_VI_CMD_ALLOC(x) ((x) << S_FW_VI_CMD_ALLOC)
+#define G_FW_VI_CMD_ALLOC(x) \
+ (((x) >> S_FW_VI_CMD_ALLOC) & M_FW_VI_CMD_ALLOC)
+#define F_FW_VI_CMD_ALLOC V_FW_VI_CMD_ALLOC(1U)
+
+#define S_FW_VI_CMD_FREE 30
+#define M_FW_VI_CMD_FREE 0x1
+#define V_FW_VI_CMD_FREE(x) ((x) << S_FW_VI_CMD_FREE)
+#define G_FW_VI_CMD_FREE(x) (((x) >> S_FW_VI_CMD_FREE) & M_FW_VI_CMD_FREE)
+#define F_FW_VI_CMD_FREE V_FW_VI_CMD_FREE(1U)
+
+#define S_FW_VI_CMD_TYPE 15
+#define M_FW_VI_CMD_TYPE 0x1
+#define V_FW_VI_CMD_TYPE(x) ((x) << S_FW_VI_CMD_TYPE)
+#define G_FW_VI_CMD_TYPE(x) (((x) >> S_FW_VI_CMD_TYPE) & M_FW_VI_CMD_TYPE)
+#define F_FW_VI_CMD_TYPE V_FW_VI_CMD_TYPE(1U)
+
+#define S_FW_VI_CMD_FUNC 12
+#define M_FW_VI_CMD_FUNC 0x7
+#define V_FW_VI_CMD_FUNC(x) ((x) << S_FW_VI_CMD_FUNC)
+#define G_FW_VI_CMD_FUNC(x) (((x) >> S_FW_VI_CMD_FUNC) & M_FW_VI_CMD_FUNC)
+
+#define S_FW_VI_CMD_VIID 0
+#define M_FW_VI_CMD_VIID 0xfff
+#define V_FW_VI_CMD_VIID(x) ((x) << S_FW_VI_CMD_VIID)
+#define G_FW_VI_CMD_VIID(x) (((x) >> S_FW_VI_CMD_VIID) & M_FW_VI_CMD_VIID)
+
+#define S_FW_VI_CMD_PORTID 4
+#define M_FW_VI_CMD_PORTID 0xf
+#define V_FW_VI_CMD_PORTID(x) ((x) << S_FW_VI_CMD_PORTID)
+#define G_FW_VI_CMD_PORTID(x) \
+ (((x) >> S_FW_VI_CMD_PORTID) & M_FW_VI_CMD_PORTID)
+
+#define S_FW_VI_CMD_RSSSIZE 0
+#define M_FW_VI_CMD_RSSSIZE 0x7ff
+#define V_FW_VI_CMD_RSSSIZE(x) ((x) << S_FW_VI_CMD_RSSSIZE)
+#define G_FW_VI_CMD_RSSSIZE(x) \
+ (((x) >> S_FW_VI_CMD_RSSSIZE) & M_FW_VI_CMD_RSSSIZE)
+
+#define S_FW_VI_CMD_IDSIIQ 0
+#define M_FW_VI_CMD_IDSIIQ 0x3ff
+#define V_FW_VI_CMD_IDSIIQ(x) ((x) << S_FW_VI_CMD_IDSIIQ)
+#define G_FW_VI_CMD_IDSIIQ(x) \
+ (((x) >> S_FW_VI_CMD_IDSIIQ) & M_FW_VI_CMD_IDSIIQ)
+
+#define S_FW_VI_CMD_IDSEIQ 0
+#define M_FW_VI_CMD_IDSEIQ 0x3ff
+#define V_FW_VI_CMD_IDSEIQ(x) ((x) << S_FW_VI_CMD_IDSEIQ)
+#define G_FW_VI_CMD_IDSEIQ(x) \
+ (((x) >> S_FW_VI_CMD_IDSEIQ) & M_FW_VI_CMD_IDSEIQ)
+
+/* Special VI_MAC command index ids */
+#define FW_VI_MAC_ADD_MAC 0x3FF
+#define FW_VI_MAC_ADD_PERSIST_MAC 0x3FE
+#define FW_VI_MAC_MAC_BASED_FREE 0x3FD
+#define FW_CLS_TCAM_NUM_ENTRIES 336
+
+enum fw_vi_mac_smac {
+ FW_VI_MAC_MPS_TCAM_ENTRY,
+ FW_VI_MAC_MPS_TCAM_ONLY,
+ FW_VI_MAC_SMT_ONLY,
+ FW_VI_MAC_SMT_AND_MPSTCAM
+};
+
+enum fw_vi_mac_result {
+ FW_VI_MAC_R_SUCCESS,
+ FW_VI_MAC_R_F_NONEXISTENT_NOMEM,
+ FW_VI_MAC_R_SMAC_FAIL,
+ FW_VI_MAC_R_F_ACL_CHECK
+};
+
+struct fw_vi_mac_cmd {
+ __be32 op_to_viid;
+ __be32 freemacs_to_len16;
+ union fw_vi_mac {
+ struct fw_vi_mac_exact {
+ __be16 valid_to_idx;
+ __u8 macaddr[6];
+ } exact[7];
+ struct fw_vi_mac_hash {
+ __be64 hashvec;
+ } hash;
+ } u;
+};
+
+#define S_FW_VI_MAC_CMD_VIID 0
+#define M_FW_VI_MAC_CMD_VIID 0xfff
+#define V_FW_VI_MAC_CMD_VIID(x) ((x) << S_FW_VI_MAC_CMD_VIID)
+#define G_FW_VI_MAC_CMD_VIID(x) \
+ (((x) >> S_FW_VI_MAC_CMD_VIID) & M_FW_VI_MAC_CMD_VIID)
+
+#define S_FW_VI_MAC_CMD_FREEMACS 31
+#define M_FW_VI_MAC_CMD_FREEMACS 0x1
+#define V_FW_VI_MAC_CMD_FREEMACS(x) ((x) << S_FW_VI_MAC_CMD_FREEMACS)
+#define G_FW_VI_MAC_CMD_FREEMACS(x) \
+ (((x) >> S_FW_VI_MAC_CMD_FREEMACS) & M_FW_VI_MAC_CMD_FREEMACS)
+#define F_FW_VI_MAC_CMD_FREEMACS V_FW_VI_MAC_CMD_FREEMACS(1U)
+
+#define S_FW_VI_MAC_CMD_HASHVECEN 23
+#define M_FW_VI_MAC_CMD_HASHVECEN 0x1
+#define V_FW_VI_MAC_CMD_HASHVECEN(x) ((x) << S_FW_VI_MAC_CMD_HASHVECEN)
+#define G_FW_VI_MAC_CMD_HASHVECEN(x) \
+ (((x) >> S_FW_VI_MAC_CMD_HASHVECEN) & M_FW_VI_MAC_CMD_HASHVECEN)
+#define F_FW_VI_MAC_CMD_HASHVECEN V_FW_VI_MAC_CMD_HASHVECEN(1U)
+
+#define S_FW_VI_MAC_CMD_HASHUNIEN 22
+#define M_FW_VI_MAC_CMD_HASHUNIEN 0x1
+#define V_FW_VI_MAC_CMD_HASHUNIEN(x) ((x) << S_FW_VI_MAC_CMD_HASHUNIEN)
+#define G_FW_VI_MAC_CMD_HASHUNIEN(x) \
+ (((x) >> S_FW_VI_MAC_CMD_HASHUNIEN) & M_FW_VI_MAC_CMD_HASHUNIEN)
+#define F_FW_VI_MAC_CMD_HASHUNIEN V_FW_VI_MAC_CMD_HASHUNIEN(1U)
+
+#define S_FW_VI_MAC_CMD_VALID 15
+#define M_FW_VI_MAC_CMD_VALID 0x1
+#define V_FW_VI_MAC_CMD_VALID(x) ((x) << S_FW_VI_MAC_CMD_VALID)
+#define G_FW_VI_MAC_CMD_VALID(x) \
+ (((x) >> S_FW_VI_MAC_CMD_VALID) & M_FW_VI_MAC_CMD_VALID)
+#define F_FW_VI_MAC_CMD_VALID V_FW_VI_MAC_CMD_VALID(1U)
+
+#define S_FW_VI_MAC_CMD_PRIO 12
+#define M_FW_VI_MAC_CMD_PRIO 0x7
+#define V_FW_VI_MAC_CMD_PRIO(x) ((x) << S_FW_VI_MAC_CMD_PRIO)
+#define G_FW_VI_MAC_CMD_PRIO(x) \
+ (((x) >> S_FW_VI_MAC_CMD_PRIO) & M_FW_VI_MAC_CMD_PRIO)
+
+#define S_FW_VI_MAC_CMD_SMAC_RESULT 10
+#define M_FW_VI_MAC_CMD_SMAC_RESULT 0x3
+#define V_FW_VI_MAC_CMD_SMAC_RESULT(x) ((x) << S_FW_VI_MAC_CMD_SMAC_RESULT)
+#define G_FW_VI_MAC_CMD_SMAC_RESULT(x) \
+ (((x) >> S_FW_VI_MAC_CMD_SMAC_RESULT) & M_FW_VI_MAC_CMD_SMAC_RESULT)
+
+#define S_FW_VI_MAC_CMD_IDX 0
+#define M_FW_VI_MAC_CMD_IDX 0x3ff
+#define V_FW_VI_MAC_CMD_IDX(x) ((x) << S_FW_VI_MAC_CMD_IDX)
+#define G_FW_VI_MAC_CMD_IDX(x) \
+ (((x) >> S_FW_VI_MAC_CMD_IDX) & M_FW_VI_MAC_CMD_IDX)
+
+/* T4 max MTU supported */
+#define T4_MAX_MTU_SUPPORTED 9600
+#define FW_RXMODE_MTU_NO_CHG 65535
+
+struct fw_vi_rxmode_cmd {
+ __be32 op_to_viid;
+ __be32 retval_len16;
+ __be32 mtu_to_vlanexen;
+ __be32 r4_lo;
+};
+
+#define S_FW_VI_RXMODE_CMD_VIID 0
+#define M_FW_VI_RXMODE_CMD_VIID 0xfff
+#define V_FW_VI_RXMODE_CMD_VIID(x) ((x) << S_FW_VI_RXMODE_CMD_VIID)
+#define G_FW_VI_RXMODE_CMD_VIID(x) \
+ (((x) >> S_FW_VI_RXMODE_CMD_VIID) & M_FW_VI_RXMODE_CMD_VIID)
+
+#define S_FW_VI_RXMODE_CMD_MTU 16
+#define M_FW_VI_RXMODE_CMD_MTU 0xffff
+#define V_FW_VI_RXMODE_CMD_MTU(x) ((x) << S_FW_VI_RXMODE_CMD_MTU)
+#define G_FW_VI_RXMODE_CMD_MTU(x) \
+ (((x) >> S_FW_VI_RXMODE_CMD_MTU) & M_FW_VI_RXMODE_CMD_MTU)
+
+#define S_FW_VI_RXMODE_CMD_PROMISCEN 14
+#define M_FW_VI_RXMODE_CMD_PROMISCEN 0x3
+#define V_FW_VI_RXMODE_CMD_PROMISCEN(x) ((x) << S_FW_VI_RXMODE_CMD_PROMISCEN)
+#define G_FW_VI_RXMODE_CMD_PROMISCEN(x) \
+ (((x) >> S_FW_VI_RXMODE_CMD_PROMISCEN) & M_FW_VI_RXMODE_CMD_PROMISCEN)
+
+#define S_FW_VI_RXMODE_CMD_ALLMULTIEN 12
+#define M_FW_VI_RXMODE_CMD_ALLMULTIEN 0x3
+#define V_FW_VI_RXMODE_CMD_ALLMULTIEN(x) \
+ ((x) << S_FW_VI_RXMODE_CMD_ALLMULTIEN)
+#define G_FW_VI_RXMODE_CMD_ALLMULTIEN(x) \
+ (((x) >> S_FW_VI_RXMODE_CMD_ALLMULTIEN) & M_FW_VI_RXMODE_CMD_ALLMULTIEN)
+
+#define S_FW_VI_RXMODE_CMD_BROADCASTEN 10
+#define M_FW_VI_RXMODE_CMD_BROADCASTEN 0x3
+#define V_FW_VI_RXMODE_CMD_BROADCASTEN(x) \
+ ((x) << S_FW_VI_RXMODE_CMD_BROADCASTEN)
+#define G_FW_VI_RXMODE_CMD_BROADCASTEN(x) \
+ (((x) >> S_FW_VI_RXMODE_CMD_BROADCASTEN) & M_FW_VI_RXMODE_CMD_BROADCASTEN)
+
+#define S_FW_VI_RXMODE_CMD_VLANEXEN 8
+#define M_FW_VI_RXMODE_CMD_VLANEXEN 0x3
+#define V_FW_VI_RXMODE_CMD_VLANEXEN(x) ((x) << S_FW_VI_RXMODE_CMD_VLANEXEN)
+#define G_FW_VI_RXMODE_CMD_VLANEXEN(x) \
+ (((x) >> S_FW_VI_RXMODE_CMD_VLANEXEN) & M_FW_VI_RXMODE_CMD_VLANEXEN)
+
+struct fw_vi_enable_cmd {
+ __be32 op_to_viid;
+ __be32 ien_to_len16;
+ __be16 blinkdur;
+ __be16 r3;
+ __be32 r4;
+};
+
+#define S_FW_VI_ENABLE_CMD_VIID 0
+#define M_FW_VI_ENABLE_CMD_VIID 0xfff
+#define V_FW_VI_ENABLE_CMD_VIID(x) ((x) << S_FW_VI_ENABLE_CMD_VIID)
+#define G_FW_VI_ENABLE_CMD_VIID(x) \
+ (((x) >> S_FW_VI_ENABLE_CMD_VIID) & M_FW_VI_ENABLE_CMD_VIID)
+
+#define S_FW_VI_ENABLE_CMD_IEN 31
+#define M_FW_VI_ENABLE_CMD_IEN 0x1
+#define V_FW_VI_ENABLE_CMD_IEN(x) ((x) << S_FW_VI_ENABLE_CMD_IEN)
+#define G_FW_VI_ENABLE_CMD_IEN(x) \
+ (((x) >> S_FW_VI_ENABLE_CMD_IEN) & M_FW_VI_ENABLE_CMD_IEN)
+#define F_FW_VI_ENABLE_CMD_IEN V_FW_VI_ENABLE_CMD_IEN(1U)
+
+#define S_FW_VI_ENABLE_CMD_EEN 30
+#define M_FW_VI_ENABLE_CMD_EEN 0x1
+#define V_FW_VI_ENABLE_CMD_EEN(x) ((x) << S_FW_VI_ENABLE_CMD_EEN)
+#define G_FW_VI_ENABLE_CMD_EEN(x) \
+ (((x) >> S_FW_VI_ENABLE_CMD_EEN) & M_FW_VI_ENABLE_CMD_EEN)
+#define F_FW_VI_ENABLE_CMD_EEN V_FW_VI_ENABLE_CMD_EEN(1U)
+
+#define S_FW_VI_ENABLE_CMD_LED 29
+#define M_FW_VI_ENABLE_CMD_LED 0x1
+#define V_FW_VI_ENABLE_CMD_LED(x) ((x) << S_FW_VI_ENABLE_CMD_LED)
+#define G_FW_VI_ENABLE_CMD_LED(x) \
+ (((x) >> S_FW_VI_ENABLE_CMD_LED) & M_FW_VI_ENABLE_CMD_LED)
+#define F_FW_VI_ENABLE_CMD_LED V_FW_VI_ENABLE_CMD_LED(1U)
+
+/* VI VF stats offset definitions */
+#define VI_VF_NUM_STATS 16
+enum fw_vi_stats_vf_index {
+ FW_VI_VF_STAT_TX_BCAST_BYTES_IX,
+ FW_VI_VF_STAT_TX_BCAST_FRAMES_IX,
+ FW_VI_VF_STAT_TX_MCAST_BYTES_IX,
+ FW_VI_VF_STAT_TX_MCAST_FRAMES_IX,
+ FW_VI_VF_STAT_TX_UCAST_BYTES_IX,
+ FW_VI_VF_STAT_TX_UCAST_FRAMES_IX,
+ FW_VI_VF_STAT_TX_DROP_FRAMES_IX,
+ FW_VI_VF_STAT_TX_OFLD_BYTES_IX,
+ FW_VI_VF_STAT_TX_OFLD_FRAMES_IX,
+ FW_VI_VF_STAT_RX_BCAST_BYTES_IX,
+ FW_VI_VF_STAT_RX_BCAST_FRAMES_IX,
+ FW_VI_VF_STAT_RX_MCAST_BYTES_IX,
+ FW_VI_VF_STAT_RX_MCAST_FRAMES_IX,
+ FW_VI_VF_STAT_RX_UCAST_BYTES_IX,
+ FW_VI_VF_STAT_RX_UCAST_FRAMES_IX,
+ FW_VI_VF_STAT_RX_ERR_FRAMES_IX
+};
+
+/* VI PF stats offset definitions */
+#define VI_PF_NUM_STATS 17
+enum fw_vi_stats_pf_index {
+ FW_VI_PF_STAT_TX_BCAST_BYTES_IX,
+ FW_VI_PF_STAT_TX_BCAST_FRAMES_IX,
+ FW_VI_PF_STAT_TX_MCAST_BYTES_IX,
+ FW_VI_PF_STAT_TX_MCAST_FRAMES_IX,
+ FW_VI_PF_STAT_TX_UCAST_BYTES_IX,
+ FW_VI_PF_STAT_TX_UCAST_FRAMES_IX,
+ FW_VI_PF_STAT_TX_OFLD_BYTES_IX,
+ FW_VI_PF_STAT_TX_OFLD_FRAMES_IX,
+ FW_VI_PF_STAT_RX_BYTES_IX,
+ FW_VI_PF_STAT_RX_FRAMES_IX,
+ FW_VI_PF_STAT_RX_BCAST_BYTES_IX,
+ FW_VI_PF_STAT_RX_BCAST_FRAMES_IX,
+ FW_VI_PF_STAT_RX_MCAST_BYTES_IX,
+ FW_VI_PF_STAT_RX_MCAST_FRAMES_IX,
+ FW_VI_PF_STAT_RX_UCAST_BYTES_IX,
+ FW_VI_PF_STAT_RX_UCAST_FRAMES_IX,
+ FW_VI_PF_STAT_RX_ERR_FRAMES_IX
+};
+
+struct fw_vi_stats_cmd {
+ __be32 op_to_viid;
+ __be32 retval_len16;
+ union fw_vi_stats {
+ struct fw_vi_stats_ctl {
+ __be16 nstats_ix;
+ __be16 r6;
+ __be32 r7;
+ __be64 stat0;
+ __be64 stat1;
+ __be64 stat2;
+ __be64 stat3;
+ __be64 stat4;
+ __be64 stat5;
+ } ctl;
+ struct fw_vi_stats_pf {
+ __be64 tx_bcast_bytes;
+ __be64 tx_bcast_frames;
+ __be64 tx_mcast_bytes;
+ __be64 tx_mcast_frames;
+ __be64 tx_ucast_bytes;
+ __be64 tx_ucast_frames;
+ __be64 tx_offload_bytes;
+ __be64 tx_offload_frames;
+ __be64 rx_pf_bytes;
+ __be64 rx_pf_frames;
+ __be64 rx_bcast_bytes;
+ __be64 rx_bcast_frames;
+ __be64 rx_mcast_bytes;
+ __be64 rx_mcast_frames;
+ __be64 rx_ucast_bytes;
+ __be64 rx_ucast_frames;
+ __be64 rx_err_frames;
+ } pf;
+ struct fw_vi_stats_vf {
+ __be64 tx_bcast_bytes;
+ __be64 tx_bcast_frames;
+ __be64 tx_mcast_bytes;
+ __be64 tx_mcast_frames;
+ __be64 tx_ucast_bytes;
+ __be64 tx_ucast_frames;
+ __be64 tx_drop_frames;
+ __be64 tx_offload_bytes;
+ __be64 tx_offload_frames;
+ __be64 rx_bcast_bytes;
+ __be64 rx_bcast_frames;
+ __be64 rx_mcast_bytes;
+ __be64 rx_mcast_frames;
+ __be64 rx_ucast_bytes;
+ __be64 rx_ucast_frames;
+ __be64 rx_err_frames;
+ } vf;
+ } u;
+};
+
+#define S_FW_VI_STATS_CMD_VIID 0
+#define M_FW_VI_STATS_CMD_VIID 0xfff
+#define V_FW_VI_STATS_CMD_VIID(x) ((x) << S_FW_VI_STATS_CMD_VIID)
+#define G_FW_VI_STATS_CMD_VIID(x) \
+ (((x) >> S_FW_VI_STATS_CMD_VIID) & M_FW_VI_STATS_CMD_VIID)
+
+#define S_FW_VI_STATS_CMD_NSTATS 12
+#define M_FW_VI_STATS_CMD_NSTATS 0x7
+#define V_FW_VI_STATS_CMD_NSTATS(x) ((x) << S_FW_VI_STATS_CMD_NSTATS)
+#define G_FW_VI_STATS_CMD_NSTATS(x) \
+ (((x) >> S_FW_VI_STATS_CMD_NSTATS) & M_FW_VI_STATS_CMD_NSTATS)
+
+#define S_FW_VI_STATS_CMD_IX 0
+#define M_FW_VI_STATS_CMD_IX 0x1f
+#define V_FW_VI_STATS_CMD_IX(x) ((x) << S_FW_VI_STATS_CMD_IX)
+#define G_FW_VI_STATS_CMD_IX(x) \
+ (((x) >> S_FW_VI_STATS_CMD_IX) & M_FW_VI_STATS_CMD_IX)
+
+struct fw_acl_mac_cmd {
+ __be32 op_to_vfn;
+ __be32 en_to_len16;
+ __u8 nmac;
+ __u8 r3[7];
+ __be16 r4;
+ __u8 macaddr0[6];
+ __be16 r5;
+ __u8 macaddr1[6];
+ __be16 r6;
+ __u8 macaddr2[6];
+ __be16 r7;
+ __u8 macaddr3[6];
+};
+
+#define S_FW_ACL_MAC_CMD_PFN 8
+#define M_FW_ACL_MAC_CMD_PFN 0x7
+#define V_FW_ACL_MAC_CMD_PFN(x) ((x) << S_FW_ACL_MAC_CMD_PFN)
+#define G_FW_ACL_MAC_CMD_PFN(x) \
+ (((x) >> S_FW_ACL_MAC_CMD_PFN) & M_FW_ACL_MAC_CMD_PFN)
+
+#define S_FW_ACL_MAC_CMD_VFN 0
+#define M_FW_ACL_MAC_CMD_VFN 0xff
+#define V_FW_ACL_MAC_CMD_VFN(x) ((x) << S_FW_ACL_MAC_CMD_VFN)
+#define G_FW_ACL_MAC_CMD_VFN(x) \
+ (((x) >> S_FW_ACL_MAC_CMD_VFN) & M_FW_ACL_MAC_CMD_VFN)
+
+#define S_FW_ACL_MAC_CMD_EN 31
+#define M_FW_ACL_MAC_CMD_EN 0x1
+#define V_FW_ACL_MAC_CMD_EN(x) ((x) << S_FW_ACL_MAC_CMD_EN)
+#define G_FW_ACL_MAC_CMD_EN(x) \
+ (((x) >> S_FW_ACL_MAC_CMD_EN) & M_FW_ACL_MAC_CMD_EN)
+#define F_FW_ACL_MAC_CMD_EN V_FW_ACL_MAC_CMD_EN(1U)
+
+struct fw_acl_vlan_cmd {
+ __be32 op_to_vfn;
+ __be32 en_to_len16;
+ __u8 nvlan;
+ __u8 dropnovlan_fm;
+ __u8 r3_lo[6];
+ __be16 vlanid[16];
+};
+
+#define S_FW_ACL_VLAN_CMD_PFN 8
+#define M_FW_ACL_VLAN_CMD_PFN 0x7
+#define V_FW_ACL_VLAN_CMD_PFN(x) ((x) << S_FW_ACL_VLAN_CMD_PFN)
+#define G_FW_ACL_VLAN_CMD_PFN(x) \
+ (((x) >> S_FW_ACL_VLAN_CMD_PFN) & M_FW_ACL_VLAN_CMD_PFN)
+
+#define S_FW_ACL_VLAN_CMD_VFN 0
+#define M_FW_ACL_VLAN_CMD_VFN 0xff
+#define V_FW_ACL_VLAN_CMD_VFN(x) ((x) << S_FW_ACL_VLAN_CMD_VFN)
+#define G_FW_ACL_VLAN_CMD_VFN(x) \
+ (((x) >> S_FW_ACL_VLAN_CMD_VFN) & M_FW_ACL_VLAN_CMD_VFN)
+
+#define S_FW_ACL_VLAN_CMD_EN 31
+#define M_FW_ACL_VLAN_CMD_EN 0x1
+#define V_FW_ACL_VLAN_CMD_EN(x) ((x) << S_FW_ACL_VLAN_CMD_EN)
+#define G_FW_ACL_VLAN_CMD_EN(x) \
+ (((x) >> S_FW_ACL_VLAN_CMD_EN) & M_FW_ACL_VLAN_CMD_EN)
+#define F_FW_ACL_VLAN_CMD_EN V_FW_ACL_VLAN_CMD_EN(1U)
+
+#define S_FW_ACL_VLAN_CMD_DROPNOVLAN 7
+#define M_FW_ACL_VLAN_CMD_DROPNOVLAN 0x1
+#define V_FW_ACL_VLAN_CMD_DROPNOVLAN(x) ((x) << S_FW_ACL_VLAN_CMD_DROPNOVLAN)
+#define G_FW_ACL_VLAN_CMD_DROPNOVLAN(x) \
+ (((x) >> S_FW_ACL_VLAN_CMD_DROPNOVLAN) & M_FW_ACL_VLAN_CMD_DROPNOVLAN)
+#define F_FW_ACL_VLAN_CMD_DROPNOVLAN V_FW_ACL_VLAN_CMD_DROPNOVLAN(1U)
+
+#define S_FW_ACL_VLAN_CMD_FM 6
+#define M_FW_ACL_VLAN_CMD_FM 0x1
+#define V_FW_ACL_VLAN_CMD_FM(x) ((x) << S_FW_ACL_VLAN_CMD_FM)
+#define G_FW_ACL_VLAN_CMD_FM(x) \
+ (((x) >> S_FW_ACL_VLAN_CMD_FM) & M_FW_ACL_VLAN_CMD_FM)
+#define F_FW_ACL_VLAN_CMD_FM V_FW_ACL_VLAN_CMD_FM(1U)
+
+/* port capabilities bitmap */
+enum fw_port_cap {
+ FW_PORT_CAP_SPEED_100M = 0x0001,
+ FW_PORT_CAP_SPEED_1G = 0x0002,
+ FW_PORT_CAP_SPEED_2_5G = 0x0004,
+ FW_PORT_CAP_SPEED_10G = 0x0008,
+ FW_PORT_CAP_SPEED_40G = 0x0010,
+ FW_PORT_CAP_SPEED_100G = 0x0020,
+ FW_PORT_CAP_FC_RX = 0x0040,
+ FW_PORT_CAP_FC_TX = 0x0080,
+ FW_PORT_CAP_ANEG = 0x0100,
+ FW_PORT_CAP_MDI_0 = 0x0200,
+ FW_PORT_CAP_MDI_1 = 0x0400,
+ FW_PORT_CAP_BEAN = 0x0800,
+ FW_PORT_CAP_PMA_LPBK = 0x1000,
+ FW_PORT_CAP_PCS_LPBK = 0x2000,
+ FW_PORT_CAP_PHYXS_LPBK = 0x4000,
+ FW_PORT_CAP_FAR_END_LPBK = 0x8000,
+};
+
+#define S_FW_PORT_CAP_SPEED 0
+#define M_FW_PORT_CAP_SPEED 0x3f
+#define V_FW_PORT_CAP_SPEED(x) ((x) << S_FW_PORT_CAP_SPEED)
+#define G_FW_PORT_CAP_SPEED(x) \
+ (((x) >> S_FW_PORT_CAP_SPEED) & M_FW_PORT_CAP_SPEED)
+
+#define S_FW_PORT_CAP_FC 6
+#define M_FW_PORT_CAP_FC 0x3
+#define V_FW_PORT_CAP_FC(x) ((x) << S_FW_PORT_CAP_FC)
+#define G_FW_PORT_CAP_FC(x) \
+ (((x) >> S_FW_PORT_CAP_FC) & M_FW_PORT_CAP_FC)
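+
+/*
+ * Illustrative sketch only: a pcap/acap word advertising 1G and 10G with
+ * both pause directions could be built and queried with the macros above:
+ *
+ *	cap = FW_PORT_CAP_SPEED_1G | FW_PORT_CAP_SPEED_10G |
+ *	    FW_PORT_CAP_FC_RX | FW_PORT_CAP_FC_TX;
+ *	speed = G_FW_PORT_CAP_SPEED(cap);	(yields 0x0a)
+ *	fc = G_FW_PORT_CAP_FC(cap);		(yields 0x3)
+ */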
+
+enum fw_port_mdi {
+ FW_PORT_CAP_MDI_UNCHANGED,
+ FW_PORT_CAP_MDI_AUTO,
+ FW_PORT_CAP_MDI_F_STRAIGHT,
+ FW_PORT_CAP_MDI_F_CROSSOVER
+};
+
+#define S_FW_PORT_CAP_MDI 9
+#define M_FW_PORT_CAP_MDI 3
+#define V_FW_PORT_CAP_MDI(x) ((x) << S_FW_PORT_CAP_MDI)
+#define G_FW_PORT_CAP_MDI(x) (((x) >> S_FW_PORT_CAP_MDI) & M_FW_PORT_CAP_MDI)
+
+enum fw_port_action {
+ FW_PORT_ACTION_L1_CFG = 0x0001,
+ FW_PORT_ACTION_L2_CFG = 0x0002,
+ FW_PORT_ACTION_GET_PORT_INFO = 0x0003,
+ FW_PORT_ACTION_L2_PPP_CFG = 0x0004,
+ FW_PORT_ACTION_L2_DCB_CFG = 0x0005,
+ FW_PORT_ACTION_LOW_PWR_TO_NORMAL = 0x0010,
+ FW_PORT_ACTION_L1_LOW_PWR_EN = 0x0011,
+ FW_PORT_ACTION_L2_WOL_MODE_EN = 0x0012,
+ FW_PORT_ACTION_LPBK_TO_NORMAL = 0x0020,
+ FW_PORT_ACTION_L1_SS_LPBK_ASIC = 0x0021,
+ FW_PORT_ACTION_L1_WS_LPBK_ASIC = 0x0023,
+ FW_PORT_ACTION_L1_EXT_LPBK = 0x0026,
+ FW_PORT_ACTION_PHY_RESET = 0x0040,
+ FW_PORT_ACTION_PMA_RESET = 0x0041,
+ FW_PORT_ACTION_PCS_RESET = 0x0042,
+ FW_PORT_ACTION_PHYXS_RESET = 0x0043,
+ FW_PORT_ACTION_DTEXS_REEST = 0x0044,
+ FW_PORT_ACTION_AN_RESET = 0x0045
+};
+
+enum fw_port_l2cfg_ctlbf {
+ FW_PORT_L2_CTLBF_OVLAN0 = 0x01,
+ FW_PORT_L2_CTLBF_OVLAN1 = 0x02,
+ FW_PORT_L2_CTLBF_OVLAN2 = 0x04,
+ FW_PORT_L2_CTLBF_OVLAN3 = 0x08,
+ FW_PORT_L2_CTLBF_IVLAN = 0x10,
+ FW_PORT_L2_CTLBF_TXIPG = 0x20,
+ FW_PORT_L2_CTLBF_MTU = 0x40
+};
+
+enum fw_port_dcb_type {
+ FW_PORT_DCB_TYPE_PGID = 0x00,
+ FW_PORT_DCB_TYPE_PGRATE = 0x01,
+ FW_PORT_DCB_TYPE_PRIORATE = 0x02,
+ FW_PORT_DCB_TYPE_PFC = 0x03
+};
+
+struct fw_port_cmd {
+ __be32 op_to_portid;
+ __be32 action_to_len16;
+ union fw_port {
+ struct fw_port_l1cfg {
+ __be32 rcap;
+ __be32 r;
+ } l1cfg;
+ struct fw_port_l2cfg {
+ __u8 ctlbf;
+ __u8 ovlan3_to_ivlan0;
+ __be16 ivlantype;
+ __be16 txipg_force_pinfo;
+ __be16 mtu;
+ __be16 ovlan0mask;
+ __be16 ovlan0type;
+ __be16 ovlan1mask;
+ __be16 ovlan1type;
+ __be16 ovlan2mask;
+ __be16 ovlan2type;
+ __be16 ovlan3mask;
+ __be16 ovlan3type;
+ } l2cfg;
+ struct fw_port_info {
+ __be32 lstatus_to_modtype;
+ __be16 pcap;
+ __be16 acap;
+ __be16 mtu;
+ __u8 cbllen;
+ __u8 r7;
+ __be32 r8;
+ __be64 r9;
+ } info;
+ union fw_port_dcb {
+ struct fw_port_dcb_pgid {
+ __u8 type;
+ __u8 apply_pkd;
+ __u8 r10_lo[2];
+ __be32 pgid;
+ __be64 r11;
+ } pgid;
+ struct fw_port_dcb_pgrate {
+ __u8 type;
+ __u8 apply_pkd;
+ __u8 r10_lo[6];
+ __u8 pgrate[8];
+ } pgrate;
+ struct fw_port_dcb_priorate {
+ __u8 type;
+ __u8 apply_pkd;
+ __u8 r10_lo[6];
+ __u8 strict_priorate[8];
+ } priorate;
+ struct fw_port_dcb_pfc {
+ __u8 type;
+ __u8 pfcen;
+ __be16 r10[3];
+ __be64 r11;
+ } pfc;
+ } dcb;
+ } u;
+};
+
+#define S_FW_PORT_CMD_READ 22
+#define M_FW_PORT_CMD_READ 0x1
+#define V_FW_PORT_CMD_READ(x) ((x) << S_FW_PORT_CMD_READ)
+#define G_FW_PORT_CMD_READ(x) \
+ (((x) >> S_FW_PORT_CMD_READ) & M_FW_PORT_CMD_READ)
+#define F_FW_PORT_CMD_READ V_FW_PORT_CMD_READ(1U)
+
+#define S_FW_PORT_CMD_PORTID 0
+#define M_FW_PORT_CMD_PORTID 0xf
+#define V_FW_PORT_CMD_PORTID(x) ((x) << S_FW_PORT_CMD_PORTID)
+#define G_FW_PORT_CMD_PORTID(x) \
+ (((x) >> S_FW_PORT_CMD_PORTID) & M_FW_PORT_CMD_PORTID)
+
+#define S_FW_PORT_CMD_ACTION 16
+#define M_FW_PORT_CMD_ACTION 0xffff
+#define V_FW_PORT_CMD_ACTION(x) ((x) << S_FW_PORT_CMD_ACTION)
+#define G_FW_PORT_CMD_ACTION(x) \
+ (((x) >> S_FW_PORT_CMD_ACTION) & M_FW_PORT_CMD_ACTION)
+
+#define S_FW_PORT_CMD_OVLAN3 7
+#define M_FW_PORT_CMD_OVLAN3 0x1
+#define V_FW_PORT_CMD_OVLAN3(x) ((x) << S_FW_PORT_CMD_OVLAN3)
+#define G_FW_PORT_CMD_OVLAN3(x) \
+ (((x) >> S_FW_PORT_CMD_OVLAN3) & M_FW_PORT_CMD_OVLAN3)
+#define F_FW_PORT_CMD_OVLAN3 V_FW_PORT_CMD_OVLAN3(1U)
+
+#define S_FW_PORT_CMD_OVLAN2 6
+#define M_FW_PORT_CMD_OVLAN2 0x1
+#define V_FW_PORT_CMD_OVLAN2(x) ((x) << S_FW_PORT_CMD_OVLAN2)
+#define G_FW_PORT_CMD_OVLAN2(x) \
+ (((x) >> S_FW_PORT_CMD_OVLAN2) & M_FW_PORT_CMD_OVLAN2)
+#define F_FW_PORT_CMD_OVLAN2 V_FW_PORT_CMD_OVLAN2(1U)
+
+#define S_FW_PORT_CMD_OVLAN1 5
+#define M_FW_PORT_CMD_OVLAN1 0x1
+#define V_FW_PORT_CMD_OVLAN1(x) ((x) << S_FW_PORT_CMD_OVLAN1)
+#define G_FW_PORT_CMD_OVLAN1(x) \
+ (((x) >> S_FW_PORT_CMD_OVLAN1) & M_FW_PORT_CMD_OVLAN1)
+#define F_FW_PORT_CMD_OVLAN1 V_FW_PORT_CMD_OVLAN1(1U)
+
+#define S_FW_PORT_CMD_OVLAN0 4
+#define M_FW_PORT_CMD_OVLAN0 0x1
+#define V_FW_PORT_CMD_OVLAN0(x) ((x) << S_FW_PORT_CMD_OVLAN0)
+#define G_FW_PORT_CMD_OVLAN0(x) \
+ (((x) >> S_FW_PORT_CMD_OVLAN0) & M_FW_PORT_CMD_OVLAN0)
+#define F_FW_PORT_CMD_OVLAN0 V_FW_PORT_CMD_OVLAN0(1U)
+
+#define S_FW_PORT_CMD_IVLAN0 3
+#define M_FW_PORT_CMD_IVLAN0 0x1
+#define V_FW_PORT_CMD_IVLAN0(x) ((x) << S_FW_PORT_CMD_IVLAN0)
+#define G_FW_PORT_CMD_IVLAN0(x) \
+ (((x) >> S_FW_PORT_CMD_IVLAN0) & M_FW_PORT_CMD_IVLAN0)
+#define F_FW_PORT_CMD_IVLAN0 V_FW_PORT_CMD_IVLAN0(1U)
+
+#define S_FW_PORT_CMD_TXIPG 3
+#define M_FW_PORT_CMD_TXIPG 0x1fff
+#define V_FW_PORT_CMD_TXIPG(x) ((x) << S_FW_PORT_CMD_TXIPG)
+#define G_FW_PORT_CMD_TXIPG(x) \
+ (((x) >> S_FW_PORT_CMD_TXIPG) & M_FW_PORT_CMD_TXIPG)
+
+#define S_FW_PORT_CMD_FORCE_PINFO 0
+#define M_FW_PORT_CMD_FORCE_PINFO 0x1
+#define V_FW_PORT_CMD_FORCE_PINFO(x) ((x) << S_FW_PORT_CMD_FORCE_PINFO)
+#define G_FW_PORT_CMD_FORCE_PINFO(x) \
+ (((x) >> S_FW_PORT_CMD_FORCE_PINFO) & M_FW_PORT_CMD_FORCE_PINFO)
+#define F_FW_PORT_CMD_FORCE_PINFO V_FW_PORT_CMD_FORCE_PINFO(1U)
+
+#define S_FW_PORT_CMD_LSTATUS 31
+#define M_FW_PORT_CMD_LSTATUS 0x1
+#define V_FW_PORT_CMD_LSTATUS(x) ((x) << S_FW_PORT_CMD_LSTATUS)
+#define G_FW_PORT_CMD_LSTATUS(x) \
+ (((x) >> S_FW_PORT_CMD_LSTATUS) & M_FW_PORT_CMD_LSTATUS)
+#define F_FW_PORT_CMD_LSTATUS V_FW_PORT_CMD_LSTATUS(1U)
+
+#define S_FW_PORT_CMD_LSPEED 24
+#define M_FW_PORT_CMD_LSPEED 0x3f
+#define V_FW_PORT_CMD_LSPEED(x) ((x) << S_FW_PORT_CMD_LSPEED)
+#define G_FW_PORT_CMD_LSPEED(x) \
+ (((x) >> S_FW_PORT_CMD_LSPEED) & M_FW_PORT_CMD_LSPEED)
+
+#define S_FW_PORT_CMD_TXPAUSE 23
+#define M_FW_PORT_CMD_TXPAUSE 0x1
+#define V_FW_PORT_CMD_TXPAUSE(x) ((x) << S_FW_PORT_CMD_TXPAUSE)
+#define G_FW_PORT_CMD_TXPAUSE(x) \
+ (((x) >> S_FW_PORT_CMD_TXPAUSE) & M_FW_PORT_CMD_TXPAUSE)
+#define F_FW_PORT_CMD_TXPAUSE V_FW_PORT_CMD_TXPAUSE(1U)
+
+#define S_FW_PORT_CMD_RXPAUSE 22
+#define M_FW_PORT_CMD_RXPAUSE 0x1
+#define V_FW_PORT_CMD_RXPAUSE(x) ((x) << S_FW_PORT_CMD_RXPAUSE)
+#define G_FW_PORT_CMD_RXPAUSE(x) \
+ (((x) >> S_FW_PORT_CMD_RXPAUSE) & M_FW_PORT_CMD_RXPAUSE)
+#define F_FW_PORT_CMD_RXPAUSE V_FW_PORT_CMD_RXPAUSE(1U)
+
+#define S_FW_PORT_CMD_MDIOCAP 21
+#define M_FW_PORT_CMD_MDIOCAP 0x1
+#define V_FW_PORT_CMD_MDIOCAP(x) ((x) << S_FW_PORT_CMD_MDIOCAP)
+#define G_FW_PORT_CMD_MDIOCAP(x) \
+ (((x) >> S_FW_PORT_CMD_MDIOCAP) & M_FW_PORT_CMD_MDIOCAP)
+#define F_FW_PORT_CMD_MDIOCAP V_FW_PORT_CMD_MDIOCAP(1U)
+
+#define S_FW_PORT_CMD_MDIOADDR 16
+#define M_FW_PORT_CMD_MDIOADDR 0x1f
+#define V_FW_PORT_CMD_MDIOADDR(x) ((x) << S_FW_PORT_CMD_MDIOADDR)
+#define G_FW_PORT_CMD_MDIOADDR(x) \
+ (((x) >> S_FW_PORT_CMD_MDIOADDR) & M_FW_PORT_CMD_MDIOADDR)
+
+#define S_FW_PORT_CMD_LPTXPAUSE 15
+#define M_FW_PORT_CMD_LPTXPAUSE 0x1
+#define V_FW_PORT_CMD_LPTXPAUSE(x) ((x) << S_FW_PORT_CMD_LPTXPAUSE)
+#define G_FW_PORT_CMD_LPTXPAUSE(x) \
+ (((x) >> S_FW_PORT_CMD_LPTXPAUSE) & M_FW_PORT_CMD_LPTXPAUSE)
+#define F_FW_PORT_CMD_LPTXPAUSE V_FW_PORT_CMD_LPTXPAUSE(1U)
+
+#define S_FW_PORT_CMD_LPRXPAUSE 14
+#define M_FW_PORT_CMD_LPRXPAUSE 0x1
+#define V_FW_PORT_CMD_LPRXPAUSE(x) ((x) << S_FW_PORT_CMD_LPRXPAUSE)
+#define G_FW_PORT_CMD_LPRXPAUSE(x) \
+ (((x) >> S_FW_PORT_CMD_LPRXPAUSE) & M_FW_PORT_CMD_LPRXPAUSE)
+#define F_FW_PORT_CMD_LPRXPAUSE V_FW_PORT_CMD_LPRXPAUSE(1U)
+
+#define S_FW_PORT_CMD_PTYPE 8
+#define M_FW_PORT_CMD_PTYPE 0x1f
+#define V_FW_PORT_CMD_PTYPE(x) ((x) << S_FW_PORT_CMD_PTYPE)
+#define G_FW_PORT_CMD_PTYPE(x) \
+ (((x) >> S_FW_PORT_CMD_PTYPE) & M_FW_PORT_CMD_PTYPE)
+
+#define S_FW_PORT_CMD_LINKDNRC 5
+#define M_FW_PORT_CMD_LINKDNRC 0x7
+#define V_FW_PORT_CMD_LINKDNRC(x) ((x) << S_FW_PORT_CMD_LINKDNRC)
+#define G_FW_PORT_CMD_LINKDNRC(x) \
+ (((x) >> S_FW_PORT_CMD_LINKDNRC) & M_FW_PORT_CMD_LINKDNRC)
+
+#define S_FW_PORT_CMD_MODTYPE 0
+#define M_FW_PORT_CMD_MODTYPE 0x1f
+#define V_FW_PORT_CMD_MODTYPE(x) ((x) << S_FW_PORT_CMD_MODTYPE)
+#define G_FW_PORT_CMD_MODTYPE(x) \
+ (((x) >> S_FW_PORT_CMD_MODTYPE) & M_FW_PORT_CMD_MODTYPE)
+
+#define S_FW_PORT_CMD_APPLY 7
+#define M_FW_PORT_CMD_APPLY 0x1
+#define V_FW_PORT_CMD_APPLY(x) ((x) << S_FW_PORT_CMD_APPLY)
+#define G_FW_PORT_CMD_APPLY(x) \
+ (((x) >> S_FW_PORT_CMD_APPLY) & M_FW_PORT_CMD_APPLY)
+#define F_FW_PORT_CMD_APPLY V_FW_PORT_CMD_APPLY(1U)
+
+/*
+ * These are configured into the VPD and hence tools that generate
+ * VPD may use this enumeration.
+ * extPHY #lanes T4_I2C extI2C BP_Eq BP_ANEG Speed
+ */
+enum fw_port_type {
+ FW_PORT_TYPE_FIBER_XFI = 0, /* Y, 1, N, Y, N, N, 10G */
+ FW_PORT_TYPE_FIBER_XAUI = 1, /* Y, 4, N, Y, N, N, 10G */
+ FW_PORT_TYPE_BT_SGMII = 2, /* Y, 1, No, No, No, No, 1G/100M */
+ FW_PORT_TYPE_BT_XFI = 3, /* Y, 1, No, No, No, No, 10G */
+ FW_PORT_TYPE_BT_XAUI = 4, /* Y, 4, No, No, No, No, 10G/1G/100M? */
+ FW_PORT_TYPE_KX4 = 5, /* No, 4, No, No, Yes, Yes, 10G */
+ FW_PORT_TYPE_CX4 = 6, /* No, 4, No, No, No, No, 10G */
+ FW_PORT_TYPE_KX = 7, /* No, 1, No, No, Yes, No, 1G */
+ FW_PORT_TYPE_KR = 8, /* No, 1, No, No, Yes, Yes, 10G */
+ FW_PORT_TYPE_SFP = 9, /* No, 1, Yes, No, No, No, 10G */
+	FW_PORT_TYPE_BP_AP = 10, /* No, 1, No, No, Yes, Yes, 10G, BP ANEG */
+	FW_PORT_TYPE_BP4_AP = 11, /* No, 4, No, No, Yes, Yes, 10G, BP ANEG */
+
+ FW_PORT_TYPE_NONE = M_FW_PORT_CMD_PTYPE
+};
+
+/*
+ * These are read from the module's EEPROM and determined once the module is
+ * inserted.
+ */
+enum fw_port_module_type {
+ FW_PORT_MOD_TYPE_NA,
+ FW_PORT_MOD_TYPE_LR = 0x1,
+ FW_PORT_MOD_TYPE_SR = 0x2,
+ FW_PORT_MOD_TYPE_ER = 0x3,
+ FW_PORT_MOD_TYPE_TWINAX_PASSIVE = 0x4,
+ FW_PORT_MOD_TYPE_TWINAX_ACTIVE = 0x5,
+
+ FW_PORT_MOD_TYPE_LRM = 0x6,
+
+ FW_PORT_MOD_TYPE_NONE = M_FW_PORT_CMD_MODTYPE
+};
+
+/* Used by the firmware; tools may also use this to generate VPD. */
+enum fw_port_mod_sub_type {
+ FW_PORT_MOD_SUB_TYPE_NA,
+ FW_PORT_MOD_SUB_TYPE_MV88E114X=0x1,
+ FW_PORT_MOD_SUB_TYPE_BT_VSC8634=0x8,
+
+	/*
+	 * The following will never be in the VPD. They are TWINAX cable
+	 * lengths decoded from SFP+ module i2c PROMs. These should almost
+	 * certainly go somewhere else ...
+	 */
+ FW_PORT_MOD_SUB_TYPE_TWINAX_1=0x9,
+ FW_PORT_MOD_SUB_TYPE_TWINAX_3=0xA,
+ FW_PORT_MOD_SUB_TYPE_TWINAX_5=0xB,
+ FW_PORT_MOD_SUB_TYPE_TWINAX_7=0xC,
+};
+
+/* link down reason codes (3b) */
+enum fw_port_link_dn_rc {
+ FW_PORT_LINK_DN_RC_NONE,
+ FW_PORT_LINK_DN_RC_REMFLT,
+ FW_PORT_LINK_DN_ANEG_F,
+ FW_PORT_LINK_DN_MS_RES_F,
+ FW_PORT_LINK_DN_UNKNOWN
+};
+
+/* port stats */
+#define FW_NUM_PORT_STATS 50
+#define FW_NUM_PORT_TX_STATS 23
+#define FW_NUM_PORT_RX_STATS 27
+
+enum fw_port_stats_tx_index {
+ FW_STAT_TX_PORT_BYTES_IX,
+ FW_STAT_TX_PORT_FRAMES_IX,
+ FW_STAT_TX_PORT_BCAST_IX,
+ FW_STAT_TX_PORT_MCAST_IX,
+ FW_STAT_TX_PORT_UCAST_IX,
+ FW_STAT_TX_PORT_ERROR_IX,
+ FW_STAT_TX_PORT_64B_IX,
+ FW_STAT_TX_PORT_65B_127B_IX,
+ FW_STAT_TX_PORT_128B_255B_IX,
+ FW_STAT_TX_PORT_256B_511B_IX,
+ FW_STAT_TX_PORT_512B_1023B_IX,
+ FW_STAT_TX_PORT_1024B_1518B_IX,
+ FW_STAT_TX_PORT_1519B_MAX_IX,
+ FW_STAT_TX_PORT_DROP_IX,
+ FW_STAT_TX_PORT_PAUSE_IX,
+ FW_STAT_TX_PORT_PPP0_IX,
+ FW_STAT_TX_PORT_PPP1_IX,
+ FW_STAT_TX_PORT_PPP2_IX,
+ FW_STAT_TX_PORT_PPP3_IX,
+ FW_STAT_TX_PORT_PPP4_IX,
+ FW_STAT_TX_PORT_PPP5_IX,
+ FW_STAT_TX_PORT_PPP6_IX,
+ FW_STAT_TX_PORT_PPP7_IX
+};
+
+enum fw_port_stat_rx_index {
+ FW_STAT_RX_PORT_BYTES_IX,
+ FW_STAT_RX_PORT_FRAMES_IX,
+ FW_STAT_RX_PORT_BCAST_IX,
+ FW_STAT_RX_PORT_MCAST_IX,
+ FW_STAT_RX_PORT_UCAST_IX,
+ FW_STAT_RX_PORT_MTU_ERROR_IX,
+ FW_STAT_RX_PORT_MTU_CRC_ERROR_IX,
+ FW_STAT_RX_PORT_CRC_ERROR_IX,
+ FW_STAT_RX_PORT_LEN_ERROR_IX,
+ FW_STAT_RX_PORT_SYM_ERROR_IX,
+ FW_STAT_RX_PORT_64B_IX,
+ FW_STAT_RX_PORT_65B_127B_IX,
+ FW_STAT_RX_PORT_128B_255B_IX,
+ FW_STAT_RX_PORT_256B_511B_IX,
+ FW_STAT_RX_PORT_512B_1023B_IX,
+ FW_STAT_RX_PORT_1024B_1518B_IX,
+ FW_STAT_RX_PORT_1519B_MAX_IX,
+ FW_STAT_RX_PORT_PAUSE_IX,
+ FW_STAT_RX_PORT_PPP0_IX,
+ FW_STAT_RX_PORT_PPP1_IX,
+ FW_STAT_RX_PORT_PPP2_IX,
+ FW_STAT_RX_PORT_PPP3_IX,
+ FW_STAT_RX_PORT_PPP4_IX,
+ FW_STAT_RX_PORT_PPP5_IX,
+ FW_STAT_RX_PORT_PPP6_IX,
+ FW_STAT_RX_PORT_PPP7_IX,
+ FW_STAT_RX_PORT_LESS_64B_IX
+};
+
+struct fw_port_stats_cmd {
+ __be32 op_to_portid;
+ __be32 retval_len16;
+ union fw_port_stats {
+ struct fw_port_stats_ctl {
+ __u8 nstats_bg_bm;
+ __u8 tx_ix;
+ __be16 r6;
+ __be32 r7;
+ __be64 stat0;
+ __be64 stat1;
+ __be64 stat2;
+ __be64 stat3;
+ __be64 stat4;
+ __be64 stat5;
+ } ctl;
+ struct fw_port_stats_all {
+ __be64 tx_bytes;
+ __be64 tx_frames;
+ __be64 tx_bcast;
+ __be64 tx_mcast;
+ __be64 tx_ucast;
+ __be64 tx_error;
+ __be64 tx_64b;
+ __be64 tx_65b_127b;
+ __be64 tx_128b_255b;
+ __be64 tx_256b_511b;
+ __be64 tx_512b_1023b;
+ __be64 tx_1024b_1518b;
+ __be64 tx_1519b_max;
+ __be64 tx_drop;
+ __be64 tx_pause;
+ __be64 tx_ppp0;
+ __be64 tx_ppp1;
+ __be64 tx_ppp2;
+ __be64 tx_ppp3;
+ __be64 tx_ppp4;
+ __be64 tx_ppp5;
+ __be64 tx_ppp6;
+ __be64 tx_ppp7;
+ __be64 rx_bytes;
+ __be64 rx_frames;
+ __be64 rx_bcast;
+ __be64 rx_mcast;
+ __be64 rx_ucast;
+ __be64 rx_mtu_error;
+ __be64 rx_mtu_crc_error;
+ __be64 rx_crc_error;
+ __be64 rx_len_error;
+ __be64 rx_sym_error;
+ __be64 rx_64b;
+ __be64 rx_65b_127b;
+ __be64 rx_128b_255b;
+ __be64 rx_256b_511b;
+ __be64 rx_512b_1023b;
+ __be64 rx_1024b_1518b;
+ __be64 rx_1519b_max;
+ __be64 rx_pause;
+ __be64 rx_ppp0;
+ __be64 rx_ppp1;
+ __be64 rx_ppp2;
+ __be64 rx_ppp3;
+ __be64 rx_ppp4;
+ __be64 rx_ppp5;
+ __be64 rx_ppp6;
+ __be64 rx_ppp7;
+ __be64 rx_less_64b;
+ __be64 rx_bg_drop;
+ __be64 rx_bg_trunc;
+ } all;
+ } u;
+};
+
+#define S_FW_PORT_STATS_CMD_NSTATS 4
+#define M_FW_PORT_STATS_CMD_NSTATS 0x7
+#define V_FW_PORT_STATS_CMD_NSTATS(x) ((x) << S_FW_PORT_STATS_CMD_NSTATS)
+#define G_FW_PORT_STATS_CMD_NSTATS(x) \
+ (((x) >> S_FW_PORT_STATS_CMD_NSTATS) & M_FW_PORT_STATS_CMD_NSTATS)
+
+#define S_FW_PORT_STATS_CMD_BG_BM 0
+#define M_FW_PORT_STATS_CMD_BG_BM 0x3
+#define V_FW_PORT_STATS_CMD_BG_BM(x) ((x) << S_FW_PORT_STATS_CMD_BG_BM)
+#define G_FW_PORT_STATS_CMD_BG_BM(x) \
+ (((x) >> S_FW_PORT_STATS_CMD_BG_BM) & M_FW_PORT_STATS_CMD_BG_BM)
+
+#define S_FW_PORT_STATS_CMD_TX 7
+#define M_FW_PORT_STATS_CMD_TX 0x1
+#define V_FW_PORT_STATS_CMD_TX(x) ((x) << S_FW_PORT_STATS_CMD_TX)
+#define G_FW_PORT_STATS_CMD_TX(x) \
+ (((x) >> S_FW_PORT_STATS_CMD_TX) & M_FW_PORT_STATS_CMD_TX)
+#define F_FW_PORT_STATS_CMD_TX V_FW_PORT_STATS_CMD_TX(1U)
+
+#define S_FW_PORT_STATS_CMD_IX 0
+#define M_FW_PORT_STATS_CMD_IX 0x3f
+#define V_FW_PORT_STATS_CMD_IX(x) ((x) << S_FW_PORT_STATS_CMD_IX)
+#define G_FW_PORT_STATS_CMD_IX(x) \
+ (((x) >> S_FW_PORT_STATS_CMD_IX) & M_FW_PORT_STATS_CMD_IX)
+
+/* port loopback stats */
+#define FW_NUM_LB_STATS 14
+enum fw_port_lb_stats_index {
+ FW_STAT_LB_PORT_BYTES_IX,
+ FW_STAT_LB_PORT_FRAMES_IX,
+ FW_STAT_LB_PORT_BCAST_IX,
+ FW_STAT_LB_PORT_MCAST_IX,
+ FW_STAT_LB_PORT_UCAST_IX,
+ FW_STAT_LB_PORT_ERROR_IX,
+ FW_STAT_LB_PORT_64B_IX,
+ FW_STAT_LB_PORT_65B_127B_IX,
+ FW_STAT_LB_PORT_128B_255B_IX,
+ FW_STAT_LB_PORT_256B_511B_IX,
+ FW_STAT_LB_PORT_512B_1023B_IX,
+ FW_STAT_LB_PORT_1024B_1518B_IX,
+ FW_STAT_LB_PORT_1519B_MAX_IX,
+ FW_STAT_LB_PORT_DROP_FRAMES_IX
+};
+
+struct fw_port_lb_stats_cmd {
+ __be32 op_to_lbport;
+ __be32 retval_len16;
+ union fw_port_lb_stats {
+ struct fw_port_lb_stats_ctl {
+ __u8 nstats_bg_bm;
+ __u8 ix_pkd;
+ __be16 r6;
+ __be32 r7;
+ __be64 stat0;
+ __be64 stat1;
+ __be64 stat2;
+ __be64 stat3;
+ __be64 stat4;
+ __be64 stat5;
+ } ctl;
+ struct fw_port_lb_stats_all {
+ __be64 tx_bytes;
+ __be64 tx_frames;
+ __be64 tx_bcast;
+ __be64 tx_mcast;
+ __be64 tx_ucast;
+ __be64 tx_error;
+ __be64 tx_64b;
+ __be64 tx_65b_127b;
+ __be64 tx_128b_255b;
+ __be64 tx_256b_511b;
+ __be64 tx_512b_1023b;
+ __be64 tx_1024b_1518b;
+ __be64 tx_1519b_max;
+ __be64 rx_lb_drop;
+ __be64 rx_lb_trunc;
+ } all;
+ } u;
+};
+
+#define S_FW_PORT_LB_STATS_CMD_LBPORT 0
+#define M_FW_PORT_LB_STATS_CMD_LBPORT 0xf
+#define V_FW_PORT_LB_STATS_CMD_LBPORT(x) \
+ ((x) << S_FW_PORT_LB_STATS_CMD_LBPORT)
+#define G_FW_PORT_LB_STATS_CMD_LBPORT(x) \
+ (((x) >> S_FW_PORT_LB_STATS_CMD_LBPORT) & M_FW_PORT_LB_STATS_CMD_LBPORT)
+
+#define S_FW_PORT_LB_STATS_CMD_NSTATS 4
+#define M_FW_PORT_LB_STATS_CMD_NSTATS 0x7
+#define V_FW_PORT_LB_STATS_CMD_NSTATS(x) \
+ ((x) << S_FW_PORT_LB_STATS_CMD_NSTATS)
+#define G_FW_PORT_LB_STATS_CMD_NSTATS(x) \
+ (((x) >> S_FW_PORT_LB_STATS_CMD_NSTATS) & M_FW_PORT_LB_STATS_CMD_NSTATS)
+
+#define S_FW_PORT_LB_STATS_CMD_BG_BM 0
+#define M_FW_PORT_LB_STATS_CMD_BG_BM 0x3
+#define V_FW_PORT_LB_STATS_CMD_BG_BM(x) ((x) << S_FW_PORT_LB_STATS_CMD_BG_BM)
+#define G_FW_PORT_LB_STATS_CMD_BG_BM(x) \
+ (((x) >> S_FW_PORT_LB_STATS_CMD_BG_BM) & M_FW_PORT_LB_STATS_CMD_BG_BM)
+
+#define S_FW_PORT_LB_STATS_CMD_IX 0
+#define M_FW_PORT_LB_STATS_CMD_IX 0xf
+#define V_FW_PORT_LB_STATS_CMD_IX(x) ((x) << S_FW_PORT_LB_STATS_CMD_IX)
+#define G_FW_PORT_LB_STATS_CMD_IX(x) \
+ (((x) >> S_FW_PORT_LB_STATS_CMD_IX) & M_FW_PORT_LB_STATS_CMD_IX)
+
+/* Trace-related defines */
+#define FW_TRACE_CAPTURE_MAX_SINGLE_FLT_MODE 10240
+#define FW_TRACE_CAPTURE_MAX_MULTI_FLT_MODE 2560
+
+struct fw_port_trace_cmd {
+ __be32 op_to_portid;
+ __be32 retval_len16;
+ __be16 traceen_to_pciech;
+ __be16 qnum;
+ __be32 r5;
+};
+
+#define S_FW_PORT_TRACE_CMD_PORTID 0
+#define M_FW_PORT_TRACE_CMD_PORTID 0xf
+#define V_FW_PORT_TRACE_CMD_PORTID(x) ((x) << S_FW_PORT_TRACE_CMD_PORTID)
+#define G_FW_PORT_TRACE_CMD_PORTID(x) \
+ (((x) >> S_FW_PORT_TRACE_CMD_PORTID) & M_FW_PORT_TRACE_CMD_PORTID)
+
+#define S_FW_PORT_TRACE_CMD_TRACEEN 15
+#define M_FW_PORT_TRACE_CMD_TRACEEN 0x1
+#define V_FW_PORT_TRACE_CMD_TRACEEN(x) ((x) << S_FW_PORT_TRACE_CMD_TRACEEN)
+#define G_FW_PORT_TRACE_CMD_TRACEEN(x) \
+ (((x) >> S_FW_PORT_TRACE_CMD_TRACEEN) & M_FW_PORT_TRACE_CMD_TRACEEN)
+#define F_FW_PORT_TRACE_CMD_TRACEEN V_FW_PORT_TRACE_CMD_TRACEEN(1U)
+
+#define S_FW_PORT_TRACE_CMD_FLTMODE 14
+#define M_FW_PORT_TRACE_CMD_FLTMODE 0x1
+#define V_FW_PORT_TRACE_CMD_FLTMODE(x) ((x) << S_FW_PORT_TRACE_CMD_FLTMODE)
+#define G_FW_PORT_TRACE_CMD_FLTMODE(x) \
+ (((x) >> S_FW_PORT_TRACE_CMD_FLTMODE) & M_FW_PORT_TRACE_CMD_FLTMODE)
+#define F_FW_PORT_TRACE_CMD_FLTMODE V_FW_PORT_TRACE_CMD_FLTMODE(1U)
+
+#define S_FW_PORT_TRACE_CMD_DUPLEN 13
+#define M_FW_PORT_TRACE_CMD_DUPLEN 0x1
+#define V_FW_PORT_TRACE_CMD_DUPLEN(x) ((x) << S_FW_PORT_TRACE_CMD_DUPLEN)
+#define G_FW_PORT_TRACE_CMD_DUPLEN(x) \
+ (((x) >> S_FW_PORT_TRACE_CMD_DUPLEN) & M_FW_PORT_TRACE_CMD_DUPLEN)
+#define F_FW_PORT_TRACE_CMD_DUPLEN V_FW_PORT_TRACE_CMD_DUPLEN(1U)
+
+#define S_FW_PORT_TRACE_CMD_RUNTFLTSIZE 8
+#define M_FW_PORT_TRACE_CMD_RUNTFLTSIZE 0x1f
+#define V_FW_PORT_TRACE_CMD_RUNTFLTSIZE(x) \
+ ((x) << S_FW_PORT_TRACE_CMD_RUNTFLTSIZE)
+#define G_FW_PORT_TRACE_CMD_RUNTFLTSIZE(x) \
+ (((x) >> S_FW_PORT_TRACE_CMD_RUNTFLTSIZE) & \
+ M_FW_PORT_TRACE_CMD_RUNTFLTSIZE)
+
+#define S_FW_PORT_TRACE_CMD_PCIECH 6
+#define M_FW_PORT_TRACE_CMD_PCIECH 0x3
+#define V_FW_PORT_TRACE_CMD_PCIECH(x) ((x) << S_FW_PORT_TRACE_CMD_PCIECH)
+#define G_FW_PORT_TRACE_CMD_PCIECH(x) \
+ (((x) >> S_FW_PORT_TRACE_CMD_PCIECH) & M_FW_PORT_TRACE_CMD_PCIECH)
+
+struct fw_port_trace_mmap_cmd {
+ __be32 op_to_portid;
+ __be32 retval_len16;
+ __be32 fid_to_skipoffset;
+ __be32 minpktsize_capturemax;
+ __u8 map[224];
+};
+
+#define S_FW_PORT_TRACE_MMAP_CMD_PORTID 0
+#define M_FW_PORT_TRACE_MMAP_CMD_PORTID 0xf
+#define V_FW_PORT_TRACE_MMAP_CMD_PORTID(x) \
+ ((x) << S_FW_PORT_TRACE_MMAP_CMD_PORTID)
+#define G_FW_PORT_TRACE_MMAP_CMD_PORTID(x) \
+ (((x) >> S_FW_PORT_TRACE_MMAP_CMD_PORTID) & \
+ M_FW_PORT_TRACE_MMAP_CMD_PORTID)
+
+#define S_FW_PORT_TRACE_MMAP_CMD_FID 30
+#define M_FW_PORT_TRACE_MMAP_CMD_FID 0x3
+#define V_FW_PORT_TRACE_MMAP_CMD_FID(x) ((x) << S_FW_PORT_TRACE_MMAP_CMD_FID)
+#define G_FW_PORT_TRACE_MMAP_CMD_FID(x) \
+ (((x) >> S_FW_PORT_TRACE_MMAP_CMD_FID) & M_FW_PORT_TRACE_MMAP_CMD_FID)
+
+#define S_FW_PORT_TRACE_MMAP_CMD_MMAPEN 29
+#define M_FW_PORT_TRACE_MMAP_CMD_MMAPEN 0x1
+#define V_FW_PORT_TRACE_MMAP_CMD_MMAPEN(x) \
+ ((x) << S_FW_PORT_TRACE_MMAP_CMD_MMAPEN)
+#define G_FW_PORT_TRACE_MMAP_CMD_MMAPEN(x) \
+ (((x) >> S_FW_PORT_TRACE_MMAP_CMD_MMAPEN) & \
+ M_FW_PORT_TRACE_MMAP_CMD_MMAPEN)
+#define F_FW_PORT_TRACE_MMAP_CMD_MMAPEN V_FW_PORT_TRACE_MMAP_CMD_MMAPEN(1U)
+
+#define S_FW_PORT_TRACE_MMAP_CMD_DCMAPEN 28
+#define M_FW_PORT_TRACE_MMAP_CMD_DCMAPEN 0x1
+#define V_FW_PORT_TRACE_MMAP_CMD_DCMAPEN(x) \
+ ((x) << S_FW_PORT_TRACE_MMAP_CMD_DCMAPEN)
+#define G_FW_PORT_TRACE_MMAP_CMD_DCMAPEN(x) \
+ (((x) >> S_FW_PORT_TRACE_MMAP_CMD_DCMAPEN) & \
+ M_FW_PORT_TRACE_MMAP_CMD_DCMAPEN)
+#define F_FW_PORT_TRACE_MMAP_CMD_DCMAPEN \
+ V_FW_PORT_TRACE_MMAP_CMD_DCMAPEN(1U)
+
+#define S_FW_PORT_TRACE_MMAP_CMD_SKIPLENGTH 8
+#define M_FW_PORT_TRACE_MMAP_CMD_SKIPLENGTH 0x1f
+#define V_FW_PORT_TRACE_MMAP_CMD_SKIPLENGTH(x) \
+ ((x) << S_FW_PORT_TRACE_MMAP_CMD_SKIPLENGTH)
+#define G_FW_PORT_TRACE_MMAP_CMD_SKIPLENGTH(x) \
+ (((x) >> S_FW_PORT_TRACE_MMAP_CMD_SKIPLENGTH) & \
+ M_FW_PORT_TRACE_MMAP_CMD_SKIPLENGTH)
+
+#define S_FW_PORT_TRACE_MMAP_CMD_SKIPOFFSET 0
+#define M_FW_PORT_TRACE_MMAP_CMD_SKIPOFFSET 0x1f
+#define V_FW_PORT_TRACE_MMAP_CMD_SKIPOFFSET(x) \
+ ((x) << S_FW_PORT_TRACE_MMAP_CMD_SKIPOFFSET)
+#define G_FW_PORT_TRACE_MMAP_CMD_SKIPOFFSET(x) \
+ (((x) >> S_FW_PORT_TRACE_MMAP_CMD_SKIPOFFSET) & \
+ M_FW_PORT_TRACE_MMAP_CMD_SKIPOFFSET)
+
+#define S_FW_PORT_TRACE_MMAP_CMD_MINPKTSIZE 18
+#define M_FW_PORT_TRACE_MMAP_CMD_MINPKTSIZE 0x3fff
+#define V_FW_PORT_TRACE_MMAP_CMD_MINPKTSIZE(x) \
+ ((x) << S_FW_PORT_TRACE_MMAP_CMD_MINPKTSIZE)
+#define G_FW_PORT_TRACE_MMAP_CMD_MINPKTSIZE(x) \
+ (((x) >> S_FW_PORT_TRACE_MMAP_CMD_MINPKTSIZE) & \
+ M_FW_PORT_TRACE_MMAP_CMD_MINPKTSIZE)
+
+#define S_FW_PORT_TRACE_MMAP_CMD_CAPTUREMAX 0
+#define M_FW_PORT_TRACE_MMAP_CMD_CAPTUREMAX 0x3fff
+#define V_FW_PORT_TRACE_MMAP_CMD_CAPTUREMAX(x) \
+ ((x) << S_FW_PORT_TRACE_MMAP_CMD_CAPTUREMAX)
+#define G_FW_PORT_TRACE_MMAP_CMD_CAPTUREMAX(x) \
+ (((x) >> S_FW_PORT_TRACE_MMAP_CMD_CAPTUREMAX) & \
+ M_FW_PORT_TRACE_MMAP_CMD_CAPTUREMAX)
+
+struct fw_rss_ind_tbl_cmd {
+ __be32 op_to_viid;
+ __be32 retval_len16;
+ __be16 niqid;
+ __be16 startidx;
+ __be32 r3;
+ __be32 iq0_to_iq2;
+ __be32 iq3_to_iq5;
+ __be32 iq6_to_iq8;
+ __be32 iq9_to_iq11;
+ __be32 iq12_to_iq14;
+ __be32 iq15_to_iq17;
+ __be32 iq18_to_iq20;
+ __be32 iq21_to_iq23;
+ __be32 iq24_to_iq26;
+ __be32 iq27_to_iq29;
+ __be32 iq30_iq31;
+ __be32 r15_lo;
+};
+
+#define S_FW_RSS_IND_TBL_CMD_VIID 0
+#define M_FW_RSS_IND_TBL_CMD_VIID 0xfff
+#define V_FW_RSS_IND_TBL_CMD_VIID(x) ((x) << S_FW_RSS_IND_TBL_CMD_VIID)
+#define G_FW_RSS_IND_TBL_CMD_VIID(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_VIID) & M_FW_RSS_IND_TBL_CMD_VIID)
+
+#define S_FW_RSS_IND_TBL_CMD_IQ0 20
+#define M_FW_RSS_IND_TBL_CMD_IQ0 0x3ff
+#define V_FW_RSS_IND_TBL_CMD_IQ0(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ0)
+#define G_FW_RSS_IND_TBL_CMD_IQ0(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ0) & M_FW_RSS_IND_TBL_CMD_IQ0)
+
+#define S_FW_RSS_IND_TBL_CMD_IQ1 10
+#define M_FW_RSS_IND_TBL_CMD_IQ1 0x3ff
+#define V_FW_RSS_IND_TBL_CMD_IQ1(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ1)
+#define G_FW_RSS_IND_TBL_CMD_IQ1(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ1) & M_FW_RSS_IND_TBL_CMD_IQ1)
+
+#define S_FW_RSS_IND_TBL_CMD_IQ2 0
+#define M_FW_RSS_IND_TBL_CMD_IQ2 0x3ff
+#define V_FW_RSS_IND_TBL_CMD_IQ2(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ2)
+#define G_FW_RSS_IND_TBL_CMD_IQ2(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ2) & M_FW_RSS_IND_TBL_CMD_IQ2)
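+
+/*
+ * Illustrative sketch only: each 32-bit word of the indirection table packs
+ * three 10-bit ingress queue ids; e.g. (cmd and iq[] are hypothetical
+ * host-side variables):
+ *
+ *	cmd.iq0_to_iq2 = htobe32(V_FW_RSS_IND_TBL_CMD_IQ0(iq[0]) |
+ *	    V_FW_RSS_IND_TBL_CMD_IQ1(iq[1]) | V_FW_RSS_IND_TBL_CMD_IQ2(iq[2]));
+ */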
+
+#define S_FW_RSS_IND_TBL_CMD_IQ3 20
+#define M_FW_RSS_IND_TBL_CMD_IQ3 0x3ff
+#define V_FW_RSS_IND_TBL_CMD_IQ3(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ3)
+#define G_FW_RSS_IND_TBL_CMD_IQ3(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ3) & M_FW_RSS_IND_TBL_CMD_IQ3)
+
+#define S_FW_RSS_IND_TBL_CMD_IQ4 10
+#define M_FW_RSS_IND_TBL_CMD_IQ4 0x3ff
+#define V_FW_RSS_IND_TBL_CMD_IQ4(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ4)
+#define G_FW_RSS_IND_TBL_CMD_IQ4(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ4) & M_FW_RSS_IND_TBL_CMD_IQ4)
+
+#define S_FW_RSS_IND_TBL_CMD_IQ5 0
+#define M_FW_RSS_IND_TBL_CMD_IQ5 0x3ff
+#define V_FW_RSS_IND_TBL_CMD_IQ5(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ5)
+#define G_FW_RSS_IND_TBL_CMD_IQ5(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ5) & M_FW_RSS_IND_TBL_CMD_IQ5)
+
+#define S_FW_RSS_IND_TBL_CMD_IQ6 20
+#define M_FW_RSS_IND_TBL_CMD_IQ6 0x3ff
+#define V_FW_RSS_IND_TBL_CMD_IQ6(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ6)
+#define G_FW_RSS_IND_TBL_CMD_IQ6(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ6) & M_FW_RSS_IND_TBL_CMD_IQ6)
+
+#define S_FW_RSS_IND_TBL_CMD_IQ7 10
+#define M_FW_RSS_IND_TBL_CMD_IQ7 0x3ff
+#define V_FW_RSS_IND_TBL_CMD_IQ7(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ7)
+#define G_FW_RSS_IND_TBL_CMD_IQ7(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ7) & M_FW_RSS_IND_TBL_CMD_IQ7)
+
+#define S_FW_RSS_IND_TBL_CMD_IQ8 0
+#define M_FW_RSS_IND_TBL_CMD_IQ8 0x3ff
+#define V_FW_RSS_IND_TBL_CMD_IQ8(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ8)
+#define G_FW_RSS_IND_TBL_CMD_IQ8(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ8) & M_FW_RSS_IND_TBL_CMD_IQ8)
+
+#define S_FW_RSS_IND_TBL_CMD_IQ9 20
+#define M_FW_RSS_IND_TBL_CMD_IQ9 0x3ff
+#define V_FW_RSS_IND_TBL_CMD_IQ9(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ9)
+#define G_FW_RSS_IND_TBL_CMD_IQ9(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ9) & M_FW_RSS_IND_TBL_CMD_IQ9)
+
+#define S_FW_RSS_IND_TBL_CMD_IQ10 10
+#define M_FW_RSS_IND_TBL_CMD_IQ10 0x3ff
+#define V_FW_RSS_IND_TBL_CMD_IQ10(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ10)
+#define G_FW_RSS_IND_TBL_CMD_IQ10(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ10) & M_FW_RSS_IND_TBL_CMD_IQ10)
+
+#define S_FW_RSS_IND_TBL_CMD_IQ11 0
+#define M_FW_RSS_IND_TBL_CMD_IQ11 0x3ff
+#define V_FW_RSS_IND_TBL_CMD_IQ11(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ11)
+#define G_FW_RSS_IND_TBL_CMD_IQ11(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ11) & M_FW_RSS_IND_TBL_CMD_IQ11)
+
+#define S_FW_RSS_IND_TBL_CMD_IQ12 20
+#define M_FW_RSS_IND_TBL_CMD_IQ12 0x3ff
+#define V_FW_RSS_IND_TBL_CMD_IQ12(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ12)
+#define G_FW_RSS_IND_TBL_CMD_IQ12(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ12) & M_FW_RSS_IND_TBL_CMD_IQ12)
+
+#define S_FW_RSS_IND_TBL_CMD_IQ13 10
+#define M_FW_RSS_IND_TBL_CMD_IQ13 0x3ff
+#define V_FW_RSS_IND_TBL_CMD_IQ13(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ13)
+#define G_FW_RSS_IND_TBL_CMD_IQ13(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ13) & M_FW_RSS_IND_TBL_CMD_IQ13)
+
+#define S_FW_RSS_IND_TBL_CMD_IQ14 0
+#define M_FW_RSS_IND_TBL_CMD_IQ14 0x3ff
+#define V_FW_RSS_IND_TBL_CMD_IQ14(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ14)
+#define G_FW_RSS_IND_TBL_CMD_IQ14(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ14) & M_FW_RSS_IND_TBL_CMD_IQ14)
+
+#define S_FW_RSS_IND_TBL_CMD_IQ15 20
+#define M_FW_RSS_IND_TBL_CMD_IQ15 0x3ff
+#define V_FW_RSS_IND_TBL_CMD_IQ15(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ15)
+#define G_FW_RSS_IND_TBL_CMD_IQ15(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ15) & M_FW_RSS_IND_TBL_CMD_IQ15)
+
+#define S_FW_RSS_IND_TBL_CMD_IQ16 10
+#define M_FW_RSS_IND_TBL_CMD_IQ16 0x3ff
+#define V_FW_RSS_IND_TBL_CMD_IQ16(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ16)
+#define G_FW_RSS_IND_TBL_CMD_IQ16(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ16) & M_FW_RSS_IND_TBL_CMD_IQ16)
+
+#define S_FW_RSS_IND_TBL_CMD_IQ17 0
+#define M_FW_RSS_IND_TBL_CMD_IQ17 0x3ff
+#define V_FW_RSS_IND_TBL_CMD_IQ17(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ17)
+#define G_FW_RSS_IND_TBL_CMD_IQ17(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ17) & M_FW_RSS_IND_TBL_CMD_IQ17)
+
+#define S_FW_RSS_IND_TBL_CMD_IQ18 20
+#define M_FW_RSS_IND_TBL_CMD_IQ18 0x3ff
+#define V_FW_RSS_IND_TBL_CMD_IQ18(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ18)
+#define G_FW_RSS_IND_TBL_CMD_IQ18(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ18) & M_FW_RSS_IND_TBL_CMD_IQ18)
+
+#define S_FW_RSS_IND_TBL_CMD_IQ19 10
+#define M_FW_RSS_IND_TBL_CMD_IQ19 0x3ff
+#define V_FW_RSS_IND_TBL_CMD_IQ19(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ19)
+#define G_FW_RSS_IND_TBL_CMD_IQ19(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ19) & M_FW_RSS_IND_TBL_CMD_IQ19)
+
+#define S_FW_RSS_IND_TBL_CMD_IQ20 0
+#define M_FW_RSS_IND_TBL_CMD_IQ20 0x3ff
+#define V_FW_RSS_IND_TBL_CMD_IQ20(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ20)
+#define G_FW_RSS_IND_TBL_CMD_IQ20(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ20) & M_FW_RSS_IND_TBL_CMD_IQ20)
+
+#define S_FW_RSS_IND_TBL_CMD_IQ21 20
+#define M_FW_RSS_IND_TBL_CMD_IQ21 0x3ff
+#define V_FW_RSS_IND_TBL_CMD_IQ21(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ21)
+#define G_FW_RSS_IND_TBL_CMD_IQ21(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ21) & M_FW_RSS_IND_TBL_CMD_IQ21)
+
+#define S_FW_RSS_IND_TBL_CMD_IQ22 10
+#define M_FW_RSS_IND_TBL_CMD_IQ22 0x3ff
+#define V_FW_RSS_IND_TBL_CMD_IQ22(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ22)
+#define G_FW_RSS_IND_TBL_CMD_IQ22(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ22) & M_FW_RSS_IND_TBL_CMD_IQ22)
+
+#define S_FW_RSS_IND_TBL_CMD_IQ23 0
+#define M_FW_RSS_IND_TBL_CMD_IQ23 0x3ff
+#define V_FW_RSS_IND_TBL_CMD_IQ23(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ23)
+#define G_FW_RSS_IND_TBL_CMD_IQ23(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ23) & M_FW_RSS_IND_TBL_CMD_IQ23)
+
+#define S_FW_RSS_IND_TBL_CMD_IQ24 20
+#define M_FW_RSS_IND_TBL_CMD_IQ24 0x3ff
+#define V_FW_RSS_IND_TBL_CMD_IQ24(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ24)
+#define G_FW_RSS_IND_TBL_CMD_IQ24(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ24) & M_FW_RSS_IND_TBL_CMD_IQ24)
+
+#define S_FW_RSS_IND_TBL_CMD_IQ25 10
+#define M_FW_RSS_IND_TBL_CMD_IQ25 0x3ff
+#define V_FW_RSS_IND_TBL_CMD_IQ25(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ25)
+#define G_FW_RSS_IND_TBL_CMD_IQ25(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ25) & M_FW_RSS_IND_TBL_CMD_IQ25)
+
+#define S_FW_RSS_IND_TBL_CMD_IQ26 0
+#define M_FW_RSS_IND_TBL_CMD_IQ26 0x3ff
+#define V_FW_RSS_IND_TBL_CMD_IQ26(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ26)
+#define G_FW_RSS_IND_TBL_CMD_IQ26(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ26) & M_FW_RSS_IND_TBL_CMD_IQ26)
+
+#define S_FW_RSS_IND_TBL_CMD_IQ27 20
+#define M_FW_RSS_IND_TBL_CMD_IQ27 0x3ff
+#define V_FW_RSS_IND_TBL_CMD_IQ27(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ27)
+#define G_FW_RSS_IND_TBL_CMD_IQ27(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ27) & M_FW_RSS_IND_TBL_CMD_IQ27)
+
+#define S_FW_RSS_IND_TBL_CMD_IQ28 10
+#define M_FW_RSS_IND_TBL_CMD_IQ28 0x3ff
+#define V_FW_RSS_IND_TBL_CMD_IQ28(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ28)
+#define G_FW_RSS_IND_TBL_CMD_IQ28(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ28) & M_FW_RSS_IND_TBL_CMD_IQ28)
+
+#define S_FW_RSS_IND_TBL_CMD_IQ29 0
+#define M_FW_RSS_IND_TBL_CMD_IQ29 0x3ff
+#define V_FW_RSS_IND_TBL_CMD_IQ29(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ29)
+#define G_FW_RSS_IND_TBL_CMD_IQ29(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ29) & M_FW_RSS_IND_TBL_CMD_IQ29)
+
+#define S_FW_RSS_IND_TBL_CMD_IQ30 20
+#define M_FW_RSS_IND_TBL_CMD_IQ30 0x3ff
+#define V_FW_RSS_IND_TBL_CMD_IQ30(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ30)
+#define G_FW_RSS_IND_TBL_CMD_IQ30(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ30) & M_FW_RSS_IND_TBL_CMD_IQ30)
+
+#define S_FW_RSS_IND_TBL_CMD_IQ31 10
+#define M_FW_RSS_IND_TBL_CMD_IQ31 0x3ff
+#define V_FW_RSS_IND_TBL_CMD_IQ31(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ31)
+#define G_FW_RSS_IND_TBL_CMD_IQ31(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ31) & M_FW_RSS_IND_TBL_CMD_IQ31)
+
+struct fw_rss_glb_config_cmd {
+ __be32 op_to_write;
+ __be32 retval_len16;
+ union fw_rss_glb_config {
+ struct fw_rss_glb_config_manual {
+ __be32 mode_pkd;
+ __be32 r3;
+ __be64 r4;
+ __be64 r5;
+ } manual;
+ struct fw_rss_glb_config_basicvirtual {
+ __be32 mode_pkd;
+ __be32 synmapen_to_hashtoeplitz;
+ __be64 r8;
+ __be64 r9;
+ } basicvirtual;
+ } u;
+};
+
+#define S_FW_RSS_GLB_CONFIG_CMD_MODE 28
+#define M_FW_RSS_GLB_CONFIG_CMD_MODE 0xf
+#define V_FW_RSS_GLB_CONFIG_CMD_MODE(x) ((x) << S_FW_RSS_GLB_CONFIG_CMD_MODE)
+#define G_FW_RSS_GLB_CONFIG_CMD_MODE(x) \
+ (((x) >> S_FW_RSS_GLB_CONFIG_CMD_MODE) & M_FW_RSS_GLB_CONFIG_CMD_MODE)
+
+#define FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL 0
+#define FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL 1
+#define FW_RSS_GLB_CONFIG_CMD_MODE_MAX 1
+
+#define S_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN 8
+#define M_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN 0x1
+#define V_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN)
+#define G_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN(x) \
+ (((x) >> S_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN) & \
+ M_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN)
+#define F_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN \
+ V_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 7
+#define M_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 0x1
+#define V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6)
+#define G_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6(x) \
+ (((x) >> S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6) & \
+ M_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6)
+#define F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 \
+ V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 6
+#define M_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 0x1
+#define V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6)
+#define G_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6(x) \
+ (((x) >> S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6) & \
+ M_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6)
+#define F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 \
+ V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 5
+#define M_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 0x1
+#define V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4)
+#define G_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4(x) \
+ (((x) >> S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4) & \
+ M_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4)
+#define F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 \
+ V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 4
+#define M_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 0x1
+#define V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4)
+#define G_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4(x) \
+ (((x) >> S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4) & \
+ M_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4)
+#define F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 \
+ V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN 3
+#define M_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN 0x1
+#define V_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN)
+#define G_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN(x) \
+ (((x) >> S_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN) & \
+ M_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN)
+#define F_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN \
+ V_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN 2
+#define M_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN 0x1
+#define V_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN)
+#define G_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN(x) \
+ (((x) >> S_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN) & \
+ M_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN)
+#define F_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN \
+ V_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP 1
+#define M_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP 0x1
+#define V_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP)
+#define G_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP(x) \
+ (((x) >> S_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP) & \
+ M_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP)
+#define F_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP \
+ V_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ 0
+#define M_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ 0x1
+#define V_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ)
+#define G_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ(x) \
+ (((x) >> S_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ) & \
+ M_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ)
+#define F_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ \
+ V_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ(1U)
+
+struct fw_rss_vi_config_cmd {
+ __be32 op_to_viid;
+ __be32 retval_len16;
+ union fw_rss_vi_config {
+ struct fw_rss_vi_config_manual {
+ __be64 r3;
+ __be64 r4;
+ __be64 r5;
+ } manual;
+ struct fw_rss_vi_config_basicvirtual {
+ __be32 r6;
+ __be32 defaultq_to_udpen;
+ __be64 r9;
+ __be64 r10;
+ } basicvirtual;
+ } u;
+};
+
+#define S_FW_RSS_VI_CONFIG_CMD_VIID 0
+#define M_FW_RSS_VI_CONFIG_CMD_VIID 0xfff
+#define V_FW_RSS_VI_CONFIG_CMD_VIID(x) ((x) << S_FW_RSS_VI_CONFIG_CMD_VIID)
+#define G_FW_RSS_VI_CONFIG_CMD_VIID(x) \
+ (((x) >> S_FW_RSS_VI_CONFIG_CMD_VIID) & M_FW_RSS_VI_CONFIG_CMD_VIID)
+
+#define S_FW_RSS_VI_CONFIG_CMD_DEFAULTQ 16
+#define M_FW_RSS_VI_CONFIG_CMD_DEFAULTQ 0x3ff
+#define V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x) \
+ ((x) << S_FW_RSS_VI_CONFIG_CMD_DEFAULTQ)
+#define G_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x) \
+ (((x) >> S_FW_RSS_VI_CONFIG_CMD_DEFAULTQ) & \
+ M_FW_RSS_VI_CONFIG_CMD_DEFAULTQ)
+
+#define S_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN 4
+#define M_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN 0x1
+#define V_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN(x) \
+ ((x) << S_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
+#define G_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN(x) \
+ (((x) >> S_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) & \
+ M_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
+#define F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN \
+ V_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN(1U)
+
+#define S_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN 3
+#define M_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN 0x1
+#define V_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN(x) \
+ ((x) << S_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
+#define G_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN(x) \
+ (((x) >> S_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) & \
+ M_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
+#define F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN \
+ V_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN(1U)
+
+#define S_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN 2
+#define M_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN 0x1
+#define V_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN(x) \
+ ((x) << S_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
+#define G_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN(x) \
+ (((x) >> S_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) & \
+ M_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
+#define F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN \
+ V_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN(1U)
+
+#define S_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN 1
+#define M_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN 0x1
+#define V_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN(x) \
+ ((x) << S_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
+#define G_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN(x) \
+ (((x) >> S_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) & \
+ M_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
+#define F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN \
+ V_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN(1U)
+
+#define S_FW_RSS_VI_CONFIG_CMD_UDPEN 0
+#define M_FW_RSS_VI_CONFIG_CMD_UDPEN 0x1
+#define V_FW_RSS_VI_CONFIG_CMD_UDPEN(x) \
+ ((x) << S_FW_RSS_VI_CONFIG_CMD_UDPEN)
+#define G_FW_RSS_VI_CONFIG_CMD_UDPEN(x) \
+ (((x) >> S_FW_RSS_VI_CONFIG_CMD_UDPEN) & \
+ M_FW_RSS_VI_CONFIG_CMD_UDPEN)
+#define F_FW_RSS_VI_CONFIG_CMD_UDPEN \
+ V_FW_RSS_VI_CONFIG_CMD_UDPEN(1U)
+
+enum fw_sched_sc {
+ FW_SCHED_SC_CONFIG = 0,
+ FW_SCHED_SC_PARAMS = 1,
+};
+
+enum fw_sched_type {
+ FW_SCHED_TYPE_PKTSCHED = 0,
+ FW_SCHED_TYPE_STREAMSCHED = 1,
+};
+
+enum fw_sched_params_level {
+ FW_SCHED_PARAMS_LEVEL_CL_RL = 0,
+ FW_SCHED_PARAMS_LEVEL_CL_WRR = 1,
+ FW_SCHED_PARAMS_LEVEL_CH_RL = 2,
+ FW_SCHED_PARAMS_LEVEL_CH_WRR = 3,
+};
+
+enum fw_sched_params_mode {
+ FW_SCHED_PARAMS_MODE_CLASS = 0,
+ FW_SCHED_PARAMS_MODE_FLOW = 1,
+};
+
+enum fw_sched_params_unit {
+ FW_SCHED_PARAMS_UNIT_BITRATE = 0,
+ FW_SCHED_PARAMS_UNIT_PKTRATE = 1,
+};
+
+enum fw_sched_params_rate {
+ FW_SCHED_PARAMS_RATE_REL = 0,
+ FW_SCHED_PARAMS_RATE_ABS = 1,
+};
+
+struct fw_sched_cmd {
+ __be32 op_to_write;
+ __be32 retval_len16;
+ union fw_sched {
+ struct fw_sched_config {
+ __u8 sc;
+ __u8 type;
+ __u8 minmaxen;
+ __u8 r3[5];
+ } config;
+ struct fw_sched_params {
+ __u8 sc;
+ __u8 type;
+ __u8 level;
+ __u8 mode;
+ __u8 unit;
+ __u8 rate;
+ __u8 ch;
+ __u8 cl;
+ __be32 min;
+ __be32 max;
+ __be16 weight;
+ __be16 pktsize;
+ __be32 r4;
+ } params;
+ } u;
+};
+
+/*
+ * length of the formatting string
+ */
+#define FW_DEVLOG_FMT_LEN 192
+
+/*
+ * maximum number of the formatting string parameters
+ */
+#define FW_DEVLOG_FMT_PARAMS_NUM 8
+
+/*
+ * priority levels
+ */
+enum fw_devlog_level {
+ FW_DEVLOG_LEVEL_EMERG = 0x0,
+ FW_DEVLOG_LEVEL_CRIT = 0x1,
+ FW_DEVLOG_LEVEL_ERR = 0x2,
+ FW_DEVLOG_LEVEL_NOTICE = 0x3,
+ FW_DEVLOG_LEVEL_INFO = 0x4,
+ FW_DEVLOG_LEVEL_DEBUG = 0x5,
+ FW_DEVLOG_LEVEL_MAX = 0x5,
+};
+
+/*
+ * facilities that may send a log message
+ */
+enum fw_devlog_facility {
+ FW_DEVLOG_FACILITY_CORE = 0x00,
+ FW_DEVLOG_FACILITY_SCHED = 0x02,
+ FW_DEVLOG_FACILITY_TIMER = 0x04,
+ FW_DEVLOG_FACILITY_RES = 0x06,
+ FW_DEVLOG_FACILITY_HW = 0x08,
+ FW_DEVLOG_FACILITY_FLR = 0x10,
+ FW_DEVLOG_FACILITY_DMAQ = 0x12,
+ FW_DEVLOG_FACILITY_PHY = 0x14,
+ FW_DEVLOG_FACILITY_MAC = 0x16,
+ FW_DEVLOG_FACILITY_PORT = 0x18,
+ FW_DEVLOG_FACILITY_VI = 0x1A,
+ FW_DEVLOG_FACILITY_FILTER = 0x1C,
+ FW_DEVLOG_FACILITY_ACL = 0x1E,
+ FW_DEVLOG_FACILITY_TM = 0x20,
+ FW_DEVLOG_FACILITY_QFC = 0x22,
+ FW_DEVLOG_FACILITY_DCB = 0x24,
+ FW_DEVLOG_FACILITY_ETH = 0x26,
+ FW_DEVLOG_FACILITY_OFLD = 0x28,
+ FW_DEVLOG_FACILITY_RI = 0x2A,
+ FW_DEVLOG_FACILITY_ISCSI = 0x2C,
+ FW_DEVLOG_FACILITY_FCOE = 0x2E,
+ FW_DEVLOG_FACILITY_FOISCSI = 0x30,
+ FW_DEVLOG_FACILITY_FOFCOE = 0x32,
+ FW_DEVLOG_FACILITY_MAX = 0x32,
+};
+
+/*
+ * log message format
+ */
+struct fw_devlog_e {
+ __be64 timestamp;
+ __be32 seqno;
+ __be16 reserved1;
+ __u8 level;
+ __u8 facility;
+ __u8 fmt[FW_DEVLOG_FMT_LEN];
+ __be32 params[FW_DEVLOG_FMT_PARAMS_NUM];
+ __be32 reserved3[4];
+};
+
+struct fw_devlog_cmd {
+ __be32 op_to_write;
+ __be32 retval_len16;
+ __u8 level;
+ __u8 r2[7];
+ __be32 memtype_devlog_memaddr16_devlog;
+ __be32 memsize_devlog;
+ __be32 r3[2];
+};
+
+#define S_FW_DEVLOG_CMD_MEMTYPE_DEVLOG 28
+#define M_FW_DEVLOG_CMD_MEMTYPE_DEVLOG 0xf
+#define V_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(x) \
+ ((x) << S_FW_DEVLOG_CMD_MEMTYPE_DEVLOG)
+#define G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(x) \
+ (((x) >> S_FW_DEVLOG_CMD_MEMTYPE_DEVLOG) & M_FW_DEVLOG_CMD_MEMTYPE_DEVLOG)
+
+#define S_FW_DEVLOG_CMD_MEMADDR16_DEVLOG 0
+#define M_FW_DEVLOG_CMD_MEMADDR16_DEVLOG 0xfffffff
+#define V_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(x) \
+ ((x) << S_FW_DEVLOG_CMD_MEMADDR16_DEVLOG)
+#define G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(x) \
+ (((x) >> S_FW_DEVLOG_CMD_MEMADDR16_DEVLOG) & \
+ M_FW_DEVLOG_CMD_MEMADDR16_DEVLOG)
+
+enum fw_error_type {
+ FW_ERROR_TYPE_EXCEPTION = 0x0,
+ FW_ERROR_TYPE_HWMODULE = 0x1,
+ FW_ERROR_TYPE_WR = 0x2,
+ FW_ERROR_TYPE_ACL = 0x3,
+};
+
+struct fw_error_cmd {
+ __be32 op_to_type;
+ __be32 len16_pkd;
+ union fw_error {
+ struct fw_error_exception {
+ __be32 info[6];
+ } exception;
+ struct fw_error_hwmodule {
+ __be32 regaddr;
+ __be32 regval;
+ } hwmodule;
+ struct fw_error_wr {
+ __be16 cidx;
+ __be16 pfn_vfn;
+ __be32 eqid;
+ __u8 wrhdr[16];
+ } wr;
+ struct fw_error_acl {
+ __be16 cidx;
+ __be16 pfn_vfn;
+ __be32 eqid;
+ __be16 mv_pkd;
+ __u8 val[6];
+ __be64 r4;
+ } acl;
+ } u;
+};
+
+#define S_FW_ERROR_CMD_FATAL 4
+#define M_FW_ERROR_CMD_FATAL 0x1
+#define V_FW_ERROR_CMD_FATAL(x) ((x) << S_FW_ERROR_CMD_FATAL)
+#define G_FW_ERROR_CMD_FATAL(x) \
+ (((x) >> S_FW_ERROR_CMD_FATAL) & M_FW_ERROR_CMD_FATAL)
+#define F_FW_ERROR_CMD_FATAL V_FW_ERROR_CMD_FATAL(1U)
+
+#define S_FW_ERROR_CMD_TYPE 0
+#define M_FW_ERROR_CMD_TYPE 0xf
+#define V_FW_ERROR_CMD_TYPE(x) ((x) << S_FW_ERROR_CMD_TYPE)
+#define G_FW_ERROR_CMD_TYPE(x) \
+ (((x) >> S_FW_ERROR_CMD_TYPE) & M_FW_ERROR_CMD_TYPE)
+
+#define S_FW_ERROR_CMD_PFN 8
+#define M_FW_ERROR_CMD_PFN 0x7
+#define V_FW_ERROR_CMD_PFN(x) ((x) << S_FW_ERROR_CMD_PFN)
+#define G_FW_ERROR_CMD_PFN(x) \
+ (((x) >> S_FW_ERROR_CMD_PFN) & M_FW_ERROR_CMD_PFN)
+
+#define S_FW_ERROR_CMD_VFN 0
+#define M_FW_ERROR_CMD_VFN 0xff
+#define V_FW_ERROR_CMD_VFN(x) ((x) << S_FW_ERROR_CMD_VFN)
+#define G_FW_ERROR_CMD_VFN(x) \
+ (((x) >> S_FW_ERROR_CMD_VFN) & M_FW_ERROR_CMD_VFN)
+
+#define S_FW_ERROR_CMD_MV 15
+#define M_FW_ERROR_CMD_MV 0x1
+#define V_FW_ERROR_CMD_MV(x) ((x) << S_FW_ERROR_CMD_MV)
+#define G_FW_ERROR_CMD_MV(x) \
+ (((x) >> S_FW_ERROR_CMD_MV) & M_FW_ERROR_CMD_MV)
+#define F_FW_ERROR_CMD_MV V_FW_ERROR_CMD_MV(1U)
+
+struct fw_debug_cmd {
+ __be32 op_type;
+ __be32 len16_pkd;
+ union fw_debug {
+ struct fw_debug_assert {
+ __be32 fcid;
+ __be32 line;
+ __be32 x;
+ __be32 y;
+ __u8 filename_0_7[8];
+ __u8 filename_8_15[8];
+ __be64 r3;
+ } assert;
+ struct fw_debug_prt {
+ __be16 dprtstridx;
+ __be16 r3[3];
+ __be32 dprtstrparam0;
+ __be32 dprtstrparam1;
+ __be32 dprtstrparam2;
+ __be32 dprtstrparam3;
+ } prt;
+ } u;
+};
+
+#define S_FW_DEBUG_CMD_TYPE 0
+#define M_FW_DEBUG_CMD_TYPE 0xff
+#define V_FW_DEBUG_CMD_TYPE(x) ((x) << S_FW_DEBUG_CMD_TYPE)
+#define G_FW_DEBUG_CMD_TYPE(x) \
+ (((x) >> S_FW_DEBUG_CMD_TYPE) & M_FW_DEBUG_CMD_TYPE)
+
+/******************************************************************************
+ * B I N A R Y H E A D E R F O R M A T
+ ******************************************************************************/
+
+/*
+ * firmware binary header format
+ */
+struct fw_hdr {
+ __u8 ver;
+ __u8 reserved1;
+	__be16 len512;			/* firmware image length in 512-byte units */
+	__be32 fw_ver;			/* firmware version */
+	__be32 tp_microcode_ver;	/* TP (TCP processor) microcode version */
+ __u8 intfver_nic;
+ __u8 intfver_vnic;
+ __u8 intfver_ofld;
+ __u8 intfver_ri;
+ __u8 intfver_iscsipdu;
+ __u8 intfver_iscsi;
+ __u8 intfver_fcoe;
+ __u8 reserved2;
+ __be32 reserved3[27];
+};
+
+#define S_FW_HDR_FW_VER_MAJOR 24
+#define M_FW_HDR_FW_VER_MAJOR 0xff
+#define V_FW_HDR_FW_VER_MAJOR(x) \
+ ((x) << S_FW_HDR_FW_VER_MAJOR)
+#define G_FW_HDR_FW_VER_MAJOR(x) \
+ (((x) >> S_FW_HDR_FW_VER_MAJOR) & M_FW_HDR_FW_VER_MAJOR)
+
+#define S_FW_HDR_FW_VER_MINOR 16
+#define M_FW_HDR_FW_VER_MINOR 0xff
+#define V_FW_HDR_FW_VER_MINOR(x) \
+ ((x) << S_FW_HDR_FW_VER_MINOR)
+#define G_FW_HDR_FW_VER_MINOR(x) \
+ (((x) >> S_FW_HDR_FW_VER_MINOR) & M_FW_HDR_FW_VER_MINOR)
+
+#define S_FW_HDR_FW_VER_MICRO 8
+#define M_FW_HDR_FW_VER_MICRO 0xff
+#define V_FW_HDR_FW_VER_MICRO(x) \
+ ((x) << S_FW_HDR_FW_VER_MICRO)
+#define G_FW_HDR_FW_VER_MICRO(x) \
+ (((x) >> S_FW_HDR_FW_VER_MICRO) & M_FW_HDR_FW_VER_MICRO)
+
+#define S_FW_HDR_FW_VER_BUILD 0
+#define M_FW_HDR_FW_VER_BUILD 0xff
+#define V_FW_HDR_FW_VER_BUILD(x) \
+ ((x) << S_FW_HDR_FW_VER_BUILD)
+#define G_FW_HDR_FW_VER_BUILD(x) \
+ (((x) >> S_FW_HDR_FW_VER_BUILD) & M_FW_HDR_FW_VER_BUILD)
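+
+/*
+ * A firmware version word (struct fw_hdr's fw_ver, in host order) breaks down
+ * into its components with the G_ macros; a minimal sketch:
+ *
+ *	snprintf(buf, sizeof(buf), "%u.%u.%u.%u",
+ *	    G_FW_HDR_FW_VER_MAJOR(vers), G_FW_HDR_FW_VER_MINOR(vers),
+ *	    G_FW_HDR_FW_VER_MICRO(vers), G_FW_HDR_FW_VER_BUILD(vers));
+ */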
+
+#endif /* _T4FW_INTERFACE_H_ */
diff --git a/sys/dev/cxgbe/offload.h b/sys/dev/cxgbe/offload.h
new file mode 100644
index 0000000..85f4fae
--- /dev/null
+++ b/sys/dev/cxgbe/offload.h
@@ -0,0 +1,86 @@
+/*-
+ * Copyright (c) 2010 Chelsio Communications, Inc.
+ * All rights reserved.
+ * Written by: Navdeep Parhar <np@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef __T4_OFFLOAD_H__
+#define __T4_OFFLOAD_H__
+
+/*
+ * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
+ */
+#define MAX_ATIDS 8192U
+
+struct serv_entry {
+ void *data;
+};
+
+union aopen_entry {
+ void *data;
+ union aopen_entry *next;
+};
+
+/*
+ * Holds the size, base address, free list start, etc of the TID, server TID,
+ * and active-open TID tables. The tables themselves are allocated dynamically.
+ */
+struct tid_info {
+ void **tid_tab;
+ unsigned int ntids;
+
+ struct serv_entry *stid_tab;
+ unsigned long *stid_bmap;
+ unsigned int nstids;
+ unsigned int stid_base;
+
+ union aopen_entry *atid_tab;
+ unsigned int natids;
+
+ unsigned int nftids;
+ unsigned int ftid_base;
+
+ union aopen_entry *afree;
+ unsigned int atids_in_use;
+
+ unsigned int stids_in_use;
+};
+
+struct t4_range {
+ unsigned int start;
+ unsigned int size;
+};
+
+struct t4_virt_res { /* virtualized HW resources */
+ struct t4_range ddp;
+ struct t4_range iscsi;
+ struct t4_range stag;
+ struct t4_range rq;
+ struct t4_range pbl;
+};
+
+#endif
diff --git a/sys/dev/cxgbe/osdep.h b/sys/dev/cxgbe/osdep.h
new file mode 100644
index 0000000..693cac2
--- /dev/null
+++ b/sys/dev/cxgbe/osdep.h
@@ -0,0 +1,153 @@
+/*-
+ * Copyright (c) 2010 Chelsio Communications, Inc.
+ * All rights reserved.
+ * Written by: Navdeep Parhar <np@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef __CXGBE_OSDEP_H_
+#define __CXGBE_OSDEP_H_
+
+#include <sys/cdefs.h>
+#include <sys/ctype.h>
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/endian.h>
+#include <sys/systm.h>
+#include <sys/syslog.h>
+#include <dev/pci/pcireg.h>
+
+#define CH_ERR(adap, fmt, ...) log(LOG_ERR, fmt, ##__VA_ARGS__)
+#define CH_WARN(adap, fmt, ...) log(LOG_WARNING, fmt, ##__VA_ARGS__)
+#define CH_ALERT(adap, fmt, ...) log(LOG_ALERT, fmt, ##__VA_ARGS__)
+#define CH_WARN_RATELIMIT(adap, fmt, ...) log(LOG_WARNING, fmt, ##__VA_ARGS__)
+
+typedef int8_t s8;
+typedef int16_t s16;
+typedef int32_t s32;
+typedef int64_t s64;
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef uint64_t u64;
+typedef uint8_t __u8;
+typedef uint16_t __u16;
+typedef uint32_t __u32;
+typedef uint64_t __u64;
+typedef uint8_t __be8;
+typedef uint16_t __be16;
+typedef uint32_t __be32;
+typedef uint64_t __be64;
+
+#if BYTE_ORDER == BIG_ENDIAN
+#define __BIG_ENDIAN_BITFIELD
+#elif BYTE_ORDER == LITTLE_ENDIAN
+#define __LITTLE_ENDIAN_BITFIELD
+#else
+#error "Must set BYTE_ORDER"
+#endif
+
+typedef boolean_t bool;
+#define false FALSE
+#define true TRUE
+
+#undef msleep
+#define msleep(x) DELAY((x) * 1000)
+#define mdelay(x) DELAY((x) * 1000)
+#define udelay(x) DELAY(x)
+
+#define __devinit
+#define simple_strtoul strtoul
+#define DIV_ROUND_UP(x, y) howmany(x, y)
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
+#define swab16(x) bswap16(x)
+#define swab32(x) bswap32(x)
+#define swab64(x) bswap64(x)
+#define le16_to_cpu(x) le16toh(x)
+#define le32_to_cpu(x) le32toh(x)
+#define le64_to_cpu(x) le64toh(x)
+#define cpu_to_le16(x) htole16(x)
+#define cpu_to_le32(x) htole32(x)
+#define cpu_to_le64(x) htole64(x)
+#define be16_to_cpu(x) be16toh(x)
+#define be32_to_cpu(x) be32toh(x)
+#define be64_to_cpu(x) be64toh(x)
+#define cpu_to_be16(x) htobe16(x)
+#define cpu_to_be32(x) htobe32(x)
+#define cpu_to_be64(x) htobe64(x)
+
+#define SPEED_10 10
+#define SPEED_100 100
+#define SPEED_1000 1000
+#define SPEED_10000 10000
+#define DUPLEX_HALF 0
+#define DUPLEX_FULL 1
+#define AUTONEG_DISABLE 0
+#define AUTONEG_ENABLE 1
+
+#define PCI_CAP_ID_VPD PCIY_VPD
+#define PCI_VPD_ADDR PCIR_VPD_ADDR
+#define PCI_VPD_ADDR_F 0x8000
+#define PCI_VPD_DATA PCIR_VPD_DATA
+
+#define PCI_CAP_ID_EXP PCIY_EXPRESS
+#define PCI_EXP_DEVCTL PCIR_EXPRESS_DEVICE_CTL
+#define PCI_EXP_DEVCTL_PAYLOAD PCIM_EXP_CTL_MAX_PAYLOAD
+#define PCI_EXP_DEVCTL_READRQ PCIM_EXP_CTL_MAX_READ_REQUEST
+#define PCI_EXP_LNKCTL PCIR_EXPRESS_LINK_CTL
+#define PCI_EXP_LNKSTA PCIR_EXPRESS_LINK_STA
+#define PCI_EXP_LNKSTA_CLS PCIM_LINK_STA_SPEED
+#define PCI_EXP_LNKSTA_NLW PCIM_LINK_STA_WIDTH
+
+static inline int
+ilog2(long x)
+{
+ KASSERT(x > 0 && powerof2(x), ("%s: invalid arg %ld", __func__, x));
+
+ return (flsl(x) - 1);
+}
+
+static inline char *
+strstrip(char *s)
+{
+ char c, *r, *trim_at;
+
+ while (isspace(*s))
+ s++;
+ r = trim_at = s;
+
+ while ((c = *s++) != 0) {
+ if (!isspace(c))
+ trim_at = s;
+ }
+ *trim_at = 0;
+
+ return (r);
+}
+
+#endif
diff --git a/sys/dev/cxgbe/t4_ioctl.h b/sys/dev/cxgbe/t4_ioctl.h
new file mode 100644
index 0000000..abcb8a4
--- /dev/null
+++ b/sys/dev/cxgbe/t4_ioctl.h
@@ -0,0 +1,58 @@
+/*-
+ * Copyright (c) 2011 Chelsio Communications, Inc.
+ * All rights reserved.
+ * Written by: Navdeep Parhar <np@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef __T4_IOCTL_H__
+#define __T4_IOCTL_H__
+
+/*
+ * Ioctl commands specific to this driver.
+ */
+enum {
+ T4_GET32 = 0x40, /* read 32 bit register */
+ T4_SET32, /* write 32 bit register */
+ T4_REGDUMP, /* dump of all registers */
+};
+
+struct t4_reg32 {
+ uint32_t addr;
+ uint32_t val;
+};
+
+#define T4_REGDUMP_SIZE (160 * 1024)
+struct t4_regdump {
+ uint32_t version;
+ uint32_t len; /* bytes */
+ uint8_t *data;
+};
+
+#define CHELSIO_T4_GETREG32 _IOWR('f', T4_GET32, struct t4_reg32)
+#define CHELSIO_T4_SETREG32 _IOW('f', T4_SET32, struct t4_reg32)
+#define CHELSIO_T4_REGDUMP _IOWR('f', T4_REGDUMP, struct t4_regdump)
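+
+/*
+ * These ioctls are issued against the adapter's control device node (created
+ * by the driver with make_dev(), i.e. /dev/t4nex<unit>).  A minimal userland
+ * sketch, assuming unit 0:
+ *
+ *	struct t4_reg32 r = { .addr = 0x0 };
+ *	int fd = open("/dev/t4nex0", O_RDWR);
+ *	if (fd != -1 && ioctl(fd, CHELSIO_T4_GETREG32, &r) == 0)
+ *		printf("reg 0x%x = 0x%x\n", r.addr, r.val);
+ */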
+#endif
diff --git a/sys/dev/cxgbe/t4_main.c b/sys/dev/cxgbe/t4_main.c
new file mode 100644
index 0000000..122d916
--- /dev/null
+++ b/sys/dev/cxgbe/t4_main.c
@@ -0,0 +1,2747 @@
+/*-
+ * Copyright (c) 2011 Chelsio Communications, Inc.
+ * All rights reserved.
+ * Written by: Navdeep Parhar <np@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_inet.h"
+
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/priv.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <sys/module.h>
+#include <sys/pciio.h>
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pci_private.h>
+#include <sys/firmware.h>
+#include <sys/smp.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/sysctl.h>
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <net/if_types.h>
+#include <net/if_dl.h>
+
+#include "common/t4_hw.h"
+#include "common/common.h"
+#include "common/t4_regs.h"
+#include "common/t4_regs_values.h"
+#include "common/t4fw_interface.h"
+#include "t4_ioctl.h"
+
+/* T4 bus driver interface */
+static int t4_probe(device_t);
+static int t4_attach(device_t);
+static int t4_detach(device_t);
+static device_method_t t4_methods[] = {
+ DEVMETHOD(device_probe, t4_probe),
+ DEVMETHOD(device_attach, t4_attach),
+ DEVMETHOD(device_detach, t4_detach),
+
+ /* bus interface */
+ DEVMETHOD(bus_print_child, bus_generic_print_child),
+ DEVMETHOD(bus_driver_added, bus_generic_driver_added),
+
+ { 0, 0 }
+};
+static driver_t t4_driver = {
+ "t4nex",
+ t4_methods,
+ sizeof(struct adapter)
+};
+
+
+/* T4 port (cxgbe) interface */
+static int cxgbe_probe(device_t);
+static int cxgbe_attach(device_t);
+static int cxgbe_detach(device_t);
+static device_method_t cxgbe_methods[] = {
+ DEVMETHOD(device_probe, cxgbe_probe),
+ DEVMETHOD(device_attach, cxgbe_attach),
+ DEVMETHOD(device_detach, cxgbe_detach),
+ { 0, 0 }
+};
+static driver_t cxgbe_driver = {
+ "cxgbe",
+ cxgbe_methods,
+ sizeof(struct port_info)
+};
+
+static d_ioctl_t t4_ioctl;
+static d_open_t t4_open;
+static d_close_t t4_close;
+
+static struct cdevsw t4_cdevsw = {
+ .d_version = D_VERSION,
+ .d_flags = 0,
+ .d_open = t4_open,
+ .d_close = t4_close,
+ .d_ioctl = t4_ioctl,
+ .d_name = "t4nex",
+};
+
+/* ifnet + media interface */
+static void cxgbe_init(void *);
+static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
+static void cxgbe_start(struct ifnet *);
+static int cxgbe_transmit(struct ifnet *, struct mbuf *);
+static void cxgbe_qflush(struct ifnet *);
+static int cxgbe_media_change(struct ifnet *);
+static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);
+
+MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4 Ethernet driver and services");
+
+/*
+ * Tunables.
+ */
+SYSCTL_NODE(_hw, OID_AUTO, cxgbe, CTLFLAG_RD, 0, "cxgbe driver parameters");
+
+static int force_firmware_install = 0;
+TUNABLE_INT("hw.cxgbe.force_firmware_install", &force_firmware_install);
+SYSCTL_UINT(_hw_cxgbe, OID_AUTO, force_firmware_install, CTLFLAG_RDTUN,
+ &force_firmware_install, 0, "install firmware on every attach.");
+
+/*
+ * Holdoff timer and packet counter values.
+ */
+static unsigned int intr_timer[SGE_NTIMERS] = {1, 5, 10, 50, 100, 200};
+static unsigned int intr_pktcount[SGE_NCOUNTERS] = {1, 8, 16, 32}; /* 63 max */
+
+/*
+ * Max # of tx and rx queues to use for each 10G and 1G port.
+ */
+static unsigned int max_ntxq_10g = 8;
+TUNABLE_INT("hw.cxgbe.max_ntxq_10G_port", &max_ntxq_10g);
+SYSCTL_UINT(_hw_cxgbe, OID_AUTO, max_ntxq_10G_port, CTLFLAG_RDTUN,
+ &max_ntxq_10g, 0, "maximum number of tx queues per 10G port.");
+
+static unsigned int max_nrxq_10g = 8;
+TUNABLE_INT("hw.cxgbe.max_nrxq_10G_port", &max_nrxq_10g);
+SYSCTL_UINT(_hw_cxgbe, OID_AUTO, max_nrxq_10G_port, CTLFLAG_RDTUN,
+ &max_nrxq_10g, 0, "maximum number of rxq's (per 10G port).");
+
+static unsigned int max_ntxq_1g = 2;
+TUNABLE_INT("hw.cxgbe.max_ntxq_1G_port", &max_ntxq_1g);
+SYSCTL_UINT(_hw_cxgbe, OID_AUTO, max_ntxq_1G_port, CTLFLAG_RDTUN,
+ &max_ntxq_1g, 0, "maximum number of tx queues per 1G port.");
+
+static unsigned int max_nrxq_1g = 2;
+TUNABLE_INT("hw.cxgbe.max_nrxq_1G_port", &max_nrxq_1g);
+SYSCTL_UINT(_hw_cxgbe, OID_AUTO, max_nrxq_1G_port, CTLFLAG_RDTUN,
+ &max_nrxq_1g, 0, "maximum number of rxq's (per 1G port).");
+
+/*
+ * Holdoff parameters for 10G and 1G ports.
+ */
+static unsigned int tmr_idx_10g = 1;
+TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &tmr_idx_10g);
+SYSCTL_UINT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx_10G, CTLFLAG_RDTUN,
+ &tmr_idx_10g, 0,
+ "default timer index for interrupt holdoff (10G ports).");
+
+static int pktc_idx_10g = 2;
+TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &pktc_idx_10g);
+SYSCTL_UINT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx_10G, CTLFLAG_RDTUN,
+ &pktc_idx_10g, 0,
+ "default pkt counter index for interrupt holdoff (10G ports).");
+
+static unsigned int tmr_idx_1g = 1;
+TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &tmr_idx_1g);
+SYSCTL_UINT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx_1G, CTLFLAG_RDTUN,
+ &tmr_idx_1g, 0,
+ "default timer index for interrupt holdoff (1G ports).");
+
+static int pktc_idx_1g = 2;
+TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &pktc_idx_1g);
+SYSCTL_UINT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx_1G, CTLFLAG_RDTUN,
+ &pktc_idx_1g, 0,
+ "default pkt counter index for interrupt holdoff (1G ports).");
+
+/*
+ * Size (# of entries) of each tx and rx queue.
+ */
+static unsigned int qsize_txq = TX_EQ_QSIZE;
+TUNABLE_INT("hw.cxgbe.qsize_txq", &qsize_txq);
+SYSCTL_UINT(_hw_cxgbe, OID_AUTO, qsize_txq, CTLFLAG_RDTUN,
+ &qsize_txq, 0, "default queue size of NIC tx queues.");
+
+static unsigned int qsize_rxq = RX_IQ_QSIZE;
+TUNABLE_INT("hw.cxgbe.qsize_rxq", &qsize_rxq);
+SYSCTL_UINT(_hw_cxgbe, OID_AUTO, qsize_rxq, CTLFLAG_RDTUN,
+ &qsize_rxq, 0, "default queue size of NIC rx queues.");
+
+/*
+ * Interrupt types allowed.
+ */
+static int intr_types = 7;
+TUNABLE_INT("hw.cxgbe.interrupt_types", &intr_types);
+SYSCTL_UINT(_hw_cxgbe, OID_AUTO, interrupt_types, CTLFLAG_RDTUN, &intr_types, 0,
+ "interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively)");
+
+/*
+ * Force the driver to use interrupt forwarding.
+ */
+static int intr_fwd = 0;
+TUNABLE_INT("hw.cxgbe.interrupt_forwarding", &intr_fwd);
+SYSCTL_UINT(_hw_cxgbe, OID_AUTO, interrupt_forwarding, CTLFLAG_RDTUN,
+ &intr_fwd, 0, "always use forwarded interrupts");
+
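+/*
+ * All of the knobs above are loader tunables (TUNABLE_INT) exposed as
+ * read-only sysctls (CTLFLAG_RDTUN), so they must be set before the driver
+ * attaches, e.g. in /boot/loader.conf (illustrative values):
+ *
+ *	hw.cxgbe.max_nrxq_10G_port="4"
+ *	hw.cxgbe.interrupt_types="4"		# MSI-X only
+ */
+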
+struct intrs_and_queues {
+ int intr_type; /* 1, 2, or 4 for INTx, MSI, or MSI-X */
+ int nirq; /* Number of vectors */
+ int intr_fwd; /* Interrupts forwarded */
+ int ntxq10g; /* # of NIC txq's for each 10G port */
+ int nrxq10g; /* # of NIC rxq's for each 10G port */
+ int ntxq1g; /* # of NIC txq's for each 1G port */
+ int nrxq1g; /* # of NIC rxq's for each 1G port */
+};
+
+enum {
+ MEMWIN0_APERTURE = 2048,
+ MEMWIN0_BASE = 0x1b800,
+ MEMWIN1_APERTURE = 32768,
+ MEMWIN1_BASE = 0x28000,
+ MEMWIN2_APERTURE = 65536,
+ MEMWIN2_BASE = 0x30000,
+};
+
+enum {
+ XGMAC_MTU = (1 << 0),
+ XGMAC_PROMISC = (1 << 1),
+ XGMAC_ALLMULTI = (1 << 2),
+ XGMAC_VLANEX = (1 << 3),
+ XGMAC_UCADDR = (1 << 4),
+ XGMAC_MCADDRS = (1 << 5),
+
+ XGMAC_ALL = 0xffff
+};
+
+static int map_bars(struct adapter *);
+static void setup_memwin(struct adapter *);
+static int cfg_itype_and_nqueues(struct adapter *, int, int,
+ struct intrs_and_queues *);
+static int prep_firmware(struct adapter *);
+static int get_capabilities(struct adapter *, struct fw_caps_config_cmd *);
+static int get_params(struct adapter *, struct fw_caps_config_cmd *);
+static void t4_set_desc(struct adapter *);
+static void build_medialist(struct port_info *);
+static int update_mac_settings(struct port_info *, int);
+static int cxgbe_init_locked(struct port_info *);
+static int cxgbe_init_synchronized(struct port_info *);
+static int cxgbe_uninit_locked(struct port_info *);
+static int cxgbe_uninit_synchronized(struct port_info *);
+static int first_port_up(struct adapter *);
+static int last_port_down(struct adapter *);
+static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
+ iq_intr_handler_t *, void *, char *);
+static int t4_free_irq(struct adapter *, struct irq *);
+static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
+ unsigned int);
+static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
+static void cxgbe_tick(void *);
+static int t4_sysctls(struct adapter *);
+static int cxgbe_sysctls(struct port_info *);
+static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
+static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
+static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
+static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
+static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
+
+
+struct t4_pciids {
+ uint16_t device;
+ uint8_t mpf;
+ char *desc;
+} t4_pciids[] = {
+ {0xa000, 0, "Chelsio Terminator 4 FPGA"},
+ {0x4400, 4, "Chelsio T440-dbg"},
+ {0x4401, 4, "Chelsio T420-CR"},
+ {0x4402, 4, "Chelsio T422-CR"},
+ {0x4403, 4, "Chelsio T440-CR"},
+ {0x4404, 4, "Chelsio T420-BCH"},
+ {0x4405, 4, "Chelsio T440-BCH"},
+ {0x4406, 4, "Chelsio T440-CH"},
+ {0x4407, 4, "Chelsio T420-SO"},
+ {0x4408, 4, "Chelsio T420-CX"},
+ {0x4409, 4, "Chelsio T420-BT"},
+ {0x440a, 4, "Chelsio T404-BT"},
+};
+
+static int
+t4_probe(device_t dev)
+{
+ int i;
+ uint16_t v = pci_get_vendor(dev);
+ uint16_t d = pci_get_device(dev);
+
+ if (v != PCI_VENDOR_ID_CHELSIO)
+ return (ENXIO);
+
+ for (i = 0; i < ARRAY_SIZE(t4_pciids); i++) {
+ if (d == t4_pciids[i].device &&
+ pci_get_function(dev) == t4_pciids[i].mpf) {
+ device_set_desc(dev, t4_pciids[i].desc);
+ return (BUS_PROBE_DEFAULT);
+ }
+ }
+
+ return (ENXIO);
+}
+
+static int
+t4_attach(device_t dev)
+{
+ struct adapter *sc;
+ int rc = 0, i, n10g, n1g, rqidx, tqidx;
+ struct fw_caps_config_cmd caps;
+ uint32_t p, v;
+ struct intrs_and_queues iaq;
+ struct sge *s;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+ sc->pf = pci_get_function(dev);
+ sc->mbox = sc->pf;
+
+ pci_enable_busmaster(dev);
+ pci_set_max_read_req(dev, 4096);
+ snprintf(sc->lockname, sizeof(sc->lockname), "%s",
+ device_get_nameunit(dev));
+ mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
+
+ rc = map_bars(sc);
+ if (rc != 0)
+ goto done; /* error message displayed already */
+
+ memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
+
+ /* Prepare the adapter for operation */
+ rc = -t4_prep_adapter(sc);
+ if (rc != 0) {
+ device_printf(dev, "failed to prepare adapter: %d.\n", rc);
+ goto done;
+ }
+
+ /* Do this really early */
+ sc->cdev = make_dev(&t4_cdevsw, device_get_unit(dev), UID_ROOT,
+ GID_WHEEL, 0600, "%s", device_get_nameunit(dev));
+ sc->cdev->si_drv1 = sc;
+
+ /* Prepare the firmware for operation */
+ rc = prep_firmware(sc);
+ if (rc != 0)
+ goto done; /* error message displayed already */
+
+ /* Get device capabilities and select which ones we'll use */
+ rc = get_capabilities(sc, &caps);
+ if (rc != 0) {
+ device_printf(dev,
+ "failed to initialize adapter capabilities: %d.\n", rc);
+ goto done;
+ }
+
+ /* Choose the global RSS mode. */
+ rc = -t4_config_glbl_rss(sc, sc->mbox,
+ FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
+ F_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
+ F_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
+ if (rc != 0) {
+ device_printf(dev,
+ "failed to select global RSS mode: %d.\n", rc);
+ goto done;
+ }
+
+ /* These are total (sum of all ports) limits for a bus driver */
+ rc = -t4_cfg_pfvf(sc, sc->mbox, sc->pf, 0,
+ 64, /* max # of egress queues */
+ 64, /* max # of egress Ethernet or control queues */
+ 64, /* max # of ingress queues with fl/interrupt */
+ 0, /* max # of ingress queues without interrupt */
+ 0, /* PCIe traffic class */
+ 4, /* max # of virtual interfaces */
+ M_FW_PFVF_CMD_CMASK, M_FW_PFVF_CMD_PMASK, 16,
+ FW_CMD_CAP_PF, FW_CMD_CAP_PF);
+ if (rc != 0) {
+ device_printf(dev,
+ "failed to configure pf/vf resources: %d.\n", rc);
+ goto done;
+ }
+
+ /* Need this before sge_init */
+ for (i = 0; i < SGE_NTIMERS; i++)
+ sc->sge.timer_val[i] = min(intr_timer[i], 200U);
+ for (i = 0; i < SGE_NCOUNTERS; i++)
+ sc->sge.counter_val[i] = min(intr_pktcount[i], M_THRESHOLD_0);
+
+ /* Also need the cooked value of cclk before sge_init */
+ p = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
+ rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &p, &v);
+ if (rc != 0) {
+ device_printf(sc->dev,
+ "failed to obtain core clock value: %d.\n", rc);
+ goto done;
+ }
+ sc->params.vpd.cclk = v;
+
+ t4_sge_init(sc);
+
+ /*
+ * XXX: This is the place to call t4_set_filter_mode()
+ */
+
+ /* get basic stuff going */
+ rc = -t4_early_init(sc, sc->mbox);
+ if (rc != 0) {
+ device_printf(dev, "early init failed: %d.\n", rc);
+ goto done;
+ }
+
+ rc = get_params(sc, &caps);
+ if (rc != 0)
+ goto done; /* error message displayed already */
+
+ /* These are finalized by FW initialization, load their values now */
+ v = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
+ sc->params.tp.tre = G_TIMERRESOLUTION(v);
+ sc->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v);
+ t4_read_mtu_tbl(sc, sc->params.mtus, NULL);
+
+ /* tweak some settings */
+ t4_write_reg(sc, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) | V_RXTSHIFTMAXR1(4) |
+ V_RXTSHIFTMAXR2(15) | V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
+ V_KEEPALIVEMAXR1(4) | V_KEEPALIVEMAXR2(9));
+ t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
+
+ setup_memwin(sc);
+
+ rc = t4_create_dma_tag(sc);
+ if (rc != 0)
+ goto done; /* error message displayed already */
+
+ /*
+ * First pass over all the ports - allocate VIs and initialize some
+ * basic parameters like mac address, port type, etc. We also figure
+ * out whether a port is 10G or 1G and use that information when
+ * calculating how many interrupts to attempt to allocate.
+ */
+ n10g = n1g = 0;
+ for_each_port(sc, i) {
+ struct port_info *pi;
+
+ pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
+ sc->port[i] = pi;
+
+ /* These must be set before t4_port_init */
+ pi->adapter = sc;
+ pi->port_id = i;
+
+ /* Allocate the vi and initialize parameters like mac addr */
+ rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
+ if (rc != 0) {
+ device_printf(dev, "unable to initialize port %d: %d\n",
+ i, rc);
+ free(pi, M_CXGBE);
+ sc->port[i] = NULL; /* indicates init failed */
+ continue;
+ }
+
+ snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
+ device_get_nameunit(dev), i);
+ mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
+
+ if (is_10G_port(pi)) {
+ n10g++;
+ pi->tmr_idx = tmr_idx_10g;
+ pi->pktc_idx = pktc_idx_10g;
+ } else {
+ n1g++;
+ pi->tmr_idx = tmr_idx_1g;
+ pi->pktc_idx = pktc_idx_1g;
+ }
+
+ pi->xact_addr_filt = -1;
+
+ pi->qsize_rxq = max(qsize_rxq, 128);
+ while (pi->qsize_rxq & 7)
+ pi->qsize_rxq++;
+ pi->qsize_txq = max(qsize_txq, 128);
+
+ if (pi->qsize_rxq != qsize_rxq) {
+ device_printf(dev,
+ "using %d instead of %d as the rx queue size.\n",
+ pi->qsize_rxq, qsize_rxq);
+ }
+ if (pi->qsize_txq != qsize_txq) {
+ device_printf(dev,
+ "using %d instead of %d as the tx queue size.\n",
+ pi->qsize_txq, qsize_txq);
+ }
+
+ pi->dev = device_add_child(dev, "cxgbe", -1);
+ if (pi->dev == NULL) {
+ device_printf(dev,
+ "failed to add device for port %d.\n", i);
+ rc = ENXIO;
+ goto done;
+ }
+ device_set_softc(pi->dev, pi);
+
+ setbit(&sc->registered_device_map, i);
+ }
+
+ if (sc->registered_device_map == 0) {
+ device_printf(dev, "no usable ports\n");
+ rc = ENXIO;
+ goto done;
+ }
+
+ /*
+ * Interrupt type, # of interrupts, # of rx/tx queues, etc.
+ */
+ rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
+ if (rc != 0)
+ goto done; /* error message displayed already */
+
+ sc->intr_type = iaq.intr_type;
+ sc->intr_count = iaq.nirq;
+
+ s = &sc->sge;
+ s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
+ s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
+ s->neq = s->ntxq + s->nrxq; /* the fl in an rxq is an eq */
+ s->niq = s->nrxq + 1; /* 1 extra for firmware event queue */
+ if (iaq.intr_fwd) {
+ sc->flags |= INTR_FWD;
+ s->niq += NFIQ(sc); /* forwarded interrupt queues */
+ s->fiq = malloc(NFIQ(sc) * sizeof(struct sge_iq), M_CXGBE,
+ M_ZERO | M_WAITOK);
+ }
+ s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
+ M_ZERO | M_WAITOK);
+ s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
+ M_ZERO | M_WAITOK);
+ s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
+ M_ZERO | M_WAITOK);
+ s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
+ M_ZERO | M_WAITOK);
+
+ sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
+ M_ZERO | M_WAITOK);
+
+ t4_sysctls(sc);
+
+ /*
+ * Second pass over the ports. This time we know the number of rx and
+ * tx queues that each port should get.
+ */
+ rqidx = tqidx = 0;
+ for_each_port(sc, i) {
+ struct port_info *pi = sc->port[i];
+
+ if (pi == NULL)
+ continue;
+
+ pi->first_rxq = rqidx;
+ pi->nrxq = is_10G_port(pi) ? iaq.nrxq10g : iaq.nrxq1g;
+
+ pi->first_txq = tqidx;
+ pi->ntxq = is_10G_port(pi) ? iaq.ntxq10g : iaq.ntxq1g;
+
+ rqidx += pi->nrxq;
+ tqidx += pi->ntxq;
+ }
+
+ rc = bus_generic_attach(dev);
+ if (rc != 0) {
+ device_printf(dev,
+ "failed to attach all child ports: %d\n", rc);
+ goto done;
+ }
+
+#ifdef INVARIANTS
+ device_printf(dev,
+ "%p, %d ports (0x%x), %d intr_type, %d intr_count\n",
+ sc, sc->params.nports, sc->params.portvec,
+ sc->intr_type, sc->intr_count);
+#endif
+ t4_set_desc(sc);
+
+done:
+ if (rc != 0)
+ t4_detach(dev);
+
+ return (rc);
+}
+
+/*
+ * Idempotent
+ */
+static int
+t4_detach(device_t dev)
+{
+ struct adapter *sc;
+ struct port_info *pi;
+ int i;
+
+ sc = device_get_softc(dev);
+
+ if (sc->cdev)
+ destroy_dev(sc->cdev);
+
+ bus_generic_detach(dev);
+ for (i = 0; i < MAX_NPORTS; i++) {
+ pi = sc->port[i];
+ if (pi) {
+ t4_free_vi(pi->adapter, sc->mbox, sc->pf, 0, pi->viid);
+ if (pi->dev)
+ device_delete_child(dev, pi->dev);
+
+ mtx_destroy(&pi->pi_lock);
+ free(pi, M_CXGBE);
+ }
+ }
+
+ if (sc->flags & FW_OK)
+ t4_fw_bye(sc, sc->mbox);
+
+ if (sc->intr_type == 2 || sc->intr_type == 4)
+ pci_release_msi(dev);
+
+ if (sc->regs_res)
+ bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
+ sc->regs_res);
+
+ if (sc->msix_res)
+ bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
+ sc->msix_res);
+
+ free(sc->irq, M_CXGBE);
+ free(sc->sge.rxq, M_CXGBE);
+ free(sc->sge.txq, M_CXGBE);
+ free(sc->sge.fiq, M_CXGBE);
+ free(sc->sge.iqmap, M_CXGBE);
+ free(sc->sge.eqmap, M_CXGBE);
+ t4_destroy_dma_tag(sc);
+ mtx_destroy(&sc->sc_lock);
+
+ bzero(sc, sizeof(*sc));
+
+ return (0);
+}
+
+
+static int
+cxgbe_probe(device_t dev)
+{
+ char buf[128];
+ struct port_info *pi = device_get_softc(dev);
+
+ snprintf(buf, sizeof(buf), "Port %d", pi->port_id);
+ device_set_desc_copy(dev, buf);
+
+ return (BUS_PROBE_DEFAULT);
+}
+
+#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
+ IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
+ IFCAP_VLAN_HWTSO)
+#define T4_CAP_ENABLE (T4_CAP & ~IFCAP_TSO6)
+
+static int
+cxgbe_attach(device_t dev)
+{
+ struct port_info *pi = device_get_softc(dev);
+ struct ifnet *ifp;
+
+ /* Allocate an ifnet and set it up */
+ ifp = if_alloc(IFT_ETHER);
+ if (ifp == NULL) {
+ device_printf(dev, "Cannot allocate ifnet\n");
+ return (ENOMEM);
+ }
+ pi->ifp = ifp;
+ ifp->if_softc = pi;
+
+ callout_init(&pi->tick, CALLOUT_MPSAFE);
+
+ if_initname(ifp, device_get_name(dev), device_get_unit(dev));
+ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+
+ ifp->if_init = cxgbe_init;
+ ifp->if_ioctl = cxgbe_ioctl;
+ ifp->if_start = cxgbe_start;
+ ifp->if_transmit = cxgbe_transmit;
+ ifp->if_qflush = cxgbe_qflush;
+
+ ifp->if_snd.ifq_drv_maxlen = 1024;
+ IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
+ IFQ_SET_READY(&ifp->if_snd);
+
+ ifp->if_capabilities = T4_CAP;
+ ifp->if_capenable = T4_CAP_ENABLE;
+ ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO;
+
+ /* Initialize ifmedia for this port */
+ ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
+ cxgbe_media_status);
+ build_medialist(pi);
+
+ ether_ifattach(ifp, pi->hw_addr);
+
+#ifdef INVARIANTS
+ device_printf(dev, "%p, %d txq, %d rxq\n", pi, pi->ntxq, pi->nrxq);
+#endif
+
+ cxgbe_sysctls(pi);
+
+ return (0);
+}
+
+static int
+cxgbe_detach(device_t dev)
+{
+ struct port_info *pi = device_get_softc(dev);
+ struct adapter *sc = pi->adapter;
+ int rc;
+
+ /* Tell if_ioctl and if_init that the port is going away */
+ ADAPTER_LOCK(sc);
+ SET_DOOMED(pi);
+ wakeup(&sc->flags);
+ while (IS_BUSY(sc))
+ mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
+ SET_BUSY(sc);
+ ADAPTER_UNLOCK(sc);
+
+ rc = cxgbe_uninit_synchronized(pi);
+ if (rc != 0)
+ device_printf(dev, "port uninit failed: %d.\n", rc);
+
+ ifmedia_removeall(&pi->media);
+ ether_ifdetach(pi->ifp);
+ if_free(pi->ifp);
+
+ ADAPTER_LOCK(sc);
+ CLR_BUSY(sc);
+ wakeup_one(&sc->flags);
+ ADAPTER_UNLOCK(sc);
+
+ return (0);
+}
+
+static void
+cxgbe_init(void *arg)
+{
+ struct port_info *pi = arg;
+ struct adapter *sc = pi->adapter;
+
+ ADAPTER_LOCK(sc);
+ cxgbe_init_locked(pi); /* releases adapter lock */
+ ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
+}
+
+static int
+cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
+{
+ int rc = 0, mtu, flags;
+ struct port_info *pi = ifp->if_softc;
+ struct adapter *sc = pi->adapter;
+ struct ifreq *ifr = (struct ifreq *)data;
+ uint32_t mask;
+
+ switch (cmd) {
+ case SIOCSIFMTU:
+ ADAPTER_LOCK(sc);
+ rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
+ if (rc) {
+fail:
+ ADAPTER_UNLOCK(sc);
+ return (rc);
+ }
+
+ mtu = ifr->ifr_mtu;
+ if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO)) {
+ rc = EINVAL;
+ } else {
+ ifp->if_mtu = mtu;
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ t4_update_fl_bufsize(ifp);
+ PORT_LOCK(pi);
+ rc = update_mac_settings(pi, XGMAC_MTU);
+ PORT_UNLOCK(pi);
+ }
+ }
+ ADAPTER_UNLOCK(sc);
+ break;
+
+ case SIOCSIFFLAGS:
+ ADAPTER_LOCK(sc);
+ if (IS_DOOMED(pi)) {
+ rc = ENXIO;
+ goto fail;
+ }
+ if (ifp->if_flags & IFF_UP) {
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ flags = pi->if_flags;
+ if ((ifp->if_flags ^ flags) &
+ (IFF_PROMISC | IFF_ALLMULTI)) {
+ if (IS_BUSY(sc)) {
+ rc = EBUSY;
+ goto fail;
+ }
+ PORT_LOCK(pi);
+ rc = update_mac_settings(pi,
+ XGMAC_PROMISC | XGMAC_ALLMULTI);
+ PORT_UNLOCK(pi);
+ }
+ ADAPTER_UNLOCK(sc);
+ } else
+ rc = cxgbe_init_locked(pi);
+ pi->if_flags = ifp->if_flags;
+ } else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ rc = cxgbe_uninit_locked(pi);
+ else
+ ADAPTER_UNLOCK(sc);
+
+ ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
+ break;
+
+ case SIOCADDMULTI:
+ case SIOCDELMULTI: /* these two can be called with a mutex held :-( */
+ ADAPTER_LOCK(sc);
+ rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
+ if (rc)
+ goto fail;
+
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ PORT_LOCK(pi);
+ rc = update_mac_settings(pi, XGMAC_MCADDRS);
+ PORT_UNLOCK(pi);
+ }
+ ADAPTER_UNLOCK(sc);
+ break;
+
+ case SIOCSIFCAP:
+ ADAPTER_LOCK(sc);
+ rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
+ if (rc)
+ goto fail;
+
+ mask = ifr->ifr_reqcap ^ ifp->if_capenable;
+ if (mask & IFCAP_TXCSUM) {
+ ifp->if_capenable ^= IFCAP_TXCSUM;
+ ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
+
+ if (IFCAP_TSO & ifp->if_capenable &&
+ !(IFCAP_TXCSUM & ifp->if_capenable)) {
+ ifp->if_capenable &= ~IFCAP_TSO;
+ ifp->if_hwassist &= ~CSUM_TSO;
+ if_printf(ifp,
+ "tso disabled due to -txcsum.\n");
+ }
+ }
+ if (mask & IFCAP_RXCSUM)
+ ifp->if_capenable ^= IFCAP_RXCSUM;
+ if (mask & IFCAP_TSO4) {
+ ifp->if_capenable ^= IFCAP_TSO4;
+
+ if (IFCAP_TSO & ifp->if_capenable) {
+ if (IFCAP_TXCSUM & ifp->if_capenable)
+ ifp->if_hwassist |= CSUM_TSO;
+ else {
+ ifp->if_capenable &= ~IFCAP_TSO;
+ ifp->if_hwassist &= ~CSUM_TSO;
+ if_printf(ifp,
+ "enable txcsum first.\n");
+ rc = EAGAIN;
+ }
+ } else
+ ifp->if_hwassist &= ~CSUM_TSO;
+ }
+ if (mask & IFCAP_LRO) {
+#ifdef INET
+ int i;
+ struct sge_rxq *rxq;
+
+ ifp->if_capenable ^= IFCAP_LRO;
+ for_each_rxq(pi, i, rxq) {
+ if (ifp->if_capenable & IFCAP_LRO)
+ rxq->flags |= RXQ_LRO_ENABLED;
+ else
+ rxq->flags &= ~RXQ_LRO_ENABLED;
+ }
+#endif
+ }
+#ifndef TCP_OFFLOAD_DISABLE
+ if (mask & IFCAP_TOE4) {
+ rc = EOPNOTSUPP;
+ }
+#endif
+ if (mask & IFCAP_VLAN_HWTAGGING) {
+ ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ PORT_LOCK(pi);
+ rc = update_mac_settings(pi, XGMAC_VLANEX);
+ PORT_UNLOCK(pi);
+ }
+ }
+ if (mask & IFCAP_VLAN_MTU) {
+ ifp->if_capenable ^= IFCAP_VLAN_MTU;
+
+ /* Need to find out how to disable auto-mtu-inflation */
+ }
+ if (mask & IFCAP_VLAN_HWTSO)
+ ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
+ if (mask & IFCAP_VLAN_HWCSUM)
+ ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
+
+#ifdef VLAN_CAPABILITIES
+ VLAN_CAPABILITIES(ifp);
+#endif
+ ADAPTER_UNLOCK(sc);
+ break;
+
+ case SIOCSIFMEDIA:
+ case SIOCGIFMEDIA:
+ ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
+ break;
+
+ default:
+ rc = ether_ioctl(ifp, cmd, data);
+ }
+
+ return (rc);
+}
+
+static void
+cxgbe_start(struct ifnet *ifp)
+{
+ struct port_info *pi = ifp->if_softc;
+ struct sge_txq *txq;
+ int i;
+
+ for_each_txq(pi, i, txq) {
+ if (TXQ_TRYLOCK(txq)) {
+ struct buf_ring *br = txq->eq.br;
+ struct mbuf *m;
+
+ m = txq->m ? txq->m : drbr_dequeue(ifp, br);
+ if (m)
+ t4_eth_tx(ifp, txq, m);
+
+ TXQ_UNLOCK(txq);
+ }
+ }
+}
+
+static int
+cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
+{
+ struct port_info *pi = ifp->if_softc;
+ struct adapter *sc = pi->adapter;
+ struct sge_txq *txq = &sc->sge.txq[pi->first_txq];
+ struct buf_ring *br;
+ int rc;
+
+ M_ASSERTPKTHDR(m);
+
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
+ m_freem(m);
+ return (0);
+ }
+
+ if (m->m_flags & M_FLOWID)
+ txq += (m->m_pkthdr.flowid % pi->ntxq);
+ br = txq->eq.br;
+
+ if (TXQ_TRYLOCK(txq) == 0) {
+ /*
+ * XXX: make sure that this packet really is sent out. There is
+		 * a small race where t4_eth_tx may stop draining the drbr and
+		 * go away just before we enqueue this mbuf.
+ */
+
+ return (drbr_enqueue(ifp, br, m));
+ }
+
+ /*
+ * txq->m is the mbuf that is held up due to a temporary shortage of
+ * resources and it should be put on the wire first. Then what's in
+ * drbr and finally the mbuf that was just passed in to us.
+ *
+ * Return code should indicate the fate of the mbuf that was passed in
+ * this time.
+ */
+
+ TXQ_LOCK_ASSERT_OWNED(txq);
+ if (drbr_needs_enqueue(ifp, br) || txq->m) {
+
+ /* Queued for transmission. */
+
+ rc = drbr_enqueue(ifp, br, m);
+ m = txq->m ? txq->m : drbr_dequeue(ifp, br);
+ (void) t4_eth_tx(ifp, txq, m);
+ TXQ_UNLOCK(txq);
+ return (rc);
+ }
+
+ /* Direct transmission. */
+ rc = t4_eth_tx(ifp, txq, m);
+ if (rc != 0 && txq->m)
+ rc = 0; /* held, will be transmitted soon (hopefully) */
+
+ TXQ_UNLOCK(txq);
+ return (rc);
+}
+
+static void
+cxgbe_qflush(struct ifnet *ifp)
+{
+ struct port_info *pi = ifp->if_softc;
+
+ device_printf(pi->dev, "%s unimplemented.\n", __func__);
+}
+
+static int
+cxgbe_media_change(struct ifnet *ifp)
+{
+ struct port_info *pi = ifp->if_softc;
+
+ device_printf(pi->dev, "%s unimplemented.\n", __func__);
+
+ return (EOPNOTSUPP);
+}
+
+static void
+cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
+{
+ struct port_info *pi = ifp->if_softc;
+ struct ifmedia_entry *cur = pi->media.ifm_cur;
+ int speed = pi->link_cfg.speed;
+ int data = (pi->port_type << 8) | pi->mod_type;
+
+ if (cur->ifm_data != data) {
+ build_medialist(pi);
+ cur = pi->media.ifm_cur;
+ }
+
+ ifmr->ifm_status = IFM_AVALID;
+ if (!pi->link_cfg.link_ok)
+ return;
+
+ ifmr->ifm_status |= IFM_ACTIVE;
+
+ /* active and current will differ iff current media is autoselect. */
+ if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
+ return;
+
+ ifmr->ifm_active = IFM_ETHER | IFM_FDX;
+ if (speed == SPEED_10000)
+ ifmr->ifm_active |= IFM_10G_T;
+ else if (speed == SPEED_1000)
+ ifmr->ifm_active |= IFM_1000_T;
+ else if (speed == SPEED_100)
+ ifmr->ifm_active |= IFM_100_TX;
+ else if (speed == SPEED_10)
+ ifmr->ifm_active |= IFM_10_T;
+ else
+ KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
+ speed));
+}
+
+void
+t4_fatal_err(struct adapter *sc)
+{
+ t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
+ t4_intr_disable(sc);
+ log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
+ device_get_nameunit(sc->dev));
+}
+
+static int
+map_bars(struct adapter *sc)
+{
+ sc->regs_rid = PCIR_BAR(0);
+ sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
+ &sc->regs_rid, RF_ACTIVE);
+ if (sc->regs_res == NULL) {
+ device_printf(sc->dev, "cannot map registers.\n");
+ return (ENXIO);
+ }
+ sc->bt = rman_get_bustag(sc->regs_res);
+ sc->bh = rman_get_bushandle(sc->regs_res);
+ sc->mmio_len = rman_get_size(sc->regs_res);
+
+ sc->msix_rid = PCIR_BAR(4);
+ sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
+ &sc->msix_rid, RF_ACTIVE);
+ if (sc->msix_res == NULL) {
+ device_printf(sc->dev, "cannot map MSI-X BAR.\n");
+ return (ENXIO);
+ }
+
+ return (0);
+}
+
+static void
+setup_memwin(struct adapter *sc)
+{
+ u_long bar0;
+
+ bar0 = rman_get_start(sc->regs_res);
+
+ t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 0),
+ (bar0 + MEMWIN0_BASE) | V_BIR(0) |
+ V_WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
+
+ t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 1),
+ (bar0 + MEMWIN1_BASE) | V_BIR(0) |
+ V_WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
+
+ t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2),
+ (bar0 + MEMWIN2_BASE) | V_BIR(0) |
+ V_WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
+}
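+
+/*
+ * The window size written above is encoded as log2(aperture) - 10: the 2KB
+ * window 0 encodes as ilog2(2048) - 10 = 1, the 32KB window 1 as 5, and the
+ * 64KB window 2 as 6.
+ */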
+
+static int
+cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
+ struct intrs_and_queues *iaq)
+{
+ int rc, itype, navail, nc, nrxq10g, nrxq1g;
+
+ bzero(iaq, sizeof(*iaq));
+ nc = mp_ncpus; /* our snapshot of the number of CPUs */
+
+ for (itype = 4; itype; itype >>= 1) {
+
+ if ((itype & intr_types) == 0)
+ continue; /* not allowed */
+
+ if (itype == 4)
+ navail = pci_msix_count(sc->dev);
+ else if (itype == 2)
+ navail = pci_msi_count(sc->dev);
+ else
+ navail = 1;
+
+ if (navail == 0)
+ continue;
+
+ iaq->intr_type = itype;
+
+ iaq->ntxq10g = min(nc, max_ntxq_10g);
+ iaq->ntxq1g = min(nc, max_ntxq_1g);
+
+ nrxq10g = min(nc, max_nrxq_10g);
+ nrxq1g = min(nc, max_nrxq_1g);
+
+ /* Extra 2 is for a) error interrupt b) firmware event */
+ iaq->nirq = n10g * nrxq10g + n1g * nrxq1g + 2;
+ if (iaq->nirq <= navail && intr_fwd == 0) {
+
+ /* One for err, one for fwq, and one for each rxq */
+
+ iaq->intr_fwd = 0;
+ iaq->nrxq10g = nrxq10g;
+ iaq->nrxq1g = nrxq1g;
+ if (itype == 2) {
+ /* # of vectors requested must be power of 2 */
+ while (!powerof2(iaq->nirq))
+ iaq->nirq++;
+ KASSERT(iaq->nirq <= navail,
+ ("%s: bad MSI calculation", __func__));
+ }
+ } else {
+fwd:
+ iaq->intr_fwd = 1;
+ iaq->nirq = navail;
+
+ /*
+ * If we have multiple vectors available reserve one
+ * exclusively for errors. The rest will be shared by
+ * the fwq and data.
+ */
+ if (navail > 1) {
+ navail--;
+
+ if (navail > nc && itype == 4)
+ iaq->nirq = nc + 1;
+ }
+
+ iaq->nrxq10g = min(nrxq10g, navail);
+ iaq->nrxq1g = min(nrxq1g, navail);
+ }
+
+ navail = iaq->nirq;
+ rc = 0;
+ if (itype == 4)
+ rc = pci_alloc_msix(sc->dev, &navail);
+ else if (itype == 2)
+ rc = pci_alloc_msi(sc->dev, &navail);
+
+ if (rc == 0) {
+ if (navail == iaq->nirq)
+ return (0);
+
+ /*
+ * Didn't get the number requested. Use whatever number
+ * the kernel is willing to allocate (it's in navail).
+ */
+ pci_release_msi(sc->dev);
+ goto fwd;
+ }
+
+ device_printf(sc->dev,
+ "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
+ itype, rc, iaq->nirq, navail);
+ }
+
+ device_printf(sc->dev,
+ "failed to find a usable interrupt type. "
+ "allowed=%d, msi-x=%d, msi=%d, intx=1", intr_types,
+ pci_msix_count(sc->dev), pci_msi_count(sc->dev));
+
+ return (ENXIO);
+}
+
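+/*
+ * A worked example of the vector arithmetic above: with two 10G ports, no 1G
+ * ports, and the default max_nrxq_10g of 8 on a machine with 8 or more CPUs,
+ * the first attempt asks for 2 * 8 + 2 = 18 vectors (one per rxq plus the
+ * error interrupt and the firmware event queue).  If that many are not
+ * available the code falls back to forwarded interrupts and shares whatever
+ * the bus gives it.
+ */
+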
+/*
+ * Install a compatible firmware (if required), establish contact with it,
+ * become the master, and reset the device.
+ */
+static int
+prep_firmware(struct adapter *sc)
+{
+ const struct firmware *fw;
+ int rc;
+ enum dev_state state;
+
+ /* Check firmware version and install a different one if necessary */
+ rc = t4_check_fw_version(sc);
+ if (rc != 0 || force_firmware_install) {
+
+ fw = firmware_get(T4_FWNAME);
+ if (fw == NULL) {
+ device_printf(sc->dev,
+ "Could not find firmware image %s\n", T4_FWNAME);
+ return (ENOENT);
+ }
+
+ device_printf(sc->dev,
+ "installing firmware %d.%d.%d on card.\n",
+ FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
+ rc = -t4_load_fw(sc, fw->data, fw->datasize);
+ if (rc != 0) {
+ device_printf(sc->dev,
+ "failed to install firmware: %d\n", rc);
+ return (rc);
+ } else {
+ t4_get_fw_version(sc, &sc->params.fw_vers);
+ t4_get_tp_version(sc, &sc->params.tp_vers);
+ }
+
+ firmware_put(fw, FIRMWARE_UNLOAD);
+ }
+
+ /* Contact firmware, request master */
+ rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MUST, &state);
+ if (rc < 0) {
+ rc = -rc;
+ device_printf(sc->dev,
+ "failed to connect to the firmware: %d.\n", rc);
+ return (rc);
+ }
+
+ /* Reset device */
+ rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST);
+ if (rc != 0) {
+ device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
+ if (rc != ETIMEDOUT && rc != EIO)
+ t4_fw_bye(sc, sc->mbox);
+ return (rc);
+ }
+
+ snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
+ G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
+ G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
+ G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
+ G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
+ sc->flags |= FW_OK;
+
+ return (0);
+}
+
+static int
+get_capabilities(struct adapter *sc, struct fw_caps_config_cmd *caps)
+{
+ int rc;
+
+ bzero(caps, sizeof(*caps));
+ caps->op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_READ);
+ caps->retval_len16 = htobe32(FW_LEN16(*caps));
+
+ rc = -t4_wr_mbox(sc, sc->mbox, caps, sizeof(*caps), caps);
+ if (rc != 0)
+ return (rc);
+
+ if (caps->niccaps & htobe16(FW_CAPS_CONFIG_NIC_VM))
+ caps->niccaps ^= htobe16(FW_CAPS_CONFIG_NIC_VM);
+
+ caps->op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
+ rc = -t4_wr_mbox(sc, sc->mbox, caps, sizeof(*caps), NULL);
+
+ return (rc);
+}
+
+static int
+get_params(struct adapter *sc, struct fw_caps_config_cmd *caps)
+{
+ int rc;
+ uint32_t params[7], val[7];
+
+#define FW_PARAM_DEV(param) \
+ (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
+#define FW_PARAM_PFVF(param) \
+ (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
+
+ params[0] = FW_PARAM_DEV(PORTVEC);
+ params[1] = FW_PARAM_PFVF(IQFLINT_START);
+ params[2] = FW_PARAM_PFVF(EQ_START);
+ params[3] = FW_PARAM_PFVF(FILTER_START);
+ params[4] = FW_PARAM_PFVF(FILTER_END);
+ rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 5, params, val);
+ if (rc != 0) {
+ device_printf(sc->dev,
+ "failed to query parameters: %d.\n", rc);
+ goto done;
+ }
+
+ sc->params.portvec = val[0];
+ sc->params.nports = 0;
+ while (val[0]) {
+ sc->params.nports++;
+ val[0] &= val[0] - 1;
+ }
+
+ sc->sge.iq_start = val[1];
+ sc->sge.eq_start = val[2];
+ sc->tids.ftid_base = val[3];
+ sc->tids.nftids = val[4] - val[3] + 1;
+
+ if (caps->toecaps) {
+ /* query offload-related parameters */
+ params[0] = FW_PARAM_DEV(NTID);
+ params[1] = FW_PARAM_PFVF(SERVER_START);
+ params[2] = FW_PARAM_PFVF(SERVER_END);
+ params[3] = FW_PARAM_PFVF(TDDP_START);
+ params[4] = FW_PARAM_PFVF(TDDP_END);
+ params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
+ rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, params, val);
+ if (rc != 0) {
+ device_printf(sc->dev,
+ "failed to query TOE parameters: %d.\n", rc);
+ goto done;
+ }
+ sc->tids.ntids = val[0];
+ sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
+ sc->tids.stid_base = val[1];
+ sc->tids.nstids = val[2] - val[1] + 1;
+ sc->vres.ddp.start = val[3];
+ sc->vres.ddp.size = val[4] - val[3] + 1;
+ sc->params.ofldq_wr_cred = val[5];
+ sc->params.offload = 1;
+ }
+ if (caps->rdmacaps) {
+ params[0] = FW_PARAM_PFVF(STAG_START);
+ params[1] = FW_PARAM_PFVF(STAG_END);
+ params[2] = FW_PARAM_PFVF(RQ_START);
+ params[3] = FW_PARAM_PFVF(RQ_END);
+ params[4] = FW_PARAM_PFVF(PBL_START);
+ params[5] = FW_PARAM_PFVF(PBL_END);
+ rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, params, val);
+ if (rc != 0) {
+ device_printf(sc->dev,
+ "failed to query RDMA parameters: %d.\n", rc);
+ goto done;
+ }
+ sc->vres.stag.start = val[0];
+ sc->vres.stag.size = val[1] - val[0] + 1;
+ sc->vres.rq.start = val[2];
+ sc->vres.rq.size = val[3] - val[2] + 1;
+ sc->vres.pbl.start = val[4];
+ sc->vres.pbl.size = val[5] - val[4] + 1;
+ }
+ if (caps->iscsicaps) {
+ params[0] = FW_PARAM_PFVF(ISCSI_START);
+ params[1] = FW_PARAM_PFVF(ISCSI_END);
+ rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, params, val);
+ if (rc != 0) {
+ device_printf(sc->dev,
+ "failed to query iSCSI parameters: %d.\n", rc);
+ goto done;
+ }
+ sc->vres.iscsi.start = val[0];
+ sc->vres.iscsi.size = val[1] - val[0] + 1;
+ }
+#undef FW_PARAM_PFVF
+#undef FW_PARAM_DEV
+
+done:
+ return (rc);
+}
+
+static void
+t4_set_desc(struct adapter *sc)
+{
+ char buf[128];
+ struct adapter_params *p = &sc->params;
+
+ snprintf(buf, sizeof(buf),
+ "Chelsio %s (rev %d) %d port %sNIC PCIe-x%d %s, S/N:%s, E/C:%s",
+ p->vpd.id, p->rev, p->nports, is_offload(sc) ? "R" : "",
+	    p->pci.width, (sc->intr_type == 4) ? "MSI-X" :
+ (sc->intr_type == 2) ? "MSI" : "INTx", p->vpd.sn, p->vpd.ec);
+
+ device_set_desc_copy(sc->dev, buf);
+}
+
+static void
+build_medialist(struct port_info *pi)
+{
+ struct ifmedia *media = &pi->media;
+ int data, m;
+
+ PORT_LOCK(pi);
+
+ ifmedia_removeall(media);
+
+ m = IFM_ETHER | IFM_FDX;
+ data = (pi->port_type << 8) | pi->mod_type;
+
+ switch(pi->port_type) {
+ case FW_PORT_TYPE_BT_XFI:
+ ifmedia_add(media, m | IFM_10G_T, data, NULL);
+ break;
+
+ case FW_PORT_TYPE_BT_XAUI:
+ ifmedia_add(media, m | IFM_10G_T, data, NULL);
+ /* fall through */
+
+ case FW_PORT_TYPE_BT_SGMII:
+ ifmedia_add(media, m | IFM_1000_T, data, NULL);
+ ifmedia_add(media, m | IFM_100_TX, data, NULL);
+ ifmedia_add(media, IFM_ETHER | IFM_AUTO, data, NULL);
+ ifmedia_set(media, IFM_ETHER | IFM_AUTO);
+ break;
+
+ case FW_PORT_TYPE_CX4:
+ ifmedia_add(media, m | IFM_10G_CX4, data, NULL);
+ ifmedia_set(media, m | IFM_10G_CX4);
+ break;
+
+ case FW_PORT_TYPE_SFP:
+ case FW_PORT_TYPE_FIBER_XFI:
+ case FW_PORT_TYPE_FIBER_XAUI:
+ switch (pi->mod_type) {
+
+ case FW_PORT_MOD_TYPE_LR:
+ ifmedia_add(media, m | IFM_10G_LR, data, NULL);
+ ifmedia_set(media, m | IFM_10G_LR);
+ break;
+
+ case FW_PORT_MOD_TYPE_SR:
+ ifmedia_add(media, m | IFM_10G_SR, data, NULL);
+ ifmedia_set(media, m | IFM_10G_SR);
+ break;
+
+ case FW_PORT_MOD_TYPE_LRM:
+ ifmedia_add(media, m | IFM_10G_LRM, data, NULL);
+ ifmedia_set(media, m | IFM_10G_LRM);
+ break;
+
+ case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
+ case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
+ ifmedia_add(media, m | IFM_10G_TWINAX, data, NULL);
+ ifmedia_set(media, m | IFM_10G_TWINAX);
+ break;
+
+ case FW_PORT_MOD_TYPE_NONE:
+ m &= ~IFM_FDX;
+ ifmedia_add(media, m | IFM_NONE, data, NULL);
+ ifmedia_set(media, m | IFM_NONE);
+ break;
+
+ case FW_PORT_MOD_TYPE_NA:
+ case FW_PORT_MOD_TYPE_ER:
+ default:
+ ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
+ ifmedia_set(media, m | IFM_UNKNOWN);
+ break;
+ }
+ break;
+
+ case FW_PORT_TYPE_KX4:
+ case FW_PORT_TYPE_KX:
+ case FW_PORT_TYPE_KR:
+ default:
+ ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
+ ifmedia_set(media, m | IFM_UNKNOWN);
+ break;
+ }
+
+ PORT_UNLOCK(pi);
+}
+
+/*
+ * Program the port's XGMAC based on parameters in ifnet. The caller also
+ * indicates which parameters should be programmed (the rest are left alone).
+ */
+static int
+update_mac_settings(struct port_info *pi, int flags)
+{
+ int rc;
+ struct ifnet *ifp = pi->ifp;
+ struct adapter *sc = pi->adapter;
+ int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
+
+ PORT_LOCK_ASSERT_OWNED(pi);
+ KASSERT(flags, ("%s: not told what to update.", __func__));
+
+ if (flags & XGMAC_MTU)
+ mtu = ifp->if_mtu;
+
+ if (flags & XGMAC_PROMISC)
+ promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;
+
+ if (flags & XGMAC_ALLMULTI)
+ allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;
+
+ if (flags & XGMAC_VLANEX)
+ vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
+
+ rc = -t4_set_rxmode(sc, sc->mbox, pi->viid, mtu, promisc, allmulti, 1,
+ vlanex, false);
+ if (rc) {
+ if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, rc);
+ return (rc);
+ }
+
+ if (flags & XGMAC_UCADDR) {
+ uint8_t ucaddr[ETHER_ADDR_LEN];
+
+ bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
+ rc = t4_change_mac(sc, sc->mbox, pi->viid, pi->xact_addr_filt,
+ ucaddr, true, true);
+ if (rc < 0) {
+ rc = -rc;
+ if_printf(ifp, "change_mac failed: %d\n", rc);
+ return (rc);
+ } else {
+ pi->xact_addr_filt = rc;
+ rc = 0;
+ }
+ }
+
+ if (flags & XGMAC_MCADDRS) {
+ const uint8_t *mcaddr;
+ int del = 1;
+ uint64_t hash = 0;
+ struct ifmultiaddr *ifma;
+
+ if_maddr_rlock(ifp);
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_LINK)
+ continue;
+ mcaddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
+
+ rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid, del, 1,
+ &mcaddr, NULL, &hash, 0);
+ if (rc < 0) {
+ rc = -rc;
+ if_printf(ifp, "failed to add mc address"
+ " %02x:%02x:%02x:%02x:%02x:%02x rc=%d\n",
+ mcaddr[0], mcaddr[1], mcaddr[2], mcaddr[3],
+ mcaddr[4], mcaddr[5], rc);
+ goto mcfail;
+ }
+ del = 0;
+ }
+
+ rc = -t4_set_addr_hash(sc, sc->mbox, pi->viid, 0, hash, 0);
+ if (rc != 0)
+ if_printf(ifp, "failed to set mc address hash: %d", rc);
+mcfail:
+ if_maddr_runlock(ifp);
+ }
+
+ return (rc);
+}
+
+static int
+cxgbe_init_locked(struct port_info *pi)
+{
+ struct adapter *sc = pi->adapter;
+ int rc = 0;
+
+ ADAPTER_LOCK_ASSERT_OWNED(sc);
+
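+	/* Wait our turn: sleep until the adapter is no longer busy. */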
+ while (!IS_DOOMED(pi) && IS_BUSY(sc)) {
+ if (mtx_sleep(&sc->flags, &sc->sc_lock, PCATCH, "t4init", 0)) {
+ rc = EINTR;
+ goto done;
+ }
+ }
+ if (IS_DOOMED(pi)) {
+ rc = ENXIO;
+ goto done;
+ }
+ KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
+
+ /* Give up the adapter lock, port init code can sleep. */
+ SET_BUSY(sc);
+ ADAPTER_UNLOCK(sc);
+
+ rc = cxgbe_init_synchronized(pi);
+
+done:
+ ADAPTER_LOCK(sc);
+ KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
+ CLR_BUSY(sc);
+ wakeup_one(&sc->flags);
+ ADAPTER_UNLOCK(sc);
+ return (rc);
+}
+
+static int
+cxgbe_init_synchronized(struct port_info *pi)
+{
+ struct adapter *sc = pi->adapter;
+ struct ifnet *ifp = pi->ifp;
+ int rc = 0, i;
+ uint16_t *rss;
+ struct sge_rxq *rxq;
+
+ ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
+
+ if (isset(&sc->open_device_map, pi->port_id)) {
+ KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
+ ("mismatch between open_device_map and if_drv_flags"));
+ return (0); /* already running */
+ }
+
+ if (sc->open_device_map == 0 && ((rc = first_port_up(sc)) != 0))
+ return (rc); /* error message displayed already */
+
+ /*
+ * Allocate tx/rx/fl queues for this port.
+ */
+ rc = t4_setup_eth_queues(pi);
+ if (rc != 0)
+ goto done; /* error message displayed already */
+
+ /*
+ * Setup RSS for this port.
+ */
+ rss = malloc(pi->nrxq * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK);
+ for_each_rxq(pi, i, rxq) {
+ rss[i] = rxq->iq.abs_id;
+ }
+ rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0, pi->rss_size, rss,
+ pi->nrxq);
+ free(rss, M_CXGBE);
+ if (rc != 0) {
+ if_printf(ifp, "rss_config failed: %d\n", rc);
+ goto done;
+ }
+
+ PORT_LOCK(pi);
+ rc = update_mac_settings(pi, XGMAC_ALL);
+ PORT_UNLOCK(pi);
+ if (rc)
+ goto done; /* error message displayed already */
+
+ rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
+ if (rc != 0) {
+ if_printf(ifp, "start_link failed: %d\n", rc);
+ goto done;
+ }
+
+ rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true);
+ if (rc != 0) {
+ if_printf(ifp, "enable_vi failed: %d\n", rc);
+ goto done;
+ }
+ pi->flags |= VI_ENABLED;
+
+ /* all ok */
+ setbit(&sc->open_device_map, pi->port_id);
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+
+ callout_reset(&pi->tick, hz, cxgbe_tick, pi);
+done:
+ if (rc != 0)
+ cxgbe_uninit_synchronized(pi);
+
+ return (rc);
+}
+
+static int
+cxgbe_uninit_locked(struct port_info *pi)
+{
+ struct adapter *sc = pi->adapter;
+ int rc;
+
+ ADAPTER_LOCK_ASSERT_OWNED(sc);
+
+ while (!IS_DOOMED(pi) && IS_BUSY(sc)) {
+ if (mtx_sleep(&sc->flags, &sc->sc_lock, PCATCH, "t4uninit", 0)) {
+ rc = EINTR;
+ goto done;
+ }
+ }
+ if (IS_DOOMED(pi)) {
+ rc = ENXIO;
+ goto done;
+ }
+ KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
+ SET_BUSY(sc);
+ ADAPTER_UNLOCK(sc);
+
+ rc = cxgbe_uninit_synchronized(pi);
+
+ ADAPTER_LOCK(sc);
+ KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
+ CLR_BUSY(sc);
+ wakeup_one(&sc->flags);
+done:
+ ADAPTER_UNLOCK(sc);
+ return (rc);
+}
+
+/*
+ * Idempotent.
+ */
+static int
+cxgbe_uninit_synchronized(struct port_info *pi)
+{
+ struct adapter *sc = pi->adapter;
+ struct ifnet *ifp = pi->ifp;
+ int rc;
+
+ /*
+ * taskqueue_drain may cause a deadlock if the adapter lock is held.
+ */
+ ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
+
+ /*
+ * Clear this port's bit from the open device map, and then drain
+ * tasks and callouts.
+ */
+ clrbit(&sc->open_device_map, pi->port_id);
+
+ PORT_LOCK(pi);
+ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+ callout_stop(&pi->tick);
+ PORT_UNLOCK(pi);
+ callout_drain(&pi->tick);
+
+ /*
+ * Stop and then free the queues' resources, including the queues
+ * themselves.
+ *
+ * XXX: we could just stop the queues here (on ifconfig down) and free
+ * them later (on port detach), but having up/down go through the entire
+ * allocate/activate/deactivate/free sequence is a good way to find
+ * leaks and bugs.
+ */
+ rc = t4_teardown_eth_queues(pi);
+ if (rc != 0)
+ if_printf(ifp, "teardown failed: %d\n", rc);
+
+ if (pi->flags & VI_ENABLED) {
+ rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false);
+ if (rc)
+ if_printf(ifp, "disable_vi failed: %d\n", rc);
+ else
+ pi->flags &= ~VI_ENABLED;
+ }
+
+ pi->link_cfg.link_ok = 0;
+ pi->link_cfg.speed = 0;
+ t4_os_link_changed(sc, pi->port_id, 0);
+
+ if (sc->open_device_map == 0)
+ last_port_down(sc);
+
+ return (0);
+}
+
+#define T4_ALLOC_IRQ(sc, irqid, rid, handler, arg, name) do { \
+ rc = t4_alloc_irq(sc, &sc->irq[irqid], rid, handler, arg, name); \
+ if (rc != 0) \
+ goto done; \
+} while (0)
+static int
+first_port_up(struct adapter *sc)
+{
+ int rc, i;
+ char name[8];
+
+ ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
+
+ /*
+ * The firmware event queue and the optional forwarded interrupt queues.
+ */
+ rc = t4_setup_adapter_iqs(sc);
+ if (rc != 0)
+ goto done;
+
+ /*
+ * Setup interrupts.
+ */
+ if (sc->intr_count == 1) {
+ KASSERT(sc->flags & INTR_FWD,
+ ("%s: single interrupt but not forwarded?", __func__));
+ T4_ALLOC_IRQ(sc, 0, 0, t4_intr_all, sc, "all");
+ } else {
+ /* Multiple interrupts. The first one is always error intr */
+ T4_ALLOC_IRQ(sc, 0, 1, t4_intr_err, sc, "err");
+
+ if (sc->flags & INTR_FWD) {
+ /* The rest are shared by the fwq and all data intr */
+ for (i = 1; i < sc->intr_count; i++) {
+ snprintf(name, sizeof(name), "mux%d", i - 1);
+ T4_ALLOC_IRQ(sc, i, i + 1, t4_intr_fwd,
+ &sc->sge.fiq[i - 1], name);
+ }
+ } else {
+ struct port_info *pi;
+ int p, q;
+
+ T4_ALLOC_IRQ(sc, 1, 2, t4_intr_evt, &sc->sge.fwq,
+ "evt");
+
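+			/* One vector per rx queue, port by port. */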
+ p = q = 0;
+ pi = sc->port[p];
+ for (i = 2; i < sc->intr_count; i++) {
+ snprintf(name, sizeof(name), "p%dq%d", p, q);
+ if (++q >= pi->nrxq) {
+ p++;
+ q = 0;
+ pi = sc->port[p];
+ }
+ T4_ALLOC_IRQ(sc, i, i + 1, t4_intr_data,
+ &sc->sge.rxq[i - 2], name);
+ }
+ }
+ }
+
+ t4_intr_enable(sc);
+ sc->flags |= FULL_INIT_DONE;
+
+done:
+ if (rc != 0)
+ last_port_down(sc);
+
+ return (rc);
+}
+#undef T4_ALLOC_IRQ
+
+/*
+ * Idempotent.
+ */
+static int
+last_port_down(struct adapter *sc)
+{
+ int i;
+
+ ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
+
+ t4_intr_disable(sc);
+
+ t4_teardown_adapter_iqs(sc);
+
+ for (i = 0; i < sc->intr_count; i++)
+ t4_free_irq(sc, &sc->irq[i]);
+
+ sc->flags &= ~FULL_INIT_DONE;
+
+ return (0);
+}
+
+static int
+t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
+ iq_intr_handler_t *handler, void *arg, char *name)
+{
+ int rc;
+
+ irq->rid = rid;
+ irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
+ RF_SHAREABLE | RF_ACTIVE);
+ if (irq->res == NULL) {
+ device_printf(sc->dev,
+ "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
+ return (ENOMEM);
+ }
+
+ rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
+ NULL, handler, arg, &irq->tag);
+ if (rc != 0) {
+ device_printf(sc->dev,
+ "failed to setup interrupt for rid %d, name %s: %d\n",
+ rid, name, rc);
+ } else if (name)
+ bus_describe_intr(sc->dev, irq->res, irq->tag, name);
+
+ return (rc);
+}
+
+static int
+t4_free_irq(struct adapter *sc, struct irq *irq)
+{
+ if (irq->tag)
+ bus_teardown_intr(sc->dev, irq->res, irq->tag);
+ if (irq->res)
+ bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
+
+ bzero(irq, sizeof(*irq));
+
+ return (0);
+}
+
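+/* Read registers in the range [start, end] into buf at offset start. */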
+static void
+reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start,
+ unsigned int end)
+{
+ uint32_t *p = (uint32_t *)(buf + start);
+
+ for ( ; start <= end; start += sizeof(uint32_t))
+ *p++ = t4_read_reg(sc, start);
+}
+
+static void
+t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
+{
+ int i;
+ static const unsigned int reg_ranges[] = {
+ 0x1008, 0x1108,
+ 0x1180, 0x11b4,
+ 0x11fc, 0x123c,
+ 0x1300, 0x173c,
+ 0x1800, 0x18fc,
+ 0x3000, 0x30d8,
+ 0x30e0, 0x5924,
+ 0x5960, 0x59d4,
+ 0x5a00, 0x5af8,
+ 0x6000, 0x6098,
+ 0x6100, 0x6150,
+ 0x6200, 0x6208,
+ 0x6240, 0x6248,
+ 0x6280, 0x6338,
+ 0x6370, 0x638c,
+ 0x6400, 0x643c,
+ 0x6500, 0x6524,
+ 0x6a00, 0x6a38,
+ 0x6a60, 0x6a78,
+ 0x6b00, 0x6b84,
+ 0x6bf0, 0x6c84,
+ 0x6cf0, 0x6d84,
+ 0x6df0, 0x6e84,
+ 0x6ef0, 0x6f84,
+ 0x6ff0, 0x7084,
+ 0x70f0, 0x7184,
+ 0x71f0, 0x7284,
+ 0x72f0, 0x7384,
+ 0x73f0, 0x7450,
+ 0x7500, 0x7530,
+ 0x7600, 0x761c,
+ 0x7680, 0x76cc,
+ 0x7700, 0x7798,
+ 0x77c0, 0x77fc,
+ 0x7900, 0x79fc,
+ 0x7b00, 0x7c38,
+ 0x7d00, 0x7efc,
+ 0x8dc0, 0x8e1c,
+ 0x8e30, 0x8e78,
+ 0x8ea0, 0x8f6c,
+ 0x8fc0, 0x9074,
+ 0x90fc, 0x90fc,
+ 0x9400, 0x9458,
+ 0x9600, 0x96bc,
+ 0x9800, 0x9808,
+ 0x9820, 0x983c,
+ 0x9850, 0x9864,
+ 0x9c00, 0x9c6c,
+ 0x9c80, 0x9cec,
+ 0x9d00, 0x9d6c,
+ 0x9d80, 0x9dec,
+ 0x9e00, 0x9e6c,
+ 0x9e80, 0x9eec,
+ 0x9f00, 0x9f6c,
+ 0x9f80, 0x9fec,
+ 0xd004, 0xd03c,
+ 0xdfc0, 0xdfe0,
+ 0xe000, 0xea7c,
+ 0xf000, 0x11190,
+ 0x19040, 0x19124,
+ 0x19150, 0x191b0,
+ 0x191d0, 0x191e8,
+ 0x19238, 0x1924c,
+ 0x193f8, 0x19474,
+ 0x19490, 0x194f8,
+ 0x19800, 0x19f30,
+ 0x1a000, 0x1a06c,
+ 0x1a0b0, 0x1a120,
+ 0x1a128, 0x1a138,
+ 0x1a190, 0x1a1c4,
+ 0x1a1fc, 0x1a1fc,
+ 0x1e040, 0x1e04c,
+ 0x1e240, 0x1e28c,
+ 0x1e2c0, 0x1e2c0,
+ 0x1e2e0, 0x1e2e0,
+ 0x1e300, 0x1e384,
+ 0x1e3c0, 0x1e3c8,
+ 0x1e440, 0x1e44c,
+ 0x1e640, 0x1e68c,
+ 0x1e6c0, 0x1e6c0,
+ 0x1e6e0, 0x1e6e0,
+ 0x1e700, 0x1e784,
+ 0x1e7c0, 0x1e7c8,
+ 0x1e840, 0x1e84c,
+ 0x1ea40, 0x1ea8c,
+ 0x1eac0, 0x1eac0,
+ 0x1eae0, 0x1eae0,
+ 0x1eb00, 0x1eb84,
+ 0x1ebc0, 0x1ebc8,
+ 0x1ec40, 0x1ec4c,
+ 0x1ee40, 0x1ee8c,
+ 0x1eec0, 0x1eec0,
+ 0x1eee0, 0x1eee0,
+ 0x1ef00, 0x1ef84,
+ 0x1efc0, 0x1efc8,
+ 0x1f040, 0x1f04c,
+ 0x1f240, 0x1f28c,
+ 0x1f2c0, 0x1f2c0,
+ 0x1f2e0, 0x1f2e0,
+ 0x1f300, 0x1f384,
+ 0x1f3c0, 0x1f3c8,
+ 0x1f440, 0x1f44c,
+ 0x1f640, 0x1f68c,
+ 0x1f6c0, 0x1f6c0,
+ 0x1f6e0, 0x1f6e0,
+ 0x1f700, 0x1f784,
+ 0x1f7c0, 0x1f7c8,
+ 0x1f840, 0x1f84c,
+ 0x1fa40, 0x1fa8c,
+ 0x1fac0, 0x1fac0,
+ 0x1fae0, 0x1fae0,
+ 0x1fb00, 0x1fb84,
+ 0x1fbc0, 0x1fbc8,
+ 0x1fc40, 0x1fc4c,
+ 0x1fe40, 0x1fe8c,
+ 0x1fec0, 0x1fec0,
+ 0x1fee0, 0x1fee0,
+ 0x1ff00, 0x1ff84,
+ 0x1ffc0, 0x1ffc8,
+ 0x20000, 0x2002c,
+ 0x20100, 0x2013c,
+ 0x20190, 0x201c8,
+ 0x20200, 0x20318,
+ 0x20400, 0x20528,
+ 0x20540, 0x20614,
+ 0x21000, 0x21040,
+ 0x2104c, 0x21060,
+ 0x210c0, 0x210ec,
+ 0x21200, 0x21268,
+ 0x21270, 0x21284,
+ 0x212fc, 0x21388,
+ 0x21400, 0x21404,
+ 0x21500, 0x21518,
+ 0x2152c, 0x2153c,
+ 0x21550, 0x21554,
+ 0x21600, 0x21600,
+ 0x21608, 0x21628,
+ 0x21630, 0x2163c,
+ 0x21700, 0x2171c,
+ 0x21780, 0x2178c,
+ 0x21800, 0x21c38,
+ 0x21c80, 0x21d7c,
+ 0x21e00, 0x21e04,
+ 0x22000, 0x2202c,
+ 0x22100, 0x2213c,
+ 0x22190, 0x221c8,
+ 0x22200, 0x22318,
+ 0x22400, 0x22528,
+ 0x22540, 0x22614,
+ 0x23000, 0x23040,
+ 0x2304c, 0x23060,
+ 0x230c0, 0x230ec,
+ 0x23200, 0x23268,
+ 0x23270, 0x23284,
+ 0x232fc, 0x23388,
+ 0x23400, 0x23404,
+ 0x23500, 0x23518,
+ 0x2352c, 0x2353c,
+ 0x23550, 0x23554,
+ 0x23600, 0x23600,
+ 0x23608, 0x23628,
+ 0x23630, 0x2363c,
+ 0x23700, 0x2371c,
+ 0x23780, 0x2378c,
+ 0x23800, 0x23c38,
+ 0x23c80, 0x23d7c,
+ 0x23e00, 0x23e04,
+ 0x24000, 0x2402c,
+ 0x24100, 0x2413c,
+ 0x24190, 0x241c8,
+ 0x24200, 0x24318,
+ 0x24400, 0x24528,
+ 0x24540, 0x24614,
+ 0x25000, 0x25040,
+ 0x2504c, 0x25060,
+ 0x250c0, 0x250ec,
+ 0x25200, 0x25268,
+ 0x25270, 0x25284,
+ 0x252fc, 0x25388,
+ 0x25400, 0x25404,
+ 0x25500, 0x25518,
+ 0x2552c, 0x2553c,
+ 0x25550, 0x25554,
+ 0x25600, 0x25600,
+ 0x25608, 0x25628,
+ 0x25630, 0x2563c,
+ 0x25700, 0x2571c,
+ 0x25780, 0x2578c,
+ 0x25800, 0x25c38,
+ 0x25c80, 0x25d7c,
+ 0x25e00, 0x25e04,
+ 0x26000, 0x2602c,
+ 0x26100, 0x2613c,
+ 0x26190, 0x261c8,
+ 0x26200, 0x26318,
+ 0x26400, 0x26528,
+ 0x26540, 0x26614,
+ 0x27000, 0x27040,
+ 0x2704c, 0x27060,
+ 0x270c0, 0x270ec,
+ 0x27200, 0x27268,
+ 0x27270, 0x27284,
+ 0x272fc, 0x27388,
+ 0x27400, 0x27404,
+ 0x27500, 0x27518,
+ 0x2752c, 0x2753c,
+ 0x27550, 0x27554,
+ 0x27600, 0x27600,
+ 0x27608, 0x27628,
+ 0x27630, 0x2763c,
+ 0x27700, 0x2771c,
+ 0x27780, 0x2778c,
+ 0x27800, 0x27c38,
+ 0x27c80, 0x27d7c,
+ 0x27e00, 0x27e04
+ };
+
+ regs->version = 4 | (sc->params.rev << 10);
+ for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2)
+ reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]);
+}
+
+static void
+cxgbe_tick(void *arg)
+{
+ struct port_info *pi = arg;
+ struct ifnet *ifp = pi->ifp;
+ struct sge_txq *txq;
+ int i, drops;
+ struct port_stats *s = &pi->stats;
+
+ PORT_LOCK(pi);
+ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ PORT_UNLOCK(pi);
+ return; /* without scheduling another callout */
+ }
+
+ t4_get_port_stats(pi->adapter, pi->tx_chan, s);
+
+ ifp->if_opackets = s->tx_frames;
+ ifp->if_ipackets = s->rx_frames;
+ ifp->if_obytes = s->tx_octets;
+ ifp->if_ibytes = s->rx_octets;
+ ifp->if_omcasts = s->tx_mcast_frames;
+ ifp->if_imcasts = s->rx_mcast_frames;
+ ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
+ s->rx_ovflow3;
+
+ drops = s->tx_drop;
+ for_each_txq(pi, i, txq)
+ drops += txq->eq.br->br_drops;
+ ifp->if_snd.ifq_drops = drops;
+
+ ifp->if_oerrors = s->tx_error_frames;
+ ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long +
+ s->rx_fcs_err + s->rx_len_err;
+
+ callout_schedule(&pi->tick, hz);
+ PORT_UNLOCK(pi);
+}
+
+static int
+t4_sysctls(struct adapter *sc)
+{
+ struct sysctl_ctx_list *ctx;
+ struct sysctl_oid *oid;
+ struct sysctl_oid_list *children;
+
+ ctx = device_get_sysctl_ctx(sc->dev);
+ oid = device_get_sysctl_tree(sc->dev);
+ children = SYSCTL_CHILDREN(oid);
+
+ SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD,
+ &sc->params.nports, 0, "# of ports");
+
+ SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
+ &sc->params.rev, 0, "chip hardware revision");
+
+ SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
+ CTLFLAG_RD, &sc->fw_version, 0, "firmware version");
+
+ SYSCTL_ADD_INT(ctx, children, OID_AUTO, "TOE", CTLFLAG_RD,
+ &sc->params.offload, 0, "hardware is capable of TCP offload");
+
+ SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD,
+	    &sc->params.vpd.cclk, 0, "core clock frequency (in kHz)");
+
+ /* XXX: this doesn't seem to show up */
+ SYSCTL_ADD_OPAQUE(ctx, children, OID_AUTO, "holdoff_tmr",
+ CTLFLAG_RD, &intr_timer, sizeof(intr_timer), "IU",
+ "interrupt holdoff timer values (us)");
+
+ /* XXX: this doesn't seem to show up */
+ SYSCTL_ADD_OPAQUE(ctx, children, OID_AUTO, "holdoff_pktc",
+ CTLFLAG_RD, &intr_pktcount, sizeof(intr_pktcount), "IU",
+ "interrupt holdoff packet counter values");
+
+ return (0);
+}
+
+static int
+cxgbe_sysctls(struct port_info *pi)
+{
+ struct sysctl_ctx_list *ctx;
+ struct sysctl_oid *oid;
+ struct sysctl_oid_list *children;
+
+ ctx = device_get_sysctl_ctx(pi->dev);
+
+ /*
+ * dev.cxgbe.X.
+ */
+ oid = device_get_sysctl_tree(pi->dev);
+ children = SYSCTL_CHILDREN(oid);
+
+ SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
+ &pi->nrxq, 0, "# of rx queues");
+ SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
+ &pi->ntxq, 0, "# of tx queues");
+ SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
+ &pi->first_rxq, 0, "index of first rx queue");
+ SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
+ &pi->first_txq, 0, "index of first tx queue");
+
+ SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
+ CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I",
+ "holdoff timer index");
+ SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
+ CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I",
+ "holdoff packet counter index");
+
+ SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
+ CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I",
+ "rx queue size");
+ SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
+ CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I",
+ "tx queue size");
+
+ /*
+ * dev.cxgbe.X.stats.
+ */
+ oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
+ NULL, "port statistics");
+ children = SYSCTL_CHILDREN(oid);
+
+#define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
+ SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
+ CTLTYPE_U64 | CTLFLAG_RD, pi->adapter, reg, \
+ sysctl_handle_t4_reg64, "QU", desc)
+
+ SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
+ SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
+ SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
+ SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
+ SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
+ SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
+ SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
+ "# of tx frames in this range",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
+ SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
+ "# of tx frames in this range",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
+ SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
+ "# of tx frames in this range",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
+ SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
+ "# of tx frames in this range",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
+ SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
+ "# of tx frames in this range",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
+ SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
+ "# of tx frames in this range",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
+ SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
+ "# of tx frames in this range",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
+ SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
+ SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
+ SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
+ SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
+ SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
+ SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
+ SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
+ SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
+ SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
+ SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));
+
+ SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
+ SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
+ SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
+ SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
+ SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
+ SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
+ SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
+ SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
+ "# of frames received with bad FCS",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
+ SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
+ "# of frames received with length error",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
+ SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
+ SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
+ SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
+ "# of rx frames in this range",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
+ SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
+ "# of rx frames in this range",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
+ SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
+ "# of rx frames in this range",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
+ SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
+ "# of rx frames in this range",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
+ SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
+ "# of rx frames in this range",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
+ SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
+ "# of rx frames in this range",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
+ SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
+ "# of rx frames in this range",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
+ SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
+ SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
+ SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
+ SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
+ SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
+ SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
+ SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
+ SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
+ SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
+ PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));
+
+#undef SYSCTL_ADD_T4_REG64
+
+#define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
+ SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
+ &pi->stats.name, desc)
+
+	/* We get these from port_stats and they may be stale by up to 1s */
+ SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
+ "# drops due to buffer-group 0 overflows");
+ SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
+ "# drops due to buffer-group 1 overflows");
+ SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
+ "# drops due to buffer-group 2 overflows");
+ SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
+ "# drops due to buffer-group 3 overflows");
+ SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
+ "# of buffer-group 0 truncated packets");
+ SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
+ "# of buffer-group 1 truncated packets");
+ SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
+ "# of buffer-group 2 truncated packets");
+ SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
+ "# of buffer-group 3 truncated packets");
+
+#undef SYSCTL_ADD_T4_PORTSTAT
+
+ return (0);
+}
+
+static int
+sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
+{
+ struct port_info *pi = arg1;
+ struct adapter *sc = pi->adapter;
+ struct sge_rxq *rxq;
+ int idx, rc, i;
+
+ idx = pi->tmr_idx;
+
+ rc = sysctl_handle_int(oidp, &idx, 0, req);
+ if (rc != 0 || req->newptr == NULL)
+ return (rc);
+
+ if (idx < 0 || idx >= SGE_NTIMERS)
+ return (EINVAL);
+
+ ADAPTER_LOCK(sc);
+ rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
+ if (rc == 0) {
+ for_each_rxq(pi, i, rxq) {
+ rxq->iq.intr_params = V_QINTR_TIMER_IDX(idx) |
+ V_QINTR_CNT_EN(pi->pktc_idx != -1);
+ }
+ pi->tmr_idx = idx;
+ }
+
+ ADAPTER_UNLOCK(sc);
+ return (rc);
+}
+
+static int
+sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
+{
+ struct port_info *pi = arg1;
+ struct adapter *sc = pi->adapter;
+ int idx, rc;
+
+ idx = pi->pktc_idx;
+
+ rc = sysctl_handle_int(oidp, &idx, 0, req);
+ if (rc != 0 || req->newptr == NULL)
+ return (rc);
+
+ if (idx < -1 || idx >= SGE_NCOUNTERS)
+ return (EINVAL);
+
+ ADAPTER_LOCK(sc);
+ rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
+ if (rc == 0 && pi->ifp->if_drv_flags & IFF_DRV_RUNNING)
+ rc = EBUSY; /* can be changed only when port is down */
+
+ if (rc == 0)
+ pi->pktc_idx = idx;
+
+ ADAPTER_UNLOCK(sc);
+ return (rc);
+}
+
+static int
+sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
+{
+ struct port_info *pi = arg1;
+ struct adapter *sc = pi->adapter;
+ int qsize, rc;
+
+ qsize = pi->qsize_rxq;
+
+ rc = sysctl_handle_int(oidp, &qsize, 0, req);
+ if (rc != 0 || req->newptr == NULL)
+ return (rc);
+
+ if (qsize < 128 || (qsize & 7))
+ return (EINVAL);
+
+ ADAPTER_LOCK(sc);
+ rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
+ if (rc == 0 && pi->ifp->if_drv_flags & IFF_DRV_RUNNING)
+ rc = EBUSY; /* can be changed only when port is down */
+
+ if (rc == 0)
+ pi->qsize_rxq = qsize;
+
+ ADAPTER_UNLOCK(sc);
+ return (rc);
+}
+
+static int
+sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
+{
+ struct port_info *pi = arg1;
+ struct adapter *sc = pi->adapter;
+ int qsize, rc;
+
+ qsize = pi->qsize_txq;
+
+ rc = sysctl_handle_int(oidp, &qsize, 0, req);
+ if (rc != 0 || req->newptr == NULL)
+ return (rc);
+
+ if (qsize < 128)
+ return (EINVAL);
+
+ ADAPTER_LOCK(sc);
+ rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
+ if (rc == 0 && pi->ifp->if_drv_flags & IFF_DRV_RUNNING)
+ rc = EBUSY; /* can be changed only when port is down */
+
+ if (rc == 0)
+ pi->qsize_txq = qsize;
+
+ ADAPTER_UNLOCK(sc);
+ return (rc);
+}
+
+static int
+sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
+{
+ struct adapter *sc = arg1;
+ int reg = arg2;
+ uint64_t val;
+
+ val = t4_read_reg64(sc, reg);
+
+ return (sysctl_handle_64(oidp, &val, 0, req));
+}
+
+int
+t4_os_find_pci_capability(struct adapter *sc, int cap)
+{
+ device_t dev;
+ struct pci_devinfo *dinfo;
+ pcicfgregs *cfg;
+ uint32_t status;
+ uint8_t ptr;
+
+ dev = sc->dev;
+ dinfo = device_get_ivars(dev);
+ cfg = &dinfo->cfg;
+
+ status = pci_read_config(dev, PCIR_STATUS, 2);
+ if (!(status & PCIM_STATUS_CAPPRESENT))
+ return (0);
+
+ switch (cfg->hdrtype & PCIM_HDRTYPE) {
+ case 0:
+ case 1:
+ ptr = PCIR_CAP_PTR;
+ break;
+ case 2:
+ ptr = PCIR_CAP_PTR_2;
+ break;
+ default:
+ return (0);
+ break;
+ }
+ ptr = pci_read_config(dev, ptr, 1);
+
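+	/* Walk the capability list looking for a matching capability id. */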
+ while (ptr != 0) {
+ if (pci_read_config(dev, ptr + PCICAP_ID, 1) == cap)
+ return (ptr);
+ ptr = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
+ }
+
+ return (0);
+}
+
+int
+t4_os_pci_save_state(struct adapter *sc)
+{
+ device_t dev;
+ struct pci_devinfo *dinfo;
+
+ dev = sc->dev;
+ dinfo = device_get_ivars(dev);
+
+ pci_cfg_save(dev, dinfo, 0);
+ return (0);
+}
+
+int
+t4_os_pci_restore_state(struct adapter *sc)
+{
+ device_t dev;
+ struct pci_devinfo *dinfo;
+
+ dev = sc->dev;
+ dinfo = device_get_ivars(dev);
+
+ pci_cfg_restore(dev, dinfo);
+ return (0);
+}
+
+void
+t4_os_portmod_changed(const struct adapter *sc, int idx)
+{
+ struct port_info *pi = sc->port[idx];
+ static const char *mod_str[] = {
+ NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX"
+ };
+
+ if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
+ if_printf(pi->ifp, "transceiver unplugged.\n");
+ else
+ if_printf(pi->ifp, "%s transceiver inserted.\n",
+ mod_str[pi->mod_type]);
+}
+
+void
+t4_os_link_changed(struct adapter *sc, int idx, int link_stat)
+{
+ struct port_info *pi = sc->port[idx];
+ struct ifnet *ifp = pi->ifp;
+
+ if (link_stat) {
+ ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
+ if_link_state_change(ifp, LINK_STATE_UP);
+ } else
+ if_link_state_change(ifp, LINK_STATE_DOWN);
+}
+
+static int
+t4_open(struct cdev *dev, int flags, int type, struct thread *td)
+{
+ return (0);
+}
+
+static int
+t4_close(struct cdev *dev, int flags, int type, struct thread *td)
+{
+ return (0);
+}
+
+static int
+t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
+ struct thread *td)
+{
+ int rc;
+ struct adapter *sc = dev->si_drv1;
+
+ rc = priv_check(td, PRIV_DRIVER);
+ if (rc != 0)
+ return (rc);
+
+ switch (cmd) {
+ case CHELSIO_T4_GETREG32: {
+ struct t4_reg32 *edata = (struct t4_reg32 *)data;
+ if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
+ return (EFAULT);
+ edata->val = t4_read_reg(sc, edata->addr);
+ break;
+ }
+ case CHELSIO_T4_SETREG32: {
+ struct t4_reg32 *edata = (struct t4_reg32 *)data;
+ if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
+ return (EFAULT);
+ t4_write_reg(sc, edata->addr, edata->val);
+ break;
+ }
+ case CHELSIO_T4_REGDUMP: {
+ struct t4_regdump *regs = (struct t4_regdump *)data;
+ int reglen = T4_REGDUMP_SIZE;
+ uint8_t *buf;
+
+ if (regs->len < reglen) {
+ regs->len = reglen; /* hint to the caller */
+ return (ENOBUFS);
+ }
+
+ regs->len = reglen;
+ buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
+ t4_get_regs(sc, regs, buf);
+ rc = copyout(buf, regs->data, reglen);
+ free(buf, M_CXGBE);
+ break;
+ }
+ default:
+ rc = EINVAL;
+ }
+
+ return (rc);
+}
+
+static devclass_t t4_devclass;
+static devclass_t cxgbe_devclass;
+
+DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, 0, 0);
+MODULE_VERSION(t4nex, 1);
+
+DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
+MODULE_VERSION(cxgbe, 1);
diff --git a/sys/dev/cxgbe/t4_sge.c b/sys/dev/cxgbe/t4_sge.c
new file mode 100644
index 0000000..8f8554b
--- /dev/null
+++ b/sys/dev/cxgbe/t4_sge.c
@@ -0,0 +1,2392 @@
+/*-
+ * Copyright (c) 2011 Chelsio Communications, Inc.
+ * All rights reserved.
+ * Written by: Navdeep Parhar <np@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_inet.h"
+
+#include <sys/types.h>
+#include <sys/mbuf.h>
+#include <sys/socket.h>
+#include <sys/kernel.h>
+#include <sys/sysctl.h>
+#include <net/bpf.h>
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <net/if_vlan_var.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <netinet/tcp.h>
+
+#include "common/common.h"
+#include "common/t4_regs.h"
+#include "common/t4_regs_values.h"
+#include "common/t4_msg.h"
+#include "common/t4fw_interface.h"
+
+struct fl_buf_info {
+ int size;
+ int type;
+ uma_zone_t zone;
+};
+
+/* t4_sge_init will fill up the zone */
+static struct fl_buf_info fl_buf_info[FL_BUF_SIZES] = {
+ { MCLBYTES, EXT_CLUSTER, NULL},
+ { MJUMPAGESIZE, EXT_JUMBOP, NULL},
+ { MJUM9BYTES, EXT_JUMBO9, NULL},
+ { MJUM16BYTES, EXT_JUMBO16, NULL}
+};
+#define FL_BUF_SIZE(x) (fl_buf_info[x].size)
+#define FL_BUF_TYPE(x) (fl_buf_info[x].type)
+#define FL_BUF_ZONE(x) (fl_buf_info[x].zone)
+
+enum {
+ FL_PKTSHIFT = 2
+};
+
+#define FL_ALIGN min(CACHE_LINE_SIZE, 32)
+#if CACHE_LINE_SIZE > 64
+#define SPG_LEN 128
+#else
+#define SPG_LEN 64
+#endif
+
+/* Used to track a coalesced tx work request */
+struct txpkts {
+ uint64_t *flitp; /* ptr to flit where next pkt should start */
+ uint8_t npkt; /* # of packets in this work request */
+ uint8_t nflits; /* # of flits used by this work request */
+ uint16_t plen; /* total payload (sum of all packets) */
+};
+
+/* A packet's SGL. This + m_pkthdr has all info needed for tx */
+struct sgl {
+ int nsegs; /* # of segments in the SGL, 0 means imm. tx */
+ int nflits; /* # of flits needed for the SGL */
+ bus_dma_segment_t seg[TX_SGL_SEGS];
+};
+
+static inline void init_iq(struct sge_iq *, struct adapter *, int, int, int,
+ int, iq_intr_handler_t *, char *);
+static inline void init_fl(struct sge_fl *, int, char *);
+static inline void init_txq(struct sge_txq *, int, char *);
+static int alloc_ring(struct adapter *, size_t, bus_dma_tag_t *, bus_dmamap_t *,
+ bus_addr_t *, void **);
+static int free_ring(struct adapter *, bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
+ void *);
+static int alloc_iq_fl(struct port_info *, struct sge_iq *, struct sge_fl *,
+ int);
+static int free_iq_fl(struct port_info *, struct sge_iq *, struct sge_fl *);
+static int alloc_iq(struct sge_iq *, int);
+static int free_iq(struct sge_iq *);
+static int alloc_rxq(struct port_info *, struct sge_rxq *, int, int);
+static int free_rxq(struct port_info *, struct sge_rxq *);
+static int alloc_txq(struct port_info *, struct sge_txq *, int);
+static int free_txq(struct port_info *, struct sge_txq *);
+static void oneseg_dma_callback(void *, bus_dma_segment_t *, int, int);
+static inline bool is_new_response(const struct sge_iq *, struct rsp_ctrl **);
+static inline void iq_next(struct sge_iq *);
+static inline void ring_fl_db(struct adapter *, struct sge_fl *);
+static void refill_fl(struct sge_fl *, int);
+static int alloc_fl_sdesc(struct sge_fl *);
+static void free_fl_sdesc(struct sge_fl *);
+static int alloc_eq_maps(struct sge_eq *);
+static void free_eq_maps(struct sge_eq *);
+static struct mbuf *get_fl_sdesc_data(struct sge_fl *, int, int);
+static void set_fl_tag_idx(struct sge_fl *, int);
+
+static int get_pkt_sgl(struct sge_txq *, struct mbuf **, struct sgl *, int);
+static int free_pkt_sgl(struct sge_txq *, struct sgl *);
+static int write_txpkt_wr(struct port_info *, struct sge_txq *, struct mbuf *,
+ struct sgl *);
+static int add_to_txpkts(struct port_info *, struct sge_txq *, struct txpkts *,
+ struct mbuf *, struct sgl *);
+static void write_txpkts_wr(struct sge_txq *, struct txpkts *);
+static inline void write_ulp_cpl_sgl(struct port_info *, struct sge_txq *,
+ struct txpkts *, struct mbuf *, struct sgl *);
+static int write_sgl_to_txd(struct sge_eq *, struct sgl *, caddr_t *);
+static inline void copy_to_txd(struct sge_eq *, caddr_t, caddr_t *, int);
+static inline void ring_tx_db(struct adapter *, struct sge_eq *);
+static int reclaim_tx_descs(struct sge_eq *, int, int);
+static void write_eqflush_wr(struct sge_eq *);
+static __be64 get_flit(bus_dma_segment_t *, int, int);
+
+/**
+ * t4_sge_init - initialize SGE
+ * @sc: the adapter
+ *
+ * Performs SGE initialization needed every time after a chip reset.
+ * We do not initialize any of the queues here; instead the driver
+ * top-level must request them individually.
+ */
+void
+t4_sge_init(struct adapter *sc)
+{
+ struct sge *s = &sc->sge;
+ int i;
+
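+	/* Attach the mbuf cluster UMA zones to the freelist buffer sizes. */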
+ FL_BUF_ZONE(0) = zone_clust;
+ FL_BUF_ZONE(1) = zone_jumbop;
+ FL_BUF_ZONE(2) = zone_jumbo9;
+ FL_BUF_ZONE(3) = zone_jumbo16;
+
+ t4_set_reg_field(sc, A_SGE_CONTROL, V_PKTSHIFT(M_PKTSHIFT) |
+ V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
+ F_EGRSTATUSPAGESIZE,
+ V_INGPADBOUNDARY(ilog2(FL_ALIGN) - 5) |
+ V_PKTSHIFT(FL_PKTSHIFT) |
+ F_RXPKTCPLMODE |
+ V_EGRSTATUSPAGESIZE(SPG_LEN == 128));
+ t4_set_reg_field(sc, A_SGE_HOST_PAGE_SIZE,
+ V_HOSTPAGESIZEPF0(M_HOSTPAGESIZEPF0),
+ V_HOSTPAGESIZEPF0(PAGE_SHIFT - 10));
+
+ for (i = 0; i < FL_BUF_SIZES; i++) {
+ t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE0 + (4 * i),
+ FL_BUF_SIZE(i));
+ }
+
+ t4_write_reg(sc, A_SGE_INGRESS_RX_THRESHOLD,
+ V_THRESHOLD_0(s->counter_val[0]) |
+ V_THRESHOLD_1(s->counter_val[1]) |
+ V_THRESHOLD_2(s->counter_val[2]) |
+ V_THRESHOLD_3(s->counter_val[3]));
+
+ t4_write_reg(sc, A_SGE_TIMER_VALUE_0_AND_1,
+ V_TIMERVALUE0(us_to_core_ticks(sc, s->timer_val[0])) |
+ V_TIMERVALUE1(us_to_core_ticks(sc, s->timer_val[1])));
+ t4_write_reg(sc, A_SGE_TIMER_VALUE_2_AND_3,
+ V_TIMERVALUE2(us_to_core_ticks(sc, s->timer_val[2])) |
+ V_TIMERVALUE3(us_to_core_ticks(sc, s->timer_val[3])));
+ t4_write_reg(sc, A_SGE_TIMER_VALUE_4_AND_5,
+ V_TIMERVALUE4(us_to_core_ticks(sc, s->timer_val[4])) |
+ V_TIMERVALUE5(us_to_core_ticks(sc, s->timer_val[5])));
+}
+
+int
+t4_create_dma_tag(struct adapter *sc)
+{
+ int rc;
+
+ rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
+ BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE,
+ BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL,
+ NULL, &sc->dmat);
+ if (rc != 0) {
+ device_printf(sc->dev,
+ "failed to create main DMA tag: %d\n", rc);
+ }
+
+ return (rc);
+}
+
+int
+t4_destroy_dma_tag(struct adapter *sc)
+{
+ if (sc->dmat)
+ bus_dma_tag_destroy(sc->dmat);
+
+ return (0);
+}
+
+/*
+ * Allocate and initialize the firmware event queue and the forwarded interrupt
+ * queues, if any. The adapter owns all these queues as they are not associated
+ * with any particular port.
+ *
+ * Returns errno on failure. Resources allocated up to that point may still be
+ * allocated. Caller is responsible for cleanup in case this function fails.
+ */
+int
+t4_setup_adapter_iqs(struct adapter *sc)
+{
+ int i, rc;
+ struct sge_iq *iq, *fwq;
+ iq_intr_handler_t *handler;
+ char name[16];
+
+ ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
+
+ fwq = &sc->sge.fwq;
+ if (sc->flags & INTR_FWD) {
+ iq = &sc->sge.fiq[0];
+
+ /*
+		 * Forwarded interrupt queues: allocate one if only a single
+		 * vector is available, otherwise one less than the number of
+		 * vectors (the first vector is reserved for the error
+		 * interrupt in that case).
+ */
+ i = sc->intr_count > 1 ? 1 : 0;
+ for (; i < sc->intr_count; i++, iq++) {
+
+ snprintf(name, sizeof(name), "%s fiq%d",
+ device_get_nameunit(sc->dev), i);
+ init_iq(iq, sc, 0, 0, (sc->sge.nrxq + 1) * 2, 16, NULL,
+ name);
+
+ rc = alloc_iq(iq, i);
+ if (rc != 0) {
+ device_printf(sc->dev,
+ "failed to create fwd intr queue %d: %d\n",
+ i, rc);
+ return (rc);
+ }
+ }
+
+ handler = t4_intr_evt;
+ i = 0; /* forward fwq's interrupt to the first fiq */
+ } else {
+ handler = NULL;
+ i = 1; /* fwq should use vector 1 (0 is used by error) */
+ }
+
+ snprintf(name, sizeof(name), "%s fwq", device_get_nameunit(sc->dev));
+ init_iq(fwq, sc, 0, 0, FW_IQ_QSIZE, FW_IQ_ESIZE, handler, name);
+ rc = alloc_iq(fwq, i);
+ if (rc != 0) {
+ device_printf(sc->dev,
+ "failed to create firmware event queue: %d\n", rc);
+ }
+
+ return (rc);
+}
+
+/*
+ * Idempotent
+ */
+int
+t4_teardown_adapter_iqs(struct adapter *sc)
+{
+ int i;
+ struct sge_iq *iq;
+
+ ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
+
+ iq = &sc->sge.fwq;
+ free_iq(iq);
+ if (sc->flags & INTR_FWD) {
+ for (i = 0; i < NFIQ(sc); i++) {
+ iq = &sc->sge.fiq[i];
+ free_iq(iq);
+ }
+ }
+
+ return (0);
+}
+
+int
+t4_setup_eth_queues(struct port_info *pi)
+{
+ int rc = 0, i, intr_idx;
+ struct sge_rxq *rxq;
+ struct sge_txq *txq;
+ char name[16];
+ struct adapter *sc = pi->adapter;
+
+ if (sysctl_ctx_init(&pi->ctx) == 0) {
+ struct sysctl_oid *oid = device_get_sysctl_tree(pi->dev);
+ struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
+
+ pi->oid_rxq = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO,
+ "rxq", CTLFLAG_RD, NULL, "rx queues");
+ pi->oid_txq = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO,
+ "txq", CTLFLAG_RD, NULL, "tx queues");
+ }
+
+ for_each_rxq(pi, i, rxq) {
+
+ snprintf(name, sizeof(name), "%s rxq%d-iq",
+ device_get_nameunit(pi->dev), i);
+ init_iq(&rxq->iq, sc, pi->tmr_idx, pi->pktc_idx,
+ pi->qsize_rxq, RX_IQ_ESIZE,
+		    sc->flags & INTR_FWD ? t4_intr_data : NULL, name);
+
+ snprintf(name, sizeof(name), "%s rxq%d-fl",
+ device_get_nameunit(pi->dev), i);
+ init_fl(&rxq->fl, pi->qsize_rxq / 8, name);
+
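+		/*
+		 * Forwarded interrupts: spread the rx queues over the fiqs.
+		 * Direct interrupts: vectors 0 and 1 belong to the error and
+		 * firmware event interrupts; data queues start at vector 2.
+		 */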
+ if (sc->flags & INTR_FWD)
+ intr_idx = (pi->first_rxq + i) % NFIQ(sc);
+ else
+ intr_idx = pi->first_rxq + i + 2;
+
+ rc = alloc_rxq(pi, rxq, intr_idx, i);
+ if (rc != 0)
+ goto done;
+
+ intr_idx++;
+ }
+
+ for_each_txq(pi, i, txq) {
+
+ snprintf(name, sizeof(name), "%s txq%d",
+ device_get_nameunit(pi->dev), i);
+ init_txq(txq, pi->qsize_txq, name);
+
+ rc = alloc_txq(pi, txq, i);
+ if (rc != 0)
+ goto done;
+ }
+
+done:
+ if (rc)
+ t4_teardown_eth_queues(pi);
+
+ return (rc);
+}
+
+/*
+ * Idempotent
+ */
+int
+t4_teardown_eth_queues(struct port_info *pi)
+{
+ int i;
+ struct sge_rxq *rxq;
+ struct sge_txq *txq;
+
+ /* Do this before freeing the queues */
+ if (pi->oid_txq || pi->oid_rxq) {
+ sysctl_ctx_free(&pi->ctx);
+ pi->oid_txq = pi->oid_rxq = NULL;
+ }
+
+ for_each_txq(pi, i, txq) {
+ free_txq(pi, txq);
+ }
+
+ for_each_rxq(pi, i, rxq) {
+ free_rxq(pi, rxq);
+ }
+
+ return (0);
+}
+
+/* Deals with errors and forwarded interrupts */
+void
+t4_intr_all(void *arg)
+{
+ struct adapter *sc = arg;
+
+ t4_intr_err(arg);
+ t4_intr_fwd(&sc->sge.fiq[0]);
+}
+
+/* Deals with forwarded interrupts on the given ingress queue */
+void
+t4_intr_fwd(void *arg)
+{
+ struct sge_iq *iq = arg, *q;
+ struct adapter *sc = iq->adapter;
+ struct rsp_ctrl *ctrl;
+ int ndesc_pending = 0, ndesc_total = 0;
+ int qid;
+
+ IQ_LOCK(iq);
+ while (is_new_response(iq, &ctrl)) {
+
+ rmb();
+
+ /* Only interrupt muxing expected on this queue */
+ KASSERT(G_RSPD_TYPE(ctrl->u.type_gen) == X_RSPD_TYPE_INTR,
+ ("unexpected event on forwarded interrupt queue: %x",
+ G_RSPD_TYPE(ctrl->u.type_gen)));
+
+ qid = ntohl(ctrl->pldbuflen_qid) - sc->sge.iq_start;
+ q = sc->sge.iqmap[qid];
+
+ q->handler(q);
+
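+		/* Return consumed descriptors to the hw periodically. */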
+ ndesc_total++;
+ if (++ndesc_pending >= iq->qsize / 4) {
+ t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
+ V_CIDXINC(ndesc_pending) |
+ V_INGRESSQID(iq->cntxt_id) |
+ V_SEINTARM(
+ V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
+ ndesc_pending = 0;
+ }
+
+ iq_next(iq);
+ }
+ IQ_UNLOCK(iq);
+
+ if (ndesc_total > 0) {
+ t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
+ V_CIDXINC(ndesc_pending) | V_INGRESSQID((u32)iq->cntxt_id) |
+ V_SEINTARM(iq->intr_params));
+ }
+}
+
+/* Deals with error interrupts */
+void
+t4_intr_err(void *arg)
+{
+ struct adapter *sc = arg;
+
+ if (sc->intr_type == 1)
+ t4_write_reg(sc, MYPF_REG(A_PCIE_PF_CLI), 0);
+
+ t4_slow_intr_handler(sc);
+}
+
+/* Deals with the firmware event queue */
+void
+t4_intr_evt(void *arg)
+{
+ struct sge_iq *iq = arg;
+ struct adapter *sc = iq->adapter;
+ struct rsp_ctrl *ctrl;
+ const struct rss_header *rss;
+ int ndesc_pending = 0, ndesc_total = 0;
+
+ KASSERT(iq == &sc->sge.fwq, ("%s: unexpected ingress queue", __func__));
+
+ IQ_LOCK(iq);
+ while (is_new_response(iq, &ctrl)) {
+
+ rmb();
+
+ rss = (const void *)iq->cdesc;
+
+ /* Should only get CPL on this queue */
+ KASSERT(G_RSPD_TYPE(ctrl->u.type_gen) == X_RSPD_TYPE_CPL,
+ ("%s: unexpected type %d", __func__,
+ G_RSPD_TYPE(ctrl->u.type_gen)));
+
+ switch (rss->opcode) {
+ case CPL_FW4_MSG:
+ case CPL_FW6_MSG: {
+ const struct cpl_fw6_msg *cpl;
+
+ cpl = (const void *)(rss + 1);
+ if (cpl->type == FW6_TYPE_CMD_RPL)
+ t4_handle_fw_rpl(sc, cpl->data);
+
+ break;
+ }
+ case CPL_SGE_EGR_UPDATE: {
+ const struct cpl_sge_egr_update *cpl;
+ unsigned int qid;
+ struct sge *s = &sc->sge;
+ struct sge_txq *txq;
+
+ cpl = (const void *)(rss + 1);
+ qid = G_EGR_QID(ntohl(cpl->opcode_qid));
+ txq = (void *)s->eqmap[qid - s->eq_start];
+ txq->egr_update++;
+
+ /* XXX: wake up stalled tx */
+
+ break;
+ }
+
+ default:
+ device_printf(sc->dev,
+			    "can't handle CPL opcode %d.\n", rss->opcode);
+ }
+
+ ndesc_total++;
+ if (++ndesc_pending >= iq->qsize / 4) {
+ t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
+ V_CIDXINC(ndesc_pending) |
+ V_INGRESSQID(iq->cntxt_id) |
+ V_SEINTARM(
+ V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
+ ndesc_pending = 0;
+ }
+ iq_next(iq);
+ }
+ IQ_UNLOCK(iq);
+
+ if (ndesc_total > 0) {
+ t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
+ V_CIDXINC(ndesc_pending) | V_INGRESSQID(iq->cntxt_id) |
+ V_SEINTARM(iq->intr_params));
+ }
+}
+
+void
+t4_intr_data(void *arg)
+{
+ struct sge_rxq *rxq = arg;
+ struct sge_iq *iq = arg;
+ struct rsp_ctrl *ctrl;
+ struct sge_fl *fl = &rxq->fl;
+ struct port_info *pi = rxq->port;
+ struct ifnet *ifp = pi->ifp;
+ struct adapter *sc = pi->adapter;
+ const struct rss_header *rss;
+ const struct cpl_rx_pkt *cpl;
+ int ndescs = 0, rsp_type;
+ uint32_t len;
+ struct mbuf *m0, *m;
+#ifdef INET
+ struct lro_ctrl *lro = &rxq->lro;
+ struct lro_entry *l;
+#endif
+
+ IQ_LOCK(iq);
+ iq->intr_next = iq->intr_params;
+ while (is_new_response(iq, &ctrl)) {
+
+ rmb();
+
+ rss = (const void *)iq->cdesc;
+ cpl = (const void *)(rss + 1);
+
+ rsp_type = G_RSPD_TYPE(ctrl->u.type_gen);
+
+ if (__predict_false(rsp_type == X_RSPD_TYPE_CPL)) {
+ const struct cpl_sge_egr_update *p = (const void *)cpl;
+ unsigned int qid = G_EGR_QID(ntohl(p->opcode_qid));
+
+ KASSERT(cpl->opcode == CPL_SGE_EGR_UPDATE,
+ ("unexpected opcode on data ingress queue: %x",
+ cpl->opcode));
+
+			/* XXX: no one's waiting to be woken up... */
+ wakeup(sc->sge.eqmap[qid - sc->sge.eq_start]);
+
+ ndescs++;
+ iq_next(iq);
+
+ continue;
+ }
+
+ KASSERT(G_RSPD_TYPE(ctrl->u.type_gen) == X_RSPD_TYPE_FLBUF,
+ ("unexpected event on data ingress queue: %x",
+ G_RSPD_TYPE(ctrl->u.type_gen)));
+
+ len = be32toh(ctrl->pldbuflen_qid);
+
+ KASSERT(len & F_RSPD_NEWBUF,
+ ("%s: T4 misconfigured to pack buffers.", __func__));
+
+ len = G_RSPD_LEN(len);
+ m0 = get_fl_sdesc_data(fl, len, M_PKTHDR);
+ if (m0 == NULL) {
+ iq->intr_next = V_QINTR_TIMER_IDX(SGE_NTIMERS - 1);
+ break;
+ }
+
+ len -= FL_PKTSHIFT;
+ m0->m_len -= FL_PKTSHIFT;
+ m0->m_data += FL_PKTSHIFT;
+
+ m0->m_pkthdr.len = len;
+ m0->m_pkthdr.rcvif = ifp;
+ m0->m_flags |= M_FLOWID;
+ m0->m_pkthdr.flowid = rss->hash_val;
+
+ if (cpl->csum_calc && !cpl->err_vec &&
+ ifp->if_capenable & IFCAP_RXCSUM) {
+ m0->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED |
+ CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
+ if (cpl->ip_frag)
+ m0->m_pkthdr.csum_data = be16toh(cpl->csum);
+ else
+ m0->m_pkthdr.csum_data = 0xffff;
+ rxq->rxcsum++;
+ }
+
+ if (cpl->vlan_ex) {
+ m0->m_pkthdr.ether_vtag = be16toh(cpl->vlan);
+ m0->m_flags |= M_VLANTAG;
+ rxq->vlan_extraction++;
+ }
+
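+		/* The frame may span several fl buffers; chain them onto m0. */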
+ len -= m0->m_len;
+ m = m0;
+ while (len) {
+ m->m_next = get_fl_sdesc_data(fl, len, 0);
+ if (m->m_next == NULL)
+ CXGBE_UNIMPLEMENTED("mbuf recovery");
+
+ m = m->m_next;
+ len -= m->m_len;
+ }
+#ifdef INET
+ if (cpl->l2info & htobe32(F_RXF_LRO) &&
+ rxq->flags & RXQ_LRO_ENABLED &&
+ tcp_lro_rx(lro, m0, 0) == 0) {
+ /* queued for LRO */
+ } else
+#endif
+ (*ifp->if_input)(ifp, m0);
+
+ FL_LOCK(fl);
+ if (fl->needed >= 32) {
+ refill_fl(fl, 64);
+ if (fl->pending >= 32)
+ ring_fl_db(sc, fl);
+ }
+ FL_UNLOCK(fl);
+
+ ndescs++;
+ iq_next(iq);
+
+ if (ndescs > 32) {
+ t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
+ V_CIDXINC(ndescs) |
+ V_INGRESSQID((u32)iq->cntxt_id) |
+ V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
+ ndescs = 0;
+ }
+ }
+
+#ifdef INET
+ while (!SLIST_EMPTY(&lro->lro_active)) {
+ l = SLIST_FIRST(&lro->lro_active);
+ SLIST_REMOVE_HEAD(&lro->lro_active, next);
+ tcp_lro_flush(lro, l);
+ }
+#endif
+
+ t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_CIDXINC(ndescs) |
+ V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_next));
+
+ IQ_UNLOCK(iq);
+
+ FL_LOCK(fl);
+ if (fl->needed) {
+ refill_fl(fl, -1);
+ if (fl->pending >= 8)
+ ring_fl_db(sc, fl);
+ }
+ FL_UNLOCK(fl);
+}
+
+/* Per-packet header in a coalesced tx WR, before the SGL starts (in flits) */
+#define TXPKTS_PKT_HDR ((\
+ sizeof(struct ulp_txpkt) + \
+ sizeof(struct ulptx_idata) + \
+ sizeof(struct cpl_tx_pkt_core) \
+ ) / 8)
+
+/* Header of a coalesced tx WR, before SGL of first packet (in flits) */
+#define TXPKTS_WR_HDR (\
+ sizeof(struct fw_eth_tx_pkts_wr) / 8 + \
+ TXPKTS_PKT_HDR)
+
+/* Header of a tx WR, before SGL of first packet (in flits) */
+#define TXPKT_WR_HDR ((\
+ sizeof(struct fw_eth_tx_pkt_wr) + \
+ sizeof(struct cpl_tx_pkt_core) \
+ ) / 8 )
+
+/* Header of a tx LSO WR, before SGL of first packet (in flits) */
+#define TXPKT_LSO_WR_HDR ((\
+ sizeof(struct fw_eth_tx_pkt_wr) + \
+ sizeof(struct cpl_tx_pkt_lso) + \
+ sizeof(struct cpl_tx_pkt_core) \
+ ) / 8 )
+
+int
+t4_eth_tx(struct ifnet *ifp, struct sge_txq *txq, struct mbuf *m)
+{
+ struct port_info *pi = (void *)ifp->if_softc;
+ struct adapter *sc = pi->adapter;
+ struct sge_eq *eq = &txq->eq;
+ struct buf_ring *br = eq->br;
+ struct mbuf *next;
+ int rc, coalescing;
+ struct txpkts txpkts;
+ struct sgl sgl;
+
+ TXQ_LOCK_ASSERT_OWNED(txq);
+ KASSERT(m, ("%s: called with nothing to do.", __func__));
+
+	txpkts.npkt = 0;	/* indicates there's nothing in txpkts */
+ coalescing = 0;
+
+ prefetch(&eq->sdesc[eq->pidx]);
+ prefetch(&eq->desc[eq->pidx]);
+ prefetch(&eq->maps[eq->map_pidx]);
+
+ if (eq->avail < 8)
+ reclaim_tx_descs(eq, 1, 8);
+
+ for (; m; m = next ? next : drbr_dequeue(ifp, br)) {
+
+ if (eq->avail < 8)
+ break;
+
+ next = m->m_nextpkt;
+ m->m_nextpkt = NULL;
+
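+		/* Coalesce when more packets are waiting behind this one. */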
+ if (next || buf_ring_peek(br))
+ coalescing = 1;
+
+ rc = get_pkt_sgl(txq, &m, &sgl, coalescing);
+ if (rc != 0) {
+ if (rc == ENOMEM) {
+
+ /* Short of resources, suspend tx */
+
+ m->m_nextpkt = next;
+ break;
+ }
+
+ /*
+ * Unrecoverable error for this packet, throw it away
+ * and move on to the next. get_pkt_sgl may already
+ * have freed m (it will be NULL in that case and the
+ * m_freem here is still safe).
+ */
+
+ m_freem(m);
+ continue;
+ }
+
+ if (coalescing &&
+ add_to_txpkts(pi, txq, &txpkts, m, &sgl) == 0) {
+
+ /* Successfully absorbed into txpkts */
+
+ write_ulp_cpl_sgl(pi, txq, &txpkts, m, &sgl);
+ goto doorbell;
+ }
+
+ /*
+ * We weren't coalescing to begin with, or current frame could
+ * not be coalesced (add_to_txpkts flushes txpkts if a frame
+ * given to it can't be coalesced). Either way there should be
+ * nothing in txpkts.
+ */
+ KASSERT(txpkts.npkt == 0,
+ ("%s: txpkts not empty: %d", __func__, txpkts.npkt));
+
+ /* We're sending out individual packets now */
+ coalescing = 0;
+
+ if (eq->avail < 8)
+ reclaim_tx_descs(eq, 1, 8);
+ rc = write_txpkt_wr(pi, txq, m, &sgl);
+ if (rc != 0) {
+
+ /* Short of hardware descriptors, suspend tx */
+
+ /*
+ * This is an unlikely but expensive failure. We've
+ * done all the hard work (DMA mappings etc.) and now we
+ * can't send out the packet. What's worse, we have to
+ * spend even more time freeing up everything in sgl.
+ */
+ txq->no_desc++;
+ free_pkt_sgl(txq, &sgl);
+
+ m->m_nextpkt = next;
+ break;
+ }
+
+ ETHER_BPF_MTAP(ifp, m);
+ if (sgl.nsegs == 0)
+ m_freem(m);
+
+doorbell:
+ /* Fewer and fewer doorbells as the queue fills up */
+ if (eq->pending >= (1 << (fls(eq->qsize - eq->avail) / 2)))
+ ring_tx_db(sc, eq);
+ reclaim_tx_descs(eq, 16, 32);
+ }
+
+ if (txpkts.npkt > 0)
+ write_txpkts_wr(txq, &txpkts);
+
+ /*
+ * m not NULL means there was an error but we haven't thrown it away.
+ * This can happen when we're short of tx descriptors (no_desc) or maybe
+ * even DMA maps (no_dmamap). Either way, a credit flush and reclaim
+ * will get things going again.
+ *
+ * If eq->avail is already 0 we know a credit flush was requested in the
+ * WR that reduced it to 0 so we don't need another flush (we don't have
+ * any descriptor for a flush WR anyway, duh).
+ */
+ if (m && eq->avail > 0)
+ write_eqflush_wr(eq);
+ txq->m = m;
+
+ if (eq->pending)
+ ring_tx_db(sc, eq);
+
+ reclaim_tx_descs(eq, 16, eq->qsize);
+
+ return (0);
+}
+
+void
+t4_update_fl_bufsize(struct ifnet *ifp)
+{
+ struct port_info *pi = ifp->if_softc;
+ struct sge_rxq *rxq;
+ struct sge_fl *fl;
+ int i;
+
+ for_each_rxq(pi, i, rxq) {
+ fl = &rxq->fl;
+
+ FL_LOCK(fl);
+ set_fl_tag_idx(fl, ifp->if_mtu);
+ FL_UNLOCK(fl);
+ }
+}
+
+/*
+ * A non-NULL handler indicates this iq will not receive direct interrupts;
+ * the handler will be invoked by a forwarded interrupt queue instead.
+ */
+static inline void
+init_iq(struct sge_iq *iq, struct adapter *sc, int tmr_idx, int pktc_idx,
+ int qsize, int esize, iq_intr_handler_t *handler, char *name)
+{
+ KASSERT(tmr_idx >= 0 && tmr_idx < SGE_NTIMERS,
+ ("%s: bad tmr_idx %d", __func__, tmr_idx));
+ KASSERT(pktc_idx < SGE_NCOUNTERS, /* -ve is ok, means don't use */
+ ("%s: bad pktc_idx %d", __func__, pktc_idx));
+
+ iq->flags = 0;
+ iq->adapter = sc;
+ iq->intr_params = V_QINTR_TIMER_IDX(tmr_idx) |
+ V_QINTR_CNT_EN(pktc_idx >= 0);
+ iq->intr_pktc_idx = pktc_idx;
+ iq->qsize = roundup(qsize, 16); /* See FW_IQ_CMD/iqsize */
+ iq->esize = max(esize, 16); /* See FW_IQ_CMD/iqesize */
+ iq->handler = handler;
+ strlcpy(iq->lockname, name, sizeof(iq->lockname));
+}
+
+static inline void
+init_fl(struct sge_fl *fl, int qsize, char *name)
+{
+ fl->qsize = qsize;
+ strlcpy(fl->lockname, name, sizeof(fl->lockname));
+}
+
+static inline void
+init_txq(struct sge_txq *txq, int qsize, char *name)
+{
+ txq->eq.qsize = qsize;
+ strlcpy(txq->eq.lockname, name, sizeof(txq->eq.lockname));
+}
+
+static int
+alloc_ring(struct adapter *sc, size_t len, bus_dma_tag_t *tag,
+ bus_dmamap_t *map, bus_addr_t *pa, void **va)
+{
+ int rc;
+
+ rc = bus_dma_tag_create(sc->dmat, 512, 0, BUS_SPACE_MAXADDR,
+ BUS_SPACE_MAXADDR, NULL, NULL, len, 1, len, 0, NULL, NULL, tag);
+ if (rc != 0) {
+ device_printf(sc->dev, "cannot allocate DMA tag: %d\n", rc);
+ goto done;
+ }
+
+ rc = bus_dmamem_alloc(*tag, va,
+ BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, map);
+ if (rc != 0) {
+ device_printf(sc->dev, "cannot allocate DMA memory: %d\n", rc);
+ goto done;
+ }
+
+ rc = bus_dmamap_load(*tag, *map, *va, len, oneseg_dma_callback, pa, 0);
+ if (rc != 0) {
+ device_printf(sc->dev, "cannot load DMA map: %d\n", rc);
+ goto done;
+ }
+done:
+ if (rc)
+ free_ring(sc, *tag, *map, *pa, *va);
+
+ return (rc);
+}
+
+static int
+free_ring(struct adapter *sc, bus_dma_tag_t tag, bus_dmamap_t map,
+ bus_addr_t pa, void *va)
+{
+ if (pa)
+ bus_dmamap_unload(tag, map);
+ if (va)
+ bus_dmamem_free(tag, va, map);
+ if (tag)
+ bus_dma_tag_destroy(tag);
+
+ return (0);
+}
+
+/*
+ * Allocates the ring for an ingress queue and an optional freelist. If the
+ * freelist is specified it will be allocated and then associated with the
+ * ingress queue.
+ *
+ * Returns errno on failure. Resources allocated up to that point may still be
+ * allocated. Caller is responsible for cleanup in case this function fails.
+ *
+ * If the ingress queue will take interrupts directly (iq->handler == NULL) then
+ * the intr_idx specifies the vector, starting from 0. Otherwise it specifies
+ * the index of the queue to which its interrupts will be forwarded.
+ */
+static int
+alloc_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl,
+ int intr_idx)
+{
+ int rc, i, cntxt_id;
+ size_t len;
+ struct fw_iq_cmd c;
+ struct adapter *sc = iq->adapter;
+ __be32 v = 0;
+
+ /* The adapter queues are nominally allocated in port[0]'s name */
+ if (pi == NULL)
+ pi = sc->port[0];
+
+ mtx_init(&iq->iq_lock, iq->lockname, NULL, MTX_DEF);
+
+ len = iq->qsize * iq->esize;
+ rc = alloc_ring(sc, len, &iq->desc_tag, &iq->desc_map, &iq->ba,
+ (void **)&iq->desc);
+ if (rc != 0)
+ return (rc);
+
+ bzero(&c, sizeof(c));
+ c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) |
+ V_FW_IQ_CMD_VFN(0));
+
+ c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
+ FW_LEN16(c));
+
+ /* Special handling for firmware event queue */
+ if (iq == &sc->sge.fwq)
+ v |= F_FW_IQ_CMD_IQASYNCH;
+
+ if (iq->handler) {
+ KASSERT(intr_idx < NFIQ(sc),
+ ("%s: invalid indirect intr_idx %d", __func__, intr_idx));
+ v |= F_FW_IQ_CMD_IQANDST;
+ v |= V_FW_IQ_CMD_IQANDSTINDEX(sc->sge.fiq[intr_idx].abs_id);
+ } else {
+ KASSERT(intr_idx < sc->intr_count,
+ ("%s: invalid direct intr_idx %d", __func__, intr_idx));
+ v |= V_FW_IQ_CMD_IQANDSTINDEX(intr_idx);
+ }
+
+ c.type_to_iqandstindex = htobe32(v |
+ V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
+ V_FW_IQ_CMD_VIID(pi->viid) |
+ V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
+ c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
+ F_FW_IQ_CMD_IQGTSMODE |
+ V_FW_IQ_CMD_IQINTCNTTHRESH(iq->intr_pktc_idx) |
+ V_FW_IQ_CMD_IQESIZE(ilog2(iq->esize) - 4));
+ c.iqsize = htobe16(iq->qsize);
+ c.iqaddr = htobe64(iq->ba);
+
+ if (fl) {
+ mtx_init(&fl->fl_lock, fl->lockname, NULL, MTX_DEF);
+
+ for (i = 0; i < FL_BUF_SIZES; i++) {
+
+ /*
+ * A freelist buffer must be 16 byte aligned as the SGE
+ * uses the low 4 bits of the bus addr to figure out the
+ * buffer size.
+ */
+ rc = bus_dma_tag_create(sc->dmat, 16, 0,
+ BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
+ FL_BUF_SIZE(i), 1, FL_BUF_SIZE(i), BUS_DMA_ALLOCNOW,
+ NULL, NULL, &fl->tag[i]);
+ if (rc != 0) {
+ device_printf(sc->dev,
+ "failed to create fl DMA tag[%d]: %d\n",
+ i, rc);
+ return (rc);
+ }
+ }
+ len = fl->qsize * RX_FL_ESIZE;
+ rc = alloc_ring(sc, len, &fl->desc_tag, &fl->desc_map,
+ &fl->ba, (void **)&fl->desc);
+ if (rc)
+ return (rc);
+
+ /* Allocate space for one software descriptor per buffer. */
+ fl->cap = (fl->qsize - SPG_LEN / RX_FL_ESIZE) * 8;
+ FL_LOCK(fl);
+ set_fl_tag_idx(fl, pi->ifp->if_mtu);
+ rc = alloc_fl_sdesc(fl);
+ FL_UNLOCK(fl);
+ if (rc != 0) {
+ device_printf(sc->dev,
+ "failed to setup fl software descriptors: %d\n",
+ rc);
+ return (rc);
+ }
+ fl->needed = fl->cap - 1; /* one less to avoid cidx = pidx */
+
+ c.iqns_to_fl0congen =
+ htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE));
+ c.fl0dcaen_to_fl0cidxfthresh =
+ htobe16(V_FW_IQ_CMD_FL0FBMIN(X_FETCHBURSTMIN_64B) |
+ V_FW_IQ_CMD_FL0FBMAX(X_FETCHBURSTMAX_512B));
+ c.fl0size = htobe16(fl->qsize);
+ c.fl0addr = htobe64(fl->ba);
+ }
+
+ rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
+ if (rc != 0) {
+ device_printf(sc->dev,
+ "failed to create ingress queue: %d\n", rc);
+ return (rc);
+ }
+
+ iq->cdesc = iq->desc;
+ iq->cidx = 0;
+ iq->gen = 1;
+ iq->intr_next = iq->intr_params;
+ iq->cntxt_id = be16toh(c.iqid);
+ iq->abs_id = be16toh(c.physiqid);
+ iq->flags |= (IQ_ALLOCATED | IQ_STARTED);
+
+ cntxt_id = iq->cntxt_id - sc->sge.iq_start;
+ KASSERT(cntxt_id < sc->sge.niq,
+ ("%s: iq->cntxt_id (%d) more than the max (%d)", __func__,
+ cntxt_id, sc->sge.niq - 1));
+ sc->sge.iqmap[cntxt_id] = iq;
+
+ if (fl) {
+ fl->cntxt_id = be16toh(c.fl0id);
+ fl->pidx = fl->cidx = 0;
+
+ cntxt_id = iq->cntxt_id - sc->sge.eq_start;
+ KASSERT(cntxt_id < sc->sge.neq,
+ ("%s: fl->cntxt_id (%d) more than the max (%d)", __func__,
+ cntxt_id, sc->sge.neq - 1));
+ sc->sge.eqmap[cntxt_id] = (void *)fl;
+
+ FL_LOCK(fl);
+ refill_fl(fl, -1);
+ if (fl->pending >= 8)
+ ring_fl_db(sc, fl);
+ FL_UNLOCK(fl);
+ }
+
+ /* Enable IQ interrupts */
+ t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_SEINTARM(iq->intr_params) |
+ V_INGRESSQID(iq->cntxt_id));
+
+ return (0);
+}
+
+/*
+ * This can be called with the iq/fl in any state - fully allocated and
+ * functional, partially allocated, even all-zeroed out.
+ */
+static int
+free_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl)
+{
+ int i, rc;
+ struct adapter *sc = iq->adapter;
+ device_t dev;
+
+ if (sc == NULL)
+ return (0); /* nothing to do */
+
+ dev = pi ? pi->dev : sc->dev;
+
+ if (iq->flags & IQ_STARTED) {
+ rc = -t4_iq_start_stop(sc, sc->mbox, 0, sc->pf, 0,
+ iq->cntxt_id, fl ? fl->cntxt_id : 0xffff, 0xffff);
+ if (rc != 0) {
+ device_printf(dev,
+ "failed to stop queue %p: %d\n", iq, rc);
+ return (rc);
+ }
+ iq->flags &= ~IQ_STARTED;
+ }
+
+ if (iq->flags & IQ_ALLOCATED) {
+
+ rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0,
+ FW_IQ_TYPE_FL_INT_CAP, iq->cntxt_id,
+ fl ? fl->cntxt_id : 0xffff, 0xffff);
+ if (rc != 0) {
+ device_printf(dev,
+ "failed to free queue %p: %d\n", iq, rc);
+ return (rc);
+ }
+ iq->flags &= ~IQ_ALLOCATED;
+ }
+
+ free_ring(sc, iq->desc_tag, iq->desc_map, iq->ba, iq->desc);
+
+ if (mtx_initialized(&iq->iq_lock))
+ mtx_destroy(&iq->iq_lock);
+
+ bzero(iq, sizeof(*iq));
+
+ if (fl) {
+ free_ring(sc, fl->desc_tag, fl->desc_map, fl->ba,
+ fl->desc);
+
+ if (fl->sdesc) {
+ FL_LOCK(fl);
+ free_fl_sdesc(fl);
+ FL_UNLOCK(fl);
+ }
+
+ if (mtx_initialized(&fl->fl_lock))
+ mtx_destroy(&fl->fl_lock);
+
+ for (i = 0; i < FL_BUF_SIZES; i++) {
+ if (fl->tag[i])
+ bus_dma_tag_destroy(fl->tag[i]);
+ }
+
+ bzero(fl, sizeof(*fl));
+ }
+
+ return (0);
+}
+
+static int
+alloc_iq(struct sge_iq *iq, int intr_idx)
+{
+ return alloc_iq_fl(NULL, iq, NULL, intr_idx);
+}
+
+static int
+free_iq(struct sge_iq *iq)
+{
+ return free_iq_fl(NULL, iq, NULL);
+}
+
+static int
+alloc_rxq(struct port_info *pi, struct sge_rxq *rxq, int intr_idx, int idx)
+{
+ int rc;
+ struct sysctl_oid *oid;
+ struct sysctl_oid_list *children;
+ char name[16];
+
+ rc = alloc_iq_fl(pi, &rxq->iq, &rxq->fl, intr_idx);
+ if (rc != 0)
+ return (rc);
+
+#ifdef INET
+ rc = tcp_lro_init(&rxq->lro);
+ if (rc != 0)
+ return (rc);
+ rxq->lro.ifp = pi->ifp; /* also indicates LRO init'ed */
+
+ if (pi->ifp->if_capenable & IFCAP_LRO)
+ rxq->flags |= RXQ_LRO_ENABLED;
+#endif
+ rxq->port = pi;
+
+ children = SYSCTL_CHILDREN(pi->oid_rxq);
+
+ snprintf(name, sizeof(name), "%d", idx);
+ oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD,
+ NULL, "rx queue");
+ children = SYSCTL_CHILDREN(oid);
+
+ SYSCTL_ADD_INT(&pi->ctx, children, OID_AUTO, "lro_queued", CTLFLAG_RD,
+ &rxq->lro.lro_queued, 0, NULL);
+ SYSCTL_ADD_INT(&pi->ctx, children, OID_AUTO, "lro_flushed", CTLFLAG_RD,
+ &rxq->lro.lro_flushed, 0, NULL);
+ SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "rxcsum", CTLFLAG_RD,
+ &rxq->rxcsum, "# of times hardware assisted with checksum");
+ SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "vlan_extraction",
+ CTLFLAG_RD, &rxq->vlan_extraction,
+ "# of times hardware extracted 802.1Q tag");
+
+ return (rc);
+}
+
+static int
+free_rxq(struct port_info *pi, struct sge_rxq *rxq)
+{
+ int rc;
+
+#ifdef INET
+ if (rxq->lro.ifp) {
+ tcp_lro_free(&rxq->lro);
+ rxq->lro.ifp = NULL;
+ }
+#endif
+
+ rc = free_iq_fl(pi, &rxq->iq, &rxq->fl);
+ if (rc == 0)
+ bzero(rxq, sizeof(*rxq));
+
+ return (rc);
+}
+
+static int
+alloc_txq(struct port_info *pi, struct sge_txq *txq, int idx)
+{
+ int rc, cntxt_id;
+ size_t len;
+ struct adapter *sc = pi->adapter;
+ struct fw_eq_eth_cmd c;
+ struct sge_eq *eq = &txq->eq;
+ char name[16];
+ struct sysctl_oid *oid;
+ struct sysctl_oid_list *children;
+
+ mtx_init(&eq->eq_lock, eq->lockname, NULL, MTX_DEF);
+
+ len = eq->qsize * TX_EQ_ESIZE;
+ rc = alloc_ring(sc, len, &eq->desc_tag, &eq->desc_map,
+ &eq->ba, (void **)&eq->desc);
+ if (rc)
+ return (rc);
+
+ eq->cap = eq->qsize - SPG_LEN / TX_EQ_ESIZE;
+ eq->spg = (void *)&eq->desc[eq->cap];
+ eq->avail = eq->cap - 1; /* one less to avoid cidx = pidx */
+ eq->sdesc = malloc(eq->cap * sizeof(struct tx_sdesc), M_CXGBE,
+ M_ZERO | M_WAITOK);
+ eq->br = buf_ring_alloc(eq->qsize, M_CXGBE, M_WAITOK, &eq->eq_lock);
+
+ rc = bus_dma_tag_create(sc->dmat, 1, 0, BUS_SPACE_MAXADDR,
+ BUS_SPACE_MAXADDR, NULL, NULL, 64 * 1024, TX_SGL_SEGS,
+ BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL, NULL, &eq->tx_tag);
+ if (rc != 0) {
+ device_printf(sc->dev,
+ "failed to create tx DMA tag: %d\n", rc);
+ return (rc);
+ }
+
+ rc = alloc_eq_maps(eq);
+ if (rc != 0) {
+ device_printf(sc->dev, "failed to setup tx DMA maps: %d\n", rc);
+ return (rc);
+ }
+
+ bzero(&c, sizeof(c));
+
+ c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) |
+ V_FW_EQ_ETH_CMD_VFN(0));
+ c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC |
+ F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
+ c.viid_pkd = htobe32(V_FW_EQ_ETH_CMD_VIID(pi->viid));
+ c.fetchszm_to_iqid =
+ htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
+ V_FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) |
+ V_FW_EQ_ETH_CMD_IQID(sc->sge.rxq[pi->first_rxq].iq.cntxt_id));
+ c.dcaen_to_eqsize = htobe32(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
+ V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
+ V_FW_EQ_ETH_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) |
+ V_FW_EQ_ETH_CMD_EQSIZE(eq->qsize));
+ c.eqaddr = htobe64(eq->ba);
+
+ rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
+ if (rc != 0) {
+ device_printf(pi->dev,
+ "failed to create egress queue: %d\n", rc);
+ return (rc);
+ }
+
+ eq->pidx = eq->cidx = 0;
+ eq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd));
+ eq->flags |= (EQ_ALLOCATED | EQ_STARTED);
+
+ cntxt_id = eq->cntxt_id - sc->sge.eq_start;
+ KASSERT(cntxt_id < sc->sge.neq,
+ ("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
+ cntxt_id, sc->sge.neq - 1));
+ sc->sge.eqmap[cntxt_id] = eq;
+
+ children = SYSCTL_CHILDREN(pi->oid_txq);
+
+ snprintf(name, sizeof(name), "%d", idx);
+ oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD,
+ NULL, "tx queue");
+ children = SYSCTL_CHILDREN(oid);
+
+ SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txcsum", CTLFLAG_RD,
+ &txq->txcsum, "# of times hardware assisted with checksum");
+ SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "vlan_insertion",
+ CTLFLAG_RD, &txq->vlan_insertion,
+ "# of times hardware inserted 802.1Q tag");
+ SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "tso_wrs", CTLFLAG_RD,
+ &txq->tso_wrs, "# of IPv4 TSO work requests");
+ SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "imm_wrs", CTLFLAG_RD,
+ &txq->imm_wrs, "# of work requests with immediate data");
+ SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "sgl_wrs", CTLFLAG_RD,
+ &txq->sgl_wrs, "# of work requests with direct SGL");
+ SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkt_wrs", CTLFLAG_RD,
+ &txq->txpkt_wrs, "# of txpkt work requests (one pkt/WR)");
+ SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkts_wrs", CTLFLAG_RD,
+ &txq->txpkts_wrs, "# of txpkts work requests (multiple pkts/WR)");
+ SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkts_pkts", CTLFLAG_RD,
+ &txq->txpkts_pkts, "# of frames tx'd using txpkts work requests");
+
+ SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "no_dmamap", CTLFLAG_RD,
+ &txq->no_dmamap, 0, "# of times txq ran out of DMA maps");
+ SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "no_desc", CTLFLAG_RD,
+ &txq->no_desc, 0, "# of times txq ran out of hardware descriptors");
+ SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "egr_update", CTLFLAG_RD,
+ &txq->egr_update, 0, "egress update notifications from the SGE");
+
+ return (rc);
+}
+
+static int
+free_txq(struct port_info *pi, struct sge_txq *txq)
+{
+ int rc;
+ struct adapter *sc = pi->adapter;
+ struct sge_eq *eq = &txq->eq;
+
+ if (eq->flags & (EQ_ALLOCATED | EQ_STARTED)) {
+ rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, eq->cntxt_id);
+ if (rc != 0) {
+ device_printf(pi->dev,
+ "failed to free egress queue %p: %d\n", eq, rc);
+ return (rc);
+ }
+ eq->flags &= ~(EQ_ALLOCATED | EQ_STARTED);
+ }
+
+ free_ring(sc, eq->desc_tag, eq->desc_map, eq->ba, eq->desc);
+
+ free(eq->sdesc, M_CXGBE);
+
+ if (eq->maps)
+ free_eq_maps(eq);
+
+ buf_ring_free(eq->br, M_CXGBE);
+
+ if (eq->tx_tag)
+ bus_dma_tag_destroy(eq->tx_tag);
+
+ if (mtx_initialized(&eq->eq_lock))
+ mtx_destroy(&eq->eq_lock);
+
+ bzero(txq, sizeof(*txq));
+ return (0);
+}
+
+static void
+oneseg_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+ bus_addr_t *ba = arg;
+
+ KASSERT(nseg == 1,
+ ("%s meant for single segment mappings only.", __func__));
+
+ *ba = error ? 0 : segs->ds_addr;
+}
+
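+/*
+ * Returns true if the descriptor at the iq's current consumer index holds a
+ * new response, i.e. the generation bit in the rsp_ctrl at the end of the
+ * descriptor matches the queue's current generation. *ctrl is set to point
+ * at that rsp_ctrl either way.
+ */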
+static inline bool
+is_new_response(const struct sge_iq *iq, struct rsp_ctrl **ctrl)
+{
+ *ctrl = (void *)((uintptr_t)iq->cdesc +
+ (iq->esize - sizeof(struct rsp_ctrl)));
+
+ return (((*ctrl)->u.type_gen >> S_RSPD_GEN) == iq->gen);
+}
+
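+/*
+ * Advances the iq to its next response descriptor, wrapping around and
+ * flipping the generation bit when the end of the ring is reached.
+ */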
+static inline void
+iq_next(struct sge_iq *iq)
+{
+ iq->cdesc = (void *) ((uintptr_t)iq->cdesc + iq->esize);
+ if (__predict_false(++iq->cidx == iq->qsize - 1)) {
+ iq->cidx = 0;
+ iq->gen ^= 1;
+ iq->cdesc = iq->desc;
+ }
+}
+
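+/*
+ * Tells the SGE about freelist buffers that have been added. The doorbell is
+ * written in units of 8 descriptors; anything short of a full unit stays in
+ * fl->pending until a later call.
+ */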
+static inline void
+ring_fl_db(struct adapter *sc, struct sge_fl *fl)
+{
+ int ndesc = fl->pending / 8;
+
+ /* Caller responsible for ensuring there's something useful to do */
+ KASSERT(ndesc > 0, ("%s called with no useful work to do.", __func__));
+
+ wmb();
+
+ t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL), F_DBPRIO |
+ V_QID(fl->cntxt_id) | V_PIDX(ndesc));
+
+ fl->pending &= 7;
+}
+
+static void
+refill_fl(struct sge_fl *fl, int nbufs)
+{
+ __be64 *d = &fl->desc[fl->pidx];
+ struct fl_sdesc *sd = &fl->sdesc[fl->pidx];
+ bus_dma_tag_t tag;
+ bus_addr_t pa;
+ caddr_t cl;
+ int rc;
+
+ FL_LOCK_ASSERT_OWNED(fl);
+
+ if (nbufs < 0 || nbufs > fl->needed)
+ nbufs = fl->needed;
+
+ while (nbufs--) {
+
+ if (sd->cl != NULL) {
+
+ /*
+ * This happens when a frame small enough to fit
+ * entirely in an mbuf was received in cl last time.
+ * We'd held on to cl and can reuse it now. Note that
+ * we reuse a cluster of the old size if fl->tag_idx is
+ * no longer the same as sd->tag_idx.
+ */
+
+ KASSERT(*d == sd->ba_tag,
+ ("%s: recycling problem at pidx %d",
+ __func__, fl->pidx));
+
+ d++;
+ goto recycled;
+ }
+
+
+ if (fl->tag_idx != sd->tag_idx) {
+ bus_dmamap_t map;
+ bus_dma_tag_t newtag = fl->tag[fl->tag_idx];
+ bus_dma_tag_t oldtag = fl->tag[sd->tag_idx];
+
+ /*
+ * An MTU change can get us here. Discard the old map
+ * which was created with the old tag, but only if
+ * we're able to get a new one.
+ */
+ rc = bus_dmamap_create(newtag, 0, &map);
+ if (rc == 0) {
+ bus_dmamap_destroy(oldtag, sd->map);
+ sd->map = map;
+ sd->tag_idx = fl->tag_idx;
+ }
+ }
+
+ tag = fl->tag[sd->tag_idx];
+
+ cl = m_cljget(NULL, M_NOWAIT, FL_BUF_SIZE(sd->tag_idx));
+ if (cl == NULL)
+ break;
+
+ rc = bus_dmamap_load(tag, sd->map, cl,
+ FL_BUF_SIZE(sd->tag_idx), oneseg_dma_callback,
+ &pa, 0);
+ if (rc != 0 || pa == 0) {
+ fl->dmamap_failed++;
+ uma_zfree(FL_BUF_ZONE(sd->tag_idx), cl);
+ break;
+ }
+
+ sd->cl = cl;
+ *d++ = htobe64(pa | sd->tag_idx);
+
+#ifdef INVARIANTS
+ sd->ba_tag = htobe64(pa | sd->tag_idx);
+#endif
+
+recycled: fl->pending++;
+ fl->needed--;
+ sd++;
+ if (++fl->pidx == fl->cap) {
+ fl->pidx = 0;
+ sd = fl->sdesc;
+ d = fl->desc;
+ }
+
+ /* No harm if gethdr fails, we'll retry after rx */
+ if (sd->m == NULL)
+ sd->m = m_gethdr(M_NOWAIT, MT_NOINIT);
+ }
+}
+
+static int
+alloc_fl_sdesc(struct sge_fl *fl)
+{
+ struct fl_sdesc *sd;
+ bus_dma_tag_t tag;
+ int i, rc;
+
+ FL_LOCK_ASSERT_OWNED(fl);
+
+ fl->sdesc = malloc(fl->cap * sizeof(struct fl_sdesc), M_CXGBE,
+ M_ZERO | M_WAITOK);
+
+ tag = fl->tag[fl->tag_idx];
+ sd = fl->sdesc;
+ for (i = 0; i < fl->cap; i++, sd++) {
+
+ sd->tag_idx = fl->tag_idx;
+ rc = bus_dmamap_create(tag, 0, &sd->map);
+ if (rc != 0)
+ goto failed;
+
+ /* Doesn't matter if this succeeds or not */
+ sd->m = m_gethdr(M_NOWAIT, MT_NOINIT);
+ }
+
+ return (0);
+failed:
+ while (--i >= 0) {
+ sd--;
+ bus_dmamap_destroy(tag, sd->map);
+ if (sd->m) {
+ m_init(sd->m, zone_mbuf, MLEN, M_NOWAIT, MT_DATA, 0);
+ m_free(sd->m);
+ sd->m = NULL;
+ }
+ }
+ KASSERT(sd == fl->sdesc, ("%s: EDOOFUS", __func__));
+
+ free(fl->sdesc, M_CXGBE);
+ fl->sdesc = NULL;
+
+ return (rc);
+}
+
+static void
+free_fl_sdesc(struct sge_fl *fl)
+{
+ struct fl_sdesc *sd;
+ int i;
+
+ FL_LOCK_ASSERT_OWNED(fl);
+
+ sd = fl->sdesc;
+ for (i = 0; i < fl->cap; i++, sd++) {
+
+ if (sd->m) {
+ m_init(sd->m, zone_mbuf, MLEN, M_NOWAIT, MT_DATA, 0);
+ m_free(sd->m);
+ sd->m = NULL;
+ }
+
+ if (sd->cl) {
+ bus_dmamap_unload(fl->tag[sd->tag_idx], sd->map);
+ uma_zfree(FL_BUF_ZONE(sd->tag_idx), sd->cl);
+ sd->cl = NULL;
+ }
+
+ bus_dmamap_destroy(fl->tag[sd->tag_idx], sd->map);
+ }
+
+ free(fl->sdesc, M_CXGBE);
+ fl->sdesc = NULL;
+}
+
+static int
+alloc_eq_maps(struct sge_eq *eq)
+{
+ struct tx_map *txm;
+ int i, rc, count;
+
+ /*
+ * We can stuff ~10 frames in an 8-descriptor txpkts WR (8 is the SGE
+ * limit for any WR). txq->no_dmamap events shouldn't occur if maps is
+ * sized for the worst case.
+ */
+ count = eq->qsize * 10 / 8;
+ eq->map_total = eq->map_avail = count;
+ eq->map_cidx = eq->map_pidx = 0;
+
+ eq->maps = malloc(count * sizeof(struct tx_map), M_CXGBE,
+ M_ZERO | M_WAITOK);
+
+ txm = eq->maps;
+ for (i = 0; i < count; i++, txm++) {
+ rc = bus_dmamap_create(eq->tx_tag, 0, &txm->map);
+ if (rc != 0)
+ goto failed;
+ }
+
+ return (0);
+failed:
+ while (--i >= 0) {
+ txm--;
+ bus_dmamap_destroy(eq->tx_tag, txm->map);
+ }
+ KASSERT(txm == eq->maps, ("%s: EDOOFUS", __func__));
+
+ free(eq->maps, M_CXGBE);
+ eq->maps = NULL;
+
+ return (rc);
+}
+
+static void
+free_eq_maps(struct sge_eq *eq)
+{
+ struct tx_map *txm;
+ int i;
+
+ txm = eq->maps;
+ for (i = 0; i < eq->map_total; i++, txm++) {
+
+ if (txm->m) {
+ bus_dmamap_unload(eq->tx_tag, txm->map);
+ m_freem(txm->m);
+ txm->m = NULL;
+ }
+
+ bus_dmamap_destroy(eq->tx_tag, txm->map);
+ }
+
+ free(eq->maps, M_CXGBE);
+ eq->maps = NULL;
+}
+
+/*
+ * We'll do immediate data tx for non-TSO, but only when not coalescing. We're
+ * willing to use up to 2 hardware descriptors, which means a maximum of 96 bytes
+ * of immediate data.
+ */
+#define IMM_LEN ( \
+ 2 * TX_EQ_ESIZE \
+ - sizeof(struct fw_eth_tx_pkt_wr) \
+ - sizeof(struct cpl_tx_pkt_core))
+
+/*
+ * Returns non-zero on failure; there is no need to clean up anything in that case.
+ *
+ * Note 1: We always try to defrag the mbuf if required and return EFBIG only
+ * if the resulting chain still won't fit in a tx descriptor.
+ *
+ * Note 2: We'll pullup the mbuf chain if TSO is requested and the first mbuf
+ * does not have the TCP header in it.
+ */
+static int
+get_pkt_sgl(struct sge_txq *txq, struct mbuf **fp, struct sgl *sgl,
+ int sgl_only)
+{
+ struct mbuf *m = *fp;
+ struct sge_eq *eq = &txq->eq;
+ struct tx_map *txm;
+ int rc, defragged = 0, n;
+
+ TXQ_LOCK_ASSERT_OWNED(txq);
+
+ if (m->m_pkthdr.tso_segsz)
+ sgl_only = 1; /* Do not allow immediate data with LSO */
+
+start: sgl->nsegs = 0;
+
+ if (m->m_pkthdr.len <= IMM_LEN && !sgl_only)
+ return (0); /* nsegs = 0 tells caller to use imm. tx */
+
+ if (eq->map_avail == 0) {
+ txq->no_dmamap++;
+ return (ENOMEM);
+ }
+ txm = &eq->maps[eq->map_pidx];
+
+ if (m->m_pkthdr.tso_segsz && m->m_len < 50) {
+ *fp = m_pullup(m, 50);
+ m = *fp;
+ if (m == NULL)
+ return (ENOBUFS);
+ }
+
+ rc = bus_dmamap_load_mbuf_sg(eq->tx_tag, txm->map, m, sgl->seg,
+ &sgl->nsegs, BUS_DMA_NOWAIT);
+ if (rc == EFBIG && defragged == 0) {
+ m = m_defrag(m, M_DONTWAIT);
+ if (m == NULL)
+ return (EFBIG);
+
+ defragged = 1;
+ *fp = m;
+ goto start;
+ }
+ if (rc != 0)
+ return (rc);
+
+ txm->m = m;
+ eq->map_avail--;
+ if (++eq->map_pidx == eq->map_total)
+ eq->map_pidx = 0;
+
+ KASSERT(sgl->nsegs > 0 && sgl->nsegs <= TX_SGL_SEGS,
+ ("%s: bad DMA mapping (%d segments)", __func__, sgl->nsegs));
+
+ /*
+ * Store the # of flits required to hold this frame's SGL in nflits. An
+ * SGL has a (ULPTX header + len0, addr0) tuple optionally followed by
+ * multiple (len0 + len1, addr0, addr1) tuples. If addr1 is not used
+ * then len1 must be set to 0.
+ */
+ n = sgl->nsegs - 1;
+ sgl->nflits = (3 * n) / 2 + (n & 1) + 2;
+
+ return (0);
+}
+
+
+/*
+ * Releases all the txq resources used up in the specified sgl.
+ */
+static int
+free_pkt_sgl(struct sge_txq *txq, struct sgl *sgl)
+{
+ struct sge_eq *eq = &txq->eq;
+ struct tx_map *txm;
+
+ TXQ_LOCK_ASSERT_OWNED(txq);
+
+ if (sgl->nsegs == 0)
+ return (0); /* didn't use any map */
+
+ /* 1 pkt uses exactly 1 map, back it out */
+
+ eq->map_avail++;
+ if (eq->map_pidx > 0)
+ eq->map_pidx--;
+ else
+ eq->map_pidx = eq->map_total - 1;
+
+ txm = &eq->maps[eq->map_pidx];
+ bus_dmamap_unload(eq->tx_tag, txm->map);
+ txm->m = NULL;
+
+ return (0);
+}
+
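+/*
+ * Writes a work request for a single frame (TXPKT) to the descriptor ring:
+ * the firmware WR header, an LSO CPL if TSO was requested, the CPL_TX_PKT
+ * with checksum/VLAN offload settings, and then either the frame's SGL or its
+ * data copied in as immediate data.
+ */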
+static int
+write_txpkt_wr(struct port_info *pi, struct sge_txq *txq, struct mbuf *m,
+ struct sgl *sgl)
+{
+ struct sge_eq *eq = &txq->eq;
+ struct fw_eth_tx_pkt_wr *wr;
+ struct cpl_tx_pkt_core *cpl;
+ uint32_t ctrl; /* used in many unrelated places */
+ uint64_t ctrl1;
+ int nflits, ndesc;
+ struct tx_sdesc *txsd;
+ caddr_t dst;
+
+ TXQ_LOCK_ASSERT_OWNED(txq);
+
+ /*
+ * Do we have enough flits to send this frame out?
+ */
+ ctrl = sizeof(struct cpl_tx_pkt_core);
+ if (m->m_pkthdr.tso_segsz) {
+ nflits = TXPKT_LSO_WR_HDR;
+ ctrl += sizeof(struct cpl_tx_pkt_lso);
+ } else
+ nflits = TXPKT_WR_HDR;
+ if (sgl->nsegs > 0)
+ nflits += sgl->nflits;
+ else {
+ nflits += howmany(m->m_pkthdr.len, 8);
+ ctrl += m->m_pkthdr.len;
+ }
+ ndesc = howmany(nflits, 8);
+ if (ndesc > eq->avail)
+ return (ENOMEM);
+
+ /* Firmware work request header */
+ wr = (void *)&eq->desc[eq->pidx];
+ wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
+ V_FW_WR_IMMDLEN(ctrl));
+ ctrl = V_FW_WR_LEN16(howmany(nflits, 2));
+ if (eq->avail == ndesc)
+ ctrl |= F_FW_WR_EQUEQ | F_FW_WR_EQUIQ;
+ wr->equiq_to_len16 = htobe32(ctrl);
+ wr->r3 = 0;
+
+ if (m->m_pkthdr.tso_segsz) {
+ struct cpl_tx_pkt_lso *lso = (void *)(wr + 1);
+ struct ether_header *eh;
+ struct ip *ip;
+ struct tcphdr *tcp;
+
+ ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) | F_LSO_FIRST_SLICE |
+ F_LSO_LAST_SLICE;
+
+ eh = mtod(m, struct ether_header *);
+ if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
+ ctrl |= V_LSO_ETHHDR_LEN(1);
+ ip = (void *)((struct ether_vlan_header *)eh + 1);
+ } else
+ ip = (void *)(eh + 1);
+
+ tcp = (void *)((uintptr_t)ip + ip->ip_hl * 4);
+ ctrl |= V_LSO_IPHDR_LEN(ip->ip_hl) |
+ V_LSO_TCPHDR_LEN(tcp->th_off);
+
+ lso->lso_ctrl = htobe32(ctrl);
+ lso->ipid_ofst = htobe16(0);
+ lso->mss = htobe16(m->m_pkthdr.tso_segsz);
+ lso->seqno_offset = htobe32(0);
+ lso->len = htobe32(m->m_pkthdr.len);
+
+ cpl = (void *)(lso + 1);
+
+ txq->tso_wrs++;
+ } else
+ cpl = (void *)(wr + 1);
+
+ /* Checksum offload */
+ ctrl1 = 0;
+ if (!(m->m_pkthdr.csum_flags & CSUM_IP))
+ ctrl1 |= F_TXPKT_IPCSUM_DIS;
+ if (!(m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)))
+ ctrl1 |= F_TXPKT_L4CSUM_DIS;
+ if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP))
+ txq->txcsum++; /* some hardware assistance provided */
+
+ /* VLAN tag insertion */
+ if (m->m_flags & M_VLANTAG) {
+ ctrl1 |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->m_pkthdr.ether_vtag);
+ txq->vlan_insertion++;
+ }
+
+ /* CPL header */
+ cpl->ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) |
+ V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(pi->adapter->pf));
+ cpl->pack = 0;
+ cpl->len = htobe16(m->m_pkthdr.len);
+ cpl->ctrl1 = htobe64(ctrl1);
+
+ /* Software descriptor */
+ txsd = &eq->sdesc[eq->pidx];
+ txsd->desc_used = ndesc;
+
+ eq->pending += ndesc;
+ eq->avail -= ndesc;
+ eq->pidx += ndesc;
+ if (eq->pidx >= eq->cap)
+ eq->pidx -= eq->cap;
+
+ /* SGL */
+ dst = (void *)(cpl + 1);
+ if (sgl->nsegs > 0) {
+ txsd->map_used = 1;
+ txq->sgl_wrs++;
+ write_sgl_to_txd(eq, sgl, &dst);
+ } else {
+ txsd->map_used = 0;
+ txq->imm_wrs++;
+ for (; m; m = m->m_next) {
+ copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len);
+ }
+ }
+
+ txq->txpkt_wrs++;
+ return (0);
+}
+
+/*
+ * Returns 0 to indicate that m has been accepted into a coalesced tx work
+ * request. It has either been folded into txpkts or txpkts was flushed and m
+ * has started a new coalesced work request (as the first frame in a fresh
+ * txpkts).
+ *
+ * Returns non-zero to indicate a failure - the caller is responsible for
+ * transmitting m; if there was anything in txpkts it has been flushed.
+ */
+static int
+add_to_txpkts(struct port_info *pi, struct sge_txq *txq, struct txpkts *txpkts,
+ struct mbuf *m, struct sgl *sgl)
+{
+ struct sge_eq *eq = &txq->eq;
+ int can_coalesce;
+ struct tx_sdesc *txsd;
+ int flits;
+
+ TXQ_LOCK_ASSERT_OWNED(txq);
+
+ if (txpkts->npkt > 0) {
+ flits = TXPKTS_PKT_HDR + sgl->nflits;
+ can_coalesce = m->m_pkthdr.tso_segsz == 0 &&
+ txpkts->nflits + flits <= TX_WR_FLITS &&
+ txpkts->nflits + flits <= eq->avail * 8 &&
+ txpkts->plen + m->m_pkthdr.len < 65536;
+
+ if (can_coalesce) {
+ txpkts->npkt++;
+ txpkts->nflits += flits;
+ txpkts->plen += m->m_pkthdr.len;
+
+ txsd = &eq->sdesc[eq->pidx];
+ txsd->map_used++;
+
+ return (0);
+ }
+
+ /*
+ * Couldn't coalesce m into txpkts. The first order of business
+ * is to send txpkts on its way. Then we'll revisit m.
+ */
+ write_txpkts_wr(txq, txpkts);
+ }
+
+ /*
+ * Check if we can start a new coalesced tx work request with m as
+ * the first packet in it.
+ */
+
+ KASSERT(txpkts->npkt == 0, ("%s: txpkts not empty", __func__));
+
+ flits = TXPKTS_WR_HDR + sgl->nflits;
+ can_coalesce = m->m_pkthdr.tso_segsz == 0 &&
+ flits <= eq->avail * 8 && flits <= TX_WR_FLITS;
+
+ if (can_coalesce == 0)
+ return (EINVAL);
+
+ /*
+ * Start a fresh coalesced tx WR with m as the first frame in it.
+ */
+ txpkts->npkt = 1;
+ txpkts->nflits = flits;
+ txpkts->flitp = &eq->desc[eq->pidx].flit[2];
+ txpkts->plen = m->m_pkthdr.len;
+
+ txsd = &eq->sdesc[eq->pidx];
+ txsd->map_used = 1;
+
+ return (0);
+}
+
+/*
+ * Note that write_txpkts_wr can never run out of hardware descriptors (but
+ * write_txpkt_wr can). add_to_txpkts ensures that a frame is accepted for
+ * coalescing only if sufficient hardware descriptors are available.
+ */
+static void
+write_txpkts_wr(struct sge_txq *txq, struct txpkts *txpkts)
+{
+ struct sge_eq *eq = &txq->eq;
+ struct fw_eth_tx_pkts_wr *wr;
+ struct tx_sdesc *txsd;
+ uint32_t ctrl;
+ int ndesc;
+
+ TXQ_LOCK_ASSERT_OWNED(txq);
+
+ ndesc = howmany(txpkts->nflits, 8);
+
+ wr = (void *)&eq->desc[eq->pidx];
+ wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR) |
+ V_FW_WR_IMMDLEN(0)); /* immdlen does not matter in this WR */
+ ctrl = V_FW_WR_LEN16(howmany(txpkts->nflits, 2));
+ if (eq->avail == ndesc)
+ ctrl |= F_FW_WR_EQUEQ | F_FW_WR_EQUIQ;
+ wr->equiq_to_len16 = htobe32(ctrl);
+ wr->plen = htobe16(txpkts->plen);
+ wr->npkt = txpkts->npkt;
+ wr->r3 = wr->r4 = 0;
+
+ /* Everything else already written */
+
+ txsd = &eq->sdesc[eq->pidx];
+ txsd->desc_used = ndesc;
+
+ KASSERT(eq->avail >= ndesc, ("%s: out of descriptors", __func__));
+
+ eq->pending += ndesc;
+ eq->avail -= ndesc;
+ eq->pidx += ndesc;
+ if (eq->pidx >= eq->cap)
+ eq->pidx -= eq->cap;
+
+ txq->txpkts_pkts += txpkts->npkt;
+ txq->txpkts_wrs++;
+ txpkts->npkt = 0; /* emptied */
+}
+
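+/*
+ * Appends one frame's ULP_TX_PKT command, CPL_TX_PKT header, and SGL to the
+ * coalesced work request being built up in txpkts. Each piece starts at a
+ * 16 byte boundary and the write wraps around past the status page back to
+ * the start of the ring when necessary.
+ */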
+static inline void
+write_ulp_cpl_sgl(struct port_info *pi, struct sge_txq *txq,
+ struct txpkts *txpkts, struct mbuf *m, struct sgl *sgl)
+{
+ struct ulp_txpkt *ulpmc;
+ struct ulptx_idata *ulpsc;
+ struct cpl_tx_pkt_core *cpl;
+ struct sge_eq *eq = &txq->eq;
+ uintptr_t flitp, start, end;
+ uint64_t ctrl;
+ caddr_t dst;
+
+ KASSERT(txpkts->npkt > 0, ("%s: txpkts is empty", __func__));
+
+ start = (uintptr_t)eq->desc;
+ end = (uintptr_t)eq->spg;
+
+ /* Checksum offload */
+ ctrl = 0;
+ if (!(m->m_pkthdr.csum_flags & CSUM_IP))
+ ctrl |= F_TXPKT_IPCSUM_DIS;
+ if (!(m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)))
+ ctrl |= F_TXPKT_L4CSUM_DIS;
+ if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP))
+ txq->txcsum++; /* some hardware assistance provided */
+
+ /* VLAN tag insertion */
+ if (m->m_flags & M_VLANTAG) {
+ ctrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->m_pkthdr.ether_vtag);
+ txq->vlan_insertion++;
+ }
+
+ /*
+ * The previous packet's SGL must have ended at a 16 byte boundary (this
+ * is required by the firmware/hardware). It follows that flitp cannot
+ * wrap around between the ULPTX master command and ULPTX subcommand (8
+ * bytes each), and that it cannot wrap around in the middle of the
+ * cpl_tx_pkt_core either.
+ */
+ flitp = (uintptr_t)txpkts->flitp;
+ KASSERT((flitp & 0xf) == 0,
+ ("%s: last SGL did not end at 16 byte boundary: %p",
+ __func__, txpkts->flitp));
+
+ /* ULP master command */
+ ulpmc = (void *)flitp;
+ ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
+ ulpmc->len = htonl(howmany(sizeof(*ulpmc) + sizeof(*ulpsc) +
+ sizeof(*cpl) + 8 * sgl->nflits, 16));
+
+ /* ULP subcommand */
+ ulpsc = (void *)(ulpmc + 1);
+ ulpsc->cmd_more = htobe32(V_ULPTX_CMD((u32)ULP_TX_SC_IMM) |
+ F_ULP_TX_SC_MORE);
+ ulpsc->len = htobe32(sizeof(struct cpl_tx_pkt_core));
+
+ flitp += sizeof(*ulpmc) + sizeof(*ulpsc);
+ if (flitp == end)
+ flitp = start;
+
+ /* CPL_TX_PKT */
+ cpl = (void *)flitp;
+ cpl->ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) |
+ V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(pi->adapter->pf));
+ cpl->pack = 0;
+ cpl->len = htobe16(m->m_pkthdr.len);
+ cpl->ctrl1 = htobe64(ctrl);
+
+ flitp += sizeof(*cpl);
+ if (flitp == end)
+ flitp = start;
+
+ /* SGL for this frame */
+ dst = (caddr_t)flitp;
+ txpkts->nflits += write_sgl_to_txd(eq, sgl, &dst);
+ txpkts->flitp = (void *)dst;
+
+ KASSERT(((uintptr_t)dst & 0xf) == 0,
+ ("%s: SGL ends at %p (not a 16 byte boundary)", __func__, dst));
+}
+
+/*
+ * If the SGL ends on an address that is not 16 byte aligned, this function will
+ * add a 0 filled flit at the end. It returns 1 in that case.
+ */
+static int
+write_sgl_to_txd(struct sge_eq *eq, struct sgl *sgl, caddr_t *to)
+{
+ __be64 *flitp, *end;
+ struct ulptx_sgl *usgl;
+ bus_dma_segment_t *seg;
+ int i, padded;
+
+ KASSERT(sgl->nsegs > 0 && sgl->nflits > 0,
+ ("%s: bad SGL - nsegs=%d, nflits=%d",
+ __func__, sgl->nsegs, sgl->nflits));
+
+ KASSERT(((uintptr_t)(*to) & 0xf) == 0,
+ ("%s: SGL must start at a 16 byte boundary: %p", __func__, *to));
+
+ flitp = (__be64 *)(*to);
+ end = flitp + sgl->nflits;
+ seg = &sgl->seg[0];
+ usgl = (void *)flitp;
+
+ /*
+ * We start at a 16 byte boundary somewhere inside the tx descriptor
+ * ring, so we're at least 16 bytes away from the status page. There is
+ * no chance of a wrap around in the middle of usgl (which is 16 bytes).
+ */
+
+ usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
+ V_ULPTX_NSGE(sgl->nsegs));
+ usgl->len0 = htobe32(seg->ds_len);
+ usgl->addr0 = htobe64(seg->ds_addr);
+ seg++;
+
+ if ((uintptr_t)end <= (uintptr_t)eq->spg) {
+
+ /* Won't wrap around at all */
+
+ for (i = 0; i < sgl->nsegs - 1; i++, seg++) {
+ usgl->sge[i / 2].len[i & 1] = htobe32(seg->ds_len);
+ usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ds_addr);
+ }
+ if (i & 1)
+ usgl->sge[i / 2].len[1] = htobe32(0);
+ } else {
+
+ /* Will wrap somewhere in the rest of the SGL */
+
+ /* 2 flits already written, write the rest flit by flit */
+ flitp = (void *)(usgl + 1);
+ for (i = 0; i < sgl->nflits - 2; i++) {
+ if ((uintptr_t)flitp == (uintptr_t)eq->spg)
+ flitp = (void *)eq->desc;
+ *flitp++ = get_flit(seg, sgl->nsegs - 1, i);
+ }
+ end = flitp;
+ }
+
+ if ((uintptr_t)end & 0xf) {
+ *(uint64_t *)end = 0;
+ end++;
+ padded = 1;
+ } else
+ padded = 0;
+
+ if ((uintptr_t)end == (uintptr_t)eq->spg)
+ *to = (void *)eq->desc;
+ else
+ *to = (void *)end;
+
+ return (padded);
+}
+
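+/*
+ * Copies immediate data into the descriptor ring, wrapping around to the
+ * start of the ring if the copy would otherwise run into the status page.
+ */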
+static inline void
+copy_to_txd(struct sge_eq *eq, caddr_t from, caddr_t *to, int len)
+{
+ if ((uintptr_t)(*to) + len <= (uintptr_t)eq->spg) {
+ bcopy(from, *to, len);
+ (*to) += len;
+ } else {
+ int portion = (uintptr_t)eq->spg - (uintptr_t)(*to);
+
+ bcopy(from, *to, portion);
+ from += portion;
+ portion = len - portion; /* remaining */
+ bcopy(from, (void *)eq->desc, portion);
+ (*to) = (caddr_t)eq->desc + portion;
+ }
+}
+
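+/*
+ * Notifies the hardware of newly written tx descriptors. The write barrier
+ * ensures the descriptors are visible in memory before the doorbell write.
+ */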
+static inline void
+ring_tx_db(struct adapter *sc, struct sge_eq *eq)
+{
+ wmb();
+ t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
+ V_QID(eq->cntxt_id) | V_PIDX(eq->pending));
+ eq->pending = 0;
+}
+
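+/*
+ * Reclaims up to "howmany" tx descriptors that the SGE has finished with, but
+ * only if at least "atleast" can be reclaimed right away. The DMA maps of the
+ * reclaimed descriptors are unloaded and their mbufs freed. Returns the
+ * number of descriptors reclaimed.
+ */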
+static int
+reclaim_tx_descs(struct sge_eq *eq, int atleast, int howmany)
+{
+ struct tx_sdesc *txsd;
+ struct tx_map *txm, *next_txm;
+ unsigned int cidx, can_reclaim, reclaimed, maps, next_map_cidx;
+
+ EQ_LOCK_ASSERT_OWNED(eq);
+
+ cidx = eq->spg->cidx; /* stable snapshot */
+ cidx = be16_to_cpu(cidx);
+
+ if (cidx >= eq->cidx)
+ can_reclaim = cidx - eq->cidx;
+ else
+ can_reclaim = cidx + eq->cap - eq->cidx;
+
+ if (can_reclaim < atleast)
+ return (0);
+
+ next_map_cidx = eq->map_cidx;
+ next_txm = txm = &eq->maps[next_map_cidx];
+ prefetch(txm);
+
+ maps = reclaimed = 0;
+ do {
+ int ndesc;
+
+ txsd = &eq->sdesc[eq->cidx];
+ ndesc = txsd->desc_used;
+
+ /* Firmware doesn't return "partial" credits. */
+ KASSERT(can_reclaim >= ndesc,
+ ("%s: unexpected number of credits: %d, %d",
+ __func__, can_reclaim, ndesc));
+
+ maps += txsd->map_used;
+ reclaimed += ndesc;
+
+ eq->cidx += ndesc;
+ if (eq->cidx >= eq->cap)
+ eq->cidx -= eq->cap;
+
+ can_reclaim -= ndesc;
+
+ } while (can_reclaim && reclaimed < howmany);
+
+ eq->avail += reclaimed;
+ KASSERT(eq->avail < eq->cap, /* avail tops out at (cap - 1) */
+ ("%s: too many descriptors available", __func__));
+
+ eq->map_avail += maps;
+ KASSERT(eq->map_avail <= eq->map_total,
+ ("%s: too many maps available", __func__));
+
+ prefetch(txm->m);
+ while (maps--) {
+ next_txm++;
+ if (++next_map_cidx == eq->map_total) {
+ next_map_cidx = 0;
+ next_txm = eq->maps;
+ }
+ prefetch(next_txm->m);
+
+ bus_dmamap_unload(eq->tx_tag, txm->map);
+ m_freem(txm->m);
+ txm->m = NULL;
+
+ txm = next_txm;
+ }
+ eq->map_cidx = next_map_cidx;
+
+ return (reclaimed);
+}
+
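+/*
+ * Writes an egress queue flush work request. It uses up a single descriptor
+ * and asks for an egress update (EQUEQ/EQUIQ), which is what eventually gets
+ * a suspended tx queue going again.
+ */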
+static void
+write_eqflush_wr(struct sge_eq *eq)
+{
+ struct fw_eq_flush_wr *wr;
+ struct tx_sdesc *txsd;
+
+ EQ_LOCK_ASSERT_OWNED(eq);
+ KASSERT(eq->avail > 0, ("%s: no descriptors left.", __func__));
+
+ wr = (void *)&eq->desc[eq->pidx];
+ bzero(wr, sizeof(*wr));
+ wr->opcode = FW_EQ_FLUSH_WR;
+ wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(sizeof(*wr) / 16) |
+ F_FW_WR_EQUEQ | F_FW_WR_EQUIQ);
+
+ txsd = &eq->sdesc[eq->pidx];
+ txsd->desc_used = 1;
+ txsd->map_used = 0;
+
+ eq->pending++;
+ eq->avail--;
+ if (++eq->pidx == eq->cap)
+ eq->pidx = 0;
+}
+
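+/*
+ * Returns flit "idx" of an SGL laid out as (len0 + len1, addr0, addr1)
+ * tuples, the format used for everything that follows the leading ulptx_sgl
+ * header written by write_sgl_to_txd.
+ */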
+static __be64
+get_flit(bus_dma_segment_t *sgl, int nsegs, int idx)
+{
+ int i = (idx / 3) * 2;
+
+ switch (idx % 3) {
+ case 0: {
+ __be64 rc;
+
+ rc = htobe32(sgl[i].ds_len);
+ if (i + 1 < nsegs)
+ rc |= (uint64_t)htobe32(sgl[i + 1].ds_len) << 32;
+
+ return (rc);
+ }
+ case 1:
+ return htobe64(sgl[i].ds_addr);
+ case 2:
+ return htobe64(sgl[i + 1].ds_addr);
+ }
+
+ return (0);
+}
+
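+/*
+ * Returns an mbuf with "len" bytes of data from the buffer at the freelist's
+ * consumer index. Small amounts of data are copied into the mbuf itself;
+ * otherwise the cluster is attached to the mbuf and its slot in the freelist
+ * will get a fresh cluster on the next refill.
+ */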
+static struct mbuf *
+get_fl_sdesc_data(struct sge_fl *fl, int len, int flags)
+{
+ struct fl_sdesc *sd;
+ struct mbuf *m;
+
+ sd = &fl->sdesc[fl->cidx];
+ FL_LOCK(fl);
+ if (++fl->cidx == fl->cap)
+ fl->cidx = 0;
+ fl->needed++;
+ FL_UNLOCK(fl);
+
+ m = sd->m;
+ if (m == NULL) {
+ m = m_gethdr(M_NOWAIT, MT_NOINIT);
+ if (m == NULL)
+ return (NULL);
+ }
+ sd->m = NULL; /* consumed */
+
+ bus_dmamap_sync(fl->tag[sd->tag_idx], sd->map, BUS_DMASYNC_POSTREAD);
+ m_init(m, zone_mbuf, MLEN, M_NOWAIT, MT_DATA, flags);
+ if ((flags && len < MINCLSIZE) || (!flags && len <= MLEN))
+ bcopy(sd->cl, mtod(m, caddr_t), len);
+ else {
+ bus_dmamap_unload(fl->tag[sd->tag_idx], sd->map);
+ m_cljset(m, sd->cl, FL_BUF_TYPE(sd->tag_idx));
+ sd->cl = NULL; /* consumed */
+ }
+
+ m->m_len = min(len, FL_BUF_SIZE(sd->tag_idx));
+
+ return (m);
+}
+
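+/*
+ * Selects the smallest freelist buffer size that can hold a frame of the
+ * given MTU (plus the SGE's packet shift), or the largest available size if
+ * none is big enough.
+ */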
+static void
+set_fl_tag_idx(struct sge_fl *fl, int mtu)
+{
+ int i;
+
+ FL_LOCK_ASSERT_OWNED(fl);
+
+ for (i = 0; i < FL_BUF_SIZES - 1; i++) {
+ if (FL_BUF_SIZE(i) >= (mtu + FL_PKTSHIFT))
+ break;
+ }
+
+ fl->tag_idx = i;
+}
diff --git a/sys/modules/Makefile b/sys/modules/Makefile
index 096e4d2..41ed4bf 100644
--- a/sys/modules/Makefile
+++ b/sys/modules/Makefile
@@ -70,6 +70,7 @@ SUBDIR= ${_3dfx} \
${_cs} \
${_ctau} \
${_cxgb} \
+ cxgbe \
${_cyclic} \
dc \
dcons \
diff --git a/sys/modules/cxgbe/Makefile b/sys/modules/cxgbe/Makefile
new file mode 100644
index 0000000..cf6f66b
--- /dev/null
+++ b/sys/modules/cxgbe/Makefile
@@ -0,0 +1,16 @@
+#
+# $FreeBSD$
+#
+
+CXGBE = ${.CURDIR}/../../dev/cxgbe
+.PATH: ${CXGBE} ${CXGBE}/common
+
+KMOD = if_cxgbe
+SRCS = t4_main.c t4_sge.c
+SRCS+= t4_hw.c
+SRCS+= device_if.h bus_if.h pci_if.h
+SRCS+= opt_inet.h
+
+CFLAGS+= -g -I${CXGBE}
+
+.include <bsd.kmod.mk>
diff --git a/usr.sbin/sysinstall/devices.c b/usr.sbin/sysinstall/devices.c
index 91216ff..53ebb89 100644
--- a/usr.sbin/sysinstall/devices.c
+++ b/usr.sbin/sysinstall/devices.c
@@ -109,6 +109,7 @@ static struct _devname {
NETWORK("cas", "Sun Cassini/Cassini+ or NS DP83065 Saturn Ethernet"),
NETWORK("cue", "CATC USB Ethernet adapter"),
NETWORK("cxgb", "Chelsio T3 10Gb Ethernet card"),
+ NETWORK("cxgbe", "Chelsio T4 10Gb Ethernet card"),
NETWORK("fpa", "DEC DEFPA PCI FDDI card"),
NETWORK("sr", "SDL T1/E1 sync serial PCI card"),
NETWORK("cc3i", "SDL HSSI sync serial PCI card"),