author		kmacy <kmacy@FreeBSD.org>	2007-03-14 02:37:44 +0000
committer	kmacy <kmacy@FreeBSD.org>	2007-03-14 02:37:44 +0000
commit		9e34f4996bf6727e29c495e677b9fb13266ef62d (patch)
tree		e5e0defa48f269482c2469e5ddccab6185036712
parent		91100b16538751b4c905a045bd31ebb6d554b1ec (diff)
download	FreeBSD-src-9e34f4996bf6727e29c495e677b9fb13266ef62d.zip
		FreeBSD-src-9e34f4996bf6727e29c495e677b9fb13266ef62d.tar.gz
First of several commits for driver support for the Chelsio T3B 10 Gigabit
Ethernet adapter.

Reviewed by:	scottl, sam

For those interested in the preliminary performance work see below.

Plots of mxge vs. cxgb running netpipe:

blocksize vs. bandwidth:
http://www.fsmware.com/chelsio.random/bsvsbw.gif

blocksize vs. RTT:
http://www.fsmware.com/chelsio.random/bsvstime.gif

blocksize vs. RTT for block sizes <= 10kb:
http://www.fsmware.com/chelsio.random/bsvstime_10kb.gif
http://www.fsmware.com/chelsio.random/bsvstime_10kb3.gif
-rw-r--r--	sys/dev/cxgb/common/cxgb_ael1002.c	328
-rw-r--r--	sys/dev/cxgb/common/cxgb_common.h	687
-rw-r--r--	sys/dev/cxgb/common/cxgb_firmware_exports.h	181
-rw-r--r--	sys/dev/cxgb/common/cxgb_mc5.c	474
-rw-r--r--	sys/dev/cxgb/common/cxgb_mv88e1xxx.c	302
-rw-r--r--	sys/dev/cxgb/common/cxgb_regs.h	7645
-rw-r--r--	sys/dev/cxgb/common/cxgb_sge_defs.h	289
-rw-r--r--	sys/dev/cxgb/common/cxgb_t3_cpl.h	1490
-rw-r--r--	sys/dev/cxgb/common/cxgb_t3_hw.c	3399
-rw-r--r--	sys/dev/cxgb/common/cxgb_tcb.h	678
-rw-r--r--	sys/dev/cxgb/common/cxgb_version.h	41
-rw-r--r--	sys/dev/cxgb/common/cxgb_vsc8211.c	251
-rw-r--r--	sys/dev/cxgb/common/cxgb_xgmac.c	415
-rw-r--r--	sys/dev/cxgb/cxgb_adapter.h	438
-rw-r--r--	sys/dev/cxgb/cxgb_ioctl.h	222
-rw-r--r--	sys/dev/cxgb/cxgb_lro.c	427
-rw-r--r--	sys/dev/cxgb/cxgb_main.c	1792
-rw-r--r--	sys/dev/cxgb/cxgb_osdep.h	246
-rw-r--r--	sys/dev/cxgb/cxgb_sge.c	2323
19 files changed, 21628 insertions, 0 deletions
diff --git a/sys/dev/cxgb/common/cxgb_ael1002.c b/sys/dev/cxgb/common/cxgb_ael1002.c
new file mode 100644
index 0000000..02c4c2f
--- /dev/null
+++ b/sys/dev/cxgb/common/cxgb_ael1002.c
@@ -0,0 +1,328 @@
+/**************************************************************************
+
+Copyright (c) 2007, Chelsio Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Chelsio Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/cxgb/common/cxgb_common.h>
+#include <dev/cxgb/common/cxgb_regs.h>
+
+enum {
+ AEL100X_TX_DISABLE = 9,
+ AEL100X_TX_CONFIG1 = 0xc002,
+ AEL1002_PWR_DOWN_HI = 0xc011,
+ AEL1002_PWR_DOWN_LO = 0xc012,
+ AEL1002_XFI_EQL = 0xc015,
+ AEL1002_LB_EN = 0xc017,
+
+ LASI_CTRL = 0x9002,
+ LASI_STAT = 0x9005
+};
+
+static void ael100x_txon(struct cphy *phy)
+{
+ int tx_on_gpio = phy->addr == 0 ? F_GPIO7_OUT_VAL : F_GPIO2_OUT_VAL;
+
+ t3_os_sleep(100);
+ t3_set_reg_field(phy->adapter, A_T3DBG_GPIO_EN, 0, tx_on_gpio);
+ t3_os_sleep(30);
+}
+
+static int ael1002_power_down(struct cphy *phy, int enable)
+{
+ int err;
+
+ err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL100X_TX_DISABLE, !!enable);
+ if (!err)
+ err = t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, MII_BMCR,
+ BMCR_PDOWN, enable ? BMCR_PDOWN : 0);
+ return err;
+}
+
+static int ael1002_reset(struct cphy *phy, int wait)
+{
+ int err;
+
+ if ((err = ael1002_power_down(phy, 0)) ||
+ (err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL100X_TX_CONFIG1, 1)) ||
+ (err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL1002_PWR_DOWN_HI, 0)) ||
+ (err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL1002_PWR_DOWN_LO, 0)) ||
+ (err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL1002_XFI_EQL, 0x18)) ||
+ (err = t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, AEL1002_LB_EN,
+ 0, 1 << 5)))
+ return err;
+ return 0;
+}
+
+static int ael1002_intr_noop(struct cphy *phy)
+{
+ return 0;
+}
+
+static int ael100x_get_link_status(struct cphy *phy, int *link_ok,
+ int *speed, int *duplex, int *fc)
+{
+ if (link_ok) {
+ unsigned int status;
+ int err = mdio_read(phy, MDIO_DEV_PMA_PMD, MII_BMSR, &status);
+
+ /*
+ * BMSR_LSTATUS is latch-low, so if it is 0 we need to read it
+ * once more to get the current link state.
+ */
+ if (!err && !(status & BMSR_LSTATUS))
+ err = mdio_read(phy, MDIO_DEV_PMA_PMD, MII_BMSR,
+ &status);
+ if (err)
+ return err;
+ *link_ok = !!(status & BMSR_LSTATUS);
+ }
+ if (speed)
+ *speed = SPEED_10000;
+ if (duplex)
+ *duplex = DUPLEX_FULL;
+ return 0;
+}
+
+#ifdef C99_NOT_SUPPORTED
+static struct cphy_ops ael1002_ops = {
+ NULL,
+ ael1002_reset,
+ ael1002_intr_noop,
+ ael1002_intr_noop,
+ ael1002_intr_noop,
+ ael1002_intr_noop,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ ael100x_get_link_status,
+ ael1002_power_down,
+};
+#else
+static struct cphy_ops ael1002_ops = {
+ .reset = ael1002_reset,
+ .intr_enable = ael1002_intr_noop,
+ .intr_disable = ael1002_intr_noop,
+ .intr_clear = ael1002_intr_noop,
+ .intr_handler = ael1002_intr_noop,
+ .get_link_status = ael100x_get_link_status,
+ .power_down = ael1002_power_down,
+};
+#endif
+
+void t3_ael1002_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
+ const struct mdio_ops *mdio_ops)
+{
+ cphy_init(phy, adapter, phy_addr, &ael1002_ops, mdio_ops);
+ ael100x_txon(phy);
+}
+
+static int ael1006_reset(struct cphy *phy, int wait)
+{
+ return t3_phy_reset(phy, MDIO_DEV_PMA_PMD, wait);
+}
+
+static int ael1006_intr_enable(struct cphy *phy)
+{
+ return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 1);
+}
+
+static int ael1006_intr_disable(struct cphy *phy)
+{
+ return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 0);
+}
+
+static int ael1006_intr_clear(struct cphy *phy)
+{
+ u32 val;
+
+ return mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &val);
+}
+
+static int ael1006_intr_handler(struct cphy *phy)
+{
+ unsigned int status;
+ int err = mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &status);
+
+ if (err)
+ return err;
+ return (status & 1) ? cphy_cause_link_change : 0;
+}
+
+static int ael1006_power_down(struct cphy *phy, int enable)
+{
+ return t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, MII_BMCR,
+ BMCR_PDOWN, enable ? BMCR_PDOWN : 0);
+}
+
+#ifdef C99_NOT_SUPPORTED
+static struct cphy_ops ael1006_ops = {
+ NULL,
+ ael1006_reset,
+ ael1006_intr_enable,
+ ael1006_intr_disable,
+ ael1006_intr_clear,
+ ael1006_intr_handler,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ ael100x_get_link_status,
+ ael1006_power_down,
+};
+#else
+static struct cphy_ops ael1006_ops = {
+ .reset = ael1006_reset,
+ .intr_enable = ael1006_intr_enable,
+ .intr_disable = ael1006_intr_disable,
+ .intr_clear = ael1006_intr_clear,
+ .intr_handler = ael1006_intr_handler,
+ .get_link_status = ael100x_get_link_status,
+ .power_down = ael1006_power_down,
+};
+#endif
+
+void t3_ael1006_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
+ const struct mdio_ops *mdio_ops)
+{
+ cphy_init(phy, adapter, phy_addr, &ael1006_ops, mdio_ops);
+ ael100x_txon(phy);
+}
+
+#ifdef C99_NOT_SUPPORTED
+static struct cphy_ops qt2045_ops = {
+ NULL,
+ ael1006_reset,
+ ael1006_intr_enable,
+ ael1006_intr_disable,
+ ael1006_intr_clear,
+ ael1006_intr_handler,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ ael100x_get_link_status,
+ ael1006_power_down,
+};
+#else
+static struct cphy_ops qt2045_ops = {
+ .reset = ael1006_reset,
+ .intr_enable = ael1006_intr_enable,
+ .intr_disable = ael1006_intr_disable,
+ .intr_clear = ael1006_intr_clear,
+ .intr_handler = ael1006_intr_handler,
+ .get_link_status = ael100x_get_link_status,
+ .power_down = ael1006_power_down,
+};
+#endif
+
+void t3_qt2045_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
+ const struct mdio_ops *mdio_ops)
+{
+ unsigned int stat;
+
+ cphy_init(phy, adapter, phy_addr, &qt2045_ops, mdio_ops);
+
+ /*
+ * Some cards where the PHY is supposed to be at address 0 actually
+ * have it at 1.
+ */
+ if (!phy_addr && !mdio_read(phy, MDIO_DEV_PMA_PMD, MII_BMSR, &stat) &&
+ stat == 0xffff)
+ phy->addr = 1;
+}
+
+static int xaui_direct_reset(struct cphy *phy, int wait)
+{
+ return 0;
+}
+
+static int xaui_direct_get_link_status(struct cphy *phy, int *link_ok,
+ int *speed, int *duplex, int *fc)
+{
+ if (link_ok) {
+ unsigned int status;
+
+ status = t3_read_reg(phy->adapter,
+ XGM_REG(A_XGM_SERDES_STAT0, phy->addr));
+ *link_ok = !(status & F_LOWSIG0);
+ }
+ if (speed)
+ *speed = SPEED_10000;
+ if (duplex)
+ *duplex = DUPLEX_FULL;
+ return 0;
+}
+
+static int xaui_direct_power_down(struct cphy *phy, int enable)
+{
+ return 0;
+}
+
+#ifdef C99_NOT_SUPPORTED
+static struct cphy_ops xaui_direct_ops = {
+ NULL,
+ xaui_direct_reset,
+ ael1002_intr_noop,
+ ael1002_intr_noop,
+ ael1002_intr_noop,
+ ael1002_intr_noop,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ xaui_direct_get_link_status,
+ xaui_direct_power_down,
+};
+#else
+static struct cphy_ops xaui_direct_ops = {
+ .reset = xaui_direct_reset,
+ .intr_enable = ael1002_intr_noop,
+ .intr_disable = ael1002_intr_noop,
+ .intr_clear = ael1002_intr_noop,
+ .intr_handler = ael1002_intr_noop,
+ .get_link_status = xaui_direct_get_link_status,
+ .power_down = xaui_direct_power_down,
+};
+#endif
+
+void t3_xaui_direct_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
+ const struct mdio_ops *mdio_ops)
+{
+ cphy_init(phy, adapter, 1, &xaui_direct_ops, mdio_ops);
+}
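
The ops tables above are dispatched indirectly through struct cphy_ops; a
minimal caller-side sketch (my_check_link is a hypothetical helper, not part
of the patch):

	/* Poll link state through the cphy_ops table set up by *_phy_prep(). */
	static void my_check_link(struct cphy *phy)
	{
		int link_ok, speed, duplex, fc;

		/* For an AEL1002 this lands in ael100x_get_link_status(). */
		if (phy->ops->get_link_status(phy, &link_ok, &speed, &duplex,
		    &fc) == 0 && link_ok)
			printf("link up at %d Mb/s\n", speed);
	}
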
diff --git a/sys/dev/cxgb/common/cxgb_common.h b/sys/dev/cxgb/common/cxgb_common.h
new file mode 100644
index 0000000..4d7c5966
--- /dev/null
+++ b/sys/dev/cxgb/common/cxgb_common.h
@@ -0,0 +1,687 @@
+/**************************************************************************
+
+Copyright (c) 2007, Chelsio Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Chelsio Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+$FreeBSD$
+
+***************************************************************************/
+#ifndef __CHELSIO_COMMON_H
+#define __CHELSIO_COMMON_H
+
+#include <dev/cxgb/cxgb_osdep.h>
+
+enum {
+ MAX_NPORTS = 2, /* max # of ports */
+ MAX_FRAME_SIZE = 10240, /* max MAC frame size, including header + FCS */
+ EEPROMSIZE = 8192, /* Serial EEPROM size */
+ RSS_TABLE_SIZE = 64, /* size of RSS lookup and mapping tables */
+ TCB_SIZE = 128, /* TCB size */
+ NMTUS = 16, /* size of MTU table */
+ NCCTRL_WIN = 32, /* # of congestion control windows */
+};
+
+#define MAX_RX_COALESCING_LEN 16224U
+
+enum {
+ PAUSE_RX = 1 << 0,
+ PAUSE_TX = 1 << 1,
+ PAUSE_AUTONEG = 1 << 2
+};
+
+enum {
+ SUPPORTED_OFFLOAD = 1 << 24,
+ SUPPORTED_IRQ = 1 << 25
+};
+
+enum { /* adapter interrupt-maintained statistics */
+ STAT_ULP_CH0_PBL_OOB,
+ STAT_ULP_CH1_PBL_OOB,
+ STAT_PCI_CORR_ECC,
+
+ IRQ_NUM_STATS /* keep last */
+};
+
+enum {
+ SGE_QSETS = 8, /* # of SGE Tx/Rx/RspQ sets */
+ SGE_RXQ_PER_SET = 2, /* # of Rx queues per set */
+ SGE_TXQ_PER_SET = 3 /* # of Tx queues per set */
+};
+
+enum sge_context_type { /* SGE egress context types */
+ SGE_CNTXT_RDMA = 0,
+ SGE_CNTXT_ETH = 2,
+ SGE_CNTXT_OFLD = 4,
+ SGE_CNTXT_CTRL = 5
+};
+
+enum {
+ AN_PKT_SIZE = 32, /* async notification packet size */
+ IMMED_PKT_SIZE = 48 /* packet size for immediate data */
+};
+
+struct sg_ent { /* SGE scatter/gather entry */
+ u32 len[2];
+ u64 addr[2];
+};
+
+#ifndef SGE_NUM_GENBITS
+/* Must be 1 or 2 */
+# define SGE_NUM_GENBITS 2
+#endif
+
+#define TX_DESC_FLITS 16U
+#define WR_FLITS (TX_DESC_FLITS + 1 - SGE_NUM_GENBITS)
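+/* With TX_DESC_FLITS = 16 and SGE_NUM_GENBITS = 2: WR_FLITS = 16 + 1 - 2 = 15. */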
+
+struct cphy;
+
+struct mdio_ops {
+ int (*read)(adapter_t *adapter, int phy_addr, int mmd_addr,
+ int reg_addr, unsigned int *val);
+ int (*write)(adapter_t *adapter, int phy_addr, int mmd_addr,
+ int reg_addr, unsigned int val);
+};
+
+struct adapter_info {
+ unsigned char nports; /* # of ports */
+ unsigned char phy_base_addr; /* MDIO PHY base address */
+ unsigned char mdien;
+ unsigned char mdiinv;
+ unsigned int gpio_out; /* GPIO output settings */
+ unsigned int gpio_intr; /* GPIO IRQ enable mask */
+ unsigned long caps; /* adapter capabilities */
+ const struct mdio_ops *mdio_ops; /* MDIO operations */
+ const char *desc; /* product description */
+};
+
+struct port_type_info {
+ void (*phy_prep)(struct cphy *phy, adapter_t *adapter, int phy_addr,
+ const struct mdio_ops *ops);
+ unsigned int caps;
+ const char *desc;
+};
+
+struct mc5_stats {
+ unsigned long parity_err;
+ unsigned long active_rgn_full;
+ unsigned long nfa_srch_err;
+ unsigned long unknown_cmd;
+ unsigned long reqq_parity_err;
+ unsigned long dispq_parity_err;
+ unsigned long del_act_empty;
+};
+
+struct mc7_stats {
+ unsigned long corr_err;
+ unsigned long uncorr_err;
+ unsigned long parity_err;
+ unsigned long addr_err;
+};
+
+struct mac_stats {
+ u64 tx_octets; /* total # of octets in good frames */
+ u64 tx_octets_bad; /* total # of octets in error frames */
+ u64 tx_frames; /* all good frames */
+ u64 tx_mcast_frames; /* good multicast frames */
+ u64 tx_bcast_frames; /* good broadcast frames */
+ u64 tx_pause; /* # of transmitted pause frames */
+ u64 tx_deferred; /* frames with deferred transmissions */
+ u64 tx_late_collisions; /* # of late collisions */
+ u64 tx_total_collisions; /* # of total collisions */
+	u64 tx_excess_collisions;   /* frame errors from excessive collisions */
+ u64 tx_underrun; /* # of Tx FIFO underruns */
+ u64 tx_len_errs; /* # of Tx length errors */
+ u64 tx_mac_internal_errs; /* # of internal MAC errors on Tx */
+ u64 tx_excess_deferral; /* # of frames with excessive deferral */
+ u64 tx_fcs_errs; /* # of frames with bad FCS */
+
+ u64 tx_frames_64; /* # of Tx frames in a particular range */
+ u64 tx_frames_65_127;
+ u64 tx_frames_128_255;
+ u64 tx_frames_256_511;
+ u64 tx_frames_512_1023;
+ u64 tx_frames_1024_1518;
+ u64 tx_frames_1519_max;
+
+ u64 rx_octets; /* total # of octets in good frames */
+ u64 rx_octets_bad; /* total # of octets in error frames */
+ u64 rx_frames; /* all good frames */
+ u64 rx_mcast_frames; /* good multicast frames */
+ u64 rx_bcast_frames; /* good broadcast frames */
+ u64 rx_pause; /* # of received pause frames */
+ u64 rx_fcs_errs; /* # of received frames with bad FCS */
+ u64 rx_align_errs; /* alignment errors */
+ u64 rx_symbol_errs; /* symbol errors */
+ u64 rx_data_errs; /* data errors */
+ u64 rx_sequence_errs; /* sequence errors */
+ u64 rx_runt; /* # of runt frames */
+ u64 rx_jabber; /* # of jabber frames */
+ u64 rx_short; /* # of short frames */
+ u64 rx_too_long; /* # of oversized frames */
+ u64 rx_mac_internal_errs; /* # of internal MAC errors on Rx */
+
+ u64 rx_frames_64; /* # of Rx frames in a particular range */
+ u64 rx_frames_65_127;
+ u64 rx_frames_128_255;
+ u64 rx_frames_256_511;
+ u64 rx_frames_512_1023;
+ u64 rx_frames_1024_1518;
+ u64 rx_frames_1519_max;
+
+ u64 rx_cong_drops; /* # of Rx drops due to SGE congestion */
+
+ unsigned long tx_fifo_parity_err;
+ unsigned long rx_fifo_parity_err;
+ unsigned long tx_fifo_urun;
+ unsigned long rx_fifo_ovfl;
+ unsigned long serdes_signal_loss;
+ unsigned long xaui_pcs_ctc_err;
+ unsigned long xaui_pcs_align_change;
+};
+
+struct tp_mib_stats {
+ u32 ipInReceive_hi;
+ u32 ipInReceive_lo;
+ u32 ipInHdrErrors_hi;
+ u32 ipInHdrErrors_lo;
+ u32 ipInAddrErrors_hi;
+ u32 ipInAddrErrors_lo;
+ u32 ipInUnknownProtos_hi;
+ u32 ipInUnknownProtos_lo;
+ u32 ipInDiscards_hi;
+ u32 ipInDiscards_lo;
+ u32 ipInDelivers_hi;
+ u32 ipInDelivers_lo;
+ u32 ipOutRequests_hi;
+ u32 ipOutRequests_lo;
+ u32 ipOutDiscards_hi;
+ u32 ipOutDiscards_lo;
+ u32 ipOutNoRoutes_hi;
+ u32 ipOutNoRoutes_lo;
+ u32 ipReasmTimeout;
+ u32 ipReasmReqds;
+ u32 ipReasmOKs;
+ u32 ipReasmFails;
+
+ u32 reserved[8];
+
+ u32 tcpActiveOpens;
+ u32 tcpPassiveOpens;
+ u32 tcpAttemptFails;
+ u32 tcpEstabResets;
+ u32 tcpOutRsts;
+ u32 tcpCurrEstab;
+ u32 tcpInSegs_hi;
+ u32 tcpInSegs_lo;
+ u32 tcpOutSegs_hi;
+ u32 tcpOutSegs_lo;
+ u32 tcpRetransSeg_hi;
+ u32 tcpRetransSeg_lo;
+ u32 tcpInErrs_hi;
+ u32 tcpInErrs_lo;
+ u32 tcpRtoMin;
+ u32 tcpRtoMax;
+};
+
+struct tp_params {
+ unsigned int nchan; /* # of channels */
+ unsigned int pmrx_size; /* total PMRX capacity */
+ unsigned int pmtx_size; /* total PMTX capacity */
+ unsigned int cm_size; /* total CM capacity */
+ unsigned int chan_rx_size; /* per channel Rx size */
+ unsigned int chan_tx_size; /* per channel Tx size */
+ unsigned int rx_pg_size; /* Rx page size */
+ unsigned int tx_pg_size; /* Tx page size */
+ unsigned int rx_num_pgs; /* # of Rx pages */
+ unsigned int tx_num_pgs; /* # of Tx pages */
+ unsigned int ntimer_qs; /* # of timer queues */
+};
+
+struct qset_params { /* SGE queue set parameters */
+ unsigned int polling; /* polling/interrupt service for rspq */
+ unsigned int coalesce_nsecs; /* irq coalescing timer */
+ unsigned int rspq_size; /* # of entries in response queue */
+ unsigned int fl_size; /* # of entries in regular free list */
+ unsigned int jumbo_size; /* # of entries in jumbo free list */
+ unsigned int txq_size[SGE_TXQ_PER_SET]; /* Tx queue sizes */
+ unsigned int cong_thres; /* FL congestion threshold */
+};
+
+struct sge_params {
+ unsigned int max_pkt_size; /* max offload pkt size */
+ struct qset_params qset[SGE_QSETS];
+};
+
+struct mc5_params {
+ unsigned int mode; /* selects MC5 width */
+ unsigned int nservers; /* size of server region */
+ unsigned int nfilters; /* size of filter region */
+ unsigned int nroutes; /* size of routing region */
+};
+
+/* Default MC5 region sizes */
+enum {
+ DEFAULT_NSERVERS = 512,
+ DEFAULT_NFILTERS = 128
+};
+
+/* MC5 modes, these must be non-0 */
+enum {
+ MC5_MODE_144_BIT = 1,
+ MC5_MODE_72_BIT = 2
+};
+
+struct vpd_params {
+ unsigned int cclk;
+ unsigned int mclk;
+ unsigned int uclk;
+ unsigned int mdc;
+ unsigned int mem_timing;
+ u8 eth_base[6];
+ u8 port_type[MAX_NPORTS];
+ unsigned short xauicfg[2];
+};
+
+struct pci_params {
+ unsigned int vpd_cap_addr;
+ unsigned int pcie_cap_addr;
+ unsigned short speed;
+ unsigned char width;
+ unsigned char variant;
+};
+
+enum {
+ PCI_VARIANT_PCI,
+ PCI_VARIANT_PCIX_MODE1_PARITY,
+ PCI_VARIANT_PCIX_MODE1_ECC,
+ PCI_VARIANT_PCIX_266_MODE2,
+ PCI_VARIANT_PCIE
+};
+
+struct adapter_params {
+ struct sge_params sge;
+ struct mc5_params mc5;
+ struct tp_params tp;
+ struct vpd_params vpd;
+ struct pci_params pci;
+
+ const struct adapter_info *info;
+
+#ifdef CONFIG_CHELSIO_T3_CORE
+ unsigned short mtus[NMTUS];
+ unsigned short a_wnd[NCCTRL_WIN];
+ unsigned short b_wnd[NCCTRL_WIN];
+#endif
+ unsigned int nports; /* # of ethernet ports */
+ unsigned int stats_update_period; /* MAC stats accumulation period */
+ unsigned int linkpoll_period; /* link poll period in 0.1s */
+ unsigned int rev; /* chip revision */
+};
+
+struct trace_params {
+ u32 sip;
+ u32 sip_mask;
+ u32 dip;
+ u32 dip_mask;
+ u16 sport;
+ u16 sport_mask;
+ u16 dport;
+ u16 dport_mask;
+ u32 vlan:12;
+ u32 vlan_mask:12;
+ u32 intf:4;
+ u32 intf_mask:4;
+ u8 proto;
+ u8 proto_mask;
+};
+
+struct link_config {
+ unsigned int supported; /* link capabilities */
+ unsigned int advertising; /* advertised capabilities */
+ unsigned short requested_speed; /* speed user has requested */
+ unsigned short speed; /* actual link speed */
+ unsigned char requested_duplex; /* duplex user has requested */
+ unsigned char duplex; /* actual link duplex */
+ unsigned char requested_fc; /* flow control user has requested */
+ unsigned char fc; /* actual link flow control */
+ unsigned char autoneg; /* autonegotiating? */
+ unsigned int link_ok; /* link up? */
+};
+
+#define SPEED_INVALID 0xffff
+#define DUPLEX_INVALID 0xff
+
+struct mc5 {
+ adapter_t *adapter;
+ unsigned int tcam_size;
+ unsigned char part_type;
+ unsigned char parity_enabled;
+ unsigned char mode;
+ struct mc5_stats stats;
+};
+
+static inline unsigned int t3_mc5_size(const struct mc5 *p)
+{
+ return p->tcam_size;
+}
+
+struct mc7 {
+ adapter_t *adapter; /* backpointer to adapter */
+ unsigned int size; /* memory size in bytes */
+ unsigned int width; /* MC7 interface width */
+ unsigned int offset; /* register address offset for MC7 instance */
+ const char *name; /* name of MC7 instance */
+ struct mc7_stats stats; /* MC7 statistics */
+};
+
+static inline unsigned int t3_mc7_size(const struct mc7 *p)
+{
+ return p->size;
+}
+
+struct cmac {
+ adapter_t *adapter;
+ unsigned int offset;
+ unsigned int nucast; /* # of address filters for unicast MACs */
+ struct mac_stats stats;
+};
+
+enum {
+ MAC_DIRECTION_RX = 1,
+ MAC_DIRECTION_TX = 2,
+ MAC_RXFIFO_SIZE = 32768
+};
+
+/* IEEE 802.3ae specified MDIO devices */
+enum {
+ MDIO_DEV_PMA_PMD = 1,
+ MDIO_DEV_WIS = 2,
+ MDIO_DEV_PCS = 3,
+ MDIO_DEV_XGXS = 4
+};
+
+/* PHY loopback direction */
+enum {
+ PHY_LOOPBACK_TX = 1,
+ PHY_LOOPBACK_RX = 2
+};
+
+/* PHY interrupt types */
+enum {
+ cphy_cause_link_change = 1,
+ cphy_cause_fifo_error = 2
+};
+
+/* PHY operations */
+struct cphy_ops {
+ void (*destroy)(struct cphy *phy);
+ int (*reset)(struct cphy *phy, int wait);
+
+ int (*intr_enable)(struct cphy *phy);
+ int (*intr_disable)(struct cphy *phy);
+ int (*intr_clear)(struct cphy *phy);
+ int (*intr_handler)(struct cphy *phy);
+
+ int (*autoneg_enable)(struct cphy *phy);
+ int (*autoneg_restart)(struct cphy *phy);
+
+ int (*advertise)(struct cphy *phy, unsigned int advertise_map);
+ int (*set_loopback)(struct cphy *phy, int mmd, int dir, int enable);
+ int (*set_speed_duplex)(struct cphy *phy, int speed, int duplex);
+ int (*get_link_status)(struct cphy *phy, int *link_ok, int *speed,
+ int *duplex, int *fc);
+ int (*power_down)(struct cphy *phy, int enable);
+};
+
+/* A PHY instance */
+struct cphy {
+ int addr; /* PHY address */
+ adapter_t *adapter; /* associated adapter */
+ unsigned long fifo_errors; /* FIFO over/under-flows */
+ const struct cphy_ops *ops; /* PHY operations */
+ int (*mdio_read)(adapter_t *adapter, int phy_addr, int mmd_addr,
+ int reg_addr, unsigned int *val);
+ int (*mdio_write)(adapter_t *adapter, int phy_addr, int mmd_addr,
+ int reg_addr, unsigned int val);
+};
+
+/* Convenience MDIO read/write wrappers */
+static inline int mdio_read(struct cphy *phy, int mmd, int reg,
+ unsigned int *valp)
+{
+ return phy->mdio_read(phy->adapter, phy->addr, mmd, reg, valp);
+}
+
+static inline int mdio_write(struct cphy *phy, int mmd, int reg,
+ unsigned int val)
+{
+ return phy->mdio_write(phy->adapter, phy->addr, mmd, reg, val);
+}
+
+/* Convenience initializer */
+static inline void cphy_init(struct cphy *phy, adapter_t *adapter,
+ int phy_addr, struct cphy_ops *phy_ops,
+ const struct mdio_ops *mdio_ops)
+{
+ phy->adapter = adapter;
+ phy->addr = phy_addr;
+ phy->ops = phy_ops;
+ if (mdio_ops) {
+ phy->mdio_read = mdio_ops->read;
+ phy->mdio_write = mdio_ops->write;
+ }
+}
+
+/* Accumulate MAC statistics every 180 seconds. For 1G we multiply by 10. */
+#define MAC_STATS_ACCUM_SECS 180
+
+#define XGM_REG(reg_addr, idx) \
+ ((reg_addr) + (idx) * (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR))
+
+struct addr_val_pair {
+ unsigned int reg_addr;
+ unsigned int val;
+};
+
+#include <dev/cxgb/cxgb_adapter.h>
+
+#ifndef PCI_VENDOR_ID_CHELSIO
+# define PCI_VENDOR_ID_CHELSIO 0x1425
+#endif
+
+#define for_each_port(adapter, iter) \
+ for (iter = 0; iter < (adapter)->params.nports; ++iter)
+
+#define adapter_info(adap) ((adap)->params.info)
+
+static inline int uses_xaui(const adapter_t *adap)
+{
+ return adapter_info(adap)->caps & SUPPORTED_AUI;
+}
+
+static inline int is_10G(const adapter_t *adap)
+{
+ return adapter_info(adap)->caps & SUPPORTED_10000baseT_Full;
+}
+
+static inline int is_offload(const adapter_t *adap)
+{
+#ifdef CONFIG_CHELSIO_T3_CORE
+ return adapter_info(adap)->caps & SUPPORTED_OFFLOAD;
+#else
+ return 0;
+#endif
+}
+
+static inline unsigned int core_ticks_per_usec(const adapter_t *adap)
+{
+ return adap->params.vpd.cclk / 1000;
+}
+
+static inline unsigned int is_pcie(const adapter_t *adap)
+{
+ return adap->params.pci.variant == PCI_VARIANT_PCIE;
+}
+
+void t3_set_reg_field(adapter_t *adap, unsigned int addr, u32 mask, u32 val);
+void t3_read_indirect(adapter_t *adap, unsigned int addr_reg,
+ unsigned int data_reg, u32 *vals, unsigned int nregs,
+ unsigned int start_idx);
+void t3_write_regs(adapter_t *adapter, const struct addr_val_pair *p, int n,
+ unsigned int offset);
+int t3_wait_op_done_val(adapter_t *adapter, int reg, u32 mask, int polarity,
+ int attempts, int delay, u32 *valp);
+
+static inline int t3_wait_op_done(adapter_t *adapter, int reg, u32 mask,
+ int polarity, int attempts, int delay)
+{
+ return t3_wait_op_done_val(adapter, reg, mask, polarity, attempts,
+ delay, NULL);
+}
+
+int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
+ unsigned int set);
+int t3_phy_reset(struct cphy *phy, int mmd, int wait);
+int t3_phy_advertise(struct cphy *phy, unsigned int advert);
+int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex);
+
+void t3_intr_enable(adapter_t *adapter);
+void t3_intr_disable(adapter_t *adapter);
+void t3_intr_clear(adapter_t *adapter);
+void t3_port_intr_enable(adapter_t *adapter, int idx);
+void t3_port_intr_disable(adapter_t *adapter, int idx);
+void t3_port_intr_clear(adapter_t *adapter, int idx);
+int t3_slow_intr_handler(adapter_t *adapter);
+int t3_phy_intr_handler(adapter_t *adapter);
+
+void t3_link_changed(adapter_t *adapter, int port_id);
+int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
+const struct adapter_info *t3_get_adapter_info(unsigned int board_id);
+int t3_seeprom_read(adapter_t *adapter, u32 addr, u32 *data);
+int t3_seeprom_write(adapter_t *adapter, u32 addr, u32 data);
+int t3_seeprom_wp(adapter_t *adapter, int enable);
+int t3_read_flash(adapter_t *adapter, unsigned int addr, unsigned int nwords,
+ u32 *data, int byte_oriented);
+int t3_load_fw(adapter_t *adapter, const u8 *fw_data, unsigned int size);
+int t3_get_fw_version(adapter_t *adapter, u32 *vers);
+int t3_check_fw_version(adapter_t *adapter);
+int t3_init_hw(adapter_t *adapter, u32 fw_params);
+void mac_prep(struct cmac *mac, adapter_t *adapter, int index);
+void early_hw_init(adapter_t *adapter, const struct adapter_info *ai);
+int t3_reset_adapter(adapter_t *adapter);
+int t3_prep_adapter(adapter_t *adapter, const struct adapter_info *ai, int reset);
+void t3_led_ready(adapter_t *adapter);
+void t3_fatal_err(adapter_t *adapter);
+void t3_set_vlan_accel(adapter_t *adapter, unsigned int ports, int on);
+void t3_config_rss(adapter_t *adapter, unsigned int rss_config, const u8 *cpus,
+ const u16 *rspq);
+int t3_read_rss(adapter_t *adapter, u8 *lkup, u16 *map);
+int t3_mps_set_active_ports(adapter_t *adap, unsigned int port_mask);
+void t3_port_failover(adapter_t *adapter, int port);
+void t3_failover_done(adapter_t *adapter, int port);
+void t3_failover_clear(adapter_t *adapter);
+int t3_cim_ctl_blk_read(adapter_t *adap, unsigned int addr, unsigned int n,
+ unsigned int *valp);
+int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
+ u64 *buf);
+
+int t3_mac_reset(struct cmac *mac);
+void t3b_pcs_reset(struct cmac *mac);
+int t3_mac_enable(struct cmac *mac, int which);
+int t3_mac_disable(struct cmac *mac, int which);
+int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu);
+int t3_mac_set_rx_mode(struct cmac *mac, struct t3_rx_mode *rm);
+int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6]);
+int t3_mac_set_num_ucast(struct cmac *mac, int n);
+const struct mac_stats *t3_mac_update_stats(struct cmac *mac);
+int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex,
+ int fc);
+
+void t3_mc5_prep(adapter_t *adapter, struct mc5 *mc5, int mode);
+int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
+ unsigned int nroutes);
+void t3_mc5_intr_handler(struct mc5 *mc5);
+int t3_read_mc5_range(const struct mc5 *mc5, unsigned int start, unsigned int n,
+ u32 *buf);
+
+#ifdef CONFIG_CHELSIO_T3_CORE
+int t3_tp_set_coalescing_size(adapter_t *adap, unsigned int size, int psh);
+void t3_tp_set_max_rxsize(adapter_t *adap, unsigned int size);
+void t3_tp_set_offload_mode(adapter_t *adap, int enable);
+void t3_tp_get_mib_stats(adapter_t *adap, struct tp_mib_stats *tps);
+void t3_load_mtus(adapter_t *adap, unsigned short mtus[NMTUS],
+ unsigned short alpha[NCCTRL_WIN],
+ unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap);
+void t3_read_hw_mtus(adapter_t *adap, unsigned short mtus[NMTUS]);
+void t3_get_cong_cntl_tab(adapter_t *adap,
+ unsigned short incr[NMTUS][NCCTRL_WIN]);
+void t3_config_trace_filter(adapter_t *adapter, const struct trace_params *tp,
+ int filter_index, int invert, int enable);
+int t3_config_sched(adapter_t *adap, unsigned int kbps, int sched);
+#endif
+
+void t3_sge_prep(adapter_t *adap, struct sge_params *p);
+void t3_sge_init(adapter_t *adap, struct sge_params *p);
+int t3_sge_init_ecntxt(adapter_t *adapter, unsigned int id, int gts_enable,
+ enum sge_context_type type, int respq, u64 base_addr,
+ unsigned int size, unsigned int token, int gen,
+ unsigned int cidx);
+int t3_sge_init_flcntxt(adapter_t *adapter, unsigned int id, int gts_enable,
+ u64 base_addr, unsigned int size, unsigned int esize,
+ unsigned int cong_thres, int gen, unsigned int cidx);
+int t3_sge_init_rspcntxt(adapter_t *adapter, unsigned int id, int irq_vec_idx,
+ u64 base_addr, unsigned int size,
+ unsigned int fl_thres, int gen, unsigned int cidx);
+int t3_sge_init_cqcntxt(adapter_t *adapter, unsigned int id, u64 base_addr,
+ unsigned int size, int rspq, int ovfl_mode,
+ unsigned int credits, unsigned int credit_thres);
+int t3_sge_enable_ecntxt(adapter_t *adapter, unsigned int id, int enable);
+int t3_sge_disable_fl(adapter_t *adapter, unsigned int id);
+int t3_sge_disable_rspcntxt(adapter_t *adapter, unsigned int id);
+int t3_sge_disable_cqcntxt(adapter_t *adapter, unsigned int id);
+int t3_sge_read_ecntxt(adapter_t *adapter, unsigned int id, u32 data[4]);
+int t3_sge_read_fl(adapter_t *adapter, unsigned int id, u32 data[4]);
+int t3_sge_read_cq(adapter_t *adapter, unsigned int id, u32 data[4]);
+int t3_sge_read_rspq(adapter_t *adapter, unsigned int id, u32 data[4]);
+int t3_sge_cqcntxt_op(adapter_t *adapter, unsigned int id, unsigned int op,
+ unsigned int credits);
+
+void t3_mv88e1xxx_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
+ const struct mdio_ops *mdio_ops);
+void t3_vsc8211_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
+ const struct mdio_ops *mdio_ops);
+void t3_ael1002_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
+ const struct mdio_ops *mdio_ops);
+void t3_ael1006_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
+ const struct mdio_ops *mdio_ops);
+void t3_qt2045_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
+ const struct mdio_ops *mdio_ops);
+void t3_xaui_direct_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
+ const struct mdio_ops *mdio_ops);
+#endif /* __CHELSIO_COMMON_H */
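
A brief sketch of how the convenience macros and predicates above compose
(my_describe_adapter is an illustrative name, not driver code):

	/* Walk the ports and report capabilities using the header's helpers. */
	static void my_describe_adapter(adapter_t *adap)
	{
		int i;

		for_each_port(adap, i)	/* loops over params.nports */
			printf("port %d: %s\n", i, adapter_info(adap)->desc);
		if (is_10G(adap) && uses_xaui(adap))
			printf("10G, XAUI-attached\n");
	}
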
diff --git a/sys/dev/cxgb/common/cxgb_firmware_exports.h b/sys/dev/cxgb/common/cxgb_firmware_exports.h
new file mode 100644
index 0000000..99f97a8
--- /dev/null
+++ b/sys/dev/cxgb/common/cxgb_firmware_exports.h
@@ -0,0 +1,181 @@
+/**************************************************************************
+
+Copyright (c) 2007, Chelsio Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Chelsio Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+$FreeBSD$
+
+***************************************************************************/
+#ifndef _FIRMWARE_EXPORTS_H_
+#define _FIRMWARE_EXPORTS_H_
+
+/* WR OPCODES supported by the firmware.
+ */
+#define FW_WROPCODE_FORWARD 0x01
+#define FW_WROPCODE_BYPASS 0x05
+
+#define FW_WROPCODE_TUNNEL_TX_PKT 0x03
+
+#define FW_WROPCODE_ULPTX_DATA_SGL	0x00
+#define FW_WROPCODE_ULPTX_MEM_READ 0x02
+#define FW_WROPCODE_ULPTX_PKT 0x04
+#define FW_WROPCODE_ULPTX_INVALIDATE 0x06
+
+#define FW_WROPCODE_TUNNEL_RX_PKT 0x07
+
+#define FW_WROPCODE_OFLD_GETTCB_RPL 0x08
+#define FW_WROPCODE_OFLD_CLOSE_CON 0x09
+#define FW_WROPCODE_OFLD_TP_ABORT_CON_REQ 0x0A
+#define FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL 0x0F
+#define FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ 0x0B
+#define FW_WROPCODE_OFLD_TP_ABORT_CON_RPL 0x0C
+#define FW_WROPCODE_OFLD_TX_DATA 0x0D
+#define FW_WROPCODE_OFLD_TX_DATA_ACK 0x0E
+
+#define FW_WROPCODE_RI_RDMA_INIT 0x10
+#define FW_WROPCODE_RI_RDMA_WRITE 0x11
+#define FW_WROPCODE_RI_RDMA_READ_REQ 0x12
+#define FW_WROPCODE_RI_RDMA_READ_RESP 0x13
+#define FW_WROPCODE_RI_SEND 0x14
+#define FW_WROPCODE_RI_TERMINATE 0x15
+#define FW_WROPCODE_RI_RDMA_READ 0x16
+#define FW_WROPCODE_RI_RECEIVE 0x17
+#define FW_WROPCODE_RI_BIND_MW 0x18
+#define FW_WROPCODE_RI_FASTREGISTER_MR 0x19
+#define FW_WROPCODE_RI_LOCAL_INV 0x1A
+#define FW_WROPCODE_RI_MODIFY_QP 0x1B
+#define FW_WROPCODE_RI_BYPASS 0x1C
+
+#define FW_WROPCODE_RSVD	0x1E
+
+#define FW_WROPCODE_SGE_EGRESSCONTEXT_RR 0x1F
+
+#define FW_WROPCODE_MNGT 0x1D
+#define FW_MNGTOPCODE_PKTSCHED_SET 0x00
+
+/* Maximum size of a WR sent from the host, limited by the SGE.
+ *
+ * Note: WRs coming from ULP or TP are limited only by the CIM.
+ */
+#define FW_WR_SIZE 128
+
+/* Maximum number of outstanding WRs sent from the host. Value must be
+ * programmed in the CTRL/TUNNEL/QP SGE Egress Context and used by
+ * offload modules to limit the number of WRs per connection.
+ */
+#define FW_T3_WR_NUM 16
+#define FW_N3_WR_NUM 7
+
+#ifndef N3
+# define FW_WR_NUM FW_T3_WR_NUM
+#else
+# define FW_WR_NUM FW_N3_WR_NUM
+#endif
+
+/* FW_TUNNEL_NUM corresponds to the number of supported TUNNEL Queues. These
+ * queues must start at SGE Egress Context FW_TUNNEL_SGEEC_START and must
+ * start at 'TID' (or 'uP Token') FW_TUNNEL_TID_START.
+ *
+ * Ingress Traffic (e.g. DMA completion credit) for TUNNEL Queue[i] is sent
+ * to RESP Queue[i].
+ */
+#define FW_TUNNEL_NUM 8
+#define FW_TUNNEL_SGEEC_START 8
+#define FW_TUNNEL_TID_START 65544
+
+
+/* FW_CTRL_NUM corresponds to the number of supported CTRL Queues. These queues
+ * must start at SGE Egress Context FW_CTRL_SGEEC_START and must start at 'TID'
+ * (or 'uP Token') FW_CTRL_TID_START.
+ *
+ * Ingress Traffic for CTRL Queue[i] is sent to RESP Queue[i].
+ */
+#define FW_CTRL_NUM 8
+#define FW_CTRL_SGEEC_START 65528
+#define FW_CTRL_TID_START 65536
+
+/* FW_OFLD_NUM corresponds to the number of supported OFFLOAD Queues. These
+ * queues must start at SGE Egress Context FW_OFLD_SGEEC_START.
+ *
+ * Note: the 'uP Token' in the SGE Egress Context fields is irrelevant for
+ * OFFLOAD Queues, as the host is responsible for providing the correct TID in
+ * every WR.
+ *
+ * Ingress Traffic for OFFLOAD Queue[i] is sent to RESP Queue[i].
+ */
+#define FW_OFLD_NUM 8
+#define FW_OFLD_SGEEC_START 0
+
+/*
+ * RI (RDMA interface) queue parameters.
+ */
+#define FW_RI_NUM 1
+#define FW_RI_SGEEC_START 65527
+#define FW_RI_TID_START 65552
+
+/*
+ * The TID reserved for RX_PKT traffic.
+ */
+#define FW_RX_PKT_NUM 1
+#define FW_RX_PKT_TID_START 65553
+
+/* FW_WRC_NUM corresponds to the number of Work Request Contexts supported
+ * by the firmware.
+ */
+#define FW_WRC_NUM \
+ (65536 + FW_TUNNEL_NUM + FW_CTRL_NUM + FW_RI_NUM + FW_RX_PKT_NUM)
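+/* With the constants above this evaluates to 65536 + 8 + 8 + 1 + 1 = 65554. */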
+
+/*
+ * FW type and version.
+ */
+#define S_FW_VERSION_TYPE 28
+#define M_FW_VERSION_TYPE 0xF
+#define V_FW_VERSION_TYPE(x) ((x) << S_FW_VERSION_TYPE)
+#define G_FW_VERSION_TYPE(x) \
+ (((x) >> S_FW_VERSION_TYPE) & M_FW_VERSION_TYPE)
+
+#define S_FW_VERSION_MAJOR 16
+#define M_FW_VERSION_MAJOR 0xFFF
+#define V_FW_VERSION_MAJOR(x) ((x) << S_FW_VERSION_MAJOR)
+#define G_FW_VERSION_MAJOR(x) \
+ (((x) >> S_FW_VERSION_MAJOR) & M_FW_VERSION_MAJOR)
+
+#define S_FW_VERSION_MINOR 8
+#define M_FW_VERSION_MINOR 0xFF
+#define V_FW_VERSION_MINOR(x) ((x) << S_FW_VERSION_MINOR)
+#define G_FW_VERSION_MINOR(x) \
+ (((x) >> S_FW_VERSION_MINOR) & M_FW_VERSION_MINOR)
+
+#define S_FW_VERSION_MICRO 0
+#define M_FW_VERSION_MICRO 0xFF
+#define V_FW_VERSION_MICRO(x) ((x) << S_FW_VERSION_MICRO)
+#define G_FW_VERSION_MICRO(x) \
+ (((x) >> S_FW_VERSION_MICRO) & M_FW_VERSION_MICRO)
+
+#endif /* _FIRMWARE_EXPORTS_H_ */
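
The S_/M_/V_/G_ macro quartet above (shift, mask, insert, extract) packs the
firmware type and version into a single 32-bit word; a sketch of both
directions, with my_fw_version an assumed helper name:

	/* Pack major/minor/micro at their shifts; the G_ getters invert this. */
	static u32 my_fw_version(unsigned int major, unsigned int minor,
				 unsigned int micro)
	{
		return V_FW_VERSION_MAJOR(major) | V_FW_VERSION_MINOR(minor) |
		       V_FW_VERSION_MICRO(micro);
	}

	/* e.g. G_FW_VERSION_MAJOR(my_fw_version(3, 2, 0)) == 3 */
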
diff --git a/sys/dev/cxgb/common/cxgb_mc5.c b/sys/dev/cxgb/common/cxgb_mc5.c
new file mode 100644
index 0000000..191d77d
--- /dev/null
+++ b/sys/dev/cxgb/common/cxgb_mc5.c
@@ -0,0 +1,474 @@
+/**************************************************************************
+
+Copyright (c) 2007, Chelsio Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Chelsio Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/cxgb/common/cxgb_common.h>
+#include <dev/cxgb/common/cxgb_regs.h>
+
+enum {
+ IDT75P52100 = 4,
+ IDT75N43102 = 5
+};
+
+/* DBGI command mode */
+enum {
+ DBGI_MODE_MBUS = 0,
+ DBGI_MODE_IDT52100 = 5
+};
+
+/* IDT 75P52100 commands */
+#define IDT_CMD_READ 0
+#define IDT_CMD_WRITE 1
+#define IDT_CMD_SEARCH 2
+#define IDT_CMD_LEARN 3
+
+/* IDT LAR register address and value for 144-bit mode (low 32 bits) */
+#define IDT_LAR_ADR0 0x180006
+#define IDT_LAR_MODE144 0xffff0000
+
+/* IDT SCR and SSR addresses (low 32 bits) */
+#define IDT_SCR_ADR0 0x180000
+#define IDT_SSR0_ADR0 0x180002
+#define IDT_SSR1_ADR0 0x180004
+
+/* IDT GMR base address (low 32 bits) */
+#define IDT_GMR_BASE_ADR0 0x180020
+
+/* IDT data and mask array base addresses (low 32 bits) */
+#define IDT_DATARY_BASE_ADR0 0
+#define IDT_MSKARY_BASE_ADR0 0x80000
+
+/* IDT 75N43102 commands */
+#define IDT4_CMD_SEARCH144 3
+#define IDT4_CMD_WRITE 4
+#define IDT4_CMD_READ 5
+
+/* IDT 75N43102 SCR address (low 32 bits) */
+#define IDT4_SCR_ADR0 0x3
+
+/* IDT 75N43102 GMR base addresses (low 32 bits) */
+#define IDT4_GMR_BASE0 0x10
+#define IDT4_GMR_BASE1 0x20
+#define IDT4_GMR_BASE2 0x30
+
+/* IDT 75N43102 data and mask array base addresses (low 32 bits) */
+#define IDT4_DATARY_BASE_ADR0 0x1000000
+#define IDT4_MSKARY_BASE_ADR0 0x2000000
+
+#define MAX_WRITE_ATTEMPTS 5
+
+#define MAX_ROUTES 2048
+
+/*
+ * Issue a command to the TCAM and wait for its completion. The address and
+ * any data required by the command must have been set up by the caller.
+ */
+static int mc5_cmd_write(adapter_t *adapter, u32 cmd)
+{
+ t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_CMD, cmd);
+ return t3_wait_op_done(adapter, A_MC5_DB_DBGI_RSP_STATUS,
+ F_DBGIRSPVALID, 1, MAX_WRITE_ATTEMPTS, 1);
+}
+
+static inline void dbgi_wr_addr3(adapter_t *adapter, u32 v1, u32 v2, u32 v3)
+{
+ t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR0, v1);
+ t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR1, v2);
+ t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR2, v3);
+}
+
+static inline void dbgi_wr_data3(adapter_t *adapter, u32 v1, u32 v2, u32 v3)
+{
+ t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA0, v1);
+ t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA1, v2);
+ t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA2, v3);
+}
+
+static inline void dbgi_rd_rsp3(adapter_t *adapter, u32 *v1, u32 *v2, u32 *v3)
+{
+ *v1 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA0);
+ *v2 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA1);
+ *v3 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA2);
+}
+
+/*
+ * Write data to the TCAM register at address (0, 0, addr_lo) using the TCAM
+ * command cmd. The data to be written must have been set up by the caller.
+ * Returns -1 on failure, 0 on success.
+ */
+static int mc5_write(adapter_t *adapter, u32 addr_lo, u32 cmd)
+{
+ t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR0, addr_lo);
+ if (mc5_cmd_write(adapter, cmd) == 0)
+ return 0;
+ CH_ERR(adapter, "MC5 timeout writing to TCAM address 0x%x\n", addr_lo);
+ return -1;
+}
+
+static int init_mask_data_array(struct mc5 *mc5, u32 mask_array_base,
+ u32 data_array_base, u32 write_cmd,
+ int addr_shift)
+{
+ unsigned int i;
+ adapter_t *adap = mc5->adapter;
+
+ /*
+ * We need the size of the TCAM data and mask arrays in terms of
+ * 72-bit entries.
+ */
+ unsigned int size72 = mc5->tcam_size;
+ unsigned int server_base = t3_read_reg(adap, A_MC5_DB_SERVER_INDEX);
+
+ if (mc5->mode == MC5_MODE_144_BIT) {
+ size72 *= 2; /* 1 144-bit entry is 2 72-bit entries */
+ server_base *= 2;
+ }
+
+ /* Clear the data array */
+ dbgi_wr_data3(adap, 0, 0, 0);
+ for (i = 0; i < size72; i++)
+ if (mc5_write(adap, data_array_base + (i << addr_shift),
+ write_cmd))
+ return -1;
+
+ /* Initialize the mask array. */
+ dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
+ for (i = 0; i < size72; i++) {
+ if (i == server_base) /* entering server or routing region */
+ t3_write_reg(adap, A_MC5_DB_DBGI_REQ_DATA0,
+ mc5->mode == MC5_MODE_144_BIT ?
+ 0xfffffff9 : 0xfffffffd);
+ if (mc5_write(adap, mask_array_base + (i << addr_shift),
+ write_cmd))
+ return -1;
+ }
+ return 0;
+}
+
+static int init_idt52100(struct mc5 *mc5)
+{
+ int i;
+ adapter_t *adap = mc5->adapter;
+
+ t3_write_reg(adap, A_MC5_DB_RSP_LATENCY,
+ V_RDLAT(0x15) | V_LRNLAT(0x15) | V_SRCHLAT(0x15));
+ t3_write_reg(adap, A_MC5_DB_PART_ID_INDEX, 2);
+
+ /*
+ * Use GMRs 14-15 for ELOOKUP, GMRs 12-13 for SYN lookups, and
+ * GMRs 8-9 for ACK- and AOPEN searches.
+ */
+ t3_write_reg(adap, A_MC5_DB_POPEN_DATA_WR_CMD, IDT_CMD_WRITE);
+ t3_write_reg(adap, A_MC5_DB_POPEN_MASK_WR_CMD, IDT_CMD_WRITE);
+ t3_write_reg(adap, A_MC5_DB_AOPEN_SRCH_CMD, IDT_CMD_SEARCH);
+ t3_write_reg(adap, A_MC5_DB_AOPEN_LRN_CMD, IDT_CMD_LEARN);
+ t3_write_reg(adap, A_MC5_DB_SYN_SRCH_CMD, IDT_CMD_SEARCH | 0x6000);
+ t3_write_reg(adap, A_MC5_DB_SYN_LRN_CMD, IDT_CMD_LEARN);
+ t3_write_reg(adap, A_MC5_DB_ACK_SRCH_CMD, IDT_CMD_SEARCH);
+ t3_write_reg(adap, A_MC5_DB_ACK_LRN_CMD, IDT_CMD_LEARN);
+ t3_write_reg(adap, A_MC5_DB_ILOOKUP_CMD, IDT_CMD_SEARCH);
+ t3_write_reg(adap, A_MC5_DB_ELOOKUP_CMD, IDT_CMD_SEARCH | 0x7000);
+ t3_write_reg(adap, A_MC5_DB_DATA_WRITE_CMD, IDT_CMD_WRITE);
+ t3_write_reg(adap, A_MC5_DB_DATA_READ_CMD, IDT_CMD_READ);
+
+ /* Set DBGI command mode for IDT TCAM. */
+ t3_write_reg(adap, A_MC5_DB_DBGI_CONFIG, DBGI_MODE_IDT52100);
+
+ /* Set up LAR */
+ dbgi_wr_data3(adap, IDT_LAR_MODE144, 0, 0);
+ if (mc5_write(adap, IDT_LAR_ADR0, IDT_CMD_WRITE))
+ goto err;
+
+ /* Set up SSRs */
+ dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0);
+ if (mc5_write(adap, IDT_SSR0_ADR0, IDT_CMD_WRITE) ||
+ mc5_write(adap, IDT_SSR1_ADR0, IDT_CMD_WRITE))
+ goto err;
+
+ /* Set up GMRs */
+ for (i = 0; i < 32; ++i) {
+ if (i >= 12 && i < 15)
+ dbgi_wr_data3(adap, 0xfffffff9, 0xffffffff, 0xff);
+ else if (i == 15)
+ dbgi_wr_data3(adap, 0xfffffff9, 0xffff8007, 0xff);
+ else
+ dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
+
+ if (mc5_write(adap, IDT_GMR_BASE_ADR0 + i, IDT_CMD_WRITE))
+ goto err;
+ }
+
+ /* Set up SCR */
+ dbgi_wr_data3(adap, 1, 0, 0);
+ if (mc5_write(adap, IDT_SCR_ADR0, IDT_CMD_WRITE))
+ goto err;
+
+ return init_mask_data_array(mc5, IDT_MSKARY_BASE_ADR0,
+ IDT_DATARY_BASE_ADR0, IDT_CMD_WRITE, 0);
+ err:
+ return -EIO;
+}
+
+static int init_idt43102(struct mc5 *mc5)
+{
+ int i;
+ adapter_t *adap = mc5->adapter;
+
+ t3_write_reg(adap, A_MC5_DB_RSP_LATENCY,
+ adap->params.rev == 0 ? V_RDLAT(0xd) | V_SRCHLAT(0x11) :
+ V_RDLAT(0xd) | V_SRCHLAT(0x12));
+
+ /*
+ * Use GMRs 24-25 for ELOOKUP, GMRs 20-21 for SYN lookups, and no mask
+ * for ACK- and AOPEN searches.
+ */
+ t3_write_reg(adap, A_MC5_DB_POPEN_DATA_WR_CMD, IDT4_CMD_WRITE);
+ t3_write_reg(adap, A_MC5_DB_POPEN_MASK_WR_CMD, IDT4_CMD_WRITE);
+ t3_write_reg(adap, A_MC5_DB_AOPEN_SRCH_CMD,
+ IDT4_CMD_SEARCH144 | 0x3800);
+ t3_write_reg(adap, A_MC5_DB_SYN_SRCH_CMD, IDT4_CMD_SEARCH144);
+ t3_write_reg(adap, A_MC5_DB_ACK_SRCH_CMD, IDT4_CMD_SEARCH144 | 0x3800);
+ t3_write_reg(adap, A_MC5_DB_ILOOKUP_CMD, IDT4_CMD_SEARCH144 | 0x3800);
+ t3_write_reg(adap, A_MC5_DB_ELOOKUP_CMD, IDT4_CMD_SEARCH144 | 0x800);
+ t3_write_reg(adap, A_MC5_DB_DATA_WRITE_CMD, IDT4_CMD_WRITE);
+ t3_write_reg(adap, A_MC5_DB_DATA_READ_CMD, IDT4_CMD_READ);
+
+ t3_write_reg(adap, A_MC5_DB_PART_ID_INDEX, 3);
+
+ /* Set DBGI command mode for IDT TCAM. */
+ t3_write_reg(adap, A_MC5_DB_DBGI_CONFIG, DBGI_MODE_IDT52100);
+
+ /* Set up GMRs */
+ dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
+ for (i = 0; i < 7; ++i)
+ if (mc5_write(adap, IDT4_GMR_BASE0 + i, IDT4_CMD_WRITE))
+ goto err;
+
+ for (i = 0; i < 4; ++i)
+ if (mc5_write(adap, IDT4_GMR_BASE2 + i, IDT4_CMD_WRITE))
+ goto err;
+
+ dbgi_wr_data3(adap, 0xfffffff9, 0xffffffff, 0xff);
+ if (mc5_write(adap, IDT4_GMR_BASE1, IDT4_CMD_WRITE) ||
+ mc5_write(adap, IDT4_GMR_BASE1 + 1, IDT4_CMD_WRITE) ||
+ mc5_write(adap, IDT4_GMR_BASE1 + 4, IDT4_CMD_WRITE))
+ goto err;
+
+ dbgi_wr_data3(adap, 0xfffffff9, 0xffff8007, 0xff);
+ if (mc5_write(adap, IDT4_GMR_BASE1 + 5, IDT4_CMD_WRITE))
+ goto err;
+
+ /* Set up SCR */
+ dbgi_wr_data3(adap, 0xf0000000, 0, 0);
+ if (mc5_write(adap, IDT4_SCR_ADR0, IDT4_CMD_WRITE))
+ goto err;
+
+ return init_mask_data_array(mc5, IDT4_MSKARY_BASE_ADR0,
+ IDT4_DATARY_BASE_ADR0, IDT4_CMD_WRITE, 1);
+ err:
+ return -EIO;
+}
+
+/* Put MC5 in DBGI mode. */
+static inline void mc5_dbgi_mode_enable(const struct mc5 *mc5)
+{
+ t3_write_reg(mc5->adapter, A_MC5_DB_CONFIG,
+ V_TMMODE(mc5->mode == MC5_MODE_72_BIT) | F_DBGIEN);
+}
+
+/* Put MC5 in M-Bus mode. */
+static void mc5_dbgi_mode_disable(const struct mc5 *mc5)
+{
+ t3_write_reg(mc5->adapter, A_MC5_DB_CONFIG,
+ V_TMMODE(mc5->mode == MC5_MODE_72_BIT) |
+ V_COMPEN(mc5->mode == MC5_MODE_72_BIT) |
+ V_PRTYEN(mc5->parity_enabled) | F_MBUSEN);
+}
+
+/*
+ * Initialization that requires the OS and protocol layers to already
+ * be initialized goes here.
+ */
+int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
+ unsigned int nroutes)
+{
+ u32 cfg;
+ int err;
+ unsigned int tcam_size = mc5->tcam_size;
+ adapter_t *adap = mc5->adapter;
+
+ if (nroutes > MAX_ROUTES || nroutes + nservers + nfilters > tcam_size)
+ return -EINVAL;
+
+ /* Reset the TCAM */
+ cfg = t3_read_reg(adap, A_MC5_DB_CONFIG) & ~F_TMMODE;
+ cfg |= V_TMMODE(mc5->mode == MC5_MODE_72_BIT) | F_TMRST;
+ t3_write_reg(adap, A_MC5_DB_CONFIG, cfg);
+ if (t3_wait_op_done(adap, A_MC5_DB_CONFIG, F_TMRDY, 1, 500, 0)) {
+ CH_ERR(adap, "TCAM reset timed out\n");
+ return -1;
+ }
+
+ t3_write_reg(adap, A_MC5_DB_ROUTING_TABLE_INDEX, tcam_size - nroutes);
+ t3_write_reg(adap, A_MC5_DB_FILTER_TABLE,
+ tcam_size - nroutes - nfilters);
+ t3_write_reg(adap, A_MC5_DB_SERVER_INDEX,
+ tcam_size - nroutes - nfilters - nservers);
+
+ mc5->parity_enabled = 1;
+
+	/* All the TCAM addresses we access have only the low 32 bits non-zero */
+ t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR1, 0);
+ t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR2, 0);
+
+ mc5_dbgi_mode_enable(mc5);
+
+ switch (mc5->part_type) {
+ case IDT75P52100:
+ err = init_idt52100(mc5);
+ break;
+ case IDT75N43102:
+ err = init_idt43102(mc5);
+ break;
+ default:
+ CH_ERR(adap, "Unsupported TCAM type %d\n", mc5->part_type);
+ err = -EINVAL;
+ break;
+ }
+
+ mc5_dbgi_mode_disable(mc5);
+ return err;
+}
+
+/*
+ * t3_read_mc5_range - dump a part of the memory managed by MC5
+ * @mc5: the MC5 handle
+ * @start: the start address for the dump
+ * @n: number of 72-bit words to read
+ * @buf: result buffer
+ *
+ * Read n 72-bit words from MC5 memory from the given start location.
+ */
+int t3_read_mc5_range(const struct mc5 *mc5, unsigned int start,
+ unsigned int n, u32 *buf)
+{
+ u32 read_cmd;
+ int err = 0;
+ adapter_t *adap = mc5->adapter;
+
+ if (mc5->part_type == IDT75P52100)
+ read_cmd = IDT_CMD_READ;
+ else if (mc5->part_type == IDT75N43102)
+ read_cmd = IDT4_CMD_READ;
+ else
+ return -EINVAL;
+
+ mc5_dbgi_mode_enable(mc5);
+
+ while (n--) {
+ t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR0, start++);
+ if (mc5_cmd_write(adap, read_cmd)) {
+ err = -EIO;
+ break;
+ }
+ dbgi_rd_rsp3(adap, buf + 2, buf + 1, buf);
+ buf += 3;
+ }
+
+ mc5_dbgi_mode_disable(mc5);
+	return err;
+}
+
+#define MC5_INT_FATAL (F_PARITYERR | F_REQQPARERR | F_DISPQPARERR)
+
+/*
+ * MC5 interrupt handler
+ */
+void t3_mc5_intr_handler(struct mc5 *mc5)
+{
+ adapter_t *adap = mc5->adapter;
+ u32 cause = t3_read_reg(adap, A_MC5_DB_INT_CAUSE);
+
+ if ((cause & F_PARITYERR) && mc5->parity_enabled) {
+ CH_ALERT(adap, "MC5 parity error\n");
+ mc5->stats.parity_err++;
+ }
+
+ if (cause & F_REQQPARERR) {
+ CH_ALERT(adap, "MC5 request queue parity error\n");
+ mc5->stats.reqq_parity_err++;
+ }
+
+ if (cause & F_DISPQPARERR) {
+ CH_ALERT(adap, "MC5 dispatch queue parity error\n");
+ mc5->stats.dispq_parity_err++;
+ }
+
+ if (cause & F_ACTRGNFULL)
+ mc5->stats.active_rgn_full++;
+ if (cause & F_NFASRCHFAIL)
+ mc5->stats.nfa_srch_err++;
+ if (cause & F_UNKNOWNCMD)
+ mc5->stats.unknown_cmd++;
+ if (cause & F_DELACTEMPTY)
+ mc5->stats.del_act_empty++;
+ if (cause & MC5_INT_FATAL)
+ t3_fatal_err(adap);
+
+ t3_write_reg(adap, A_MC5_DB_INT_CAUSE, cause);
+}
+
+void __devinit t3_mc5_prep(adapter_t *adapter, struct mc5 *mc5, int mode)
+{
+#define K * 1024
+
+ static unsigned int tcam_part_size[] = { /* in K 72-bit entries */
+ 64 K, 128 K, 256 K, 32 K
+ };
+
+#undef K
+
+ u32 cfg = t3_read_reg(adapter, A_MC5_DB_CONFIG);
+
+ mc5->adapter = adapter;
+ mc5->mode = (unsigned char) mode;
+ mc5->part_type = (unsigned char) G_TMTYPE(cfg);
+ if (cfg & F_TMTYPEHI)
+ mc5->part_type |= 4;
+
+ mc5->tcam_size = tcam_part_size[G_TMPARTSIZE(cfg)];
+ if (mode == MC5_MODE_144_BIT)
+ mc5->tcam_size /= 2;
+}
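
t3_mc5_init() above carves the TCAM from the top down (routes at the highest
indices, then filters, then servers); the region bases it programs reduce to
the arithmetic in this sketch (my_mc5_layout is hypothetical):

	/* Mirror the A_MC5_DB_*_INDEX writes performed by t3_mc5_init(). */
	static void my_mc5_layout(unsigned int tcam_size, unsigned int nservers,
				  unsigned int nfilters, unsigned int nroutes)
	{
		unsigned int routes  = tcam_size - nroutes;
		unsigned int filters = routes - nfilters;
		unsigned int servers = filters - nservers;

		printf("servers@%u filters@%u routes@%u\n",
		    servers, filters, routes);
	}
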
diff --git a/sys/dev/cxgb/common/cxgb_mv88e1xxx.c b/sys/dev/cxgb/common/cxgb_mv88e1xxx.c
new file mode 100644
index 0000000..688b6e0
--- /dev/null
+++ b/sys/dev/cxgb/common/cxgb_mv88e1xxx.c
@@ -0,0 +1,302 @@
+/**************************************************************************
+
+Copyright (c) 2007, Chelsio Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Chelsio Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/cxgb/common/cxgb_common.h>
+
+/* Marvell PHY interrupt status bits. */
+#define MV_INTR_JABBER 0x0001
+#define MV_INTR_POLARITY_CHNG 0x0002
+#define MV_INTR_ENG_DETECT_CHNG 0x0010
+#define MV_INTR_DOWNSHIFT 0x0020
+#define MV_INTR_MDI_XOVER_CHNG 0x0040
+#define MV_INTR_FIFO_OVER_UNDER 0x0080
+#define MV_INTR_FALSE_CARRIER 0x0100
+#define MV_INTR_SYMBOL_ERROR 0x0200
+#define MV_INTR_LINK_CHNG 0x0400
+#define MV_INTR_AUTONEG_DONE 0x0800
+#define MV_INTR_PAGE_RECV 0x1000
+#define MV_INTR_DUPLEX_CHNG 0x2000
+#define MV_INTR_SPEED_CHNG 0x4000
+#define MV_INTR_AUTONEG_ERR 0x8000
+
+/* Marvell PHY specific registers. */
+#define MV88E1XXX_SPECIFIC_CNTRL 16
+#define MV88E1XXX_SPECIFIC_STATUS 17
+#define MV88E1XXX_INTR_ENABLE 18
+#define MV88E1XXX_INTR_STATUS 19
+#define MV88E1XXX_EXT_SPECIFIC_CNTRL 20
+#define MV88E1XXX_RECV_ERR 21
+#define MV88E1XXX_EXT_ADDR 22
+#define MV88E1XXX_GLOBAL_STATUS 23
+#define MV88E1XXX_LED_CNTRL 24
+#define MV88E1XXX_LED_OVERRIDE 25
+#define MV88E1XXX_EXT_SPECIFIC_CNTRL2 26
+#define MV88E1XXX_EXT_SPECIFIC_STATUS 27
+#define MV88E1XXX_VIRTUAL_CABLE_TESTER 28
+#define MV88E1XXX_EXTENDED_ADDR 29
+#define MV88E1XXX_EXTENDED_DATA 30
+
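+/*
+ * Field accessor convention used throughout the cxgb headers: S_<FIELD> is
+ * the bit offset, M_<FIELD> the right-justified mask, V_<FIELD>(x) shifts a
+ * value into position, F_<FIELD> is the one-bit flag form, and G_<FIELD>(x)
+ * extracts the field from a register value.
+ */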
+/* PHY specific control register fields */
+#define S_PSCR_MDI_XOVER_MODE 5
+#define M_PSCR_MDI_XOVER_MODE 0x3
+#define V_PSCR_MDI_XOVER_MODE(x) ((x) << S_PSCR_MDI_XOVER_MODE)
+
+/* Extended PHY specific control register fields */
+#define S_DOWNSHIFT_ENABLE 8
+#define V_DOWNSHIFT_ENABLE (1 << S_DOWNSHIFT_ENABLE)
+
+#define S_DOWNSHIFT_CNT 9
+#define M_DOWNSHIFT_CNT 0x7
+#define V_DOWNSHIFT_CNT(x) ((x) << S_DOWNSHIFT_CNT)
+
+/* PHY specific status register fields */
+#define S_PSSR_JABBER 0
+#define V_PSSR_JABBER (1 << S_PSSR_JABBER)
+
+#define S_PSSR_POLARITY 1
+#define V_PSSR_POLARITY (1 << S_PSSR_POLARITY)
+
+#define S_PSSR_RX_PAUSE 2
+#define V_PSSR_RX_PAUSE (1 << S_PSSR_RX_PAUSE)
+
+#define S_PSSR_TX_PAUSE 3
+#define V_PSSR_TX_PAUSE (1 << S_PSSR_TX_PAUSE)
+
+#define S_PSSR_ENERGY_DETECT 4
+#define V_PSSR_ENERGY_DETECT (1 << S_PSSR_ENERGY_DETECT)
+
+#define S_PSSR_DOWNSHIFT_STATUS 5
+#define V_PSSR_DOWNSHIFT_STATUS (1 << S_PSSR_DOWNSHIFT_STATUS)
+
+#define S_PSSR_MDI 6
+#define V_PSSR_MDI (1 << S_PSSR_MDI)
+
+#define S_PSSR_CABLE_LEN 7
+#define M_PSSR_CABLE_LEN 0x7
+#define V_PSSR_CABLE_LEN(x) ((x) << S_PSSR_CABLE_LEN)
+#define G_PSSR_CABLE_LEN(x) (((x) >> S_PSSR_CABLE_LEN) & M_PSSR_CABLE_LEN)
+
+#define S_PSSR_LINK 10
+#define V_PSSR_LINK (1 << S_PSSR_LINK)
+
+#define S_PSSR_STATUS_RESOLVED 11
+#define V_PSSR_STATUS_RESOLVED (1 << S_PSSR_STATUS_RESOLVED)
+
+#define S_PSSR_PAGE_RECEIVED 12
+#define V_PSSR_PAGE_RECEIVED (1 << S_PSSR_PAGE_RECEIVED)
+
+#define S_PSSR_DUPLEX 13
+#define V_PSSR_DUPLEX (1 << S_PSSR_DUPLEX)
+
+#define S_PSSR_SPEED 14
+#define M_PSSR_SPEED 0x3
+#define V_PSSR_SPEED(x) ((x) << S_PSSR_SPEED)
+#define G_PSSR_SPEED(x) (((x) >> S_PSSR_SPEED) & M_PSSR_SPEED)
+
+/* MV88E1XXX MDI crossover register values */
+#define CROSSOVER_MDI 0
+#define CROSSOVER_MDIX 1
+#define CROSSOVER_AUTO 3
+
+#define INTR_ENABLE_MASK (MV_INTR_SPEED_CHNG | MV_INTR_DUPLEX_CHNG | \
+ MV_INTR_AUTONEG_DONE | MV_INTR_LINK_CHNG | MV_INTR_FIFO_OVER_UNDER | \
+ MV_INTR_ENG_DETECT_CHNG)
+
+/*
+ * Reset the PHY. If 'wait' is set, wait until the reset completes.
+ */
+static int mv88e1xxx_reset(struct cphy *cphy, int wait)
+{
+ return t3_phy_reset(cphy, 0, wait);
+}
+
+static int mv88e1xxx_intr_enable(struct cphy *cphy)
+{
+ return mdio_write(cphy, 0, MV88E1XXX_INTR_ENABLE, INTR_ENABLE_MASK);
+}
+
+static int mv88e1xxx_intr_disable(struct cphy *cphy)
+{
+ return mdio_write(cphy, 0, MV88E1XXX_INTR_ENABLE, 0);
+}
+
+static int mv88e1xxx_intr_clear(struct cphy *cphy)
+{
+ u32 val;
+
+ /* Clear PHY interrupts by reading the register. */
+ return mdio_read(cphy, 0, MV88E1XXX_INTR_STATUS, &val);
+}
+
+static int mv88e1xxx_crossover_set(struct cphy *cphy, int crossover)
+{
+ return t3_mdio_change_bits(cphy, 0, MV88E1XXX_SPECIFIC_CNTRL,
+ V_PSCR_MDI_XOVER_MODE(M_PSCR_MDI_XOVER_MODE),
+ V_PSCR_MDI_XOVER_MODE(crossover));
+}
+
+static int mv88e1xxx_autoneg_enable(struct cphy *cphy)
+{
+ mv88e1xxx_crossover_set(cphy, CROSSOVER_AUTO);
+
+	/* restart autoneg for the change to take effect */
+ return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE,
+ BMCR_ANENABLE | BMCR_ANRESTART);
+}
+
+static int mv88e1xxx_autoneg_restart(struct cphy *cphy)
+{
+ return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE,
+ BMCR_ANRESTART);
+}
+
+static int mv88e1xxx_set_loopback(struct cphy *cphy, int mmd, int dir, int on)
+{
+ return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_LOOPBACK,
+ on ? BMCR_LOOPBACK : 0);
+}
+
+static int mv88e1xxx_get_link_status(struct cphy *cphy, int *link_ok,
+ int *speed, int *duplex, int *fc)
+{
+ u32 status;
+ int sp = -1, dplx = -1, pause = 0;
+
+ mdio_read(cphy, 0, MV88E1XXX_SPECIFIC_STATUS, &status);
+ if ((status & V_PSSR_STATUS_RESOLVED) != 0) {
+ if (status & V_PSSR_RX_PAUSE)
+ pause |= PAUSE_RX;
+ if (status & V_PSSR_TX_PAUSE)
+ pause |= PAUSE_TX;
+ dplx = (status & V_PSSR_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
+ sp = G_PSSR_SPEED(status);
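+		/*
+		 * PSSR speed encoding: 0 is 10Mb/s, 1 is 100Mb/s; any other
+		 * value is treated as 1000Mb/s.
+		 */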
+ if (sp == 0)
+ sp = SPEED_10;
+ else if (sp == 1)
+ sp = SPEED_100;
+ else
+ sp = SPEED_1000;
+ }
+ if (link_ok)
+ *link_ok = (status & V_PSSR_LINK) != 0;
+ if (speed)
+ *speed = sp;
+ if (duplex)
+ *duplex = dplx;
+ if (fc)
+ *fc = pause;
+ return 0;
+}
+
+static int mv88e1xxx_downshift_set(struct cphy *cphy, int downshift_enable)
+{
+ /*
+	 * Set the downshift counter to 2 so we try to establish a Gb link
+	 * twice before downshifting.
+ */
+ return t3_mdio_change_bits(cphy, 0, MV88E1XXX_EXT_SPECIFIC_CNTRL,
+ V_DOWNSHIFT_ENABLE | V_DOWNSHIFT_CNT(M_DOWNSHIFT_CNT),
+ downshift_enable ? V_DOWNSHIFT_ENABLE | V_DOWNSHIFT_CNT(2) : 0);
+}
+
+static int mv88e1xxx_power_down(struct cphy *cphy, int enable)
+{
+ return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN,
+ enable ? BMCR_PDOWN : 0);
+}
+
+static int mv88e1xxx_intr_handler(struct cphy *cphy)
+{
+ const u32 link_change_intrs = MV_INTR_LINK_CHNG |
+ MV_INTR_AUTONEG_DONE | MV_INTR_DUPLEX_CHNG |
+ MV_INTR_SPEED_CHNG | MV_INTR_DOWNSHIFT;
+
+ u32 cause;
+ int cphy_cause = 0;
+
+ mdio_read(cphy, 0, MV88E1XXX_INTR_STATUS, &cause);
+ cause &= INTR_ENABLE_MASK;
+ if (cause & link_change_intrs)
+ cphy_cause |= cphy_cause_link_change;
+ if (cause & MV_INTR_FIFO_OVER_UNDER)
+ cphy_cause |= cphy_cause_fifo_error;
+ return cphy_cause;
+}
+
+#ifdef C99_NOT_SUPPORTED
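+/*
+ * Positional form for compilers without C99 designated initializers; the
+ * members must appear in declaration order, and the leading NULL fills an
+ * ops slot this PHY does not implement.
+ */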
+static struct cphy_ops mv88e1xxx_ops = {
+ NULL,
+ mv88e1xxx_reset,
+ mv88e1xxx_intr_enable,
+ mv88e1xxx_intr_disable,
+ mv88e1xxx_intr_clear,
+ mv88e1xxx_intr_handler,
+ mv88e1xxx_autoneg_enable,
+ mv88e1xxx_autoneg_restart,
+ t3_phy_advertise,
+ mv88e1xxx_set_loopback,
+ t3_set_phy_speed_duplex,
+ mv88e1xxx_get_link_status,
+ mv88e1xxx_power_down,
+};
+#else
+static struct cphy_ops mv88e1xxx_ops = {
+ .reset = mv88e1xxx_reset,
+ .intr_enable = mv88e1xxx_intr_enable,
+ .intr_disable = mv88e1xxx_intr_disable,
+ .intr_clear = mv88e1xxx_intr_clear,
+ .intr_handler = mv88e1xxx_intr_handler,
+ .autoneg_enable = mv88e1xxx_autoneg_enable,
+ .autoneg_restart = mv88e1xxx_autoneg_restart,
+ .advertise = t3_phy_advertise,
+ .set_loopback = mv88e1xxx_set_loopback,
+ .set_speed_duplex = t3_set_phy_speed_duplex,
+ .get_link_status = mv88e1xxx_get_link_status,
+ .power_down = mv88e1xxx_power_down,
+};
+#endif
+
+void t3_mv88e1xxx_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
+ const struct mdio_ops *mdio_ops)
+{
+ cphy_init(phy, adapter, phy_addr, &mv88e1xxx_ops, mdio_ops);
+
+ /* Configure copper PHY transmitter as class A to reduce EMI. */
+ mdio_write(phy, 0, MV88E1XXX_EXTENDED_ADDR, 0xb);
+ mdio_write(phy, 0, MV88E1XXX_EXTENDED_DATA, 0x8004);
+
+ mv88e1xxx_downshift_set(phy, 1); /* Enable downshift */
+}
diff --git a/sys/dev/cxgb/common/cxgb_regs.h b/sys/dev/cxgb/common/cxgb_regs.h
new file mode 100644
index 0000000..153116f
--- /dev/null
+++ b/sys/dev/cxgb/common/cxgb_regs.h
@@ -0,0 +1,7645 @@
+/**************************************************************************
+
+Copyright (c) 2007, Chelsio Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Chelsio Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+$FreeBSD$
+
+***************************************************************************/
+/* This file is automatically generated --- do not edit */
+
+/* registers for module SGE3 */
+#define SGE3_BASE_ADDR 0x0
+
+#define A_SG_CONTROL 0x0
+
+#define S_EGRENUPBP 21
+#define V_EGRENUPBP(x) ((x) << S_EGRENUPBP)
+#define F_EGRENUPBP V_EGRENUPBP(1U)
+
+#define S_DROPPKT 20
+#define V_DROPPKT(x) ((x) << S_DROPPKT)
+#define F_DROPPKT V_DROPPKT(1U)
+
+#define S_EGRGENCTRL 19
+#define V_EGRGENCTRL(x) ((x) << S_EGRGENCTRL)
+#define F_EGRGENCTRL V_EGRGENCTRL(1U)
+
+#define S_USERSPACESIZE 14
+#define M_USERSPACESIZE 0x1f
+#define V_USERSPACESIZE(x) ((x) << S_USERSPACESIZE)
+#define G_USERSPACESIZE(x) (((x) >> S_USERSPACESIZE) & M_USERSPACESIZE)
+
+#define S_HOSTPAGESIZE 11
+#define M_HOSTPAGESIZE 0x7
+#define V_HOSTPAGESIZE(x) ((x) << S_HOSTPAGESIZE)
+#define G_HOSTPAGESIZE(x) (((x) >> S_HOSTPAGESIZE) & M_HOSTPAGESIZE)
+
+#define S_PCIRELAX 10
+#define V_PCIRELAX(x) ((x) << S_PCIRELAX)
+#define F_PCIRELAX V_PCIRELAX(1U)
+
+#define S_FLMODE 9
+#define V_FLMODE(x) ((x) << S_FLMODE)
+#define F_FLMODE V_FLMODE(1U)
+
+#define S_PKTSHIFT 6
+#define M_PKTSHIFT 0x7
+#define V_PKTSHIFT(x) ((x) << S_PKTSHIFT)
+#define G_PKTSHIFT(x) (((x) >> S_PKTSHIFT) & M_PKTSHIFT)
+
+#define S_ONEINTMULTQ 5
+#define V_ONEINTMULTQ(x) ((x) << S_ONEINTMULTQ)
+#define F_ONEINTMULTQ V_ONEINTMULTQ(1U)
+
+#define S_FLPICKAVAIL 4
+#define V_FLPICKAVAIL(x) ((x) << S_FLPICKAVAIL)
+#define F_FLPICKAVAIL V_FLPICKAVAIL(1U)
+
+#define S_BIGENDIANEGRESS 3
+#define V_BIGENDIANEGRESS(x) ((x) << S_BIGENDIANEGRESS)
+#define F_BIGENDIANEGRESS V_BIGENDIANEGRESS(1U)
+
+#define S_BIGENDIANINGRESS 2
+#define V_BIGENDIANINGRESS(x) ((x) << S_BIGENDIANINGRESS)
+#define F_BIGENDIANINGRESS V_BIGENDIANINGRESS(1U)
+
+#define S_ISCSICOALESCING 1
+#define V_ISCSICOALESCING(x) ((x) << S_ISCSICOALESCING)
+#define F_ISCSICOALESCING V_ISCSICOALESCING(1U)
+
+#define S_GLOBALENABLE 0
+#define V_GLOBALENABLE(x) ((x) << S_GLOBALENABLE)
+#define F_GLOBALENABLE V_GLOBALENABLE(1U)
+
+#define S_URGTNL 26
+#define V_URGTNL(x) ((x) << S_URGTNL)
+#define F_URGTNL V_URGTNL(1U)
+
+#define S_NEWNOTIFY 25
+#define V_NEWNOTIFY(x) ((x) << S_NEWNOTIFY)
+#define F_NEWNOTIFY V_NEWNOTIFY(1U)
+
+#define S_AVOIDCQOVFL 24
+#define V_AVOIDCQOVFL(x) ((x) << S_AVOIDCQOVFL)
+#define F_AVOIDCQOVFL V_AVOIDCQOVFL(1U)
+
+#define S_OPTONEINTMULTQ 23
+#define V_OPTONEINTMULTQ(x) ((x) << S_OPTONEINTMULTQ)
+#define F_OPTONEINTMULTQ V_OPTONEINTMULTQ(1U)
+
+#define S_CQCRDTCTRL 22
+#define V_CQCRDTCTRL(x) ((x) << S_CQCRDTCTRL)
+#define F_CQCRDTCTRL V_CQCRDTCTRL(1U)
+
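+/*
+ * Usage sketch (field values here are hypothetical): a driver composes a
+ * register write from these macros, e.g.
+ *   t3_write_reg(adap, A_SG_CONTROL,
+ *                F_GLOBALENABLE | V_HOSTPAGESIZE(2) | V_PKTSHIFT(2));
+ * and decodes a read with G_PKTSHIFT(t3_read_reg(adap, A_SG_CONTROL)).
+ */
+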
+#define A_SG_KDOORBELL 0x4
+
+#define S_SELEGRCNTX 31
+#define V_SELEGRCNTX(x) ((x) << S_SELEGRCNTX)
+#define F_SELEGRCNTX V_SELEGRCNTX(1U)
+
+#define S_EGRCNTX 0
+#define M_EGRCNTX 0xffff
+#define V_EGRCNTX(x) ((x) << S_EGRCNTX)
+#define G_EGRCNTX(x) (((x) >> S_EGRCNTX) & M_EGRCNTX)
+
+#define A_SG_GTS 0x8
+
+#define S_RSPQ 29
+#define M_RSPQ 0x7
+#define V_RSPQ(x) ((x) << S_RSPQ)
+#define G_RSPQ(x) (((x) >> S_RSPQ) & M_RSPQ)
+
+#define S_NEWTIMER 16
+#define M_NEWTIMER 0x1fff
+#define V_NEWTIMER(x) ((x) << S_NEWTIMER)
+#define G_NEWTIMER(x) (((x) >> S_NEWTIMER) & M_NEWTIMER)
+
+#define S_NEWINDEX 0
+#define M_NEWINDEX 0xffff
+#define V_NEWINDEX(x) ((x) << S_NEWINDEX)
+#define G_NEWINDEX(x) (((x) >> S_NEWINDEX) & M_NEWINDEX)
+
+#define A_SG_CONTEXT_CMD 0xc
+
+#define S_CONTEXT_CMD_OPCODE 28
+#define M_CONTEXT_CMD_OPCODE 0xf
+#define V_CONTEXT_CMD_OPCODE(x) ((x) << S_CONTEXT_CMD_OPCODE)
+#define G_CONTEXT_CMD_OPCODE(x) (((x) >> S_CONTEXT_CMD_OPCODE) & M_CONTEXT_CMD_OPCODE)
+
+#define S_CONTEXT_CMD_BUSY 27
+#define V_CONTEXT_CMD_BUSY(x) ((x) << S_CONTEXT_CMD_BUSY)
+#define F_CONTEXT_CMD_BUSY V_CONTEXT_CMD_BUSY(1U)
+
+#define S_CQ_CREDIT 20
+#define M_CQ_CREDIT 0x7f
+#define V_CQ_CREDIT(x) ((x) << S_CQ_CREDIT)
+#define G_CQ_CREDIT(x) (((x) >> S_CQ_CREDIT) & M_CQ_CREDIT)
+
+#define S_CQ 19
+#define V_CQ(x) ((x) << S_CQ)
+#define F_CQ V_CQ(1U)
+
+#define S_RESPONSEQ 18
+#define V_RESPONSEQ(x) ((x) << S_RESPONSEQ)
+#define F_RESPONSEQ V_RESPONSEQ(1U)
+
+#define S_EGRESS 17
+#define V_EGRESS(x) ((x) << S_EGRESS)
+#define F_EGRESS V_EGRESS(1U)
+
+#define S_FREELIST 16
+#define V_FREELIST(x) ((x) << S_FREELIST)
+#define F_FREELIST V_FREELIST(1U)
+
+#define S_CONTEXT 0
+#define M_CONTEXT 0xffff
+#define V_CONTEXT(x) ((x) << S_CONTEXT)
+#define G_CONTEXT(x) (((x) >> S_CONTEXT) & M_CONTEXT)
+
+#define A_SG_CONTEXT_DATA0 0x10
+#define A_SG_CONTEXT_DATA1 0x14
+#define A_SG_CONTEXT_DATA2 0x18
+#define A_SG_CONTEXT_DATA3 0x1c
+#define A_SG_CONTEXT_MASK0 0x20
+#define A_SG_CONTEXT_MASK1 0x24
+#define A_SG_CONTEXT_MASK2 0x28
+#define A_SG_CONTEXT_MASK3 0x2c
+#define A_SG_RSPQ_CREDIT_RETURN 0x30
+
+#define S_CREDITS 0
+#define M_CREDITS 0xffff
+#define V_CREDITS(x) ((x) << S_CREDITS)
+#define G_CREDITS(x) (((x) >> S_CREDITS) & M_CREDITS)
+
+#define A_SG_DATA_INTR 0x34
+
+#define S_ERRINTR 31
+#define V_ERRINTR(x) ((x) << S_ERRINTR)
+#define F_ERRINTR V_ERRINTR(1U)
+
+#define S_DATAINTR 0
+#define M_DATAINTR 0xff
+#define V_DATAINTR(x) ((x) << S_DATAINTR)
+#define G_DATAINTR(x) (((x) >> S_DATAINTR) & M_DATAINTR)
+
+#define A_SG_HI_DRB_HI_THRSH 0x38
+
+#define S_HIDRBHITHRSH 0
+#define M_HIDRBHITHRSH 0x3ff
+#define V_HIDRBHITHRSH(x) ((x) << S_HIDRBHITHRSH)
+#define G_HIDRBHITHRSH(x) (((x) >> S_HIDRBHITHRSH) & M_HIDRBHITHRSH)
+
+#define A_SG_HI_DRB_LO_THRSH 0x3c
+
+#define S_HIDRBLOTHRSH 0
+#define M_HIDRBLOTHRSH 0x3ff
+#define V_HIDRBLOTHRSH(x) ((x) << S_HIDRBLOTHRSH)
+#define G_HIDRBLOTHRSH(x) (((x) >> S_HIDRBLOTHRSH) & M_HIDRBLOTHRSH)
+
+#define A_SG_LO_DRB_HI_THRSH 0x40
+
+#define S_LODRBHITHRSH 0
+#define M_LODRBHITHRSH 0x3ff
+#define V_LODRBHITHRSH(x) ((x) << S_LODRBHITHRSH)
+#define G_LODRBHITHRSH(x) (((x) >> S_LODRBHITHRSH) & M_LODRBHITHRSH)
+
+#define A_SG_LO_DRB_LO_THRSH 0x44
+
+#define S_LODRBLOTHRSH 0
+#define M_LODRBLOTHRSH 0x3ff
+#define V_LODRBLOTHRSH(x) ((x) << S_LODRBLOTHRSH)
+#define G_LODRBLOTHRSH(x) (((x) >> S_LODRBLOTHRSH) & M_LODRBLOTHRSH)
+
+#define A_SG_ONE_INT_MULT_Q_COALESCING_TIMER 0x48
+#define A_SG_RSPQ_FL_STATUS 0x4c
+
+#define S_RSPQ0STARVED 0
+#define V_RSPQ0STARVED(x) ((x) << S_RSPQ0STARVED)
+#define F_RSPQ0STARVED V_RSPQ0STARVED(1U)
+
+#define S_RSPQ1STARVED 1
+#define V_RSPQ1STARVED(x) ((x) << S_RSPQ1STARVED)
+#define F_RSPQ1STARVED V_RSPQ1STARVED(1U)
+
+#define S_RSPQ2STARVED 2
+#define V_RSPQ2STARVED(x) ((x) << S_RSPQ2STARVED)
+#define F_RSPQ2STARVED V_RSPQ2STARVED(1U)
+
+#define S_RSPQ3STARVED 3
+#define V_RSPQ3STARVED(x) ((x) << S_RSPQ3STARVED)
+#define F_RSPQ3STARVED V_RSPQ3STARVED(1U)
+
+#define S_RSPQ4STARVED 4
+#define V_RSPQ4STARVED(x) ((x) << S_RSPQ4STARVED)
+#define F_RSPQ4STARVED V_RSPQ4STARVED(1U)
+
+#define S_RSPQ5STARVED 5
+#define V_RSPQ5STARVED(x) ((x) << S_RSPQ5STARVED)
+#define F_RSPQ5STARVED V_RSPQ5STARVED(1U)
+
+#define S_RSPQ6STARVED 6
+#define V_RSPQ6STARVED(x) ((x) << S_RSPQ6STARVED)
+#define F_RSPQ6STARVED V_RSPQ6STARVED(1U)
+
+#define S_RSPQ7STARVED 7
+#define V_RSPQ7STARVED(x) ((x) << S_RSPQ7STARVED)
+#define F_RSPQ7STARVED V_RSPQ7STARVED(1U)
+
+#define S_RSPQ0DISABLED 8
+#define V_RSPQ0DISABLED(x) ((x) << S_RSPQ0DISABLED)
+#define F_RSPQ0DISABLED V_RSPQ0DISABLED(1U)
+
+#define S_RSPQ1DISABLED 9
+#define V_RSPQ1DISABLED(x) ((x) << S_RSPQ1DISABLED)
+#define F_RSPQ1DISABLED V_RSPQ1DISABLED(1U)
+
+#define S_RSPQ2DISABLED 10
+#define V_RSPQ2DISABLED(x) ((x) << S_RSPQ2DISABLED)
+#define F_RSPQ2DISABLED V_RSPQ2DISABLED(1U)
+
+#define S_RSPQ3DISABLED 11
+#define V_RSPQ3DISABLED(x) ((x) << S_RSPQ3DISABLED)
+#define F_RSPQ3DISABLED V_RSPQ3DISABLED(1U)
+
+#define S_RSPQ4DISABLED 12
+#define V_RSPQ4DISABLED(x) ((x) << S_RSPQ4DISABLED)
+#define F_RSPQ4DISABLED V_RSPQ4DISABLED(1U)
+
+#define S_RSPQ5DISABLED 13
+#define V_RSPQ5DISABLED(x) ((x) << S_RSPQ5DISABLED)
+#define F_RSPQ5DISABLED V_RSPQ5DISABLED(1U)
+
+#define S_RSPQ6DISABLED 14
+#define V_RSPQ6DISABLED(x) ((x) << S_RSPQ6DISABLED)
+#define F_RSPQ6DISABLED V_RSPQ6DISABLED(1U)
+
+#define S_RSPQ7DISABLED 15
+#define V_RSPQ7DISABLED(x) ((x) << S_RSPQ7DISABLED)
+#define F_RSPQ7DISABLED V_RSPQ7DISABLED(1U)
+
+#define S_FL0EMPTY 16
+#define V_FL0EMPTY(x) ((x) << S_FL0EMPTY)
+#define F_FL0EMPTY V_FL0EMPTY(1U)
+
+#define S_FL1EMPTY 17
+#define V_FL1EMPTY(x) ((x) << S_FL1EMPTY)
+#define F_FL1EMPTY V_FL1EMPTY(1U)
+
+#define S_FL2EMPTY 18
+#define V_FL2EMPTY(x) ((x) << S_FL2EMPTY)
+#define F_FL2EMPTY V_FL2EMPTY(1U)
+
+#define S_FL3EMPTY 19
+#define V_FL3EMPTY(x) ((x) << S_FL3EMPTY)
+#define F_FL3EMPTY V_FL3EMPTY(1U)
+
+#define S_FL4EMPTY 20
+#define V_FL4EMPTY(x) ((x) << S_FL4EMPTY)
+#define F_FL4EMPTY V_FL4EMPTY(1U)
+
+#define S_FL5EMPTY 21
+#define V_FL5EMPTY(x) ((x) << S_FL5EMPTY)
+#define F_FL5EMPTY V_FL5EMPTY(1U)
+
+#define S_FL6EMPTY 22
+#define V_FL6EMPTY(x) ((x) << S_FL6EMPTY)
+#define F_FL6EMPTY V_FL6EMPTY(1U)
+
+#define S_FL7EMPTY 23
+#define V_FL7EMPTY(x) ((x) << S_FL7EMPTY)
+#define F_FL7EMPTY V_FL7EMPTY(1U)
+
+#define S_FL8EMPTY 24
+#define V_FL8EMPTY(x) ((x) << S_FL8EMPTY)
+#define F_FL8EMPTY V_FL8EMPTY(1U)
+
+#define S_FL9EMPTY 25
+#define V_FL9EMPTY(x) ((x) << S_FL9EMPTY)
+#define F_FL9EMPTY V_FL9EMPTY(1U)
+
+#define S_FL10EMPTY 26
+#define V_FL10EMPTY(x) ((x) << S_FL10EMPTY)
+#define F_FL10EMPTY V_FL10EMPTY(1U)
+
+#define S_FL11EMPTY 27
+#define V_FL11EMPTY(x) ((x) << S_FL11EMPTY)
+#define F_FL11EMPTY V_FL11EMPTY(1U)
+
+#define S_FL12EMPTY 28
+#define V_FL12EMPTY(x) ((x) << S_FL12EMPTY)
+#define F_FL12EMPTY V_FL12EMPTY(1U)
+
+#define S_FL13EMPTY 29
+#define V_FL13EMPTY(x) ((x) << S_FL13EMPTY)
+#define F_FL13EMPTY V_FL13EMPTY(1U)
+
+#define S_FL14EMPTY 30
+#define V_FL14EMPTY(x) ((x) << S_FL14EMPTY)
+#define F_FL14EMPTY V_FL14EMPTY(1U)
+
+#define S_FL15EMPTY 31
+#define V_FL15EMPTY(x) ((x) << S_FL15EMPTY)
+#define F_FL15EMPTY V_FL15EMPTY(1U)
+
+#define A_SG_EGR_PRI_CNT 0x50
+
+#define S_EGRPRICNT 0
+#define M_EGRPRICNT 0x1f
+#define V_EGRPRICNT(x) ((x) << S_EGRPRICNT)
+#define G_EGRPRICNT(x) (((x) >> S_EGRPRICNT) & M_EGRPRICNT)
+
+#define S_EGRERROPCODE 24
+#define M_EGRERROPCODE 0xff
+#define V_EGRERROPCODE(x) ((x) << S_EGRERROPCODE)
+#define G_EGRERROPCODE(x) (((x) >> S_EGRERROPCODE) & M_EGRERROPCODE)
+
+#define S_EGRHIOPCODE 16
+#define M_EGRHIOPCODE 0xff
+#define V_EGRHIOPCODE(x) ((x) << S_EGRHIOPCODE)
+#define G_EGRHIOPCODE(x) (((x) >> S_EGRHIOPCODE) & M_EGRHIOPCODE)
+
+#define S_EGRLOOPCODE 8
+#define M_EGRLOOPCODE 0xff
+#define V_EGRLOOPCODE(x) ((x) << S_EGRLOOPCODE)
+#define G_EGRLOOPCODE(x) (((x) >> S_EGRLOOPCODE) & M_EGRLOOPCODE)
+
+#define A_SG_EGR_RCQ_DRB_THRSH 0x54
+
+#define S_HIRCQDRBTHRSH 16
+#define M_HIRCQDRBTHRSH 0x7ff
+#define V_HIRCQDRBTHRSH(x) ((x) << S_HIRCQDRBTHRSH)
+#define G_HIRCQDRBTHRSH(x) (((x) >> S_HIRCQDRBTHRSH) & M_HIRCQDRBTHRSH)
+
+#define S_LORCQDRBTHRSH 0
+#define M_LORCQDRBTHRSH 0x7ff
+#define V_LORCQDRBTHRSH(x) ((x) << S_LORCQDRBTHRSH)
+#define G_LORCQDRBTHRSH(x) (((x) >> S_LORCQDRBTHRSH) & M_LORCQDRBTHRSH)
+
+#define A_SG_EGR_CNTX_BADDR 0x58
+
+#define S_EGRCNTXBADDR 5
+#define M_EGRCNTXBADDR 0x7ffffff
+#define V_EGRCNTXBADDR(x) ((x) << S_EGRCNTXBADDR)
+#define G_EGRCNTXBADDR(x) (((x) >> S_EGRCNTXBADDR) & M_EGRCNTXBADDR)
+
+#define A_SG_INT_CAUSE 0x5c
+
+#define S_HICTLDRBDROPERR 13
+#define V_HICTLDRBDROPERR(x) ((x) << S_HICTLDRBDROPERR)
+#define F_HICTLDRBDROPERR V_HICTLDRBDROPERR(1U)
+
+#define S_LOCTLDRBDROPERR 12
+#define V_LOCTLDRBDROPERR(x) ((x) << S_LOCTLDRBDROPERR)
+#define F_LOCTLDRBDROPERR V_LOCTLDRBDROPERR(1U)
+
+#define S_HIPIODRBDROPERR 11
+#define V_HIPIODRBDROPERR(x) ((x) << S_HIPIODRBDROPERR)
+#define F_HIPIODRBDROPERR V_HIPIODRBDROPERR(1U)
+
+#define S_LOPIODRBDROPERR 10
+#define V_LOPIODRBDROPERR(x) ((x) << S_LOPIODRBDROPERR)
+#define F_LOPIODRBDROPERR V_LOPIODRBDROPERR(1U)
+
+#define S_HICRDTUNDFLOWERR 9
+#define V_HICRDTUNDFLOWERR(x) ((x) << S_HICRDTUNDFLOWERR)
+#define F_HICRDTUNDFLOWERR V_HICRDTUNDFLOWERR(1U)
+
+#define S_LOCRDTUNDFLOWERR 8
+#define V_LOCRDTUNDFLOWERR(x) ((x) << S_LOCRDTUNDFLOWERR)
+#define F_LOCRDTUNDFLOWERR V_LOCRDTUNDFLOWERR(1U)
+
+#define S_HIPRIORITYDBFULL 7
+#define V_HIPRIORITYDBFULL(x) ((x) << S_HIPRIORITYDBFULL)
+#define F_HIPRIORITYDBFULL V_HIPRIORITYDBFULL(1U)
+
+#define S_HIPRIORITYDBEMPTY 6
+#define V_HIPRIORITYDBEMPTY(x) ((x) << S_HIPRIORITYDBEMPTY)
+#define F_HIPRIORITYDBEMPTY V_HIPRIORITYDBEMPTY(1U)
+
+#define S_LOPRIORITYDBFULL 5
+#define V_LOPRIORITYDBFULL(x) ((x) << S_LOPRIORITYDBFULL)
+#define F_LOPRIORITYDBFULL V_LOPRIORITYDBFULL(1U)
+
+#define S_LOPRIORITYDBEMPTY 4
+#define V_LOPRIORITYDBEMPTY(x) ((x) << S_LOPRIORITYDBEMPTY)
+#define F_LOPRIORITYDBEMPTY V_LOPRIORITYDBEMPTY(1U)
+
+#define S_RSPQDISABLED 3
+#define V_RSPQDISABLED(x) ((x) << S_RSPQDISABLED)
+#define F_RSPQDISABLED V_RSPQDISABLED(1U)
+
+#define S_RSPQCREDITOVERFOW 2
+#define V_RSPQCREDITOVERFOW(x) ((x) << S_RSPQCREDITOVERFOW)
+#define F_RSPQCREDITOVERFOW V_RSPQCREDITOVERFOW(1U)
+
+#define S_FLEMPTY 1
+#define V_FLEMPTY(x) ((x) << S_FLEMPTY)
+#define F_FLEMPTY V_FLEMPTY(1U)
+
+#define S_RSPQSTARVE 0
+#define V_RSPQSTARVE(x) ((x) << S_RSPQSTARVE)
+#define F_RSPQSTARVE V_RSPQSTARVE(1U)
+
+#define A_SG_INT_ENABLE 0x60
+#define A_SG_CMDQ_CREDIT_TH 0x64
+
+#define S_TIMEOUT 8
+#define M_TIMEOUT 0xffffff
+#define V_TIMEOUT(x) ((x) << S_TIMEOUT)
+#define G_TIMEOUT(x) (((x) >> S_TIMEOUT) & M_TIMEOUT)
+
+#define S_THRESHOLD 0
+#define M_THRESHOLD 0xff
+#define V_THRESHOLD(x) ((x) << S_THRESHOLD)
+#define G_THRESHOLD(x) (((x) >> S_THRESHOLD) & M_THRESHOLD)
+
+#define A_SG_TIMER_TICK 0x68
+#define A_SG_CQ_CONTEXT_BADDR 0x6c
+
+#define S_BASEADDR 5
+#define M_BASEADDR 0x7ffffff
+#define V_BASEADDR(x) ((x) << S_BASEADDR)
+#define G_BASEADDR(x) (((x) >> S_BASEADDR) & M_BASEADDR)
+
+#define A_SG_OCO_BASE 0x70
+
+#define S_BASE1 16
+#define M_BASE1 0xffff
+#define V_BASE1(x) ((x) << S_BASE1)
+#define G_BASE1(x) (((x) >> S_BASE1) & M_BASE1)
+
+#define S_BASE0 0
+#define M_BASE0 0xffff
+#define V_BASE0(x) ((x) << S_BASE0)
+#define G_BASE0(x) (((x) >> S_BASE0) & M_BASE0)
+
+#define A_SG_DRB_PRI_THRESH 0x74
+
+#define S_DRBPRITHRSH 0
+#define M_DRBPRITHRSH 0xffff
+#define V_DRBPRITHRSH(x) ((x) << S_DRBPRITHRSH)
+#define G_DRBPRITHRSH(x) (((x) >> S_DRBPRITHRSH) & M_DRBPRITHRSH)
+
+#define A_SG_DEBUG_INDEX 0x78
+#define A_SG_DEBUG_DATA 0x7c
+
+/* registers for module PCIX1 */
+#define PCIX1_BASE_ADDR 0x80
+
+#define A_PCIX_INT_ENABLE 0x80
+
+#define S_MSIXPARERR 22
+#define M_MSIXPARERR 0x7
+#define V_MSIXPARERR(x) ((x) << S_MSIXPARERR)
+#define G_MSIXPARERR(x) (((x) >> S_MSIXPARERR) & M_MSIXPARERR)
+
+#define S_CFPARERR 18
+#define M_CFPARERR 0xf
+#define V_CFPARERR(x) ((x) << S_CFPARERR)
+#define G_CFPARERR(x) (((x) >> S_CFPARERR) & M_CFPARERR)
+
+#define S_RFPARERR 14
+#define M_RFPARERR 0xf
+#define V_RFPARERR(x) ((x) << S_RFPARERR)
+#define G_RFPARERR(x) (((x) >> S_RFPARERR) & M_RFPARERR)
+
+#define S_WFPARERR 12
+#define M_WFPARERR 0x3
+#define V_WFPARERR(x) ((x) << S_WFPARERR)
+#define G_WFPARERR(x) (((x) >> S_WFPARERR) & M_WFPARERR)
+
+#define S_PIOPARERR 11
+#define V_PIOPARERR(x) ((x) << S_PIOPARERR)
+#define F_PIOPARERR V_PIOPARERR(1U)
+
+#define S_DETUNCECCERR 10
+#define V_DETUNCECCERR(x) ((x) << S_DETUNCECCERR)
+#define F_DETUNCECCERR V_DETUNCECCERR(1U)
+
+#define S_DETCORECCERR 9
+#define V_DETCORECCERR(x) ((x) << S_DETCORECCERR)
+#define F_DETCORECCERR V_DETCORECCERR(1U)
+
+#define S_RCVSPLCMPERR 8
+#define V_RCVSPLCMPERR(x) ((x) << S_RCVSPLCMPERR)
+#define F_RCVSPLCMPERR V_RCVSPLCMPERR(1U)
+
+#define S_UNXSPLCMP 7
+#define V_UNXSPLCMP(x) ((x) << S_UNXSPLCMP)
+#define F_UNXSPLCMP V_UNXSPLCMP(1U)
+
+#define S_SPLCMPDIS 6
+#define V_SPLCMPDIS(x) ((x) << S_SPLCMPDIS)
+#define F_SPLCMPDIS V_SPLCMPDIS(1U)
+
+#define S_DETPARERR 5
+#define V_DETPARERR(x) ((x) << S_DETPARERR)
+#define F_DETPARERR V_DETPARERR(1U)
+
+#define S_SIGSYSERR 4
+#define V_SIGSYSERR(x) ((x) << S_SIGSYSERR)
+#define F_SIGSYSERR V_SIGSYSERR(1U)
+
+#define S_RCVMSTABT 3
+#define V_RCVMSTABT(x) ((x) << S_RCVMSTABT)
+#define F_RCVMSTABT V_RCVMSTABT(1U)
+
+#define S_RCVTARABT 2
+#define V_RCVTARABT(x) ((x) << S_RCVTARABT)
+#define F_RCVTARABT V_RCVTARABT(1U)
+
+#define S_SIGTARABT 1
+#define V_SIGTARABT(x) ((x) << S_SIGTARABT)
+#define F_SIGTARABT V_SIGTARABT(1U)
+
+#define S_MSTDETPARERR 0
+#define V_MSTDETPARERR(x) ((x) << S_MSTDETPARERR)
+#define F_MSTDETPARERR V_MSTDETPARERR(1U)
+
+#define A_PCIX_INT_CAUSE 0x84
+#define A_PCIX_CFG 0x88
+
+#define S_CLIDECEN 18
+#define V_CLIDECEN(x) ((x) << S_CLIDECEN)
+#define F_CLIDECEN V_CLIDECEN(1U)
+
+#define S_LATTMRDIS 17
+#define V_LATTMRDIS(x) ((x) << S_LATTMRDIS)
+#define F_LATTMRDIS V_LATTMRDIS(1U)
+
+#define S_LOWPWREN 16
+#define V_LOWPWREN(x) ((x) << S_LOWPWREN)
+#define F_LOWPWREN V_LOWPWREN(1U)
+
+#define S_ASYNCINTVEC 11
+#define M_ASYNCINTVEC 0x1f
+#define V_ASYNCINTVEC(x) ((x) << S_ASYNCINTVEC)
+#define G_ASYNCINTVEC(x) (((x) >> S_ASYNCINTVEC) & M_ASYNCINTVEC)
+
+#define S_MAXSPLTRNC 8
+#define M_MAXSPLTRNC 0x7
+#define V_MAXSPLTRNC(x) ((x) << S_MAXSPLTRNC)
+#define G_MAXSPLTRNC(x) (((x) >> S_MAXSPLTRNC) & M_MAXSPLTRNC)
+
+#define S_MAXSPLTRNR 5
+#define M_MAXSPLTRNR 0x7
+#define V_MAXSPLTRNR(x) ((x) << S_MAXSPLTRNR)
+#define G_MAXSPLTRNR(x) (((x) >> S_MAXSPLTRNR) & M_MAXSPLTRNR)
+
+#define S_MAXWRBYTECNT 3
+#define M_MAXWRBYTECNT 0x3
+#define V_MAXWRBYTECNT(x) ((x) << S_MAXWRBYTECNT)
+#define G_MAXWRBYTECNT(x) (((x) >> S_MAXWRBYTECNT) & M_MAXWRBYTECNT)
+
+#define S_WRREQATOMICEN 2
+#define V_WRREQATOMICEN(x) ((x) << S_WRREQATOMICEN)
+#define F_WRREQATOMICEN V_WRREQATOMICEN(1U)
+
+#define S_RSTWRMMODE 1
+#define V_RSTWRMMODE(x) ((x) << S_RSTWRMMODE)
+#define F_RSTWRMMODE V_RSTWRMMODE(1U)
+
+#define S_PIOACK64EN 0
+#define V_PIOACK64EN(x) ((x) << S_PIOACK64EN)
+#define F_PIOACK64EN V_PIOACK64EN(1U)
+
+#define A_PCIX_MODE 0x8c
+
+#define S_PCLKRANGE 6
+#define M_PCLKRANGE 0x3
+#define V_PCLKRANGE(x) ((x) << S_PCLKRANGE)
+#define G_PCLKRANGE(x) (((x) >> S_PCLKRANGE) & M_PCLKRANGE)
+
+#define S_PCIXINITPAT 2
+#define M_PCIXINITPAT 0xf
+#define V_PCIXINITPAT(x) ((x) << S_PCIXINITPAT)
+#define G_PCIXINITPAT(x) (((x) >> S_PCIXINITPAT) & M_PCIXINITPAT)
+
+#define S_66MHZ 1
+#define V_66MHZ(x) ((x) << S_66MHZ)
+#define F_66MHZ V_66MHZ(1U)
+
+#define S_64BIT 0
+#define V_64BIT(x) ((x) << S_64BIT)
+#define F_64BIT V_64BIT(1U)
+
+#define A_PCIX_CAL 0x90
+
+#define S_BUSY 31
+#define V_BUSY(x) ((x) << S_BUSY)
+#define F_BUSY V_BUSY(1U)
+
+#define S_PERCALDIV 22
+#define M_PERCALDIV 0xff
+#define V_PERCALDIV(x) ((x) << S_PERCALDIV)
+#define G_PERCALDIV(x) (((x) >> S_PERCALDIV) & M_PERCALDIV)
+
+#define S_PERCALEN 21
+#define V_PERCALEN(x) ((x) << S_PERCALEN)
+#define F_PERCALEN V_PERCALEN(1U)
+
+#define S_SGLCALEN 20
+#define V_SGLCALEN(x) ((x) << S_SGLCALEN)
+#define F_SGLCALEN V_SGLCALEN(1U)
+
+#define S_ZINUPDMODE 19
+#define V_ZINUPDMODE(x) ((x) << S_ZINUPDMODE)
+#define F_ZINUPDMODE V_ZINUPDMODE(1U)
+
+#define S_ZINSEL 18
+#define V_ZINSEL(x) ((x) << S_ZINSEL)
+#define F_ZINSEL V_ZINSEL(1U)
+
+#define S_ZPDMAN 15
+#define M_ZPDMAN 0x7
+#define V_ZPDMAN(x) ((x) << S_ZPDMAN)
+#define G_ZPDMAN(x) (((x) >> S_ZPDMAN) & M_ZPDMAN)
+
+#define S_ZPUMAN 12
+#define M_ZPUMAN 0x7
+#define V_ZPUMAN(x) ((x) << S_ZPUMAN)
+#define G_ZPUMAN(x) (((x) >> S_ZPUMAN) & M_ZPUMAN)
+
+#define S_ZPDOUT 9
+#define M_ZPDOUT 0x7
+#define V_ZPDOUT(x) ((x) << S_ZPDOUT)
+#define G_ZPDOUT(x) (((x) >> S_ZPDOUT) & M_ZPDOUT)
+
+#define S_ZPUOUT 6
+#define M_ZPUOUT 0x7
+#define V_ZPUOUT(x) ((x) << S_ZPUOUT)
+#define G_ZPUOUT(x) (((x) >> S_ZPUOUT) & M_ZPUOUT)
+
+#define S_ZPDIN 3
+#define M_ZPDIN 0x7
+#define V_ZPDIN(x) ((x) << S_ZPDIN)
+#define G_ZPDIN(x) (((x) >> S_ZPDIN) & M_ZPDIN)
+
+#define S_ZPUIN 0
+#define M_ZPUIN 0x7
+#define V_ZPUIN(x) ((x) << S_ZPUIN)
+#define G_ZPUIN(x) (((x) >> S_ZPUIN) & M_ZPUIN)
+
+#define A_PCIX_WOL 0x94
+
+#define S_WAKEUP1 3
+#define V_WAKEUP1(x) ((x) << S_WAKEUP1)
+#define F_WAKEUP1 V_WAKEUP1(1U)
+
+#define S_WAKEUP0 2
+#define V_WAKEUP0(x) ((x) << S_WAKEUP0)
+#define F_WAKEUP0 V_WAKEUP0(1U)
+
+#define S_SLEEPMODE1 1
+#define V_SLEEPMODE1(x) ((x) << S_SLEEPMODE1)
+#define F_SLEEPMODE1 V_SLEEPMODE1(1U)
+
+#define S_SLEEPMODE0 0
+#define V_SLEEPMODE0(x) ((x) << S_SLEEPMODE0)
+#define F_SLEEPMODE0 V_SLEEPMODE0(1U)
+
+/* registers for module PCIE0 */
+#define PCIE0_BASE_ADDR 0x80
+
+#define A_PCIE_INT_ENABLE 0x80
+
+#define S_BISTERR 15
+#define M_BISTERR 0xff
+#define V_BISTERR(x) ((x) << S_BISTERR)
+#define G_BISTERR(x) (((x) >> S_BISTERR) & M_BISTERR)
+
+#define S_PCIE_MSIXPARERR 12
+#define M_PCIE_MSIXPARERR 0x7
+#define V_PCIE_MSIXPARERR(x) ((x) << S_PCIE_MSIXPARERR)
+#define G_PCIE_MSIXPARERR(x) (((x) >> S_PCIE_MSIXPARERR) & M_PCIE_MSIXPARERR)
+
+#define S_PCIE_CFPARERR 11
+#define V_PCIE_CFPARERR(x) ((x) << S_PCIE_CFPARERR)
+#define F_PCIE_CFPARERR V_PCIE_CFPARERR(1U)
+
+#define S_PCIE_RFPARERR 10
+#define V_PCIE_RFPARERR(x) ((x) << S_PCIE_RFPARERR)
+#define F_PCIE_RFPARERR V_PCIE_RFPARERR(1U)
+
+#define S_PCIE_WFPARERR 9
+#define V_PCIE_WFPARERR(x) ((x) << S_PCIE_WFPARERR)
+#define F_PCIE_WFPARERR V_PCIE_WFPARERR(1U)
+
+#define S_PCIE_PIOPARERR 8
+#define V_PCIE_PIOPARERR(x) ((x) << S_PCIE_PIOPARERR)
+#define F_PCIE_PIOPARERR V_PCIE_PIOPARERR(1U)
+
+#define S_UNXSPLCPLERRC 7
+#define V_UNXSPLCPLERRC(x) ((x) << S_UNXSPLCPLERRC)
+#define F_UNXSPLCPLERRC V_UNXSPLCPLERRC(1U)
+
+#define S_UNXSPLCPLERRR 6
+#define V_UNXSPLCPLERRR(x) ((x) << S_UNXSPLCPLERRR)
+#define F_UNXSPLCPLERRR V_UNXSPLCPLERRR(1U)
+
+#define S_VPDADDRCHNG 5
+#define V_VPDADDRCHNG(x) ((x) << S_VPDADDRCHNG)
+#define F_VPDADDRCHNG V_VPDADDRCHNG(1U)
+
+#define S_BUSMSTREN 4
+#define V_BUSMSTREN(x) ((x) << S_BUSMSTREN)
+#define F_BUSMSTREN V_BUSMSTREN(1U)
+
+#define S_PMSTCHNG 3
+#define V_PMSTCHNG(x) ((x) << S_PMSTCHNG)
+#define F_PMSTCHNG V_PMSTCHNG(1U)
+
+#define S_PEXMSG 2
+#define V_PEXMSG(x) ((x) << S_PEXMSG)
+#define F_PEXMSG V_PEXMSG(1U)
+
+#define S_ZEROLENRD 1
+#define V_ZEROLENRD(x) ((x) << S_ZEROLENRD)
+#define F_ZEROLENRD V_ZEROLENRD(1U)
+
+#define S_PEXERR 0
+#define V_PEXERR(x) ((x) << S_PEXERR)
+#define F_PEXERR V_PEXERR(1U)
+
+#define A_PCIE_INT_CAUSE 0x84
+#define A_PCIE_CFG 0x88
+
+#define S_ENABLELINKDWNDRST 21
+#define V_ENABLELINKDWNDRST(x) ((x) << S_ENABLELINKDWNDRST)
+#define F_ENABLELINKDWNDRST V_ENABLELINKDWNDRST(1U)
+
+#define S_ENABLELINKDOWNRST 20
+#define V_ENABLELINKDOWNRST(x) ((x) << S_ENABLELINKDOWNRST)
+#define F_ENABLELINKDOWNRST V_ENABLELINKDOWNRST(1U)
+
+#define S_ENABLEHOTRST 19
+#define V_ENABLEHOTRST(x) ((x) << S_ENABLEHOTRST)
+#define F_ENABLEHOTRST V_ENABLEHOTRST(1U)
+
+#define S_INIWAITFORGNT 18
+#define V_INIWAITFORGNT(x) ((x) << S_INIWAITFORGNT)
+#define F_INIWAITFORGNT V_INIWAITFORGNT(1U)
+
+#define S_INIBEDIS 17
+#define V_INIBEDIS(x) ((x) << S_INIBEDIS)
+#define F_INIBEDIS V_INIBEDIS(1U)
+
+#define S_PCIE_CLIDECEN 16
+#define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN)
+#define F_PCIE_CLIDECEN V_PCIE_CLIDECEN(1U)
+
+#define S_PCIE_MAXSPLTRNC 7
+#define M_PCIE_MAXSPLTRNC 0xf
+#define V_PCIE_MAXSPLTRNC(x) ((x) << S_PCIE_MAXSPLTRNC)
+#define G_PCIE_MAXSPLTRNC(x) (((x) >> S_PCIE_MAXSPLTRNC) & M_PCIE_MAXSPLTRNC)
+
+#define S_PCIE_MAXSPLTRNR 1
+#define M_PCIE_MAXSPLTRNR 0x3f
+#define V_PCIE_MAXSPLTRNR(x) ((x) << S_PCIE_MAXSPLTRNR)
+#define G_PCIE_MAXSPLTRNR(x) (((x) >> S_PCIE_MAXSPLTRNR) & M_PCIE_MAXSPLTRNR)
+
+#define S_CRSTWRMMODE 0
+#define V_CRSTWRMMODE(x) ((x) << S_CRSTWRMMODE)
+#define F_CRSTWRMMODE V_CRSTWRMMODE(1U)
+
+#define S_PRIORITYINTA 23
+#define V_PRIORITYINTA(x) ((x) << S_PRIORITYINTA)
+#define F_PRIORITYINTA V_PRIORITYINTA(1U)
+
+#define S_INIFULLPKT 22
+#define V_INIFULLPKT(x) ((x) << S_INIFULLPKT)
+#define F_INIFULLPKT V_INIFULLPKT(1U)
+
+#define A_PCIE_MODE 0x8c
+
+#define S_LNKCNTLSTATE 2
+#define M_LNKCNTLSTATE 0xff
+#define V_LNKCNTLSTATE(x) ((x) << S_LNKCNTLSTATE)
+#define G_LNKCNTLSTATE(x) (((x) >> S_LNKCNTLSTATE) & M_LNKCNTLSTATE)
+
+#define S_VC0UP 1
+#define V_VC0UP(x) ((x) << S_VC0UP)
+#define F_VC0UP V_VC0UP(1U)
+
+#define S_LNKINITIAL 0
+#define V_LNKINITIAL(x) ((x) << S_LNKINITIAL)
+#define F_LNKINITIAL V_LNKINITIAL(1U)
+
+#define S_NUMFSTTRNSEQRX 10
+#define M_NUMFSTTRNSEQRX 0xff
+#define V_NUMFSTTRNSEQRX(x) ((x) << S_NUMFSTTRNSEQRX)
+#define G_NUMFSTTRNSEQRX(x) (((x) >> S_NUMFSTTRNSEQRX) & M_NUMFSTTRNSEQRX)
+
+#define A_PCIE_CAL 0x90
+
+#define S_CALBUSY 31
+#define V_CALBUSY(x) ((x) << S_CALBUSY)
+#define F_CALBUSY V_CALBUSY(1U)
+
+#define S_CALFAULT 30
+#define V_CALFAULT(x) ((x) << S_CALFAULT)
+#define F_CALFAULT V_CALFAULT(1U)
+
+#define S_PCIE_ZINSEL 11
+#define V_PCIE_ZINSEL(x) ((x) << S_PCIE_ZINSEL)
+#define F_PCIE_ZINSEL V_PCIE_ZINSEL(1U)
+
+#define S_ZMAN 8
+#define M_ZMAN 0x7
+#define V_ZMAN(x) ((x) << S_ZMAN)
+#define G_ZMAN(x) (((x) >> S_ZMAN) & M_ZMAN)
+
+#define S_ZOUT 3
+#define M_ZOUT 0x1f
+#define V_ZOUT(x) ((x) << S_ZOUT)
+#define G_ZOUT(x) (((x) >> S_ZOUT) & M_ZOUT)
+
+#define S_ZIN 0
+#define M_ZIN 0x7
+#define V_ZIN(x) ((x) << S_ZIN)
+#define G_ZIN(x) (((x) >> S_ZIN) & M_ZIN)
+
+#define A_PCIE_WOL 0x94
+#define A_PCIE_PEX_CTRL0 0x98
+
+#define S_NUMFSTTRNSEQ 22
+#define M_NUMFSTTRNSEQ 0xff
+#define V_NUMFSTTRNSEQ(x) ((x) << S_NUMFSTTRNSEQ)
+#define G_NUMFSTTRNSEQ(x) (((x) >> S_NUMFSTTRNSEQ) & M_NUMFSTTRNSEQ)
+
+#define S_REPLAYLMT 2
+#define M_REPLAYLMT 0xfffff
+#define V_REPLAYLMT(x) ((x) << S_REPLAYLMT)
+#define G_REPLAYLMT(x) (((x) >> S_REPLAYLMT) & M_REPLAYLMT)
+
+#define S_TXPNDCHKEN 1
+#define V_TXPNDCHKEN(x) ((x) << S_TXPNDCHKEN)
+#define F_TXPNDCHKEN V_TXPNDCHKEN(1U)
+
+#define S_CPLPNDCHKEN 0
+#define V_CPLPNDCHKEN(x) ((x) << S_CPLPNDCHKEN)
+#define F_CPLPNDCHKEN V_CPLPNDCHKEN(1U)
+
+#define S_CPLTIMEOUTRETRY 31
+#define V_CPLTIMEOUTRETRY(x) ((x) << S_CPLTIMEOUTRETRY)
+#define F_CPLTIMEOUTRETRY V_CPLTIMEOUTRETRY(1U)
+
+#define S_STRICTTSMN 30
+#define V_STRICTTSMN(x) ((x) << S_STRICTTSMN)
+#define F_STRICTTSMN V_STRICTTSMN(1U)
+
+#define A_PCIE_PEX_CTRL1 0x9c
+
+#define S_T3A_DLLPTIMEOUTLMT 11
+#define M_T3A_DLLPTIMEOUTLMT 0xfffff
+#define V_T3A_DLLPTIMEOUTLMT(x) ((x) << S_T3A_DLLPTIMEOUTLMT)
+#define G_T3A_DLLPTIMEOUTLMT(x) (((x) >> S_T3A_DLLPTIMEOUTLMT) & M_T3A_DLLPTIMEOUTLMT)
+
+#define S_T3A_ACKLAT 0
+#define M_T3A_ACKLAT 0x7ff
+#define V_T3A_ACKLAT(x) ((x) << S_T3A_ACKLAT)
+#define G_T3A_ACKLAT(x) (((x) >> S_T3A_ACKLAT) & M_T3A_ACKLAT)
+
+#define S_RXPHYERREN 31
+#define V_RXPHYERREN(x) ((x) << S_RXPHYERREN)
+#define F_RXPHYERREN V_RXPHYERREN(1U)
+
+#define S_DLLPTIMEOUTLMT 13
+#define M_DLLPTIMEOUTLMT 0x3ffff
+#define V_DLLPTIMEOUTLMT(x) ((x) << S_DLLPTIMEOUTLMT)
+#define G_DLLPTIMEOUTLMT(x) (((x) >> S_DLLPTIMEOUTLMT) & M_DLLPTIMEOUTLMT)
+
+#define S_ACKLAT 0
+#define M_ACKLAT 0x1fff
+#define V_ACKLAT(x) ((x) << S_ACKLAT)
+#define G_ACKLAT(x) (((x) >> S_ACKLAT) & M_ACKLAT)
+
+#define A_PCIE_PEX_CTRL2 0xa0
+
+#define S_PMEXITL1REQ 29
+#define V_PMEXITL1REQ(x) ((x) << S_PMEXITL1REQ)
+#define F_PMEXITL1REQ V_PMEXITL1REQ(1U)
+
+#define S_PMTXIDLE 28
+#define V_PMTXIDLE(x) ((x) << S_PMTXIDLE)
+#define F_PMTXIDLE V_PMTXIDLE(1U)
+
+#define S_PCIMODELOOP 27
+#define V_PCIMODELOOP(x) ((x) << S_PCIMODELOOP)
+#define F_PCIMODELOOP V_PCIMODELOOP(1U)
+
+#define S_L1ASPMTXRXL0STIME 15
+#define M_L1ASPMTXRXL0STIME 0xfff
+#define V_L1ASPMTXRXL0STIME(x) ((x) << S_L1ASPMTXRXL0STIME)
+#define G_L1ASPMTXRXL0STIME(x) (((x) >> S_L1ASPMTXRXL0STIME) & M_L1ASPMTXRXL0STIME)
+
+#define S_L0SIDLETIME 4
+#define M_L0SIDLETIME 0x7ff
+#define V_L0SIDLETIME(x) ((x) << S_L0SIDLETIME)
+#define G_L0SIDLETIME(x) (((x) >> S_L0SIDLETIME) & M_L0SIDLETIME)
+
+#define S_ENTERL23 3
+#define V_ENTERL23(x) ((x) << S_ENTERL23)
+#define F_ENTERL23 V_ENTERL23(1U)
+
+#define S_ENTERL1ASPMEN 2
+#define V_ENTERL1ASPMEN(x) ((x) << S_ENTERL1ASPMEN)
+#define F_ENTERL1ASPMEN V_ENTERL1ASPMEN(1U)
+
+#define S_ENTERL1EN 1
+#define V_ENTERL1EN(x) ((x) << S_ENTERL1EN)
+#define F_ENTERL1EN V_ENTERL1EN(1U)
+
+#define S_ENTERL0SEN 0
+#define V_ENTERL0SEN(x) ((x) << S_ENTERL0SEN)
+#define F_ENTERL0SEN V_ENTERL0SEN(1U)
+
+#define S_LNKCNTLDETDIR 30
+#define V_LNKCNTLDETDIR(x) ((x) << S_LNKCNTLDETDIR)
+#define F_LNKCNTLDETDIR V_LNKCNTLDETDIR(1U)
+
+#define S_ENTERL1REN 29
+#define V_ENTERL1REN(x) ((x) << S_ENTERL1REN)
+#define F_ENTERL1REN V_ENTERL1REN(1U)
+
+#define A_PCIE_PEX_ERR 0xa4
+
+#define S_FLOWCTLOFLOWERR 17
+#define V_FLOWCTLOFLOWERR(x) ((x) << S_FLOWCTLOFLOWERR)
+#define F_FLOWCTLOFLOWERR V_FLOWCTLOFLOWERR(1U)
+
+#define S_REPLAYTIMEOUT 16
+#define V_REPLAYTIMEOUT(x) ((x) << S_REPLAYTIMEOUT)
+#define F_REPLAYTIMEOUT V_REPLAYTIMEOUT(1U)
+
+#define S_REPLAYROLLOVER 15
+#define V_REPLAYROLLOVER(x) ((x) << S_REPLAYROLLOVER)
+#define F_REPLAYROLLOVER V_REPLAYROLLOVER(1U)
+
+#define S_BADDLLP 14
+#define V_BADDLLP(x) ((x) << S_BADDLLP)
+#define F_BADDLLP V_BADDLLP(1U)
+
+#define S_DLLPERR 13
+#define V_DLLPERR(x) ((x) << S_DLLPERR)
+#define F_DLLPERR V_DLLPERR(1U)
+
+#define S_FLOWCTLPROTERR 12
+#define V_FLOWCTLPROTERR(x) ((x) << S_FLOWCTLPROTERR)
+#define F_FLOWCTLPROTERR V_FLOWCTLPROTERR(1U)
+
+#define S_CPLTIMEOUT 11
+#define V_CPLTIMEOUT(x) ((x) << S_CPLTIMEOUT)
+#define F_CPLTIMEOUT V_CPLTIMEOUT(1U)
+
+#define S_PHYRCVERR 10
+#define V_PHYRCVERR(x) ((x) << S_PHYRCVERR)
+#define F_PHYRCVERR V_PHYRCVERR(1U)
+
+#define S_DISTLP 9
+#define V_DISTLP(x) ((x) << S_DISTLP)
+#define F_DISTLP V_DISTLP(1U)
+
+#define S_BADECRC 8
+#define V_BADECRC(x) ((x) << S_BADECRC)
+#define F_BADECRC V_BADECRC(1U)
+
+#define S_BADTLP 7
+#define V_BADTLP(x) ((x) << S_BADTLP)
+#define F_BADTLP V_BADTLP(1U)
+
+#define S_MALTLP 6
+#define V_MALTLP(x) ((x) << S_MALTLP)
+#define F_MALTLP V_MALTLP(1U)
+
+#define S_UNXCPL 5
+#define V_UNXCPL(x) ((x) << S_UNXCPL)
+#define F_UNXCPL V_UNXCPL(1U)
+
+#define S_UNSREQ 4
+#define V_UNSREQ(x) ((x) << S_UNSREQ)
+#define F_UNSREQ V_UNSREQ(1U)
+
+#define S_PSNREQ 3
+#define V_PSNREQ(x) ((x) << S_PSNREQ)
+#define F_PSNREQ V_PSNREQ(1U)
+
+#define S_UNSCPL 2
+#define V_UNSCPL(x) ((x) << S_UNSCPL)
+#define F_UNSCPL V_UNSCPL(1U)
+
+#define S_CPLABT 1
+#define V_CPLABT(x) ((x) << S_CPLABT)
+#define F_CPLABT V_CPLABT(1U)
+
+#define S_PSNCPL 0
+#define V_PSNCPL(x) ((x) << S_PSNCPL)
+#define F_PSNCPL V_PSNCPL(1U)
+
+#define S_CPLTIMEOUTID 18
+#define M_CPLTIMEOUTID 0x7f
+#define V_CPLTIMEOUTID(x) ((x) << S_CPLTIMEOUTID)
+#define G_CPLTIMEOUTID(x) (((x) >> S_CPLTIMEOUTID) & M_CPLTIMEOUTID)
+
+#define A_PCIE_PIPE_CTRL 0xa8
+
+#define S_RECDETUSEC 19
+#define M_RECDETUSEC 0x7
+#define V_RECDETUSEC(x) ((x) << S_RECDETUSEC)
+#define G_RECDETUSEC(x) (((x) >> S_RECDETUSEC) & M_RECDETUSEC)
+
+#define S_PLLLCKCYC 6
+#define M_PLLLCKCYC 0x1fff
+#define V_PLLLCKCYC(x) ((x) << S_PLLLCKCYC)
+#define G_PLLLCKCYC(x) (((x) >> S_PLLLCKCYC) & M_PLLLCKCYC)
+
+#define S_ELECIDLEDETCYC 3
+#define M_ELECIDLEDETCYC 0x7
+#define V_ELECIDLEDETCYC(x) ((x) << S_ELECIDLEDETCYC)
+#define G_ELECIDLEDETCYC(x) (((x) >> S_ELECIDLEDETCYC) & M_ELECIDLEDETCYC)
+
+#define S_USECDRLOS 2
+#define V_USECDRLOS(x) ((x) << S_USECDRLOS)
+#define F_USECDRLOS V_USECDRLOS(1U)
+
+#define S_PCLKREQINP1 1
+#define V_PCLKREQINP1(x) ((x) << S_PCLKREQINP1)
+#define F_PCLKREQINP1 V_PCLKREQINP1(1U)
+
+#define S_PCLKOFFINP1 0
+#define V_PCLKOFFINP1(x) ((x) << S_PCLKOFFINP1)
+#define F_PCLKOFFINP1 V_PCLKOFFINP1(1U)
+
+#define S_PMASEL 3
+#define V_PMASEL(x) ((x) << S_PMASEL)
+#define F_PMASEL V_PMASEL(1U)
+
+#define S_LANE 0
+#define M_LANE 0x7
+#define V_LANE(x) ((x) << S_LANE)
+#define G_LANE(x) (((x) >> S_LANE) & M_LANE)
+
+#define A_PCIE_SERDES_CTRL 0xac
+
+#define S_MANMODE 31
+#define V_MANMODE(x) ((x) << S_MANMODE)
+#define F_MANMODE V_MANMODE(1U)
+
+#define S_MANLPBKEN 29
+#define M_MANLPBKEN 0x3
+#define V_MANLPBKEN(x) ((x) << S_MANLPBKEN)
+#define G_MANLPBKEN(x) (((x) >> S_MANLPBKEN) & M_MANLPBKEN)
+
+#define S_MANTXRECDETEN 28
+#define V_MANTXRECDETEN(x) ((x) << S_MANTXRECDETEN)
+#define F_MANTXRECDETEN V_MANTXRECDETEN(1U)
+
+#define S_MANTXBEACON 27
+#define V_MANTXBEACON(x) ((x) << S_MANTXBEACON)
+#define F_MANTXBEACON V_MANTXBEACON(1U)
+
+#define S_MANTXEI 26
+#define V_MANTXEI(x) ((x) << S_MANTXEI)
+#define F_MANTXEI V_MANTXEI(1U)
+
+#define S_MANRXPOLARITY 25
+#define V_MANRXPOLARITY(x) ((x) << S_MANRXPOLARITY)
+#define F_MANRXPOLARITY V_MANRXPOLARITY(1U)
+
+#define S_MANTXRST 24
+#define V_MANTXRST(x) ((x) << S_MANTXRST)
+#define F_MANTXRST V_MANTXRST(1U)
+
+#define S_MANRXRST 23
+#define V_MANRXRST(x) ((x) << S_MANRXRST)
+#define F_MANRXRST V_MANRXRST(1U)
+
+#define S_MANTXEN 22
+#define V_MANTXEN(x) ((x) << S_MANTXEN)
+#define F_MANTXEN V_MANTXEN(1U)
+
+#define S_MANRXEN 21
+#define V_MANRXEN(x) ((x) << S_MANRXEN)
+#define F_MANRXEN V_MANRXEN(1U)
+
+#define S_MANEN 20
+#define V_MANEN(x) ((x) << S_MANEN)
+#define F_MANEN V_MANEN(1U)
+
+#define S_PCIE_CMURANGE 17
+#define M_PCIE_CMURANGE 0x7
+#define V_PCIE_CMURANGE(x) ((x) << S_PCIE_CMURANGE)
+#define G_PCIE_CMURANGE(x) (((x) >> S_PCIE_CMURANGE) & M_PCIE_CMURANGE)
+
+#define S_PCIE_BGENB 16
+#define V_PCIE_BGENB(x) ((x) << S_PCIE_BGENB)
+#define F_PCIE_BGENB V_PCIE_BGENB(1U)
+
+#define S_PCIE_ENSKPDROP 15
+#define V_PCIE_ENSKPDROP(x) ((x) << S_PCIE_ENSKPDROP)
+#define F_PCIE_ENSKPDROP V_PCIE_ENSKPDROP(1U)
+
+#define S_PCIE_ENCOMMA 14
+#define V_PCIE_ENCOMMA(x) ((x) << S_PCIE_ENCOMMA)
+#define F_PCIE_ENCOMMA V_PCIE_ENCOMMA(1U)
+
+#define S_PCIE_EN8B10B 13
+#define V_PCIE_EN8B10B(x) ((x) << S_PCIE_EN8B10B)
+#define F_PCIE_EN8B10B V_PCIE_EN8B10B(1U)
+
+#define S_PCIE_ENELBUF 12
+#define V_PCIE_ENELBUF(x) ((x) << S_PCIE_ENELBUF)
+#define F_PCIE_ENELBUF V_PCIE_ENELBUF(1U)
+
+#define S_PCIE_GAIN 7
+#define M_PCIE_GAIN 0x1f
+#define V_PCIE_GAIN(x) ((x) << S_PCIE_GAIN)
+#define G_PCIE_GAIN(x) (((x) >> S_PCIE_GAIN) & M_PCIE_GAIN)
+
+#define S_PCIE_BANDGAP 3
+#define M_PCIE_BANDGAP 0xf
+#define V_PCIE_BANDGAP(x) ((x) << S_PCIE_BANDGAP)
+#define G_PCIE_BANDGAP(x) (((x) >> S_PCIE_BANDGAP) & M_PCIE_BANDGAP)
+
+#define S_RXCOMADJ 2
+#define V_RXCOMADJ(x) ((x) << S_RXCOMADJ)
+#define F_RXCOMADJ V_RXCOMADJ(1U)
+
+#define S_PREEMPH 0
+#define M_PREEMPH 0x3
+#define V_PREEMPH(x) ((x) << S_PREEMPH)
+#define G_PREEMPH(x) (((x) >> S_PREEMPH) & M_PREEMPH)
+
+#define A_PCIE_SERDES_QUAD_CTRL0 0xac
+
+#define S_TESTSIG 10
+#define M_TESTSIG 0x7ffff
+#define V_TESTSIG(x) ((x) << S_TESTSIG)
+#define G_TESTSIG(x) (((x) >> S_TESTSIG) & M_TESTSIG)
+
+#define S_OFFSET 2
+#define M_OFFSET 0xff
+#define V_OFFSET(x) ((x) << S_OFFSET)
+#define G_OFFSET(x) (((x) >> S_OFFSET) & M_OFFSET)
+
+#define S_OFFSETEN 1
+#define V_OFFSETEN(x) ((x) << S_OFFSETEN)
+#define F_OFFSETEN V_OFFSETEN(1U)
+
+#define S_IDDQB 0
+#define V_IDDQB(x) ((x) << S_IDDQB)
+#define F_IDDQB V_IDDQB(1U)
+
+#define A_PCIE_SERDES_STATUS0 0xb0
+
+#define S_RXERRLANE7 21
+#define M_RXERRLANE7 0x7
+#define V_RXERRLANE7(x) ((x) << S_RXERRLANE7)
+#define G_RXERRLANE7(x) (((x) >> S_RXERRLANE7) & M_RXERRLANE7)
+
+#define S_RXERRLANE6 18
+#define M_RXERRLANE6 0x7
+#define V_RXERRLANE6(x) ((x) << S_RXERRLANE6)
+#define G_RXERRLANE6(x) (((x) >> S_RXERRLANE6) & M_RXERRLANE6)
+
+#define S_RXERRLANE5 15
+#define M_RXERRLANE5 0x7
+#define V_RXERRLANE5(x) ((x) << S_RXERRLANE5)
+#define G_RXERRLANE5(x) (((x) >> S_RXERRLANE5) & M_RXERRLANE5)
+
+#define S_RXERRLANE4 12
+#define M_RXERRLANE4 0x7
+#define V_RXERRLANE4(x) ((x) << S_RXERRLANE4)
+#define G_RXERRLANE4(x) (((x) >> S_RXERRLANE4) & M_RXERRLANE4)
+
+#define S_PCIE_RXERRLANE3 9
+#define M_PCIE_RXERRLANE3 0x7
+#define V_PCIE_RXERRLANE3(x) ((x) << S_PCIE_RXERRLANE3)
+#define G_PCIE_RXERRLANE3(x) (((x) >> S_PCIE_RXERRLANE3) & M_PCIE_RXERRLANE3)
+
+#define S_PCIE_RXERRLANE2 6
+#define M_PCIE_RXERRLANE2 0x7
+#define V_PCIE_RXERRLANE2(x) ((x) << S_PCIE_RXERRLANE2)
+#define G_PCIE_RXERRLANE2(x) (((x) >> S_PCIE_RXERRLANE2) & M_PCIE_RXERRLANE2)
+
+#define S_PCIE_RXERRLANE1 3
+#define M_PCIE_RXERRLANE1 0x7
+#define V_PCIE_RXERRLANE1(x) ((x) << S_PCIE_RXERRLANE1)
+#define G_PCIE_RXERRLANE1(x) (((x) >> S_PCIE_RXERRLANE1) & M_PCIE_RXERRLANE1)
+
+#define S_PCIE_RXERRLANE0 0
+#define M_PCIE_RXERRLANE0 0x7
+#define V_PCIE_RXERRLANE0(x) ((x) << S_PCIE_RXERRLANE0)
+#define G_PCIE_RXERRLANE0(x) (((x) >> S_PCIE_RXERRLANE0) & M_PCIE_RXERRLANE0)
+
+#define A_PCIE_SERDES_QUAD_CTRL1 0xb0
+
+#define S_FASTINIT 28
+#define V_FASTINIT(x) ((x) << S_FASTINIT)
+#define F_FASTINIT V_FASTINIT(1U)
+
+#define S_CTCDISABLE 27
+#define V_CTCDISABLE(x) ((x) << S_CTCDISABLE)
+#define F_CTCDISABLE V_CTCDISABLE(1U)
+
+#define S_MANRESETPLL 26
+#define V_MANRESETPLL(x) ((x) << S_MANRESETPLL)
+#define F_MANRESETPLL V_MANRESETPLL(1U)
+
+#define S_MANL2PWRDN 25
+#define V_MANL2PWRDN(x) ((x) << S_MANL2PWRDN)
+#define F_MANL2PWRDN V_MANL2PWRDN(1U)
+
+#define S_MANQUADEN 24
+#define V_MANQUADEN(x) ((x) << S_MANQUADEN)
+#define F_MANQUADEN V_MANQUADEN(1U)
+
+#define S_RXEQCTL 22
+#define M_RXEQCTL 0x3
+#define V_RXEQCTL(x) ((x) << S_RXEQCTL)
+#define G_RXEQCTL(x) (((x) >> S_RXEQCTL) & M_RXEQCTL)
+
+#define S_HIVMODE 21
+#define V_HIVMODE(x) ((x) << S_HIVMODE)
+#define F_HIVMODE V_HIVMODE(1U)
+
+#define S_REFSEL 19
+#define M_REFSEL 0x3
+#define V_REFSEL(x) ((x) << S_REFSEL)
+#define G_REFSEL(x) (((x) >> S_REFSEL) & M_REFSEL)
+
+#define S_RXTERMADJ 17
+#define M_RXTERMADJ 0x3
+#define V_RXTERMADJ(x) ((x) << S_RXTERMADJ)
+#define G_RXTERMADJ(x) (((x) >> S_RXTERMADJ) & M_RXTERMADJ)
+
+#define S_TXTERMADJ 15
+#define M_TXTERMADJ 0x3
+#define V_TXTERMADJ(x) ((x) << S_TXTERMADJ)
+#define G_TXTERMADJ(x) (((x) >> S_TXTERMADJ) & M_TXTERMADJ)
+
+#define S_DEQ 11
+#define M_DEQ 0xf
+#define V_DEQ(x) ((x) << S_DEQ)
+#define G_DEQ(x) (((x) >> S_DEQ) & M_DEQ)
+
+#define S_DTX 7
+#define M_DTX 0xf
+#define V_DTX(x) ((x) << S_DTX)
+#define G_DTX(x) (((x) >> S_DTX) & M_DTX)
+
+#define S_LODRV 6
+#define V_LODRV(x) ((x) << S_LODRV)
+#define F_LODRV V_LODRV(1U)
+
+#define S_HIDRV 5
+#define V_HIDRV(x) ((x) << S_HIDRV)
+#define F_HIDRV V_HIDRV(1U)
+
+#define S_INTPARRESET 4
+#define V_INTPARRESET(x) ((x) << S_INTPARRESET)
+#define F_INTPARRESET V_INTPARRESET(1U)
+
+#define S_INTPARLPBK 3
+#define V_INTPARLPBK(x) ((x) << S_INTPARLPBK)
+#define F_INTPARLPBK V_INTPARLPBK(1U)
+
+#define S_INTSERLPBKWDRV 2
+#define V_INTSERLPBKWDRV(x) ((x) << S_INTSERLPBKWDRV)
+#define F_INTSERLPBKWDRV V_INTSERLPBKWDRV(1U)
+
+#define S_PW 1
+#define V_PW(x) ((x) << S_PW)
+#define F_PW V_PW(1U)
+
+#define S_PCLKDETECT 0
+#define V_PCLKDETECT(x) ((x) << S_PCLKDETECT)
+#define F_PCLKDETECT V_PCLKDETECT(1U)
+
+#define A_PCIE_SERDES_STATUS1 0xb4
+
+#define S_CMULOCK 31
+#define V_CMULOCK(x) ((x) << S_CMULOCK)
+#define F_CMULOCK V_CMULOCK(1U)
+
+#define S_RXKLOCKLANE7 23
+#define V_RXKLOCKLANE7(x) ((x) << S_RXKLOCKLANE7)
+#define F_RXKLOCKLANE7 V_RXKLOCKLANE7(1U)
+
+#define S_RXKLOCKLANE6 22
+#define V_RXKLOCKLANE6(x) ((x) << S_RXKLOCKLANE6)
+#define F_RXKLOCKLANE6 V_RXKLOCKLANE6(1U)
+
+#define S_RXKLOCKLANE5 21
+#define V_RXKLOCKLANE5(x) ((x) << S_RXKLOCKLANE5)
+#define F_RXKLOCKLANE5 V_RXKLOCKLANE5(1U)
+
+#define S_RXKLOCKLANE4 20
+#define V_RXKLOCKLANE4(x) ((x) << S_RXKLOCKLANE4)
+#define F_RXKLOCKLANE4 V_RXKLOCKLANE4(1U)
+
+#define S_PCIE_RXKLOCKLANE3 19
+#define V_PCIE_RXKLOCKLANE3(x) ((x) << S_PCIE_RXKLOCKLANE3)
+#define F_PCIE_RXKLOCKLANE3 V_PCIE_RXKLOCKLANE3(1U)
+
+#define S_PCIE_RXKLOCKLANE2 18
+#define V_PCIE_RXKLOCKLANE2(x) ((x) << S_PCIE_RXKLOCKLANE2)
+#define F_PCIE_RXKLOCKLANE2 V_PCIE_RXKLOCKLANE2(1U)
+
+#define S_PCIE_RXKLOCKLANE1 17
+#define V_PCIE_RXKLOCKLANE1(x) ((x) << S_PCIE_RXKLOCKLANE1)
+#define F_PCIE_RXKLOCKLANE1 V_PCIE_RXKLOCKLANE1(1U)
+
+#define S_PCIE_RXKLOCKLANE0 16
+#define V_PCIE_RXKLOCKLANE0(x) ((x) << S_PCIE_RXKLOCKLANE0)
+#define F_PCIE_RXKLOCKLANE0 V_PCIE_RXKLOCKLANE0(1U)
+
+#define S_RXUFLOWLANE7 15
+#define V_RXUFLOWLANE7(x) ((x) << S_RXUFLOWLANE7)
+#define F_RXUFLOWLANE7 V_RXUFLOWLANE7(1U)
+
+#define S_RXUFLOWLANE6 14
+#define V_RXUFLOWLANE6(x) ((x) << S_RXUFLOWLANE6)
+#define F_RXUFLOWLANE6 V_RXUFLOWLANE6(1U)
+
+#define S_RXUFLOWLANE5 13
+#define V_RXUFLOWLANE5(x) ((x) << S_RXUFLOWLANE5)
+#define F_RXUFLOWLANE5 V_RXUFLOWLANE5(1U)
+
+#define S_RXUFLOWLANE4 12
+#define V_RXUFLOWLANE4(x) ((x) << S_RXUFLOWLANE4)
+#define F_RXUFLOWLANE4 V_RXUFLOWLANE4(1U)
+
+#define S_PCIE_RXUFLOWLANE3 11
+#define V_PCIE_RXUFLOWLANE3(x) ((x) << S_PCIE_RXUFLOWLANE3)
+#define F_PCIE_RXUFLOWLANE3 V_PCIE_RXUFLOWLANE3(1U)
+
+#define S_PCIE_RXUFLOWLANE2 10
+#define V_PCIE_RXUFLOWLANE2(x) ((x) << S_PCIE_RXUFLOWLANE2)
+#define F_PCIE_RXUFLOWLANE2 V_PCIE_RXUFLOWLANE2(1U)
+
+#define S_PCIE_RXUFLOWLANE1 9
+#define V_PCIE_RXUFLOWLANE1(x) ((x) << S_PCIE_RXUFLOWLANE1)
+#define F_PCIE_RXUFLOWLANE1 V_PCIE_RXUFLOWLANE1(1U)
+
+#define S_PCIE_RXUFLOWLANE0 8
+#define V_PCIE_RXUFLOWLANE0(x) ((x) << S_PCIE_RXUFLOWLANE0)
+#define F_PCIE_RXUFLOWLANE0 V_PCIE_RXUFLOWLANE0(1U)
+
+#define S_RXOFLOWLANE7 7
+#define V_RXOFLOWLANE7(x) ((x) << S_RXOFLOWLANE7)
+#define F_RXOFLOWLANE7 V_RXOFLOWLANE7(1U)
+
+#define S_RXOFLOWLANE6 6
+#define V_RXOFLOWLANE6(x) ((x) << S_RXOFLOWLANE6)
+#define F_RXOFLOWLANE6 V_RXOFLOWLANE6(1U)
+
+#define S_RXOFLOWLANE5 5
+#define V_RXOFLOWLANE5(x) ((x) << S_RXOFLOWLANE5)
+#define F_RXOFLOWLANE5 V_RXOFLOWLANE5(1U)
+
+#define S_RXOFLOWLANE4 4
+#define V_RXOFLOWLANE4(x) ((x) << S_RXOFLOWLANE4)
+#define F_RXOFLOWLANE4 V_RXOFLOWLANE4(1U)
+
+#define S_PCIE_RXOFLOWLANE3 3
+#define V_PCIE_RXOFLOWLANE3(x) ((x) << S_PCIE_RXOFLOWLANE3)
+#define F_PCIE_RXOFLOWLANE3 V_PCIE_RXOFLOWLANE3(1U)
+
+#define S_PCIE_RXOFLOWLANE2 2
+#define V_PCIE_RXOFLOWLANE2(x) ((x) << S_PCIE_RXOFLOWLANE2)
+#define F_PCIE_RXOFLOWLANE2 V_PCIE_RXOFLOWLANE2(1U)
+
+#define S_PCIE_RXOFLOWLANE1 1
+#define V_PCIE_RXOFLOWLANE1(x) ((x) << S_PCIE_RXOFLOWLANE1)
+#define F_PCIE_RXOFLOWLANE1 V_PCIE_RXOFLOWLANE1(1U)
+
+#define S_PCIE_RXOFLOWLANE0 0
+#define V_PCIE_RXOFLOWLANE0(x) ((x) << S_PCIE_RXOFLOWLANE0)
+#define F_PCIE_RXOFLOWLANE0 V_PCIE_RXOFLOWLANE0(1U)
+
+#define A_PCIE_SERDES_LANE_CTRL 0xb4
+
+#define S_EXTBISTCHKERRCLR 22
+#define V_EXTBISTCHKERRCLR(x) ((x) << S_EXTBISTCHKERRCLR)
+#define F_EXTBISTCHKERRCLR V_EXTBISTCHKERRCLR(1U)
+
+#define S_EXTBISTCHKEN 21
+#define V_EXTBISTCHKEN(x) ((x) << S_EXTBISTCHKEN)
+#define F_EXTBISTCHKEN V_EXTBISTCHKEN(1U)
+
+#define S_EXTBISTGENEN 20
+#define V_EXTBISTGENEN(x) ((x) << S_EXTBISTGENEN)
+#define F_EXTBISTGENEN V_EXTBISTGENEN(1U)
+
+#define S_EXTBISTPAT 17
+#define M_EXTBISTPAT 0x7
+#define V_EXTBISTPAT(x) ((x) << S_EXTBISTPAT)
+#define G_EXTBISTPAT(x) (((x) >> S_EXTBISTPAT) & M_EXTBISTPAT)
+
+#define S_EXTPARRESET 16
+#define V_EXTPARRESET(x) ((x) << S_EXTPARRESET)
+#define F_EXTPARRESET V_EXTPARRESET(1U)
+
+#define S_EXTPARLPBK 15
+#define V_EXTPARLPBK(x) ((x) << S_EXTPARLPBK)
+#define F_EXTPARLPBK V_EXTPARLPBK(1U)
+
+#define S_MANRXTERMEN 14
+#define V_MANRXTERMEN(x) ((x) << S_MANRXTERMEN)
+#define F_MANRXTERMEN V_MANRXTERMEN(1U)
+
+#define S_MANBEACONTXEN 13
+#define V_MANBEACONTXEN(x) ((x) << S_MANBEACONTXEN)
+#define F_MANBEACONTXEN V_MANBEACONTXEN(1U)
+
+#define S_MANRXDETECTEN 12
+#define V_MANRXDETECTEN(x) ((x) << S_MANRXDETECTEN)
+#define F_MANRXDETECTEN V_MANRXDETECTEN(1U)
+
+#define S_MANTXIDLEEN 11
+#define V_MANTXIDLEEN(x) ((x) << S_MANTXIDLEEN)
+#define F_MANTXIDLEEN V_MANTXIDLEEN(1U)
+
+#define S_MANRXIDLEEN 10
+#define V_MANRXIDLEEN(x) ((x) << S_MANRXIDLEEN)
+#define F_MANRXIDLEEN V_MANRXIDLEEN(1U)
+
+#define S_MANL1PWRDN 9
+#define V_MANL1PWRDN(x) ((x) << S_MANL1PWRDN)
+#define F_MANL1PWRDN V_MANL1PWRDN(1U)
+
+#define S_MANRESET 8
+#define V_MANRESET(x) ((x) << S_MANRESET)
+#define F_MANRESET V_MANRESET(1U)
+
+#define S_MANFMOFFSET 3
+#define M_MANFMOFFSET 0x1f
+#define V_MANFMOFFSET(x) ((x) << S_MANFMOFFSET)
+#define G_MANFMOFFSET(x) (((x) >> S_MANFMOFFSET) & M_MANFMOFFSET)
+
+#define S_MANFMOFFSETEN 2
+#define V_MANFMOFFSETEN(x) ((x) << S_MANFMOFFSETEN)
+#define F_MANFMOFFSETEN V_MANFMOFFSETEN(1U)
+
+#define S_MANLANEEN 1
+#define V_MANLANEEN(x) ((x) << S_MANLANEEN)
+#define F_MANLANEEN V_MANLANEEN(1U)
+
+#define S_INTSERLPBK 0
+#define V_INTSERLPBK(x) ((x) << S_INTSERLPBK)
+#define F_INTSERLPBK V_INTSERLPBK(1U)
+
+#define A_PCIE_SERDES_STATUS2 0xb8
+
+#define S_TXRECDETLANE7 31
+#define V_TXRECDETLANE7(x) ((x) << S_TXRECDETLANE7)
+#define F_TXRECDETLANE7 V_TXRECDETLANE7(1U)
+
+#define S_TXRECDETLANE6 30
+#define V_TXRECDETLANE6(x) ((x) << S_TXRECDETLANE6)
+#define F_TXRECDETLANE6 V_TXRECDETLANE6(1U)
+
+#define S_TXRECDETLANE5 29
+#define V_TXRECDETLANE5(x) ((x) << S_TXRECDETLANE5)
+#define F_TXRECDETLANE5 V_TXRECDETLANE5(1U)
+
+#define S_TXRECDETLANE4 28
+#define V_TXRECDETLANE4(x) ((x) << S_TXRECDETLANE4)
+#define F_TXRECDETLANE4 V_TXRECDETLANE4(1U)
+
+#define S_TXRECDETLANE3 27
+#define V_TXRECDETLANE3(x) ((x) << S_TXRECDETLANE3)
+#define F_TXRECDETLANE3 V_TXRECDETLANE3(1U)
+
+#define S_TXRECDETLANE2 26
+#define V_TXRECDETLANE2(x) ((x) << S_TXRECDETLANE2)
+#define F_TXRECDETLANE2 V_TXRECDETLANE2(1U)
+
+#define S_TXRECDETLANE1 25
+#define V_TXRECDETLANE1(x) ((x) << S_TXRECDETLANE1)
+#define F_TXRECDETLANE1 V_TXRECDETLANE1(1U)
+
+#define S_TXRECDETLANE0 24
+#define V_TXRECDETLANE0(x) ((x) << S_TXRECDETLANE0)
+#define F_TXRECDETLANE0 V_TXRECDETLANE0(1U)
+
+#define S_RXEIDLANE7 23
+#define V_RXEIDLANE7(x) ((x) << S_RXEIDLANE7)
+#define F_RXEIDLANE7 V_RXEIDLANE7(1U)
+
+#define S_RXEIDLANE6 22
+#define V_RXEIDLANE6(x) ((x) << S_RXEIDLANE6)
+#define F_RXEIDLANE6 V_RXEIDLANE6(1U)
+
+#define S_RXEIDLANE5 21
+#define V_RXEIDLANE5(x) ((x) << S_RXEIDLANE5)
+#define F_RXEIDLANE5 V_RXEIDLANE5(1U)
+
+#define S_RXEIDLANE4 20
+#define V_RXEIDLANE4(x) ((x) << S_RXEIDLANE4)
+#define F_RXEIDLANE4 V_RXEIDLANE4(1U)
+
+#define S_RXEIDLANE3 19
+#define V_RXEIDLANE3(x) ((x) << S_RXEIDLANE3)
+#define F_RXEIDLANE3 V_RXEIDLANE3(1U)
+
+#define S_RXEIDLANE2 18
+#define V_RXEIDLANE2(x) ((x) << S_RXEIDLANE2)
+#define F_RXEIDLANE2 V_RXEIDLANE2(1U)
+
+#define S_RXEIDLANE1 17
+#define V_RXEIDLANE1(x) ((x) << S_RXEIDLANE1)
+#define F_RXEIDLANE1 V_RXEIDLANE1(1U)
+
+#define S_RXEIDLANE0 16
+#define V_RXEIDLANE0(x) ((x) << S_RXEIDLANE0)
+#define F_RXEIDLANE0 V_RXEIDLANE0(1U)
+
+#define S_RXREMSKIPLANE7 15
+#define V_RXREMSKIPLANE7(x) ((x) << S_RXREMSKIPLANE7)
+#define F_RXREMSKIPLANE7 V_RXREMSKIPLANE7(1U)
+
+#define S_RXREMSKIPLANE6 14
+#define V_RXREMSKIPLANE6(x) ((x) << S_RXREMSKIPLANE6)
+#define F_RXREMSKIPLANE6 V_RXREMSKIPLANE6(1U)
+
+#define S_RXREMSKIPLANE5 13
+#define V_RXREMSKIPLANE5(x) ((x) << S_RXREMSKIPLANE5)
+#define F_RXREMSKIPLANE5 V_RXREMSKIPLANE5(1U)
+
+#define S_RXREMSKIPLANE4 12
+#define V_RXREMSKIPLANE4(x) ((x) << S_RXREMSKIPLANE4)
+#define F_RXREMSKIPLANE4 V_RXREMSKIPLANE4(1U)
+
+#define S_PCIE_RXREMSKIPLANE3 11
+#define V_PCIE_RXREMSKIPLANE3(x) ((x) << S_PCIE_RXREMSKIPLANE3)
+#define F_PCIE_RXREMSKIPLANE3 V_PCIE_RXREMSKIPLANE3(1U)
+
+#define S_PCIE_RXREMSKIPLANE2 10
+#define V_PCIE_RXREMSKIPLANE2(x) ((x) << S_PCIE_RXREMSKIPLANE2)
+#define F_PCIE_RXREMSKIPLANE2 V_PCIE_RXREMSKIPLANE2(1U)
+
+#define S_PCIE_RXREMSKIPLANE1 9
+#define V_PCIE_RXREMSKIPLANE1(x) ((x) << S_PCIE_RXREMSKIPLANE1)
+#define F_PCIE_RXREMSKIPLANE1 V_PCIE_RXREMSKIPLANE1(1U)
+
+#define S_PCIE_RXREMSKIPLANE0 8
+#define V_PCIE_RXREMSKIPLANE0(x) ((x) << S_PCIE_RXREMSKIPLANE0)
+#define F_PCIE_RXREMSKIPLANE0 V_PCIE_RXREMSKIPLANE0(1U)
+
+#define S_RXADDSKIPLANE7 7
+#define V_RXADDSKIPLANE7(x) ((x) << S_RXADDSKIPLANE7)
+#define F_RXADDSKIPLANE7 V_RXADDSKIPLANE7(1U)
+
+#define S_RXADDSKIPLANE6 6
+#define V_RXADDSKIPLANE6(x) ((x) << S_RXADDSKIPLANE6)
+#define F_RXADDSKIPLANE6 V_RXADDSKIPLANE6(1U)
+
+#define S_RXADDSKIPLANE5 5
+#define V_RXADDSKIPLANE5(x) ((x) << S_RXADDSKIPLANE5)
+#define F_RXADDSKIPLANE5 V_RXADDSKIPLANE5(1U)
+
+#define S_RXADDSKIPLANE4 4
+#define V_RXADDSKIPLANE4(x) ((x) << S_RXADDSKIPLANE4)
+#define F_RXADDSKIPLANE4 V_RXADDSKIPLANE4(1U)
+
+#define S_PCIE_RXADDSKIPLANE3 3
+#define V_PCIE_RXADDSKIPLANE3(x) ((x) << S_PCIE_RXADDSKIPLANE3)
+#define F_PCIE_RXADDSKIPLANE3 V_PCIE_RXADDSKIPLANE3(1U)
+
+#define S_PCIE_RXADDSKIPLANE2 2
+#define V_PCIE_RXADDSKIPLANE2(x) ((x) << S_PCIE_RXADDSKIPLANE2)
+#define F_PCIE_RXADDSKIPLANE2 V_PCIE_RXADDSKIPLANE2(1U)
+
+#define S_PCIE_RXADDSKIPLANE1 1
+#define V_PCIE_RXADDSKIPLANE1(x) ((x) << S_PCIE_RXADDSKIPLANE1)
+#define F_PCIE_RXADDSKIPLANE1 V_PCIE_RXADDSKIPLANE1(1U)
+
+#define S_PCIE_RXADDSKIPLANE0 0
+#define V_PCIE_RXADDSKIPLANE0(x) ((x) << S_PCIE_RXADDSKIPLANE0)
+#define F_PCIE_RXADDSKIPLANE0 V_PCIE_RXADDSKIPLANE0(1U)
+
+#define A_PCIE_SERDES_LANE_STAT 0xb8
+
+#define S_EXTBISTCHKERRCNT 8
+#define M_EXTBISTCHKERRCNT 0xffffff
+#define V_EXTBISTCHKERRCNT(x) ((x) << S_EXTBISTCHKERRCNT)
+#define G_EXTBISTCHKERRCNT(x) (((x) >> S_EXTBISTCHKERRCNT) & M_EXTBISTCHKERRCNT)
+
+#define S_EXTBISTCHKFMD 7
+#define V_EXTBISTCHKFMD(x) ((x) << S_EXTBISTCHKFMD)
+#define F_EXTBISTCHKFMD V_EXTBISTCHKFMD(1U)
+
+#define S_BEACONDETECTCHG 6
+#define V_BEACONDETECTCHG(x) ((x) << S_BEACONDETECTCHG)
+#define F_BEACONDETECTCHG V_BEACONDETECTCHG(1U)
+
+#define S_RXDETECTCHG 5
+#define V_RXDETECTCHG(x) ((x) << S_RXDETECTCHG)
+#define F_RXDETECTCHG V_RXDETECTCHG(1U)
+
+#define S_TXIDLEDETECTCHG 4
+#define V_TXIDLEDETECTCHG(x) ((x) << S_TXIDLEDETECTCHG)
+#define F_TXIDLEDETECTCHG V_TXIDLEDETECTCHG(1U)
+
+#define S_BEACONDETECT 2
+#define V_BEACONDETECT(x) ((x) << S_BEACONDETECT)
+#define F_BEACONDETECT V_BEACONDETECT(1U)
+
+#define S_RXDETECT 1
+#define V_RXDETECT(x) ((x) << S_RXDETECT)
+#define F_RXDETECT V_RXDETECT(1U)
+
+#define S_TXIDLEDETECT 0
+#define V_TXIDLEDETECT(x) ((x) << S_TXIDLEDETECT)
+#define F_TXIDLEDETECT V_TXIDLEDETECT(1U)
+
+#define A_PCIE_SERDES_BIST 0xbc
+
+#define S_PCIE_BISTDONE 24
+#define M_PCIE_BISTDONE 0xff
+#define V_PCIE_BISTDONE(x) ((x) << S_PCIE_BISTDONE)
+#define G_PCIE_BISTDONE(x) (((x) >> S_PCIE_BISTDONE) & M_PCIE_BISTDONE)
+
+#define S_PCIE_BISTCYCLETHRESH 3
+#define M_PCIE_BISTCYCLETHRESH 0xffff
+#define V_PCIE_BISTCYCLETHRESH(x) ((x) << S_PCIE_BISTCYCLETHRESH)
+#define G_PCIE_BISTCYCLETHRESH(x) (((x) >> S_PCIE_BISTCYCLETHRESH) & M_PCIE_BISTCYCLETHRESH)
+
+#define S_BISTMODE 0
+#define M_BISTMODE 0x7
+#define V_BISTMODE(x) ((x) << S_BISTMODE)
+#define G_BISTMODE(x) (((x) >> S_BISTMODE) & M_BISTMODE)
+
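Every register in this header follows one macro pattern: S_<FIELD> is the bit offset, M_<FIELD> is the right-justified mask of a multi-bit field, V_<FIELD>(x) shifts a value into position, F_<FIELD> is the one-bit flag form, and G_<FIELD>(x) extracts a field from a register word. A minimal read-modify-write sketch using the BIST mode field above; the helper name is illustrative, and the adapter_t handle and t3_read_reg()/t3_write_reg() accessors are assumed from elsewhere in this patch (cxgb_osdep.h):

	/* illustrative sketch, not part of the commit */
	static void
	set_pcie_bist_mode(adapter_t *adapter, unsigned int mode)
	{
		uint32_t v = t3_read_reg(adapter, A_PCIE_SERDES_BIST);

		v &= ~V_BISTMODE(M_BISTMODE);		/* clear bits 2:0 */
		v |= V_BISTMODE(mode & M_BISTMODE);	/* insert the new mode */
		t3_write_reg(adapter, A_PCIE_SERDES_BIST, v);
	}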
+/* registers for module T3DBG */
+#define T3DBG_BASE_ADDR 0xc0
+
+#define A_T3DBG_DBG0_CFG 0xc0
+
+#define S_REGSELECT 9
+#define M_REGSELECT 0xff
+#define V_REGSELECT(x) ((x) << S_REGSELECT)
+#define G_REGSELECT(x) (((x) >> S_REGSELECT) & M_REGSELECT)
+
+#define S_MODULESELECT 4
+#define M_MODULESELECT 0x1f
+#define V_MODULESELECT(x) ((x) << S_MODULESELECT)
+#define G_MODULESELECT(x) (((x) >> S_MODULESELECT) & M_MODULESELECT)
+
+#define S_CLKSELECT 0
+#define M_CLKSELECT 0xf
+#define V_CLKSELECT(x) ((x) << S_CLKSELECT)
+#define G_CLKSELECT(x) (((x) >> S_CLKSELECT) & M_CLKSELECT)
+
+#define A_T3DBG_DBG0_EN 0xc4
+
+#define S_SDRBYTE0 8
+#define V_SDRBYTE0(x) ((x) << S_SDRBYTE0)
+#define F_SDRBYTE0 V_SDRBYTE0(1U)
+
+#define S_DDREN 4
+#define V_DDREN(x) ((x) << S_DDREN)
+#define F_DDREN V_DDREN(1U)
+
+#define S_PORTEN 0
+#define V_PORTEN(x) ((x) << S_PORTEN)
+#define F_PORTEN V_PORTEN(1U)
+
+#define A_T3DBG_DBG1_CFG 0xc8
+#define A_T3DBG_DBG1_EN 0xcc
+#define A_T3DBG_GPIO_EN 0xd0
+
+#define S_GPIO11_OEN 27
+#define V_GPIO11_OEN(x) ((x) << S_GPIO11_OEN)
+#define F_GPIO11_OEN V_GPIO11_OEN(1U)
+
+#define S_GPIO10_OEN 26
+#define V_GPIO10_OEN(x) ((x) << S_GPIO10_OEN)
+#define F_GPIO10_OEN V_GPIO10_OEN(1U)
+
+#define S_GPIO9_OEN 25
+#define V_GPIO9_OEN(x) ((x) << S_GPIO9_OEN)
+#define F_GPIO9_OEN V_GPIO9_OEN(1U)
+
+#define S_GPIO8_OEN 24
+#define V_GPIO8_OEN(x) ((x) << S_GPIO8_OEN)
+#define F_GPIO8_OEN V_GPIO8_OEN(1U)
+
+#define S_GPIO7_OEN 23
+#define V_GPIO7_OEN(x) ((x) << S_GPIO7_OEN)
+#define F_GPIO7_OEN V_GPIO7_OEN(1U)
+
+#define S_GPIO6_OEN 22
+#define V_GPIO6_OEN(x) ((x) << S_GPIO6_OEN)
+#define F_GPIO6_OEN V_GPIO6_OEN(1U)
+
+#define S_GPIO5_OEN 21
+#define V_GPIO5_OEN(x) ((x) << S_GPIO5_OEN)
+#define F_GPIO5_OEN V_GPIO5_OEN(1U)
+
+#define S_GPIO4_OEN 20
+#define V_GPIO4_OEN(x) ((x) << S_GPIO4_OEN)
+#define F_GPIO4_OEN V_GPIO4_OEN(1U)
+
+#define S_GPIO3_OEN 19
+#define V_GPIO3_OEN(x) ((x) << S_GPIO3_OEN)
+#define F_GPIO3_OEN V_GPIO3_OEN(1U)
+
+#define S_GPIO2_OEN 18
+#define V_GPIO2_OEN(x) ((x) << S_GPIO2_OEN)
+#define F_GPIO2_OEN V_GPIO2_OEN(1U)
+
+#define S_GPIO1_OEN 17
+#define V_GPIO1_OEN(x) ((x) << S_GPIO1_OEN)
+#define F_GPIO1_OEN V_GPIO1_OEN(1U)
+
+#define S_GPIO0_OEN 16
+#define V_GPIO0_OEN(x) ((x) << S_GPIO0_OEN)
+#define F_GPIO0_OEN V_GPIO0_OEN(1U)
+
+#define S_GPIO11_OUT_VAL 11
+#define V_GPIO11_OUT_VAL(x) ((x) << S_GPIO11_OUT_VAL)
+#define F_GPIO11_OUT_VAL V_GPIO11_OUT_VAL(1U)
+
+#define S_GPIO10_OUT_VAL 10
+#define V_GPIO10_OUT_VAL(x) ((x) << S_GPIO10_OUT_VAL)
+#define F_GPIO10_OUT_VAL V_GPIO10_OUT_VAL(1U)
+
+#define S_GPIO9_OUT_VAL 9
+#define V_GPIO9_OUT_VAL(x) ((x) << S_GPIO9_OUT_VAL)
+#define F_GPIO9_OUT_VAL V_GPIO9_OUT_VAL(1U)
+
+#define S_GPIO8_OUT_VAL 8
+#define V_GPIO8_OUT_VAL(x) ((x) << S_GPIO8_OUT_VAL)
+#define F_GPIO8_OUT_VAL V_GPIO8_OUT_VAL(1U)
+
+#define S_GPIO7_OUT_VAL 7
+#define V_GPIO7_OUT_VAL(x) ((x) << S_GPIO7_OUT_VAL)
+#define F_GPIO7_OUT_VAL V_GPIO7_OUT_VAL(1U)
+
+#define S_GPIO6_OUT_VAL 6
+#define V_GPIO6_OUT_VAL(x) ((x) << S_GPIO6_OUT_VAL)
+#define F_GPIO6_OUT_VAL V_GPIO6_OUT_VAL(1U)
+
+#define S_GPIO5_OUT_VAL 5
+#define V_GPIO5_OUT_VAL(x) ((x) << S_GPIO5_OUT_VAL)
+#define F_GPIO5_OUT_VAL V_GPIO5_OUT_VAL(1U)
+
+#define S_GPIO4_OUT_VAL 4
+#define V_GPIO4_OUT_VAL(x) ((x) << S_GPIO4_OUT_VAL)
+#define F_GPIO4_OUT_VAL V_GPIO4_OUT_VAL(1U)
+
+#define S_GPIO3_OUT_VAL 3
+#define V_GPIO3_OUT_VAL(x) ((x) << S_GPIO3_OUT_VAL)
+#define F_GPIO3_OUT_VAL V_GPIO3_OUT_VAL(1U)
+
+#define S_GPIO2_OUT_VAL 2
+#define V_GPIO2_OUT_VAL(x) ((x) << S_GPIO2_OUT_VAL)
+#define F_GPIO2_OUT_VAL V_GPIO2_OUT_VAL(1U)
+
+#define S_GPIO1_OUT_VAL 1
+#define V_GPIO1_OUT_VAL(x) ((x) << S_GPIO1_OUT_VAL)
+#define F_GPIO1_OUT_VAL V_GPIO1_OUT_VAL(1U)
+
+#define S_GPIO0_OUT_VAL 0
+#define V_GPIO0_OUT_VAL(x) ((x) << S_GPIO0_OUT_VAL)
+#define F_GPIO0_OUT_VAL V_GPIO0_OUT_VAL(1U)
+
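The OEN and OUT_VAL pairs above combine in the obvious way: driving a GPIO pin means enabling its output driver and setting its value bit in the same register. A sketch assuming the driver's t3_set_reg_field() read-modify-write helper (declared elsewhere in this patch); the choice of GPIO5 is illustrative:

	/* drive GPIO5 high: enable the output driver and set the value bit */
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN,
			 F_GPIO5_OEN | F_GPIO5_OUT_VAL,
			 F_GPIO5_OEN | F_GPIO5_OUT_VAL);

	/* release the pin again: clear only the output enable */
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO5_OEN, 0);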
+#define A_T3DBG_GPIO_IN 0xd4
+
+#define S_GPIO11_IN 11
+#define V_GPIO11_IN(x) ((x) << S_GPIO11_IN)
+#define F_GPIO11_IN V_GPIO11_IN(1U)
+
+#define S_GPIO10_IN 10
+#define V_GPIO10_IN(x) ((x) << S_GPIO10_IN)
+#define F_GPIO10_IN V_GPIO10_IN(1U)
+
+#define S_GPIO9_IN 9
+#define V_GPIO9_IN(x) ((x) << S_GPIO9_IN)
+#define F_GPIO9_IN V_GPIO9_IN(1U)
+
+#define S_GPIO8_IN 8
+#define V_GPIO8_IN(x) ((x) << S_GPIO8_IN)
+#define F_GPIO8_IN V_GPIO8_IN(1U)
+
+#define S_GPIO7_IN 7
+#define V_GPIO7_IN(x) ((x) << S_GPIO7_IN)
+#define F_GPIO7_IN V_GPIO7_IN(1U)
+
+#define S_GPIO6_IN 6
+#define V_GPIO6_IN(x) ((x) << S_GPIO6_IN)
+#define F_GPIO6_IN V_GPIO6_IN(1U)
+
+#define S_GPIO5_IN 5
+#define V_GPIO5_IN(x) ((x) << S_GPIO5_IN)
+#define F_GPIO5_IN V_GPIO5_IN(1U)
+
+#define S_GPIO4_IN 4
+#define V_GPIO4_IN(x) ((x) << S_GPIO4_IN)
+#define F_GPIO4_IN V_GPIO4_IN(1U)
+
+#define S_GPIO3_IN 3
+#define V_GPIO3_IN(x) ((x) << S_GPIO3_IN)
+#define F_GPIO3_IN V_GPIO3_IN(1U)
+
+#define S_GPIO2_IN 2
+#define V_GPIO2_IN(x) ((x) << S_GPIO2_IN)
+#define F_GPIO2_IN V_GPIO2_IN(1U)
+
+#define S_GPIO1_IN 1
+#define V_GPIO1_IN(x) ((x) << S_GPIO1_IN)
+#define F_GPIO1_IN V_GPIO1_IN(1U)
+
+#define S_GPIO0_IN 0
+#define V_GPIO0_IN(x) ((x) << S_GPIO0_IN)
+#define F_GPIO0_IN V_GPIO0_IN(1U)
+
+#define S_GPIO11_CHG_DET 27
+#define V_GPIO11_CHG_DET(x) ((x) << S_GPIO11_CHG_DET)
+#define F_GPIO11_CHG_DET V_GPIO11_CHG_DET(1U)
+
+#define S_GPIO10_CHG_DET 26
+#define V_GPIO10_CHG_DET(x) ((x) << S_GPIO10_CHG_DET)
+#define F_GPIO10_CHG_DET V_GPIO10_CHG_DET(1U)
+
+#define S_GPIO9_CHG_DET 25
+#define V_GPIO9_CHG_DET(x) ((x) << S_GPIO9_CHG_DET)
+#define F_GPIO9_CHG_DET V_GPIO9_CHG_DET(1U)
+
+#define S_GPIO8_CHG_DET 24
+#define V_GPIO8_CHG_DET(x) ((x) << S_GPIO8_CHG_DET)
+#define F_GPIO8_CHG_DET V_GPIO8_CHG_DET(1U)
+
+#define S_GPIO7_CHG_DET 23
+#define V_GPIO7_CHG_DET(x) ((x) << S_GPIO7_CHG_DET)
+#define F_GPIO7_CHG_DET V_GPIO7_CHG_DET(1U)
+
+#define S_GPIO6_CHG_DET 22
+#define V_GPIO6_CHG_DET(x) ((x) << S_GPIO6_CHG_DET)
+#define F_GPIO6_CHG_DET V_GPIO6_CHG_DET(1U)
+
+#define S_GPIO5_CHG_DET 21
+#define V_GPIO5_CHG_DET(x) ((x) << S_GPIO5_CHG_DET)
+#define F_GPIO5_CHG_DET V_GPIO5_CHG_DET(1U)
+
+#define S_GPIO4_CHG_DET 20
+#define V_GPIO4_CHG_DET(x) ((x) << S_GPIO4_CHG_DET)
+#define F_GPIO4_CHG_DET V_GPIO4_CHG_DET(1U)
+
+#define S_GPIO3_CHG_DET 19
+#define V_GPIO3_CHG_DET(x) ((x) << S_GPIO3_CHG_DET)
+#define F_GPIO3_CHG_DET V_GPIO3_CHG_DET(1U)
+
+#define S_GPIO2_CHG_DET 18
+#define V_GPIO2_CHG_DET(x) ((x) << S_GPIO2_CHG_DET)
+#define F_GPIO2_CHG_DET V_GPIO2_CHG_DET(1U)
+
+#define S_GPIO1_CHG_DET 17
+#define V_GPIO1_CHG_DET(x) ((x) << S_GPIO1_CHG_DET)
+#define F_GPIO1_CHG_DET V_GPIO1_CHG_DET(1U)
+
+#define S_GPIO0_CHG_DET 16
+#define V_GPIO0_CHG_DET(x) ((x) << S_GPIO0_CHG_DET)
+#define F_GPIO0_CHG_DET V_GPIO0_CHG_DET(1U)
+
+#define A_T3DBG_INT_ENABLE 0xd8
+
+#define S_C_LOCK 21
+#define V_C_LOCK(x) ((x) << S_C_LOCK)
+#define F_C_LOCK V_C_LOCK(1U)
+
+#define S_M_LOCK 20
+#define V_M_LOCK(x) ((x) << S_M_LOCK)
+#define F_M_LOCK V_M_LOCK(1U)
+
+#define S_U_LOCK 19
+#define V_U_LOCK(x) ((x) << S_U_LOCK)
+#define F_U_LOCK V_U_LOCK(1U)
+
+#define S_R_LOCK 18
+#define V_R_LOCK(x) ((x) << S_R_LOCK)
+#define F_R_LOCK V_R_LOCK(1U)
+
+#define S_PX_LOCK 17
+#define V_PX_LOCK(x) ((x) << S_PX_LOCK)
+#define F_PX_LOCK V_PX_LOCK(1U)
+
+#define S_PE_LOCK 16
+#define V_PE_LOCK(x) ((x) << S_PE_LOCK)
+#define F_PE_LOCK V_PE_LOCK(1U)
+
+#define S_GPIO11 11
+#define V_GPIO11(x) ((x) << S_GPIO11)
+#define F_GPIO11 V_GPIO11(1U)
+
+#define S_GPIO10 10
+#define V_GPIO10(x) ((x) << S_GPIO10)
+#define F_GPIO10 V_GPIO10(1U)
+
+#define S_GPIO9 9
+#define V_GPIO9(x) ((x) << S_GPIO9)
+#define F_GPIO9 V_GPIO9(1U)
+
+#define S_GPIO8 8
+#define V_GPIO8(x) ((x) << S_GPIO8)
+#define F_GPIO8 V_GPIO8(1U)
+
+#define S_GPIO7 7
+#define V_GPIO7(x) ((x) << S_GPIO7)
+#define F_GPIO7 V_GPIO7(1U)
+
+#define S_GPIO6 6
+#define V_GPIO6(x) ((x) << S_GPIO6)
+#define F_GPIO6 V_GPIO6(1U)
+
+#define S_GPIO5 5
+#define V_GPIO5(x) ((x) << S_GPIO5)
+#define F_GPIO5 V_GPIO5(1U)
+
+#define S_GPIO4 4
+#define V_GPIO4(x) ((x) << S_GPIO4)
+#define F_GPIO4 V_GPIO4(1U)
+
+#define S_GPIO3 3
+#define V_GPIO3(x) ((x) << S_GPIO3)
+#define F_GPIO3 V_GPIO3(1U)
+
+#define S_GPIO2 2
+#define V_GPIO2(x) ((x) << S_GPIO2)
+#define F_GPIO2 V_GPIO2(1U)
+
+#define S_GPIO1 1
+#define V_GPIO1(x) ((x) << S_GPIO1)
+#define F_GPIO1 V_GPIO1(1U)
+
+#define S_GPIO0 0
+#define V_GPIO0(x) ((x) << S_GPIO0)
+#define F_GPIO0 V_GPIO0(1U)
+
+#define A_T3DBG_INT_CAUSE 0xdc
+#define A_T3DBG_DBG0_RST_VALUE 0xe0
+
+#define S_DEBUGDATA 0
+#define V_DEBUGDATA(x) ((x) << S_DEBUGDATA)
+#define F_DEBUGDATA V_DEBUGDATA(1U)
+
+#define A_T3DBG_PLL_OCLK_PAD_EN 0xe4
+
+#define S_PCIE_OCLK_EN 20
+#define V_PCIE_OCLK_EN(x) ((x) << S_PCIE_OCLK_EN)
+#define F_PCIE_OCLK_EN V_PCIE_OCLK_EN(1U)
+
+#define S_PCIX_OCLK_EN 16
+#define V_PCIX_OCLK_EN(x) ((x) << S_PCIX_OCLK_EN)
+#define F_PCIX_OCLK_EN V_PCIX_OCLK_EN(1U)
+
+#define S_U_OCLK_EN 12
+#define V_U_OCLK_EN(x) ((x) << S_U_OCLK_EN)
+#define F_U_OCLK_EN V_U_OCLK_EN(1U)
+
+#define S_R_OCLK_EN 8
+#define V_R_OCLK_EN(x) ((x) << S_R_OCLK_EN)
+#define F_R_OCLK_EN V_R_OCLK_EN(1U)
+
+#define S_M_OCLK_EN 4
+#define V_M_OCLK_EN(x) ((x) << S_M_OCLK_EN)
+#define F_M_OCLK_EN V_M_OCLK_EN(1U)
+
+#define S_C_OCLK_EN 0
+#define V_C_OCLK_EN(x) ((x) << S_C_OCLK_EN)
+#define F_C_OCLK_EN V_C_OCLK_EN(1U)
+
+#define S_PCLKTREE_DBG_EN 17
+#define V_PCLKTREE_DBG_EN(x) ((x) << S_PCLKTREE_DBG_EN)
+#define F_PCLKTREE_DBG_EN V_PCLKTREE_DBG_EN(1U)
+
+#define A_T3DBG_PLL_LOCK 0xe8
+
+#define S_PCIE_LOCK 20
+#define V_PCIE_LOCK(x) ((x) << S_PCIE_LOCK)
+#define F_PCIE_LOCK V_PCIE_LOCK(1U)
+
+#define S_PCIX_LOCK 16
+#define V_PCIX_LOCK(x) ((x) << S_PCIX_LOCK)
+#define F_PCIX_LOCK V_PCIX_LOCK(1U)
+
+#define S_PLL_U_LOCK 12
+#define V_PLL_U_LOCK(x) ((x) << S_PLL_U_LOCK)
+#define F_PLL_U_LOCK V_PLL_U_LOCK(1U)
+
+#define S_PLL_R_LOCK 8
+#define V_PLL_R_LOCK(x) ((x) << S_PLL_R_LOCK)
+#define F_PLL_R_LOCK V_PLL_R_LOCK(1U)
+
+#define S_PLL_M_LOCK 4
+#define V_PLL_M_LOCK(x) ((x) << S_PLL_M_LOCK)
+#define F_PLL_M_LOCK V_PLL_M_LOCK(1U)
+
+#define S_PLL_C_LOCK 0
+#define V_PLL_C_LOCK(x) ((x) << S_PLL_C_LOCK)
+#define F_PLL_C_LOCK V_PLL_C_LOCK(1U)
+
+#define A_T3DBG_SERDES_RBC_CFG 0xec
+
+#define S_X_RBC_LANE_SEL 16
+#define V_X_RBC_LANE_SEL(x) ((x) << S_X_RBC_LANE_SEL)
+#define F_X_RBC_LANE_SEL V_X_RBC_LANE_SEL(1U)
+
+#define S_X_RBC_DBG_EN 12
+#define V_X_RBC_DBG_EN(x) ((x) << S_X_RBC_DBG_EN)
+#define F_X_RBC_DBG_EN V_X_RBC_DBG_EN(1U)
+
+#define S_X_SERDES_SEL 8
+#define V_X_SERDES_SEL(x) ((x) << S_X_SERDES_SEL)
+#define F_X_SERDES_SEL V_X_SERDES_SEL(1U)
+
+#define S_PE_RBC_LANE_SEL 4
+#define V_PE_RBC_LANE_SEL(x) ((x) << S_PE_RBC_LANE_SEL)
+#define F_PE_RBC_LANE_SEL V_PE_RBC_LANE_SEL(1U)
+
+#define S_PE_RBC_DBG_EN 0
+#define V_PE_RBC_DBG_EN(x) ((x) << S_PE_RBC_DBG_EN)
+#define F_PE_RBC_DBG_EN V_PE_RBC_DBG_EN(1U)
+
+#define A_T3DBG_GPIO_ACT_LOW 0xf0
+
+#define S_C_LOCK_ACT_LOW 21
+#define V_C_LOCK_ACT_LOW(x) ((x) << S_C_LOCK_ACT_LOW)
+#define F_C_LOCK_ACT_LOW V_C_LOCK_ACT_LOW(1U)
+
+#define S_M_LOCK_ACT_LOW 20
+#define V_M_LOCK_ACT_LOW(x) ((x) << S_M_LOCK_ACT_LOW)
+#define F_M_LOCK_ACT_LOW V_M_LOCK_ACT_LOW(1U)
+
+#define S_U_LOCK_ACT_LOW 19
+#define V_U_LOCK_ACT_LOW(x) ((x) << S_U_LOCK_ACT_LOW)
+#define F_U_LOCK_ACT_LOW V_U_LOCK_ACT_LOW(1U)
+
+#define S_R_LOCK_ACT_LOW 18
+#define V_R_LOCK_ACT_LOW(x) ((x) << S_R_LOCK_ACT_LOW)
+#define F_R_LOCK_ACT_LOW V_R_LOCK_ACT_LOW(1U)
+
+#define S_PX_LOCK_ACT_LOW 17
+#define V_PX_LOCK_ACT_LOW(x) ((x) << S_PX_LOCK_ACT_LOW)
+#define F_PX_LOCK_ACT_LOW V_PX_LOCK_ACT_LOW(1U)
+
+#define S_PE_LOCK_ACT_LOW 16
+#define V_PE_LOCK_ACT_LOW(x) ((x) << S_PE_LOCK_ACT_LOW)
+#define F_PE_LOCK_ACT_LOW V_PE_LOCK_ACT_LOW(1U)
+
+#define S_GPIO11_ACT_LOW 11
+#define V_GPIO11_ACT_LOW(x) ((x) << S_GPIO11_ACT_LOW)
+#define F_GPIO11_ACT_LOW V_GPIO11_ACT_LOW(1U)
+
+#define S_GPIO10_ACT_LOW 10
+#define V_GPIO10_ACT_LOW(x) ((x) << S_GPIO10_ACT_LOW)
+#define F_GPIO10_ACT_LOW V_GPIO10_ACT_LOW(1U)
+
+#define S_GPIO9_ACT_LOW 9
+#define V_GPIO9_ACT_LOW(x) ((x) << S_GPIO9_ACT_LOW)
+#define F_GPIO9_ACT_LOW V_GPIO9_ACT_LOW(1U)
+
+#define S_GPIO8_ACT_LOW 8
+#define V_GPIO8_ACT_LOW(x) ((x) << S_GPIO8_ACT_LOW)
+#define F_GPIO8_ACT_LOW V_GPIO8_ACT_LOW(1U)
+
+#define S_GPIO7_ACT_LOW 7
+#define V_GPIO7_ACT_LOW(x) ((x) << S_GPIO7_ACT_LOW)
+#define F_GPIO7_ACT_LOW V_GPIO7_ACT_LOW(1U)
+
+#define S_GPIO6_ACT_LOW 6
+#define V_GPIO6_ACT_LOW(x) ((x) << S_GPIO6_ACT_LOW)
+#define F_GPIO6_ACT_LOW V_GPIO6_ACT_LOW(1U)
+
+#define S_GPIO5_ACT_LOW 5
+#define V_GPIO5_ACT_LOW(x) ((x) << S_GPIO5_ACT_LOW)
+#define F_GPIO5_ACT_LOW V_GPIO5_ACT_LOW(1U)
+
+#define S_GPIO4_ACT_LOW 4
+#define V_GPIO4_ACT_LOW(x) ((x) << S_GPIO4_ACT_LOW)
+#define F_GPIO4_ACT_LOW V_GPIO4_ACT_LOW(1U)
+
+#define S_GPIO3_ACT_LOW 3
+#define V_GPIO3_ACT_LOW(x) ((x) << S_GPIO3_ACT_LOW)
+#define F_GPIO3_ACT_LOW V_GPIO3_ACT_LOW(1U)
+
+#define S_GPIO2_ACT_LOW 2
+#define V_GPIO2_ACT_LOW(x) ((x) << S_GPIO2_ACT_LOW)
+#define F_GPIO2_ACT_LOW V_GPIO2_ACT_LOW(1U)
+
+#define S_GPIO1_ACT_LOW 1
+#define V_GPIO1_ACT_LOW(x) ((x) << S_GPIO1_ACT_LOW)
+#define F_GPIO1_ACT_LOW V_GPIO1_ACT_LOW(1U)
+
+#define S_GPIO0_ACT_LOW 0
+#define V_GPIO0_ACT_LOW(x) ((x) << S_GPIO0_ACT_LOW)
+#define F_GPIO0_ACT_LOW V_GPIO0_ACT_LOW(1U)
+
+#define A_T3DBG_PMON_CFG 0xf4
+
+#define S_PMON_DONE 29
+#define V_PMON_DONE(x) ((x) << S_PMON_DONE)
+#define F_PMON_DONE V_PMON_DONE(1U)
+
+#define S_PMON_FAIL 28
+#define V_PMON_FAIL(x) ((x) << S_PMON_FAIL)
+#define F_PMON_FAIL V_PMON_FAIL(1U)
+
+#define S_PMON_FDEL_AUTO 22
+#define V_PMON_FDEL_AUTO(x) ((x) << S_PMON_FDEL_AUTO)
+#define F_PMON_FDEL_AUTO V_PMON_FDEL_AUTO(1U)
+
+#define S_PMON_CDEL_AUTO 16
+#define V_PMON_CDEL_AUTO(x) ((x) << S_PMON_CDEL_AUTO)
+#define F_PMON_CDEL_AUTO V_PMON_CDEL_AUTO(1U)
+
+#define S_PMON_FDEL_MANUAL 10
+#define V_PMON_FDEL_MANUAL(x) ((x) << S_PMON_FDEL_MANUAL)
+#define F_PMON_FDEL_MANUAL V_PMON_FDEL_MANUAL(1U)
+
+#define S_PMON_CDEL_MANUAL 4
+#define V_PMON_CDEL_MANUAL(x) ((x) << S_PMON_CDEL_MANUAL)
+#define F_PMON_CDEL_MANUAL V_PMON_CDEL_MANUAL(1U)
+
+#define S_PMON_MANUAL 1
+#define V_PMON_MANUAL(x) ((x) << S_PMON_MANUAL)
+#define F_PMON_MANUAL V_PMON_MANUAL(1U)
+
+#define S_PMON_AUTO 0
+#define V_PMON_AUTO(x) ((x) << S_PMON_AUTO)
+#define F_PMON_AUTO V_PMON_AUTO(1U)
+
+#define A_T3DBG_SERDES_REFCLK_CFG 0xf8
+
+#define S_PE_REFCLK_DBG_EN 12
+#define V_PE_REFCLK_DBG_EN(x) ((x) << S_PE_REFCLK_DBG_EN)
+#define F_PE_REFCLK_DBG_EN V_PE_REFCLK_DBG_EN(1U)
+
+#define S_X_REFCLK_DBG_EN 8
+#define V_X_REFCLK_DBG_EN(x) ((x) << S_X_REFCLK_DBG_EN)
+#define F_X_REFCLK_DBG_EN V_X_REFCLK_DBG_EN(1U)
+
+#define S_PE_REFCLK_TERMADJ 5
+#define M_PE_REFCLK_TERMADJ 0x3
+#define V_PE_REFCLK_TERMADJ(x) ((x) << S_PE_REFCLK_TERMADJ)
+#define G_PE_REFCLK_TERMADJ(x) (((x) >> S_PE_REFCLK_TERMADJ) & M_PE_REFCLK_TERMADJ)
+
+#define S_PE_REFCLK_PD 4
+#define V_PE_REFCLK_PD(x) ((x) << S_PE_REFCLK_PD)
+#define F_PE_REFCLK_PD V_PE_REFCLK_PD(1U)
+
+#define S_X_REFCLK_TERMADJ 1
+#define M_X_REFCLK_TERMADJ 0x3
+#define V_X_REFCLK_TERMADJ(x) ((x) << S_X_REFCLK_TERMADJ)
+#define G_X_REFCLK_TERMADJ(x) (((x) >> S_X_REFCLK_TERMADJ) & M_X_REFCLK_TERMADJ)
+
+#define S_X_REFCLK_PD 0
+#define V_X_REFCLK_PD(x) ((x) << S_X_REFCLK_PD)
+#define F_X_REFCLK_PD V_X_REFCLK_PD(1U)
+
+#define A_T3DBG_PCIE_PMA_BSPIN_CFG 0xfc
+
+#define S_BSMODEQUAD1 31
+#define V_BSMODEQUAD1(x) ((x) << S_BSMODEQUAD1)
+#define F_BSMODEQUAD1 V_BSMODEQUAD1(1U)
+
+#define S_BSINSELLANE7 29
+#define M_BSINSELLANE7 0x3
+#define V_BSINSELLANE7(x) ((x) << S_BSINSELLANE7)
+#define G_BSINSELLANE7(x) (((x) >> S_BSINSELLANE7) & M_BSINSELLANE7)
+
+#define S_BSENLANE7 28
+#define V_BSENLANE7(x) ((x) << S_BSENLANE7)
+#define F_BSENLANE7 V_BSENLANE7(1U)
+
+#define S_BSINSELLANE6 25
+#define M_BSINSELLANE6 0x3
+#define V_BSINSELLANE6(x) ((x) << S_BSINSELLANE6)
+#define G_BSINSELLANE6(x) (((x) >> S_BSINSELLANE6) & M_BSINSELLANE6)
+
+#define S_BSENLANE6 24
+#define V_BSENLANE6(x) ((x) << S_BSENLANE6)
+#define F_BSENLANE6 V_BSENLANE6(1U)
+
+#define S_BSINSELLANE5 21
+#define M_BSINSELLANE5 0x3
+#define V_BSINSELLANE5(x) ((x) << S_BSINSELLANE5)
+#define G_BSINSELLANE5(x) (((x) >> S_BSINSELLANE5) & M_BSINSELLANE5)
+
+#define S_BSENLANE5 20
+#define V_BSENLANE5(x) ((x) << S_BSENLANE5)
+#define F_BSENLANE5 V_BSENLANE5(1U)
+
+#define S_BSINSELLANE4 17
+#define M_BSINSELLANE4 0x3
+#define V_BSINSELLANE4(x) ((x) << S_BSINSELLANE4)
+#define G_BSINSELLANE4(x) (((x) >> S_BSINSELLANE4) & M_BSINSELLANE4)
+
+#define S_BSENLANE4 16
+#define V_BSENLANE4(x) ((x) << S_BSENLANE4)
+#define F_BSENLANE4 V_BSENLANE4(1U)
+
+#define S_BSMODEQUAD0 15
+#define V_BSMODEQUAD0(x) ((x) << S_BSMODEQUAD0)
+#define F_BSMODEQUAD0 V_BSMODEQUAD0(1U)
+
+#define S_BSINSELLANE3 13
+#define M_BSINSELLANE3 0x3
+#define V_BSINSELLANE3(x) ((x) << S_BSINSELLANE3)
+#define G_BSINSELLANE3(x) (((x) >> S_BSINSELLANE3) & M_BSINSELLANE3)
+
+#define S_BSENLANE3 12
+#define V_BSENLANE3(x) ((x) << S_BSENLANE3)
+#define F_BSENLANE3 V_BSENLANE3(1U)
+
+#define S_BSINSELLANE2 9
+#define M_BSINSELLANE2 0x3
+#define V_BSINSELLANE2(x) ((x) << S_BSINSELLANE2)
+#define G_BSINSELLANE2(x) (((x) >> S_BSINSELLANE2) & M_BSINSELLANE2)
+
+#define S_BSENLANE2 8
+#define V_BSENLANE2(x) ((x) << S_BSENLANE2)
+#define F_BSENLANE2 V_BSENLANE2(1U)
+
+#define S_BSINSELLANE1 5
+#define M_BSINSELLANE1 0x3
+#define V_BSINSELLANE1(x) ((x) << S_BSINSELLANE1)
+#define G_BSINSELLANE1(x) (((x) >> S_BSINSELLANE1) & M_BSINSELLANE1)
+
+#define S_BSENLANE1 4
+#define V_BSENLANE1(x) ((x) << S_BSENLANE1)
+#define F_BSENLANE1 V_BSENLANE1(1U)
+
+#define S_BSINSELLANE0 1
+#define M_BSINSELLANE0 0x3
+#define V_BSINSELLANE0(x) ((x) << S_BSINSELLANE0)
+#define G_BSINSELLANE0(x) (((x) >> S_BSINSELLANE0) & M_BSINSELLANE0)
+
+#define S_BSENLANE0 0
+#define V_BSENLANE0(x) ((x) << S_BSENLANE0)
+#define F_BSENLANE0 V_BSENLANE0(1U)
+
+/* registers for module MC7_PMRX */
+#define MC7_PMRX_BASE_ADDR 0x100
+
+#define A_MC7_CFG 0x100
+
+#define S_IMPSETUPDATE 14
+#define V_IMPSETUPDATE(x) ((x) << S_IMPSETUPDATE)
+#define F_IMPSETUPDATE V_IMPSETUPDATE(1U)
+
+#define S_IFEN 13
+#define V_IFEN(x) ((x) << S_IFEN)
+#define F_IFEN V_IFEN(1U)
+
+#define S_TERM300 12
+#define V_TERM300(x) ((x) << S_TERM300)
+#define F_TERM300 V_TERM300(1U)
+
+#define S_TERM150 11
+#define V_TERM150(x) ((x) << S_TERM150)
+#define F_TERM150 V_TERM150(1U)
+
+#define S_SLOW 10
+#define V_SLOW(x) ((x) << S_SLOW)
+#define F_SLOW V_SLOW(1U)
+
+#define S_WIDTH 8
+#define M_WIDTH 0x3
+#define V_WIDTH(x) ((x) << S_WIDTH)
+#define G_WIDTH(x) (((x) >> S_WIDTH) & M_WIDTH)
+
+#define S_ODTEN 7
+#define V_ODTEN(x) ((x) << S_ODTEN)
+#define F_ODTEN V_ODTEN(1U)
+
+#define S_BKS 6
+#define V_BKS(x) ((x) << S_BKS)
+#define F_BKS V_BKS(1U)
+
+#define S_ORG 5
+#define V_ORG(x) ((x) << S_ORG)
+#define F_ORG V_ORG(1U)
+
+#define S_DEN 2
+#define M_DEN 0x7
+#define V_DEN(x) ((x) << S_DEN)
+#define G_DEN(x) (((x) >> S_DEN) & M_DEN)
+
+#define S_RDY 1
+#define V_RDY(x) ((x) << S_RDY)
+#define F_RDY V_RDY(1U)
+
+#define S_CLKEN 0
+#define V_CLKEN(x) ((x) << S_CLKEN)
+#define F_CLKEN V_CLKEN(1U)
+
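The WIDTH, BKS, ORG, and DEN fields of A_MC7_CFG together encode the geometry of the attached memory, and the driver derives the part's size from them. A sketch patterned on the MC7 sizing logic in this patch's cxgb_t3_hw.c (presented here as an approximation, not a quote):

	/* decode an MC7 configuration word into a size in bytes */
	static unsigned int
	mc7_calc_size(uint32_t cfg)
	{
		unsigned int width = G_WIDTH(cfg);	  /* bus-width code */
		unsigned int banks = !!(cfg & F_BKS) + 1; /* bank multiplier */
		unsigned int org = !!(cfg & F_ORG) + 1;	  /* organization code */
		unsigned int density = G_DEN(cfg);	  /* part-density code */
		unsigned int mbs = ((256 << density) * banks) / (org << width);

		return (mbs << 20);
	}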
+#define A_MC7_MODE 0x104
+
+#define S_MODE 0
+#define M_MODE 0xffff
+#define V_MODE(x) ((x) << S_MODE)
+#define G_MODE(x) (((x) >> S_MODE) & M_MODE)
+
+#define A_MC7_EXT_MODE1 0x108
+
+#define S_OCDADJUSTMODE 20
+#define V_OCDADJUSTMODE(x) ((x) << S_OCDADJUSTMODE)
+#define F_OCDADJUSTMODE V_OCDADJUSTMODE(1U)
+
+#define S_OCDCODE 16
+#define M_OCDCODE 0xf
+#define V_OCDCODE(x) ((x) << S_OCDCODE)
+#define G_OCDCODE(x) (((x) >> S_OCDCODE) & M_OCDCODE)
+
+#define S_EXTMODE1 0
+#define M_EXTMODE1 0xffff
+#define V_EXTMODE1(x) ((x) << S_EXTMODE1)
+#define G_EXTMODE1(x) (((x) >> S_EXTMODE1) & M_EXTMODE1)
+
+#define A_MC7_EXT_MODE2 0x10c
+
+#define S_EXTMODE2 0
+#define M_EXTMODE2 0xffff
+#define V_EXTMODE2(x) ((x) << S_EXTMODE2)
+#define G_EXTMODE2(x) (((x) >> S_EXTMODE2) & M_EXTMODE2)
+
+#define A_MC7_EXT_MODE3 0x110
+
+#define S_EXTMODE3 0
+#define M_EXTMODE3 0xffff
+#define V_EXTMODE3(x) ((x) << S_EXTMODE3)
+#define G_EXTMODE3(x) (((x) >> S_EXTMODE3) & M_EXTMODE3)
+
+#define A_MC7_PRE 0x114
+#define A_MC7_REF 0x118
+
+#define S_PREREFDIV 1
+#define M_PREREFDIV 0x3fff
+#define V_PREREFDIV(x) ((x) << S_PREREFDIV)
+#define G_PREREFDIV(x) (((x) >> S_PREREFDIV) & M_PREREFDIV)
+
+#define S_PERREFEN 0
+#define V_PERREFEN(x) ((x) << S_PERREFEN)
+#define F_PERREFEN V_PERREFEN(1U)
+
+#define A_MC7_DLL 0x11c
+
+#define S_DLLLOCK 31
+#define V_DLLLOCK(x) ((x) << S_DLLLOCK)
+#define F_DLLLOCK V_DLLLOCK(1U)
+
+#define S_DLLDELTA 24
+#define M_DLLDELTA 0x7f
+#define V_DLLDELTA(x) ((x) << S_DLLDELTA)
+#define G_DLLDELTA(x) (((x) >> S_DLLDELTA) & M_DLLDELTA)
+
+#define S_MANDELTA 3
+#define M_MANDELTA 0x7f
+#define V_MANDELTA(x) ((x) << S_MANDELTA)
+#define G_MANDELTA(x) (((x) >> S_MANDELTA) & M_MANDELTA)
+
+#define S_DLLDELTASEL 2
+#define V_DLLDELTASEL(x) ((x) << S_DLLDELTASEL)
+#define F_DLLDELTASEL V_DLLDELTASEL(1U)
+
+#define S_DLLENB 1
+#define V_DLLENB(x) ((x) << S_DLLENB)
+#define F_DLLENB V_DLLENB(1U)
+
+#define S_DLLRST 0
+#define V_DLLRST(x) ((x) << S_DLLRST)
+#define F_DLLRST V_DLLRST(1U)
+
+#define A_MC7_PARM 0x120
+
+#define S_ACTTOPREDLY 26
+#define M_ACTTOPREDLY 0xf
+#define V_ACTTOPREDLY(x) ((x) << S_ACTTOPREDLY)
+#define G_ACTTOPREDLY(x) (((x) >> S_ACTTOPREDLY) & M_ACTTOPREDLY)
+
+#define S_ACTTORDWRDLY 23
+#define M_ACTTORDWRDLY 0x7
+#define V_ACTTORDWRDLY(x) ((x) << S_ACTTORDWRDLY)
+#define G_ACTTORDWRDLY(x) (((x) >> S_ACTTORDWRDLY) & M_ACTTORDWRDLY)
+
+#define S_PRECYC 20
+#define M_PRECYC 0x7
+#define V_PRECYC(x) ((x) << S_PRECYC)
+#define G_PRECYC(x) (((x) >> S_PRECYC) & M_PRECYC)
+
+#define S_REFCYC 13
+#define M_REFCYC 0x7f
+#define V_REFCYC(x) ((x) << S_REFCYC)
+#define G_REFCYC(x) (((x) >> S_REFCYC) & M_REFCYC)
+
+#define S_BKCYC 8
+#define M_BKCYC 0x1f
+#define V_BKCYC(x) ((x) << S_BKCYC)
+#define G_BKCYC(x) (((x) >> S_BKCYC) & M_BKCYC)
+
+#define S_WRTORDDLY 4
+#define M_WRTORDDLY 0xf
+#define V_WRTORDDLY(x) ((x) << S_WRTORDDLY)
+#define G_WRTORDDLY(x) (((x) >> S_WRTORDDLY) & M_WRTORDDLY)
+
+#define S_RDTOWRDLY 0
+#define M_RDTOWRDLY 0xf
+#define V_RDTOWRDLY(x) ((x) << S_RDTOWRDLY)
+#define G_RDTOWRDLY(x) (((x) >> S_RDTOWRDLY) & M_RDTOWRDLY)
+
+#define A_MC7_HWM_WRR 0x124
+
+#define S_MEM_HWM 26
+#define M_MEM_HWM 0x3f
+#define V_MEM_HWM(x) ((x) << S_MEM_HWM)
+#define G_MEM_HWM(x) (((x) >> S_MEM_HWM) & M_MEM_HWM)
+
+#define S_ULP_HWM 22
+#define M_ULP_HWM 0xf
+#define V_ULP_HWM(x) ((x) << S_ULP_HWM)
+#define G_ULP_HWM(x) (((x) >> S_ULP_HWM) & M_ULP_HWM)
+
+#define S_TOT_RLD_WT 14
+#define M_TOT_RLD_WT 0xff
+#define V_TOT_RLD_WT(x) ((x) << S_TOT_RLD_WT)
+#define G_TOT_RLD_WT(x) (((x) >> S_TOT_RLD_WT) & M_TOT_RLD_WT)
+
+#define S_MEM_RLD_WT 7
+#define M_MEM_RLD_WT 0x7f
+#define V_MEM_RLD_WT(x) ((x) << S_MEM_RLD_WT)
+#define G_MEM_RLD_WT(x) (((x) >> S_MEM_RLD_WT) & M_MEM_RLD_WT)
+
+#define S_ULP_RLD_WT 0
+#define M_ULP_RLD_WT 0x7f
+#define V_ULP_RLD_WT(x) ((x) << S_ULP_RLD_WT)
+#define G_ULP_RLD_WT(x) (((x) >> S_ULP_RLD_WT) & M_ULP_RLD_WT)
+
+#define A_MC7_CAL 0x128
+
+#define S_BUSY 31
+#define V_BUSY(x) ((x) << S_BUSY)
+#define F_BUSY V_BUSY(1U)
+
+#define S_CAL_FAULT 30
+#define V_CAL_FAULT(x) ((x) << S_CAL_FAULT)
+#define F_CAL_FAULT V_CAL_FAULT(1U)
+
+#define S_PER_CAL_DIV 22
+#define M_PER_CAL_DIV 0xff
+#define V_PER_CAL_DIV(x) ((x) << S_PER_CAL_DIV)
+#define G_PER_CAL_DIV(x) (((x) >> S_PER_CAL_DIV) & M_PER_CAL_DIV)
+
+#define S_PER_CAL_EN 21
+#define V_PER_CAL_EN(x) ((x) << S_PER_CAL_EN)
+#define F_PER_CAL_EN V_PER_CAL_EN(1U)
+
+#define S_SGL_CAL_EN 20
+#define V_SGL_CAL_EN(x) ((x) << S_SGL_CAL_EN)
+#define F_SGL_CAL_EN V_SGL_CAL_EN(1U)
+
+#define S_IMP_UPD_MODE 19
+#define V_IMP_UPD_MODE(x) ((x) << S_IMP_UPD_MODE)
+#define F_IMP_UPD_MODE V_IMP_UPD_MODE(1U)
+
+#define S_IMP_SEL 18
+#define V_IMP_SEL(x) ((x) << S_IMP_SEL)
+#define F_IMP_SEL V_IMP_SEL(1U)
+
+#define S_IMP_MAN_PD 15
+#define M_IMP_MAN_PD 0x7
+#define V_IMP_MAN_PD(x) ((x) << S_IMP_MAN_PD)
+#define G_IMP_MAN_PD(x) (((x) >> S_IMP_MAN_PD) & M_IMP_MAN_PD)
+
+#define S_IMP_MAN_PU 12
+#define M_IMP_MAN_PU 0x7
+#define V_IMP_MAN_PU(x) ((x) << S_IMP_MAN_PU)
+#define G_IMP_MAN_PU(x) (((x) >> S_IMP_MAN_PU) & M_IMP_MAN_PU)
+
+#define S_IMP_CAL_PD 9
+#define M_IMP_CAL_PD 0x7
+#define V_IMP_CAL_PD(x) ((x) << S_IMP_CAL_PD)
+#define G_IMP_CAL_PD(x) (((x) >> S_IMP_CAL_PD) & M_IMP_CAL_PD)
+
+#define S_IMP_CAL_PU 6
+#define M_IMP_CAL_PU 0x7
+#define V_IMP_CAL_PU(x) ((x) << S_IMP_CAL_PU)
+#define G_IMP_CAL_PU(x) (((x) >> S_IMP_CAL_PU) & M_IMP_CAL_PU)
+
+#define S_IMP_SET_PD 3
+#define M_IMP_SET_PD 0x7
+#define V_IMP_SET_PD(x) ((x) << S_IMP_SET_PD)
+#define G_IMP_SET_PD(x) (((x) >> S_IMP_SET_PD) & M_IMP_SET_PD)
+
+#define S_IMP_SET_PU 0
+#define M_IMP_SET_PU 0x7
+#define V_IMP_SET_PU(x) ((x) << S_IMP_SET_PU)
+#define G_IMP_SET_PU(x) (((x) >> S_IMP_SET_PU) & M_IMP_SET_PU)
+
+#define A_MC7_ERR_ADDR 0x12c
+
+#define S_ERRADDRESS 3
+#define M_ERRADDRESS 0x1fffffff
+#define V_ERRADDRESS(x) ((x) << S_ERRADDRESS)
+#define G_ERRADDRESS(x) (((x) >> S_ERRADDRESS) & M_ERRADDRESS)
+
+#define S_ERRAGENT 1
+#define M_ERRAGENT 0x3
+#define V_ERRAGENT(x) ((x) << S_ERRAGENT)
+#define G_ERRAGENT(x) (((x) >> S_ERRAGENT) & M_ERRAGENT)
+
+#define S_ERROP 0
+#define V_ERROP(x) ((x) << S_ERROP)
+#define F_ERROP V_ERROP(1U)
+
+#define A_MC7_ECC 0x130
+
+#define S_UECNT 10
+#define M_UECNT 0xff
+#define V_UECNT(x) ((x) << S_UECNT)
+#define G_UECNT(x) (((x) >> S_UECNT) & M_UECNT)
+
+#define S_CECNT 2
+#define M_CECNT 0xff
+#define V_CECNT(x) ((x) << S_CECNT)
+#define G_CECNT(x) (((x) >> S_CECNT) & M_CECNT)
+
+#define S_ECCCHKEN 1
+#define V_ECCCHKEN(x) ((x) << S_ECCCHKEN)
+#define F_ECCCHKEN V_ECCCHKEN(1U)
+
+#define S_ECCGENEN 0
+#define V_ECCGENEN(x) ((x) << S_ECCGENEN)
+#define F_ECCGENEN V_ECCGENEN(1U)
+
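F_ECCCHKEN and F_ECCGENEN switch ECC checking and generation on, while the CECNT/UECNT fields report correctable and uncorrectable error counts. A sketch decoding both counters from a single register read (variable names are illustrative):

	uint32_t ecc = t3_read_reg(adapter, A_MC7_ECC);
	unsigned int corrected = G_CECNT(ecc);	 /* correctable errors */
	unsigned int uncorrected = G_UECNT(ecc); /* uncorrectable errors */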
+#define A_MC7_CE_ADDR 0x134
+#define A_MC7_CE_DATA0 0x138
+#define A_MC7_CE_DATA1 0x13c
+#define A_MC7_CE_DATA2 0x140
+
+#define S_DATA 0
+#define M_DATA 0xff
+#define V_DATA(x) ((x) << S_DATA)
+#define G_DATA(x) (((x) >> S_DATA) & M_DATA)
+
+#define A_MC7_UE_ADDR 0x144
+#define A_MC7_UE_DATA0 0x148
+#define A_MC7_UE_DATA1 0x14c
+#define A_MC7_UE_DATA2 0x150
+#define A_MC7_BD_ADDR 0x154
+
+#define S_ADDR 3
+#define M_ADDR 0x1fffffff
+#define V_ADDR(x) ((x) << S_ADDR)
+#define G_ADDR(x) (((x) >> S_ADDR) & M_ADDR)
+
+#define A_MC7_BD_DATA0 0x158
+#define A_MC7_BD_DATA1 0x15c
+#define A_MC7_BD_DATA2 0x160
+#define A_MC7_BD_OP 0x164
+
+#define S_OP 0
+#define V_OP(x) ((x) << S_OP)
+#define F_OP V_OP(1U)
+
+#define A_MC7_BIST_ADDR_BEG 0x168
+
+#define S_ADDRBEG 5
+#define M_ADDRBEG 0x7ffffff
+#define V_ADDRBEG(x) ((x) << S_ADDRBEG)
+#define G_ADDRBEG(x) (((x) >> S_ADDRBEG) & M_ADDRBEG)
+
+#define A_MC7_BIST_ADDR_END 0x16c
+
+#define S_ADDREND 5
+#define M_ADDREND 0x7ffffff
+#define V_ADDREND(x) ((x) << S_ADDREND)
+#define G_ADDREND(x) (((x) >> S_ADDREND) & M_ADDREND)
+
+#define A_MC7_BIST_DATA 0x170
+#define A_MC7_BIST_OP 0x174
+
+#define S_GAP 4
+#define M_GAP 0x1f
+#define V_GAP(x) ((x) << S_GAP)
+#define G_GAP(x) (((x) >> S_GAP) & M_GAP)
+
+#define S_CONT 3
+#define V_CONT(x) ((x) << S_CONT)
+#define F_CONT V_CONT(1U)
+
+#define S_DATAPAT 1
+#define M_DATAPAT 0x3
+#define V_DATAPAT(x) ((x) << S_DATAPAT)
+#define G_DATAPAT(x) (((x) >> S_DATAPAT) & M_DATAPAT)
+
+#define A_MC7_INT_ENABLE 0x178
+
+#define S_AE 17
+#define V_AE(x) ((x) << S_AE)
+#define F_AE V_AE(1U)
+
+#define S_PE 2
+#define M_PE 0x7fff
+#define V_PE(x) ((x) << S_PE)
+#define G_PE(x) (((x) >> S_PE) & M_PE)
+
+#define S_UE 1
+#define V_UE(x) ((x) << S_UE)
+#define F_UE V_UE(1U)
+
+#define S_CE 0
+#define V_CE(x) ((x) << S_CE)
+#define F_CE V_CE(1U)
+
+#define A_MC7_INT_CAUSE 0x17c
+
+/* registers for module MC7_PMTX */
+#define MC7_PMTX_BASE_ADDR 0x180
+
+/* registers for module MC7_CM */
+#define MC7_CM_BASE_ADDR 0x200
+
+/* registers for module CIM */
+#define CIM_BASE_ADDR 0x280
+
+#define A_CIM_BOOT_CFG 0x280
+
+#define S_BOOTADDR 2
+#define M_BOOTADDR 0x3fffffff
+#define V_BOOTADDR(x) ((x) << S_BOOTADDR)
+#define G_BOOTADDR(x) (((x) >> S_BOOTADDR) & M_BOOTADDR)
+
+#define S_BOOTSDRAM 1
+#define V_BOOTSDRAM(x) ((x) << S_BOOTSDRAM)
+#define F_BOOTSDRAM V_BOOTSDRAM(1U)
+
+#define S_UPCRST 0
+#define V_UPCRST(x) ((x) << S_UPCRST)
+#define F_UPCRST V_UPCRST(1U)
+
+#define A_CIM_FLASH_BASE_ADDR 0x284
+
+#define S_FLASHBASEADDR 2
+#define M_FLASHBASEADDR 0x3fffff
+#define V_FLASHBASEADDR(x) ((x) << S_FLASHBASEADDR)
+#define G_FLASHBASEADDR(x) (((x) >> S_FLASHBASEADDR) & M_FLASHBASEADDR)
+
+#define A_CIM_FLASH_ADDR_SIZE 0x288
+
+#define S_FLASHADDRSIZE 2
+#define M_FLASHADDRSIZE 0x3fffff
+#define V_FLASHADDRSIZE(x) ((x) << S_FLASHADDRSIZE)
+#define G_FLASHADDRSIZE(x) (((x) >> S_FLASHADDRSIZE) & M_FLASHADDRSIZE)
+
+#define A_CIM_SDRAM_BASE_ADDR 0x28c
+
+#define S_SDRAMBASEADDR 2
+#define M_SDRAMBASEADDR 0x3fffffff
+#define V_SDRAMBASEADDR(x) ((x) << S_SDRAMBASEADDR)
+#define G_SDRAMBASEADDR(x) (((x) >> S_SDRAMBASEADDR) & M_SDRAMBASEADDR)
+
+#define A_CIM_SDRAM_ADDR_SIZE 0x290
+
+#define S_SDRAMADDRSIZE 2
+#define M_SDRAMADDRSIZE 0x3fffffff
+#define V_SDRAMADDRSIZE(x) ((x) << S_SDRAMADDRSIZE)
+#define G_SDRAMADDRSIZE(x) (((x) >> S_SDRAMADDRSIZE) & M_SDRAMADDRSIZE)
+
+#define A_CIM_UP_SPARE_INT 0x294
+
+#define S_UPSPAREINT 0
+#define M_UPSPAREINT 0x7
+#define V_UPSPAREINT(x) ((x) << S_UPSPAREINT)
+#define G_UPSPAREINT(x) (((x) >> S_UPSPAREINT) & M_UPSPAREINT)
+
+#define A_CIM_HOST_INT_ENABLE 0x298
+
+#define S_TIMER1INTEN 15
+#define V_TIMER1INTEN(x) ((x) << S_TIMER1INTEN)
+#define F_TIMER1INTEN V_TIMER1INTEN(1U)
+
+#define S_TIMER0INTEN 14
+#define V_TIMER0INTEN(x) ((x) << S_TIMER0INTEN)
+#define F_TIMER0INTEN V_TIMER0INTEN(1U)
+
+#define S_PREFDROPINTEN 13
+#define V_PREFDROPINTEN(x) ((x) << S_PREFDROPINTEN)
+#define F_PREFDROPINTEN V_PREFDROPINTEN(1U)
+
+#define S_BLKWRPLINTEN 12
+#define V_BLKWRPLINTEN(x) ((x) << S_BLKWRPLINTEN)
+#define F_BLKWRPLINTEN V_BLKWRPLINTEN(1U)
+
+#define S_BLKRDPLINTEN 11
+#define V_BLKRDPLINTEN(x) ((x) << S_BLKRDPLINTEN)
+#define F_BLKRDPLINTEN V_BLKRDPLINTEN(1U)
+
+#define S_BLKWRCTLINTEN 10
+#define V_BLKWRCTLINTEN(x) ((x) << S_BLKWRCTLINTEN)
+#define F_BLKWRCTLINTEN V_BLKWRCTLINTEN(1U)
+
+#define S_BLKRDCTLINTEN 9
+#define V_BLKRDCTLINTEN(x) ((x) << S_BLKRDCTLINTEN)
+#define F_BLKRDCTLINTEN V_BLKRDCTLINTEN(1U)
+
+#define S_BLKWRFLASHINTEN 8
+#define V_BLKWRFLASHINTEN(x) ((x) << S_BLKWRFLASHINTEN)
+#define F_BLKWRFLASHINTEN V_BLKWRFLASHINTEN(1U)
+
+#define S_BLKRDFLASHINTEN 7
+#define V_BLKRDFLASHINTEN(x) ((x) << S_BLKRDFLASHINTEN)
+#define F_BLKRDFLASHINTEN V_BLKRDFLASHINTEN(1U)
+
+#define S_SGLWRFLASHINTEN 6
+#define V_SGLWRFLASHINTEN(x) ((x) << S_SGLWRFLASHINTEN)
+#define F_SGLWRFLASHINTEN V_SGLWRFLASHINTEN(1U)
+
+#define S_WRBLKFLASHINTEN 5
+#define V_WRBLKFLASHINTEN(x) ((x) << S_WRBLKFLASHINTEN)
+#define F_WRBLKFLASHINTEN V_WRBLKFLASHINTEN(1U)
+
+#define S_BLKWRBOOTINTEN 4
+#define V_BLKWRBOOTINTEN(x) ((x) << S_BLKWRBOOTINTEN)
+#define F_BLKWRBOOTINTEN V_BLKWRBOOTINTEN(1U)
+
+#define S_BLKRDBOOTINTEN 3
+#define V_BLKRDBOOTINTEN(x) ((x) << S_BLKRDBOOTINTEN)
+#define F_BLKRDBOOTINTEN V_BLKRDBOOTINTEN(1U)
+
+#define S_FLASHRANGEINTEN 2
+#define V_FLASHRANGEINTEN(x) ((x) << S_FLASHRANGEINTEN)
+#define F_FLASHRANGEINTEN V_FLASHRANGEINTEN(1U)
+
+#define S_SDRAMRANGEINTEN 1
+#define V_SDRAMRANGEINTEN(x) ((x) << S_SDRAMRANGEINTEN)
+#define F_SDRAMRANGEINTEN V_SDRAMRANGEINTEN(1U)
+
+#define S_RSVDSPACEINTEN 0
+#define V_RSVDSPACEINTEN(x) ((x) << S_RSVDSPACEINTEN)
+#define F_RSVDSPACEINTEN V_RSVDSPACEINTEN(1U)
+
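Each xxINTEN bit above gates the matching interrupt in A_CIM_HOST_INT_CAUSE, which follows with the same bit layout. A sketch that unmasks the two timers and the prefetch-drop interrupt; the particular set chosen here is illustrative:

	t3_write_reg(adapter, A_CIM_HOST_INT_ENABLE,
		     F_TIMER0INTEN | F_TIMER1INTEN | F_PREFDROPINTEN);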
+#define A_CIM_HOST_INT_CAUSE 0x29c
+
+#define S_TIMER1INT 15
+#define V_TIMER1INT(x) ((x) << S_TIMER1INT)
+#define F_TIMER1INT V_TIMER1INT(1U)
+
+#define S_TIMER0INT 14
+#define V_TIMER0INT(x) ((x) << S_TIMER0INT)
+#define F_TIMER0INT V_TIMER0INT(1U)
+
+#define S_PREFDROPINT 13
+#define V_PREFDROPINT(x) ((x) << S_PREFDROPINT)
+#define F_PREFDROPINT V_PREFDROPINT(1U)
+
+#define S_BLKWRPLINT 12
+#define V_BLKWRPLINT(x) ((x) << S_BLKWRPLINT)
+#define F_BLKWRPLINT V_BLKWRPLINT(1U)
+
+#define S_BLKRDPLINT 11
+#define V_BLKRDPLINT(x) ((x) << S_BLKRDPLINT)
+#define F_BLKRDPLINT V_BLKRDPLINT(1U)
+
+#define S_BLKWRCTLINT 10
+#define V_BLKWRCTLINT(x) ((x) << S_BLKWRCTLINT)
+#define F_BLKWRCTLINT V_BLKWRCTLINT(1U)
+
+#define S_BLKRDCTLINT 9
+#define V_BLKRDCTLINT(x) ((x) << S_BLKRDCTLINT)
+#define F_BLKRDCTLINT V_BLKRDCTLINT(1U)
+
+#define S_BLKWRFLASHINT 8
+#define V_BLKWRFLASHINT(x) ((x) << S_BLKWRFLASHINT)
+#define F_BLKWRFLASHINT V_BLKWRFLASHINT(1U)
+
+#define S_BLKRDFLASHINT 7
+#define V_BLKRDFLASHINT(x) ((x) << S_BLKRDFLASHINT)
+#define F_BLKRDFLASHINT V_BLKRDFLASHINT(1U)
+
+#define S_SGLWRFLASHINT 6
+#define V_SGLWRFLASHINT(x) ((x) << S_SGLWRFLASHINT)
+#define F_SGLWRFLASHINT V_SGLWRFLASHINT(1U)
+
+#define S_WRBLKFLASHINT 5
+#define V_WRBLKFLASHINT(x) ((x) << S_WRBLKFLASHINT)
+#define F_WRBLKFLASHINT V_WRBLKFLASHINT(1U)
+
+#define S_BLKWRBOOTINT 4
+#define V_BLKWRBOOTINT(x) ((x) << S_BLKWRBOOTINT)
+#define F_BLKWRBOOTINT V_BLKWRBOOTINT(1U)
+
+#define S_BLKRDBOOTINT 3
+#define V_BLKRDBOOTINT(x) ((x) << S_BLKRDBOOTINT)
+#define F_BLKRDBOOTINT V_BLKRDBOOTINT(1U)
+
+#define S_FLASHRANGEINT 2
+#define V_FLASHRANGEINT(x) ((x) << S_FLASHRANGEINT)
+#define F_FLASHRANGEINT V_FLASHRANGEINT(1U)
+
+#define S_SDRAMRANGEINT 1
+#define V_SDRAMRANGEINT(x) ((x) << S_SDRAMRANGEINT)
+#define F_SDRAMRANGEINT V_SDRAMRANGEINT(1U)
+
+#define S_RSVDSPACEINT 0
+#define V_RSVDSPACEINT(x) ((x) << S_RSVDSPACEINT)
+#define F_RSVDSPACEINT V_RSVDSPACEINT(1U)
+
+#define A_CIM_UP_INT_ENABLE 0x2a0
+
+#define S_MSTPLINTEN 16
+#define V_MSTPLINTEN(x) ((x) << S_MSTPLINTEN)
+#define F_MSTPLINTEN V_MSTPLINTEN(1U)
+
+#define A_CIM_UP_INT_CAUSE 0x2a4
+
+#define S_MSTPLINT 16
+#define V_MSTPLINT(x) ((x) << S_MSTPLINT)
+#define F_MSTPLINT V_MSTPLINT(1U)
+
+#define A_CIM_IBQ_FULLA_THRSH 0x2a8
+
+#define S_IBQ0FULLTHRSH 0
+#define M_IBQ0FULLTHRSH 0x1ff
+#define V_IBQ0FULLTHRSH(x) ((x) << S_IBQ0FULLTHRSH)
+#define G_IBQ0FULLTHRSH(x) (((x) >> S_IBQ0FULLTHRSH) & M_IBQ0FULLTHRSH)
+
+#define S_IBQ1FULLTHRSH 16
+#define M_IBQ1FULLTHRSH 0x1ff
+#define V_IBQ1FULLTHRSH(x) ((x) << S_IBQ1FULLTHRSH)
+#define G_IBQ1FULLTHRSH(x) (((x) >> S_IBQ1FULLTHRSH) & M_IBQ1FULLTHRSH)
+
+#define A_CIM_IBQ_FULLB_THRSH 0x2ac
+
+#define S_IBQ2FULLTHRSH 0
+#define M_IBQ2FULLTHRSH 0x1ff
+#define V_IBQ2FULLTHRSH(x) ((x) << S_IBQ2FULLTHRSH)
+#define G_IBQ2FULLTHRSH(x) (((x) >> S_IBQ2FULLTHRSH) & M_IBQ2FULLTHRSH)
+
+#define S_IBQ3FULLTHRSH 16
+#define M_IBQ3FULLTHRSH 0x1ff
+#define V_IBQ3FULLTHRSH(x) ((x) << S_IBQ3FULLTHRSH)
+#define G_IBQ3FULLTHRSH(x) (((x) >> S_IBQ3FULLTHRSH) & M_IBQ3FULLTHRSH)
+
+#define A_CIM_HOST_ACC_CTRL 0x2b0
+
+#define S_HOSTBUSY 17
+#define V_HOSTBUSY(x) ((x) << S_HOSTBUSY)
+#define F_HOSTBUSY V_HOSTBUSY(1U)
+
+#define S_HOSTWRITE 16
+#define V_HOSTWRITE(x) ((x) << S_HOSTWRITE)
+#define F_HOSTWRITE V_HOSTWRITE(1U)
+
+#define S_HOSTADDR 0
+#define M_HOSTADDR 0xffff
+#define V_HOSTADDR(x) ((x) << S_HOSTADDR)
+#define G_HOSTADDR(x) (((x) >> S_HOSTADDR) & M_HOSTADDR)
+
+#define A_CIM_HOST_ACC_DATA 0x2b4
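A_CIM_HOST_ACC_CTRL and A_CIM_HOST_ACC_DATA form an indirect access window: writing the control register with a target address starts the access, F_HOSTBUSY stays set while it is in flight, and the result lands in the data register. A sketch of a single read, assuming the t3_wait_op_done() polling helper this patch provides; the attempt count and delay are illustrative:

	static int
	cim_host_read(adapter_t *adapter, unsigned int addr, uint32_t *valp)
	{
		int ret;

		/* F_HOSTWRITE stays clear for a read; the write kicks it off */
		t3_write_reg(adapter, A_CIM_HOST_ACC_CTRL, V_HOSTADDR(addr));
		ret = t3_wait_op_done(adapter, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (ret == 0)
			*valp = t3_read_reg(adapter, A_CIM_HOST_ACC_DATA);
		return (ret);
	}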
+#define A_CIM_IBQ_DBG_CFG 0x2c0
+
+#define S_IBQDBGADDR 16
+#define M_IBQDBGADDR 0x1ff
+#define V_IBQDBGADDR(x) ((x) << S_IBQDBGADDR)
+#define G_IBQDBGADDR(x) (((x) >> S_IBQDBGADDR) & M_IBQDBGADDR)
+
+#define S_IBQDBGQID 3
+#define M_IBQDBGQID 0x3
+#define V_IBQDBGQID(x) ((x) << S_IBQDBGQID)
+#define G_IBQDBGQID(x) (((x) >> S_IBQDBGQID) & M_IBQDBGQID)
+
+#define S_IBQDBGWR 2
+#define V_IBQDBGWR(x) ((x) << S_IBQDBGWR)
+#define F_IBQDBGWR V_IBQDBGWR(1U)
+
+#define S_IBQDBGBUSY 1
+#define V_IBQDBGBUSY(x) ((x) << S_IBQDBGBUSY)
+#define F_IBQDBGBUSY V_IBQDBGBUSY(1U)
+
+#define S_IBQDBGEN 0
+#define V_IBQDBGEN(x) ((x) << S_IBQDBGEN)
+#define F_IBQDBGEN V_IBQDBGEN(1U)
+
+#define A_CIM_OBQ_DBG_CFG 0x2c4
+
+#define S_OBQDBGADDR 16
+#define M_OBQDBGADDR 0x1ff
+#define V_OBQDBGADDR(x) ((x) << S_OBQDBGADDR)
+#define G_OBQDBGADDR(x) (((x) >> S_OBQDBGADDR) & M_OBQDBGADDR)
+
+#define S_OBQDBGQID 3
+#define M_OBQDBGQID 0x3
+#define V_OBQDBGQID(x) ((x) << S_OBQDBGQID)
+#define G_OBQDBGQID(x) (((x) >> S_OBQDBGQID) & M_OBQDBGQID)
+
+#define S_OBQDBGWR 2
+#define V_OBQDBGWR(x) ((x) << S_OBQDBGWR)
+#define F_OBQDBGWR V_OBQDBGWR(1U)
+
+#define S_OBQDBGBUSY 1
+#define V_OBQDBGBUSY(x) ((x) << S_OBQDBGBUSY)
+#define F_OBQDBGBUSY V_OBQDBGBUSY(1U)
+
+#define S_OBQDBGEN 0
+#define V_OBQDBGEN(x) ((x) << S_OBQDBGEN)
+#define F_OBQDBGEN V_OBQDBGEN(1U)
+
+#define A_CIM_IBQ_DBG_DATA 0x2c8
+#define A_CIM_OBQ_DBG_DATA 0x2cc
+#define A_CIM_CDEBUGDATA 0x2d0
+
+#define S_CDEBUGDATAH 16
+#define M_CDEBUGDATAH 0xffff
+#define V_CDEBUGDATAH(x) ((x) << S_CDEBUGDATAH)
+#define G_CDEBUGDATAH(x) (((x) >> S_CDEBUGDATAH) & M_CDEBUGDATAH)
+
+#define S_CDEBUGDATAL 0
+#define M_CDEBUGDATAL 0xffff
+#define V_CDEBUGDATAL(x) ((x) << S_CDEBUGDATAL)
+#define G_CDEBUGDATAL(x) (((x) >> S_CDEBUGDATAL) & M_CDEBUGDATAL)
+
+#define A_CIM_DEBUGCFG 0x2e0
+
+#define S_POLADBGRDPTR 23
+#define M_POLADBGRDPTR 0x1ff
+#define V_POLADBGRDPTR(x) ((x) << S_POLADBGRDPTR)
+#define G_POLADBGRDPTR(x) (((x) >> S_POLADBGRDPTR) & M_POLADBGRDPTR)
+
+#define S_PILADBGRDPTR 14
+#define M_PILADBGRDPTR 0x1ff
+#define V_PILADBGRDPTR(x) ((x) << S_PILADBGRDPTR)
+#define G_PILADBGRDPTR(x) (((x) >> S_PILADBGRDPTR) & M_PILADBGRDPTR)
+
+#define S_CIM_LADBGEN 12
+#define V_CIM_LADBGEN(x) ((x) << S_CIM_LADBGEN)
+#define F_CIM_LADBGEN V_CIM_LADBGEN(1U)
+
+#define S_DEBUGSELHI 5
+#define M_DEBUGSELHI 0x1f
+#define V_DEBUGSELHI(x) ((x) << S_DEBUGSELHI)
+#define G_DEBUGSELHI(x) (((x) >> S_DEBUGSELHI) & M_DEBUGSELHI)
+
+#define S_DEBUGSELLO 0
+#define M_DEBUGSELLO 0x1f
+#define V_DEBUGSELLO(x) ((x) << S_DEBUGSELLO)
+#define G_DEBUGSELLO(x) (((x) >> S_DEBUGSELLO) & M_DEBUGSELLO)
+
+#define A_CIM_DEBUGSTS 0x2e4
+
+#define S_POLADBGWRPTR 16
+#define M_POLADBGWRPTR 0x1ff
+#define V_POLADBGWRPTR(x) ((x) << S_POLADBGWRPTR)
+#define G_POLADBGWRPTR(x) (((x) >> S_POLADBGWRPTR) & M_POLADBGWRPTR)
+
+#define S_PILADBGWRPTR 0
+#define M_PILADBGWRPTR 0x1ff
+#define V_PILADBGWRPTR(x) ((x) << S_PILADBGWRPTR)
+#define G_PILADBGWRPTR(x) (((x) >> S_PILADBGWRPTR) & M_PILADBGWRPTR)
+
+#define A_CIM_PO_LA_DEBUGDATA 0x2e8
+#define A_CIM_PI_LA_DEBUGDATA 0x2ec
+
+/* registers for module TP1 */
+#define TP1_BASE_ADDR 0x300
+
+#define A_TP_IN_CONFIG 0x300
+
+#define S_RXFBARBPRIO 25
+#define V_RXFBARBPRIO(x) ((x) << S_RXFBARBPRIO)
+#define F_RXFBARBPRIO V_RXFBARBPRIO(1U)
+
+#define S_TXFBARBPRIO 24
+#define V_TXFBARBPRIO(x) ((x) << S_TXFBARBPRIO)
+#define F_TXFBARBPRIO V_TXFBARBPRIO(1U)
+
+#define S_DBMAXOPCNT 16
+#define M_DBMAXOPCNT 0xff
+#define V_DBMAXOPCNT(x) ((x) << S_DBMAXOPCNT)
+#define G_DBMAXOPCNT(x) (((x) >> S_DBMAXOPCNT) & M_DBMAXOPCNT)
+
+#define S_NICMODE 14
+#define V_NICMODE(x) ((x) << S_NICMODE)
+#define F_NICMODE V_NICMODE(1U)
+
+#define S_ECHECKSUMCHECKTCP 13
+#define V_ECHECKSUMCHECKTCP(x) ((x) << S_ECHECKSUMCHECKTCP)
+#define F_ECHECKSUMCHECKTCP V_ECHECKSUMCHECKTCP(1U)
+
+#define S_ECHECKSUMCHECKIP 12
+#define V_ECHECKSUMCHECKIP(x) ((x) << S_ECHECKSUMCHECKIP)
+#define F_ECHECKSUMCHECKIP V_ECHECKSUMCHECKIP(1U)
+
+#define S_ECPL 10
+#define V_ECPL(x) ((x) << S_ECPL)
+#define F_ECPL V_ECPL(1U)
+
+#define S_EETHERNET 8
+#define V_EETHERNET(x) ((x) << S_EETHERNET)
+#define F_EETHERNET V_EETHERNET(1U)
+
+#define S_ETUNNEL 7
+#define V_ETUNNEL(x) ((x) << S_ETUNNEL)
+#define F_ETUNNEL V_ETUNNEL(1U)
+
+#define S_CCHECKSUMCHECKTCP 6
+#define V_CCHECKSUMCHECKTCP(x) ((x) << S_CCHECKSUMCHECKTCP)
+#define F_CCHECKSUMCHECKTCP V_CCHECKSUMCHECKTCP(1U)
+
+#define S_CCHECKSUMCHECKIP 5
+#define V_CCHECKSUMCHECKIP(x) ((x) << S_CCHECKSUMCHECKIP)
+#define F_CCHECKSUMCHECKIP V_CCHECKSUMCHECKIP(1U)
+
+#define S_CCPL 3
+#define V_CCPL(x) ((x) << S_CCPL)
+#define F_CCPL V_CCPL(1U)
+
+#define S_CETHERNET 1
+#define V_CETHERNET(x) ((x) << S_CETHERNET)
+#define F_CETHERNET V_CETHERNET(1U)
+
+#define S_CTUNNEL 0
+#define V_CTUNNEL(x) ((x) << S_CTUNNEL)
+#define F_CTUNNEL V_CTUNNEL(1U)
+
+#define S_IPV6ENABLE 15
+#define V_IPV6ENABLE(x) ((x) << S_IPV6ENABLE)
+#define F_IPV6ENABLE V_IPV6ENABLE(1U)
+
+#define A_TP_OUT_CONFIG 0x304
+
+#define S_VLANEXTRACTIONENABLE 12
+#define V_VLANEXTRACTIONENABLE(x) ((x) << S_VLANEXTRACTIONENABLE)
+#define F_VLANEXTRACTIONENABLE V_VLANEXTRACTIONENABLE(1U)
+
+#define S_ECHECKSUMGENERATETCP 11
+#define V_ECHECKSUMGENERATETCP(x) ((x) << S_ECHECKSUMGENERATETCP)
+#define F_ECHECKSUMGENERATETCP V_ECHECKSUMGENERATETCP(1U)
+
+#define S_ECHECKSUMGENERATEIP 10
+#define V_ECHECKSUMGENERATEIP(x) ((x) << S_ECHECKSUMGENERATEIP)
+#define F_ECHECKSUMGENERATEIP V_ECHECKSUMGENERATEIP(1U)
+
+#define S_OUT_ECPL 8
+#define V_OUT_ECPL(x) ((x) << S_OUT_ECPL)
+#define F_OUT_ECPL V_OUT_ECPL(1U)
+
+#define S_OUT_EETHERNET 6
+#define V_OUT_EETHERNET(x) ((x) << S_OUT_EETHERNET)
+#define F_OUT_EETHERNET V_OUT_EETHERNET(1U)
+
+#define S_CCHECKSUMGENERATETCP 5
+#define V_CCHECKSUMGENERATETCP(x) ((x) << S_CCHECKSUMGENERATETCP)
+#define F_CCHECKSUMGENERATETCP V_CCHECKSUMGENERATETCP(1U)
+
+#define S_CCHECKSUMGENERATEIP 4
+#define V_CCHECKSUMGENERATEIP(x) ((x) << S_CCHECKSUMGENERATEIP)
+#define F_CCHECKSUMGENERATEIP V_CCHECKSUMGENERATEIP(1U)
+
+#define S_OUT_CCPL 2
+#define V_OUT_CCPL(x) ((x) << S_OUT_CCPL)
+#define F_OUT_CCPL V_OUT_CCPL(1U)
+
+#define S_OUT_CETHERNET 0
+#define V_OUT_CETHERNET(x) ((x) << S_OUT_CETHERNET)
+#define F_OUT_CETHERNET V_OUT_CETHERNET(1U)
+
+#define S_IPIDSPLITMODE 16
+#define V_IPIDSPLITMODE(x) ((x) << S_IPIDSPLITMODE)
+#define F_IPIDSPLITMODE V_IPIDSPLITMODE(1U)
+
+#define S_VLANEXTRACTIONENABLE2NDPORT 13
+#define V_VLANEXTRACTIONENABLE2NDPORT(x) ((x) << S_VLANEXTRACTIONENABLE2NDPORT)
+#define F_VLANEXTRACTIONENABLE2NDPORT V_VLANEXTRACTIONENABLE2NDPORT(1U)
+
+#define A_TP_GLOBAL_CONFIG 0x308
+
+#define S_RXFLOWCONTROLDISABLE 25
+#define V_RXFLOWCONTROLDISABLE(x) ((x) << S_RXFLOWCONTROLDISABLE)
+#define F_RXFLOWCONTROLDISABLE V_RXFLOWCONTROLDISABLE(1U)
+
+#define S_TXPACINGENABLE 24
+#define V_TXPACINGENABLE(x) ((x) << S_TXPACINGENABLE)
+#define F_TXPACINGENABLE V_TXPACINGENABLE(1U)
+
+#define S_ATTACKFILTERENABLE 23
+#define V_ATTACKFILTERENABLE(x) ((x) << S_ATTACKFILTERENABLE)
+#define F_ATTACKFILTERENABLE V_ATTACKFILTERENABLE(1U)
+
+#define S_SYNCOOKIENOOPTIONS 22
+#define V_SYNCOOKIENOOPTIONS(x) ((x) << S_SYNCOOKIENOOPTIONS)
+#define F_SYNCOOKIENOOPTIONS V_SYNCOOKIENOOPTIONS(1U)
+
+#define S_PROTECTEDMODE 21
+#define V_PROTECTEDMODE(x) ((x) << S_PROTECTEDMODE)
+#define F_PROTECTEDMODE V_PROTECTEDMODE(1U)
+
+#define S_PINGDROP 20
+#define V_PINGDROP(x) ((x) << S_PINGDROP)
+#define F_PINGDROP V_PINGDROP(1U)
+
+#define S_FRAGMENTDROP 19
+#define V_FRAGMENTDROP(x) ((x) << S_FRAGMENTDROP)
+#define F_FRAGMENTDROP V_FRAGMENTDROP(1U)
+
+#define S_FIVETUPLELOOKUP 17
+#define M_FIVETUPLELOOKUP 0x3
+#define V_FIVETUPLELOOKUP(x) ((x) << S_FIVETUPLELOOKUP)
+#define G_FIVETUPLELOOKUP(x) (((x) >> S_FIVETUPLELOOKUP) & M_FIVETUPLELOOKUP)
+
+#define S_PATHMTU 15
+#define V_PATHMTU(x) ((x) << S_PATHMTU)
+#define F_PATHMTU V_PATHMTU(1U)
+
+#define S_IPIDENTSPLIT 14
+#define V_IPIDENTSPLIT(x) ((x) << S_IPIDENTSPLIT)
+#define F_IPIDENTSPLIT V_IPIDENTSPLIT(1U)
+
+#define S_IPCHECKSUMOFFLOAD 13
+#define V_IPCHECKSUMOFFLOAD(x) ((x) << S_IPCHECKSUMOFFLOAD)
+#define F_IPCHECKSUMOFFLOAD V_IPCHECKSUMOFFLOAD(1U)
+
+#define S_UDPCHECKSUMOFFLOAD 12
+#define V_UDPCHECKSUMOFFLOAD(x) ((x) << S_UDPCHECKSUMOFFLOAD)
+#define F_UDPCHECKSUMOFFLOAD V_UDPCHECKSUMOFFLOAD(1U)
+
+#define S_TCPCHECKSUMOFFLOAD 11
+#define V_TCPCHECKSUMOFFLOAD(x) ((x) << S_TCPCHECKSUMOFFLOAD)
+#define F_TCPCHECKSUMOFFLOAD V_TCPCHECKSUMOFFLOAD(1U)
+
+#define S_QOSMAPPING 10
+#define V_QOSMAPPING(x) ((x) << S_QOSMAPPING)
+#define F_QOSMAPPING V_QOSMAPPING(1U)
+
+#define S_TCAMSERVERUSE 8
+#define M_TCAMSERVERUSE 0x3
+#define V_TCAMSERVERUSE(x) ((x) << S_TCAMSERVERUSE)
+#define G_TCAMSERVERUSE(x) (((x) >> S_TCAMSERVERUSE) & M_TCAMSERVERUSE)
+
+#define S_IPTTL 0
+#define M_IPTTL 0xff
+#define V_IPTTL(x) ((x) << S_IPTTL)
+#define G_IPTTL(x) (((x) >> S_IPTTL) & M_IPTTL)
+
+#define S_SYNCOOKIEPARAMS 26
+#define M_SYNCOOKIEPARAMS 0x3f
+#define V_SYNCOOKIEPARAMS(x) ((x) << S_SYNCOOKIEPARAMS)
+#define G_SYNCOOKIEPARAMS(x) (((x) >> S_SYNCOOKIEPARAMS) & M_SYNCOOKIEPARAMS)
+
+#define A_TP_GLOBAL_RX_CREDIT 0x30c
+#define A_TP_CMM_SIZE 0x310
+
+#define S_CMMEMMGRSIZE 0
+#define M_CMMEMMGRSIZE 0xfffffff
+#define V_CMMEMMGRSIZE(x) ((x) << S_CMMEMMGRSIZE)
+#define G_CMMEMMGRSIZE(x) (((x) >> S_CMMEMMGRSIZE) & M_CMMEMMGRSIZE)
+
+#define A_TP_CMM_MM_BASE 0x314
+
+#define S_CMMEMMGRBASE 0
+#define M_CMMEMMGRBASE 0xfffffff
+#define V_CMMEMMGRBASE(x) ((x) << S_CMMEMMGRBASE)
+#define G_CMMEMMGRBASE(x) (((x) >> S_CMMEMMGRBASE) & M_CMMEMMGRBASE)
+
+#define A_TP_CMM_TIMER_BASE 0x318
+
+#define S_CMTIMERBASE 0
+#define M_CMTIMERBASE 0xfffffff
+#define V_CMTIMERBASE(x) ((x) << S_CMTIMERBASE)
+#define G_CMTIMERBASE(x) (((x) >> S_CMTIMERBASE) & M_CMTIMERBASE)
+
+#define S_CMTIMERMAXNUM 28
+#define M_CMTIMERMAXNUM 0x3
+#define V_CMTIMERMAXNUM(x) ((x) << S_CMTIMERMAXNUM)
+#define G_CMTIMERMAXNUM(x) (((x) >> S_CMTIMERMAXNUM) & M_CMTIMERMAXNUM)
+
+#define A_TP_PMM_SIZE 0x31c
+
+#define S_PMSIZE 0
+#define M_PMSIZE 0xfffffff
+#define V_PMSIZE(x) ((x) << S_PMSIZE)
+#define G_PMSIZE(x) (((x) >> S_PMSIZE) & M_PMSIZE)
+
+#define A_TP_PMM_TX_BASE 0x320
+#define A_TP_PMM_DEFRAG_BASE 0x324
+#define A_TP_PMM_RX_BASE 0x328
+#define A_TP_PMM_RX_PAGE_SIZE 0x32c
+#define A_TP_PMM_RX_MAX_PAGE 0x330
+
+#define S_PMRXMAXPAGE 0
+#define M_PMRXMAXPAGE 0x1fffff
+#define V_PMRXMAXPAGE(x) ((x) << S_PMRXMAXPAGE)
+#define G_PMRXMAXPAGE(x) (((x) >> S_PMRXMAXPAGE) & M_PMRXMAXPAGE)
+
+#define A_TP_PMM_TX_PAGE_SIZE 0x334
+#define A_TP_PMM_TX_MAX_PAGE 0x338
+
+#define S_PMTXMAXPAGE 0
+#define M_PMTXMAXPAGE 0x1fffff
+#define V_PMTXMAXPAGE(x) ((x) << S_PMTXMAXPAGE)
+#define G_PMTXMAXPAGE(x) (((x) >> S_PMTXMAXPAGE) & M_PMTXMAXPAGE)
+
+#define A_TP_TCP_OPTIONS 0x340
+
+#define S_MTUDEFAULT 16
+#define M_MTUDEFAULT 0xffff
+#define V_MTUDEFAULT(x) ((x) << S_MTUDEFAULT)
+#define G_MTUDEFAULT(x) (((x) >> S_MTUDEFAULT) & M_MTUDEFAULT)
+
+#define S_MTUENABLE 10
+#define V_MTUENABLE(x) ((x) << S_MTUENABLE)
+#define F_MTUENABLE V_MTUENABLE(1U)
+
+#define S_SACKTX 9
+#define V_SACKTX(x) ((x) << S_SACKTX)
+#define F_SACKTX V_SACKTX(1U)
+
+#define S_SACKRX 8
+#define V_SACKRX(x) ((x) << S_SACKRX)
+#define F_SACKRX V_SACKRX(1U)
+
+#define S_SACKMODE 4
+#define M_SACKMODE 0x3
+#define V_SACKMODE(x) ((x) << S_SACKMODE)
+#define G_SACKMODE(x) (((x) >> S_SACKMODE) & M_SACKMODE)
+
+#define S_WINDOWSCALEMODE 2
+#define M_WINDOWSCALEMODE 0x3
+#define V_WINDOWSCALEMODE(x) ((x) << S_WINDOWSCALEMODE)
+#define G_WINDOWSCALEMODE(x) (((x) >> S_WINDOWSCALEMODE) & M_WINDOWSCALEMODE)
+
+#define S_TIMESTAMPSMODE 0
+#define M_TIMESTAMPSMODE 0x3
+#define V_TIMESTAMPSMODE(x) ((x) << S_TIMESTAMPSMODE)
+#define G_TIMESTAMPSMODE(x) (((x) >> S_TIMESTAMPSMODE) & M_TIMESTAMPSMODE)
+
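A_TP_GLOBAL_CONFIG and A_TP_TCP_OPTIONS are composed once at TP initialization from exactly these macros. A sketch patterned on the tp_config() logic elsewhere in this patch; the specific choices (TTL 64, 576-byte default MTU, the offload set) are illustrative rather than mandated:

	t3_write_reg(adapter, A_TP_GLOBAL_CONFIG,
		     F_TXPACINGENABLE | F_PATHMTU | F_IPCHECKSUMOFFLOAD |
		     F_UDPCHECKSUMOFFLOAD | F_TCPCHECKSUMOFFLOAD |
		     V_IPTTL(64));
	t3_write_reg(adapter, A_TP_TCP_OPTIONS,
		     V_MTUDEFAULT(576) | F_MTUENABLE |
		     V_WINDOWSCALEMODE(1) | V_TIMESTAMPSMODE(0) |
		     V_SACKMODE(1) | F_SACKRX);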
+#define A_TP_DACK_CONFIG 0x344
+
+#define S_AUTOSTATE3 30
+#define M_AUTOSTATE3 0x3
+#define V_AUTOSTATE3(x) ((x) << S_AUTOSTATE3)
+#define G_AUTOSTATE3(x) (((x) >> S_AUTOSTATE3) & M_AUTOSTATE3)
+
+#define S_AUTOSTATE2 28
+#define M_AUTOSTATE2 0x3
+#define V_AUTOSTATE2(x) ((x) << S_AUTOSTATE2)
+#define G_AUTOSTATE2(x) (((x) >> S_AUTOSTATE2) & M_AUTOSTATE2)
+
+#define S_AUTOSTATE1 26
+#define M_AUTOSTATE1 0x3
+#define V_AUTOSTATE1(x) ((x) << S_AUTOSTATE1)
+#define G_AUTOSTATE1(x) (((x) >> S_AUTOSTATE1) & M_AUTOSTATE1)
+
+#define S_BYTETHRESHOLD 5
+#define M_BYTETHRESHOLD 0xfffff
+#define V_BYTETHRESHOLD(x) ((x) << S_BYTETHRESHOLD)
+#define G_BYTETHRESHOLD(x) (((x) >> S_BYTETHRESHOLD) & M_BYTETHRESHOLD)
+
+#define S_MSSTHRESHOLD 3
+#define M_MSSTHRESHOLD 0x3
+#define V_MSSTHRESHOLD(x) ((x) << S_MSSTHRESHOLD)
+#define G_MSSTHRESHOLD(x) (((x) >> S_MSSTHRESHOLD) & M_MSSTHRESHOLD)
+
+#define S_AUTOCAREFUL 2
+#define V_AUTOCAREFUL(x) ((x) << S_AUTOCAREFUL)
+#define F_AUTOCAREFUL V_AUTOCAREFUL(1U)
+
+#define S_AUTOENABLE 1
+#define V_AUTOENABLE(x) ((x) << S_AUTOENABLE)
+#define F_AUTOENABLE V_AUTOENABLE(1U)
+
+#define S_DACK_MODE 0
+#define V_DACK_MODE(x) ((x) << S_DACK_MODE)
+#define F_DACK_MODE V_DACK_MODE(1U)
+
+#define A_TP_PC_CONFIG 0x348
+
+#define S_TXTOSQUEUEMAPMODE 26
+#define V_TXTOSQUEUEMAPMODE(x) ((x) << S_TXTOSQUEUEMAPMODE)
+#define F_TXTOSQUEUEMAPMODE V_TXTOSQUEUEMAPMODE(1U)
+
+#define S_RDDPCONGEN 25
+#define V_RDDPCONGEN(x) ((x) << S_RDDPCONGEN)
+#define F_RDDPCONGEN V_RDDPCONGEN(1U)
+
+#define S_ENABLEONFLYPDU 24
+#define V_ENABLEONFLYPDU(x) ((x) << S_ENABLEONFLYPDU)
+#define F_ENABLEONFLYPDU V_ENABLEONFLYPDU(1U)
+
+#define S_ENABLEEPCMDAFULL 23
+#define V_ENABLEEPCMDAFULL(x) ((x) << S_ENABLEEPCMDAFULL)
+#define F_ENABLEEPCMDAFULL V_ENABLEEPCMDAFULL(1U)
+
+#define S_MODULATEUNIONMODE 22
+#define V_MODULATEUNIONMODE(x) ((x) << S_MODULATEUNIONMODE)
+#define F_MODULATEUNIONMODE V_MODULATEUNIONMODE(1U)
+
+#define S_TXDATAACKRATEENABLE 21
+#define V_TXDATAACKRATEENABLE(x) ((x) << S_TXDATAACKRATEENABLE)
+#define F_TXDATAACKRATEENABLE V_TXDATAACKRATEENABLE(1U)
+
+#define S_TXDEFERENABLE 20
+#define V_TXDEFERENABLE(x) ((x) << S_TXDEFERENABLE)
+#define F_TXDEFERENABLE V_TXDEFERENABLE(1U)
+
+#define S_RXCONGESTIONMODE 19
+#define V_RXCONGESTIONMODE(x) ((x) << S_RXCONGESTIONMODE)
+#define F_RXCONGESTIONMODE V_RXCONGESTIONMODE(1U)
+
+#define S_HEARBEATONCEDACK 18
+#define V_HEARBEATONCEDACK(x) ((x) << S_HEARBEATONCEDACK)
+#define F_HEARBEATONCEDACK V_HEARBEATONCEDACK(1U)
+
+#define S_HEARBEATONCEHEAP 17
+#define V_HEARBEATONCEHEAP(x) ((x) << S_HEARBEATONCEHEAP)
+#define F_HEARBEATONCEHEAP V_HEARBEATONCEHEAP(1U)
+
+#define S_HEARBEATDACK 16
+#define V_HEARBEATDACK(x) ((x) << S_HEARBEATDACK)
+#define F_HEARBEATDACK V_HEARBEATDACK(1U)
+
+#define S_TXCONGESTIONMODE 15
+#define V_TXCONGESTIONMODE(x) ((x) << S_TXCONGESTIONMODE)
+#define F_TXCONGESTIONMODE V_TXCONGESTIONMODE(1U)
+
+#define S_ACCEPTLATESTRCVADV 14
+#define V_ACCEPTLATESTRCVADV(x) ((x) << S_ACCEPTLATESTRCVADV)
+#define F_ACCEPTLATESTRCVADV V_ACCEPTLATESTRCVADV(1U)
+
+#define S_DISABLESYNDATA 13
+#define V_DISABLESYNDATA(x) ((x) << S_DISABLESYNDATA)
+#define F_DISABLESYNDATA V_DISABLESYNDATA(1U)
+
+#define S_DISABLEWINDOWPSH 12
+#define V_DISABLEWINDOWPSH(x) ((x) << S_DISABLEWINDOWPSH)
+#define F_DISABLEWINDOWPSH V_DISABLEWINDOWPSH(1U)
+
+#define S_DISABLEFINOLDDATA 11
+#define V_DISABLEFINOLDDATA(x) ((x) << S_DISABLEFINOLDDATA)
+#define F_DISABLEFINOLDDATA V_DISABLEFINOLDDATA(1U)
+
+#define S_ENABLEFLMERROR 10
+#define V_ENABLEFLMERROR(x) ((x) << S_ENABLEFLMERROR)
+#define F_ENABLEFLMERROR V_ENABLEFLMERROR(1U)
+
+#define S_DISABLENEXTMTU 9
+#define V_DISABLENEXTMTU(x) ((x) << S_DISABLENEXTMTU)
+#define F_DISABLENEXTMTU V_DISABLENEXTMTU(1U)
+
+#define S_FILTERPEERFIN 8
+#define V_FILTERPEERFIN(x) ((x) << S_FILTERPEERFIN)
+#define F_FILTERPEERFIN V_FILTERPEERFIN(1U)
+
+#define S_ENABLEFEEDBACKSEND 7
+#define V_ENABLEFEEDBACKSEND(x) ((x) << S_ENABLEFEEDBACKSEND)
+#define F_ENABLEFEEDBACKSEND V_ENABLEFEEDBACKSEND(1U)
+
+#define S_ENABLERDMAERROR 6
+#define V_ENABLERDMAERROR(x) ((x) << S_ENABLERDMAERROR)
+#define F_ENABLERDMAERROR V_ENABLERDMAERROR(1U)
+
+#define S_ENABLEDDPFLOWCONTROL 5
+#define V_ENABLEDDPFLOWCONTROL(x) ((x) << S_ENABLEDDPFLOWCONTROL)
+#define F_ENABLEDDPFLOWCONTROL V_ENABLEDDPFLOWCONTROL(1U)
+
+#define S_DISABLEHELDFIN 4
+#define V_DISABLEHELDFIN(x) ((x) << S_DISABLEHELDFIN)
+#define F_DISABLEHELDFIN V_DISABLEHELDFIN(1U)
+
+#define S_TABLELATENCYDELTA 0
+#define M_TABLELATENCYDELTA 0xf
+#define V_TABLELATENCYDELTA(x) ((x) << S_TABLELATENCYDELTA)
+#define G_TABLELATENCYDELTA(x) (((x) >> S_TABLELATENCYDELTA) & M_TABLELATENCYDELTA)
+
+#define S_CMCACHEDISABLE 31
+#define V_CMCACHEDISABLE(x) ((x) << S_CMCACHEDISABLE)
+#define F_CMCACHEDISABLE V_CMCACHEDISABLE(1U)
+
+#define S_ENABLEOCSPIFULL 30
+#define V_ENABLEOCSPIFULL(x) ((x) << S_ENABLEOCSPIFULL)
+#define F_ENABLEOCSPIFULL V_ENABLEOCSPIFULL(1U)
+
+#define S_ENABLEFLMERRORDDP 29
+#define V_ENABLEFLMERRORDDP(x) ((x) << S_ENABLEFLMERRORDDP)
+#define F_ENABLEFLMERRORDDP V_ENABLEFLMERRORDDP(1U)
+
+#define S_LOCKTID 28
+#define V_LOCKTID(x) ((x) << S_LOCKTID)
+#define F_LOCKTID V_LOCKTID(1U)
+
+#define S_FIXRCVWND 27
+#define V_FIXRCVWND(x) ((x) << S_FIXRCVWND)
+#define F_FIXRCVWND V_FIXRCVWND(1U)
+
+#define A_TP_PC_CONFIG2 0x34c
+
+#define S_ENABLEDROPRQEMPTYPKT 10
+#define V_ENABLEDROPRQEMPTYPKT(x) ((x) << S_ENABLEDROPRQEMPTYPKT)
+#define F_ENABLEDROPRQEMPTYPKT V_ENABLEDROPRQEMPTYPKT(1U)
+
+#define S_ENABLETXPORTFROMDA2 9
+#define V_ENABLETXPORTFROMDA2(x) ((x) << S_ENABLETXPORTFROMDA2)
+#define F_ENABLETXPORTFROMDA2 V_ENABLETXPORTFROMDA2(1U)
+
+#define S_ENABLERXPKTTMSTPRSS 8
+#define V_ENABLERXPKTTMSTPRSS(x) ((x) << S_ENABLERXPKTTMSTPRSS)
+#define F_ENABLERXPKTTMSTPRSS V_ENABLERXPKTTMSTPRSS(1U)
+
+#define S_ENABLESNDUNAINRXDATA 7
+#define V_ENABLESNDUNAINRXDATA(x) ((x) << S_ENABLESNDUNAINRXDATA)
+#define F_ENABLESNDUNAINRXDATA V_ENABLESNDUNAINRXDATA(1U)
+
+#define S_ENABLERXPORTFROMADDR 6
+#define V_ENABLERXPORTFROMADDR(x) ((x) << S_ENABLERXPORTFROMADDR)
+#define F_ENABLERXPORTFROMADDR V_ENABLERXPORTFROMADDR(1U)
+
+#define S_ENABLETXPORTFROMDA 5
+#define V_ENABLETXPORTFROMDA(x) ((x) << S_ENABLETXPORTFROMDA)
+#define F_ENABLETXPORTFROMDA V_ENABLETXPORTFROMDA(1U)
+
+#define S_CHDRAFULL 4
+#define V_CHDRAFULL(x) ((x) << S_CHDRAFULL)
+#define F_CHDRAFULL V_CHDRAFULL(1U)
+
+#define S_ENABLENONOFDSCBBIT 3
+#define V_ENABLENONOFDSCBBIT(x) ((x) << S_ENABLENONOFDSCBBIT)
+#define F_ENABLENONOFDSCBBIT V_ENABLENONOFDSCBBIT(1U)
+
+#define S_ENABLENONOFDTIDRSS 2
+#define V_ENABLENONOFDTIDRSS(x) ((x) << S_ENABLENONOFDTIDRSS)
+#define F_ENABLENONOFDTIDRSS V_ENABLENONOFDTIDRSS(1U)
+
+#define S_ENABLENONOFDTCBRSS 1
+#define V_ENABLENONOFDTCBRSS(x) ((x) << S_ENABLENONOFDTCBRSS)
+#define F_ENABLENONOFDTCBRSS V_ENABLENONOFDTCBRSS(1U)
+
+#define S_ENABLEOLDRXFORWARD 0
+#define V_ENABLEOLDRXFORWARD(x) ((x) << S_ENABLEOLDRXFORWARD)
+#define F_ENABLEOLDRXFORWARD V_ENABLEOLDRXFORWARD(1U)
+
+#define A_TP_TCP_BACKOFF_REG0 0x350
+
+#define S_TIMERBACKOFFINDEX3 24
+#define M_TIMERBACKOFFINDEX3 0xff
+#define V_TIMERBACKOFFINDEX3(x) ((x) << S_TIMERBACKOFFINDEX3)
+#define G_TIMERBACKOFFINDEX3(x) (((x) >> S_TIMERBACKOFFINDEX3) & M_TIMERBACKOFFINDEX3)
+
+#define S_TIMERBACKOFFINDEX2 16
+#define M_TIMERBACKOFFINDEX2 0xff
+#define V_TIMERBACKOFFINDEX2(x) ((x) << S_TIMERBACKOFFINDEX2)
+#define G_TIMERBACKOFFINDEX2(x) (((x) >> S_TIMERBACKOFFINDEX2) & M_TIMERBACKOFFINDEX2)
+
+#define S_TIMERBACKOFFINDEX1 8
+#define M_TIMERBACKOFFINDEX1 0xff
+#define V_TIMERBACKOFFINDEX1(x) ((x) << S_TIMERBACKOFFINDEX1)
+#define G_TIMERBACKOFFINDEX1(x) (((x) >> S_TIMERBACKOFFINDEX1) & M_TIMERBACKOFFINDEX1)
+
+#define S_TIMERBACKOFFINDEX0 0
+#define M_TIMERBACKOFFINDEX0 0xff
+#define V_TIMERBACKOFFINDEX0(x) ((x) << S_TIMERBACKOFFINDEX0)
+#define G_TIMERBACKOFFINDEX0(x) (((x) >> S_TIMERBACKOFFINDEX0) & M_TIMERBACKOFFINDEX0)
+
+#define A_TP_TCP_BACKOFF_REG1 0x354
+
+#define S_TIMERBACKOFFINDEX7 24
+#define M_TIMERBACKOFFINDEX7 0xff
+#define V_TIMERBACKOFFINDEX7(x) ((x) << S_TIMERBACKOFFINDEX7)
+#define G_TIMERBACKOFFINDEX7(x) (((x) >> S_TIMERBACKOFFINDEX7) & M_TIMERBACKOFFINDEX7)
+
+#define S_TIMERBACKOFFINDEX6 16
+#define M_TIMERBACKOFFINDEX6 0xff
+#define V_TIMERBACKOFFINDEX6(x) ((x) << S_TIMERBACKOFFINDEX6)
+#define G_TIMERBACKOFFINDEX6(x) (((x) >> S_TIMERBACKOFFINDEX6) & M_TIMERBACKOFFINDEX6)
+
+#define S_TIMERBACKOFFINDEX5 8
+#define M_TIMERBACKOFFINDEX5 0xff
+#define V_TIMERBACKOFFINDEX5(x) ((x) << S_TIMERBACKOFFINDEX5)
+#define G_TIMERBACKOFFINDEX5(x) (((x) >> S_TIMERBACKOFFINDEX5) & M_TIMERBACKOFFINDEX5)
+
+#define S_TIMERBACKOFFINDEX4 0
+#define M_TIMERBACKOFFINDEX4 0xff
+#define V_TIMERBACKOFFINDEX4(x) ((x) << S_TIMERBACKOFFINDEX4)
+#define G_TIMERBACKOFFINDEX4(x) (((x) >> S_TIMERBACKOFFINDEX4) & M_TIMERBACKOFFINDEX4)
+
+#define A_TP_TCP_BACKOFF_REG2 0x358
+
+#define S_TIMERBACKOFFINDEX11 24
+#define M_TIMERBACKOFFINDEX11 0xff
+#define V_TIMERBACKOFFINDEX11(x) ((x) << S_TIMERBACKOFFINDEX11)
+#define G_TIMERBACKOFFINDEX11(x) (((x) >> S_TIMERBACKOFFINDEX11) & M_TIMERBACKOFFINDEX11)
+
+#define S_TIMERBACKOFFINDEX10 16
+#define M_TIMERBACKOFFINDEX10 0xff
+#define V_TIMERBACKOFFINDEX10(x) ((x) << S_TIMERBACKOFFINDEX10)
+#define G_TIMERBACKOFFINDEX10(x) (((x) >> S_TIMERBACKOFFINDEX10) & M_TIMERBACKOFFINDEX10)
+
+#define S_TIMERBACKOFFINDEX9 8
+#define M_TIMERBACKOFFINDEX9 0xff
+#define V_TIMERBACKOFFINDEX9(x) ((x) << S_TIMERBACKOFFINDEX9)
+#define G_TIMERBACKOFFINDEX9(x) (((x) >> S_TIMERBACKOFFINDEX9) & M_TIMERBACKOFFINDEX9)
+
+#define S_TIMERBACKOFFINDEX8 0
+#define M_TIMERBACKOFFINDEX8 0xff
+#define V_TIMERBACKOFFINDEX8(x) ((x) << S_TIMERBACKOFFINDEX8)
+#define G_TIMERBACKOFFINDEX8(x) (((x) >> S_TIMERBACKOFFINDEX8) & M_TIMERBACKOFFINDEX8)
+
+#define A_TP_TCP_BACKOFF_REG3 0x35c
+
+#define S_TIMERBACKOFFINDEX15 24
+#define M_TIMERBACKOFFINDEX15 0xff
+#define V_TIMERBACKOFFINDEX15(x) ((x) << S_TIMERBACKOFFINDEX15)
+#define G_TIMERBACKOFFINDEX15(x) (((x) >> S_TIMERBACKOFFINDEX15) & M_TIMERBACKOFFINDEX15)
+
+#define S_TIMERBACKOFFINDEX14 16
+#define M_TIMERBACKOFFINDEX14 0xff
+#define V_TIMERBACKOFFINDEX14(x) ((x) << S_TIMERBACKOFFINDEX14)
+#define G_TIMERBACKOFFINDEX14(x) (((x) >> S_TIMERBACKOFFINDEX14) & M_TIMERBACKOFFINDEX14)
+
+#define S_TIMERBACKOFFINDEX13 8
+#define M_TIMERBACKOFFINDEX13 0xff
+#define V_TIMERBACKOFFINDEX13(x) ((x) << S_TIMERBACKOFFINDEX13)
+#define G_TIMERBACKOFFINDEX13(x) (((x) >> S_TIMERBACKOFFINDEX13) & M_TIMERBACKOFFINDEX13)
+
+#define S_TIMERBACKOFFINDEX12 0
+#define M_TIMERBACKOFFINDEX12 0xff
+#define V_TIMERBACKOFFINDEX12(x) ((x) << S_TIMERBACKOFFINDEX12)
+#define G_TIMERBACKOFFINDEX12(x) (((x) >> S_TIMERBACKOFFINDEX12) & M_TIMERBACKOFFINDEX12)
+
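+/*
+ * A_TP_TCP_BACKOFF_REG0..REG3 pack the sixteen 8-bit retransmit-backoff
+ * table indices (TIMERBACKOFFINDEX0..15) four to a word.  A hypothetical
+ * helper to update entry idx (not part of the driver; it only makes the
+ * packing explicit):
+ *
+ *	uint32_t addr = A_TP_TCP_BACKOFF_REG0 + (idx >> 2) * 4;
+ *	int shift = (idx & 3) * 8;
+ *	uint32_t v = t3_read_reg(adapter, addr) & ~(0xffU << shift);
+ *	t3_write_reg(adapter, addr, v | ((val & 0xffU) << shift));
+ */
+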
+#define A_TP_PARA_REG0 0x360
+
+#define S_INITCWND 24
+#define M_INITCWND 0x7
+#define V_INITCWND(x) ((x) << S_INITCWND)
+#define G_INITCWND(x) (((x) >> S_INITCWND) & M_INITCWND)
+
+#define S_DUPACKTHRESH 20
+#define M_DUPACKTHRESH 0xf
+#define V_DUPACKTHRESH(x) ((x) << S_DUPACKTHRESH)
+#define G_DUPACKTHRESH(x) (((x) >> S_DUPACKTHRESH) & M_DUPACKTHRESH)
+
+#define A_TP_PARA_REG1 0x364
+
+#define S_INITRWND 16
+#define M_INITRWND 0xffff
+#define V_INITRWND(x) ((x) << S_INITRWND)
+#define G_INITRWND(x) (((x) >> S_INITRWND) & M_INITRWND)
+
+#define S_INITIALSSTHRESH 0
+#define M_INITIALSSTHRESH 0xffff
+#define V_INITIALSSTHRESH(x) ((x) << S_INITIALSSTHRESH)
+#define G_INITIALSSTHRESH(x) (((x) >> S_INITIALSSTHRESH) & M_INITIALSSTHRESH)
+
+#define A_TP_PARA_REG2 0x368
+
+#define S_MAXRXDATA 16
+#define M_MAXRXDATA 0xffff
+#define V_MAXRXDATA(x) ((x) << S_MAXRXDATA)
+#define G_MAXRXDATA(x) (((x) >> S_MAXRXDATA) & M_MAXRXDATA)
+
+#define S_RXCOALESCESIZE 0
+#define M_RXCOALESCESIZE 0xffff
+#define V_RXCOALESCESIZE(x) ((x) << S_RXCOALESCESIZE)
+#define G_RXCOALESCESIZE(x) (((x) >> S_RXCOALESCESIZE) & M_RXCOALESCESIZE)
+
+#define A_TP_PARA_REG3 0x36c
+
+#define S_TUNNELCNGDROP1 21
+#define V_TUNNELCNGDROP1(x) ((x) << S_TUNNELCNGDROP1)
+#define F_TUNNELCNGDROP1 V_TUNNELCNGDROP1(1U)
+
+#define S_TUNNELCNGDROP0 20
+#define V_TUNNELCNGDROP0(x) ((x) << S_TUNNELCNGDROP0)
+#define F_TUNNELCNGDROP0 V_TUNNELCNGDROP0(1U)
+
+#define S_TXDATAACKIDX 16
+#define M_TXDATAACKIDX 0xf
+#define V_TXDATAACKIDX(x) ((x) << S_TXDATAACKIDX)
+#define G_TXDATAACKIDX(x) (((x) >> S_TXDATAACKIDX) & M_TXDATAACKIDX)
+
+#define S_RXFRAGENABLE 12
+#define M_RXFRAGENABLE 0x7
+#define V_RXFRAGENABLE(x) ((x) << S_RXFRAGENABLE)
+#define G_RXFRAGENABLE(x) (((x) >> S_RXFRAGENABLE) & M_RXFRAGENABLE)
+
+#define S_TXPACEFIXEDSTRICT 11
+#define V_TXPACEFIXEDSTRICT(x) ((x) << S_TXPACEFIXEDSTRICT)
+#define F_TXPACEFIXEDSTRICT V_TXPACEFIXEDSTRICT(1U)
+
+#define S_TXPACEAUTOSTRICT 10
+#define V_TXPACEAUTOSTRICT(x) ((x) << S_TXPACEAUTOSTRICT)
+#define F_TXPACEAUTOSTRICT V_TXPACEAUTOSTRICT(1U)
+
+#define S_TXPACEFIXED 9
+#define V_TXPACEFIXED(x) ((x) << S_TXPACEFIXED)
+#define F_TXPACEFIXED V_TXPACEFIXED(1U)
+
+#define S_TXPACEAUTO 8
+#define V_TXPACEAUTO(x) ((x) << S_TXPACEAUTO)
+#define F_TXPACEAUTO V_TXPACEAUTO(1U)
+
+#define S_RXURGMODE 5
+#define V_RXURGMODE(x) ((x) << S_RXURGMODE)
+#define F_RXURGMODE V_RXURGMODE(1U)
+
+#define S_TXURGMODE 4
+#define V_TXURGMODE(x) ((x) << S_TXURGMODE)
+#define F_TXURGMODE V_TXURGMODE(1U)
+
+#define S_CNGCTRLMODE 2
+#define M_CNGCTRLMODE 0x3
+#define V_CNGCTRLMODE(x) ((x) << S_CNGCTRLMODE)
+#define G_CNGCTRLMODE(x) (((x) >> S_CNGCTRLMODE) & M_CNGCTRLMODE)
+
+#define S_RXCOALESCEENABLE 1
+#define V_RXCOALESCEENABLE(x) ((x) << S_RXCOALESCEENABLE)
+#define F_RXCOALESCEENABLE V_RXCOALESCEENABLE(1U)
+
+#define S_RXCOALESCEPSHEN 0
+#define V_RXCOALESCEPSHEN(x) ((x) << S_RXCOALESCEPSHEN)
+#define F_RXCOALESCEPSHEN V_RXCOALESCEPSHEN(1U)
+
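+/* RXURGTUNNEL is also an A_TP_PARA_REG3 field, listed out of bit order. */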
+#define S_RXURGTUNNEL 6
+#define V_RXURGTUNNEL(x) ((x) << S_RXURGTUNNEL)
+#define F_RXURGTUNNEL V_RXURGTUNNEL(1U)
+
+#define A_TP_PARA_REG4 0x370
+
+#define S_HIGHSPEEDCFG 24
+#define M_HIGHSPEEDCFG 0xff
+#define V_HIGHSPEEDCFG(x) ((x) << S_HIGHSPEEDCFG)
+#define G_HIGHSPEEDCFG(x) (((x) >> S_HIGHSPEEDCFG) & M_HIGHSPEEDCFG)
+
+#define S_NEWRENOCFG 16
+#define M_NEWRENOCFG 0xff
+#define V_NEWRENOCFG(x) ((x) << S_NEWRENOCFG)
+#define G_NEWRENOCFG(x) (((x) >> S_NEWRENOCFG) & M_NEWRENOCFG)
+
+#define S_TAHOECFG 8
+#define M_TAHOECFG 0xff
+#define V_TAHOECFG(x) ((x) << S_TAHOECFG)
+#define G_TAHOECFG(x) (((x) >> S_TAHOECFG) & M_TAHOECFG)
+
+#define S_RENOCFG 0
+#define M_RENOCFG 0xff
+#define V_RENOCFG(x) ((x) << S_RENOCFG)
+#define G_RENOCFG(x) (((x) >> S_RENOCFG) & M_RENOCFG)
+
+#define A_TP_PARA_REG5 0x374
+
+#define S_INDICATESIZE 16
+#define M_INDICATESIZE 0xffff
+#define V_INDICATESIZE(x) ((x) << S_INDICATESIZE)
+#define G_INDICATESIZE(x) (((x) >> S_INDICATESIZE) & M_INDICATESIZE)
+
+#define S_SCHDENABLE 8
+#define V_SCHDENABLE(x) ((x) << S_SCHDENABLE)
+#define F_SCHDENABLE V_SCHDENABLE(1U)
+
+#define S_ONFLYDDPENABLE 2
+#define V_ONFLYDDPENABLE(x) ((x) << S_ONFLYDDPENABLE)
+#define F_ONFLYDDPENABLE V_ONFLYDDPENABLE(1U)
+
+#define S_DACKTIMERSPIN 1
+#define V_DACKTIMERSPIN(x) ((x) << S_DACKTIMERSPIN)
+#define F_DACKTIMERSPIN V_DACKTIMERSPIN(1U)
+
+#define S_PUSHTIMERENABLE 0
+#define V_PUSHTIMERENABLE(x) ((x) << S_PUSHTIMERENABLE)
+#define F_PUSHTIMERENABLE V_PUSHTIMERENABLE(1U)
+
+#define A_TP_PARA_REG6 0x378
+
+#define S_TXPDUSIZEADJ 16
+#define M_TXPDUSIZEADJ 0xff
+#define V_TXPDUSIZEADJ(x) ((x) << S_TXPDUSIZEADJ)
+#define G_TXPDUSIZEADJ(x) (((x) >> S_TXPDUSIZEADJ) & M_TXPDUSIZEADJ)
+
+#define S_ENABLEEPDU 14
+#define V_ENABLEEPDU(x) ((x) << S_ENABLEEPDU)
+#define F_ENABLEEPDU V_ENABLEEPDU(1U)
+
+#define S_T3A_ENABLEESND 13
+#define V_T3A_ENABLEESND(x) ((x) << S_T3A_ENABLEESND)
+#define F_T3A_ENABLEESND V_T3A_ENABLEESND(1U)
+
+#define S_T3A_ENABLECSND 12
+#define V_T3A_ENABLECSND(x) ((x) << S_T3A_ENABLECSND)
+#define F_T3A_ENABLECSND V_T3A_ENABLECSND(1U)
+
+#define S_T3A_ENABLEDEFERACK 9
+#define V_T3A_ENABLEDEFERACK(x) ((x) << S_T3A_ENABLEDEFERACK)
+#define F_T3A_ENABLEDEFERACK V_T3A_ENABLEDEFERACK(1U)
+
+#define S_ENABLEPDUC 8
+#define V_ENABLEPDUC(x) ((x) << S_ENABLEPDUC)
+#define F_ENABLEPDUC V_ENABLEPDUC(1U)
+
+#define S_ENABLEPDUI 7
+#define V_ENABLEPDUI(x) ((x) << S_ENABLEPDUI)
+#define F_ENABLEPDUI V_ENABLEPDUI(1U)
+
+#define S_T3A_ENABLEPDUE 6
+#define V_T3A_ENABLEPDUE(x) ((x) << S_T3A_ENABLEPDUE)
+#define F_T3A_ENABLEPDUE V_T3A_ENABLEPDUE(1U)
+
+#define S_ENABLEDEFER 5
+#define V_ENABLEDEFER(x) ((x) << S_ENABLEDEFER)
+#define F_ENABLEDEFER V_ENABLEDEFER(1U)
+
+#define S_ENABLECLEARRXMTOOS 4
+#define V_ENABLECLEARRXMTOOS(x) ((x) << S_ENABLECLEARRXMTOOS)
+#define F_ENABLECLEARRXMTOOS V_ENABLECLEARRXMTOOS(1U)
+
+#define S_DISABLEPDUCNG 3
+#define V_DISABLEPDUCNG(x) ((x) << S_DISABLEPDUCNG)
+#define F_DISABLEPDUCNG V_DISABLEPDUCNG(1U)
+
+#define S_DISABLEPDUTIMEOUT 2
+#define V_DISABLEPDUTIMEOUT(x) ((x) << S_DISABLEPDUTIMEOUT)
+#define F_DISABLEPDUTIMEOUT V_DISABLEPDUTIMEOUT(1U)
+
+#define S_DISABLEPDURXMT 1
+#define V_DISABLEPDURXMT(x) ((x) << S_DISABLEPDURXMT)
+#define F_DISABLEPDURXMT V_DISABLEPDURXMT(1U)
+
+#define S_DISABLEPDUXMT 0
+#define V_DISABLEPDUXMT(x) ((x) << S_DISABLEPDUXMT)
+#define F_DISABLEPDUXMT V_DISABLEPDUXMT(1U)
+
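+/*
+ * The unprefixed fields below appear to be the T3B-revision layout of the
+ * same A_TP_PARA_REG6 controls; the T3A_-prefixed variants above apply to
+ * rev-A silicon (inferred from the prefix, not documented here).
+ */
+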
+#define S_ENABLEDEFERACK 12
+#define V_ENABLEDEFERACK(x) ((x) << S_ENABLEDEFERACK)
+#define F_ENABLEDEFERACK V_ENABLEDEFERACK(1U)
+
+#define S_ENABLEESND 11
+#define V_ENABLEESND(x) ((x) << S_ENABLEESND)
+#define F_ENABLEESND V_ENABLEESND(1U)
+
+#define S_ENABLECSND 10
+#define V_ENABLECSND(x) ((x) << S_ENABLECSND)
+#define F_ENABLECSND V_ENABLECSND(1U)
+
+#define S_ENABLEPDUE 9
+#define V_ENABLEPDUE(x) ((x) << S_ENABLEPDUE)
+#define F_ENABLEPDUE V_ENABLEPDUE(1U)
+
+#define S_ENABLEBUFI 7
+#define V_ENABLEBUFI(x) ((x) << S_ENABLEBUFI)
+#define F_ENABLEBUFI V_ENABLEBUFI(1U)
+
+#define S_ENABLEBUFE 6
+#define V_ENABLEBUFE(x) ((x) << S_ENABLEBUFE)
+#define F_ENABLEBUFE V_ENABLEBUFE(1U)
+
+#define A_TP_PARA_REG7 0x37c
+
+#define S_PMMAXXFERLEN1 16
+#define M_PMMAXXFERLEN1 0xffff
+#define V_PMMAXXFERLEN1(x) ((x) << S_PMMAXXFERLEN1)
+#define G_PMMAXXFERLEN1(x) (((x) >> S_PMMAXXFERLEN1) & M_PMMAXXFERLEN1)
+
+#define S_PMMAXXFERLEN0 0
+#define M_PMMAXXFERLEN0 0xffff
+#define V_PMMAXXFERLEN0(x) ((x) << S_PMMAXXFERLEN0)
+#define G_PMMAXXFERLEN0(x) (((x) >> S_PMMAXXFERLEN0) & M_PMMAXXFERLEN0)
+
+#define A_TP_TIMER_RESOLUTION 0x390
+
+#define S_TIMERRESOLUTION 16
+#define M_TIMERRESOLUTION 0xff
+#define V_TIMERRESOLUTION(x) ((x) << S_TIMERRESOLUTION)
+#define G_TIMERRESOLUTION(x) (((x) >> S_TIMERRESOLUTION) & M_TIMERRESOLUTION)
+
+#define S_TIMESTAMPRESOLUTION 8
+#define M_TIMESTAMPRESOLUTION 0xff
+#define V_TIMESTAMPRESOLUTION(x) ((x) << S_TIMESTAMPRESOLUTION)
+#define G_TIMESTAMPRESOLUTION(x) (((x) >> S_TIMESTAMPRESOLUTION) & M_TIMESTAMPRESOLUTION)
+
+#define S_DELAYEDACKRESOLUTION 0
+#define M_DELAYEDACKRESOLUTION 0xff
+#define V_DELAYEDACKRESOLUTION(x) ((x) << S_DELAYEDACKRESOLUTION)
+#define G_DELAYEDACKRESOLUTION(x) (((x) >> S_DELAYEDACKRESOLUTION) & M_DELAYEDACKRESOLUTION)
+
+#define A_TP_MSL 0x394
+
+#define S_MSL 0
+#define M_MSL 0x3fffffff
+#define V_MSL(x) ((x) << S_MSL)
+#define G_MSL(x) (((x) >> S_MSL) & M_MSL)
+
+#define A_TP_RXT_MIN 0x398
+
+#define S_RXTMIN 0
+#define M_RXTMIN 0x3fffffff
+#define V_RXTMIN(x) ((x) << S_RXTMIN)
+#define G_RXTMIN(x) (((x) >> S_RXTMIN) & M_RXTMIN)
+
+#define A_TP_RXT_MAX 0x39c
+
+#define S_RXTMAX 0
+#define M_RXTMAX 0x3fffffff
+#define V_RXTMAX(x) ((x) << S_RXTMAX)
+#define G_RXTMAX(x) (((x) >> S_RXTMAX) & M_RXTMAX)
+
+#define A_TP_PERS_MIN 0x3a0
+
+#define S_PERSMIN 0
+#define M_PERSMIN 0x3fffffff
+#define V_PERSMIN(x) ((x) << S_PERSMIN)
+#define G_PERSMIN(x) (((x) >> S_PERSMIN) & M_PERSMIN)
+
+#define A_TP_PERS_MAX 0x3a4
+
+#define S_PERSMAX 0
+#define M_PERSMAX 0x3fffffff
+#define V_PERSMAX(x) ((x) << S_PERSMAX)
+#define G_PERSMAX(x) (((x) >> S_PERSMAX) & M_PERSMAX)
+
+#define A_TP_KEEP_IDLE 0x3a8
+
+#define S_KEEPALIVEIDLE 0
+#define M_KEEPALIVEIDLE 0x3fffffff
+#define V_KEEPALIVEIDLE(x) ((x) << S_KEEPALIVEIDLE)
+#define G_KEEPALIVEIDLE(x) (((x) >> S_KEEPALIVEIDLE) & M_KEEPALIVEIDLE)
+
+#define A_TP_KEEP_INTVL 0x3ac
+
+#define S_KEEPALIVEINTVL 0
+#define M_KEEPALIVEINTVL 0x3fffffff
+#define V_KEEPALIVEINTVL(x) ((x) << S_KEEPALIVEINTVL)
+#define G_KEEPALIVEINTVL(x) (((x) >> S_KEEPALIVEINTVL) & M_KEEPALIVEINTVL)
+
+#define A_TP_INIT_SRTT 0x3b0
+
+#define S_INITSRTT 0
+#define M_INITSRTT 0xffff
+#define V_INITSRTT(x) ((x) << S_INITSRTT)
+#define G_INITSRTT(x) (((x) >> S_INITSRTT) & M_INITSRTT)
+
+#define A_TP_DACK_TIMER 0x3b4
+
+#define S_DACKTIME 0
+#define M_DACKTIME 0xfff
+#define V_DACKTIME(x) ((x) << S_DACKTIME)
+#define G_DACKTIME(x) (((x) >> S_DACKTIME) & M_DACKTIME)
+
+#define A_TP_FINWAIT2_TIMER 0x3b8
+
+#define S_FINWAIT2TIME 0
+#define M_FINWAIT2TIME 0x3fffffff
+#define V_FINWAIT2TIME(x) ((x) << S_FINWAIT2TIME)
+#define G_FINWAIT2TIME(x) (((x) >> S_FINWAIT2TIME) & M_FINWAIT2TIME)
+
+#define A_TP_FAST_FINWAIT2_TIMER 0x3bc
+
+#define S_FASTFINWAIT2TIME 0
+#define M_FASTFINWAIT2TIME 0x3fffffff
+#define V_FASTFINWAIT2TIME(x) ((x) << S_FASTFINWAIT2TIME)
+#define G_FASTFINWAIT2TIME(x) (((x) >> S_FASTFINWAIT2TIME) & M_FASTFINWAIT2TIME)
+
+#define A_TP_SHIFT_CNT 0x3c0
+
+#define S_SYNSHIFTMAX 24
+#define M_SYNSHIFTMAX 0xff
+#define V_SYNSHIFTMAX(x) ((x) << S_SYNSHIFTMAX)
+#define G_SYNSHIFTMAX(x) (((x) >> S_SYNSHIFTMAX) & M_SYNSHIFTMAX)
+
+#define S_RXTSHIFTMAXR1 20
+#define M_RXTSHIFTMAXR1 0xf
+#define V_RXTSHIFTMAXR1(x) ((x) << S_RXTSHIFTMAXR1)
+#define G_RXTSHIFTMAXR1(x) (((x) >> S_RXTSHIFTMAXR1) & M_RXTSHIFTMAXR1)
+
+#define S_RXTSHIFTMAXR2 16
+#define M_RXTSHIFTMAXR2 0xf
+#define V_RXTSHIFTMAXR2(x) ((x) << S_RXTSHIFTMAXR2)
+#define G_RXTSHIFTMAXR2(x) (((x) >> S_RXTSHIFTMAXR2) & M_RXTSHIFTMAXR2)
+
+#define S_PERSHIFTBACKOFFMAX 12
+#define M_PERSHIFTBACKOFFMAX 0xf
+#define V_PERSHIFTBACKOFFMAX(x) ((x) << S_PERSHIFTBACKOFFMAX)
+#define G_PERSHIFTBACKOFFMAX(x) (((x) >> S_PERSHIFTBACKOFFMAX) & M_PERSHIFTBACKOFFMAX)
+
+#define S_PERSHIFTMAX 8
+#define M_PERSHIFTMAX 0xf
+#define V_PERSHIFTMAX(x) ((x) << S_PERSHIFTMAX)
+#define G_PERSHIFTMAX(x) (((x) >> S_PERSHIFTMAX) & M_PERSHIFTMAX)
+
+#define S_KEEPALIVEMAX 0
+#define M_KEEPALIVEMAX 0xff
+#define V_KEEPALIVEMAX(x) ((x) << S_KEEPALIVEMAX)
+#define G_KEEPALIVEMAX(x) (((x) >> S_KEEPALIVEMAX) & M_KEEPALIVEMAX)
+
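+/*
+ * A_TP_SHIFT_CNT bounds the exponential-backoff shift counts for the SYN,
+ * retransmit, persist, and keepalive timers.  A representative write,
+ * composing the V_ macros above (values chosen for illustration only):
+ *
+ *	t3_write_reg(adapter, A_TP_SHIFT_CNT,
+ *		     V_SYNSHIFTMAX(6) | V_RXTSHIFTMAXR1(4) |
+ *		     V_RXTSHIFTMAXR2(15) | V_PERSHIFTBACKOFFMAX(8) |
+ *		     V_PERSHIFTMAX(8) | V_KEEPALIVEMAX(9));
+ */
+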
+#define A_TP_TIME_HI 0x3c8
+#define A_TP_TIME_LO 0x3cc
+#define A_TP_MTU_PORT_TABLE 0x3d0
+
+#define S_PORT1MTUVALUE 16
+#define M_PORT1MTUVALUE 0xffff
+#define V_PORT1MTUVALUE(x) ((x) << S_PORT1MTUVALUE)
+#define G_PORT1MTUVALUE(x) (((x) >> S_PORT1MTUVALUE) & M_PORT1MTUVALUE)
+
+#define S_PORT0MTUVALUE 0
+#define M_PORT0MTUVALUE 0xffff
+#define V_PORT0MTUVALUE(x) ((x) << S_PORT0MTUVALUE)
+#define G_PORT0MTUVALUE(x) (((x) >> S_PORT0MTUVALUE) & M_PORT0MTUVALUE)
+
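+/*
+ * Two 16-bit per-port MTUs share one word; e.g. to set both ports to a
+ * standard 1500-byte MTU (illustrative values):
+ *
+ *	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE,
+ *		     V_PORT1MTUVALUE(1500) | V_PORT0MTUVALUE(1500));
+ */
+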
+#define A_TP_ULP_TABLE 0x3d4
+
+#define S_ULPTYPE7FIELD 28
+#define M_ULPTYPE7FIELD 0xf
+#define V_ULPTYPE7FIELD(x) ((x) << S_ULPTYPE7FIELD)
+#define G_ULPTYPE7FIELD(x) (((x) >> S_ULPTYPE7FIELD) & M_ULPTYPE7FIELD)
+
+#define S_ULPTYPE6FIELD 24
+#define M_ULPTYPE6FIELD 0xf
+#define V_ULPTYPE6FIELD(x) ((x) << S_ULPTYPE6FIELD)
+#define G_ULPTYPE6FIELD(x) (((x) >> S_ULPTYPE6FIELD) & M_ULPTYPE6FIELD)
+
+#define S_ULPTYPE5FIELD 20
+#define M_ULPTYPE5FIELD 0xf
+#define V_ULPTYPE5FIELD(x) ((x) << S_ULPTYPE5FIELD)
+#define G_ULPTYPE5FIELD(x) (((x) >> S_ULPTYPE5FIELD) & M_ULPTYPE5FIELD)
+
+#define S_ULPTYPE4FIELD 16
+#define M_ULPTYPE4FIELD 0xf
+#define V_ULPTYPE4FIELD(x) ((x) << S_ULPTYPE4FIELD)
+#define G_ULPTYPE4FIELD(x) (((x) >> S_ULPTYPE4FIELD) & M_ULPTYPE4FIELD)
+
+#define S_ULPTYPE3FIELD 12
+#define M_ULPTYPE3FIELD 0xf
+#define V_ULPTYPE3FIELD(x) ((x) << S_ULPTYPE3FIELD)
+#define G_ULPTYPE3FIELD(x) (((x) >> S_ULPTYPE3FIELD) & M_ULPTYPE3FIELD)
+
+#define S_ULPTYPE2FIELD 8
+#define M_ULPTYPE2FIELD 0xf
+#define V_ULPTYPE2FIELD(x) ((x) << S_ULPTYPE2FIELD)
+#define G_ULPTYPE2FIELD(x) (((x) >> S_ULPTYPE2FIELD) & M_ULPTYPE2FIELD)
+
+#define S_ULPTYPE1FIELD 4
+#define M_ULPTYPE1FIELD 0xf
+#define V_ULPTYPE1FIELD(x) ((x) << S_ULPTYPE1FIELD)
+#define G_ULPTYPE1FIELD(x) (((x) >> S_ULPTYPE1FIELD) & M_ULPTYPE1FIELD)
+
+#define S_ULPTYPE0FIELD 0
+#define M_ULPTYPE0FIELD 0xf
+#define V_ULPTYPE0FIELD(x) ((x) << S_ULPTYPE0FIELD)
+#define G_ULPTYPE0FIELD(x) (((x) >> S_ULPTYPE0FIELD) & M_ULPTYPE0FIELD)
+
+#define A_TP_PACE_TABLE 0x3d8
+#define A_TP_CCTRL_TABLE 0x3dc
+#define A_TP_TOS_TABLE 0x3e0
+#define A_TP_MTU_TABLE 0x3e4
+#define A_TP_RSS_MAP_TABLE 0x3e8
+#define A_TP_RSS_LKP_TABLE 0x3ec
+#define A_TP_RSS_CONFIG 0x3f0
+
+#define S_TNL4TUPEN 29
+#define V_TNL4TUPEN(x) ((x) << S_TNL4TUPEN)
+#define F_TNL4TUPEN V_TNL4TUPEN(1U)
+
+#define S_TNL2TUPEN 28
+#define V_TNL2TUPEN(x) ((x) << S_TNL2TUPEN)
+#define F_TNL2TUPEN V_TNL2TUPEN(1U)
+
+#define S_TNLPRTEN 26
+#define V_TNLPRTEN(x) ((x) << S_TNLPRTEN)
+#define F_TNLPRTEN V_TNLPRTEN(1U)
+
+#define S_TNLMAPEN 25
+#define V_TNLMAPEN(x) ((x) << S_TNLMAPEN)
+#define F_TNLMAPEN V_TNLMAPEN(1U)
+
+#define S_TNLLKPEN 24
+#define V_TNLLKPEN(x) ((x) << S_TNLLKPEN)
+#define F_TNLLKPEN V_TNLLKPEN(1U)
+
+#define S_OFD4TUPEN 21
+#define V_OFD4TUPEN(x) ((x) << S_OFD4TUPEN)
+#define F_OFD4TUPEN V_OFD4TUPEN(1U)
+
+#define S_OFD2TUPEN 20
+#define V_OFD2TUPEN(x) ((x) << S_OFD2TUPEN)
+#define F_OFD2TUPEN V_OFD2TUPEN(1U)
+
+#define S_OFDMAPEN 17
+#define V_OFDMAPEN(x) ((x) << S_OFDMAPEN)
+#define F_OFDMAPEN V_OFDMAPEN(1U)
+
+#define S_OFDLKPEN 16
+#define V_OFDLKPEN(x) ((x) << S_OFDLKPEN)
+#define F_OFDLKPEN V_OFDLKPEN(1U)
+
+#define S_SYN4TUPEN 13
+#define V_SYN4TUPEN(x) ((x) << S_SYN4TUPEN)
+#define F_SYN4TUPEN V_SYN4TUPEN(1U)
+
+#define S_SYN2TUPEN 12
+#define V_SYN2TUPEN(x) ((x) << S_SYN2TUPEN)
+#define F_SYN2TUPEN V_SYN2TUPEN(1U)
+
+#define S_SYNMAPEN 9
+#define V_SYNMAPEN(x) ((x) << S_SYNMAPEN)
+#define F_SYNMAPEN V_SYNMAPEN(1U)
+
+#define S_SYNLKPEN 8
+#define V_SYNLKPEN(x) ((x) << S_SYNLKPEN)
+#define F_SYNLKPEN V_SYNLKPEN(1U)
+
+#define S_RRCPLMAPEN 7
+#define V_RRCPLMAPEN(x) ((x) << S_RRCPLMAPEN)
+#define F_RRCPLMAPEN V_RRCPLMAPEN(1U)
+
+#define S_RRCPLCPUSIZE 4
+#define M_RRCPLCPUSIZE 0x7
+#define V_RRCPLCPUSIZE(x) ((x) << S_RRCPLCPUSIZE)
+#define G_RRCPLCPUSIZE(x) (((x) >> S_RRCPLCPUSIZE) & M_RRCPLCPUSIZE)
+
+#define S_RQFEEDBACKENABLE 3
+#define V_RQFEEDBACKENABLE(x) ((x) << S_RQFEEDBACKENABLE)
+#define F_RQFEEDBACKENABLE V_RQFEEDBACKENABLE(1U)
+
+#define S_HASHTOEPLITZ 2
+#define V_HASHTOEPLITZ(x) ((x) << S_HASHTOEPLITZ)
+#define F_HASHTOEPLITZ V_HASHTOEPLITZ(1U)
+
+#define S_HASHSAVE 1
+#define V_HASHSAVE(x) ((x) << S_HASHSAVE)
+#define F_HASHSAVE V_HASHSAVE(1U)
+
+#define S_DISABLE 0
+#define V_DISABLE(x) ((x) << S_DISABLE)
+#define F_DISABLE V_DISABLE(1U)
+
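+/*
+ * A_TP_RSS_CONFIG is typically programmed by OR-ing the single-bit F_
+ * enables with the multi-bit V_ fields; a minimal sketch (flag selection
+ * and CPU-size value are illustrative, not a recommended configuration):
+ *
+ *	t3_write_reg(adapter, A_TP_RSS_CONFIG,
+ *		     F_TNLMAPEN | F_TNL4TUPEN | F_TNL2TUPEN |
+ *		     F_RQFEEDBACKENABLE | F_HASHTOEPLITZ |
+ *		     V_RRCPLCPUSIZE(6));
+ */
+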
+#define A_TP_RSS_CONFIG_TNL 0x3f4
+
+#define S_MASKSIZE 28
+#define M_MASKSIZE 0x7
+#define V_MASKSIZE(x) ((x) << S_MASKSIZE)
+#define G_MASKSIZE(x) (((x) >> S_MASKSIZE) & M_MASKSIZE)
+
+#define S_DEFAULTCPUBASE 22
+#define M_DEFAULTCPUBASE 0x3f
+#define V_DEFAULTCPUBASE(x) ((x) << S_DEFAULTCPUBASE)
+#define G_DEFAULTCPUBASE(x) (((x) >> S_DEFAULTCPUBASE) & M_DEFAULTCPUBASE)
+
+#define S_DEFAULTCPU 16
+#define M_DEFAULTCPU 0x3f
+#define V_DEFAULTCPU(x) ((x) << S_DEFAULTCPU)
+#define G_DEFAULTCPU(x) (((x) >> S_DEFAULTCPU) & M_DEFAULTCPU)
+
+#define S_DEFAULTQUEUE 0
+#define M_DEFAULTQUEUE 0xffff
+#define V_DEFAULTQUEUE(x) ((x) << S_DEFAULTQUEUE)
+#define G_DEFAULTQUEUE(x) (((x) >> S_DEFAULTQUEUE) & M_DEFAULTQUEUE)
+
+#define A_TP_RSS_CONFIG_OFD 0x3f8
+#define A_TP_RSS_CONFIG_SYN 0x3fc
+#define A_TP_RSS_SECRET_KEY0 0x400
+#define A_TP_RSS_SECRET_KEY1 0x404
+#define A_TP_RSS_SECRET_KEY2 0x408
+#define A_TP_RSS_SECRET_KEY3 0x40c
+#define A_TP_TM_PIO_ADDR 0x418
+#define A_TP_TM_PIO_DATA 0x41c
+#define A_TP_TX_MOD_QUE_TABLE 0x420
+#define A_TP_TX_RESOURCE_LIMIT 0x424
+
+#define S_TX_RESOURCE_LIMIT_CH1_PC 24
+#define M_TX_RESOURCE_LIMIT_CH1_PC 0xff
+#define V_TX_RESOURCE_LIMIT_CH1_PC(x) ((x) << S_TX_RESOURCE_LIMIT_CH1_PC)
+#define G_TX_RESOURCE_LIMIT_CH1_PC(x) (((x) >> S_TX_RESOURCE_LIMIT_CH1_PC) & M_TX_RESOURCE_LIMIT_CH1_PC)
+
+#define S_TX_RESOURCE_LIMIT_CH1_NON_PC 16
+#define M_TX_RESOURCE_LIMIT_CH1_NON_PC 0xff
+#define V_TX_RESOURCE_LIMIT_CH1_NON_PC(x) ((x) << S_TX_RESOURCE_LIMIT_CH1_NON_PC)
+#define G_TX_RESOURCE_LIMIT_CH1_NON_PC(x) (((x) >> S_TX_RESOURCE_LIMIT_CH1_NON_PC) & M_TX_RESOURCE_LIMIT_CH1_NON_PC)
+
+#define S_TX_RESOURCE_LIMIT_CH0_PC 8
+#define M_TX_RESOURCE_LIMIT_CH0_PC 0xff
+#define V_TX_RESOURCE_LIMIT_CH0_PC(x) ((x) << S_TX_RESOURCE_LIMIT_CH0_PC)
+#define G_TX_RESOURCE_LIMIT_CH0_PC(x) (((x) >> S_TX_RESOURCE_LIMIT_CH0_PC) & M_TX_RESOURCE_LIMIT_CH0_PC)
+
+#define S_TX_RESOURCE_LIMIT_CH0_NON_PC 0
+#define M_TX_RESOURCE_LIMIT_CH0_NON_PC 0xff
+#define V_TX_RESOURCE_LIMIT_CH0_NON_PC(x) ((x) << S_TX_RESOURCE_LIMIT_CH0_NON_PC)
+#define G_TX_RESOURCE_LIMIT_CH0_NON_PC(x) (((x) >> S_TX_RESOURCE_LIMIT_CH0_NON_PC) & M_TX_RESOURCE_LIMIT_CH0_NON_PC)
+
+#define A_TP_TX_MOD_QUEUE_REQ_MAP 0x428
+
+#define S_RX_MOD_WEIGHT 24
+#define M_RX_MOD_WEIGHT 0xff
+#define V_RX_MOD_WEIGHT(x) ((x) << S_RX_MOD_WEIGHT)
+#define G_RX_MOD_WEIGHT(x) (((x) >> S_RX_MOD_WEIGHT) & M_RX_MOD_WEIGHT)
+
+#define S_TX_MOD_WEIGHT 16
+#define M_TX_MOD_WEIGHT 0xff
+#define V_TX_MOD_WEIGHT(x) ((x) << S_TX_MOD_WEIGHT)
+#define G_TX_MOD_WEIGHT(x) (((x) >> S_TX_MOD_WEIGHT) & M_TX_MOD_WEIGHT)
+
+#define S_TX_MOD_TIMER_MODE 9
+#define M_TX_MOD_TIMER_MODE 0x7f
+#define V_TX_MOD_TIMER_MODE(x) ((x) << S_TX_MOD_TIMER_MODE)
+#define G_TX_MOD_TIMER_MODE(x) (((x) >> S_TX_MOD_TIMER_MODE) & M_TX_MOD_TIMER_MODE)
+
+#define S_TX_MOD_QUEUE_REQ_MAP 0
+#define M_TX_MOD_QUEUE_REQ_MAP 0xff
+#define V_TX_MOD_QUEUE_REQ_MAP(x) ((x) << S_TX_MOD_QUEUE_REQ_MAP)
+#define G_TX_MOD_QUEUE_REQ_MAP(x) (((x) >> S_TX_MOD_QUEUE_REQ_MAP) & M_TX_MOD_QUEUE_REQ_MAP)
+
+#define A_TP_TX_MOD_QUEUE_WEIGHT1 0x42c
+
+#define S_TP_TX_MODQ_WGHT7 24
+#define M_TP_TX_MODQ_WGHT7 0xff
+#define V_TP_TX_MODQ_WGHT7(x) ((x) << S_TP_TX_MODQ_WGHT7)
+#define G_TP_TX_MODQ_WGHT7(x) (((x) >> S_TP_TX_MODQ_WGHT7) & M_TP_TX_MODQ_WGHT7)
+
+#define S_TP_TX_MODQ_WGHT6 16
+#define M_TP_TX_MODQ_WGHT6 0xff
+#define V_TP_TX_MODQ_WGHT6(x) ((x) << S_TP_TX_MODQ_WGHT6)
+#define G_TP_TX_MODQ_WGHT6(x) (((x) >> S_TP_TX_MODQ_WGHT6) & M_TP_TX_MODQ_WGHT6)
+
+#define S_TP_TX_MODQ_WGHT5 8
+#define M_TP_TX_MODQ_WGHT5 0xff
+#define V_TP_TX_MODQ_WGHT5(x) ((x) << S_TP_TX_MODQ_WGHT5)
+#define G_TP_TX_MODQ_WGHT5(x) (((x) >> S_TP_TX_MODQ_WGHT5) & M_TP_TX_MODQ_WGHT5)
+
+#define S_TP_TX_MODQ_WGHT4 0
+#define M_TP_TX_MODQ_WGHT4 0xff
+#define V_TP_TX_MODQ_WGHT4(x) ((x) << S_TP_TX_MODQ_WGHT4)
+#define G_TP_TX_MODQ_WGHT4(x) (((x) >> S_TP_TX_MODQ_WGHT4) & M_TP_TX_MODQ_WGHT4)
+
+#define A_TP_TX_MOD_QUEUE_WEIGHT0 0x430
+
+#define S_TP_TX_MODQ_WGHT3 24
+#define M_TP_TX_MODQ_WGHT3 0xff
+#define V_TP_TX_MODQ_WGHT3(x) ((x) << S_TP_TX_MODQ_WGHT3)
+#define G_TP_TX_MODQ_WGHT3(x) (((x) >> S_TP_TX_MODQ_WGHT3) & M_TP_TX_MODQ_WGHT3)
+
+#define S_TP_TX_MODQ_WGHT2 16
+#define M_TP_TX_MODQ_WGHT2 0xff
+#define V_TP_TX_MODQ_WGHT2(x) ((x) << S_TP_TX_MODQ_WGHT2)
+#define G_TP_TX_MODQ_WGHT2(x) (((x) >> S_TP_TX_MODQ_WGHT2) & M_TP_TX_MODQ_WGHT2)
+
+#define S_TP_TX_MODQ_WGHT1 8
+#define M_TP_TX_MODQ_WGHT1 0xff
+#define V_TP_TX_MODQ_WGHT1(x) ((x) << S_TP_TX_MODQ_WGHT1)
+#define G_TP_TX_MODQ_WGHT1(x) (((x) >> S_TP_TX_MODQ_WGHT1) & M_TP_TX_MODQ_WGHT1)
+
+#define S_TP_TX_MODQ_WGHT0 0
+#define M_TP_TX_MODQ_WGHT0 0xff
+#define V_TP_TX_MODQ_WGHT0(x) ((x) << S_TP_TX_MODQ_WGHT0)
+#define G_TP_TX_MODQ_WGHT0(x) (((x) >> S_TP_TX_MODQ_WGHT0) & M_TP_TX_MODQ_WGHT0)
+
+#define A_TP_MOD_CHANNEL_WEIGHT 0x434
+
+#define S_RX_MOD_CHANNEL_WEIGHT1 24
+#define M_RX_MOD_CHANNEL_WEIGHT1 0xff
+#define V_RX_MOD_CHANNEL_WEIGHT1(x) ((x) << S_RX_MOD_CHANNEL_WEIGHT1)
+#define G_RX_MOD_CHANNEL_WEIGHT1(x) (((x) >> S_RX_MOD_CHANNEL_WEIGHT1) & M_RX_MOD_CHANNEL_WEIGHT1)
+
+#define S_RX_MOD_CHANNEL_WEIGHT0 16
+#define M_RX_MOD_CHANNEL_WEIGHT0 0xff
+#define V_RX_MOD_CHANNEL_WEIGHT0(x) ((x) << S_RX_MOD_CHANNEL_WEIGHT0)
+#define G_RX_MOD_CHANNEL_WEIGHT0(x) (((x) >> S_RX_MOD_CHANNEL_WEIGHT0) & M_RX_MOD_CHANNEL_WEIGHT0)
+
+#define S_TX_MOD_CHANNEL_WEIGHT1 8
+#define M_TX_MOD_CHANNEL_WEIGHT1 0xff
+#define V_TX_MOD_CHANNEL_WEIGHT1(x) ((x) << S_TX_MOD_CHANNEL_WEIGHT1)
+#define G_TX_MOD_CHANNEL_WEIGHT1(x) (((x) >> S_TX_MOD_CHANNEL_WEIGHT1) & M_TX_MOD_CHANNEL_WEIGHT1)
+
+#define S_TX_MOD_CHANNEL_WEIGHT0 0
+#define M_TX_MOD_CHANNEL_WEIGHT0 0xff
+#define V_TX_MOD_CHANNEL_WEIGHT0(x) ((x) << S_TX_MOD_CHANNEL_WEIGHT0)
+#define G_TX_MOD_CHANNEL_WEIGHT0(x) (((x) >> S_TX_MOD_CHANNEL_WEIGHT0) & M_TX_MOD_CHANNEL_WEIGHT0)
+
+#define A_TP_MOD_RATE_LIMIT 0x438
+
+#define S_RX_MOD_RATE_LIMIT_INC 24
+#define M_RX_MOD_RATE_LIMIT_INC 0xff
+#define V_RX_MOD_RATE_LIMIT_INC(x) ((x) << S_RX_MOD_RATE_LIMIT_INC)
+#define G_RX_MOD_RATE_LIMIT_INC(x) (((x) >> S_RX_MOD_RATE_LIMIT_INC) & M_RX_MOD_RATE_LIMIT_INC)
+
+#define S_RX_MOD_RATE_LIMIT_TICK 16
+#define M_RX_MOD_RATE_LIMIT_TICK 0xff
+#define V_RX_MOD_RATE_LIMIT_TICK(x) ((x) << S_RX_MOD_RATE_LIMIT_TICK)
+#define G_RX_MOD_RATE_LIMIT_TICK(x) (((x) >> S_RX_MOD_RATE_LIMIT_TICK) & M_RX_MOD_RATE_LIMIT_TICK)
+
+#define S_TX_MOD_RATE_LIMIT_INC 8
+#define M_TX_MOD_RATE_LIMIT_INC 0xff
+#define V_TX_MOD_RATE_LIMIT_INC(x) ((x) << S_TX_MOD_RATE_LIMIT_INC)
+#define G_TX_MOD_RATE_LIMIT_INC(x) (((x) >> S_TX_MOD_RATE_LIMIT_INC) & M_TX_MOD_RATE_LIMIT_INC)
+
+#define S_TX_MOD_RATE_LIMIT_TICK 0
+#define M_TX_MOD_RATE_LIMIT_TICK 0xff
+#define V_TX_MOD_RATE_LIMIT_TICK(x) ((x) << S_TX_MOD_RATE_LIMIT_TICK)
+#define G_TX_MOD_RATE_LIMIT_TICK(x) (((x) >> S_TX_MOD_RATE_LIMIT_TICK) & M_TX_MOD_RATE_LIMIT_TICK)
+
+#define A_TP_PIO_ADDR 0x440
+#define A_TP_PIO_DATA 0x444
+#define A_TP_RESET 0x44c
+
+#define S_FLSTINITENABLE 1
+#define V_FLSTINITENABLE(x) ((x) << S_FLSTINITENABLE)
+#define F_FLSTINITENABLE V_FLSTINITENABLE(1U)
+
+#define S_TPRESET 0
+#define V_TPRESET(x) ((x) << S_TPRESET)
+#define F_TPRESET V_TPRESET(1U)
+
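+/*
+ * TP MIB statistics are read through an index/data pair; a minimal sketch
+ * of reading one counter:
+ *
+ *	t3_write_reg(adapter, A_TP_MIB_INDEX, i);
+ *	val = t3_read_reg(adapter, A_TP_MIB_RDATA);
+ */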
+#define A_TP_MIB_INDEX 0x450
+#define A_TP_MIB_RDATA 0x454
+#define A_TP_SYNC_TIME_HI 0x458
+#define A_TP_SYNC_TIME_LO 0x45c
+#define A_TP_CMM_MM_RX_FLST_BASE 0x460
+
+#define S_CMRXFLSTBASE 0
+#define M_CMRXFLSTBASE 0xfffffff
+#define V_CMRXFLSTBASE(x) ((x) << S_CMRXFLSTBASE)
+#define G_CMRXFLSTBASE(x) (((x) >> S_CMRXFLSTBASE) & M_CMRXFLSTBASE)
+
+#define A_TP_CMM_MM_TX_FLST_BASE 0x464
+
+#define S_CMTXFLSTBASE 0
+#define M_CMTXFLSTBASE 0xfffffff
+#define V_CMTXFLSTBASE(x) ((x) << S_CMTXFLSTBASE)
+#define G_CMTXFLSTBASE(x) (((x) >> S_CMTXFLSTBASE) & M_CMTXFLSTBASE)
+
+#define A_TP_CMM_MM_PS_FLST_BASE 0x468
+
+#define S_CMPSFLSTBASE 0
+#define M_CMPSFLSTBASE 0xfffffff
+#define V_CMPSFLSTBASE(x) ((x) << S_CMPSFLSTBASE)
+#define G_CMPSFLSTBASE(x) (((x) >> S_CMPSFLSTBASE) & M_CMPSFLSTBASE)
+
+#define A_TP_CMM_MM_MAX_PSTRUCT 0x46c
+
+#define S_CMMAXPSTRUCT 0
+#define M_CMMAXPSTRUCT 0x1fffff
+#define V_CMMAXPSTRUCT(x) ((x) << S_CMMAXPSTRUCT)
+#define G_CMMAXPSTRUCT(x) (((x) >> S_CMMAXPSTRUCT) & M_CMMAXPSTRUCT)
+
+#define A_TP_INT_ENABLE 0x470
+#define A_TP_INT_CAUSE 0x474
+#define A_TP_FLM_FREE_PS_CNT 0x480
+
+#define S_FREEPSTRUCTCOUNT 0
+#define M_FREEPSTRUCTCOUNT 0x1fffff
+#define V_FREEPSTRUCTCOUNT(x) ((x) << S_FREEPSTRUCTCOUNT)
+#define G_FREEPSTRUCTCOUNT(x) (((x) >> S_FREEPSTRUCTCOUNT) & M_FREEPSTRUCTCOUNT)
+
+#define A_TP_FLM_FREE_RX_CNT 0x484
+
+#define S_FREERXPAGECOUNT 0
+#define M_FREERXPAGECOUNT 0x1fffff
+#define V_FREERXPAGECOUNT(x) ((x) << S_FREERXPAGECOUNT)
+#define G_FREERXPAGECOUNT(x) (((x) >> S_FREERXPAGECOUNT) & M_FREERXPAGECOUNT)
+
+#define A_TP_FLM_FREE_TX_CNT 0x488
+
+#define S_FREETXPAGECOUNT 0
+#define M_FREETXPAGECOUNT 0x1fffff
+#define V_FREETXPAGECOUNT(x) ((x) << S_FREETXPAGECOUNT)
+#define G_FREETXPAGECOUNT(x) (((x) >> S_FREETXPAGECOUNT) & M_FREETXPAGECOUNT)
+
+#define A_TP_TM_HEAP_PUSH_CNT 0x48c
+#define A_TP_TM_HEAP_POP_CNT 0x490
+#define A_TP_TM_DACK_PUSH_CNT 0x494
+#define A_TP_TM_DACK_POP_CNT 0x498
+#define A_TP_TM_MOD_PUSH_CNT 0x49c
+#define A_TP_MOD_POP_CNT 0x4a0
+#define A_TP_TIMER_SEPARATOR 0x4a4
+#define A_TP_DEBUG_SEL 0x4a8
+#define A_TP_DEBUG_FLAGS 0x4ac
+
+#define S_RXDEBUGFLAGS 16
+#define M_RXDEBUGFLAGS 0xffff
+#define V_RXDEBUGFLAGS(x) ((x) << S_RXDEBUGFLAGS)
+#define G_RXDEBUGFLAGS(x) (((x) >> S_RXDEBUGFLAGS) & M_RXDEBUGFLAGS)
+
+#define S_TXDEBUGFLAGS 0
+#define M_TXDEBUGFLAGS 0xffff
+#define V_TXDEBUGFLAGS(x) ((x) << S_TXDEBUGFLAGS)
+#define G_TXDEBUGFLAGS(x) (((x) >> S_TXDEBUGFLAGS) & M_TXDEBUGFLAGS)
+
+#define S_RXTIMERDACKFIRST 26
+#define V_RXTIMERDACKFIRST(x) ((x) << S_RXTIMERDACKFIRST)
+#define F_RXTIMERDACKFIRST V_RXTIMERDACKFIRST(1U)
+
+#define S_RXTIMERDACK 25
+#define V_RXTIMERDACK(x) ((x) << S_RXTIMERDACK)
+#define F_RXTIMERDACK V_RXTIMERDACK(1U)
+
+#define S_RXTIMERHEARTBEAT 24
+#define V_RXTIMERHEARTBEAT(x) ((x) << S_RXTIMERHEARTBEAT)
+#define F_RXTIMERHEARTBEAT V_RXTIMERHEARTBEAT(1U)
+
+#define S_RXPAWSDROP 23
+#define V_RXPAWSDROP(x) ((x) << S_RXPAWSDROP)
+#define F_RXPAWSDROP V_RXPAWSDROP(1U)
+
+#define S_RXURGDATADROP 22
+#define V_RXURGDATADROP(x) ((x) << S_RXURGDATADROP)
+#define F_RXURGDATADROP V_RXURGDATADROP(1U)
+
+#define S_RXFUTUREDATA 21
+#define V_RXFUTUREDATA(x) ((x) << S_RXFUTUREDATA)
+#define F_RXFUTUREDATA V_RXFUTUREDATA(1U)
+
+#define S_RXRCVRXMDATA 20
+#define V_RXRCVRXMDATA(x) ((x) << S_RXRCVRXMDATA)
+#define F_RXRCVRXMDATA V_RXRCVRXMDATA(1U)
+
+#define S_RXRCVOOODATAFIN 19
+#define V_RXRCVOOODATAFIN(x) ((x) << S_RXRCVOOODATAFIN)
+#define F_RXRCVOOODATAFIN V_RXRCVOOODATAFIN(1U)
+
+#define S_RXRCVOOODATA 18
+#define V_RXRCVOOODATA(x) ((x) << S_RXRCVOOODATA)
+#define F_RXRCVOOODATA V_RXRCVOOODATA(1U)
+
+#define S_RXRCVWNDZERO 17
+#define V_RXRCVWNDZERO(x) ((x) << S_RXRCVWNDZERO)
+#define F_RXRCVWNDZERO V_RXRCVWNDZERO(1U)
+
+#define S_RXRCVWNDLTMSS 16
+#define V_RXRCVWNDLTMSS(x) ((x) << S_RXRCVWNDLTMSS)
+#define F_RXRCVWNDLTMSS V_RXRCVWNDLTMSS(1U)
+
+#define S_TXDUPACKINC 11
+#define V_TXDUPACKINC(x) ((x) << S_TXDUPACKINC)
+#define F_TXDUPACKINC V_TXDUPACKINC(1U)
+
+#define S_TXRXMURG 10
+#define V_TXRXMURG(x) ((x) << S_TXRXMURG)
+#define F_TXRXMURG V_TXRXMURG(1U)
+
+#define S_TXRXMFIN 9
+#define V_TXRXMFIN(x) ((x) << S_TXRXMFIN)
+#define F_TXRXMFIN V_TXRXMFIN(1U)
+
+#define S_TXRXMSYN 8
+#define V_TXRXMSYN(x) ((x) << S_TXRXMSYN)
+#define F_TXRXMSYN V_TXRXMSYN(1U)
+
+#define S_TXRXMNEWRENO 7
+#define V_TXRXMNEWRENO(x) ((x) << S_TXRXMNEWRENO)
+#define F_TXRXMNEWRENO V_TXRXMNEWRENO(1U)
+
+#define S_TXRXMFAST 6
+#define V_TXRXMFAST(x) ((x) << S_TXRXMFAST)
+#define F_TXRXMFAST V_TXRXMFAST(1U)
+
+#define S_TXRXMTIMER 5
+#define V_TXRXMTIMER(x) ((x) << S_TXRXMTIMER)
+#define F_TXRXMTIMER V_TXRXMTIMER(1U)
+
+#define S_TXRXMTIMERKEEPALIVE 4
+#define V_TXRXMTIMERKEEPALIVE(x) ((x) << S_TXRXMTIMERKEEPALIVE)
+#define F_TXRXMTIMERKEEPALIVE V_TXRXMTIMERKEEPALIVE(1U)
+
+#define S_TXRXMTIMERPERSIST 3
+#define V_TXRXMTIMERPERSIST(x) ((x) << S_TXRXMTIMERPERSIST)
+#define F_TXRXMTIMERPERSIST V_TXRXMTIMERPERSIST(1U)
+
+#define S_TXRCVADVSHRUNK 2
+#define V_TXRCVADVSHRUNK(x) ((x) << S_TXRCVADVSHRUNK)
+#define F_TXRCVADVSHRUNK V_TXRCVADVSHRUNK(1U)
+
+#define S_TXRCVADVZERO 1
+#define V_TXRCVADVZERO(x) ((x) << S_TXRCVADVZERO)
+#define F_TXRCVADVZERO V_TXRCVADVZERO(1U)
+
+#define S_TXRCVADVLTMSS 0
+#define V_TXRCVADVLTMSS(x) ((x) << S_TXRCVADVLTMSS)
+#define F_TXRCVADVLTMSS V_TXRCVADVLTMSS(1U)
+
+#define A_TP_CM_FLOW_CNTL_MODE 0x4b0
+
+#define S_CMFLOWCACHEDISABLE 0
+#define V_CMFLOWCACHEDISABLE(x) ((x) << S_CMFLOWCACHEDISABLE)
+#define F_CMFLOWCACHEDISABLE V_CMFLOWCACHEDISABLE(1U)
+
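+/* A_TP_PROXY_FLOW_CNTL aliases A_TP_CM_FLOW_CNTL_MODE at address 0x4b0. */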
+#define A_TP_PROXY_FLOW_CNTL 0x4b0
+#define A_TP_PC_CONGESTION_CNTL 0x4b4
+
+#define S_EDROPTUNNEL 19
+#define V_EDROPTUNNEL(x) ((x) << S_EDROPTUNNEL)
+#define F_EDROPTUNNEL V_EDROPTUNNEL(1U)
+
+#define S_CDROPTUNNEL 18
+#define V_CDROPTUNNEL(x) ((x) << S_CDROPTUNNEL)
+#define F_CDROPTUNNEL V_CDROPTUNNEL(1U)
+
+#define S_ETHRESHOLD 12
+#define M_ETHRESHOLD 0x3f
+#define V_ETHRESHOLD(x) ((x) << S_ETHRESHOLD)
+#define G_ETHRESHOLD(x) (((x) >> S_ETHRESHOLD) & M_ETHRESHOLD)
+
+#define S_CTHRESHOLD 6
+#define M_CTHRESHOLD 0x3f
+#define V_CTHRESHOLD(x) ((x) << S_CTHRESHOLD)
+#define G_CTHRESHOLD(x) (((x) >> S_CTHRESHOLD) & M_CTHRESHOLD)
+
+#define S_TXTHRESHOLD 0
+#define M_TXTHRESHOLD 0x3f
+#define V_TXTHRESHOLD(x) ((x) << S_TXTHRESHOLD)
+#define G_TXTHRESHOLD(x) (((x) >> S_TXTHRESHOLD) & M_TXTHRESHOLD)
+
+#define A_TP_TX_DROP_COUNT 0x4bc
+#define A_TP_CLEAR_DEBUG 0x4c0
+
+#define S_CLRDEBUG 0
+#define V_CLRDEBUG(x) ((x) << S_CLRDEBUG)
+#define F_CLRDEBUG V_CLRDEBUG(1U)
+
+#define A_TP_DEBUG_VEC 0x4c4
+#define A_TP_DEBUG_VEC2 0x4c8
+#define A_TP_DEBUG_REG_SEL 0x4cc
+#define A_TP_DEBUG 0x4d0
+#define A_TP_DBG_LA_CONFIG 0x4d4
+#define A_TP_DBG_LA_DATAH 0x4d8
+#define A_TP_DBG_LA_DATAL 0x4dc
+#define A_TP_EMBED_OP_FIELD0 0x4e8
+#define A_TP_EMBED_OP_FIELD1 0x4ec
+#define A_TP_EMBED_OP_FIELD2 0x4f0
+#define A_TP_EMBED_OP_FIELD3 0x4f4
+#define A_TP_EMBED_OP_FIELD4 0x4f8
+#define A_TP_EMBED_OP_FIELD5 0x4fc
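+
+/*
+ * The defines from here down restart at address 0x0: they are indirect
+ * register offsets, not MMIO addresses.  The MOD-queue timer separators and
+ * rate limits (offsets 0x0-0x9) appear to be reached through the
+ * A_TP_TM_PIO_ADDR/A_TP_TM_PIO_DATA pair above, the remainder (trace keys,
+ * IPMI, QoS maps, ingress/egress config) through A_TP_PIO_ADDR/A_TP_PIO_DATA.
+ * A minimal indirect-write sketch:
+ *
+ *	t3_write_reg(adapter, A_TP_PIO_ADDR, A_TP_TX_TRC_KEY0);
+ *	t3_write_reg(adapter, A_TP_PIO_DATA, key);
+ */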
+#define A_TP_TX_MOD_Q7_Q6_TIMER_SEPARATOR 0x0
+#define A_TP_TX_MOD_Q5_Q4_TIMER_SEPARATOR 0x1
+#define A_TP_TX_MOD_Q3_Q2_TIMER_SEPARATOR 0x2
+#define A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR 0x3
+#define A_TP_RX_MOD_Q1_Q0_TIMER_SEPARATOR 0x4
+#define A_TP_TX_MOD_Q7_Q6_RATE_LIMIT 0x5
+#define A_TP_TX_MOD_Q5_Q4_RATE_LIMIT 0x6
+#define A_TP_TX_MOD_Q3_Q2_RATE_LIMIT 0x7
+#define A_TP_TX_MOD_Q1_Q0_RATE_LIMIT 0x8
+#define A_TP_RX_MOD_Q1_Q0_RATE_LIMIT 0x9
+#define A_TP_TX_TRC_KEY0 0x20
+#define A_TP_TX_TRC_MASK0 0x21
+#define A_TP_TX_TRC_KEY1 0x22
+#define A_TP_TX_TRC_MASK1 0x23
+#define A_TP_TX_TRC_KEY2 0x24
+#define A_TP_TX_TRC_MASK2 0x25
+#define A_TP_TX_TRC_KEY3 0x26
+#define A_TP_TX_TRC_MASK3 0x27
+#define A_TP_IPMI_CFG1 0x28
+
+#define S_VLANENABLE 31
+#define V_VLANENABLE(x) ((x) << S_VLANENABLE)
+#define F_VLANENABLE V_VLANENABLE(1U)
+
+#define S_PRIMARYPORTENABLE 30
+#define V_PRIMARYPORTENABLE(x) ((x) << S_PRIMARYPORTENABLE)
+#define F_PRIMARYPORTENABLE V_PRIMARYPORTENABLE(1U)
+
+#define S_SECUREPORTENABLE 29
+#define V_SECUREPORTENABLE(x) ((x) << S_SECUREPORTENABLE)
+#define F_SECUREPORTENABLE V_SECUREPORTENABLE(1U)
+
+#define S_ARPENABLE 28
+#define V_ARPENABLE(x) ((x) << S_ARPENABLE)
+#define F_ARPENABLE V_ARPENABLE(1U)
+
+#define S_VLAN 0
+#define M_VLAN 0xffff
+#define V_VLAN(x) ((x) << S_VLAN)
+#define G_VLAN(x) (((x) >> S_VLAN) & M_VLAN)
+
+#define A_TP_IPMI_CFG2 0x29
+
+#define S_SECUREPORT 16
+#define M_SECUREPORT 0xffff
+#define V_SECUREPORT(x) ((x) << S_SECUREPORT)
+#define G_SECUREPORT(x) (((x) >> S_SECUREPORT) & M_SECUREPORT)
+
+#define S_PRIMARYPORT 0
+#define M_PRIMARYPORT 0xffff
+#define V_PRIMARYPORT(x) ((x) << S_PRIMARYPORT)
+#define G_PRIMARYPORT(x) (((x) >> S_PRIMARYPORT) & M_PRIMARYPORT)
+
+#define A_TP_RX_TRC_KEY0 0x120
+#define A_TP_RX_TRC_MASK0 0x121
+#define A_TP_RX_TRC_KEY1 0x122
+#define A_TP_RX_TRC_MASK1 0x123
+#define A_TP_RX_TRC_KEY2 0x124
+#define A_TP_RX_TRC_MASK2 0x125
+#define A_TP_RX_TRC_KEY3 0x126
+#define A_TP_RX_TRC_MASK3 0x127
+#define A_TP_QOS_RX_TOS_MAP_H 0x128
+#define A_TP_QOS_RX_TOS_MAP_L 0x129
+#define A_TP_QOS_RX_MAP_MODE 0x12a
+
+#define S_DEFAULTCH 11
+#define V_DEFAULTCH(x) ((x) << S_DEFAULTCH)
+#define F_DEFAULTCH V_DEFAULTCH(1U)
+
+#define S_RXMAPMODE 8
+#define M_RXMAPMODE 0x7
+#define V_RXMAPMODE(x) ((x) << S_RXMAPMODE)
+#define G_RXMAPMODE(x) (((x) >> S_RXMAPMODE) & M_RXMAPMODE)
+
+#define S_RXVLANMAP 7
+#define V_RXVLANMAP(x) ((x) << S_RXVLANMAP)
+#define F_RXVLANMAP V_RXVLANMAP(1U)
+
+#define A_TP_TX_DROP_CFG_CH0 0x12b
+
+#define S_TIMERENABLED 31
+#define V_TIMERENABLED(x) ((x) << S_TIMERENABLED)
+#define F_TIMERENABLED V_TIMERENABLED(1U)
+
+#define S_TIMERERRORENABLE 30
+#define V_TIMERERRORENABLE(x) ((x) << S_TIMERERRORENABLE)
+#define F_TIMERERRORENABLE V_TIMERERRORENABLE(1U)
+
+#define S_TIMERTHRESHOLD 4
+#define M_TIMERTHRESHOLD 0x3ffffff
+#define V_TIMERTHRESHOLD(x) ((x) << S_TIMERTHRESHOLD)
+#define G_TIMERTHRESHOLD(x) (((x) >> S_TIMERTHRESHOLD) & M_TIMERTHRESHOLD)
+
+#define S_PACKETDROPS 0
+#define M_PACKETDROPS 0xf
+#define V_PACKETDROPS(x) ((x) << S_PACKETDROPS)
+#define G_PACKETDROPS(x) (((x) >> S_PACKETDROPS) & M_PACKETDROPS)
+
+#define A_TP_TX_DROP_CFG_CH1 0x12c
+#define A_TP_TX_DROP_CNT_CH0 0x12d
+
+#define S_TXDROPCNTCH0SENT 16
+#define M_TXDROPCNTCH0SENT 0xffff
+#define V_TXDROPCNTCH0SENT(x) ((x) << S_TXDROPCNTCH0SENT)
+#define G_TXDROPCNTCH0SENT(x) (((x) >> S_TXDROPCNTCH0SENT) & M_TXDROPCNTCH0SENT)
+
+#define S_TXDROPCNTCH0RCVD 0
+#define M_TXDROPCNTCH0RCVD 0xffff
+#define V_TXDROPCNTCH0RCVD(x) ((x) << S_TXDROPCNTCH0RCVD)
+#define G_TXDROPCNTCH0RCVD(x) (((x) >> S_TXDROPCNTCH0RCVD) & M_TXDROPCNTCH0RCVD)
+
+#define A_TP_TX_DROP_CNT_CH1 0x12e
+
+#define S_TXDROPCNTCH1SENT 16
+#define M_TXDROPCNTCH1SENT 0xffff
+#define V_TXDROPCNTCH1SENT(x) ((x) << S_TXDROPCNTCH1SENT)
+#define G_TXDROPCNTCH1SENT(x) (((x) >> S_TXDROPCNTCH1SENT) & M_TXDROPCNTCH1SENT)
+
+#define S_TXDROPCNTCH1RCVD 0
+#define M_TXDROPCNTCH1RCVD 0xffff
+#define V_TXDROPCNTCH1RCVD(x) ((x) << S_TXDROPCNTCH1RCVD)
+#define G_TXDROPCNTCH1RCVD(x) (((x) >> S_TXDROPCNTCH1RCVD) & M_TXDROPCNTCH1RCVD)
+
+#define A_TP_TX_DROP_MODE 0x12f
+
+#define S_TXDROPMODECH1 1
+#define V_TXDROPMODECH1(x) ((x) << S_TXDROPMODECH1)
+#define F_TXDROPMODECH1 V_TXDROPMODECH1(1U)
+
+#define S_TXDROPMODECH0 0
+#define V_TXDROPMODECH0(x) ((x) << S_TXDROPMODECH0)
+#define F_TXDROPMODECH0 V_TXDROPMODECH0(1U)
+
+#define A_TP_VLAN_PRI_MAP 0x137
+
+#define S_VLANPRIMAP7 14
+#define M_VLANPRIMAP7 0x3
+#define V_VLANPRIMAP7(x) ((x) << S_VLANPRIMAP7)
+#define G_VLANPRIMAP7(x) (((x) >> S_VLANPRIMAP7) & M_VLANPRIMAP7)
+
+#define S_VLANPRIMAP6 12
+#define M_VLANPRIMAP6 0x3
+#define V_VLANPRIMAP6(x) ((x) << S_VLANPRIMAP6)
+#define G_VLANPRIMAP6(x) (((x) >> S_VLANPRIMAP6) & M_VLANPRIMAP6)
+
+#define S_VLANPRIMAP5 10
+#define M_VLANPRIMAP5 0x3
+#define V_VLANPRIMAP5(x) ((x) << S_VLANPRIMAP5)
+#define G_VLANPRIMAP5(x) (((x) >> S_VLANPRIMAP5) & M_VLANPRIMAP5)
+
+#define S_VLANPRIMAP4 8
+#define M_VLANPRIMAP4 0x3
+#define V_VLANPRIMAP4(x) ((x) << S_VLANPRIMAP4)
+#define G_VLANPRIMAP4(x) (((x) >> S_VLANPRIMAP4) & M_VLANPRIMAP4)
+
+#define S_VLANPRIMAP3 6
+#define M_VLANPRIMAP3 0x3
+#define V_VLANPRIMAP3(x) ((x) << S_VLANPRIMAP3)
+#define G_VLANPRIMAP3(x) (((x) >> S_VLANPRIMAP3) & M_VLANPRIMAP3)
+
+#define S_VLANPRIMAP2 4
+#define M_VLANPRIMAP2 0x3
+#define V_VLANPRIMAP2(x) ((x) << S_VLANPRIMAP2)
+#define G_VLANPRIMAP2(x) (((x) >> S_VLANPRIMAP2) & M_VLANPRIMAP2)
+
+#define S_VLANPRIMAP1 2
+#define M_VLANPRIMAP1 0x3
+#define V_VLANPRIMAP1(x) ((x) << S_VLANPRIMAP1)
+#define G_VLANPRIMAP1(x) (((x) >> S_VLANPRIMAP1) & M_VLANPRIMAP1)
+
+#define S_VLANPRIMAP0 0
+#define M_VLANPRIMAP0 0x3
+#define V_VLANPRIMAP0(x) ((x) << S_VLANPRIMAP0)
+#define G_VLANPRIMAP0(x) (((x) >> S_VLANPRIMAP0) & M_VLANPRIMAP0)
+
+#define A_TP_MAC_MATCH_MAP0 0x138
+
+#define S_MACMATCHMAP7 21
+#define M_MACMATCHMAP7 0x7
+#define V_MACMATCHMAP7(x) ((x) << S_MACMATCHMAP7)
+#define G_MACMATCHMAP7(x) (((x) >> S_MACMATCHMAP7) & M_MACMATCHMAP7)
+
+#define S_MACMATCHMAP6 18
+#define M_MACMATCHMAP6 0x7
+#define V_MACMATCHMAP6(x) ((x) << S_MACMATCHMAP6)
+#define G_MACMATCHMAP6(x) (((x) >> S_MACMATCHMAP6) & M_MACMATCHMAP6)
+
+#define S_MACMATCHMAP5 15
+#define M_MACMATCHMAP5 0x7
+#define V_MACMATCHMAP5(x) ((x) << S_MACMATCHMAP5)
+#define G_MACMATCHMAP5(x) (((x) >> S_MACMATCHMAP5) & M_MACMATCHMAP5)
+
+#define S_MACMATCHMAP4 12
+#define M_MACMATCHMAP4 0x7
+#define V_MACMATCHMAP4(x) ((x) << S_MACMATCHMAP4)
+#define G_MACMATCHMAP4(x) (((x) >> S_MACMATCHMAP4) & M_MACMATCHMAP4)
+
+#define S_MACMATCHMAP3 9
+#define M_MACMATCHMAP3 0x7
+#define V_MACMATCHMAP3(x) ((x) << S_MACMATCHMAP3)
+#define G_MACMATCHMAP3(x) (((x) >> S_MACMATCHMAP3) & M_MACMATCHMAP3)
+
+#define S_MACMATCHMAP2 6
+#define M_MACMATCHMAP2 0x7
+#define V_MACMATCHMAP2(x) ((x) << S_MACMATCHMAP2)
+#define G_MACMATCHMAP2(x) (((x) >> S_MACMATCHMAP2) & M_MACMATCHMAP2)
+
+#define S_MACMATCHMAP1 3
+#define M_MACMATCHMAP1 0x7
+#define V_MACMATCHMAP1(x) ((x) << S_MACMATCHMAP1)
+#define G_MACMATCHMAP1(x) (((x) >> S_MACMATCHMAP1) & M_MACMATCHMAP1)
+
+#define S_MACMATCHMAP0 0
+#define M_MACMATCHMAP0 0x7
+#define V_MACMATCHMAP0(x) ((x) << S_MACMATCHMAP0)
+#define G_MACMATCHMAP0(x) (((x) >> S_MACMATCHMAP0) & M_MACMATCHMAP0)
+
+#define A_TP_MAC_MATCH_MAP1 0x139
+#define A_TP_INGRESS_CONFIG 0x141
+
+#define S_LOOKUPEVERYPKT 28
+#define V_LOOKUPEVERYPKT(x) ((x) << S_LOOKUPEVERYPKT)
+#define F_LOOKUPEVERYPKT V_LOOKUPEVERYPKT(1U)
+
+#define S_ENABLEINSERTIONSFD 27
+#define V_ENABLEINSERTIONSFD(x) ((x) << S_ENABLEINSERTIONSFD)
+#define F_ENABLEINSERTIONSFD V_ENABLEINSERTIONSFD(1U)
+
+#define S_ENABLEINSERTION 26
+#define V_ENABLEINSERTION(x) ((x) << S_ENABLEINSERTION)
+#define F_ENABLEINSERTION V_ENABLEINSERTION(1U)
+
+#define S_ENABLEEXTRACTIONSFD 25
+#define V_ENABLEEXTRACTIONSFD(x) ((x) << S_ENABLEEXTRACTIONSFD)
+#define F_ENABLEEXTRACTIONSFD V_ENABLEEXTRACTIONSFD(1U)
+
+#define S_ENABLEEXTRACT 24
+#define V_ENABLEEXTRACT(x) ((x) << S_ENABLEEXTRACT)
+#define F_ENABLEEXTRACT V_ENABLEEXTRACT(1U)
+
+#define S_BITPOS3 18
+#define M_BITPOS3 0x3f
+#define V_BITPOS3(x) ((x) << S_BITPOS3)
+#define G_BITPOS3(x) (((x) >> S_BITPOS3) & M_BITPOS3)
+
+#define S_BITPOS2 12
+#define M_BITPOS2 0x3f
+#define V_BITPOS2(x) ((x) << S_BITPOS2)
+#define G_BITPOS2(x) (((x) >> S_BITPOS2) & M_BITPOS2)
+
+#define S_BITPOS1 6
+#define M_BITPOS1 0x3f
+#define V_BITPOS1(x) ((x) << S_BITPOS1)
+#define G_BITPOS1(x) (((x) >> S_BITPOS1) & M_BITPOS1)
+
+#define S_BITPOS0 0
+#define M_BITPOS0 0x3f
+#define V_BITPOS0(x) ((x) << S_BITPOS0)
+#define G_BITPOS0(x) (((x) >> S_BITPOS0) & M_BITPOS0)
+
+#define A_TP_PREAMBLE_MSB 0x142
+#define A_TP_PREAMBLE_LSB 0x143
+#define A_TP_EGRESS_CONFIG 0x145
+
+#define S_REWRITEFORCETOSIZE 0
+#define V_REWRITEFORCETOSIZE(x) ((x) << S_REWRITEFORCETOSIZE)
+#define F_REWRITEFORCETOSIZE V_REWRITEFORCETOSIZE(1U)
+
+#define A_TP_INTF_FROM_TX_PKT 0x244
+
+#define S_INTFFROMTXPKT 0
+#define V_INTFFROMTXPKT(x) ((x) << S_INTFFROMTXPKT)
+#define F_INTFFROMTXPKT V_INTFFROMTXPKT(1U)
+
+#define A_TP_FIFO_CONFIG 0x8c0
+
+#define S_RXFIFOCONFIG 10
+#define M_RXFIFOCONFIG 0x3f
+#define V_RXFIFOCONFIG(x) ((x) << S_RXFIFOCONFIG)
+#define G_RXFIFOCONFIG(x) (((x) >> S_RXFIFOCONFIG) & M_RXFIFOCONFIG)
+
+#define S_TXFIFOCONFIG 2
+#define M_TXFIFOCONFIG 0x3f
+#define V_TXFIFOCONFIG(x) ((x) << S_TXFIFOCONFIG)
+#define G_TXFIFOCONFIG(x) (((x) >> S_TXFIFOCONFIG) & M_TXFIFOCONFIG)
+
+/* registers for module ULP2_RX */
+#define ULP2_RX_BASE_ADDR 0x500
+
+#define A_ULPRX_CTL 0x500
+
+#define S_PCMD1THRESHOLD 24
+#define M_PCMD1THRESHOLD 0xff
+#define V_PCMD1THRESHOLD(x) ((x) << S_PCMD1THRESHOLD)
+#define G_PCMD1THRESHOLD(x) (((x) >> S_PCMD1THRESHOLD) & M_PCMD1THRESHOLD)
+
+#define S_PCMD0THRESHOLD 16
+#define M_PCMD0THRESHOLD 0xff
+#define V_PCMD0THRESHOLD(x) ((x) << S_PCMD0THRESHOLD)
+#define G_PCMD0THRESHOLD(x) (((x) >> S_PCMD0THRESHOLD) & M_PCMD0THRESHOLD)
+
+#define S_ROUND_ROBIN 4
+#define V_ROUND_ROBIN(x) ((x) << S_ROUND_ROBIN)
+#define F_ROUND_ROBIN V_ROUND_ROBIN(1U)
+
+#define S_RDMA_PERMISSIVE_MODE 3
+#define V_RDMA_PERMISSIVE_MODE(x) ((x) << S_RDMA_PERMISSIVE_MODE)
+#define F_RDMA_PERMISSIVE_MODE V_RDMA_PERMISSIVE_MODE(1U)
+
+#define S_PAGEPODME 2
+#define V_PAGEPODME(x) ((x) << S_PAGEPODME)
+#define F_PAGEPODME V_PAGEPODME(1U)
+
+#define S_ISCSITAGTCB 1
+#define V_ISCSITAGTCB(x) ((x) << S_ISCSITAGTCB)
+#define F_ISCSITAGTCB V_ISCSITAGTCB(1U)
+
+#define S_TDDPTAGTCB 0
+#define V_TDDPTAGTCB(x) ((x) << S_TDDPTAGTCB)
+#define F_TDDPTAGTCB V_TDDPTAGTCB(1U)
+
+#define A_ULPRX_INT_ENABLE 0x504
+
+#define S_PARERR 0
+#define V_PARERR(x) ((x) << S_PARERR)
+#define F_PARERR V_PARERR(1U)
+
+#define A_ULPRX_INT_CAUSE 0x508
+#define A_ULPRX_ISCSI_LLIMIT 0x50c
+
+#define S_ISCSILLIMIT 6
+#define M_ISCSILLIMIT 0x3ffffff
+#define V_ISCSILLIMIT(x) ((x) << S_ISCSILLIMIT)
+#define G_ISCSILLIMIT(x) (((x) >> S_ISCSILLIMIT) & M_ISCSILLIMIT)
+
+#define A_ULPRX_ISCSI_ULIMIT 0x510
+
+#define S_ISCSIULIMIT 6
+#define M_ISCSIULIMIT 0x3ffffff
+#define V_ISCSIULIMIT(x) ((x) << S_ISCSIULIMIT)
+#define G_ISCSIULIMIT(x) (((x) >> S_ISCSIULIMIT) & M_ISCSIULIMIT)
+
+#define A_ULPRX_ISCSI_TAGMASK 0x514
+
+#define S_ISCSITAGMASK 6
+#define M_ISCSITAGMASK 0x3ffffff
+#define V_ISCSITAGMASK(x) ((x) << S_ISCSITAGMASK)
+#define G_ISCSITAGMASK(x) (((x) >> S_ISCSITAGMASK) & M_ISCSITAGMASK)
+
+#define A_ULPRX_ISCSI_PSZ 0x518
+
+#define S_HPZ3 24
+#define M_HPZ3 0xf
+#define V_HPZ3(x) ((x) << S_HPZ3)
+#define G_HPZ3(x) (((x) >> S_HPZ3) & M_HPZ3)
+
+#define S_HPZ2 16
+#define M_HPZ2 0xf
+#define V_HPZ2(x) ((x) << S_HPZ2)
+#define G_HPZ2(x) (((x) >> S_HPZ2) & M_HPZ2)
+
+#define S_HPZ1 8
+#define M_HPZ1 0xf
+#define V_HPZ1(x) ((x) << S_HPZ1)
+#define G_HPZ1(x) (((x) >> S_HPZ1) & M_HPZ1)
+
+#define S_HPZ0 0
+#define M_HPZ0 0xf
+#define V_HPZ0(x) ((x) << S_HPZ0)
+#define G_HPZ0(x) (((x) >> S_HPZ0) & M_HPZ0)
+
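+/*
+ * The HPZ0..HPZ3 fields select DDP host page sizes; by all appearances the
+ * encoding is log2(page size) - 12, i.e. page size = 4 KB << HPZn (an
+ * inference from how the offload code programs this register, noted here as
+ * an assumption).
+ */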
+#define A_ULPRX_TDDP_LLIMIT 0x51c
+
+#define S_TDDPLLIMIT 6
+#define M_TDDPLLIMIT 0x3ffffff
+#define V_TDDPLLIMIT(x) ((x) << S_TDDPLLIMIT)
+#define G_TDDPLLIMIT(x) (((x) >> S_TDDPLLIMIT) & M_TDDPLLIMIT)
+
+#define A_ULPRX_TDDP_ULIMIT 0x520
+
+#define S_TDDPULIMIT 6
+#define M_TDDPULIMIT 0x3ffffff
+#define V_TDDPULIMIT(x) ((x) << S_TDDPULIMIT)
+#define G_TDDPULIMIT(x) (((x) >> S_TDDPULIMIT) & M_TDDPULIMIT)
+
+#define A_ULPRX_TDDP_TAGMASK 0x524
+
+#define S_TDDPTAGMASK 6
+#define M_TDDPTAGMASK 0x3ffffff
+#define V_TDDPTAGMASK(x) ((x) << S_TDDPTAGMASK)
+#define G_TDDPTAGMASK(x) (((x) >> S_TDDPTAGMASK) & M_TDDPTAGMASK)
+
+#define A_ULPRX_TDDP_PSZ 0x528
+#define A_ULPRX_STAG_LLIMIT 0x52c
+#define A_ULPRX_STAG_ULIMIT 0x530
+#define A_ULPRX_RQ_LLIMIT 0x534
+#define A_ULPRX_RQ_ULIMIT 0x538
+#define A_ULPRX_PBL_LLIMIT 0x53c
+#define A_ULPRX_PBL_ULIMIT 0x540
+
+/* registers for module ULP2_TX */
+#define ULP2_TX_BASE_ADDR 0x580
+
+#define A_ULPTX_CONFIG 0x580
+
+#define S_CFG_RR_ARB 0
+#define V_CFG_RR_ARB(x) ((x) << S_CFG_RR_ARB)
+#define F_CFG_RR_ARB V_CFG_RR_ARB(1U)
+
+#define A_ULPTX_INT_ENABLE 0x584
+
+#define S_PBL_BOUND_ERR_CH1 1
+#define V_PBL_BOUND_ERR_CH1(x) ((x) << S_PBL_BOUND_ERR_CH1)
+#define F_PBL_BOUND_ERR_CH1 V_PBL_BOUND_ERR_CH1(1U)
+
+#define S_PBL_BOUND_ERR_CH0 0
+#define V_PBL_BOUND_ERR_CH0(x) ((x) << S_PBL_BOUND_ERR_CH0)
+#define F_PBL_BOUND_ERR_CH0 V_PBL_BOUND_ERR_CH0(1U)
+
+#define A_ULPTX_INT_CAUSE 0x588
+#define A_ULPTX_TPT_LLIMIT 0x58c
+#define A_ULPTX_TPT_ULIMIT 0x590
+#define A_ULPTX_PBL_LLIMIT 0x594
+#define A_ULPTX_PBL_ULIMIT 0x598
+#define A_ULPTX_CPL_ERR_OFFSET 0x59c
+#define A_ULPTX_CPL_ERR_MASK 0x5a0
+#define A_ULPTX_CPL_ERR_VALUE 0x5a4
+#define A_ULPTX_CPL_PACK_SIZE 0x5a8
+
+#define S_VALUE 24
+#define M_VALUE 0xff
+#define V_VALUE(x) ((x) << S_VALUE)
+#define G_VALUE(x) (((x) >> S_VALUE) & M_VALUE)
+
+#define S_CH1SIZE2 24
+#define M_CH1SIZE2 0xff
+#define V_CH1SIZE2(x) ((x) << S_CH1SIZE2)
+#define G_CH1SIZE2(x) (((x) >> S_CH1SIZE2) & M_CH1SIZE2)
+
+#define S_CH1SIZE1 16
+#define M_CH1SIZE1 0xff
+#define V_CH1SIZE1(x) ((x) << S_CH1SIZE1)
+#define G_CH1SIZE1(x) (((x) >> S_CH1SIZE1) & M_CH1SIZE1)
+
+#define S_CH0SIZE2 8
+#define M_CH0SIZE2 0xff
+#define V_CH0SIZE2(x) ((x) << S_CH0SIZE2)
+#define G_CH0SIZE2(x) (((x) >> S_CH0SIZE2) & M_CH0SIZE2)
+
+#define S_CH0SIZE1 0
+#define M_CH0SIZE1 0xff
+#define V_CH0SIZE1(x) ((x) << S_CH0SIZE1)
+#define G_CH0SIZE1(x) (((x) >> S_CH0SIZE1) & M_CH0SIZE1)
+
+#define A_ULPTX_DMA_WEIGHT 0x5ac
+
+#define S_D1_WEIGHT 16
+#define M_D1_WEIGHT 0xffff
+#define V_D1_WEIGHT(x) ((x) << S_D1_WEIGHT)
+#define G_D1_WEIGHT(x) (((x) >> S_D1_WEIGHT) & M_D1_WEIGHT)
+
+#define S_D0_WEIGHT 0
+#define M_D0_WEIGHT 0xffff
+#define V_D0_WEIGHT(x) ((x) << S_D0_WEIGHT)
+#define G_D0_WEIGHT(x) (((x) >> S_D0_WEIGHT) & M_D0_WEIGHT)
+
+/* registers for module PM1_RX */
+#define PM1_RX_BASE_ADDR 0x5c0
+
+#define A_PM1_RX_CFG 0x5c0
+#define A_PM1_RX_MODE 0x5c4
+
+#define S_STAT_CHANNEL 1
+#define V_STAT_CHANNEL(x) ((x) << S_STAT_CHANNEL)
+#define F_STAT_CHANNEL V_STAT_CHANNEL(1U)
+
+#define S_PRIORITY_CH 0
+#define V_PRIORITY_CH(x) ((x) << S_PRIORITY_CH)
+#define F_PRIORITY_CH V_PRIORITY_CH(1U)
+
+#define A_PM1_RX_STAT_CONFIG 0x5c8
+#define A_PM1_RX_STAT_COUNT 0x5cc
+#define A_PM1_RX_STAT_MSB 0x5d0
+#define A_PM1_RX_STAT_LSB 0x5d4
+#define A_PM1_RX_INT_ENABLE 0x5d8
+
+#define S_ZERO_E_CMD_ERROR 18
+#define V_ZERO_E_CMD_ERROR(x) ((x) << S_ZERO_E_CMD_ERROR)
+#define F_ZERO_E_CMD_ERROR V_ZERO_E_CMD_ERROR(1U)
+
+#define S_IESPI0_FIFO2X_RX_FRAMING_ERROR 17
+#define V_IESPI0_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_IESPI0_FIFO2X_RX_FRAMING_ERROR)
+#define F_IESPI0_FIFO2X_RX_FRAMING_ERROR V_IESPI0_FIFO2X_RX_FRAMING_ERROR(1U)
+
+#define S_IESPI1_FIFO2X_RX_FRAMING_ERROR 16
+#define V_IESPI1_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_IESPI1_FIFO2X_RX_FRAMING_ERROR)
+#define F_IESPI1_FIFO2X_RX_FRAMING_ERROR V_IESPI1_FIFO2X_RX_FRAMING_ERROR(1U)
+
+#define S_IESPI0_RX_FRAMING_ERROR 15
+#define V_IESPI0_RX_FRAMING_ERROR(x) ((x) << S_IESPI0_RX_FRAMING_ERROR)
+#define F_IESPI0_RX_FRAMING_ERROR V_IESPI0_RX_FRAMING_ERROR(1U)
+
+#define S_IESPI1_RX_FRAMING_ERROR 14
+#define V_IESPI1_RX_FRAMING_ERROR(x) ((x) << S_IESPI1_RX_FRAMING_ERROR)
+#define F_IESPI1_RX_FRAMING_ERROR V_IESPI1_RX_FRAMING_ERROR(1U)
+
+#define S_IESPI0_TX_FRAMING_ERROR 13
+#define V_IESPI0_TX_FRAMING_ERROR(x) ((x) << S_IESPI0_TX_FRAMING_ERROR)
+#define F_IESPI0_TX_FRAMING_ERROR V_IESPI0_TX_FRAMING_ERROR(1U)
+
+#define S_IESPI1_TX_FRAMING_ERROR 12
+#define V_IESPI1_TX_FRAMING_ERROR(x) ((x) << S_IESPI1_TX_FRAMING_ERROR)
+#define F_IESPI1_TX_FRAMING_ERROR V_IESPI1_TX_FRAMING_ERROR(1U)
+
+#define S_OCSPI0_RX_FRAMING_ERROR 11
+#define V_OCSPI0_RX_FRAMING_ERROR(x) ((x) << S_OCSPI0_RX_FRAMING_ERROR)
+#define F_OCSPI0_RX_FRAMING_ERROR V_OCSPI0_RX_FRAMING_ERROR(1U)
+
+#define S_OCSPI1_RX_FRAMING_ERROR 10
+#define V_OCSPI1_RX_FRAMING_ERROR(x) ((x) << S_OCSPI1_RX_FRAMING_ERROR)
+#define F_OCSPI1_RX_FRAMING_ERROR V_OCSPI1_RX_FRAMING_ERROR(1U)
+
+#define S_OCSPI0_TX_FRAMING_ERROR 9
+#define V_OCSPI0_TX_FRAMING_ERROR(x) ((x) << S_OCSPI0_TX_FRAMING_ERROR)
+#define F_OCSPI0_TX_FRAMING_ERROR V_OCSPI0_TX_FRAMING_ERROR(1U)
+
+#define S_OCSPI1_TX_FRAMING_ERROR 8
+#define V_OCSPI1_TX_FRAMING_ERROR(x) ((x) << S_OCSPI1_TX_FRAMING_ERROR)
+#define F_OCSPI1_TX_FRAMING_ERROR V_OCSPI1_TX_FRAMING_ERROR(1U)
+
+#define S_OCSPI0_OFIFO2X_TX_FRAMING_ERROR 7
+#define V_OCSPI0_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OCSPI0_OFIFO2X_TX_FRAMING_ERROR)
+#define F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR V_OCSPI0_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define S_OCSPI1_OFIFO2X_TX_FRAMING_ERROR 6
+#define V_OCSPI1_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
+#define F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR V_OCSPI1_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define S_IESPI_PAR_ERROR 3
+#define M_IESPI_PAR_ERROR 0x7
+#define V_IESPI_PAR_ERROR(x) ((x) << S_IESPI_PAR_ERROR)
+#define G_IESPI_PAR_ERROR(x) (((x) >> S_IESPI_PAR_ERROR) & M_IESPI_PAR_ERROR)
+
+#define S_OCSPI_PAR_ERROR 0
+#define M_OCSPI_PAR_ERROR 0x7
+#define V_OCSPI_PAR_ERROR(x) ((x) << S_OCSPI_PAR_ERROR)
+#define G_OCSPI_PAR_ERROR(x) (((x) >> S_OCSPI_PAR_ERROR) & M_OCSPI_PAR_ERROR)
+
+#define A_PM1_RX_INT_CAUSE 0x5dc
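+
+/*
+ * A_PM1_RX_INT_CAUSE (and A_PM1_TX_INT_CAUSE below) share the bit layout of
+ * their corresponding _INT_ENABLE registers, so the F_/G_ macros above apply
+ * to both.
+ */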
+
+/* registers for module PM1_TX */
+#define PM1_TX_BASE_ADDR 0x5e0
+
+#define A_PM1_TX_CFG 0x5e0
+#define A_PM1_TX_MODE 0x5e4
+#define A_PM1_TX_STAT_CONFIG 0x5e8
+#define A_PM1_TX_STAT_COUNT 0x5ec
+#define A_PM1_TX_STAT_MSB 0x5f0
+#define A_PM1_TX_STAT_LSB 0x5f4
+#define A_PM1_TX_INT_ENABLE 0x5f8
+
+#define S_ZERO_C_CMD_ERROR 18
+#define V_ZERO_C_CMD_ERROR(x) ((x) << S_ZERO_C_CMD_ERROR)
+#define F_ZERO_C_CMD_ERROR V_ZERO_C_CMD_ERROR(1U)
+
+#define S_ICSPI0_FIFO2X_RX_FRAMING_ERROR 17
+#define V_ICSPI0_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_ICSPI0_FIFO2X_RX_FRAMING_ERROR)
+#define F_ICSPI0_FIFO2X_RX_FRAMING_ERROR V_ICSPI0_FIFO2X_RX_FRAMING_ERROR(1U)
+
+#define S_ICSPI1_FIFO2X_RX_FRAMING_ERROR 16
+#define V_ICSPI1_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_ICSPI1_FIFO2X_RX_FRAMING_ERROR)
+#define F_ICSPI1_FIFO2X_RX_FRAMING_ERROR V_ICSPI1_FIFO2X_RX_FRAMING_ERROR(1U)
+
+#define S_ICSPI0_RX_FRAMING_ERROR 15
+#define V_ICSPI0_RX_FRAMING_ERROR(x) ((x) << S_ICSPI0_RX_FRAMING_ERROR)
+#define F_ICSPI0_RX_FRAMING_ERROR V_ICSPI0_RX_FRAMING_ERROR(1U)
+
+#define S_ICSPI1_RX_FRAMING_ERROR 14
+#define V_ICSPI1_RX_FRAMING_ERROR(x) ((x) << S_ICSPI1_RX_FRAMING_ERROR)
+#define F_ICSPI1_RX_FRAMING_ERROR V_ICSPI1_RX_FRAMING_ERROR(1U)
+
+#define S_ICSPI0_TX_FRAMING_ERROR 13
+#define V_ICSPI0_TX_FRAMING_ERROR(x) ((x) << S_ICSPI0_TX_FRAMING_ERROR)
+#define F_ICSPI0_TX_FRAMING_ERROR V_ICSPI0_TX_FRAMING_ERROR(1U)
+
+#define S_ICSPI1_TX_FRAMING_ERROR 12
+#define V_ICSPI1_TX_FRAMING_ERROR(x) ((x) << S_ICSPI1_TX_FRAMING_ERROR)
+#define F_ICSPI1_TX_FRAMING_ERROR V_ICSPI1_TX_FRAMING_ERROR(1U)
+
+#define S_OESPI0_RX_FRAMING_ERROR 11
+#define V_OESPI0_RX_FRAMING_ERROR(x) ((x) << S_OESPI0_RX_FRAMING_ERROR)
+#define F_OESPI0_RX_FRAMING_ERROR V_OESPI0_RX_FRAMING_ERROR(1U)
+
+#define S_OESPI1_RX_FRAMING_ERROR 10
+#define V_OESPI1_RX_FRAMING_ERROR(x) ((x) << S_OESPI1_RX_FRAMING_ERROR)
+#define F_OESPI1_RX_FRAMING_ERROR V_OESPI1_RX_FRAMING_ERROR(1U)
+
+#define S_OESPI0_TX_FRAMING_ERROR 9
+#define V_OESPI0_TX_FRAMING_ERROR(x) ((x) << S_OESPI0_TX_FRAMING_ERROR)
+#define F_OESPI0_TX_FRAMING_ERROR V_OESPI0_TX_FRAMING_ERROR(1U)
+
+#define S_OESPI1_TX_FRAMING_ERROR 8
+#define V_OESPI1_TX_FRAMING_ERROR(x) ((x) << S_OESPI1_TX_FRAMING_ERROR)
+#define F_OESPI1_TX_FRAMING_ERROR V_OESPI1_TX_FRAMING_ERROR(1U)
+
+#define S_OESPI0_OFIFO2X_TX_FRAMING_ERROR 7
+#define V_OESPI0_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OESPI0_OFIFO2X_TX_FRAMING_ERROR)
+#define F_OESPI0_OFIFO2X_TX_FRAMING_ERROR V_OESPI0_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define S_OESPI1_OFIFO2X_TX_FRAMING_ERROR 6
+#define V_OESPI1_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
+#define F_OESPI1_OFIFO2X_TX_FRAMING_ERROR V_OESPI1_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define S_ICSPI_PAR_ERROR 3
+#define M_ICSPI_PAR_ERROR 0x7
+#define V_ICSPI_PAR_ERROR(x) ((x) << S_ICSPI_PAR_ERROR)
+#define G_ICSPI_PAR_ERROR(x) (((x) >> S_ICSPI_PAR_ERROR) & M_ICSPI_PAR_ERROR)
+
+#define S_OESPI_PAR_ERROR 0
+#define M_OESPI_PAR_ERROR 0x7
+#define V_OESPI_PAR_ERROR(x) ((x) << S_OESPI_PAR_ERROR)
+#define G_OESPI_PAR_ERROR(x) (((x) >> S_OESPI_PAR_ERROR) & M_OESPI_PAR_ERROR)
+
+#define A_PM1_TX_INT_CAUSE 0x5fc
+
+/* registers for module MPS0 */
+#define MPS0_BASE_ADDR 0x600
+
+#define A_MPS_CFG 0x600
+
+#define S_SGETPQID 8
+#define M_SGETPQID 0x7
+#define V_SGETPQID(x) ((x) << S_SGETPQID)
+#define G_SGETPQID(x) (((x) >> S_SGETPQID) & M_SGETPQID)
+
+#define S_TPRXPORTSIZE 7
+#define V_TPRXPORTSIZE(x) ((x) << S_TPRXPORTSIZE)
+#define F_TPRXPORTSIZE V_TPRXPORTSIZE(1U)
+
+#define S_TPTXPORT1SIZE 6
+#define V_TPTXPORT1SIZE(x) ((x) << S_TPTXPORT1SIZE)
+#define F_TPTXPORT1SIZE V_TPTXPORT1SIZE(1U)
+
+#define S_TPTXPORT0SIZE 5
+#define V_TPTXPORT0SIZE(x) ((x) << S_TPTXPORT0SIZE)
+#define F_TPTXPORT0SIZE V_TPTXPORT0SIZE(1U)
+
+#define S_TPRXPORTEN 4
+#define V_TPRXPORTEN(x) ((x) << S_TPRXPORTEN)
+#define F_TPRXPORTEN V_TPRXPORTEN(1U)
+
+#define S_TPTXPORT1EN 3
+#define V_TPTXPORT1EN(x) ((x) << S_TPTXPORT1EN)
+#define F_TPTXPORT1EN V_TPTXPORT1EN(1U)
+
+#define S_TPTXPORT0EN 2
+#define V_TPTXPORT0EN(x) ((x) << S_TPTXPORT0EN)
+#define F_TPTXPORT0EN V_TPTXPORT0EN(1U)
+
+#define S_PORT1ACTIVE 1
+#define V_PORT1ACTIVE(x) ((x) << S_PORT1ACTIVE)
+#define F_PORT1ACTIVE V_PORT1ACTIVE(1U)
+
+#define S_PORT0ACTIVE 0
+#define V_PORT0ACTIVE(x) ((x) << S_PORT0ACTIVE)
+#define F_PORT0ACTIVE V_PORT0ACTIVE(1U)
+
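+/* ENFORCEPKT is also an A_MPS_CFG field, listed out of bit order. */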
+#define S_ENFORCEPKT 11
+#define V_ENFORCEPKT(x) ((x) << S_ENFORCEPKT)
+#define F_ENFORCEPKT V_ENFORCEPKT(1U)
+
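+/*
+ * A representative MPS bring-up write OR-combines the port enables above
+ * (flag selection is illustrative only):
+ *
+ *	t3_write_reg(adapter, A_MPS_CFG,
+ *		     F_TPRXPORTEN | F_TPTXPORT0EN | F_PORT0ACTIVE |
+ *		     F_ENFORCEPKT);
+ */
+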
+#define A_MPS_DRR_CFG1 0x604
+
+#define S_RLDWTTPD1 11
+#define M_RLDWTTPD1 0x7ff
+#define V_RLDWTTPD1(x) ((x) << S_RLDWTTPD1)
+#define G_RLDWTTPD1(x) (((x) >> S_RLDWTTPD1) & M_RLDWTTPD1)
+
+#define S_RLDWTTPD0 0
+#define M_RLDWTTPD0 0x7ff
+#define V_RLDWTTPD0(x) ((x) << S_RLDWTTPD0)
+#define G_RLDWTTPD0(x) (((x) >> S_RLDWTTPD0) & M_RLDWTTPD0)
+
+#define A_MPS_DRR_CFG2 0x608
+
+#define S_RLDWTTOTAL 0
+#define M_RLDWTTOTAL 0xfff
+#define V_RLDWTTOTAL(x) ((x) << S_RLDWTTOTAL)
+#define G_RLDWTTOTAL(x) (((x) >> S_RLDWTTOTAL) & M_RLDWTTOTAL)
+
+#define A_MPS_MCA_STATUS 0x60c
+
+#define S_MCAPKTCNT 12
+#define M_MCAPKTCNT 0xfffff
+#define V_MCAPKTCNT(x) ((x) << S_MCAPKTCNT)
+#define G_MCAPKTCNT(x) (((x) >> S_MCAPKTCNT) & M_MCAPKTCNT)
+
+#define S_MCADEPTH 0
+#define M_MCADEPTH 0xfff
+#define V_MCADEPTH(x) ((x) << S_MCADEPTH)
+#define G_MCADEPTH(x) (((x) >> S_MCADEPTH) & M_MCADEPTH)
+
+#define A_MPS_TX0_TP_CNT 0x610
+
+#define S_TX0TPDISCNT 24
+#define M_TX0TPDISCNT 0xff
+#define V_TX0TPDISCNT(x) ((x) << S_TX0TPDISCNT)
+#define G_TX0TPDISCNT(x) (((x) >> S_TX0TPDISCNT) & M_TX0TPDISCNT)
+
+#define S_TX0TPCNT 0
+#define M_TX0TPCNT 0xffffff
+#define V_TX0TPCNT(x) ((x) << S_TX0TPCNT)
+#define G_TX0TPCNT(x) (((x) >> S_TX0TPCNT) & M_TX0TPCNT)
+
+#define A_MPS_TX1_TP_CNT 0x614
+
+#define S_TX1TPDISCNT 24
+#define M_TX1TPDISCNT 0xff
+#define V_TX1TPDISCNT(x) ((x) << S_TX1TPDISCNT)
+#define G_TX1TPDISCNT(x) (((x) >> S_TX1TPDISCNT) & M_TX1TPDISCNT)
+
+#define S_TX1TPCNT 0
+#define M_TX1TPCNT 0xffffff
+#define V_TX1TPCNT(x) ((x) << S_TX1TPCNT)
+#define G_TX1TPCNT(x) (((x) >> S_TX1TPCNT) & M_TX1TPCNT)
+
+#define A_MPS_RX_TP_CNT 0x618
+
+#define S_RXTPDISCNT 24
+#define M_RXTPDISCNT 0xff
+#define V_RXTPDISCNT(x) ((x) << S_RXTPDISCNT)
+#define G_RXTPDISCNT(x) (((x) >> S_RXTPDISCNT) & M_RXTPDISCNT)
+
+#define S_RXTPCNT 0
+#define M_RXTPCNT 0xffffff
+#define V_RXTPCNT(x) ((x) << S_RXTPCNT)
+#define G_RXTPCNT(x) (((x) >> S_RXTPCNT) & M_RXTPCNT)
+
+#define A_MPS_INT_ENABLE 0x61c
+
+#define S_MCAPARERRENB 6
+#define M_MCAPARERRENB 0x7
+#define V_MCAPARERRENB(x) ((x) << S_MCAPARERRENB)
+#define G_MCAPARERRENB(x) (((x) >> S_MCAPARERRENB) & M_MCAPARERRENB)
+
+#define S_RXTPPARERRENB 4
+#define M_RXTPPARERRENB 0x3
+#define V_RXTPPARERRENB(x) ((x) << S_RXTPPARERRENB)
+#define G_RXTPPARERRENB(x) (((x) >> S_RXTPPARERRENB) & M_RXTPPARERRENB)
+
+#define S_TX1TPPARERRENB 2
+#define M_TX1TPPARERRENB 0x3
+#define V_TX1TPPARERRENB(x) ((x) << S_TX1TPPARERRENB)
+#define G_TX1TPPARERRENB(x) (((x) >> S_TX1TPPARERRENB) & M_TX1TPPARERRENB)
+
+#define S_TX0TPPARERRENB 0
+#define M_TX0TPPARERRENB 0x3
+#define V_TX0TPPARERRENB(x) ((x) << S_TX0TPPARERRENB)
+#define G_TX0TPPARERRENB(x) (((x) >> S_TX0TPPARERRENB) & M_TX0TPPARERRENB)
+
+#define A_MPS_INT_CAUSE 0x620
+
+#define S_MCAPARERR 6
+#define M_MCAPARERR 0x7
+#define V_MCAPARERR(x) ((x) << S_MCAPARERR)
+#define G_MCAPARERR(x) (((x) >> S_MCAPARERR) & M_MCAPARERR)
+
+#define S_RXTPPARERR 4
+#define M_RXTPPARERR 0x3
+#define V_RXTPPARERR(x) ((x) << S_RXTPPARERR)
+#define G_RXTPPARERR(x) (((x) >> S_RXTPPARERR) & M_RXTPPARERR)
+
+#define S_TX1TPPARERR 2
+#define M_TX1TPPARERR 0x3
+#define V_TX1TPPARERR(x) ((x) << S_TX1TPPARERR)
+#define G_TX1TPPARERR(x) (((x) >> S_TX1TPPARERR) & M_TX1TPPARERR)
+
+#define S_TX0TPPARERR 0
+#define M_TX0TPPARERR 0x3
+#define V_TX0TPPARERR(x) ((x) << S_TX0TPPARERR)
+#define G_TX0TPPARERR(x) (((x) >> S_TX0TPPARERR) & M_TX0TPPARERR)
+
+/* registers for module CPL_SWITCH */
+#define CPL_SWITCH_BASE_ADDR 0x640
+
+#define A_CPL_SWITCH_CNTRL 0x640
+
+#define S_CPL_PKT_TID 8
+#define M_CPL_PKT_TID 0xffffff
+#define V_CPL_PKT_TID(x) ((x) << S_CPL_PKT_TID)
+#define G_CPL_PKT_TID(x) (((x) >> S_CPL_PKT_TID) & M_CPL_PKT_TID)
+
+#define S_CPU_NO_3F_CIM_ENABLE 3
+#define V_CPU_NO_3F_CIM_ENABLE(x) ((x) << S_CPU_NO_3F_CIM_ENABLE)
+#define F_CPU_NO_3F_CIM_ENABLE V_CPU_NO_3F_CIM_ENABLE(1U)
+
+#define S_SWITCH_TABLE_ENABLE 2
+#define V_SWITCH_TABLE_ENABLE(x) ((x) << S_SWITCH_TABLE_ENABLE)
+#define F_SWITCH_TABLE_ENABLE V_SWITCH_TABLE_ENABLE(1U)
+
+#define S_SGE_ENABLE 1
+#define V_SGE_ENABLE(x) ((x) << S_SGE_ENABLE)
+#define F_SGE_ENABLE V_SGE_ENABLE(1U)
+
+#define S_CIM_ENABLE 0
+#define V_CIM_ENABLE(x) ((x) << S_CIM_ENABLE)
+#define F_CIM_ENABLE V_CIM_ENABLE(1U)
+
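+/*
+ * Illustrative sketch (an assumed configuration, not driver code):
+ * routing CPL messages to both the SGE and the CIM could be enabled by
+ * OR-ing the destination flags above:
+ *
+ *	t3_write_reg(sc, A_CPL_SWITCH_CNTRL, F_SGE_ENABLE | F_CIM_ENABLE);
+ */
+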
+#define A_CPL_SWITCH_TBL_IDX 0x644
+
+#define S_SWITCH_TBL_IDX 0
+#define M_SWITCH_TBL_IDX 0xf
+#define V_SWITCH_TBL_IDX(x) ((x) << S_SWITCH_TBL_IDX)
+#define G_SWITCH_TBL_IDX(x) (((x) >> S_SWITCH_TBL_IDX) & M_SWITCH_TBL_IDX)
+
+#define A_CPL_SWITCH_TBL_DATA 0x648
+#define A_CPL_SWITCH_ZERO_ERROR 0x64c
+
+#define S_ZERO_CMD 0
+#define M_ZERO_CMD 0xff
+#define V_ZERO_CMD(x) ((x) << S_ZERO_CMD)
+#define G_ZERO_CMD(x) (((x) >> S_ZERO_CMD) & M_ZERO_CMD)
+
+#define A_CPL_INTR_ENABLE 0x650
+
+#define S_CIM_OVFL_ERROR 4
+#define V_CIM_OVFL_ERROR(x) ((x) << S_CIM_OVFL_ERROR)
+#define F_CIM_OVFL_ERROR V_CIM_OVFL_ERROR(1U)
+
+#define S_TP_FRAMING_ERROR 3
+#define V_TP_FRAMING_ERROR(x) ((x) << S_TP_FRAMING_ERROR)
+#define F_TP_FRAMING_ERROR V_TP_FRAMING_ERROR(1U)
+
+#define S_SGE_FRAMING_ERROR 2
+#define V_SGE_FRAMING_ERROR(x) ((x) << S_SGE_FRAMING_ERROR)
+#define F_SGE_FRAMING_ERROR V_SGE_FRAMING_ERROR(1U)
+
+#define S_CIM_FRAMING_ERROR 1
+#define V_CIM_FRAMING_ERROR(x) ((x) << S_CIM_FRAMING_ERROR)
+#define F_CIM_FRAMING_ERROR V_CIM_FRAMING_ERROR(1U)
+
+#define S_ZERO_SWITCH_ERROR 0
+#define V_ZERO_SWITCH_ERROR(x) ((x) << S_ZERO_SWITCH_ERROR)
+#define F_ZERO_SWITCH_ERROR V_ZERO_SWITCH_ERROR(1U)
+
+#define A_CPL_INTR_CAUSE 0x654
+#define A_CPL_MAP_TBL_IDX 0x658
+
+#define S_CPL_MAP_TBL_IDX 0
+#define M_CPL_MAP_TBL_IDX 0xff
+#define V_CPL_MAP_TBL_IDX(x) ((x) << S_CPL_MAP_TBL_IDX)
+#define G_CPL_MAP_TBL_IDX(x) (((x) >> S_CPL_MAP_TBL_IDX) & M_CPL_MAP_TBL_IDX)
+
+#define A_CPL_MAP_TBL_DATA 0x65c
+
+#define S_CPL_MAP_TBL_DATA 0
+#define M_CPL_MAP_TBL_DATA 0xff
+#define V_CPL_MAP_TBL_DATA(x) ((x) << S_CPL_MAP_TBL_DATA)
+#define G_CPL_MAP_TBL_DATA(x) (((x) >> S_CPL_MAP_TBL_DATA) & M_CPL_MAP_TBL_DATA)
+
+/* registers for module SMB0 */
+#define SMB0_BASE_ADDR 0x660
+
+#define A_SMB_GLOBAL_TIME_CFG 0x660
+
+#define S_LADBGWRPTR 24
+#define M_LADBGWRPTR 0xff
+#define V_LADBGWRPTR(x) ((x) << S_LADBGWRPTR)
+#define G_LADBGWRPTR(x) (((x) >> S_LADBGWRPTR) & M_LADBGWRPTR)
+
+#define S_LADBGRDPTR 16
+#define M_LADBGRDPTR 0xff
+#define V_LADBGRDPTR(x) ((x) << S_LADBGRDPTR)
+#define G_LADBGRDPTR(x) (((x) >> S_LADBGRDPTR) & M_LADBGRDPTR)
+
+#define S_LADBGEN 13
+#define V_LADBGEN(x) ((x) << S_LADBGEN)
+#define F_LADBGEN V_LADBGEN(1U)
+
+#define S_MACROCNTCFG 8
+#define M_MACROCNTCFG 0x1f
+#define V_MACROCNTCFG(x) ((x) << S_MACROCNTCFG)
+#define G_MACROCNTCFG(x) (((x) >> S_MACROCNTCFG) & M_MACROCNTCFG)
+
+#define S_MICROCNTCFG 0
+#define M_MICROCNTCFG 0xff
+#define V_MICROCNTCFG(x) ((x) << S_MICROCNTCFG)
+#define G_MICROCNTCFG(x) (((x) >> S_MICROCNTCFG) & M_MICROCNTCFG)
+
+#define A_SMB_MST_TIMEOUT_CFG 0x664
+
+#define S_DEBUGSELH 28
+#define M_DEBUGSELH 0xf
+#define V_DEBUGSELH(x) ((x) << S_DEBUGSELH)
+#define G_DEBUGSELH(x) (((x) >> S_DEBUGSELH) & M_DEBUGSELH)
+
+#define S_DEBUGSELL 24
+#define M_DEBUGSELL 0xf
+#define V_DEBUGSELL(x) ((x) << S_DEBUGSELL)
+#define G_DEBUGSELL(x) (((x) >> S_DEBUGSELL) & M_DEBUGSELL)
+
+#define S_MSTTIMEOUTCFG 0
+#define M_MSTTIMEOUTCFG 0xffffff
+#define V_MSTTIMEOUTCFG(x) ((x) << S_MSTTIMEOUTCFG)
+#define G_MSTTIMEOUTCFG(x) (((x) >> S_MSTTIMEOUTCFG) & M_MSTTIMEOUTCFG)
+
+#define A_SMB_MST_CTL_CFG 0x668
+
+#define S_MSTFIFODBG 31
+#define V_MSTFIFODBG(x) ((x) << S_MSTFIFODBG)
+#define F_MSTFIFODBG V_MSTFIFODBG(1U)
+
+#define S_MSTFIFODBGCLR 30
+#define V_MSTFIFODBGCLR(x) ((x) << S_MSTFIFODBGCLR)
+#define F_MSTFIFODBGCLR V_MSTFIFODBGCLR(1U)
+
+#define S_MSTRXBYTECFG 12
+#define M_MSTRXBYTECFG 0x3f
+#define V_MSTRXBYTECFG(x) ((x) << S_MSTRXBYTECFG)
+#define G_MSTRXBYTECFG(x) (((x) >> S_MSTRXBYTECFG) & M_MSTRXBYTECFG)
+
+#define S_MSTTXBYTECFG 6
+#define M_MSTTXBYTECFG 0x3f
+#define V_MSTTXBYTECFG(x) ((x) << S_MSTTXBYTECFG)
+#define G_MSTTXBYTECFG(x) (((x) >> S_MSTTXBYTECFG) & M_MSTTXBYTECFG)
+
+#define S_MSTRESET 1
+#define V_MSTRESET(x) ((x) << S_MSTRESET)
+#define F_MSTRESET V_MSTRESET(1U)
+
+#define S_MSTCTLEN 0
+#define V_MSTCTLEN(x) ((x) << S_MSTCTLEN)
+#define F_MSTCTLEN V_MSTCTLEN(1U)
+
+#define A_SMB_MST_CTL_STS 0x66c
+
+#define S_MSTRXBYTECNT 12
+#define M_MSTRXBYTECNT 0x3f
+#define V_MSTRXBYTECNT(x) ((x) << S_MSTRXBYTECNT)
+#define G_MSTRXBYTECNT(x) (((x) >> S_MSTRXBYTECNT) & M_MSTRXBYTECNT)
+
+#define S_MSTTXBYTECNT 6
+#define M_MSTTXBYTECNT 0x3f
+#define V_MSTTXBYTECNT(x) ((x) << S_MSTTXBYTECNT)
+#define G_MSTTXBYTECNT(x) (((x) >> S_MSTTXBYTECNT) & M_MSTTXBYTECNT)
+
+#define S_MSTBUSYSTS 0
+#define V_MSTBUSYSTS(x) ((x) << S_MSTBUSYSTS)
+#define F_MSTBUSYSTS V_MSTBUSYSTS(1U)
+
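+/*
+ * Hypothetical polling sketch, shown only to demonstrate the F_ flag
+ * macros: software can wait for an SMBus master transaction to finish
+ * by watching the busy status bit.
+ *
+ *	while (t3_read_reg(sc, A_SMB_MST_CTL_STS) & F_MSTBUSYSTS)
+ *		DELAY(10);
+ */
+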
+#define A_SMB_MST_TX_FIFO_RDWR 0x670
+#define A_SMB_MST_RX_FIFO_RDWR 0x674
+#define A_SMB_SLV_TIMEOUT_CFG 0x678
+
+#define S_SLVTIMEOUTCFG 0
+#define M_SLVTIMEOUTCFG 0xffffff
+#define V_SLVTIMEOUTCFG(x) ((x) << S_SLVTIMEOUTCFG)
+#define G_SLVTIMEOUTCFG(x) (((x) >> S_SLVTIMEOUTCFG) & M_SLVTIMEOUTCFG)
+
+#define A_SMB_SLV_CTL_CFG 0x67c
+
+#define S_SLVFIFODBG 31
+#define V_SLVFIFODBG(x) ((x) << S_SLVFIFODBG)
+#define F_SLVFIFODBG V_SLVFIFODBG(1U)
+
+#define S_SLVFIFODBGCLR 30
+#define V_SLVFIFODBGCLR(x) ((x) << S_SLVFIFODBGCLR)
+#define F_SLVFIFODBGCLR V_SLVFIFODBGCLR(1U)
+
+#define S_SLVADDRCFG 4
+#define M_SLVADDRCFG 0x7f
+#define V_SLVADDRCFG(x) ((x) << S_SLVADDRCFG)
+#define G_SLVADDRCFG(x) (((x) >> S_SLVADDRCFG) & M_SLVADDRCFG)
+
+#define S_SLVALRTSET 2
+#define V_SLVALRTSET(x) ((x) << S_SLVALRTSET)
+#define F_SLVALRTSET V_SLVALRTSET(1U)
+
+#define S_SLVRESET 1
+#define V_SLVRESET(x) ((x) << S_SLVRESET)
+#define F_SLVRESET V_SLVRESET(1U)
+
+#define S_SLVCTLEN 0
+#define V_SLVCTLEN(x) ((x) << S_SLVCTLEN)
+#define F_SLVCTLEN V_SLVCTLEN(1U)
+
+#define A_SMB_SLV_CTL_STS 0x680
+
+#define S_SLVFIFOTXCNT 12
+#define M_SLVFIFOTXCNT 0x3f
+#define V_SLVFIFOTXCNT(x) ((x) << S_SLVFIFOTXCNT)
+#define G_SLVFIFOTXCNT(x) (((x) >> S_SLVFIFOTXCNT) & M_SLVFIFOTXCNT)
+
+#define S_SLVFIFOCNT 6
+#define M_SLVFIFOCNT 0x3f
+#define V_SLVFIFOCNT(x) ((x) << S_SLVFIFOCNT)
+#define G_SLVFIFOCNT(x) (((x) >> S_SLVFIFOCNT) & M_SLVFIFOCNT)
+
+#define S_SLVALRTSTS 2
+#define V_SLVALRTSTS(x) ((x) << S_SLVALRTSTS)
+#define F_SLVALRTSTS V_SLVALRTSTS(1U)
+
+#define S_SLVBUSYSTS 0
+#define V_SLVBUSYSTS(x) ((x) << S_SLVBUSYSTS)
+#define F_SLVBUSYSTS V_SLVBUSYSTS(1U)
+
+#define A_SMB_SLV_FIFO_RDWR 0x684
+#define A_SMB_SLV_CMD_FIFO_RDWR 0x688
+#define A_SMB_INT_ENABLE 0x68c
+
+#define S_SLVTIMEOUTINTEN 7
+#define V_SLVTIMEOUTINTEN(x) ((x) << S_SLVTIMEOUTINTEN)
+#define F_SLVTIMEOUTINTEN V_SLVTIMEOUTINTEN(1U)
+
+#define S_SLVERRINTEN 6
+#define V_SLVERRINTEN(x) ((x) << S_SLVERRINTEN)
+#define F_SLVERRINTEN V_SLVERRINTEN(1U)
+
+#define S_SLVDONEINTEN 5
+#define V_SLVDONEINTEN(x) ((x) << S_SLVDONEINTEN)
+#define F_SLVDONEINTEN V_SLVDONEINTEN(1U)
+
+#define S_SLVRXRDYINTEN 4
+#define V_SLVRXRDYINTEN(x) ((x) << S_SLVRXRDYINTEN)
+#define F_SLVRXRDYINTEN V_SLVRXRDYINTEN(1U)
+
+#define S_MSTTIMEOUTINTEN 3
+#define V_MSTTIMEOUTINTEN(x) ((x) << S_MSTTIMEOUTINTEN)
+#define F_MSTTIMEOUTINTEN V_MSTTIMEOUTINTEN(1U)
+
+#define S_MSTNACKINTEN 2
+#define V_MSTNACKINTEN(x) ((x) << S_MSTNACKINTEN)
+#define F_MSTNACKINTEN V_MSTNACKINTEN(1U)
+
+#define S_MSTLOSTARBINTEN 1
+#define V_MSTLOSTARBINTEN(x) ((x) << S_MSTLOSTARBINTEN)
+#define F_MSTLOSTARBINTEN V_MSTLOSTARBINTEN(1U)
+
+#define S_MSTDONEINTEN 0
+#define V_MSTDONEINTEN(x) ((x) << S_MSTDONEINTEN)
+#define F_MSTDONEINTEN V_MSTDONEINTEN(1U)
+
+#define A_SMB_INT_CAUSE 0x690
+
+#define S_SLVTIMEOUTINT 7
+#define V_SLVTIMEOUTINT(x) ((x) << S_SLVTIMEOUTINT)
+#define F_SLVTIMEOUTINT V_SLVTIMEOUTINT(1U)
+
+#define S_SLVERRINT 6
+#define V_SLVERRINT(x) ((x) << S_SLVERRINT)
+#define F_SLVERRINT V_SLVERRINT(1U)
+
+#define S_SLVDONEINT 5
+#define V_SLVDONEINT(x) ((x) << S_SLVDONEINT)
+#define F_SLVDONEINT V_SLVDONEINT(1U)
+
+#define S_SLVRXRDYINT 4
+#define V_SLVRXRDYINT(x) ((x) << S_SLVRXRDYINT)
+#define F_SLVRXRDYINT V_SLVRXRDYINT(1U)
+
+#define S_MSTTIMEOUTINT 3
+#define V_MSTTIMEOUTINT(x) ((x) << S_MSTTIMEOUTINT)
+#define F_MSTTIMEOUTINT V_MSTTIMEOUTINT(1U)
+
+#define S_MSTNACKINT 2
+#define V_MSTNACKINT(x) ((x) << S_MSTNACKINT)
+#define F_MSTNACKINT V_MSTNACKINT(1U)
+
+#define S_MSTLOSTARBINT 1
+#define V_MSTLOSTARBINT(x) ((x) << S_MSTLOSTARBINT)
+#define F_MSTLOSTARBINT V_MSTLOSTARBINT(1U)
+
+#define S_MSTDONEINT 0
+#define V_MSTDONEINT(x) ((x) << S_MSTDONEINT)
+#define F_MSTDONEINT V_MSTDONEINT(1U)
+
+#define A_SMB_DEBUG_DATA 0x694
+
+#define S_DEBUGDATAH 16
+#define M_DEBUGDATAH 0xffff
+#define V_DEBUGDATAH(x) ((x) << S_DEBUGDATAH)
+#define G_DEBUGDATAH(x) (((x) >> S_DEBUGDATAH) & M_DEBUGDATAH)
+
+#define S_DEBUGDATAL 0
+#define M_DEBUGDATAL 0xffff
+#define V_DEBUGDATAL(x) ((x) << S_DEBUGDATAL)
+#define G_DEBUGDATAL(x) (((x) >> S_DEBUGDATAL) & M_DEBUGDATAL)
+
+#define A_SMB_DEBUG_LA 0x69c
+
+#define S_DEBUGLAREQADDR 0
+#define M_DEBUGLAREQADDR 0x3ff
+#define V_DEBUGLAREQADDR(x) ((x) << S_DEBUGLAREQADDR)
+#define G_DEBUGLAREQADDR(x) (((x) >> S_DEBUGLAREQADDR) & M_DEBUGLAREQADDR)
+
+/* registers for module I2CM0 */
+#define I2CM0_BASE_ADDR 0x6a0
+
+#define A_I2C_CFG 0x6a0
+
+#define S_I2C_CLKDIV 0
+#define M_I2C_CLKDIV 0xfff
+#define V_I2C_CLKDIV(x) ((x) << S_I2C_CLKDIV)
+#define G_I2C_CLKDIV(x) (((x) >> S_I2C_CLKDIV) & M_I2C_CLKDIV)
+
+#define A_I2C_DATA 0x6a4
+#define A_I2C_OP 0x6a8
+
+#define S_ACK 30
+#define V_ACK(x) ((x) << S_ACK)
+#define F_ACK V_ACK(1U)
+
+#define S_I2C_CONT 1
+#define V_I2C_CONT(x) ((x) << S_I2C_CONT)
+#define F_I2C_CONT V_I2C_CONT(1U)
+
+/* registers for module MI1 */
+#define MI1_BASE_ADDR 0x6b0
+
+#define A_MI1_CFG 0x6b0
+
+#define S_CLKDIV 5
+#define M_CLKDIV 0xff
+#define V_CLKDIV(x) ((x) << S_CLKDIV)
+#define G_CLKDIV(x) (((x) >> S_CLKDIV) & M_CLKDIV)
+
+#define S_ST 3
+#define M_ST 0x3
+#define V_ST(x) ((x) << S_ST)
+#define G_ST(x) (((x) >> S_ST) & M_ST)
+
+#define S_PREEN 2
+#define V_PREEN(x) ((x) << S_PREEN)
+#define F_PREEN V_PREEN(1U)
+
+#define S_MDIINV 1
+#define V_MDIINV(x) ((x) << S_MDIINV)
+#define F_MDIINV V_MDIINV(1U)
+
+#define S_MDIEN 0
+#define V_MDIEN(x) ((x) << S_MDIEN)
+#define F_MDIEN V_MDIEN(1U)
+
+#define A_MI1_ADDR 0x6b4
+
+#define S_PHYADDR 5
+#define M_PHYADDR 0x1f
+#define V_PHYADDR(x) ((x) << S_PHYADDR)
+#define G_PHYADDR(x) (((x) >> S_PHYADDR) & M_PHYADDR)
+
+#define S_REGADDR 0
+#define M_REGADDR 0x1f
+#define V_REGADDR(x) ((x) << S_REGADDR)
+#define G_REGADDR(x) (((x) >> S_REGADDR) & M_REGADDR)
+
+#define A_MI1_DATA 0x6b8
+
+#define S_MDI_DATA 0
+#define M_MDI_DATA 0xffff
+#define V_MDI_DATA(x) ((x) << S_MDI_DATA)
+#define G_MDI_DATA(x) (((x) >> S_MDI_DATA) & M_MDI_DATA)
+
+#define A_MI1_OP 0x6bc
+
+#define S_INC 2
+#define V_INC(x) ((x) << S_INC)
+#define F_INC V_INC(1U)
+
+#define S_MDI_OP 0
+#define M_MDI_OP 0x3
+#define V_MDI_OP(x) ((x) << S_MDI_OP)
+#define G_MDI_OP(x) (((x) >> S_MDI_OP) & M_MDI_OP)
+
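+/*
+ * Illustrative MDIO read sketch using the MI1 registers above.  The
+ * MDI_OP operation codes are hardware-defined; 2 is assumed to be a
+ * clause-22 read here, and the completion poll is elided:
+ *
+ *	t3_write_reg(sc, A_MI1_ADDR, V_PHYADDR(phy) | V_REGADDR(reg));
+ *	t3_write_reg(sc, A_MI1_OP, V_MDI_OP(2));
+ *	(poll for completion)
+ *	val = G_MDI_DATA(t3_read_reg(sc, A_MI1_DATA));
+ */
+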
+/* registers for module JM1 */
+#define JM1_BASE_ADDR 0x6c0
+
+#define A_JM_CFG 0x6c0
+
+#define S_JM_CLKDIV 2
+#define M_JM_CLKDIV 0xff
+#define V_JM_CLKDIV(x) ((x) << S_JM_CLKDIV)
+#define G_JM_CLKDIV(x) (((x) >> S_JM_CLKDIV) & M_JM_CLKDIV)
+
+#define S_TRST 1
+#define V_TRST(x) ((x) << S_TRST)
+#define F_TRST V_TRST(1U)
+
+#define S_EN 0
+#define V_EN(x) ((x) << S_EN)
+#define F_EN V_EN(1U)
+
+#define A_JM_MODE 0x6c4
+#define A_JM_DATA 0x6c8
+#define A_JM_OP 0x6cc
+
+#define S_CNT 0
+#define M_CNT 0x1f
+#define V_CNT(x) ((x) << S_CNT)
+#define G_CNT(x) (((x) >> S_CNT) & M_CNT)
+
+/* registers for module SF1 */
+#define SF1_BASE_ADDR 0x6d8
+
+#define A_SF_DATA 0x6d8
+#define A_SF_OP 0x6dc
+
+#define S_BYTECNT 1
+#define M_BYTECNT 0x3
+#define V_BYTECNT(x) ((x) << S_BYTECNT)
+#define G_BYTECNT(x) (((x) >> S_BYTECNT) & M_BYTECNT)
+
+/* registers for module PL3 */
+#define PL3_BASE_ADDR 0x6e0
+
+#define A_PL_INT_ENABLE0 0x6e0
+
+#define S_EXT 24
+#define V_EXT(x) ((x) << S_EXT)
+#define F_EXT V_EXT(1U)
+
+#define S_T3DBG 23
+#define V_T3DBG(x) ((x) << S_T3DBG)
+#define F_T3DBG V_T3DBG(1U)
+
+#define S_XGMAC0_1 20
+#define V_XGMAC0_1(x) ((x) << S_XGMAC0_1)
+#define F_XGMAC0_1 V_XGMAC0_1(1U)
+
+#define S_XGMAC0_0 19
+#define V_XGMAC0_0(x) ((x) << S_XGMAC0_0)
+#define F_XGMAC0_0 V_XGMAC0_0(1U)
+
+#define S_MC5A 18
+#define V_MC5A(x) ((x) << S_MC5A)
+#define F_MC5A V_MC5A(1U)
+
+#define S_SF1 17
+#define V_SF1(x) ((x) << S_SF1)
+#define F_SF1 V_SF1(1U)
+
+#define S_SMB0 15
+#define V_SMB0(x) ((x) << S_SMB0)
+#define F_SMB0 V_SMB0(1U)
+
+#define S_I2CM0 14
+#define V_I2CM0(x) ((x) << S_I2CM0)
+#define F_I2CM0 V_I2CM0(1U)
+
+#define S_MI1 13
+#define V_MI1(x) ((x) << S_MI1)
+#define F_MI1 V_MI1(1U)
+
+#define S_CPL_SWITCH 12
+#define V_CPL_SWITCH(x) ((x) << S_CPL_SWITCH)
+#define F_CPL_SWITCH V_CPL_SWITCH(1U)
+
+#define S_MPS0 11
+#define V_MPS0(x) ((x) << S_MPS0)
+#define F_MPS0 V_MPS0(1U)
+
+#define S_PM1_TX 10
+#define V_PM1_TX(x) ((x) << S_PM1_TX)
+#define F_PM1_TX V_PM1_TX(1U)
+
+#define S_PM1_RX 9
+#define V_PM1_RX(x) ((x) << S_PM1_RX)
+#define F_PM1_RX V_PM1_RX(1U)
+
+#define S_ULP2_TX 8
+#define V_ULP2_TX(x) ((x) << S_ULP2_TX)
+#define F_ULP2_TX V_ULP2_TX(1U)
+
+#define S_ULP2_RX 7
+#define V_ULP2_RX(x) ((x) << S_ULP2_RX)
+#define F_ULP2_RX V_ULP2_RX(1U)
+
+#define S_TP1 6
+#define V_TP1(x) ((x) << S_TP1)
+#define F_TP1 V_TP1(1U)
+
+#define S_CIM 5
+#define V_CIM(x) ((x) << S_CIM)
+#define F_CIM V_CIM(1U)
+
+#define S_MC7_CM 4
+#define V_MC7_CM(x) ((x) << S_MC7_CM)
+#define F_MC7_CM V_MC7_CM(1U)
+
+#define S_MC7_PMTX 3
+#define V_MC7_PMTX(x) ((x) << S_MC7_PMTX)
+#define F_MC7_PMTX V_MC7_PMTX(1U)
+
+#define S_MC7_PMRX 2
+#define V_MC7_PMRX(x) ((x) << S_MC7_PMRX)
+#define F_MC7_PMRX V_MC7_PMRX(1U)
+
+#define S_PCIM0 1
+#define V_PCIM0(x) ((x) << S_PCIM0)
+#define F_PCIM0 V_PCIM0(1U)
+
+#define S_SGE3 0
+#define V_SGE3(x) ((x) << S_SGE3)
+#define F_SGE3 V_SGE3(1U)
+
+#define S_SW 25
+#define V_SW(x) ((x) << S_SW)
+#define F_SW V_SW(1U)
+
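+/*
+ * Illustrative sketch: the top-level interrupt enable is an OR of the
+ * per-module F_ flags above.  The combination shown is an assumption
+ * for demonstration; the driver's actual mask differs:
+ *
+ *	t3_write_reg(sc, A_PL_INT_ENABLE0,
+ *	    F_SGE3 | F_MC7_PMRX | F_MC7_PMTX | F_MC7_CM |
+ *	    F_CIM | F_TP1 | F_XGMAC0_0 | F_XGMAC0_1 | F_T3DBG);
+ */
+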
+#define A_PL_INT_CAUSE0 0x6e4
+#define A_PL_INT_ENABLE1 0x6e8
+#define A_PL_INT_CAUSE1 0x6ec
+#define A_PL_RST 0x6f0
+
+#define S_CRSTWRM 1
+#define V_CRSTWRM(x) ((x) << S_CRSTWRM)
+#define F_CRSTWRM V_CRSTWRM(1U)
+
+#define S_SWINT1 3
+#define V_SWINT1(x) ((x) << S_SWINT1)
+#define F_SWINT1 V_SWINT1(1U)
+
+#define S_SWINT0 2
+#define V_SWINT0(x) ((x) << S_SWINT0)
+#define F_SWINT0 V_SWINT0(1U)
+
+#define A_PL_REV 0x6f4
+
+#define S_REV 0
+#define M_REV 0xf
+#define V_REV(x) ((x) << S_REV)
+#define G_REV(x) (((x) >> S_REV) & M_REV)
+
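+/*
+ * Illustrative use of a G_ extractor: the chip revision is the low
+ * nibble of A_PL_REV, e.g.
+ *
+ *	unsigned int rev = G_REV(t3_read_reg(sc, A_PL_REV));
+ */
+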
+#define A_PL_CLI 0x6f8
+#define A_PL_LCK 0x6fc
+
+#define S_LCK 0
+#define M_LCK 0x3
+#define V_LCK(x) ((x) << S_LCK)
+#define G_LCK(x) (((x) >> S_LCK) & M_LCK)
+
+/* registers for module MC5A */
+#define MC5A_BASE_ADDR 0x700
+
+#define A_MC5_BUF_CONFIG 0x700
+
+#define S_TERM300_240 31
+#define V_TERM300_240(x) ((x) << S_TERM300_240)
+#define F_TERM300_240 V_TERM300_240(1U)
+
+#define S_MC5_TERM150 30
+#define V_MC5_TERM150(x) ((x) << S_MC5_TERM150)
+#define F_MC5_TERM150 V_MC5_TERM150(1U)
+
+#define S_TERM60 29
+#define V_TERM60(x) ((x) << S_TERM60)
+#define F_TERM60 V_TERM60(1U)
+
+#define S_GDDRIII 28
+#define V_GDDRIII(x) ((x) << S_GDDRIII)
+#define F_GDDRIII V_GDDRIII(1U)
+
+#define S_GDDRII 27
+#define V_GDDRII(x) ((x) << S_GDDRII)
+#define F_GDDRII V_GDDRII(1U)
+
+#define S_GDDRI 26
+#define V_GDDRI(x) ((x) << S_GDDRI)
+#define F_GDDRI V_GDDRI(1U)
+
+#define S_READ 25
+#define V_READ(x) ((x) << S_READ)
+#define F_READ V_READ(1U)
+
+#define S_CAL_IMP_UPD 23
+#define V_CAL_IMP_UPD(x) ((x) << S_CAL_IMP_UPD)
+#define F_CAL_IMP_UPD V_CAL_IMP_UPD(1U)
+
+#define S_CAL_BUSY 22
+#define V_CAL_BUSY(x) ((x) << S_CAL_BUSY)
+#define F_CAL_BUSY V_CAL_BUSY(1U)
+
+#define S_CAL_ERROR 21
+#define V_CAL_ERROR(x) ((x) << S_CAL_ERROR)
+#define F_CAL_ERROR V_CAL_ERROR(1U)
+
+#define S_SGL_CAL_EN 20
+#define V_SGL_CAL_EN(x) ((x) << S_SGL_CAL_EN)
+#define F_SGL_CAL_EN V_SGL_CAL_EN(1U)
+
+#define S_IMP_UPD_MODE 19
+#define V_IMP_UPD_MODE(x) ((x) << S_IMP_UPD_MODE)
+#define F_IMP_UPD_MODE V_IMP_UPD_MODE(1U)
+
+#define S_IMP_SEL 18
+#define V_IMP_SEL(x) ((x) << S_IMP_SEL)
+#define F_IMP_SEL V_IMP_SEL(1U)
+
+#define S_MAN_PU 15
+#define M_MAN_PU 0x7
+#define V_MAN_PU(x) ((x) << S_MAN_PU)
+#define G_MAN_PU(x) (((x) >> S_MAN_PU) & M_MAN_PU)
+
+#define S_MAN_PD 12
+#define M_MAN_PD 0x7
+#define V_MAN_PD(x) ((x) << S_MAN_PD)
+#define G_MAN_PD(x) (((x) >> S_MAN_PD) & M_MAN_PD)
+
+#define S_CAL_PU 9
+#define M_CAL_PU 0x7
+#define V_CAL_PU(x) ((x) << S_CAL_PU)
+#define G_CAL_PU(x) (((x) >> S_CAL_PU) & M_CAL_PU)
+
+#define S_CAL_PD 6
+#define M_CAL_PD 0x7
+#define V_CAL_PD(x) ((x) << S_CAL_PD)
+#define G_CAL_PD(x) (((x) >> S_CAL_PD) & M_CAL_PD)
+
+#define S_SET_PU 3
+#define M_SET_PU 0x7
+#define V_SET_PU(x) ((x) << S_SET_PU)
+#define G_SET_PU(x) (((x) >> S_SET_PU) & M_SET_PU)
+
+#define S_SET_PD 0
+#define M_SET_PD 0x7
+#define V_SET_PD(x) ((x) << S_SET_PD)
+#define G_SET_PD(x) (((x) >> S_SET_PD) & M_SET_PD)
+
+#define S_IMP_SET_UPDATE 24
+#define V_IMP_SET_UPDATE(x) ((x) << S_IMP_SET_UPDATE)
+#define F_IMP_SET_UPDATE V_IMP_SET_UPDATE(1U)
+
+#define S_CAL_UPDATE 23
+#define V_CAL_UPDATE(x) ((x) << S_CAL_UPDATE)
+#define F_CAL_UPDATE V_CAL_UPDATE(1U)
+
+#define A_MC5_DB_CONFIG 0x704
+
+#define S_TMCFGWRLOCK 31
+#define V_TMCFGWRLOCK(x) ((x) << S_TMCFGWRLOCK)
+#define F_TMCFGWRLOCK V_TMCFGWRLOCK(1U)
+
+#define S_TMTYPEHI 30
+#define V_TMTYPEHI(x) ((x) << S_TMTYPEHI)
+#define F_TMTYPEHI V_TMTYPEHI(1U)
+
+#define S_TMPARTSIZE 28
+#define M_TMPARTSIZE 0x3
+#define V_TMPARTSIZE(x) ((x) << S_TMPARTSIZE)
+#define G_TMPARTSIZE(x) (((x) >> S_TMPARTSIZE) & M_TMPARTSIZE)
+
+#define S_TMTYPE 26
+#define M_TMTYPE 0x3
+#define V_TMTYPE(x) ((x) << S_TMTYPE)
+#define G_TMTYPE(x) (((x) >> S_TMTYPE) & M_TMTYPE)
+
+#define S_TMPARTCOUNT 24
+#define M_TMPARTCOUNT 0x3
+#define V_TMPARTCOUNT(x) ((x) << S_TMPARTCOUNT)
+#define G_TMPARTCOUNT(x) (((x) >> S_TMPARTCOUNT) & M_TMPARTCOUNT)
+
+#define S_NLIP 18
+#define M_NLIP 0x3f
+#define V_NLIP(x) ((x) << S_NLIP)
+#define G_NLIP(x) (((x) >> S_NLIP) & M_NLIP)
+
+#define S_COMPEN 17
+#define V_COMPEN(x) ((x) << S_COMPEN)
+#define F_COMPEN V_COMPEN(1U)
+
+#define S_BUILD 16
+#define V_BUILD(x) ((x) << S_BUILD)
+#define F_BUILD V_BUILD(1U)
+
+#define S_TM_IO_PDOWN 9
+#define V_TM_IO_PDOWN(x) ((x) << S_TM_IO_PDOWN)
+#define F_TM_IO_PDOWN V_TM_IO_PDOWN(1U)
+
+#define S_SYNMODE 7
+#define M_SYNMODE 0x3
+#define V_SYNMODE(x) ((x) << S_SYNMODE)
+#define G_SYNMODE(x) (((x) >> S_SYNMODE) & M_SYNMODE)
+
+#define S_PRTYEN 6
+#define V_PRTYEN(x) ((x) << S_PRTYEN)
+#define F_PRTYEN V_PRTYEN(1U)
+
+#define S_MBUSEN 5
+#define V_MBUSEN(x) ((x) << S_MBUSEN)
+#define F_MBUSEN V_MBUSEN(1U)
+
+#define S_DBGIEN 4
+#define V_DBGIEN(x) ((x) << S_DBGIEN)
+#define F_DBGIEN V_DBGIEN(1U)
+
+#define S_TMRDY 2
+#define V_TMRDY(x) ((x) << S_TMRDY)
+#define F_TMRDY V_TMRDY(1U)
+
+#define S_TMRST 1
+#define V_TMRST(x) ((x) << S_TMRST)
+#define F_TMRST V_TMRST(1U)
+
+#define S_TMMODE 0
+#define V_TMMODE(x) ((x) << S_TMMODE)
+#define F_TMMODE V_TMMODE(1U)
+
+#define S_FILTEREN 11
+#define V_FILTEREN(x) ((x) << S_FILTEREN)
+#define F_FILTEREN V_FILTEREN(1U)
+
+#define S_CLIPUPDATE 10
+#define V_CLIPUPDATE(x) ((x) << S_CLIPUPDATE)
+#define F_CLIPUPDATE V_CLIPUPDATE(1U)
+
+#define S_TCMCFGOVR 3
+#define V_TCMCFGOVR(x) ((x) << S_TCMCFGOVR)
+#define F_TCMCFGOVR V_TCMCFGOVR(1U)
+
+#define A_MC5_MISC 0x708
+
+#define S_LIP_CMP_UNAVAILABLE 0
+#define M_LIP_CMP_UNAVAILABLE 0xf
+#define V_LIP_CMP_UNAVAILABLE(x) ((x) << S_LIP_CMP_UNAVAILABLE)
+#define G_LIP_CMP_UNAVAILABLE(x) (((x) >> S_LIP_CMP_UNAVAILABLE) & M_LIP_CMP_UNAVAILABLE)
+
+#define A_MC5_DB_ROUTING_TABLE_INDEX 0x70c
+
+#define S_RTINDX 0
+#define M_RTINDX 0x3fffff
+#define V_RTINDX(x) ((x) << S_RTINDX)
+#define G_RTINDX(x) (((x) >> S_RTINDX) & M_RTINDX)
+
+#define A_MC5_DB_FILTER_TABLE 0x710
+#define A_MC5_DB_SERVER_INDEX 0x714
+
+#define S_SRINDX 0
+#define M_SRINDX 0x3fffff
+#define V_SRINDX(x) ((x) << S_SRINDX)
+#define G_SRINDX(x) (((x) >> S_SRINDX) & M_SRINDX)
+
+#define A_MC5_DB_LIP_RAM_ADDR 0x718
+
+#define S_RAMWR 8
+#define V_RAMWR(x) ((x) << S_RAMWR)
+#define F_RAMWR V_RAMWR(1U)
+
+#define S_RAMADDR 0
+#define M_RAMADDR 0x3f
+#define V_RAMADDR(x) ((x) << S_RAMADDR)
+#define G_RAMADDR(x) (((x) >> S_RAMADDR) & M_RAMADDR)
+
+#define A_MC5_DB_LIP_RAM_DATA 0x71c
+#define A_MC5_DB_RSP_LATENCY 0x720
+
+#define S_RDLAT 16
+#define M_RDLAT 0x1f
+#define V_RDLAT(x) ((x) << S_RDLAT)
+#define G_RDLAT(x) (((x) >> S_RDLAT) & M_RDLAT)
+
+#define S_LRNLAT 8
+#define M_LRNLAT 0x1f
+#define V_LRNLAT(x) ((x) << S_LRNLAT)
+#define G_LRNLAT(x) (((x) >> S_LRNLAT) & M_LRNLAT)
+
+#define S_SRCHLAT 0
+#define M_SRCHLAT 0x1f
+#define V_SRCHLAT(x) ((x) << S_SRCHLAT)
+#define G_SRCHLAT(x) (((x) >> S_SRCHLAT) & M_SRCHLAT)
+
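+/*
+ * Illustrative sketch of combining several V_ field builders in a
+ * single write.  The latency values are assumptions for demonstration;
+ * the correct ones depend on the attached TCAM part:
+ *
+ *	t3_write_reg(sc, A_MC5_DB_RSP_LATENCY,
+ *	    V_RDLAT(0x15) | V_LRNLAT(0x15) | V_SRCHLAT(0x15));
+ */
+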
+#define A_MC5_DB_PARITY_LATENCY 0x724
+
+#define S_PARLAT 0
+#define M_PARLAT 0xf
+#define V_PARLAT(x) ((x) << S_PARLAT)
+#define G_PARLAT(x) (((x) >> S_PARLAT) & M_PARLAT)
+
+#define A_MC5_DB_WR_LRN_VERIFY 0x728
+
+#define S_VWVEREN 2
+#define V_VWVEREN(x) ((x) << S_VWVEREN)
+#define F_VWVEREN V_VWVEREN(1U)
+
+#define S_LRNVEREN 1
+#define V_LRNVEREN(x) ((x) << S_LRNVEREN)
+#define F_LRNVEREN V_LRNVEREN(1U)
+
+#define S_POVEREN 0
+#define V_POVEREN(x) ((x) << S_POVEREN)
+#define F_POVEREN V_POVEREN(1U)
+
+#define A_MC5_DB_PART_ID_INDEX 0x72c
+
+#define S_IDINDEX 0
+#define M_IDINDEX 0xf
+#define V_IDINDEX(x) ((x) << S_IDINDEX)
+#define G_IDINDEX(x) (((x) >> S_IDINDEX) & M_IDINDEX)
+
+#define A_MC5_DB_RESET_MAX 0x730
+
+#define S_RSTMAX 0
+#define M_RSTMAX 0xf
+#define V_RSTMAX(x) ((x) << S_RSTMAX)
+#define G_RSTMAX(x) (((x) >> S_RSTMAX) & M_RSTMAX)
+
+#define A_MC5_DB_ACT_CNT 0x734
+
+#define S_ACTCNT 0
+#define M_ACTCNT 0xfffff
+#define V_ACTCNT(x) ((x) << S_ACTCNT)
+#define G_ACTCNT(x) (((x) >> S_ACTCNT) & M_ACTCNT)
+
+#define A_MC5_DB_CLIP_MAP 0x738
+
+#define S_CLIPMAPOP 31
+#define V_CLIPMAPOP(x) ((x) << S_CLIPMAPOP)
+#define F_CLIPMAPOP V_CLIPMAPOP(1U)
+
+#define S_CLIPMAPVAL 16
+#define M_CLIPMAPVAL 0x3f
+#define V_CLIPMAPVAL(x) ((x) << S_CLIPMAPVAL)
+#define G_CLIPMAPVAL(x) (((x) >> S_CLIPMAPVAL) & M_CLIPMAPVAL)
+
+#define S_CLIPMAPADDR 0
+#define M_CLIPMAPADDR 0x3f
+#define V_CLIPMAPADDR(x) ((x) << S_CLIPMAPADDR)
+#define G_CLIPMAPADDR(x) (((x) >> S_CLIPMAPADDR) & M_CLIPMAPADDR)
+
+#define A_MC5_DB_INT_ENABLE 0x740
+
+#define S_MSGSEL 28
+#define M_MSGSEL 0xf
+#define V_MSGSEL(x) ((x) << S_MSGSEL)
+#define G_MSGSEL(x) (((x) >> S_MSGSEL) & M_MSGSEL)
+
+#define S_DELACTEMPTY 18
+#define V_DELACTEMPTY(x) ((x) << S_DELACTEMPTY)
+#define F_DELACTEMPTY V_DELACTEMPTY(1U)
+
+#define S_DISPQPARERR 17
+#define V_DISPQPARERR(x) ((x) << S_DISPQPARERR)
+#define F_DISPQPARERR V_DISPQPARERR(1U)
+
+#define S_REQQPARERR 16
+#define V_REQQPARERR(x) ((x) << S_REQQPARERR)
+#define F_REQQPARERR V_REQQPARERR(1U)
+
+#define S_UNKNOWNCMD 15
+#define V_UNKNOWNCMD(x) ((x) << S_UNKNOWNCMD)
+#define F_UNKNOWNCMD V_UNKNOWNCMD(1U)
+
+#define S_SYNCOOKIEOFF 11
+#define V_SYNCOOKIEOFF(x) ((x) << S_SYNCOOKIEOFF)
+#define F_SYNCOOKIEOFF V_SYNCOOKIEOFF(1U)
+
+#define S_SYNCOOKIEBAD 10
+#define V_SYNCOOKIEBAD(x) ((x) << S_SYNCOOKIEBAD)
+#define F_SYNCOOKIEBAD V_SYNCOOKIEBAD(1U)
+
+#define S_SYNCOOKIE 9
+#define V_SYNCOOKIE(x) ((x) << S_SYNCOOKIE)
+#define F_SYNCOOKIE V_SYNCOOKIE(1U)
+
+#define S_NFASRCHFAIL 8
+#define V_NFASRCHFAIL(x) ((x) << S_NFASRCHFAIL)
+#define F_NFASRCHFAIL V_NFASRCHFAIL(1U)
+
+#define S_ACTRGNFULL 7
+#define V_ACTRGNFULL(x) ((x) << S_ACTRGNFULL)
+#define F_ACTRGNFULL V_ACTRGNFULL(1U)
+
+#define S_PARITYERR 6
+#define V_PARITYERR(x) ((x) << S_PARITYERR)
+#define F_PARITYERR V_PARITYERR(1U)
+
+#define S_LIPMISS 5
+#define V_LIPMISS(x) ((x) << S_LIPMISS)
+#define F_LIPMISS V_LIPMISS(1U)
+
+#define S_LIP0 4
+#define V_LIP0(x) ((x) << S_LIP0)
+#define F_LIP0 V_LIP0(1U)
+
+#define S_MISS 3
+#define V_MISS(x) ((x) << S_MISS)
+#define F_MISS V_MISS(1U)
+
+#define S_ROUTINGHIT 2
+#define V_ROUTINGHIT(x) ((x) << S_ROUTINGHIT)
+#define F_ROUTINGHIT V_ROUTINGHIT(1U)
+
+#define S_ACTIVEHIT 1
+#define V_ACTIVEHIT(x) ((x) << S_ACTIVEHIT)
+#define F_ACTIVEHIT V_ACTIVEHIT(1U)
+
+#define S_ACTIVEOUTHIT 0
+#define V_ACTIVEOUTHIT(x) ((x) << S_ACTIVEOUTHIT)
+#define F_ACTIVEOUTHIT V_ACTIVEOUTHIT(1U)
+
+#define A_MC5_DB_INT_CAUSE 0x744
+#define A_MC5_DB_INT_TID 0x748
+
+#define S_INTTID 0
+#define M_INTTID 0xfffff
+#define V_INTTID(x) ((x) << S_INTTID)
+#define G_INTTID(x) (((x) >> S_INTTID) & M_INTTID)
+
+#define A_MC5_DB_INT_PTID 0x74c
+
+#define S_INTPTID 0
+#define M_INTPTID 0xfffff
+#define V_INTPTID(x) ((x) << S_INTPTID)
+#define G_INTPTID(x) (((x) >> S_INTPTID) & M_INTPTID)
+
+#define A_MC5_DB_DBGI_CONFIG 0x774
+
+#define S_WRREQSIZE 22
+#define M_WRREQSIZE 0x3ff
+#define V_WRREQSIZE(x) ((x) << S_WRREQSIZE)
+#define G_WRREQSIZE(x) (((x) >> S_WRREQSIZE) & M_WRREQSIZE)
+
+#define S_SADRSEL 4
+#define V_SADRSEL(x) ((x) << S_SADRSEL)
+#define F_SADRSEL V_SADRSEL(1U)
+
+#define S_CMDMODE 0
+#define M_CMDMODE 0x7
+#define V_CMDMODE(x) ((x) << S_CMDMODE)
+#define G_CMDMODE(x) (((x) >> S_CMDMODE) & M_CMDMODE)
+
+#define A_MC5_DB_DBGI_REQ_CMD 0x778
+
+#define S_MBUSCMD 0
+#define M_MBUSCMD 0xf
+#define V_MBUSCMD(x) ((x) << S_MBUSCMD)
+#define G_MBUSCMD(x) (((x) >> S_MBUSCMD) & M_MBUSCMD)
+
+#define S_IDTCMDHI 11
+#define M_IDTCMDHI 0x7
+#define V_IDTCMDHI(x) ((x) << S_IDTCMDHI)
+#define G_IDTCMDHI(x) (((x) >> S_IDTCMDHI) & M_IDTCMDHI)
+
+#define S_IDTCMDLO 0
+#define M_IDTCMDLO 0xf
+#define V_IDTCMDLO(x) ((x) << S_IDTCMDLO)
+#define G_IDTCMDLO(x) (((x) >> S_IDTCMDLO) & M_IDTCMDLO)
+
+#define S_IDTCMD 0
+#define M_IDTCMD 0xfffff
+#define V_IDTCMD(x) ((x) << S_IDTCMD)
+#define G_IDTCMD(x) (((x) >> S_IDTCMD) & M_IDTCMD)
+
+#define S_LCMDB 16
+#define M_LCMDB 0x7ff
+#define V_LCMDB(x) ((x) << S_LCMDB)
+#define G_LCMDB(x) (((x) >> S_LCMDB) & M_LCMDB)
+
+#define S_LCMDA 0
+#define M_LCMDA 0x7ff
+#define V_LCMDA(x) ((x) << S_LCMDA)
+#define G_LCMDA(x) (((x) >> S_LCMDA) & M_LCMDA)
+
+#define A_MC5_DB_DBGI_REQ_ADDR0 0x77c
+#define A_MC5_DB_DBGI_REQ_ADDR1 0x780
+#define A_MC5_DB_DBGI_REQ_ADDR2 0x784
+
+#define S_DBGIREQADRHI 0
+#define M_DBGIREQADRHI 0xff
+#define V_DBGIREQADRHI(x) ((x) << S_DBGIREQADRHI)
+#define G_DBGIREQADRHI(x) (((x) >> S_DBGIREQADRHI) & M_DBGIREQADRHI)
+
+#define A_MC5_DB_DBGI_REQ_DATA0 0x788
+#define A_MC5_DB_DBGI_REQ_DATA1 0x78c
+#define A_MC5_DB_DBGI_REQ_DATA2 0x790
+#define A_MC5_DB_DBGI_REQ_DATA3 0x794
+#define A_MC5_DB_DBGI_REQ_DATA4 0x798
+
+#define S_DBGIREQDATA4 0
+#define M_DBGIREQDATA4 0xffff
+#define V_DBGIREQDATA4(x) ((x) << S_DBGIREQDATA4)
+#define G_DBGIREQDATA4(x) (((x) >> S_DBGIREQDATA4) & M_DBGIREQDATA4)
+
+#define A_MC5_DB_DBGI_REQ_MASK0 0x79c
+#define A_MC5_DB_DBGI_REQ_MASK1 0x7a0
+#define A_MC5_DB_DBGI_REQ_MASK2 0x7a4
+#define A_MC5_DB_DBGI_REQ_MASK3 0x7a8
+#define A_MC5_DB_DBGI_REQ_MASK4 0x7ac
+
+#define S_DBGIREQMSK4 0
+#define M_DBGIREQMSK4 0xffff
+#define V_DBGIREQMSK4(x) ((x) << S_DBGIREQMSK4)
+#define G_DBGIREQMSK4(x) (((x) >> S_DBGIREQMSK4) & M_DBGIREQMSK4)
+
+#define A_MC5_DB_DBGI_RSP_STATUS 0x7b0
+
+#define S_DBGIRSPMSG 8
+#define M_DBGIRSPMSG 0xf
+#define V_DBGIRSPMSG(x) ((x) << S_DBGIRSPMSG)
+#define G_DBGIRSPMSG(x) (((x) >> S_DBGIRSPMSG) & M_DBGIRSPMSG)
+
+#define S_DBGIRSPMSGVLD 2
+#define V_DBGIRSPMSGVLD(x) ((x) << S_DBGIRSPMSGVLD)
+#define F_DBGIRSPMSGVLD V_DBGIRSPMSGVLD(1U)
+
+#define S_DBGIRSPHIT 1
+#define V_DBGIRSPHIT(x) ((x) << S_DBGIRSPHIT)
+#define F_DBGIRSPHIT V_DBGIRSPHIT(1U)
+
+#define S_DBGIRSPVALID 0
+#define V_DBGIRSPVALID(x) ((x) << S_DBGIRSPVALID)
+#define F_DBGIRSPVALID V_DBGIRSPVALID(1U)
+
+#define A_MC5_DB_DBGI_RSP_DATA0 0x7b4
+#define A_MC5_DB_DBGI_RSP_DATA1 0x7b8
+#define A_MC5_DB_DBGI_RSP_DATA2 0x7bc
+#define A_MC5_DB_DBGI_RSP_DATA3 0x7c0
+#define A_MC5_DB_DBGI_RSP_DATA4 0x7c4
+
+#define S_DBGIRSPDATA3 0
+#define M_DBGIRSPDATA3 0xffff
+#define V_DBGIRSPDATA3(x) ((x) << S_DBGIRSPDATA3)
+#define G_DBGIRSPDATA3(x) (((x) >> S_DBGIRSPDATA3) & M_DBGIRSPDATA3)
+
+#define A_MC5_DB_DBGI_RSP_LAST_CMD 0x7c8
+
+#define S_LASTCMDB 16
+#define M_LASTCMDB 0x7ff
+#define V_LASTCMDB(x) ((x) << S_LASTCMDB)
+#define G_LASTCMDB(x) (((x) >> S_LASTCMDB) & M_LASTCMDB)
+
+#define S_LASTCMDA 0
+#define M_LASTCMDA 0x7ff
+#define V_LASTCMDA(x) ((x) << S_LASTCMDA)
+#define G_LASTCMDA(x) (((x) >> S_LASTCMDA) & M_LASTCMDA)
+
+#define A_MC5_DB_POPEN_DATA_WR_CMD 0x7cc
+
+#define S_PO_DWR 0
+#define M_PO_DWR 0xfffff
+#define V_PO_DWR(x) ((x) << S_PO_DWR)
+#define G_PO_DWR(x) (((x) >> S_PO_DWR) & M_PO_DWR)
+
+#define A_MC5_DB_POPEN_MASK_WR_CMD 0x7d0
+
+#define S_PO_MWR 0
+#define M_PO_MWR 0xfffff
+#define V_PO_MWR(x) ((x) << S_PO_MWR)
+#define G_PO_MWR(x) (((x) >> S_PO_MWR) & M_PO_MWR)
+
+#define A_MC5_DB_AOPEN_SRCH_CMD 0x7d4
+
+#define S_AO_SRCH 0
+#define M_AO_SRCH 0xfffff
+#define V_AO_SRCH(x) ((x) << S_AO_SRCH)
+#define G_AO_SRCH(x) (((x) >> S_AO_SRCH) & M_AO_SRCH)
+
+#define A_MC5_DB_AOPEN_LRN_CMD 0x7d8
+
+#define S_AO_LRN 0
+#define M_AO_LRN 0xfffff
+#define V_AO_LRN(x) ((x) << S_AO_LRN)
+#define G_AO_LRN(x) (((x) >> S_AO_LRN) & M_AO_LRN)
+
+#define A_MC5_DB_SYN_SRCH_CMD 0x7dc
+
+#define S_SYN_SRCH 0
+#define M_SYN_SRCH 0xfffff
+#define V_SYN_SRCH(x) ((x) << S_SYN_SRCH)
+#define G_SYN_SRCH(x) (((x) >> S_SYN_SRCH) & M_SYN_SRCH)
+
+#define A_MC5_DB_SYN_LRN_CMD 0x7e0
+
+#define S_SYN_LRN 0
+#define M_SYN_LRN 0xfffff
+#define V_SYN_LRN(x) ((x) << S_SYN_LRN)
+#define G_SYN_LRN(x) (((x) >> S_SYN_LRN) & M_SYN_LRN)
+
+#define A_MC5_DB_ACK_SRCH_CMD 0x7e4
+
+#define S_ACK_SRCH 0
+#define M_ACK_SRCH 0xfffff
+#define V_ACK_SRCH(x) ((x) << S_ACK_SRCH)
+#define G_ACK_SRCH(x) (((x) >> S_ACK_SRCH) & M_ACK_SRCH)
+
+#define A_MC5_DB_ACK_LRN_CMD 0x7e8
+
+#define S_ACK_LRN 0
+#define M_ACK_LRN 0xfffff
+#define V_ACK_LRN(x) ((x) << S_ACK_LRN)
+#define G_ACK_LRN(x) (((x) >> S_ACK_LRN) & M_ACK_LRN)
+
+#define A_MC5_DB_ILOOKUP_CMD 0x7ec
+
+#define S_I_SRCH 0
+#define M_I_SRCH 0xfffff
+#define V_I_SRCH(x) ((x) << S_I_SRCH)
+#define G_I_SRCH(x) (((x) >> S_I_SRCH) & M_I_SRCH)
+
+#define A_MC5_DB_ELOOKUP_CMD 0x7f0
+
+#define S_E_SRCH 0
+#define M_E_SRCH 0xfffff
+#define V_E_SRCH(x) ((x) << S_E_SRCH)
+#define G_E_SRCH(x) (((x) >> S_E_SRCH) & M_E_SRCH)
+
+#define A_MC5_DB_DATA_WRITE_CMD 0x7f4
+
+#define S_WRITE 0
+#define M_WRITE 0xfffff
+#define V_WRITE(x) ((x) << S_WRITE)
+#define G_WRITE(x) (((x) >> S_WRITE) & M_WRITE)
+
+#define A_MC5_DB_DATA_READ_CMD 0x7f8
+
+#define S_READCMD 0
+#define M_READCMD 0xfffff
+#define V_READCMD(x) ((x) << S_READCMD)
+#define G_READCMD(x) (((x) >> S_READCMD) & M_READCMD)
+
+#define A_MC5_DB_MASK_WRITE_CMD 0x7fc
+
+#define S_MASKWR 0
+#define M_MASKWR 0xffff
+#define V_MASKWR(x) ((x) << S_MASKWR)
+#define G_MASKWR(x) (((x) >> S_MASKWR) & M_MASKWR)
+
+/* registers for module XGMAC0_0 */
+#define XGMAC0_0_BASE_ADDR 0x800
+
+#define A_XGM_TX_CTRL 0x800
+
+#define S_SENDPAUSE 2
+#define V_SENDPAUSE(x) ((x) << S_SENDPAUSE)
+#define F_SENDPAUSE V_SENDPAUSE(1U)
+
+#define S_SENDZEROPAUSE 1
+#define V_SENDZEROPAUSE(x) ((x) << S_SENDZEROPAUSE)
+#define F_SENDZEROPAUSE V_SENDZEROPAUSE(1U)
+
+#define S_TXEN 0
+#define V_TXEN(x) ((x) << S_TXEN)
+#define F_TXEN V_TXEN(1U)
+
+#define A_XGM_TX_CFG 0x804
+
+#define S_CFGCLKSPEED 2
+#define M_CFGCLKSPEED 0x7
+#define V_CFGCLKSPEED(x) ((x) << S_CFGCLKSPEED)
+#define G_CFGCLKSPEED(x) (((x) >> S_CFGCLKSPEED) & M_CFGCLKSPEED)
+
+#define S_STRETCHMODE 1
+#define V_STRETCHMODE(x) ((x) << S_STRETCHMODE)
+#define F_STRETCHMODE V_STRETCHMODE(1U)
+
+#define S_TXPAUSEEN 0
+#define V_TXPAUSEEN(x) ((x) << S_TXPAUSEEN)
+#define F_TXPAUSEEN V_TXPAUSEEN(1U)
+
+#define A_XGM_TX_PAUSE_QUANTA 0x808
+
+#define S_TXPAUSEQUANTA 0
+#define M_TXPAUSEQUANTA 0xffff
+#define V_TXPAUSEQUANTA(x) ((x) << S_TXPAUSEQUANTA)
+#define G_TXPAUSEQUANTA(x) (((x) >> S_TXPAUSEQUANTA) & M_TXPAUSEQUANTA)
+
+#define A_XGM_RX_CTRL 0x80c
+
+#define S_RXEN 0
+#define V_RXEN(x) ((x) << S_RXEN)
+#define F_RXEN V_RXEN(1U)
+
+#define A_XGM_RX_CFG 0x810
+
+#define S_CON802_3PREAMBLE 12
+#define V_CON802_3PREAMBLE(x) ((x) << S_CON802_3PREAMBLE)
+#define F_CON802_3PREAMBLE V_CON802_3PREAMBLE(1U)
+
+#define S_ENNON802_3PREAMBLE 11
+#define V_ENNON802_3PREAMBLE(x) ((x) << S_ENNON802_3PREAMBLE)
+#define F_ENNON802_3PREAMBLE V_ENNON802_3PREAMBLE(1U)
+
+#define S_COPYPREAMBLE 10
+#define V_COPYPREAMBLE(x) ((x) << S_COPYPREAMBLE)
+#define F_COPYPREAMBLE V_COPYPREAMBLE(1U)
+
+#define S_DISPAUSEFRAMES 9
+#define V_DISPAUSEFRAMES(x) ((x) << S_DISPAUSEFRAMES)
+#define F_DISPAUSEFRAMES V_DISPAUSEFRAMES(1U)
+
+#define S_EN1536BFRAMES 8
+#define V_EN1536BFRAMES(x) ((x) << S_EN1536BFRAMES)
+#define F_EN1536BFRAMES V_EN1536BFRAMES(1U)
+
+#define S_ENJUMBO 7
+#define V_ENJUMBO(x) ((x) << S_ENJUMBO)
+#define F_ENJUMBO V_ENJUMBO(1U)
+
+#define S_RMFCS 6
+#define V_RMFCS(x) ((x) << S_RMFCS)
+#define F_RMFCS V_RMFCS(1U)
+
+#define S_DISNONVLAN 5
+#define V_DISNONVLAN(x) ((x) << S_DISNONVLAN)
+#define F_DISNONVLAN V_DISNONVLAN(1U)
+
+#define S_ENEXTMATCH 4
+#define V_ENEXTMATCH(x) ((x) << S_ENEXTMATCH)
+#define F_ENEXTMATCH V_ENEXTMATCH(1U)
+
+#define S_ENHASHUCAST 3
+#define V_ENHASHUCAST(x) ((x) << S_ENHASHUCAST)
+#define F_ENHASHUCAST V_ENHASHUCAST(1U)
+
+#define S_ENHASHMCAST 2
+#define V_ENHASHMCAST(x) ((x) << S_ENHASHMCAST)
+#define F_ENHASHMCAST V_ENHASHMCAST(1U)
+
+#define S_DISBCAST 1
+#define V_DISBCAST(x) ((x) << S_DISBCAST)
+#define F_DISBCAST V_DISBCAST(1U)
+
+#define S_COPYALLFRAMES 0
+#define V_COPYALLFRAMES(x) ((x) << S_COPYALLFRAMES)
+#define F_COPYALLFRAMES V_COPYALLFRAMES(1U)
+
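+/*
+ * Illustrative MAC RX configuration sketch for port 0 (per-port
+ * instances of this register are assumed to live at a fixed offset
+ * from XGMAC0_0_BASE_ADDR): enable jumbo frames and hashed multicast
+ * filtering while leaving promiscuous mode off, an assumed policy
+ * shown only to demonstrate the flag macros:
+ *
+ *	uint32_t v = t3_read_reg(sc, A_XGM_RX_CFG);
+ *
+ *	v |= F_ENJUMBO | F_ENHASHMCAST;
+ *	v &= ~F_COPYALLFRAMES;
+ *	t3_write_reg(sc, A_XGM_RX_CFG, v);
+ */
+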
+#define A_XGM_RX_HASH_LOW 0x814
+#define A_XGM_RX_HASH_HIGH 0x818
+#define A_XGM_RX_EXACT_MATCH_LOW_1 0x81c
+#define A_XGM_RX_EXACT_MATCH_HIGH_1 0x820
+
+#define S_ADDRESS_HIGH 0
+#define M_ADDRESS_HIGH 0xffff
+#define V_ADDRESS_HIGH(x) ((x) << S_ADDRESS_HIGH)
+#define G_ADDRESS_HIGH(x) (((x) >> S_ADDRESS_HIGH) & M_ADDRESS_HIGH)
+
+#define A_XGM_RX_EXACT_MATCH_LOW_2 0x824
+#define A_XGM_RX_EXACT_MATCH_HIGH_2 0x828
+#define A_XGM_RX_EXACT_MATCH_LOW_3 0x82c
+#define A_XGM_RX_EXACT_MATCH_HIGH_3 0x830
+#define A_XGM_RX_EXACT_MATCH_LOW_4 0x834
+#define A_XGM_RX_EXACT_MATCH_HIGH_4 0x838
+#define A_XGM_RX_EXACT_MATCH_LOW_5 0x83c
+#define A_XGM_RX_EXACT_MATCH_HIGH_5 0x840
+#define A_XGM_RX_EXACT_MATCH_LOW_6 0x844
+#define A_XGM_RX_EXACT_MATCH_HIGH_6 0x848
+#define A_XGM_RX_EXACT_MATCH_LOW_7 0x84c
+#define A_XGM_RX_EXACT_MATCH_HIGH_7 0x850
+#define A_XGM_RX_EXACT_MATCH_LOW_8 0x854
+#define A_XGM_RX_EXACT_MATCH_HIGH_8 0x858
+#define A_XGM_RX_TYPE_MATCH_1 0x85c
+
+#define S_ENTYPEMATCH 31
+#define V_ENTYPEMATCH(x) ((x) << S_ENTYPEMATCH)
+#define F_ENTYPEMATCH V_ENTYPEMATCH(1U)
+
+#define S_TYPE 0
+#define M_TYPE 0xffff
+#define V_TYPE(x) ((x) << S_TYPE)
+#define G_TYPE(x) (((x) >> S_TYPE) & M_TYPE)
+
+#define A_XGM_RX_TYPE_MATCH_2 0x860
+#define A_XGM_RX_TYPE_MATCH_3 0x864
+#define A_XGM_RX_TYPE_MATCH_4 0x868
+#define A_XGM_INT_STATUS 0x86c
+
+#define S_XGMIIEXTINT 10
+#define V_XGMIIEXTINT(x) ((x) << S_XGMIIEXTINT)
+#define F_XGMIIEXTINT V_XGMIIEXTINT(1U)
+
+#define S_LINKFAULTCHANGE 9
+#define V_LINKFAULTCHANGE(x) ((x) << S_LINKFAULTCHANGE)
+#define F_LINKFAULTCHANGE V_LINKFAULTCHANGE(1U)
+
+#define S_PHYFRAMECOMPLETE 8
+#define V_PHYFRAMECOMPLETE(x) ((x) << S_PHYFRAMECOMPLETE)
+#define F_PHYFRAMECOMPLETE V_PHYFRAMECOMPLETE(1U)
+
+#define S_PAUSEFRAMETXMT 7
+#define V_PAUSEFRAMETXMT(x) ((x) << S_PAUSEFRAMETXMT)
+#define F_PAUSEFRAMETXMT V_PAUSEFRAMETXMT(1U)
+
+#define S_PAUSECNTRTIMEOUT 6
+#define V_PAUSECNTRTIMEOUT(x) ((x) << S_PAUSECNTRTIMEOUT)
+#define F_PAUSECNTRTIMEOUT V_PAUSECNTRTIMEOUT(1U)
+
+#define S_NON0PAUSERCVD 5
+#define V_NON0PAUSERCVD(x) ((x) << S_NON0PAUSERCVD)
+#define F_NON0PAUSERCVD V_NON0PAUSERCVD(1U)
+
+#define S_STATOFLOW 4
+#define V_STATOFLOW(x) ((x) << S_STATOFLOW)
+#define F_STATOFLOW V_STATOFLOW(1U)
+
+#define S_TXERRFIFO 3
+#define V_TXERRFIFO(x) ((x) << S_TXERRFIFO)
+#define F_TXERRFIFO V_TXERRFIFO(1U)
+
+#define S_TXUFLOW 2
+#define V_TXUFLOW(x) ((x) << S_TXUFLOW)
+#define F_TXUFLOW V_TXUFLOW(1U)
+
+#define S_FRAMETXMT 1
+#define V_FRAMETXMT(x) ((x) << S_FRAMETXMT)
+#define F_FRAMETXMT V_FRAMETXMT(1U)
+
+#define S_FRAMERCVD 0
+#define V_FRAMERCVD(x) ((x) << S_FRAMERCVD)
+#define F_FRAMERCVD V_FRAMERCVD(1U)
+
+#define A_XGM_XGM_INT_MASK 0x870
+#define A_XGM_XGM_INT_ENABLE 0x874
+#define A_XGM_XGM_INT_DISABLE 0x878
+#define A_XGM_TX_PAUSE_TIMER 0x87c
+
+#define S_CURPAUSETIMER 0
+#define M_CURPAUSETIMER 0xffff
+#define V_CURPAUSETIMER(x) ((x) << S_CURPAUSETIMER)
+#define G_CURPAUSETIMER(x) (((x) >> S_CURPAUSETIMER) & M_CURPAUSETIMER)
+
+#define A_XGM_STAT_CTRL 0x880
+
+#define S_READSNPSHOT 4
+#define V_READSNPSHOT(x) ((x) << S_READSNPSHOT)
+#define F_READSNPSHOT V_READSNPSHOT(1U)
+
+#define S_TAKESNPSHOT 3
+#define V_TAKESNPSHOT(x) ((x) << S_TAKESNPSHOT)
+#define F_TAKESNPSHOT V_TAKESNPSHOT(1U)
+
+#define S_CLRSTATS 2
+#define V_CLRSTATS(x) ((x) << S_CLRSTATS)
+#define F_CLRSTATS V_CLRSTATS(1U)
+
+#define S_INCRSTATS 1
+#define V_INCRSTATS(x) ((x) << S_INCRSTATS)
+#define F_INCRSTATS V_INCRSTATS(1U)
+
+#define S_ENTESTMODEWR 0
+#define V_ENTESTMODEWR(x) ((x) << S_ENTESTMODEWR)
+#define F_ENTESTMODEWR V_ENTESTMODEWR(1U)
+
+#define A_XGM_RXFIFO_CFG 0x884
+
+#define S_RXFIFOPAUSEHWM 17
+#define M_RXFIFOPAUSEHWM 0xfff
+#define V_RXFIFOPAUSEHWM(x) ((x) << S_RXFIFOPAUSEHWM)
+#define G_RXFIFOPAUSEHWM(x) (((x) >> S_RXFIFOPAUSEHWM) & M_RXFIFOPAUSEHWM)
+
+#define S_RXFIFOPAUSELWM 5
+#define M_RXFIFOPAUSELWM 0xfff
+#define V_RXFIFOPAUSELWM(x) ((x) << S_RXFIFOPAUSELWM)
+#define G_RXFIFOPAUSELWM(x) (((x) >> S_RXFIFOPAUSELWM) & M_RXFIFOPAUSELWM)
+
+#define S_FORCEDPAUSE 4
+#define V_FORCEDPAUSE(x) ((x) << S_FORCEDPAUSE)
+#define F_FORCEDPAUSE V_FORCEDPAUSE(1U)
+
+#define S_EXTERNLOOPBACK 3
+#define V_EXTERNLOOPBACK(x) ((x) << S_EXTERNLOOPBACK)
+#define F_EXTERNLOOPBACK V_EXTERNLOOPBACK(1U)
+
+#define S_RXBYTESWAP 2
+#define V_RXBYTESWAP(x) ((x) << S_RXBYTESWAP)
+#define F_RXBYTESWAP V_RXBYTESWAP(1U)
+
+#define S_RXSTRFRWRD 1
+#define V_RXSTRFRWRD(x) ((x) << S_RXSTRFRWRD)
+#define F_RXSTRFRWRD V_RXSTRFRWRD(1U)
+
+#define S_DISERRFRAMES 0
+#define V_DISERRFRAMES(x) ((x) << S_DISERRFRAMES)
+#define F_DISERRFRAMES V_DISERRFRAMES(1U)
+
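+/*
+ * Illustrative pause-watermark sketch: the high and low watermarks are
+ * 12-bit fields packed into this one register.  The units of `hwm' and
+ * `lwm' are hardware-defined and assumed here:
+ *
+ *	t3_write_reg(sc, A_XGM_RXFIFO_CFG,
+ *	    V_RXFIFOPAUSEHWM(hwm) | V_RXFIFOPAUSELWM(lwm) | F_RXSTRFRWRD);
+ */
+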
+#define A_XGM_TXFIFO_CFG 0x888
+
+#define S_TXIPG 13
+#define M_TXIPG 0xff
+#define V_TXIPG(x) ((x) << S_TXIPG)
+#define G_TXIPG(x) (((x) >> S_TXIPG) & M_TXIPG)
+
+#define S_TXFIFOTHRESH 4
+#define M_TXFIFOTHRESH 0x1ff
+#define V_TXFIFOTHRESH(x) ((x) << S_TXFIFOTHRESH)
+#define G_TXFIFOTHRESH(x) (((x) >> S_TXFIFOTHRESH) & M_TXFIFOTHRESH)
+
+#define S_INTERNLOOPBACK 3
+#define V_INTERNLOOPBACK(x) ((x) << S_INTERNLOOPBACK)
+#define F_INTERNLOOPBACK V_INTERNLOOPBACK(1U)
+
+#define S_TXBYTESWAP 2
+#define V_TXBYTESWAP(x) ((x) << S_TXBYTESWAP)
+#define F_TXBYTESWAP V_TXBYTESWAP(1U)
+
+#define S_DISCRC 1
+#define V_DISCRC(x) ((x) << S_DISCRC)
+#define F_DISCRC V_DISCRC(1U)
+
+#define S_DISPREAMBLE 0
+#define V_DISPREAMBLE(x) ((x) << S_DISPREAMBLE)
+#define F_DISPREAMBLE V_DISPREAMBLE(1U)
+
+#define S_ENDROPPKT 21
+#define V_ENDROPPKT(x) ((x) << S_ENDROPPKT)
+#define F_ENDROPPKT V_ENDROPPKT(1U)
+
+#define A_XGM_SLOW_TIMER 0x88c
+
+#define S_PAUSESLOWTIMEREN 31
+#define V_PAUSESLOWTIMEREN(x) ((x) << S_PAUSESLOWTIMEREN)
+#define F_PAUSESLOWTIMEREN V_PAUSESLOWTIMEREN(1U)
+
+#define S_PAUSESLOWTIMER 0
+#define M_PAUSESLOWTIMER 0xfffff
+#define V_PAUSESLOWTIMER(x) ((x) << S_PAUSESLOWTIMER)
+#define G_PAUSESLOWTIMER(x) (((x) >> S_PAUSESLOWTIMER) & M_PAUSESLOWTIMER)
+
+#define A_XGM_SERDES_CTRL 0x890
+
+#define S_SERDESEN 25
+#define V_SERDESEN(x) ((x) << S_SERDESEN)
+#define F_SERDESEN V_SERDESEN(1U)
+
+#define S_SERDESRESET_ 24
+#define V_SERDESRESET_(x) ((x) << S_SERDESRESET_)
+#define F_SERDESRESET_ V_SERDESRESET_(1U)
+
+#define S_CMURANGE 21
+#define M_CMURANGE 0x7
+#define V_CMURANGE(x) ((x) << S_CMURANGE)
+#define G_CMURANGE(x) (((x) >> S_CMURANGE) & M_CMURANGE)
+
+#define S_BGENB 20
+#define V_BGENB(x) ((x) << S_BGENB)
+#define F_BGENB V_BGENB(1U)
+
+#define S_ENSKPDROP 19
+#define V_ENSKPDROP(x) ((x) << S_ENSKPDROP)
+#define F_ENSKPDROP V_ENSKPDROP(1U)
+
+#define S_ENCOMMA 18
+#define V_ENCOMMA(x) ((x) << S_ENCOMMA)
+#define F_ENCOMMA V_ENCOMMA(1U)
+
+#define S_EN8B10B 17
+#define V_EN8B10B(x) ((x) << S_EN8B10B)
+#define F_EN8B10B V_EN8B10B(1U)
+
+#define S_ENELBUF 16
+#define V_ENELBUF(x) ((x) << S_ENELBUF)
+#define F_ENELBUF V_ENELBUF(1U)
+
+#define S_GAIN 11
+#define M_GAIN 0x1f
+#define V_GAIN(x) ((x) << S_GAIN)
+#define G_GAIN(x) (((x) >> S_GAIN) & M_GAIN)
+
+#define S_BANDGAP 7
+#define M_BANDGAP 0xf
+#define V_BANDGAP(x) ((x) << S_BANDGAP)
+#define G_BANDGAP(x) (((x) >> S_BANDGAP) & M_BANDGAP)
+
+#define S_LPBKEN 5
+#define M_LPBKEN 0x3
+#define V_LPBKEN(x) ((x) << S_LPBKEN)
+#define G_LPBKEN(x) (((x) >> S_LPBKEN) & M_LPBKEN)
+
+#define S_RXENABLE 4
+#define V_RXENABLE(x) ((x) << S_RXENABLE)
+#define F_RXENABLE V_RXENABLE(1U)
+
+#define S_TXENABLE 3
+#define V_TXENABLE(x) ((x) << S_TXENABLE)
+#define F_TXENABLE V_TXENABLE(1U)
+
+#define A_XGM_PAUSE_TIMER 0x890
+
+#define S_PAUSETIMER 0
+#define M_PAUSETIMER 0xfffff
+#define V_PAUSETIMER(x) ((x) << S_PAUSETIMER)
+#define G_PAUSETIMER(x) (((x) >> S_PAUSETIMER) & M_PAUSETIMER)
+
+#define A_XGM_XAUI_PCS_TEST 0x894
+
+#define S_TESTPATTERN 1
+#define M_TESTPATTERN 0x3
+#define V_TESTPATTERN(x) ((x) << S_TESTPATTERN)
+#define G_TESTPATTERN(x) (((x) >> S_TESTPATTERN) & M_TESTPATTERN)
+
+#define S_ENTEST 0
+#define V_ENTEST(x) ((x) << S_ENTEST)
+#define F_ENTEST V_ENTEST(1U)
+
+#define A_XGM_RGMII_CTRL 0x898
+
+#define S_PHALIGNFIFOTHRESH 1
+#define M_PHALIGNFIFOTHRESH 0x3
+#define V_PHALIGNFIFOTHRESH(x) ((x) << S_PHALIGNFIFOTHRESH)
+#define G_PHALIGNFIFOTHRESH(x) (((x) >> S_PHALIGNFIFOTHRESH) & M_PHALIGNFIFOTHRESH)
+
+#define S_TXCLK90SHIFT 0
+#define V_TXCLK90SHIFT(x) ((x) << S_TXCLK90SHIFT)
+#define F_TXCLK90SHIFT V_TXCLK90SHIFT(1U)
+
+#define A_XGM_RGMII_IMP 0x89c
+
+#define S_XGM_IMPSETUPDATE 6
+#define V_XGM_IMPSETUPDATE(x) ((x) << S_XGM_IMPSETUPDATE)
+#define F_XGM_IMPSETUPDATE V_XGM_IMPSETUPDATE(1U)
+
+#define S_RGMIIIMPPD 3
+#define M_RGMIIIMPPD 0x7
+#define V_RGMIIIMPPD(x) ((x) << S_RGMIIIMPPD)
+#define G_RGMIIIMPPD(x) (((x) >> S_RGMIIIMPPD) & M_RGMIIIMPPD)
+
+#define S_RGMIIIMPPU 0
+#define M_RGMIIIMPPU 0x7
+#define V_RGMIIIMPPU(x) ((x) << S_RGMIIIMPPU)
+#define G_RGMIIIMPPU(x) (((x) >> S_RGMIIIMPPU) & M_RGMIIIMPPU)
+
+#define S_CALRESET 8
+#define V_CALRESET(x) ((x) << S_CALRESET)
+#define F_CALRESET V_CALRESET(1U)
+
+#define S_CALUPDATE 7
+#define V_CALUPDATE(x) ((x) << S_CALUPDATE)
+#define F_CALUPDATE V_CALUPDATE(1U)
+
+#define A_XGM_XAUI_IMP 0x8a0
+
+#define S_XGM_CALFAULT 29
+#define V_XGM_CALFAULT(x) ((x) << S_XGM_CALFAULT)
+#define F_XGM_CALFAULT V_XGM_CALFAULT(1U)
+
+#define S_CALIMP 24
+#define M_CALIMP 0x1f
+#define V_CALIMP(x) ((x) << S_CALIMP)
+#define G_CALIMP(x) (((x) >> S_CALIMP) & M_CALIMP)
+
+#define S_XAUIIMP 0
+#define M_XAUIIMP 0x7
+#define V_XAUIIMP(x) ((x) << S_XAUIIMP)
+#define G_XAUIIMP(x) (((x) >> S_XAUIIMP) & M_XAUIIMP)
+
+#define A_XGM_SERDES_BIST 0x8a4
+
+#define S_BISTDONE 28
+#define M_BISTDONE 0xf
+#define V_BISTDONE(x) ((x) << S_BISTDONE)
+#define G_BISTDONE(x) (((x) >> S_BISTDONE) & M_BISTDONE)
+
+#define S_BISTCYCLETHRESH 3
+#define M_BISTCYCLETHRESH 0x1ffff
+#define V_BISTCYCLETHRESH(x) ((x) << S_BISTCYCLETHRESH)
+#define G_BISTCYCLETHRESH(x) (((x) >> S_BISTCYCLETHRESH) & M_BISTCYCLETHRESH)
+
+#define A_XGM_RX_MAX_PKT_SIZE 0x8a8
+
+#define S_RXMAXPKTSIZE 0
+#define M_RXMAXPKTSIZE 0x3fff
+#define V_RXMAXPKTSIZE(x) ((x) << S_RXMAXPKTSIZE)
+#define G_RXMAXPKTSIZE(x) (((x) >> S_RXMAXPKTSIZE) & M_RXMAXPKTSIZE)
+
+#define A_XGM_RESET_CTRL 0x8ac
+
+#define S_XG2G_RESET_ 3
+#define V_XG2G_RESET_(x) ((x) << S_XG2G_RESET_)
+#define F_XG2G_RESET_ V_XG2G_RESET_(1U)
+
+#define S_RGMII_RESET_ 2
+#define V_RGMII_RESET_(x) ((x) << S_RGMII_RESET_)
+#define F_RGMII_RESET_ V_RGMII_RESET_(1U)
+
+#define S_PCS_RESET_ 1
+#define V_PCS_RESET_(x) ((x) << S_PCS_RESET_)
+#define F_PCS_RESET_ V_PCS_RESET_(1U)
+
+#define S_MAC_RESET_ 0
+#define V_MAC_RESET_(x) ((x) << S_MAC_RESET_)
+#define F_MAC_RESET_ V_MAC_RESET_(1U)
+
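+/*
+ * Illustrative sketch: the trailing underscore on these names marks
+ * active-low reset bits (an inference from the naming convention used
+ * elsewhere in this header), so writing the flags set would release
+ * the corresponding blocks from reset:
+ *
+ *	t3_write_reg(sc, A_XGM_RESET_CTRL,
+ *	    F_MAC_RESET_ | F_PCS_RESET_ | F_XG2G_RESET_);
+ */
+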
+#define A_XGM_XAUI1G_CTRL 0x8b0
+
+#define S_XAUI1GLINKID 0
+#define M_XAUI1GLINKID 0x3
+#define V_XAUI1GLINKID(x) ((x) << S_XAUI1GLINKID)
+#define G_XAUI1GLINKID(x) (((x) >> S_XAUI1GLINKID) & M_XAUI1GLINKID)
+
+#define A_XGM_SERDES_LANE_CTRL 0x8b4
+
+#define S_LANEREVERSAL 8
+#define V_LANEREVERSAL(x) ((x) << S_LANEREVERSAL)
+#define F_LANEREVERSAL V_LANEREVERSAL(1U)
+
+#define S_TXPOLARITY 4
+#define M_TXPOLARITY 0xf
+#define V_TXPOLARITY(x) ((x) << S_TXPOLARITY)
+#define G_TXPOLARITY(x) (((x) >> S_TXPOLARITY) & M_TXPOLARITY)
+
+#define S_RXPOLARITY 0
+#define M_RXPOLARITY 0xf
+#define V_RXPOLARITY(x) ((x) << S_RXPOLARITY)
+#define G_RXPOLARITY(x) (((x) >> S_RXPOLARITY) & M_RXPOLARITY)
+
+#define A_XGM_PORT_CFG 0x8b8
+
+#define S_SAFESPEEDCHANGE 4
+#define V_SAFESPEEDCHANGE(x) ((x) << S_SAFESPEEDCHANGE)
+#define F_SAFESPEEDCHANGE V_SAFESPEEDCHANGE(1U)
+
+#define S_CLKDIVRESET_ 3
+#define V_CLKDIVRESET_(x) ((x) << S_CLKDIVRESET_)
+#define F_CLKDIVRESET_ V_CLKDIVRESET_(1U)
+
+#define S_PORTSPEED 1
+#define M_PORTSPEED 0x3
+#define V_PORTSPEED(x) ((x) << S_PORTSPEED)
+#define G_PORTSPEED(x) (((x) >> S_PORTSPEED) & M_PORTSPEED)
+
+#define S_ENRGMII 0
+#define V_ENRGMII(x) ((x) << S_ENRGMII)
+#define F_ENRGMII V_ENRGMII(1U)
+
+#define A_XGM_EPIO_DATA0 0x8c0
+#define A_XGM_EPIO_DATA1 0x8c4
+#define A_XGM_EPIO_DATA2 0x8c8
+#define A_XGM_EPIO_DATA3 0x8cc
+#define A_XGM_EPIO_OP 0x8d0
+
+#define S_PIO_READY 31
+#define V_PIO_READY(x) ((x) << S_PIO_READY)
+#define F_PIO_READY V_PIO_READY(1U)
+
+#define S_PIO_WRRD 24
+#define V_PIO_WRRD(x) ((x) << S_PIO_WRRD)
+#define F_PIO_WRRD V_PIO_WRRD(1U)
+
+#define S_PIO_ADDRESS 0
+#define M_PIO_ADDRESS 0xff
+#define V_PIO_ADDRESS(x) ((x) << S_PIO_ADDRESS)
+#define G_PIO_ADDRESS(x) (((x) >> S_PIO_ADDRESS) & M_PIO_ADDRESS)
+
+#define A_XGM_INT_ENABLE 0x8d4
+
+#define S_SERDESCMULOCK_LOSS 24
+#define V_SERDESCMULOCK_LOSS(x) ((x) << S_SERDESCMULOCK_LOSS)
+#define F_SERDESCMULOCK_LOSS V_SERDESCMULOCK_LOSS(1U)
+
+#define S_RGMIIRXFIFOOVERFLOW 23
+#define V_RGMIIRXFIFOOVERFLOW(x) ((x) << S_RGMIIRXFIFOOVERFLOW)
+#define F_RGMIIRXFIFOOVERFLOW V_RGMIIRXFIFOOVERFLOW(1U)
+
+#define S_RGMIIRXFIFOUNDERFLOW 22
+#define V_RGMIIRXFIFOUNDERFLOW(x) ((x) << S_RGMIIRXFIFOUNDERFLOW)
+#define F_RGMIIRXFIFOUNDERFLOW V_RGMIIRXFIFOUNDERFLOW(1U)
+
+#define S_RXPKTSIZEERROR 21
+#define V_RXPKTSIZEERROR(x) ((x) << S_RXPKTSIZEERROR)
+#define F_RXPKTSIZEERROR V_RXPKTSIZEERROR(1U)
+
+#define S_WOLPATDETECTED 20
+#define V_WOLPATDETECTED(x) ((x) << S_WOLPATDETECTED)
+#define F_WOLPATDETECTED V_WOLPATDETECTED(1U)
+
+#define S_TXFIFO_PRTY_ERR 17
+#define M_TXFIFO_PRTY_ERR 0x7
+#define V_TXFIFO_PRTY_ERR(x) ((x) << S_TXFIFO_PRTY_ERR)
+#define G_TXFIFO_PRTY_ERR(x) (((x) >> S_TXFIFO_PRTY_ERR) & M_TXFIFO_PRTY_ERR)
+
+#define S_RXFIFO_PRTY_ERR 14
+#define M_RXFIFO_PRTY_ERR 0x7
+#define V_RXFIFO_PRTY_ERR(x) ((x) << S_RXFIFO_PRTY_ERR)
+#define G_RXFIFO_PRTY_ERR(x) (((x) >> S_RXFIFO_PRTY_ERR) & M_RXFIFO_PRTY_ERR)
+
+#define S_TXFIFO_UNDERRUN 13
+#define V_TXFIFO_UNDERRUN(x) ((x) << S_TXFIFO_UNDERRUN)
+#define F_TXFIFO_UNDERRUN V_TXFIFO_UNDERRUN(1U)
+
+#define S_RXFIFO_OVERFLOW 12
+#define V_RXFIFO_OVERFLOW(x) ((x) << S_RXFIFO_OVERFLOW)
+#define F_RXFIFO_OVERFLOW V_RXFIFO_OVERFLOW(1U)
+
+#define S_SERDESBIST_ERR 8
+#define M_SERDESBIST_ERR 0xf
+#define V_SERDESBIST_ERR(x) ((x) << S_SERDESBIST_ERR)
+#define G_SERDESBIST_ERR(x) (((x) >> S_SERDESBIST_ERR) & M_SERDESBIST_ERR)
+
+#define S_SERDES_LOS 4
+#define M_SERDES_LOS 0xf
+#define V_SERDES_LOS(x) ((x) << S_SERDES_LOS)
+#define G_SERDES_LOS(x) (((x) >> S_SERDES_LOS) & M_SERDES_LOS)
+
+#define S_XAUIPCSCTCERR 3
+#define V_XAUIPCSCTCERR(x) ((x) << S_XAUIPCSCTCERR)
+#define F_XAUIPCSCTCERR V_XAUIPCSCTCERR(1U)
+
+#define S_XAUIPCSALIGNCHANGE 2
+#define V_XAUIPCSALIGNCHANGE(x) ((x) << S_XAUIPCSALIGNCHANGE)
+#define F_XAUIPCSALIGNCHANGE V_XAUIPCSALIGNCHANGE(1U)
+
+#define S_RGMIILINKSTSCHANGE 1
+#define V_RGMIILINKSTSCHANGE(x) ((x) << S_RGMIILINKSTSCHANGE)
+#define F_RGMIILINKSTSCHANGE V_RGMIILINKSTSCHANGE(1U)
+
+#define S_XGM_INT 0
+#define V_XGM_INT(x) ((x) << S_XGM_INT)
+#define F_XGM_INT V_XGM_INT(1U)
+
+#define S_SERDESBISTERR 8
+#define M_SERDESBISTERR 0xf
+#define V_SERDESBISTERR(x) ((x) << S_SERDESBISTERR)
+#define G_SERDESBISTERR(x) (((x) >> S_SERDESBISTERR) & M_SERDESBISTERR)
+
+#define S_SERDESLOWSIGCHANGE 4
+#define M_SERDESLOWSIGCHANGE 0xf
+#define V_SERDESLOWSIGCHANGE(x) ((x) << S_SERDESLOWSIGCHANGE)
+#define G_SERDESLOWSIGCHANGE(x) (((x) >> S_SERDESLOWSIGCHANGE) & M_SERDESLOWSIGCHANGE)
+
+#define A_XGM_INT_CAUSE 0x8d8
+#define A_XGM_XAUI_ACT_CTRL 0x8dc
+
+#define S_TXACTENABLE 1
+#define V_TXACTENABLE(x) ((x) << S_TXACTENABLE)
+#define F_TXACTENABLE V_TXACTENABLE(1U)
+
+#define A_XGM_SERDES_CTRL0 0x8e0
+
+#define S_INTSERLPBK3 27
+#define V_INTSERLPBK3(x) ((x) << S_INTSERLPBK3)
+#define F_INTSERLPBK3 V_INTSERLPBK3(1U)
+
+#define S_INTSERLPBK2 26
+#define V_INTSERLPBK2(x) ((x) << S_INTSERLPBK2)
+#define F_INTSERLPBK2 V_INTSERLPBK2(1U)
+
+#define S_INTSERLPBK1 25
+#define V_INTSERLPBK1(x) ((x) << S_INTSERLPBK1)
+#define F_INTSERLPBK1 V_INTSERLPBK1(1U)
+
+#define S_INTSERLPBK0 24
+#define V_INTSERLPBK0(x) ((x) << S_INTSERLPBK0)
+#define F_INTSERLPBK0 V_INTSERLPBK0(1U)
+
+#define S_RESET3 23
+#define V_RESET3(x) ((x) << S_RESET3)
+#define F_RESET3 V_RESET3(1U)
+
+#define S_RESET2 22
+#define V_RESET2(x) ((x) << S_RESET2)
+#define F_RESET2 V_RESET2(1U)
+
+#define S_RESET1 21
+#define V_RESET1(x) ((x) << S_RESET1)
+#define F_RESET1 V_RESET1(1U)
+
+#define S_RESET0 20
+#define V_RESET0(x) ((x) << S_RESET0)
+#define F_RESET0 V_RESET0(1U)
+
+#define S_PWRDN3 19
+#define V_PWRDN3(x) ((x) << S_PWRDN3)
+#define F_PWRDN3 V_PWRDN3(1U)
+
+#define S_PWRDN2 18
+#define V_PWRDN2(x) ((x) << S_PWRDN2)
+#define F_PWRDN2 V_PWRDN2(1U)
+
+#define S_PWRDN1 17
+#define V_PWRDN1(x) ((x) << S_PWRDN1)
+#define F_PWRDN1 V_PWRDN1(1U)
+
+#define S_PWRDN0 16
+#define V_PWRDN0(x) ((x) << S_PWRDN0)
+#define F_PWRDN0 V_PWRDN0(1U)
+
+#define S_RESETPLL23 15
+#define V_RESETPLL23(x) ((x) << S_RESETPLL23)
+#define F_RESETPLL23 V_RESETPLL23(1U)
+
+#define S_RESETPLL01 14
+#define V_RESETPLL01(x) ((x) << S_RESETPLL01)
+#define F_RESETPLL01 V_RESETPLL01(1U)
+
+#define S_PW23 12
+#define M_PW23 0x3
+#define V_PW23(x) ((x) << S_PW23)
+#define G_PW23(x) (((x) >> S_PW23) & M_PW23)
+
+#define S_PW01 10
+#define M_PW01 0x3
+#define V_PW01(x) ((x) << S_PW01)
+#define G_PW01(x) (((x) >> S_PW01) & M_PW01)
+
+#define S_XGM_DEQ 6
+#define M_XGM_DEQ 0xf
+#define V_XGM_DEQ(x) ((x) << S_XGM_DEQ)
+#define G_XGM_DEQ(x) (((x) >> S_XGM_DEQ) & M_XGM_DEQ)
+
+#define S_XGM_DTX 2
+#define M_XGM_DTX 0xf
+#define V_XGM_DTX(x) ((x) << S_XGM_DTX)
+#define G_XGM_DTX(x) (((x) >> S_XGM_DTX) & M_XGM_DTX)
+
+#define S_XGM_LODRV 1
+#define V_XGM_LODRV(x) ((x) << S_XGM_LODRV)
+#define F_XGM_LODRV V_XGM_LODRV(1U)
+
+#define S_XGM_HIDRV 0
+#define V_XGM_HIDRV(x) ((x) << S_XGM_HIDRV)
+#define F_XGM_HIDRV V_XGM_HIDRV(1U)
+
+#define A_XGM_SERDES_CTRL1 0x8e4
+
+#define S_FMOFFSET3 19
+#define M_FMOFFSET3 0x1f
+#define V_FMOFFSET3(x) ((x) << S_FMOFFSET3)
+#define G_FMOFFSET3(x) (((x) >> S_FMOFFSET3) & M_FMOFFSET3)
+
+#define S_FMOFFSETEN3 18
+#define V_FMOFFSETEN3(x) ((x) << S_FMOFFSETEN3)
+#define F_FMOFFSETEN3 V_FMOFFSETEN3(1U)
+
+#define S_FMOFFSET2 13
+#define M_FMOFFSET2 0x1f
+#define V_FMOFFSET2(x) ((x) << S_FMOFFSET2)
+#define G_FMOFFSET2(x) (((x) >> S_FMOFFSET2) & M_FMOFFSET2)
+
+#define S_FMOFFSETEN2 12
+#define V_FMOFFSETEN2(x) ((x) << S_FMOFFSETEN2)
+#define F_FMOFFSETEN2 V_FMOFFSETEN2(1U)
+
+#define S_FMOFFSET1 7
+#define M_FMOFFSET1 0x1f
+#define V_FMOFFSET1(x) ((x) << S_FMOFFSET1)
+#define G_FMOFFSET1(x) (((x) >> S_FMOFFSET1) & M_FMOFFSET1)
+
+#define S_FMOFFSETEN1 6
+#define V_FMOFFSETEN1(x) ((x) << S_FMOFFSETEN1)
+#define F_FMOFFSETEN1 V_FMOFFSETEN1(1U)
+
+#define S_FMOFFSET0 1
+#define M_FMOFFSET0 0x1f
+#define V_FMOFFSET0(x) ((x) << S_FMOFFSET0)
+#define G_FMOFFSET0(x) (((x) >> S_FMOFFSET0) & M_FMOFFSET0)
+
+#define S_FMOFFSETEN0 0
+#define V_FMOFFSETEN0(x) ((x) << S_FMOFFSETEN0)
+#define F_FMOFFSETEN0 V_FMOFFSETEN0(1U)
+
+#define A_XGM_SERDES_CTRL2 0x8e8
+
+#define S_DNIN3 11
+#define V_DNIN3(x) ((x) << S_DNIN3)
+#define F_DNIN3 V_DNIN3(1U)
+
+#define S_UPIN3 10
+#define V_UPIN3(x) ((x) << S_UPIN3)
+#define F_UPIN3 V_UPIN3(1U)
+
+#define S_RXSLAVE3 9
+#define V_RXSLAVE3(x) ((x) << S_RXSLAVE3)
+#define F_RXSLAVE3 V_RXSLAVE3(1U)
+
+#define S_DNIN2 8
+#define V_DNIN2(x) ((x) << S_DNIN2)
+#define F_DNIN2 V_DNIN2(1U)
+
+#define S_UPIN2 7
+#define V_UPIN2(x) ((x) << S_UPIN2)
+#define F_UPIN2 V_UPIN2(1U)
+
+#define S_RXSLAVE2 6
+#define V_RXSLAVE2(x) ((x) << S_RXSLAVE2)
+#define F_RXSLAVE2 V_RXSLAVE2(1U)
+
+#define S_DNIN1 5
+#define V_DNIN1(x) ((x) << S_DNIN1)
+#define F_DNIN1 V_DNIN1(1U)
+
+#define S_UPIN1 4
+#define V_UPIN1(x) ((x) << S_UPIN1)
+#define F_UPIN1 V_UPIN1(1U)
+
+#define S_RXSLAVE1 3
+#define V_RXSLAVE1(x) ((x) << S_RXSLAVE1)
+#define F_RXSLAVE1 V_RXSLAVE1(1U)
+
+#define S_DNIN0 2
+#define V_DNIN0(x) ((x) << S_DNIN0)
+#define F_DNIN0 V_DNIN0(1U)
+
+#define S_UPIN0 1
+#define V_UPIN0(x) ((x) << S_UPIN0)
+#define F_UPIN0 V_UPIN0(1U)
+
+#define S_RXSLAVE0 0
+#define V_RXSLAVE0(x) ((x) << S_RXSLAVE0)
+#define F_RXSLAVE0 V_RXSLAVE0(1U)
+
+#define A_XGM_SERDES_CTRL3 0x8ec
+
+#define S_EXTBISTCHKERRCLR3 31
+#define V_EXTBISTCHKERRCLR3(x) ((x) << S_EXTBISTCHKERRCLR3)
+#define F_EXTBISTCHKERRCLR3 V_EXTBISTCHKERRCLR3(1U)
+
+#define S_EXTBISTCHKEN3 30
+#define V_EXTBISTCHKEN3(x) ((x) << S_EXTBISTCHKEN3)
+#define F_EXTBISTCHKEN3 V_EXTBISTCHKEN3(1U)
+
+#define S_EXTBISTGENEN3 29
+#define V_EXTBISTGENEN3(x) ((x) << S_EXTBISTGENEN3)
+#define F_EXTBISTGENEN3 V_EXTBISTGENEN3(1U)
+
+#define S_EXTBISTPAT3 26
+#define M_EXTBISTPAT3 0x7
+#define V_EXTBISTPAT3(x) ((x) << S_EXTBISTPAT3)
+#define G_EXTBISTPAT3(x) (((x) >> S_EXTBISTPAT3) & M_EXTBISTPAT3)
+
+#define S_EXTPARRESET3 25
+#define V_EXTPARRESET3(x) ((x) << S_EXTPARRESET3)
+#define F_EXTPARRESET3 V_EXTPARRESET3(1U)
+
+#define S_EXTPARLPBK3 24
+#define V_EXTPARLPBK3(x) ((x) << S_EXTPARLPBK3)
+#define F_EXTPARLPBK3 V_EXTPARLPBK3(1U)
+
+#define S_EXTBISTCHKERRCLR2 23
+#define V_EXTBISTCHKERRCLR2(x) ((x) << S_EXTBISTCHKERRCLR2)
+#define F_EXTBISTCHKERRCLR2 V_EXTBISTCHKERRCLR2(1U)
+
+#define S_EXTBISTCHKEN2 22
+#define V_EXTBISTCHKEN2(x) ((x) << S_EXTBISTCHKEN2)
+#define F_EXTBISTCHKEN2 V_EXTBISTCHKEN2(1U)
+
+#define S_EXTBISTGENEN2 21
+#define V_EXTBISTGENEN2(x) ((x) << S_EXTBISTGENEN2)
+#define F_EXTBISTGENEN2 V_EXTBISTGENEN2(1U)
+
+#define S_EXTBISTPAT2 18
+#define M_EXTBISTPAT2 0x7
+#define V_EXTBISTPAT2(x) ((x) << S_EXTBISTPAT2)
+#define G_EXTBISTPAT2(x) (((x) >> S_EXTBISTPAT2) & M_EXTBISTPAT2)
+
+#define S_EXTPARRESET2 17
+#define V_EXTPARRESET2(x) ((x) << S_EXTPARRESET2)
+#define F_EXTPARRESET2 V_EXTPARRESET2(1U)
+
+#define S_EXTPARLPBK2 16
+#define V_EXTPARLPBK2(x) ((x) << S_EXTPARLPBK2)
+#define F_EXTPARLPBK2 V_EXTPARLPBK2(1U)
+
+#define S_EXTBISTCHKERRCLR1 15
+#define V_EXTBISTCHKERRCLR1(x) ((x) << S_EXTBISTCHKERRCLR1)
+#define F_EXTBISTCHKERRCLR1 V_EXTBISTCHKERRCLR1(1U)
+
+#define S_EXTBISTCHKEN1 14
+#define V_EXTBISTCHKEN1(x) ((x) << S_EXTBISTCHKEN1)
+#define F_EXTBISTCHKEN1 V_EXTBISTCHKEN1(1U)
+
+#define S_EXTBISTGENEN1 13
+#define V_EXTBISTGENEN1(x) ((x) << S_EXTBISTGENEN1)
+#define F_EXTBISTGENEN1 V_EXTBISTGENEN1(1U)
+
+#define S_EXTBISTPAT1 10
+#define M_EXTBISTPAT1 0x7
+#define V_EXTBISTPAT1(x) ((x) << S_EXTBISTPAT1)
+#define G_EXTBISTPAT1(x) (((x) >> S_EXTBISTPAT1) & M_EXTBISTPAT1)
+
+#define S_EXTPARRESET1 9
+#define V_EXTPARRESET1(x) ((x) << S_EXTPARRESET1)
+#define F_EXTPARRESET1 V_EXTPARRESET1(1U)
+
+#define S_EXTPARLPBK1 8
+#define V_EXTPARLPBK1(x) ((x) << S_EXTPARLPBK1)
+#define F_EXTPARLPBK1 V_EXTPARLPBK1(1U)
+
+#define S_EXTBISTCHKERRCLR0 7
+#define V_EXTBISTCHKERRCLR0(x) ((x) << S_EXTBISTCHKERRCLR0)
+#define F_EXTBISTCHKERRCLR0 V_EXTBISTCHKERRCLR0(1U)
+
+#define S_EXTBISTCHKEN0 6
+#define V_EXTBISTCHKEN0(x) ((x) << S_EXTBISTCHKEN0)
+#define F_EXTBISTCHKEN0 V_EXTBISTCHKEN0(1U)
+
+#define S_EXTBISTGENEN0 5
+#define V_EXTBISTGENEN0(x) ((x) << S_EXTBISTGENEN0)
+#define F_EXTBISTGENEN0 V_EXTBISTGENEN0(1U)
+
+#define S_EXTBISTPAT0 2
+#define M_EXTBISTPAT0 0x7
+#define V_EXTBISTPAT0(x) ((x) << S_EXTBISTPAT0)
+#define G_EXTBISTPAT0(x) (((x) >> S_EXTBISTPAT0) & M_EXTBISTPAT0)
+
+#define S_EXTPARRESET0 1
+#define V_EXTPARRESET0(x) ((x) << S_EXTPARRESET0)
+#define F_EXTPARRESET0 V_EXTPARRESET0(1U)
+
+#define S_EXTPARLPBK0 0
+#define V_EXTPARLPBK0(x) ((x) << S_EXTPARLPBK0)
+#define F_EXTPARLPBK0 V_EXTPARLPBK0(1U)
+
+#define A_XGM_SERDES_STAT0 0x8f0
+
+#define S_EXTBISTCHKERRCNT0 4
+#define M_EXTBISTCHKERRCNT0 0xffffff
+#define V_EXTBISTCHKERRCNT0(x) ((x) << S_EXTBISTCHKERRCNT0)
+#define G_EXTBISTCHKERRCNT0(x) (((x) >> S_EXTBISTCHKERRCNT0) & M_EXTBISTCHKERRCNT0)
+
+#define S_EXTBISTCHKFMD0 3
+#define V_EXTBISTCHKFMD0(x) ((x) << S_EXTBISTCHKFMD0)
+#define F_EXTBISTCHKFMD0 V_EXTBISTCHKFMD0(1U)
+
+#define S_LOWSIG0 0
+#define V_LOWSIG0(x) ((x) << S_LOWSIG0)
+#define F_LOWSIG0 V_LOWSIG0(1U)
+
+#define A_XGM_SERDES_STAT1 0x8f4
+
+#define S_EXTBISTCHKERRCNT1 4
+#define M_EXTBISTCHKERRCNT1 0xffffff
+#define V_EXTBISTCHKERRCNT1(x) ((x) << S_EXTBISTCHKERRCNT1)
+#define G_EXTBISTCHKERRCNT1(x) (((x) >> S_EXTBISTCHKERRCNT1) & M_EXTBISTCHKERRCNT1)
+
+#define S_EXTBISTCHKFMD1 3
+#define V_EXTBISTCHKFMD1(x) ((x) << S_EXTBISTCHKFMD1)
+#define F_EXTBISTCHKFMD1 V_EXTBISTCHKFMD1(1U)
+
+#define S_LOWSIG1 0
+#define V_LOWSIG1(x) ((x) << S_LOWSIG1)
+#define F_LOWSIG1 V_LOWSIG1(1U)
+
+#define A_XGM_SERDES_STAT2 0x8f8
+
+#define S_EXTBISTCHKERRCNT2 4
+#define M_EXTBISTCHKERRCNT2 0xffffff
+#define V_EXTBISTCHKERRCNT2(x) ((x) << S_EXTBISTCHKERRCNT2)
+#define G_EXTBISTCHKERRCNT2(x) (((x) >> S_EXTBISTCHKERRCNT2) & M_EXTBISTCHKERRCNT2)
+
+#define S_EXTBISTCHKFMD2 3
+#define V_EXTBISTCHKFMD2(x) ((x) << S_EXTBISTCHKFMD2)
+#define F_EXTBISTCHKFMD2 V_EXTBISTCHKFMD2(1U)
+
+#define S_LOWSIG2 0
+#define V_LOWSIG2(x) ((x) << S_LOWSIG2)
+#define F_LOWSIG2 V_LOWSIG2(1U)
+
+#define A_XGM_SERDES_STAT3 0x8fc
+
+#define S_EXTBISTCHKERRCNT3 4
+#define M_EXTBISTCHKERRCNT3 0xffffff
+#define V_EXTBISTCHKERRCNT3(x) ((x) << S_EXTBISTCHKERRCNT3)
+#define G_EXTBISTCHKERRCNT3(x) (((x) >> S_EXTBISTCHKERRCNT3) & M_EXTBISTCHKERRCNT3)
+
+#define S_EXTBISTCHKFMD3 3
+#define V_EXTBISTCHKFMD3(x) ((x) << S_EXTBISTCHKFMD3)
+#define F_EXTBISTCHKFMD3 V_EXTBISTCHKFMD3(1U)
+
+#define S_LOWSIG3 0
+#define V_LOWSIG3(x) ((x) << S_LOWSIG3)
+#define F_LOWSIG3 V_LOWSIG3(1U)
+
+#define A_XGM_STAT_TX_BYTE_LOW 0x900
+#define A_XGM_STAT_TX_BYTE_HIGH 0x904
+
+#define S_TXBYTES_HIGH 0
+#define M_TXBYTES_HIGH 0x1fff
+#define V_TXBYTES_HIGH(x) ((x) << S_TXBYTES_HIGH)
+#define G_TXBYTES_HIGH(x) (((x) >> S_TXBYTES_HIGH) & M_TXBYTES_HIGH)
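+
+/*
+ * The TX byte count is a split counter: the LOW register holds the bottom
+ * 32 bits and the HIGH register the remaining 13, for 45 bits in all.  A
+ * sketch of reassembly, where lo and hi are hypothetical raw register
+ * reads:
+ *
+ *	u64 tx_bytes = ((u64)G_TXBYTES_HIGH(hi) << 32) | lo;
+ */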
+
+#define A_XGM_STAT_TX_FRAME_LOW 0x908
+#define A_XGM_STAT_TX_FRAME_HIGH 0x90c
+
+#define S_TXFRAMES_HIGH 0
+#define M_TXFRAMES_HIGH 0xf
+#define V_TXFRAMES_HIGH(x) ((x) << S_TXFRAMES_HIGH)
+#define G_TXFRAMES_HIGH(x) (((x) >> S_TXFRAMES_HIGH) & M_TXFRAMES_HIGH)
+
+#define A_XGM_STAT_TX_BCAST 0x910
+#define A_XGM_STAT_TX_MCAST 0x914
+#define A_XGM_STAT_TX_PAUSE 0x918
+#define A_XGM_STAT_TX_64B_FRAMES 0x91c
+#define A_XGM_STAT_TX_65_127B_FRAMES 0x920
+#define A_XGM_STAT_TX_128_255B_FRAMES 0x924
+#define A_XGM_STAT_TX_256_511B_FRAMES 0x928
+#define A_XGM_STAT_TX_512_1023B_FRAMES 0x92c
+#define A_XGM_STAT_TX_1024_1518B_FRAMES 0x930
+#define A_XGM_STAT_TX_1519_MAXB_FRAMES 0x934
+#define A_XGM_STAT_TX_ERR_FRAMES 0x938
+#define A_XGM_STAT_RX_BYTES_LOW 0x93c
+#define A_XGM_STAT_RX_BYTES_HIGH 0x940
+
+#define S_RXBYTES_HIGH 0
+#define M_RXBYTES_HIGH 0x1fff
+#define V_RXBYTES_HIGH(x) ((x) << S_RXBYTES_HIGH)
+#define G_RXBYTES_HIGH(x) (((x) >> S_RXBYTES_HIGH) & M_RXBYTES_HIGH)
+
+#define A_XGM_STAT_RX_FRAMES_LOW 0x944
+#define A_XGM_STAT_RX_FRAMES_HIGH 0x948
+
+#define S_RXFRAMES_HIGH 0
+#define M_RXFRAMES_HIGH 0xf
+#define V_RXFRAMES_HIGH(x) ((x) << S_RXFRAMES_HIGH)
+#define G_RXFRAMES_HIGH(x) (((x) >> S_RXFRAMES_HIGH) & M_RXFRAMES_HIGH)
+
+#define A_XGM_STAT_RX_BCAST_FRAMES 0x94c
+#define A_XGM_STAT_RX_MCAST_FRAMES 0x950
+#define A_XGM_STAT_RX_PAUSE_FRAMES 0x954
+
+#define S_RXPAUSEFRAMES 0
+#define M_RXPAUSEFRAMES 0xffff
+#define V_RXPAUSEFRAMES(x) ((x) << S_RXPAUSEFRAMES)
+#define G_RXPAUSEFRAMES(x) (((x) >> S_RXPAUSEFRAMES) & M_RXPAUSEFRAMES)
+
+#define A_XGM_STAT_RX_64B_FRAMES 0x958
+#define A_XGM_STAT_RX_65_127B_FRAMES 0x95c
+#define A_XGM_STAT_RX_128_255B_FRAMES 0x960
+#define A_XGM_STAT_RX_256_511B_FRAMES 0x964
+#define A_XGM_STAT_RX_512_1023B_FRAMES 0x968
+#define A_XGM_STAT_RX_1024_1518B_FRAMES 0x96c
+#define A_XGM_STAT_RX_1519_MAXB_FRAMES 0x970
+#define A_XGM_STAT_RX_SHORT_FRAMES 0x974
+
+#define S_RXSHORTFRAMES 0
+#define M_RXSHORTFRAMES 0xffff
+#define V_RXSHORTFRAMES(x) ((x) << S_RXSHORTFRAMES)
+#define G_RXSHORTFRAMES(x) (((x) >> S_RXSHORTFRAMES) & M_RXSHORTFRAMES)
+
+#define A_XGM_STAT_RX_OVERSIZE_FRAMES 0x978
+
+#define S_RXOVERSIZEFRAMES 0
+#define M_RXOVERSIZEFRAMES 0xffff
+#define V_RXOVERSIZEFRAMES(x) ((x) << S_RXOVERSIZEFRAMES)
+#define G_RXOVERSIZEFRAMES(x) (((x) >> S_RXOVERSIZEFRAMES) & M_RXOVERSIZEFRAMES)
+
+#define A_XGM_STAT_RX_JABBER_FRAMES 0x97c
+
+#define S_RXJABBERFRAMES 0
+#define M_RXJABBERFRAMES 0xffff
+#define V_RXJABBERFRAMES(x) ((x) << S_RXJABBERFRAMES)
+#define G_RXJABBERFRAMES(x) (((x) >> S_RXJABBERFRAMES) & M_RXJABBERFRAMES)
+
+#define A_XGM_STAT_RX_CRC_ERR_FRAMES 0x980
+
+#define S_RXCRCERRFRAMES 0
+#define M_RXCRCERRFRAMES 0xffff
+#define V_RXCRCERRFRAMES(x) ((x) << S_RXCRCERRFRAMES)
+#define G_RXCRCERRFRAMES(x) (((x) >> S_RXCRCERRFRAMES) & M_RXCRCERRFRAMES)
+
+#define A_XGM_STAT_RX_LENGTH_ERR_FRAMES 0x984
+
+#define S_RXLENGTHERRFRAMES 0
+#define M_RXLENGTHERRFRAMES 0xffff
+#define V_RXLENGTHERRFRAMES(x) ((x) << S_RXLENGTHERRFRAMES)
+#define G_RXLENGTHERRFRAMES(x) (((x) >> S_RXLENGTHERRFRAMES) & M_RXLENGTHERRFRAMES)
+
+#define A_XGM_STAT_RX_SYM_CODE_ERR_FRAMES 0x988
+
+#define S_RXSYMCODEERRFRAMES 0
+#define M_RXSYMCODEERRFRAMES 0xffff
+#define V_RXSYMCODEERRFRAMES(x) ((x) << S_RXSYMCODEERRFRAMES)
+#define G_RXSYMCODEERRFRAMES(x) (((x) >> S_RXSYMCODEERRFRAMES) & M_RXSYMCODEERRFRAMES)
+
+#define A_XGM_SERDES_STATUS0 0x98c
+
+#define S_RXERRLANE3 9
+#define M_RXERRLANE3 0x7
+#define V_RXERRLANE3(x) ((x) << S_RXERRLANE3)
+#define G_RXERRLANE3(x) (((x) >> S_RXERRLANE3) & M_RXERRLANE3)
+
+#define S_RXERRLANE2 6
+#define M_RXERRLANE2 0x7
+#define V_RXERRLANE2(x) ((x) << S_RXERRLANE2)
+#define G_RXERRLANE2(x) (((x) >> S_RXERRLANE2) & M_RXERRLANE2)
+
+#define S_RXERRLANE1 3
+#define M_RXERRLANE1 0x7
+#define V_RXERRLANE1(x) ((x) << S_RXERRLANE1)
+#define G_RXERRLANE1(x) (((x) >> S_RXERRLANE1) & M_RXERRLANE1)
+
+#define S_RXERRLANE0 0
+#define M_RXERRLANE0 0x7
+#define V_RXERRLANE0(x) ((x) << S_RXERRLANE0)
+#define G_RXERRLANE0(x) (((x) >> S_RXERRLANE0) & M_RXERRLANE0)
+
+#define A_XGM_SERDES_STATUS1 0x990
+
+#define S_RXKLOCKLANE3 11
+#define V_RXKLOCKLANE3(x) ((x) << S_RXKLOCKLANE3)
+#define F_RXKLOCKLANE3 V_RXKLOCKLANE3(1U)
+
+#define S_RXKLOCKLANE2 10
+#define V_RXKLOCKLANE2(x) ((x) << S_RXKLOCKLANE2)
+#define F_RXKLOCKLANE2 V_RXKLOCKLANE2(1U)
+
+#define S_RXKLOCKLANE1 9
+#define V_RXKLOCKLANE1(x) ((x) << S_RXKLOCKLANE1)
+#define F_RXKLOCKLANE1 V_RXKLOCKLANE1(1U)
+
+#define S_RXKLOCKLANE0 8
+#define V_RXKLOCKLANE0(x) ((x) << S_RXKLOCKLANE0)
+#define F_RXKLOCKLANE0 V_RXKLOCKLANE0(1U)
+
+#define S_RXUFLOWLANE3 7
+#define V_RXUFLOWLANE3(x) ((x) << S_RXUFLOWLANE3)
+#define F_RXUFLOWLANE3 V_RXUFLOWLANE3(1U)
+
+#define S_RXUFLOWLANE2 6
+#define V_RXUFLOWLANE2(x) ((x) << S_RXUFLOWLANE2)
+#define F_RXUFLOWLANE2 V_RXUFLOWLANE2(1U)
+
+#define S_RXUFLOWLANE1 5
+#define V_RXUFLOWLANE1(x) ((x) << S_RXUFLOWLANE1)
+#define F_RXUFLOWLANE1 V_RXUFLOWLANE1(1U)
+
+#define S_RXUFLOWLANE0 4
+#define V_RXUFLOWLANE0(x) ((x) << S_RXUFLOWLANE0)
+#define F_RXUFLOWLANE0 V_RXUFLOWLANE0(1U)
+
+#define S_RXOFLOWLANE3 3
+#define V_RXOFLOWLANE3(x) ((x) << S_RXOFLOWLANE3)
+#define F_RXOFLOWLANE3 V_RXOFLOWLANE3(1U)
+
+#define S_RXOFLOWLANE2 2
+#define V_RXOFLOWLANE2(x) ((x) << S_RXOFLOWLANE2)
+#define F_RXOFLOWLANE2 V_RXOFLOWLANE2(1U)
+
+#define S_RXOFLOWLANE1 1
+#define V_RXOFLOWLANE1(x) ((x) << S_RXOFLOWLANE1)
+#define F_RXOFLOWLANE1 V_RXOFLOWLANE1(1U)
+
+#define S_RXOFLOWLANE0 0
+#define V_RXOFLOWLANE0(x) ((x) << S_RXOFLOWLANE0)
+#define F_RXOFLOWLANE0 V_RXOFLOWLANE0(1U)
+
+#define A_XGM_SERDES_STATUS2 0x994
+
+#define S_XGM_RXEIDLANE3 11
+#define V_XGM_RXEIDLANE3(x) ((x) << S_XGM_RXEIDLANE3)
+#define F_XGM_RXEIDLANE3 V_XGM_RXEIDLANE3(1U)
+
+#define S_XGM_RXEIDLANE2 10
+#define V_XGM_RXEIDLANE2(x) ((x) << S_XGM_RXEIDLANE2)
+#define F_XGM_RXEIDLANE2 V_XGM_RXEIDLANE2(1U)
+
+#define S_XGM_RXEIDLANE1 9
+#define V_XGM_RXEIDLANE1(x) ((x) << S_XGM_RXEIDLANE1)
+#define F_XGM_RXEIDLANE1 V_XGM_RXEIDLANE1(1U)
+
+#define S_XGM_RXEIDLANE0 8
+#define V_XGM_RXEIDLANE0(x) ((x) << S_XGM_RXEIDLANE0)
+#define F_XGM_RXEIDLANE0 V_XGM_RXEIDLANE0(1U)
+
+#define S_RXREMSKIPLANE3 7
+#define V_RXREMSKIPLANE3(x) ((x) << S_RXREMSKIPLANE3)
+#define F_RXREMSKIPLANE3 V_RXREMSKIPLANE3(1U)
+
+#define S_RXREMSKIPLANE2 6
+#define V_RXREMSKIPLANE2(x) ((x) << S_RXREMSKIPLANE2)
+#define F_RXREMSKIPLANE2 V_RXREMSKIPLANE2(1U)
+
+#define S_RXREMSKIPLANE1 5
+#define V_RXREMSKIPLANE1(x) ((x) << S_RXREMSKIPLANE1)
+#define F_RXREMSKIPLANE1 V_RXREMSKIPLANE1(1U)
+
+#define S_RXREMSKIPLANE0 4
+#define V_RXREMSKIPLANE0(x) ((x) << S_RXREMSKIPLANE0)
+#define F_RXREMSKIPLANE0 V_RXREMSKIPLANE0(1U)
+
+#define S_RXADDSKIPLANE3 3
+#define V_RXADDSKIPLANE3(x) ((x) << S_RXADDSKIPLANE3)
+#define F_RXADDSKIPLANE3 V_RXADDSKIPLANE3(1U)
+
+#define S_RXADDSKIPLANE2 2
+#define V_RXADDSKIPLANE2(x) ((x) << S_RXADDSKIPLANE2)
+#define F_RXADDSKIPLANE2 V_RXADDSKIPLANE2(1U)
+
+#define S_RXADDSKIPLANE1 1
+#define V_RXADDSKIPLANE1(x) ((x) << S_RXADDSKIPLANE1)
+#define F_RXADDSKIPLANE1 V_RXADDSKIPLANE1(1U)
+
+#define S_RXADDSKIPLANE0 0
+#define V_RXADDSKIPLANE0(x) ((x) << S_RXADDSKIPLANE0)
+#define F_RXADDSKIPLANE0 V_RXADDSKIPLANE0(1U)
+
+#define A_XGM_XAUI_PCS_ERR 0x998
+
+#define S_PCS_SYNCSTATUS 5
+#define M_PCS_SYNCSTATUS 0xf
+#define V_PCS_SYNCSTATUS(x) ((x) << S_PCS_SYNCSTATUS)
+#define G_PCS_SYNCSTATUS(x) (((x) >> S_PCS_SYNCSTATUS) & M_PCS_SYNCSTATUS)
+
+#define S_PCS_CTCFIFOERR 1
+#define M_PCS_CTCFIFOERR 0xf
+#define V_PCS_CTCFIFOERR(x) ((x) << S_PCS_CTCFIFOERR)
+#define G_PCS_CTCFIFOERR(x) (((x) >> S_PCS_CTCFIFOERR) & M_PCS_CTCFIFOERR)
+
+#define S_PCS_NOTALIGNED 0
+#define V_PCS_NOTALIGNED(x) ((x) << S_PCS_NOTALIGNED)
+#define F_PCS_NOTALIGNED V_PCS_NOTALIGNED(1U)
+
+#define A_XGM_RGMII_STATUS 0x99c
+
+#define S_GMIIDUPLEX 3
+#define V_GMIIDUPLEX(x) ((x) << S_GMIIDUPLEX)
+#define F_GMIIDUPLEX V_GMIIDUPLEX(1U)
+
+#define S_GMIISPEED 1
+#define M_GMIISPEED 0x3
+#define V_GMIISPEED(x) ((x) << S_GMIISPEED)
+#define G_GMIISPEED(x) (((x) >> S_GMIISPEED) & M_GMIISPEED)
+
+#define S_GMIILINKSTATUS 0
+#define V_GMIILINKSTATUS(x) ((x) << S_GMIILINKSTATUS)
+#define F_GMIILINKSTATUS V_GMIILINKSTATUS(1U)
+
+#define A_XGM_WOL_STATUS 0x9a0
+
+#define S_PATDETECTED 31
+#define V_PATDETECTED(x) ((x) << S_PATDETECTED)
+#define F_PATDETECTED V_PATDETECTED(1U)
+
+#define S_MATCHEDFILTER 0
+#define M_MATCHEDFILTER 0x7
+#define V_MATCHEDFILTER(x) ((x) << S_MATCHEDFILTER)
+#define G_MATCHEDFILTER(x) (((x) >> S_MATCHEDFILTER) & M_MATCHEDFILTER)
+
+#define A_XGM_RX_MAX_PKT_SIZE_ERR_CNT 0x9a4
+#define A_XGM_TX_SPI4_SOP_EOP_CNT 0x9a8
+
+#define S_TXSPI4SOPCNT 16
+#define M_TXSPI4SOPCNT 0xffff
+#define V_TXSPI4SOPCNT(x) ((x) << S_TXSPI4SOPCNT)
+#define G_TXSPI4SOPCNT(x) (((x) >> S_TXSPI4SOPCNT) & M_TXSPI4SOPCNT)
+
+#define S_TXSPI4EOPCNT 0
+#define M_TXSPI4EOPCNT 0xffff
+#define V_TXSPI4EOPCNT(x) ((x) << S_TXSPI4EOPCNT)
+#define G_TXSPI4EOPCNT(x) (((x) >> S_TXSPI4EOPCNT) & M_TXSPI4EOPCNT)
+
+#define A_XGM_RX_SPI4_SOP_EOP_CNT 0x9ac
+
+#define S_RXSPI4SOPCNT 16
+#define M_RXSPI4SOPCNT 0xffff
+#define V_RXSPI4SOPCNT(x) ((x) << S_RXSPI4SOPCNT)
+#define G_RXSPI4SOPCNT(x) (((x) >> S_RXSPI4SOPCNT) & M_RXSPI4SOPCNT)
+
+#define S_RXSPI4EOPCNT 0
+#define M_RXSPI4EOPCNT 0xffff
+#define V_RXSPI4EOPCNT(x) ((x) << S_RXSPI4EOPCNT)
+#define G_RXSPI4EOPCNT(x) (((x) >> S_RXSPI4EOPCNT) & M_RXSPI4EOPCNT)
+
+/* registers for module XGMAC0_1 */
+#define XGMAC0_1_BASE_ADDR 0xa00
diff --git a/sys/dev/cxgb/common/cxgb_sge_defs.h b/sys/dev/cxgb/common/cxgb_sge_defs.h
new file mode 100644
index 0000000..d421673
--- /dev/null
+++ b/sys/dev/cxgb/common/cxgb_sge_defs.h
@@ -0,0 +1,289 @@
+/**************************************************************************
+
+Copyright (c) 2007, Chelsio Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Chelsio Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+$FreeBSD$
+
+***************************************************************************/
+/*
+ * This file is automatically generated --- any changes will be lost.
+ */
+
+#ifndef _SGE_DEFS_H
+#define _SGE_DEFS_H
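+
+/*
+ * Naming convention for the field accessors in this file: S_<FIELD> is the
+ * bit offset of a field within its word, M_<FIELD> the field mask after
+ * shifting, V_<FIELD>(x) positions a value in the field, G_<FIELD>(x)
+ * extracts it, and F_<FIELD> is the value of a single-bit flag.  A sketch
+ * using the definitions just below, where val and credits are hypothetical
+ * locals:
+ *
+ *	val = V_EC_CREDITS(credits) | F_EC_GTS;
+ *	credits = G_EC_CREDITS(val);
+ */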
+
+#define S_EC_CREDITS 0
+#define M_EC_CREDITS 0x7FFF
+#define V_EC_CREDITS(x) ((x) << S_EC_CREDITS)
+#define G_EC_CREDITS(x) (((x) >> S_EC_CREDITS) & M_EC_CREDITS)
+
+#define S_EC_GTS 15
+#define V_EC_GTS(x) ((x) << S_EC_GTS)
+#define F_EC_GTS V_EC_GTS(1U)
+
+#define S_EC_INDEX 16
+#define M_EC_INDEX 0xFFFF
+#define V_EC_INDEX(x) ((x) << S_EC_INDEX)
+#define G_EC_INDEX(x) (((x) >> S_EC_INDEX) & M_EC_INDEX)
+
+#define S_EC_SIZE 0
+#define M_EC_SIZE 0xFFFF
+#define V_EC_SIZE(x) ((x) << S_EC_SIZE)
+#define G_EC_SIZE(x) (((x) >> S_EC_SIZE) & M_EC_SIZE)
+
+#define S_EC_BASE_LO 16
+#define M_EC_BASE_LO 0xFFFF
+#define V_EC_BASE_LO(x) ((x) << S_EC_BASE_LO)
+#define G_EC_BASE_LO(x) (((x) >> S_EC_BASE_LO) & M_EC_BASE_LO)
+
+#define S_EC_BASE_HI 0
+#define M_EC_BASE_HI 0xF
+#define V_EC_BASE_HI(x) ((x) << S_EC_BASE_HI)
+#define G_EC_BASE_HI(x) (((x) >> S_EC_BASE_HI) & M_EC_BASE_HI)
+
+#define S_EC_RESPQ 4
+#define M_EC_RESPQ 0x7
+#define V_EC_RESPQ(x) ((x) << S_EC_RESPQ)
+#define G_EC_RESPQ(x) (((x) >> S_EC_RESPQ) & M_EC_RESPQ)
+
+#define S_EC_TYPE 7
+#define M_EC_TYPE 0x7
+#define V_EC_TYPE(x) ((x) << S_EC_TYPE)
+#define G_EC_TYPE(x) (((x) >> S_EC_TYPE) & M_EC_TYPE)
+
+#define S_EC_GEN 10
+#define V_EC_GEN(x) ((x) << S_EC_GEN)
+#define F_EC_GEN V_EC_GEN(1U)
+
+#define S_EC_UP_TOKEN 11
+#define M_EC_UP_TOKEN 0xFFFFF
+#define V_EC_UP_TOKEN(x) ((x) << S_EC_UP_TOKEN)
+#define G_EC_UP_TOKEN(x) (((x) >> S_EC_UP_TOKEN) & M_EC_UP_TOKEN)
+
+#define S_EC_VALID 31
+#define V_EC_VALID(x) ((x) << S_EC_VALID)
+#define F_EC_VALID V_EC_VALID(1U)
+
+#define S_RQ_MSI_VEC 20
+#define M_RQ_MSI_VEC 0x3F
+#define V_RQ_MSI_VEC(x) ((x) << S_RQ_MSI_VEC)
+#define G_RQ_MSI_VEC(x) (((x) >> S_RQ_MSI_VEC) & M_RQ_MSI_VEC)
+
+#define S_RQ_INTR_EN 26
+#define V_RQ_INTR_EN(x) ((x) << S_RQ_INTR_EN)
+#define F_RQ_INTR_EN V_RQ_INTR_EN(1U)
+
+#define S_RQ_GEN 28
+#define V_RQ_GEN(x) ((x) << S_RQ_GEN)
+#define F_RQ_GEN V_RQ_GEN(1U)
+
+#define S_CQ_INDEX 0
+#define M_CQ_INDEX 0xFFFF
+#define V_CQ_INDEX(x) ((x) << S_CQ_INDEX)
+#define G_CQ_INDEX(x) (((x) >> S_CQ_INDEX) & M_CQ_INDEX)
+
+#define S_CQ_SIZE 16
+#define M_CQ_SIZE 0xFFFF
+#define V_CQ_SIZE(x) ((x) << S_CQ_SIZE)
+#define G_CQ_SIZE(x) (((x) >> S_CQ_SIZE) & M_CQ_SIZE)
+
+#define S_CQ_BASE_HI 0
+#define M_CQ_BASE_HI 0xFFFFF
+#define V_CQ_BASE_HI(x) ((x) << S_CQ_BASE_HI)
+#define G_CQ_BASE_HI(x) (((x) >> S_CQ_BASE_HI) & M_CQ_BASE_HI)
+
+#define S_CQ_RSPQ 20
+#define M_CQ_RSPQ 0x3F
+#define V_CQ_RSPQ(x) ((x) << S_CQ_RSPQ)
+#define G_CQ_RSPQ(x) (((x) >> S_CQ_RSPQ) & M_CQ_RSPQ)
+
+#define S_CQ_ASYNC_NOTIF 26
+#define V_CQ_ASYNC_NOTIF(x) ((x) << S_CQ_ASYNC_NOTIF)
+#define F_CQ_ASYNC_NOTIF V_CQ_ASYNC_NOTIF(1U)
+
+#define S_CQ_ARMED 27
+#define V_CQ_ARMED(x) ((x) << S_CQ_ARMED)
+#define F_CQ_ARMED V_CQ_ARMED(1U)
+
+#define S_CQ_ASYNC_NOTIF_SOL 28
+#define V_CQ_ASYNC_NOTIF_SOL(x) ((x) << S_CQ_ASYNC_NOTIF_SOL)
+#define F_CQ_ASYNC_NOTIF_SOL V_CQ_ASYNC_NOTIF_SOL(1U)
+
+#define S_CQ_GEN 29
+#define V_CQ_GEN(x) ((x) << S_CQ_GEN)
+#define F_CQ_GEN V_CQ_GEN(1U)
+
+#define S_CQ_OVERFLOW_MODE 31
+#define V_CQ_OVERFLOW_MODE(x) ((x) << S_CQ_OVERFLOW_MODE)
+#define F_CQ_OVERFLOW_MODE V_CQ_OVERFLOW_MODE(1U)
+
+#define S_CQ_CREDITS 0
+#define M_CQ_CREDITS 0xFFFF
+#define V_CQ_CREDITS(x) ((x) << S_CQ_CREDITS)
+#define G_CQ_CREDITS(x) (((x) >> S_CQ_CREDITS) & M_CQ_CREDITS)
+
+#define S_CQ_CREDIT_THRES 16
+#define M_CQ_CREDIT_THRES 0x1FFF
+#define V_CQ_CREDIT_THRES(x) ((x) << S_CQ_CREDIT_THRES)
+#define G_CQ_CREDIT_THRES(x) (((x) >> S_CQ_CREDIT_THRES) & M_CQ_CREDIT_THRES)
+
+#define S_FL_BASE_HI 0
+#define M_FL_BASE_HI 0xFFFFF
+#define V_FL_BASE_HI(x) ((x) << S_FL_BASE_HI)
+#define G_FL_BASE_HI(x) (((x) >> S_FL_BASE_HI) & M_FL_BASE_HI)
+
+#define S_FL_INDEX_LO 20
+#define M_FL_INDEX_LO 0xFFF
+#define V_FL_INDEX_LO(x) ((x) << S_FL_INDEX_LO)
+#define G_FL_INDEX_LO(x) (((x) >> S_FL_INDEX_LO) & M_FL_INDEX_LO)
+
+#define S_FL_INDEX_HI 0
+#define M_FL_INDEX_HI 0xF
+#define V_FL_INDEX_HI(x) ((x) << S_FL_INDEX_HI)
+#define G_FL_INDEX_HI(x) (((x) >> S_FL_INDEX_HI) & M_FL_INDEX_HI)
+
+#define S_FL_SIZE 4
+#define M_FL_SIZE 0xFFFF
+#define V_FL_SIZE(x) ((x) << S_FL_SIZE)
+#define G_FL_SIZE(x) (((x) >> S_FL_SIZE) & M_FL_SIZE)
+
+#define S_FL_GEN 20
+#define V_FL_GEN(x) ((x) << S_FL_GEN)
+#define F_FL_GEN V_FL_GEN(1U)
+
+#define S_FL_ENTRY_SIZE_LO 21
+#define M_FL_ENTRY_SIZE_LO 0x7FF
+#define V_FL_ENTRY_SIZE_LO(x) ((x) << S_FL_ENTRY_SIZE_LO)
+#define G_FL_ENTRY_SIZE_LO(x) (((x) >> S_FL_ENTRY_SIZE_LO) & M_FL_ENTRY_SIZE_LO)
+
+#define S_FL_ENTRY_SIZE_HI 0
+#define M_FL_ENTRY_SIZE_HI 0x1FFFFF
+#define V_FL_ENTRY_SIZE_HI(x) ((x) << S_FL_ENTRY_SIZE_HI)
+#define G_FL_ENTRY_SIZE_HI(x) (((x) >> S_FL_ENTRY_SIZE_HI) & M_FL_ENTRY_SIZE_HI)
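+
+/*
+ * The free-list entry size straddles two context words: one word carries
+ * the low 11 bits at offset 21, the next the upper 21 bits at offset 0.
+ * A sketch of reassembly, where w1 and w2 are hypothetical context words:
+ *
+ *	u32 entry_size = (G_FL_ENTRY_SIZE_HI(w2) << 11) |
+ *			 G_FL_ENTRY_SIZE_LO(w1);
+ */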
+
+#define S_FL_CONG_THRES 21
+#define M_FL_CONG_THRES 0x3FF
+#define V_FL_CONG_THRES(x) ((x) << S_FL_CONG_THRES)
+#define G_FL_CONG_THRES(x) (((x) >> S_FL_CONG_THRES) & M_FL_CONG_THRES)
+
+#define S_FL_GTS 31
+#define V_FL_GTS(x) ((x) << S_FL_GTS)
+#define F_FL_GTS V_FL_GTS(1U)
+
+#define S_FLD_GEN1 31
+#define V_FLD_GEN1(x) ((x) << S_FLD_GEN1)
+#define F_FLD_GEN1 V_FLD_GEN1(1U)
+
+#define S_FLD_GEN2 0
+#define V_FLD_GEN2(x) ((x) << S_FLD_GEN2)
+#define F_FLD_GEN2 V_FLD_GEN2(1U)
+
+#define S_RSPD_TXQ1_CR 0
+#define M_RSPD_TXQ1_CR 0x7F
+#define V_RSPD_TXQ1_CR(x) ((x) << S_RSPD_TXQ1_CR)
+#define G_RSPD_TXQ1_CR(x) (((x) >> S_RSPD_TXQ1_CR) & M_RSPD_TXQ1_CR)
+
+#define S_RSPD_TXQ1_GTS 7
+#define V_RSPD_TXQ1_GTS(x) ((x) << S_RSPD_TXQ1_GTS)
+#define F_RSPD_TXQ1_GTS V_RSPD_TXQ1_GTS(1U)
+
+#define S_RSPD_TXQ2_CR 8
+#define M_RSPD_TXQ2_CR 0x7F
+#define V_RSPD_TXQ2_CR(x) ((x) << S_RSPD_TXQ2_CR)
+#define G_RSPD_TXQ2_CR(x) (((x) >> S_RSPD_TXQ2_CR) & M_RSPD_TXQ2_CR)
+
+#define S_RSPD_TXQ2_GTS 15
+#define V_RSPD_TXQ2_GTS(x) ((x) << S_RSPD_TXQ2_GTS)
+#define F_RSPD_TXQ2_GTS V_RSPD_TXQ2_GTS(1U)
+
+#define S_RSPD_TXQ0_CR 16
+#define M_RSPD_TXQ0_CR 0x7F
+#define V_RSPD_TXQ0_CR(x) ((x) << S_RSPD_TXQ0_CR)
+#define G_RSPD_TXQ0_CR(x) (((x) >> S_RSPD_TXQ0_CR) & M_RSPD_TXQ0_CR)
+
+#define S_RSPD_TXQ0_GTS 23
+#define V_RSPD_TXQ0_GTS(x) ((x) << S_RSPD_TXQ0_GTS)
+#define F_RSPD_TXQ0_GTS V_RSPD_TXQ0_GTS(1U)
+
+#define S_RSPD_EOP 24
+#define V_RSPD_EOP(x) ((x) << S_RSPD_EOP)
+#define F_RSPD_EOP V_RSPD_EOP(1U)
+#define G_RSPD_EOP(x) ((x) & F_RSPD_EOP)
+
+#define S_RSPD_SOP 25
+#define V_RSPD_SOP(x) ((x) << S_RSPD_SOP)
+#define F_RSPD_SOP V_RSPD_SOP(1U)
+#define G_RSPD_SOP(x) ((x) & F_RSPD_SOP)
+
+#define G_RSPD_SOP_EOP(x) ((G_RSPD_SOP(x) | G_RSPD_EOP(x)) >> S_RSPD_EOP)
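+
+/*
+ * G_RSPD_SOP_EOP() folds the two flags into a 2-bit code with SOP in bit 1
+ * and EOP in bit 0: 3 means a packet complete in one descriptor, 2 a first
+ * fragment, 1 a final fragment, and 0 a middle fragment.
+ */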
+
+#define S_RSPD_ASYNC_NOTIF 26
+#define V_RSPD_ASYNC_NOTIF(x) ((x) << S_RSPD_ASYNC_NOTIF)
+#define F_RSPD_ASYNC_NOTIF V_RSPD_ASYNC_NOTIF(1U)
+
+#define S_RSPD_FL0_GTS 27
+#define V_RSPD_FL0_GTS(x) ((x) << S_RSPD_FL0_GTS)
+#define F_RSPD_FL0_GTS V_RSPD_FL0_GTS(1U)
+
+#define S_RSPD_FL1_GTS 28
+#define V_RSPD_FL1_GTS(x) ((x) << S_RSPD_FL1_GTS)
+#define F_RSPD_FL1_GTS V_RSPD_FL1_GTS(1U)
+
+#define S_RSPD_IMM_DATA_VALID 29
+#define V_RSPD_IMM_DATA_VALID(x) ((x) << S_RSPD_IMM_DATA_VALID)
+#define F_RSPD_IMM_DATA_VALID V_RSPD_IMM_DATA_VALID(1U)
+
+#define S_RSPD_OFFLOAD 30
+#define V_RSPD_OFFLOAD(x) ((x) << S_RSPD_OFFLOAD)
+#define F_RSPD_OFFLOAD V_RSPD_OFFLOAD(1U)
+
+#define S_RSPD_GEN1 31
+#define V_RSPD_GEN1(x) ((x) << S_RSPD_GEN1)
+#define F_RSPD_GEN1 V_RSPD_GEN1(1U)
+
+#define S_RSPD_LEN 0
+#define M_RSPD_LEN 0x7FFFFFFF
+#define V_RSPD_LEN(x) ((x) << S_RSPD_LEN)
+#define G_RSPD_LEN(x) (((x) >> S_RSPD_LEN) & M_RSPD_LEN)
+
+#define S_RSPD_FLQ 31
+#define V_RSPD_FLQ(x) ((x) << S_RSPD_FLQ)
+#define F_RSPD_FLQ V_RSPD_FLQ(1U)
+
+#define S_RSPD_GEN2 0
+#define V_RSPD_GEN2(x) ((x) << S_RSPD_GEN2)
+#define F_RSPD_GEN2 V_RSPD_GEN2(1U)
+
+#define S_RSPD_INR_VEC 1
+#define M_RSPD_INR_VEC 0x7F
+#define V_RSPD_INR_VEC(x) ((x) << S_RSPD_INR_VEC)
+#define G_RSPD_INR_VEC(x) (((x) >> S_RSPD_INR_VEC) & M_RSPD_INR_VEC)
+
+#endif /* _SGE_DEFS_H */
diff --git a/sys/dev/cxgb/common/cxgb_t3_cpl.h b/sys/dev/cxgb/common/cxgb_t3_cpl.h
new file mode 100644
index 0000000..26c4b05
--- /dev/null
+++ b/sys/dev/cxgb/common/cxgb_t3_cpl.h
@@ -0,0 +1,1490 @@
+/**************************************************************************
+
+Copyright (c) 2007, Chelsio Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Chelsio Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+$FreeBSD$
+
+***************************************************************************/
+#ifndef T3_CPL_H
+#define T3_CPL_H
+
+#if !defined(__LITTLE_ENDIAN_BITFIELD) && !defined(__BIG_ENDIAN_BITFIELD)
+# include <asm/byteorder.h>
+#endif
+
+enum CPL_opcode {
+ CPL_PASS_OPEN_REQ = 0x1,
+ CPL_PASS_ACCEPT_RPL = 0x2,
+ CPL_ACT_OPEN_REQ = 0x3,
+ CPL_SET_TCB = 0x4,
+ CPL_SET_TCB_FIELD = 0x5,
+ CPL_GET_TCB = 0x6,
+ CPL_PCMD = 0x7,
+ CPL_CLOSE_CON_REQ = 0x8,
+ CPL_CLOSE_LISTSRV_REQ = 0x9,
+ CPL_ABORT_REQ = 0xA,
+ CPL_ABORT_RPL = 0xB,
+ CPL_TX_DATA = 0xC,
+ CPL_RX_DATA_ACK = 0xD,
+ CPL_TX_PKT = 0xE,
+ CPL_RTE_DELETE_REQ = 0xF,
+ CPL_RTE_WRITE_REQ = 0x10,
+ CPL_RTE_READ_REQ = 0x11,
+ CPL_L2T_WRITE_REQ = 0x12,
+ CPL_L2T_READ_REQ = 0x13,
+ CPL_SMT_WRITE_REQ = 0x14,
+ CPL_SMT_READ_REQ = 0x15,
+ CPL_TX_PKT_LSO = 0x16,
+ CPL_PCMD_READ = 0x17,
+ CPL_BARRIER = 0x18,
+ CPL_TID_RELEASE = 0x1A,
+
+ CPL_CLOSE_LISTSRV_RPL = 0x20,
+ CPL_ERROR = 0x21,
+ CPL_GET_TCB_RPL = 0x22,
+ CPL_L2T_WRITE_RPL = 0x23,
+ CPL_PCMD_READ_RPL = 0x24,
+ CPL_PCMD_RPL = 0x25,
+ CPL_PEER_CLOSE = 0x26,
+ CPL_RTE_DELETE_RPL = 0x27,
+ CPL_RTE_WRITE_RPL = 0x28,
+ CPL_RX_DDP_COMPLETE = 0x29,
+ CPL_RX_PHYS_ADDR = 0x2A,
+ CPL_RX_PKT = 0x2B,
+ CPL_RX_URG_NOTIFY = 0x2C,
+ CPL_SET_TCB_RPL = 0x2D,
+ CPL_SMT_WRITE_RPL = 0x2E,
+ CPL_TX_DATA_ACK = 0x2F,
+
+ CPL_ABORT_REQ_RSS = 0x30,
+ CPL_ABORT_RPL_RSS = 0x31,
+ CPL_CLOSE_CON_RPL = 0x32,
+ CPL_ISCSI_HDR = 0x33,
+ CPL_L2T_READ_RPL = 0x34,
+ CPL_RDMA_CQE = 0x35,
+ CPL_RDMA_CQE_READ_RSP = 0x36,
+ CPL_RDMA_CQE_ERR = 0x37,
+ CPL_RTE_READ_RPL = 0x38,
+ CPL_RX_DATA = 0x39,
+
+ CPL_ACT_OPEN_RPL = 0x40,
+ CPL_PASS_OPEN_RPL = 0x41,
+ CPL_RX_DATA_DDP = 0x42,
+ CPL_SMT_READ_RPL = 0x43,
+
+ CPL_ACT_ESTABLISH = 0x50,
+ CPL_PASS_ESTABLISH = 0x51,
+
+ CPL_PASS_ACCEPT_REQ = 0x70,
+
+ CPL_ASYNC_NOTIF = 0x80, /* fake opcode for async notifications */
+
+ CPL_TX_DMA_ACK = 0xA0,
+ CPL_RDMA_READ_REQ = 0xA1,
+ CPL_RDMA_TERMINATE = 0xA2,
+ CPL_TRACE_PKT = 0xA3,
+ CPL_RDMA_EC_STATUS = 0xA5,
+
+	NUM_CPL_CMDS	/* must be last, and previous entries must be sorted */
+};
+
+enum CPL_error {
+ CPL_ERR_NONE = 0,
+ CPL_ERR_TCAM_PARITY = 1,
+ CPL_ERR_TCAM_FULL = 3,
+ CPL_ERR_CONN_RESET = 20,
+ CPL_ERR_CONN_EXIST = 22,
+ CPL_ERR_ARP_MISS = 23,
+ CPL_ERR_BAD_SYN = 24,
+ CPL_ERR_CONN_TIMEDOUT = 30,
+ CPL_ERR_XMIT_TIMEDOUT = 31,
+ CPL_ERR_PERSIST_TIMEDOUT = 32,
+ CPL_ERR_FINWAIT2_TIMEDOUT = 33,
+ CPL_ERR_KEEPALIVE_TIMEDOUT = 34,
+ CPL_ERR_RTX_NEG_ADVICE = 35,
+ CPL_ERR_PERSIST_NEG_ADVICE = 36,
+ CPL_ERR_ABORT_FAILED = 42,
+ CPL_ERR_GENERAL = 99
+};
+
+enum {
+ CPL_CONN_POLICY_AUTO = 0,
+ CPL_CONN_POLICY_ASK = 1,
+ CPL_CONN_POLICY_DENY = 3
+};
+
+enum {
+ ULP_MODE_NONE = 0,
+ ULP_MODE_TCP_DDP = 1,
+ ULP_MODE_ISCSI = 2,
+ ULP_MODE_RDMA = 4,
+ ULP_MODE_TCPDDP = 5
+};
+
+enum {
+ ULP_CRC_HEADER = 1 << 0,
+ ULP_CRC_DATA = 1 << 1
+};
+
+enum {
+ CPL_PASS_OPEN_ACCEPT,
+ CPL_PASS_OPEN_REJECT
+};
+
+enum {
+ CPL_ABORT_SEND_RST = 0,
+ CPL_ABORT_NO_RST,
+ CPL_ABORT_POST_CLOSE_REQ = 2
+};
+
+enum { /* TX_PKT_LSO ethernet types */
+ CPL_ETH_II,
+ CPL_ETH_II_VLAN,
+ CPL_ETH_802_3,
+ CPL_ETH_802_3_VLAN
+};
+
+enum { /* TCP congestion control algorithms */
+ CONG_ALG_RENO,
+ CONG_ALG_TAHOE,
+ CONG_ALG_NEWRENO,
+ CONG_ALG_HIGHSPEED
+};
+
+enum { /* RSS hash type */
+ RSS_HASH_NONE = 0,
+ RSS_HASH_2_TUPLE = 1 << 0,
+ RSS_HASH_4_TUPLE = 1 << 1
+};
+
+union opcode_tid {
+ __be32 opcode_tid;
+ __u8 opcode;
+};
+
+#define S_OPCODE 24
+#define V_OPCODE(x) ((x) << S_OPCODE)
+#define G_OPCODE(x) (((x) >> S_OPCODE) & 0xFF)
+#define G_TID(x) ((x) & 0xFFFFFF)
+
+#define S_HASHTYPE 22
+#define M_HASHTYPE 0x3
+#define G_HASHTYPE(x) (((x) >> S_HASHTYPE) & M_HASHTYPE)
+
+#define S_QNUM 0
+#define G_QNUM(x) (((x) >> S_QNUM) & 0xFFFF)
+
+/* the tid is assumed to be 24 bits */
+#define MK_OPCODE_TID(opcode, tid) (V_OPCODE(opcode) | (tid))
+
+#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
+
+/* extract the TID from a CPL command */
+#define GET_TID(cmd) (G_TID(ntohl(OPCODE_TID(cmd))))
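+
+/*
+ * Illustrative round trip, where req points at one of the CPL structures
+ * below (each embeds a union opcode_tid ot) and tid fits in 24 bits:
+ *
+ *	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, tid));
+ *
+ * after which GET_TID(req) yields tid and G_OPCODE(ntohl(OPCODE_TID(req)))
+ * yields CPL_ACT_OPEN_REQ.
+ */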
+
+struct tcp_options {
+ __be16 mss;
+ __u8 wsf;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 :5;
+ __u8 ecn:1;
+ __u8 sack:1;
+ __u8 tstamp:1;
+#else
+ __u8 tstamp:1;
+ __u8 sack:1;
+ __u8 ecn:1;
+ __u8 :5;
+#endif
+};
+
+struct rss_header {
+ __u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 cpu_idx:6;
+ __u8 hash_type:2;
+#else
+ __u8 hash_type:2;
+ __u8 cpu_idx:6;
+#endif
+ __be16 cq_idx;
+ __be32 rss_hash_val;
+};
+
+#ifndef CHELSIO_FW
+struct work_request_hdr {
+ __be32 wr_hi;
+ __be32 wr_lo;
+};
+
+/* wr_hi fields */
+#define S_WR_SGE_CREDITS 0
+#define M_WR_SGE_CREDITS 0xFF
+#define V_WR_SGE_CREDITS(x) ((x) << S_WR_SGE_CREDITS)
+#define G_WR_SGE_CREDITS(x) (((x) >> S_WR_SGE_CREDITS) & M_WR_SGE_CREDITS)
+
+#define S_WR_SGLSFLT 8
+#define M_WR_SGLSFLT 0xFF
+#define V_WR_SGLSFLT(x) ((x) << S_WR_SGLSFLT)
+#define G_WR_SGLSFLT(x) (((x) >> S_WR_SGLSFLT) & M_WR_SGLSFLT)
+
+#define S_WR_BCNTLFLT 16
+#define M_WR_BCNTLFLT 0xF
+#define V_WR_BCNTLFLT(x) ((x) << S_WR_BCNTLFLT)
+#define G_WR_BCNTLFLT(x) (((x) >> S_WR_BCNTLFLT) & M_WR_BCNTLFLT)
+
+#define S_WR_DATATYPE 20
+#define V_WR_DATATYPE(x) ((x) << S_WR_DATATYPE)
+#define F_WR_DATATYPE V_WR_DATATYPE(1U)
+
+#define S_WR_COMPL 21
+#define V_WR_COMPL(x) ((x) << S_WR_COMPL)
+#define F_WR_COMPL V_WR_COMPL(1U)
+
+#define S_WR_EOP 22
+#define V_WR_EOP(x) ((x) << S_WR_EOP)
+#define F_WR_EOP V_WR_EOP(1U)
+
+#define S_WR_SOP 23
+#define V_WR_SOP(x) ((x) << S_WR_SOP)
+#define F_WR_SOP V_WR_SOP(1U)
+
+#define S_WR_OP 24
+#define M_WR_OP 0xFF
+#define V_WR_OP(x) ((x) << S_WR_OP)
+#define G_WR_OP(x) (((x) >> S_WR_OP) & M_WR_OP)
+
+/* wr_lo fields */
+#define S_WR_LEN 0
+#define M_WR_LEN 0xFF
+#define V_WR_LEN(x) ((x) << S_WR_LEN)
+#define G_WR_LEN(x) (((x) >> S_WR_LEN) & M_WR_LEN)
+
+#define S_WR_TID 8
+#define M_WR_TID 0xFFFFF
+#define V_WR_TID(x) ((x) << S_WR_TID)
+#define G_WR_TID(x) (((x) >> S_WR_TID) & M_WR_TID)
+
+#define S_WR_CR_FLUSH 30
+#define V_WR_CR_FLUSH(x) ((x) << S_WR_CR_FLUSH)
+#define F_WR_CR_FLUSH V_WR_CR_FLUSH(1U)
+
+#define S_WR_GEN 31
+#define V_WR_GEN(x) ((x) << S_WR_GEN)
+#define F_WR_GEN V_WR_GEN(1U)
+
+# define WR_HDR struct work_request_hdr wr
+# define RSS_HDR
+#else
+# define WR_HDR
+# define RSS_HDR struct rss_header rss_hdr;
+#endif
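+
+/*
+ * With CHELSIO_FW undefined (the host driver build), WR_HDR places an
+ * explicit work_request_hdr at the front of each host-built request and
+ * RSS_HDR expands to nothing; in a firmware build the reverse holds and
+ * chip-delivered messages carry a leading rss_header instead.  The
+ * structures below use WR_HDR for requests and RSS_HDR for replies and
+ * notifications.
+ */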
+
+/* option 0 lower-half fields */
+#define S_CPL_STATUS 0
+#define M_CPL_STATUS 0xFF
+#define V_CPL_STATUS(x) ((x) << S_CPL_STATUS)
+#define G_CPL_STATUS(x) (((x) >> S_CPL_STATUS) & M_CPL_STATUS)
+
+#define S_INJECT_TIMER 6
+#define V_INJECT_TIMER(x) ((x) << S_INJECT_TIMER)
+#define F_INJECT_TIMER V_INJECT_TIMER(1U)
+
+#define S_NO_OFFLOAD 7
+#define V_NO_OFFLOAD(x) ((x) << S_NO_OFFLOAD)
+#define F_NO_OFFLOAD V_NO_OFFLOAD(1U)
+
+#define S_ULP_MODE 8
+#define M_ULP_MODE 0xF
+#define V_ULP_MODE(x) ((x) << S_ULP_MODE)
+#define G_ULP_MODE(x) (((x) >> S_ULP_MODE) & M_ULP_MODE)
+
+#define S_RCV_BUFSIZ 12
+#define M_RCV_BUFSIZ 0x3FFF
+#define V_RCV_BUFSIZ(x) ((x) << S_RCV_BUFSIZ)
+#define G_RCV_BUFSIZ(x) (((x) >> S_RCV_BUFSIZ) & M_RCV_BUFSIZ)
+
+#define S_TOS 26
+#define M_TOS 0x3F
+#define V_TOS(x) ((x) << S_TOS)
+#define G_TOS(x) (((x) >> S_TOS) & M_TOS)
+
+/* option 0 upper-half fields */
+#define S_DELACK 0
+#define V_DELACK(x) ((x) << S_DELACK)
+#define F_DELACK V_DELACK(1U)
+
+#define S_NO_CONG 1
+#define V_NO_CONG(x) ((x) << S_NO_CONG)
+#define F_NO_CONG V_NO_CONG(1U)
+
+#define S_SRC_MAC_SEL 2
+#define M_SRC_MAC_SEL 0x3
+#define V_SRC_MAC_SEL(x) ((x) << S_SRC_MAC_SEL)
+#define G_SRC_MAC_SEL(x) (((x) >> S_SRC_MAC_SEL) & M_SRC_MAC_SEL)
+
+#define S_L2T_IDX 4
+#define M_L2T_IDX 0x7FF
+#define V_L2T_IDX(x) ((x) << S_L2T_IDX)
+#define G_L2T_IDX(x) (((x) >> S_L2T_IDX) & M_L2T_IDX)
+
+#define S_TX_CHANNEL 15
+#define V_TX_CHANNEL(x) ((x) << S_TX_CHANNEL)
+#define F_TX_CHANNEL V_TX_CHANNEL(1U)
+
+#define S_TCAM_BYPASS 16
+#define V_TCAM_BYPASS(x) ((x) << S_TCAM_BYPASS)
+#define F_TCAM_BYPASS V_TCAM_BYPASS(1U)
+
+#define S_NAGLE 17
+#define V_NAGLE(x) ((x) << S_NAGLE)
+#define F_NAGLE V_NAGLE(1U)
+
+#define S_WND_SCALE 18
+#define M_WND_SCALE 0xF
+#define V_WND_SCALE(x) ((x) << S_WND_SCALE)
+#define G_WND_SCALE(x) (((x) >> S_WND_SCALE) & M_WND_SCALE)
+
+#define S_KEEP_ALIVE 22
+#define V_KEEP_ALIVE(x) ((x) << S_KEEP_ALIVE)
+#define F_KEEP_ALIVE V_KEEP_ALIVE(1U)
+
+#define S_MAX_RETRANS 23
+#define M_MAX_RETRANS 0xF
+#define V_MAX_RETRANS(x) ((x) << S_MAX_RETRANS)
+#define G_MAX_RETRANS(x) (((x) >> S_MAX_RETRANS) & M_MAX_RETRANS)
+
+#define S_MAX_RETRANS_OVERRIDE 27
+#define V_MAX_RETRANS_OVERRIDE(x) ((x) << S_MAX_RETRANS_OVERRIDE)
+#define F_MAX_RETRANS_OVERRIDE V_MAX_RETRANS_OVERRIDE(1U)
+
+#define S_MSS_IDX 28
+#define M_MSS_IDX 0xF
+#define V_MSS_IDX(x) ((x) << S_MSS_IDX)
+#define G_MSS_IDX(x) (((x) >> S_MSS_IDX) & M_MSS_IDX)
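+
+/*
+ * A sketch of composing the two option-0 words for an active open, where
+ * req is a hypothetical struct cpl_act_open_req pointer and wscale,
+ * mss_idx and rcv_bufsiz are hypothetical inputs:
+ *
+ *	req->opt0h = htonl(V_KEEP_ALIVE(1) | F_TCAM_BYPASS |
+ *			   V_WND_SCALE(wscale) | V_MSS_IDX(mss_idx));
+ *	req->opt0l = htonl(V_ULP_MODE(ULP_MODE_NONE) |
+ *			   V_RCV_BUFSIZ(rcv_bufsiz));
+ */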
+
+/* option 1 fields */
+#define S_RSS_ENABLE 0
+#define V_RSS_ENABLE(x) ((x) << S_RSS_ENABLE)
+#define F_RSS_ENABLE V_RSS_ENABLE(1U)
+
+#define S_RSS_MASK_LEN 1
+#define M_RSS_MASK_LEN 0x7
+#define V_RSS_MASK_LEN(x) ((x) << S_RSS_MASK_LEN)
+#define G_RSS_MASK_LEN(x) (((x) >> S_RSS_MASK_LEN) & M_RSS_MASK_LEN)
+
+#define S_CPU_IDX 4
+#define M_CPU_IDX 0x3F
+#define V_CPU_IDX(x) ((x) << S_CPU_IDX)
+#define G_CPU_IDX(x) (((x) >> S_CPU_IDX) & M_CPU_IDX)
+
+#define S_MAC_MATCH_VALID 18
+#define V_MAC_MATCH_VALID(x) ((x) << S_MAC_MATCH_VALID)
+#define F_MAC_MATCH_VALID V_MAC_MATCH_VALID(1U)
+
+#define S_CONN_POLICY 19
+#define M_CONN_POLICY 0x3
+#define V_CONN_POLICY(x) ((x) << S_CONN_POLICY)
+#define G_CONN_POLICY(x) (((x) >> S_CONN_POLICY) & M_CONN_POLICY)
+
+#define S_SYN_DEFENSE 21
+#define V_SYN_DEFENSE(x) ((x) << S_SYN_DEFENSE)
+#define F_SYN_DEFENSE V_SYN_DEFENSE(1U)
+
+#define S_VLAN_PRI 22
+#define M_VLAN_PRI 0x3
+#define V_VLAN_PRI(x) ((x) << S_VLAN_PRI)
+#define G_VLAN_PRI(x) (((x) >> S_VLAN_PRI) & M_VLAN_PRI)
+
+#define S_VLAN_PRI_VALID 24
+#define V_VLAN_PRI_VALID(x) ((x) << S_VLAN_PRI_VALID)
+#define F_VLAN_PRI_VALID V_VLAN_PRI_VALID(1U)
+
+#define S_PKT_TYPE 25
+#define M_PKT_TYPE 0x3
+#define V_PKT_TYPE(x) ((x) << S_PKT_TYPE)
+#define G_PKT_TYPE(x) (((x) >> S_PKT_TYPE) & M_PKT_TYPE)
+
+#define S_MAC_MATCH 27
+#define M_MAC_MATCH 0x1F
+#define V_MAC_MATCH(x) ((x) << S_MAC_MATCH)
+#define G_MAC_MATCH(x) (((x) >> S_MAC_MATCH) & M_MAC_MATCH)
+
+/* option 2 fields */
+#define S_CPU_INDEX 0
+#define M_CPU_INDEX 0x7F
+#define V_CPU_INDEX(x) ((x) << S_CPU_INDEX)
+#define G_CPU_INDEX(x) (((x) >> S_CPU_INDEX) & M_CPU_INDEX)
+
+#define S_CPU_INDEX_VALID 7
+#define V_CPU_INDEX_VALID(x) ((x) << S_CPU_INDEX_VALID)
+#define F_CPU_INDEX_VALID V_CPU_INDEX_VALID(1U)
+
+#define S_RX_COALESCE 8
+#define M_RX_COALESCE 0x3
+#define V_RX_COALESCE(x) ((x) << S_RX_COALESCE)
+#define G_RX_COALESCE(x) (((x) >> S_RX_COALESCE) & M_RX_COALESCE)
+
+#define S_RX_COALESCE_VALID 10
+#define V_RX_COALESCE_VALID(x) ((x) << S_RX_COALESCE_VALID)
+#define F_RX_COALESCE_VALID V_RX_COALESCE_VALID(1U)
+
+#define S_CONG_CONTROL_FLAVOR 11
+#define M_CONG_CONTROL_FLAVOR 0x3
+#define V_CONG_CONTROL_FLAVOR(x) ((x) << S_CONG_CONTROL_FLAVOR)
+#define G_CONG_CONTROL_FLAVOR(x) (((x) >> S_CONG_CONTROL_FLAVOR) & M_CONG_CONTROL_FLAVOR)
+
+#define S_PACING_FLAVOR 13
+#define M_PACING_FLAVOR 0x3
+#define V_PACING_FLAVOR(x) ((x) << S_PACING_FLAVOR)
+#define G_PACING_FLAVOR(x) (((x) >> S_PACING_FLAVOR) & M_PACING_FLAVOR)
+
+#define S_FLAVORS_VALID 15
+#define V_FLAVORS_VALID(x) ((x) << S_FLAVORS_VALID)
+#define F_FLAVORS_VALID V_FLAVORS_VALID(1U)
+
+#define S_RX_FC_DISABLE 16
+#define V_RX_FC_DISABLE(x) ((x) << S_RX_FC_DISABLE)
+#define F_RX_FC_DISABLE V_RX_FC_DISABLE(1U)
+
+#define S_RX_FC_VALID 17
+#define V_RX_FC_VALID(x) ((x) << S_RX_FC_VALID)
+#define F_RX_FC_VALID V_RX_FC_VALID(1U)
+
+struct cpl_pass_open_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be32 local_ip;
+ __be32 peer_ip;
+ __be32 opt0h;
+ __be32 opt0l;
+ __be32 peer_netmask;
+ __be32 opt1;
+};
+
+struct cpl_pass_open_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be32 local_ip;
+ __be32 peer_ip;
+ __u8 resvd[7];
+ __u8 status;
+};
+
+struct cpl_pass_establish {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be32 local_ip;
+ __be32 peer_ip;
+ __be32 tos_tid;
+ __be16 l2t_idx;
+ __be16 tcp_opt;
+ __be32 snd_isn;
+ __be32 rcv_isn;
+};
+
+/* cpl_pass_establish.tos_tid fields */
+#define S_PASS_OPEN_TID 0
+#define M_PASS_OPEN_TID 0xFFFFFF
+#define V_PASS_OPEN_TID(x) ((x) << S_PASS_OPEN_TID)
+#define G_PASS_OPEN_TID(x) (((x) >> S_PASS_OPEN_TID) & M_PASS_OPEN_TID)
+
+#define S_PASS_OPEN_TOS 24
+#define M_PASS_OPEN_TOS 0xFF
+#define V_PASS_OPEN_TOS(x) ((x) << S_PASS_OPEN_TOS)
+#define G_PASS_OPEN_TOS(x) (((x) >> S_PASS_OPEN_TOS) & M_PASS_OPEN_TOS)
+
+/* cpl_pass_establish.l2t_idx fields */
+#define S_L2T_IDX16 5
+#define M_L2T_IDX16 0x7FF
+#define V_L2T_IDX16(x) ((x) << S_L2T_IDX16)
+#define G_L2T_IDX16(x) (((x) >> S_L2T_IDX16) & M_L2T_IDX16)
+
+/* cpl_pass_establish.tcp_opt fields (also apply to cpl_act_establish) */
+#define G_TCPOPT_WSCALE_OK(x) (((x) >> 5) & 1)
+#define G_TCPOPT_SACK(x) (((x) >> 6) & 1)
+#define G_TCPOPT_TSTAMP(x) (((x) >> 7) & 1)
+#define G_TCPOPT_SND_WSCALE(x) (((x) >> 8) & 0xf)
+#define G_TCPOPT_MSS(x) (((x) >> 12) & 0xf)
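+
+/*
+ * Illustrative decode, where est is a hypothetical struct
+ * cpl_pass_establish pointer:
+ *
+ *	uint16_t opt = ntohs(est->tcp_opt);
+ *	int tstamp_ok = G_TCPOPT_TSTAMP(opt);
+ *	int snd_wscale = G_TCPOPT_SND_WSCALE(opt);
+ */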
+
+struct cpl_pass_accept_req {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be32 local_ip;
+ __be32 peer_ip;
+ __be32 tos_tid;
+ struct tcp_options tcp_options;
+ __u8 dst_mac[6];
+ __be16 vlan_tag;
+ __u8 src_mac[6];
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 :3;
+ __u8 addr_idx:3;
+ __u8 port_idx:1;
+ __u8 exact_match:1;
+#else
+ __u8 exact_match:1;
+ __u8 port_idx:1;
+ __u8 addr_idx:3;
+ __u8 :3;
+#endif
+ __u8 rsvd;
+ __be32 rcv_isn;
+ __be32 rsvd2;
+};
+
+struct cpl_pass_accept_rpl {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 opt2;
+ __be32 rsvd;
+ __be32 peer_ip;
+ __be32 opt0h;
+ __be32 opt0l_status;
+};
+
+struct cpl_act_open_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be32 local_ip;
+ __be32 peer_ip;
+ __be32 opt0h;
+ __be32 opt0l;
+ __be32 params;
+ __be32 opt2;
+};
+
+/* cpl_act_open_req.params fields */
+#define S_AOPEN_VLAN_PRI 9
+#define M_AOPEN_VLAN_PRI 0x3
+#define V_AOPEN_VLAN_PRI(x) ((x) << S_AOPEN_VLAN_PRI)
+#define G_AOPEN_VLAN_PRI(x) (((x) >> S_AOPEN_VLAN_PRI) & M_AOPEN_VLAN_PRI)
+
+#define S_AOPEN_VLAN_PRI_VALID 11
+#define V_AOPEN_VLAN_PRI_VALID(x) ((x) << S_AOPEN_VLAN_PRI_VALID)
+#define F_AOPEN_VLAN_PRI_VALID V_AOPEN_VLAN_PRI_VALID(1U)
+
+#define S_AOPEN_PKT_TYPE 12
+#define M_AOPEN_PKT_TYPE 0x3
+#define V_AOPEN_PKT_TYPE(x) ((x) << S_AOPEN_PKT_TYPE)
+#define G_AOPEN_PKT_TYPE(x) (((x) >> S_AOPEN_PKT_TYPE) & M_AOPEN_PKT_TYPE)
+
+#define S_AOPEN_MAC_MATCH 14
+#define M_AOPEN_MAC_MATCH 0x1F
+#define V_AOPEN_MAC_MATCH(x) ((x) << S_AOPEN_MAC_MATCH)
+#define G_AOPEN_MAC_MATCH(x) (((x) >> S_AOPEN_MAC_MATCH) & M_AOPEN_MAC_MATCH)
+
+#define S_AOPEN_MAC_MATCH_VALID 19
+#define V_AOPEN_MAC_MATCH_VALID(x) ((x) << S_AOPEN_MAC_MATCH_VALID)
+#define F_AOPEN_MAC_MATCH_VALID V_AOPEN_MAC_MATCH_VALID(1U)
+
+#define S_AOPEN_IFF_VLAN 20
+#define M_AOPEN_IFF_VLAN 0xFFF
+#define V_AOPEN_IFF_VLAN(x) ((x) << S_AOPEN_IFF_VLAN)
+#define G_AOPEN_IFF_VLAN(x) (((x) >> S_AOPEN_IFF_VLAN) & M_AOPEN_IFF_VLAN)
+
+struct cpl_act_open_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be32 local_ip;
+ __be32 peer_ip;
+ __be32 atid;
+ __u8 rsvd[3];
+ __u8 status;
+};
+
+struct cpl_act_establish {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be32 local_ip;
+ __be32 peer_ip;
+ __be32 tos_tid;
+ __be16 l2t_idx;
+ __be16 tcp_opt;
+ __be32 snd_isn;
+ __be32 rcv_isn;
+};
+
+struct cpl_get_tcb {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 cpuno;
+ __be16 rsvd;
+};
+
+struct cpl_get_tcb_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __u8 rsvd;
+ __u8 status;
+ __be16 len;
+};
+
+struct cpl_set_tcb {
+ WR_HDR;
+ union opcode_tid ot;
+ __u8 reply;
+ __u8 cpu_idx;
+ __be16 len;
+};
+
+/* cpl_set_tcb.reply fields */
+#define S_NO_REPLY 7
+#define V_NO_REPLY(x) ((x) << S_NO_REPLY)
+#define F_NO_REPLY V_NO_REPLY(1U)
+
+struct cpl_set_tcb_field {
+ WR_HDR;
+ union opcode_tid ot;
+ __u8 reply;
+ __u8 cpu_idx;
+ __be16 word;
+ __be64 mask;
+ __be64 val;
+};
+
+struct cpl_set_tcb_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __u8 rsvd[3];
+ __u8 status;
+};
+
+struct cpl_pcmd {
+ WR_HDR;
+ union opcode_tid ot;
+ __u8 rsvd[3];
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 src:1;
+ __u8 bundle:1;
+ __u8 channel:1;
+ __u8 :5;
+#else
+ __u8 :5;
+ __u8 channel:1;
+ __u8 bundle:1;
+ __u8 src:1;
+#endif
+ __be32 pcmd_parm[2];
+};
+
+struct cpl_pcmd_reply {
+ RSS_HDR
+ union opcode_tid ot;
+ __u8 status;
+ __u8 rsvd;
+ __be16 len;
+};
+
+struct cpl_close_con_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 rsvd;
+};
+
+struct cpl_close_con_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __u8 rsvd[3];
+ __u8 status;
+ __be32 snd_nxt;
+ __be32 rcv_nxt;
+};
+
+struct cpl_close_listserv_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __u8 rsvd0;
+ __u8 cpu_idx;
+ __be16 rsvd1;
+};
+
+struct cpl_close_listserv_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __u8 rsvd[3];
+ __u8 status;
+};
+
+struct cpl_abort_req_rss {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 rsvd0;
+ __u8 rsvd1;
+ __u8 status;
+ __u8 rsvd2[6];
+};
+
+struct cpl_abort_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 rsvd0;
+ __u8 rsvd1;
+ __u8 cmd;
+ __u8 rsvd2[6];
+};
+
+struct cpl_abort_rpl_rss {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 rsvd0;
+ __u8 rsvd1;
+ __u8 status;
+ __u8 rsvd2[6];
+};
+
+struct cpl_abort_rpl {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 rsvd0;
+ __u8 rsvd1;
+ __u8 cmd;
+ __u8 rsvd2[6];
+};
+
+struct cpl_peer_close {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 rcv_nxt;
+};
+
+struct tx_data_wr {
+ __be32 wr_hi;
+ __be32 wr_lo;
+ __be32 len;
+ __be32 flags;
+ __be32 sndseq;
+ __be32 param;
+};
+
+/* tx_data_wr.param fields */
+#define S_TX_PORT 0
+#define M_TX_PORT 0x7
+#define V_TX_PORT(x) ((x) << S_TX_PORT)
+#define G_TX_PORT(x) (((x) >> S_TX_PORT) & M_TX_PORT)
+
+#define S_TX_MSS 4
+#define M_TX_MSS 0xF
+#define V_TX_MSS(x) ((x) << S_TX_MSS)
+#define G_TX_MSS(x) (((x) >> S_TX_MSS) & M_TX_MSS)
+
+#define S_TX_QOS 8
+#define M_TX_QOS 0xFF
+#define V_TX_QOS(x) ((x) << S_TX_QOS)
+#define G_TX_QOS(x) (((x) >> S_TX_QOS) & M_TX_QOS)
+
+#define S_TX_SNDBUF 16
+#define M_TX_SNDBUF 0xFFFF
+#define V_TX_SNDBUF(x) ((x) << S_TX_SNDBUF)
+#define G_TX_SNDBUF(x) (((x) >> S_TX_SNDBUF) & M_TX_SNDBUF)
+
+struct cpl_tx_data {
+ union opcode_tid ot;
+ __be32 len;
+ __be32 rsvd;
+ __be16 urg;
+ __be16 flags;
+};
+
+/* cpl_tx_data.flags fields */
+#define S_TX_ULP_SUBMODE 6
+#define M_TX_ULP_SUBMODE 0xF
+#define V_TX_ULP_SUBMODE(x) ((x) << S_TX_ULP_SUBMODE)
+#define G_TX_ULP_SUBMODE(x) (((x) >> S_TX_ULP_SUBMODE) & M_TX_ULP_SUBMODE)
+
+#define S_TX_ULP_MODE 10
+#define M_TX_ULP_MODE 0xF
+#define V_TX_ULP_MODE(x) ((x) << S_TX_ULP_MODE)
+#define G_TX_ULP_MODE(x) (((x) >> S_TX_ULP_MODE) & M_TX_ULP_MODE)
+
+#define S_TX_SHOVE 14
+#define V_TX_SHOVE(x) ((x) << S_TX_SHOVE)
+#define F_TX_SHOVE V_TX_SHOVE(1U)
+
+#define S_TX_MORE 15
+#define V_TX_MORE(x) ((x) << S_TX_MORE)
+#define F_TX_MORE V_TX_MORE(1U)
+
+/* additional tx_data_wr.flags fields */
+#define S_TX_CPU_IDX 0
+#define M_TX_CPU_IDX 0x3F
+#define V_TX_CPU_IDX(x) ((x) << S_TX_CPU_IDX)
+#define G_TX_CPU_IDX(x) (((x) >> S_TX_CPU_IDX) & M_TX_CPU_IDX)
+
+#define S_TX_URG 16
+#define V_TX_URG(x) ((x) << S_TX_URG)
+#define F_TX_URG V_TX_URG(1U)
+
+#define S_TX_CLOSE 17
+#define V_TX_CLOSE(x) ((x) << S_TX_CLOSE)
+#define F_TX_CLOSE V_TX_CLOSE(1U)
+
+#define S_TX_INIT 18
+#define V_TX_INIT(x) ((x) << S_TX_INIT)
+#define F_TX_INIT V_TX_INIT(1U)
+
+#define S_TX_IMM_ACK 19
+#define V_TX_IMM_ACK(x) ((x) << S_TX_IMM_ACK)
+#define F_TX_IMM_ACK V_TX_IMM_ACK(1U)
+
+#define S_TX_IMM_DMA 20
+#define V_TX_IMM_DMA(x) ((x) << S_TX_IMM_DMA)
+#define F_TX_IMM_DMA V_TX_IMM_DMA(1U)
+
+struct cpl_tx_data_ack {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 ack_seq;
+};
+
+struct cpl_wr_ack {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 credits;
+ __be16 rsvd;
+ __be32 snd_nxt;
+ __be32 snd_una;
+};
+
+struct cpl_rdma_ec_status {
+ RSS_HDR
+ union opcode_tid ot;
+ __u8 rsvd[3];
+ __u8 status;
+};
+
+struct mngt_pktsched_wr {
+ __be32 wr_hi;
+ __be32 wr_lo;
+ __u8 mngt_opcode;
+ __u8 rsvd[7];
+ __u8 sched;
+ __u8 idx;
+ __u8 min;
+ __u8 max;
+ __u8 binding;
+ __u8 rsvd1[3];
+};
+
+struct cpl_iscsi_hdr {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 pdu_len_ddp;
+ __be16 len;
+ __be32 seq;
+ __be16 urg;
+ __u8 rsvd;
+ __u8 status;
+};
+
+/* cpl_iscsi_hdr.pdu_len_ddp fields */
+#define S_ISCSI_PDU_LEN 0
+#define M_ISCSI_PDU_LEN 0x7FFF
+#define V_ISCSI_PDU_LEN(x) ((x) << S_ISCSI_PDU_LEN)
+#define G_ISCSI_PDU_LEN(x) (((x) >> S_ISCSI_PDU_LEN) & M_ISCSI_PDU_LEN)
+
+#define S_ISCSI_DDP 15
+#define V_ISCSI_DDP(x) ((x) << S_ISCSI_DDP)
+#define F_ISCSI_DDP V_ISCSI_DDP(1U)
+
+struct cpl_rx_data {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 rsvd;
+ __be16 len;
+ __be32 seq;
+ __be16 urg;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 dack_mode:2;
+ __u8 psh:1;
+ __u8 heartbeat:1;
+ __u8 :4;
+#else
+ __u8 :4;
+ __u8 heartbeat:1;
+ __u8 psh:1;
+ __u8 dack_mode:2;
+#endif
+ __u8 status;
+};
+
+struct cpl_rx_data_ack {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 credit_dack;
+};
+
+/* cpl_rx_data_ack.credit_dack fields */
+#define S_RX_CREDITS 0
+#define M_RX_CREDITS 0x7FFFFFF
+#define V_RX_CREDITS(x) ((x) << S_RX_CREDITS)
+#define G_RX_CREDITS(x) (((x) >> S_RX_CREDITS) & M_RX_CREDITS)
+
+#define S_RX_MODULATE 27
+#define V_RX_MODULATE(x) ((x) << S_RX_MODULATE)
+#define F_RX_MODULATE V_RX_MODULATE(1U)
+
+#define S_RX_FORCE_ACK 28
+#define V_RX_FORCE_ACK(x) ((x) << S_RX_FORCE_ACK)
+#define F_RX_FORCE_ACK V_RX_FORCE_ACK(1U)
+
+#define S_RX_DACK_MODE 29
+#define M_RX_DACK_MODE 0x3
+#define V_RX_DACK_MODE(x) ((x) << S_RX_DACK_MODE)
+#define G_RX_DACK_MODE(x) (((x) >> S_RX_DACK_MODE) & M_RX_DACK_MODE)
+
+#define S_RX_DACK_CHANGE 31
+#define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE)
+#define F_RX_DACK_CHANGE V_RX_DACK_CHANGE(1U)
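+
+/*
+ * A sketch of returning receive credits, where req is a hypothetical
+ * struct cpl_rx_data_ack pointer and credits a hypothetical count:
+ *
+ *	req->credit_dack = htonl(V_RX_CREDITS(credits) | F_RX_FORCE_ACK);
+ */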
+
+struct cpl_rx_urg_notify {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 seq;
+};
+
+struct cpl_rx_ddp_complete {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 ddp_report;
+};
+
+struct cpl_rx_data_ddp {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 urg;
+ __be16 len;
+ __be32 seq;
+ union {
+ __be32 nxt_seq;
+ __be32 ddp_report;
+ } __U;
+ __be32 ulp_crc;
+ __be32 ddpvld_status;
+};
+
+/* cpl_rx_data_ddp.ddpvld_status fields */
+#define S_DDP_STATUS 0
+#define M_DDP_STATUS 0xFF
+#define V_DDP_STATUS(x) ((x) << S_DDP_STATUS)
+#define G_DDP_STATUS(x) (((x) >> S_DDP_STATUS) & M_DDP_STATUS)
+
+#define S_DDP_VALID 15
+#define M_DDP_VALID 0x1FFFF
+#define V_DDP_VALID(x) ((x) << S_DDP_VALID)
+#define G_DDP_VALID(x) (((x) >> S_DDP_VALID) & M_DDP_VALID)
+
+#define S_DDP_PPOD_MISMATCH 15
+#define V_DDP_PPOD_MISMATCH(x) ((x) << S_DDP_PPOD_MISMATCH)
+#define F_DDP_PPOD_MISMATCH V_DDP_PPOD_MISMATCH(1U)
+
+#define S_DDP_PDU 16
+#define V_DDP_PDU(x) ((x) << S_DDP_PDU)
+#define F_DDP_PDU V_DDP_PDU(1U)
+
+#define S_DDP_LLIMIT_ERR 17
+#define V_DDP_LLIMIT_ERR(x) ((x) << S_DDP_LLIMIT_ERR)
+#define F_DDP_LLIMIT_ERR V_DDP_LLIMIT_ERR(1U)
+
+#define S_DDP_PPOD_PARITY_ERR 18
+#define V_DDP_PPOD_PARITY_ERR(x) ((x) << S_DDP_PPOD_PARITY_ERR)
+#define F_DDP_PPOD_PARITY_ERR V_DDP_PPOD_PARITY_ERR(1U)
+
+#define S_DDP_PADDING_ERR 19
+#define V_DDP_PADDING_ERR(x) ((x) << S_DDP_PADDING_ERR)
+#define F_DDP_PADDING_ERR V_DDP_PADDING_ERR(1U)
+
+#define S_DDP_HDRCRC_ERR 20
+#define V_DDP_HDRCRC_ERR(x) ((x) << S_DDP_HDRCRC_ERR)
+#define F_DDP_HDRCRC_ERR V_DDP_HDRCRC_ERR(1U)
+
+#define S_DDP_DATACRC_ERR 21
+#define V_DDP_DATACRC_ERR(x) ((x) << S_DDP_DATACRC_ERR)
+#define F_DDP_DATACRC_ERR V_DDP_DATACRC_ERR(1U)
+
+#define S_DDP_INVALID_TAG 22
+#define V_DDP_INVALID_TAG(x) ((x) << S_DDP_INVALID_TAG)
+#define F_DDP_INVALID_TAG V_DDP_INVALID_TAG(1U)
+
+#define S_DDP_ULIMIT_ERR 23
+#define V_DDP_ULIMIT_ERR(x) ((x) << S_DDP_ULIMIT_ERR)
+#define F_DDP_ULIMIT_ERR V_DDP_ULIMIT_ERR(1U)
+
+#define S_DDP_OFFSET_ERR 24
+#define V_DDP_OFFSET_ERR(x) ((x) << S_DDP_OFFSET_ERR)
+#define F_DDP_OFFSET_ERR V_DDP_OFFSET_ERR(1U)
+
+#define S_DDP_COLOR_ERR 25
+#define V_DDP_COLOR_ERR(x) ((x) << S_DDP_COLOR_ERR)
+#define F_DDP_COLOR_ERR V_DDP_COLOR_ERR(1U)
+
+#define S_DDP_TID_MISMATCH 26
+#define V_DDP_TID_MISMATCH(x) ((x) << S_DDP_TID_MISMATCH)
+#define F_DDP_TID_MISMATCH V_DDP_TID_MISMATCH(1U)
+
+#define S_DDP_INVALID_PPOD 27
+#define V_DDP_INVALID_PPOD(x) ((x) << S_DDP_INVALID_PPOD)
+#define F_DDP_INVALID_PPOD V_DDP_INVALID_PPOD(1U)
+
+#define S_DDP_ULP_MODE 28
+#define M_DDP_ULP_MODE 0xF
+#define V_DDP_ULP_MODE(x) ((x) << S_DDP_ULP_MODE)
+#define G_DDP_ULP_MODE(x) (((x) >> S_DDP_ULP_MODE) & M_DDP_ULP_MODE)
+
+/* cpl_rx_data_ddp.ddp_report fields */
+#define S_DDP_OFFSET 0
+#define M_DDP_OFFSET 0x3FFFFF
+#define V_DDP_OFFSET(x) ((x) << S_DDP_OFFSET)
+#define G_DDP_OFFSET(x) (((x) >> S_DDP_OFFSET) & M_DDP_OFFSET)
+
+#define S_DDP_URG 24
+#define V_DDP_URG(x) ((x) << S_DDP_URG)
+#define F_DDP_URG V_DDP_URG(1U)
+
+#define S_DDP_PSH 25
+#define V_DDP_PSH(x) ((x) << S_DDP_PSH)
+#define F_DDP_PSH V_DDP_PSH(1U)
+
+#define S_DDP_BUF_COMPLETE 26
+#define V_DDP_BUF_COMPLETE(x) ((x) << S_DDP_BUF_COMPLETE)
+#define F_DDP_BUF_COMPLETE V_DDP_BUF_COMPLETE(1U)
+
+#define S_DDP_BUF_TIMED_OUT 27
+#define V_DDP_BUF_TIMED_OUT(x) ((x) << S_DDP_BUF_TIMED_OUT)
+#define F_DDP_BUF_TIMED_OUT V_DDP_BUF_TIMED_OUT(1U)
+
+#define S_DDP_BUF_IDX 28
+#define V_DDP_BUF_IDX(x) ((x) << S_DDP_BUF_IDX)
+#define F_DDP_BUF_IDX V_DDP_BUF_IDX(1U)
+
+struct cpl_tx_pkt {
+ WR_HDR;
+ __be32 cntrl;
+ __be32 len;
+};
+
+struct cpl_tx_pkt_lso {
+ WR_HDR;
+ __be32 cntrl;
+ __be32 len;
+
+ __be32 rsvd;
+ __be32 lso_info;
+};
+
+/* cpl_tx_pkt*.cntrl fields */
+#define S_TXPKT_VLAN 0
+#define M_TXPKT_VLAN 0xFFFF
+#define V_TXPKT_VLAN(x) ((x) << S_TXPKT_VLAN)
+#define G_TXPKT_VLAN(x) (((x) >> S_TXPKT_VLAN) & M_TXPKT_VLAN)
+
+#define S_TXPKT_INTF 16
+#define M_TXPKT_INTF 0xF
+#define V_TXPKT_INTF(x) ((x) << S_TXPKT_INTF)
+#define G_TXPKT_INTF(x) (((x) >> S_TXPKT_INTF) & M_TXPKT_INTF)
+
+#define S_TXPKT_IPCSUM_DIS 20
+#define V_TXPKT_IPCSUM_DIS(x) ((x) << S_TXPKT_IPCSUM_DIS)
+#define F_TXPKT_IPCSUM_DIS V_TXPKT_IPCSUM_DIS(1U)
+
+#define S_TXPKT_L4CSUM_DIS 21
+#define V_TXPKT_L4CSUM_DIS(x) ((x) << S_TXPKT_L4CSUM_DIS)
+#define F_TXPKT_L4CSUM_DIS V_TXPKT_L4CSUM_DIS(1U)
+
+#define S_TXPKT_VLAN_VLD 22
+#define V_TXPKT_VLAN_VLD(x) ((x) << S_TXPKT_VLAN_VLD)
+#define F_TXPKT_VLAN_VLD V_TXPKT_VLAN_VLD(1U)
+
+#define S_TXPKT_LOOPBACK 23
+#define V_TXPKT_LOOPBACK(x) ((x) << S_TXPKT_LOOPBACK)
+#define F_TXPKT_LOOPBACK V_TXPKT_LOOPBACK(1U)
+
+#define S_TXPKT_OPCODE 24
+#define M_TXPKT_OPCODE 0xFF
+#define V_TXPKT_OPCODE(x) ((x) << S_TXPKT_OPCODE)
+#define G_TXPKT_OPCODE(x) (((x) >> S_TXPKT_OPCODE) & M_TXPKT_OPCODE)
+
+/* cpl_tx_pkt_lso.lso_info fields */
+#define S_LSO_MSS 0
+#define M_LSO_MSS 0x3FFF
+#define V_LSO_MSS(x) ((x) << S_LSO_MSS)
+#define G_LSO_MSS(x) (((x) >> S_LSO_MSS) & M_LSO_MSS)
+
+#define S_LSO_ETH_TYPE 14
+#define M_LSO_ETH_TYPE 0x3
+#define V_LSO_ETH_TYPE(x) ((x) << S_LSO_ETH_TYPE)
+#define G_LSO_ETH_TYPE(x) (((x) >> S_LSO_ETH_TYPE) & M_LSO_ETH_TYPE)
+
+#define S_LSO_TCPHDR_WORDS 16
+#define M_LSO_TCPHDR_WORDS 0xF
+#define V_LSO_TCPHDR_WORDS(x) ((x) << S_LSO_TCPHDR_WORDS)
+#define G_LSO_TCPHDR_WORDS(x) (((x) >> S_LSO_TCPHDR_WORDS) & M_LSO_TCPHDR_WORDS)
+
+#define S_LSO_IPHDR_WORDS 20
+#define M_LSO_IPHDR_WORDS 0xF
+#define V_LSO_IPHDR_WORDS(x) ((x) << S_LSO_IPHDR_WORDS)
+#define G_LSO_IPHDR_WORDS(x) (((x) >> S_LSO_IPHDR_WORDS) & M_LSO_IPHDR_WORDS)
+
+#define S_LSO_IPV6 24
+#define V_LSO_IPV6(x) ((x) << S_LSO_IPV6)
+#define F_LSO_IPV6 V_LSO_IPV6(1U)
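+
+/*
+ * A sketch of filling in the LSO word, where hdr is a hypothetical struct
+ * cpl_tx_pkt_lso pointer, mss is the segment size, and tcp_hlen/ip_hlen
+ * are header lengths in bytes (the fields take 32-bit words):
+ *
+ *	hdr->lso_info = htonl(V_LSO_MSS(mss) | V_LSO_ETH_TYPE(CPL_ETH_II) |
+ *			      V_LSO_TCPHDR_WORDS(tcp_hlen >> 2) |
+ *			      V_LSO_IPHDR_WORDS(ip_hlen >> 2));
+ */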
+
+struct cpl_trace_pkt {
+#ifdef CHELSIO_FW
+ __u8 rss_opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 err:1;
+ __u8 :7;
+#else
+ __u8 :7;
+ __u8 err:1;
+#endif
+ __u8 rsvd0;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 qid:4;
+ __u8 :4;
+#else
+ __u8 :4;
+ __u8 qid:4;
+#endif
+ __be32 tstamp;
+#endif /* CHELSIO_FW */
+
+ __u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 iff:4;
+ __u8 :4;
+#else
+ __u8 :4;
+ __u8 iff:4;
+#endif
+ __u8 rsvd[4];
+ __be16 len;
+};
+
+struct cpl_rx_pkt {
+ RSS_HDR
+ __u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 iff:4;
+ __u8 csum_valid:1;
+ __u8 ipmi_pkt:1;
+ __u8 vlan_valid:1;
+ __u8 fragment:1;
+#else
+ __u8 fragment:1;
+ __u8 vlan_valid:1;
+ __u8 ipmi_pkt:1;
+ __u8 csum_valid:1;
+ __u8 iff:4;
+#endif
+ __be16 csum;
+ __be16 vlan;
+ __be16 len;
+};
+
+struct cpl_l2t_write_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 params;
+ __u8 rsvd[2];
+ __u8 dst_mac[6];
+};
+
+/* cpl_l2t_write_req.params fields */
+#define S_L2T_W_IDX 0
+#define M_L2T_W_IDX 0x7FF
+#define V_L2T_W_IDX(x) ((x) << S_L2T_W_IDX)
+#define G_L2T_W_IDX(x) (((x) >> S_L2T_W_IDX) & M_L2T_W_IDX)
+
+#define S_L2T_W_VLAN 11
+#define M_L2T_W_VLAN 0xFFF
+#define V_L2T_W_VLAN(x) ((x) << S_L2T_W_VLAN)
+#define G_L2T_W_VLAN(x) (((x) >> S_L2T_W_VLAN) & M_L2T_W_VLAN)
+
+#define S_L2T_W_IFF 23
+#define M_L2T_W_IFF 0xF
+#define V_L2T_W_IFF(x) ((x) << S_L2T_W_IFF)
+#define G_L2T_W_IFF(x) (((x) >> S_L2T_W_IFF) & M_L2T_W_IFF)
+
+#define S_L2T_W_PRIO 27
+#define M_L2T_W_PRIO 0x7
+#define V_L2T_W_PRIO(x) ((x) << S_L2T_W_PRIO)
+#define G_L2T_W_PRIO(x) (((x) >> S_L2T_W_PRIO) & M_L2T_W_PRIO)
+
+struct cpl_l2t_write_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __u8 status;
+ __u8 rsvd[3];
+};
+
+struct cpl_l2t_read_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 rsvd;
+ __be16 l2t_idx;
+};
+
+struct cpl_l2t_read_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 params;
+ __u8 rsvd[2];
+ __u8 dst_mac[6];
+};
+
+/* cpl_l2t_read_rpl.params fields */
+#define S_L2T_R_PRIO 0
+#define M_L2T_R_PRIO 0x7
+#define V_L2T_R_PRIO(x) ((x) << S_L2T_R_PRIO)
+#define G_L2T_R_PRIO(x) (((x) >> S_L2T_R_PRIO) & M_L2T_R_PRIO)
+
+#define S_L2T_R_VLAN 8
+#define M_L2T_R_VLAN 0xFFF
+#define V_L2T_R_VLAN(x) ((x) << S_L2T_R_VLAN)
+#define G_L2T_R_VLAN(x) (((x) >> S_L2T_R_VLAN) & M_L2T_R_VLAN)
+
+#define S_L2T_R_IFF 20
+#define M_L2T_R_IFF 0xF
+#define V_L2T_R_IFF(x) ((x) << S_L2T_R_IFF)
+#define G_L2T_R_IFF(x) (((x) >> S_L2T_R_IFF) & M_L2T_R_IFF)
+
+#define S_L2T_STATUS 24
+#define M_L2T_STATUS 0xFF
+#define V_L2T_STATUS(x) ((x) << S_L2T_STATUS)
+#define G_L2T_STATUS(x) (((x) >> S_L2T_STATUS) & M_L2T_STATUS)
+
+struct cpl_smt_write_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __u8 rsvd0;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 mtu_idx:4;
+ __u8 iff:4;
+#else
+ __u8 iff:4;
+ __u8 mtu_idx:4;
+#endif
+ __be16 rsvd2;
+ __be16 rsvd3;
+ __u8 src_mac1[6];
+ __be16 rsvd4;
+ __u8 src_mac0[6];
+};
+
+struct cpl_smt_write_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __u8 status;
+ __u8 rsvd[3];
+};
+
+struct cpl_smt_read_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __u8 rsvd0;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 :4;
+ __u8 iff:4;
+#else
+ __u8 iff:4;
+ __u8 :4;
+#endif
+ __be16 rsvd2;
+};
+
+struct cpl_smt_read_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __u8 status;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 mtu_idx:4;
+ __u8 :4;
+#else
+ __u8 :4;
+ __u8 mtu_idx:4;
+#endif
+ __be16 rsvd2;
+ __be16 rsvd3;
+ __u8 src_mac1[6];
+ __be16 rsvd4;
+ __u8 src_mac0[6];
+};
+
+struct cpl_rte_delete_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 params;
+};
+
+/* { cpl_rte_delete_req, cpl_rte_read_req }.params fields */
+#define S_RTE_REQ_LUT_IX 8
+#define M_RTE_REQ_LUT_IX 0x7FF
+#define V_RTE_REQ_LUT_IX(x) ((x) << S_RTE_REQ_LUT_IX)
+#define G_RTE_REQ_LUT_IX(x) (((x) >> S_RTE_REQ_LUT_IX) & M_RTE_REQ_LUT_IX)
+
+#define S_RTE_REQ_LUT_BASE 19
+#define M_RTE_REQ_LUT_BASE 0x7FF
+#define V_RTE_REQ_LUT_BASE(x) ((x) << S_RTE_REQ_LUT_BASE)
+#define G_RTE_REQ_LUT_BASE(x) (((x) >> S_RTE_REQ_LUT_BASE) & M_RTE_REQ_LUT_BASE)
+
+#define S_RTE_READ_REQ_SELECT 31
+#define V_RTE_READ_REQ_SELECT(x) ((x) << S_RTE_READ_REQ_SELECT)
+#define F_RTE_READ_REQ_SELECT V_RTE_READ_REQ_SELECT(1U)
+
+struct cpl_rte_delete_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __u8 status;
+ __u8 rsvd[3];
+};
+
+struct cpl_rte_write_req {
+ WR_HDR;
+ union opcode_tid ot;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 :6;
+ __u8 write_tcam:1;
+ __u8 write_l2t_lut:1;
+#else
+ __u8 write_l2t_lut:1;
+ __u8 write_tcam:1;
+ __u8 :6;
+#endif
+ __u8 rsvd[3];
+ __be32 lut_params;
+ __be16 rsvd2;
+ __be16 l2t_idx;
+ __be32 netmask;
+ __be32 faddr;
+};
+
+/* cpl_rte_write_req.lut_params fields */
+#define S_RTE_WRITE_REQ_LUT_IX 10
+#define M_RTE_WRITE_REQ_LUT_IX 0x7FF
+#define V_RTE_WRITE_REQ_LUT_IX(x) ((x) << S_RTE_WRITE_REQ_LUT_IX)
+#define G_RTE_WRITE_REQ_LUT_IX(x) (((x) >> S_RTE_WRITE_REQ_LUT_IX) & M_RTE_WRITE_REQ_LUT_IX)
+
+#define S_RTE_WRITE_REQ_LUT_BASE 21
+#define M_RTE_WRITE_REQ_LUT_BASE 0x7FF
+#define V_RTE_WRITE_REQ_LUT_BASE(x) ((x) << S_RTE_WRITE_REQ_LUT_BASE)
+#define G_RTE_WRITE_REQ_LUT_BASE(x) (((x) >> S_RTE_WRITE_REQ_LUT_BASE) & M_RTE_WRITE_REQ_LUT_BASE)
+
+struct cpl_rte_write_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __u8 status;
+ __u8 rsvd[3];
+};
+
+struct cpl_rte_read_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 params;
+};
+
+struct cpl_rte_read_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __u8 status;
+ __u8 rsvd0;
+ __be16 l2t_idx;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 :7;
+ __u8 select:1;
+#else
+ __u8 select:1;
+ __u8 :7;
+#endif
+ __u8 rsvd2[3];
+ __be32 addr;
+};
+
+struct cpl_tid_release {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 rsvd;
+};
+
+struct cpl_barrier {
+ WR_HDR;
+ __u8 opcode;
+ __u8 rsvd[7];
+};
+
+struct cpl_rdma_read_req {
+ __u8 opcode;
+ __u8 rsvd[15];
+};
+
+struct cpl_rdma_terminate {
+#ifdef CHELSIO_FW
+ __u8 opcode;
+ __u8 rsvd[2];
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 rspq:3;
+ __u8 :5;
+#else
+ __u8 :5;
+ __u8 rspq:3;
+#endif
+ __be32 tid_len;
+#endif
+ __be32 msn;
+ __be32 mo;
+ __u8 data[0];
+};
+
+/* cpl_rdma_terminate.tid_len fields */
+#define S_FLIT_CNT 0
+#define M_FLIT_CNT 0xFF
+#define V_FLIT_CNT(x) ((x) << S_FLIT_CNT)
+#define G_FLIT_CNT(x) (((x) >> S_FLIT_CNT) & M_FLIT_CNT)
+
+#define S_TERM_TID 8
+#define M_TERM_TID 0xFFFFF
+#define V_TERM_TID(x) ((x) << S_TERM_TID)
+#define G_TERM_TID(x) (((x) >> S_TERM_TID) & M_TERM_TID)
+#endif /* T3_CPL_H */
diff --git a/sys/dev/cxgb/common/cxgb_t3_hw.c b/sys/dev/cxgb/common/cxgb_t3_hw.c
new file mode 100644
index 0000000..701b5c7
--- /dev/null
+++ b/sys/dev/cxgb/common/cxgb_t3_hw.c
@@ -0,0 +1,3399 @@
+/**************************************************************************
+
+Copyright (c) 2007, Chelsio Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Chelsio Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/cxgb/common/cxgb_common.h>
+#include <dev/cxgb/common/cxgb_regs.h>
+#include <dev/cxgb/common/cxgb_sge_defs.h>
+#include <dev/cxgb/common/cxgb_firmware_exports.h>
+
+/**
+ * t3_wait_op_done_val - wait until an operation is completed
+ * @adapter: the adapter performing the operation
+ * @reg: the register to check for completion
+ * @mask: a single-bit field within @reg that indicates completion
+ * @polarity: the value of the field when the operation is completed
+ * @attempts: number of check iterations
+ * @delay: delay in usecs between iterations
+ * @valp: where to store the value of the register at completion time
+ *
+ * Wait until an operation is completed by checking a bit in a register
+ * up to @attempts times. If @valp is not NULL the value of the register
+ * at the time it indicated completion is stored there. Returns 0 if the
+ * operation completes and -EAGAIN otherwise.
+ */
+int t3_wait_op_done_val(adapter_t *adapter, int reg, u32 mask, int polarity,
+ int attempts, int delay, u32 *valp)
+{
+ while (1) {
+ u32 val = t3_read_reg(adapter, reg);
+
+ if (!!(val & mask) == polarity) {
+ if (valp)
+ *valp = val;
+ return 0;
+ }
+ if (--attempts == 0)
+ return -EAGAIN;
+ if (delay)
+ udelay(delay);
+ }
+}
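+
+/*
+ * Usage sketch (illustrative only): poll A_SF_OP until its busy bit clears,
+ * up to 10 times with 5us between polls, keeping the register value seen at
+ * completion:
+ *
+ *	u32 val;
+ *	int ret = t3_wait_op_done_val(adapter, A_SF_OP, F_BUSY, 0, 10, 5,
+ *				      &val);
+ */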
+
+/**
+ * t3_write_regs - write a bunch of registers
+ * @adapter: the adapter to program
+ * @p: an array of register address/register value pairs
+ * @n: the number of address/value pairs
+ * @offset: register address offset
+ *
+ * Takes an array of register address/register value pairs and writes each
+ * value to the corresponding register. Register addresses are adjusted
+ * by the supplied offset.
+ */
+void t3_write_regs(adapter_t *adapter, const struct addr_val_pair *p, int n,
+ unsigned int offset)
+{
+ while (n--) {
+ t3_write_reg(adapter, p->reg_addr + offset, p->val);
+ p++;
+ }
+}
+
+/**
+ * t3_set_reg_field - set a register field to a value
+ * @adapter: the adapter to program
+ * @addr: the register address
+ * @mask: specifies the portion of the register to modify
+ * @val: the new value for the register field
+ *
+ * Sets a register field specified by the supplied mask to the
+ * given value.
+ */
+void t3_set_reg_field(adapter_t *adapter, unsigned int addr, u32 mask, u32 val)
+{
+ u32 v = t3_read_reg(adapter, addr) & ~mask;
+
+ t3_write_reg(adapter, addr, v | val);
+ (void) t3_read_reg(adapter, addr); /* flush */
+}
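+
+/*
+ * Usage sketch (illustrative only): clear the MI1 preamble-enable bit while
+ * leaving the rest of A_MI1_CFG untouched:
+ *
+ *	t3_set_reg_field(adapter, A_MI1_CFG, F_PREEN, 0);
+ */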
+
+/**
+ * t3_read_indirect - read indirectly addressed registers
+ * @adap: the adapter
+ * @addr_reg: register holding the indirect address
+ * @data_reg: register holding the value of the indirect register
+ * @vals: where the read register values are stored
+ * @nregs: how many indirect registers to read
+ * @start_idx: index of first indirect register to read
+ *
+ * Reads registers that are accessed indirectly through an address/data
+ * register pair.
+ */
+void t3_read_indirect(adapter_t *adap, unsigned int addr_reg,
+ unsigned int data_reg, u32 *vals, unsigned int nregs,
+ unsigned int start_idx)
+{
+ while (nregs--) {
+ t3_write_reg(adap, addr_reg, start_idx);
+ *vals++ = t3_read_reg(adap, data_reg);
+ start_idx++;
+ }
+}
+
+/**
+ * t3_mc7_bd_read - read from MC7 through backdoor accesses
+ * @mc7: identifies MC7 to read from
+ * @start: index of first 64-bit word to read
+ * @n: number of 64-bit words to read
+ * @buf: where to store the read result
+ *
+ * Read n 64-bit words from MC7 starting at word start, using backdoor
+ * accesses.
+ */
+int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
+ u64 *buf)
+{
+ static int shift[] = { 0, 0, 16, 24 };
+ static int step[] = { 0, 32, 16, 8 };
+
+ unsigned int size64 = mc7->size / 8; /* # of 64-bit words */
+ adapter_t *adap = mc7->adapter;
+
+ if (start >= size64 || start + n > size64)
+ return -EINVAL;
+
+ start *= (8 << mc7->width);
+ while (n--) {
+ int i;
+ u64 val64 = 0;
+
+ for (i = (1 << mc7->width) - 1; i >= 0; --i) {
+ int attempts = 10;
+ u32 val;
+
+ t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR,
+ start);
+ t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
+ val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
+ while ((val & F_BUSY) && attempts--)
+ val = t3_read_reg(adap,
+ mc7->offset + A_MC7_BD_OP);
+ if (val & F_BUSY)
+ return -EIO;
+
+ val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
+ if (mc7->width == 0) {
+ val64 = t3_read_reg(adap,
+ mc7->offset + A_MC7_BD_DATA0);
+ val64 |= (u64)val << 32;
+ } else {
+ if (mc7->width > 1)
+ val >>= shift[mc7->width];
+ val64 |= (u64)val << (step[mc7->width] * i);
+ }
+ start += 8;
+ }
+ *buf++ = val64;
+ }
+ return 0;
+}
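+
+/*
+ * Note: mc7->width appears to encode the memory organization; width 0 reads
+ * a full 64-bit word from DATA0/DATA1 in one beat, while non-zero codes
+ * assemble 1 << width beats of step[width] bits each into one 64-bit result.
+ */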
+
+/*
+ * Initialize MI1.
+ */
+static void mi1_init(adapter_t *adap, const struct adapter_info *ai)
+{
+ u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
+ u32 val = F_PREEN | V_MDIINV(ai->mdiinv) | V_MDIEN(ai->mdien) |
+ V_CLKDIV(clkdiv);
+
+ if (!(ai->caps & SUPPORTED_10000baseT_Full))
+ val |= V_ST(1);
+ t3_write_reg(adap, A_MI1_CFG, val);
+}
+
+#define MDIO_ATTEMPTS 10
+
+/*
+ * MI1 read/write operations for direct-addressed PHYs.
+ */
+static int mi1_read(adapter_t *adapter, int phy_addr, int mmd_addr,
+ int reg_addr, unsigned int *valp)
+{
+ int ret;
+ u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
+
+ if (mmd_addr)
+ return -EINVAL;
+
+ MDIO_LOCK(adapter);
+ t3_write_reg(adapter, A_MI1_ADDR, addr);
+ t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
+ ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
+ if (!ret)
+ *valp = t3_read_reg(adapter, A_MI1_DATA);
+ MDIO_UNLOCK(adapter);
+ return ret;
+}
+
+static int mi1_write(adapter_t *adapter, int phy_addr, int mmd_addr,
+ int reg_addr, unsigned int val)
+{
+ int ret;
+ u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
+
+ if (mmd_addr)
+ return -EINVAL;
+
+ MDIO_LOCK(adapter);
+ t3_write_reg(adapter, A_MI1_ADDR, addr);
+ t3_write_reg(adapter, A_MI1_DATA, val);
+ t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
+ ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
+ MDIO_UNLOCK(adapter);
+ return ret;
+}
+
+static struct mdio_ops mi1_mdio_ops = {
+ mi1_read,
+ mi1_write
+};
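+
+/*
+ * Note: for the direct (clause 22 style) accesses above, the opcode written
+ * to A_MI1_OP selects the bus cycle: V_MDI_OP(1) performs a write and
+ * V_MDI_OP(2) a read. The indirect (clause 45 style) helpers below first
+ * issue V_MDI_OP(0) to latch the target register address.
+ */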
+
+/*
+ * MI1 read/write operations for indirect-addressed PHYs.
+ */
+static int mi1_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr,
+ int reg_addr, unsigned int *valp)
+{
+ int ret;
+ u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
+
+ MDIO_LOCK(adapter);
+ t3_write_reg(adapter, A_MI1_ADDR, addr);
+ t3_write_reg(adapter, A_MI1_DATA, reg_addr);
+ t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
+ ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
+ if (!ret) {
+ t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
+ ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
+ MDIO_ATTEMPTS, 20);
+ if (!ret)
+ *valp = t3_read_reg(adapter, A_MI1_DATA);
+ }
+ MDIO_UNLOCK(adapter);
+ return ret;
+}
+
+static int mi1_ext_write(adapter_t *adapter, int phy_addr, int mmd_addr,
+ int reg_addr, unsigned int val)
+{
+ int ret;
+ u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
+
+ MDIO_LOCK(adapter);
+ t3_write_reg(adapter, A_MI1_ADDR, addr);
+ t3_write_reg(adapter, A_MI1_DATA, reg_addr);
+ t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
+ ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
+ if (!ret) {
+ t3_write_reg(adapter, A_MI1_DATA, val);
+ t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
+ ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
+ MDIO_ATTEMPTS, 20);
+ }
+ MDIO_UNLOCK(adapter);
+ return ret;
+}
+
+static struct mdio_ops mi1_mdio_ext_ops = {
+ mi1_ext_read,
+ mi1_ext_write
+};
+
+/**
+ * t3_mdio_change_bits - modify the value of a PHY register
+ * @phy: the PHY to operate on
+ * @mmd: the device address
+ * @reg: the register address
+ * @clear: what part of the register value to mask off
+ * @set: what part of the register value to set
+ *
+ * Changes the value of a PHY register by applying a mask to its current
+ * value and ORing the result with a new value.
+ */
+int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
+ unsigned int set)
+{
+ int ret;
+ unsigned int val;
+
+ ret = mdio_read(phy, mmd, reg, &val);
+ if (!ret) {
+ val &= ~clear;
+ ret = mdio_write(phy, mmd, reg, val | set);
+ }
+ return ret;
+}
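+
+/*
+ * Usage sketch (illustrative only): clear the power-down bit and set the
+ * reset bit of the control register in a single read-modify-write, exactly
+ * as t3_phy_reset() below does:
+ *
+ *	t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
+ */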
+
+/**
+ * t3_phy_reset - reset a PHY block
+ * @phy: the PHY to operate on
+ * @mmd: the device address of the PHY block to reset
+ * @wait: how long to wait for the reset to complete in 1ms increments
+ *
+ * Resets a PHY block and optionally waits for the reset to complete.
+ * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
+ * for 10G PHYs.
+ */
+int t3_phy_reset(struct cphy *phy, int mmd, int wait)
+{
+ int err;
+ unsigned int ctl;
+
+ err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
+ if (err || !wait)
+ return err;
+
+ do {
+ err = mdio_read(phy, mmd, MII_BMCR, &ctl);
+ if (err)
+ return err;
+ ctl &= BMCR_RESET;
+ if (ctl)
+ t3_os_sleep(1);
+ } while (ctl && --wait);
+
+ return ctl ? -1 : 0;
+}
+
+/**
+ * t3_phy_advertise - set the PHY advertisement registers for autoneg
+ * @phy: the PHY to operate on
+ * @advert: bitmap of capabilities the PHY should advertise
+ *
+ * Sets a 10/100/1000 PHY's advertisement registers to advertise the
+ * requested capabilities.
+ */
+int t3_phy_advertise(struct cphy *phy, unsigned int advert)
+{
+ int err;
+ unsigned int val = 0;
+
+ err = mdio_read(phy, 0, MII_CTRL1000, &val);
+ if (err)
+ return err;
+
+ val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
+ if (advert & ADVERTISED_1000baseT_Half)
+ val |= ADVERTISE_1000HALF;
+ if (advert & ADVERTISED_1000baseT_Full)
+ val |= ADVERTISE_1000FULL;
+
+ err = mdio_write(phy, 0, MII_CTRL1000, val);
+ if (err)
+ return err;
+
+ val = 1;
+ if (advert & ADVERTISED_10baseT_Half)
+ val |= ADVERTISE_10HALF;
+ if (advert & ADVERTISED_10baseT_Full)
+ val |= ADVERTISE_10FULL;
+ if (advert & ADVERTISED_100baseT_Half)
+ val |= ADVERTISE_100HALF;
+ if (advert & ADVERTISED_100baseT_Full)
+ val |= ADVERTISE_100FULL;
+ if (advert & ADVERTISED_Pause)
+ val |= ADVERTISE_PAUSE_CAP;
+ if (advert & ADVERTISED_Asym_Pause)
+ val |= ADVERTISE_PAUSE_ASYM;
+ return mdio_write(phy, 0, MII_ADVERTISE, val);
+}
+
+/**
+ * t3_set_phy_speed_duplex - force PHY speed and duplex
+ * @phy: the PHY to operate on
+ * @speed: requested PHY speed
+ * @duplex: requested PHY duplex
+ *
+ * Force a 10/100/1000 PHY's speed and duplex. This also disables
+ * auto-negotiation except for GigE, where auto-negotiation is mandatory.
+ */
+int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
+{
+ int err;
+ unsigned int ctl;
+
+ err = mdio_read(phy, 0, MII_BMCR, &ctl);
+ if (err)
+ return err;
+
+ if (speed >= 0) {
+ ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
+ if (speed == SPEED_100)
+ ctl |= BMCR_SPEED100;
+ else if (speed == SPEED_1000)
+ ctl |= BMCR_SPEED1000;
+ }
+ if (duplex >= 0) {
+ ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
+ if (duplex == DUPLEX_FULL)
+ ctl |= BMCR_FULLDPLX;
+ }
+ if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
+ ctl |= BMCR_ANENABLE;
+ return mdio_write(phy, 0, MII_BMCR, ctl);
+}
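+
+/*
+ * Usage sketch (illustrative only): force 100Mb/s full duplex; BMCR_ANENABLE
+ * is cleared as a side effect since the requested speed is below 1G:
+ *
+ *	t3_set_phy_speed_duplex(phy, SPEED_100, DUPLEX_FULL);
+ */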
+
+static struct adapter_info t3_adap_info[] = {
+ { 2, 0, 0, 0,
+ F_GPIO2_OEN | F_GPIO4_OEN |
+ F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
+ SUPPORTED_OFFLOAD,
+ &mi1_mdio_ops, "Chelsio PE9000" },
+ { 2, 0, 0, 0,
+ F_GPIO2_OEN | F_GPIO4_OEN |
+ F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
+ SUPPORTED_OFFLOAD,
+ &mi1_mdio_ops, "Chelsio T302" },
+ { 1, 0, 0, 0,
+ F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
+ F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
+ SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_OFFLOAD,
+ &mi1_mdio_ext_ops, "Chelsio T310" },
+ { 2, 0, 0, 0,
+ F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
+ F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
+ F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
+ SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_OFFLOAD,
+ &mi1_mdio_ext_ops, "Chelsio T320" },
+};
+
+/*
+ * Return the adapter_info structure with a given index. NULL is returned
+ * for out-of-range indices.
+ */
+const struct adapter_info *t3_get_adapter_info(unsigned int id)
+{
+ return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
+}
+
+#define CAPS_1G (SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | \
+ SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII)
+#define CAPS_10G (SUPPORTED_10000baseT_Full | SUPPORTED_AUI)
+
+static struct port_type_info port_types[] = {
+ { NULL },
+ { t3_ael1002_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
+ "10GBASE-XR" },
+ { t3_vsc8211_phy_prep, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
+ "10/100/1000BASE-T" },
+ { t3_mv88e1xxx_phy_prep, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
+ "10/100/1000BASE-T" },
+ { t3_xaui_direct_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4" },
+ { NULL, CAPS_10G, "10GBASE-KX4" },
+ { t3_qt2045_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4" },
+ { t3_ael1006_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
+ "10GBASE-SR" },
+ { NULL, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4" },
+};
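+
+/*
+ * Note: the index into port_types[] is presumably the port type code kept in
+ * the VPD; get_vpd_params() below defaults to codes 1 (AEL1002), 2 (VSC8211)
+ * and 6 (QT2045) when an old EEPROM carries no port information.
+ */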
+
+#undef CAPS_1G
+#undef CAPS_10G
+
+#define VPD_ENTRY(name, len) \
+ u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
+
+/*
+ * Partial EEPROM Vital Product Data structure. Includes only the ID and
+ * VPD-R sections.
+ */
+struct t3_vpd {
+ u8 id_tag;
+ u8 id_len[2];
+ u8 id_data[16];
+ u8 vpdr_tag;
+ u8 vpdr_len[2];
+ VPD_ENTRY(pn, 16); /* part number */
+ VPD_ENTRY(ec, 16); /* EC level */
+ VPD_ENTRY(sn, 16); /* serial number */
+ VPD_ENTRY(na, 12); /* MAC address base */
+ VPD_ENTRY(cclk, 6); /* core clock */
+ VPD_ENTRY(mclk, 6); /* mem clock */
+ VPD_ENTRY(uclk, 6); /* uP clk */
+ VPD_ENTRY(mdc, 6); /* MDIO clk */
+ VPD_ENTRY(mt, 2); /* mem timing */
+ VPD_ENTRY(xaui0cfg, 6); /* XAUI0 config */
+ VPD_ENTRY(xaui1cfg, 6); /* XAUI1 config */
+ VPD_ENTRY(port0, 2); /* PHY0 complex */
+ VPD_ENTRY(port1, 2); /* PHY1 complex */
+ VPD_ENTRY(port2, 2); /* PHY2 complex */
+ VPD_ENTRY(port3, 2); /* PHY3 complex */
+ VPD_ENTRY(rv, 1); /* csum */
+ u32 pad; /* for multiple-of-4 sizing and alignment */
+};
+
+#define EEPROM_MAX_POLL 4
+#define EEPROM_STAT_ADDR 0x4000
+#define VPD_BASE 0xc00
+
+/**
+ * t3_seeprom_read - read a VPD EEPROM location
+ * @adapter: adapter to read
+ * @addr: EEPROM address
+ * @data: where to store the read data
+ *
+ * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
+ * VPD ROM capability. A zero is written to the flag bit when the
+ * address is written to the control register. The hardware device will
+ * set the flag to 1 when 4 bytes have been read into the data register.
+ */
+int t3_seeprom_read(adapter_t *adapter, u32 addr, u32 *data)
+{
+ u16 val;
+ int attempts = EEPROM_MAX_POLL;
+ unsigned int base = adapter->params.pci.vpd_cap_addr;
+
+ if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
+ return -EINVAL;
+
+ t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR, (u16)addr);
+ do {
+ udelay(10);
+ t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val);
+ } while (!(val & PCI_VPD_ADDR_F) && --attempts);
+
+ if (!(val & PCI_VPD_ADDR_F)) {
+ CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
+ return -EIO;
+ }
+ t3_os_pci_read_config_4(adapter, base + PCI_VPD_DATA, data);
+ *data = le32_to_cpu(*data);
+ return 0;
+}
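+
+/*
+ * Usage sketch (illustrative only): fetch the first 32-bit word of the VPD
+ * structure, the same access get_vpd_params() below starts with:
+ *
+ *	u32 word;
+ *	int err = t3_seeprom_read(adapter, VPD_BASE, &word);
+ */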
+
+/**
+ * t3_seeprom_write - write a VPD EEPROM location
+ * @adapter: adapter to write
+ * @addr: EEPROM address
+ * @data: value to write
+ *
+ * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
+ * VPD ROM capability.
+ */
+int t3_seeprom_write(adapter_t *adapter, u32 addr, u32 data)
+{
+ u16 val;
+ int attempts = EEPROM_MAX_POLL;
+ unsigned int base = adapter->params.pci.vpd_cap_addr;
+
+ if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
+ return -EINVAL;
+
+ t3_os_pci_write_config_4(adapter, base + PCI_VPD_DATA,
+ cpu_to_le32(data));
+ t3_os_pci_write_config_2(adapter, base + PCI_VPD_ADDR,
+ (u16)addr | PCI_VPD_ADDR_F);
+ do {
+ t3_os_sleep(1);
+ t3_os_pci_read_config_2(adapter, base + PCI_VPD_ADDR, &val);
+ } while ((val & PCI_VPD_ADDR_F) && --attempts);
+
+ if (val & PCI_VPD_ADDR_F) {
+ CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
+ return -EIO;
+ }
+ return 0;
+}
+
+/**
+ * t3_seeprom_wp - enable/disable EEPROM write protection
+ * @adapter: the adapter
+ * @enable: 1 to enable write protection, 0 to disable it
+ *
+ * Enables or disables write protection on the serial EEPROM.
+ */
+int t3_seeprom_wp(adapter_t *adapter, int enable)
+{
+ return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
+}
+
+/*
+ * Convert a character holding a hex digit to a number.
+ */
+static unsigned int hex2int(unsigned char c)
+{
+ return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
+}
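+
+/*
+ * For example, hex2int('7') == 7 and hex2int('c') == 12; the caller is
+ * assumed to pass a valid hexadecimal digit.
+ */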
+
+/**
+ * get_vpd_params - read VPD parameters from VPD EEPROM
+ * @adapter: adapter to read
+ * @p: where to store the parameters
+ *
+ * Reads card parameters stored in VPD EEPROM.
+ */
+static int get_vpd_params(adapter_t *adapter, struct vpd_params *p)
+{
+ int i, addr, ret;
+ struct t3_vpd vpd;
+
+ /*
+ * Card information is normally at VPD_BASE but some early cards had
+ * it at 0.
+ */
+ ret = t3_seeprom_read(adapter, VPD_BASE, (u32 *)&vpd);
+ if (ret)
+ return ret;
+ addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;
+
+ for (i = 0; i < sizeof(vpd); i += 4) {
+ ret = t3_seeprom_read(adapter, addr + i,
+ (u32 *)((u8 *)&vpd + i));
+ if (ret)
+ return ret;
+ }
+
+ p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
+ p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
+ p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
+ p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
+ p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
+
+ /* Old eeproms didn't have port information */
+ if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
+ p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
+ p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
+ } else {
+ p->port_type[0] = (u8)hex2int(vpd.port0_data[0]);
+ p->port_type[1] = (u8)hex2int(vpd.port1_data[0]);
+ p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
+ p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
+ }
+
+ for (i = 0; i < 6; i++)
+ p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
+ hex2int(vpd.na_data[2 * i + 1]);
+ return 0;
+}
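+
+/*
+ * Note: the MAC address is stored in the VPD as ASCII hex, so the final loop
+ * above decodes, e.g., na_data "000743000123" into the Ethernet address
+ * 00:07:43:00:01:23 (00:07:43 being a Chelsio OUI).
+ */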
+
+/* serial flash and firmware constants */
+enum {
+ SF_ATTEMPTS = 5, /* max retries for SF1 operations */
+ SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
+ SF_SIZE = SF_SEC_SIZE * 8, /* serial flash size */
+
+ /* flash command opcodes */
+ SF_PROG_PAGE = 2, /* program page */
+ SF_WR_DISABLE = 4, /* disable writes */
+ SF_RD_STATUS = 5, /* read status register */
+ SF_WR_ENABLE = 6, /* enable writes */
+ SF_RD_DATA_FAST = 0xb, /* read flash */
+ SF_ERASE_SECTOR = 0xd8, /* erase sector */
+
+ FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */
+ FW_VERS_ADDR = 0x77ffc /* flash address holding FW version */
+};
+
+/**
+ * sf1_read - read data from the serial flash
+ * @adapter: the adapter
+ * @byte_cnt: number of bytes to read
+ * @cont: whether another operation will be chained
+ * @valp: where to store the read data
+ *
+ * Reads up to 4 bytes of data from the serial flash. The location of
+ * the read needs to be specified prior to calling this by issuing the
+ * appropriate commands to the serial flash.
+ */
+static int sf1_read(adapter_t *adapter, unsigned int byte_cnt, int cont,
+ u32 *valp)
+{
+ int ret;
+
+ if (!byte_cnt || byte_cnt > 4)
+ return -EINVAL;
+ if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
+ return -EBUSY;
+ t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
+ ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
+ if (!ret)
+ *valp = t3_read_reg(adapter, A_SF_DATA);
+ return ret;
+}
+
+/**
+ * sf1_write - write data to the serial flash
+ * @adapter: the adapter
+ * @byte_cnt: number of bytes to write
+ * @cont: whether another operation will be chained
+ * @val: value to write
+ *
+ * Writes up to 4 bytes of data to the serial flash. The location of
+ * the write needs to be specified prior to calling this by issuing the
+ * appropriate commands to the serial flash.
+ */
+static int sf1_write(adapter_t *adapter, unsigned int byte_cnt, int cont,
+ u32 val)
+{
+ if (!byte_cnt || byte_cnt > 4)
+ return -EINVAL;
+ if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
+ return -EBUSY;
+ t3_write_reg(adapter, A_SF_DATA, val);
+ t3_write_reg(adapter, A_SF_OP,
+ V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
+ return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
+}
+
+/**
+ * flash_wait_op - wait for a flash operation to complete
+ * @adapter: the adapter
+ * @attempts: max number of polls of the status register
+ * @delay: delay between polls in ms
+ *
+ * Wait for a flash operation to complete by polling the status register.
+ */
+static int flash_wait_op(adapter_t *adapter, int attempts, int delay)
+{
+ int ret;
+ u32 status;
+
+ while (1) {
+ if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
+ (ret = sf1_read(adapter, 1, 0, &status)) != 0)
+ return ret;
+ if (!(status & 1))
+ return 0;
+ if (--attempts == 0)
+ return -EAGAIN;
+ if (delay)
+ t3_os_sleep(delay);
+ }
+}
+
+/**
+ * t3_read_flash - read words from serial flash
+ * @adapter: the adapter
+ * @addr: the start address for the read
+ * @nwords: how many 32-bit words to read
+ * @data: where to store the read data
+ * @byte_oriented: whether to store data as bytes or as words
+ *
+ * Read the specified number of 32-bit words from the serial flash.
+ * If @byte_oriented is set the read data is stored as a byte array
+ * (i.e., big-endian), otherwise as 32-bit words in the platform's
+ * natural endianness.
+ */
+int t3_read_flash(adapter_t *adapter, unsigned int addr, unsigned int nwords,
+ u32 *data, int byte_oriented)
+{
+ int ret;
+
+ if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
+ return -EINVAL;
+
+ addr = swab32(addr) | SF_RD_DATA_FAST;
+
+ if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
+ (ret = sf1_read(adapter, 1, 1, data)) != 0)
+ return ret;
+
+ for ( ; nwords; nwords--, data++) {
+ ret = sf1_read(adapter, 4, nwords > 1, data);
+ if (ret)
+ return ret;
+ if (byte_oriented)
+ *data = htonl(*data);
+ }
+ return 0;
+}
+
+/**
+ * t3_write_flash - write up to a page of data to the serial flash
+ * @adapter: the adapter
+ * @addr: the start address to write
+ * @n: length of data to write
+ * @data: the data to write
+ *
+ * Writes up to a page of data (256 bytes) to the serial flash starting
+ * at the given address.
+ */
+static int t3_write_flash(adapter_t *adapter, unsigned int addr,
+ unsigned int n, const u8 *data)
+{
+ int ret;
+ u32 buf[64];
+ unsigned int i, c, left, val, offset = addr & 0xff;
+
+ if (addr + n > SF_SIZE || offset + n > 256)
+ return -EINVAL;
+
+ val = swab32(addr) | SF_PROG_PAGE;
+
+ if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
+ (ret = sf1_write(adapter, 4, 1, val)) != 0)
+ return ret;
+
+ for (left = n; left; left -= c) {
+ c = min(left, 4U);
+ for (val = 0, i = 0; i < c; ++i)
+ val = (val << 8) + *data++;
+
+ ret = sf1_write(adapter, c, c != left, val);
+ if (ret)
+ return ret;
+ }
+ if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
+ return ret;
+
+ /* Read the page to verify the write succeeded */
+ ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
+ if (ret)
+ return ret;
+
+ if (memcmp(data - n, (u8 *)buf + offset, n))
+ return -EIO;
+ return 0;
+}
+
+enum fw_version_type {
+ FW_VERSION_N3,
+ FW_VERSION_T3
+};
+
+/**
+ * t3_get_fw_version - read the firmware version
+ * @adapter: the adapter
+ * @vers: where to place the version
+ *
+ * Reads the FW version from flash.
+ */
+int t3_get_fw_version(adapter_t *adapter, u32 *vers)
+{
+ return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
+}
+
+/**
+ * t3_check_fw_version - check if the FW is compatible with this driver
+ * @adapter: the adapter
+ *
+ * Checks if an adapter's FW is compatible with the driver. Returns 0
+ * if the versions are compatible, a negative error otherwise.
+ */
+int t3_check_fw_version(adapter_t *adapter)
+{
+ int ret;
+ u32 vers;
+ unsigned int type, major, minor;
+
+ ret = t3_get_fw_version(adapter, &vers);
+ if (ret)
+ return ret;
+
+ type = G_FW_VERSION_TYPE(vers);
+ major = G_FW_VERSION_MAJOR(vers);
+ minor = G_FW_VERSION_MINOR(vers);
+
+	if (type == FW_VERSION_T3 && major == CHELSIO_FW_MAJOR &&
+	    minor == CHELSIO_FW_MINOR)
+ return 0;
+
+	CH_ERR(adapter, "found wrong FW version (%u.%u), "
+ "driver needs version %d.%d\n", major, minor,
+ CHELSIO_FW_MAJOR, CHELSIO_FW_MINOR);
+ return -EINVAL;
+}
+
+/**
+ * t3_flash_erase_sectors - erase a range of flash sectors
+ * @adapter: the adapter
+ * @start: the first sector to erase
+ * @end: the last sector to erase
+ *
+ * Erases the sectors in the given range.
+ */
+static int t3_flash_erase_sectors(adapter_t *adapter, int start, int end)
+{
+ while (start <= end) {
+ int ret;
+
+ if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
+ (ret = sf1_write(adapter, 4, 0,
+ SF_ERASE_SECTOR | (start << 8))) != 0 ||
+ (ret = flash_wait_op(adapter, 5, 500)) != 0)
+ return ret;
+ start++;
+ }
+ return 0;
+}
+
+/**
+ * t3_load_fw - download firmware
+ * @adapter: the adapter
+ * @fw_data: the firmware image to write
+ * @size: image size
+ *
+ * Write the supplied firmware image to the card's serial flash.
+ * The FW image has the following sections: @size - 8 bytes of code and
+ * data, followed by 4 bytes of FW version, followed by the 32-bit
+ * 1's complement checksum of the whole image.
+ */
+int t3_load_fw(adapter_t *adapter, const u8 *fw_data, unsigned int size)
+{
+ u32 csum;
+ unsigned int i;
+ const u32 *p = (const u32 *)fw_data;
+ int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;
+
+ if (size & 3)
+ return -EINVAL;
+ if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
+ return -EFBIG;
+
+ for (csum = 0, i = 0; i < size / sizeof(csum); i++)
+ csum += ntohl(p[i]);
+ if (csum != 0xffffffff) {
+ CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
+ csum);
+ return -EINVAL;
+ }
+
+ ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
+ if (ret)
+ goto out;
+
+ size -= 8; /* trim off version and checksum */
+ for (addr = FW_FLASH_BOOT_ADDR; size; ) {
+ unsigned int chunk_size = min(size, 256U);
+
+ ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
+ if (ret)
+ goto out;
+
+ addr += chunk_size;
+ fw_data += chunk_size;
+ size -= chunk_size;
+ }
+
+ ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
+out:
+ if (ret)
+ CH_ERR(adapter, "firmware download failed, error %d\n", ret);
+ return ret;
+}
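+
+/*
+ * Note: the checksum convention above means the big-endian 32-bit words of a
+ * valid image sum to 0xffffffff modulo 2^32, i.e. the trailing word is the
+ * one's complement of the sum of all preceding words.
+ */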
+
+#define CIM_CTL_BASE 0x2000
+
+/**
+ * t3_cim_ctl_blk_read - read a block from CIM control region
+ * @adap: the adapter
+ * @addr: the start address within the CIM control region
+ * @n: number of words to read
+ * @valp: where to store the result
+ *
+ * Reads a block of 4-byte words from the CIM control region.
+ */
+int t3_cim_ctl_blk_read(adapter_t *adap, unsigned int addr, unsigned int n,
+ unsigned int *valp)
+{
+ int ret = 0;
+
+ if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
+ return -EBUSY;
+
+ for ( ; !ret && n--; addr += 4) {
+ t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
+ ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
+ 0, 5, 2);
+ if (!ret)
+ *valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
+ }
+ return ret;
+}
+
+/**
+ * t3_link_changed - handle interface link changes
+ * @adapter: the adapter
+ * @port_id: the port index that changed link state
+ *
+ * Called when a port's link settings change to propagate the new values
+ * to the associated PHY and MAC. After performing the common tasks it
+ * invokes an OS-specific handler.
+ */
+void t3_link_changed(adapter_t *adapter, int port_id)
+{
+ int link_ok, speed, duplex, fc;
+ struct cphy *phy = &adapter->port[port_id].phy;
+ struct cmac *mac = &adapter->port[port_id].mac;
+ struct link_config *lc = &adapter->port[port_id].link_config;
+
+ phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
+
+ if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
+ uses_xaui(adapter)) {
+ if (link_ok)
+ t3b_pcs_reset(mac);
+ t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
+ link_ok ? F_TXACTENABLE | F_RXEN : 0);
+ }
+ lc->link_ok = (unsigned char)link_ok;
+ lc->speed = speed < 0 ? SPEED_INVALID : speed;
+ lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
+ if (lc->requested_fc & PAUSE_AUTONEG)
+ fc &= lc->requested_fc;
+ else
+ fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+
+ if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
+ /* Set MAC speed, duplex, and flow control to match PHY. */
+ t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
+ lc->fc = (unsigned char)fc;
+ }
+
+ t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
+}
+
+/**
+ * t3_link_start - apply link configuration to MAC/PHY
+ * @phy: the PHY to setup
+ * @mac: the MAC to setup
+ * @lc: the requested link configuration
+ *
+ * Set up a port's MAC and PHY according to a desired link configuration.
+ * - If the PHY can auto-negotiate, first decide what to advertise, then
+ *   enable/disable auto-negotiation as desired, and reset.
+ * - If the PHY does not auto-negotiate, just reset it.
+ * - If auto-negotiation is off, set the MAC to the proper speed/duplex/FC,
+ * otherwise do it later based on the outcome of auto-negotiation.
+ */
+int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
+{
+ unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+
+ lc->link_ok = 0;
+ if (lc->supported & SUPPORTED_Autoneg) {
+ lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
+ if (fc) {
+ lc->advertising |= ADVERTISED_Asym_Pause;
+ if (fc & PAUSE_RX)
+ lc->advertising |= ADVERTISED_Pause;
+ }
+ phy->ops->advertise(phy, lc->advertising);
+
+ if (lc->autoneg == AUTONEG_DISABLE) {
+ lc->speed = lc->requested_speed;
+ lc->duplex = lc->requested_duplex;
+ lc->fc = (unsigned char)fc;
+ t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
+ fc);
+ /* Also disables autoneg */
+ phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
+ phy->ops->reset(phy, 0);
+ } else
+ phy->ops->autoneg_enable(phy);
+ } else {
+ t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
+ lc->fc = (unsigned char)fc;
+ phy->ops->reset(phy, 0);
+ }
+ return 0;
+}
+
+/**
+ * t3_set_vlan_accel - control HW VLAN extraction
+ * @adapter: the adapter
+ * @ports: bitmap of adapter ports to operate on
+ * @on: enable (1) or disable (0) HW VLAN extraction
+ *
+ * Enables or disables HW extraction of VLAN tags for the given ports.
+ */
+void t3_set_vlan_accel(adapter_t *adapter, unsigned int ports, int on)
+{
+ t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
+ ports << S_VLANEXTRACTIONENABLE,
+ on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
+}
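+
+/*
+ * Usage sketch (illustrative only): enable HW VLAN tag extraction for
+ * port 0 only:
+ *
+ *	t3_set_vlan_accel(adapter, 1 << 0, 1);
+ */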
+
+struct intr_info {
+ unsigned int mask; /* bits to check in interrupt status */
+ const char *msg; /* message to print or NULL */
+ short stat_idx; /* stat counter to increment or -1 */
+ unsigned short fatal:1; /* whether the condition reported is fatal */
+};
+
+/**
+ * t3_handle_intr_status - table driven interrupt handler
+ * @adapter: the adapter that generated the interrupt
+ * @reg: the interrupt status register to process
+ * @mask: a mask to apply to the interrupt status
+ * @acts: table of interrupt actions
+ * @stats: statistics counters tracking interrupt occurrences
+ *
+ * A table driven interrupt handler that applies a set of masks to an
+ * interrupt status word and performs the corresponding actions if the
+ * interrupts described by the mask have occurred. The actions include
+ * optionally printing a warning or alert message, and optionally
+ * incrementing a stat counter. The table is terminated by an entry
+ * specifying mask 0. Returns the number of fatal interrupt conditions.
+ */
+static int t3_handle_intr_status(adapter_t *adapter, unsigned int reg,
+ unsigned int mask,
+ const struct intr_info *acts,
+ unsigned long *stats)
+{
+ int fatal = 0;
+ unsigned int status = t3_read_reg(adapter, reg) & mask;
+
+ for ( ; acts->mask; ++acts) {
+		if (!(status & acts->mask))
+			continue;
+ if (acts->fatal) {
+ fatal++;
+ CH_ALERT(adapter, "%s (0x%x)\n",
+ acts->msg, status & acts->mask);
+ } else if (acts->msg)
+ CH_WARN(adapter, "%s (0x%x)\n",
+ acts->msg, status & acts->mask);
+ if (acts->stat_idx >= 0)
+ stats[acts->stat_idx]++;
+ }
+ if (status) /* clear processed interrupts */
+ t3_write_reg(adapter, reg, status);
+ return fatal;
+}
+
+#define SGE_INTR_MASK (F_RSPQDISABLED)
+#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
+ F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
+ F_NFASRCHFAIL)
+#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
+#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
+ V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
+ F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
+#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
+ F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
+ F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
+ F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
+ V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
+ V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
+#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
+ F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
+ /* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
+ V_BISTERR(M_BISTERR) | F_PEXERR)
+#define ULPRX_INTR_MASK F_PARERR
+#define ULPTX_INTR_MASK 0
+#define CPLSW_INTR_MASK (F_TP_FRAMING_ERROR | \
+ F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
+ F_ZERO_SWITCH_ERROR)
+#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
+ F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
+ F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
+ F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT)
+#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
+ V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
+ V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
+#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
+ V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
+ V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
+#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
+ V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
+ V_RXTPPARERRENB(M_RXTPPARERRENB) | \
+ V_MCAPARERRENB(M_MCAPARERRENB))
+#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
+ F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
+ F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
+ F_MPS0 | F_CPL_SWITCH)
+
+/*
+ * Interrupt handler for the PCIX1 module.
+ */
+static void pci_intr_handler(adapter_t *adapter)
+{
+ static struct intr_info pcix1_intr_info[] = {
+ { F_MSTDETPARERR, "PCI master detected parity error", -1, 1 },
+ { F_SIGTARABT, "PCI signaled target abort", -1, 1 },
+ { F_RCVTARABT, "PCI received target abort", -1, 1 },
+ { F_RCVMSTABT, "PCI received master abort", -1, 1 },
+ { F_SIGSYSERR, "PCI signaled system error", -1, 1 },
+ { F_DETPARERR, "PCI detected parity error", -1, 1 },
+ { F_SPLCMPDIS, "PCI split completion discarded", -1, 1 },
+ { F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1 },
+ { F_RCVSPLCMPERR, "PCI received split completion error", -1,
+ 1 },
+ { F_DETCORECCERR, "PCI correctable ECC error",
+ STAT_PCI_CORR_ECC, 0 },
+ { F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1 },
+ { F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
+ { V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
+ 1 },
+ { V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
+ 1 },
+ { V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
+ 1 },
+ { V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
+ "error", -1, 1 },
+ { 0 }
+ };
+
+ if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
+ pcix1_intr_info, adapter->irq_stats))
+ t3_fatal_err(adapter);
+}
+
+/*
+ * Interrupt handler for the PCIE module.
+ */
+static void pcie_intr_handler(adapter_t *adapter)
+{
+ static struct intr_info pcie_intr_info[] = {
+ { F_PEXERR, "PCI PEX error", -1, 1 },
+ { F_UNXSPLCPLERRR,
+ "PCI unexpected split completion DMA read error", -1, 1 },
+ { F_UNXSPLCPLERRC,
+ "PCI unexpected split completion DMA command error", -1, 1 },
+ { F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1 },
+ { F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1 },
+ { F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1 },
+ { F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1 },
+ { V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
+ "PCI MSI-X table/PBA parity error", -1, 1 },
+ { V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1 },
+ { 0 }
+ };
+
+ if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
+ pcie_intr_info, adapter->irq_stats))
+ t3_fatal_err(adapter);
+}
+
+/*
+ * TP interrupt handler.
+ */
+static void tp_intr_handler(adapter_t *adapter)
+{
+ static struct intr_info tp_intr_info[] = {
+ { 0xffffff, "TP parity error", -1, 1 },
+ { 0x1000000, "TP out of Rx pages", -1, 1 },
+ { 0x2000000, "TP out of Tx pages", -1, 1 },
+ { 0 }
+ };
+
+ if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
+ tp_intr_info, NULL))
+ t3_fatal_err(adapter);
+}
+
+/*
+ * CIM interrupt handler.
+ */
+static void cim_intr_handler(adapter_t *adapter)
+{
+ static struct intr_info cim_intr_info[] = {
+ { F_RSVDSPACEINT, "CIM reserved space write", -1, 1 },
+ { F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1 },
+ { F_FLASHRANGEINT, "CIM flash address out of range", -1, 1 },
+ { F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
+ { F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1 },
+ { F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
+ { F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1 },
+ { F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
+ { F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
+ { F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
+ { F_BLKRDPLINT, "CIM block read from PL space", -1, 1 },
+ { F_BLKWRPLINT, "CIM block write to PL space", -1, 1 },
+ { 0 }
+ };
+
+ if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
+ cim_intr_info, NULL))
+ t3_fatal_err(adapter);
+}
+
+/*
+ * ULP RX interrupt handler.
+ */
+static void ulprx_intr_handler(adapter_t *adapter)
+{
+ static struct intr_info ulprx_intr_info[] = {
+ { F_PARERR, "ULP RX parity error", -1, 1 },
+ { 0 }
+ };
+
+ if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
+ ulprx_intr_info, NULL))
+ t3_fatal_err(adapter);
+}
+
+/*
+ * ULP TX interrupt handler.
+ */
+static void ulptx_intr_handler(adapter_t *adapter)
+{
+ static struct intr_info ulptx_intr_info[] = {
+ { F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
+ STAT_ULP_CH0_PBL_OOB, 0 },
+ { F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
+ STAT_ULP_CH1_PBL_OOB, 0 },
+ { 0 }
+ };
+
+ if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
+ ulptx_intr_info, adapter->irq_stats))
+ t3_fatal_err(adapter);
+}
+
+#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
+ F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
+ F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
+ F_ICSPI1_TX_FRAMING_ERROR)
+#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
+ F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
+ F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
+ F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
+
+/*
+ * PM TX interrupt handler.
+ */
+static void pmtx_intr_handler(adapter_t *adapter)
+{
+ static struct intr_info pmtx_intr_info[] = {
+ { F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
+ { ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1 },
+ { OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1 },
+ { V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
+ "PMTX ispi parity error", -1, 1 },
+ { V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
+ "PMTX ospi parity error", -1, 1 },
+ { 0 }
+ };
+
+ if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
+ pmtx_intr_info, NULL))
+ t3_fatal_err(adapter);
+}
+
+#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
+ F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
+ F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
+ F_IESPI1_TX_FRAMING_ERROR)
+#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
+ F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
+ F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
+ F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
+
+/*
+ * PM RX interrupt handler.
+ */
+static void pmrx_intr_handler(adapter_t *adapter)
+{
+ static struct intr_info pmrx_intr_info[] = {
+ { F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
+ { IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1 },
+ { OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1 },
+ { V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
+ "PMRX ispi parity error", -1, 1 },
+ { V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
+ "PMRX ospi parity error", -1, 1 },
+ { 0 }
+ };
+
+ if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
+ pmrx_intr_info, NULL))
+ t3_fatal_err(adapter);
+}
+
+/*
+ * CPL switch interrupt handler.
+ */
+static void cplsw_intr_handler(adapter_t *adapter)
+{
+ static struct intr_info cplsw_intr_info[] = {
+	/* { F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1 }, */
+ { F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1 },
+ { F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1 },
+ { F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1 },
+ { F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1 },
+ { 0 }
+ };
+
+ if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
+ cplsw_intr_info, NULL))
+ t3_fatal_err(adapter);
+}
+
+/*
+ * MPS interrupt handler.
+ */
+static void mps_intr_handler(adapter_t *adapter)
+{
+ static struct intr_info mps_intr_info[] = {
+ { 0x1ff, "MPS parity error", -1, 1 },
+ { 0 }
+ };
+
+ if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
+ mps_intr_info, NULL))
+ t3_fatal_err(adapter);
+}
+
+#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
+
+/*
+ * MC7 interrupt handler.
+ */
+static void mc7_intr_handler(struct mc7 *mc7)
+{
+ adapter_t *adapter = mc7->adapter;
+ u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
+
+ if (cause & F_CE) {
+ mc7->stats.corr_err++;
+ CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
+ "data 0x%x 0x%x 0x%x\n", mc7->name,
+ t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
+ t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
+ t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
+ t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
+ }
+
+ if (cause & F_UE) {
+ mc7->stats.uncorr_err++;
+ CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
+ "data 0x%x 0x%x 0x%x\n", mc7->name,
+ t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
+ t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
+ t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
+ t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
+ }
+
+ if (G_PE(cause)) {
+ mc7->stats.parity_err++;
+ CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
+ mc7->name, G_PE(cause));
+ }
+
+ if (cause & F_AE) {
+ u32 addr = 0;
+
+ if (adapter->params.rev > 0)
+ addr = t3_read_reg(adapter,
+ mc7->offset + A_MC7_ERR_ADDR);
+ mc7->stats.addr_err++;
+ CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
+ mc7->name, addr);
+ }
+
+ if (cause & MC7_INTR_FATAL)
+ t3_fatal_err(adapter);
+
+ t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
+}
+
+#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
+ V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
+/*
+ * XGMAC interrupt handler.
+ */
+static int mac_intr_handler(adapter_t *adap, unsigned int idx)
+{
+ struct cmac *mac = &adap->port[idx].mac;
+ u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);
+
+ if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
+ mac->stats.tx_fifo_parity_err++;
+ CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
+ }
+ if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
+ mac->stats.rx_fifo_parity_err++;
+ CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
+ }
+ if (cause & F_TXFIFO_UNDERRUN)
+ mac->stats.tx_fifo_urun++;
+ if (cause & F_RXFIFO_OVERFLOW)
+ mac->stats.rx_fifo_ovfl++;
+ if (cause & V_SERDES_LOS(M_SERDES_LOS))
+ mac->stats.serdes_signal_loss++;
+ if (cause & F_XAUIPCSCTCERR)
+ mac->stats.xaui_pcs_ctc_err++;
+ if (cause & F_XAUIPCSALIGNCHANGE)
+ mac->stats.xaui_pcs_align_change++;
+
+ t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
+ if (cause & XGM_INTR_FATAL)
+ t3_fatal_err(adap);
+ return cause != 0;
+}
+
+/*
+ * Interrupt handler for PHY events.
+ */
+int t3_phy_intr_handler(adapter_t *adapter)
+{
+ u32 mask, gpi = adapter_info(adapter)->gpio_intr;
+ u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
+
+ for_each_port(adapter, i) {
+ struct port_info *p = &adapter->port[i];
+
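+		/* Isolate the lowest set GPIO bit and strip it from gpi. */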
+ mask = gpi - (gpi & (gpi - 1));
+ gpi -= mask;
+
+ if (!(p->port_type->caps & SUPPORTED_IRQ))
+ continue;
+
+ if (cause & mask) {
+ int phy_cause = p->phy.ops->intr_handler(&p->phy);
+
+ if (phy_cause & cphy_cause_link_change)
+ t3_link_changed(adapter, i);
+ if (phy_cause & cphy_cause_fifo_error)
+ p->phy.fifo_errors++;
+ }
+ }
+
+ t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
+ return 0;
+}
+
+/*
+ * T3 slow path (non-data) interrupt handler.
+ */
+int t3_slow_intr_handler(adapter_t *adapter)
+{
+ u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
+
+ cause &= adapter->slow_intr_mask;
+ if (!cause)
+ return 0;
+
+ printf("slow intr handler\n");
+ if (cause & F_PCIM0) {
+ if (is_pcie(adapter))
+ pcie_intr_handler(adapter);
+ else
+ pci_intr_handler(adapter);
+ }
+ if (cause & F_SGE3)
+ t3_sge_err_intr_handler(adapter);
+ if (cause & F_MC7_PMRX)
+ mc7_intr_handler(&adapter->pmrx);
+ if (cause & F_MC7_PMTX)
+ mc7_intr_handler(&adapter->pmtx);
+ if (cause & F_MC7_CM)
+ mc7_intr_handler(&adapter->cm);
+ if (cause & F_CIM)
+ cim_intr_handler(adapter);
+ if (cause & F_TP1)
+ tp_intr_handler(adapter);
+ if (cause & F_ULP2_RX)
+ ulprx_intr_handler(adapter);
+ if (cause & F_ULP2_TX)
+ ulptx_intr_handler(adapter);
+ if (cause & F_PM1_RX)
+ pmrx_intr_handler(adapter);
+ if (cause & F_PM1_TX)
+ pmtx_intr_handler(adapter);
+ if (cause & F_CPL_SWITCH)
+ cplsw_intr_handler(adapter);
+ if (cause & F_MPS0)
+ mps_intr_handler(adapter);
+ if (cause & F_MC5A)
+ t3_mc5_intr_handler(&adapter->mc5);
+ if (cause & F_XGMAC0_0)
+ mac_intr_handler(adapter, 0);
+ if (cause & F_XGMAC0_1)
+ mac_intr_handler(adapter, 1);
+ if (cause & F_T3DBG)
+ t3_os_ext_intr_handler(adapter);
+
+ /* Clear the interrupts just processed. */
+ t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
+ (void) t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
+ return 1;
+}
+
+/**
+ * t3_intr_enable - enable interrupts
+ * @adapter: the adapter whose interrupts should be enabled
+ *
+ * Enable interrupts by setting the interrupt enable registers of the
+ * various HW modules and then enabling the top-level interrupt
+ * concentrator.
+ */
+void t3_intr_enable(adapter_t *adapter)
+{
+ static struct addr_val_pair intr_en_avp[] = {
+ { A_SG_INT_ENABLE, SGE_INTR_MASK },
+ { A_MC7_INT_ENABLE, MC7_INTR_MASK },
+ { A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
+ MC7_INTR_MASK },
+ { A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
+ MC7_INTR_MASK },
+ { A_MC5_DB_INT_ENABLE, MC5_INTR_MASK },
+ { A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK },
+ { A_TP_INT_ENABLE, 0x3bfffff },
+ { A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK },
+ { A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK },
+ { A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK },
+ { A_MPS_INT_ENABLE, MPS_INTR_MASK },
+ };
+
+ adapter->slow_intr_mask = PL_INTR_MASK;
+
+ t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
+
+ if (adapter->params.rev > 0) {
+ t3_write_reg(adapter, A_CPL_INTR_ENABLE,
+ CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
+ t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
+ ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
+ F_PBL_BOUND_ERR_CH1);
+ } else {
+ t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
+ t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
+ }
+
+ t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW,
+ adapter_info(adapter)->gpio_intr);
+ t3_write_reg(adapter, A_T3DBG_INT_ENABLE,
+ adapter_info(adapter)->gpio_intr);
+ if (is_pcie(adapter)) {
+ t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
+ } else {
+ t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
+ }
+ t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
+ (void) t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
+}
+
+/**
+ * t3_intr_disable - disable a card's interrupts
+ * @adapter: the adapter whose interrupts should be disabled
+ *
+ * Disable interrupts. We only disable the top-level interrupt
+ * concentrator and the SGE data interrupts.
+ */
+void t3_intr_disable(adapter_t *adapter)
+{
+ t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
+ (void) t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
+ adapter->slow_intr_mask = 0;
+}
+
+/**
+ * t3_intr_clear - clear all interrupts
+ * @adapter: the adapter whose interrupts should be cleared
+ *
+ * Clears all interrupts.
+ */
+void t3_intr_clear(adapter_t *adapter)
+{
+ static const unsigned int cause_reg_addr[] = {
+ A_SG_INT_CAUSE,
+ A_SG_RSPQ_FL_STATUS,
+ A_PCIX_INT_CAUSE,
+ A_MC7_INT_CAUSE,
+ A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
+ A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
+ A_CIM_HOST_INT_CAUSE,
+ A_TP_INT_CAUSE,
+ A_MC5_DB_INT_CAUSE,
+ A_ULPRX_INT_CAUSE,
+ A_ULPTX_INT_CAUSE,
+ A_CPL_INTR_CAUSE,
+ A_PM1_TX_INT_CAUSE,
+ A_PM1_RX_INT_CAUSE,
+ A_MPS_INT_CAUSE,
+ A_T3DBG_INT_CAUSE,
+ };
+ unsigned int i;
+
+ /* Clear PHY and MAC interrupts for each port. */
+ for_each_port(adapter, i)
+ t3_port_intr_clear(adapter, i);
+
+ for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
+ t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
+
+ t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
+ (void) t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
+}
+
+/**
+ * t3_port_intr_enable - enable port-specific interrupts
+ * @adapter: associated adapter
+ * @idx: index of port whose interrupts should be enabled
+ *
+ * Enable port-specific (i.e., MAC and PHY) interrupts for the given
+ * adapter port.
+ */
+void t3_port_intr_enable(adapter_t *adapter, int idx)
+{
+ t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
+ adapter->port[idx].phy.ops->intr_enable(&adapter->port[idx].phy);
+}
+
+/**
+ * t3_port_intr_disable - disable port-specific interrupts
+ * @adapter: associated adapter
+ * @idx: index of port whose interrupts should be disabled
+ *
+ * Disable port-specific (i.e., MAC and PHY) interrupts for the given
+ * adapter port.
+ */
+void t3_port_intr_disable(adapter_t *adapter, int idx)
+{
+ t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
+ adapter->port[idx].phy.ops->intr_disable(&adapter->port[idx].phy);
+}
+
+/**
+ * t3_port_intr_clear - clear port-specific interrupts
+ * @adapter: associated adapter
+ * @idx: index of port whose interrupts to clear
+ *
+ * Clear port-specific (i.e., MAC and PHY) interrupts for the given
+ * adapter port.
+ */
+void t3_port_intr_clear(adapter_t *adapter, int idx)
+{
+ t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
+ adapter->port[idx].phy.ops->intr_clear(&adapter->port[idx].phy);
+}
+
+
+/**
+ * t3_sge_write_context - write an SGE context
+ * @adapter: the adapter
+ * @id: the context id
+ * @type: the context type
+ *
+ * Program an SGE context with the values already loaded in the
+ * CONTEXT_DATA? registers.
+ */
+static int t3_sge_write_context(adapter_t *adapter, unsigned int id,
+ unsigned int type)
+{
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
+ t3_write_reg(adapter, A_SG_CONTEXT_CMD,
+ V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
+ return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
+ 0, 5, 1);
+}
+
+/**
+ * t3_sge_init_ecntxt - initialize an SGE egress context
+ * @adapter: the adapter to configure
+ * @id: the context id
+ * @gts_enable: whether to enable GTS for the context
+ * @type: the egress context type
+ * @respq: associated response queue
+ * @base_addr: base address of queue
+ * @size: number of queue entries
+ * @token: uP token
+ * @gen: initial generation value for the context
+ * @cidx: consumer pointer
+ *
+ * Initialize an SGE egress context and make it ready for use. If the
+ * platform allows concurrent context operations, the caller is
+ * responsible for appropriate locking.
+ */
+int t3_sge_init_ecntxt(adapter_t *adapter, unsigned int id, int gts_enable,
+ enum sge_context_type type, int respq, u64 base_addr,
+ unsigned int size, unsigned int token, int gen,
+ unsigned int cidx)
+{
+ unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
+
+ if (base_addr & 0xfff) /* must be 4K aligned */
+ return -EINVAL;
+ if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+ return -EBUSY;
+
+ base_addr >>= 12;
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
+ V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
+ V_EC_BASE_LO((u32)base_addr & 0xffff));
+ base_addr >>= 16;
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA2, (u32)base_addr);
+ base_addr >>= 32;
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
+ V_EC_BASE_HI((u32)base_addr & 0xf) | V_EC_RESPQ(respq) |
+ V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
+ F_EC_VALID);
+ return t3_sge_write_context(adapter, id, F_EGRESS);
+}
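+
+/*
+ * Example usage (hypothetical values): programming egress context 0 as an
+ * Ethernet Tx queue bound to response queue 0, with a 4K-aligned DMA base,
+ * 1024 descriptors, generation 1, and an initial cidx of 0:
+ *
+ *	err = t3_sge_init_ecntxt(adapter, 0, 1, SGE_CNTXT_ETH, 0,
+ *				 phys_addr, 1024, token, 1, 0);
+ *
+ * phys_addr and token stand in for the queue's DMA address and uP token.
+ */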
+
+/**
+ * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
+ * @adapter: the adapter to configure
+ * @id: the context id
+ * @gts_enable: whether to enable GTS for the context
+ * @base_addr: base address of queue
+ * @size: number of queue entries
+ * @bsize: size of each buffer for this queue
+ * @cong_thres: threshold to signal congestion to upstream producers
+ * @gen: initial generation value for the context
+ * @cidx: consumer pointer
+ *
+ * Initialize an SGE free list context and make it ready for use. The
+ * caller is responsible for ensuring only one context operation occurs
+ * at a time.
+ */
+int t3_sge_init_flcntxt(adapter_t *adapter, unsigned int id, int gts_enable,
+ u64 base_addr, unsigned int size, unsigned int bsize,
+ unsigned int cong_thres, int gen, unsigned int cidx)
+{
+ if (base_addr & 0xfff) /* must be 4K aligned */
+ return -EINVAL;
+ if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+ return -EBUSY;
+
+ base_addr >>= 12;
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA0, (u32)base_addr);
+ base_addr >>= 32;
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
+ V_FL_BASE_HI((u32)base_addr) |
+ V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
+ V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
+ V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
+ V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
+ V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
+ return t3_sge_write_context(adapter, id, F_FREELIST);
+}
+
+/**
+ * t3_sge_init_rspcntxt - initialize an SGE response queue context
+ * @adapter: the adapter to configure
+ * @id: the context id
+ * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
+ * @base_addr: base address of queue
+ * @size: number of queue entries
+ * @fl_thres: threshold for selecting the normal or jumbo free list
+ * @gen: initial generation value for the context
+ * @cidx: consumer pointer
+ *
+ * Initialize an SGE response queue context and make it ready for use.
+ * The caller is responsible for ensuring only one context operation
+ * occurs at a time.
+ */
+int t3_sge_init_rspcntxt(adapter_t *adapter, unsigned int id, int irq_vec_idx,
+ u64 base_addr, unsigned int size,
+ unsigned int fl_thres, int gen, unsigned int cidx)
+{
+ unsigned int intr = 0;
+
+ if (base_addr & 0xfff) /* must be 4K aligned */
+ return -EINVAL;
+ if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+ return -EBUSY;
+
+ base_addr >>= 12;
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
+ V_CQ_INDEX(cidx));
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr);
+ base_addr >>= 32;
+ if (irq_vec_idx >= 0)
+ intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
+ V_CQ_BASE_HI((u32)base_addr) | intr | V_RQ_GEN(gen));
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
+ return t3_sge_write_context(adapter, id, F_RESPONSEQ);
+}
+
+/**
+ * t3_sge_init_cqcntxt - initialize an SGE completion queue context
+ * @adapter: the adapter to configure
+ * @id: the context id
+ * @base_addr: base address of queue
+ * @size: number of queue entries
+ * @rspq: response queue for async notifications
+ * @ovfl_mode: CQ overflow mode
+ * @credits: completion queue credits
+ * @credit_thres: the credit threshold
+ *
+ * Initialize an SGE completion queue context and make it ready for use.
+ * The caller is responsible for ensuring only one context operation
+ * occurs at a time.
+ */
+int t3_sge_init_cqcntxt(adapter_t *adapter, unsigned int id, u64 base_addr,
+ unsigned int size, int rspq, int ovfl_mode,
+ unsigned int credits, unsigned int credit_thres)
+{
+ if (base_addr & 0xfff) /* must be 4K aligned */
+ return -EINVAL;
+ if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+ return -EBUSY;
+
+ base_addr >>= 12;
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA1, (u32)base_addr);
+ base_addr >>= 32;
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
+ V_CQ_BASE_HI((u32)base_addr) | V_CQ_RSPQ(rspq) |
+ V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode));
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
+ V_CQ_CREDIT_THRES(credit_thres));
+ return t3_sge_write_context(adapter, id, F_CQ);
+}
+
+/**
+ * t3_sge_enable_ecntxt - enable/disable an SGE egress context
+ * @adapter: the adapter
+ * @id: the egress context id
+ * @enable: enable (1) or disable (0) the context
+ *
+ * Enable or disable an SGE egress context. The caller is responsible for
+ * ensuring only one context operation occurs at a time.
+ */
+int t3_sge_enable_ecntxt(adapter_t *adapter, unsigned int id, int enable)
+{
+ if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+ return -EBUSY;
+
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
+ t3_write_reg(adapter, A_SG_CONTEXT_CMD,
+ V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
+ return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
+ 0, 5, 1);
+}
+
+/**
+ * t3_sge_disable_fl - disable an SGE free-buffer list
+ * @adapter: the adapter
+ * @id: the free list context id
+ *
+ * Disable an SGE free-buffer list. The caller is responsible for
+ * ensuring only one context operation occurs at a time.
+ */
+int t3_sge_disable_fl(adapter_t *adapter, unsigned int id)
+{
+ if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+ return -EBUSY;
+
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_CMD,
+ V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
+ return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
+ 0, 5, 1);
+}
+
+/**
+ * t3_sge_disable_rspcntxt - disable an SGE response queue
+ * @adapter: the adapter
+ * @id: the response queue context id
+ *
+ * Disable an SGE response queue. The caller is responsible for
+ * ensuring only one context operation occurs at a time.
+ */
+int t3_sge_disable_rspcntxt(adapter_t *adapter, unsigned int id)
+{
+ if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+ return -EBUSY;
+
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_CMD,
+ V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
+ return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
+ 0, 5, 1);
+}
+
+/**
+ * t3_sge_disable_cqcntxt - disable an SGE completion queue
+ * @adapter: the adapter
+ * @id: the completion queue context id
+ *
+ * Disable an SGE completion queue. The caller is responsible for
+ * ensuring only one context operation occurs at a time.
+ */
+int t3_sge_disable_cqcntxt(adapter_t *adapter, unsigned int id)
+{
+ if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+ return -EBUSY;
+
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
+ t3_write_reg(adapter, A_SG_CONTEXT_CMD,
+ V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
+ return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
+ 0, 5, 1);
+}
+
+/**
+ * t3_sge_cqcntxt_op - perform an operation on a completion queue context
+ * @adapter: the adapter
+ * @id: the context id
+ *	@op: the operation to perform
+ *	@credits: the number of CQ credits to return, for credit-return operations
+ *
+ * Perform the selected operation on an SGE completion queue context.
+ * The caller is responsible for ensuring only one context operation
+ * occurs at a time.
+ */
+int t3_sge_cqcntxt_op(adapter_t *adapter, unsigned int id, unsigned int op,
+ unsigned int credits)
+{
+ u32 val;
+
+ if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+ return -EBUSY;
+
+ t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
+ t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
+ V_CONTEXT(id) | F_CQ);
+ if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
+ 0, 5, 1, &val))
+ return -EIO;
+
+ if (op >= 2 && op < 7) {
+ if (adapter->params.rev > 0)
+ return G_CQ_INDEX(val);
+
+ t3_write_reg(adapter, A_SG_CONTEXT_CMD,
+ V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
+ if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
+ F_CONTEXT_CMD_BUSY, 0, 5, 1))
+ return -EIO;
+ return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
+ }
+ return 0;
+}
+
+/**
+ * t3_sge_read_context - read an SGE context
+ * @type: the context type
+ * @adapter: the adapter
+ * @id: the context id
+ * @data: holds the retrieved context
+ *
+ *	Read an SGE context of the given type. The caller is responsible for
+ * only one context operation occurs at a time.
+ */
+static int t3_sge_read_context(unsigned int type, adapter_t *adapter,
+ unsigned int id, u32 data[4])
+{
+ if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
+ return -EBUSY;
+
+ t3_write_reg(adapter, A_SG_CONTEXT_CMD,
+ V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
+ if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
+ 5, 1))
+ return -EIO;
+ data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
+ data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
+ data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
+ data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
+ return 0;
+}
+
+/**
+ * t3_sge_read_ecntxt - read an SGE egress context
+ * @adapter: the adapter
+ * @id: the context id
+ * @data: holds the retrieved context
+ *
+ * Read an SGE egress context. The caller is responsible for ensuring
+ * only one context operation occurs at a time.
+ */
+int t3_sge_read_ecntxt(adapter_t *adapter, unsigned int id, u32 data[4])
+{
+ if (id >= 65536)
+ return -EINVAL;
+ return t3_sge_read_context(F_EGRESS, adapter, id, data);
+}
+
+/**
+ * t3_sge_read_cq - read an SGE CQ context
+ * @adapter: the adapter
+ * @id: the context id
+ * @data: holds the retrieved context
+ *
+ * Read an SGE CQ context. The caller is responsible for ensuring
+ * only one context operation occurs at a time.
+ */
+int t3_sge_read_cq(adapter_t *adapter, unsigned int id, u32 data[4])
+{
+ if (id >= 65536)
+ return -EINVAL;
+ return t3_sge_read_context(F_CQ, adapter, id, data);
+}
+
+/**
+ * t3_sge_read_fl - read an SGE free-list context
+ * @adapter: the adapter
+ * @id: the context id
+ * @data: holds the retrieved context
+ *
+ * Read an SGE free-list context. The caller is responsible for ensuring
+ * only one context operation occurs at a time.
+ */
+int t3_sge_read_fl(adapter_t *adapter, unsigned int id, u32 data[4])
+{
+ if (id >= SGE_QSETS * 2)
+ return -EINVAL;
+ return t3_sge_read_context(F_FREELIST, adapter, id, data);
+}
+
+/**
+ * t3_sge_read_rspq - read an SGE response queue context
+ * @adapter: the adapter
+ * @id: the context id
+ * @data: holds the retrieved context
+ *
+ * Read an SGE response queue context. The caller is responsible for
+ * ensuring only one context operation occurs at a time.
+ */
+int t3_sge_read_rspq(adapter_t *adapter, unsigned int id, u32 data[4])
+{
+ if (id >= SGE_QSETS)
+ return -EINVAL;
+ return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
+}
+
+/**
+ * t3_config_rss - configure Rx packet steering
+ * @adapter: the adapter
+ * @rss_config: RSS settings (written to TP_RSS_CONFIG)
+ * @cpus: values for the CPU lookup table (0xff terminated)
+ * @rspq: values for the response queue lookup table (0xffff terminated)
+ *
+ * Programs the receive packet steering logic. @cpus and @rspq provide
+ * the values for the CPU and response queue lookup tables. If they
+ *	provide fewer values than the size of the tables, the supplied values
+ * are used repeatedly until the tables are fully populated.
+ */
+void t3_config_rss(adapter_t *adapter, unsigned int rss_config, const u8 *cpus,
+ const u16 *rspq)
+{
+ int i, j, cpu_idx = 0, q_idx = 0;
+
+ if (cpus)
+ for (i = 0; i < RSS_TABLE_SIZE; ++i) {
+ u32 val = i << 16;
+
+ for (j = 0; j < 2; ++j) {
+ val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
+ if (cpus[cpu_idx] == 0xff)
+ cpu_idx = 0;
+ }
+ t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
+ }
+
+ if (rspq)
+ for (i = 0; i < RSS_TABLE_SIZE; ++i) {
+ t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
+ (i << 16) | rspq[q_idx++]);
+ if (rspq[q_idx] == 0xffff)
+ q_idx = 0;
+ }
+
+ t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
+}
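+
+/*
+ * Example usage (hypothetical values): steer packets across four response
+ * queues with a trivial CPU map.  Note the 0xff/0xffff terminators; tables
+ * shorter than the HW tables are replayed until the HW tables are full:
+ *
+ *	static const u8 cpus[] = { 0, 1, 2, 3, 0xff };
+ *	static const u16 rspq[] = { 0, 1, 2, 3, 0xffff };
+ *
+ *	t3_config_rss(adapter, rss_flags, cpus, rspq);
+ *
+ * where rss_flags stands for the desired TP_RSS_CONFIG bit settings.
+ */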
+
+/**
+ * t3_read_rss - read the contents of the RSS tables
+ * @adapter: the adapter
+ * @lkup: holds the contents of the RSS lookup table
+ * @map: holds the contents of the RSS map table
+ *
+ * Reads the contents of the receive packet steering tables.
+ */
+int t3_read_rss(adapter_t *adapter, u8 *lkup, u16 *map)
+{
+ int i;
+ u32 val;
+
+ if (lkup)
+ for (i = 0; i < RSS_TABLE_SIZE; ++i) {
+ t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
+ 0xffff0000 | i);
+ val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
+ if (!(val & 0x80000000))
+ return -EAGAIN;
+ *lkup++ = (u8)val;
+ *lkup++ = (u8)(val >> 8);
+ }
+
+ if (map)
+ for (i = 0; i < RSS_TABLE_SIZE; ++i) {
+ t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
+ 0xffff0000 | i);
+ val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
+ if (!(val & 0x80000000))
+ return -EAGAIN;
+ *map++ = (u16)val;
+ }
+ return 0;
+}
+
+/**
+ * t3_tp_set_offload_mode - put TP in NIC/offload mode
+ * @adap: the adapter
+ * @enable: 1 to select offload mode, 0 for regular NIC
+ *
+ * Switches TP to NIC/offload mode.
+ */
+void t3_tp_set_offload_mode(adapter_t *adap, int enable)
+{
+ if (is_offload(adap) || !enable)
+ t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
+ V_NICMODE(!enable));
+}
+
+/**
+ * pm_num_pages - calculate the number of pages of the payload memory
+ * @mem_size: the size of the payload memory
+ * @pg_size: the size of each payload memory page
+ *
+ * Calculate the number of pages, each of the given size, that fit in a
+ * memory of the specified size, respecting the HW requirement that the
+ * number of pages must be a multiple of 24.
+ */
+static inline unsigned int pm_num_pages(unsigned int mem_size,
+ unsigned int pg_size)
+{
+ unsigned int n = mem_size / pg_size;
+
+ return n - n % 24;
+}
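+
+/*
+ * Worked example: a (hypothetical) 64MB channel with 64KB pages gives
+ * n = 1024, and 1024 - 1024 % 24 = 1008 usable pages.
+ */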
+
+#define mem_region(adap, start, size, reg) \
+ t3_write_reg((adap), A_ ## reg, (start)); \
+ start += size
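+
+/*
+ * For instance, mem_region(adap, m, 64 * 1024, SG_EGR_CNTX_BADDR)
+ * expands to
+ *
+ *	t3_write_reg(adap, A_SG_EGR_CNTX_BADDR, m);
+ *	m += 64 * 1024;
+ *
+ * programming the region's base register and advancing the running offset
+ * past the region.
+ */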
+
+/*
+ * partition_mem - partition memory and configure TP memory settings
+ * @adap: the adapter
+ * @p: the TP parameters
+ *
+ * Partitions context and payload memory and configures TP's memory
+ * registers.
+ */
+static void partition_mem(adapter_t *adap, const struct tp_params *p)
+{
+ unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
+ unsigned int timers = 0, timers_shift = 22;
+
+ if (adap->params.rev > 0) {
+ if (tids <= 16 * 1024) {
+ timers = 1;
+ timers_shift = 16;
+ } else if (tids <= 64 * 1024) {
+ timers = 2;
+ timers_shift = 18;
+ } else if (tids <= 256 * 1024) {
+ timers = 3;
+ timers_shift = 20;
+ }
+ }
+
+ t3_write_reg(adap, A_TP_PMM_SIZE,
+ p->chan_rx_size | (p->chan_tx_size >> 16));
+
+ t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
+ t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
+ t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
+ t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
+ V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
+
+ t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
+ t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
+ t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
+
+ pstructs = p->rx_num_pgs + p->tx_num_pgs;
+	/* Add a bit of headroom and make it a multiple of 24 */
+ pstructs += 48;
+ pstructs -= pstructs % 24;
+ t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
+
+ m = tids * TCB_SIZE;
+ mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
+ mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
+ t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
+ m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
+ mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
+ mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
+ mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
+ mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
+
+ m = (m + 4095) & ~0xfff;
+ t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
+ t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
+
+ tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
+ m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
+ adap->params.mc5.nfilters - adap->params.mc5.nroutes;
+ if (tids < m)
+ adap->params.mc5.nservers += m - tids;
+}
+
+static inline void tp_wr_indirect(adapter_t *adap, unsigned int addr, u32 val)
+{
+ t3_write_reg(adap, A_TP_PIO_ADDR, addr);
+ t3_write_reg(adap, A_TP_PIO_DATA, val);
+}
+
+static void tp_config(adapter_t *adap, const struct tp_params *p)
+{
+ t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
+ F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
+ F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
+ t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
+ F_MTUENABLE | V_WINDOWSCALEMODE(1) |
+ V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
+ t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
+ V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
+ V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
+ F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
+ t3_set_reg_field(adap, A_TP_IN_CONFIG, F_IPV6ENABLE | F_NICMODE,
+ F_IPV6ENABLE | F_NICMODE);
+ t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
+ t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
+ t3_set_reg_field(adap, A_TP_PARA_REG6,
+ adap->params.rev > 0 ? F_ENABLEESND : F_T3A_ENABLEESND,
+ 0);
+ t3_set_reg_field(adap, A_TP_PC_CONFIG,
+ F_ENABLEEPCMDAFULL | F_ENABLEOCSPIFULL,
+ F_TXDEFERENABLE | F_HEARBEATDACK | F_TXCONGESTIONMODE |
+ F_RXCONGESTIONMODE);
+ t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL, 0);
+
+ if (adap->params.rev > 0) {
+ tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
+ t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
+ F_TXPACEAUTO);
+ t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
+ t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
+ } else
+ t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
+
+ t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0x12121212);
+ t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0x12121212);
+ t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0x1212);
+}
+
+/* Desired TP timer resolution in usec */
+#define TP_TMR_RES 50
+
+/* TCP timer values in ms */
+#define TP_DACK_TIMER 50
+#define TP_RTO_MIN 250
+
+/**
+ * tp_set_timers - set TP timing parameters
+ * @adap: the adapter to set
+ * @core_clk: the core clock frequency in Hz
+ *
+ * Set TP's timing parameters, such as the various timer resolutions and
+ * the TCP timer values.
+ */
+static void tp_set_timers(adapter_t *adap, unsigned int core_clk)
+{
+ unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
+ unsigned int dack_re = fls(core_clk / 5000) - 1; /* 200us */
+ unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
+ unsigned int tps = core_clk >> tre;
+
+ t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
+ V_DELAYEDACKRESOLUTION(dack_re) |
+ V_TIMESTAMPRESOLUTION(tstamp_re));
+ t3_write_reg(adap, A_TP_DACK_TIMER,
+ (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
+ t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
+ t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
+ t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
+ t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
+ t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
+ V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
+ V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
+ V_KEEPALIVEMAX(9));
+
+#define SECONDS * tps
+
+ t3_write_reg(adap, A_TP_MSL,
+ adap->params.rev > 0 ? 0 : 2 SECONDS);
+ t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
+ t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
+ t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
+ t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
+ t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
+ t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
+ t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
+ t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
+
+#undef SECONDS
+}
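+
+/*
+ * Worked example: with a (hypothetical) 200MHz core clock,
+ * core_clk / (1000000 / TP_TMR_RES) = 10000 and fls(10000) - 1 = 13, so
+ * tre = 13 and tps = 200000000 >> 13 = 24414 ticks/s, i.e. a ~41us timer
+ * tick, the largest power-of-2 tick not exceeding the desired 50us.  The
+ * SECONDS macro then makes "64 SECONDS" expand to 64 * tps ticks.
+ */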
+
+#ifdef CONFIG_CHELSIO_T3_CORE
+/**
+ * t3_tp_set_coalescing_size - set receive coalescing size
+ * @adap: the adapter
+ * @size: the receive coalescing size
+ * @psh: whether a set PSH bit should deliver coalesced data
+ *
+ * Set the receive coalescing size and PSH bit handling.
+ */
+int t3_tp_set_coalescing_size(adapter_t *adap, unsigned int size, int psh)
+{
+ u32 val;
+
+ if (size > MAX_RX_COALESCING_LEN)
+ return -EINVAL;
+
+ val = t3_read_reg(adap, A_TP_PARA_REG3);
+ val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
+
+ if (size) {
+ val |= F_RXCOALESCEENABLE;
+ if (psh)
+ val |= F_RXCOALESCEPSHEN;
+ t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
+ V_MAXRXDATA(MAX_RX_COALESCING_LEN));
+ }
+ t3_write_reg(adap, A_TP_PARA_REG3, val);
+ return 0;
+}
+
+/**
+ * t3_tp_set_max_rxsize - set the max receive size
+ * @adap: the adapter
+ * @size: the max receive size
+ *
+ * Set TP's max receive size. This is the limit that applies when
+ * receive coalescing is disabled.
+ */
+void t3_tp_set_max_rxsize(adapter_t *adap, unsigned int size)
+{
+ t3_write_reg(adap, A_TP_PARA_REG7,
+ V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
+}
+
+static void __devinit init_mtus(unsigned short mtus[])
+{
+ /*
+ * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
+	 * it can accommodate max size TCP/IP headers when SACK and timestamps
+ * are enabled and still have at least 8 bytes of payload.
+ */
+ mtus[0] = 88;
+ mtus[1] = 256;
+ mtus[2] = 512;
+ mtus[3] = 576;
+ mtus[4] = 808;
+ mtus[5] = 1024;
+ mtus[6] = 1280;
+ mtus[7] = 1492;
+ mtus[8] = 1500;
+ mtus[9] = 2002;
+ mtus[10] = 2048;
+ mtus[11] = 4096;
+ mtus[12] = 4352;
+ mtus[13] = 8192;
+ mtus[14] = 9000;
+ mtus[15] = 9600;
+}
+
+/*
+ * Initial congestion control parameters.
+ */
+static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
+{
+ a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
+ a[9] = 2;
+ a[10] = 3;
+ a[11] = 4;
+ a[12] = 5;
+ a[13] = 6;
+ a[14] = 7;
+ a[15] = 8;
+ a[16] = 9;
+ a[17] = 10;
+ a[18] = 14;
+ a[19] = 17;
+ a[20] = 21;
+ a[21] = 25;
+ a[22] = 30;
+ a[23] = 35;
+ a[24] = 45;
+ a[25] = 60;
+ a[26] = 80;
+ a[27] = 100;
+ a[28] = 200;
+ a[29] = 300;
+ a[30] = 400;
+ a[31] = 500;
+
+ b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
+ b[9] = b[10] = 1;
+ b[11] = b[12] = 2;
+ b[13] = b[14] = b[15] = b[16] = 3;
+ b[17] = b[18] = b[19] = b[20] = b[21] = 4;
+ b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
+ b[28] = b[29] = 6;
+ b[30] = b[31] = 7;
+}
+
+/* The minimum additive increment value for the congestion control table */
+#define CC_MIN_INCR 2U
+
+/**
+ * t3_load_mtus - write the MTU and congestion control HW tables
+ * @adap: the adapter
+ * @mtus: the unrestricted values for the MTU table
+ *	@alpha: the values for the congestion control alpha parameter
+ * @beta: the values for the congestion control beta parameter
+ * @mtu_cap: the maximum permitted effective MTU
+ *
+ *	Write the MTU table with the supplied MTUs, capping each at @mtu_cap.
+ * Update the high-speed congestion control table with the supplied alpha,
+ * beta, and MTUs.
+ */
+void t3_load_mtus(adapter_t *adap, unsigned short mtus[NMTUS],
+ unsigned short alpha[NCCTRL_WIN],
+ unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
+{
+ static const unsigned int avg_pkts[NCCTRL_WIN] = {
+ 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
+ 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
+ 28672, 40960, 57344, 81920, 114688, 163840, 229376 };
+
+ unsigned int i, w;
+
+ for (i = 0; i < NMTUS; ++i) {
+ unsigned int mtu = min(mtus[i], mtu_cap);
+ unsigned int log2 = fls(mtu);
+
+ if (!(mtu & ((1 << log2) >> 2))) /* round */
+ log2--;
+ t3_write_reg(adap, A_TP_MTU_TABLE,
+ (i << 24) | (log2 << 16) | mtu);
+
+ for (w = 0; w < NCCTRL_WIN; ++w) {
+ unsigned int inc;
+
+ inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
+ CC_MIN_INCR);
+
+ t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
+ (w << 16) | (beta[w] << 13) | inc);
+ }
+ }
+}
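+
+/*
+ * Worked example (hypothetical values): for mtu = 1500 with alpha[w] = 2
+ * and avg_pkts[w] = 2 (window 0), the increment written is
+ * max(((1500 - 40) * 2) / 2, CC_MIN_INCR) = 1460, i.e. roughly one MSS
+ * per congestion window of two average packets.
+ */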
+
+/**
+ * t3_read_hw_mtus - returns the values in the HW MTU table
+ * @adap: the adapter
+ * @mtus: where to store the HW MTU values
+ *
+ * Reads the HW MTU table.
+ */
+void t3_read_hw_mtus(adapter_t *adap, unsigned short mtus[NMTUS])
+{
+ int i;
+
+ for (i = 0; i < NMTUS; ++i) {
+ unsigned int val;
+
+ t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
+ val = t3_read_reg(adap, A_TP_MTU_TABLE);
+ mtus[i] = val & 0x3fff;
+ }
+}
+
+/**
+ * t3_get_cong_cntl_tab - reads the congestion control table
+ * @adap: the adapter
+ *	@incr: where to store the additive increment values
+ *
+ * Reads the additive increments programmed into the HW congestion
+ * control table.
+ */
+void t3_get_cong_cntl_tab(adapter_t *adap,
+ unsigned short incr[NMTUS][NCCTRL_WIN])
+{
+ unsigned int mtu, w;
+
+ for (mtu = 0; mtu < NMTUS; ++mtu)
+ for (w = 0; w < NCCTRL_WIN; ++w) {
+ t3_write_reg(adap, A_TP_CCTRL_TABLE,
+ 0xffff0000 | (mtu << 5) | w);
+ incr[mtu][w] = (unsigned short)t3_read_reg(adap,
+ A_TP_CCTRL_TABLE) & 0x1fff;
+ }
+}
+
+/**
+ * t3_tp_get_mib_stats - read TP's MIB counters
+ * @adap: the adapter
+ * @tps: holds the returned counter values
+ *
+ * Returns the values of TP's MIB counters.
+ */
+void t3_tp_get_mib_stats(adapter_t *adap, struct tp_mib_stats *tps)
+{
+ t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *)tps,
+ sizeof(*tps) / sizeof(u32), 0);
+}
+
+#define ulp_region(adap, name, start, len) \
+ t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
+ t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
+ (start) + (len) - 1); \
+ start += len
+
+#define ulptx_region(adap, name, start, len) \
+ t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
+ t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
+ (start) + (len) - 1)
+
+static void ulp_config(adapter_t *adap, const struct tp_params *p)
+{
+ unsigned int m = p->chan_rx_size;
+
+ ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
+ ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
+ ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
+ ulp_region(adap, STAG, m, p->chan_rx_size / 4);
+ ulp_region(adap, RQ, m, p->chan_rx_size / 4);
+ ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
+ ulp_region(adap, PBL, m, p->chan_rx_size / 4);
+ t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
+}
+#endif
+
+void t3_config_trace_filter(adapter_t *adapter, const struct trace_params *tp,
+ int filter_index, int invert, int enable)
+{
+ u32 addr, key[4], mask[4];
+
+ key[0] = tp->sport | (tp->sip << 16);
+ key[1] = (tp->sip >> 16) | (tp->dport << 16);
+ key[2] = tp->dip;
+ key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
+
+ mask[0] = tp->sport_mask | (tp->sip_mask << 16);
+ mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
+ mask[2] = tp->dip_mask;
+ mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
+
+ if (invert)
+ key[3] |= (1 << 29);
+ if (enable)
+ key[3] |= (1 << 28);
+
+ addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
+ tp_wr_indirect(adapter, addr++, key[0]);
+ tp_wr_indirect(adapter, addr++, mask[0]);
+ tp_wr_indirect(adapter, addr++, key[1]);
+ tp_wr_indirect(adapter, addr++, mask[1]);
+ tp_wr_indirect(adapter, addr++, key[2]);
+ tp_wr_indirect(adapter, addr++, mask[2]);
+ tp_wr_indirect(adapter, addr++, key[3]);
+ tp_wr_indirect(adapter, addr, mask[3]);
+ (void) t3_read_reg(adapter, A_TP_PIO_DATA);
+}
+
+/**
+ * t3_config_sched - configure a HW traffic scheduler
+ * @adap: the adapter
+ * @kbps: target rate in Kbps
+ * @sched: the scheduler index
+ *
+ *	Configure a HW scheduler for the target rate.
+ */
+int t3_config_sched(adapter_t *adap, unsigned int kbps, int sched)
+{
+ unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
+ unsigned int clk = adap->params.vpd.cclk * 1000;
+ unsigned int selected_cpt = 0, selected_bpt = 0;
+
+ if (kbps > 0) {
+ kbps *= 125; /* -> bytes */
+ for (cpt = 1; cpt <= 255; cpt++) {
+ tps = clk / cpt;
+ bpt = (kbps + tps / 2) / tps;
+ if (bpt > 0 && bpt <= 255) {
+ v = bpt * tps;
+ delta = v >= kbps ? v - kbps : kbps - v;
+ if (delta <= mindelta) {
+ mindelta = delta;
+ selected_cpt = cpt;
+ selected_bpt = bpt;
+ }
+ } else if (selected_cpt)
+ break;
+ }
+ if (!selected_cpt)
+ return -EINVAL;
+ }
+ t3_write_reg(adap, A_TP_TM_PIO_ADDR,
+ A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
+ v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
+ if (sched & 1)
+ v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
+ else
+ v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
+ t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
+ return 0;
+}
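+
+/*
+ * Worked example (hypothetical values): for kbps = 10000 and a 200MHz core
+ * clock, the target is 10000 * 125 = 1250000 bytes/s.  The search settles
+ * on cpt = 160 (tps = 200000000 / 160 = 1250000 ticks/s) with bpt = 1,
+ * matching the target rate exactly (delta = 0).
+ */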
+
+static int tp_init(adapter_t *adap, const struct tp_params *p)
+{
+ int busy = 0;
+
+ tp_config(adap, p);
+ t3_set_vlan_accel(adap, 3, 0);
+
+ if (is_offload(adap)) {
+ tp_set_timers(adap, adap->params.vpd.cclk * 1000);
+ t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
+ busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
+ 0, 1000, 5);
+ if (busy)
+ CH_ERR(adap, "TP initialization timed out\n");
+ }
+
+ if (!busy)
+ t3_write_reg(adap, A_TP_RESET, F_TPRESET);
+ return busy;
+}
+
+int t3_mps_set_active_ports(adapter_t *adap, unsigned int port_mask)
+{
+ if (port_mask & ~((1 << adap->params.nports) - 1))
+ return -EINVAL;
+ t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
+ port_mask << S_PORT0ACTIVE);
+ return 0;
+}
+
+/*
+ * Perform the bits of HW initialization that are dependent on the number
+ * of available ports.
+ */
+static void init_hw_for_avail_ports(adapter_t *adap, int nports)
+{
+ int i;
+
+ if (nports == 1) {
+ t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
+ t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
+ t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
+ F_PORT0ACTIVE | F_ENFORCEPKT);
+ t3_write_reg(adap, A_PM1_TX_CFG, 0xc000c000);
+ } else {
+ t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
+ t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
+ t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
+ V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
+ t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
+ F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
+ F_ENFORCEPKT);
+ t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
+ t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
+ t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
+ V_TX_MOD_QUEUE_REQ_MAP(0xaa));
+ for (i = 0; i < 16; i++)
+ t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
+ (i << 16) | 0x1010);
+ }
+}
+
+static int calibrate_xgm(adapter_t *adapter)
+{
+ if (uses_xaui(adapter)) {
+ unsigned int v, i;
+
+ for (i = 0; i < 5; ++i) {
+ t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
+ (void) t3_read_reg(adapter, A_XGM_XAUI_IMP);
+ t3_os_sleep(1);
+ v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
+ if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
+ t3_write_reg(adapter, A_XGM_XAUI_IMP,
+ V_XAUIIMP(G_CALIMP(v) >> 2));
+ return 0;
+ }
+ }
+ CH_ERR(adapter, "MAC calibration failed\n");
+ return -1;
+ } else {
+ t3_write_reg(adapter, A_XGM_RGMII_IMP,
+ V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
+ t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
+ F_XGM_IMPSETUPDATE);
+ }
+ return 0;
+}
+
+static void calibrate_xgm_t3b(adapter_t *adapter)
+{
+ if (!uses_xaui(adapter)) {
+ t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
+ F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
+ t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
+ t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
+ F_XGM_IMPSETUPDATE);
+ t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
+ 0);
+ t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
+ t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
+ }
+}
+
+struct mc7_timing_params {
+ unsigned char ActToPreDly;
+ unsigned char ActToRdWrDly;
+ unsigned char PreCyc;
+ unsigned char RefCyc[5];
+ unsigned char BkCyc;
+ unsigned char WrToRdDly;
+ unsigned char RdToWrDly;
+};
+
+/*
+ * Write a value to a register and check that the write completed. These
+ * writes normally complete in a cycle or two, so one read should suffice.
+ * The very first read exists to flush the posted write to the device.
+ */
+static int wrreg_wait(adapter_t *adapter, unsigned int addr, u32 val)
+{
+ t3_write_reg(adapter, addr, val);
+ (void) t3_read_reg(adapter, addr); /* flush */
+ if (!(t3_read_reg(adapter, addr) & F_BUSY))
+ return 0;
+ CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
+ return -EIO;
+}
+
+static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
+{
+ static const unsigned int mc7_mode[] = {
+ 0x632, 0x642, 0x652, 0x432, 0x442
+ };
+ static const struct mc7_timing_params mc7_timings[] = {
+ { 12, 3, 4, { 20, 28, 34, 52, 0 }, 15, 6, 4 },
+ { 12, 4, 5, { 20, 28, 34, 52, 0 }, 16, 7, 4 },
+ { 12, 5, 6, { 20, 28, 34, 52, 0 }, 17, 8, 4 },
+ { 9, 3, 4, { 15, 21, 26, 39, 0 }, 12, 6, 4 },
+ { 9, 4, 5, { 15, 21, 26, 39, 0 }, 13, 7, 4 }
+ };
+
+ u32 val;
+ unsigned int width, density, slow, attempts;
+ adapter_t *adapter = mc7->adapter;
+ const struct mc7_timing_params *p = &mc7_timings[mem_type];
+
+ val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
+ slow = val & F_SLOW;
+ width = G_WIDTH(val);
+ density = G_DEN(val);
+
+ t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
+ val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
+ t3_os_sleep(1);
+
+ if (!slow) {
+ t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
+ (void) t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
+ t3_os_sleep(1);
+ if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
+ (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
+ CH_ERR(adapter, "%s MC7 calibration timed out\n",
+ mc7->name);
+ goto out_fail;
+ }
+ }
+
+ t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
+ V_ACTTOPREDLY(p->ActToPreDly) |
+ V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
+ V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
+ V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
+
+ t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
+ val | F_CLKEN | F_TERM150);
+ (void) t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
+
+ if (!slow)
+ t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
+ F_DLLENB);
+ udelay(1);
+
+ val = slow ? 3 : 6;
+ if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
+ wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
+ wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
+ wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
+ goto out_fail;
+
+ if (!slow) {
+ t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
+ t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL,
+ F_DLLRST, 0);
+ udelay(5);
+ }
+
+ if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
+ wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
+ wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
+ wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
+ mc7_mode[mem_type]) ||
+ wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
+ wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
+ goto out_fail;
+
+ /* clock value is in KHz */
+ mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* ns */
+ mc7_clock /= 1000000; /* KHz->MHz, ns->us */
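+	/*
+	 * The two steps above multiply the clock (in KHz) by 7812.5, i.e.
+	 * by the 7.8125us per-row refresh interval typical of DDR
+	 * (64ms / 8192 rows), so V_PREREFDIV below gets the number of
+	 * memory clocks between refreshes.  E.g. a (hypothetical) 200MHz
+	 * part: 200000 * 7812.5 / 1000000 = 1562.
+	 */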
+
+ t3_write_reg(adapter, mc7->offset + A_MC7_REF,
+ F_PERREFEN | V_PREREFDIV(mc7_clock));
+ (void) t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
+
+ t3_write_reg(adapter, mc7->offset + A_MC7_ECC,
+ F_ECCGENEN | F_ECCCHKEN);
+ t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
+ t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
+ t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
+ (mc7->size << width) - 1);
+ t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
+ (void) t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
+
+ attempts = 50;
+ do {
+ t3_os_sleep(250);
+ val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
+ } while ((val & F_BUSY) && --attempts);
+ if (val & F_BUSY) {
+ CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
+ goto out_fail;
+ }
+
+ /* Enable normal memory accesses. */
+ t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
+ return 0;
+
+ out_fail:
+ return -1;
+}
+
+static void config_pcie(adapter_t *adap)
+{
+ static const u16 ack_lat[4][6] = {
+ { 237, 416, 559, 1071, 2095, 4143 },
+ { 128, 217, 289, 545, 1057, 2081 },
+ { 73, 118, 154, 282, 538, 1050 },
+ { 67, 107, 86, 150, 278, 534 }
+ };
+ static const u16 rpl_tmr[4][6] = {
+ { 711, 1248, 1677, 3213, 6285, 12429 },
+ { 384, 651, 867, 1635, 3171, 6243 },
+ { 219, 354, 462, 846, 1614, 3150 },
+ { 201, 321, 258, 450, 834, 1602 }
+ };
+
+ u16 val;
+ unsigned int log2_width, pldsize;
+ unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
+
+ t3_os_pci_read_config_2(adap,
+ adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
+ &val);
+ pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
+
+ t3_os_pci_read_config_2(adap,
+ adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
+ &val);
+
+ fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
+ fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
+ G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
+ log2_width = fls(adap->params.pci.width) - 1;
+ acklat = ack_lat[log2_width][pldsize];
+ if (val & 1) /* check LOsEnable */
+ acklat += fst_trn_tx * 4;
+ rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
+
+ if (adap->params.rev == 0)
+ t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
+ V_T3A_ACKLAT(M_T3A_ACKLAT),
+ V_T3A_ACKLAT(acklat));
+ else
+ t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
+ V_ACKLAT(acklat));
+
+ t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
+ V_REPLAYLMT(rpllmt));
+
+ t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
+ t3_set_reg_field(adap, A_PCIE_CFG, F_PCIE_CLIDECEN, F_PCIE_CLIDECEN);
+}
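+
+/*
+ * Example (hypothetical link): a x8 link gives log2_width = fls(8) - 1 = 3
+ * and a 256-byte max payload gives pldsize = 1, so acklat starts from
+ * ack_lat[3][1] = 107 and rpllmt from rpl_tmr[3][1] = 321 before the
+ * fast-training adjustments are added in.
+ */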
+
+/*
+ * Initialize and configure T3 HW modules. This performs the
+ * initialization steps that need to be done once after a card is reset.
+ * MAC and PHY initialization is handled separately whenever a port is enabled.
+ *
+ * fw_params are passed to FW and their value is platform dependent. Only the
+ * top 8 bits are available for use, the rest must be 0.
+ */
+int t3_init_hw(adapter_t *adapter, u32 fw_params)
+{
+ int err = -EIO, attempts = 100;
+ const struct vpd_params *vpd = &adapter->params.vpd;
+
+ if (adapter->params.rev > 0)
+ calibrate_xgm_t3b(adapter);
+ else if (calibrate_xgm(adapter))
+ goto out_err;
+
+ if (vpd->mclk) {
+ partition_mem(adapter, &adapter->params.tp);
+
+ if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
+ mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
+ mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
+ t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
+ adapter->params.mc5.nfilters,
+ adapter->params.mc5.nroutes))
+ goto out_err;
+ }
+
+ if (tp_init(adapter, &adapter->params.tp))
+ goto out_err;
+
+#ifdef CONFIG_CHELSIO_T3_CORE
+ t3_tp_set_coalescing_size(adapter,
+ min(adapter->params.sge.max_pkt_size,
+ MAX_RX_COALESCING_LEN), 1);
+ t3_tp_set_max_rxsize(adapter,
+ min(adapter->params.sge.max_pkt_size, 16384U));
+ ulp_config(adapter, &adapter->params.tp);
+#endif
+ if (is_pcie(adapter))
+ config_pcie(adapter);
+ else
+ t3_set_reg_field(adapter, A_PCIX_CFG, 0, F_CLIDECEN);
+
+ t3_write_reg(adapter, A_PM1_RX_CFG, 0xf000f000);
+ init_hw_for_avail_ports(adapter, adapter->params.nports);
+ t3_sge_init(adapter, &adapter->params.sge);
+
+ t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
+ t3_write_reg(adapter, A_CIM_BOOT_CFG,
+ V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
+ (void) t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */
+
+ do { /* wait for uP to initialize */
+ t3_os_sleep(20);
+ } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
+ if (!attempts)
+ goto out_err;
+
+ err = 0;
+ out_err:
+ return err;
+}
+
+/**
+ * get_pci_mode - determine a card's PCI mode
+ * @adapter: the adapter
+ * @p: where to store the PCI settings
+ *
+ * Determines a card's PCI mode and associated parameters, such as speed
+ * and width.
+ */
+static void __devinit get_pci_mode(adapter_t *adapter, struct pci_params *p)
+{
+ static unsigned short speed_map[] = { 33, 66, 100, 133 };
+ u32 pci_mode, pcie_cap;
+
+ pcie_cap = t3_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
+ if (pcie_cap) {
+ u16 val;
+
+ p->variant = PCI_VARIANT_PCIE;
+ p->pcie_cap_addr = pcie_cap;
+ t3_os_pci_read_config_2(adapter, pcie_cap + PCI_EXP_LNKSTA,
+ &val);
+ p->width = (val >> 4) & 0x3f;
+ return;
+ }
+
+ pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
+ p->speed = speed_map[G_PCLKRANGE(pci_mode)];
+ p->width = (pci_mode & F_64BIT) ? 64 : 32;
+ pci_mode = G_PCIXINITPAT(pci_mode);
+ if (pci_mode == 0)
+ p->variant = PCI_VARIANT_PCI;
+ else if (pci_mode < 4)
+ p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
+ else if (pci_mode < 8)
+ p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
+ else
+ p->variant = PCI_VARIANT_PCIX_266_MODE2;
+}
+
+/**
+ * init_link_config - initialize a link's SW state
+ * @lc: structure holding the link state
+ *	@caps: bitmap of the link's capabilities
+ *
+ * Initializes the SW state maintained for each link, including the link's
+ * capabilities and default speed/duplex/flow-control/autonegotiation
+ * settings.
+ */
+static void __devinit init_link_config(struct link_config *lc,
+ unsigned int caps)
+{
+ lc->supported = caps;
+ lc->requested_speed = lc->speed = SPEED_INVALID;
+ lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
+ lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
+ if (lc->supported & SUPPORTED_Autoneg) {
+ lc->advertising = lc->supported;
+ lc->autoneg = AUTONEG_ENABLE;
+ lc->requested_fc |= PAUSE_AUTONEG;
+ } else {
+ lc->advertising = 0;
+ lc->autoneg = AUTONEG_DISABLE;
+ }
+}
+
+/**
+ * mc7_calc_size - calculate MC7 memory size
+ * @cfg: the MC7 configuration
+ *
+ * Calculates the size of an MC7 memory in bytes from the value of its
+ * configuration register.
+ */
+static unsigned int __devinit mc7_calc_size(u32 cfg)
+{
+ unsigned int width = G_WIDTH(cfg);
+ unsigned int banks = !!(cfg & F_BKS) + 1;
+ unsigned int org = !!(cfg & F_ORG) + 1;
+ unsigned int density = G_DEN(cfg);
+ unsigned int MBs = ((256 << density) * banks) / (org << width);
+
+ return MBs << 20;
+}
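+
+/*
+ * For example (hypothetical configuration): density = 2, two banks,
+ * org = 1 and width = 1 give ((256 << 2) * 2) / (1 << 1) = 1024, i.e.
+ * a 1GB part.
+ */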
+
+static void __devinit mc7_prep(adapter_t *adapter, struct mc7 *mc7,
+ unsigned int base_addr, const char *name)
+{
+ u32 cfg;
+
+ mc7->adapter = adapter;
+ mc7->name = name;
+ mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
+ cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
+ mc7->size = mc7_calc_size(cfg);
+ mc7->width = G_WIDTH(cfg);
+}
+
+void mac_prep(struct cmac *mac, adapter_t *adapter, int index)
+{
+ mac->adapter = adapter;
+ mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
+ mac->nucast = 1;
+
+ if (adapter->params.rev == 0 && uses_xaui(adapter)) {
+ t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
+ is_10G(adapter) ? 0x2901c04 : 0x2301c04);
+ t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
+ F_ENRGMII, 0);
+ }
+}
+
+void early_hw_init(adapter_t *adapter, const struct adapter_info *ai)
+{
+ u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);
+
+ mi1_init(adapter, ai);
+ t3_write_reg(adapter, A_I2C_CFG, /* set for 80KHz */
+ V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
+ t3_write_reg(adapter, A_T3DBG_GPIO_EN,
+ ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
+
+ if (adapter->params.rev == 0 || !uses_xaui(adapter))
+ val |= F_ENRGMII;
+
+ /* Enable MAC clocks so we can access the registers */
+ t3_write_reg(adapter, A_XGM_PORT_CFG, val);
+ (void) t3_read_reg(adapter, A_XGM_PORT_CFG);
+
+ val |= F_CLKDIVRESET_;
+ t3_write_reg(adapter, A_XGM_PORT_CFG, val);
+ (void) t3_read_reg(adapter, A_XGM_PORT_CFG);
+ t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
+ (void) t3_read_reg(adapter, A_XGM_PORT_CFG);
+}
+
+/*
+ * Reset the adapter. PCIe cards lose their config space during reset, PCI-X
+ * ones don't.
+ */
+int t3_reset_adapter(adapter_t *adapter)
+{
+ int i;
+ uint16_t devid = 0;
+
+ if (is_pcie(adapter))
+ t3_os_pci_save_state(adapter);
+ t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
+
+ /*
+	 * Delay to give the device time to reset fully.
+	 * XXX The delay time should be tuned.
+ */
+ for (i = 0; i < 10; i++) {
+ t3_os_sleep(50);
+ t3_os_pci_read_config_2(adapter, 0x00, &devid);
+ if (devid == 0x1425)
+ break;
+ }
+
+ if (devid != 0x1425)
+ return -1;
+
+ if (is_pcie(adapter))
+ t3_os_pci_restore_state(adapter);
+ return 0;
+}
+
+/*
+ * Initialize adapter SW state for the various HW modules, set initial values
+ * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
+ * interface.
+ */
+int __devinit t3_prep_adapter(adapter_t *adapter,
+ const struct adapter_info *ai, int reset)
+{
+ int ret;
+ unsigned int i, j = 0;
+
+ get_pci_mode(adapter, &adapter->params.pci);
+
+ adapter->params.info = ai;
+ adapter->params.nports = ai->nports;
+ adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
+ adapter->params.linkpoll_period = 0;
+ adapter->params.stats_update_period = is_10G(adapter) ?
+ MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
+ adapter->params.pci.vpd_cap_addr =
+ t3_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
+
+ ret = get_vpd_params(adapter, &adapter->params.vpd);
+ if (ret < 0)
+ return ret;
+
+ if (reset && t3_reset_adapter(adapter))
+ return -1;
+
+ t3_sge_prep(adapter, &adapter->params.sge);
+
+ if (adapter->params.vpd.mclk) {
+ struct tp_params *p = &adapter->params.tp;
+
+ mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
+ mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
+ mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
+
+ p->nchan = ai->nports;
+ p->pmrx_size = t3_mc7_size(&adapter->pmrx);
+ p->pmtx_size = t3_mc7_size(&adapter->pmtx);
+ p->cm_size = t3_mc7_size(&adapter->cm);
+ p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */
+ p->chan_tx_size = p->pmtx_size / p->nchan;
+ p->rx_pg_size = 64 * 1024;
+ p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
+ p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
+ p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
+ p->ntimer_qs = p->cm_size >= (128 << 20) ||
+ adapter->params.rev > 0 ? 12 : 6;
+
+ adapter->params.mc5.nservers = DEFAULT_NSERVERS;
+ adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
+ DEFAULT_NFILTERS : 0;
+ adapter->params.mc5.nroutes = 0;
+ t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
+
+#ifdef CONFIG_CHELSIO_T3_CORE
+ init_mtus(adapter->params.mtus);
+ init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
+#endif
+ }
+
+ early_hw_init(adapter, ai);
+
+ for_each_port(adapter, i) {
+ u8 hw_addr[6];
+ struct port_info *p = &adapter->port[i];
+
+ while (!adapter->params.vpd.port_type[j])
+ ++j;
+
+ p->port_type = &port_types[adapter->params.vpd.port_type[j]];
+ p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
+ ai->mdio_ops);
+ mac_prep(&p->mac, adapter, j);
+ ++j;
+
+ /*
+ * The VPD EEPROM stores the base Ethernet address for the
+ * card. A port's address is derived from the base by adding
+ * the port's index to the base's low octet.
+ */
+ memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
+ hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
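+		/*
+		 * e.g. (hypothetical) base 00:07:43:ab:cd:10 yields
+		 * 00:07:43:ab:cd:11 for port 1.
+		 */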
+
+ t3_os_set_hw_addr(adapter, i, hw_addr);
+ init_link_config(&p->link_config, p->port_type->caps);
+ p->phy.ops->power_down(&p->phy, 1);
+ if (!(p->port_type->caps & SUPPORTED_IRQ))
+ adapter->params.linkpoll_period = 10;
+ }
+
+ return 0;
+}
+
+void t3_led_ready(adapter_t *adapter)
+{
+ t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
+ F_GPIO0_OUT_VAL);
+}
+
+void t3_port_failover(adapter_t *adapter, int port)
+{
+ u32 val;
+
+ val = port ? F_PORT1ACTIVE : F_PORT0ACTIVE;
+ t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
+ val);
+}
+
+void t3_failover_done(adapter_t *adapter, int port)
+{
+ t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
+ F_PORT0ACTIVE | F_PORT1ACTIVE);
+}
+
+void t3_failover_clear(adapter_t *adapter)
+{
+ t3_set_reg_field(adapter, A_MPS_CFG, F_PORT0ACTIVE | F_PORT1ACTIVE,
+ F_PORT0ACTIVE | F_PORT1ACTIVE);
+}
diff --git a/sys/dev/cxgb/common/cxgb_tcb.h b/sys/dev/cxgb/common/cxgb_tcb.h
new file mode 100644
index 0000000..82a67ea
--- /dev/null
+++ b/sys/dev/cxgb/common/cxgb_tcb.h
@@ -0,0 +1,678 @@
+/**************************************************************************
+
+Copyright (c) 2007, Chelsio Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Chelsio Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+$FreeBSD$
+
+***************************************************************************/
+
+/* This file is automatically generated --- do not edit */
+
+#ifndef _TCB_DEFS_H
+#define _TCB_DEFS_H
+
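+/*
+ * Naming convention for the generated constants below: W_ is the (first)
+ * 32-bit TCB word holding a field, S_ the field's starting bit within
+ * that word, M_ the unshifted field mask, and V_ shifts a value into
+ * field position; wide fields continue into the following word(s).
+ */
+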
+#define W_TCB_T_STATE 0
+#define S_TCB_T_STATE 0
+#define M_TCB_T_STATE 0xfULL
+#define V_TCB_T_STATE(x) ((x) << S_TCB_T_STATE)
+
+#define W_TCB_TIMER 0
+#define S_TCB_TIMER 4
+#define M_TCB_TIMER 0x1ULL
+#define V_TCB_TIMER(x) ((x) << S_TCB_TIMER)
+
+#define W_TCB_DACK_TIMER 0
+#define S_TCB_DACK_TIMER 5
+#define M_TCB_DACK_TIMER 0x1ULL
+#define V_TCB_DACK_TIMER(x) ((x) << S_TCB_DACK_TIMER)
+
+#define W_TCB_DEL_FLAG 0
+#define S_TCB_DEL_FLAG 6
+#define M_TCB_DEL_FLAG 0x1ULL
+#define V_TCB_DEL_FLAG(x) ((x) << S_TCB_DEL_FLAG)
+
+#define W_TCB_L2T_IX 0
+#define S_TCB_L2T_IX 7
+#define M_TCB_L2T_IX 0x7ffULL
+#define V_TCB_L2T_IX(x) ((x) << S_TCB_L2T_IX)
+
+#define W_TCB_SMAC_SEL 0
+#define S_TCB_SMAC_SEL 18
+#define M_TCB_SMAC_SEL 0x3ULL
+#define V_TCB_SMAC_SEL(x) ((x) << S_TCB_SMAC_SEL)
+
+#define W_TCB_TOS 0
+#define S_TCB_TOS 20
+#define M_TCB_TOS 0x3fULL
+#define V_TCB_TOS(x) ((x) << S_TCB_TOS)
+
+#define W_TCB_MAX_RT 0
+#define S_TCB_MAX_RT 26
+#define M_TCB_MAX_RT 0xfULL
+#define V_TCB_MAX_RT(x) ((x) << S_TCB_MAX_RT)
+
+#define W_TCB_T_RXTSHIFT 0
+#define S_TCB_T_RXTSHIFT 30
+#define M_TCB_T_RXTSHIFT 0xfULL
+#define V_TCB_T_RXTSHIFT(x) ((x) << S_TCB_T_RXTSHIFT)
+
+#define W_TCB_T_DUPACKS 1
+#define S_TCB_T_DUPACKS 2
+#define M_TCB_T_DUPACKS 0xfULL
+#define V_TCB_T_DUPACKS(x) ((x) << S_TCB_T_DUPACKS)
+
+#define W_TCB_T_MAXSEG 1
+#define S_TCB_T_MAXSEG 6
+#define M_TCB_T_MAXSEG 0xfULL
+#define V_TCB_T_MAXSEG(x) ((x) << S_TCB_T_MAXSEG)
+
+#define W_TCB_T_FLAGS1 1
+#define S_TCB_T_FLAGS1 10
+#define M_TCB_T_FLAGS1 0xffffffffULL
+#define V_TCB_T_FLAGS1(x) ((x) << S_TCB_T_FLAGS1)
+
+#define W_TCB_T_FLAGS2 2
+#define S_TCB_T_FLAGS2 10
+#define M_TCB_T_FLAGS2 0x7fULL
+#define V_TCB_T_FLAGS2(x) ((x) << S_TCB_T_FLAGS2)
+
+#define W_TCB_SND_SCALE 2
+#define S_TCB_SND_SCALE 17
+#define M_TCB_SND_SCALE 0xfULL
+#define V_TCB_SND_SCALE(x) ((x) << S_TCB_SND_SCALE)
+
+#define W_TCB_RCV_SCALE 2
+#define S_TCB_RCV_SCALE 21
+#define M_TCB_RCV_SCALE 0xfULL
+#define V_TCB_RCV_SCALE(x) ((x) << S_TCB_RCV_SCALE)
+
+#define W_TCB_SND_UNA_RAW 2
+#define S_TCB_SND_UNA_RAW 25
+#define M_TCB_SND_UNA_RAW 0x7ffffffULL
+#define V_TCB_SND_UNA_RAW(x) ((x) << S_TCB_SND_UNA_RAW)
+
+#define W_TCB_SND_NXT_RAW 3
+#define S_TCB_SND_NXT_RAW 20
+#define M_TCB_SND_NXT_RAW 0x7ffffffULL
+#define V_TCB_SND_NXT_RAW(x) ((x) << S_TCB_SND_NXT_RAW)
+
+#define W_TCB_RCV_NXT 4
+#define S_TCB_RCV_NXT 15
+#define M_TCB_RCV_NXT 0xffffffffULL
+#define V_TCB_RCV_NXT(x) ((x) << S_TCB_RCV_NXT)
+
+#define W_TCB_RCV_ADV 5
+#define S_TCB_RCV_ADV 15
+#define M_TCB_RCV_ADV 0xffffULL
+#define V_TCB_RCV_ADV(x) ((x) << S_TCB_RCV_ADV)
+
+#define W_TCB_SND_MAX_RAW 5
+#define S_TCB_SND_MAX_RAW 31
+#define M_TCB_SND_MAX_RAW 0x7ffffffULL
+#define V_TCB_SND_MAX_RAW(x) ((x) << S_TCB_SND_MAX_RAW)
+
+#define W_TCB_SND_CWND 6
+#define S_TCB_SND_CWND 26
+#define M_TCB_SND_CWND 0x7ffffffULL
+#define V_TCB_SND_CWND(x) ((x) << S_TCB_SND_CWND)
+
+#define W_TCB_SND_SSTHRESH 7
+#define S_TCB_SND_SSTHRESH 21
+#define M_TCB_SND_SSTHRESH 0x7ffffffULL
+#define V_TCB_SND_SSTHRESH(x) ((x) << S_TCB_SND_SSTHRESH)
+
+#define W_TCB_T_RTT_TS_RECENT_AGE 8
+#define S_TCB_T_RTT_TS_RECENT_AGE 16
+#define M_TCB_T_RTT_TS_RECENT_AGE 0xffffffffULL
+#define V_TCB_T_RTT_TS_RECENT_AGE(x) ((x) << S_TCB_T_RTT_TS_RECENT_AGE)
+
+#define W_TCB_T_RTSEQ_RECENT 9
+#define S_TCB_T_RTSEQ_RECENT 16
+#define M_TCB_T_RTSEQ_RECENT 0xffffffffULL
+#define V_TCB_T_RTSEQ_RECENT(x) ((x) << S_TCB_T_RTSEQ_RECENT)
+
+#define W_TCB_T_SRTT 10
+#define S_TCB_T_SRTT 16
+#define M_TCB_T_SRTT 0xffffULL
+#define V_TCB_T_SRTT(x) ((x) << S_TCB_T_SRTT)
+
+#define W_TCB_T_RTTVAR 11
+#define S_TCB_T_RTTVAR 0
+#define M_TCB_T_RTTVAR 0xffffULL
+#define V_TCB_T_RTTVAR(x) ((x) << S_TCB_T_RTTVAR)
+
+#define W_TCB_TS_LAST_ACK_SENT_RAW 11
+#define S_TCB_TS_LAST_ACK_SENT_RAW 16
+#define M_TCB_TS_LAST_ACK_SENT_RAW 0x7ffffffULL
+#define V_TCB_TS_LAST_ACK_SENT_RAW(x) ((x) << S_TCB_TS_LAST_ACK_SENT_RAW)
+
+#define W_TCB_DIP 12
+#define S_TCB_DIP 11
+#define M_TCB_DIP 0xffffffffULL
+#define V_TCB_DIP(x) ((x) << S_TCB_DIP)
+
+#define W_TCB_SIP 13
+#define S_TCB_SIP 11
+#define M_TCB_SIP 0xffffffffULL
+#define V_TCB_SIP(x) ((x) << S_TCB_SIP)
+
+#define W_TCB_DP 14
+#define S_TCB_DP 11
+#define M_TCB_DP 0xffffULL
+#define V_TCB_DP(x) ((x) << S_TCB_DP)
+
+#define W_TCB_SP 14
+#define S_TCB_SP 27
+#define M_TCB_SP 0xffffULL
+#define V_TCB_SP(x) ((x) << S_TCB_SP)
+
+#define W_TCB_TIMESTAMP 15
+#define S_TCB_TIMESTAMP 11
+#define M_TCB_TIMESTAMP 0xffffffffULL
+#define V_TCB_TIMESTAMP(x) ((x) << S_TCB_TIMESTAMP)
+
+#define W_TCB_TIMESTAMP_OFFSET 16
+#define S_TCB_TIMESTAMP_OFFSET 11
+#define M_TCB_TIMESTAMP_OFFSET 0xfULL
+#define V_TCB_TIMESTAMP_OFFSET(x) ((x) << S_TCB_TIMESTAMP_OFFSET)
+
+#define W_TCB_TX_MAX 16
+#define S_TCB_TX_MAX 15
+#define M_TCB_TX_MAX 0xffffffffULL
+#define V_TCB_TX_MAX(x) ((x) << S_TCB_TX_MAX)
+
+#define W_TCB_TX_HDR_PTR_RAW 17
+#define S_TCB_TX_HDR_PTR_RAW 15
+#define M_TCB_TX_HDR_PTR_RAW 0x1ffffULL
+#define V_TCB_TX_HDR_PTR_RAW(x) ((x) << S_TCB_TX_HDR_PTR_RAW)
+
+#define W_TCB_TX_LAST_PTR_RAW 18
+#define S_TCB_TX_LAST_PTR_RAW 0
+#define M_TCB_TX_LAST_PTR_RAW 0x1ffffULL
+#define V_TCB_TX_LAST_PTR_RAW(x) ((x) << S_TCB_TX_LAST_PTR_RAW)
+
+#define W_TCB_TX_COMPACT 18
+#define S_TCB_TX_COMPACT 17
+#define M_TCB_TX_COMPACT 0x1ULL
+#define V_TCB_TX_COMPACT(x) ((x) << S_TCB_TX_COMPACT)
+
+#define W_TCB_RX_COMPACT 18
+#define S_TCB_RX_COMPACT 18
+#define M_TCB_RX_COMPACT 0x1ULL
+#define V_TCB_RX_COMPACT(x) ((x) << S_TCB_RX_COMPACT)
+
+#define W_TCB_RCV_WND 18
+#define S_TCB_RCV_WND 19
+#define M_TCB_RCV_WND 0x7ffffffULL
+#define V_TCB_RCV_WND(x) ((x) << S_TCB_RCV_WND)
+
+#define W_TCB_RX_HDR_OFFSET 19
+#define S_TCB_RX_HDR_OFFSET 14
+#define M_TCB_RX_HDR_OFFSET 0x7ffffffULL
+#define V_TCB_RX_HDR_OFFSET(x) ((x) << S_TCB_RX_HDR_OFFSET)
+
+#define W_TCB_RX_FRAG0_START_IDX_RAW 20
+#define S_TCB_RX_FRAG0_START_IDX_RAW 9
+#define M_TCB_RX_FRAG0_START_IDX_RAW 0x7ffffffULL
+#define V_TCB_RX_FRAG0_START_IDX_RAW(x) ((x) << S_TCB_RX_FRAG0_START_IDX_RAW)
+
+#define W_TCB_RX_FRAG1_START_IDX_OFFSET 21
+#define S_TCB_RX_FRAG1_START_IDX_OFFSET 4
+#define M_TCB_RX_FRAG1_START_IDX_OFFSET 0x7ffffffULL
+#define V_TCB_RX_FRAG1_START_IDX_OFFSET(x) ((x) << S_TCB_RX_FRAG1_START_IDX_OFFSET)
+
+#define W_TCB_RX_FRAG0_LEN 21
+#define S_TCB_RX_FRAG0_LEN 31
+#define M_TCB_RX_FRAG0_LEN 0x7ffffffULL
+#define V_TCB_RX_FRAG0_LEN(x) ((x) << S_TCB_RX_FRAG0_LEN)
+
+#define W_TCB_RX_FRAG1_LEN 22
+#define S_TCB_RX_FRAG1_LEN 26
+#define M_TCB_RX_FRAG1_LEN 0x7ffffffULL
+#define V_TCB_RX_FRAG1_LEN(x) ((x) << S_TCB_RX_FRAG1_LEN)
+
+#define W_TCB_NEWRENO_RECOVER 23
+#define S_TCB_NEWRENO_RECOVER 21
+#define M_TCB_NEWRENO_RECOVER 0x7ffffffULL
+#define V_TCB_NEWRENO_RECOVER(x) ((x) << S_TCB_NEWRENO_RECOVER)
+
+#define W_TCB_PDU_HAVE_LEN 24
+#define S_TCB_PDU_HAVE_LEN 16
+#define M_TCB_PDU_HAVE_LEN 0x1ULL
+#define V_TCB_PDU_HAVE_LEN(x) ((x) << S_TCB_PDU_HAVE_LEN)
+
+#define W_TCB_PDU_LEN 24
+#define S_TCB_PDU_LEN 17
+#define M_TCB_PDU_LEN 0xffffULL
+#define V_TCB_PDU_LEN(x) ((x) << S_TCB_PDU_LEN)
+
+#define W_TCB_RX_QUIESCE 25
+#define S_TCB_RX_QUIESCE 1
+#define M_TCB_RX_QUIESCE 0x1ULL
+#define V_TCB_RX_QUIESCE(x) ((x) << S_TCB_RX_QUIESCE)
+
+#define W_TCB_RX_PTR_RAW 25
+#define S_TCB_RX_PTR_RAW 2
+#define M_TCB_RX_PTR_RAW 0x1ffffULL
+#define V_TCB_RX_PTR_RAW(x) ((x) << S_TCB_RX_PTR_RAW)
+
+#define W_TCB_CPU_NO 25
+#define S_TCB_CPU_NO 19
+#define M_TCB_CPU_NO 0x7fULL
+#define V_TCB_CPU_NO(x) ((x) << S_TCB_CPU_NO)
+
+#define W_TCB_ULP_TYPE 25
+#define S_TCB_ULP_TYPE 26
+#define M_TCB_ULP_TYPE 0xfULL
+#define V_TCB_ULP_TYPE(x) ((x) << S_TCB_ULP_TYPE)
+
+#define W_TCB_RX_FRAG1_PTR_RAW 25
+#define S_TCB_RX_FRAG1_PTR_RAW 30
+#define M_TCB_RX_FRAG1_PTR_RAW 0x1ffffULL
+#define V_TCB_RX_FRAG1_PTR_RAW(x) ((x) << S_TCB_RX_FRAG1_PTR_RAW)
+
+#define W_TCB_RX_FRAG2_START_IDX_OFFSET_RAW 26
+#define S_TCB_RX_FRAG2_START_IDX_OFFSET_RAW 15
+#define M_TCB_RX_FRAG2_START_IDX_OFFSET_RAW 0x7ffffffULL
+#define V_TCB_RX_FRAG2_START_IDX_OFFSET_RAW(x) ((x) << S_TCB_RX_FRAG2_START_IDX_OFFSET_RAW)
+
+#define W_TCB_RX_FRAG2_PTR_RAW 27
+#define S_TCB_RX_FRAG2_PTR_RAW 10
+#define M_TCB_RX_FRAG2_PTR_RAW 0x1ffffULL
+#define V_TCB_RX_FRAG2_PTR_RAW(x) ((x) << S_TCB_RX_FRAG2_PTR_RAW)
+
+#define W_TCB_RX_FRAG2_LEN_RAW 27
+#define S_TCB_RX_FRAG2_LEN_RAW 27
+#define M_TCB_RX_FRAG2_LEN_RAW 0x7ffffffULL
+#define V_TCB_RX_FRAG2_LEN_RAW(x) ((x) << S_TCB_RX_FRAG2_LEN_RAW)
+
+#define W_TCB_RX_FRAG3_PTR_RAW 28
+#define S_TCB_RX_FRAG3_PTR_RAW 22
+#define M_TCB_RX_FRAG3_PTR_RAW 0x1ffffULL
+#define V_TCB_RX_FRAG3_PTR_RAW(x) ((x) << S_TCB_RX_FRAG3_PTR_RAW)
+
+#define W_TCB_RX_FRAG3_LEN_RAW 29
+#define S_TCB_RX_FRAG3_LEN_RAW 7
+#define M_TCB_RX_FRAG3_LEN_RAW 0x7ffffffULL
+#define V_TCB_RX_FRAG3_LEN_RAW(x) ((x) << S_TCB_RX_FRAG3_LEN_RAW)
+
+#define W_TCB_RX_FRAG3_START_IDX_OFFSET_RAW 30
+#define S_TCB_RX_FRAG3_START_IDX_OFFSET_RAW 2
+#define M_TCB_RX_FRAG3_START_IDX_OFFSET_RAW 0x7ffffffULL
+#define V_TCB_RX_FRAG3_START_IDX_OFFSET_RAW(x) ((x) << S_TCB_RX_FRAG3_START_IDX_OFFSET_RAW)
+
+#define W_TCB_PDU_HDR_LEN 30
+#define S_TCB_PDU_HDR_LEN 29
+#define M_TCB_PDU_HDR_LEN 0xffULL
+#define V_TCB_PDU_HDR_LEN(x) ((x) << S_TCB_PDU_HDR_LEN)
+
+#define W_TCB_SLUSH1 31
+#define S_TCB_SLUSH1 5
+#define M_TCB_SLUSH1 0x7ffffULL
+#define V_TCB_SLUSH1(x) ((x) << S_TCB_SLUSH1)
+
+#define W_TCB_ULP_RAW 31
+#define S_TCB_ULP_RAW 24
+#define M_TCB_ULP_RAW 0xffULL
+#define V_TCB_ULP_RAW(x) ((x) << S_TCB_ULP_RAW)
+
+#define W_TCB_DDP_RDMAP_VERSION 25
+#define S_TCB_DDP_RDMAP_VERSION 30
+#define M_TCB_DDP_RDMAP_VERSION 0x1ULL
+#define V_TCB_DDP_RDMAP_VERSION(x) ((x) << S_TCB_DDP_RDMAP_VERSION)
+
+#define W_TCB_MARKER_ENABLE_RX 25
+#define S_TCB_MARKER_ENABLE_RX 31
+#define M_TCB_MARKER_ENABLE_RX 0x1ULL
+#define V_TCB_MARKER_ENABLE_RX(x) ((x) << S_TCB_MARKER_ENABLE_RX)
+
+#define W_TCB_MARKER_ENABLE_TX 26
+#define S_TCB_MARKER_ENABLE_TX 0
+#define M_TCB_MARKER_ENABLE_TX 0x1ULL
+#define V_TCB_MARKER_ENABLE_TX(x) ((x) << S_TCB_MARKER_ENABLE_TX)
+
+#define W_TCB_CRC_ENABLE 26
+#define S_TCB_CRC_ENABLE 1
+#define M_TCB_CRC_ENABLE 0x1ULL
+#define V_TCB_CRC_ENABLE(x) ((x) << S_TCB_CRC_ENABLE)
+
+#define W_TCB_IRS_ULP 26
+#define S_TCB_IRS_ULP 2
+#define M_TCB_IRS_ULP 0x1ffULL
+#define V_TCB_IRS_ULP(x) ((x) << S_TCB_IRS_ULP)
+
+#define W_TCB_ISS_ULP 26
+#define S_TCB_ISS_ULP 11
+#define M_TCB_ISS_ULP 0x1ffULL
+#define V_TCB_ISS_ULP(x) ((x) << S_TCB_ISS_ULP)
+
+#define W_TCB_TX_PDU_LEN 26
+#define S_TCB_TX_PDU_LEN 20
+#define M_TCB_TX_PDU_LEN 0x3fffULL
+#define V_TCB_TX_PDU_LEN(x) ((x) << S_TCB_TX_PDU_LEN)
+
+#define W_TCB_TX_PDU_OUT 27
+#define S_TCB_TX_PDU_OUT 2
+#define M_TCB_TX_PDU_OUT 0x1ULL
+#define V_TCB_TX_PDU_OUT(x) ((x) << S_TCB_TX_PDU_OUT)
+
+#define W_TCB_CQ_IDX_SQ 27
+#define S_TCB_CQ_IDX_SQ 3
+#define M_TCB_CQ_IDX_SQ 0xffffULL
+#define V_TCB_CQ_IDX_SQ(x) ((x) << S_TCB_CQ_IDX_SQ)
+
+#define W_TCB_CQ_IDX_RQ 27
+#define S_TCB_CQ_IDX_RQ 19
+#define M_TCB_CQ_IDX_RQ 0xffffULL
+#define V_TCB_CQ_IDX_RQ(x) ((x) << S_TCB_CQ_IDX_RQ)
+
+#define W_TCB_QP_ID 28
+#define S_TCB_QP_ID 3
+#define M_TCB_QP_ID 0xffffULL
+#define V_TCB_QP_ID(x) ((x) << S_TCB_QP_ID)
+
+#define W_TCB_PD_ID 28
+#define S_TCB_PD_ID 19
+#define M_TCB_PD_ID 0xffffULL
+#define V_TCB_PD_ID(x) ((x) << S_TCB_PD_ID)
+
+#define W_TCB_STAG 29
+#define S_TCB_STAG 3
+#define M_TCB_STAG 0xffffffffULL
+#define V_TCB_STAG(x) ((x) << S_TCB_STAG)
+
+#define W_TCB_RQ_START 30
+#define S_TCB_RQ_START 3
+#define M_TCB_RQ_START 0x3ffffffULL
+#define V_TCB_RQ_START(x) ((x) << S_TCB_RQ_START)
+
+#define W_TCB_RQ_MSN 30
+#define S_TCB_RQ_MSN 29
+#define M_TCB_RQ_MSN 0x3ffULL
+#define V_TCB_RQ_MSN(x) ((x) << S_TCB_RQ_MSN)
+
+#define W_TCB_RQ_MAX_OFFSET 31
+#define S_TCB_RQ_MAX_OFFSET 7
+#define M_TCB_RQ_MAX_OFFSET 0xfULL
+#define V_TCB_RQ_MAX_OFFSET(x) ((x) << S_TCB_RQ_MAX_OFFSET)
+
+#define W_TCB_RQ_WRITE_PTR 31
+#define S_TCB_RQ_WRITE_PTR 11
+#define M_TCB_RQ_WRITE_PTR 0x3ffULL
+#define V_TCB_RQ_WRITE_PTR(x) ((x) << S_TCB_RQ_WRITE_PTR)
+
+#define W_TCB_INB_WRITE_PERM 31
+#define S_TCB_INB_WRITE_PERM 21
+#define M_TCB_INB_WRITE_PERM 0x1ULL
+#define V_TCB_INB_WRITE_PERM(x) ((x) << S_TCB_INB_WRITE_PERM)
+
+#define W_TCB_INB_READ_PERM 31
+#define S_TCB_INB_READ_PERM 22
+#define M_TCB_INB_READ_PERM 0x1ULL
+#define V_TCB_INB_READ_PERM(x) ((x) << S_TCB_INB_READ_PERM)
+
+#define W_TCB_ORD_L_BIT_VLD 31
+#define S_TCB_ORD_L_BIT_VLD 23
+#define M_TCB_ORD_L_BIT_VLD 0x1ULL
+#define V_TCB_ORD_L_BIT_VLD(x) ((x) << S_TCB_ORD_L_BIT_VLD)
+
+#define W_TCB_RDMAP_OPCODE 31
+#define S_TCB_RDMAP_OPCODE 24
+#define M_TCB_RDMAP_OPCODE 0xfULL
+#define V_TCB_RDMAP_OPCODE(x) ((x) << S_TCB_RDMAP_OPCODE)
+
+#define W_TCB_TX_FLUSH 31
+#define S_TCB_TX_FLUSH 28
+#define M_TCB_TX_FLUSH 0x1ULL
+#define V_TCB_TX_FLUSH(x) ((x) << S_TCB_TX_FLUSH)
+
+#define W_TCB_TX_OOS_RXMT 31
+#define S_TCB_TX_OOS_RXMT 29
+#define M_TCB_TX_OOS_RXMT 0x1ULL
+#define V_TCB_TX_OOS_RXMT(x) ((x) << S_TCB_TX_OOS_RXMT)
+
+#define W_TCB_TX_OOS_TXMT 31
+#define S_TCB_TX_OOS_TXMT 30
+#define M_TCB_TX_OOS_TXMT 0x1ULL
+#define V_TCB_TX_OOS_TXMT(x) ((x) << S_TCB_TX_OOS_TXMT)
+
+#define W_TCB_SLUSH_AUX2 31
+#define S_TCB_SLUSH_AUX2 31
+#define M_TCB_SLUSH_AUX2 0x1ULL
+#define V_TCB_SLUSH_AUX2(x) ((x) << S_TCB_SLUSH_AUX2)
+
+#define W_TCB_RX_FRAG1_PTR_RAW2 25
+#define S_TCB_RX_FRAG1_PTR_RAW2 30
+#define M_TCB_RX_FRAG1_PTR_RAW2 0x1ffffULL
+#define V_TCB_RX_FRAG1_PTR_RAW2(x) ((x) << S_TCB_RX_FRAG1_PTR_RAW2)
+
+#define W_TCB_RX_DDP_FLAGS 26
+#define S_TCB_RX_DDP_FLAGS 15
+#define M_TCB_RX_DDP_FLAGS 0xffffULL
+#define V_TCB_RX_DDP_FLAGS(x) ((x) << S_TCB_RX_DDP_FLAGS)
+
+#define W_TCB_SLUSH_AUX3 26
+#define S_TCB_SLUSH_AUX3 31
+#define M_TCB_SLUSH_AUX3 0x1ffULL
+#define V_TCB_SLUSH_AUX3(x) ((x) << S_TCB_SLUSH_AUX3)
+
+#define W_TCB_RX_DDP_BUF0_OFFSET 27
+#define S_TCB_RX_DDP_BUF0_OFFSET 8
+#define M_TCB_RX_DDP_BUF0_OFFSET 0x3fffffULL
+#define V_TCB_RX_DDP_BUF0_OFFSET(x) ((x) << S_TCB_RX_DDP_BUF0_OFFSET)
+
+#define W_TCB_RX_DDP_BUF0_LEN 27
+#define S_TCB_RX_DDP_BUF0_LEN 30
+#define M_TCB_RX_DDP_BUF0_LEN 0x3fffffULL
+#define V_TCB_RX_DDP_BUF0_LEN(x) ((x) << S_TCB_RX_DDP_BUF0_LEN)
+
+#define W_TCB_RX_DDP_BUF1_OFFSET 28
+#define S_TCB_RX_DDP_BUF1_OFFSET 20
+#define M_TCB_RX_DDP_BUF1_OFFSET 0x3fffffULL
+#define V_TCB_RX_DDP_BUF1_OFFSET(x) ((x) << S_TCB_RX_DDP_BUF1_OFFSET)
+
+#define W_TCB_RX_DDP_BUF1_LEN 29
+#define S_TCB_RX_DDP_BUF1_LEN 10
+#define M_TCB_RX_DDP_BUF1_LEN 0x3fffffULL
+#define V_TCB_RX_DDP_BUF1_LEN(x) ((x) << S_TCB_RX_DDP_BUF1_LEN)
+
+#define W_TCB_RX_DDP_BUF0_TAG 30
+#define S_TCB_RX_DDP_BUF0_TAG 0
+#define M_TCB_RX_DDP_BUF0_TAG 0xffffffffULL
+#define V_TCB_RX_DDP_BUF0_TAG(x) ((x) << S_TCB_RX_DDP_BUF0_TAG)
+
+#define W_TCB_RX_DDP_BUF1_TAG 31
+#define S_TCB_RX_DDP_BUF1_TAG 0
+#define M_TCB_RX_DDP_BUF1_TAG 0xffffffffULL
+#define V_TCB_RX_DDP_BUF1_TAG(x) ((x) << S_TCB_RX_DDP_BUF1_TAG)
+
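+/*
+ * The TF_* constants below are bit positions of single-bit TCB flags.
+ * Several of the shifts exceed 31, so the V_ macros are meant to be
+ * applied to 64-bit operands, e.g. V_TF_NAGLE(1ULL) to form the Nagle
+ * bit's mask.
+ */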
+#define S_TF_DACK 10
+#define V_TF_DACK(x) ((x) << S_TF_DACK)
+
+#define S_TF_NAGLE 11
+#define V_TF_NAGLE(x) ((x) << S_TF_NAGLE)
+
+#define S_TF_RECV_SCALE 12
+#define V_TF_RECV_SCALE(x) ((x) << S_TF_RECV_SCALE)
+
+#define S_TF_RECV_TSTMP 13
+#define V_TF_RECV_TSTMP(x) ((x) << S_TF_RECV_TSTMP)
+
+#define S_TF_RECV_SACK 14
+#define V_TF_RECV_SACK(x) ((x) << S_TF_RECV_SACK)
+
+#define S_TF_TURBO 15
+#define V_TF_TURBO(x) ((x) << S_TF_TURBO)
+
+#define S_TF_KEEPALIVE 16
+#define V_TF_KEEPALIVE(x) ((x) << S_TF_KEEPALIVE)
+
+#define S_TF_TCAM_BYPASS 17
+#define V_TF_TCAM_BYPASS(x) ((x) << S_TF_TCAM_BYPASS)
+
+#define S_TF_CORE_FIN 18
+#define V_TF_CORE_FIN(x) ((x) << S_TF_CORE_FIN)
+
+#define S_TF_CORE_MORE 19
+#define V_TF_CORE_MORE(x) ((x) << S_TF_CORE_MORE)
+
+#define S_TF_MIGRATING 20
+#define V_TF_MIGRATING(x) ((x) << S_TF_MIGRATING)
+
+#define S_TF_ACTIVE_OPEN 21
+#define V_TF_ACTIVE_OPEN(x) ((x) << S_TF_ACTIVE_OPEN)
+
+#define S_TF_ASK_MODE 22
+#define V_TF_ASK_MODE(x) ((x) << S_TF_ASK_MODE)
+
+#define S_TF_NON_OFFLOAD 23
+#define V_TF_NON_OFFLOAD(x) ((x) << S_TF_NON_OFFLOAD)
+
+#define S_TF_MOD_SCHD 24
+#define V_TF_MOD_SCHD(x) ((x) << S_TF_MOD_SCHD)
+
+#define S_TF_MOD_SCHD_REASON0 25
+#define V_TF_MOD_SCHD_REASON0(x) ((x) << S_TF_MOD_SCHD_REASON0)
+
+#define S_TF_MOD_SCHD_REASON1 26
+#define V_TF_MOD_SCHD_REASON1(x) ((x) << S_TF_MOD_SCHD_REASON1)
+
+#define S_TF_MOD_SCHD_RX 27
+#define V_TF_MOD_SCHD_RX(x) ((x) << S_TF_MOD_SCHD_RX)
+
+#define S_TF_CORE_PUSH 28
+#define V_TF_CORE_PUSH(x) ((x) << S_TF_CORE_PUSH)
+
+#define S_TF_RCV_COALESCE_ENABLE 29
+#define V_TF_RCV_COALESCE_ENABLE(x) ((x) << S_TF_RCV_COALESCE_ENABLE)
+
+#define S_TF_RCV_COALESCE_PUSH 30
+#define V_TF_RCV_COALESCE_PUSH(x) ((x) << S_TF_RCV_COALESCE_PUSH)
+
+#define S_TF_RCV_COALESCE_LAST_PSH 31
+#define V_TF_RCV_COALESCE_LAST_PSH(x) ((x) << S_TF_RCV_COALESCE_LAST_PSH)
+
+#define S_TF_RCV_COALESCE_HEARTBEAT 32
+#define V_TF_RCV_COALESCE_HEARTBEAT(x) ((x) << S_TF_RCV_COALESCE_HEARTBEAT)
+
+#define S_TF_LOCK_TID 33
+#define V_TF_LOCK_TID(x) ((x) << S_TF_LOCK_TID)
+
+#define S_TF_DACK_MSS 34
+#define V_TF_DACK_MSS(x) ((x) << S_TF_DACK_MSS)
+
+#define S_TF_CCTRL_SEL0 35
+#define V_TF_CCTRL_SEL0(x) ((x) << S_TF_CCTRL_SEL0)
+
+#define S_TF_CCTRL_SEL1 36
+#define V_TF_CCTRL_SEL1(x) ((x) << S_TF_CCTRL_SEL1)
+
+#define S_TF_TCP_NEWRENO_FAST_RECOVERY 37
+#define V_TF_TCP_NEWRENO_FAST_RECOVERY(x) ((x) << S_TF_TCP_NEWRENO_FAST_RECOVERY)
+
+#define S_TF_TX_PACE_AUTO 38
+#define V_TF_TX_PACE_AUTO(x) ((x) << S_TF_TX_PACE_AUTO)
+
+#define S_TF_PEER_FIN_HELD 39
+#define V_TF_PEER_FIN_HELD(x) ((x) << S_TF_PEER_FIN_HELD)
+
+#define S_TF_CORE_URG 40
+#define V_TF_CORE_URG(x) ((x) << S_TF_CORE_URG)
+
+#define S_TF_RDMA_ERROR 41
+#define V_TF_RDMA_ERROR(x) ((x) << S_TF_RDMA_ERROR)
+
+#define S_TF_SSWS_DISABLED 42
+#define V_TF_SSWS_DISABLED(x) ((x) << S_TF_SSWS_DISABLED)
+
+#define S_TF_DUPACK_COUNT_ODD 43
+#define V_TF_DUPACK_COUNT_ODD(x) ((x) << S_TF_DUPACK_COUNT_ODD)
+
+#define S_TF_TX_CHANNEL 44
+#define V_TF_TX_CHANNEL(x) ((x) << S_TF_TX_CHANNEL)
+
+#define S_TF_RX_CHANNEL 45
+#define V_TF_RX_CHANNEL(x) ((x) << S_TF_RX_CHANNEL)
+
+#define S_TF_TX_PACE_FIXED 46
+#define V_TF_TX_PACE_FIXED(x) ((x) << S_TF_TX_PACE_FIXED)
+
+#define S_TF_RDMA_FLM_ERROR 47
+#define V_TF_RDMA_FLM_ERROR(x) ((x) << S_TF_RDMA_FLM_ERROR)
+
+#define S_TF_RX_FLOW_CONTROL_DISABLE 48
+#define V_TF_RX_FLOW_CONTROL_DISABLE(x) ((x) << S_TF_RX_FLOW_CONTROL_DISABLE)
+
+#define S_TF_DDP_INDICATE_OUT 15
+#define V_TF_DDP_INDICATE_OUT(x) ((x) << S_TF_DDP_INDICATE_OUT)
+
+#define S_TF_DDP_ACTIVE_BUF 16
+#define V_TF_DDP_ACTIVE_BUF(x) ((x) << S_TF_DDP_ACTIVE_BUF)
+
+#define S_TF_DDP_BUF0_VALID 17
+#define V_TF_DDP_BUF0_VALID(x) ((x) << S_TF_DDP_BUF0_VALID)
+
+#define S_TF_DDP_BUF1_VALID 18
+#define V_TF_DDP_BUF1_VALID(x) ((x) << S_TF_DDP_BUF1_VALID)
+
+#define S_TF_DDP_BUF0_INDICATE 19
+#define V_TF_DDP_BUF0_INDICATE(x) ((x) << S_TF_DDP_BUF0_INDICATE)
+
+#define S_TF_DDP_BUF1_INDICATE 20
+#define V_TF_DDP_BUF1_INDICATE(x) ((x) << S_TF_DDP_BUF1_INDICATE)
+
+#define S_TF_DDP_PUSH_DISABLE_0 21
+#define V_TF_DDP_PUSH_DISABLE_0(x) ((x) << S_TF_DDP_PUSH_DISABLE_0)
+
+#define S_TF_DDP_PUSH_DISABLE_1 22
+#define V_TF_DDP_PUSH_DISABLE_1(x) ((x) << S_TF_DDP_PUSH_DISABLE_1)
+
+#define S_TF_DDP_OFF 23
+#define V_TF_DDP_OFF(x) ((x) << S_TF_DDP_OFF)
+
+#define S_TF_DDP_WAIT_FRAG 24
+#define V_TF_DDP_WAIT_FRAG(x) ((x) << S_TF_DDP_WAIT_FRAG)
+
+#define S_TF_DDP_BUF_INF 25
+#define V_TF_DDP_BUF_INF(x) ((x) << S_TF_DDP_BUF_INF)
+
+#define S_TF_DDP_RX2TX 26
+#define V_TF_DDP_RX2TX(x) ((x) << S_TF_DDP_RX2TX)
+
+#define S_TF_DDP_BUF0_FLUSH 27
+#define V_TF_DDP_BUF0_FLUSH(x) ((x) << S_TF_DDP_BUF0_FLUSH)
+
+#define S_TF_DDP_BUF1_FLUSH 28
+#define V_TF_DDP_BUF1_FLUSH(x) ((x) << S_TF_DDP_BUF1_FLUSH)
+
+#define S_TF_DDP_PSH_NO_INVALIDATE 29
+#define V_TF_DDP_PSH_NO_INVALIDATE(x) ((x) << S_TF_DDP_PSH_NO_INVALIDATE)
+
+#endif /* _TCB_DEFS_H */
diff --git a/sys/dev/cxgb/common/cxgb_version.h b/sys/dev/cxgb/common/cxgb_version.h
new file mode 100644
index 0000000..7d15ca4
--- /dev/null
+++ b/sys/dev/cxgb/common/cxgb_version.h
@@ -0,0 +1,41 @@
+/**************************************************************************
+
+Copyright (c) 2007, Chelsio Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Chelsio Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+$FreeBSD$
+
+***************************************************************************/
+
+#ifndef __CHELSIO_VERSION_H
+#define __CHELSIO_VERSION_H
+#define DRV_DESC "Chelsio T3 Network Driver"
+#define DRV_NAME "cxgb"
+#define DRV_VERSION "1.0"
+#endif
diff --git a/sys/dev/cxgb/common/cxgb_vsc8211.c b/sys/dev/cxgb/common/cxgb_vsc8211.c
new file mode 100644
index 0000000..f118c02
--- /dev/null
+++ b/sys/dev/cxgb/common/cxgb_vsc8211.c
@@ -0,0 +1,251 @@
+/**************************************************************************
+
+Copyright (c) 2007, Chelsio Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Chelsio Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/cxgb/common/cxgb_common.h>
+
+/* VSC8211 PHY specific registers. */
+enum {
+ VSC8211_INTR_ENABLE = 25,
+ VSC8211_INTR_STATUS = 26,
+ VSC8211_AUX_CTRL_STAT = 28,
+};
+
+enum {
+ VSC_INTR_RX_ERR = 1 << 0,
+ VSC_INTR_MS_ERR = 1 << 1, /* master/slave resolution error */
+ VSC_INTR_CABLE = 1 << 2, /* cable impairment */
+ VSC_INTR_FALSE_CARR = 1 << 3, /* false carrier */
+ VSC_INTR_MEDIA_CHG = 1 << 4, /* AMS media change */
+ VSC_INTR_RX_FIFO = 1 << 5, /* Rx FIFO over/underflow */
+ VSC_INTR_TX_FIFO = 1 << 6, /* Tx FIFO over/underflow */
+ VSC_INTR_DESCRAMBL = 1 << 7, /* descrambler lock-lost */
+ VSC_INTR_SYMBOL_ERR = 1 << 8, /* symbol error */
+ VSC_INTR_NEG_DONE = 1 << 10, /* autoneg done */
+ VSC_INTR_NEG_ERR = 1 << 11, /* autoneg error */
+ VSC_INTR_LINK_CHG = 1 << 13, /* link change */
+ VSC_INTR_ENABLE = 1 << 15, /* interrupt enable */
+};
+
+#define CFG_CHG_INTR_MASK (VSC_INTR_LINK_CHG | VSC_INTR_NEG_ERR | \
+ VSC_INTR_NEG_DONE)
+#define INTR_MASK (CFG_CHG_INTR_MASK | VSC_INTR_TX_FIFO | VSC_INTR_RX_FIFO | \
+ VSC_INTR_ENABLE)
+
+/* PHY specific auxiliary control & status register fields */
+#define S_ACSR_ACTIPHY_TMR 0
+#define M_ACSR_ACTIPHY_TMR 0x3
+#define V_ACSR_ACTIPHY_TMR(x) ((x) << S_ACSR_ACTIPHY_TMR)
+
+#define S_ACSR_SPEED 3
+#define M_ACSR_SPEED 0x3
+#define G_ACSR_SPEED(x) (((x) >> S_ACSR_SPEED) & M_ACSR_SPEED)
+
+#define S_ACSR_DUPLEX 5
+#define F_ACSR_DUPLEX (1 << S_ACSR_DUPLEX)
+
+#define S_ACSR_ACTIPHY 6
+#define F_ACSR_ACTIPHY (1 << S_ACSR_ACTIPHY)
+
+/*
+ * Reset the PHY. This PHY completes reset immediately so we never wait.
+ */
+static int vsc8211_reset(struct cphy *cphy, int wait)
+{
+ return t3_phy_reset(cphy, 0, 0);
+}
+
+static int vsc8211_intr_enable(struct cphy *cphy)
+{
+ return mdio_write(cphy, 0, VSC8211_INTR_ENABLE, INTR_MASK);
+}
+
+static int vsc8211_intr_disable(struct cphy *cphy)
+{
+ return mdio_write(cphy, 0, VSC8211_INTR_ENABLE, 0);
+}
+
+static int vsc8211_intr_clear(struct cphy *cphy)
+{
+ u32 val;
+
+ /* Clear PHY interrupts by reading the register. */
+ return mdio_read(cphy, 0, VSC8211_INTR_STATUS, &val);
+}
+
+static int vsc8211_autoneg_enable(struct cphy *cphy)
+{
+ return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE,
+ BMCR_ANENABLE | BMCR_ANRESTART);
+}
+
+static int vsc8211_autoneg_restart(struct cphy *cphy)
+{
+ return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE,
+ BMCR_ANRESTART);
+}
+
+static int vsc8211_get_link_status(struct cphy *cphy, int *link_ok,
+ int *speed, int *duplex, int *fc)
+{
+ unsigned int bmcr, status, lpa, adv;
+ int err, sp = -1, dplx = -1, pause = 0;
+
+ err = mdio_read(cphy, 0, MII_BMCR, &bmcr);
+ if (!err)
+ err = mdio_read(cphy, 0, MII_BMSR, &status);
+ if (err)
+ return err;
+
+ if (link_ok) {
+ /*
+ * BMSR_LSTATUS is latch-low, so if it is 0 we need to read it
+ * once more to get the current link state.
+ */
+ if (!(status & BMSR_LSTATUS))
+ err = mdio_read(cphy, 0, MII_BMSR, &status);
+ if (err)
+ return err;
+ *link_ok = (status & BMSR_LSTATUS) != 0;
+ }
+ if (!(bmcr & BMCR_ANENABLE)) {
+ dplx = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
+ if (bmcr & BMCR_SPEED1000)
+ sp = SPEED_1000;
+ else if (bmcr & BMCR_SPEED100)
+ sp = SPEED_100;
+ else
+ sp = SPEED_10;
+ } else if (status & BMSR_ANEGCOMPLETE) {
+ err = mdio_read(cphy, 0, VSC8211_AUX_CTRL_STAT, &status);
+ if (err)
+ return err;
+
+ dplx = (status & F_ACSR_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
+ sp = G_ACSR_SPEED(status);
+ if (sp == 0)
+ sp = SPEED_10;
+ else if (sp == 1)
+ sp = SPEED_100;
+ else
+ sp = SPEED_1000;
+
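+		/*
+		 * Resolve flow control from the local and link-partner
+		 * advertisements using the standard 802.3 pause
+		 * resolution rules.
+		 */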
+ if (fc && dplx == DUPLEX_FULL) {
+ err = mdio_read(cphy, 0, MII_LPA, &lpa);
+ if (!err)
+ err = mdio_read(cphy, 0, MII_ADVERTISE, &adv);
+ if (err)
+ return err;
+
+ if (lpa & adv & ADVERTISE_PAUSE_CAP)
+ pause = PAUSE_RX | PAUSE_TX;
+ else if ((lpa & ADVERTISE_PAUSE_CAP) &&
+ (lpa & ADVERTISE_PAUSE_ASYM) &&
+ (adv & ADVERTISE_PAUSE_ASYM))
+ pause = PAUSE_TX;
+ else if ((lpa & ADVERTISE_PAUSE_ASYM) &&
+ (adv & ADVERTISE_PAUSE_CAP))
+ pause = PAUSE_RX;
+ }
+ }
+ if (speed)
+ *speed = sp;
+ if (duplex)
+ *duplex = dplx;
+ if (fc)
+ *fc = pause;
+ return 0;
+}
+
+static int vsc8211_power_down(struct cphy *cphy, int enable)
+{
+ return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN,
+ enable ? BMCR_PDOWN : 0);
+}
+
+static int vsc8211_intr_handler(struct cphy *cphy)
+{
+ unsigned int cause;
+ int err, cphy_cause = 0;
+
+ err = mdio_read(cphy, 0, VSC8211_INTR_STATUS, &cause);
+ if (err)
+ return err;
+
+ cause &= INTR_MASK;
+ if (cause & CFG_CHG_INTR_MASK)
+ cphy_cause |= cphy_cause_link_change;
+ if (cause & (VSC_INTR_RX_FIFO | VSC_INTR_TX_FIFO))
+ cphy_cause |= cphy_cause_fifo_error;
+ return cphy_cause;
+}
+
+#ifdef C99_NOT_SUPPORTED
+static struct cphy_ops vsc8211_ops = {
+ NULL,
+ vsc8211_reset,
+ vsc8211_intr_enable,
+ vsc8211_intr_disable,
+ vsc8211_intr_clear,
+ vsc8211_intr_handler,
+ vsc8211_autoneg_enable,
+ vsc8211_autoneg_restart,
+ t3_phy_advertise,
+ NULL,
+ t3_set_phy_speed_duplex,
+ vsc8211_get_link_status,
+ vsc8211_power_down,
+};
+#else
+static struct cphy_ops vsc8211_ops = {
+ .reset = vsc8211_reset,
+ .intr_enable = vsc8211_intr_enable,
+ .intr_disable = vsc8211_intr_disable,
+ .intr_clear = vsc8211_intr_clear,
+ .intr_handler = vsc8211_intr_handler,
+ .autoneg_enable = vsc8211_autoneg_enable,
+ .autoneg_restart = vsc8211_autoneg_restart,
+ .advertise = t3_phy_advertise,
+ .set_speed_duplex = t3_set_phy_speed_duplex,
+ .get_link_status = vsc8211_get_link_status,
+ .power_down = vsc8211_power_down,
+};
+#endif
+
+void t3_vsc8211_phy_prep(struct cphy *phy, adapter_t *adapter, int phy_addr,
+ const struct mdio_ops *mdio_ops)
+{
+ cphy_init(phy, adapter, phy_addr, &vsc8211_ops, mdio_ops);
+}
diff --git a/sys/dev/cxgb/common/cxgb_xgmac.c b/sys/dev/cxgb/common/cxgb_xgmac.c
new file mode 100644
index 0000000..f958c20
--- /dev/null
+++ b/sys/dev/cxgb/common/cxgb_xgmac.c
@@ -0,0 +1,415 @@
+/**************************************************************************
+
+Copyright (c) 2007, Chelsio Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Chelsio Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/cxgb/common/cxgb_common.h>
+#include <dev/cxgb/common/cxgb_regs.h>
+
+/*
+ * # of exact address filters. The first one is used for the station address,
+ * the rest are available for multicast addresses.
+ */
+#define EXACT_ADDR_FILTERS 8
+
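+/* Derive the MAC instance index from its register-block offset. */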
+static inline int macidx(const struct cmac *mac)
+{
+ return mac->offset / (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR);
+}
+
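+/*
+ * Reset the XAUI SERDES lanes: assert all the power-down, PLL-reset,
+ * and lane-reset bits at once, then release them in stages with a 15us
+ * settle time after each step.
+ */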
+static void xaui_serdes_reset(struct cmac *mac)
+{
+ static const unsigned int clear[] = {
+ F_PWRDN0 | F_PWRDN1, F_RESETPLL01, F_RESET0 | F_RESET1,
+ F_PWRDN2 | F_PWRDN3, F_RESETPLL23, F_RESET2 | F_RESET3
+ };
+
+ int i;
+ adapter_t *adap = mac->adapter;
+ u32 ctrl = A_XGM_SERDES_CTRL0 + mac->offset;
+
+ t3_write_reg(adap, ctrl, adap->params.vpd.xauicfg[macidx(mac)] |
+ F_RESET3 | F_RESET2 | F_RESET1 | F_RESET0 |
+ F_PWRDN3 | F_PWRDN2 | F_PWRDN1 | F_PWRDN0 |
+ F_RESETPLL23 | F_RESETPLL01);
+ (void)t3_read_reg(adap, ctrl);
+ udelay(15);
+
+ for (i = 0; i < ARRAY_SIZE(clear); i++) {
+ t3_set_reg_field(adap, ctrl, clear[i], 0);
+ udelay(15);
+ }
+}
+
+void t3b_pcs_reset(struct cmac *mac)
+{
+ t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset,
+ F_PCS_RESET_, 0);
+ udelay(20);
+ t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset, 0,
+ F_PCS_RESET_);
+}
+
+int t3_mac_reset(struct cmac *mac)
+{
+ static struct addr_val_pair mac_reset_avp[] = {
+ { A_XGM_TX_CTRL, 0 },
+ { A_XGM_RX_CTRL, 0 },
+ { A_XGM_RX_CFG, F_DISPAUSEFRAMES | F_EN1536BFRAMES |
+ F_RMFCS | F_ENJUMBO | F_ENHASHMCAST },
+ { A_XGM_RX_HASH_LOW, 0 },
+ { A_XGM_RX_HASH_HIGH, 0 },
+ { A_XGM_RX_EXACT_MATCH_LOW_1, 0 },
+ { A_XGM_RX_EXACT_MATCH_LOW_2, 0 },
+ { A_XGM_RX_EXACT_MATCH_LOW_3, 0 },
+ { A_XGM_RX_EXACT_MATCH_LOW_4, 0 },
+ { A_XGM_RX_EXACT_MATCH_LOW_5, 0 },
+ { A_XGM_RX_EXACT_MATCH_LOW_6, 0 },
+ { A_XGM_RX_EXACT_MATCH_LOW_7, 0 },
+ { A_XGM_RX_EXACT_MATCH_LOW_8, 0 },
+ { A_XGM_STAT_CTRL, F_CLRSTATS }
+ };
+ u32 val;
+ adapter_t *adap = mac->adapter;
+ unsigned int oft = mac->offset;
+
+ t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
+ (void) t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
+
+ t3_write_regs(adap, mac_reset_avp, ARRAY_SIZE(mac_reset_avp), oft);
+ t3_set_reg_field(adap, A_XGM_RXFIFO_CFG + oft,
+ F_RXSTRFRWRD | F_DISERRFRAMES,
+ uses_xaui(adap) ? 0 : F_RXSTRFRWRD);
+
+ if (uses_xaui(adap)) {
+ if (adap->params.rev == 0) {
+ t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
+ F_RXENABLE | F_TXENABLE);
+ if (t3_wait_op_done(adap, A_XGM_SERDES_STATUS1 + oft,
+ F_CMULOCK, 1, 5, 2)) {
+ CH_ERR(adap,
+ "MAC %d XAUI SERDES CMU lock failed\n",
+ macidx(mac));
+ return -1;
+ }
+ t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
+ F_SERDESRESET_);
+ } else
+ xaui_serdes_reset(mac);
+ }
+
+ if (adap->params.rev > 0)
+ t3_write_reg(adap, A_XGM_PAUSE_TIMER + oft, 0xf000);
+
+ val = F_MAC_RESET_;
+ if (is_10G(adap))
+ val |= F_PCS_RESET_;
+ else if (uses_xaui(adap))
+ val |= F_PCS_RESET_ | F_XG2G_RESET_;
+ else
+ val |= F_RGMII_RESET_ | F_XG2G_RESET_;
+ t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
+ (void) t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
+ if ((val & F_PCS_RESET_) && adap->params.rev) {
+ t3_os_sleep(1);
+ t3b_pcs_reset(mac);
+ }
+
+ memset(&mac->stats, 0, sizeof(mac->stats));
+ return 0;
+}
+
+/*
+ * Set the exact match register 'idx' to recognize the given Ethernet address.
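+ * The address is split little-endian across a register pair: for example,
+ * 00:07:43:12:34:56 is written as addr_lo 0x12430700, addr_hi 0x5634.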
+ */
+static void set_addr_filter(struct cmac *mac, int idx, const u8 *addr)
+{
+ u32 addr_lo, addr_hi;
+ unsigned int oft = mac->offset + idx * 8;
+
+ addr_lo = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+ addr_hi = (addr[5] << 8) | addr[4];
+
+ t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1 + oft, addr_lo);
+ t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_HIGH_1 + oft, addr_hi);
+}
+
+/* Set one of the station's unicast MAC addresses. */
+int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6])
+{
+ if (idx >= mac->nucast)
+ return -EINVAL;
+ set_addr_filter(mac, idx, addr);
+ return 0;
+}
+
+/*
+ * Specify the number of exact address filters that should be reserved for
+ * unicast addresses. Caller should reload the unicast and multicast addresses
+ * after calling this.
+ */
+int t3_mac_set_num_ucast(struct cmac *mac, int n)
+{
+ if (n > EXACT_ADDR_FILTERS)
+ return -EINVAL;
+ mac->nucast = n;
+ return 0;
+}
+
+/* Calculate the RX hash filter index of an Ethernet address */
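+/*
+ * The 48 address bits are XOR-folded into a 6-bit index: bit i of the
+ * result accumulates every sixth address bit starting at bit i.
+ */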
+static int hash_hw_addr(const u8 *addr)
+{
+ int hash = 0, octet, bit, i = 0, c;
+
+ for (octet = 0; octet < 6; ++octet)
+ for (c = addr[octet], bit = 0; bit < 8; c >>= 1, ++bit) {
+ hash ^= (c & 1) << i;
+ if (++i == 6)
+ i = 0;
+ }
+ return hash;
+}
+
+int t3_mac_set_rx_mode(struct cmac *mac, struct t3_rx_mode *rm)
+{
+ u32 val, hash_lo, hash_hi;
+ adapter_t *adap = mac->adapter;
+ unsigned int oft = mac->offset;
+
+ val = t3_read_reg(adap, A_XGM_RX_CFG + oft) & ~F_COPYALLFRAMES;
+ if (promisc_rx_mode(rm))
+ val |= F_COPYALLFRAMES;
+ t3_write_reg(adap, A_XGM_RX_CFG + oft, val);
+
+ if (allmulti_rx_mode(rm))
+ hash_lo = hash_hi = 0xffffffff;
+ else {
+ u8 *addr;
+ int exact_addr_idx = mac->nucast;
+
+ hash_lo = hash_hi = 0;
+ while ((addr = t3_get_next_mcaddr(rm)))
+ if (exact_addr_idx < EXACT_ADDR_FILTERS)
+ set_addr_filter(mac, exact_addr_idx++, addr);
+ else {
+ int hash = hash_hw_addr(addr);
+
+ if (hash < 32)
+ hash_lo |= (1 << hash);
+ else
+ hash_hi |= (1 << (hash - 32));
+ }
+ }
+
+ t3_write_reg(adap, A_XGM_RX_HASH_LOW + oft, hash_lo);
+ t3_write_reg(adap, A_XGM_RX_HASH_HIGH + oft, hash_hi);
+ return 0;
+}
+
+int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
+{
+ int hwm, lwm;
+ unsigned int thres, v;
+ adapter_t *adap = mac->adapter;
+
+ /*
+	 * MAX_FRAME_SIZE includes header + FCS, mtu doesn't. The HW max
+ * packet size register includes header, but not FCS.
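+	 * A standard 1500-byte MTU is therefore programmed as 1514.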
+ */
+ mtu += 14;
+ if (mtu > MAX_FRAME_SIZE - 4)
+ return -EINVAL;
+ t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu);
+
+ /*
+ * Adjust the PAUSE frame watermarks. We always set the LWM, and the
+ * HWM only if flow-control is enabled.
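+	 * The watermark fields are programmed in units of 8 bytes.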
+ */
+ hwm = max(MAC_RXFIFO_SIZE - 3 * mtu, MAC_RXFIFO_SIZE / 2U);
+ hwm = min(hwm, 3 * MAC_RXFIFO_SIZE / 4 + 1024);
+ lwm = hwm - 1024;
+ v = t3_read_reg(adap, A_XGM_RXFIFO_CFG + mac->offset);
+ v &= ~V_RXFIFOPAUSELWM(M_RXFIFOPAUSELWM);
+ v |= V_RXFIFOPAUSELWM(lwm / 8);
+ if (G_RXFIFOPAUSEHWM(v))
+ v = (v & ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM)) |
+ V_RXFIFOPAUSEHWM(hwm / 8);
+ t3_write_reg(adap, A_XGM_RXFIFO_CFG + mac->offset, v);
+
+ /* Adjust the TX FIFO threshold based on the MTU */
+ thres = (adap->params.vpd.cclk * 1000) / 15625;
+ thres = (thres * mtu) / 1000;
+ if (is_10G(adap))
+ thres /= 10;
+ thres = mtu > thres ? (mtu - thres + 7) / 8 : 0;
+ thres = max(thres, 8U); /* need at least 8 */
+ t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + mac->offset,
+ V_TXFIFOTHRESH(M_TXFIFOTHRESH),
+ V_TXFIFOTHRESH(thres));
+ return 0;
+}
+
+int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc)
+{
+ u32 val;
+ adapter_t *adap = mac->adapter;
+ unsigned int oft = mac->offset;
+
+ if (duplex >= 0 && duplex != DUPLEX_FULL)
+ return -EINVAL;
+ if (speed >= 0) {
+ if (speed == SPEED_10)
+ val = V_PORTSPEED(0);
+ else if (speed == SPEED_100)
+ val = V_PORTSPEED(1);
+ else if (speed == SPEED_1000)
+ val = V_PORTSPEED(2);
+ else if (speed == SPEED_10000)
+ val = V_PORTSPEED(3);
+ else
+ return -EINVAL;
+
+ t3_set_reg_field(adap, A_XGM_PORT_CFG + oft,
+ V_PORTSPEED(M_PORTSPEED), val);
+ }
+
+ val = t3_read_reg(adap, A_XGM_RXFIFO_CFG + oft);
+ val &= ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM);
+ if (fc & PAUSE_TX)
+ val |= V_RXFIFOPAUSEHWM(G_RXFIFOPAUSELWM(val) + 128); /* +1KB */
+ t3_write_reg(adap, A_XGM_RXFIFO_CFG + oft, val);
+
+ t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN,
+ (fc & PAUSE_RX) ? F_TXPAUSEEN : 0);
+ return 0;
+}
+
+int t3_mac_enable(struct cmac *mac, int which)
+{
+ int idx = macidx(mac);
+ adapter_t *adap = mac->adapter;
+ unsigned int oft = mac->offset;
+
+ if (which & MAC_DIRECTION_TX) {
+ t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN);
+ t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
+ t3_write_reg(adap, A_TP_PIO_DATA, 0xbf000001);
+ t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE);
+ t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 1 << idx);
+ }
+ if (which & MAC_DIRECTION_RX)
+ t3_write_reg(adap, A_XGM_RX_CTRL + oft, F_RXEN);
+ return 0;
+}
+
+int t3_mac_disable(struct cmac *mac, int which)
+{
+ int idx = macidx(mac);
+ adapter_t *adap = mac->adapter;
+
+ if (which & MAC_DIRECTION_TX) {
+ t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
+ t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
+ t3_write_reg(adap, A_TP_PIO_DATA, 0xc000001f);
+ t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE);
+ t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 0);
+ }
+ if (which & MAC_DIRECTION_RX)
+ t3_write_reg(adap, A_XGM_RX_CTRL + mac->offset, 0);
+ return 0;
+}
+
+/*
+ * This function is called periodically to accumulate the current values of the
+ * RMON counters into the port statistics. Since the packet counters are only
+ * 32 bits they can overflow in ~286 secs at 10G, so the function should be
+ * called more frequently than that. The byte counters are 45 bits wide
+ * and would overflow in ~7.8 hours.
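+ * (At 10Gb/s, minimum-size frames arrive at nearly 15M frames/sec,
+ * which drains a 32-bit counter in under five minutes; the 45-bit byte
+ * counters last about 2^45 / 1.25e9, roughly 28,000 seconds.)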
+ */
+const struct mac_stats *t3_mac_update_stats(struct cmac *mac)
+{
+#define RMON_READ(mac, addr) t3_read_reg(mac->adapter, addr + mac->offset)
+#define RMON_UPDATE(mac, name, reg) \
+ (mac)->stats.name += (u64)RMON_READ(mac, A_XGM_STAT_##reg)
+#define RMON_UPDATE64(mac, name, reg_lo, reg_hi) \
+ (mac)->stats.name += RMON_READ(mac, A_XGM_STAT_##reg_lo) + \
+ ((u64)RMON_READ(mac, A_XGM_STAT_##reg_hi) << 32)
+
+ u32 v, lo;
+
+ RMON_UPDATE64(mac, rx_octets, RX_BYTES_LOW, RX_BYTES_HIGH);
+ RMON_UPDATE64(mac, rx_frames, RX_FRAMES_LOW, RX_FRAMES_HIGH);
+ RMON_UPDATE(mac, rx_mcast_frames, RX_MCAST_FRAMES);
+ RMON_UPDATE(mac, rx_bcast_frames, RX_BCAST_FRAMES);
+ RMON_UPDATE(mac, rx_fcs_errs, RX_CRC_ERR_FRAMES);
+ RMON_UPDATE(mac, rx_pause, RX_PAUSE_FRAMES);
+ RMON_UPDATE(mac, rx_jabber, RX_JABBER_FRAMES);
+ RMON_UPDATE(mac, rx_short, RX_SHORT_FRAMES);
+ RMON_UPDATE(mac, rx_symbol_errs, RX_SYM_CODE_ERR_FRAMES);
+
+ RMON_UPDATE(mac, rx_too_long, RX_OVERSIZE_FRAMES);
+ mac->stats.rx_too_long += RMON_READ(mac, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT);
+
+ RMON_UPDATE(mac, rx_frames_64, RX_64B_FRAMES);
+ RMON_UPDATE(mac, rx_frames_65_127, RX_65_127B_FRAMES);
+ RMON_UPDATE(mac, rx_frames_128_255, RX_128_255B_FRAMES);
+ RMON_UPDATE(mac, rx_frames_256_511, RX_256_511B_FRAMES);
+ RMON_UPDATE(mac, rx_frames_512_1023, RX_512_1023B_FRAMES);
+ RMON_UPDATE(mac, rx_frames_1024_1518, RX_1024_1518B_FRAMES);
+ RMON_UPDATE(mac, rx_frames_1519_max, RX_1519_MAXB_FRAMES);
+
+ RMON_UPDATE64(mac, tx_octets, TX_BYTE_LOW, TX_BYTE_HIGH);
+ RMON_UPDATE64(mac, tx_frames, TX_FRAME_LOW, TX_FRAME_HIGH);
+ RMON_UPDATE(mac, tx_mcast_frames, TX_MCAST);
+ RMON_UPDATE(mac, tx_bcast_frames, TX_BCAST);
+ RMON_UPDATE(mac, tx_pause, TX_PAUSE);
+ /* This counts error frames in general (bad FCS, underrun, etc). */
+ RMON_UPDATE(mac, tx_underrun, TX_ERR_FRAMES);
+
+ RMON_UPDATE(mac, tx_frames_64, TX_64B_FRAMES);
+ RMON_UPDATE(mac, tx_frames_65_127, TX_65_127B_FRAMES);
+ RMON_UPDATE(mac, tx_frames_128_255, TX_128_255B_FRAMES);
+ RMON_UPDATE(mac, tx_frames_256_511, TX_256_511B_FRAMES);
+ RMON_UPDATE(mac, tx_frames_512_1023, TX_512_1023B_FRAMES);
+ RMON_UPDATE(mac, tx_frames_1024_1518, TX_1024_1518B_FRAMES);
+ RMON_UPDATE(mac, tx_frames_1519_max, TX_1519_MAXB_FRAMES);
+
+ /* The next stat isn't clear-on-read. */
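+	/*
+	 * Accumulate the delta since the last read; performing the
+	 * subtraction in 32 bits makes it immune to counter wraparound.
+	 */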
+ t3_write_reg(mac->adapter, A_TP_MIB_INDEX, mac->offset ? 51 : 50);
+ v = t3_read_reg(mac->adapter, A_TP_MIB_RDATA);
+ lo = (u32)mac->stats.rx_cong_drops;
+ mac->stats.rx_cong_drops += (u64)(v - lo);
+
+ return &mac->stats;
+}
diff --git a/sys/dev/cxgb/cxgb_adapter.h b/sys/dev/cxgb/cxgb_adapter.h
new file mode 100644
index 0000000..d401f9b
--- /dev/null
+++ b/sys/dev/cxgb/cxgb_adapter.h
@@ -0,0 +1,438 @@
+/**************************************************************************
+
+Copyright (c) 2007, Chelsio Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Chelsio Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+
+$FreeBSD$
+
+***************************************************************************/
+
+
+
+#ifndef _CXGB_ADAPTER_H_
+#define _CXGB_ADAPTER_H_
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/rman.h>
+#include <sys/mbuf.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <net/if_media.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <sys/bus_dma.h>
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+struct adapter;
+struct sge_qset;
+extern int cxgb_debug;
+
+struct port_info {
+ struct adapter *adapter;
+ struct ifnet *ifp;
+ int if_flags;
+ const struct port_type_info *port_type;
+ struct cphy phy;
+ struct cmac mac;
+ struct link_config link_config;
+ int activity;
+ struct ifmedia media;
+ struct mtx lock;
+
+ int port;
+ uint8_t hw_addr[ETHER_ADDR_LEN];
+ uint8_t nqsets;
+ uint8_t first_qset;
+ struct taskqueue *tq;
+ struct task start_task;
+ struct cdev *port_cdev;
+};
+
+enum { /* adapter flags */
+ FULL_INIT_DONE = (1 << 0),
+ USING_MSI = (1 << 1),
+ USING_MSIX = (1 << 2),
+ QUEUES_BOUND = (1 << 3),
+ FW_UPTODATE = (1 << 4),
+};
+
+/* Max active LRO sessions per queue set */
+#define MAX_LRO_PER_QSET 8
+
+
+#define FL_Q_SIZE 4096
+#define JUMBO_Q_SIZE 512
+#define RSPQ_Q_SIZE 1024
+#define TX_ETH_Q_SIZE 1024
+
+/*
+ * Types of Tx queues in each queue set. Order here matters, do not change.
+ * XXX TOE is not implemented yet, so the extra queues are just placeholders.
+ */
+enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };
+
+
+/* careful, the following are set on priv_flags and must not collide with
+ * IFF_ flags!
+ */
+enum {
+ LRO_ACTIVE = (1 << 8),
+};
+
+struct sge_lro_session {
+ struct t3_mbuf_hdr mh;
+ uint32_t seq;
+ uint16_t ip_len;
+};
+
+struct sge_lro {
+ unsigned int enabled;
+ unsigned int num_active;
+ struct sge_lro_session *last_s;
+ struct sge_lro_session s[MAX_LRO_PER_QSET];
+};
+
+/*
+ * On Linux this lives in its own header; the FreeBSD contents still
+ * need to be fleshed out.  XXX
+ */
+
+struct t3cdev {
+ int foo; /* XXX fill in */
+};
+
+#define RX_BUNDLE_SIZE 8
+
+struct rsp_desc;
+
+struct sge_rspq {
+ uint32_t credits;
+ uint32_t size;
+ uint32_t cidx;
+ uint32_t gen;
+ uint32_t polling;
+ uint32_t holdoff_tmr;
+ uint32_t next_holdoff;
+ uint32_t imm_data;
+ uint32_t pure_rsps;
+ struct rsp_desc *desc;
+ bus_addr_t phys_addr;
+ uint32_t cntxt_id;
+ bus_dma_tag_t desc_tag;
+ bus_dmamap_t desc_map;
+ struct t3_mbuf_hdr mh;
+ struct mtx lock;
+};
+
+struct rx_desc;
+struct rx_sw_desc;
+
+struct sge_fl {
+ uint32_t buf_size;
+ uint32_t credits;
+ uint32_t size;
+ uint32_t cidx;
+ uint32_t pidx;
+ uint32_t gen;
+ struct rx_desc *desc;
+ struct rx_sw_desc *sdesc;
+ bus_addr_t phys_addr;
+ uint32_t cntxt_id;
+ uint64_t empty;
+ bus_dma_tag_t desc_tag;
+ bus_dmamap_t desc_map;
+ struct mtx fl_locks[8];
+};
+
+struct tx_desc;
+struct tx_sw_desc;
+
+struct sge_txq {
+ uint64_t flags;
+ uint32_t in_use;
+ uint32_t size;
+ uint32_t processed;
+ uint32_t cleaned;
+ uint32_t stop_thres;
+ uint32_t cidx;
+ uint32_t pidx;
+ uint32_t gen;
+ uint32_t unacked;
+ struct tx_desc *desc;
+ struct tx_sw_desc *sdesc;
+ uint32_t token;
+ bus_addr_t phys_addr;
+ uint32_t cntxt_id;
+ uint64_t stops;
+ uint64_t restarts;
+ bus_dma_tag_t desc_tag;
+ bus_dmamap_t desc_map;
+ struct mtx lock;
+};
+
+
+enum {
+ SGE_PSTAT_TSO, /* # of TSO requests */
+ SGE_PSTAT_RX_CSUM_GOOD, /* # of successful RX csum offloads */
+ SGE_PSTAT_TX_CSUM, /* # of TX checksum offloads */
+ SGE_PSTAT_VLANEX, /* # of VLAN tag extractions */
+ SGE_PSTAT_VLANINS, /* # of VLAN tag insertions */
+ SGE_PSTATS_LRO_QUEUED, /* # of LRO appended packets */
+ SGE_PSTATS_LRO_FLUSHED, /* # of LRO flushed packets */
+ SGE_PSTATS_LRO_X_STREAMS, /* # of exceeded LRO contexts */
+};
+
+#define SGE_PSTAT_MAX (SGE_PSTATS_LRO_X_STREAMS+1)
+
+struct sge_qset {
+ struct sge_rspq rspq;
+ struct sge_fl fl[SGE_RXQ_PER_SET];
+ struct sge_lro lro;
+ struct sge_txq txq[SGE_TXQ_PER_SET];
+ unsigned long txq_stopped; /* which Tx queues are stopped */
+ uint64_t port_stats[SGE_PSTAT_MAX];
+ struct port_info *port;
+};
+
+struct sge {
+ struct sge_qset qs[SGE_QSETS];
+ struct mtx reg_lock;
+};
+
+struct adapter {
+ device_t dev;
+ int flags;
+
+ /* PCI register resources */
+ uint32_t regs_rid;
+ struct resource *regs_res;
+ bus_space_handle_t bh;
+ bus_space_tag_t bt;
+ bus_size_t mmio_len;
+
+ /* DMA resources */
+ bus_dma_tag_t parent_dmat;
+ bus_dma_tag_t rx_dmat;
+ bus_dma_tag_t rx_jumbo_dmat;
+ bus_dma_tag_t tx_dmat;
+
+ /* Interrupt resources */
+ struct resource *irq_res;
+ int irq_rid;
+ void *intr_tag;
+
+ uint32_t msix_regs_rid;
+ struct resource *msix_regs_res;
+
+ struct resource *msix_irq_res[SGE_QSETS];
+ int msix_irq_rid[SGE_QSETS];
+ void *msix_intr_tag[SGE_QSETS];
+
+ /* Tasks */
+ struct task ext_intr_task;
+ struct task timer_reclaim_task;
+ struct task slow_intr_task;
+ struct task process_responses_task;
+ struct task mr_refresh_task;
+ struct taskqueue *tq;
+ struct callout cxgb_tick_ch;
+ struct callout sge_timer_ch;
+
+ /* Register lock for use by the hardware layer */
+ struct mtx mdio_lock;
+
+ /* Bookkeeping for the hardware layer */
+ struct adapter_params params;
+ unsigned int slow_intr_mask;
+ unsigned long irq_stats[IRQ_NUM_STATS];
+
+ struct sge sge;
+ struct mc7 pmrx;
+ struct mc7 pmtx;
+ struct mc7 cm;
+ struct mc5 mc5;
+
+ struct port_info port[MAX_NPORTS];
+ device_t portdev[MAX_NPORTS];
+ struct t3cdev tdev;
+ char fw_version[64];
+ uint32_t open_device_map;
+ struct mtx lock;
+};
+
+struct t3_rx_mode {
+
+ uint32_t idx;
+ struct port_info *port;
+};
+
+
+#define MDIO_LOCK(adapter) mtx_lock(&(adapter)->mdio_lock)
+#define MDIO_UNLOCK(adapter) mtx_unlock(&(adapter)->mdio_lock)
+
+#define PORT_LOCK(port) mtx_lock(&(port)->lock)
+#define PORT_UNLOCK(port) mtx_unlock(&(port)->lock)
+
+#define ADAPTER_LOCK(adap) mtx_lock(&(adap)->lock)
+#define ADAPTER_UNLOCK(adap) mtx_unlock(&(adap)->lock)
+
+
+
+static __inline uint32_t
+t3_read_reg(adapter_t *adapter, uint32_t reg_addr)
+{
+ return (bus_space_read_4(adapter->bt, adapter->bh, reg_addr));
+}
+
+static __inline void
+t3_write_reg(adapter_t *adapter, uint32_t reg_addr, uint32_t val)
+{
+ bus_space_write_4(adapter->bt, adapter->bh, reg_addr, val);
+}
+
+static __inline void
+t3_os_pci_read_config_4(adapter_t *adapter, int reg, uint32_t *val)
+{
+ *val = pci_read_config(adapter->dev, reg, 4);
+}
+
+static __inline void
+t3_os_pci_write_config_4(adapter_t *adapter, int reg, uint32_t val)
+{
+ pci_write_config(adapter->dev, reg, val, 4);
+}
+
+static __inline void
+t3_os_pci_read_config_2(adapter_t *adapter, int reg, uint16_t *val)
+{
+ *val = pci_read_config(adapter->dev, reg, 2);
+}
+
+static __inline void
+t3_os_pci_write_config_2(adapter_t *adapter, int reg, uint16_t val)
+{
+ pci_write_config(adapter->dev, reg, val, 2);
+}
+
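+/*
+ * Address iterator for t3_mac_set_rx_mode: the first call returns the
+ * port's station address, subsequent calls return NULL since no
+ * multicast list is attached yet.
+ */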
+static __inline uint8_t *
+t3_get_next_mcaddr(struct t3_rx_mode *rm)
+{
+ uint8_t *macaddr = NULL;
+
+ if (rm->idx == 0)
+ macaddr = rm->port->hw_addr;
+
+ rm->idx++;
+ return (macaddr);
+}
+
+static __inline void
+t3_init_rx_mode(struct t3_rx_mode *rm, struct port_info *port)
+{
+ rm->idx = 0;
+ rm->port = port;
+}
+
+static __inline struct port_info *
+adap2pinfo(struct adapter *adap, int idx)
+{
+ return &adap->port[idx];
+}
+
+int t3_os_find_pci_capability(adapter_t *adapter, int cap);
+int t3_os_pci_save_state(struct adapter *adapter);
+int t3_os_pci_restore_state(struct adapter *adapter);
+void t3_os_link_changed(adapter_t *adapter, int port_id, int link_status,
+ int speed, int duplex, int fc);
+void t3_sge_err_intr_handler(adapter_t *adapter);
+void t3_os_ext_intr_handler(adapter_t *adapter);
+void t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[]);
+int t3_mgmt_tx(adapter_t *adap, struct mbuf *m);
+
+
+int t3_sge_alloc(struct adapter *);
+int t3_sge_free(struct adapter *);
+int t3_sge_alloc_qset(adapter_t *, uint32_t, int, int, const struct qset_params *,
+ int, struct port_info *);
+void t3_free_sge_resources(adapter_t *);
+void t3_sge_start(adapter_t *);
+void t3b_intr(void *data);
+void t3_intr_msi(void *data);
+void t3_intr_msix(void *data);
+int t3_encap(struct port_info *, struct mbuf **);
+
+int t3_sge_init_sw(adapter_t *);
+void t3_sge_deinit_sw(adapter_t *);
+
+void t3_rx_eth_lro(adapter_t *adap, struct sge_rspq *rq, struct t3_mbuf_hdr *mh,
+ int ethpad, uint32_t rss_hash, uint32_t rss_csum, int lro);
+void t3_rx_eth(struct port_info *p, struct sge_rspq *rq, struct mbuf *m, int ethpad);
+void t3_sge_lro_flush_all(adapter_t *adap, struct sge_qset *qs);
+
+void t3_add_sysctls(adapter_t *sc);
+int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
+ unsigned char *data);
+void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
+/*
+ * XXX figure out how we can return this to being private to sge
+ */
+#define desc_reclaimable(q) ((q)->processed - (q)->cleaned - TX_MAX_DESC)
+
+#define container_of(p, stype, field) ((stype *)(((uint8_t *)(p)) - offsetof(stype, field)))
+
+static __inline struct sge_qset *
+fl_to_qset(struct sge_fl *q, int qidx)
+{
+ return container_of(q, struct sge_qset, fl[qidx]);
+}
+
+static __inline struct sge_qset *
+rspq_to_qset(struct sge_rspq *q)
+{
+ return container_of(q, struct sge_qset, rspq);
+}
+
+static __inline struct sge_qset *
+txq_to_qset(struct sge_txq *q, int qidx)
+{
+ return container_of(q, struct sge_qset, txq[qidx]);
+}
+
+#undef container_of
+
+#endif
diff --git a/sys/dev/cxgb/cxgb_ioctl.h b/sys/dev/cxgb/cxgb_ioctl.h
new file mode 100644
index 0000000..f74b9a2
--- /dev/null
+++ b/sys/dev/cxgb/cxgb_ioctl.h
@@ -0,0 +1,222 @@
+/**************************************************************************
+
+Copyright (c) 2007, Chelsio Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Chelsio Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+$FreeBSD$
+
+***************************************************************************/
+#ifndef __CHIOCTL_H__
+#define __CHIOCTL_H__
+
+/*
+ * Ioctl commands specific to this driver.
+ */
+enum {
+ CH_SETREG = 0x40,
+ CH_GETREG,
+ CH_SETTPI,
+ CH_GETTPI,
+ CH_DEVUP,
+ CH_GETMTUTAB,
+ CH_SETMTUTAB,
+ CH_GETMTU,
+ CH_SET_PM,
+ CH_GET_PM,
+ CH_GET_TCAM,
+ CH_SET_TCAM,
+ CH_GET_TCB,
+ CH_READ_TCAM_WORD,
+ CH_GET_MEM,
+ CH_GET_SGE_CONTEXT,
+ CH_GET_SGE_DESC,
+ CH_LOAD_FW,
+ CH_GET_PROTO,
+ CH_SET_PROTO,
+ CH_SET_TRACE_FILTER,
+ CH_SET_QSET_PARAMS,
+ CH_GET_QSET_PARAMS,
+ CH_SET_QSET_NUM,
+ CH_GET_QSET_NUM,
+ CH_SET_PKTSCHED,
+ CH_IFCONF_GETREGS,
+ CH_GETMIIREGS,
+ CH_SETMIIREGS,
+
+};
+
+struct ch_reg {
+ uint32_t addr;
+ uint32_t val;
+};
+
+struct ch_cntxt {
+ uint32_t cntxt_type;
+ uint32_t cntxt_id;
+ uint32_t data[4];
+};
+
+/* context types */
+enum { CNTXT_TYPE_EGRESS, CNTXT_TYPE_FL, CNTXT_TYPE_RSP, CNTXT_TYPE_CQ };
+
+struct ch_desc {
+ uint32_t cmd;
+ uint32_t queue_num;
+ uint32_t idx;
+ uint32_t size;
+ uint8_t data[128];
+};
+
+struct ch_mem_range {
+ uint32_t cmd;
+ uint32_t mem_id;
+ uint32_t addr;
+ uint32_t len;
+ uint32_t version;
+ uint8_t *buf;
+};
+
+struct ch_qset_params {
+ uint32_t qset_idx;
+ int32_t txq_size[3];
+ int32_t rspq_size;
+ int32_t fl_size[2];
+ int32_t intr_lat;
+ int32_t polling;
+ int32_t cong_thres;
+};
+
+struct ch_pktsched_params {
+ uint32_t cmd;
+ uint8_t sched;
+ uint8_t idx;
+ uint8_t min;
+ uint8_t max;
+ uint8_t binding;
+};
+
+#ifndef TCB_SIZE
+# define TCB_SIZE 128
+#endif
+
+/* TCB size in 32-bit words */
+#define TCB_WORDS (TCB_SIZE / 4)
+
+enum { MEM_CM, MEM_PMRX, MEM_PMTX }; /* ch_mem_range.mem_id values */
+
+struct ch_mtus {
+ uint32_t cmd;
+ uint32_t nmtus;
+ uint16_t mtus[NMTUS];
+};
+
+struct ch_pm {
+ uint32_t cmd;
+ uint32_t tx_pg_sz;
+ uint32_t tx_num_pg;
+ uint32_t rx_pg_sz;
+ uint32_t rx_num_pg;
+ uint32_t pm_total;
+};
+
+struct ch_tcam {
+ uint32_t cmd;
+ uint32_t tcam_size;
+ uint32_t nservers;
+ uint32_t nroutes;
+ uint32_t nfilters;
+};
+
+struct ch_tcb {
+ uint32_t cmd;
+ uint32_t tcb_index;
+ uint32_t tcb_data[TCB_WORDS];
+};
+
+struct ch_tcam_word {
+ uint32_t cmd;
+ uint32_t addr;
+ uint32_t buf[3];
+};
+
+struct ch_trace {
+ uint32_t cmd;
+ uint32_t sip;
+ uint32_t sip_mask;
+ uint32_t dip;
+ uint32_t dip_mask;
+ uint16_t sport;
+ uint16_t sport_mask;
+ uint16_t dport;
+ uint16_t dport_mask;
+ uint32_t vlan:12,
+ vlan_mask:12,
+ intf:4,
+ intf_mask:4;
+ uint8_t proto;
+ uint8_t proto_mask;
+ uint8_t invert_match:1,
+ config_tx:1,
+ config_rx:1,
+ trace_tx:1,
+ trace_rx:1;
+};
+
+#define REGDUMP_SIZE (4 * 1024)
+
+struct ifconf_regs {
+ uint32_t version;
+ uint32_t len; /* bytes */
+ uint8_t *data;
+};
+
+struct mii_data {
+ uint32_t phy_id;
+ uint32_t reg_num;
+ uint32_t val_in;
+ uint32_t val_out;
+};
+
+#define CHELSIO_SETREG _IOW('f', CH_SETREG, struct ch_reg)
+#define CHELSIO_GETREG _IOWR('f', CH_GETREG, struct ch_reg)
+#define CHELSIO_GET_MEM _IOWR('f', CH_GET_MEM, struct ch_mem_range)
+#define CHELSIO_GET_SGE_CONTEXT _IOWR('f', CH_GET_SGE_CONTEXT, struct ch_cntxt)
+#define CHELSIO_GET_SGE_DESC _IOWR('f', CH_GET_SGE_DESC, struct ch_desc)
+#define CHELSIO_GET_QSET_PARAMS _IOWR('f', CH_GET_QSET_PARAMS, struct ch_qset_params)
+#define CHELSIO_SET_QSET_PARAMS _IOW('f', CH_SET_QSET_PARAMS, struct ch_qset_params)
+#define CHELSIO_GET_QSET_NUM _IOWR('f', CH_GET_QSET_NUM, struct ch_reg)
+#define CHELSIO_SET_QSET_NUM _IOW('f', CH_SET_QSET_NUM, struct ch_reg)
+
+
+#define CHELSIO_SET_TRACE_FILTER _IOW('f', CH_SET_TRACE_FILTER, struct ch_trace)
+#define CHELSIO_SET_PKTSCHED _IOW('f', CH_SET_PKTSCHED, struct ch_pktsched_params)
+#define CHELSIO_IFCONF_GETREGS _IOWR('f', CH_IFCONF_GETREGS, struct ifconf_regs)
+#define SIOCGMIIREG _IOWR('f', CH_GETMIIREGS, struct mii_data)
+#define SIOCSMIIREG _IOWR('f', CH_SETMIIREGS, struct mii_data)
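+/*
+ * Typical userland usage is a sketch along these lines (the device path
+ * and register address are illustrative only):
+ *
+ *	int fd = open("/dev/cxgb0", O_RDWR);
+ *	struct ch_reg r = { .addr = 0xa0 };
+ *
+ *	if (fd >= 0 && ioctl(fd, CHELSIO_GETREG, &r) == 0)
+ *		printf("reg 0x%x = 0x%x\n", r.addr, r.val);
+ */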
+#endif
diff --git a/sys/dev/cxgb/cxgb_lro.c b/sys/dev/cxgb/cxgb_lro.c
new file mode 100644
index 0000000..c888291
--- /dev/null
+++ b/sys/dev/cxgb/cxgb_lro.c
@@ -0,0 +1,427 @@
+/**************************************************************************
+
+Copyright (c) 2007, Chelsio Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Chelsio Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <sys/bus_dma.h>
+#include <sys/rman.h>
+#include <sys/queue.h>
+#include <sys/taskqueue.h>
+
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <netinet/tcp.h>
+
+#include <dev/cxgb/cxgb_osdep.h>
+#include <dev/cxgb/common/cxgb_common.h>
+#include <dev/cxgb/common/cxgb_t3_cpl.h>
+
+#include <machine/in_cksum.h>
+
+
+#ifndef M_LRO
+#define M_LRO 0x0200
+#endif
+
+#ifdef DEBUG
+#define MBUF_HEADER_CHECK(mh) do { \
+ struct mbuf *head = mh->mh_head; \
+ struct mbuf *tail = mh->mh_tail; \
+ if (head->m_len == 0 || head->m_pkthdr.len == 0 \
+ || (head->m_flags & M_PKTHDR) == 0) \
+		panic("%s - mbuf len=%d pktlen=%d flags=0x%x\n", __func__, \
+		    head->m_len, head->m_pkthdr.len, head->m_flags);	\
+ if ((head->m_flags & M_PKTHDR) == 0) \
+ panic("first mbuf is not packet header - flags=0x%x\n", \
+ head->m_flags); \
+ if ((head == tail && head->m_len != head->m_pkthdr.len)) \
+ panic("len=%d pktlen=%d mismatch\n", \
+ head->m_len, head->m_pkthdr.len); \
+ if (head->m_len < ETHER_HDR_LEN || head->m_pkthdr.len < ETHER_HDR_LEN) \
+ panic("packet too small len=%d pktlen=%d\n", \
+ head->m_len, head->m_pkthdr.len);\
+} while (0)
+#else
+#define MBUF_HEADER_CHECK(mh)
+#endif
+
+#define IPH_OFFSET (2 + sizeof (struct cpl_rx_pkt) + ETHER_HDR_LEN)
+#define LRO_SESSION_IDX_HINT_HASH(hash) (hash & (MAX_LRO_PER_QSET - 1))
+#define LRO_IDX_INC(idx) idx = (idx + 1) & (MAX_LRO_PER_QSET - 1)
+
+static __inline struct sge_lro_session *
+lro_session(struct sge_lro *l, int idx)
+{
+ return l->s + idx;
+}
+
+static __inline int
+lro_match_session(struct sge_lro_session *s,
+ struct ip *ih, struct tcphdr *th)
+{
+ struct ip *sih = (struct ip *)(s->mh.mh_head->m_data + IPH_OFFSET);
+ struct tcphdr *sth = (struct tcphdr *) (sih + 1);
+
+ /*
+ * Linux driver doesn't include destination port check --
+ * need to find out why XXX
+ */
+ return (*(uint32_t *)&th->th_sport == *(uint32_t *)&sth->th_sport &&
+ *(uint32_t *)&th->th_dport == *(uint32_t *)&sth->th_dport &&
+	    ih->ip_src.s_addr == sih->ip_src.s_addr &&
+ ih->ip_dst.s_addr == sih->ip_dst.s_addr);
+}
+
+static __inline struct sge_lro_session *
+lro_find_session(struct sge_lro *l, int idx, struct ip *ih, struct tcphdr *th)
+{
+ struct sge_lro_session *s;
+ int active = 0;
+
+ while (active < l->num_active) {
+ s = lro_session(l, idx);
+ if (s->mh.mh_head) {
+ if (lro_match_session(s, ih, th)) {
+ l->last_s = s;
+ return s;
+ }
+ active++;
+ }
+ LRO_IDX_INC(idx);
+ }
+
+ return NULL;
+}
+
+static __inline int
+can_lro_packet(struct cpl_rx_pkt *cpl, unsigned int rss_hi)
+{
+ struct ether_header *eh = (struct ether_header *)(cpl + 1);
+ struct ip *ih = (struct ip *)(eh + 1);
+
+ /*
+ * XXX VLAN support?
+ */
+ if (__predict_false(G_HASHTYPE(ntohl(rss_hi)) != RSS_HASH_4_TUPLE ||
+ (*((uint8_t *)cpl + 1) & 0x90) != 0x10 ||
+ cpl->csum != 0xffff || eh->ether_type != ntohs(ETHERTYPE_IP) ||
+ ih->ip_hl != (sizeof (*ih) >> 2))) {
+ return 0;
+ }
+
+ return 1;
+}
+
+static int
+can_lro_tcpsegment(struct tcphdr *th)
+{
+ int olen = (th->th_off << 2) - sizeof (*th);
+ u8 control_bits = *((u8 *)th + 13);
+
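+	/*
+	 * Merge only pure ACK segments: FIN, SYN, RST, URG, and CWR must
+	 * all be clear (PSH and ECE are not examined).
+	 */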
+ if (__predict_false((control_bits & 0xB7) != 0x10))
+ goto no_lro;
+
+ if (olen) {
+ uint32_t *ptr = (u32 *)(th + 1);
+ if (__predict_false(olen != TCPOLEN_TSTAMP_APPA ||
+ *ptr != ntohl((TCPOPT_NOP << 24) |
+ (TCPOPT_NOP << 16) |
+ (TCPOPT_TIMESTAMP << 8) |
+ TCPOLEN_TIMESTAMP)))
+ goto no_lro;
+ }
+
+ return 1;
+
+ no_lro:
+ return 0;
+}
+
+static __inline void
+lro_new_session_init(struct sge_lro_session *s, struct t3_mbuf_hdr *mh)
+{
+ struct ip *ih = (struct ip *)(mh->mh_head->m_data + IPH_OFFSET);
+ struct tcphdr *th = (struct tcphdr *) (ih + 1);
+ int ip_len = ntohs(ih->ip_len);
+
+ DPRINTF("%s(s=%p, mh->mh_head=%p, mh->mh_tail=%p)\n", __FUNCTION__,
+ s, mh->mh_head, mh->mh_tail);
+
+	s->mh = *mh;
+
+ MBUF_HEADER_CHECK(mh);
+ s->ip_len = ip_len;
+ s->seq = ntohl(th->th_seq) + ip_len - sizeof(*ih) - (th->th_off << 2);
+
+}
+
+static void
+lro_flush_session(struct sge_qset *qs, struct sge_lro_session *s, struct t3_mbuf_hdr *mh)
+{
+ struct sge_lro *l = &qs->lro;
+ struct t3_mbuf_hdr *smh = &s->mh;
+ struct ip *ih = (struct ip *)(smh->mh_head->m_data + IPH_OFFSET);
+
+
+ DPRINTF("%s(qs=%p, s=%p, ", __FUNCTION__,
+ qs, s);
+
+ if (mh)
+ DPRINTF("mh->mh_head=%p, mh->mh_tail=%p)\n",
+ mh->mh_head, mh->mh_tail);
+ else
+ DPRINTF("mh=NULL)\n");
+
+ ih->ip_len = htons(s->ip_len);
+ ih->ip_sum = 0;
+ ih->ip_sum = in_cksum_hdr(ih);
+
+ MBUF_HEADER_CHECK(smh);
+
+ smh->mh_head->m_flags |= M_LRO;
+ t3_rx_eth(qs->port, &qs->rspq, smh->mh_head, 2);
+
+ if (mh) {
+ *smh = *mh;
+ lro_new_session_init(s, mh);
+ } else {
+ smh->mh_head = NULL;
+ smh->mh_tail = NULL;
+ l->num_active--;
+ }
+
+ qs->port_stats[SGE_PSTATS_LRO_FLUSHED]++;
+}
+
+static __inline struct sge_lro_session *
+lro_new_session(struct sge_qset *qs, struct t3_mbuf_hdr *mh, uint32_t rss_hash)
+{
+ struct sge_lro *l = &qs->lro;
+ int idx = LRO_SESSION_IDX_HINT_HASH(rss_hash);
+ struct sge_lro_session *s = lro_session(l, idx);
+
+ DPRINTF("%s(qs=%p, mh->mh_head=%p, mh->mh_tail=%p, rss_hash=0x%x)\n", __FUNCTION__,
+ qs, mh->mh_head, mh->mh_tail, rss_hash);
+
+ if (__predict_true(!s->mh.mh_head))
+ goto done;
+
+ if (l->num_active > MAX_LRO_PER_QSET)
+ panic("MAX_LRO_PER_QSET exceeded");
+
+ if (l->num_active == MAX_LRO_PER_QSET) {
+ lro_flush_session(qs, s, mh);
+ qs->port_stats[SGE_PSTATS_LRO_X_STREAMS]++;
+ return s;
+ }
+
+ while (1) {
+ LRO_IDX_INC(idx);
+ s = lro_session(l, idx);
+ if (!s->mh.mh_head)
+ break;
+ }
+done:
+ lro_new_session_init(s, mh);
+
+ l->num_active++;
+
+ return s;
+
+}
+
+static __inline int
+lro_update_session(struct sge_lro_session *s, struct t3_mbuf_hdr *mh)
+{
+ struct mbuf *m = mh->mh_head;
+ struct t3_mbuf_hdr *smh = &s->mh;
+ struct cpl_rx_pkt *cpl = (struct cpl_rx_pkt *)(smh->mh_head->m_data + 2);
+ struct cpl_rx_pkt *ncpl = (struct cpl_rx_pkt *)(m->m_data + 2);
+ struct ip *nih = (struct ip *)(m->m_data + IPH_OFFSET);
+ struct tcphdr *th, *nth = (struct tcphdr *)(nih + 1);
+ uint32_t seq = ntohl(nth->th_seq);
+ int plen, tcpiphlen, olen = (nth->th_off << 2) - sizeof (*nth);
+
+
+ DPRINTF("%s(s=%p, mh->mh_head=%p, mh->mh_tail=%p)\n", __FUNCTION__,
+ s, mh->mh_head, mh->mh_tail);
+ if (cpl->vlan_valid && cpl->vlan != ncpl->vlan) {
+ return -1;
+ }
+ if (__predict_false(seq != s->seq)) {
+ DPRINTF("sequence mismatch\n");
+ return -1;
+ }
+
+ MBUF_HEADER_CHECK(smh);
+ th = (struct tcphdr *)(smh->mh_head->m_data + IPH_OFFSET + sizeof (struct ip));
+
+ if (olen) {
+ uint32_t *ptr = (uint32_t *)(th + 1);
+ uint32_t *nptr = (uint32_t *)(nth + 1);
+
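+		/*
+		 * Refuse the merge if the new segment's timestamp is older
+		 * than the stored one or its timestamp echo reply is zero.
+		 */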
+ if (__predict_false(ntohl(*(ptr + 1)) > ntohl(*(nptr + 1)) ||
+ !*(nptr + 2))) {
+ return -1;
+ }
+ *(ptr + 1) = *(nptr + 1);
+ *(ptr + 2) = *(nptr + 2);
+ }
+ th->th_ack = nth->th_ack;
+ th->th_win = nth->th_win;
+
+ tcpiphlen = (nth->th_off << 2) + sizeof (*nih);
+ plen = ntohs(nih->ip_len) - tcpiphlen;
+ s->seq += plen;
+ s->ip_len += plen;
+ smh->mh_head->m_pkthdr.len += plen;
+
+#if 0
+ /* XXX this I *do not* understand */
+ if (plen > skb_shinfo(s->skb)->gso_size)
+ skb_shinfo(s->skb)->gso_size = plen;
+#endif
+#if __FreeBSD_version > 700000
+ if (plen > smh->mh_head->m_pkthdr.tso_segsz)
+ smh->mh_head->m_pkthdr.tso_segsz = plen;
+#endif
+ DPRINTF("m_adj(%d)\n", (int)(IPH_OFFSET + tcpiphlen));
+ m_adj(m, IPH_OFFSET + tcpiphlen);
+#if 0
+ if (__predict_false(!skb_shinfo(s->skb)->frag_list))
+ skb_shinfo(s->skb)->frag_list = skb;
+
+#endif
+ mh->mh_head->m_flags &= ~M_PKTHDR;
+ smh->mh_tail->m_next = mh->mh_head;
+ smh->mh_tail = mh->mh_tail;
+#if 0
+
+ /*
+ * XXX we really need to be able to
+ * support vectors of buffers in FreeBSD
+ */
+ int nr = skb_shinfo(s->skb)->nr_frags;
+ skb_shinfo(s->skb)->frags[nr].page = frag->page;
+ skb_shinfo(s->skb)->frags[nr].page_offset =
+ frag->page_offset + IPH_OFFSET + tcpiphlen;
+ skb_shinfo(s->skb)->frags[nr].size = plen;
+ skb_shinfo(s->skb)->nr_frags = ++nr;
+
+#endif
+ return (0);
+}
+
+void
+t3_rx_eth_lro(adapter_t *adap, struct sge_rspq *rq, struct t3_mbuf_hdr *mh,
+ int ethpad, uint32_t rss_hash, uint32_t rss_csum, int lro)
+{
+ struct mbuf *m = mh->mh_head;
+ struct sge_qset *qs = rspq_to_qset(rq);
+ struct cpl_rx_pkt *cpl = (struct cpl_rx_pkt *)(m->m_data + ethpad);
+ struct ether_header *eh = (struct ether_header *)(cpl + 1);
+ struct ip *ih;
+ struct tcphdr *th;
+ struct sge_lro_session *s = NULL;
+ struct port_info *pi = qs->port;
+
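+	/*
+	 * Merge the frame into an existing LRO session if possible, start
+	 * a new session for it, or fall through and hand it to t3_rx_eth()
+	 * unmodified.
+	 */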
+ if (lro == 0)
+ goto no_lro;
+
+ if (!can_lro_packet(cpl, rss_csum))
+ goto no_lro;
+
+ if (&adap->port[cpl->iff] != pi)
+ panic("bad port index %d\n", cpl->iff);
+
+ ih = (struct ip *)(eh + 1);
+ th = (struct tcphdr *)(ih + 1);
+
+ s = lro_find_session(&qs->lro,
+ LRO_SESSION_IDX_HINT_HASH(rss_hash), ih, th);
+
+ if (__predict_false(!can_lro_tcpsegment(th))) {
+ goto no_lro;
+ } else if (__predict_false(!s)) {
+ s = lro_new_session(qs, mh, rss_hash);
+ } else {
+ if (lro_update_session(s, mh)) {
+ lro_flush_session(qs, s, mh);
+ }
+ if (__predict_false(s->mh.mh_head->m_pkthdr.len + pi->ifp->if_mtu > 65535)) {
+ lro_flush_session(qs, s, NULL);
+ }
+ }
+
+ qs->port_stats[SGE_PSTATS_LRO_QUEUED]++;
+ return;
+no_lro:
+ if (s)
+ lro_flush_session(qs, s, NULL);
+
+ if (m->m_len == 0 || m->m_pkthdr.len == 0 || (m->m_flags & M_PKTHDR) == 0)
+ DPRINTF("rx_eth_lro mbuf len=%d pktlen=%d flags=0x%x\n",
+ m->m_len, m->m_pkthdr.len, m->m_flags);
+ t3_rx_eth(pi, rq, m, ethpad);
+}
+
+void
+t3_sge_lro_flush_all(adapter_t *adap, struct sge_qset *qs)
+{
+ struct sge_lro *l = &qs->lro;
+ struct sge_lro_session *s = l->last_s;
+ int active = 0, idx = 0, num_active = l->num_active;
+
+ if (__predict_false(!s))
+ s = lro_session(l, idx);
+
+ while (active < num_active) {
+ if (s->mh.mh_head) {
+ lro_flush_session(qs, s, NULL);
+ active++;
+ }
+ LRO_IDX_INC(idx);
+ s = lro_session(l, idx);
+ }
+}
diff --git a/sys/dev/cxgb/cxgb_main.c b/sys/dev/cxgb/cxgb_main.c
new file mode 100644
index 0000000..e33015a
--- /dev/null
+++ b/sys/dev/cxgb/cxgb_main.c
@@ -0,0 +1,1792 @@
+/**************************************************************************
+
+Copyright (c) 2007, Chelsio Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Chelsio Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <sys/module.h>
+#include <sys/pciio.h>
+#include <sys/conf.h>
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <sys/bus_dma.h>
+#include <sys/rman.h>
+#include <sys/ioccom.h>
+#include <sys/mbuf.h>
+#include <sys/linker.h>
+#include <sys/firmware.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/smp.h>
+#include <sys/sysctl.h>
+#include <sys/queue.h>
+#include <sys/taskqueue.h>
+
+
+
+#include <net/bpf.h>
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <net/if_arp.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+#include <net/if_types.h>
+
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+#include <netinet/if_ether.h>
+#include <netinet/ip.h>
+#include <netinet/tcp.h>
+#include <netinet/udp.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pci_private.h>
+
+#include <dev/cxgb/cxgb_osdep.h>
+#include <dev/cxgb/common/cxgb_common.h>
+#include <dev/cxgb/cxgb_ioctl.h>
+#include <dev/cxgb/common/cxgb_regs.h>
+#include <dev/cxgb/common/cxgb_t3_cpl.h>
+#include <dev/cxgb/common/cxgb_firmware_exports.h>
+
+
+#ifdef PRIV_SUPPORTED
+#include <sys/priv.h>
+#endif
+
+static int cxgb_setup_msix(adapter_t *, int);
+static void cxgb_init(void *);
+static void cxgb_init_locked(struct port_info *);
+static void cxgb_stop(struct port_info *);
+static void cxgb_set_rxmode(struct port_info *);
+static int cxgb_ioctl(struct ifnet *, unsigned long, caddr_t);
+static void cxgb_start(struct ifnet *);
+static void cxgb_start_proc(void *, int ncount);
+static int cxgb_media_change(struct ifnet *);
+static void cxgb_media_status(struct ifnet *, struct ifmediareq *);
+static int setup_sge_qsets(adapter_t *);
+static void cxgb_async_intr(void *);
+static void cxgb_ext_intr_handler(void *, int);
+static void cxgb_tick(void *);
+static void check_link_status(adapter_t *sc);
+static void setup_rss(adapter_t *sc);
+
+/* Attachment glue for the PCI controller end of the device. Each port of
+ * the device is attached separately, as defined later.
+ */
+static int cxgb_controller_probe(device_t);
+static int cxgb_controller_attach(device_t);
+static int cxgb_controller_detach(device_t);
+static void cxgb_free(struct adapter *);
+static __inline void reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
+ unsigned int end);
+static void cxgb_get_regs(adapter_t *sc, struct ifconf_regs *regs, uint8_t *buf);
+static int cxgb_get_regs_len(void);
+
+static device_method_t cxgb_controller_methods[] = {
+ DEVMETHOD(device_probe, cxgb_controller_probe),
+ DEVMETHOD(device_attach, cxgb_controller_attach),
+ DEVMETHOD(device_detach, cxgb_controller_detach),
+
+ /* bus interface */
+ DEVMETHOD(bus_print_child, bus_generic_print_child),
+ DEVMETHOD(bus_driver_added, bus_generic_driver_added),
+
+ { 0, 0 }
+};
+
+static driver_t cxgb_controller_driver = {
+ "cxgbc",
+ cxgb_controller_methods,
+ sizeof(struct adapter)
+};
+
+static devclass_t cxgb_controller_devclass;
+DRIVER_MODULE(cxgbc, pci, cxgb_controller_driver, cxgb_controller_devclass, 0, 0);
+
+/*
+ * Attachment glue for the ports. Attachment is done directly to the
+ * controller device.
+ */
+static int cxgb_port_probe(device_t);
+static int cxgb_port_attach(device_t);
+static int cxgb_port_detach(device_t);
+
+static device_method_t cxgb_port_methods[] = {
+ DEVMETHOD(device_probe, cxgb_port_probe),
+ DEVMETHOD(device_attach, cxgb_port_attach),
+ DEVMETHOD(device_detach, cxgb_port_detach),
+ { 0, 0 }
+};
+
+static driver_t cxgb_port_driver = {
+ "cxgb",
+ cxgb_port_methods,
+ 0
+};
+
+static d_ioctl_t cxgb_extension_ioctl;
+
+static devclass_t cxgb_port_devclass;
+DRIVER_MODULE(cxgb, cxgbc, cxgb_port_driver, cxgb_port_devclass, 0, 0);
+
+#define SGE_MSIX_COUNT (SGE_QSETS + 1)
+
+/*
+ * The driver uses the best interrupt scheme available on a platform in the
+ * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
+ * of these schemes the driver may consider as follows:
+ *
+ * msi = 2: choose from among all three options
+ * msi = 1: only consider MSI and pin interrupts
+ * msi = 0: force pin interrupts
+ */
+static int msi_allowed = 0;
+TUNABLE_INT("hw.cxgb.msi_allowed", &msi_allowed);
+
+SYSCTL_NODE(_hw, OID_AUTO, cxgb, CTLFLAG_RD, 0, "CXGB driver parameters");
+SYSCTL_UINT(_hw_cxgb, OID_AUTO, msi_allowed, CTLFLAG_RDTUN, &msi_allowed, 0,
+ "MSI-X, MSI, INTx selector");
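+
+/*
+ * As a CTLFLAG_RDTUN tunable this must be set at boot time, e.g. in
+ * /boot/loader.conf:
+ *
+ *	hw.cxgb.msi_allowed="2"
+ */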
+
+enum {
+ MAX_TXQ_ENTRIES = 16384,
+ MAX_CTRL_TXQ_ENTRIES = 1024,
+ MAX_RSPQ_ENTRIES = 16384,
+ MAX_RX_BUFFERS = 16384,
+ MAX_RX_JUMBO_BUFFERS = 16384,
+ MIN_TXQ_ENTRIES = 4,
+ MIN_CTRL_TXQ_ENTRIES = 4,
+ MIN_RSPQ_ENTRIES = 32,
+ MIN_FL_ENTRIES = 32
+};
+
+#define PORT_MASK ((1 << MAX_NPORTS) - 1)
+
+/* Table for probing the cards. The desc field is used only as the table terminator */
+struct cxgb_ident {
+ uint16_t vendor;
+ uint16_t device;
+ int index;
+ char *desc;
+} cxgb_identifiers[] = {
+ {PCI_VENDOR_ID_CHELSIO, 0x0020, 0, "PE9000"},
+ {PCI_VENDOR_ID_CHELSIO, 0x0021, 1, "T302E"},
+ {PCI_VENDOR_ID_CHELSIO, 0x0022, 2, "T310E"},
+ {PCI_VENDOR_ID_CHELSIO, 0x0023, 3, "T320X"},
+ {PCI_VENDOR_ID_CHELSIO, 0x0024, 1, "T302X"},
+ {PCI_VENDOR_ID_CHELSIO, 0x0025, 3, "T320E"},
+ {PCI_VENDOR_ID_CHELSIO, 0x0026, 2, "T310X"},
+ {PCI_VENDOR_ID_CHELSIO, 0x0030, 2, "T3B10"},
+ {PCI_VENDOR_ID_CHELSIO, 0x0031, 3, "T3B20"},
+ {PCI_VENDOR_ID_CHELSIO, 0x0032, 1, "T3B02"},
+ {0, 0, 0, NULL}
+};
+
+static struct cxgb_ident *
+cxgb_get_ident(device_t dev)
+{
+ struct cxgb_ident *id;
+
+ for (id = cxgb_identifiers; id->desc != NULL; id++) {
+ if ((id->vendor == pci_get_vendor(dev)) &&
+ (id->device == pci_get_device(dev))) {
+ return (id);
+ }
+ }
+ return (NULL);
+}
+
+static const struct adapter_info *
+cxgb_get_adapter_info(device_t dev)
+{
+ struct cxgb_ident *id;
+ const struct adapter_info *ai;
+
+ id = cxgb_get_ident(dev);
+ if (id == NULL)
+ return (NULL);
+
+ ai = t3_get_adapter_info(id->index);
+
+ return (ai);
+}
+
+static int
+cxgb_controller_probe(device_t dev)
+{
+ const struct adapter_info *ai;
+ char *ports, buf[80];
+
+ ai = cxgb_get_adapter_info(dev);
+ if (ai == NULL)
+ return (ENXIO);
+
+ if (ai->nports == 1)
+ ports = "port";
+ else
+ ports = "ports";
+
+ snprintf(buf, sizeof(buf), "%s RNIC, %d %s", ai->desc, ai->nports, ports);
+ device_set_desc_copy(dev, buf);
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+cxgb_fw_download(adapter_t *sc, device_t dev)
+{
+ char buf[32];
+#ifdef FIRMWARE_LATEST
+ const struct firmware *fw;
+#else
+ struct firmware *fw;
+#endif
+ int status;
+
+ snprintf(&buf[0], sizeof(buf), "t3fw%d%d", CHELSIO_FW_MAJOR, CHELSIO_FW_MINOR);
+
+ fw = firmware_get(buf);
+
+
+ if (fw == NULL) {
+ device_printf(dev, "Could not find firmware image %s\n", buf);
+ return ENOENT;
+ }
+
+ status = t3_load_fw(sc, (const uint8_t *)fw->data, fw->datasize);
+
+ firmware_put(fw, FIRMWARE_UNLOAD);
+
+ return (status);
+}
+
+
+static int
+cxgb_controller_attach(device_t dev)
+{
+ driver_intr_t *cxgb_intr = NULL;
+ device_t child;
+ const struct adapter_info *ai;
+ struct adapter *sc;
+ int i, msi_count = 0, error = 0;
+ uint32_t vers;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+
+ pci_enable_busmaster(dev);
+
+ /*
+ * Allocate the registers and make them available to the driver.
+ * The registers that we care about for NIC mode are in BAR 0
+ */
+ sc->regs_rid = PCIR_BAR(0);
+ if ((sc->regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
+ &sc->regs_rid, RF_ACTIVE)) == NULL) {
+ device_printf(dev, "Cannot allocate BAR\n");
+ return (ENXIO);
+ }
+
+ mtx_init(&sc->sge.reg_lock, "SGE reg lock", NULL, MTX_DEF);
+ mtx_init(&sc->lock, "cxgb controller lock", NULL, MTX_DEF);
+ mtx_init(&sc->mdio_lock, "cxgb mdio", NULL, MTX_DEF);
+
+ sc->bt = rman_get_bustag(sc->regs_res);
+ sc->bh = rman_get_bushandle(sc->regs_res);
+ sc->mmio_len = rman_get_size(sc->regs_res);
+
+ /* Allocate the BAR for doing MSI-X. If it succeeds, try to allocate
+ * enough messages for the queue sets. If that fails, try falling
+ * back to MSI. If that fails, then try falling back to the legacy
+ * interrupt pin model.
+ */
+#ifdef MSI_SUPPORTED
+ sc->msix_regs_rid = 0x20;
+ if ((msi_allowed >= 2) &&
+ (sc->msix_regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
+ &sc->msix_regs_rid, RF_ACTIVE)) != NULL) {
+
+ msi_count = SGE_MSIX_COUNT;
+ if ((pci_alloc_msix(dev, &msi_count) != 0) ||
+ (msi_count != SGE_MSIX_COUNT)) {
+ msi_count = 0;
+ pci_release_msi(dev);
+ bus_release_resource(dev, SYS_RES_MEMORY,
+ sc->msix_regs_rid, sc->msix_regs_res);
+ sc->msix_regs_res = NULL;
+		} else {
+			sc->flags |= USING_MSIX;
+			cxgb_intr = t3_intr_msix;
+			device_printf(dev, "allocated %d msix intrs\n",
+			    msi_count);
+		}
+ }
+
+ if ((msi_allowed >= 1) && (msi_count == 0)) {
+ msi_count = 1;
+ if (pci_alloc_msi(dev, &msi_count)) {
+ device_printf(dev, "alloc msi failed\n");
+ msi_count = 0;
+ pci_release_msi(dev);
+ } else {
+ sc->flags |= USING_MSI;
+ sc->irq_rid = 1;
+ cxgb_intr = t3_intr_msi;
+ }
+ }
+#endif
+ if (msi_count == 0) {
+ sc->irq_rid = 0;
+ cxgb_intr = t3b_intr;
+ }
+
+
+ /* Create a private taskqueue thread for handling driver events */
+#ifdef TASKQUEUE_CURRENT
+ sc->tq = taskqueue_create("cxgb_taskq", M_NOWAIT,
+ taskqueue_thread_enqueue, &sc->tq);
+#else
+ sc->tq = taskqueue_create_fast("cxgb_taskq", M_NOWAIT,
+ taskqueue_thread_enqueue, &sc->tq);
+#endif
+ if (sc->tq == NULL) {
+ device_printf(dev, "failed to allocate controller task queue\n");
+ goto out;
+ }
+
+ taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
+ device_get_nameunit(dev));
+ TASK_INIT(&sc->ext_intr_task, 0, cxgb_ext_intr_handler, sc);
+
+
+ /* Create a periodic callout for checking adapter status */
+ callout_init_mtx(&sc->cxgb_tick_ch, &sc->lock, 0);
+
+ ai = cxgb_get_adapter_info(dev);
+ if (t3_prep_adapter(sc, ai, 1) < 0) {
+ error = ENODEV;
+ goto out;
+ }
+ if (t3_check_fw_version(sc) != 0) {
+ /*
+ * Warn user that a firmware update will be attempted in init.
+ */
+ device_printf(dev, "firmware needs to be updated to version %d.%d\n",
+ CHELSIO_FW_MAJOR, CHELSIO_FW_MINOR);
+ sc->flags &= ~FW_UPTODATE;
+ } else {
+ sc->flags |= FW_UPTODATE;
+ }
+
+ if (t3_init_hw(sc, 0) != 0) {
+ device_printf(dev, "hw initialization failed\n");
+ error = ENXIO;
+ goto out;
+ }
+ t3_write_reg(sc, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
+
+ /*
+ * Create a child device for each MAC. The ethernet attachment
+ * will be done in these children.
+ */
+ for (i = 0; i < (sc)->params.nports; ++i) {
+ if ((child = device_add_child(dev, "cxgb", -1)) == NULL) {
+ device_printf(dev, "failed to add child port\n");
+ error = EINVAL;
+ goto out;
+ }
+ sc->portdev[i] = child;
+ sc->port[i].adapter = sc;
+#ifdef MULTIQ
+ sc->port[i].nqsets = mp_ncpus;
+#else
+ sc->port[i].nqsets = 1;
+#endif
+ sc->port[i].first_qset = i;
+ sc->port[i].port = i;
+ device_set_softc(child, &sc->port[i]);
+ }
+ if ((error = bus_generic_attach(dev)) != 0)
+		goto out;
+
+ if ((error = setup_sge_qsets(sc)) != 0)
+ goto out;
+
+ setup_rss(sc);
+
+ /* If it's MSI or INTx, allocate a single interrupt for everything */
+ if ((sc->flags & USING_MSIX) == 0) {
+ if ((sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
+ &sc->irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
+ device_printf(dev, "Cannot allocate interrupt rid=%d\n", sc->irq_rid);
+ error = EINVAL;
+ goto out;
+ }
+ device_printf(dev, "allocated irq_res=%p\n", sc->irq_res);
+
+ if (bus_setup_intr(dev, sc->irq_res, INTR_MPSAFE|INTR_TYPE_NET,
+#ifdef INTR_FILTERS
+ NULL,
+#endif
+ cxgb_intr, sc, &sc->intr_tag)) {
+ device_printf(dev, "Cannot set up interrupt\n");
+ error = EINVAL;
+ goto out;
+ }
+ } else {
+ cxgb_setup_msix(sc, msi_count);
+ }
+
+ sc->params.stats_update_period = 1;
+
+ /* initialize sge private state */
+ t3_sge_init_sw(sc);
+
+ t3_led_ready(sc);
+
+ error = t3_get_fw_version(sc, &vers);
+ if (error)
+ goto out;
+
+ snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d", G_FW_VERSION_MAJOR(vers),
+ G_FW_VERSION_MINOR(vers));
+
+ t3_add_sysctls(sc);
+
+out:
+ if (error)
+ cxgb_free(sc);
+
+ return (error);
+}
+
+static int
+cxgb_controller_detach(device_t dev)
+{
+ struct adapter *sc;
+
+ sc = device_get_softc(dev);
+
+ cxgb_free(sc);
+
+ return (0);
+}
+
+static void
+cxgb_free(struct adapter *sc)
+{
+ int i;
+
+ for (i = 0; i < (sc)->params.nports; ++i) {
+ if (sc->portdev[i] != NULL)
+ device_delete_child(sc->dev, sc->portdev[i]);
+ }
+
+ t3_sge_deinit_sw(sc);
+
+ if (sc->tq != NULL) {
+ taskqueue_drain(sc->tq, &sc->ext_intr_task);
+ taskqueue_free(sc->tq);
+ }
+
+ callout_drain(&sc->cxgb_tick_ch);
+
+ bus_generic_detach(sc->dev);
+
+ t3_free_sge_resources(sc);
+ t3_sge_free(sc);
+
+ for (i = 0; i < SGE_QSETS; i++) {
+ if (sc->msix_intr_tag[i] != NULL) {
+ bus_teardown_intr(sc->dev, sc->msix_irq_res[i],
+ sc->msix_intr_tag[i]);
+ }
+ if (sc->msix_irq_res[i] != NULL) {
+ bus_release_resource(sc->dev, SYS_RES_IRQ,
+ sc->msix_irq_rid[i], sc->msix_irq_res[i]);
+ }
+ }
+
+ if (sc->intr_tag != NULL) {
+ bus_teardown_intr(sc->dev, sc->irq_res, sc->intr_tag);
+ }
+
+ if (sc->irq_res != NULL) {
+ device_printf(sc->dev, "de-allocating interrupt irq_rid=%d irq_res=%p\n",
+ sc->irq_rid, sc->irq_res);
+ bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
+ sc->irq_res);
+ }
+#ifdef MSI_SUPPORTED
+ if (sc->flags & (USING_MSI | USING_MSIX)) {
+ device_printf(sc->dev, "releasing msi message(s)\n");
+ pci_release_msi(sc->dev);
+ }
+#endif
+ if (sc->msix_regs_res != NULL) {
+ bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->msix_regs_rid,
+ sc->msix_regs_res);
+ }
+
+ if (sc->regs_res != NULL)
+ bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->regs_rid,
+ sc->regs_res);
+
+ mtx_destroy(&sc->mdio_lock);
+ mtx_destroy(&sc->sge.reg_lock);
+ mtx_destroy(&sc->lock);
+
+ return;
+}
+
+/**
+ * setup_sge_qsets - configure SGE Tx/Rx/response queues
+ * @sc: the controller softc
+ *
+ * Determines how many sets of SGE queues to use and initializes them.
+ * We support multiple queue sets per port if we have MSI-X, otherwise
+ * just one queue set per port.
+ */
+static int
+setup_sge_qsets(adapter_t *sc)
+{
+ int i, j, err, irq_idx, qset_idx;
+ u_int ntxq = 3;
+
+ if ((err = t3_sge_alloc(sc)) != 0) {
+ printf("t3_sge_alloc returned %d\n", err);
+ return (err);
+ }
+
+ if (sc->params.rev > 0 && !(sc->flags & USING_MSI))
+ irq_idx = -1;
+ else
+ irq_idx = 0;
+
+ for (qset_idx = 0, i = 0; i < (sc)->params.nports; ++i) {
+ struct port_info *pi = &sc->port[i];
+
+ for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
+ err = t3_sge_alloc_qset(sc, qset_idx, 1,
+ (sc->flags & USING_MSIX) ? qset_idx + 1 : irq_idx,
+ &sc->params.sge.qset[qset_idx], ntxq, pi);
+ if (err) {
+ t3_free_sge_resources(sc);
+ printf("t3_sge_alloc_qset failed with %d\n", err);
+ return (err);
+ }
+ }
+ }
+
+ return (0);
+}
+
+static int
+cxgb_setup_msix(adapter_t *sc, int msix_count)
+{
+ int i, j, k, nqsets, rid;
+
+ /* The first message indicates link changes and error conditions */
+ sc->irq_rid = 1;
+ if ((sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
+ &sc->irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
+ device_printf(sc->dev, "Cannot allocate msix interrupt\n");
+ return (EINVAL);
+ }
+ if (bus_setup_intr(sc->dev, sc->irq_res, INTR_MPSAFE|INTR_TYPE_NET,
+#ifdef INTR_FILTERS
+ NULL,
+#endif
+ cxgb_async_intr, sc, &sc->intr_tag)) {
+ device_printf(sc->dev, "Cannot set up interrupt\n");
+ return (EINVAL);
+ }
+
+ for (i = 0, k = 0; i < (sc)->params.nports; ++i) {
+ nqsets = sc->port[i].nqsets;
+ for (j = 0; j < nqsets; ++j, k++) {
+ struct sge_qset *qs = &sc->sge.qs[k];
+
+ rid = k + 2;
+ if (cxgb_debug)
+ printf("rid=%d ", rid);
+ if ((sc->msix_irq_res[k] = bus_alloc_resource_any(
+ sc->dev, SYS_RES_IRQ, &rid,
+ RF_SHAREABLE | RF_ACTIVE)) == NULL) {
+ device_printf(sc->dev, "Cannot allocate "
+ "interrupt for message %d\n", rid);
+ return (EINVAL);
+ }
+ sc->msix_irq_rid[k] = rid;
+			if (bus_setup_intr(sc->dev, sc->msix_irq_res[k],
+ INTR_MPSAFE|INTR_TYPE_NET,
+#ifdef INTR_FILTERS
+ NULL,
+#endif
+ t3_intr_msix, qs, &sc->msix_intr_tag[k])) {
+ device_printf(sc->dev, "Cannot set up "
+ "interrupt for message %d\n", rid);
+ return (EINVAL);
+ }
+ }
+ }
+ return (0);
+}
+
+static int
+cxgb_port_probe(device_t dev)
+{
+ struct port_info *p;
+ char buf[80];
+
+ p = device_get_softc(dev);
+
+ snprintf(buf, sizeof(buf), "Port %d %s", p->port, p->port_type->desc);
+ device_set_desc_copy(dev, buf);
+ return (0);
+}
+
+
+static int
+cxgb_makedev(struct port_info *pi)
+{
+ struct cdevsw *cxgb_cdevsw;
+
+ if ((cxgb_cdevsw = malloc(sizeof(struct cdevsw), M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL)
+ return (ENOMEM);
+
+ cxgb_cdevsw->d_version = D_VERSION;
+ cxgb_cdevsw->d_name = strdup(pi->ifp->if_xname, M_DEVBUF);
+ cxgb_cdevsw->d_ioctl = cxgb_extension_ioctl;
+
+ pi->port_cdev = make_dev(cxgb_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600,
+ pi->ifp->if_xname);
+
+ if (pi->port_cdev == NULL)
+ return (ENOMEM);
+
+ pi->port_cdev->si_drv1 = (void *)pi;
+
+ return (0);
+}
+
+
+#ifdef TSO_SUPPORTED
+#define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU)
+/* Don't enable TSO6 yet */
+#define CXGB_CAP_ENABLE (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO4 | IFCAP_JUMBO_MTU)
+#else
+#define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_JUMBO_MTU)
+/* Don't enable TSO6 yet */
+#define CXGB_CAP_ENABLE (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_JUMBO_MTU)
+#define IFCAP_TSO4 0x0
+#define CSUM_TSO 0x0
+#endif
+
+
+static int
+cxgb_port_attach(device_t dev)
+{
+ struct port_info *p;
+ struct ifnet *ifp;
+ int media_flags;
+ int err;
+ char buf[64];
+
+ p = device_get_softc(dev);
+
+ snprintf(buf, sizeof(buf), "cxgb port %d", p->port);
+ mtx_init(&p->lock, buf, 0, MTX_DEF);
+
+ /* Allocate an ifnet object and set it up */
+ ifp = p->ifp = if_alloc(IFT_ETHER);
+ if (ifp == NULL) {
+ device_printf(dev, "Cannot allocate ifnet\n");
+ return (ENOMEM);
+ }
+
+ /*
+ * Note that there is currently no watchdog timer.
+ */
+ if_initname(ifp, device_get_name(dev), device_get_unit(dev));
+ ifp->if_init = cxgb_init;
+ ifp->if_softc = p;
+ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+ ifp->if_ioctl = cxgb_ioctl;
+ ifp->if_start = cxgb_start;
+ ifp->if_timer = 0; /* Disable ifnet watchdog */
+ ifp->if_watchdog = NULL;
+
+ ifp->if_snd.ifq_drv_maxlen = TX_ETH_Q_SIZE;
+ IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
+ IFQ_SET_READY(&ifp->if_snd);
+
+ ifp->if_hwassist = ifp->if_capabilities = ifp->if_capenable = 0;
+ ifp->if_capabilities |= CXGB_CAP;
+ ifp->if_capenable |= CXGB_CAP_ENABLE;
+ ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO);
+ ifp->if_baudrate = 100000000;
+
+ ether_ifattach(ifp, p->hw_addr);
+#ifdef DEFAULT_JUMBO
+ ifp->if_mtu = 9000;
+#endif
+ if ((err = cxgb_makedev(p)) != 0) {
+ printf("makedev failed %d\n", err);
+ return (err);
+ }
+ ifmedia_init(&p->media, IFM_IMASK, cxgb_media_change,
+ cxgb_media_status);
+
+ if (!strcmp(p->port_type->desc, "10GBASE-CX4"))
+ media_flags = IFM_ETHER | IFM_10G_CX4;
+ else if (!strcmp(p->port_type->desc, "10GBASE-SR"))
+ media_flags = IFM_ETHER | IFM_10G_SR;
+ else if (!strcmp(p->port_type->desc, "10GBASE-XR"))
+ media_flags = IFM_ETHER | IFM_10G_LR;
+ else {
+ printf("unsupported media type %s\n", p->port_type->desc);
+ return (ENXIO);
+ }
+
+ ifmedia_add(&p->media, media_flags, 0, NULL);
+ ifmedia_add(&p->media, IFM_ETHER | IFM_AUTO, 0, NULL);
+ ifmedia_set(&p->media, media_flags);
+
+ snprintf(buf, sizeof(buf), "cxgb_port_taskq%d", p->port);
+#ifdef TASKQUEUE_CURRENT
+	/* Create a taskqueue for handling TX without starvation */
+ p->tq = taskqueue_create(buf, M_NOWAIT,
+ taskqueue_thread_enqueue, &p->tq);
+#else
+	/* Create a taskqueue for handling TX without starvation */
+ p->tq = taskqueue_create_fast(buf, M_NOWAIT,
+ taskqueue_thread_enqueue, &p->tq);
+#endif
+
+
+ if (p->tq == NULL) {
+ device_printf(dev, "failed to allocate port task queue\n");
+ return (ENOMEM);
+ }
+ taskqueue_start_threads(&p->tq, 1, PI_NET, "%s taskq",
+ device_get_nameunit(dev));
+ TASK_INIT(&p->start_task, 0, cxgb_start_proc, ifp);
+
+
+ return (0);
+}
+
+static int
+cxgb_port_detach(device_t dev)
+{
+ struct port_info *p;
+
+ p = device_get_softc(dev);
+ mtx_destroy(&p->lock);
+ if (p->tq != NULL) {
+ taskqueue_drain(p->tq, &p->start_task);
+ taskqueue_free(p->tq);
+ p->tq = NULL;
+ }
+
+ ether_ifdetach(p->ifp);
+ if_free(p->ifp);
+
+ destroy_dev(p->port_cdev);
+
+
+ return (0);
+}
+
+void
+t3_fatal_err(struct adapter *sc)
+{
+ u_int fw_status[4];
+
+ device_printf(sc->dev,"encountered fatal error, operation suspended\n");
+ if (!t3_cim_ctl_blk_read(sc, 0xa0, 4, fw_status))
+ device_printf(sc->dev, "FW_ status: 0x%x, 0x%x, 0x%x, 0x%x\n",
+ fw_status[0], fw_status[1], fw_status[2], fw_status[3]);
+}
+
+int
+t3_os_find_pci_capability(adapter_t *sc, int cap)
+{
+ device_t dev;
+ struct pci_devinfo *dinfo;
+ pcicfgregs *cfg;
+ uint32_t status;
+ uint8_t ptr;
+
+ dev = sc->dev;
+ dinfo = device_get_ivars(dev);
+ cfg = &dinfo->cfg;
+
+ status = pci_read_config(dev, PCIR_STATUS, 2);
+ if (!(status & PCIM_STATUS_CAPPRESENT))
+ return (0);
+
+ switch (cfg->hdrtype & PCIM_HDRTYPE) {
+ case 0:
+ case 1:
+ ptr = PCIR_CAP_PTR;
+ break;
+ case 2:
+ ptr = PCIR_CAP_PTR_2;
+ break;
+ default:
+ return (0);
+ break;
+ }
+ ptr = pci_read_config(dev, ptr, 1);
+
+ while (ptr != 0) {
+ if (pci_read_config(dev, ptr + PCICAP_ID, 1) == cap)
+ return (ptr);
+ ptr = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
+ }
+
+ return (0);
+}
+
+int
+t3_os_pci_save_state(struct adapter *sc)
+{
+ device_t dev;
+ struct pci_devinfo *dinfo;
+
+ dev = sc->dev;
+ dinfo = device_get_ivars(dev);
+
+ pci_cfg_save(dev, dinfo, 0);
+ return (0);
+}
+
+int
+t3_os_pci_restore_state(struct adapter *sc)
+{
+ device_t dev;
+ struct pci_devinfo *dinfo;
+
+ dev = sc->dev;
+ dinfo = device_get_ivars(dev);
+
+ pci_cfg_restore(dev, dinfo);
+ return (0);
+}
+
+/**
+ * t3_os_link_changed - handle link status changes
+ * @adapter: the adapter associated with the link change
+ * @port_id: the port index whose link status has changed
+ * @link_stat: the new status of the link
+ * @speed: the new speed setting
+ * @duplex: the new duplex setting
+ * @fc: the new flow-control setting
+ *
+ * This is the OS-dependent handler for link status changes. The OS
+ * neutral handler takes care of most of the processing for these events,
+ * then calls this handler for any OS-specific processing.
+ */
+void
+t3_os_link_changed(adapter_t *adapter, int port_id, int link_status, int speed,
+ int duplex, int fc)
+{
+ struct port_info *pi = &adapter->port[port_id];
+
+ if ((pi->ifp->if_flags & IFF_UP) == 0)
+ return;
+
+ if (link_status)
+ if_link_state_change(pi->ifp, LINK_STATE_UP);
+ else
+ if_link_state_change(pi->ifp, LINK_STATE_DOWN);
+
+}
+
+
+/*
+ * Interrupt-context handler for external (PHY) interrupts.
+ */
+void
+t3_os_ext_intr_handler(adapter_t *sc)
+{
+ if (cxgb_debug)
+ printf("t3_os_ext_intr_handler\n");
+ /*
+ * Schedule a task to handle external interrupts as they may be slow
+ * and we use a mutex to protect MDIO registers. We disable PHY
+ * interrupts in the meantime and let the task reenable them when
+ * it's done.
+ */
+ if (sc->slow_intr_mask) {
+ sc->slow_intr_mask &= ~F_T3DBG;
+ t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
+ taskqueue_enqueue(sc->tq, &sc->ext_intr_task);
+ }
+}
+
+void
+t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[])
+{
+
+	/*
+	 * The ifnet might not be allocated yet when this is called, since
+	 * t3_prep_adapter invokes it early in attach, so save the address
+	 * in the port structure for later use.
+	 */
+ if (cxgb_debug)
+ printf("set_hw_addr on idx %d addr %6D\n", port_idx, hw_addr, ":");
+ bcopy(hw_addr, adapter->port[port_idx].hw_addr, ETHER_ADDR_LEN);
+}
+
+/**
+ * cxgb_link_start - enable a port
+ * @p: the port to enable
+ *
+ * Performs the MAC and PHY actions needed to enable a port.
+ */
+static void
+cxgb_link_start(struct port_info *p)
+{
+ struct ifnet *ifp;
+ struct t3_rx_mode rm;
+ struct cmac *mac = &p->mac;
+
+ ifp = p->ifp;
+
+ t3_init_rx_mode(&rm, p);
+ t3_mac_reset(mac);
+ t3_mac_set_mtu(mac, ifp->if_mtu);
+ t3_mac_set_address(mac, 0, p->hw_addr);
+ t3_mac_set_rx_mode(mac, &rm);
+ t3_link_start(&p->phy, mac, &p->link_config);
+ t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
+}
+
+/**
+ * setup_rss - configure Receive Side Steering (per-queue connection demux)
+ * @adap: the adapter
+ *
+ * Sets up RSS to distribute packets to multiple receive queues. We
+ * configure the RSS CPU lookup table to distribute to the number of HW
+ * receive queues, and the response queue lookup table to narrow that
+ * down to the response queues actually configured for each port.
+ * We always configure the RSS mapping for two ports since the mapping
+ * table has plenty of entries.
+ */
+static void
+setup_rss(adapter_t *adap)
+{
+ int i;
+ u_int nq0 = adap->port[0].nqsets;
+ u_int nq1 = max((u_int)adap->port[1].nqsets, 1U);
+ uint8_t cpus[SGE_QSETS + 1];
+ uint16_t rspq_map[RSS_TABLE_SIZE];
+
+ for (i = 0; i < SGE_QSETS; ++i)
+ cpus[i] = i;
+ cpus[SGE_QSETS] = 0xff;
+
+ for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
+ rspq_map[i] = i % nq0;
+ rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
+ }
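+	/*
+	 * With one queue set per port this maps the lower half of the
+	 * table to port 0's queue set and the upper half to port 1's.
+	 */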
+
+ t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
+ F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
+ V_RRCPLCPUSIZE(6), cpus, rspq_map);
+}
+
+static void
+send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
+ int hi, int port)
+{
+ struct mbuf *m;
+ struct mngt_pktsched_wr *req;
+
+	m = m_gethdr(M_NOWAIT, MT_DATA);
+	if (m == NULL)
+		return;
+	req = (struct mngt_pktsched_wr *)m->m_data;
+ req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
+ req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
+ req->sched = sched;
+ req->idx = qidx;
+ req->min = lo;
+ req->max = hi;
+ req->binding = port;
+ m->m_len = m->m_pkthdr.len = sizeof(*req);
+ t3_mgmt_tx(adap, m);
+}
+
+static void
+bind_qsets(adapter_t *sc)
+{
+ int i, j;
+
+ for (i = 0; i < (sc)->params.nports; ++i) {
+ const struct port_info *pi = adap2pinfo(sc, i);
+
+ for (j = 0; j < pi->nqsets; ++j)
+ send_pktsched_cmd(sc, 1, pi->first_qset + j, -1,
+ -1, i);
+ }
+}
+
+static void
+cxgb_init(void *arg)
+{
+ struct port_info *p = arg;
+
+ PORT_LOCK(p);
+ cxgb_init_locked(p);
+ PORT_UNLOCK(p);
+}
+
+static void
+cxgb_init_locked(struct port_info *p)
+{
+ struct ifnet *ifp;
+ adapter_t *sc = p->adapter;
+ int error;
+
+ mtx_assert(&p->lock, MA_OWNED);
+
+ ifp = p->ifp;
+ if ((sc->flags & FW_UPTODATE) == 0) {
+ device_printf(sc->dev, "updating firmware to version %d.%d\n",
+ CHELSIO_FW_MAJOR, CHELSIO_FW_MINOR);
+ if ((error = cxgb_fw_download(sc, sc->dev)) != 0) {
+			device_printf(sc->dev, "firmware download failed err: %d, "
+			    "interface will be unavailable\n", error);
+ return;
+ }
+ sc->flags |= FW_UPTODATE;
+ }
+
+ cxgb_link_start(p);
+ ADAPTER_LOCK(p->adapter);
+ if (p->adapter->open_device_map == 0)
+ t3_intr_clear(sc);
+ t3_sge_start(sc);
+
+ p->adapter->open_device_map |= (1 << p->port);
+ ADAPTER_UNLOCK(p->adapter);
+ t3_intr_enable(sc);
+ t3_port_intr_enable(sc, p->port);
+ if ((p->adapter->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
+ bind_qsets(sc);
+ p->adapter->flags |= QUEUES_BOUND;
+ callout_reset(&sc->cxgb_tick_ch, sc->params.stats_update_period * hz,
+ cxgb_tick, sc);
+
+
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+}
+
+static void
+cxgb_set_rxmode(struct port_info *p)
+{
+ struct t3_rx_mode rm;
+ struct cmac *mac = &p->mac;
+
+ t3_init_rx_mode(&rm, p);
+ t3_mac_set_rx_mode(mac, &rm);
+}
+
+static void
+cxgb_stop(struct port_info *p)
+{
+ struct ifnet *ifp;
+
+ callout_drain(&p->adapter->cxgb_tick_ch);
+ ifp = p->ifp;
+
+ PORT_LOCK(p);
+ ADAPTER_LOCK(p->adapter);
+ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+ p->adapter->open_device_map &= ~(1 << p->port);
+ if (p->adapter->open_device_map == 0)
+ t3_intr_disable(p->adapter);
+ ADAPTER_UNLOCK(p->adapter);
+ t3_port_intr_disable(p->adapter, p->port);
+ t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
+ PORT_UNLOCK(p);
+
+}
+
+static int
+cxgb_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
+{
+ struct port_info *p = ifp->if_softc;
+ struct ifaddr *ifa = (struct ifaddr *)data;
+ struct ifreq *ifr = (struct ifreq *)data;
+ int flags, error = 0;
+ uint32_t mask;
+
+ switch (command) {
+ case SIOCSIFMTU:
+ if ((ifr->ifr_mtu < ETHERMIN) ||
+ (ifr->ifr_mtu > ETHER_MAX_LEN_JUMBO))
+ error = EINVAL;
+ else if (ifp->if_mtu != ifr->ifr_mtu) {
+ PORT_LOCK(p);
+ ifp->if_mtu = ifr->ifr_mtu;
+ t3_mac_set_mtu(&p->mac, ifp->if_mtu);
+ PORT_UNLOCK(p);
+ }
+ break;
+ case SIOCSIFADDR:
+ case SIOCGIFADDR:
+ if (ifa->ifa_addr->sa_family == AF_INET) {
+ ifp->if_flags |= IFF_UP;
+ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ cxgb_init(p);
+ }
+ arp_ifinit(ifp, ifa);
+ } else
+ error = ether_ioctl(ifp, command, data);
+ break;
+ case SIOCSIFFLAGS:
+ PORT_LOCK(p);
+ if (ifp->if_flags & IFF_UP) {
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ flags = p->if_flags;
+ if (((ifp->if_flags ^ flags) & IFF_PROMISC) ||
+ ((ifp->if_flags ^ flags) & IFF_ALLMULTI))
+ cxgb_set_rxmode(p);
+
+ } else
+ cxgb_init_locked(p);
+ } else {
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ cxgb_stop(p);
+ }
+ }
+ p->if_flags = ifp->if_flags;
+ PORT_UNLOCK(p);
+ break;
+ case SIOCSIFMEDIA:
+ case SIOCGIFMEDIA:
+ error = ifmedia_ioctl(ifp, ifr, &p->media, command);
+ break;
+ case SIOCSIFCAP:
+ PORT_LOCK(p);
+ mask = ifr->ifr_reqcap ^ ifp->if_capenable;
+ if (mask & IFCAP_TXCSUM) {
+ if (IFCAP_TXCSUM & ifp->if_capenable) {
+ ifp->if_capenable &= ~(IFCAP_TXCSUM|IFCAP_TSO4);
+ ifp->if_hwassist &= ~(CSUM_TCP | CSUM_UDP
+ | CSUM_TSO);
+ } else {
+ ifp->if_capenable |= IFCAP_TXCSUM;
+ ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
+ }
+ } else if (mask & IFCAP_RXCSUM) {
+ if (IFCAP_RXCSUM & ifp->if_capenable) {
+ ifp->if_capenable &= ~IFCAP_RXCSUM;
+ } else {
+ ifp->if_capenable |= IFCAP_RXCSUM;
+ }
+ }
+ if (mask & IFCAP_TSO4) {
+ if (IFCAP_TSO4 & ifp->if_capenable) {
+ ifp->if_capenable &= ~IFCAP_TSO4;
+ ifp->if_hwassist &= ~CSUM_TSO;
+ } else if (IFCAP_TXCSUM & ifp->if_capenable) {
+ ifp->if_capenable |= IFCAP_TSO4;
+ ifp->if_hwassist |= CSUM_TSO;
+ } else {
+ if (cxgb_debug)
+ printf("cxgb requires tx checksum offload"
+ " be enabled to use TSO\n");
+ error = EINVAL;
+ }
+ }
+ PORT_UNLOCK(p);
+ break;
+ default:
+ error = ether_ioctl(ifp, command, data);
+ break;
+ }
+
+ return (error);
+}
+
+static int
+cxgb_start_tx(struct ifnet *ifp, uint32_t txmax)
+{
+ struct sge_qset *qs;
+ struct sge_txq *txq;
+ struct port_info *p = ifp->if_softc;
+ struct mbuf *m = NULL;
+ int err, in_use_init;
+
+
+ if (!p->link_config.link_ok)
+ return (ENXIO);
+
+ if (IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+ return (ENOBUFS);
+
+ qs = &p->adapter->sge.qs[p->first_qset];
+ txq = &qs->txq[TXQ_ETH];
+ err = 0;
+
+ mtx_lock(&txq->lock);
+ in_use_init = txq->in_use;
+ while ((txq->in_use - in_use_init < txmax) &&
+ (txq->size > txq->in_use + TX_MAX_DESC)) {
+ IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
+ if (m == NULL)
+ break;
+ if ((err = t3_encap(p, &m)) != 0)
+ break;
+ BPF_MTAP(ifp, m);
+ }
+ mtx_unlock(&txq->lock);
+
+ if (__predict_false(err)) {
+ if (cxgb_debug)
+ printf("would set OFLAGS\n");
+ if (err == ENOMEM) {
+ IFQ_LOCK(&ifp->if_snd);
+ IFQ_DRV_PREPEND(&ifp->if_snd, m);
+ IFQ_UNLOCK(&ifp->if_snd);
+ }
+ }
+ if (err == 0 && m == NULL)
+ err = ENOBUFS;
+
+ return (err);
+}
+
+static void
+cxgb_start_proc(void *arg, int ncount)
+{
+ struct ifnet *ifp = arg;
+ struct port_info *pi = ifp->if_softc;
+ struct sge_qset *qs;
+ struct sge_txq *txq;
+ int error = 0;
+
+ qs = &pi->adapter->sge.qs[pi->first_qset];
+ txq = &qs->txq[TXQ_ETH];
+
+ while (error == 0) {
+ if (desc_reclaimable(txq) > TX_START_MAX_DESC)
+ taskqueue_enqueue(pi->adapter->tq, &pi->adapter->timer_reclaim_task);
+
+ error = cxgb_start_tx(ifp, TX_MAX_DESC + 1);
+ }
+}
+
+static void
+cxgb_start(struct ifnet *ifp)
+{
+ struct port_info *pi = ifp->if_softc;
+ struct sge_qset *qs;
+ struct sge_txq *txq;
+ int err;
+
+ qs = &pi->adapter->sge.qs[pi->first_qset];
+ txq = &qs->txq[TXQ_ETH];
+
+ if (desc_reclaimable(txq) > TX_START_MAX_DESC)
+ taskqueue_enqueue(pi->adapter->tq, &pi->adapter->timer_reclaim_task);
+
+ err = cxgb_start_tx(ifp, TX_START_MAX_DESC);
+
+ if (err == 0)
+ taskqueue_enqueue(pi->tq, &pi->start_task);
+}
+
+
+static int
+cxgb_media_change(struct ifnet *ifp)
+{
+ if_printf(ifp, "media change not supported\n");
+ return (ENXIO);
+}
+
+static void
+cxgb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
+{
+ struct port_info *p = ifp->if_softc;
+
+ ifmr->ifm_status = IFM_AVALID;
+ ifmr->ifm_active = IFM_ETHER;
+
+ if (!p->link_config.link_ok)
+ return;
+
+ ifmr->ifm_status |= IFM_ACTIVE;
+
+ if (p->link_config.duplex)
+ ifmr->ifm_active |= IFM_FDX;
+ else
+ ifmr->ifm_active |= IFM_HDX;
+}
+
+static void
+cxgb_async_intr(void *data)
+{
+ if (cxgb_debug)
+ printf("cxgb_async_intr\n");
+}
+
+static void
+cxgb_ext_intr_handler(void *arg, int count)
+{
+ adapter_t *sc = (adapter_t *)arg;
+
+ if (cxgb_debug)
+ printf("cxgb_ext_intr_handler\n");
+
+ t3_phy_intr_handler(sc);
+
+ /* Now reenable external interrupts */
+ if (sc->slow_intr_mask) {
+ sc->slow_intr_mask |= F_T3DBG;
+ t3_write_reg(sc, A_PL_INT_CAUSE0, F_T3DBG);
+ t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
+ }
+}
+
+static void
+cxgb_tick(void *arg)
+{
+ adapter_t *sc = (adapter_t *)arg;
+ const struct adapter_params *p = &sc->params;
+
+ if (p->linkpoll_period)
+ check_link_status(sc);
+
+ callout_reset(&sc->cxgb_tick_ch, sc->params.stats_update_period * hz,
+ cxgb_tick, sc);
+}
+
+static void
+check_link_status(adapter_t *sc)
+{
+ int i;
+
+ for (i = 0; i < (sc)->params.nports; ++i) {
+ struct port_info *p = &sc->port[i];
+
+ if (!(p->port_type->caps & SUPPORTED_IRQ))
+ t3_link_changed(sc, i);
+ }
+}
+
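+/*
+ * A negative value means the parameter was not supplied, which always
+ * passes the range check.
+ */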
+static int
+in_range(int val, int lo, int hi)
+{
+ return val < 0 || (val <= hi && val >= lo);
+}
+
+static int
+cxgb_extension_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data,
+ int fflag, struct thread *td)
+{
+ int mmd, error = 0;
+ struct port_info *pi = dev->si_drv1;
+ adapter_t *sc = pi->adapter;
+
+#ifdef PRIV_SUPPORTED
+ if (priv_check(td, PRIV_DRIVER)) {
+ if (cxgb_debug)
+ printf("user does not have access to privileged ioctls\n");
+ return (EPERM);
+ }
+#else
+ if (suser(td)) {
+ if (cxgb_debug)
+ printf("user does not have access to privileged ioctls\n");
+ return (EPERM);
+ }
+#endif
+
+ switch (cmd) {
+ case SIOCGMIIREG: {
+ uint32_t val;
+ struct cphy *phy = &pi->phy;
+ struct mii_data *mid = (struct mii_data *)data;
+
+ if (!phy->mdio_read)
+ return (EOPNOTSUPP);
+ if (is_10G(sc)) {
+ mmd = mid->phy_id >> 8;
+ if (!mmd)
+ mmd = MDIO_DEV_PCS;
+ else if (mmd > MDIO_DEV_XGXS)
+				return (EINVAL);
+
+ error = phy->mdio_read(sc, mid->phy_id & 0x1f, mmd,
+ mid->reg_num, &val);
+ } else
+ error = phy->mdio_read(sc, mid->phy_id & 0x1f, 0,
+ mid->reg_num & 0x1f, &val);
+ if (error == 0)
+ mid->val_out = val;
+ break;
+ }
+ case SIOCSMIIREG: {
+ struct cphy *phy = &pi->phy;
+ struct mii_data *mid = (struct mii_data *)data;
+
+ if (!phy->mdio_write)
+ return (EOPNOTSUPP);
+ if (is_10G(sc)) {
+ mmd = mid->phy_id >> 8;
+ if (!mmd)
+ mmd = MDIO_DEV_PCS;
+ else if (mmd > MDIO_DEV_XGXS)
+ return (EINVAL);
+
+ error = phy->mdio_write(sc, mid->phy_id & 0x1f,
+ mmd, mid->reg_num, mid->val_in);
+ } else
+ error = phy->mdio_write(sc, mid->phy_id & 0x1f, 0,
+ mid->reg_num & 0x1f,
+ mid->val_in);
+ break;
+ }
+ case CHELSIO_SETREG: {
+ struct ch_reg *edata = (struct ch_reg *)data;
+ if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
+ return (EFAULT);
+ t3_write_reg(sc, edata->addr, edata->val);
+ break;
+ }
+ case CHELSIO_GETREG: {
+ struct ch_reg *edata = (struct ch_reg *)data;
+ if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
+ return (EFAULT);
+ edata->val = t3_read_reg(sc, edata->addr);
+ break;
+ }
+ case CHELSIO_GET_SGE_CONTEXT: {
+ struct ch_cntxt *ecntxt = (struct ch_cntxt *)data;
+ mtx_lock(&sc->sge.reg_lock);
+ switch (ecntxt->cntxt_type) {
+ case CNTXT_TYPE_EGRESS:
+ error = t3_sge_read_ecntxt(sc, ecntxt->cntxt_id,
+ ecntxt->data);
+ break;
+ case CNTXT_TYPE_FL:
+ error = t3_sge_read_fl(sc, ecntxt->cntxt_id,
+ ecntxt->data);
+ break;
+ case CNTXT_TYPE_RSP:
+ error = t3_sge_read_rspq(sc, ecntxt->cntxt_id,
+ ecntxt->data);
+ break;
+ case CNTXT_TYPE_CQ:
+ error = t3_sge_read_cq(sc, ecntxt->cntxt_id,
+ ecntxt->data);
+ break;
+ default:
+ error = EINVAL;
+ break;
+ }
+ mtx_unlock(&sc->sge.reg_lock);
+ break;
+ }
+ case CHELSIO_GET_SGE_DESC: {
+ struct ch_desc *edesc = (struct ch_desc *)data;
+ int ret;
+ if (edesc->queue_num >= SGE_QSETS * 6)
+ return (EINVAL);
+ ret = t3_get_desc(&sc->sge.qs[edesc->queue_num / 6],
+ edesc->queue_num % 6, edesc->idx, edesc->data);
+ if (ret < 0)
+ return (EINVAL);
+ edesc->size = ret;
+ break;
+ }
+ case CHELSIO_SET_QSET_PARAMS: {
+ struct qset_params *q;
+ struct ch_qset_params *t = (struct ch_qset_params *)data;
+
+ if (t->qset_idx >= SGE_QSETS)
+			return (EINVAL);
+ if (!in_range(t->intr_lat, 0, M_NEWTIMER) ||
+ !in_range(t->cong_thres, 0, 255) ||
+ !in_range(t->txq_size[0], MIN_TXQ_ENTRIES,
+ MAX_TXQ_ENTRIES) ||
+ !in_range(t->txq_size[1], MIN_TXQ_ENTRIES,
+ MAX_TXQ_ENTRIES) ||
+ !in_range(t->txq_size[2], MIN_CTRL_TXQ_ENTRIES,
+ MAX_CTRL_TXQ_ENTRIES) ||
+ !in_range(t->fl_size[0], MIN_FL_ENTRIES, MAX_RX_BUFFERS) ||
+ !in_range(t->fl_size[1], MIN_FL_ENTRIES,
+ MAX_RX_JUMBO_BUFFERS) ||
+ !in_range(t->rspq_size, MIN_RSPQ_ENTRIES, MAX_RSPQ_ENTRIES))
+			return (EINVAL);
+ if ((sc->flags & FULL_INIT_DONE) &&
+ (t->rspq_size >= 0 || t->fl_size[0] >= 0 ||
+ t->fl_size[1] >= 0 || t->txq_size[0] >= 0 ||
+ t->txq_size[1] >= 0 || t->txq_size[2] >= 0 ||
+ t->polling >= 0 || t->cong_thres >= 0))
+			return (EBUSY);
+
+ q = &sc->params.sge.qset[t->qset_idx];
+
+ if (t->rspq_size >= 0)
+ q->rspq_size = t->rspq_size;
+ if (t->fl_size[0] >= 0)
+ q->fl_size = t->fl_size[0];
+ if (t->fl_size[1] >= 0)
+ q->jumbo_size = t->fl_size[1];
+ if (t->txq_size[0] >= 0)
+ q->txq_size[0] = t->txq_size[0];
+ if (t->txq_size[1] >= 0)
+ q->txq_size[1] = t->txq_size[1];
+ if (t->txq_size[2] >= 0)
+ q->txq_size[2] = t->txq_size[2];
+ if (t->cong_thres >= 0)
+ q->cong_thres = t->cong_thres;
+ if (t->intr_lat >= 0) {
+ struct sge_qset *qs = &sc->sge.qs[t->qset_idx];
+
+ q->coalesce_nsecs = t->intr_lat*1000;
+ t3_update_qset_coalesce(qs, q);
+ }
+ break;
+ }
+ case CHELSIO_GET_QSET_PARAMS: {
+ struct qset_params *q;
+ struct ch_qset_params *t = (struct ch_qset_params *)data;
+
+ if (t->qset_idx >= SGE_QSETS)
+ return (EINVAL);
+
+ q = &(sc)->params.sge.qset[t->qset_idx];
+ t->rspq_size = q->rspq_size;
+ t->txq_size[0] = q->txq_size[0];
+ t->txq_size[1] = q->txq_size[1];
+ t->txq_size[2] = q->txq_size[2];
+ t->fl_size[0] = q->fl_size;
+ t->fl_size[1] = q->jumbo_size;
+ t->polling = q->polling;
+ t->intr_lat = q->coalesce_nsecs / 1000;
+ t->cong_thres = q->cong_thres;
+ break;
+ }
+ case CHELSIO_SET_QSET_NUM: {
+ struct ch_reg *edata = (struct ch_reg *)data;
+ unsigned int port_idx = pi->port;
+
+ if (sc->flags & FULL_INIT_DONE)
+ return (EBUSY);
+ if (edata->val < 1 ||
+ (edata->val > 1 && !(sc->flags & USING_MSIX)))
+ return (EINVAL);
+ if (edata->val + sc->port[!port_idx].nqsets > SGE_QSETS)
+ return (EINVAL);
+ sc->port[port_idx].nqsets = edata->val;
+ /*
+ * XXX we're hardcoding ourselves to 2 ports
+		 * just like the Linux driver
+ */
+ sc->port[1].first_qset = sc->port[0].nqsets;
+ break;
+ }
+ case CHELSIO_GET_QSET_NUM: {
+ struct ch_reg *edata = (struct ch_reg *)data;
+ edata->val = pi->nqsets;
+ break;
+ }
+#ifdef notyet
+ /*
+ * XXX FreeBSD driver does not currently support any
+ * offload functionality
+ */
+ case CHELSIO_LOAD_FW:
+ case CHELSIO_DEVUP:
+ case CHELSIO_SETMTUTAB:
+ case CHELSIO_GET_PM:
+ case CHELSIO_SET_PM:
+ case CHELSIO_READ_TCAM_WORD:
+ return (EOPNOTSUPP);
+ break;
+#endif
+ case CHELSIO_GET_MEM: {
+ struct ch_mem_range *t = (struct ch_mem_range *)data;
+ struct mc7 *mem;
+ uint8_t *useraddr;
+ u64 buf[32];
+
+ if (!is_offload(sc))
+ return (EOPNOTSUPP);
+ if (!(sc->flags & FULL_INIT_DONE))
+ return (EIO); /* need the memory controllers */
+ if ((t->addr & 0x7) || (t->len & 0x7))
+ return (EINVAL);
+ if (t->mem_id == MEM_CM)
+ mem = &sc->cm;
+ else if (t->mem_id == MEM_PMRX)
+ mem = &sc->pmrx;
+ else if (t->mem_id == MEM_PMTX)
+ mem = &sc->pmtx;
+ else
+ return (EINVAL);
+
+ /*
+ * Version scheme:
+ * bits 0..9: chip version
+ * bits 10..15: chip revision
+ */
+ t->version = 3 | (sc->params.rev << 10);
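+		/* e.g. a rev 2 part yields 3 | (2 << 10) = 0x803 */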
+
+ /*
+ * Read 256 bytes at a time as len can be large and we don't
+ * want to use huge intermediate buffers.
+ */
+ useraddr = (uint8_t *)(t + 1); /* advance to start of buffer */
+ while (t->len) {
+ unsigned int chunk = min(t->len, sizeof(buf));
+
+ error = t3_mc7_bd_read(mem, t->addr / 8, chunk / 8, buf);
+ if (error)
+ return (-error);
+ if (copyout(buf, useraddr, chunk))
+ return (EFAULT);
+ useraddr += chunk;
+ t->addr += chunk;
+ t->len -= chunk;
+ }
+ break;
+ }
+ case CHELSIO_SET_TRACE_FILTER: {
+ struct ch_trace *t = (struct ch_trace *)data;
+ const struct trace_params *tp;
+
+ tp = (const struct trace_params *)&t->sip;
+ if (t->config_tx)
+ t3_config_trace_filter(sc, tp, 0, t->invert_match,
+ t->trace_tx);
+ if (t->config_rx)
+ t3_config_trace_filter(sc, tp, 1, t->invert_match,
+ t->trace_rx);
+ break;
+ }
+ case CHELSIO_SET_PKTSCHED: {
+ struct ch_pktsched_params *p = (struct ch_pktsched_params *)data;
+ if (sc->open_device_map == 0)
+ return (EAGAIN);
+ send_pktsched_cmd(sc, p->sched, p->idx, p->min, p->max,
+ p->binding);
+ break;
+ }
+ case CHELSIO_IFCONF_GETREGS: {
+ struct ifconf_regs *regs = (struct ifconf_regs *)data;
+ int reglen = cxgb_get_regs_len();
+ uint8_t *buf = malloc(REGDUMP_SIZE, M_DEVBUF, M_NOWAIT);
+ if (buf == NULL) {
+ return (ENOMEM);
+		}
+		if (regs->len > reglen)
+ regs->len = reglen;
+ else if (regs->len < reglen) {
+ error = E2BIG;
+ goto done;
+ }
+ cxgb_get_regs(sc, regs, buf);
+ error = copyout(buf, regs->data, reglen);
+
+ done:
+ free(buf, M_DEVBUF);
+
+ break;
+ }
+ default:
+ return (EOPNOTSUPP);
+ break;
+ }
+
+ return (error);
+}
+
+static __inline void
+reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start,
+ unsigned int end)
+{
+	uint32_t *p = (uint32_t *)(buf + start);
+
+ for ( ; start <= end; start += sizeof(uint32_t))
+ *p++ = t3_read_reg(ap, start);
+}
+
+#define T3_REGMAP_SIZE (3 * 1024)
+static int
+cxgb_get_regs_len(void)
+{
+ return T3_REGMAP_SIZE;
+}
+#undef T3_REGMAP_SIZE
+
+static void
+cxgb_get_regs(adapter_t *sc, struct ifconf_regs *regs, uint8_t *buf)
+{
+
+ /*
+ * Version scheme:
+ * bits 0..9: chip version
+ * bits 10..15: chip revision
+ * bit 31: set for PCIe cards
+ */
+ regs->version = 3 | (sc->params.rev << 10) | (is_pcie(sc) << 31);
+
+ /*
+ * We skip the MAC statistics registers because they are clear-on-read.
+ * Also reading multi-register stats would need to synchronize with the
+ * periodic mac stats accumulation. Hard to justify the complexity.
+ */
+ memset(buf, 0, REGDUMP_SIZE);
+ reg_block_dump(sc, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
+ reg_block_dump(sc, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
+ reg_block_dump(sc, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
+ reg_block_dump(sc, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
+ reg_block_dump(sc, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
+ reg_block_dump(sc, buf, A_XGM_SERDES_STATUS0,
+ XGM_REG(A_XGM_SERDES_STAT3, 1));
+ reg_block_dump(sc, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
+ XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
+}
diff --git a/sys/dev/cxgb/cxgb_osdep.h b/sys/dev/cxgb/cxgb_osdep.h
new file mode 100644
index 0000000..bf4bffa
--- /dev/null
+++ b/sys/dev/cxgb/cxgb_osdep.h
@@ -0,0 +1,246 @@
+/**************************************************************************
+
+Copyright (c) 2007, Chelsio Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Chelsio Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+
+$FreeBSD$
+
+***************************************************************************/
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/ctype.h>
+#include <sys/endian.h>
+#include <sys/bus.h>
+
+#include <dev/mii/mii.h>
+
+#include <dev/cxgb/common/cxgb_version.h>
+
+#ifndef _CXGB_OSDEP_H_
+#define _CXGB_OSDEP_H_
+
+typedef struct adapter adapter_t;
+
+struct sge_rspq;
+
+struct t3_mbuf_hdr {
+ struct mbuf *mh_head;
+ struct mbuf *mh_tail;
+};
+
+#if __FreeBSD_version > 700030
+#define INTR_FILTERS
+#define FIRMWARE_LATEST
+#endif
+
+#if ((__FreeBSD_version > 602103) && (__FreeBSD_version < 700000))
+#define FIRMWARE_LATEST
+#endif
+
+#if __FreeBSD_version > 700000
+#define MSI_SUPPORTED
+#define TSO_SUPPORTED
+#define VLAN_SUPPORTED
+#define TASKQUEUE_CURRENT
+#endif
+
+/*
+ * Workaround for weird Chelsio issue
+ */
+#if __FreeBSD_version > 700029
+#define PRIV_SUPPORTED
+#endif
+
+#define CXGB_TX_CLEANUP_THRESHOLD 32
+
+
+#ifdef DEBUG_PRINT
+#define DPRINTF printf
+#else
+#define DPRINTF(...)
+#endif
+
+#define TX_MAX_SIZE (1 << 16) /* 64KB */
+#define TX_MAX_SEGS 36 /* maximum supported by card */
+#define TX_MAX_DESC 4 /* max descriptors per packet */
+#define TX_START_MAX_DESC (TX_MAX_DESC << 1)	/* maximum number of descriptors
+						 * used per call to start */
+#define TX_CLEAN_MAX_DESC (TX_MAX_DESC << 2) /* maximum tx descriptors
+ * to clean per iteration */
+
+
+#if defined(__i386__) || defined(__amd64__)
+#define mb() __asm volatile("mfence":::"memory")
+#define rmb() __asm volatile("lfence":::"memory")
+#define wmb() __asm volatile("sfence" ::: "memory")
+#define smp_mb() mb()
+#endif
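+/*
+ * The barriers above expand directly to the x86 fence instructions; a
+ * port to another architecture would need to provide equivalents here.
+ */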
+#define DBG_RX (1 << 0)
+static const int debug_flags = DBG_RX;
+
+#ifdef DEBUG_PRINT
+#define DBG(flag, msg) do { \
+ if ((flag & debug_flags)) \
+ printf msg; \
+} while (0)
+#else
+#define DBG(...)
+#endif
+
+#define promisc_rx_mode(rm) ((rm)->port->ifp->if_flags & IFF_PROMISC)
+#define allmulti_rx_mode(rm) ((rm)->port->ifp->if_flags & IFF_ALLMULTI)
+
+#define CH_ERR(adap, fmt, ...) device_printf(adap->dev, fmt, ##__VA_ARGS__)
+
+#define CH_WARN(adap, fmt, ...) device_printf(adap->dev, fmt, ##__VA_ARGS__)
+#define CH_ALERT(adap, fmt, ...) device_printf(adap->dev, fmt, ##__VA_ARGS__)
+
+#define t3_os_sleep(x) DELAY((x) * 1000)
+
+/* Standard PHY definitions */
+#define BMCR_LOOPBACK BMCR_LOOP
+#define BMCR_ISOLATE BMCR_ISO
+#define BMCR_ANENABLE BMCR_AUTOEN
+#define BMCR_SPEED1000 BMCR_SPEED1
+#define BMCR_SPEED100 BMCR_SPEED0
+#define BMCR_ANRESTART BMCR_STARTNEG
+#define BMCR_FULLDPLX BMCR_FDX
+#define BMSR_LSTATUS BMSR_LINK
+#define BMSR_ANEGCOMPLETE BMSR_ACOMP
+
+#define MII_LPA MII_ANLPAR
+#define MII_ADVERTISE MII_ANAR
+#define MII_CTRL1000 MII_100T2CR
+
+#define ADVERTISE_PAUSE_CAP ANAR_FC
+#define ADVERTISE_PAUSE_ASYM 0x0800
+#define ADVERTISE_1000HALF ANAR_X_HD
+#define ADVERTISE_1000FULL ANAR_X_FD
+#define ADVERTISE_10FULL ANAR_10_FD
+#define ADVERTISE_10HALF ANAR_10
+#define ADVERTISE_100FULL ANAR_TX_FD
+#define ADVERTISE_100HALF ANAR_TX
+
+/* Standard PCI Extended Capabilities definitions */
+#define PCI_CAP_ID_VPD 0x03
+#define PCI_VPD_ADDR 2
+#define PCI_VPD_ADDR_F 0x8000
+#define PCI_VPD_DATA 4
+
+#define PCI_CAP_ID_EXP 0x10
+#define PCI_EXP_DEVCTL 8
+#define PCI_EXP_DEVCTL_PAYLOAD 0x00e0
+#define PCI_EXP_LNKCTL 16
+#define PCI_EXP_LNKSTA 18
+
+/*
+ * Linux compatibility macros
+ */
+
+/* Some simple translations */
+#define __devinit
+#define udelay(x) DELAY(x)
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#define le32_to_cpu(x) le32toh(x)
+#define cpu_to_le32(x) htole32(x)
+#define swab32(x) bswap32(x)
+#define simple_strtoul strtoul
+
+/* More types and endian definitions */
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef uint64_t u64;
+
+typedef uint8_t __u8;
+typedef uint16_t __u16;
+typedef uint32_t __u32;
+typedef uint8_t __be8;
+typedef uint16_t __be16;
+typedef uint32_t __be32;
+typedef uint64_t __be64;
+
+#if BYTE_ORDER == BIG_ENDIAN
+#define __BIG_ENDIAN_BITFIELD
+#elif BYTE_ORDER == LITTLE_ENDIAN
+#define __LITTLE_ENDIAN_BITFIELD
+#else
+#error "Must set BYTE_ORDER"
+#endif
+
+/* Indicates what features are supported by the interface. */
+#define SUPPORTED_10baseT_Half (1 << 0)
+#define SUPPORTED_10baseT_Full (1 << 1)
+#define SUPPORTED_100baseT_Half (1 << 2)
+#define SUPPORTED_100baseT_Full (1 << 3)
+#define SUPPORTED_1000baseT_Half (1 << 4)
+#define SUPPORTED_1000baseT_Full (1 << 5)
+#define SUPPORTED_Autoneg (1 << 6)
+#define SUPPORTED_TP (1 << 7)
+#define SUPPORTED_AUI (1 << 8)
+#define SUPPORTED_MII (1 << 9)
+#define SUPPORTED_FIBRE (1 << 10)
+#define SUPPORTED_BNC (1 << 11)
+#define SUPPORTED_10000baseT_Full (1 << 12)
+#define SUPPORTED_Pause (1 << 13)
+#define SUPPORTED_Asym_Pause (1 << 14)
+
+/* Indicates what features are advertised by the interface. */
+#define ADVERTISED_10baseT_Half (1 << 0)
+#define ADVERTISED_10baseT_Full (1 << 1)
+#define ADVERTISED_100baseT_Half (1 << 2)
+#define ADVERTISED_100baseT_Full (1 << 3)
+#define ADVERTISED_1000baseT_Half (1 << 4)
+#define ADVERTISED_1000baseT_Full (1 << 5)
+#define ADVERTISED_Autoneg (1 << 6)
+#define ADVERTISED_TP (1 << 7)
+#define ADVERTISED_AUI (1 << 8)
+#define ADVERTISED_MII (1 << 9)
+#define ADVERTISED_FIBRE (1 << 10)
+#define ADVERTISED_BNC (1 << 11)
+#define ADVERTISED_10000baseT_Full (1 << 12)
+#define ADVERTISED_Pause (1 << 13)
+#define ADVERTISED_Asym_Pause (1 << 14)
+
+/* Enable or disable autonegotiation. If this is set to enable,
+ * the forced link modes below are completely ignored.
+ */
+#define AUTONEG_DISABLE 0x00
+#define AUTONEG_ENABLE 0x01
+
+#define SPEED_10 10
+#define SPEED_100 100
+#define SPEED_1000 1000
+#define SPEED_10000 10000
+#define DUPLEX_HALF 0
+#define DUPLEX_FULL 1
+
+#endif
diff --git a/sys/dev/cxgb/cxgb_sge.c b/sys/dev/cxgb/cxgb_sge.c
new file mode 100644
index 0000000..f6001cd
--- /dev/null
+++ b/sys/dev/cxgb/cxgb_sge.c
@@ -0,0 +1,2323 @@
+/**************************************************************************
+
+Copyright (c) 2007, Chelsio Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Chelsio Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <sys/bus_dma.h>
+#include <sys/rman.h>
+#include <sys/queue.h>
+#include <sys/sysctl.h>
+#include <sys/taskqueue.h>
+
+
+#include <sys/proc.h>
+#include <sys/sched.h>
+#include <sys/smp.h>
+
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <netinet/tcp.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+#include <dev/cxgb/common/cxgb_common.h>
+#include <dev/cxgb/common/cxgb_regs.h>
+#include <dev/cxgb/common/cxgb_sge_defs.h>
+#include <dev/cxgb/common/cxgb_t3_cpl.h>
+#include <dev/cxgb/common/cxgb_firmware_exports.h>
+
+#define USE_GTS 0
+
+
+#define SGE_RX_SM_BUF_SIZE 1536
+#if 1
+#define SGE_RX_COPY_THRES 384
+#else
+#define SGE_RX_COPY_THRES MHLEN
+#endif
+#define SGE_RX_DROP_THRES 16
+
+/*
+ * Period of the Tx buffer reclaim timer. This timer does not need to run
+ * frequently as Tx buffers are usually reclaimed by new Tx packets.
+ */
+#define TX_RECLAIM_PERIOD (hz >> 2)
+
+/*
+ * work request size in bytes
+ */
+#define WR_LEN (WR_FLITS * 8)
+
+/*
+ * Values for sge_txq.flags
+ */
+enum {
+ TXQ_RUNNING = 1 << 0, /* fetch engine is running */
+ TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */
+};
+
+struct tx_desc {
+ uint64_t flit[TX_DESC_FLITS];
+} __packed;
+
+struct rx_desc {
+ uint32_t addr_lo;
+ uint32_t len_gen;
+ uint32_t gen2;
+ uint32_t addr_hi;
+} __packed;
+
+struct rsp_desc { /* response queue descriptor */
+ struct rss_header rss_hdr;
+ uint32_t flags;
+ uint32_t len_cq;
+ uint8_t imm_data[47];
+ uint8_t intr_gen;
+} __packed;
+
+#define RX_SW_DESC_MAP_CREATED (1 << 0)
+#define RX_SW_DESC_INUSE (1 << 3)
+#define TX_SW_DESC_MAPPED (1 << 4)
+
+#define RSPQ_NSOP_NEOP G_RSPD_SOP_EOP(0)
+#define RSPQ_EOP G_RSPD_SOP_EOP(F_RSPD_EOP)
+#define RSPQ_SOP G_RSPD_SOP_EOP(F_RSPD_SOP)
+#define RSPQ_SOP_EOP G_RSPD_SOP_EOP(F_RSPD_SOP|F_RSPD_EOP)
+
+struct tx_sw_desc { /* SW state per Tx descriptor */
+ struct mbuf *m;
+ bus_dmamap_t map;
+ int flags;
+};
+
+struct rx_sw_desc { /* SW state per Rx descriptor */
+ struct mbuf *m;
+ bus_dmamap_t map;
+ int flags;
+};
+
+struct txq_state {
+ unsigned int compl;
+ unsigned int gen;
+ unsigned int pidx;
+};
+
+/*
+ * Maps a number of flits to the number of Tx descriptors that can hold them.
+ * The formula is
+ *
+ * desc = 1 + (flits - 2) / (WR_FLITS - 1).
+ *
+ * HW allows up to 4 descriptors to be combined into a WR.
+ */
+static uint8_t flit_desc_map[] = {
+ 0,
+#if SGE_NUM_GENBITS == 1
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
+#elif SGE_NUM_GENBITS == 2
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+#else
+# error "SGE_NUM_GENBITS must be 1 or 2"
+#endif
+};
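+
+/*
+ * Worked example: with SGE_NUM_GENBITS == 2 a single descriptor holds at
+ * most 15 flits (first row above), so for flits == 20 the formula gives
+ * 1 + (20 - 2) / (15 - 1) == 2, matching flit_desc_map[20] == 2.
+ */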
+
+
+static int lro_default = 0;
+int cxgb_debug = 0;
+
+/*
+ * XXX move to arch header
+ */
+
+#define USE_PREFETCH
+#ifdef USE_PREFETCH
+#define L1_CACHE_BYTES 64
+static __inline
+void prefetch(void *x)
+{
+ __asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
+}
+#else
+#define prefetch(x)
+#endif
+
+static void t3_free_qset(adapter_t *sc, struct sge_qset *q);
+static void sge_timer_cb(void *arg);
+static void sge_timer_reclaim(void *arg, int ncount);
+static __inline void refill_rspq(adapter_t *sc, const struct sge_rspq *q,
+ u_int credits);
+static int free_tx_desc(adapter_t *sc, struct sge_txq *q, int n, struct mbuf **m_vec);
+
+/**
+ * reclaim_completed_tx - reclaims completed Tx descriptors
+ * @adapter: the adapter
+ * @q: the Tx queue to reclaim completed descriptors from
+ *
+ * Reclaims Tx descriptors that the SGE has indicated it has processed,
+ * and frees the associated buffers if possible. Called with the Tx
+ * queue's lock held.
+ */
+static __inline int
+reclaim_completed_tx(adapter_t *adapter, struct sge_txq *q, int nbufs, struct mbuf **mvec)
+{
+ int reclaimed, reclaim = desc_reclaimable(q);
+ int n = 0;
+
+ mtx_assert(&q->lock, MA_OWNED);
+
+ if (reclaim > 0) {
+ n = free_tx_desc(adapter, q, min(reclaim, nbufs), mvec);
+ reclaimed = min(reclaim, nbufs);
+ q->cleaned += reclaimed;
+ q->in_use -= reclaimed;
+ }
+
+ return (n);
+}
+
+/**
+ * t3_sge_init - initialize SGE
+ * @adap: the adapter
+ * @p: the SGE parameters
+ *
+ * Performs SGE initialization needed every time after a chip reset.
+ * We do not initialize any of the queue sets here, instead the driver
+ * top-level must request those individually. We also do not enable DMA
+ * here, that should be done after the queues have been set up.
+ */
+void
+t3_sge_init(adapter_t *adap, struct sge_params *p)
+{
+ u_int ctrl, ups;
+
+ ups = 0; /* = ffs(pci_resource_len(adap->pdev, 2) >> 12); */
+
+ ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
+ F_CQCRDTCTRL |
+ V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
+ V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
+#if SGE_NUM_GENBITS == 1
+ ctrl |= F_EGRGENCTRL;
+#endif
+ if (adap->params.rev > 0) {
+ if (!(adap->flags & (USING_MSIX | USING_MSI)))
+ ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
+ ctrl |= F_CQCRDTCTRL | F_AVOIDCQOVFL;
+ }
+ t3_write_reg(adap, A_SG_CONTROL, ctrl);
+ t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
+ V_LORCQDRBTHRSH(512));
+ t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
+ t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
+ V_TIMEOUT(200 * core_ticks_per_usec(adap)));
+ t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH, 1000);
+ t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
+ t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
+ t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
+ t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
+ t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
+}
+
+
+/**
+ * sgl_len - calculates the size of an SGL of the given capacity
+ * @n: the number of SGL entries
+ *
+ * Calculates the number of flits needed for a scatter/gather list that
+ * can hold the given number of entries.
+ */
+static __inline unsigned int
+sgl_len(unsigned int n)
+{
+ return ((3 * n) / 2 + (n & 1));
+}
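+
+/*
+ * Each sg_ent packs two segments into three flits (two 32-bit lengths
+ * plus two 64-bit addresses), so e.g. sgl_len(2) == 3, while an odd
+ * final entry costs two more flits: sgl_len(3) == 5.
+ */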
+
+/**
+ *	get_imm_packet - return the next ingress packet buffer from a response
+ *	@sc: the adapter
+ *	@resp: the response descriptor containing the packet data
+ *	@mh: the mbuf chain header to attach the packet to
+ *
+ *	Return a packet containing the immediate data of the given response.
+ */
+static __inline int
+get_imm_packet(adapter_t *sc, const struct rsp_desc *resp, struct t3_mbuf_hdr *mh)
+{
+ struct mbuf *m;
+ int len;
+ uint32_t flags = ntohl(resp->flags);
+ uint8_t sopeop = G_RSPD_SOP_EOP(flags);
+
+	/*
+	 * An incomplete immediate-data response would be a firmware bug;
+	 * check before allocating so the mbuf is not leaked on this path.
+	 */
+	if (sopeop == RSPQ_NSOP_NEOP || sopeop == RSPQ_SOP)
+		return (0);
+
+	m = m_gethdr(M_NOWAIT, MT_DATA);
+	len = G_RSPD_LEN(ntohl(resp->len_cq));
+
+ if (m) {
+ MH_ALIGN(m, IMMED_PKT_SIZE);
+ memcpy(m->m_data, resp->imm_data, IMMED_PKT_SIZE);
+ m->m_len = len;
+
+ switch (sopeop) {
+ case RSPQ_SOP_EOP:
+ mh->mh_head = mh->mh_tail = m;
+ m->m_pkthdr.len = len;
+ m->m_flags |= M_PKTHDR;
+ break;
+ case RSPQ_EOP:
+ m->m_flags &= ~M_PKTHDR;
+ mh->mh_head->m_pkthdr.len += len;
+ mh->mh_tail->m_next = m;
+ mh->mh_tail = m;
+ break;
+ }
+ }
+ return (m != NULL);
+}
+
+
+static __inline u_int
+flits_to_desc(u_int n)
+{
+ return (flit_desc_map[n]);
+}
+
+void
+t3_sge_err_intr_handler(adapter_t *adapter)
+{
+ unsigned int v, status;
+
+
+ status = t3_read_reg(adapter, A_SG_INT_CAUSE);
+
+ if (status & F_RSPQCREDITOVERFOW)
+ CH_ALERT(adapter, "SGE response queue credit overflow\n");
+
+ if (status & F_RSPQDISABLED) {
+ v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
+
+ CH_ALERT(adapter,
+ "packet delivered to disabled response queue (0x%x)\n",
+ (v >> S_RSPQ0DISABLED) & 0xff);
+ }
+
+ t3_write_reg(adapter, A_SG_INT_CAUSE, status);
+ if (status & (F_RSPQCREDITOVERFOW | F_RSPQDISABLED))
+ t3_fatal_err(adapter);
+}
+
+void
+t3_sge_prep(adapter_t *adap, struct sge_params *p)
+{
+ int i;
+
+ /* XXX Does ETHER_ALIGN need to be accounted for here? */
+ p->max_pkt_size = MJUM16BYTES - sizeof(struct cpl_rx_data);
+
+ for (i = 0; i < SGE_QSETS; ++i) {
+ struct qset_params *q = p->qset + i;
+
+ q->polling = adap->params.rev > 0;
+ q->coalesce_nsecs = 3500;
+ q->rspq_size = RSPQ_Q_SIZE;
+ q->fl_size = FL_Q_SIZE;
+ q->jumbo_size = JUMBO_Q_SIZE;
+ q->txq_size[TXQ_ETH] = TX_ETH_Q_SIZE;
+ q->txq_size[TXQ_OFLD] = 1024;
+ q->txq_size[TXQ_CTRL] = 256;
+ q->cong_thres = 0;
+ }
+}
+
+int
+t3_sge_alloc(adapter_t *sc)
+{
+
+ /* The parent tag. */
+ if (bus_dma_tag_create( NULL, /* parent */
+ 1, 0, /* algnmnt, boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
+ BUS_SPACE_UNRESTRICTED, /* nsegments */
+ BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lock, lockarg */
+ &sc->parent_dmat)) {
+ device_printf(sc->dev, "Cannot allocate parent DMA tag\n");
+ return (ENOMEM);
+ }
+
+ /*
+ * DMA tag for normal sized RX frames
+ */
+ if (bus_dma_tag_create(sc->parent_dmat, MCLBYTES, 0, BUS_SPACE_MAXADDR,
+ BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
+ MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->rx_dmat)) {
+ device_printf(sc->dev, "Cannot allocate RX DMA tag\n");
+ return (ENOMEM);
+ }
+
+ /*
+ * DMA tag for jumbo sized RX frames.
+ */
+ if (bus_dma_tag_create(sc->parent_dmat, MJUMPAGESIZE, 0, BUS_SPACE_MAXADDR,
+ BUS_SPACE_MAXADDR, NULL, NULL, MJUMPAGESIZE, 1, MJUMPAGESIZE,
+ BUS_DMA_ALLOCNOW, NULL, NULL, &sc->rx_jumbo_dmat)) {
+ device_printf(sc->dev, "Cannot allocate RX jumbo DMA tag\n");
+ return (ENOMEM);
+ }
+
+ /*
+ * DMA tag for TX frames.
+ */
+ if (bus_dma_tag_create(sc->parent_dmat, 1, 0, BUS_SPACE_MAXADDR,
+ BUS_SPACE_MAXADDR, NULL, NULL, TX_MAX_SIZE, TX_MAX_SEGS,
+ TX_MAX_SIZE, BUS_DMA_ALLOCNOW,
+ NULL, NULL, &sc->tx_dmat)) {
+ device_printf(sc->dev, "Cannot allocate TX DMA tag\n");
+ return (ENOMEM);
+ }
+
+ return (0);
+}
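+
+/*
+ * The tags above form a small hierarchy: an unrestricted parent tag plus
+ * three child tags sized for their use (cluster-sized RX, page-sized
+ * jumbo RX, and a 64KB/36-segment TX tag), so every mapping inherits the
+ * parent's constraints.
+ */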
+
+int
+t3_sge_free(struct adapter * sc)
+{
+
+ if (sc->tx_dmat != NULL)
+ bus_dma_tag_destroy(sc->tx_dmat);
+
+ if (sc->rx_jumbo_dmat != NULL)
+ bus_dma_tag_destroy(sc->rx_jumbo_dmat);
+
+ if (sc->rx_dmat != NULL)
+ bus_dma_tag_destroy(sc->rx_dmat);
+
+ if (sc->parent_dmat != NULL)
+ bus_dma_tag_destroy(sc->parent_dmat);
+
+ return (0);
+}
+
+void
+t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
+{
+
+ qs->rspq.holdoff_tmr = max(p->coalesce_nsecs/100, 1U);
+ qs->rspq.polling = 0 /* p->polling */;
+}
+
+
+/**
+ * refill_fl - refill an SGE free-buffer list
+ * @sc: the controller softc
+ * @q: the free-list to refill
+ * @n: the number of new buffers to allocate
+ *
+ * (Re)populate an SGE free-buffer list with up to @n new packet buffers.
+ * The caller must assure that @n does not exceed the queue's capacity.
+ */
+static void
+refill_fl(adapter_t *sc, struct sge_fl *q, int n)
+{
+ bus_dma_segment_t seg;
+ struct rx_sw_desc *sd = &q->sdesc[q->pidx];
+ struct rx_desc *d = &q->desc[q->pidx];
+ struct mbuf *m;
+ int err, nsegs;
+
+ while (n--) {
+ m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, q->buf_size);
+
+ if (m == NULL) {
+ printf("Failed to allocate mbuf\n");
+ goto done;
+ }
+
+ if ((sd->flags & RX_SW_DESC_MAP_CREATED) == 0) {
+ bus_dmamap_create(sc->rx_jumbo_dmat, 0, &sd->map);
+ sd->flags |= RX_SW_DESC_MAP_CREATED;
+ }
+ sd->flags |= RX_SW_DESC_INUSE;
+
+ m->m_pkthdr.len = m->m_len = q->buf_size;
+ err = bus_dmamap_load_mbuf_sg(sc->rx_jumbo_dmat, sd->map, m, &seg,
+ &nsegs, BUS_DMA_NOWAIT);
+ if (err != 0) {
+ printf("failure in refill_fl %d\n", err);
+ m_freem(m);
+ return;
+ }
+
+ sd->m = m;
+ d->addr_lo = htobe32(seg.ds_addr & 0xffffffff);
+		d->addr_hi = htobe32(((uint64_t)seg.ds_addr >> 32) & 0xffffffff);
+ d->len_gen = htobe32(V_FLD_GEN1(q->gen));
+ d->gen2 = htobe32(V_FLD_GEN2(q->gen));
+
+ d++;
+ sd++;
+
+ if (++q->pidx == q->size) {
+ q->pidx = 0;
+ q->gen ^= 1;
+ sd = q->sdesc;
+ d = q->desc;
+ }
+ q->credits++;
+ }
+
+done:
+ t3_write_reg(sc, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
+}
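+
+/*
+ * Usage note: refill_fl() is called with the full ring size when a qset
+ * is brought up (see t3_sge_alloc_qset()) and is topped up in batches of
+ * at most 16 buffers by __refill_fl() below.
+ */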
+
+
+/**
+ * free_rx_bufs - free the Rx buffers on an SGE free list
+ * @sc: the controller softc
+ * @q: the SGE free list to clean up
+ *
+ * Release the buffers on an SGE free-buffer Rx queue. HW fetching from
+ * this queue should be stopped before calling this function.
+ */
+static void
+free_rx_bufs(adapter_t *sc, struct sge_fl *q)
+{
+ u_int cidx = q->cidx;
+
+ while (q->credits--) {
+ struct rx_sw_desc *d = &q->sdesc[cidx];
+
+ if (d->flags & RX_SW_DESC_INUSE) {
+ bus_dmamap_unload(sc->rx_jumbo_dmat, d->map);
+ bus_dmamap_destroy(sc->rx_jumbo_dmat, d->map);
+ m_freem(d->m);
+ }
+ d->m = NULL;
+ if (++cidx == q->size)
+ cidx = 0;
+ }
+}
+
+static __inline void
+__refill_fl(adapter_t *adap, struct sge_fl *fl)
+{
+ refill_fl(adap, fl, min(16U, fl->size - fl->credits));
+}
+
+#ifdef RECYCLE
+static void
+recycle_rx_buf(adapter_t *adap, struct sge_fl *q, unsigned int idx)
+{
+ struct rx_desc *from = &q->desc[idx];
+ struct rx_desc *to = &q->desc[q->pidx];
+
+ if (to == from)
+ return;
+ memcpy(&q->sdesc[q->pidx], &q->sdesc[idx], sizeof (struct rx_sw_desc));
+ /*
+ * mark as unused and unmapped
+ */
+ q->sdesc[idx].flags = 0;
+ q->sdesc[idx].m = NULL;
+
+	to->addr_lo = from->addr_lo;	/* already big endian */
+	to->addr_hi = from->addr_hi;	/* likewise */
+
+ wmb();
+ to->len_gen = htobe32(V_FLD_GEN1(q->gen));
+ to->gen2 = htobe32(V_FLD_GEN2(q->gen));
+ q->credits++;
+
+ if (++q->pidx == q->size) {
+ q->pidx = 0;
+ q->gen ^= 1;
+ }
+ t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
+}
+#endif
+
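+/*
+ * bus_dmamap_load() callback for the descriptor rings: the ring tags are
+ * created with nsegments == 1, so only segs[0] is meaningful; its bus
+ * address is stored for the caller.
+ */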
+static void
+alloc_ring_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
+{
+	bus_addr_t *addr;
+
+ addr = arg;
+ *addr = segs[0].ds_addr;
+}
+
+static int
+alloc_ring(adapter_t *sc, size_t nelem, size_t elem_size, size_t sw_size,
+ bus_addr_t *phys, void *desc, void *sdesc, bus_dma_tag_t *tag,
+ bus_dmamap_t *map)
+{
+ size_t len = nelem * elem_size;
+ void *s = NULL;
+ void *p = NULL;
+ int err;
+
+ if ((err = bus_dma_tag_create(sc->parent_dmat, PAGE_SIZE, 0,
+ BUS_SPACE_MAXADDR_32BIT,
+ BUS_SPACE_MAXADDR, NULL, NULL, len, 1,
+ len, 0, NULL, NULL, tag)) != 0) {
+ device_printf(sc->dev, "Cannot allocate descriptor tag\n");
+ return (ENOMEM);
+ }
+
+ if ((err = bus_dmamem_alloc(*tag, (void **)&p, BUS_DMA_NOWAIT,
+ map)) != 0) {
+ device_printf(sc->dev, "Cannot allocate descriptor memory\n");
+ return (ENOMEM);
+ }
+
+ bus_dmamap_load(*tag, *map, p, len, alloc_ring_cb, phys, 0);
+ bzero(p, len);
+ *(void **)desc = p;
+
+ if (sw_size) {
+ len = nelem * sw_size;
+ s = malloc(len, M_DEVBUF, M_WAITOK);
+ bzero(s, len);
+ *(void **)sdesc = s;
+ }
+ return (0);
+}
+
+static void
+sge_slow_intr_handler(void *arg, int ncount)
+{
+ adapter_t *sc = arg;
+
+ t3_slow_intr_handler(sc);
+}
+
+
+
+static void
+sge_timer_cb(void *arg)
+{
+ adapter_t *sc = arg;
+ struct sge_qset *qs;
+ struct sge_txq *txq;
+ int i, j;
+ int reclaim_eth, reclaim_ofl, refill_rx;
+
+ for (i = 0; i < sc->params.nports; i++)
+ for (j = 0; j < sc->port[i].nqsets; j++) {
+ qs = &sc->sge.qs[i + j];
+ txq = &qs->txq[0];
+ reclaim_eth = txq[TXQ_ETH].processed - txq[TXQ_ETH].cleaned;
+ reclaim_ofl = txq[TXQ_OFLD].processed - txq[TXQ_OFLD].cleaned;
+ refill_rx = ((qs->fl[0].credits < qs->fl[0].size) ||
+ (qs->fl[1].credits < qs->fl[1].size));
+ if (reclaim_eth || reclaim_ofl || refill_rx) {
+ taskqueue_enqueue(sc->tq, &sc->timer_reclaim_task);
+ goto done;
+ }
+ }
+done:
+ callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
+}
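+
+/*
+ * The callout above only inspects queue state; any actual reclaim or
+ * refill work is deferred to the taskqueue so that little time is spent
+ * in callout (softclock) context.
+ */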
+
+/*
+ * This is meant to be a catch-all function to keep SGE state private
+ * to sge.c.
+ */
+int
+t3_sge_init_sw(adapter_t *sc)
+{
+
+ callout_init(&sc->sge_timer_ch, CALLOUT_MPSAFE);
+ callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
+ TASK_INIT(&sc->timer_reclaim_task, 0, sge_timer_reclaim, sc);
+ TASK_INIT(&sc->slow_intr_task, 0, sge_slow_intr_handler, sc);
+ return (0);
+}
+
+void
+t3_sge_deinit_sw(adapter_t *sc)
+{
+ callout_drain(&sc->sge_timer_ch);
+ taskqueue_drain(sc->tq, &sc->timer_reclaim_task);
+ taskqueue_drain(sc->tq, &sc->slow_intr_task);
+}
+
+
+static void
+sge_timer_reclaim(void *arg, int ncount)
+{
+ adapter_t *sc = arg;
+ int i, j, nqsets = 0;
+ struct sge_qset *qs;
+ struct sge_txq *txq;
+ struct mtx *lock;
+ struct mbuf *m_vec[TX_CLEAN_MAX_DESC];
+ int n;
+ /*
+ * XXX assuming these quantities are allowed to change during operation
+ */
+ for (i = 0; i < sc->params.nports; i++) {
+ for (j = 0; j < sc->port[i].nqsets; j++)
+ nqsets++;
+ }
+ for (i = 0; i < nqsets; i++) {
+ qs = &sc->sge.qs[i];
+ txq = &qs->txq[TXQ_ETH];
+ if (desc_reclaimable(txq) > 0) {
+ mtx_lock(&txq->lock);
+ n = reclaim_completed_tx(sc, txq, TX_CLEAN_MAX_DESC, m_vec);
+ mtx_unlock(&txq->lock);
+
+			for (j = 0; j < n; j++) {
+				m_freem(m_vec[j]);
+			}
+ }
+
+ txq = &qs->txq[TXQ_OFLD];
+ if (desc_reclaimable(txq) > 0) {
+ mtx_lock(&txq->lock);
+ n = reclaim_completed_tx(sc, txq, TX_CLEAN_MAX_DESC, m_vec);
+ mtx_unlock(&txq->lock);
+
+			for (j = 0; j < n; j++) {
+				m_freem(m_vec[j]);
+			}
+ }
+
+
+ lock = (sc->flags & USING_MSIX) ? &qs->rspq.lock :
+ &sc->sge.qs[0].rspq.lock;
+
+ if (mtx_trylock(lock)) {
+ /* XXX currently assume that we are *NOT* polling */
+ uint32_t status = t3_read_reg(sc, A_SG_RSPQ_FL_STATUS);
+
+ if (qs->fl[0].credits < qs->fl[0].size)
+ __refill_fl(sc, &qs->fl[0]);
+ if (qs->fl[1].credits < qs->fl[1].size)
+ __refill_fl(sc, &qs->fl[1]);
+
+ if (status & (1 << qs->rspq.cntxt_id)) {
+ if (qs->rspq.credits) {
+ refill_rspq(sc, &qs->rspq, 1);
+ qs->rspq.credits--;
+ t3_write_reg(sc, A_SG_RSPQ_FL_STATUS,
+ 1 << qs->rspq.cntxt_id);
+ }
+ }
+ mtx_unlock(lock);
+ }
+ }
+}
+
+/**
+ * init_qset_cntxt - initialize an SGE queue set context info
+ * @qs: the queue set
+ * @id: the queue set id
+ *
+ * Initializes the TIDs and context ids for the queues of a queue set.
+ */
+static void
+init_qset_cntxt(struct sge_qset *qs, u_int id)
+{
+
+ qs->rspq.cntxt_id = id;
+ qs->fl[0].cntxt_id = 2 * id;
+ qs->fl[1].cntxt_id = 2 * id + 1;
+ qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
+ qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
+ qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
+ qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
+ qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
+}
+
+
+static void
+txq_prod(struct sge_txq *txq, unsigned int ndesc, struct txq_state *txqs)
+{
+ txq->in_use += ndesc;
+ /*
+ * XXX we don't handle stopping of queue
+ * presumably start handles this when we bump against the end
+ */
+ txqs->gen = txq->gen;
+ txq->unacked += ndesc;
+ txqs->compl = (txq->unacked & 8) << (S_WR_COMPL - 3);
+ txq->unacked &= 7;
+ txqs->pidx = txq->pidx;
+ txq->pidx += ndesc;
+
+ if (txq->pidx >= txq->size) {
+ txq->pidx -= txq->size;
+ txq->gen ^= 1;
+ }
+
+}
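+
+/*
+ * txq->unacked counts descriptors handed to the HW since the last
+ * completion request; once bit 3 becomes set (roughly every 8
+ * descriptors) it is shifted into the WR_COMPL position so the SGE
+ * posts a completion, and the counter wraps back into 0..7.
+ */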
+
+/**
+ * calc_tx_descs - calculate the number of Tx descriptors for a packet
+ * @m: the packet mbufs
+ * @nsegs: the number of segments
+ *
+ * Returns the number of Tx descriptors needed for the given Ethernet
+ * packet. Ethernet packets require addition of WR and CPL headers.
+ */
+static __inline unsigned int
+calc_tx_descs(const struct mbuf *m, int nsegs)
+{
+ unsigned int flits;
+
+ if (m->m_pkthdr.len <= WR_LEN - sizeof(struct cpl_tx_pkt))
+ return 1;
+
+ flits = sgl_len(nsegs) + 2;
+#ifdef TSO_SUPPORTED
+ if (m->m_pkthdr.tso_segsz)
+ flits++;
+#endif
+ return flits_to_desc(flits);
+}
+
+static __inline int
+busdma_map_mbufs(struct mbuf **m, adapter_t *sc, struct tx_sw_desc *stx,
+ bus_dma_segment_t *segs, int *nsegs)
+{
+ struct mbuf *m0, *mtmp;
+ int err, pktlen;
+
+ m0 = *m;
+ pktlen = m0->m_pkthdr.len;
+ err = bus_dmamap_load_mbuf_sg(sc->tx_dmat, stx->map, m0, segs, nsegs, 0);
+ if (err) {
+ int n = 0;
+ mtmp = m0;
+ while(mtmp) {
+ n++;
+ mtmp = mtmp->m_next;
+ }
+#ifdef DEBUG
+ printf("map_mbufs: bus_dmamap_load_mbuf_sg failed with %d - pkthdr.len==%d nmbufs=%d\n",
+ err, m0->m_pkthdr.len, n);
+#endif
+ }
+
+
+ if (err == EFBIG) {
+ /* Too many segments, try to defrag */
+ m0 = m_defrag(m0, M_NOWAIT);
+ if (m0 == NULL) {
+ m_freem(*m);
+ *m = NULL;
+ return (ENOBUFS);
+ }
+ *m = m0;
+ err = bus_dmamap_load_mbuf_sg(sc->tx_dmat, stx->map, m0, segs, nsegs, 0);
+ }
+
+ if (err == ENOMEM) {
+ return (err);
+ }
+
+ if (err) {
+ if (cxgb_debug)
+ printf("map failure err=%d pktlen=%d\n", err, pktlen);
+ m_freem(m0);
+ *m = NULL;
+ return (err);
+ }
+
+ bus_dmamap_sync(sc->tx_dmat, stx->map, BUS_DMASYNC_PREWRITE);
+ stx->flags |= TX_SW_DESC_MAPPED;
+
+ return (0);
+}
+
+/**
+ * make_sgl - populate a scatter/gather list for a packet
+ * @sgp: the SGL to populate
+ * @segs: the packet dma segments
+ * @nsegs: the number of segments
+ *
+ * Generates a scatter/gather list for the buffers that make up a packet
+ * and returns the SGL size in 8-byte words. The caller must size the SGL
+ * appropriately.
+ */
+static __inline void
+make_sgl(struct sg_ent *sgp, bus_dma_segment_t *segs, int nsegs)
+{
+ int i, idx;
+
+ for (idx = 0, i = 0; i < nsegs; i++, idx ^= 1) {
+ if (i && idx == 0)
+ ++sgp;
+
+ sgp->len[idx] = htobe32(segs[i].ds_len);
+ sgp->addr[idx] = htobe64(segs[i].ds_addr);
+ }
+
+ if (idx)
+ sgp->len[idx] = 0;
+}
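+
+/*
+ * Example: for nsegs == 3 the loop fills both halves of sgp[0] and the
+ * first half of sgp[1]; the trailing sgp->len[idx] = 0 then
+ * zero-terminates the unused second half of sgp[1].
+ */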
+
+/**
+ * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
+ * @adap: the adapter
+ * @q: the Tx queue
+ *
+ * Ring the doorbell if a Tx queue is asleep. There is a natural race
+ * where the HW may go to sleep just after we check; in that case the
+ * interrupt handler will detect the outstanding TX packet and ring the
+ * doorbell for us.
+ *
+ * When GTS is disabled we unconditionally ring the doorbell.
+ */
+static __inline void
+check_ring_tx_db(adapter_t *adap, struct sge_txq *q)
+{
+#if USE_GTS
+ clear_bit(TXQ_LAST_PKT_DB, &q->flags);
+ if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
+ set_bit(TXQ_LAST_PKT_DB, &q->flags);
+#ifdef T3_TRACE
+ T3_TRACE1(adap->tb[q->cntxt_id & 7], "doorbell Tx, cntxt %d",
+ q->cntxt_id);
+#endif
+ t3_write_reg(adap, A_SG_KDOORBELL,
+ F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
+ }
+#else
+ wmb(); /* write descriptors before telling HW */
+ t3_write_reg(adap, A_SG_KDOORBELL,
+ F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
+#endif
+}
+
+static __inline void
+wr_gen2(struct tx_desc *d, unsigned int gen)
+{
+#if SGE_NUM_GENBITS == 2
+ d->flit[TX_DESC_FLITS - 1] = htobe64(gen);
+#endif
+}
+
+/* sizeof(*eh) + sizeof(*vhdr) + sizeof(*ip) + sizeof(*tcp) */
+#define TCPPKTHDRSIZE (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + 20 + 20)
+
+int
+t3_encap(struct port_info *p, struct mbuf **m)
+{
+ adapter_t *sc;
+ struct mbuf *m0;
+ struct sge_qset *qs;
+ struct sge_txq *txq;
+ struct tx_sw_desc *stx;
+ struct txq_state txqs;
+ unsigned int nsegs, ndesc, flits, cntrl, mlen, tso_info;
+ int err;
+
+ struct work_request_hdr *wrp;
+ struct tx_sw_desc *txsd;
+ struct sg_ent *sgp, sgl[TX_MAX_SEGS / 2 + 1];
+ bus_dma_segment_t segs[TX_MAX_SEGS];
+ uint32_t wr_hi, wr_lo, sgl_flits;
+
+ struct tx_desc *txd;
+ struct cpl_tx_pkt *cpl;
+
+ DPRINTF("t3_encap ");
+ m0 = *m;
+ sc = p->adapter;
+ qs = &sc->sge.qs[p->first_qset];
+ txq = &qs->txq[TXQ_ETH];
+ stx = &txq->sdesc[txq->pidx];
+ txd = &txq->desc[txq->pidx];
+ cpl = (struct cpl_tx_pkt *)txd;
+ mlen = m0->m_pkthdr.len;
+ cpl->len = htonl(mlen | 0x80000000);
+
+ DPRINTF("mlen=%d\n", mlen);
+ /*
+ * XXX handle checksum, TSO, and VLAN here
+ *
+ */
+ cntrl = V_TXPKT_INTF(p->port);
+
+ /*
+ * XXX need to add VLAN support for 6.x
+ */
+#ifdef VLAN_SUPPORTED
+ if (m0->m_flags & M_VLANTAG)
+ cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag);
+
+ tso_info = V_LSO_MSS(m0->m_pkthdr.tso_segsz);
+#else
+ tso_info = 0;
+#endif
+ if (tso_info) {
+ int eth_type;
+ struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *) cpl;
+ struct ip *ip;
+ struct tcphdr *tcp;
+ uint8_t *pkthdr, tmp[TCPPKTHDRSIZE]; /* is this too large for the stack? */
+
+ txd->flit[2] = 0;
+ cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
+ hdr->cntrl = htonl(cntrl);
+
+ if (__predict_false(m0->m_len < TCPPKTHDRSIZE)) {
+ pkthdr = &tmp[0];
+ m_copydata(m0, 0, TCPPKTHDRSIZE, pkthdr);
+ } else {
+ pkthdr = m0->m_data;
+ }
+
+ if (__predict_false(m0->m_flags & M_VLANTAG)) {
+ eth_type = CPL_ETH_II_VLAN;
+ ip = (struct ip *)(pkthdr + ETHER_HDR_LEN +
+ ETHER_VLAN_ENCAP_LEN);
+ } else {
+ eth_type = CPL_ETH_II;
+ ip = (struct ip *)(pkthdr + ETHER_HDR_LEN);
+ }
+ tcp = (struct tcphdr *)((uint8_t *)ip +
+ sizeof(*ip));
+
+ tso_info |= V_LSO_ETH_TYPE(eth_type) |
+ V_LSO_IPHDR_WORDS(ip->ip_hl) |
+ V_LSO_TCPHDR_WORDS(tcp->th_off);
+ hdr->lso_info = htonl(tso_info);
+
+ flits = 3;
+ } else {
+ cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
+ cpl->cntrl = htonl(cntrl);
+
+ if (mlen <= WR_LEN - sizeof(*cpl)) {
+ txq_prod(txq, 1, &txqs);
+ txq->sdesc[txqs.pidx].m = m0;
+
+ if (m0->m_len == m0->m_pkthdr.len)
+ memcpy(&txd->flit[2], m0->m_data, mlen);
+ else
+ m_copydata(m0, 0, mlen, (caddr_t)&txd->flit[2]);
+
+ flits = (mlen + 7) / 8 + 2;
+ cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(mlen & 7) |
+ V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) |
+ F_WR_SOP | F_WR_EOP | txqs.compl);
+ wmb();
+ cpl->wr.wr_lo = htonl(V_WR_LEN(flits) |
+ V_WR_GEN(txqs.gen) | V_WR_TID(txq->token));
+
+ wr_gen2(txd, txqs.gen);
+ check_ring_tx_db(sc, txq);
+ return (0);
+ }
+ flits = 2;
+ }
+
+ wrp = (struct work_request_hdr *)txd;
+
+ if ((err = busdma_map_mbufs(m, sc, stx, segs, &nsegs)) != 0) {
+ return (err);
+ }
+ m0 = *m;
+ ndesc = calc_tx_descs(m0, nsegs);
+
+ sgp = (ndesc == 1) ? (struct sg_ent *)&txd->flit[flits] : &sgl[0];
+ make_sgl(sgp, segs, nsegs);
+
+ sgl_flits = sgl_len(nsegs);
+
+ DPRINTF("make_sgl success nsegs==%d ndesc==%d\n", nsegs, ndesc);
+ txq_prod(txq, ndesc, &txqs);
+ txsd = &txq->sdesc[txqs.pidx];
+ wr_hi = htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | txqs.compl);
+ wr_lo = htonl(V_WR_TID(txq->token));
+ txsd->m = m0;
+
+ if (__predict_true(ndesc == 1)) {
+ wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
+ V_WR_SGLSFLT(flits)) | wr_hi;
+ wmb();
+ wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
+ V_WR_GEN(txqs.gen)) | wr_lo;
+ /* XXX gen? */
+ wr_gen2(txd, txqs.gen);
+ } else {
+ unsigned int ogen = txqs.gen;
+ const uint64_t *fp = (const uint64_t *)sgl;
+ struct work_request_hdr *wp = wrp;
+
+ /* XXX - CHECK ME */
+ wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
+ V_WR_SGLSFLT(flits)) | wr_hi;
+
+ while (sgl_flits) {
+ unsigned int avail = WR_FLITS - flits;
+
+ if (avail > sgl_flits)
+ avail = sgl_flits;
+ memcpy(&txd->flit[flits], fp, avail * sizeof(*fp));
+ sgl_flits -= avail;
+ ndesc--;
+ if (!sgl_flits)
+ break;
+
+ fp += avail;
+ txd++;
+ txsd++;
+ if (++txqs.pidx == txq->size) {
+ txqs.pidx = 0;
+ txqs.gen ^= 1;
+ txd = txq->desc;
+ txsd = txq->sdesc;
+ }
+
+ /*
+ * when the head of the mbuf chain
+ * is freed all clusters will be freed
+ * with it
+ */
+ txsd->m = NULL;
+ wrp = (struct work_request_hdr *)txd;
+ wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
+ V_WR_SGLSFLT(1)) | wr_hi;
+ wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
+ sgl_flits + 1)) |
+ V_WR_GEN(txqs.gen)) | wr_lo;
+ wr_gen2(txd, txqs.gen);
+ flits = 1;
+ }
+#ifdef WHY
+ skb->priority = pidx;
+#endif
+ wrp->wr_hi |= htonl(F_WR_EOP);
+ wmb();
+ wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
+ wr_gen2((struct tx_desc *)wp, ogen);
+ }
+ check_ring_tx_db(p->adapter, txq);
+
+ return (0);
+}
+
+
+/**
+ * write_imm - write a packet into a Tx descriptor as immediate data
+ * @d: the Tx descriptor to write
+ * @m: the packet
+ * @len: the length of packet data to write as immediate data
+ * @gen: the generation bit value to write
+ *
+ * Writes a packet as immediate data into a Tx descriptor. The packet
+ * contains a work request at its beginning. We must write the packet
+ * carefully so the SGE doesn't read accidentally before it's written in
+ * its entirety.
+ */
+static __inline void write_imm(struct tx_desc *d, struct mbuf *m,
+ unsigned int len, unsigned int gen)
+{
+ struct work_request_hdr *from = (struct work_request_hdr *)m->m_data;
+ struct work_request_hdr *to = (struct work_request_hdr *)d;
+
+ memcpy(&to[1], &from[1], len - sizeof(*from));
+ to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
+ V_WR_BCNTLFLT(len & 7));
+ wmb();
+ to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
+ V_WR_LEN((len + 7) / 8));
+ wr_gen2(d, gen);
+ m_freem(m);
+}
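+
+/*
+ * Ordering note for write_imm(): the WR body and wr_hi are written
+ * first; the wmb() guarantees they are visible before wr_lo, which
+ * carries the generation bit the SGE checks, marks the descriptor valid.
+ */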
+
+/**
+ * check_desc_avail - check descriptor availability on a send queue
+ * @adap: the adapter
+ * @q: the TX queue
+ * @m: the packet needing the descriptors
+ * @ndesc: the number of Tx descriptors needed
+ * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
+ *
+ * Checks if the requested number of Tx descriptors is available on an
+ * SGE send queue. If the queue is already suspended or not enough
+ * descriptors are available the packet is queued for later transmission.
+ * Must be called with the Tx queue locked.
+ *
+ * Returns 0 if enough descriptors are available, 1 if there aren't
+ * enough descriptors and the packet has been queued, and 2 if the caller
+ * needs to retry because there weren't enough descriptors at the
+ * beginning of the call but some freed up in the mean time.
+ */
+static __inline int
+check_desc_avail(adapter_t *adap, struct sge_txq *q,
+ struct mbuf *m, unsigned int ndesc,
+ unsigned int qid)
+{
+	/*
+	 * XXX We currently only use this for the control queue; the
+	 * control queue is only used for binding qsets, which happens
+	 * at init time, so we are guaranteed enough descriptors.
+	 */
+#if 0
+ if (__predict_false(!skb_queue_empty(&q->sendq))) {
+addq_exit: __skb_queue_tail(&q->sendq, skb);
+ return 1;
+ }
+ if (__predict_false(q->size - q->in_use < ndesc)) {
+
+ struct sge_qset *qs = txq_to_qset(q, qid);
+
+ set_bit(qid, &qs->txq_stopped);
+ smp_mb__after_clear_bit();
+
+ if (should_restart_tx(q) &&
+ test_and_clear_bit(qid, &qs->txq_stopped))
+ return 2;
+
+ q->stops++;
+ goto addq_exit;
+ }
+#endif
+ return 0;
+}
+
+
+/**
+ * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
+ * @q: the SGE control Tx queue
+ *
+ * This is a variant of reclaim_completed_tx() that is used for Tx queues
+ * that send only immediate data (presently just the control queues) and
+ * thus do not have any mbufs to release.
+ */
+static __inline void
+reclaim_completed_tx_imm(struct sge_txq *q)
+{
+ unsigned int reclaim = q->processed - q->cleaned;
+
+ mtx_assert(&q->lock, MA_OWNED);
+
+ q->in_use -= reclaim;
+ q->cleaned += reclaim;
+}
+
+static __inline int
+immediate(const struct mbuf *m)
+{
+	return (m->m_len <= WR_LEN && m->m_pkthdr.len <= WR_LEN);
+}
+
+/**
+ * ctrl_xmit - send a packet through an SGE control Tx queue
+ * @adap: the adapter
+ * @q: the control queue
+ * @m: the packet
+ *
+ * Send a packet through an SGE control Tx queue. Packets sent through
+ * a control queue must fit entirely as immediate data in a single Tx
+ * descriptor and have no page fragments.
+ */
+static int
+ctrl_xmit(adapter_t *adap, struct sge_txq *q, struct mbuf *m)
+{
+ int ret;
+ struct work_request_hdr *wrp = (struct work_request_hdr *)m->m_data;
+
+ if (__predict_false(!immediate(m))) {
+ m_freem(m);
+ return 0;
+ }
+
+ wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
+ wrp->wr_lo = htonl(V_WR_TID(q->token));
+
+ mtx_lock(&q->lock);
+again: reclaim_completed_tx_imm(q);
+
+ ret = check_desc_avail(adap, q, m, 1, TXQ_CTRL);
+ if (__predict_false(ret)) {
+ if (ret == 1) {
+ mtx_unlock(&q->lock);
+ return (-1);
+ }
+ goto again;
+ }
+
+ write_imm(&q->desc[q->pidx], m, m->m_len, q->gen);
+
+ q->in_use++;
+ if (++q->pidx >= q->size) {
+ q->pidx = 0;
+ q->gen ^= 1;
+ }
+ mtx_unlock(&q->lock);
+ wmb();
+ t3_write_reg(adap, A_SG_KDOORBELL,
+ F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
+ return (0);
+}
+
+#ifdef RESTART_CTRLQ
+/**
+ * restart_ctrlq - restart a suspended control queue
+ * @qs: the queue set containing the control queue
+ *
+ * Resumes transmission on a suspended Tx control queue.
+ */
+static void
+restart_ctrlq(unsigned long data)
+{
+ struct mbuf *m;
+ struct sge_qset *qs = (struct sge_qset *)data;
+ struct sge_txq *q = &qs->txq[TXQ_CTRL];
+ adapter_t *adap = qs->port->adapter;
+
+ mtx_lock(&q->lock);
+again: reclaim_completed_tx_imm(q);
+
+ while (q->in_use < q->size &&
+ (skb = __skb_dequeue(&q->sendq)) != NULL) {
+
+ write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
+
+ if (++q->pidx >= q->size) {
+ q->pidx = 0;
+ q->gen ^= 1;
+ }
+ q->in_use++;
+ }
+ if (!skb_queue_empty(&q->sendq)) {
+ set_bit(TXQ_CTRL, &qs->txq_stopped);
+ smp_mb__after_clear_bit();
+
+ if (should_restart_tx(q) &&
+ test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
+ goto again;
+ q->stops++;
+ }
+
+ mtx_unlock(&q->lock);
+ t3_write_reg(adap, A_SG_KDOORBELL,
+ F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
+}
+#endif
+
+/*
+ * Send a management message through control queue 0
+ */
+int
+t3_mgmt_tx(struct adapter *adap, struct mbuf *m)
+{
+ return ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], m);
+}
+
+/**
+ * t3_sge_alloc_qset - initialize an SGE queue set
+ * @sc: the controller softc
+ * @id: the queue set id
+ * @nports: how many Ethernet ports will be using this queue set
+ * @irq_vec_idx: the IRQ vector index for response queue interrupts
+ * @p: configuration parameters for this queue set
+ * @ntxq: number of Tx queues for the queue set
+ * @pi: port info for queue set
+ *
+ * Allocate resources and initialize an SGE queue set. A queue set
+ * comprises a response queue, two Rx free-buffer queues, and up to 3
+ * Tx queues. The Tx queues are assigned roles in the order Ethernet
+ * queue, offload queue, and control queue.
+ */
+int
+t3_sge_alloc_qset(adapter_t *sc, u_int id, int nports, int irq_vec_idx,
+ const struct qset_params *p, int ntxq, struct port_info *pi)
+{
+ struct sge_qset *q = &sc->sge.qs[id];
+ int i, ret = 0;
+
+ init_qset_cntxt(q, id);
+
+ if ((ret = alloc_ring(sc, p->fl_size, sizeof(struct rx_desc),
+ sizeof(struct rx_sw_desc), &q->fl[0].phys_addr,
+ &q->fl[0].desc, &q->fl[0].sdesc,
+ &q->fl[0].desc_tag, &q->fl[0].desc_map)) != 0) {
+ printf("error %d from alloc ring fl0\n", ret);
+ goto err;
+ }
+
+ if ((ret = alloc_ring(sc, p->jumbo_size, sizeof(struct rx_desc),
+ sizeof(struct rx_sw_desc), &q->fl[1].phys_addr,
+ &q->fl[1].desc, &q->fl[1].sdesc,
+ &q->fl[1].desc_tag, &q->fl[1].desc_map)) != 0) {
+ printf("error %d from alloc ring fl1\n", ret);
+ goto err;
+ }
+
+ if ((ret = alloc_ring(sc, p->rspq_size, sizeof(struct rsp_desc), 0,
+ &q->rspq.phys_addr, &q->rspq.desc, NULL,
+ &q->rspq.desc_tag, &q->rspq.desc_map)) != 0) {
+ printf("error %d from alloc ring rspq\n", ret);
+ goto err;
+ }
+
+ for (i = 0; i < ntxq; ++i) {
+ /*
+ * The control queue always uses immediate data so does not
+ * need to keep track of any mbufs.
+ * XXX Placeholder for future TOE support.
+ */
+ size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
+
+ if ((ret = alloc_ring(sc, p->txq_size[i],
+ sizeof(struct tx_desc), sz,
+ &q->txq[i].phys_addr, &q->txq[i].desc,
+ &q->txq[i].sdesc, &q->txq[i].desc_tag,
+ &q->txq[i].desc_map)) != 0) {
+ printf("error %d from alloc ring tx %i\n", ret, i);
+ goto err;
+ }
+
+ q->txq[i].gen = 1;
+ q->txq[i].size = p->txq_size[i];
+ mtx_init(&q->txq[i].lock, "t3 txq lock", NULL, MTX_DEF);
+ }
+
+ q->fl[0].gen = q->fl[1].gen = 1;
+ q->fl[0].size = p->fl_size;
+ q->fl[1].size = p->jumbo_size;
+
+ q->rspq.gen = 1;
+ q->rspq.size = p->rspq_size;
+ mtx_init(&q->rspq.lock, "t3 rspq lock", NULL, MTX_DEF);
+
+ q->txq[TXQ_ETH].stop_thres = nports *
+ flits_to_desc(sgl_len(TX_MAX_SEGS + 1) + 3);
+
+ q->fl[0].buf_size = MCLBYTES;
+ q->fl[1].buf_size = MJUMPAGESIZE;
+ q->lro.enabled = lro_default;
+
+ mtx_lock(&sc->sge.reg_lock);
+ ret = -t3_sge_init_rspcntxt(sc, q->rspq.cntxt_id, irq_vec_idx,
+ q->rspq.phys_addr, q->rspq.size,
+ q->fl[0].buf_size, 1, 0);
+ if (ret) {
+ printf("error %d from t3_sge_init_rspcntxt\n", ret);
+ goto err_unlock;
+ }
+
+ for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
+ ret = -t3_sge_init_flcntxt(sc, q->fl[i].cntxt_id, 0,
+ q->fl[i].phys_addr, q->fl[i].size,
+ q->fl[i].buf_size, p->cong_thres, 1,
+ 0);
+ if (ret) {
+ printf("error %d from t3_sge_init_flcntxt for index i=%d\n", ret, i);
+ goto err_unlock;
+ }
+ }
+
+ ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
+ SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
+ q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
+ 1, 0);
+ if (ret) {
+ printf("error %d from t3_sge_init_ecntxt\n", ret);
+ goto err_unlock;
+ }
+
+ if (ntxq > 1) {
+ ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_OFLD].cntxt_id,
+ USE_GTS, SGE_CNTXT_OFLD, id,
+ q->txq[TXQ_OFLD].phys_addr,
+ q->txq[TXQ_OFLD].size, 0, 1, 0);
+ if (ret) {
+ printf("error %d from t3_sge_init_ecntxt\n", ret);
+ goto err_unlock;
+ }
+ }
+
+ if (ntxq > 2) {
+ ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_CTRL].cntxt_id, 0,
+ SGE_CNTXT_CTRL, id,
+ q->txq[TXQ_CTRL].phys_addr,
+ q->txq[TXQ_CTRL].size,
+ q->txq[TXQ_CTRL].token, 1, 0);
+ if (ret) {
+ printf("error %d from t3_sge_init_ecntxt\n", ret);
+ goto err_unlock;
+ }
+ }
+
+ mtx_unlock(&sc->sge.reg_lock);
+ t3_update_qset_coalesce(q, p);
+ q->port = pi;
+
+ refill_fl(sc, &q->fl[0], q->fl[0].size);
+ refill_fl(sc, &q->fl[1], q->fl[1].size);
+ refill_rspq(sc, &q->rspq, q->rspq.size - 1);
+
+ t3_write_reg(sc, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
+ V_NEWTIMER(q->rspq.holdoff_tmr));
+
+ return (0);
+
+err_unlock:
+ mtx_unlock(&sc->sge.reg_lock);
+err:
+ t3_free_qset(sc, q);
+
+ return (ret);
+}
+
+
+/**
+ * t3_free_qset - free the resources of an SGE queue set
+ * @sc: the controller owning the queue set
+ * @q: the queue set
+ *
+ * Release the HW and SW resources associated with an SGE queue set, such
+ * as HW contexts, packet buffers, and descriptor rings. Traffic to the
+ * queue set must be quiesced prior to calling this.
+ */
+static void
+t3_free_qset(adapter_t *sc, struct sge_qset *q)
+{
+ int i;
+
+ for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
+ if (q->fl[i].desc) {
+ mtx_lock(&sc->sge.reg_lock);
+ t3_sge_disable_fl(sc, q->fl[i].cntxt_id);
+ mtx_unlock(&sc->sge.reg_lock);
+ bus_dmamap_unload(q->fl[i].desc_tag, q->fl[i].desc_map);
+ bus_dmamem_free(q->fl[i].desc_tag, q->fl[i].desc,
+ q->fl[i].desc_map);
+ bus_dma_tag_destroy(q->fl[i].desc_tag);
+ }
+ if (q->fl[i].sdesc) {
+ free_rx_bufs(sc, &q->fl[i]);
+ free(q->fl[i].sdesc, M_DEVBUF);
+ }
+ }
+
+ for (i = 0; i < SGE_TXQ_PER_SET; ++i) {
+ if (q->txq[i].desc) {
+ mtx_lock(&sc->sge.reg_lock);
+ t3_sge_enable_ecntxt(sc, q->txq[i].cntxt_id, 0);
+ mtx_unlock(&sc->sge.reg_lock);
+ bus_dmamap_unload(q->txq[i].desc_tag,
+ q->txq[i].desc_map);
+ bus_dmamem_free(q->txq[i].desc_tag, q->txq[i].desc,
+ q->txq[i].desc_map);
+ bus_dma_tag_destroy(q->txq[i].desc_tag);
+ }
+ if (q->txq[i].sdesc) {
+ free(q->txq[i].sdesc, M_DEVBUF);
+ }
+ if (mtx_initialized(&q->txq[i].lock)) {
+ mtx_destroy(&q->txq[i].lock);
+ }
+ }
+
+ if (q->rspq.desc) {
+ mtx_lock(&sc->sge.reg_lock);
+ t3_sge_disable_rspcntxt(sc, q->rspq.cntxt_id);
+ mtx_unlock(&sc->sge.reg_lock);
+
+ bus_dmamap_unload(q->rspq.desc_tag, q->rspq.desc_map);
+ bus_dmamem_free(q->rspq.desc_tag, q->rspq.desc,
+ q->rspq.desc_map);
+ bus_dma_tag_destroy(q->rspq.desc_tag);
+ }
+ if (mtx_initialized(&q->rspq.lock)) {
+ mtx_destroy(&q->rspq.lock);
+ }
+
+ bzero(q, sizeof(*q));
+}
+
+/**
+ * t3_free_sge_resources - free SGE resources
+ * @sc: the adapter softc
+ *
+ * Frees resources used by the SGE queue sets.
+ */
+void
+t3_free_sge_resources(adapter_t *sc)
+{
+ int i;
+
+ for (i = 0; i < SGE_QSETS; ++i)
+ t3_free_qset(sc, &sc->sge.qs[i]);
+}
+
+/**
+ * t3_sge_start - enable SGE
+ * @sc: the controller softc
+ *
+ * Enables the SGE for DMAs. This is the last step in starting packet
+ * transfers.
+ */
+void
+t3_sge_start(adapter_t *sc)
+{
+ t3_set_reg_field(sc, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
+}
+
+
+/**
+ * refill_rspq - replenish an SGE response queue
+ * @adapter: the adapter
+ * @q: the response queue to replenish
+ * @credits: how many new responses to make available
+ *
+ * Replenishes a response queue by making the supplied number of responses
+ * available to HW.
+ */
+static __inline void
+refill_rspq(adapter_t *sc, const struct sge_rspq *q, u_int credits)
+{
+
+ /* mbufs are allocated on demand when a rspq entry is processed. */
+ t3_write_reg(sc, A_SG_RSPQ_CREDIT_RETURN,
+ V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
+}
+
+/**
+ * free_tx_desc - reclaims Tx descriptors and their buffers
+ * @adapter: the adapter
+ * @q: the Tx queue to reclaim descriptors from
+ * @n: the number of descriptors to reclaim
+ *
+ * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
+ * Tx buffers. Called with the Tx queue lock held.
+ */
+int
+free_tx_desc(adapter_t *sc, struct sge_txq *q, int n, struct mbuf **m_vec)
+{
+ struct tx_sw_desc *d;
+ unsigned int cidx = q->cidx;
+ int nbufs = 0;
+
+#ifdef T3_TRACE
+ T3_TRACE2(sc->tb[q->cntxt_id & 7],
+ "reclaiming %u Tx descriptors at cidx %u", n, cidx);
+#endif
+ d = &q->sdesc[cidx];
+
+ while (n-- > 0) {
+ DPRINTF("cidx=%d d=%p\n", cidx, d);
+ if (d->m) {
+ if (d->flags & TX_SW_DESC_MAPPED) {
+ bus_dmamap_unload(sc->tx_dmat, d->map);
+ bus_dmamap_destroy(sc->tx_dmat, d->map);
+ d->flags &= ~TX_SW_DESC_MAPPED;
+ }
+ m_vec[nbufs] = d->m;
+ d->m = NULL;
+ nbufs++;
+ }
+ ++d;
+ if (++cidx == q->size) {
+ cidx = 0;
+ d = q->sdesc;
+ }
+ }
+ q->cidx = cidx;
+
+ return (nbufs);
+}
+
+/**
+ * is_new_response - check if a response is newly written
+ * @r: the response descriptor
+ * @q: the response queue
+ *
+ * Returns true if a response descriptor contains a yet unprocessed
+ * response.
+ */
+static __inline int
+is_new_response(const struct rsp_desc *r,
+ const struct sge_rspq *q)
+{
+ return (r->intr_gen & F_RSPD_GEN2) == q->gen;
+}
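+
+/*
+ * The SGE stamps each response with the current generation bit and the
+ * driver flips q->gen on every ring wrap, so stale entries left over
+ * from the previous pass around the ring compare unequal above.
+ */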
+
+#define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
+#define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
+ V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
+ V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
+ V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
+
+/* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
+#define NOMEM_INTR_DELAY 2500
+
+static __inline void
+deliver_partial_bundle(struct t3cdev *tdev,
+ struct sge_rspq *q)
+{
+ ;
+}
+
+static __inline void
+rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
+ struct mbuf *m)
+{
+#ifdef notyet
+ if (rq->polling) {
+ rq->offload_skbs[rq->offload_skbs_idx++] = skb;
+ if (rq->offload_skbs_idx == RX_BUNDLE_SIZE) {
+ cxgb_ofld_recv(tdev, rq->offload_skbs, RX_BUNDLE_SIZE);
+ rq->offload_skbs_idx = 0;
+ rq->offload_bundles++;
+ }
+ } else
+#endif
+ {
+ /* XXX */
+ panic("implement offload enqueue\n");
+ }
+
+}
+
+static void
+restart_tx(struct sge_qset *qs)
+{
+ ;
+}
+
+void
+t3_rx_eth(struct port_info *pi, struct sge_rspq *rq, struct mbuf *m, int ethpad)
+{
+ struct cpl_rx_pkt *cpl = (struct cpl_rx_pkt *)(m->m_data + ethpad);
+ struct ifnet *ifp = pi->ifp;
+
+	DPRINTF("rx_eth m=%p m->m_data=%p cpl->iff=%d\n", m, m->m_data, cpl->iff);
+ if (&pi->adapter->port[cpl->iff] != pi)
+ panic("bad port index %d m->m_data=%p\n", cpl->iff, m->m_data);
+
+
+ m_adj(m, sizeof(*cpl) + ethpad);
+
+
+ if ((ifp->if_capenable & IFCAP_RXCSUM) && !cpl->fragment &&
+ cpl->csum_valid && cpl->csum == 0xffff) {
+		rspq_to_qset(rq)->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
+		m->m_pkthdr.csum_flags = (CSUM_IP_CHECKED|CSUM_IP_VALID|CSUM_DATA_VALID|CSUM_PSEUDO_HDR);
+ m->m_pkthdr.csum_data = 0xffff;
+ }
+ /*
+ * XXX need to add VLAN support for 6.x
+ */
+#ifdef VLAN_SUPPORTED
+ if (__predict_false(cpl->vlan_valid)) {
+ m->m_pkthdr.ether_vtag = ntohs(cpl->vlan);
+ m->m_flags |= M_VLANTAG;
+ }
+#endif
+ m->m_pkthdr.rcvif = ifp;
+
+ (*ifp->if_input)(ifp, m);
+}
+
+/**
+ * get_packet - return the next ingress packet buffer from a free list
+ * @adap: the adapter that received the packet
+ * @drop_thres: # of remaining buffers before we start dropping packets
+ * @qs: the qset that the SGE free list holding the packet belongs to
+ * @mh: the mbuf header, contains a pointer to the head and tail of the mbuf chain
+ * @r: response descriptor
+ *
+ * Get the next packet from a free list and complete setup of the
+ * mbuf. If the packet is small we make a copy and recycle the
+ * original buffer, otherwise we use the original buffer itself. If a
+ * positive drop threshold is supplied packets are dropped and their
+ * buffers recycled if (a) the number of remaining buffers is under the
+ * threshold and the packet is too big to copy, or (b) the packet should
+ * be copied but there is no memory for the copy.
+ */
+static int
+get_packet(adapter_t *adap, unsigned int drop_thres, struct sge_qset *qs,
+ struct t3_mbuf_hdr *mh, struct rsp_desc *r)
+{
+
+ struct mbuf *m = NULL;
+ unsigned int len_cq = ntohl(r->len_cq);
+ struct sge_fl *fl = (len_cq & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
+ struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+ uint32_t len = G_RSPD_LEN(len_cq);
+ uint32_t flags = ntohl(r->flags);
+ uint8_t sopeop = G_RSPD_SOP_EOP(flags);
+ int ret = 0;
+
+ prefetch(sd->m->m_data);
+
+ fl->credits--;
+ bus_dmamap_sync(adap->rx_jumbo_dmat, sd->map, BUS_DMASYNC_POSTREAD);
+#ifdef RECYCLE
+ if (len < SGE_RX_COPY_THRES) {
+ if (len > MHLEN) {
+ m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
+ } else
+ m = m_gethdr(M_DONTWAIT, MT_DATA);
+
+ if (__predict_true(m != NULL)) {
+ memcpy(m->m_data, (char *)sd->m->m_data, len);
+ DPRINTF("copied len=%d\n", len);
+ m->m_len = len;
+
+ } else if (!drop_thres)
+ goto use_orig_buf;
+ recycle:
+ recycle_rx_buf(adap, fl, fl->cidx);
+ goto done;
+ }
+
+ if (__predict_false((fl->credits < drop_thres) && !mh->mh_head))
+ goto recycle;
+
+ DPRINTF("using original buf\n");
+
+use_orig_buf:
+#endif
+ bus_dmamap_unload(adap->rx_jumbo_dmat, sd->map);
+ m = sd->m;
+ m->m_len = len;
+#ifdef RECYCLE
+done:
+#endif
+ switch(sopeop) {
+ case RSPQ_SOP_EOP:
+ DBG(DBG_RX, ("get_packet: SOP-EOP m %p\n", m));
+ mh->mh_head = mh->mh_tail = m;
+ m->m_pkthdr.len = len;
+ m->m_flags |= M_PKTHDR;
+ ret = 1;
+ break;
+ case RSPQ_NSOP_NEOP:
+ DBG(DBG_RX, ("get_packet: NO_SOP-NO_EOP m %p\n", m));
+ m->m_flags &= ~M_PKTHDR;
+ if (mh->mh_tail == NULL) {
+ if (cxgb_debug)
+ printf("discarding intermediate descriptor entry\n");
+ m_freem(m);
+ break;
+ }
+ mh->mh_tail->m_next = m;
+ mh->mh_tail = m;
+ mh->mh_head->m_pkthdr.len += len;
+ ret = 0;
+ break;
+ case RSPQ_SOP:
+ DBG(DBG_RX, ("get_packet: SOP m %p\n", m));
+ m->m_pkthdr.len = len;
+ mh->mh_head = mh->mh_tail = m;
+ m->m_flags |= M_PKTHDR;
+ ret = 0;
+ break;
+ case RSPQ_EOP:
+ DBG(DBG_RX, ("get_packet: EOP m %p\n", m));
+ m->m_flags &= ~M_PKTHDR;
+ mh->mh_head->m_pkthdr.len += len;
+ mh->mh_tail->m_next = m;
+ mh->mh_tail = m;
+ ret = 1;
+ break;
+ }
+ if (++fl->cidx == fl->size)
+ fl->cidx = 0;
+
+ return (ret);
+}
+
+
+/**
+ * handle_rsp_cntrl_info - handles control information in a response
+ * @qs: the queue set corresponding to the response
+ * @flags: the response control flags
+ *
+ * Handles the control information of an SGE response, such as GTS
+ * indications and completion credits for the queue set's Tx queues.
+ * HW coalesces credits, we don't do any extra SW coalescing.
+ */
+static __inline void
+handle_rsp_cntrl_info(struct sge_qset *qs, uint32_t flags)
+{
+ unsigned int credits;
+
+#if USE_GTS
+ if (flags & F_RSPD_TXQ0_GTS)
+ clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
+#endif
+
+ credits = G_RSPD_TXQ0_CR(flags);
+ if (credits)
+ qs->txq[TXQ_ETH].processed += credits;
+
+ credits = G_RSPD_TXQ2_CR(flags);
+ if (credits)
+ qs->txq[TXQ_CTRL].processed += credits;
+
+# if USE_GTS
+ if (flags & F_RSPD_TXQ1_GTS)
+ clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
+# endif
+ credits = G_RSPD_TXQ1_CR(flags);
+ if (credits)
+ qs->txq[TXQ_OFLD].processed += credits;
+}
+
+static void
+check_ring_db(adapter_t *adap, struct sge_qset *qs,
+ unsigned int sleeping)
+{
+ ;
+}
+
+/*
+ * This is an awful hack to bind the ithread to CPU 1
+ * to work around lack of ithread affinity
+ */
+static void
+bind_ithread(void)
+{
+ if (mp_ncpus > 1) {
+ mtx_lock_spin(&sched_lock);
+ sched_bind(curthread, 1);
+ mtx_unlock_spin(&sched_lock);
+ }
+
+}
+
+/**
+ * process_responses - process responses from an SGE response queue
+ * @adap: the adapter
+ * @qs: the queue set to which the response queue belongs
+ * @budget: how many responses can be processed in this round
+ *
+ * Process responses from an SGE response queue up to the supplied budget.
+ * Responses include received packets as well as credits and other events
+ * for the queues that belong to the response queue's queue set.
+ * A negative budget is effectively unlimited.
+ *
+ * Additionally choose the interrupt holdoff time for the next interrupt
+ * on this queue. If the system is under memory shortage use a fairly
+ * long delay to help recovery.
+ */
+static int
+process_responses(adapter_t *adap, struct sge_qset *qs, int budget)
+{
+ struct sge_rspq *rspq = &qs->rspq;
+ struct rsp_desc *r = &rspq->desc[rspq->cidx];
+ int budget_left = budget;
+ unsigned int sleeping = 0;
+ int lro = qs->lro.enabled;
+
+ static int pinned = 0;
+
+#ifdef DEBUG
+ static int last_holdoff = 0;
+ if (rspq->holdoff_tmr != last_holdoff) {
+ printf("next_holdoff=%d\n", rspq->holdoff_tmr);
+ last_holdoff = rspq->holdoff_tmr;
+ }
+#endif
+ if (pinned == 0) {
+ bind_ithread();
+ pinned = 1;
+ }
+ rspq->next_holdoff = rspq->holdoff_tmr;
+
+ while (__predict_true(budget_left && is_new_response(r, rspq))) {
+ int eth, eop = 0, ethpad = 0;
+ uint32_t flags = ntohl(r->flags);
+ uint32_t rss_csum = *(const uint32_t *)r;
+ uint32_t rss_hash = r->rss_hdr.rss_hash_val;
+
+ eth = (r->rss_hdr.opcode == CPL_RX_PKT);
+
+ if (__predict_false(flags & F_RSPD_ASYNC_NOTIF)) {
+ /* XXX */
+ printf("async notification\n");
+
+ } else if (flags & F_RSPD_IMM_DATA_VALID) {
+ if (cxgb_debug)
+ printf("IMM DATA VALID\n");
+
+			if (get_imm_packet(adap, r, &rspq->mh) == 0) {
+ rspq->next_holdoff = NOMEM_INTR_DELAY;
+ budget_left--;
+ break;
+ } else {
+ eop = 1;
+ }
+
+ rspq->imm_data++;
+ } else if (r->len_cq) {
+ int drop_thresh = eth ? SGE_RX_DROP_THRES : 0;
+
+ ethpad = 2;
+ eop = get_packet(adap, drop_thresh, qs, &rspq->mh, r);
+ } else {
+ DPRINTF("pure response\n");
+ rspq->pure_rsps++;
+ }
+
+ if (flags & RSPD_CTRL_MASK) {
+ sleeping |= flags & RSPD_GTS_MASK;
+ handle_rsp_cntrl_info(qs, flags);
+ }
+
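+		/*
+		 * Advance to the next response descriptor, toggling the
+		 * generation bit when the ring wraps.
+		 */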
+ r++;
+ if (__predict_false(++rspq->cidx == rspq->size)) {
+ rspq->cidx = 0;
+ rspq->gen ^= 1;
+ r = rspq->desc;
+ }
+
+ prefetch(r);
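+
+		/*
+		 * Return accumulated credits to the HW once a quarter of
+		 * the response ring has been consumed.
+		 */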
+ if (++rspq->credits >= (rspq->size / 4)) {
+ refill_rspq(adap, rspq, rspq->credits);
+ rspq->credits = 0;
+ }
+
+ if (eop) {
+ prefetch(rspq->mh.mh_head->m_data);
+ prefetch(rspq->mh.mh_head->m_data + L1_CACHE_BYTES);
+
+ if (eth) {
+ t3_rx_eth_lro(adap, rspq, &rspq->mh, ethpad,
+ rss_hash, rss_csum, lro);
+
+ rspq->mh.mh_tail = rspq->mh.mh_head = NULL;
+ } else {
+#ifdef notyet
+ if (__predict_false(r->rss_hdr.opcode == CPL_TRACE_PKT))
+ m_adj(m, 2);
+
+ rx_offload(&adap->tdev, rspq, m);
+#endif
+ }
+#ifdef notyet
+ taskqueue_enqueue(adap->tq, &adap->timer_reclaim_task);
+#else
+ __refill_fl(adap, &qs->fl[0]);
+ __refill_fl(adap, &qs->fl[1]);
+#endif
+		}
+ --budget_left;
+ }
+ t3_sge_lro_flush_all(adap, qs);
+ deliver_partial_bundle(&adap->tdev, rspq);
+
+ if (sleeping)
+ check_ring_db(adap, qs, sleeping);
+
+ smp_mb(); /* commit Tx queue processed updates */
+ if (__predict_false(qs->txq_stopped != 0))
+ restart_tx(qs);
+
+ budget -= budget_left;
+ return (budget);
+}
+
+/*
+ * A helper function that processes responses and issues GTS.
+ */
+static __inline int
+process_responses_gts(adapter_t *adap, struct sge_rspq *rq)
+{
+ int work;
+ static int last_holdoff = 0;
+
+ work = process_responses(adap, rspq_to_qset(rq), -1);
+
+ if (cxgb_debug && (rq->next_holdoff != last_holdoff)) {
+ printf("next_holdoff=%d\n", rq->next_holdoff);
+ last_holdoff = rq->next_holdoff;
+ }
+
+ t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
+ V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
+	return (work);
+}
+
+
+/*
+ * Interrupt handler for legacy INTx interrupts for T3B-based cards.
+ * Handles data events from SGE response queues as well as error and other
+ * async events as they all use the same interrupt pin. We use one SGE
+ * response queue per port in this mode and protect all response queues with
+ * queue 0's lock.
+ */
+void
+t3b_intr(void *data)
+{
+ uint32_t map;
+ adapter_t *adap = data;
+ struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
+ struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
+
+ t3_write_reg(adap, A_PL_CLI, 0);
+ map = t3_read_reg(adap, A_SG_DATA_INTR);
+
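+	/*
+	 * Bits 0 and 1 of the interrupt map flag new responses in queue
+	 * sets 0 and 1; F_ERRINTR flags error and other async events.
+	 */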
+ if (!map)
+ return;
+
+ if (__predict_false(map & F_ERRINTR))
+ taskqueue_enqueue(adap->tq, &adap->slow_intr_task);
+
+ mtx_lock(&q0->lock);
+
+ if (__predict_true(map & 1))
+ process_responses_gts(adap, q0);
+
+ if (map & 2)
+ process_responses_gts(adap, q1);
+
+ mtx_unlock(&q0->lock);
+}
+
+/*
+ * The MSI interrupt handler. This needs to handle data events from SGE
+ * response queues as well as error and other async events as they all use
+ * the same MSI vector. We use one SGE response queue per port in this mode
+ * and protect all response queues with queue 0's lock.
+ */
+void
+t3_intr_msi(void *data)
+{
+ adapter_t *adap = data;
+ struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
+ struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
+ int new_packets = 0;
+
+ mtx_lock(&q0->lock);
+	if (process_responses_gts(adap, q0))
+		new_packets = 1;
+
+	if (adap->params.nports == 2 &&
+	    process_responses_gts(adap, q1))
+		new_packets = 1;
+
+	mtx_unlock(&q0->lock);
+ if (new_packets == 0)
+ taskqueue_enqueue(adap->tq, &adap->slow_intr_task);
+}
+
+void
+t3_intr_msix(void *data)
+{
+ struct sge_qset *qs = data;
+ adapter_t *adap = qs->port->adapter;
+ struct sge_rspq *rspq = &qs->rspq;
+
+ if (cxgb_debug)
+ printf("got msi-x interrupt\n");
+ mtx_lock(&rspq->lock);
+ if (process_responses_gts(adap, rspq) == 0) {
+#ifdef notyet
+ rspq->unhandled_irqs++;
+#endif
+ }
+ mtx_unlock(&rspq->lock);
+}
+
+static int
+t3_lro_enable(SYSCTL_HANDLER_ARGS)
+{
+ adapter_t *sc;
+ int i, j, enabled, err, nqsets = 0;
+
+ sc = arg1;
+ enabled = sc->sge.qs[0].lro.enabled;
+ err = sysctl_handle_int(oidp, &enabled, arg2, req);
+
+ if (err != 0) {
+ return (err);
+ }
+ if (enabled == sc->sge.qs[0].lro.enabled)
+ return (0);
+
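+	/* Count the queue sets across all ports. */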
+ for (i = 0; i < sc->params.nports; i++)
+ for (j = 0; j < sc->port[i].nqsets; j++)
+ nqsets++;
+
+ for (i = 0; i < nqsets; i++) {
+ sc->sge.qs[i].lro.enabled = enabled;
+ }
+
+ return (0);
+}
+
+static int
+t3_set_coalesce_nsecs(SYSCTL_HANDLER_ARGS)
+{
+ adapter_t *sc = arg1;
+ struct qset_params *qsp = &sc->params.sge.qset[0];
+ int coalesce_nsecs;
+ struct sge_qset *qs;
+ int i, j, err, nqsets = 0;
+ struct mtx *lock;
+
+ coalesce_nsecs = qsp->coalesce_nsecs;
+ err = sysctl_handle_int(oidp, &coalesce_nsecs, arg2, req);
+
+ if (err != 0) {
+ return (err);
+ }
+ if (coalesce_nsecs == qsp->coalesce_nsecs)
+ return (0);
+
+ for (i = 0; i < sc->params.nports; i++)
+ for (j = 0; j < sc->port[i].nqsets; j++)
+ nqsets++;
+
+ coalesce_nsecs = max(100, coalesce_nsecs);
+
+ for (i = 0; i < nqsets; i++) {
+ qs = &sc->sge.qs[i];
+ qsp = &sc->params.sge.qset[i];
+ qsp->coalesce_nsecs = coalesce_nsecs;
+
+ lock = (sc->flags & USING_MSIX) ? &qs->rspq.lock :
+ &sc->sge.qs[0].rspq.lock;
+
+ mtx_lock(lock);
+ t3_update_qset_coalesce(qs, qsp);
+ t3_write_reg(sc, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
+ V_NEWTIMER(qs->rspq.holdoff_tmr));
+ mtx_unlock(lock);
+ }
+
+ return (0);
+}
+
+
+void
+t3_add_sysctls(adapter_t *sc)
+{
+ struct sysctl_ctx_list *ctx;
+ struct sysctl_oid_list *children;
+
+ ctx = device_get_sysctl_ctx(sc->dev);
+ children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
+
+ /* random information */
+ SYSCTL_ADD_STRING(ctx, children, OID_AUTO,
+ "firmware_version",
+ CTLFLAG_RD, &sc->fw_version,
+ 0, "firmware version");
+
+ SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
+ "enable_lro",
+ CTLTYPE_INT|CTLFLAG_RW, sc,
+ 0, t3_lro_enable,
+ "I", "enable large receive offload");
+
+ SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
+ "intr_coal",
+ CTLTYPE_INT|CTLFLAG_RW, sc,
+ 0, t3_set_coalesce_nsecs,
+ "I", "interrupt coalescing timer (ns)");
+ SYSCTL_ADD_INT(ctx, children, OID_AUTO,
+ "enable_debug",
+ CTLFLAG_RW, &cxgb_debug,
+ 0, "enable verbose debugging output");
+}
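+
+/*
+ * Example usage from userland, assuming the controller device attaches
+ * as unit 0 (the node name follows the controller's device name, here
+ * shown as "cxgbc"):
+ *
+ *	# sysctl dev.cxgbc.0.enable_lro=1
+ *	# sysctl dev.cxgbc.0.intr_coal=50000
+ */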
+
+/**
+ * t3_get_desc - dump an SGE descriptor for debugging purposes
+ * @qs: the queue set
+ * @qnum: identifies the specific queue (0..2: Tx, 3: response, 4..5: Rx)
+ * @idx: the descriptor index in the queue
+ * @data: where to dump the descriptor contents
+ *
+ * Dumps the contents of a HW descriptor of an SGE queue. Returns the
+ * size of the descriptor.
+ */
+int
+t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
+ unsigned char *data)
+{
+ if (qnum >= 6)
+ return (EINVAL);
+
+	if (qnum < 3) {
+		if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
+			return (EINVAL);
+		memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
+		return (sizeof(struct tx_desc));
+	}
+
+ if (qnum == 3) {
+ if (!qs->rspq.desc || idx >= qs->rspq.size)
+ return (EINVAL);
+ memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
+		return (sizeof(struct rsp_desc));
+ }
+
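+	/* Queues 4 and 5 map to the two free lists. */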
+ qnum -= 4;
+ if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
+ return (EINVAL);
+ memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
+	return (sizeof(struct rx_desc));
+}