author     Renato Botelho <renato@netgate.com>    2015-12-14 11:12:41 -0200
committer  Renato Botelho <renato@netgate.com>    2015-12-14 11:12:41 -0200
commit     6edad5be89050ac3cfa5512ddceca99affe9ad8a (patch)
tree       2c88bc0aac442001b8a020f1b41cfce5c79b4959 /sys
parent     1b7c02947f339bceaede5ea0a8aa91cb00b44eb3 (diff)
parent     98e6718d915fa5a645555a7bf1f41f75c5b53107 (diff)
Merge remote-tracking branch 'origin/stable/10' into devel
Diffstat (limited to 'sys')
-rw-r--r--                sys/amd64/amd64/machdep.c | 7
-rw-r--r-- [-rwxr-xr-x]   sys/dev/ixl/i40e_adminq.c | 67
-rw-r--r-- [-rwxr-xr-x]   sys/dev/ixl/i40e_adminq.h | 3
-rw-r--r-- [-rwxr-xr-x]   sys/dev/ixl/i40e_adminq_cmd.h | 125
-rw-r--r-- [-rwxr-xr-x]   sys/dev/ixl/i40e_alloc.h | 0
-rw-r--r-- [-rwxr-xr-x]   sys/dev/ixl/i40e_common.c | 649
-rw-r--r--                sys/dev/ixl/i40e_devids.h | 68
-rw-r--r-- [-rwxr-xr-x]   sys/dev/ixl/i40e_hmc.c | 69
-rw-r--r-- [-rwxr-xr-x]   sys/dev/ixl/i40e_hmc.h | 12
-rw-r--r-- [-rwxr-xr-x]   sys/dev/ixl/i40e_lan_hmc.c | 54
-rw-r--r-- [-rwxr-xr-x]   sys/dev/ixl/i40e_lan_hmc.h | 0
-rw-r--r-- [-rwxr-xr-x]   sys/dev/ixl/i40e_nvm.c | 53
-rw-r--r-- [-rwxr-xr-x]   sys/dev/ixl/i40e_osdep.c | 0
-rw-r--r-- [-rwxr-xr-x]   sys/dev/ixl/i40e_osdep.h | 10
-rw-r--r-- [-rwxr-xr-x]   sys/dev/ixl/i40e_prototype.h | 34
-rw-r--r-- [-rwxr-xr-x]   sys/dev/ixl/i40e_register.h | 1941
-rw-r--r-- [-rwxr-xr-x]   sys/dev/ixl/i40e_status.h | 0
-rw-r--r-- [-rwxr-xr-x]   sys/dev/ixl/i40e_type.h | 206
-rw-r--r-- [-rwxr-xr-x]   sys/dev/ixl/i40e_virtchnl.h | 21
-rw-r--r-- [-rwxr-xr-x]   sys/dev/ixl/if_ixl.c | 2590
-rw-r--r--                sys/dev/ixl/if_ixlv.c | 264
-rw-r--r--                sys/dev/ixl/ixl.h | 68
-rw-r--r--                sys/dev/ixl/ixl_pf.h | 49
-rw-r--r-- [-rwxr-xr-x]   sys/dev/ixl/ixl_txrx.c | 182
-rw-r--r--                sys/dev/ixl/ixlv.h | 6
-rw-r--r--                sys/dev/ixl/ixlvc.c | 44
-rw-r--r--                sys/dev/mlx5/device.h | 35
-rw-r--r--                sys/dev/mlx5/driver.h | 2
-rw-r--r--                sys/dev/mlx5/eswitch_vacl.h | 46
-rw-r--r--                sys/dev/mlx5/mlx5_core/mlx5_eq.c | 28
-rw-r--r--                sys/dev/mlx5/mlx5_core/mlx5_eswitch_vacl.c | 804
-rw-r--r--                sys/dev/mlx5/mlx5_core/mlx5_vport.c | 128
-rw-r--r--                sys/dev/mlx5/mlx5_en/en.h | 11
-rw-r--r--                sys/dev/mlx5/mlx5_en/mlx5_en_ethtool.c | 6
-rw-r--r--                sys/dev/mlx5/mlx5_en/mlx5_en_main.c | 107
-rw-r--r--                sys/dev/mlx5/mlx5_en/mlx5_en_rx.c | 39
-rw-r--r--                sys/dev/mlx5/mlx5_en/mlx5_en_tx.c | 10
-rw-r--r--                sys/dev/mlx5/vport.h | 7
-rw-r--r--                sys/dev/netmap/if_ixl_netmap.h | 422
-rw-r--r--                sys/dev/sfxge/sfxge.c | 31
-rw-r--r--                sys/dev/sfxge/sfxge.h | 5
-rw-r--r--                sys/dev/sfxge/sfxge_port.c | 35
-rw-r--r--                sys/dev/sfxge/sfxge_tx.c | 29
-rw-r--r--                sys/dev/sfxge/sfxge_tx.h | 1
-rw-r--r--                sys/dev/sfxge/sfxge_version.h | 2
-rw-r--r--                sys/dev/usb/net/if_smsc.c | 2
-rw-r--r--                sys/dev/usb/usbdevs | 68
-rw-r--r--                sys/dev/usb/wlan/if_run.c | 1
-rwxr-xr-x                sys/modules/ixl/Makefile | 2
-rwxr-xr-x                sys/modules/ixlv/Makefile | 2
-rw-r--r--                sys/modules/mlx5/Makefile | 1
-rw-r--r--                sys/modules/mlxen/Makefile | 2
-rw-r--r--                sys/ofed/drivers/infiniband/hw/mthca/mthca_reset.c | 8
-rw-r--r--                sys/ofed/drivers/net/mlx4/en_ethtool.c | 1616
-rw-r--r--                sys/ofed/drivers/net/mlx4/en_main.c | 11
-rw-r--r--                sys/ofed/drivers/net/mlx4/en_netdev.c | 274
-rw-r--r--                sys/ofed/drivers/net/mlx4/en_port.c | 1
-rw-r--r--                sys/ofed/drivers/net/mlx4/en_rx.c | 420
-rw-r--r--                sys/ofed/drivers/net/mlx4/en_selftest.c | 178
-rw-r--r--                sys/ofed/drivers/net/mlx4/en_tx.c | 852
-rw-r--r--                sys/ofed/drivers/net/mlx4/main.c | 88
-rw-r--r--                sys/ofed/drivers/net/mlx4/mlx4.h | 10
-rw-r--r--                sys/ofed/drivers/net/mlx4/mlx4_en.h | 83
-rw-r--r--                sys/ofed/drivers/net/mlx4/mlx4_stats.h | 1
-rw-r--r--                sys/ofed/drivers/net/mlx4/port.c | 7
-rw-r--r--                sys/ofed/include/linux/compat.h | 4
-rw-r--r--                sys/ofed/include/linux/etherdevice.h | 19
-rw-r--r--                sys/ofed/include/linux/interrupt.h | 19
-rw-r--r--                sys/ofed/include/linux/mlx4/cq.h | 2
-rw-r--r--                sys/ofed/include/linux/mlx4/device.h | 7
-rw-r--r--                sys/ofed/include/linux/mlx4/doorbell.h | 2
-rw-r--r--                sys/ofed/include/linux/mlx4/qp.h | 8
-rw-r--r--                sys/vm/vm_pageout.c | 11
73 files changed, 8655 insertions, 3314 deletions
diff --git a/sys/amd64/amd64/machdep.c b/sys/amd64/amd64/machdep.c
index f501e99..d20e3121 100644
--- a/sys/amd64/amd64/machdep.c
+++ b/sys/amd64/amd64/machdep.c
@@ -1158,12 +1158,7 @@ struct soft_segment_descriptor gdt_segs[] = {
};
void
-setidt(idx, func, typ, dpl, ist)
- int idx;
- inthand_t *func;
- int typ;
- int dpl;
- int ist;
+setidt(int idx, inthand_t *func, int typ, int dpl, int ist)
{
struct gate_descriptor *ip;
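
[Editor's note: the machdep.c hunk above converts setidt() from a K&R-style definition (parameter types declared between the parameter list and the body) to an ANSI C prototype-style definition. Behavior is unchanged; the ANSI form lets the compiler type-check arguments at every call site. A minimal illustration with hypothetical functions, not kernel code:

    /* K&R (old style): callers get no argument type checking. */
    int
    scale_kr(val, factor)
        int val;
        int factor;
    {
        return (val * factor);
    }

    /* ANSI (new style): the prototype is part of the definition,
     * so scale_ansi("oops", 2) is a compile-time error. */
    int
    scale_ansi(int val, int factor)
    {
        return (val * factor);
    }
]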
diff --git a/sys/dev/ixl/i40e_adminq.c b/sys/dev/ixl/i40e_adminq.c
index 2dc0807..5eacdc4 100755..100644
--- a/sys/dev/ixl/i40e_adminq.c
+++ b/sys/dev/ixl/i40e_adminq.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2014, Intel Corporation
+ Copyright (c) 2013-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -315,8 +315,12 @@ static enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
wr32(hw, hw->aq.asq.tail, 0);
/* set starting point */
- wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
- I40E_PF_ATQLEN_ATQENABLE_MASK));
+ if (!i40e_is_vf(hw))
+ wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+ I40E_PF_ATQLEN_ATQENABLE_MASK));
+ if (i40e_is_vf(hw))
+ wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+ I40E_VF_ATQLEN1_ATQENABLE_MASK));
wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));
@@ -344,8 +348,12 @@ static enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
wr32(hw, hw->aq.arq.tail, 0);
/* set starting point */
- wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
- I40E_PF_ARQLEN_ARQENABLE_MASK));
+ if (!i40e_is_vf(hw))
+ wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+ I40E_PF_ARQLEN_ARQENABLE_MASK));
+ if (i40e_is_vf(hw))
+ wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+ I40E_VF_ARQLEN1_ARQENABLE_MASK));
wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));
@@ -559,6 +567,7 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
{
enum i40e_status_code ret_code;
u16 eetrack_lo, eetrack_hi;
+ u16 cfg_ptr, oem_hi, oem_lo;
int retry = 0;
/* verify input for valid configuration */
if ((hw->aq.num_arq_entries == 0) ||
@@ -614,10 +623,17 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
goto init_adminq_free_arq;
/* get the NVM version info */
- i40e_read_nvm_word(hw, I40E_SR_NVM_IMAGE_VERSION, &hw->nvm.version);
+ i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
+ &hw->nvm.version);
i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
+ i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
+ i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
+ &oem_hi);
+ i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
+ &oem_lo);
+ hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;
if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
ret_code = I40E_ERR_FIRMWARE_API_VERSION;
@@ -668,6 +684,9 @@ enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
i40e_destroy_spinlock(&hw->aq.asq_spinlock);
i40e_destroy_spinlock(&hw->aq.arq_spinlock);
+ if (hw->nvm_buff.va)
+ i40e_free_virt_mem(hw, &hw->nvm_buff);
+
return ret_code;
}
@@ -687,16 +706,16 @@ u16 i40e_clean_asq(struct i40e_hw *hw)
desc = I40E_ADMINQ_DESC(*asq, ntc);
details = I40E_ADMINQ_DETAILS(*asq, ntc);
+
while (rd32(hw, hw->aq.asq.head) != ntc) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
- "%s: ntc %d head %d.\n", __FUNCTION__, ntc,
- rd32(hw, hw->aq.asq.head));
+ "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
if (details->callback) {
I40E_ADMINQ_CALLBACK cb_func =
(I40E_ADMINQ_CALLBACK)details->callback;
- i40e_memcpy(&desc_cb, desc,
- sizeof(struct i40e_aq_desc), I40E_DMA_TO_DMA);
+ i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc),
+ I40E_DMA_TO_DMA);
cb_func(hw, &desc_cb);
}
i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
@@ -754,6 +773,8 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
u16 retval = 0;
u32 val = 0;
+ hw->aq.asq_last_status = I40E_AQ_RC_OK;
+
val = rd32(hw, hw->aq.asq.head);
if (val >= hw->aq.num_asq_entries) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
@@ -911,6 +932,11 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
"AQTX: desc and buffer writeback:\n");
i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
+ /* save writeback aq if requested */
+ if (details->wb_desc)
+ i40e_memcpy(details->wb_desc, desc_on_ring,
+ sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);
+
/* update the error if time out occurred */
if ((!cmd_completed) &&
(!details->async && !details->postpone)) {
@@ -970,7 +996,10 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
i40e_acquire_spinlock(&hw->aq.arq_spinlock);
/* set next_to_use to head */
- ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
+ if (!i40e_is_vf(hw))
+ ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
+ if (i40e_is_vf(hw))
+ ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
if (ntu == ntc) {
/* nothing to do - shouldn't need to update ring's values */
ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
@@ -1039,6 +1068,19 @@ clean_arq_element_out:
i40e_release_nvm(hw);
hw->aq.nvm_release_on_done = FALSE;
}
+
+ switch (hw->nvmupd_state) {
+ case I40E_NVMUPD_STATE_INIT_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ break;
+
+ case I40E_NVMUPD_STATE_WRITE_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
+ break;
+
+ default:
+ break;
+ }
}
return ret_code;
@@ -1050,9 +1092,6 @@ void i40e_resume_aq(struct i40e_hw *hw)
hw->aq.asq.next_to_use = 0;
hw->aq.asq.next_to_clean = 0;
-#if (I40E_VF_ATQLEN_ATQENABLE_MASK != I40E_PF_ATQLEN_ATQENABLE_MASK)
-#error I40E_VF_ATQLEN_ATQENABLE_MASK != I40E_PF_ATQLEN_ATQENABLE_MASK
-#endif
i40e_config_asq_regs(hw);
hw->aq.arq.next_to_use = 0;
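
[Editor's note: the i40e_adminq.c hunks above stop assuming the VF admin-queue registers share the PF bit layout — note the deleted compile-time #error guard in i40e_resume_aq() — and instead branch on i40e_is_vf() before writing the queue-length registers. The paired if-statements are equivalent to selecting a mask up front; a sketch using a hypothetical helper that is not part of the driver:

    /* Hypothetical: choose the ATQ enable mask for this function type.
     * The committed code open-codes this branch at each register write. */
    static u32
    i40e_atq_enable_mask(struct i40e_hw *hw)
    {
        return (i40e_is_vf(hw) ? I40E_VF_ATQLEN1_ATQENABLE_MASK :
            I40E_PF_ATQLEN_ATQENABLE_MASK);
    }

    /* ...with which i40e_config_asq_regs() could reduce to: */
    wr32(hw, hw->aq.asq.len,
        hw->aq.num_asq_entries | i40e_atq_enable_mask(hw));
]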
diff --git a/sys/dev/ixl/i40e_adminq.h b/sys/dev/ixl/i40e_adminq.h
index 9db4c9d..76dab8e 100755..100644
--- a/sys/dev/ixl/i40e_adminq.h
+++ b/sys/dev/ixl/i40e_adminq.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2014, Intel Corporation
+ Copyright (c) 2013-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -77,6 +77,7 @@ struct i40e_asq_cmd_details {
u16 flags_dis;
bool async;
bool postpone;
+ struct i40e_aq_desc *wb_desc;
};
#define I40E_ADMINQ_DETAILS(R, i) \
diff --git a/sys/dev/ixl/i40e_adminq_cmd.h b/sys/dev/ixl/i40e_adminq_cmd.h
index 02a3ea2..f1f267e 100755..100644
--- a/sys/dev/ixl/i40e_adminq_cmd.h
+++ b/sys/dev/ixl/i40e_adminq_cmd.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2014, Intel Corporation
+ Copyright (c) 2013-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -42,7 +42,11 @@
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR 0x0002
+#ifdef X722_SUPPORT
+#define I40E_FW_API_VERSION_MINOR 0x0003
+#else
+#define I40E_FW_API_VERSION_MINOR 0x0004
+#endif
struct i40e_aq_desc {
__le16 flags;
@@ -140,12 +144,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_list_func_capabilities = 0x000A,
i40e_aqc_opc_list_dev_capabilities = 0x000B,
- i40e_aqc_opc_set_cppm_configuration = 0x0103,
- i40e_aqc_opc_set_arp_proxy_entry = 0x0104,
- i40e_aqc_opc_set_ns_proxy_entry = 0x0105,
-
/* LAA */
- i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */
i40e_aqc_opc_mac_address_read = 0x0107,
i40e_aqc_opc_mac_address_write = 0x0108,
@@ -240,6 +239,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_nvm_update = 0x0703,
i40e_aqc_opc_nvm_config_read = 0x0704,
i40e_aqc_opc_nvm_config_write = 0x0705,
+ i40e_aqc_opc_oem_post_update = 0x0720,
/* virtualization commands */
i40e_aqc_opc_send_msg_to_pf = 0x0801,
@@ -270,7 +270,12 @@ enum i40e_admin_queue_opc {
/* Tunnel commands */
i40e_aqc_opc_add_udp_tunnel = 0x0B00,
i40e_aqc_opc_del_udp_tunnel = 0x0B01,
- i40e_aqc_opc_tunnel_key_structure = 0x0B10,
+#ifdef X722_SUPPORT
+ i40e_aqc_opc_set_rss_key = 0x0B02,
+ i40e_aqc_opc_set_rss_lut = 0x0B03,
+ i40e_aqc_opc_get_rss_key = 0x0B04,
+ i40e_aqc_opc_get_rss_lut = 0x0B05,
+#endif
/* Async Events */
i40e_aqc_opc_event_lan_overflow = 0x1001,
@@ -282,8 +287,6 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_oem_ocbb_initialize = 0xFE03,
/* debug commands */
- i40e_aqc_opc_debug_get_deviceid = 0xFF00,
- i40e_aqc_opc_debug_set_mode = 0xFF01,
i40e_aqc_opc_debug_read_reg = 0xFF03,
i40e_aqc_opc_debug_write_reg = 0xFF04,
i40e_aqc_opc_debug_modify_reg = 0xFF07,
@@ -517,7 +520,8 @@ struct i40e_aqc_mac_address_read {
#define I40E_AQC_SAN_ADDR_VALID 0x20
#define I40E_AQC_PORT_ADDR_VALID 0x40
#define I40E_AQC_WOL_ADDR_VALID 0x80
-#define I40E_AQC_ADDR_VALID_MASK 0xf0
+#define I40E_AQC_MC_MAG_EN_VALID 0x100
+#define I40E_AQC_ADDR_VALID_MASK 0x1F0
u8 reserved[6];
__le32 addr_high;
__le32 addr_low;
@@ -540,7 +544,9 @@ struct i40e_aqc_mac_address_write {
#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
#define I40E_AQC_WRITE_TYPE_PORT 0x8000
-#define I40E_AQC_WRITE_TYPE_MASK 0xc000
+#define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG 0xC000
+#define I40E_AQC_WRITE_TYPE_MASK 0xC000
+
__le16 mac_sah;
__le32 mac_sal;
u8 reserved[8];
@@ -834,8 +840,16 @@ struct i40e_aqc_vsi_properties_data {
I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
/* queueing option section */
u8 queueing_opt_flags;
+#ifdef X722_SUPPORT
+#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04
+#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08
+#endif
#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
+#ifdef X722_SUPPORT
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40
+#endif
u8 queueing_opt_reserved[3];
/* scheduler section */
u8 up_enable_bits;
@@ -1076,6 +1090,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
__le16 seid;
#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
__le16 vlan_tag;
+#define I40E_AQC_SET_VSI_VLAN_MASK 0x0FFF
#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000
u8 reserved[8];
};
@@ -1725,11 +1740,13 @@ struct i40e_aqc_get_link_status {
u8 phy_type; /* i40e_aq_phy_type */
u8 link_speed; /* i40e_aq_link_speed */
u8 link_info;
-#define I40E_AQ_LINK_UP 0x01
+#define I40E_AQ_LINK_UP 0x01 /* obsolete */
+#define I40E_AQ_LINK_UP_FUNCTION 0x01
#define I40E_AQ_LINK_FAULT 0x02
#define I40E_AQ_LINK_FAULT_TX 0x04
#define I40E_AQ_LINK_FAULT_RX 0x08
#define I40E_AQ_LINK_FAULT_REMOTE 0x10
+#define I40E_AQ_LINK_UP_PORT 0x20
#define I40E_AQ_MEDIA_AVAILABLE 0x40
#define I40E_AQ_SIGNAL_DETECT 0x80
u8 an_info;
@@ -1891,6 +1908,26 @@ struct i40e_aqc_nvm_config_data_immediate_field {
I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field);
+/* OEM Post Update (indirect 0x0720)
+ * no command data struct used
+ */
+ struct i40e_aqc_nvm_oem_post_update {
+#define I40E_AQ_NVM_OEM_POST_UPDATE_EXTERNAL_DATA 0x01
+ u8 sel_data;
+ u8 reserved[7];
+};
+
+I40E_CHECK_STRUCT_LEN(0x8, i40e_aqc_nvm_oem_post_update);
+
+struct i40e_aqc_nvm_oem_post_update_buffer {
+ u8 str_len;
+ u8 dev_addr;
+ __le16 eeprom_addr;
+ u8 data[36];
+};
+
+I40E_CHECK_STRUCT_LEN(0x28, i40e_aqc_nvm_oem_post_update_buffer);
+
/* Send to PF command (indirect 0x0801) id is only used by PF
* Send to VF command (indirect 0x0802) id is only used by PF
* Send to Peer PF command (indirect 0x0803)
@@ -2064,12 +2101,28 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
#define I40E_AQC_CEE_APP_ISCSI_MASK (0x7 << I40E_AQC_CEE_APP_ISCSI_SHIFT)
#define I40E_AQC_CEE_APP_FIP_SHIFT 0x8
#define I40E_AQC_CEE_APP_FIP_MASK (0x7 << I40E_AQC_CEE_APP_FIP_SHIFT)
+
#define I40E_AQC_CEE_PG_STATUS_SHIFT 0x0
#define I40E_AQC_CEE_PG_STATUS_MASK (0x7 << I40E_AQC_CEE_PG_STATUS_SHIFT)
#define I40E_AQC_CEE_PFC_STATUS_SHIFT 0x3
#define I40E_AQC_CEE_PFC_STATUS_MASK (0x7 << I40E_AQC_CEE_PFC_STATUS_SHIFT)
#define I40E_AQC_CEE_APP_STATUS_SHIFT 0x8
#define I40E_AQC_CEE_APP_STATUS_MASK (0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT)
+#define I40E_AQC_CEE_FCOE_STATUS_SHIFT 0x8
+#define I40E_AQC_CEE_FCOE_STATUS_MASK (0x7 << I40E_AQC_CEE_FCOE_STATUS_SHIFT)
+#define I40E_AQC_CEE_ISCSI_STATUS_SHIFT 0xB
+#define I40E_AQC_CEE_ISCSI_STATUS_MASK (0x7 << I40E_AQC_CEE_ISCSI_STATUS_SHIFT)
+#define I40E_AQC_CEE_FIP_STATUS_SHIFT 0x10
+#define I40E_AQC_CEE_FIP_STATUS_MASK (0x7 << I40E_AQC_CEE_FIP_STATUS_SHIFT)
+
+/* struct i40e_aqc_get_cee_dcb_cfg_v1_resp was originally defined with
+ * word boundary layout issues, which the Linux compilers silently deal
+ * with by adding padding, making the actual struct larger than designed.
+ * However, the FW compiler for the NIC is less lenient and complains
+ * about the struct. Hence, the struct defined here has an extra byte in
+ * fields reserved3 and reserved4 to directly acknowledge that padding,
+ * and the new length is used in the length check macro.
+ */
struct i40e_aqc_get_cee_dcb_cfg_v1_resp {
u8 reserved1;
u8 oper_num_tc;
@@ -2077,9 +2130,9 @@ struct i40e_aqc_get_cee_dcb_cfg_v1_resp {
u8 reserved2;
u8 oper_tc_bw[8];
u8 oper_pfc_en;
- u8 reserved3;
+ u8 reserved3[2];
__le16 oper_app_prio;
- u8 reserved4;
+ u8 reserved4[2];
__le16 tlv_status;
};
@@ -2168,6 +2221,48 @@ struct i40e_aqc_del_udp_tunnel_completion {
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
+#ifdef X722_SUPPORT
+
+struct i40e_aqc_get_set_rss_key {
+#define I40E_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15)
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \
+ I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
+ __le16 vsi_id;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key);
+
+struct i40e_aqc_get_set_rss_key_data {
+ u8 standard_rss_key[0x28];
+ u8 extended_hash_key[0xc];
+};
+
+I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
+
+struct i40e_aqc_get_set_rss_lut {
+#define I40E_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15)
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \
+ I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
+ __le16 vsi_id;
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1
+ __le16 flags;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut);
+#endif
/* tunnel key structure 0x0B10 */
diff --git a/sys/dev/ixl/i40e_alloc.h b/sys/dev/ixl/i40e_alloc.h
index dc6fadd..dc6fadd 100755..100644
--- a/sys/dev/ixl/i40e_alloc.h
+++ b/sys/dev/ixl/i40e_alloc.h
diff --git a/sys/dev/ixl/i40e_common.c b/sys/dev/ixl/i40e_common.c
index dfb60aa..2a5f499 100755..100644
--- a/sys/dev/ixl/i40e_common.c
+++ b/sys/dev/ixl/i40e_common.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2014, Intel Corporation
+ Copyright (c) 2013-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -37,6 +37,7 @@
#include "i40e_prototype.h"
#include "i40e_virtchnl.h"
+
/**
* i40e_set_mac_type - Sets MAC type
* @hw: pointer to the HW structure
@@ -61,8 +62,24 @@ enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_QSFP_B:
case I40E_DEV_ID_QSFP_C:
case I40E_DEV_ID_10G_BASE_T:
+ case I40E_DEV_ID_10G_BASE_T4:
+ case I40E_DEV_ID_20G_KR2:
+ case I40E_DEV_ID_20G_KR2_A:
hw->mac.type = I40E_MAC_XL710;
break;
+#ifdef X722_SUPPORT
+ case I40E_DEV_ID_SFP_X722:
+ case I40E_DEV_ID_1G_BASE_T_X722:
+ case I40E_DEV_ID_10G_BASE_T_X722:
+ hw->mac.type = I40E_MAC_X722;
+ break;
+#endif
+#ifdef X722_SUPPORT
+ case I40E_DEV_ID_X722_VF:
+ case I40E_DEV_ID_X722_VF_HV:
+ hw->mac.type = I40E_MAC_X722_VF;
+ break;
+#endif
case I40E_DEV_ID_VF:
case I40E_DEV_ID_VF_HV:
hw->mac.type = I40E_MAC_VF;
@@ -81,6 +98,212 @@ enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
}
/**
+ * i40e_aq_str - convert AQ err code to a string
+ * @hw: pointer to the HW structure
+ * @aq_err: the AQ error code to convert
+ **/
+char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
+{
+ switch (aq_err) {
+ case I40E_AQ_RC_OK:
+ return "OK";
+ case I40E_AQ_RC_EPERM:
+ return "I40E_AQ_RC_EPERM";
+ case I40E_AQ_RC_ENOENT:
+ return "I40E_AQ_RC_ENOENT";
+ case I40E_AQ_RC_ESRCH:
+ return "I40E_AQ_RC_ESRCH";
+ case I40E_AQ_RC_EINTR:
+ return "I40E_AQ_RC_EINTR";
+ case I40E_AQ_RC_EIO:
+ return "I40E_AQ_RC_EIO";
+ case I40E_AQ_RC_ENXIO:
+ return "I40E_AQ_RC_ENXIO";
+ case I40E_AQ_RC_E2BIG:
+ return "I40E_AQ_RC_E2BIG";
+ case I40E_AQ_RC_EAGAIN:
+ return "I40E_AQ_RC_EAGAIN";
+ case I40E_AQ_RC_ENOMEM:
+ return "I40E_AQ_RC_ENOMEM";
+ case I40E_AQ_RC_EACCES:
+ return "I40E_AQ_RC_EACCES";
+ case I40E_AQ_RC_EFAULT:
+ return "I40E_AQ_RC_EFAULT";
+ case I40E_AQ_RC_EBUSY:
+ return "I40E_AQ_RC_EBUSY";
+ case I40E_AQ_RC_EEXIST:
+ return "I40E_AQ_RC_EEXIST";
+ case I40E_AQ_RC_EINVAL:
+ return "I40E_AQ_RC_EINVAL";
+ case I40E_AQ_RC_ENOTTY:
+ return "I40E_AQ_RC_ENOTTY";
+ case I40E_AQ_RC_ENOSPC:
+ return "I40E_AQ_RC_ENOSPC";
+ case I40E_AQ_RC_ENOSYS:
+ return "I40E_AQ_RC_ENOSYS";
+ case I40E_AQ_RC_ERANGE:
+ return "I40E_AQ_RC_ERANGE";
+ case I40E_AQ_RC_EFLUSHED:
+ return "I40E_AQ_RC_EFLUSHED";
+ case I40E_AQ_RC_BAD_ADDR:
+ return "I40E_AQ_RC_BAD_ADDR";
+ case I40E_AQ_RC_EMODE:
+ return "I40E_AQ_RC_EMODE";
+ case I40E_AQ_RC_EFBIG:
+ return "I40E_AQ_RC_EFBIG";
+ }
+
+ snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
+ return hw->err_str;
+}
+
+/**
+ * i40e_stat_str - convert status err code to a string
+ * @hw: pointer to the HW structure
+ * @stat_err: the status error code to convert
+ **/
+char *i40e_stat_str(struct i40e_hw *hw, enum i40e_status_code stat_err)
+{
+ switch (stat_err) {
+ case I40E_SUCCESS:
+ return "OK";
+ case I40E_ERR_NVM:
+ return "I40E_ERR_NVM";
+ case I40E_ERR_NVM_CHECKSUM:
+ return "I40E_ERR_NVM_CHECKSUM";
+ case I40E_ERR_PHY:
+ return "I40E_ERR_PHY";
+ case I40E_ERR_CONFIG:
+ return "I40E_ERR_CONFIG";
+ case I40E_ERR_PARAM:
+ return "I40E_ERR_PARAM";
+ case I40E_ERR_MAC_TYPE:
+ return "I40E_ERR_MAC_TYPE";
+ case I40E_ERR_UNKNOWN_PHY:
+ return "I40E_ERR_UNKNOWN_PHY";
+ case I40E_ERR_LINK_SETUP:
+ return "I40E_ERR_LINK_SETUP";
+ case I40E_ERR_ADAPTER_STOPPED:
+ return "I40E_ERR_ADAPTER_STOPPED";
+ case I40E_ERR_INVALID_MAC_ADDR:
+ return "I40E_ERR_INVALID_MAC_ADDR";
+ case I40E_ERR_DEVICE_NOT_SUPPORTED:
+ return "I40E_ERR_DEVICE_NOT_SUPPORTED";
+ case I40E_ERR_MASTER_REQUESTS_PENDING:
+ return "I40E_ERR_MASTER_REQUESTS_PENDING";
+ case I40E_ERR_INVALID_LINK_SETTINGS:
+ return "I40E_ERR_INVALID_LINK_SETTINGS";
+ case I40E_ERR_AUTONEG_NOT_COMPLETE:
+ return "I40E_ERR_AUTONEG_NOT_COMPLETE";
+ case I40E_ERR_RESET_FAILED:
+ return "I40E_ERR_RESET_FAILED";
+ case I40E_ERR_SWFW_SYNC:
+ return "I40E_ERR_SWFW_SYNC";
+ case I40E_ERR_NO_AVAILABLE_VSI:
+ return "I40E_ERR_NO_AVAILABLE_VSI";
+ case I40E_ERR_NO_MEMORY:
+ return "I40E_ERR_NO_MEMORY";
+ case I40E_ERR_BAD_PTR:
+ return "I40E_ERR_BAD_PTR";
+ case I40E_ERR_RING_FULL:
+ return "I40E_ERR_RING_FULL";
+ case I40E_ERR_INVALID_PD_ID:
+ return "I40E_ERR_INVALID_PD_ID";
+ case I40E_ERR_INVALID_QP_ID:
+ return "I40E_ERR_INVALID_QP_ID";
+ case I40E_ERR_INVALID_CQ_ID:
+ return "I40E_ERR_INVALID_CQ_ID";
+ case I40E_ERR_INVALID_CEQ_ID:
+ return "I40E_ERR_INVALID_CEQ_ID";
+ case I40E_ERR_INVALID_AEQ_ID:
+ return "I40E_ERR_INVALID_AEQ_ID";
+ case I40E_ERR_INVALID_SIZE:
+ return "I40E_ERR_INVALID_SIZE";
+ case I40E_ERR_INVALID_ARP_INDEX:
+ return "I40E_ERR_INVALID_ARP_INDEX";
+ case I40E_ERR_INVALID_FPM_FUNC_ID:
+ return "I40E_ERR_INVALID_FPM_FUNC_ID";
+ case I40E_ERR_QP_INVALID_MSG_SIZE:
+ return "I40E_ERR_QP_INVALID_MSG_SIZE";
+ case I40E_ERR_QP_TOOMANY_WRS_POSTED:
+ return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
+ case I40E_ERR_INVALID_FRAG_COUNT:
+ return "I40E_ERR_INVALID_FRAG_COUNT";
+ case I40E_ERR_QUEUE_EMPTY:
+ return "I40E_ERR_QUEUE_EMPTY";
+ case I40E_ERR_INVALID_ALIGNMENT:
+ return "I40E_ERR_INVALID_ALIGNMENT";
+ case I40E_ERR_FLUSHED_QUEUE:
+ return "I40E_ERR_FLUSHED_QUEUE";
+ case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
+ return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
+ case I40E_ERR_INVALID_IMM_DATA_SIZE:
+ return "I40E_ERR_INVALID_IMM_DATA_SIZE";
+ case I40E_ERR_TIMEOUT:
+ return "I40E_ERR_TIMEOUT";
+ case I40E_ERR_OPCODE_MISMATCH:
+ return "I40E_ERR_OPCODE_MISMATCH";
+ case I40E_ERR_CQP_COMPL_ERROR:
+ return "I40E_ERR_CQP_COMPL_ERROR";
+ case I40E_ERR_INVALID_VF_ID:
+ return "I40E_ERR_INVALID_VF_ID";
+ case I40E_ERR_INVALID_HMCFN_ID:
+ return "I40E_ERR_INVALID_HMCFN_ID";
+ case I40E_ERR_BACKING_PAGE_ERROR:
+ return "I40E_ERR_BACKING_PAGE_ERROR";
+ case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
+ return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
+ case I40E_ERR_INVALID_PBLE_INDEX:
+ return "I40E_ERR_INVALID_PBLE_INDEX";
+ case I40E_ERR_INVALID_SD_INDEX:
+ return "I40E_ERR_INVALID_SD_INDEX";
+ case I40E_ERR_INVALID_PAGE_DESC_INDEX:
+ return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
+ case I40E_ERR_INVALID_SD_TYPE:
+ return "I40E_ERR_INVALID_SD_TYPE";
+ case I40E_ERR_MEMCPY_FAILED:
+ return "I40E_ERR_MEMCPY_FAILED";
+ case I40E_ERR_INVALID_HMC_OBJ_INDEX:
+ return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
+ case I40E_ERR_INVALID_HMC_OBJ_COUNT:
+ return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
+ case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
+ return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
+ case I40E_ERR_SRQ_ENABLED:
+ return "I40E_ERR_SRQ_ENABLED";
+ case I40E_ERR_ADMIN_QUEUE_ERROR:
+ return "I40E_ERR_ADMIN_QUEUE_ERROR";
+ case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
+ return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
+ case I40E_ERR_BUF_TOO_SHORT:
+ return "I40E_ERR_BUF_TOO_SHORT";
+ case I40E_ERR_ADMIN_QUEUE_FULL:
+ return "I40E_ERR_ADMIN_QUEUE_FULL";
+ case I40E_ERR_ADMIN_QUEUE_NO_WORK:
+ return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
+ case I40E_ERR_BAD_IWARP_CQE:
+ return "I40E_ERR_BAD_IWARP_CQE";
+ case I40E_ERR_NVM_BLANK_MODE:
+ return "I40E_ERR_NVM_BLANK_MODE";
+ case I40E_ERR_NOT_IMPLEMENTED:
+ return "I40E_ERR_NOT_IMPLEMENTED";
+ case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
+ return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
+ case I40E_ERR_DIAG_TEST_FAILED:
+ return "I40E_ERR_DIAG_TEST_FAILED";
+ case I40E_ERR_NOT_READY:
+ return "I40E_ERR_NOT_READY";
+ case I40E_NOT_SUPPORTED:
+ return "I40E_NOT_SUPPORTED";
+ case I40E_ERR_FIRMWARE_API_VERSION:
+ return "I40E_ERR_FIRMWARE_API_VERSION";
+ }
+
+ snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
+ return hw->err_str;
+}
+
+/**
* i40e_debug_aq
* @hw: debug mask related to admin queue
* @mask: debug mask
@@ -152,9 +375,13 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
bool i40e_check_asq_alive(struct i40e_hw *hw)
{
if (hw->aq.asq.len)
- return !!(rd32(hw, hw->aq.asq.len) & I40E_PF_ATQLEN_ATQENABLE_MASK);
- else
- return FALSE;
+ if (!i40e_is_vf(hw))
+ return !!(rd32(hw, hw->aq.asq.len) &
+ I40E_PF_ATQLEN_ATQENABLE_MASK);
+ if (i40e_is_vf(hw))
+ return !!(rd32(hw, hw->aq.asq.len) &
+ I40E_VF_ATQLEN1_ATQENABLE_MASK);
+ return FALSE;
}
/**
@@ -182,6 +409,171 @@ enum i40e_status_code i40e_aq_queue_shutdown(struct i40e_hw *hw,
return status;
}
+#ifdef X722_SUPPORT
+
+/**
+ * i40e_aq_get_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set TRUE, for VSI table set FALSE
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ * @set: set TRUE to set the table, FALSE to get the table
+ *
+ * Internal function to get or set RSS look up table
+ **/
+static enum i40e_status_code i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
+ u16 vsi_id, bool pf_lut,
+ u8 *lut, u16 lut_size,
+ bool set)
+{
+ enum i40e_status_code status;
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_set_rss_lut *cmd_resp =
+ (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
+
+ if (set)
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_rss_lut);
+ else
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_rss_lut);
+
+ /* Indirect command */
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+
+ cmd_resp->vsi_id =
+ CPU_TO_LE16((u16)((vsi_id <<
+ I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
+ cmd_resp->vsi_id |= CPU_TO_LE16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
+
+ if (pf_lut)
+ cmd_resp->flags |= CPU_TO_LE16((u16)
+ ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+ else
+ cmd_resp->flags |= CPU_TO_LE16((u16)
+ ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+
+ cmd_resp->addr_high = CPU_TO_LE32(I40E_HI_WORD((u64)lut));
+ cmd_resp->addr_low = CPU_TO_LE32(I40E_LO_DWORD((u64)lut));
+
+ status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set TRUE, for VSI table set FALSE
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * get the RSS lookup table, PF or VSI type
+ **/
+enum i40e_status_code i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
+{
+ return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
+ FALSE);
+}
+
+/**
+ * i40e_aq_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set TRUE, for VSI table set FALSE
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * set the RSS lookup table, PF or VSI type
+ **/
+enum i40e_status_code i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
+{
+ return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, TRUE);
+}
+
+/**
+ * i40e_aq_get_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ * @set: set TRUE to set the key, FALSE to get the key
+ *
+ * get the RSS key per VSI
+ **/
+static enum i40e_status_code i40e_aq_get_set_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key,
+ bool set)
+{
+ enum i40e_status_code status;
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_set_rss_key *cmd_resp =
+ (struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
+ u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
+
+ if (set)
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_rss_key);
+ else
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_rss_key);
+
+ /* Indirect command */
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+
+ cmd_resp->vsi_id =
+ CPU_TO_LE16((u16)((vsi_id <<
+ I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
+ I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
+ cmd_resp->vsi_id |= CPU_TO_LE16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
+ cmd_resp->addr_high = CPU_TO_LE32(I40E_HI_WORD((u64)key));
+ cmd_resp->addr_low = CPU_TO_LE32(I40E_LO_DWORD((u64)key));
+
+ status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ * get the RSS key per VSI
+ **/
+enum i40e_status_code i40e_aq_get_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key)
+{
+ return i40e_aq_get_set_rss_key(hw, vsi_id, key, FALSE);
+}
+
+/**
+ * i40e_aq_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * set the RSS key per VSI
+ **/
+enum i40e_status_code i40e_aq_set_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key)
+{
+ return i40e_aq_get_set_rss_key(hw, vsi_id, key, TRUE);
+}
+#endif /* X722_SUPPORT */
/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
* hardware to a bit-field that can be used by SW to more easily determine the
@@ -596,6 +988,9 @@ enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw)
switch (hw->mac.type) {
case I40E_MAC_XL710:
+#ifdef X722_SUPPORT
+ case I40E_MAC_X722:
+#endif
break;
default:
return I40E_ERR_DEVICE_NOT_SUPPORTED;
@@ -840,12 +1235,15 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
case I40E_PHY_TYPE_10GBASE_CR1:
case I40E_PHY_TYPE_40GBASE_CR4:
case I40E_PHY_TYPE_10GBASE_SFPP_CU:
+ case I40E_PHY_TYPE_40GBASE_AOC:
+ case I40E_PHY_TYPE_10GBASE_AOC:
media = I40E_MEDIA_TYPE_DA;
break;
case I40E_PHY_TYPE_1000BASE_KX:
case I40E_PHY_TYPE_10GBASE_KX4:
case I40E_PHY_TYPE_10GBASE_KR:
case I40E_PHY_TYPE_40GBASE_KR4:
+ case I40E_PHY_TYPE_20GBASE_KR2:
media = I40E_MEDIA_TYPE_BACKPLANE;
break;
case I40E_PHY_TYPE_SGMII:
@@ -861,7 +1259,7 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
return media;
}
-#define I40E_PF_RESET_WAIT_COUNT 110
+#define I40E_PF_RESET_WAIT_COUNT 200
/**
* i40e_pf_reset - Reset the PF
* @hw: pointer to the hardware structure
@@ -883,7 +1281,7 @@ enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw)
grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
- for (cnt = 0; cnt < grst_del + 2; cnt++) {
+ for (cnt = 0; cnt < grst_del + 10; cnt++) {
reg = rd32(hw, I40E_GLGEN_RSTAT);
if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
break;
@@ -1174,9 +1572,9 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
blink = FALSE;
if (blink)
- gpio_val |= (1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
+ gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
else
- gpio_val &= ~(1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
+ gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
break;
@@ -1328,14 +1726,14 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
*aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
}
/* Update the link info */
- status = i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
+ status = i40e_update_link_info(hw);
if (status) {
/* Wait a little bit (on 40G cards it sometimes takes a really
* long time for link to come back from the atomic reset)
* and try once more
*/
i40e_msec_delay(1000);
- status = i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
+ status = i40e_update_link_info(hw);
}
if (status)
*aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;
@@ -1508,6 +1906,10 @@ enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
else
hw_link_info->lse_enable = FALSE;
+ if ((hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
+ hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
+ hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
+
/* save link status information */
if (link)
i40e_memcpy(link, hw_link_info, sizeof(*hw_link_info),
@@ -1831,6 +2233,74 @@ enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
}
/**
+ * i40e_aq_set_vsi_mc_promisc_on_vlan
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @enable: set MAC L2 layer multicast promiscuous enable/disable for a given VLAN
+ * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (enable)
+ flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
+
+ cmd->promiscuous_flags = CPU_TO_LE16(flags);
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
+ cmd->seid = CPU_TO_LE16(seid);
+ cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_vsi_uc_promisc_on_vlan
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
+ * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (enable)
+ flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
+
+ cmd->promiscuous_flags = CPU_TO_LE16(flags);
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+ cmd->seid = CPU_TO_LE16(seid);
+ cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
* i40e_aq_set_vsi_broadcast
* @hw: pointer to the hw struct
* @seid: vsi number
@@ -2061,30 +2531,56 @@ enum i40e_status_code i40e_aq_send_driver_version(struct i40e_hw *hw,
/**
* i40e_get_link_status - get status of the HW network link
* @hw: pointer to the hw struct
+ * @link_up: pointer to bool (TRUE/FALSE = linkup/linkdown)
*
- * Returns TRUE if link is up, FALSE if link is down.
+ * Sets *link_up to TRUE if link is up, FALSE if link is down.
+ * The value of *link_up is invalid if the returned status != I40E_SUCCESS
*
* Side effect: LinkStatusEvent reporting becomes enabled
**/
-bool i40e_get_link_status(struct i40e_hw *hw)
+enum i40e_status_code i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
{
enum i40e_status_code status = I40E_SUCCESS;
- bool link_status = FALSE;
if (hw->phy.get_link_info) {
- status = i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
+ status = i40e_update_link_info(hw);
if (status != I40E_SUCCESS)
- goto i40e_get_link_status_exit;
+ i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n",
+ status);
}
- link_status = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
+ *link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
-i40e_get_link_status_exit:
- return link_status;
+ return status;
}
/**
+ * i40e_update_link_info - update status of the HW network link
+ * @hw: pointer to the hw struct
+ **/
+enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw)
+{
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ status = i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
+ if (status)
+ return status;
+
+ status = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities,
+ NULL);
+ if (status)
+ return status;
+
+ memcpy(hw->phy.link_info.module_type, &abilities.module_type,
+ sizeof(hw->phy.link_info.module_type));
+
+ return status;
+}
+
+
+/**
* i40e_get_link_speed
* @hw: pointer to the hw struct
*
@@ -2212,6 +2708,7 @@ enum i40e_status_code i40e_aq_get_veb_parameters(struct i40e_hw *hw,
*vebs_free = LE16_TO_CPU(cmd_resp->vebs_free);
if (floating) {
u16 flags = LE16_TO_CPU(cmd_resp->veb_flags);
+
if (flags & I40E_AQC_ADD_VEB_FLOATING)
*floating = TRUE;
else
@@ -2744,6 +3241,27 @@ enum i40e_status_code i40e_aq_write_nvm_config(struct i40e_hw *hw,
}
/**
+ * i40e_aq_oem_post_update - triggers an OEM specific flow after update
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_oem_post_update(struct i40e_hw *hw,
+ void *buff, u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_oem_post_update);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ if (status && LE16_TO_CPU(desc.retval) == I40E_AQ_RC_ESRCH)
+ status = I40E_ERR_NOT_IMPLEMENTED;
+
+ return status;
+}
+
+/**
* i40e_aq_erase_nvm
* @hw: pointer to the hw struct
* @module_pointer: module pointer location in words from the NVM beginning
@@ -2807,12 +3325,13 @@ i40e_aq_erase_nvm_exit:
#define I40E_DEV_FUNC_CAP_MSIX_VF 0x44
#define I40E_DEV_FUNC_CAP_FLOW_DIRECTOR 0x45
#define I40E_DEV_FUNC_CAP_IEEE_1588 0x46
-#define I40E_DEV_FUNC_CAP_MFP_MODE_1 0xF1
+#define I40E_DEV_FUNC_CAP_FLEX10 0xF1
#define I40E_DEV_FUNC_CAP_CEM 0xF2
#define I40E_DEV_FUNC_CAP_IWARP 0x51
#define I40E_DEV_FUNC_CAP_LED 0x61
#define I40E_DEV_FUNC_CAP_SDP 0x62
#define I40E_DEV_FUNC_CAP_MDIO 0x63
+#define I40E_DEV_FUNC_CAP_WR_CSR_PROT 0x64
/**
* i40e_parse_discover_capabilities
@@ -2831,6 +3350,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
u32 valid_functions, num_functions;
u32 number, logical_id, phys_id;
struct i40e_hw_capabilities *p;
+ u8 major_rev;
u32 i = 0;
u16 id;
@@ -2848,6 +3368,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
number = LE32_TO_CPU(cap->number);
logical_id = LE32_TO_CPU(cap->logical_id);
phys_id = LE32_TO_CPU(cap->phys_id);
+ major_rev = cap->major_rev;
switch (id) {
case I40E_DEV_FUNC_CAP_SWITCH_MODE:
@@ -2922,9 +3443,21 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
case I40E_DEV_FUNC_CAP_MSIX_VF:
p->num_msix_vectors_vf = number;
break;
- case I40E_DEV_FUNC_CAP_MFP_MODE_1:
- if (number == 1)
- p->mfp_mode_1 = TRUE;
+ case I40E_DEV_FUNC_CAP_FLEX10:
+ if (major_rev == 1) {
+ if (number == 1) {
+ p->flex10_enable = TRUE;
+ p->flex10_capable = TRUE;
+ }
+ } else {
+ /* Capability revision >= 2 */
+ if (number & 1)
+ p->flex10_enable = TRUE;
+ if (number & 2)
+ p->flex10_capable = TRUE;
+ }
+ p->flex10_mode = logical_id;
+ p->flex10_status = phys_id;
break;
case I40E_DEV_FUNC_CAP_CEM:
if (number == 1)
@@ -2957,11 +3490,18 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
p->fd_filters_guaranteed = number;
p->fd_filters_best_effort = logical_id;
break;
+ case I40E_DEV_FUNC_CAP_WR_CSR_PROT:
+ p->wr_csr_prot = (u64)number;
+ p->wr_csr_prot |= (u64)logical_id << 32;
+ break;
default:
break;
}
}
+ if (p->fcoe)
+ i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n");
+
/* Always disable FCoE if compiled without the I40E_FCOE_ENA flag */
p->fcoe = FALSE;
@@ -4917,6 +5457,63 @@ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
}
/**
+ * i40e_aq_debug_dump
+ * @hw: pointer to the hardware structure
+ * @cluster_id: specific cluster to dump
+ * @table_id: table id within cluster
+ * @start_index: index of line in the block to read
+ * @buff_size: dump buffer size
+ * @buff: dump buffer
+ * @ret_buff_size: actual buffer size returned
+ * @ret_next_table: next block to read
+ * @ret_next_index: next index to read
+ *
+ * Dump internal FW/HW data for debug purposes.
+ *
+ **/
+enum i40e_status_code i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
+ u8 table_id, u32 start_index, u16 buff_size,
+ void *buff, u16 *ret_buff_size,
+ u8 *ret_next_table, u32 *ret_next_index,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_debug_dump_internals *cmd =
+ (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
+ struct i40e_aqc_debug_dump_internals *resp =
+ (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (buff_size == 0 || !buff)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_debug_dump_internals);
+ /* Indirect Command */
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ cmd->cluster_id = cluster_id;
+ cmd->table_id = table_id;
+ cmd->idx = CPU_TO_LE32(start_index);
+
+ desc.datalen = CPU_TO_LE16(buff_size);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ if (!status) {
+ if (ret_buff_size != NULL)
+ *ret_buff_size = LE16_TO_CPU(desc.datalen);
+ if (ret_next_table != NULL)
+ *ret_next_table = resp->table_id;
+ if (ret_next_index != NULL)
+ *ret_next_index = LE32_TO_CPU(resp->idx);
+ }
+
+ return status;
+}
+
+/**
* i40e_read_bw_from_alt_ram
* @hw: pointer to the hardware structure
* @max_bw: pointer for max_bw read
@@ -4935,11 +5532,11 @@ enum i40e_status_code i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
/* Calculate the address of the min/max bw registers */
max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
- I40E_ALT_STRUCT_MAX_BW_OFFSET +
- (I40E_ALT_STRUCT_DWORDS_PER_PF*hw->pf_id);
+ I40E_ALT_STRUCT_MAX_BW_OFFSET +
+ (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
- I40E_ALT_STRUCT_MIN_BW_OFFSET +
- (I40E_ALT_STRUCT_DWORDS_PER_PF*hw->pf_id);
+ I40E_ALT_STRUCT_MIN_BW_OFFSET +
+ (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
/* Read the bandwidths from alt ram */
status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw,
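
[Editor's note: i40e_stat_str() and i40e_aq_str(), added above, map driver status codes and admin-queue return codes to their symbolic names, falling back to a decimal rendering stored in hw->err_str for unknown values. A hedged usage sketch — the surrounding device_printf() call is illustrative, not quoted from the ixl driver:

    status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
    if (status != I40E_SUCCESS)
        device_printf(dev, "AQ command failed: %s (aq_err %s)\n",
            i40e_stat_str(hw, status),
            i40e_aq_str(hw, hw->aq.asq_last_status));
]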
diff --git a/sys/dev/ixl/i40e_devids.h b/sys/dev/ixl/i40e_devids.h
new file mode 100644
index 0000000..abd597a
--- /dev/null
+++ b/sys/dev/ixl/i40e_devids.h
@@ -0,0 +1,68 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2015, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _I40E_DEVIDS_H_
+#define _I40E_DEVIDS_H_
+
+/* Vendor ID */
+#define I40E_INTEL_VENDOR_ID 0x8086
+
+/* Device IDs */
+#define I40E_DEV_ID_SFP_XL710 0x1572
+#define I40E_DEV_ID_QEMU 0x1574
+#define I40E_DEV_ID_KX_A 0x157F
+#define I40E_DEV_ID_KX_B 0x1580
+#define I40E_DEV_ID_KX_C 0x1581
+#define I40E_DEV_ID_QSFP_A 0x1583
+#define I40E_DEV_ID_QSFP_B 0x1584
+#define I40E_DEV_ID_QSFP_C 0x1585
+#define I40E_DEV_ID_10G_BASE_T 0x1586
+#define I40E_DEV_ID_20G_KR2 0x1587
+#define I40E_DEV_ID_20G_KR2_A 0x1588
+#define I40E_DEV_ID_10G_BASE_T4 0x1589
+#define I40E_DEV_ID_VF 0x154C
+#define I40E_DEV_ID_VF_HV 0x1571
+#ifdef X722_SUPPORT
+#define I40E_DEV_ID_SFP_X722 0x37D0
+#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
+#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
+#define I40E_DEV_ID_X722_VF 0x37CD
+#define I40E_DEV_ID_X722_VF_HV 0x37D9
+#endif /* X722_SUPPORT */
+
+#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
+ (d) == I40E_DEV_ID_QSFP_B || \
+ (d) == I40E_DEV_ID_QSFP_C)
+
+#endif /* _I40E_DEVIDS_H_ */
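
[Editor's note: the new i40e_devids.h gathers the PCI device IDs into one header, and i40e_is_40G_device() matches only the three QSFP parts defined just above. A minimal, hypothetical attach-time check — pci_get_device() is the standard FreeBSD PCI accessor, and the is_40g field is invented for this sketch:

    /* During probe/attach, flag 40G-capable (QSFP A/B/C) adapters. */
    if (i40e_is_40G_device(pci_get_device(dev)))
        sc->is_40g = TRUE;    /* sc->is_40g: made-up softc field */
]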
diff --git a/sys/dev/ixl/i40e_hmc.c b/sys/dev/ixl/i40e_hmc.c
index 81cb2ae..513f1d5 100755..100644
--- a/sys/dev/ixl/i40e_hmc.c
+++ b/sys/dev/ixl/i40e_hmc.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2014, Intel Corporation
+ Copyright (c) 2013-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -130,6 +130,7 @@ exit:
* @hw: pointer to our HW structure
* @hmc_info: pointer to the HMC configuration information structure
* @pd_index: which page descriptor index to manipulate
+ * @rsrc_pg: if not NULL, use this preallocated page instead of allocating a new one
*
* This function:
* 1. Initializes the pd entry
@@ -143,12 +144,14 @@ exit:
**/
enum i40e_status_code i40e_add_pd_table_entry(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
- u32 pd_index)
+ u32 pd_index,
+ struct i40e_dma_mem *rsrc_pg)
{
enum i40e_status_code ret_code = I40E_SUCCESS;
struct i40e_hmc_pd_table *pd_table;
struct i40e_hmc_pd_entry *pd_entry;
struct i40e_dma_mem mem;
+ struct i40e_dma_mem *page = &mem;
u32 sd_idx, rel_pd_idx;
u64 *pd_addr;
u64 page_desc;
@@ -169,19 +172,25 @@ enum i40e_status_code i40e_add_pd_table_entry(struct i40e_hw *hw,
pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
pd_entry = &pd_table->pd_entry[rel_pd_idx];
if (!pd_entry->valid) {
- /* allocate a 4K backing page */
- ret_code = i40e_allocate_dma_mem(hw, &mem, i40e_mem_bp,
- I40E_HMC_PAGED_BP_SIZE,
- I40E_HMC_PD_BP_BUF_ALIGNMENT);
- if (ret_code)
- goto exit;
+ if (rsrc_pg) {
+ pd_entry->rsrc_pg = TRUE;
+ page = rsrc_pg;
+ } else {
+ /* allocate a 4K backing page */
+ ret_code = i40e_allocate_dma_mem(hw, page, i40e_mem_bp,
+ I40E_HMC_PAGED_BP_SIZE,
+ I40E_HMC_PD_BP_BUF_ALIGNMENT);
+ if (ret_code)
+ goto exit;
+ pd_entry->rsrc_pg = FALSE;
+ }
- i40e_memcpy(&pd_entry->bp.addr, &mem,
+ i40e_memcpy(&pd_entry->bp.addr, page,
sizeof(struct i40e_dma_mem), I40E_NONDMA_TO_NONDMA);
pd_entry->bp.sd_pd_index = pd_index;
pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;
/* Set page address and valid bit */
- page_desc = mem.pa | 0x1;
+ page_desc = page->pa | 0x1;
pd_addr = (u64 *)pd_table->pd_page_addr.va;
pd_addr += rel_pd_idx;
@@ -256,7 +265,8 @@ enum i40e_status_code i40e_remove_pd_bp(struct i40e_hw *hw,
I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
/* free memory here */
- ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
+ if (!pd_entry->rsrc_pg)
+ ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
if (I40E_SUCCESS != ret_code)
goto exit;
if (!pd_table->ref_cnt)
@@ -303,21 +313,15 @@ enum i40e_status_code i40e_remove_sd_bp_new(struct i40e_hw *hw,
u32 idx, bool is_pf)
{
struct i40e_hmc_sd_entry *sd_entry;
- enum i40e_status_code ret_code = I40E_SUCCESS;
+
+ if (!is_pf)
+ return I40E_NOT_SUPPORTED;
/* get the entry and decrease its ref counter */
sd_entry = &hmc_info->sd_table.sd_entry[idx];
- if (is_pf) {
- I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
- } else {
- ret_code = I40E_NOT_SUPPORTED;
- goto exit;
- }
- ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr));
- if (I40E_SUCCESS != ret_code)
- goto exit;
-exit:
- return ret_code;
+ I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
+
+ return i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr));
}
/**
@@ -357,20 +361,13 @@ enum i40e_status_code i40e_remove_pd_page_new(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
u32 idx, bool is_pf)
{
- enum i40e_status_code ret_code = I40E_SUCCESS;
struct i40e_hmc_sd_entry *sd_entry;
+ if (!is_pf)
+ return I40E_NOT_SUPPORTED;
+
sd_entry = &hmc_info->sd_table.sd_entry[idx];
- if (is_pf) {
- I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
- } else {
- ret_code = I40E_NOT_SUPPORTED;
- goto exit;
- }
- /* free memory here */
- ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr));
- if (I40E_SUCCESS != ret_code)
- goto exit;
-exit:
- return ret_code;
+ I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
+
+ return i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr));
}
diff --git a/sys/dev/ixl/i40e_hmc.h b/sys/dev/ixl/i40e_hmc.h
index cba325f..1a9995a 100755..100644
--- a/sys/dev/ixl/i40e_hmc.h
+++ b/sys/dev/ixl/i40e_hmc.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2014, Intel Corporation
+ Copyright (c) 2013-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -70,6 +70,7 @@ struct i40e_hmc_bp {
struct i40e_hmc_pd_entry {
struct i40e_hmc_bp bp;
u32 sd_index;
+ bool rsrc_pg;
bool valid;
};
@@ -134,8 +135,8 @@ struct i40e_hmc_info {
I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \
- (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
- val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
+ val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \
wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
wr32((hw), I40E_PFHMC_SDCMD, val3); \
@@ -154,7 +155,7 @@ struct i40e_hmc_info {
I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \
- val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \
wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
wr32((hw), I40E_PFHMC_SDCMD, val3); \
@@ -226,7 +227,8 @@ enum i40e_status_code i40e_add_sd_table_entry(struct i40e_hw *hw,
enum i40e_status_code i40e_add_pd_table_entry(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
- u32 pd_index);
+ u32 pd_index,
+ struct i40e_dma_mem *rsrc_pg);
enum i40e_status_code i40e_remove_pd_bp(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
u32 idx);
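
[Editor's note: the BIT()/BIT_ULL() substitutions in the i40e_hmc.h hunks above and in i40e_lan_hmc.c below do not change behavior; they name the shifted-constant idiom, with BIT_ULL() forcing a 64-bit operand so shift counts of 32 or more remain defined. Assuming the conventional definitions (this commit also touches i40e_osdep.h, where they would plausibly live):

    #define BIT(a)      (1UL << (a))    /* single set bit, long width */
    #define BIT_ULL(a)  (1ULL << (a))   /* 64-bit; safe for a >= 32 */

    /* e.g. the i40e_lan_hmc.c change is an identity rewrite:
     *   obj->size = (u64)1 << size_exp;  ==  obj->size = BIT_ULL(size_exp);
     */
]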
diff --git a/sys/dev/ixl/i40e_lan_hmc.c b/sys/dev/ixl/i40e_lan_hmc.c
index 078adef..f6c9514 100755..100644
--- a/sys/dev/ixl/i40e_lan_hmc.c
+++ b/sys/dev/ixl/i40e_lan_hmc.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2014, Intel Corporation
+ Copyright (c) 2013-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -137,7 +137,7 @@ enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
obj->cnt = txq_num;
obj->base = 0;
size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ);
- obj->size = (u64)1 << size_exp;
+ obj->size = BIT_ULL(size_exp);
/* validate values requested by driver don't exceed HMC capacity */
if (txq_num > obj->max_cnt) {
@@ -160,7 +160,7 @@ enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size);
obj->base = i40e_align_l2obj_base(obj->base);
size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ);
- obj->size = (u64)1 << size_exp;
+ obj->size = BIT_ULL(size_exp);
/* validate values requested by driver don't exceed HMC capacity */
if (rxq_num > obj->max_cnt) {
@@ -183,7 +183,7 @@ enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size);
obj->base = i40e_align_l2obj_base(obj->base);
size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ);
- obj->size = (u64)1 << size_exp;
+ obj->size = BIT_ULL(size_exp);
/* validate values requested by driver don't exceed HMC capacity */
if (fcoe_cntx_num > obj->max_cnt) {
@@ -206,7 +206,7 @@ enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size);
obj->base = i40e_align_l2obj_base(obj->base);
size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ);
- obj->size = (u64)1 << size_exp;
+ obj->size = BIT_ULL(size_exp);
/* validate values requested by driver don't exceed HMC capacity */
if (fcoe_filt_num > obj->max_cnt) {
@@ -395,7 +395,7 @@ enum i40e_status_code i40e_create_lan_hmc_object(struct i40e_hw *hw,
/* update the pd table entry */
ret_code = i40e_add_pd_table_entry(hw,
info->hmc_info,
- i);
+ i, NULL);
if (I40E_SUCCESS != ret_code) {
pd_error = TRUE;
break;
@@ -439,9 +439,8 @@ exit_sd_error:
pd_idx1 = max(pd_idx,
((j - 1) * I40E_HMC_MAX_BP_COUNT));
pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
- for (i = pd_idx1; i < pd_lmt1; i++) {
+ for (i = pd_idx1; i < pd_lmt1; i++)
i40e_remove_pd_bp(hw, info->hmc_info, i);
- }
i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
break;
case I40E_SD_TYPE_DIRECT:
@@ -771,7 +770,7 @@ static void i40e_write_byte(u8 *hmc_bits,
/* prepare the bits and mask */
shift_width = ce_info->lsb % 8;
- mask = ((u8)1 << ce_info->width) - 1;
+ mask = BIT(ce_info->width) - 1;
src_byte = *from;
src_byte &= mask;
@@ -812,7 +811,7 @@ static void i40e_write_word(u8 *hmc_bits,
/* prepare the bits and mask */
shift_width = ce_info->lsb % 8;
- mask = ((u16)1 << ce_info->width) - 1;
+ mask = BIT(ce_info->width) - 1;
/* don't swizzle the bits until after the mask because the mask bits
* will be in a different bit position on big endian machines
@@ -862,7 +861,7 @@ static void i40e_write_dword(u8 *hmc_bits,
* to 5 bits so the shift will do nothing
*/
if (ce_info->width < 32)
- mask = ((u32)1 << ce_info->width) - 1;
+ mask = BIT(ce_info->width) - 1;
else
mask = ~(u32)0;
@@ -914,7 +913,7 @@ static void i40e_write_qword(u8 *hmc_bits,
* to 6 bits so the shift will do nothing
*/
if (ce_info->width < 64)
- mask = ((u64)1 << ce_info->width) - 1;
+ mask = BIT_ULL(ce_info->width) - 1;
else
mask = ~(u64)0;
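The write/read helpers above all build an all-ones mask of ce_info->width bits as BIT(width) - 1, which is only defined while width is strictly less than the operand's bit width; that is why the dword and qword variants special-case the full-width mask. A standalone sketch of the guarded mask construction, assuming nothing beyond stdint:

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(a)     (1UL << (a))
    #define BIT_ULL(a) (1ULL << (a))

    /* All-ones mask of the low 'width' bits.  Shifting a value by
     * its full width is undefined in C, so the full-width case is
     * handled explicitly, mirroring i40e_write_dword() and
     * i40e_write_qword() above. */
    static uint32_t mask32(unsigned int width)
    {
        return (width < 32) ? (uint32_t)(BIT(width) - 1) : ~(uint32_t)0;
    }

    static uint64_t mask64(unsigned int width)
    {
        return (width < 64) ? (uint64_t)(BIT_ULL(width) - 1) : ~(uint64_t)0;
    }

    int main(void)
    {
        printf("%#x %#llx\n", mask32(12), (unsigned long long)mask64(40));
        return 0;
    }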
@@ -956,7 +955,7 @@ static void i40e_read_byte(u8 *hmc_bits,
/* prepare the bits and mask */
shift_width = ce_info->lsb % 8;
- mask = ((u8)1 << ce_info->width) - 1;
+ mask = BIT(ce_info->width) - 1;
/* shift to correct alignment */
mask <<= shift_width;
@@ -994,7 +993,7 @@ static void i40e_read_word(u8 *hmc_bits,
/* prepare the bits and mask */
shift_width = ce_info->lsb % 8;
- mask = ((u16)1 << ce_info->width) - 1;
+ mask = BIT(ce_info->width) - 1;
/* shift to correct alignment */
mask <<= shift_width;
@@ -1044,7 +1043,7 @@ static void i40e_read_dword(u8 *hmc_bits,
* to 5 bits so the shift will do nothing
*/
if (ce_info->width < 32)
- mask = ((u32)1 << ce_info->width) - 1;
+ mask = BIT(ce_info->width) - 1;
else
mask = ~(u32)0;
@@ -1097,7 +1096,7 @@ static void i40e_read_qword(u8 *hmc_bits,
* to 6 bits so the shift will do nothing
*/
if (ce_info->width < 64)
- mask = ((u64)1 << ce_info->width) - 1;
+ mask = BIT_ULL(ce_info->width) - 1;
else
mask = ~(u64)0;
@@ -1218,7 +1217,7 @@ static enum i40e_status_code i40e_set_hmc_context(u8 *context_bytes,
/**
* i40e_hmc_get_object_va - retrieves an object's virtual address
- * @hmc_info: pointer to i40e_hmc_info struct
+ * @hw: pointer to the hw structure
* @object_base: pointer to u64 to get the va
* @rsrc_type: the hmc resource type
* @obj_idx: hmc object index
@@ -1227,12 +1226,13 @@ static enum i40e_status_code i40e_set_hmc_context(u8 *context_bytes,
* base pointer. This function is used for LAN Queue contexts.
**/
static
-enum i40e_status_code i40e_hmc_get_object_va(struct i40e_hmc_info *hmc_info,
+enum i40e_status_code i40e_hmc_get_object_va(struct i40e_hw *hw,
u8 **object_base,
enum i40e_hmc_lan_rsrc_type rsrc_type,
u32 obj_idx)
{
u32 obj_offset_in_sd, obj_offset_in_pd;
+ struct i40e_hmc_info *hmc_info = &hw->hmc;
struct i40e_hmc_sd_entry *sd_entry;
struct i40e_hmc_pd_entry *pd_entry;
u32 pd_idx, pd_lmt, rel_pd_idx;
@@ -1304,8 +1304,7 @@ enum i40e_status_code i40e_get_lan_tx_queue_context(struct i40e_hw *hw,
enum i40e_status_code err;
u8 *context_bytes;
- err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
- I40E_HMC_LAN_TX, queue);
+ err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue);
if (err < 0)
return err;
@@ -1324,8 +1323,7 @@ enum i40e_status_code i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
enum i40e_status_code err;
u8 *context_bytes;
- err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
- I40E_HMC_LAN_TX, queue);
+ err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue);
if (err < 0)
return err;
@@ -1345,8 +1343,7 @@ enum i40e_status_code i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
enum i40e_status_code err;
u8 *context_bytes;
- err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
- I40E_HMC_LAN_TX, queue);
+ err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue);
if (err < 0)
return err;
@@ -1367,8 +1364,7 @@ enum i40e_status_code i40e_get_lan_rx_queue_context(struct i40e_hw *hw,
enum i40e_status_code err;
u8 *context_bytes;
- err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
- I40E_HMC_LAN_RX, queue);
+ err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue);
if (err < 0)
return err;
@@ -1387,8 +1383,7 @@ enum i40e_status_code i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
enum i40e_status_code err;
u8 *context_bytes;
- err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
- I40E_HMC_LAN_RX, queue);
+ err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue);
if (err < 0)
return err;
@@ -1408,8 +1403,7 @@ enum i40e_status_code i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
enum i40e_status_code err;
u8 *context_bytes;
- err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
- I40E_HMC_LAN_RX, queue);
+ err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue);
if (err < 0)
return err;
diff --git a/sys/dev/ixl/i40e_lan_hmc.h b/sys/dev/ixl/i40e_lan_hmc.h
index 8b73570..8b73570 100755..100644
--- a/sys/dev/ixl/i40e_lan_hmc.h
+++ b/sys/dev/ixl/i40e_lan_hmc.h
diff --git a/sys/dev/ixl/i40e_nvm.c b/sys/dev/ixl/i40e_nvm.c
index 35001a5..5ae7515 100755..100644
--- a/sys/dev/ixl/i40e_nvm.c
+++ b/sys/dev/ixl/i40e_nvm.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2014, Intel Corporation
+ Copyright (c) 2013-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -72,7 +72,7 @@ enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw)
sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
I40E_GLNVM_GENS_SR_SIZE_SHIFT);
/* Switching to words (sr_size contains power of 2KB) */
- nvm->sr_size = (1 << sr_size) * I40E_SR_WORDS_IN_1KB;
+ nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
/* Check if we are in the normal or blank NVM programming mode */
fla = rd32(hw, I40E_GLNVM_FLA);
@@ -158,10 +158,26 @@ i40e_i40e_acquire_nvm_exit:
**/
void i40e_release_nvm(struct i40e_hw *hw)
{
+ enum i40e_status_code ret_code = I40E_SUCCESS;
+ u32 total_delay = 0;
+
DEBUGFUNC("i40e_release_nvm");
- if (!hw->nvm.blank_nvm_mode)
- i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+ if (hw->nvm.blank_nvm_mode)
+ return;
+
+ ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+
+ /* there are some rare cases where trying to release the resource
+ * results in an admin Q timeout, so handle them correctly
+ */
+ while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
+ (total_delay < hw->aq.asq_cmd_timeout)) {
+ i40e_msec_delay(1);
+ ret_code = i40e_aq_release_resource(hw,
+ I40E_NVM_RESOURCE_ID, 0, NULL);
+ total_delay++;
+ }
}
/**
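The reworked i40e_release_nvm() above now retries a release that times out on the admin queue, at most once per millisecond and bounded by hw->aq.asq_cmd_timeout. A self-contained sketch of the same bounded-retry shape; do_release() and msec_delay() are hypothetical stand-ins for i40e_aq_release_resource() and i40e_msec_delay():

    enum status { OK = 0, AQ_TIMEOUT = 1 };

    /* Hypothetical stubs; the real calls go through the admin queue. */
    static enum status do_release(void) { return OK; }
    static void msec_delay(unsigned int ms) { (void)ms; }

    /* Bounded retry: try again at most once per millisecond until
     * the command timeout is exhausted, matching the shape of the
     * new i40e_release_nvm(). */
    static void release_with_retry(unsigned int timeout_ms)
    {
        enum status ret = do_release();
        unsigned int total_delay = 0;

        while (ret == AQ_TIMEOUT && total_delay < timeout_ms) {
            msec_delay(1);
            ret = do_release();
            total_delay++;
        }
    }

    int main(void)
    {
        release_with_retry(100);
        return 0;
    }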
@@ -202,6 +218,10 @@ static enum i40e_status_code i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
u16 *data)
{
+#ifdef X722_SUPPORT
+ if (hw->mac.type == I40E_MAC_X722)
+ return i40e_read_nvm_word_aq(hw, offset, data);
+#endif
return i40e_read_nvm_word_srctl(hw, offset, data);
}
@@ -233,8 +253,8 @@ enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
ret_code = i40e_poll_sr_srctl_done_bit(hw);
if (ret_code == I40E_SUCCESS) {
/* Write the address and start reading */
- sr_reg = (u32)(offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
- (1 << I40E_GLNVM_SRCTL_START_SHIFT);
+ sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
+ BIT(I40E_GLNVM_SRCTL_START_SHIFT);
wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
/* Poll I40E_GLNVM_SRCTL until the done bit is set */
@@ -290,6 +310,10 @@ enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
u16 *words, u16 *data)
{
+#ifdef X722_SUPPORT
+ if (hw->mac.type == I40E_MAC_X722)
+ return i40e_read_nvm_buffer_aq(hw, offset, words, data);
+#endif
return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
}
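Both NVM read entry points now branch on the MAC type under X722_SUPPORT: X722 parts go through the admin-queue reader, everything else through the GLNVM_SRCTL shadow-RAM path. A minimal sketch of the dispatch pattern; the enum and reader functions here are illustrative stubs, not driver API:

    #include <stdint.h>

    enum mac_type { MAC_XL710, MAC_X722 };

    static int read_word_srctl(uint16_t off, uint16_t *d) { (void)off; *d = 0; return 0; }
    static int read_word_aq(uint16_t off, uint16_t *d)    { (void)off; *d = 0; return 0; }

    /* Mirrors i40e_read_nvm_word(): X722 must use the AQ path. */
    static int read_nvm_word(enum mac_type mac, uint16_t off, uint16_t *data)
    {
        if (mac == MAC_X722)
            return read_word_aq(off, data);
        return read_word_srctl(off, data);
    }

    int main(void)
    {
        uint16_t w;
        return read_nvm_word(MAC_X722, 0x48, &w);
    }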
@@ -401,9 +425,13 @@ enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
bool last_command)
{
enum i40e_status_code ret_code = I40E_ERR_NVM;
+ struct i40e_asq_cmd_details cmd_details;
DEBUGFUNC("i40e_read_nvm_aq");
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
/* Here we are checking the SR limit only for the flat memory model.
* We cannot do it for the module-based model, as we did not acquire
* the NVM resource yet (we cannot get the module pointer value).
@@ -428,7 +456,7 @@ enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
ret_code = i40e_aq_read_nvm(hw, module_pointer,
2 * offset, /*bytes*/
2 * words, /*bytes*/
- data, last_command, NULL);
+ data, last_command, &cmd_details);
return ret_code;
}
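i40e_read_nvm_aq() above (and i40e_write_nvm_aq() below) now passes an i40e_asq_cmd_details whose wb_desc points at hw->nvm_wb_desc, so the completed descriptor is copied back for later inspection rather than discarded. A simplified sketch of the zero-then-point pattern; the struct layouts here are reduced stand-ins, not the real driver types:

    #include <string.h>

    struct aq_desc { unsigned short opcode, retval; };

    /* Reduced stand-in for struct i40e_asq_cmd_details: only the
     * writeback pointer is modeled. */
    struct cmd_details { struct aq_desc *wb_desc; };

    struct hw_state { struct aq_desc nvm_wb_desc; };

    static void issue_nvm_read(struct hw_state *hw)
    {
        struct cmd_details details;

        /* Zero the whole struct, then ask that the completed
         * descriptor be written back into the hw state. */
        memset(&details, 0, sizeof(details));
        details.wb_desc = &hw->nvm_wb_desc;

        /* ... hand &details to the admin-queue send routine ... */
        (void)details;
    }

    int main(void)
    {
        struct hw_state hw = { { 0, 0 } };
        issue_nvm_read(&hw);
        return 0;
    }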
@@ -449,9 +477,13 @@ enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
bool last_command)
{
enum i40e_status_code ret_code = I40E_ERR_NVM;
+ struct i40e_asq_cmd_details cmd_details;
DEBUGFUNC("i40e_write_nvm_aq");
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
/* Here we are checking the SR limit only for the flat memory model.
* We cannot do it for the module-based model, as we did not acquire
* the NVM resource yet (we cannot get the module pointer value).
@@ -470,7 +502,7 @@ enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
ret_code = i40e_aq_update_nvm(hw, module_pointer,
2 * offset, /*bytes*/
2 * words, /*bytes*/
- data, last_command, NULL);
+ data, last_command, &cmd_details);
return ret_code;
}
@@ -580,6 +612,7 @@ enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
/* Read SR page */
if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;
+
ret_code = i40e_read_nvm_buffer(hw, i, &words, data);
if (ret_code != I40E_SUCCESS) {
ret_code = I40E_ERR_NVM_CHECKSUM;
@@ -625,13 +658,15 @@ enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw)
{
enum i40e_status_code ret_code = I40E_SUCCESS;
u16 checksum;
+ __le16 le_sum;
DEBUGFUNC("i40e_update_nvm_checksum");
ret_code = i40e_calc_nvm_checksum(hw, &checksum);
+ le_sum = CPU_TO_LE16(checksum);
if (ret_code == I40E_SUCCESS)
ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
- 1, &checksum, TRUE);
+ 1, &le_sum, TRUE);
return ret_code;
}
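The checksum update now converts the host-order result to little-endian before handing it to i40e_write_nvm_aq(), since shadow-RAM words are LE on the wire. A portable sketch of what CPU_TO_LE16() amounts to (the driver's own macro lives in its osdep layer):

    #include <stdint.h>
    #include <stdio.h>

    /* Portable stand-in for CPU_TO_LE16(): a no-op on little-endian
     * hosts, a byte swap on big-endian ones. */
    static uint16_t cpu_to_le16(uint16_t v)
    {
    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        return (uint16_t)((v << 8) | (v >> 8));
    #else
        return v;
    #endif
    }

    int main(void)
    {
        uint16_t checksum = 0xBABA;              /* host-order result */
        uint16_t le_sum = cpu_to_le16(checksum); /* what goes to NVM */

        printf("wire value: %#x\n", le_sum);
        return 0;
    }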
diff --git a/sys/dev/ixl/i40e_osdep.c b/sys/dev/ixl/i40e_osdep.c
index b29db64..b29db64 100755..100644
--- a/sys/dev/ixl/i40e_osdep.c
+++ b/sys/dev/ixl/i40e_osdep.c
diff --git a/sys/dev/ixl/i40e_osdep.h b/sys/dev/ixl/i40e_osdep.h
index 83e8922..67a4b18 100755..100644
--- a/sys/dev/ixl/i40e_osdep.h
+++ b/sys/dev/ixl/i40e_osdep.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2014, Intel Corporation
+ Copyright (c) 2013-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -112,6 +112,9 @@
#define FIELD_SIZEOF(x, y) (sizeof(((x*)0)->y))
+#define BIT(a) (1UL << (a))
+#define BIT_ULL(a) (1ULL << (a))
+
typedef uint8_t u8;
typedef int8_t s8;
typedef uint16_t u16;
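One caveat on the macros just added above: BIT() expands with 1UL, so its width follows unsigned long (32 bits on i386, 64 on amd64), while BIT_ULL() is always 64 bits. Fields above bit 31 should therefore always go through BIT_ULL(). A compile-time check of that assumption:

    #include <stdint.h>

    #define BIT(a)     (1UL << (a))
    #define BIT_ULL(a) (1ULL << (a))

    /* BIT_ULL() must be 64 bits wide on every platform, including
     * 32-bit targets where BIT() is not. */
    _Static_assert(sizeof(BIT_ULL(0)) == sizeof(uint64_t),
        "BIT_ULL must be 64-bit everywhere");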
@@ -189,7 +192,7 @@ rd32_osdep(struct i40e_osdep *osdep, uint32_t reg)
{
KASSERT(reg < osdep->mem_bus_space_size,
- ("ixl: register offset %#jx too large (max is %#jx",
+ ("ixl: register offset %#jx too large (max is %#jx)",
(uintmax_t)reg, (uintmax_t)osdep->mem_bus_space_size));
return (bus_space_read_4(osdep->mem_bus_space_tag,
@@ -201,7 +204,7 @@ wr32_osdep(struct i40e_osdep *osdep, uint32_t reg, uint32_t value)
{
KASSERT(reg < osdep->mem_bus_space_size,
- ("ixl: register offset %#jx too large (max is %#jx",
+ ("ixl: register offset %#jx too large (max is %#jx)",
(uintmax_t)reg, (uintmax_t)osdep->mem_bus_space_size));
bus_space_write_4(osdep->mem_bus_space_tag,
@@ -211,7 +214,6 @@ wr32_osdep(struct i40e_osdep *osdep, uint32_t reg, uint32_t value)
static __inline void
ixl_flush_osdep(struct i40e_osdep *osdep)
{
-
rd32_osdep(osdep, osdep->flush_reg);
}
diff --git a/sys/dev/ixl/i40e_prototype.h b/sys/dev/ixl/i40e_prototype.h
index 2b72383..c2395d2 100755..100644
--- a/sys/dev/ixl/i40e_prototype.h
+++ b/sys/dev/ixl/i40e_prototype.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2014, Intel Corporation
+ Copyright (c) 2013-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -78,6 +78,21 @@ void i40e_idle_aq(struct i40e_hw *hw);
void i40e_resume_aq(struct i40e_hw *hw);
bool i40e_check_asq_alive(struct i40e_hw *hw);
enum i40e_status_code i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
+#ifdef X722_SUPPORT
+
+enum i40e_status_code i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+enum i40e_status_code i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+enum i40e_status_code i40e_aq_get_rss_key(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_get_set_rss_key_data *key);
+enum i40e_status_code i40e_aq_set_rss_key(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_get_set_rss_key_data *key);
+#endif
+char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
+char *i40e_stat_str(struct i40e_hw *hw, enum i40e_status_code stat_err);
u32 i40e_led_get(struct i40e_hw *hw);
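The new i40e_aq_str()/i40e_stat_str() helpers let callers log symbolic names for a status code and the last AQ error instead of raw integers. A hedged sketch of the reporting pattern; the stub bodies here only illustrate the shape, while the real helpers map the driver's enums to strings:

    #include <stdio.h>

    /* Stubs shaped like the new helpers. */
    static const char *stat_str(int status) { return status ? "I40E_ERR_NVM" : "OK"; }
    static const char *aq_str(int aq_err)   { return aq_err ? "I40E_AQ_RC_EBUSY" : "I40E_AQ_RC_OK"; }

    int main(void)
    {
        int status = 1, aq_err = 1;

        /* Log both the driver status and the AQ return code. */
        if (status)
            fprintf(stderr, "AQ command failed: %s (AQ err: %s)\n",
                stat_str(status), aq_str(aq_err));
        return 0;
    }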
@@ -145,6 +160,12 @@ enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_vsi_params(struct i40e_hw *hw,
struct i40e_vsi_context *vsi_ctx,
struct i40e_asq_cmd_details *cmd_details);
@@ -204,6 +225,9 @@ enum i40e_status_code i40e_aq_write_nvm_config(struct i40e_hw *hw,
u8 cmd_flags, void *data, u16 buf_size,
u16 element_count,
struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_oem_post_update(struct i40e_hw *hw,
+ void *buff, u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_discover_capabilities(struct i40e_hw *hw,
void *buff, u16 buff_size, u16 *data_size,
enum i40e_admin_queue_opc list_type_opc,
@@ -377,7 +401,8 @@ enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw);
enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw);
void i40e_clear_hw(struct i40e_hw *hw);
void i40e_clear_pxe_mode(struct i40e_hw *hw);
-bool i40e_get_link_status(struct i40e_hw *hw);
+enum i40e_status_code i40e_get_link_status(struct i40e_hw *hw, bool *link_up);
+enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw);
enum i40e_status_code i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
enum i40e_status_code i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
u32 *max_bw, u32 *min_bw, bool *min_valid, bool *max_valid);
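Note the signature change just above: i40e_get_link_status() now returns a status code and reports the link state through a bool out-parameter, so callers can tell "link down" apart from "query failed". A small sketch of the new calling convention, with a stub in place of the firmware query:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stub with the new shape: status code out, link state via
     * pointer.  The real i40e_get_link_status() queries firmware. */
    static int get_link_status(bool *link_up)
    {
        *link_up = true;        /* pretend the query succeeded */
        return 0;               /* I40E_SUCCESS */
    }

    int main(void)
    {
        bool link_up = false;
        int status = get_link_status(&link_up);

        if (status != 0)
            fprintf(stderr, "link query failed: %d\n", status);
        else
            printf("link is %s\n", link_up ? "up" : "down");
        return 0;
    }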
@@ -445,4 +470,9 @@ enum i40e_status_code i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
u16 vsi_seid, u16 queue, bool is_add,
struct i40e_control_filter_stats *stats,
struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
+ u8 table_id, u32 start_index, u16 buff_size,
+ void *buff, u16 *ret_buff_size,
+ u8 *ret_next_table, u32 *ret_next_index,
+ struct i40e_asq_cmd_details *cmd_details);
#endif /* _I40E_PROTOTYPE_H_ */
diff --git a/sys/dev/ixl/i40e_register.h b/sys/dev/ixl/i40e_register.h
index a8e47b3..adfc744 100755..100644
--- a/sys/dev/ixl/i40e_register.h
+++ b/sys/dev/ixl/i40e_register.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2014, Intel Corporation
+ Copyright (c) 2013-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -882,6 +882,13 @@
#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT)
#define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31
#define I40E_PFINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_GLINT_CTL 0x0003F800 /* Reset: CORER */
+#define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT 0
+#define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT)
+#define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT 1
+#define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT)
+#define I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT 2
+#define I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT)
#define I40E_PFINT_DYN_CTL0 0x00038480 /* Reset: PFR */
#define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0
#define I40E_PFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_SHIFT)
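Every field in this register header comes as a SHIFT/MASK pair built by I40E_MASK(), so reading a field is a mask-and-shift. A minimal sketch using the new GLINT_CTL automask bit as the example; the local MASK() macro mirrors the header's convention:

    #include <stdint.h>
    #include <stdio.h>

    /* Same shape as the header's I40E_MASK() convention. */
    #define MASK(m, s) ((uint32_t)(m) << (s))

    #define DIS_AUTOMASK_N_SHIFT 2
    #define DIS_AUTOMASK_N_MASK  MASK(0x1, DIS_AUTOMASK_N_SHIFT)

    /* Extract a field: mask first, then shift down. */
    static uint32_t get_field(uint32_t reg, uint32_t mask, unsigned int shift)
    {
        return (reg & mask) >> shift;
    }

    int main(void)
    {
        uint32_t glint_ctl = 0x4;     /* pretend register readback */

        printf("automask disabled: %u\n",
            get_field(glint_ctl, DIS_AUTOMASK_N_MASK, DIS_AUTOMASK_N_SHIFT));
        return 0;
    }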
@@ -3375,4 +3382,1936 @@
#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
#define I40E_VFQF_HREGION_REGION_7_SHIFT 29
#define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT)
+#ifdef X722_SUPPORT
+
+#define I40E_MNGSB_FDCRC 0x000B7050 /* Reset: POR */
+#define I40E_MNGSB_FDCRC_CRC_RES_SHIFT 0
+#define I40E_MNGSB_FDCRC_CRC_RES_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCRC_CRC_RES_SHIFT)
+#define I40E_MNGSB_FDCS 0x000B7040 /* Reset: POR */
+#define I40E_MNGSB_FDCS_CRC_CONT_SHIFT 2
+#define I40E_MNGSB_FDCS_CRC_CONT_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_CONT_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT 3
+#define I40E_MNGSB_FDCS_CRC_SEED_EN_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT 4
+#define I40E_MNGSB_FDCS_CRC_WR_INH_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_SEED_SHIFT 8
+#define I40E_MNGSB_FDCS_CRC_SEED_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCS_CRC_SEED_SHIFT)
+#define I40E_MNGSB_FDS 0x000B7048 /* Reset: POR */
+#define I40E_MNGSB_FDS_START_BC_SHIFT 0
+#define I40E_MNGSB_FDS_START_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_START_BC_SHIFT)
+#define I40E_MNGSB_FDS_LAST_BC_SHIFT 16
+#define I40E_MNGSB_FDS_LAST_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_LAST_BC_SHIFT)
+
+#define I40E_GL_VF_CTRL_RX(_VF) (0x00083600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_GL_VF_CTRL_RX_MAX_INDEX 127
+#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT 0
+#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT)
+#define I40E_GL_VF_CTRL_TX(_VF) (0x00083400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_GL_VF_CTRL_TX_MAX_INDEX 127
+#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT 0
+#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT)
+
+#define I40E_GLCM_LAN_CACHESIZE 0x0010C4D8 /* Reset: CORER */
+#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT 12
+#define I40E_GLCM_LAN_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT)
+#define I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT 16
+#define I40E_GLCM_LAN_CACHESIZE_WAYS_MASK I40E_MASK(0x3FF, I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE 0x00138FE4 /* Reset: CORER */
+#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE_SETS_SHIFT 12
+#define I40E_GLCM_PE_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_PE_CACHESIZE_SETS_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT 16
+#define I40E_GLCM_PE_CACHESIZE_WAYS_MASK I40E_MASK(0x1FF, I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT)
+#define I40E_PFCM_PE_ERRDATA 0x00138D00 /* Reset: PFR */
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_PE_ERRINFO 0x00138C80 /* Reset: PFR */
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+
+#define I40E_PRTDCB_TFMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_TFMSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TFMSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TFMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TFMSTC_MSTC_SHIFT)
+#define I40E_GL_FWSTS_FWROWD_SHIFT 8
+#define I40E_GL_FWSTS_FWROWD_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWROWD_SHIFT)
+#define I40E_GLFOC_CACHESIZE 0x000AA0DC /* Reset: CORER */
+#define I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLFOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLFOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLFOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLFOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLFOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLFOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLFOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLHMC_APBVTINUSEBASE(_i) (0x000C4a00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_APBVTINUSEBASE_MAX_INDEX 15
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_CEQPART(_i) (0x001312C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_CEQPART_MAX_INDEX 15
+#define I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_CEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_DBCQMAX 0x000C20F0 /* Reset: CORER */
+#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT 0
+#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_MASK I40E_MASK(0x3FFFF, I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT)
+#define I40E_GLHMC_DBCQPART(_i) (0x00131240 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_DBCQPART_MAX_INDEX 15
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_DBQPMAX 0x000C20EC /* Reset: CORER */
+#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT 0
+#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_MASK I40E_MASK(0x7FFFF, I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT)
+#define I40E_GLHMC_DBQPPART(_i) (0x00138D80 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_DBQPPART_MAX_INDEX 15
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_PEARPBASE(_i) (0x000C4800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEARPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_PEARPCNT(_i) (0x000C4900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEARPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_PEARPMAX 0x000C2038 /* Reset: CORER */
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT 0
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT)
+#define I40E_GLHMC_PEARPOBJSZ 0x000C2034 /* Reset: CORER */
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_MASK I40E_MASK(0x7, I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT)
+#define I40E_GLHMC_PECQBASE(_i) (0x000C4200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PECQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_PECQCNT(_i) (0x000C4300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PECQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_PECQOBJSZ 0x000C2020 /* Reset: CORER */
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTCNT(_i) (0x000C4700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEHTCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_PEHTEBASE(_i) (0x000C4600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEHTEBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_PEHTEOBJSZ 0x000C202c /* Reset: CORER */
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTMAX 0x000C2030 /* Reset: CORER */
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT 0
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_MASK I40E_MASK(0x1FFFFF, I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT)
+#define I40E_GLHMC_PEMRBASE(_i) (0x000C4c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEMRBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_PEMRCNT(_i) (0x000C4d00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEMRCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_PEMRMAX 0x000C2040 /* Reset: CORER */
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT 0
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT)
+#define I40E_GLHMC_PEMROBJSZ 0x000C203c /* Reset: CORER */
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT 0
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT)
+#define I40E_GLHMC_PEPBLBASE(_i) (0x000C5800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEPBLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_PEPBLCNT(_i) (0x000C5900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEPBLCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_PEPBLMAX 0x000C206c /* Reset: CORER */
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT 0
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT)
+#define I40E_GLHMC_PEPFFIRSTSD 0x000C20E4 /* Reset: CORER */
+#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT 0
+#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_MASK I40E_MASK(0xFFF, I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT)
+#define I40E_GLHMC_PEQ1BASE(_i) (0x000C5200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1BASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_PEQ1CNT(_i) (0x000C5300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1CNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_PEQ1FLBASE(_i) (0x000C5400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1FLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_PEQ1FLMAX 0x000C2058 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)
+#define I40E_GLHMC_PEQ1MAX 0x000C2054 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT)
+#define I40E_GLHMC_PEQ1OBJSZ 0x000C2050 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT)
+#define I40E_GLHMC_PEQPBASE(_i) (0x000C4000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_PEQPCNT(_i) (0x000C4100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_PEQPOBJSZ 0x000C201c /* Reset: CORER */
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT)
+#define I40E_GLHMC_PESRQBASE(_i) (0x000C4400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PESRQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_PESRQCNT(_i) (0x000C4500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PESRQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_PESRQMAX 0x000C2028 /* Reset: CORER */
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT 0
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT)
+#define I40E_GLHMC_PESRQOBJSZ 0x000C2024 /* Reset: CORER */
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT)
+#define I40E_GLHMC_PETIMERBASE(_i) (0x000C5A00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PETIMERBASE_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_PETIMERCNT(_i) (0x000C5B00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PETIMERCNT_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_PETIMERMAX 0x000C2084 /* Reset: CORER */
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT 0
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT)
+#define I40E_GLHMC_PETIMEROBJSZ 0x000C2080 /* Reset: CORER */
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT 0
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT)
+#define I40E_GLHMC_PEXFBASE(_i) (0x000C4e00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_PEXFCNT(_i) (0x000C4f00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_PEXFFLBASE(_i) (0x000C5000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFFLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_PEXFFLMAX 0x000C204c /* Reset: CORER */
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK I40E_MASK(0x1FFFFFF, I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
+#define I40E_GLHMC_PEXFMAX 0x000C2048 /* Reset: CORER */
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT)
+#define I40E_GLHMC_PEXFOBJSZ 0x000C2044 /* Reset: CORER */
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT)
+#define I40E_GLHMC_PFPESDPART(_i) (0x000C0880 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PFPESDPART_MAX_INDEX 15
+#define I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_PFPESDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_PFPESDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT)
+#define I40E_GLHMC_VFAPBVTINUSEBASE(_i) (0x000Cca00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFAPBVTINUSEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART(_i) (0x00132240 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFCEQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART(_i) (0x00132140 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFDBCQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART(_i) (0x00138E00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFDBQPPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_VFFSIAVBASE(_i) (0x000Cd600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFFSIAVBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT 0
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_VFFSIAVCNT(_i) (0x000Cd700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFFSIAVCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT 0
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPDINV_MAX_INDEX 31
+#define I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT 0
+#define I40E_GLHMC_VFPDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT)
+#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT 15
+#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT)
+#define I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT 16
+#define I40E_GLHMC_VFPDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT)
+#define I40E_GLHMC_VFPEARPBASE(_i) (0x000Cc800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEARPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_VFPEARPCNT(_i) (0x000Cc900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEARPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_VFPECQBASE(_i) (0x000Cc200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPECQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_VFPECQCNT(_i) (0x000Cc300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPECQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTCNT(_i) (0x000Cc700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEHTCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTEBASE(_i) (0x000Cc600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEHTEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRBASE(_i) (0x000Ccc00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEMRBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRCNT(_i) (0x000Ccd00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEMRCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_VFPEPBLBASE(_i) (0x000Cd800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEPBLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_VFPEPBLCNT(_i) (0x000Cd900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEPBLCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1BASE(_i) (0x000Cd200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1BASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_VFPEQ1CNT(_i) (0x000Cd300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1CNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1FLBASE(_i) (0x000Cd400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1FLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_VFPEQPBASE(_i) (0x000Cc000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_VFPEQPCNT(_i) (0x000Cc100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_VFPESRQBASE(_i) (0x000Cc400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPESRQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_VFPESRQCNT(_i) (0x000Cc500 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPESRQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_VFPETIMERBASE(_i) (0x000CDA00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPETIMERBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_VFPETIMERCNT(_i) (0x000CDB00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPETIMERCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFBASE(_i) (0x000Cce00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_VFPEXFCNT(_i) (0x000Ccf00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFFLBASE(_i) (0x000Cd000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFFLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_VFSDPART(_i) (0x000C8800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFSDPART_MAX_INDEX 31
+#define I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_VFSDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT)
+#define I40E_GLPBLOC_CACHESIZE 0x000A80BC /* Reset: CORER */
+#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPBLOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLPBLOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPBLOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLPBLOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLPDOC_CACHESIZE 0x000D0088 /* Reset: CORER */
+#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPDOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLPDOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPDOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPDOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLPDOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPDOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLPEOC_CACHESIZE 0x000A60E8 /* Reset: CORER */
+#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPEOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLPEOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPEOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPEOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLPEOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPEOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT 15
+#define I40E_PFHMC_PDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT)
+#define I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT 15
+#define I40E_PFHMC_SDCMD_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT)
+#define I40E_GL_PPRS_SPARE 0x000856E0 /* Reset: CORER */
+#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT 0
+#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT)
+#define I40E_GL_TLAN_SPARE 0x000E64E0 /* Reset: CORER */
+#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT 0
+#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT)
+#define I40E_GL_TUPM_SPARE 0x000a2230 /* Reset: CORER */
+#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT 0
+#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG 0x000B81C0 /* Reset: POR */
+#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT 0
+#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT 1
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT 2
+#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT 3
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT 4
+#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT 5
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT 6
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT 7
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT 8
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT 9
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT 10
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT 11
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT 12
+#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT 13
+#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT 14
+#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT)
+#define I40E_GLGEN_MISC_SPARE 0x000880E0 /* Reset: POR */
+#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT 0
+#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT)
+#define I40E_GL_UFUSE_SOC 0x000BE550 /* Reset: POR */
+#define I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT 0
+#define I40E_GL_UFUSE_SOC_PORT_MODE_MASK I40E_MASK(0x3, I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT)
+#define I40E_GL_UFUSE_SOC_NIC_ID_SHIFT 2
+#define I40E_GL_UFUSE_SOC_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_SOC_NIC_ID_SHIFT)
+#define I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT 3
+#define I40E_GL_UFUSE_SOC_SPARE_FUSES_MASK I40E_MASK(0x1FFF, I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT)
+#define I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30
+#define I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT)
+#define I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30
+#define I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT)
+#define I40E_VPLAN_QBASE(_VF) (0x00074800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VPLAN_QBASE_MAX_INDEX 127
+#define I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT 0
+#define I40E_VPLAN_QBASE_VFFIRSTQ_MASK I40E_MASK(0x7FF, I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT)
+#define I40E_VPLAN_QBASE_VFNUMQ_SHIFT 11
+#define I40E_VPLAN_QBASE_VFNUMQ_MASK I40E_MASK(0xFF, I40E_VPLAN_QBASE_VFNUMQ_SHIFT)
+#define I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT 31
+#define I40E_VPLAN_QBASE_VFQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT)
+#define I40E_PRTMAC_LINK_DOWN_COUNTER 0x001E2440 /* Reset: GLOBR */
+#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT 0
+#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT)
+#define I40E_GLNVM_AL_REQ 0x000B6164 /* Reset: POR */
+#define I40E_GLNVM_AL_REQ_POR_SHIFT 0
+#define I40E_GLNVM_AL_REQ_POR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_POR_SHIFT)
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT 1
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT)
+#define I40E_GLNVM_AL_REQ_GLOBR_SHIFT 2
+#define I40E_GLNVM_AL_REQ_GLOBR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_GLOBR_SHIFT)
+#define I40E_GLNVM_AL_REQ_CORER_SHIFT 3
+#define I40E_GLNVM_AL_REQ_CORER_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_CORER_SHIFT)
+#define I40E_GLNVM_AL_REQ_PE_SHIFT 4
+#define I40E_GLNVM_AL_REQ_PE_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PE_SHIFT)
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT 5
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT)
+#define I40E_GLNVM_ALTIMERS 0x000B6140 /* Reset: POR */
+#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT 0
+#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_MASK I40E_MASK(0xFFF, I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT)
+#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT 12
+#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_MASK I40E_MASK(0xFFFFF, I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT)
+#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */
+#define I40E_GLNVM_FLA_LOCKED_SHIFT 6
+#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT)
+
+#define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */
+#define I40E_GLNVM_ULD_PCIER_DONE_SHIFT 0
+#define I40E_GLNVM_ULD_PCIER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_SHIFT)
+#define I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT 1
+#define I40E_GLNVM_ULD_PCIER_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT)
+#define I40E_GLNVM_ULD_CORER_DONE_SHIFT 3
+#define I40E_GLNVM_ULD_CORER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CORER_DONE_SHIFT)
+#define I40E_GLNVM_ULD_GLOBR_DONE_SHIFT 4
+#define I40E_GLNVM_ULD_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_GLOBR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_POR_DONE_SHIFT 5
+#define I40E_GLNVM_ULD_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_POR_DONE_1_SHIFT 8
+#define I40E_GLNVM_ULD_POR_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_1_SHIFT)
+#define I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT 9
+#define I40E_GLNVM_ULD_PCIER_DONE_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT)
+#define I40E_GLNVM_ULD_PE_DONE_SHIFT 10
+#define I40E_GLNVM_ULD_PE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PE_DONE_SHIFT)
+#define I40E_GLNVM_ULT 0x000B6154 /* Reset: POR */
+#define I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT 0
+#define I40E_GLNVM_ULT_CONF_PCIR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT 1
+#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_1_SHIFT 2
+#define I40E_GLNVM_ULT_RESERVED_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_1_SHIFT)
+#define I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT 3
+#define I40E_GLNVM_ULT_CONF_CORE_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT 4
+#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_POR_AE_SHIFT 5
+#define I40E_GLNVM_ULT_CONF_POR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_POR_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_2_SHIFT 6
+#define I40E_GLNVM_ULT_RESERVED_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_2_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_3_SHIFT 7
+#define I40E_GLNVM_ULT_RESERVED_3_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_3_SHIFT)
+#define I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT 8
+#define I40E_GLNVM_ULT_CONF_EMP_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT 9
+#define I40E_GLNVM_ULT_CONF_PCIALT_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_4_SHIFT 10
+#define I40E_GLNVM_ULT_RESERVED_4_MASK I40E_MASK(0x3FFFFF, I40E_GLNVM_ULT_RESERVED_4_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT 0x000B615C /* Reset: POR */
+#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT 0
+#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT 1
+#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT 2
+#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT 3
+#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT 4
+#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT 5
+#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT 6
+#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT 7
+#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT 8
+#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT 9
+#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT 10
+#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT 11
+#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT 12
+#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT 13
+#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT 14
+#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT 15
+#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT 16
+#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT)
+#define I40E_MNGSB_DADD 0x000B7030 /* Reset: POR */
+#define I40E_MNGSB_DADD_ADDR_SHIFT 0
+#define I40E_MNGSB_DADD_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DADD_ADDR_SHIFT)
+#define I40E_MNGSB_DCNT 0x000B7034 /* Reset: POR */
+#define I40E_MNGSB_DCNT_BYTE_CNT_SHIFT 0
+#define I40E_MNGSB_DCNT_BYTE_CNT_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DCNT_BYTE_CNT_SHIFT)
+#define I40E_MNGSB_MSGCTL 0x000B7020 /* Reset: POR */
+#define I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT 0
+#define I40E_MNGSB_MSGCTL_HDR_DWS_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT)
+#define I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT 8
+#define I40E_MNGSB_MSGCTL_EXP_RDW_MASK I40E_MASK(0x1FF, I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT)
+#define I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT 26
+#define I40E_MNGSB_MSGCTL_MSG_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT)
+#define I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT 28
+#define I40E_MNGSB_MSGCTL_TOKEN_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT)
+#define I40E_MNGSB_MSGCTL_BARCLR_SHIFT 30
+#define I40E_MNGSB_MSGCTL_BARCLR_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_BARCLR_SHIFT)
+#define I40E_MNGSB_MSGCTL_CMDV_SHIFT 31
+#define I40E_MNGSB_MSGCTL_CMDV_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_CMDV_SHIFT)
+#define I40E_MNGSB_RDATA 0x000B7300 /* Reset: POR */
+#define I40E_MNGSB_RDATA_DATA_SHIFT 0
+#define I40E_MNGSB_RDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_RDATA_DATA_SHIFT)
+#define I40E_MNGSB_RHDR0 0x000B72FC /* Reset: POR */
+#define I40E_MNGSB_RHDR0_DESTINATION_SHIFT 0
+#define I40E_MNGSB_RHDR0_DESTINATION_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_DESTINATION_SHIFT)
+#define I40E_MNGSB_RHDR0_SOURCE_SHIFT 8
+#define I40E_MNGSB_RHDR0_SOURCE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_SOURCE_SHIFT)
+#define I40E_MNGSB_RHDR0_OPCODE_SHIFT 16
+#define I40E_MNGSB_RHDR0_OPCODE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_OPCODE_SHIFT)
+#define I40E_MNGSB_RHDR0_TAG_SHIFT 24
+#define I40E_MNGSB_RHDR0_TAG_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_TAG_SHIFT)
+#define I40E_MNGSB_RHDR0_RESPONSE_SHIFT 27
+#define I40E_MNGSB_RHDR0_RESPONSE_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_RESPONSE_SHIFT)
+#define I40E_MNGSB_RHDR0_EH_SHIFT 31
+#define I40E_MNGSB_RHDR0_EH_MASK I40E_MASK(0x1, I40E_MNGSB_RHDR0_EH_SHIFT)
+#define I40E_MNGSB_RSPCTL 0x000B7024 /* Reset: POR */
+#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT 0
+#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_MASK I40E_MASK(0x1FF, I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT 26
+#define I40E_MNGSB_RSPCTL_RSP_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT 30
+#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT 31
+#define I40E_MNGSB_RSPCTL_RSP_ERR_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT)
+#define I40E_MNGSB_WDATA 0x000B7100 /* Reset: POR */
+#define I40E_MNGSB_WDATA_DATA_SHIFT 0
+#define I40E_MNGSB_WDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WDATA_DATA_SHIFT)
+#define I40E_MNGSB_WHDR0 0x000B70F4 /* Reset: POR */
+#define I40E_MNGSB_WHDR0_RAW_DEST_SHIFT 0
+#define I40E_MNGSB_WHDR0_RAW_DEST_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_RAW_DEST_SHIFT)
+#define I40E_MNGSB_WHDR0_DEST_SEL_SHIFT 12
+#define I40E_MNGSB_WHDR0_DEST_SEL_MASK I40E_MASK(0xF, I40E_MNGSB_WHDR0_DEST_SEL_SHIFT)
+#define I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT 16
+#define I40E_MNGSB_WHDR0_OPCODE_SEL_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT)
+#define I40E_MNGSB_WHDR0_TAG_SHIFT 24
+#define I40E_MNGSB_WHDR0_TAG_MASK I40E_MASK(0x7F, I40E_MNGSB_WHDR0_TAG_SHIFT)
+#define I40E_MNGSB_WHDR1 0x000B70F8 /* Reset: POR */
+#define I40E_MNGSB_WHDR1_ADDR_SHIFT 0
+#define I40E_MNGSB_WHDR1_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR1_ADDR_SHIFT)
+#define I40E_MNGSB_WHDR2 0x000B70FC /* Reset: POR */
+#define I40E_MNGSB_WHDR2_LENGTH_SHIFT 0
+#define I40E_MNGSB_WHDR2_LENGTH_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR2_LENGTH_SHIFT)
+
+#define I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT 21
+#define I40E_GLPCI_CAPSUP_WAKUP_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT)
+
+#define I40E_GLPCI_CUR_CLNT_COMMON 0x0009CA18 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT)
+#define I40E_GLPCI_CUR_CLNT_PIPEMON 0x0009CA20 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_ALWD 0x0009c514 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_MNG_RSVD 0x0009c594 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_MNG_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_ALWD 0x0009c510 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_RSVD 0x0009c590 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_ALWD 0x0009c500 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_RSVD 0x0009c580 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_ALWD 0x0009c508 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_RSVD 0x0009c588 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_ALWD 0x0009c518 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_RSVD 0x0009c598 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_ALWD 0x0009c504 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_RSVD 0x0009c584 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_ALWD 0x0009c50C /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_RSVD 0x0009c58c /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON 0x0009CA28 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT)
+
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT 10
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_NPQ_CFG 0x0009CA00 /* Reset: PCIR */
+#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT 0
+#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT 1
+#define I40E_GLPCI_NPQ_CFG_SMALL_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT 2
+#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT 6
+#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_MASK I40E_MASK(0x3FF, I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT 16
+#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT)
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON 0x0009CA30 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_MNG_ALWD 0x0009CB14 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_PMAT_ALWD 0x0009CB10 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_RLAN_ALWD 0x0009CB00 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_RXPE_ALWD 0x0009CB08 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TLAN_ALWD 0x0009CB04 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TPDU_ALWD 0x0009CB18 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TXPE_ALWD 0x0009CB0c /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPE_CPUSTATUS0 0x0000D040 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT)
+#define I40E_GLPE_CPUSTATUS1 0x0000D044 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT)
+#define I40E_GLPE_CPUSTATUS2 0x0000D048 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT)
+#define I40E_GLPE_CPUTRIG0 0x0000D060 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT 0
+#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_MASK I40E_MASK(0xFFFF, I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT)
+#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT 17
+#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT)
+#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT 18
+#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT)
+#define I40E_GLPE_DUAL40_RUPM 0x0000DA04 /* Reset: PE_CORER */
+#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT 0
+#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_MASK I40E_MASK(0x1, I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT)
+#define I40E_GLPE_PFAEQEDROPCNT(_i) (0x00131440 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFAEQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
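+
+/*
+ * Illustrative sketch: parameterized defines such as
+ * I40E_GLPE_PFAEQEDROPCNT(_i) expand to a base address plus a fixed
+ * stride; the valid index range is given by the matching *_MAX_INDEX
+ * define (assuming rd32() from i40e_osdep.h):
+ *
+ *	for (i = 0; i <= I40E_GLPE_PFAEQEDROPCNT_MAX_INDEX; i++)
+ *		drops[i] = rd32(hw, I40E_GLPE_PFAEQEDROPCNT(i)) &
+ *		    I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_MASK;
+ */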
+#define I40E_GLPE_PFCEQEDROPCNT(_i) (0x001313C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFCEQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
+#define I40E_GLPE_PFCQEDROPCNT(_i) (0x00131340 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFCQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT)
+#define I40E_GLPE_RUPM_CQPPOOL 0x0000DACC /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT)
+#define I40E_GLPE_RUPM_FLRPOOL 0x0000DAC4 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL 0x0000DA00 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT 0
+#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT 26
+#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT 27
+#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT 28
+#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT 29
+#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT 30
+#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT 31
+#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT)
+#define I40E_GLPE_RUPM_PTXPOOL 0x0000DAC8 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT)
+#define I40E_GLPE_RUPM_PUSHPOOL 0x0000DAC0 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT)
+#define I40E_GLPE_RUPM_TXHOST_EN 0x0000DA08 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT 0
+#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT)
+#define I40E_GLPE_VFAEQEDROPCNT(_i) (0x00132540 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFAEQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFCEQEDROPCNT(_i) (0x00132440 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFCEQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFCQEDROPCNT(_i) (0x00132340 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFCQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_AEQALLOC 0x00131180 /* Reset: PFR */
+#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_PFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_PFPE_CCQPHIGH 0x00008200 /* Reset: PFR */
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_PFPE_CCQPLOW 0x00008180 /* Reset: PFR */
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_PFPE_CCQPSTATUS 0x00008100 /* Reset: PFR */
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
+#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
+#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
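+
+/*
+ * Illustrative sketch (an assumption about usage, not driver code):
+ * CCQPSTATUS carries separate done and error bits, so a caller creating
+ * the control QP would typically poll until either one is set:
+ *
+ *	u32 s;
+ *	do {
+ *		s = rd32(hw, I40E_PFPE_CCQPSTATUS);
+ *	} while (!(s & (I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK |
+ *	    I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK)));
+ */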
+#define I40E_PFPE_CQACK 0x00131100 /* Reset: PFR */
+#define I40E_PFPE_CQACK_PECQID_SHIFT 0
+#define I40E_PFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQACK_PECQID_SHIFT)
+#define I40E_PFPE_CQARM 0x00131080 /* Reset: PFR */
+#define I40E_PFPE_CQARM_PECQID_SHIFT 0
+#define I40E_PFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQARM_PECQID_SHIFT)
+#define I40E_PFPE_CQPDB 0x00008000 /* Reset: PFR */
+#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_PFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_PFPE_CQPERRCODES 0x00008880 /* Reset: PFR */
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_PFPE_CQPTAIL 0x00008080 /* Reset: PFR */
+#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_PFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980 /* Reset: PFR */
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_FLMXMITALLOCERR 0x00008900 /* Reset: PFR */
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_IPCONFIG0 0x00008280 /* Reset: PFR */
+#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_PFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_PFPE_MRTEIDXMASK 0x00008600 /* Reset: PFR */
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680 /* Reset: PFR */
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_PFPE_TCPNOWTIMER 0x00008580 /* Reset: PFR */
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_PFPE_UDACTRL 0x00008700 /* Reset: PFR */
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN 0x00008780 /* Reset: PFR */
+#define I40E_PFPE_UDAUCFBQPN_QPN_SHIFT 0
+#define I40E_PFPE_UDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_PFPE_UDAUCFBQPN_QPN_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN_VALID_SHIFT 31
+#define I40E_PFPE_UDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_PFPE_UDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_WQEALLOC 0x00138C00 /* Reset: PFR */
+#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_PFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_PFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
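+
+/*
+ * Illustrative sketch: register writes compose fields the opposite way,
+ * shifting each value up and masking it into place, e.g. a WQE doorbell
+ * (assuming the wr32() accessor from i40e_osdep.h; qpid and desc_idx
+ * are hypothetical caller-supplied values):
+ *
+ *	wr32(hw, I40E_PFPE_WQEALLOC,
+ *	    ((qpid << I40E_PFPE_WQEALLOC_PEQPID_SHIFT) &
+ *	     I40E_PFPE_WQEALLOC_PEQPID_MASK) |
+ *	    ((desc_idx << I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT) &
+ *	     I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK));
+ */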
+#define I40E_PRTDCB_RLPMC 0x0001F140 /* Reset: PE_CORER */
+#define I40E_PRTDCB_RLPMC_TC2PFC_SHIFT 0
+#define I40E_PRTDCB_RLPMC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RLPMC_TC2PFC_SHIFT)
+#define I40E_PRTDCB_TCMSTC_RLPM(_i) (0x0001F040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: PE_CORER */
+#define I40E_PRTDCB_TCMSTC_RLPM_MAX_INDEX 7
+#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM 0x0001F1A0 /* Reset: PE_CORER */
+#define I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT 0
+#define I40E_PRTDCB_TCPMC_RLPM_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT 13
+#define I40E_PRTDCB_TCPMC_RLPM_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03 0x0000DAE0 /* Reset: PE_CORER */
+#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT 0
+#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT 8
+#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT 16
+#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT 24
+#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_CNTR 0x0000DB20 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_CNTR_COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_CTL 0x0000DA40 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_CTL_LLTC_SHIFT 13
+#define I40E_PRTPE_RUPM_CTL_LLTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CTL_LLTC_SHIFT)
+#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT 30
+#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT)
+#define I40E_PRTPE_RUPM_PFCCTL 0x0000DA60 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT)
+#define I40E_PRTPE_RUPM_PFCPC 0x0000DA80 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC 0x0000DAA0 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT 16
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT 31
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47 0x0000DB60 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03 0x0000DB40 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47 0x0000DB00 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_THRES 0x0000DA20 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT 0
+#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT)
+#define I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT 8
+#define I40E_PRTPE_RUPM_THRES_MAXSPADS_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT)
+#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT 16
+#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT)
+#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC_MAX_INDEX 127
+#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW_MAX_INDEX 127
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQACK_MAX_INDEX 127
+#define I40E_VFPE_CQACK_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK_PECQID_SHIFT)
+#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQARM_MAX_INDEX 127
+#define I40E_VFPE_CQARM_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPDB_MAX_INDEX 127
+#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL_MAX_INDEX 127
+#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127
+#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC_MAX_INDEX 127
+#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
+#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
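+
+/*
+ * Illustrative sketch: the *HI/*LO statistics pairs below form 48-bit
+ * counters, 32 bits in the *LO register and 16 more in the *HI register;
+ * a full value is assembled as (assuming rd32() from i40e_osdep.h):
+ *
+ *	u64 frags = ((u64)(rd32(hw, I40E_GLPES_PFIP4RXFRAGSHI(i)) &
+ *	    I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK) << 32) |
+ *	    rd32(hw, I40E_GLPES_PFIP4RXFRAGSLO(i));
+ */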
+#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT)
+#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT)
+#define I40E_GLPES_RDMARXUNALIGN 0x0001E000 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_GLGEN_PME_TO 0x000B81BC /* Reset: POR */
+#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT 0
+#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_MASK I40E_MASK(0x1, I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT)
+#define I40E_GLQF_APBVT(_i) (0x00260000 + ((_i) * 4)) /* _i=0...2047 */ /* Reset: CORER */
+#define I40E_GLQF_APBVT_MAX_INDEX 2047
+#define I40E_GLQF_APBVT_APBVT_SHIFT 0
+#define I40E_GLQF_APBVT_APBVT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_APBVT_APBVT_SHIFT)
+#define I40E_GLQF_FD_PCTYPES(_i) (0x00268000 + ((_i) * 4)) /* _i=0...63 */ /* Reset: POR */
+#define I40E_GLQF_FD_PCTYPES_MAX_INDEX 63
+#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT 0
+#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_MASK I40E_MASK(0x3F, I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT)
+#define I40E_GLQF_FDEVICTENA(_i) (0x00270384 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_GLQF_FDEVICTENA_MAX_INDEX 1
+#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT 0
+#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT)
+#define I40E_GLQF_FDEVICTFLAG 0x00270280 /* Reset: CORER */
+#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT 0
+#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT)
+#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT 8
+#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT)
+#define I40E_PFQF_CTL_2 0x00270300 /* Reset: CORER */
+#define I40E_PFQF_CTL_2_PEHSIZE_SHIFT 0
+#define I40E_PFQF_CTL_2_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEHSIZE_SHIFT)
+#define I40E_PFQF_CTL_2_PEDSIZE_SHIFT 5
+#define I40E_PFQF_CTL_2_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEDSIZE_SHIFT)
+/* Redefined for X722 family */
+#define I40E_X722_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_X722_PFQF_HLUT_MAX_INDEX 127
+#define I40E_X722_PFQF_HLUT_LUT0_SHIFT 0
+#define I40E_X722_PFQF_HLUT_LUT0_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT0_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT1_SHIFT 8
+#define I40E_X722_PFQF_HLUT_LUT1_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT1_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT2_SHIFT 16
+#define I40E_X722_PFQF_HLUT_LUT2_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT2_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT3_SHIFT 24
+#define I40E_X722_PFQF_HLUT_LUT3_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT3_SHIFT)
+#define I40E_PFQF_HREGION(_i) (0x00245400 + ((_i) * 128)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PFQF_HREGION_MAX_INDEX 7
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_PFQF_HREGION_REGION_0_SHIFT 1
+#define I40E_PFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_0_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_PFQF_HREGION_REGION_1_SHIFT 5
+#define I40E_PFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_1_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_PFQF_HREGION_REGION_2_SHIFT 9
+#define I40E_PFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_2_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_PFQF_HREGION_REGION_3_SHIFT 13
+#define I40E_PFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_3_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_PFQF_HREGION_REGION_4_SHIFT 17
+#define I40E_PFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_4_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_PFQF_HREGION_REGION_5_SHIFT 21
+#define I40E_PFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_5_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_PFQF_HREGION_REGION_6_SHIFT 25
+#define I40E_PFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_6_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_PFQF_HREGION_REGION_7_SHIFT 29
+#define I40E_PFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_7_SHIFT)
+#define I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT 8
+#define I40E_VSIQF_CTL_RSS_LUT_TYPE_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT)
+#define I40E_VSIQF_HKEY(_i, _VSI) (0x002A0000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...12, _VSI=0...383 */ /* Reset: CORER */
+#define I40E_VSIQF_HKEY_MAX_INDEX 12
+#define I40E_VSIQF_HKEY_KEY_0_SHIFT 0
+#define I40E_VSIQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_0_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_1_SHIFT 8
+#define I40E_VSIQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_1_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_2_SHIFT 16
+#define I40E_VSIQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_2_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_3_SHIFT 24
+#define I40E_VSIQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_3_SHIFT)
+#define I40E_VSIQF_HLUT(_i, _VSI) (0x00220000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...15, _VSI=0...383 */ /* Reset: CORER */
+#define I40E_VSIQF_HLUT_MAX_INDEX 15
+#define I40E_VSIQF_HLUT_LUT0_SHIFT 0
+#define I40E_VSIQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT0_SHIFT)
+#define I40E_VSIQF_HLUT_LUT1_SHIFT 8
+#define I40E_VSIQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT1_SHIFT)
+#define I40E_VSIQF_HLUT_LUT2_SHIFT 16
+#define I40E_VSIQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT2_SHIFT)
+#define I40E_VSIQF_HLUT_LUT3_SHIFT 24
+#define I40E_VSIQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT3_SHIFT)
+#define I40E_GLGEN_STAT_CLEAR 0x00390004 /* Reset: CORER */
+#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT 0
+#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT)
+#define I40E_GLGEN_STAT_HALT 0x00390000 /* Reset: CORER */
+#define I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT 0
+#define I40E_GLGEN_STAT_HALT_HALT_CELLS_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT)
+#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT)
+#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */
+#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK1_PECQID_SHIFT)
+#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */
+#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM1_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */
+#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB1_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG01_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
+
+#endif /* X722_SUPPORT */
#endif /* _I40E_REGISTER_H_ */
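The register block above is mechanically generated layout data; in the driver it is consumed through the usual shift/mask idiom. A minimal sketch of that idiom, assuming the rd32() read helper from the driver's i40e_osdep.h (the function name and the 48-bit LO/HI composition are illustrative, not code from this commit). The (_i) macros compute a per-instance address — two-index forms such as I40E_VSIQF_HKEY(_i, _VSI) work the same way — and each LO/HI register pair forms one wide counter whose HI half width is given by its _MASK:

/* Sketch: reading one VF's 48-bit UDP Tx packet counter (vf: 0..31). */
static inline u64
ixl_read_vf_udp_tx_pkts(struct i40e_hw *hw, int vf)
{
	u32 lo = rd32(hw, I40E_GLPES_VFUDPTXPKTSLO(vf));
	u32 hi = rd32(hw, I40E_GLPES_VFUDPTXPKTSHI(vf)) &
	    I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK;

	return (((u64)hi << 32) | lo);
}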
diff --git a/sys/dev/ixl/i40e_status.h b/sys/dev/ixl/i40e_status.h
index 24d5e6b..24d5e6b 100755..100644
--- a/sys/dev/ixl/i40e_status.h
+++ b/sys/dev/ixl/i40e_status.h
diff --git a/sys/dev/ixl/i40e_type.h b/sys/dev/ixl/i40e_type.h
index 703fae1..01762e7 100755..100644
--- a/sys/dev/ixl/i40e_type.h
+++ b/sys/dev/ixl/i40e_type.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2014, Intel Corporation
+ Copyright (c) 2013-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -41,28 +41,12 @@
#include "i40e_adminq.h"
#include "i40e_hmc.h"
#include "i40e_lan_hmc.h"
+#include "i40e_devids.h"
#define UNREFERENCED_XPARAMETER
-/* Vendor ID */
-#define I40E_INTEL_VENDOR_ID 0x8086
-
-/* Device IDs */
-#define I40E_DEV_ID_SFP_XL710 0x1572
-#define I40E_DEV_ID_QEMU 0x1574
-#define I40E_DEV_ID_KX_A 0x157F
-#define I40E_DEV_ID_KX_B 0x1580
-#define I40E_DEV_ID_KX_C 0x1581
-#define I40E_DEV_ID_QSFP_A 0x1583
-#define I40E_DEV_ID_QSFP_B 0x1584
-#define I40E_DEV_ID_QSFP_C 0x1585
-#define I40E_DEV_ID_10G_BASE_T 0x1586
-#define I40E_DEV_ID_VF 0x154C
-#define I40E_DEV_ID_VF_HV 0x1571
-
-#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
- (d) == I40E_DEV_ID_QSFP_B || \
- (d) == I40E_DEV_ID_QSFP_C)
+#define BIT(a) (1UL << (a))
+#define BIT_ULL(a) (1ULL << (a))
#ifndef I40E_MASK
/* I40E_MASK is a macro used on 32 bit registers */
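The BIT()/BIT_ULL() helpers added above replace the open-coded (0x1UL << n) and (0x1ULL << n) shifts used throughout the rest of this diff. A trivial sketch of their semantics (the variable names are illustrative only):

/* BIT() yields an unsigned long single-bit mask, BIT_ULL() an unsigned
 * long long one, so bit positions 32..63 remain well-defined. */
static const u32 ixl_example_bit7  = (u32)BIT(7);	/* 0x00000080 */
static const u64 ixl_example_bit63 = BIT_ULL(63);	/* 1ULL << 63 */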
@@ -190,6 +174,10 @@ enum i40e_mac_type {
I40E_MAC_X710,
I40E_MAC_XL710,
I40E_MAC_VF,
+#ifdef X722_SUPPORT
+ I40E_MAC_X722,
+ I40E_MAC_X722_VF,
+#endif
I40E_MAC_GENERIC,
};
@@ -221,14 +209,14 @@ enum i40e_set_fc_aq_failures {
};
enum i40e_vsi_type {
- I40E_VSI_MAIN = 0,
- I40E_VSI_VMDQ1,
- I40E_VSI_VMDQ2,
- I40E_VSI_CTRL,
- I40E_VSI_FCOE,
- I40E_VSI_MIRROR,
- I40E_VSI_SRIOV,
- I40E_VSI_FDIR,
+ I40E_VSI_MAIN = 0,
+ I40E_VSI_VMDQ1 = 1,
+ I40E_VSI_VMDQ2 = 2,
+ I40E_VSI_CTRL = 3,
+ I40E_VSI_FCOE = 4,
+ I40E_VSI_MIRROR = 5,
+ I40E_VSI_SRIOV = 6,
+ I40E_VSI_FDIR = 7,
I40E_VSI_TYPE_UNKNOWN
};
@@ -252,16 +240,64 @@ struct i40e_link_status {
bool crc_enable;
u8 pacing;
u8 requested_speeds;
+ u8 module_type[3];
+ /* 1st byte: module identifier */
+#define I40E_MODULE_TYPE_SFP 0x03
+#define I40E_MODULE_TYPE_QSFP 0x0D
+ /* 2nd byte: ethernet compliance codes for 10/40G */
+#define I40E_MODULE_TYPE_40G_ACTIVE 0x01
+#define I40E_MODULE_TYPE_40G_LR4 0x02
+#define I40E_MODULE_TYPE_40G_SR4 0x04
+#define I40E_MODULE_TYPE_40G_CR4 0x08
+#define I40E_MODULE_TYPE_10G_BASE_SR 0x10
+#define I40E_MODULE_TYPE_10G_BASE_LR 0x20
+#define I40E_MODULE_TYPE_10G_BASE_LRM 0x40
+#define I40E_MODULE_TYPE_10G_BASE_ER 0x80
+ /* 3rd byte: ethernet compliance codes for 1G */
+#define I40E_MODULE_TYPE_1000BASE_SX 0x01
+#define I40E_MODULE_TYPE_1000BASE_LX 0x02
+#define I40E_MODULE_TYPE_1000BASE_CX 0x04
+#define I40E_MODULE_TYPE_1000BASE_T 0x08
+};
+
+enum i40e_aq_capabilities_phy_type {
+ I40E_CAP_PHY_TYPE_SGMII = BIT(I40E_PHY_TYPE_SGMII),
+ I40E_CAP_PHY_TYPE_1000BASE_KX = BIT(I40E_PHY_TYPE_1000BASE_KX),
+ I40E_CAP_PHY_TYPE_10GBASE_KX4 = BIT(I40E_PHY_TYPE_10GBASE_KX4),
+ I40E_CAP_PHY_TYPE_10GBASE_KR = BIT(I40E_PHY_TYPE_10GBASE_KR),
+ I40E_CAP_PHY_TYPE_40GBASE_KR4 = BIT(I40E_PHY_TYPE_40GBASE_KR4),
+ I40E_CAP_PHY_TYPE_XAUI = BIT(I40E_PHY_TYPE_XAUI),
+ I40E_CAP_PHY_TYPE_XFI = BIT(I40E_PHY_TYPE_XFI),
+ I40E_CAP_PHY_TYPE_SFI = BIT(I40E_PHY_TYPE_SFI),
+ I40E_CAP_PHY_TYPE_XLAUI = BIT(I40E_PHY_TYPE_XLAUI),
+ I40E_CAP_PHY_TYPE_XLPPI = BIT(I40E_PHY_TYPE_XLPPI),
+ I40E_CAP_PHY_TYPE_40GBASE_CR4_CU = BIT(I40E_PHY_TYPE_40GBASE_CR4_CU),
+ I40E_CAP_PHY_TYPE_10GBASE_CR1_CU = BIT(I40E_PHY_TYPE_10GBASE_CR1_CU),
+ I40E_CAP_PHY_TYPE_10GBASE_AOC = BIT(I40E_PHY_TYPE_10GBASE_AOC),
+ I40E_CAP_PHY_TYPE_40GBASE_AOC = BIT(I40E_PHY_TYPE_40GBASE_AOC),
+ I40E_CAP_PHY_TYPE_100BASE_TX = BIT(I40E_PHY_TYPE_100BASE_TX),
+ I40E_CAP_PHY_TYPE_1000BASE_T = BIT(I40E_PHY_TYPE_1000BASE_T),
+ I40E_CAP_PHY_TYPE_10GBASE_T = BIT(I40E_PHY_TYPE_10GBASE_T),
+ I40E_CAP_PHY_TYPE_10GBASE_SR = BIT(I40E_PHY_TYPE_10GBASE_SR),
+ I40E_CAP_PHY_TYPE_10GBASE_LR = BIT(I40E_PHY_TYPE_10GBASE_LR),
+ I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU = BIT(I40E_PHY_TYPE_10GBASE_SFPP_CU),
+ I40E_CAP_PHY_TYPE_10GBASE_CR1 = BIT(I40E_PHY_TYPE_10GBASE_CR1),
+ I40E_CAP_PHY_TYPE_40GBASE_CR4 = BIT(I40E_PHY_TYPE_40GBASE_CR4),
+ I40E_CAP_PHY_TYPE_40GBASE_SR4 = BIT(I40E_PHY_TYPE_40GBASE_SR4),
+ I40E_CAP_PHY_TYPE_40GBASE_LR4 = BIT(I40E_PHY_TYPE_40GBASE_LR4),
+ I40E_CAP_PHY_TYPE_1000BASE_SX = BIT(I40E_PHY_TYPE_1000BASE_SX),
+ I40E_CAP_PHY_TYPE_1000BASE_LX = BIT(I40E_PHY_TYPE_1000BASE_LX),
+ I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL = BIT(I40E_PHY_TYPE_1000BASE_T_OPTICAL),
+ I40E_CAP_PHY_TYPE_20GBASE_KR2 = BIT(I40E_PHY_TYPE_20GBASE_KR2)
};
struct i40e_phy_info {
struct i40e_link_status link_info;
struct i40e_link_status link_info_old;
- u32 autoneg_advertised;
- u32 phy_id;
- u32 module_type;
bool get_link_info;
enum i40e_media_type media_type;
+ /* all the phy types the NVM is capable of */
+ enum i40e_aq_capabilities_phy_type phy_types;
};
#define I40E_HW_CAP_MAX_GPIO 30
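The new module_type[] array in i40e_link_status carries the byte layout documented in the hunk above: byte 0 is the module identifier, byte 1 the 10/40G compliance bits, byte 2 the 1G compliance bits. A minimal sketch of decoding it (the helper name is hypothetical):

/* Sketch: does the link report a QSFP module with 40GBASE-SR4? */
static inline bool
ixl_module_is_qsfp_sr4(const struct i40e_link_status *ls)
{
	return (ls->module_type[0] == I40E_MODULE_TYPE_QSFP &&
	    (ls->module_type[1] & I40E_MODULE_TYPE_40G_SR4) != 0);
}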
@@ -286,7 +322,17 @@ struct i40e_hw_capabilities {
bool dcb;
bool fcoe;
bool iscsi; /* Indicates iSCSI enabled */
- bool mfp_mode_1;
+ bool flex10_enable;
+ bool flex10_capable;
+ u32 flex10_mode;
+#define I40E_FLEX10_MODE_UNKNOWN 0x0
+#define I40E_FLEX10_MODE_DCC 0x1
+#define I40E_FLEX10_MODE_DCI 0x2
+
+ u32 flex10_status;
+#define I40E_FLEX10_STATUS_DCC_ERROR 0x1
+#define I40E_FLEX10_STATUS_VC_MODE 0x2
+
bool mgmt_cem;
bool ieee_1588;
bool iwarp;
@@ -315,6 +361,7 @@ struct i40e_hw_capabilities {
u8 rx_buf_chain_len;
u32 enabled_tcmap;
u32 maxtc;
+ u64 wr_csr_prot;
};
struct i40e_mac_info {
@@ -342,6 +389,7 @@ struct i40e_nvm_info {
bool blank_nvm_mode; /* is NVM empty (no FW present)*/
u16 version; /* NVM package version */
u32 eetrack; /* NVM data version */
+ u32 oem_ver; /* OEM version info */
};
/* definitions used in NVM update support */
@@ -360,12 +408,17 @@ enum i40e_nvmupd_cmd {
I40E_NVMUPD_CSUM_CON,
I40E_NVMUPD_CSUM_SA,
I40E_NVMUPD_CSUM_LCB,
+ I40E_NVMUPD_STATUS,
+ I40E_NVMUPD_EXEC_AQ,
+ I40E_NVMUPD_GET_AQ_RESULT,
};
enum i40e_nvmupd_state {
I40E_NVMUPD_STATE_INIT,
I40E_NVMUPD_STATE_READING,
- I40E_NVMUPD_STATE_WRITING
+ I40E_NVMUPD_STATE_WRITING,
+ I40E_NVMUPD_STATE_INIT_WAIT,
+ I40E_NVMUPD_STATE_WRITE_WAIT,
};
/* nvm_access definition and its masks/shifts need to be accessible to
@@ -384,6 +437,7 @@ enum i40e_nvmupd_state {
#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB)
#define I40E_NVM_ERA 0x4
#define I40E_NVM_CSUM 0x8
+#define I40E_NVM_EXEC 0xf
#define I40E_NVM_ADAPT_SHIFT 16
#define I40E_NVM_ADAPT_MASK (0xffffULL << I40E_NVM_ADAPT_SHIFT)
@@ -464,6 +518,8 @@ struct i40e_fc_info {
#define I40E_APP_PROTOID_FIP 0x8914
#define I40E_APP_SEL_ETHTYPE 0x1
#define I40E_APP_SEL_TCPIP 0x2
+#define I40E_CEE_APP_SEL_ETHTYPE 0x0
+#define I40E_CEE_APP_SEL_TCPIP 0x1
/* CEE or IEEE 802.1Qaz ETS Configuration data */
struct i40e_dcb_ets_config {
@@ -495,6 +551,7 @@ struct i40e_dcbx_config {
#define I40E_DCBX_MODE_CEE 0x1
#define I40E_DCBX_MODE_IEEE 0x2
u32 numapps;
+ u32 tlv_status; /* CEE mode TLV status */
struct i40e_dcb_ets_config etscfg;
struct i40e_dcb_ets_config etsrec;
struct i40e_dcb_pfc_config pfc;
@@ -546,6 +603,8 @@ struct i40e_hw {
/* state of nvm update process */
enum i40e_nvmupd_state nvmupd_state;
+ struct i40e_aq_desc nvm_wb_desc;
+ struct i40e_virt_mem nvm_buff;
/* HMC info */
struct i40e_hmc_info hmc; /* HMC info struct */
@@ -554,13 +613,24 @@ struct i40e_hw {
u16 dcbx_status;
/* DCBX info */
- struct i40e_dcbx_config local_dcbx_config;
- struct i40e_dcbx_config remote_dcbx_config;
+ struct i40e_dcbx_config local_dcbx_config; /* Oper/Local Cfg */
+ struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */
+ struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */
/* debug mask */
u32 debug_mask;
+ char err_str[16];
};
-#define i40e_is_vf(_hw) ((_hw)->mac.type == I40E_MAC_VF)
+
+static INLINE bool i40e_is_vf(struct i40e_hw *hw)
+{
+#ifdef X722_SUPPORT
+ return (hw->mac.type == I40E_MAC_VF ||
+ hw->mac.type == I40E_MAC_X722_VF);
+#else
+ return hw->mac.type == I40E_MAC_VF;
+#endif
+}
struct i40e_driver_version {
u8 major_version;
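The old i40e_is_vf() macro becomes a static inline above because X722 support adds a second VF MAC type; call sites are unchanged. An illustrative usage sketch (function body and name are assumptions, not driver code):

static void
ixl_pf_only_setup(struct i40e_hw *hw)
{
	if (i40e_is_vf(hw))	/* covers I40E_MAC_VF and, with	      */
		return;		/* X722_SUPPORT, I40E_MAC_X722_VF too */
	/* ... PF-only register initialization would go here ... */
}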
@@ -663,7 +733,11 @@ enum i40e_rx_desc_status_bits {
I40E_RX_DESC_STATUS_CRCP_SHIFT = 4,
I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
+#ifdef X722_SUPPORT
+ I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8,
+#else
I40E_RX_DESC_STATUS_RESERVED1_SHIFT = 8,
+#endif
I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
I40E_RX_DESC_STATUS_FLM_SHIFT = 11,
@@ -671,12 +745,16 @@ enum i40e_rx_desc_status_bits {
I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
I40E_RX_DESC_STATUS_RESERVED2_SHIFT = 16, /* 2 BITS */
+#ifdef X722_SUPPORT
+ I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18,
+#else
I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18,
+#endif
I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
};
#define I40E_RXD_QW1_STATUS_SHIFT 0
-#define I40E_RXD_QW1_STATUS_MASK (((1 << I40E_RX_DESC_STATUS_LAST) - 1) << \
+#define I40E_RXD_QW1_STATUS_MASK ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) << \
I40E_RXD_QW1_STATUS_SHIFT)
#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
@@ -684,8 +762,7 @@ enum i40e_rx_desc_status_bits {
I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
-#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK (0x1UL << \
- I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
#define I40E_RXD_QW1_STATUS_UMBCAST_SHIFT I40E_RX_DESC_STATUS_UMBCAST
#define I40E_RXD_QW1_STATUS_UMBCAST_MASK (0x3UL << \
@@ -831,8 +908,7 @@ enum i40e_rx_ptype_payload_layer {
I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63
-#define I40E_RXD_QW1_LENGTH_SPH_MASK (0x1ULL << \
- I40E_RXD_QW1_LENGTH_SPH_SHIFT)
+#define I40E_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT)
#define I40E_RXD_QW1_NEXTP_SHIFT 38
#define I40E_RXD_QW1_NEXTP_MASK (0x1FFFULL << I40E_RXD_QW1_NEXTP_SHIFT)
@@ -1035,12 +1111,11 @@ enum i40e_tx_ctx_desc_eipt_offload {
#define I40E_TXD_CTX_QW0_NATT_SHIFT 9
#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
-#define I40E_TXD_CTX_UDP_TUNNELING (0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+#define I40E_TXD_CTX_UDP_TUNNELING BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT)
#define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11
-#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK (0x1ULL << \
- I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK
@@ -1052,6 +1127,10 @@ enum i40e_tx_ctx_desc_eipt_offload {
#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \
I40E_TXD_CTX_QW0_DECTTL_SHIFT)
+#ifdef X722_SUPPORT
+#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT 23
+#define I40E_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT)
+#endif
struct i40e_nop_desc {
__le64 rsvd;
__le64 dtype_cmd;
@@ -1088,15 +1167,38 @@ struct i40e_filter_program_desc {
/* Packet Classifier Types for filters */
enum i40e_filter_pctype {
+#ifdef X722_SUPPORT
+ /* Note: Values 0-28 are reserved for future use.
+ * Values 29, 30, and 32 are not supported on XL710 and X710.
+ */
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
+#else
/* Note: Values 0-30 are reserved for future use */
+#endif
I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
+#ifdef X722_SUPPORT
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32,
+#else
/* Note: Value 32 is reserved for future use */
+#endif
I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
+#ifdef X722_SUPPORT
+ /* Note: Values 37-38 are reserved for future use.
+ * Values 39, 40, and 42 are not supported on XL710 and X710.
+ */
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
+#else
/* Note: Values 37-40 are reserved for future use */
+#endif
I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
+#ifdef X722_SUPPORT
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42,
+#endif
I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
@@ -1123,8 +1225,7 @@ enum i40e_filter_program_desc_fd_status {
};
#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23
-#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \
- I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK BIT_ULL(I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
#define I40E_TXD_FLTR_QW1_DTYPE_SHIFT 0
#define I40E_TXD_FLTR_QW1_DTYPE_MASK (0xFUL << I40E_TXD_FLTR_QW1_DTYPE_SHIFT)
@@ -1145,13 +1246,18 @@ enum i40e_filter_program_desc_pcmd {
#define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
-#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK (0x1ULL << \
- I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
+#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \
I40E_TXD_FLTR_QW1_CMD_SHIFT)
#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \
I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
+#ifdef X722_SUPPORT
+
+#define I40E_TXD_FLTR_QW1_ATR_SHIFT (0xEULL + \
+ I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_ATR_MASK BIT_ULL(I40E_TXD_FLTR_QW1_ATR_SHIFT)
+#endif
#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \
@@ -1258,6 +1364,9 @@ struct i40e_hw_port_stats {
/* flow director stats */
u64 fd_atr_match;
u64 fd_sb_match;
+ u64 fd_atr_tunnel_match;
+ u32 fd_atr_status;
+ u32 fd_sb_status;
/* EEE LPI */
u32 tx_lpi_status;
u32 rx_lpi_status;
@@ -1283,6 +1392,7 @@ struct i40e_hw_port_stats {
#define I40E_SR_PBA_FLAGS 0x15
#define I40E_SR_PBA_BLOCK_PTR 0x16
#define I40E_SR_BOOT_CONFIG_PTR 0x17
+#define I40E_NVM_OEM_VER_OFF 0x83
#define I40E_SR_NVM_DEV_STARTER_VERSION 0x18
#define I40E_SR_NVM_WAKE_ON_LAN 0x19
#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27
diff --git a/sys/dev/ixl/i40e_virtchnl.h b/sys/dev/ixl/i40e_virtchnl.h
index 5a5e8f3..9da33a0 100755..100644
--- a/sys/dev/ixl/i40e_virtchnl.h
+++ b/sys/dev/ixl/i40e_virtchnl.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2014, Intel Corporation
+ Copyright (c) 2013-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -89,7 +89,6 @@ enum i40e_virtchnl_ops {
I40E_VIRTCHNL_OP_GET_STATS = 15,
I40E_VIRTCHNL_OP_FCOE = 16,
I40E_VIRTCHNL_OP_EVENT = 17,
- I40E_VIRTCHNL_OP_CONFIG_RSS = 18,
};
/* Virtual channel message descriptor. This overlays the admin queue
@@ -118,7 +117,9 @@ struct i40e_virtchnl_msg {
* error regardless of version mismatch.
*/
#define I40E_VIRTCHNL_VERSION_MAJOR 1
-#define I40E_VIRTCHNL_VERSION_MINOR 0
+#define I40E_VIRTCHNL_VERSION_MINOR 1
+#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
+
struct i40e_virtchnl_version_info {
u32 major;
u32 minor;
@@ -137,7 +138,8 @@ struct i40e_virtchnl_version_info {
*/
/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
- * VF sends this request to PF with no parameters
+ * Version 1.0 VF sends this request to PF with no parameters
+ * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
* PF responds with an indirect message containing
* i40e_virtchnl_vf_resource and one or more
* i40e_virtchnl_vsi_resource structures.
@@ -151,10 +153,13 @@ struct i40e_virtchnl_vsi_resource {
u8 default_mac_addr[I40E_ETH_LENGTH_OF_ADDRESS];
};
/* VF offload flags */
-#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
-#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
-#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
-#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
+#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
+#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
+#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
+#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
+#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
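/*
 * Note: with VERSION_MINOR 1 a VF advertises its capabilities as a u32
 * bitmap in the GET_VF_RESOURCES request (see the comment above); a
 * VF-side sketch, assuming the shared-code i40e_aq_send_msg_to_pf()
 * helper keeps its usual signature:
 *
 *	u32 caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
 *	    I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
 *	    I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
 *	i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
 *	    I40E_SUCCESS, (u8 *)&caps, sizeof(caps), NULL);
 */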
struct i40e_virtchnl_vf_resource {
u16 num_vsis;
diff --git a/sys/dev/ixl/if_ixl.c b/sys/dev/ixl/if_ixl.c
index 63b6235..14d0aeb 100755..100644
--- a/sys/dev/ixl/if_ixl.c
+++ b/sys/dev/ixl/if_ixl.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2014, Intel Corporation
+ Copyright (c) 2013-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -32,15 +32,22 @@
******************************************************************************/
/*$FreeBSD$*/
+#ifndef IXL_STANDALONE_BUILD
#include "opt_inet.h"
#include "opt_inet6.h"
+#endif
+
#include "ixl.h"
#include "ixl_pf.h"
+#ifdef RSS
+#include <net/rss_config.h>
+#endif
+
/*********************************************************************
* Driver version
*********************************************************************/
-char ixl_driver_version[] = "1.2.8";
+char ixl_driver_version[] = "1.4.3";
/*********************************************************************
* PCI Device ID Table
@@ -62,6 +69,14 @@ static ixl_vendor_info_t ixl_vendor_info_array[] =
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0},
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0},
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0},
+ {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, 0, 0, 0},
+ {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2, 0, 0, 0},
+ {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A, 0, 0, 0},
+#ifdef X722_SUPPORT
+ {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, 0, 0, 0},
+ {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, 0, 0, 0},
+ {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, 0, 0, 0},
+#endif
/* required last entry */
{0, 0, 0, 0, 0}
};
@@ -94,7 +109,7 @@ static void ixl_update_link_status(struct ixl_pf *);
static int ixl_allocate_pci_resources(struct ixl_pf *);
static u16 ixl_get_bus_info(struct i40e_hw *, device_t);
static int ixl_setup_stations(struct ixl_pf *);
-static int ixl_setup_vsi(struct ixl_vsi *);
+static int ixl_switch_config(struct ixl_pf *);
static int ixl_initialize_vsi(struct ixl_vsi *);
static int ixl_assign_vsi_msix(struct ixl_pf *);
static int ixl_assign_vsi_legacy(struct ixl_pf *);
@@ -105,16 +120,17 @@ static void ixl_configure_legacy(struct ixl_pf *);
static void ixl_free_pci_resources(struct ixl_pf *);
static void ixl_local_timer(void *);
static int ixl_setup_interface(device_t, struct ixl_vsi *);
-static bool ixl_config_link(struct i40e_hw *);
+static void ixl_link_event(struct ixl_pf *, struct i40e_arq_event_info *);
static void ixl_config_rss(struct ixl_vsi *);
static void ixl_set_queue_rx_itr(struct ixl_queue *);
static void ixl_set_queue_tx_itr(struct ixl_queue *);
static int ixl_set_advertised_speeds(struct ixl_pf *, int);
-static void ixl_enable_rings(struct ixl_vsi *);
-static void ixl_disable_rings(struct ixl_vsi *);
-static void ixl_enable_intr(struct ixl_vsi *);
-static void ixl_disable_intr(struct ixl_vsi *);
+static int ixl_enable_rings(struct ixl_vsi *);
+static int ixl_disable_rings(struct ixl_vsi *);
+static void ixl_enable_intr(struct ixl_vsi *);
+static void ixl_disable_intr(struct ixl_vsi *);
+static void ixl_disable_rings_intr(struct ixl_vsi *);
static void ixl_enable_adminq(struct i40e_hw *);
static void ixl_disable_adminq(struct i40e_hw *);
@@ -131,6 +147,7 @@ static void ixl_unregister_vlan(void *, struct ifnet *, u16);
static void ixl_setup_vlan_filters(struct ixl_vsi *);
static void ixl_init_filters(struct ixl_vsi *);
+static void ixl_reconfigure_filters(struct ixl_vsi *vsi);
static void ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan);
static void ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan);
static void ixl_add_hw_filters(struct ixl_vsi *, int, int);
@@ -138,10 +155,14 @@ static void ixl_del_hw_filters(struct ixl_vsi *, int);
static struct ixl_mac_filter *
ixl_find_filter(struct ixl_vsi *, u8 *, s16);
static void ixl_add_mc_filter(struct ixl_vsi *, u8 *);
+static void ixl_free_mac_filters(struct ixl_vsi *vsi);
+
/* Sysctl debug interface */
+#ifdef IXL_DEBUG_SYSCTL
static int ixl_debug_info(SYSCTL_HANDLER_ARGS);
static void ixl_print_debug_info(struct ixl_pf *);
+#endif
/* The MSI/X Interrupt handlers */
static void ixl_intr(void *);
@@ -167,6 +188,7 @@ static void ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *,
struct i40e_eth_stats *);
static void ixl_update_stats_counters(struct ixl_pf *);
static void ixl_update_eth_stats(struct ixl_vsi *);
+static void ixl_update_vsi_stats(struct ixl_vsi *);
static void ixl_pf_reset_stats(struct ixl_pf *);
static void ixl_vsi_reset_stats(struct ixl_vsi *);
static void ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
@@ -174,13 +196,27 @@ static void ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
static void ixl_stat_update32(struct i40e_hw *, u32, bool,
u64 *, u64 *);
-#ifdef IXL_DEBUG
+#ifdef IXL_DEBUG_SYSCTL
static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
-static int ixl_sysctl_dump_txd(SYSCTL_HANDLER_ARGS);
+#endif
+
+#ifdef PCI_IOV
+static int ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);
+
+static int ixl_init_iov(device_t dev, uint16_t num_vfs, const nvlist_t*);
+static void ixl_uninit_iov(device_t dev);
+static int ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t*);
+
+static void ixl_handle_vf_msg(struct ixl_pf *,
+ struct i40e_arq_event_info *);
+static void ixl_handle_vflr(void *arg, int pending);
+
+static void ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf);
+static void ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf);
#endif
/*********************************************************************
@@ -193,6 +229,11 @@ static device_method_t ixl_methods[] = {
DEVMETHOD(device_attach, ixl_attach),
DEVMETHOD(device_detach, ixl_detach),
DEVMETHOD(device_shutdown, ixl_shutdown),
+#ifdef PCI_IOV
+ DEVMETHOD(pci_init_iov, ixl_init_iov),
+ DEVMETHOD(pci_uninit_iov, ixl_uninit_iov),
+ DEVMETHOD(pci_add_vf, ixl_add_vf),
+#endif
{0, 0}
};
@@ -205,6 +246,9 @@ DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);
MODULE_DEPEND(ixl, pci, 1, 1, 1);
MODULE_DEPEND(ixl, ether, 1, 1, 1);
+#ifdef DEV_NETMAP
+MODULE_DEPEND(ixl, netmap, 1, 1, 1);
+#endif /* DEV_NETMAP */
/*
** Global reset mutex
@@ -279,6 +323,10 @@ int ixl_atr_rate = 20;
TUNABLE_INT("hw.ixl.atr_rate", &ixl_atr_rate);
#endif
+#ifdef DEV_NETMAP
+#define NETMAP_IXL_MAIN /* only bring in one part of the netmap code */
+#include <dev/netmap/if_ixl_netmap.h>
+#endif /* DEV_NETMAP */
static char *ixl_fc_string[6] = {
"None",
@@ -289,6 +337,10 @@ static char *ixl_fc_string[6] = {
"Default"
};
+static MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
+
+static uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
/*********************************************************************
* Device identification routine
@@ -365,6 +417,10 @@ ixl_attach(device_t dev)
struct ixl_vsi *vsi;
u16 bus;
int error = 0;
+#ifdef PCI_IOV
+ nvlist_t *pf_schema, *vf_schema;
+ int iov_error;
+#endif
INIT_DEBUGOUT("ixl_attach: begin");
@@ -427,7 +483,23 @@ ixl_attach(device_t dev)
OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
&ixl_dynamic_tx_itr, 0, "Dynamic TX ITR");
-#ifdef IXL_DEBUG
+#ifdef IXL_DEBUG_SYSCTL
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, pf, 0,
+ ixl_debug_info, "I", "Debug Information");
+
+ /* Debug shared-code message level */
+ SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "debug_mask", CTLFLAG_RW,
+ &pf->hw.debug_mask, 0, "Debug Message Level");
+
+ SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "vc_debug_level", CTLFLAG_RW, &pf->vc_debug_lvl,
+ 0, "PF/VF Virtual Channel debug level");
+
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
@@ -452,11 +524,6 @@ ixl_attach(device_t dev)
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
-
- SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "dump_desc", CTLTYPE_INT | CTLFLAG_WR,
- pf, 0, ixl_sysctl_dump_txd, "I", "Desc dump");
#endif
/* Save off the PCI information */
@@ -471,6 +538,8 @@ ixl_attach(device_t dev)
hw->bus.device = pci_get_slot(dev);
hw->bus.func = pci_get_function(dev);
+ pf->vc_debug_lvl = 1;
+
/* Do PCI setup - map BAR0, etc */
if (ixl_allocate_pci_resources(pf)) {
device_printf(dev, "Allocation of PCI resources failed\n");
@@ -478,13 +547,6 @@ ixl_attach(device_t dev)
goto err_out;
}
- /* Create for initial debugging use */
- SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, pf, 0,
- ixl_debug_info, "I", "Debug Information");
-
-
/* Establish a clean starting point */
i40e_clear_hw(hw);
error = i40e_pf_reset(hw);
@@ -494,48 +556,6 @@ ixl_attach(device_t dev)
goto err_out;
}
- /* For now always do an initial CORE reset on first device */
- {
- static int ixl_dev_count;
- static int ixl_dev_track[32];
- u32 my_dev;
- int i, found = FALSE;
- u16 bus = pci_get_bus(dev);
-
- mtx_lock(&ixl_reset_mtx);
- my_dev = (bus << 8) | hw->bus.device;
-
- for (i = 0; i < ixl_dev_count; i++) {
- if (ixl_dev_track[i] == my_dev)
- found = TRUE;
- }
-
- if (!found) {
- u32 reg;
-
- ixl_dev_track[ixl_dev_count] = my_dev;
- ixl_dev_count++;
-
- INIT_DEBUGOUT("Initial CORE RESET\n");
- wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
- ixl_flush(hw);
- i = 50;
- do {
- i40e_msec_delay(50);
- reg = rd32(hw, I40E_GLGEN_RSTAT);
- if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
- break;
- } while (i--);
-
- /* paranoia */
- wr32(hw, I40E_PF_ATQLEN, 0);
- wr32(hw, I40E_PF_ATQBAL, 0);
- wr32(hw, I40E_PF_ATQBAH, 0);
- i40e_clear_pxe_mode(hw);
- }
- mtx_unlock(&ixl_reset_mtx);
- }
-
/* Set admin queue parameters */
hw->aq.num_arq_entries = IXL_AQ_LEN;
hw->aq.num_asq_entries = IXL_AQ_LEN;
@@ -583,7 +603,8 @@ ixl_attach(device_t dev)
}
/* Set up host memory cache */
- error = i40e_init_lan_hmc(hw, vsi->num_queues, vsi->num_queues, 0, 0);
+ error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
+ hw->func_caps.num_rx_qp, 0, 0);
if (error) {
device_printf(dev, "init_lan_hmc failed: %d\n", error);
goto err_get_cap;
@@ -625,24 +646,18 @@ ixl_attach(device_t dev)
if (error)
goto err_late;
- i40e_msec_delay(75);
- error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
- if (error) {
- device_printf(dev, "link restart failed, aq_err=%d\n",
- pf->hw.aq.asq_last_status);
+ if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
+ (hw->aq.fw_maj_ver < 4)) {
+ i40e_msec_delay(75);
+ error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
+ if (error)
+ device_printf(dev, "link restart failed, aq_err=%d\n",
+ pf->hw.aq.asq_last_status);
}
-
- /* Determine link state */
- vsi->link_up = ixl_config_link(hw);
- /* Report if Unqualified modules are found */
- if ((vsi->link_up == FALSE) &&
- (pf->hw.phy.link_info.link_info &
- I40E_AQ_MEDIA_AVAILABLE) &&
- (!(pf->hw.phy.link_info.an_info &
- I40E_AQ_QUALIFIED_MODULE)))
- device_printf(dev, "Link failed because "
- "an unqualified module was detected\n");
+ /* Determine link state */
+ i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
+ i40e_get_link_status(hw, &pf->link_up);
/* Setup OS specific network interface */
if (ixl_setup_interface(dev, vsi) != 0) {
@@ -651,6 +666,18 @@ ixl_attach(device_t dev)
goto err_late;
}
+ error = ixl_switch_config(pf);
+ if (error) {
+ device_printf(dev, "Initial switch config failed: %d\n", error);
+ goto err_mac_hmc;
+ }
+
+ /* Limit phy interrupts to link and modules failure */
+ error = i40e_aq_set_phy_int_mask(hw,
+ I40E_AQ_EVENT_LINK_UPDOWN | I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
+ if (error)
+ device_printf(dev, "set phy mask failed: %d\n", error);
+
/* Get the bus configuration and set the shared code */
bus = ixl_get_bus_info(hw, dev);
i40e_set_pci_config_data(hw, bus);
@@ -660,19 +687,36 @@ ixl_attach(device_t dev)
ixl_update_stats_counters(pf);
ixl_add_hw_stats(pf);
- /* Reset port's advertised speeds */
- if (!i40e_is_40G_device(hw->device_id)) {
- pf->advertised_speed = 0x7;
- ixl_set_advertised_speeds(pf, 0x7);
- }
-
/* Register for VLAN events */
vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);
+#ifdef PCI_IOV
+ /* SR-IOV is only supported when MSI-X is in use. */
+ if (pf->msix > 1) {
+ pf_schema = pci_iov_schema_alloc_node();
+ vf_schema = pci_iov_schema_alloc_node();
+ pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
+ pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
+ IOV_SCHEMA_HASDEFAULT, TRUE);
+ pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
+ IOV_SCHEMA_HASDEFAULT, FALSE);
+ pci_iov_schema_add_bool(vf_schema, "allow-promisc",
+ IOV_SCHEMA_HASDEFAULT, FALSE);
+
+ iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
+ if (iov_error != 0)
+ device_printf(dev,
+ "Failed to initialize SR-IOV (error=%d)\n",
+ iov_error);
+ }
+#endif
+#ifdef DEV_NETMAP
+ ixl_netmap_attach(vsi);
+#endif /* DEV_NETMAP */
INIT_DEBUGOUT("ixl_attach: end");
return (0);
@@ -708,6 +752,9 @@ ixl_detach(device_t dev)
struct ixl_vsi *vsi = &pf->vsi;
struct ixl_queue *que = vsi->queues;
i40e_status status;
+#ifdef PCI_IOV
+ int error;
+#endif
INIT_DEBUGOUT("ixl_detach: begin");
@@ -717,9 +764,20 @@ ixl_detach(device_t dev)
return (EBUSY);
}
- IXL_PF_LOCK(pf);
- ixl_stop(pf);
- IXL_PF_UNLOCK(pf);
+#ifdef PCI_IOV
+ error = pci_iov_detach(dev);
+ if (error != 0) {
+ device_printf(dev, "SR-IOV in use; detach first.\n");
+ return (error);
+ }
+#endif
+
+ ether_ifdetach(vsi->ifp);
+ if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ IXL_PF_LOCK(pf);
+ ixl_stop(pf);
+ IXL_PF_UNLOCK(pf);
+ }
for (int i = 0; i < vsi->num_queues; i++, que++) {
if (que->tq) {
@@ -747,10 +805,10 @@ ixl_detach(device_t dev)
if (vsi->vlan_detach != NULL)
EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);
- ether_ifdetach(vsi->ifp);
callout_drain(&pf->timer);
-
-
+#ifdef DEV_NETMAP
+ netmap_detach(vsi->ifp);
+#endif /* DEV_NETMAP */
ixl_free_pci_resources(pf);
bus_generic_detach(dev);
if_free(vsi->ifp);
@@ -919,7 +977,7 @@ static int
ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
{
struct ixl_vsi *vsi = ifp->if_softc;
- struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
+ struct ixl_pf *pf = vsi->back;
struct ifreq *ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
struct ifaddr *ifa = (struct ifaddr *)data;
@@ -1009,6 +1067,9 @@ ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
break;
case SIOCSIFMEDIA:
case SIOCGIFMEDIA:
+#ifdef IFM_ETH_XTYPE
+ case SIOCGIFXMEDIA:
+#endif
IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
break;
@@ -1081,7 +1142,8 @@ ixl_init_locked(struct ixl_pf *pf)
bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
I40E_ETH_LENGTH_OF_ADDRESS);
if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
- i40e_validate_mac_addr(tmpaddr)) {
+ (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
+ ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
bcopy(tmpaddr, hw->mac.addr,
I40E_ETH_LENGTH_OF_ADDRESS);
ret = i40e_aq_mac_address_write(hw,
@@ -1091,6 +1153,8 @@ ixl_init_locked(struct ixl_pf *pf)
device_printf(dev, "LLA address"
"change failed!!\n");
return;
+ } else {
+ ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
}
}
@@ -1116,11 +1180,8 @@ ixl_init_locked(struct ixl_pf *pf)
/* Set up RSS */
ixl_config_rss(vsi);
- /* Setup the VSI */
- ixl_setup_vsi(vsi);
-
/*
- ** Prepare the rings, hmc contexts, etc...
+ ** Prepare the VSI: rings, hmc contexts, etc...
*/
if (ixl_initialize_vsi(vsi)) {
device_printf(dev, "initialize vsi failed!!\n");
@@ -1147,6 +1208,8 @@ ixl_init_locked(struct ixl_pf *pf)
i40e_aq_set_default_vsi(hw, vsi->seid, NULL);
+ ixl_reconfigure_filters(vsi);
+
/* Set MTU in hardware*/
int aq_error = i40e_aq_set_mac_config(hw, vsi->max_frame_size,
TRUE, 0, NULL);
@@ -1241,6 +1304,11 @@ ixl_intr(void *arg)
mask = rd32(hw, I40E_PFINT_ICR0_ENA);
+#ifdef PCI_IOV
+ if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
+ taskqueue_enqueue(pf->tq, &pf->vflr_task);
+#endif
+
if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
taskqueue_enqueue(pf->tq, &pf->adminq);
return;
@@ -1344,8 +1412,12 @@ ixl_msix_adminq(void *arg)
mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
}
- if (reg & I40E_PFINT_ICR0_VFLR_MASK)
+#ifdef PCI_IOV
+ if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
+ taskqueue_enqueue(pf->tq, &pf->vflr_task);
+ }
+#endif
reg = rd32(hw, I40E_PFINT_DYN_CTL0);
reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
@@ -1367,18 +1439,20 @@ static void
ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
{
struct ixl_vsi *vsi = ifp->if_softc;
- struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
+ struct ixl_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
INIT_DEBUGOUT("ixl_media_status: begin");
IXL_PF_LOCK(pf);
+ hw->phy.get_link_info = TRUE;
+ i40e_get_link_status(hw, &pf->link_up);
ixl_update_link_status(pf);
ifmr->ifm_status = IFM_AVALID;
ifmr->ifm_active = IFM_ETHER;
- if (!vsi->link_up) {
+ if (!pf->link_up) {
IXL_PF_UNLOCK(pf);
return;
}
@@ -1403,7 +1477,6 @@ ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
ifmr->ifm_active |= IFM_1000_LX;
break;
/* 10 G */
- case I40E_PHY_TYPE_10GBASE_CR1_CU:
case I40E_PHY_TYPE_10GBASE_SFPP_CU:
ifmr->ifm_active |= IFM_10G_TWINAX;
break;
@@ -1427,6 +1500,49 @@ ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
case I40E_PHY_TYPE_40GBASE_LR4:
ifmr->ifm_active |= IFM_40G_LR4;
break;
+#ifndef IFM_ETH_XTYPE
+ case I40E_PHY_TYPE_1000BASE_KX:
+ ifmr->ifm_active |= IFM_1000_CX;
+ break;
+ case I40E_PHY_TYPE_10GBASE_CR1_CU:
+ case I40E_PHY_TYPE_10GBASE_CR1:
+ ifmr->ifm_active |= IFM_10G_TWINAX;
+ break;
+ case I40E_PHY_TYPE_10GBASE_KX4:
+ ifmr->ifm_active |= IFM_10G_CX4;
+ break;
+ case I40E_PHY_TYPE_10GBASE_KR:
+ ifmr->ifm_active |= IFM_10G_SR;
+ break;
+ case I40E_PHY_TYPE_40GBASE_KR4:
+ case I40E_PHY_TYPE_XLPPI:
+ ifmr->ifm_active |= IFM_40G_SR4;
+ break;
+#else
+ case I40E_PHY_TYPE_1000BASE_KX:
+ ifmr->ifm_active |= IFM_1000_KX;
+ break;
+ /* ERJ: What's the difference between these? */
+ case I40E_PHY_TYPE_10GBASE_CR1_CU:
+ case I40E_PHY_TYPE_10GBASE_CR1:
+ ifmr->ifm_active |= IFM_10G_CR1;
+ break;
+ case I40E_PHY_TYPE_10GBASE_KX4:
+ ifmr->ifm_active |= IFM_10G_KX4;
+ break;
+ case I40E_PHY_TYPE_10GBASE_KR:
+ ifmr->ifm_active |= IFM_10G_KR;
+ break;
+ case I40E_PHY_TYPE_20GBASE_KR2:
+ ifmr->ifm_active |= IFM_20G_KR2;
+ break;
+ case I40E_PHY_TYPE_40GBASE_KR4:
+ ifmr->ifm_active |= IFM_40G_KR4;
+ break;
+ case I40E_PHY_TYPE_XLPPI:
+ ifmr->ifm_active |= IFM_40G_XLPPI;
+ break;
+#endif
default:
ifmr->ifm_active |= IFM_UNKNOWN;
break;
@@ -1721,8 +1837,10 @@ ixl_local_timer(void *arg)
vsi->active_queues |= ((u64)1 << que->me);
}
if (que->busy >= IXL_MAX_TX_BUSY) {
+#ifdef IXL_DEBUG
device_printf(dev,"Warning queue %d "
"appears to be hung!\n", i);
+#endif
que->busy = IXL_QUEUE_HUNG;
++hung;
}
@@ -1751,20 +1869,29 @@ ixl_update_link_status(struct ixl_pf *pf)
struct i40e_hw *hw = &pf->hw;
struct ifnet *ifp = vsi->ifp;
device_t dev = pf->dev;
- enum i40e_fc_mode fc;
-
- if (vsi->link_up){
+ if (pf->link_up){
if (vsi->link_active == FALSE) {
- i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
+ pf->fc = hw->fc.current_mode;
if (bootverbose) {
- fc = hw->fc.current_mode;
device_printf(dev,"Link is up %d Gbps %s,"
" Flow Control: %s\n",
- ((vsi->link_speed == I40E_LINK_SPEED_40GB)? 40:10),
- "Full Duplex", ixl_fc_string[fc]);
+ ((pf->link_speed ==
+ I40E_LINK_SPEED_40GB)? 40:10),
+ "Full Duplex", ixl_fc_string[pf->fc]);
}
vsi->link_active = TRUE;
+ /*
+ ** Warn user if link speed on an NPAR-enabled
+ ** partition is not at least 10Gbps
+ */
+ if (hw->func_caps.npar_enable &&
+ (hw->phy.link_info.link_speed ==
+ I40E_LINK_SPEED_1GB ||
+ hw->phy.link_info.link_speed ==
+ I40E_LINK_SPEED_100MB))
+ device_printf(dev, "The partition detected"
+ "link speed that is less than 10Gbps\n");
if_link_state_change(ifp, LINK_STATE_UP);
}
} else { /* Link down */
@@ -1795,7 +1922,10 @@ ixl_stop(struct ixl_pf *pf)
mtx_assert(&pf->pf_mtx, MA_OWNED);
INIT_DEBUGOUT("ixl_stop: begin\n");
- ixl_disable_intr(vsi);
+ if (pf->num_vfs == 0)
+ ixl_disable_intr(vsi);
+ else
+ ixl_disable_rings_intr(vsi);
ixl_disable_rings(vsi);
/* Tell the stack that the interface is no longer active */
@@ -1848,6 +1978,11 @@ ixl_assign_vsi_legacy(struct ixl_pf *pf)
taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
device_get_nameunit(dev));
TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
+
+#ifdef PCI_IOV
+ TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
+#endif
+
pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
taskqueue_thread_enqueue, &pf->tq);
taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
@@ -1893,6 +2028,11 @@ ixl_assign_vsi_msix(struct ixl_pf *pf)
pf->admvec = vector;
/* Tasklet for Admin Queue */
TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
+
+#ifdef PCI_IOV
+ TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
+#endif
+
pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
taskqueue_thread_enqueue, &pf->tq);
taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
@@ -1901,6 +2041,7 @@ ixl_assign_vsi_msix(struct ixl_pf *pf)
/* Now set up the stations */
for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
+ int cpu_id = i;
rid = vector + 1;
txr = &que->txr;
que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
@@ -1921,14 +2062,23 @@ ixl_assign_vsi_msix(struct ixl_pf *pf)
}
bus_describe_intr(dev, que->res, que->tag, "q%d", i);
/* Bind the vector to a CPU */
- bus_bind_intr(dev, que->res, i);
+#ifdef RSS
+ cpu_id = rss_getcpu(i % rss_getnumbuckets());
+#endif
+ bus_bind_intr(dev, que->res, cpu_id);
que->msix = vector;
TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
TASK_INIT(&que->task, 0, ixl_handle_que, que);
que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
taskqueue_thread_enqueue, &que->tq);
- taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
- device_get_nameunit(pf->dev));
+#ifdef RSS
+ taskqueue_start_threads_pinned(&que->tq, 1, PI_NET,
+ cpu_id, "%s (bucket %d)",
+ device_get_nameunit(dev), cpu_id);
+#else
+ taskqueue_start_threads(&que->tq, 1, PI_NET,
+ "%s que", device_get_nameunit(dev));
+#endif
}
return (0);
@@ -1995,6 +2145,12 @@ ixl_init_msix(struct ixl_pf *pf)
if ((ixl_max_queues != 0) && (ixl_max_queues <= queues))
queues = ixl_max_queues;
+#ifdef RSS
+ /* If we're doing RSS, clamp at the number of RSS buckets */
+ if (queues > rss_getnumbuckets())
+ queues = rss_getnumbuckets();
+#endif
+
/*
** Want one vector (RX/TX pair) per queue
** plus an additional for the admin queue.
@@ -2015,6 +2171,25 @@ ixl_init_msix(struct ixl_pf *pf)
"Using MSIX interrupts with %d vectors\n", vectors);
pf->msix = vectors;
pf->vsi.num_queues = queues;
+#ifdef RSS
+ /*
+ * If we're doing RSS, the number of queues needs to
+ * match the number of RSS buckets that are configured.
+ *
+ * + If there's more queues than RSS buckets, we'll end
+ * up with queues that get no traffic.
+ *
+ * + If there's more RSS buckets than queues, we'll end
+ * up having multiple RSS buckets map to the same queue,
+ * so there'll be some contention.
+ */
+ if (queues != rss_getnumbuckets()) {
+ device_printf(dev,
+ "%s: queues (%d) != RSS buckets (%d)"
+ "; performance will be impacted.\n",
+ __func__, queues, rss_getnumbuckets());
+ }
+#endif
return (vectors);
}
msi:
@@ -2281,10 +2456,16 @@ ixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type)
if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_SX))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_LX))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) ||
+ if (phy_type & (1 << I40E_PHY_TYPE_XAUI) ||
+ phy_type & (1 << I40E_PHY_TYPE_XFI) ||
phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
+
if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
@@ -2292,13 +2473,59 @@ ixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type)
if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
- phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4))
+ if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4) ||
+ phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
+ phy_type & (1 << I40E_PHY_TYPE_40GBASE_AOC) ||
+ phy_type & (1 << I40E_PHY_TYPE_XLAUI) ||
+ phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
+
+#ifndef IFM_ETH_XTYPE
+ if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
+
+ if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) ||
+ phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1) ||
+ phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC) ||
+ phy_type & (1 << I40E_PHY_TYPE_SFI))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
+
+ if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_XLPPI))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
+#else
+ if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
+
+ if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU)
+ || phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX_LONG, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_SFI))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
+
+ if (phy_type & (1 << I40E_PHY_TYPE_20GBASE_KR2))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
+
+ if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_XLPPI))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
+#endif
}
/*********************************************************************
@@ -2312,7 +2539,7 @@ ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
struct ifnet *ifp;
struct i40e_hw *hw = vsi->hw;
struct ixl_queue *que = vsi->queues;
- struct i40e_aq_get_phy_abilities_resp abilities_resp;
+ struct i40e_aq_get_phy_abilities_resp abilities;
enum i40e_status_code aq_error = 0;
INIT_DEBUGOUT("ixl_setup_interface: begin");
@@ -2379,20 +2606,25 @@ ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
ixl_media_status);
- aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, TRUE, &abilities_resp, NULL);
+ aq_error = i40e_aq_get_phy_capabilities(hw,
+ FALSE, TRUE, &abilities, NULL);
+ /* May need delay to detect fiber correctly */
if (aq_error == I40E_ERR_UNKNOWN_PHY) {
- /* Need delay to detect fiber correctly */
i40e_msec_delay(200);
- aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, TRUE, &abilities_resp, NULL);
+ aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
+ TRUE, &abilities, NULL);
+ }
+ if (aq_error) {
if (aq_error == I40E_ERR_UNKNOWN_PHY)
device_printf(dev, "Unknown PHY type detected!\n");
else
- ixl_add_ifmedia(vsi, abilities_resp.phy_type);
- } else if (aq_error) {
- device_printf(dev, "Error getting supported media types, err %d,"
- " AQ error %d\n", aq_error, hw->aq.asq_last_status);
- } else
- ixl_add_ifmedia(vsi, abilities_resp.phy_type);
+ device_printf(dev,
+ "Error getting supported media types, err %d,"
+ " AQ error %d\n", aq_error, hw->aq.asq_last_status);
+ return (0);
+ }
+
+ ixl_add_ifmedia(vsi, abilities.phy_type);
/* Use autoselect media by default */
ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
@@ -2403,61 +2635,107 @@ ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
return (0);
}
-static bool
-ixl_config_link(struct i40e_hw *hw)
+/*
+** Run when the Admin Queue gets a
+** link transition interrupt.
+*/
+static void
+ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
{
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_aqc_get_link_status *status =
+ (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
bool check;
- i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
- check = i40e_get_link_status(hw);
+ hw->phy.get_link_info = TRUE;
+ i40e_get_link_status(hw, &check);
+ pf->link_up = check;
#ifdef IXL_DEBUG
printf("Link is %s\n", check ? "up":"down");
#endif
- return (check);
+ /* Report if Unqualified modules are found */
+ if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
+ (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
+ (!(status->link_info & I40E_AQ_LINK_UP)))
+ device_printf(pf->dev, "Link failed because "
+ "an unqualified module was detected\n");
+
+ return;
}
/*********************************************************************
*
- * Initialize this VSI
+ * Get Firmware Switch configuration
+ * - this will need to be more robust when more complex
+ * switch configurations are enabled.
*
**********************************************************************/
static int
-ixl_setup_vsi(struct ixl_vsi *vsi)
+ixl_switch_config(struct ixl_pf *pf)
{
- struct i40e_hw *hw = vsi->hw;
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_vsi *vsi = &pf->vsi;
device_t dev = vsi->dev;
struct i40e_aqc_get_switch_config_resp *sw_config;
- struct i40e_vsi_context ctxt;
u8 aq_buf[I40E_AQ_LARGE_BUF];
- int ret = I40E_SUCCESS;
+ int ret;
u16 next = 0;
+ memset(&aq_buf, 0, sizeof(aq_buf));
sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
ret = i40e_aq_get_switch_config(hw, sw_config,
sizeof(aq_buf), &next, NULL);
if (ret) {
- device_printf(dev,"aq_get_switch_config failed!!\n");
+ device_printf(dev,"aq_get_switch_config failed (ret=%d)!!\n",
+ ret);
return (ret);
}
#ifdef IXL_DEBUG
- printf("Switch config: header reported: %d in structure, %d total\n",
+ device_printf(dev,
+ "Switch config: header reported: %d in structure, %d total\n",
sw_config->header.num_reported, sw_config->header.num_total);
- printf("type=%d seid=%d uplink=%d downlink=%d\n",
- sw_config->element[0].element_type,
- sw_config->element[0].seid,
- sw_config->element[0].uplink_seid,
- sw_config->element[0].downlink_seid);
+ for (int i = 0; i < sw_config->header.num_reported; i++) {
+ device_printf(dev,
+ "%d: type=%d seid=%d uplink=%d downlink=%d\n", i,
+ sw_config->element[i].element_type,
+ sw_config->element[i].seid,
+ sw_config->element[i].uplink_seid,
+ sw_config->element[i].downlink_seid);
+ }
#endif
- /* Save off this important value */
+ /* Simplified due to a single VSI at the moment */
+ vsi->uplink_seid = sw_config->element[0].uplink_seid;
+ vsi->downlink_seid = sw_config->element[0].downlink_seid;
vsi->seid = sw_config->element[0].seid;
+ return (ret);
+}
+
+/*********************************************************************
+ *
+ * Initialize the VSI: this handles contexts, which means things
+ * like the number of descriptors, buffer size,
+ * plus we init the rings thru this function.
+ *
+ **********************************************************************/
+static int
+ixl_initialize_vsi(struct ixl_vsi *vsi)
+{
+ struct ixl_pf *pf = vsi->back;
+ struct ixl_queue *que = vsi->queues;
+ device_t dev = vsi->dev;
+ struct i40e_hw *hw = vsi->hw;
+ struct i40e_vsi_context ctxt;
+ int err = 0;
memset(&ctxt, 0, sizeof(ctxt));
ctxt.seid = vsi->seid;
+ if (pf->veb_seid != 0)
+ ctxt.uplink_seid = pf->veb_seid;
ctxt.pf_num = hw->pf_id;
- ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
- if (ret) {
- device_printf(dev,"get vsi params failed %x!!\n", ret);
- return (ret);
+ err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
+ if (err) {
+ device_printf(dev,"get vsi params failed %x!!\n", err);
+ return (err);
}
#ifdef IXL_DEBUG
printf("get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
@@ -2494,29 +2772,14 @@ ixl_setup_vsi(struct ixl_vsi *vsi)
vsi->hw_filters_add = 0;
vsi->hw_filters_del = 0;
- ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
- if (ret)
+ ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);
+
+ err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (err) {
device_printf(dev,"update vsi params failed %x!!\n",
hw->aq.asq_last_status);
- return (ret);
-}
-
-
-/*********************************************************************
- *
- * Initialize the VSI: this handles contexts, which means things
- * like the number of descriptors, buffer size,
- * plus we init the rings thru this function.
- *
- **********************************************************************/
-static int
-ixl_initialize_vsi(struct ixl_vsi *vsi)
-{
- struct ixl_queue *que = vsi->queues;
- device_t dev = vsi->dev;
- struct i40e_hw *hw = vsi->hw;
- int err = 0;
-
+ return (err);
+ }
for (int i = 0; i < vsi->num_queues; i++, que++) {
struct tx_ring *txr = &que->txr;
@@ -2531,7 +2794,7 @@ ixl_initialize_vsi(struct ixl_vsi *vsi)
size = que->num_desc * sizeof(struct i40e_tx_desc);
memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
tctx.new_context = 1;
- tctx.base = (txr->dma.pa/128);
+ tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS);
tctx.qlen = que->num_desc;
tctx.fc_ena = 0;
tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
@@ -2561,7 +2824,7 @@ ixl_initialize_vsi(struct ixl_vsi *vsi)
ixl_init_tx_ring(que);
/* Next setup the HMC RX Context */
- if (vsi->max_frame_size <= 2048)
+ if (vsi->max_frame_size <= MCLBYTES)
rxr->mbuf_sz = MCLBYTES;
else
rxr->mbuf_sz = MJUMPAGESIZE;
@@ -2578,7 +2841,7 @@ ixl_initialize_vsi(struct ixl_vsi *vsi)
rctx.dtype = 0;
rctx.dsize = 1; /* do 32byte descriptors */
rctx.hsplit_0 = 0; /* no HDR split initially */
- rctx.base = (rxr->dma.pa/128);
+ rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS);
rctx.qlen = que->num_desc;
rctx.tphrdesc_ena = 1;
rctx.tphwdesc_ena = 1;
@@ -2608,6 +2871,15 @@ ixl_initialize_vsi(struct ixl_vsi *vsi)
break;
}
wr32(vsi->hw, I40E_QRX_TAIL(que->me), 0);
+#ifdef DEV_NETMAP
+ /* preserve queue */
+ if (vsi->ifp->if_capenable & IFCAP_NETMAP) {
+ struct netmap_adapter *na = NA(vsi->ifp);
+ struct netmap_kring *kring = &na->rx_rings[i];
+ int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
+ wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
+ } else
+#endif /* DEV_NETMAP */
wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
}
return (err);
@@ -2624,7 +2896,6 @@ ixl_free_vsi(struct ixl_vsi *vsi)
{
struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
struct ixl_queue *que = vsi->queues;
- struct ixl_mac_filter *f;
/* Free station queues */
for (int i = 0; i < vsi->num_queues; i++, que++) {
@@ -2653,6 +2924,14 @@ ixl_free_vsi(struct ixl_vsi *vsi)
free(vsi->queues, M_DEVBUF);
/* Free VSI filter list */
+ ixl_free_mac_filters(vsi);
+}
+
+static void
+ixl_free_mac_filters(struct ixl_vsi *vsi)
+{
+ struct ixl_mac_filter *f;
+
while (!SLIST_EMPTY(&vsi->ftl)) {
f = SLIST_FIRST(&vsi->ftl);
SLIST_REMOVE_HEAD(&vsi->ftl, next);
@@ -2684,6 +2963,7 @@ ixl_setup_stations(struct ixl_pf *pf)
vsi->hw = &pf->hw;
vsi->id = 0;
vsi->num_vlans = 0;
+ vsi->back = pf;
/* Get memory for the station queues */
if (!(vsi->queues =
@@ -2936,6 +3216,24 @@ ixl_set_queue_tx_itr(struct ixl_queue *que)
return;
}
+#define QUEUE_NAME_LEN 32
+
+static void
+ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi,
+ struct sysctl_ctx_list *ctx, const char *sysctl_name)
+{
+ struct sysctl_oid *tree;
+ struct sysctl_oid_list *child;
+ struct sysctl_oid_list *vsi_list;
+
+ tree = device_get_sysctl_tree(pf->dev);
+ child = SYSCTL_CHILDREN(tree);
+ vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name,
+ CTLFLAG_RD, NULL, "VSI Number");
+ vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
+
+ ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats);
+}
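/*
 * Note: with the "pf" node name passed in below, the VSI ethernet stats
 * and the per-queue nodes share one sysctl subtree, e.g. (unit number
 * illustrative):
 *
 *	dev.ixl.0.pf.mcast_pkts_txd
 *	dev.ixl.0.pf.que0.<queue stats>
 */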
static void
ixl_add_hw_stats(struct ixl_pf *pf)
@@ -2943,18 +3241,19 @@ ixl_add_hw_stats(struct ixl_pf *pf)
device_t dev = pf->dev;
struct ixl_vsi *vsi = &pf->vsi;
struct ixl_queue *queues = vsi->queues;
- struct i40e_eth_stats *vsi_stats = &vsi->eth_stats;
struct i40e_hw_port_stats *pf_stats = &pf->stats;
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
struct sysctl_oid *tree = device_get_sysctl_tree(dev);
struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
+ struct sysctl_oid_list *vsi_list;
- struct sysctl_oid *vsi_node, *queue_node;
- struct sysctl_oid_list *vsi_list, *queue_list;
+ struct sysctl_oid *queue_node;
+ struct sysctl_oid_list *queue_list;
struct tx_ring *txr;
struct rx_ring *rxr;
+ char queue_namebuf[QUEUE_NAME_LEN];
/* Driver statistics */
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
@@ -2964,23 +3263,14 @@ ixl_add_hw_stats(struct ixl_pf *pf)
CTLFLAG_RD, &pf->admin_irq,
"Admin Queue IRQ Handled");
- /* VSI statistics */
-#define QUEUE_NAME_LEN 32
- char queue_namebuf[QUEUE_NAME_LEN];
-
- // ERJ: Only one vsi now, re-do when >1 VSI enabled
- // snprintf(vsi_namebuf, QUEUE_NAME_LEN, "vsi%d", vsi->info.stat_counter_idx);
- vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
- CTLFLAG_RD, NULL, "VSI-specific stats");
- vsi_list = SYSCTL_CHILDREN(vsi_node);
-
- ixl_add_sysctls_eth_stats(ctx, vsi_list, vsi_stats);
+ ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf");
+ vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node);
/* Queue statistics */
for (int q = 0; q < vsi->num_queues; q++) {
snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
- queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf,
- CTLFLAG_RD, NULL, "Queue #");
+ queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
+ OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "Queue #");
queue_list = SYSCTL_CHILDREN(queue_node);
txr = &(queues[q].txr);
@@ -3043,7 +3333,6 @@ ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
"Multicast Packets Transmitted"},
{&eth_stats->tx_broadcast, "bcast_pkts_txd",
"Broadcast Packets Transmitted"},
- {&eth_stats->tx_discards, "tx_discards", "Discarded TX packets"},
// end
{0,0,0}
};
@@ -3117,6 +3406,7 @@ ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
}
}
+
/*
** ixl_config_rss - setup RSS
** - note this is done for the single vsi
@@ -3126,19 +3416,45 @@ static void ixl_config_rss(struct ixl_vsi *vsi)
struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
struct i40e_hw *hw = vsi->hw;
u32 lut = 0;
- u64 set_hena, hena;
- int i, j;
+ u64 set_hena = 0, hena;
+ int i, j, que_id;
+#ifdef RSS
+ u32 rss_hash_config;
+ u32 rss_seed[IXL_KEYSZ];
+#else
+ u32 rss_seed[IXL_KEYSZ] = {0x41b01687,
+ 0x183cfd8c, 0xce880440, 0x580cbc3c,
+ 0x35897377, 0x328b25e1, 0x4fa98922,
+ 0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
+#endif
- static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687,
- 0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377,
- 0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d,
- 0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be};
+#ifdef RSS
+ /* Fetch the configured RSS key */
+ rss_getkey((uint8_t *) &rss_seed);
+#endif
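/*
 * Note: IXL_KEYSZ is defined outside this diff; the ten-word static seed
 * above implies it is presumably:
 *
 *	#define IXL_KEYSZ	10
 */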
/* Fill out hash function seed */
- for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
- wr32(hw, I40E_PFQF_HKEY(i), seed[i]);
+ for (i = 0; i < IXL_KEYSZ; i++)
+ wr32(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
/* Enable PCTYPES for RSS: */
+#ifdef RSS
+ rss_hash_config = rss_gethashconfig();
+ if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
+ set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+ if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
+ set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
+ set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
+ if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
+ set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+ if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
+ set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+ if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
+ set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+ if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
+ set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
+#else
set_hena =
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
@@ -3151,7 +3467,7 @@ static void ixl_config_rss(struct ixl_vsi *vsi)
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
-
+#endif
hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
hena |= set_hena;
@@ -3162,8 +3478,19 @@ static void ixl_config_rss(struct ixl_vsi *vsi)
for (i = j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
if (j == vsi->num_queues)
j = 0;
+#ifdef RSS
+ /*
+ * Fetch the RSS bucket id for the given indirection entry.
+ * Cap it at the number of configured buckets (which is
+ * num_queues.)
+ */
+ que_id = rss_get_indirection_to_bucket(i);
+ que_id = que_id % vsi->num_queues;
+#else
+ que_id = j;
+#endif
/* lut = 4-byte sliding window of 4 lut entries */
- lut = (lut << 8) | (j &
+ lut = (lut << 8) | (que_id &
((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
/* On i = 3, we have 4 entries in lut; write to the register */
if ((i & 3) == 3)
@@ -3267,8 +3594,7 @@ static void
ixl_init_filters(struct ixl_vsi *vsi)
{
/* Add broadcast address */
- u8 bc[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- ixl_add_filter(vsi, bc, IXL_VLAN_ANY);
+ ixl_add_filter(vsi, ixl_bcast_addr, IXL_VLAN_ANY);
}
/*
@@ -3297,6 +3623,13 @@ ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
return;
}
+static void
+ixl_reconfigure_filters(struct ixl_vsi *vsi)
+{
+
+ ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
+}
+
/*
** This routine adds macvlan filters
*/
@@ -3304,10 +3637,14 @@ static void
ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
{
struct ixl_mac_filter *f, *tmp;
- device_t dev = vsi->dev;
+ struct ixl_pf *pf;
+ device_t dev;
DEBUGOUT("ixl_add_filter: begin");
+ pf = vsi->back;
+ dev = pf->dev;
+
/* Does one already exist */
f = ixl_find_filter(vsi, macaddr, vlan);
if (f != NULL)
@@ -3335,6 +3672,8 @@ ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
if (f->vlan != IXL_VLAN_ANY)
f->flags |= IXL_FILTER_VLAN;
+ else
+ vsi->num_macs++;
ixl_add_hw_filters(vsi, f->flags, 1);
return;
@@ -3351,6 +3690,7 @@ ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
f->flags |= IXL_FILTER_DEL;
ixl_del_hw_filters(vsi, 1);
+ vsi->num_macs--;
/* Check if this is the last vlan removal */
if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
@@ -3394,14 +3734,20 @@ ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
{
struct i40e_aqc_add_macvlan_element_data *a, *b;
struct ixl_mac_filter *f;
- struct i40e_hw *hw = vsi->hw;
- device_t dev = vsi->dev;
- int err, j = 0;
+ struct ixl_pf *pf;
+ struct i40e_hw *hw;
+ device_t dev;
+ int err, j = 0;
+
+ pf = vsi->back;
+ dev = pf->dev;
+ hw = &pf->hw;
+ IXL_PF_LOCK_ASSERT(pf);
a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
M_DEVBUF, M_NOWAIT | M_ZERO);
if (a == NULL) {
- device_printf(dev, "add hw filter failed to get memory\n");
+ device_printf(dev, "add_hw_filters failed to get memory\n");
return;
}
@@ -3414,9 +3760,14 @@ ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
if (f->flags == flags) {
b = &a[j]; // a pox on fvl long names :)
bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
- b->vlan_tag =
- (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
- b->flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
+ if (f->vlan == IXL_VLAN_ANY) {
+ b->vlan_tag = 0;
+ b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
+ } else {
+ b->vlan_tag = f->vlan;
+ b->flags = 0;
+ }
+ b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
f->flags &= ~IXL_FILTER_ADD;
j++;
}
@@ -3426,8 +3777,8 @@ ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
if (j > 0) {
err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
if (err)
- device_printf(dev, "aq_add_macvlan failure %d\n",
- hw->aq.asq_last_status);
+ device_printf(dev, "aq_add_macvlan err %d, "
+ "aq_error %d\n", err, hw->aq.asq_last_status);
else
vsi->hw_filters_add += j;
}
@@ -3444,13 +3795,18 @@ static void
ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
{
struct i40e_aqc_remove_macvlan_element_data *d, *e;
- struct i40e_hw *hw = vsi->hw;
- device_t dev = vsi->dev;
+ struct ixl_pf *pf;
+ struct i40e_hw *hw;
+ device_t dev;
struct ixl_mac_filter *f, *f_temp;
int err, j = 0;
DEBUGOUT("ixl_del_hw_filters: begin\n");
+ pf = vsi->back;
+ hw = &pf->hw;
+ dev = pf->dev;
+
d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
M_DEVBUF, M_NOWAIT | M_ZERO);
if (d == NULL) {
@@ -3476,6 +3832,7 @@ ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
/* NOTE: returns ENOENT every time but seems to work fine,
so we'll ignore that specific error. */
+ // TODO: Does this still occur on current firmwares?
if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
int sc = 0;
for (int i = 0; i < j; i++)
@@ -3493,82 +3850,106 @@ ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
return;
}
-
-static void
+static int
ixl_enable_rings(struct ixl_vsi *vsi)
{
- struct i40e_hw *hw = vsi->hw;
+ struct ixl_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ int index, error;
u32 reg;
+ error = 0;
for (int i = 0; i < vsi->num_queues; i++) {
- i40e_pre_tx_queue_cfg(hw, i, TRUE);
+ index = vsi->first_queue + i;
+ i40e_pre_tx_queue_cfg(hw, index, TRUE);
- reg = rd32(hw, I40E_QTX_ENA(i));
+ reg = rd32(hw, I40E_QTX_ENA(index));
reg |= I40E_QTX_ENA_QENA_REQ_MASK |
I40E_QTX_ENA_QENA_STAT_MASK;
- wr32(hw, I40E_QTX_ENA(i), reg);
+ wr32(hw, I40E_QTX_ENA(index), reg);
/* Verify the enable took */
for (int j = 0; j < 10; j++) {
- reg = rd32(hw, I40E_QTX_ENA(i));
+ reg = rd32(hw, I40E_QTX_ENA(index));
if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
break;
i40e_msec_delay(10);
}
- if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0)
- printf("TX queue %d disabled!\n", i);
+ if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
+ device_printf(pf->dev, "TX queue %d disabled!\n",
+ index);
+ error = ETIMEDOUT;
+ }
- reg = rd32(hw, I40E_QRX_ENA(i));
+ reg = rd32(hw, I40E_QRX_ENA(index));
reg |= I40E_QRX_ENA_QENA_REQ_MASK |
I40E_QRX_ENA_QENA_STAT_MASK;
- wr32(hw, I40E_QRX_ENA(i), reg);
+ wr32(hw, I40E_QRX_ENA(index), reg);
/* Verify the enable took */
for (int j = 0; j < 10; j++) {
- reg = rd32(hw, I40E_QRX_ENA(i));
+ reg = rd32(hw, I40E_QRX_ENA(index));
if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
break;
i40e_msec_delay(10);
}
- if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0)
- printf("RX queue %d disabled!\n", i);
+ if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
+ device_printf(pf->dev, "RX queue %d disabled!\n",
+ index);
+ error = ETIMEDOUT;
+ }
}
+
+ return (error);
}
-static void
+static int
ixl_disable_rings(struct ixl_vsi *vsi)
{
- struct i40e_hw *hw = vsi->hw;
+ struct ixl_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ int index, error;
u32 reg;
+ error = 0;
for (int i = 0; i < vsi->num_queues; i++) {
- i40e_pre_tx_queue_cfg(hw, i, FALSE);
+ index = vsi->first_queue + i;
+
+ i40e_pre_tx_queue_cfg(hw, index, FALSE);
i40e_usec_delay(500);
- reg = rd32(hw, I40E_QTX_ENA(i));
+ reg = rd32(hw, I40E_QTX_ENA(index));
reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
- wr32(hw, I40E_QTX_ENA(i), reg);
+ wr32(hw, I40E_QTX_ENA(index), reg);
/* Verify the disable took */
for (int j = 0; j < 10; j++) {
- reg = rd32(hw, I40E_QTX_ENA(i));
+ reg = rd32(hw, I40E_QTX_ENA(index));
if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
break;
i40e_msec_delay(10);
}
- if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
- printf("TX queue %d still enabled!\n", i);
+ if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
+ device_printf(pf->dev, "TX queue %d still enabled!\n",
+ index);
+ error = ETIMEDOUT;
+ }
- reg = rd32(hw, I40E_QRX_ENA(i));
+ reg = rd32(hw, I40E_QRX_ENA(index));
reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
- wr32(hw, I40E_QRX_ENA(i), reg);
+ wr32(hw, I40E_QRX_ENA(index), reg);
/* Verify the disable took */
for (int j = 0; j < 10; j++) {
- reg = rd32(hw, I40E_QRX_ENA(i));
+ reg = rd32(hw, I40E_QRX_ENA(index));
if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
break;
i40e_msec_delay(10);
}
- if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
- printf("RX queue %d still enabled!\n", i);
+ if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
+ device_printf(pf->dev, "RX queue %d still enabled!\n",
+ index);
+ error = ETIMEDOUT;
+ }
}
+
+ return (error);
}
/**
@@ -3658,16 +4039,23 @@ ixl_enable_intr(struct ixl_vsi *vsi)
}
static void
-ixl_disable_intr(struct ixl_vsi *vsi)
+ixl_disable_rings_intr(struct ixl_vsi *vsi)
{
struct i40e_hw *hw = vsi->hw;
struct ixl_queue *que = vsi->queues;
- if (ixl_enable_msix) {
+ for (int i = 0; i < vsi->num_queues; i++, que++)
+ ixl_disable_queue(hw, que->me);
+}
+
+static void
+ixl_disable_intr(struct ixl_vsi *vsi)
+{
+ struct i40e_hw *hw = vsi->hw;
+
+ if (ixl_enable_msix)
ixl_disable_adminq(hw);
- for (int i = 0; i < vsi->num_queues; i++, que++)
- ixl_disable_queue(hw, que->me);
- } else
+ else
ixl_disable_legacy(hw);
}
@@ -3742,7 +4130,8 @@ static void
ixl_update_stats_counters(struct ixl_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
- struct ixl_vsi *vsi = &pf->vsi;
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct ixl_vf *vf;
struct i40e_hw_port_stats *nsd = &pf->stats;
struct i40e_hw_port_stats *osd = &pf->stats_offsets;
@@ -3828,29 +4217,6 @@ ixl_update_stats_counters(struct ixl_pf *pf)
pf->stat_offsets_loaded,
&osd->link_xoff_tx, &nsd->link_xoff_tx);
- /* Priority flow control stats */
-#if 0
- for (int i = 0; i < 8; i++) {
- ixl_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
- pf->stat_offsets_loaded,
- &osd->priority_xon_rx[i],
- &nsd->priority_xon_rx[i]);
- ixl_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
- pf->stat_offsets_loaded,
- &osd->priority_xon_tx[i],
- &nsd->priority_xon_tx[i]);
- ixl_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
- pf->stat_offsets_loaded,
- &osd->priority_xoff_tx[i],
- &nsd->priority_xoff_tx[i]);
- ixl_stat_update32(hw,
- I40E_GLPRT_RXON2OFFCNT(hw->port, i),
- pf->stat_offsets_loaded,
- &osd->priority_xon_2_xoff[i],
- &nsd->priority_xon_2_xoff[i]);
- }
-#endif
-
/* Packet size stats rx */
ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
I40E_GLPRT_PRC64L(hw->port),
@@ -3927,11 +4293,13 @@ ixl_update_stats_counters(struct ixl_pf *pf)
/* End hw stats */
/* Update vsi stats */
- ixl_update_eth_stats(vsi);
+ ixl_update_vsi_stats(vsi);
- /* OS statistics */
- // ERJ - these are per-port, update all vsis?
- IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes);
+ for (int i = 0; i < pf->num_vfs; i++) {
+ vf = &pf->vfs[i];
+ if (vf->vf_flags & VF_FLAG_ENABLED)
+ ixl_update_eth_stats(&pf->vfs[i].vsi);
+ }
}
/*
@@ -3957,6 +4325,7 @@ ixl_do_adminq(void *context, int pending)
return;
}
+ IXL_PF_LOCK(pf);
/* clean and process any events */
do {
ret = i40e_clean_arq_element(hw, &event, &result);
@@ -3965,11 +4334,13 @@ ixl_do_adminq(void *context, int pending)
opcode = LE16_TO_CPU(event.desc.opcode);
switch (opcode) {
case i40e_aqc_opc_get_link_status:
- vsi->link_up = ixl_config_link(hw);
+ ixl_link_event(pf, &event);
ixl_update_link_status(pf);
break;
case i40e_aqc_opc_send_msg_to_pf:
- /* process pf/vf communication here */
+#ifdef PCI_IOV
+ ixl_handle_vf_msg(pf, &event);
+#endif
break;
case i40e_aqc_opc_event_lan_overflow:
break;
@@ -3987,12 +4358,19 @@ ixl_do_adminq(void *context, int pending)
wr32(hw, I40E_PFINT_ICR0_ENA, reg);
free(event.msg_buf, M_DEVBUF);
- if (pf->msix > 1)
- ixl_enable_adminq(&pf->hw);
+ /*
+ * If there are still messages to process, reschedule ourselves.
+ * Otherwise, re-enable our interrupt and go to sleep.
+ */
+ if (result > 0)
+ taskqueue_enqueue(pf->tq, &pf->adminq);
else
ixl_enable_intr(vsi);
+
+ IXL_PF_UNLOCK(pf);
}
+#ifdef IXL_DEBUG_SYSCTL
static int
ixl_debug_info(SYSCTL_HANDLER_ARGS)
{
@@ -4057,6 +4435,7 @@ ixl_print_debug_info(struct ixl_pf *pf)
reg = rd32(hw, I40E_GLPRT_MLFC(hw->port));
printf("mac local fault = %x\n", reg);
}
+#endif
/**
* Update VSI-specific ethernet statistics counters.
@@ -4067,8 +4446,6 @@ void ixl_update_eth_stats(struct ixl_vsi *vsi)
struct i40e_hw *hw = &pf->hw;
struct i40e_eth_stats *es;
struct i40e_eth_stats *oes;
- int i;
- uint64_t tx_discards;
struct i40e_hw_port_stats *nsd;
u16 stat_idx = vsi->info.stat_counter_idx;
@@ -4118,9 +4495,27 @@ void ixl_update_eth_stats(struct ixl_vsi *vsi)
vsi->stat_offsets_loaded,
&oes->tx_broadcast, &es->tx_broadcast);
vsi->stat_offsets_loaded = true;
+}
+
+static void
+ixl_update_vsi_stats(struct ixl_vsi *vsi)
+{
+ struct ixl_pf *pf;
+ struct ifnet *ifp;
+ struct i40e_eth_stats *es;
+ u64 tx_discards;
+
+ struct i40e_hw_port_stats *nsd;
+
+ pf = vsi->back;
+ ifp = vsi->ifp;
+ es = &vsi->eth_stats;
+ nsd = &pf->stats;
+
+ ixl_update_eth_stats(vsi);
tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
- for (i = 0; i < vsi->num_queues; i++)
+ for (int i = 0; i < vsi->num_queues; i++)
tx_discards += vsi->queues[i].txr.br->br_drops;
/* Update ifnet stats */
@@ -4135,6 +4530,9 @@ void ixl_update_eth_stats(struct ixl_vsi *vsi)
IXL_SET_IMCASTS(vsi, es->rx_multicast);
IXL_SET_OMCASTS(vsi, es->tx_multicast);
+ IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
+ nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
+ nsd->rx_jabber);
IXL_SET_OERRORS(vsi, es->tx_errors);
IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
IXL_SET_OQDROPS(vsi, tx_discards);
@@ -4234,24 +4632,15 @@ ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
struct ixl_pf *pf = (struct ixl_pf *)arg1;
struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
- int requested_fc = 0, error = 0;
+ int error = 0;
enum i40e_status_code aq_error = 0;
u8 fc_aq_err = 0;
- aq_error = i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
- if (aq_error) {
- device_printf(dev,
- "%s: Error retrieving link info from aq, %d\n",
- __func__, aq_error);
- return (EAGAIN);
- }
-
- /* Read in new mode */
- requested_fc = hw->fc.current_mode;
- error = sysctl_handle_int(oidp, &requested_fc, 0, req);
+ /* Get request */
+ error = sysctl_handle_int(oidp, &pf->fc, 0, req);
if ((error) || (req->newptr == NULL))
return (error);
- if (requested_fc < 0 || requested_fc > 3) {
+ if (pf->fc < 0 || pf->fc > 3) {
device_printf(dev,
"Invalid fc mode; valid modes are 0 through 3\n");
return (EINVAL);
@@ -4269,7 +4658,7 @@ ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
}
/* Set fc ability for port */
- hw->fc.requested_mode = requested_fc;
+ hw->fc.requested_mode = pf->fc;
aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
if (aq_error) {
device_printf(dev,
@@ -4278,14 +4667,6 @@ ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
return (EAGAIN);
}
- if (hw->fc.current_mode != hw->fc.requested_mode) {
- device_printf(dev, "%s: FC set failure:\n", __func__);
- device_printf(dev, "%s: Current: %s / Requested: %s\n",
- __func__,
- ixl_fc_string[hw->fc.current_mode],
- ixl_fc_string[hw->fc.requested_mode]);
- }
-
return (0);
}
@@ -4344,9 +4725,11 @@ ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
enum i40e_status_code aq_error = 0;
/* Get current capability information */
- aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities, NULL);
+ aq_error = i40e_aq_get_phy_capabilities(hw,
+ FALSE, FALSE, &abilities, NULL);
if (aq_error) {
- device_printf(dev, "%s: Error getting phy capabilities %d,"
+ device_printf(dev,
+ "%s: Error getting phy capabilities %d,"
" aq error: %d\n", __func__, aq_error,
hw->aq.asq_last_status);
return (EAGAIN);
@@ -4361,6 +4744,8 @@ ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
config.eeer = abilities.eeer_val;
config.low_power_ctrl = abilities.d3_lpan;
/* Translate into aq cmd link_speed */
+ if (speeds & 0x8)
+ config.link_speed |= I40E_LINK_SPEED_20GB;
if (speeds & 0x4)
config.link_speed |= I40E_LINK_SPEED_10GB;
if (speeds & 0x2)
@@ -4371,12 +4756,22 @@ ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
/* Do aq command & restart link */
aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
if (aq_error) {
- device_printf(dev, "%s: Error setting new phy config %d,"
+ device_printf(dev,
+ "%s: Error setting new phy config %d,"
" aq error: %d\n", __func__, aq_error,
hw->aq.asq_last_status);
return (EAGAIN);
}
+ /*
+	** This seems a bit heavy-handed, but we
+	** need to get a reinit on some devices
+ */
+ IXL_PF_LOCK(pf);
+ ixl_stop(pf);
+ ixl_init_locked(pf);
+ IXL_PF_UNLOCK(pf);
+
return (0);
}
@@ -4386,6 +4781,7 @@ ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
** 0x1 - advertise 100 Mb
** 0x2 - advertise 1G
** 0x4 - advertise 10G
+** 0x8 - advertise 20G
**
** Does not work on 40G devices.
*/
@@ -4410,11 +4806,36 @@ ixl_set_advertise(SYSCTL_HANDLER_ARGS)
error = sysctl_handle_int(oidp, &requested_ls, 0, req);
if ((error) || (req->newptr == NULL))
return (error);
- if (requested_ls < 1 || requested_ls > 7) {
- device_printf(dev,
- "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
+ /* Check for sane value */
+ if (requested_ls < 0x1 || requested_ls > 0xE) {
+ device_printf(dev, "Invalid advertised speed; "
+ "valid modes are 0x1 through 0xE\n");
return (EINVAL);
}
+ /* Then check for validity based on adapter type */
+ switch (hw->device_id) {
+ case I40E_DEV_ID_10G_BASE_T:
+ if (requested_ls & 0x8) {
+ device_printf(dev,
+ "20Gbs speed not supported on this device.\n");
+ return (EINVAL);
+ }
+ break;
+ case I40E_DEV_ID_20G_KR2:
+ if (requested_ls & 0x1) {
+ device_printf(dev,
+ "100Mbs speed not supported on this device.\n");
+ return (EINVAL);
+ }
+ break;
+ default:
+ if (requested_ls & ~0x6) {
+ device_printf(dev,
+ "Only 1/10Gbs speeds are supported on this device.\n");
+ return (EINVAL);
+ }
+ break;
+ }
/* Exit if no change */
if (pf->advertised_speed == requested_ls)
@@ -4492,8 +4913,8 @@ ixl_get_bus_info(struct i40e_hw *hw, device_t dev)
if ((hw->bus.width <= i40e_bus_width_pcie_x8) &&
(hw->bus.speed < i40e_bus_speed_8000)) {
device_printf(dev, "PCI-Express bandwidth available"
- " for this device\n is not sufficient for"
- " normal operation.\n");
+ " for this device\n may be insufficient for"
+ " optimal performance.\n");
device_printf(dev, "For expected performance a x8 "
"PCIE Gen3 slot is required.\n");
}
@@ -4521,7 +4942,7 @@ ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
}
-#ifdef IXL_DEBUG
+#ifdef IXL_DEBUG_SYSCTL
static int
ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
{
@@ -4554,15 +4975,15 @@ ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
static int
ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
{
- struct ixl_pf *pf = (struct ixl_pf *)arg1;
- struct i40e_hw *hw = &pf->hw;
- struct i40e_aq_get_phy_abilities_resp abilities_resp;
- char buf[512];
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ struct i40e_hw *hw = &pf->hw;
+ char buf[512];
+ enum i40e_status_code aq_error = 0;
- enum i40e_status_code aq_error = 0;
+ struct i40e_aq_get_phy_abilities_resp abilities;
- // TODO: Print out list of qualified modules as well?
- aq_error = i40e_aq_get_phy_capabilities(hw, TRUE, FALSE, &abilities_resp, NULL);
+ aq_error = i40e_aq_get_phy_capabilities(hw,
+ TRUE, FALSE, &abilities, NULL);
if (aq_error) {
printf("i40e_aq_get_phy_capabilities() error %d\n", aq_error);
return (EPERM);
@@ -4575,9 +4996,9 @@ ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
"EEE cap : %#06x\n"
"EEER reg : %#010x\n"
"D3 Lpan : %#04x",
- abilities_resp.phy_type, abilities_resp.link_speed,
- abilities_resp.abilities, abilities_resp.eee_capability,
- abilities_resp.eeer_val, abilities_resp.d3_lpan);
+ abilities.phy_type, abilities.link_speed,
+ abilities.abilities, abilities.eee_capability,
+ abilities.eeer_val, abilities.d3_lpan);
return (sysctl_handle_string(oidp, buf, strlen(buf), req));
}
@@ -4630,6 +5051,16 @@ ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
#define IXL_SW_RES_SIZE 0x14
static int
+ixl_res_alloc_cmp(const void *a, const void *b)
+{
+ const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
+ one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
+ two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
+
+ return ((int)one->resource_type - (int)two->resource_type);
+}
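+
+/*
+ * The comparator above is handed to qsort(3) further down so that the
+ * resource table is printed in ascending resource-type order.
+ */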
+
+static int
ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
{
struct ixl_pf *pf = (struct ixl_pf *)arg1;
@@ -4647,19 +5078,26 @@ ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
return (ENOMEM);
}
+ bzero(resp, sizeof(resp));
error = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
resp,
IXL_SW_RES_SIZE,
NULL);
if (error) {
- device_printf(dev, "%s: get_switch_resource_alloc() error %d, aq error %d\n",
+ device_printf(dev,
+ "%s: get_switch_resource_alloc() error %d, aq error %d\n",
__func__, error, hw->aq.asq_last_status);
sbuf_delete(buf);
return error;
}
- device_printf(dev, "Num_entries: %d\n", num_entries);
+
+ /* Sort entries by type for display */
+ qsort(resp, num_entries,
+ sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
+ &ixl_res_alloc_cmp);
sbuf_cat(buf, "\n");
+ sbuf_printf(buf, "# of entries: %d\n", num_entries);
sbuf_printf(buf,
"Type | Guaranteed | Total | Used | Un-allocated\n"
" | (this) | (all) | (this) | (all) \n");
@@ -4676,6 +5114,15 @@ ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
}
error = sbuf_finish(buf);
+ if (error) {
+ device_printf(dev, "Error finishing sbuf: %d\n", error);
+ sbuf_delete(buf);
+		return (error);
+ }
+
+ error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
+ if (error)
+ device_printf(dev, "sysctl error: %d\n", error);
sbuf_delete(buf);
return (error);
@@ -4743,7 +5190,8 @@ ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
error = i40e_aq_get_switch_config(hw, sw_config,
sizeof(aq_buf), &next, NULL);
if (error) {
- device_printf(dev, "%s: aq_get_switch_config() error %d, aq error %d\n",
+ device_printf(dev,
+ "%s: aq_get_switch_config() error %d, aq error %d\n",
__func__, error, hw->aq.asq_last_status);
sbuf_delete(buf);
return error;
@@ -4768,11 +5216,14 @@ ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
// "%4d (%8s) | %8s %8s %#8x",
sbuf_printf(buf, "%4d", sw_config->element[i].seid);
sbuf_cat(buf, " ");
- sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf, sw_config->element[i].seid, false));
+ sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
+ sw_config->element[i].seid, false));
sbuf_cat(buf, " | ");
- sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf, sw_config->element[i].uplink_seid, true));
+ sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf,
+ sw_config->element[i].uplink_seid, true));
sbuf_cat(buf, " ");
- sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf, sw_config->element[i].downlink_seid, false));
+ sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf,
+ sw_config->element[i].downlink_seid, false));
sbuf_cat(buf, " ");
sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
if (i < sw_config->header.num_reported - 1)
@@ -4781,71 +5232,1462 @@ ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
sbuf_delete(nmbuf);
error = sbuf_finish(buf);
+ if (error) {
+ device_printf(dev, "Error finishing sbuf: %d\n", error);
+ sbuf_delete(buf);
+		return (error);
+ }
+
+ error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
+ if (error)
+ device_printf(dev, "sysctl error: %d\n", error);
sbuf_delete(buf);
return (error);
}
+#endif /* IXL_DEBUG_SYSCTL */
-/*
-** Dump TX desc given index.
-** Doesn't work; don't use.
-** TODO: Also needs a queue index input!
-**/
+
+#ifdef PCI_IOV
static int
-ixl_sysctl_dump_txd(SYSCTL_HANDLER_ARGS)
+ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
{
- struct ixl_pf *pf = (struct ixl_pf *)arg1;
- device_t dev = pf->dev;
- struct sbuf *buf;
- int error = 0;
+ struct i40e_hw *hw;
+ struct ixl_vsi *vsi;
+ struct i40e_vsi_context vsi_ctx;
+ int i;
+ uint16_t first_queue;
+ enum i40e_status_code code;
- u16 desc_idx = 0;
+ hw = &pf->hw;
+ vsi = &pf->vsi;
- buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
- if (!buf) {
- device_printf(dev, "Could not allocate sbuf for output.\n");
- return (ENOMEM);
+ vsi_ctx.pf_num = hw->pf_id;
+ vsi_ctx.uplink_seid = pf->veb_seid;
+ vsi_ctx.connection_type = IXL_VSI_DATA_PORT;
+ vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num;
+ vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF;
+
+ bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
+
+ vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ vsi_ctx.info.switch_id = htole16(0);
+
+ vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID);
+ vsi_ctx.info.sec_flags = 0;
+ if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF)
+ vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
+
+ vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
+ I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
+
+ vsi_ctx.info.valid_sections |=
+ htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
+ vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
+ first_queue = vsi->num_queues + vf->vf_num * IXLV_MAX_QUEUES;
+ for (i = 0; i < IXLV_MAX_QUEUES; i++)
+ vsi_ctx.info.queue_mapping[i] = htole16(first_queue + i);
+ for (; i < nitems(vsi_ctx.info.queue_mapping); i++)
+ vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK);
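+	/*
+	 * Layout sketch: VF queues are carved out after the PF's own
+	 * queues, so VF n owns global queues starting at
+	 * num_queues + n * IXLV_MAX_QUEUES; the remaining map slots are
+	 * marked unused with the queue mask value.
+	 */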
+
+ vsi_ctx.info.tc_mapping[0] = htole16(
+ (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
+ (1 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
+
+ code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL);
+ if (code != I40E_SUCCESS)
+ return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
+ vf->vsi.seid = vsi_ctx.seid;
+ vf->vsi.vsi_num = vsi_ctx.vsi_number;
+ vf->vsi.first_queue = first_queue;
+ vf->vsi.num_queues = IXLV_MAX_QUEUES;
+
+ code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL);
+ if (code != I40E_SUCCESS)
+ return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
+
+ code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL);
+ if (code != I40E_SUCCESS) {
+ device_printf(pf->dev, "Failed to disable BW limit: %d\n",
+ ixl_adminq_err_to_errno(hw->aq.asq_last_status));
+ return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
}
- /* Read in index */
- error = sysctl_handle_int(oidp, &desc_idx, 0, req);
- if (error)
+ memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info));
+ return (0);
+}
+
+static int
+ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
+{
+ struct i40e_hw *hw;
+ int error;
+
+ hw = &pf->hw;
+
+ error = ixl_vf_alloc_vsi(pf, vf);
+ if (error != 0)
return (error);
- if (req->newptr == NULL)
- return (EIO); // fix
- if (desc_idx > 1024) { // fix
- device_printf(dev,
- "Invalid descriptor index, needs to be < 1024\n"); // fix
+
+ vf->vsi.hw_filters_add = 0;
+ vf->vsi.hw_filters_del = 0;
+ ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY);
+ ixl_reconfigure_filters(&vf->vsi);
+
+ return (0);
+}
+
+static void
+ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum,
+ uint32_t val)
+{
+ uint32_t qtable;
+ int index, shift;
+
+ /*
+ * Two queues are mapped in a single register, so we have to do some
+ * gymnastics to convert the queue number into a register index and
+ * shift.
+ */
+ index = qnum / 2;
+ shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT;
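+	/*
+	 * Worked example (illustrative): qnum = 5 gives index = 2 and
+	 * shift = I40E_VSILAN_QTABLE_QINDEX_1_SHIFT, so the mapping lands
+	 * in the second queue field of VSILAN_QTABLE[2].
+	 */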
+
+ qtable = rd32(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num));
+ qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift);
+ qtable |= val << shift;
+ wr32(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable);
+}
+
+static void
+ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
+{
+ struct i40e_hw *hw;
+ uint32_t qtable;
+ int i;
+
+ hw = &pf->hw;
+
+ /*
+ * Contiguous mappings aren't actually supported by the hardware,
+ * so we have to use non-contiguous mappings.
+ */
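+	/*
+	 * Sketch of what follows: the VSIQTABLE_ENA bit in QBASE selects
+	 * per-queue lookups through VSILAN_QTABLE rather than a contiguous
+	 * base + offset scheme, so each queue slot is programmed
+	 * individually below.
+	 */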
+ wr32(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num),
+ I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
+
+ wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num),
+ I40E_VPLAN_MAPENA_TXRX_ENA_MASK);
+
+ for (i = 0; i < vf->vsi.num_queues; i++) {
+ qtable = (vf->vsi.first_queue + i) <<
+ I40E_VPLAN_QTABLE_QINDEX_SHIFT;
+
+ wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable);
+ }
+
+ /* Map queues allocated to VF to its VSI. */
+ for (i = 0; i < vf->vsi.num_queues; i++)
+ ixl_vf_map_vsi_queue(hw, vf, i, vf->vsi.first_queue + i);
+
+ /* Set rest of VSI queues as unused. */
+ for (; i < IXL_MAX_VSI_QUEUES; i++)
+ ixl_vf_map_vsi_queue(hw, vf, i,
+ I40E_VSILAN_QTABLE_QINDEX_0_MASK);
+
+ ixl_flush(hw);
+}
+
+static void
+ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi)
+{
+ struct i40e_hw *hw;
+
+ hw = &pf->hw;
+
+ if (vsi->seid == 0)
+ return;
+
+ i40e_aq_delete_element(hw, vsi->seid, NULL);
+}
+
+static void
+ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg)
+{
+
+ wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
+ ixl_flush(hw);
+}
+
+static void
+ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg)
+{
+
+ wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
+ I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
+ ixl_flush(hw);
+}
+
+static void
+ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf)
+{
+ struct i40e_hw *hw;
+ uint32_t vfint_reg, vpint_reg;
+ int i;
+
+ hw = &pf->hw;
+
+ ixl_vf_vsi_release(pf, &vf->vsi);
+
+ /* Index 0 has a special register. */
+ ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num));
+
+ for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
+		vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i, vf->vf_num);
+ ixl_vf_disable_queue_intr(hw, vfint_reg);
+ }
+
+ /* Index 0 has a special register. */
+ ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num));
+
+ for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
+ vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num);
+ ixl_vf_unregister_intr(hw, vpint_reg);
+ }
+
+ vf->vsi.num_queues = 0;
+}
+
+static int
+ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf)
+{
+ struct i40e_hw *hw;
+ int i;
+ uint16_t global_vf_num;
+ uint32_t ciad;
+
+ hw = &pf->hw;
+ global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
+
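+	/*
+	 * Sketch: CIAA/CIAD form an indirect window into the VF's PCI
+	 * config space; we point CIAA at the device status register and
+	 * poll CIAD until the transactions-pending bit clears (or we
+	 * time out).
+	 */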
+ wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS |
+ (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
+ for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
+ ciad = rd32(hw, I40E_PF_PCI_CIAD);
+ if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0)
+ return (0);
+ DELAY(1);
+ }
+
+ return (ETIMEDOUT);
+}
+
+static void
+ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf)
+{
+ struct i40e_hw *hw;
+ uint32_t vfrtrig;
+
+ hw = &pf->hw;
+
+ vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
+ vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+ wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
+ ixl_flush(hw);
+
+ ixl_reinit_vf(pf, vf);
+}
+
+static void
+ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
+{
+ struct i40e_hw *hw;
+ uint32_t vfrstat, vfrtrig;
+ int i, error;
+
+ hw = &pf->hw;
+
+ error = ixl_flush_pcie(pf, vf);
+ if (error != 0)
+ device_printf(pf->dev,
+ "Timed out waiting for PCIe activity to stop on VF-%d\n",
+ vf->vf_num);
+
+ for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
+ DELAY(10);
+
+ vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num));
+ if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK)
+ break;
+ }
+
+ if (i == IXL_VF_RESET_TIMEOUT)
+ device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);
+
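+	/*
+	 * Report the reset as completed in the VF-visible reset status
+	 * register while resources are rebuilt; VFACTIVE is written once
+	 * the VSI and queue mappings are set back up below.
+	 */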
+ wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_COMPLETED);
+
+ vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
+ vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+ wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
+
+ if (vf->vsi.seid != 0)
+ ixl_disable_rings(&vf->vsi);
+
+ ixl_vf_release_resources(pf, vf);
+ ixl_vf_setup_vsi(pf, vf);
+ ixl_vf_map_queues(pf, vf);
+
+ wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_VFACTIVE);
+ ixl_flush(hw);
+}
+
+static const char *
+ixl_vc_opcode_str(uint16_t op)
+{
+
+ switch (op) {
+ case I40E_VIRTCHNL_OP_VERSION:
+ return ("VERSION");
+ case I40E_VIRTCHNL_OP_RESET_VF:
+ return ("RESET_VF");
+ case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+ return ("GET_VF_RESOURCES");
+ case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
+ return ("CONFIG_TX_QUEUE");
+ case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
+ return ("CONFIG_RX_QUEUE");
+ case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ return ("CONFIG_VSI_QUEUES");
+ case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ return ("CONFIG_IRQ_MAP");
+ case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
+ return ("ENABLE_QUEUES");
+ case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
+ return ("DISABLE_QUEUES");
+ case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
+ return ("ADD_ETHER_ADDRESS");
+ case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
+ return ("DEL_ETHER_ADDRESS");
+ case I40E_VIRTCHNL_OP_ADD_VLAN:
+ return ("ADD_VLAN");
+ case I40E_VIRTCHNL_OP_DEL_VLAN:
+ return ("DEL_VLAN");
+ case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ return ("CONFIG_PROMISCUOUS_MODE");
+ case I40E_VIRTCHNL_OP_GET_STATS:
+ return ("GET_STATS");
+ case I40E_VIRTCHNL_OP_FCOE:
+ return ("FCOE");
+ case I40E_VIRTCHNL_OP_EVENT:
+ return ("EVENT");
+ default:
+ return ("UNKNOWN");
+ }
+}
+
+static int
+ixl_vc_opcode_level(uint16_t opcode)
+{
+
+ switch (opcode) {
+ case I40E_VIRTCHNL_OP_GET_STATS:
+ return (10);
+ default:
+ return (5);
+ }
+}
+
+static void
+ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
+ enum i40e_status_code status, void *msg, uint16_t len)
+{
+ struct i40e_hw *hw;
+ int global_vf_id;
+
+ hw = &pf->hw;
+ global_vf_id = hw->func_caps.vf_base_id + vf->vf_num;
+
+ I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op),
+ "Sending msg (op=%s[%d], status=%d) to VF-%d\n",
+ ixl_vc_opcode_str(op), op, status, vf->vf_num);
+
+ i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL);
+}
+
+static void
+ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op)
+{
+
+ ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0);
+}
+
+static void
+ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
+ enum i40e_status_code status, const char *file, int line)
+{
+
+ I40E_VC_DEBUG(pf, 1,
+ "Sending NACK (op=%s[%d], err=%d) to VF-%d from %s:%d\n",
+ ixl_vc_opcode_str(op), op, status, vf->vf_num, file, line);
+ ixl_send_vf_msg(pf, vf, op, status, NULL, 0);
+}
+
+static void
+ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+ uint16_t msg_size)
+{
+ struct i40e_virtchnl_version_info reply;
+
+ if (msg_size != sizeof(struct i40e_virtchnl_version_info)) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_VERSION,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ reply.major = I40E_VIRTCHNL_VERSION_MAJOR;
+ reply.minor = I40E_VIRTCHNL_VERSION_MINOR;
+ ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply,
+ sizeof(reply));
+}
+
+static void
+ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+ uint16_t msg_size)
+{
+
+ if (msg_size != 0) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_RESET_VF,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ ixl_reset_vf(pf, vf);
+
+ /* No response to a reset message. */
+}
+
+static void
+ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+ uint16_t msg_size)
+{
+ struct i40e_virtchnl_vf_resource reply;
+
+ if (msg_size != 0) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ bzero(&reply, sizeof(reply));
+
+ reply.vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
+
+ reply.num_vsis = 1;
+ reply.num_queue_pairs = vf->vsi.num_queues;
+ reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
+ reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
+ reply.vsi_res[0].vsi_type = I40E_VSI_SRIOV;
+ reply.vsi_res[0].num_queue_pairs = vf->vsi.num_queues;
+ memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);
+
+ ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ I40E_SUCCESS, &reply, sizeof(reply));
+}
+
+static int
+ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
+ struct i40e_virtchnl_txq_info *info)
+{
+ struct i40e_hw *hw;
+ struct i40e_hmc_obj_txq txq;
+ uint16_t global_queue_num, global_vf_num;
+ enum i40e_status_code status;
+ uint32_t qtx_ctl;
+
+ hw = &pf->hw;
+ global_queue_num = vf->vsi.first_queue + info->queue_id;
+ global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
+ bzero(&txq, sizeof(txq));
+
+ status = i40e_clear_lan_tx_queue_context(hw, global_queue_num);
+ if (status != I40E_SUCCESS)
+ return (EINVAL);
+
+ txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS;
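+	/* The HMC context stores the ring base address in units of
+	 * IXL_TX_CTX_BASE_UNITS bytes, hence the division above. */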
+
+ txq.head_wb_ena = info->headwb_enabled;
+ txq.head_wb_addr = info->dma_headwb_addr;
+ txq.qlen = info->ring_len;
+ txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]);
+ txq.rdylist_act = 0;
+
+ status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq);
+ if (status != I40E_SUCCESS)
return (EINVAL);
+
+ qtx_ctl = I40E_QTX_CTL_VF_QUEUE |
+ (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) |
+ (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT);
+ wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl);
+ ixl_flush(hw);
+
+ return (0);
+}
+
+static int
+ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
+ struct i40e_virtchnl_rxq_info *info)
+{
+ struct i40e_hw *hw;
+ struct i40e_hmc_obj_rxq rxq;
+ uint16_t global_queue_num;
+ enum i40e_status_code status;
+
+ hw = &pf->hw;
+ global_queue_num = vf->vsi.first_queue + info->queue_id;
+ bzero(&rxq, sizeof(rxq));
+
+ if (info->databuffer_size > IXL_VF_MAX_BUFFER)
+ return (EINVAL);
+
+ if (info->max_pkt_size > IXL_VF_MAX_FRAME ||
+ info->max_pkt_size < ETHER_MIN_LEN)
+ return (EINVAL);
+
+ if (info->splithdr_enabled) {
+ if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER)
+ return (EINVAL);
+
+ rxq.hsplit_0 = info->rx_split_pos &
+ (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 |
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP |
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP |
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP);
+ rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
+
+ rxq.dtype = 2;
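+		/* dtype 2 selects a split-buffer descriptor format here;
+		 * see the HMC RX queue context definitions for the exact
+		 * encoding. */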
}
- // Don't use this sysctl yet
- if (TRUE)
- return (ENODEV);
+ status = i40e_clear_lan_rx_queue_context(hw, global_queue_num);
+ if (status != I40E_SUCCESS)
+ return (EINVAL);
- sbuf_cat(buf, "\n");
+ rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS;
+ rxq.qlen = info->ring_len;
- // set to queue 1?
- struct ixl_queue *que = pf->vsi.queues;
- struct tx_ring *txr = &(que[1].txr);
- struct i40e_tx_desc *txd = &txr->base[desc_idx];
+ rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
- sbuf_printf(buf, "Que: %d, Desc: %d\n", que->me, desc_idx);
- sbuf_printf(buf, "Addr: %#18lx\n", txd->buffer_addr);
- sbuf_printf(buf, "Opts: %#18lx\n", txd->cmd_type_offset_bsz);
+ rxq.dsize = 1;
+ rxq.crcstrip = 1;
+ rxq.l2tsel = 1;
- error = sbuf_finish(buf);
+ rxq.rxmax = info->max_pkt_size;
+ rxq.tphrdesc_ena = 1;
+ rxq.tphwdesc_ena = 1;
+ rxq.tphdata_ena = 1;
+ rxq.tphhead_ena = 1;
+ rxq.lrxqthresh = 2;
+ rxq.prefena = 1;
+
+ status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq);
+ if (status != I40E_SUCCESS)
+ return (EINVAL);
+
+ return (0);
+}
+
+static void
+ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+ uint16_t msg_size)
+{
+ struct i40e_virtchnl_vsi_queue_config_info *info;
+ struct i40e_virtchnl_queue_pair_info *pair;
+ int i;
+
+ if (msg_size < sizeof(*info)) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ info = msg;
+ if (info->num_queue_pairs == 0) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ if (msg_size != sizeof(*info) + info->num_queue_pairs * sizeof(*pair)) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ if (info->vsi_id != vf->vsi.vsi_num) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ for (i = 0; i < info->num_queue_pairs; i++) {
+ pair = &info->qpair[i];
+
+ if (pair->txq.vsi_id != vf->vsi.vsi_num ||
+ pair->rxq.vsi_id != vf->vsi.vsi_num ||
+ pair->txq.queue_id != pair->rxq.queue_id ||
+ pair->txq.queue_id >= vf->vsi.num_queues) {
+
+ i40e_send_vf_nack(pf, vf,
+ I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
+ return;
+ }
+
+ if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) {
+ i40e_send_vf_nack(pf, vf,
+ I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
+ return;
+ }
+
+ if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) {
+ i40e_send_vf_nack(pf, vf,
+ I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
+ return;
+ }
+ }
+
+ ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES);
+}
+
+static void
+ixl_vf_set_qctl(struct ixl_pf *pf,
+ const struct i40e_virtchnl_vector_map *vector,
+ enum i40e_queue_type cur_type, uint16_t cur_queue,
+ enum i40e_queue_type *last_type, uint16_t *last_queue)
+{
+ uint32_t offset, qctl;
+ uint16_t itr_indx;
+
+ if (cur_type == I40E_QUEUE_TYPE_RX) {
+ offset = I40E_QINT_RQCTL(cur_queue);
+ itr_indx = vector->rxitr_idx;
+ } else {
+ offset = I40E_QINT_TQCTL(cur_queue);
+ itr_indx = vector->txitr_idx;
+ }
+
+ qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
+ (*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
+ (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
+ I40E_QINT_RQCTL_CAUSE_ENA_MASK |
+ (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT));
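+	/*
+	 * Note: the QCTL written for cur_queue records *last_queue as its
+	 * next-queue hop, so callers end up building the interrupt linked
+	 * list back to front.
+	 */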
+
+ wr32(&pf->hw, offset, qctl);
+
+ *last_type = cur_type;
+ *last_queue = cur_queue;
+}
+
+static void
+ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf,
+ const struct i40e_virtchnl_vector_map *vector)
+{
+ struct i40e_hw *hw;
+ u_int qindex;
+ enum i40e_queue_type type, last_type;
+ uint32_t lnklst_reg;
+ uint16_t rxq_map, txq_map, cur_queue, last_queue;
+
+ hw = &pf->hw;
+
+ rxq_map = vector->rxq_map;
+ txq_map = vector->txq_map;
+
+ last_queue = IXL_END_OF_INTR_LNKLST;
+ last_type = I40E_QUEUE_TYPE_RX;
+
+ /*
+	 * The datasheet says that, to optimize performance, RX and TX
+	 * queues should be interleaved in the interrupt linked list, so
+	 * we process both at once here.
+ */
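+	/*
+	 * Illustrative walk-through: with txq_map = rxq_map = 0x3 the loop
+	 * writes TX0 (next = end of list), RX0 (next = TX0), TX1 (next =
+	 * RX0), then RX1 (next = TX1); the LNKLST register below then
+	 * points at RX1, so traversal runs RX1 -> TX1 -> RX0 -> TX0.
+	 */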
+ while ((rxq_map != 0) || (txq_map != 0)) {
+ if (txq_map != 0) {
+ qindex = ffs(txq_map) - 1;
+ type = I40E_QUEUE_TYPE_TX;
+ cur_queue = vf->vsi.first_queue + qindex;
+ ixl_vf_set_qctl(pf, vector, type, cur_queue,
+ &last_type, &last_queue);
+ txq_map &= ~(1 << qindex);
+ }
+
+ if (rxq_map != 0) {
+ qindex = ffs(rxq_map) - 1;
+ type = I40E_QUEUE_TYPE_RX;
+ cur_queue = vf->vsi.first_queue + qindex;
+ ixl_vf_set_qctl(pf, vector, type, cur_queue,
+ &last_type, &last_queue);
+ rxq_map &= ~(1 << qindex);
+ }
+ }
+
+ if (vector->vector_id == 0)
+ lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num);
+ else
+ lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id,
+ vf->vf_num);
+ wr32(hw, lnklst_reg,
+ (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
+ (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
+
+ ixl_flush(hw);
+}
+
+static void
+ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+ uint16_t msg_size)
+{
+ struct i40e_virtchnl_irq_map_info *map;
+ struct i40e_virtchnl_vector_map *vector;
+ struct i40e_hw *hw;
+ int i, largest_txq, largest_rxq;
+
+ hw = &pf->hw;
+
+ if (msg_size < sizeof(*map)) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ map = msg;
+ if (map->num_vectors == 0) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ for (i = 0; i < map->num_vectors; i++) {
+ vector = &map->vecmap[i];
+
+ if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) ||
+ vector->vsi_id != vf->vsi.vsi_num) {
+ i40e_send_vf_nack(pf, vf,
+ I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);
+ return;
+ }
+
+ if (vector->rxq_map != 0) {
+ largest_rxq = fls(vector->rxq_map) - 1;
+ if (largest_rxq >= vf->vsi.num_queues) {
+ i40e_send_vf_nack(pf, vf,
+ I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ I40E_ERR_PARAM);
+ return;
+ }
+ }
+
+ if (vector->txq_map != 0) {
+ largest_txq = fls(vector->txq_map) - 1;
+ if (largest_txq >= vf->vsi.num_queues) {
+ i40e_send_vf_nack(pf, vf,
+ I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ I40E_ERR_PARAM);
+ return;
+ }
+ }
+
+ if (vector->rxitr_idx > IXL_MAX_ITR_IDX ||
+ vector->txitr_idx > IXL_MAX_ITR_IDX) {
+ i40e_send_vf_nack(pf, vf,
+ I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ ixl_vf_config_vector(pf, vf, vector);
+ }
+
+ ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP);
+}
+
+static void
+ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+ uint16_t msg_size)
+{
+ struct i40e_virtchnl_queue_select *select;
+ int error;
+
+ if (msg_size != sizeof(*select)) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ select = msg;
+ if (select->vsi_id != vf->vsi.vsi_num ||
+ select->rx_queues == 0 || select->tx_queues == 0) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ error = ixl_enable_rings(&vf->vsi);
if (error) {
- device_printf(dev, "Error finishing sbuf: %d\n", error);
- sbuf_delete(buf);
- return error;
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ I40E_ERR_TIMEOUT);
+ return;
}
- error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
- if (error)
- device_printf(dev, "sysctl error: %d\n", error);
- sbuf_delete(buf);
- return error;
+ ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES);
}
-#endif
+static void
+ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
+ void *msg, uint16_t msg_size)
+{
+ struct i40e_virtchnl_queue_select *select;
+ int error;
+
+ if (msg_size != sizeof(*select)) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ select = msg;
+ if (select->vsi_id != vf->vsi.vsi_num ||
+ select->rx_queues == 0 || select->tx_queues == 0) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ error = ixl_disable_rings(&vf->vsi);
+ if (error) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ I40E_ERR_TIMEOUT);
+ return;
+ }
+
+ ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES);
+}
+
+static boolean_t
+ixl_zero_mac(const uint8_t *addr)
+{
+ uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
+
+ return (cmp_etheraddr(addr, zero));
+}
+
+static boolean_t
+ixl_bcast_mac(const uint8_t *addr)
+{
+
+ return (cmp_etheraddr(addr, ixl_bcast_addr));
+}
+
+static int
+ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr)
+{
+
+ if (ixl_zero_mac(addr) || ixl_bcast_mac(addr))
+ return (EINVAL);
+
+ /*
+ * If the VF is not allowed to change its MAC address, don't let it
+ * set a MAC filter for an address that is not a multicast address and
+ * is not its assigned MAC.
+ */
+ if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
+ !(ETHER_IS_MULTICAST(addr) || cmp_etheraddr(addr, vf->mac)))
+ return (EPERM);
+
+ return (0);
+}
+
+static void
+ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+ uint16_t msg_size)
+{
+ struct i40e_virtchnl_ether_addr_list *addr_list;
+ struct i40e_virtchnl_ether_addr *addr;
+ struct ixl_vsi *vsi;
+ int i;
+ size_t expected_size;
+
+ vsi = &vf->vsi;
+
+ if (msg_size < sizeof(*addr_list)) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ addr_list = msg;
+ expected_size = sizeof(*addr_list) +
+ addr_list->num_elements * sizeof(*addr);
+
+ if (addr_list->num_elements == 0 ||
+ addr_list->vsi_id != vsi->vsi_num ||
+ msg_size != expected_size) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ for (i = 0; i < addr_list->num_elements; i++) {
+ if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) {
+ i40e_send_vf_nack(pf, vf,
+ I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
+ return;
+ }
+ }
+
+ for (i = 0; i < addr_list->num_elements; i++) {
+ addr = &addr_list->list[i];
+ ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY);
+ }
+
+ ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS);
+}
+
+static void
+ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+ uint16_t msg_size)
+{
+ struct i40e_virtchnl_ether_addr_list *addr_list;
+ struct i40e_virtchnl_ether_addr *addr;
+ size_t expected_size;
+ int i;
+
+ if (msg_size < sizeof(*addr_list)) {
+		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ addr_list = msg;
+ expected_size = sizeof(*addr_list) +
+ addr_list->num_elements * sizeof(*addr);
+
+ if (addr_list->num_elements == 0 ||
+ addr_list->vsi_id != vf->vsi.vsi_num ||
+ msg_size != expected_size) {
+		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ for (i = 0; i < addr_list->num_elements; i++) {
+ addr = &addr_list->list[i];
+ if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) {
+ i40e_send_vf_nack(pf, vf,
+			    I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, I40E_ERR_PARAM);
+ return;
+ }
+ }
+
+ for (i = 0; i < addr_list->num_elements; i++) {
+ addr = &addr_list->list[i];
+ ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY);
+ }
+
+ ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS);
+}
+
+static enum i40e_status_code
+ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf)
+{
+ struct i40e_vsi_context vsi_ctx;
+
+ vsi_ctx.seid = vf->vsi.seid;
+
+ bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
+ vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
+ I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
+ return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL));
+}
+
+static void
+ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+ uint16_t msg_size)
+{
+ struct i40e_virtchnl_vlan_filter_list *filter_list;
+ enum i40e_status_code code;
+ size_t expected_size;
+ int i;
+
+ if (msg_size < sizeof(*filter_list)) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ filter_list = msg;
+ expected_size = sizeof(*filter_list) +
+ filter_list->num_elements * sizeof(uint16_t);
+ if (filter_list->num_elements == 0 ||
+ filter_list->vsi_id != vf->vsi.vsi_num ||
+ msg_size != expected_size) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ for (i = 0; i < filter_list->num_elements; i++) {
+ if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
+ I40E_ERR_PARAM);
+ return;
+ }
+ }
+
+ code = ixl_vf_enable_vlan_strip(pf, vf);
+	if (code != I40E_SUCCESS) {
+		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
+		    I40E_ERR_PARAM);
+		return;
+	}
+
+ for (i = 0; i < filter_list->num_elements; i++)
+ ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
+
+ ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN);
+}
+
+static void
+ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+ uint16_t msg_size)
+{
+ struct i40e_virtchnl_vlan_filter_list *filter_list;
+ int i;
+ size_t expected_size;
+
+ if (msg_size < sizeof(*filter_list)) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ filter_list = msg;
+ expected_size = sizeof(*filter_list) +
+ filter_list->num_elements * sizeof(uint16_t);
+ if (filter_list->num_elements == 0 ||
+ filter_list->vsi_id != vf->vsi.vsi_num ||
+ msg_size != expected_size) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ for (i = 0; i < filter_list->num_elements; i++) {
+ if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
+			i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
+ I40E_ERR_PARAM);
+ return;
+ }
+ }
+
+ if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
+		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ for (i = 0; i < filter_list->num_elements; i++)
+ ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
+
+ ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN);
+}
+
+static void
+ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
+ void *msg, uint16_t msg_size)
+{
+ struct i40e_virtchnl_promisc_info *info;
+ enum i40e_status_code code;
+
+ if (msg_size != sizeof(*info)) {
+ i40e_send_vf_nack(pf, vf,
+ I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
+ return;
+ }
+
+	if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) {
+ i40e_send_vf_nack(pf, vf,
+ I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
+ return;
+ }
+
+ info = msg;
+ if (info->vsi_id != vf->vsi.vsi_num) {
+ i40e_send_vf_nack(pf, vf,
+ I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
+ return;
+ }
+
+ code = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, info->vsi_id,
+ info->flags & I40E_FLAG_VF_UNICAST_PROMISC, NULL);
+ if (code != I40E_SUCCESS) {
+ i40e_send_vf_nack(pf, vf,
+ I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
+ return;
+ }
+
+ code = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, info->vsi_id,
+ info->flags & I40E_FLAG_VF_MULTICAST_PROMISC, NULL);
+ if (code != I40E_SUCCESS) {
+ i40e_send_vf_nack(pf, vf,
+ I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
+ return;
+ }
+
+ ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
+}
+
+static void
+ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+ uint16_t msg_size)
+{
+ struct i40e_virtchnl_queue_select *queue;
+
+ if (msg_size != sizeof(*queue)) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ queue = msg;
+ if (queue->vsi_id != vf->vsi.vsi_num) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ ixl_update_eth_stats(&vf->vsi);
+
+ ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
+ I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
+}
+
+static void
+ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
+{
+ struct ixl_vf *vf;
+ void *msg;
+ uint16_t vf_num, msg_size;
+ uint32_t opcode;
+
+ vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id;
+ opcode = le32toh(event->desc.cookie_high);
+
+ if (vf_num >= pf->num_vfs) {
+ device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num);
+ return;
+ }
+
+ vf = &pf->vfs[vf_num];
+ msg = event->msg_buf;
+ msg_size = event->msg_len;
+
+ I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode),
+ "Got msg %s(%d) from VF-%d of size %d\n",
+ ixl_vc_opcode_str(opcode), opcode, vf_num, msg_size);
+
+ switch (opcode) {
+ case I40E_VIRTCHNL_OP_VERSION:
+ ixl_vf_version_msg(pf, vf, msg, msg_size);
+ break;
+ case I40E_VIRTCHNL_OP_RESET_VF:
+ ixl_vf_reset_msg(pf, vf, msg, msg_size);
+ break;
+ case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+ ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
+ break;
+ case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
+ ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
+ break;
+ case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
+ ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
+ break;
+ case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
+ ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
+ break;
+ case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
+ ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
+ break;
+ case I40E_VIRTCHNL_OP_ADD_VLAN:
+ ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
+ break;
+ case I40E_VIRTCHNL_OP_DEL_VLAN:
+ ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
+ break;
+ case I40E_VIRTCHNL_OP_GET_STATS:
+ ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
+ break;
+
+ /* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */
+ case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
+ case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
+ default:
+ i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
+ break;
+ }
+}
+
+/* Handle any VFs that have reset themselves via a Function Level Reset (FLR). */
+static void
+ixl_handle_vflr(void *arg, int pending)
+{
+ struct ixl_pf *pf;
+ struct i40e_hw *hw;
+ uint16_t global_vf_num;
+ uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;
+ int i;
+
+ pf = arg;
+ hw = &pf->hw;
+
+ IXL_PF_LOCK(pf);
+ for (i = 0; i < pf->num_vfs; i++) {
+ global_vf_num = hw->func_caps.vf_base_id + i;
+
+ vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num);
+ vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num);
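+		/*
+		 * VFLRSTAT is one bit per VF spread across 32-bit
+		 * registers; the _INDEX macro presumably selects the
+		 * register (global_vf_num / 32) and _MASK the bit
+		 * within it.
+		 */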
+ vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index));
+ if (vflrstat & vflrstat_mask) {
+ wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index),
+ vflrstat_mask);
+
+ ixl_reinit_vf(pf, &pf->vfs[i]);
+ }
+ }
+
+ icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
+ icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
+ wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
+ ixl_flush(hw);
+
+ IXL_PF_UNLOCK(pf);
+}
+
+static int
+ixl_adminq_err_to_errno(enum i40e_admin_queue_err err)
+{
+
+ switch (err) {
+ case I40E_AQ_RC_EPERM:
+ return (EPERM);
+ case I40E_AQ_RC_ENOENT:
+ return (ENOENT);
+ case I40E_AQ_RC_ESRCH:
+ return (ESRCH);
+ case I40E_AQ_RC_EINTR:
+ return (EINTR);
+ case I40E_AQ_RC_EIO:
+ return (EIO);
+ case I40E_AQ_RC_ENXIO:
+ return (ENXIO);
+ case I40E_AQ_RC_E2BIG:
+ return (E2BIG);
+ case I40E_AQ_RC_EAGAIN:
+ return (EAGAIN);
+ case I40E_AQ_RC_ENOMEM:
+ return (ENOMEM);
+ case I40E_AQ_RC_EACCES:
+ return (EACCES);
+ case I40E_AQ_RC_EFAULT:
+ return (EFAULT);
+ case I40E_AQ_RC_EBUSY:
+ return (EBUSY);
+ case I40E_AQ_RC_EEXIST:
+ return (EEXIST);
+ case I40E_AQ_RC_EINVAL:
+ return (EINVAL);
+ case I40E_AQ_RC_ENOTTY:
+ return (ENOTTY);
+ case I40E_AQ_RC_ENOSPC:
+ return (ENOSPC);
+ case I40E_AQ_RC_ENOSYS:
+ return (ENOSYS);
+ case I40E_AQ_RC_ERANGE:
+ return (ERANGE);
+ case I40E_AQ_RC_EFLUSHED:
+ return (EINVAL); /* No exact equivalent in errno.h */
+ case I40E_AQ_RC_BAD_ADDR:
+ return (EFAULT);
+ case I40E_AQ_RC_EMODE:
+ return (EPERM);
+ case I40E_AQ_RC_EFBIG:
+ return (EFBIG);
+ default:
+ return (EINVAL);
+ }
+}
+
+static int
+ixl_init_iov(device_t dev, uint16_t num_vfs, const nvlist_t *params)
+{
+ struct ixl_pf *pf;
+ struct i40e_hw *hw;
+ struct ixl_vsi *pf_vsi;
+ enum i40e_status_code ret;
+ int i, error;
+
+ pf = device_get_softc(dev);
+ hw = &pf->hw;
+ pf_vsi = &pf->vsi;
+
+ IXL_PF_LOCK(pf);
+ pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
+ M_ZERO);
+
+ if (pf->vfs == NULL) {
+ error = ENOMEM;
+ goto fail;
+ }
+
+ for (i = 0; i < num_vfs; i++)
+ sysctl_ctx_init(&pf->vfs[i].ctx);
+
+ ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid,
+ 1, FALSE, FALSE, &pf->veb_seid, NULL);
+ if (ret != I40E_SUCCESS) {
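+	/* VFs poll GET_STATS continuously, so require a more verbose
+	 * debug level before logging it than for other opcodes. */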
+ error = ixl_adminq_err_to_errno(hw->aq.asq_last_status);
+ device_printf(dev, "add_veb failed; code=%d error=%d", ret,
+ error);
+ goto fail;
+ }
+
+ ixl_configure_msix(pf);
+ ixl_enable_adminq(hw);
+
+ pf->num_vfs = num_vfs;
+ IXL_PF_UNLOCK(pf);
+ return (0);
+
+fail:
+ free(pf->vfs, M_IXL);
+ pf->vfs = NULL;
+ IXL_PF_UNLOCK(pf);
+ return (error);
+}
+
+static void
+ixl_uninit_iov(device_t dev)
+{
+ struct ixl_pf *pf;
+ struct i40e_hw *hw;
+ struct ixl_vsi *vsi;
+ struct ifnet *ifp;
+ struct ixl_vf *vfs;
+ int i, num_vfs;
+
+ pf = device_get_softc(dev);
+ hw = &pf->hw;
+ vsi = &pf->vsi;
+ ifp = vsi->ifp;
+
+ IXL_PF_LOCK(pf);
+ for (i = 0; i < pf->num_vfs; i++) {
+ if (pf->vfs[i].vsi.seid != 0)
+ i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
+ }
+
+ if (pf->veb_seid != 0) {
+ i40e_aq_delete_element(hw, pf->veb_seid, NULL);
+ pf->veb_seid = 0;
+ }
+
+ if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
+ ixl_disable_intr(vsi);
+
+ vfs = pf->vfs;
+ num_vfs = pf->num_vfs;
+
+ pf->vfs = NULL;
+ pf->num_vfs = 0;
+ IXL_PF_UNLOCK(pf);
+
+ /* Do this after the unlock as sysctl_ctx_free might sleep. */
+ for (i = 0; i < num_vfs; i++)
+ sysctl_ctx_free(&vfs[i].ctx);
+ free(vfs, M_IXL);
+}
+
+static int
+ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
+{
+ char sysctl_name[QUEUE_NAME_LEN];
+ struct ixl_pf *pf;
+ struct ixl_vf *vf;
+ const void *mac;
+ size_t size;
+ int error;
+
+ pf = device_get_softc(dev);
+ vf = &pf->vfs[vfnum];
+
+ IXL_PF_LOCK(pf);
+ vf->vf_num = vfnum;
+
+ vf->vsi.back = pf;
+ vf->vf_flags = VF_FLAG_ENABLED;
+ SLIST_INIT(&vf->vsi.ftl);
+
+ error = ixl_vf_setup_vsi(pf, vf);
+ if (error != 0)
+ goto out;
+
+ if (nvlist_exists_binary(params, "mac-addr")) {
+ mac = nvlist_get_binary(params, "mac-addr", &size);
+ bcopy(mac, vf->mac, ETHER_ADDR_LEN);
+
+ if (nvlist_get_bool(params, "allow-set-mac"))
+ vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
+ } else
+ /*
+ * If the administrator has not specified a MAC address then
+ * we must allow the VF to choose one.
+ */
+ vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
+
+ if (nvlist_get_bool(params, "mac-anti-spoof"))
+ vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;
+
+ if (nvlist_get_bool(params, "allow-promisc"))
+ vf->vf_flags |= VF_FLAG_PROMISC_CAP;
+
+ vf->vf_flags |= VF_FLAG_VLAN_CAP;
+
+ ixl_reset_vf(pf, vf);
+out:
+ IXL_PF_UNLOCK(pf);
+ if (error == 0) {
+ snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
+ ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name);
+ }
+
+ return (error);
+}
+#endif /* PCI_IOV */
diff --git a/sys/dev/ixl/if_ixlv.c b/sys/dev/ixl/if_ixlv.c
index 16599e4..2e041df 100644
--- a/sys/dev/ixl/if_ixlv.c
+++ b/sys/dev/ixl/if_ixlv.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2014, Intel Corporation
+ Copyright (c) 2013-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -32,15 +32,22 @@
******************************************************************************/
/*$FreeBSD$*/
+#ifndef IXL_STANDALONE_BUILD
#include "opt_inet.h"
#include "opt_inet6.h"
+#endif
+
#include "ixl.h"
#include "ixlv.h"
+#ifdef RSS
+#include <net/rss_config.h>
+#endif
+
/*********************************************************************
* Driver version
*********************************************************************/
-char ixlv_driver_version[] = "1.1.18";
+char ixlv_driver_version[] = "1.2.6";
/*********************************************************************
* PCI Device ID Table
@@ -390,7 +397,7 @@ ixlv_attach(device_t dev)
vsi->id = sc->vsi_res->vsi_id;
vsi->back = (void *)sc;
- vsi->link_up = TRUE;
+ sc->link_up = TRUE;
/* This allocates the memory and early settings */
if (ixlv_setup_queues(sc) != 0) {
@@ -472,7 +479,7 @@ ixlv_detach(device_t dev)
/* Make sure VLANS are not using driver */
if (vsi->ifp->if_vlantrunk != NULL) {
- device_printf(dev, "Vlan in use, detach first\n");
+		if_printf(vsi->ifp, "VLAN in use, detach first\n");
INIT_DBG_DEV(dev, "end");
return (EBUSY);
}
@@ -885,7 +892,7 @@ ixlv_init_locked(struct ixlv_sc *sc)
ixl_init_tx_ring(que);
- if (vsi->max_frame_size <= 2048)
+ if (vsi->max_frame_size <= MCLBYTES)
rxr->mbuf_sz = MCLBYTES;
else
rxr->mbuf_sz = MJUMPAGESIZE;
@@ -1161,7 +1168,11 @@ ixlv_init_msix(struct ixlv_sc *sc)
/* Override with hardcoded value if sane */
if ((ixlv_max_queues != 0) && (ixlv_max_queues <= queues))
queues = ixlv_max_queues;
-
+#ifdef RSS
+ /* If we're doing RSS, clamp at the number of RSS buckets */
+ if (queues > rss_getnumbuckets())
+ queues = rss_getnumbuckets();
+#endif
/* Enforce the VF max value */
if (queues > IXLV_MAX_QUEUES)
queues = IXLV_MAX_QUEUES;
@@ -1181,6 +1192,26 @@ ixlv_init_msix(struct ixlv_sc *sc)
goto fail;
}
+#ifdef RSS
+ /*
+ * If we're doing RSS, the number of queues needs to
+ * match the number of RSS buckets that are configured.
+ *
+ * + If there's more queues than RSS buckets, we'll end
+ * up with queues that get no traffic.
+ *
+ * + If there's more RSS buckets than queues, we'll end
+ * up having multiple RSS buckets map to the same queue,
+ * so there'll be some contention.
+ */
+ if (queues != rss_getnumbuckets()) {
+ device_printf(dev,
+ "%s: queues (%d) != RSS buckets (%d)"
+ "; performance will be impacted.\n",
+ __func__, queues, rss_getnumbuckets());
+ }
+#endif
+
if (pci_alloc_msix(dev, &vectors) == 0) {
device_printf(sc->dev,
"Using MSIX interrupts with %d vectors\n", vectors);
@@ -1352,6 +1383,7 @@ ixlv_assign_msix(struct ixlv_sc *sc)
int error, rid, vector = 1;
for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
+ int cpu_id = i;
rid = vector + 1;
txr = &que->txr;
que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
@@ -1372,15 +1404,25 @@ ixlv_assign_msix(struct ixlv_sc *sc)
}
bus_describe_intr(dev, que->res, que->tag, "que %d", i);
/* Bind the vector to a CPU */
- bus_bind_intr(dev, que->res, i);
+#ifdef RSS
+ cpu_id = rss_getcpu(i % rss_getnumbuckets());
+#endif
+ bus_bind_intr(dev, que->res, cpu_id);
que->msix = vector;
- vsi->que_mask |= (u64)(1 << que->msix);
+ vsi->que_mask |= (u64)(1 << que->msix);
TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
TASK_INIT(&que->task, 0, ixlv_handle_que, que);
que->tq = taskqueue_create_fast("ixlv_que", M_NOWAIT,
taskqueue_thread_enqueue, &que->tq);
- taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
- device_get_nameunit(sc->dev));
+#ifdef RSS
+ taskqueue_start_threads_pinned(&que->tq, 1, PI_NET,
+ cpu_id, "%s (bucket %d)",
+ device_get_nameunit(dev), cpu_id);
+#else
+ taskqueue_start_threads(&que->tq, 1, PI_NET,
+ "%s que", device_get_nameunit(dev));
+#endif
+
}
return (0);
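A concrete illustration of the binding above (the CPU layout is hypothetical): with rss_getnumbuckets() == 4 and buckets owned by CPUs 0, 2, 4 and 6, queue 5 maps to bucket 5 % 4 == 1 and is pinned to CPU 2, and its taskqueue thread is started pinned to the same CPU so the interrupt and the deferred work stay cache-local. The core of the pattern:

    #ifdef RSS
        /* Pin queue i's IRQ to the CPU that owns RSS bucket i % nbuckets. */
        cpu_id = rss_getcpu(i % rss_getnumbuckets());
    #endif
        bus_bind_intr(dev, que->res, cpu_id);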
@@ -1671,12 +1713,12 @@ early:
static void
ixlv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
- struct ixl_vsi *vsi = ifp->if_softc;
+ struct ixl_vsi *vsi = arg;
struct ixlv_sc *sc = vsi->back;
struct ixlv_vlan_filter *v;
- if (ifp->if_softc != arg) /* Not our event */
+ if (ifp->if_softc != arg) /* Not our event */
return;
if ((vtag == 0) || (vtag > 4095)) /* Invalid */
@@ -1708,12 +1750,12 @@ ixlv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
static void
ixlv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
- struct ixl_vsi *vsi = ifp->if_softc;
+ struct ixl_vsi *vsi = arg;
struct ixlv_sc *sc = vsi->back;
struct ixlv_vlan_filter *v;
- int i = 0;
+ int i = 0;
- if (ifp->if_softc != arg)
+ if (ifp->if_softc != arg)
return;
if ((vtag == 0) || (vtag > 4095)) /* Invalid */
@@ -1779,53 +1821,15 @@ ixlv_msix_adminq(void *arg)
{
struct ixlv_sc *sc = arg;
struct i40e_hw *hw = &sc->hw;
- device_t dev = sc->dev;
- u32 reg, mask, oldreg;
+ u32 reg, mask;
reg = rd32(hw, I40E_VFINT_ICR01);
mask = rd32(hw, I40E_VFINT_ICR0_ENA1);
reg = rd32(hw, I40E_VFINT_DYN_CTL01);
- reg |= I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
+ reg |= I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
wr32(hw, I40E_VFINT_DYN_CTL01, reg);
- /* check for Admin queue errors */
- oldreg = reg = rd32(hw, hw->aq.arq.len);
- if (reg & I40E_VF_ARQLEN_ARQVFE_MASK) {
- device_printf(dev, "ARQ VF Error detected\n");
- reg &= ~I40E_VF_ARQLEN_ARQVFE_MASK;
- }
- if (reg & I40E_VF_ARQLEN_ARQOVFL_MASK) {
- device_printf(dev, "ARQ Overflow Error detected\n");
- reg &= ~I40E_VF_ARQLEN_ARQOVFL_MASK;
- }
- if (reg & I40E_VF_ARQLEN_ARQCRIT_MASK) {
- device_printf(dev, "ARQ Critical Error detected\n");
- reg &= ~I40E_VF_ARQLEN_ARQCRIT_MASK;
- }
- if (oldreg != reg)
- wr32(hw, hw->aq.arq.len, reg);
-
- oldreg = reg = rd32(hw, hw->aq.asq.len);
- if (reg & I40E_VF_ATQLEN_ATQVFE_MASK) {
- device_printf(dev, "ASQ VF Error detected\n");
- reg &= ~I40E_VF_ATQLEN_ATQVFE_MASK;
- }
- if (reg & I40E_VF_ATQLEN_ATQOVFL_MASK) {
- device_printf(dev, "ASQ Overflow Error detected\n");
- reg &= ~I40E_VF_ATQLEN_ATQOVFL_MASK;
- }
- if (reg & I40E_VF_ATQLEN_ATQCRIT_MASK) {
- device_printf(dev, "ASQ Critical Error detected\n");
- reg &= ~I40E_VF_ATQLEN_ATQCRIT_MASK;
- }
- if (oldreg != reg)
- wr32(hw, hw->aq.asq.len, reg);
-
- /* re-enable interrupt causes */
- wr32(hw, I40E_VFINT_ICR0_ENA1, mask);
- wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK);
-
/* schedule task */
taskqueue_enqueue(sc->tq, &sc->aq_irq);
return;
@@ -1870,7 +1874,7 @@ ixlv_enable_adminq_irq(struct i40e_hw *hw)
wr32(hw, I40E_VFINT_DYN_CTL01,
I40E_VFINT_DYN_CTL01_INTENA_MASK |
I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
- wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA_ADMINQ_MASK);
+ wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
/* flush */
rd32(hw, I40E_VFGEN_RSTAT);
return;
@@ -1882,7 +1886,7 @@ ixlv_enable_queue_irq(struct i40e_hw *hw, int id)
u32 reg;
reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
- I40E_VFINT_DYN_CTLN_CLEARPBA_MASK;
+ I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK;
wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg);
}
@@ -2145,7 +2149,7 @@ ixlv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
ifmr->ifm_status = IFM_AVALID;
ifmr->ifm_active = IFM_ETHER;
- if (!vsi->link_up) {
+ if (!sc->link_up) {
mtx_unlock(&sc->mtx);
INIT_DBG_IF(ifp, "end: link not up");
return;
@@ -2366,8 +2370,8 @@ ixlv_local_timer(void *arg)
/*
** Check status on the queues for a hang
*/
- mask = (I40E_VFINT_DYN_CTLN_INTENA_MASK |
- I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK);
+ mask = (I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+ I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK);
for (int i = 0; i < vsi->num_queues; i++,que++) {
/* Any queues with outstanding work get a sw irq */
@@ -2386,7 +2390,7 @@ ixlv_local_timer(void *arg)
} else {
/* Check if we've come back from hung */
if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
- vsi->active_queues |= ((u64)1 << que->me);
+ vsi->active_queues |= ((u64)1 << que->me);
}
if (que->busy >= IXL_MAX_TX_BUSY) {
device_printf(dev,"Warning queue %d "
@@ -2417,20 +2421,19 @@ ixlv_update_link_status(struct ixlv_sc *sc)
{
struct ixl_vsi *vsi = &sc->vsi;
struct ifnet *ifp = vsi->ifp;
- device_t dev = sc->dev;
- if (vsi->link_up){
+ if (sc->link_up){
if (vsi->link_active == FALSE) {
if (bootverbose)
- device_printf(dev,"Link is Up, %d Gbps\n",
- (vsi->link_speed == I40E_LINK_SPEED_40GB) ? 40:10);
+ if_printf(ifp,"Link is Up, %d Gbps\n",
+ (sc->link_speed == I40E_LINK_SPEED_40GB) ? 40:10);
vsi->link_active = TRUE;
if_link_state_change(ifp, LINK_STATE_UP);
}
} else { /* Link down */
if (vsi->link_active == TRUE) {
if (bootverbose)
- device_printf(dev,"Link is Down\n");
+ if_printf(ifp,"Link is Down\n");
if_link_state_change(ifp, LINK_STATE_DOWN);
vsi->link_active = FALSE;
}
@@ -2521,16 +2524,18 @@ ixlv_config_rss(struct ixlv_sc *sc)
struct i40e_hw *hw = &sc->hw;
struct ixl_vsi *vsi = &sc->vsi;
u32 lut = 0;
- u64 set_hena, hena;
- int i, j;
-
- /* set up random bits */
- static const u32 seed[I40E_VFQF_HKEY_MAX_INDEX + 1] = {
- 0x794221b4, 0xbca0c5ab, 0x6cd5ebd9, 0x1ada6127,
- 0x983b3aa1, 0x1c4e71eb, 0x7f6328b2, 0xfcdc0da0,
- 0xc135cafa, 0x7a6f7e2d, 0xe7102d28, 0x163cd12e,
- 0x4954b126 };
-
+ u64 set_hena = 0, hena;
+ int i, j, que_id;
+#ifdef RSS
+ u32 rss_hash_config;
+ u32 rss_seed[IXL_KEYSZ];
+#else
+ u32 rss_seed[IXL_KEYSZ] = {0x41b01687,
+ 0x183cfd8c, 0xce880440, 0x580cbc3c,
+ 0x35897377, 0x328b25e1, 0x4fa98922,
+ 0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
+#endif
+
/* Don't set up RSS if using a single queue */
if (vsi->num_queues == 1) {
wr32(hw, I40E_VFQF_HENA(0), 0);
@@ -2539,11 +2544,32 @@ ixlv_config_rss(struct ixlv_sc *sc)
return;
}
+#ifdef RSS
+ /* Fetch the configured RSS key */
+ rss_getkey((uint8_t *) &rss_seed);
+#endif
/* Fill out hash function seed */
- for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
- wr32(hw, I40E_VFQF_HKEY(i), seed[i]);
+ for (i = 0; i < IXL_KEYSZ; i++)
+ wr32(hw, I40E_VFQF_HKEY(i), rss_seed[i]);
/* Enable PCTYPES for RSS: */
+#ifdef RSS
+ rss_hash_config = rss_gethashconfig();
+ if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
+ set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+ if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
+ set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
+ set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
+ if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
+ set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+ if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
+ set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+ if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
+ set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+ if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
+ set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
+#else
set_hena =
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
@@ -2556,7 +2582,7 @@ ixlv_config_rss(struct ixlv_sc *sc)
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
-
+#endif
hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
hena |= set_hena;
@@ -2564,16 +2590,26 @@ ixlv_config_rss(struct ixlv_sc *sc)
wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
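The #ifdef RSS branch above enables only the hash types the stack was configured for, instead of unconditionally enabling every PCTYPE. A reduced sketch showing just the IPv4 cases, assuming the enclosing function's hw handle (rss_gethashconfig() and the PCTYPE constants are the same ones used above):

    u64 hena = 0;
    u32 cfg = rss_gethashconfig();

    if (cfg & RSS_HASHTYPE_RSS_IPV4)
        hena |= (u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
    if (cfg & RSS_HASHTYPE_RSS_TCP_IPV4)
        hena |= (u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
    if (cfg & RSS_HASHTYPE_RSS_UDP_IPV4)
        hena |= (u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP;

    /* The 64-bit mask is split across two 32-bit HENA registers: */
    wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
    wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));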
/* Populate the LUT with max no. of queues in round robin fashion */
- for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; j++) {
+ for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++, j++) {
if (j == vsi->num_queues)
j = 0;
+#ifdef RSS
+ /*
+ * Fetch the RSS bucket id for the given indirection entry.
+ * Cap it at the number of configured buckets (which is
+ * num_queues.)
+ */
+ que_id = rss_get_indirection_to_bucket(i);
+ que_id = que_id % vsi->num_queues;
+#else
+ que_id = j;
+#endif
/* lut = 4-byte sliding window of 4 lut entries */
- lut = (lut << 8) | (j & 0xF);
+ lut = (lut << 8) | (que_id & 0xF);
/* On i = 3, we have 4 entries in lut; write to the register */
- if ((j & 3) == 3) {
+ if ((i & 3) == 3) {
wr32(hw, I40E_VFQF_HLUT(i), lut);
DDPRINTF(sc->dev, "HLUT(%2d): %#010x", i, lut);
- i++;
}
}
ixl_flush(hw);
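The LUT loop above packs four one-byte queue indices into each 32-bit write by sliding them in with an 8-bit shift. Traced with num_queues == 4, so que_id cycles 0,1,2,3:

    /* i = 0: lut = 0x00000000              (queue 0)      */
    /* i = 1: lut = 0x00000001              (queues 0,1)   */
    /* i = 2: lut = 0x00000102              (queues 0,1,2) */
    /* i = 3: lut = 0x00010203 -> wr32(hw, I40E_VFQF_HLUT(3), lut) */

The register index tracks i, so a full pass emits one write per four indirection entries.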
@@ -2615,7 +2651,6 @@ static int
ixlv_add_mac_filter(struct ixlv_sc *sc, u8 *macaddr, u16 flags)
{
struct ixlv_mac_filter *f;
- device_t dev = sc->dev;
/* Does one already exist? */
f = ixlv_find_mac_filter(sc, macaddr);
@@ -2628,7 +2663,7 @@ ixlv_add_mac_filter(struct ixlv_sc *sc, u8 *macaddr, u16 flags)
/* If not, get a new empty filter */
f = ixlv_get_mac_filter(sc);
if (f == NULL) {
- device_printf(dev, "%s: no filters available!!\n",
+ if_printf(sc->vsi.ifp, "%s: no filters available!!\n",
__func__);
return (ENOMEM);
}
@@ -2679,8 +2714,10 @@ ixlv_do_adminq_locked(struct ixlv_sc *sc)
struct i40e_hw *hw = &sc->hw;
struct i40e_arq_event_info event;
struct i40e_virtchnl_msg *v_msg;
- i40e_status ret;
+ device_t dev = sc->dev;
u16 result = 0;
+ u32 reg, oldreg;
+ i40e_status ret;
IXLV_CORE_LOCK_ASSERT(sc);
@@ -2698,6 +2735,39 @@ ixlv_do_adminq_locked(struct ixlv_sc *sc)
bzero(event.msg_buf, IXL_AQ_BUF_SZ);
} while (result);
+ /* check for Admin queue errors */
+ oldreg = reg = rd32(hw, hw->aq.arq.len);
+ if (reg & I40E_VF_ARQLEN1_ARQVFE_MASK) {
+ device_printf(dev, "ARQ VF Error detected\n");
+ reg &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
+ }
+ if (reg & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
+ device_printf(dev, "ARQ Overflow Error detected\n");
+ reg &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
+ }
+ if (reg & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
+ device_printf(dev, "ARQ Critical Error detected\n");
+ reg &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
+ }
+ if (oldreg != reg)
+ wr32(hw, hw->aq.arq.len, reg);
+
+ oldreg = reg = rd32(hw, hw->aq.asq.len);
+ if (reg & I40E_VF_ATQLEN1_ATQVFE_MASK) {
+ device_printf(dev, "ASQ VF Error detected\n");
+ reg &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
+ }
+ if (reg & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
+ device_printf(dev, "ASQ Overflow Error detected\n");
+ reg &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
+ }
+ if (reg & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
+ device_printf(dev, "ASQ Critical Error detected\n");
+ reg &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
+ }
+ if (oldreg != reg)
+ wr32(hw, hw->aq.asq.len, reg);
+
ixlv_enable_adminq_irq(hw);
}
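The error scan moved into ixlv_do_adminq_locked() above is a read/test/clear pattern for sticky status bits: read the queue-length register once, report and clear each error bit in the local copy, and write back only if something changed. A generic sketch with a hypothetical helper name (rd32/wr32 and the ARQ masks are the real ones used above):

    static void
    clear_sticky_arq_errors(struct i40e_hw *hw, u32 reg_addr, device_t dev)
    {
        u32 reg, oldreg;

        oldreg = reg = rd32(hw, reg_addr);
        if (reg & I40E_VF_ARQLEN1_ARQVFE_MASK) {
            device_printf(dev, "ARQ VF Error detected\n");
            reg &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
        }
        /* ... remaining bits tested and cleared the same way ... */
        if (oldreg != reg)      /* skip the write when nothing was set */
            wr32(hw, reg_addr, reg);
    }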
@@ -2759,7 +2829,7 @@ ixlv_add_sysctls(struct ixlv_sc *sc)
struct ixl_sysctl_info *entry = ctls;
while (entry->stat != 0)
{
- SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
+ SYSCTL_ADD_QUAD(ctx, child, OID_AUTO, entry->name,
CTLFLAG_RD, entry->stat,
entry->description);
entry++;
@@ -2775,34 +2845,34 @@ ixlv_add_sysctls(struct ixlv_sc *sc)
txr = &(queues[q].txr);
rxr = &(queues[q].rxr);
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
+ SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
"m_defrag() failed");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
+ SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "dropped",
CTLFLAG_RD, &(queues[q].dropped_pkts),
"Driver dropped packets");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
+ SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "irqs",
CTLFLAG_RD, &(queues[q].irqs),
"irqs on this queue");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
+ SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tso_tx",
CTLFLAG_RD, &(queues[q].tso),
"TSO");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
+ SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
CTLFLAG_RD, &(queues[q].tx_dma_setup),
"Driver tx dma failure in xmit");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
+ SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
CTLFLAG_RD, &(txr->no_desc),
"Queue No Descriptor Available");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
+ SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_packets",
CTLFLAG_RD, &(txr->total_packets),
"Queue Packets Transmitted");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
+ SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
CTLFLAG_RD, &(txr->tx_bytes),
"Queue Bytes Transmitted");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
+ SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_packets",
CTLFLAG_RD, &(rxr->rx_packets),
"Queue Packets Received");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
+ SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
CTLFLAG_RD, &(rxr->rx_bytes),
"Queue Bytes Received");
diff --git a/sys/dev/ixl/ixl.h b/sys/dev/ixl/ixl.h
index 2b640ce..df8f04f 100644
--- a/sys/dev/ixl/ixl.h
+++ b/sys/dev/ixl/ixl.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2014, Intel Corporation
+ Copyright (c) 2013-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -90,10 +90,15 @@
#include <sys/smp.h>
#include <machine/smp.h>
+#ifdef PCI_IOV
+#include <sys/nv.h>
+#include <sys/iov_schema.h>
+#endif
+
#include "i40e_type.h"
#include "i40e_prototype.h"
-#ifdef IXL_DEBUG
+#if defined(IXL_DEBUG) || defined(IXL_DEBUG_SYSCTL)
#include <sys/sbuf.h>
#define MAC_FORMAT "%02x:%02x:%02x:%02x:%02x:%02x"
@@ -101,7 +106,13 @@
(mac_addr)[0], (mac_addr)[1], (mac_addr)[2], (mac_addr)[3], \
(mac_addr)[4], (mac_addr)[5]
#define ON_OFF_STR(is_set) ((is_set) ? "On" : "Off")
+#endif /* IXL_DEBUG || IXL_DEBUG_SYSCTL */
+#ifdef IXL_DEBUG
+/* Enable debug sysctls */
+#ifndef IXL_DEBUG_SYSCTL
+#define IXL_DEBUG_SYSCTL 1
+#endif
#define _DBG_PRINTF(S, ...) printf("%s: " S "\n", __func__, ##__VA_ARGS__)
#define _DEV_DBG_PRINTF(dev, S, ...) device_printf(dev, "%s: " S "\n", __func__, ##__VA_ARGS__)
@@ -128,7 +139,7 @@
#define HW_DEBUGOUT(...) if (DEBUG_HW) _DBG_PRINTF(__VA_ARGS__)
-#else
+#else /* no IXL_DEBUG */
#define DEBUG_INIT 0
#define DEBUG_IOCTL 0
#define DEBUG_HW 0
@@ -144,7 +155,7 @@
#define IOCTL_DBG_IF2(...)
#define IOCTL_DBG_IF(...)
#define HW_DEBUGOUT(...)
-#endif
+#endif /* IXL_DEBUG */
/* Tunables */
@@ -202,7 +213,9 @@
#define IXL_TX_BUF_SZ ((u32) 1514)
#define IXL_AQ_BUF_SZ ((u32) 4096)
#define IXL_RX_HDR 128
+/* Controls the length of the Admin Queue */
#define IXL_AQ_LEN 256
+#define IXL_AQ_LEN_MAX 1024
#define IXL_AQ_BUFSZ 4096
#define IXL_RX_LIMIT 512
#define IXL_RX_ITR 0
@@ -214,6 +227,11 @@
#define IXL_MAX_TSO_SEGS 66
#define IXL_SPARSE_CHAIN 6
#define IXL_QUEUE_HUNG 0x80000000
+#define IXL_KEYSZ 10
+
+#define IXL_VF_MAX_BUFFER 0x3F80
+#define IXL_VF_MAX_HDR_BUFFER 0x840
+#define IXL_VF_MAX_FRAME 0x3FFF
/* ERJ: hardware can support ~1.5k filters between all functions */
#define IXL_MAX_FILTERS 256
@@ -256,6 +274,35 @@
#define IXL_FLAGS_KEEP_TSO4 (1 << 0)
#define IXL_FLAGS_KEEP_TSO6 (1 << 1)
+#define IXL_VF_RESET_TIMEOUT 100
+
+#define IXL_VSI_DATA_PORT 0x01
+
+#define IXLV_MAX_QUEUES 16
+#define IXL_MAX_VSI_QUEUES (2 * (I40E_VSILAN_QTABLE_MAX_INDEX + 1))
+
+#define IXL_RX_CTX_BASE_UNITS 128
+#define IXL_TX_CTX_BASE_UNITS 128
+
+#define IXL_VPINT_LNKLSTN_REG(hw, vector, vf_num) \
+ I40E_VPINT_LNKLSTN(((vector) - 1) + \
+ (((hw)->func_caps.num_msix_vectors_vf - 1) * (vf_num)))
+
+#define IXL_VFINT_DYN_CTLN_REG(hw, vector, vf_num) \
+ I40E_VFINT_DYN_CTLN(((vector) - 1) + \
+ (((hw)->func_caps.num_msix_vectors_vf - 1) * (vf_num)))
+
+#define IXL_PF_PCI_CIAA_VF_DEVICE_STATUS 0xAA
+
+#define IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK 0x20
+
+#define IXL_GLGEN_VFLRSTAT_INDEX(glb_vf) ((glb_vf) / 32)
+#define IXL_GLGEN_VFLRSTAT_MASK(glb_vf) (1 << ((glb_vf) % 32))
+
+#define IXL_MAX_ITR_IDX 3
+
+#define IXL_END_OF_INTR_LNKLST 0x7FF
+
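The IXL_VPINT_LNKLSTN_REG and IXL_VFINT_DYN_CTLN_REG macros above linearize a (vector, VF number) pair into the PF's per-VF register space; the queue vectors start at 1, hence the two - 1 terms. A worked example with a hypothetical capability value of num_msix_vectors_vf == 5:

    /* VF 3, queue vector 2: (2 - 1) + (5 - 1) * 3 == 13 */
    reg = IXL_VPINT_LNKLSTN_REG(hw, 2, 3);  /* -> I40E_VPINT_LNKLSTN(13) */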
#define IXL_TX_LOCK(_sc) mtx_lock(&(_sc)->mtx)
#define IXL_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->mtx)
#define IXL_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->mtx)
@@ -266,7 +313,7 @@
#define IXL_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->mtx)
#define IXL_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->mtx)
-#if __FreeBSD_version >= 1100000
+#if __FreeBSD_version >= 1100036
#define IXL_SET_IPACKETS(vsi, count) (vsi)->ipackets = (count)
#define IXL_SET_IERRORS(vsi, count) (vsi)->ierrors = (count)
#define IXL_SET_OPACKETS(vsi, count) (vsi)->opackets = (count)
@@ -452,20 +499,22 @@ struct ixl_vsi {
struct ifmedia media;
u64 que_mask;
int id;
+ u16 vsi_num;
u16 msix_base; /* station base MSIX vector */
+ u16 first_queue;
u16 num_queues;
u16 rx_itr_setting;
u16 tx_itr_setting;
struct ixl_queue *queues; /* head of queues */
bool link_active;
u16 seid;
+ u16 uplink_seid;
+ u16 downlink_seid;
u16 max_frame_size;
- u32 link_speed;
- bool link_up;
- u32 fc; /* local flow ctrl setting */
/* MAC/VLAN Filter list */
struct ixl_ftl_head ftl;
+ u16 num_macs;
struct i40e_aqc_vsi_properties_data info;
@@ -497,6 +546,7 @@ struct ixl_vsi {
/* Misc. */
u64 active_queues;
u64 flags;
+ struct sysctl_oid *vsi_node;
};
/*
@@ -535,7 +585,7 @@ ixl_get_filter(struct ixl_vsi *vsi)
** Compare two ethernet addresses
*/
static inline bool
-cmp_etheraddr(u8 *ea1, u8 *ea2)
+cmp_etheraddr(const u8 *ea1, const u8 *ea2)
{
bool cmp = FALSE;
diff --git a/sys/dev/ixl/ixl_pf.h b/sys/dev/ixl/ixl_pf.h
index 055c54f..4f0bdda 100644
--- a/sys/dev/ixl/ixl_pf.h
+++ b/sys/dev/ixl/ixl_pf.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2014, Intel Corporation
+ Copyright (c) 2013-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -36,6 +36,22 @@
#ifndef _IXL_PF_H_
#define _IXL_PF_H_
+#define VF_FLAG_ENABLED 0x01
+#define VF_FLAG_SET_MAC_CAP 0x02
+#define VF_FLAG_VLAN_CAP 0x04
+#define VF_FLAG_PROMISC_CAP 0x08
+#define VF_FLAG_MAC_ANTI_SPOOF 0x10
+
+struct ixl_vf {
+ struct ixl_vsi vsi;
+ uint32_t vf_flags;
+
+ uint8_t mac[ETHER_ADDR_LEN];
+ uint16_t vf_num;
+
+ struct sysctl_ctx_list ctx;
+};
+
/* Physical controller structure */
struct ixl_pf {
struct i40e_hw hw;
@@ -64,14 +80,18 @@ struct ixl_pf {
struct task adminq;
struct taskqueue *tq;
+ bool link_up;
+ u32 link_speed;
int advertised_speed;
+ int fc; /* local flow ctrl setting */
/*
- ** VSI - Stations:
+ ** Network interfaces
** These are the traffic class holders, and
** will have a stack interface and queues
** associated with them.
- ** NOTE: for now using just one, so embed it.
+ ** NOTE: The PF has only a single interface,
+ ** so it is embedded in the PF struct.
*/
struct ixl_vsi vsi;
@@ -83,8 +103,31 @@ struct ixl_pf {
struct i40e_hw_port_stats stats;
struct i40e_hw_port_stats stats_offsets;
bool stat_offsets_loaded;
+
+ struct ixl_vf *vfs;
+ int num_vfs;
+ uint16_t veb_seid;
+ struct task vflr_task;
+ int vc_debug_lvl;
};
+#define IXL_SET_ADVERTISE_HELP \
+"Control link advertise speed:\n" \
+"\tFlags:\n" \
+"\t\t0x1 - advertise 100 Mb\n" \
+"\t\t0x2 - advertise 1G\n" \
+"\t\t0x4 - advertise 10G\n" \
+"\t\t0x8 - advertise 20G\n\n" \
+"\tDoes not work on 40G devices."
+
+#define I40E_VC_DEBUG(pf, level, ...) \
+ do { \
+ if ((pf)->vc_debug_lvl >= (level)) \
+ device_printf((pf)->dev, __VA_ARGS__); \
+ } while (0)
+
+#define i40e_send_vf_nack(pf, vf, op, st) \
+ ixl_send_vf_nack_msg((pf), (vf), (op), (st), __FILE__, __LINE__)
#define IXL_PF_LOCK_INIT(_sc, _name) \
mtx_init(&(_sc)->pf_mtx, _name, "IXL PF Lock", MTX_DEF)
diff --git a/sys/dev/ixl/ixl_txrx.c b/sys/dev/ixl/ixl_txrx.c
index ea44ccb..830765a 100755..100644
--- a/sys/dev/ixl/ixl_txrx.c
+++ b/sys/dev/ixl/ixl_txrx.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2014, Intel Corporation
+ Copyright (c) 2013-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -38,10 +38,17 @@
** both the BASE and the VF drivers.
*/
+#ifndef IXL_STANDALONE_BUILD
#include "opt_inet.h"
#include "opt_inet6.h"
+#endif
+
#include "ixl.h"
+#ifdef RSS
+#include <net/rss_config.h>
+#endif
+
/* Local Prototypes */
static void ixl_rx_checksum(struct mbuf *, u32, u32, u8);
static void ixl_refresh_mbufs(struct ixl_queue *, int);
@@ -54,9 +61,12 @@ static __inline void ixl_rx_discard(struct rx_ring *, int);
static __inline void ixl_rx_input(struct rx_ring *, struct ifnet *,
struct mbuf *, u8);
+#ifdef DEV_NETMAP
+#include <dev/netmap/if_ixl_netmap.h>
+#endif /* DEV_NETMAP */
+
/*
** Multiqueue Transmit driver
-**
*/
int
ixl_mq_start(struct ifnet *ifp, struct mbuf *m)
@@ -65,14 +75,33 @@ ixl_mq_start(struct ifnet *ifp, struct mbuf *m)
struct ixl_queue *que;
struct tx_ring *txr;
int err, i;
+#ifdef RSS
+ u32 bucket_id;
+#endif
- /* check if flowid is set */
- if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
- i = m->m_pkthdr.flowid % vsi->num_queues;
- else
+ /*
+ ** Which queue to use:
+ **
+ ** When doing RSS, map it to the same outbound
+ ** queue as the incoming flow would be mapped to.
+ ** If everything is set up correctly, it should be
+ ** the same bucket as the CPU we're currently on.
+ */
+ if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
+#ifdef RSS
+ if (rss_hash2bucket(m->m_pkthdr.flowid,
+ M_HASHTYPE_GET(m), &bucket_id) == 0) {
+ i = bucket_id % vsi->num_queues;
+ } else
+#endif
+ i = m->m_pkthdr.flowid % vsi->num_queues;
+ } else
i = curcpu % vsi->num_queues;
-
- /* Check for a hung queue and pick alternative */
+ /*
+ ** This may not be perfect, but until something
+ ** better comes along it will keep us from scheduling
+ ** on stalled queues.
+ */
if (((1 << i) & vsi->active_queues) == 0)
i = ffsl(vsi->active_queues);
@@ -81,7 +110,7 @@ ixl_mq_start(struct ifnet *ifp, struct mbuf *m)
err = drbr_enqueue(ifp, txr->br, m);
if (err)
- return(err);
+ return (err);
if (IXL_TX_TRYLOCK(txr)) {
ixl_mq_start_locked(ifp, txr);
IXL_TX_UNLOCK(txr);
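The transmit-side queue pick above mirrors the receive mapping: hash the flow id back to an RSS bucket when possible, fall back to flowid modulo queues, and finally to the current CPU. Condensed, assuming the enclosing function's locals:

    if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
    #ifdef RSS
        if (rss_hash2bucket(m->m_pkthdr.flowid, M_HASHTYPE_GET(m),
            &bucket_id) == 0)
            i = bucket_id % vsi->num_queues;    /* RX and TX agree */
        else
    #endif
            i = m->m_pkthdr.flowid % vsi->num_queues;
    } else
        i = curcpu % vsi->num_queues;   /* no hash: stay CPU-local */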
@@ -457,11 +486,24 @@ fail:
void
ixl_init_tx_ring(struct ixl_queue *que)
{
- struct tx_ring *txr = &que->txr;
- struct ixl_tx_buf *buf;
+#ifdef DEV_NETMAP
+ struct netmap_adapter *na = NA(que->vsi->ifp);
+ struct netmap_slot *slot;
+#endif /* DEV_NETMAP */
+ struct tx_ring *txr = &que->txr;
+ struct ixl_tx_buf *buf;
/* Clear the old ring contents */
IXL_TX_LOCK(txr);
+
+#ifdef DEV_NETMAP
+ /*
+ * (under lock): if in netmap mode, do some consistency
+ * checks and set slot to entry 0 of the netmap ring.
+ */
+ slot = netmap_reset(na, NR_TX, que->me, 0);
+#endif /* DEV_NETMAP */
+
bzero((void *)txr->base,
(sizeof(struct i40e_tx_desc)) * que->num_desc);
@@ -485,6 +527,19 @@ ixl_init_tx_ring(struct ixl_queue *que)
m_freem(buf->m_head);
buf->m_head = NULL;
}
+#ifdef DEV_NETMAP
+ /*
+ * In netmap mode, set the map for the packet buffer.
+ * NOTE: Some drivers (not this one) also need to set
+ * the physical buffer address in the NIC ring.
+ * netmap_idx_n2k() maps a nic index, i, into the corresponding
+ * netmap slot index, si
+ */
+ if (slot) {
+ int si = netmap_idx_n2k(&na->tx_rings[que->me], i);
+ netmap_load_map(na, buf->tag, buf->map, NMB(na, slot + si));
+ }
+#endif /* DEV_NETMAP */
/* Clear the EOP index */
buf->eop_index = -1;
}
@@ -796,6 +851,11 @@ ixl_txeof(struct ixl_queue *que)
mtx_assert(&txr->mtx, MA_OWNED);
+#ifdef DEV_NETMAP
+ // XXX todo: implement moderation
+ if (netmap_tx_irq(que->vsi->ifp, que->me))
+ return FALSE;
+#endif /* DEV_NETMAP */
/* These are not the descriptors you seek, move along :) */
if (txr->avail == que->num_desc) {
@@ -1089,16 +1149,24 @@ int
ixl_init_rx_ring(struct ixl_queue *que)
{
struct rx_ring *rxr = &que->rxr;
-#if defined(INET6) || defined(INET)
struct ixl_vsi *vsi = que->vsi;
+#if defined(INET6) || defined(INET)
struct ifnet *ifp = vsi->ifp;
struct lro_ctrl *lro = &rxr->lro;
#endif
struct ixl_rx_buf *buf;
bus_dma_segment_t pseg[1], hseg[1];
int rsize, nsegs, error = 0;
+#ifdef DEV_NETMAP
+ struct netmap_adapter *na = NA(que->vsi->ifp);
+ struct netmap_slot *slot;
+#endif /* DEV_NETMAP */
IXL_RX_LOCK(rxr);
+#ifdef DEV_NETMAP
+ /* same as in ixl_init_tx_ring() */
+ slot = netmap_reset(na, NR_RX, que->me, 0);
+#endif /* DEV_NETMAP */
/* Clear the ring contents */
rsize = roundup2(que->num_desc *
sizeof(union i40e_rx_desc), DBA_ALIGN);
@@ -1132,6 +1200,27 @@ ixl_init_rx_ring(struct ixl_queue *que)
struct mbuf *mh, *mp;
buf = &rxr->buffers[j];
+#ifdef DEV_NETMAP
+ /*
+ * In netmap mode, fill the map and set the buffer
+ * address in the NIC ring, considering the offset
+ * between the netmap and NIC rings (see comment in
+ * ixgbe_setup_transmit_ring() ). No need to allocate
+ * an mbuf, so end the block with a continue;
+ */
+ if (slot) {
+ int sj = netmap_idx_n2k(&na->rx_rings[que->me], j);
+ uint64_t paddr;
+ void *addr;
+
+ addr = PNMB(na, slot + sj, &paddr);
+ netmap_load_map(na, rxr->dma.tag, buf->pmap, addr);
+ /* Update descriptor and the cached value */
+ rxr->base[j].read.pkt_addr = htole64(paddr);
+ rxr->base[j].read.hdr_addr = 0;
+ continue;
+ }
+#endif /* DEV_NETMAP */
/*
** Don't allocate mbufs if not
** doing header split, it's wasteful
@@ -1345,6 +1434,63 @@ ixl_rx_discard(struct rx_ring *rxr, int i)
return;
}
+#ifdef RSS
+/*
+** ixl_ptype_to_hash: parse the packet type
+** to determine the appropriate hash.
+*/
+static inline int
+ixl_ptype_to_hash(u8 ptype)
+{
+ struct i40e_rx_ptype_decoded decoded;
+ u8 ex = 0;
+
+ decoded = decode_rx_desc_ptype(ptype);
+ ex = decoded.outer_frag;
+
+ if (!decoded.known)
+ return M_HASHTYPE_OPAQUE;
+
+ if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_L2)
+ return M_HASHTYPE_OPAQUE;
+
+ /* Note: anything that gets to this point is IP */
+ if (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6) {
+ switch (decoded.inner_prot) {
+ case I40E_RX_PTYPE_INNER_PROT_TCP:
+ if (ex)
+ return M_HASHTYPE_RSS_TCP_IPV6_EX;
+ else
+ return M_HASHTYPE_RSS_TCP_IPV6;
+ case I40E_RX_PTYPE_INNER_PROT_UDP:
+ if (ex)
+ return M_HASHTYPE_RSS_UDP_IPV6_EX;
+ else
+ return M_HASHTYPE_RSS_UDP_IPV6;
+ default:
+ if (ex)
+ return M_HASHTYPE_RSS_IPV6_EX;
+ else
+ return M_HASHTYPE_RSS_IPV6;
+ }
+ }
+ if (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4) {
+ switch (decoded.inner_prot) {
+ case I40E_RX_PTYPE_INNER_PROT_TCP:
+ return M_HASHTYPE_RSS_TCP_IPV4;
+ case I40E_RX_PTYPE_INNER_PROT_UDP:
+ if (ex)
+ return M_HASHTYPE_RSS_UDP_IPV4_EX;
+ else
+ return M_HASHTYPE_RSS_UDP_IPV4;
+ default:
+ return M_HASHTYPE_RSS_IPV4;
+ }
+ }
+ /* We should never get here!! */
+ return M_HASHTYPE_OPAQUE;
+}
+#endif /* RSS */
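For example, a ptype that decodes to IPv4/TCP returns M_HASHTYPE_RSS_TCP_IPV4, while anything unknown falls through to M_HASHTYPE_OPAQUE so the stack still gets a usable, if unclassified, flow id. The ixl_rxeof() hunk further down consumes it like this:

    sendmp->m_pkthdr.flowid = le32toh(cur->wb.qword0.hi_dword.rss);
    M_HASHTYPE_SET(sendmp, ixl_ptype_to_hash(ptype));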
/*********************************************************************
*
@@ -1374,6 +1520,12 @@ ixl_rxeof(struct ixl_queue *que, int count)
IXL_RX_LOCK(rxr);
+#ifdef DEV_NETMAP
+ if (netmap_rx_irq(ifp, que->me, &count)) {
+ IXL_RX_UNLOCK(rxr);
+ return (FALSE);
+ }
+#endif /* DEV_NETMAP */
for (i = rxr->next_check; count != 0;) {
struct mbuf *sendmp, *mh, *mp;
@@ -1542,8 +1694,14 @@ ixl_rxeof(struct ixl_queue *que, int count)
rxr->bytes += sendmp->m_pkthdr.len;
if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
ixl_rx_checksum(sendmp, status, error, ptype);
+#ifdef RSS
+ sendmp->m_pkthdr.flowid =
+ le32toh(cur->wb.qword0.hi_dword.rss);
+ M_HASHTYPE_SET(sendmp, ixl_ptype_to_hash(ptype));
+#else
sendmp->m_pkthdr.flowid = que->msix;
M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE);
+#endif
}
next_desc:
bus_dmamap_sync(rxr->dma.tag, rxr->dma.map,
diff --git a/sys/dev/ixl/ixlv.h b/sys/dev/ixl/ixlv.h
index 77a02fa..695b1ef 100644
--- a/sys/dev/ixl/ixlv.h
+++ b/sys/dev/ixl/ixlv.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2014, Intel Corporation
+ Copyright (c) 2013-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -117,8 +117,12 @@ struct ixlv_sc {
struct ifmedia media;
struct callout timer;
int msix;
+ int pf_version;
int if_flags;
+ bool link_up;
+ u32 link_speed;
+
struct mtx mtx;
u32 qbase;
diff --git a/sys/dev/ixl/ixlvc.c b/sys/dev/ixl/ixlvc.c
index aa81bc1..9333b60 100644
--- a/sys/dev/ixl/ixlvc.c
+++ b/sys/dev/ixl/ixlvc.c
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2014, Intel Corporation
+ Copyright (c) 2013-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -249,9 +249,12 @@ ixlv_verify_api_ver(struct ixlv_sc *sc)
}
pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf;
- if ((pf_vvi->major != I40E_VIRTCHNL_VERSION_MAJOR) ||
- (pf_vvi->minor != I40E_VIRTCHNL_VERSION_MINOR))
+ if ((pf_vvi->major > I40E_VIRTCHNL_VERSION_MAJOR) ||
+ ((pf_vvi->major == I40E_VIRTCHNL_VERSION_MAJOR) &&
+ (pf_vvi->minor > I40E_VIRTCHNL_VERSION_MINOR)))
err = EIO;
+ else
+ sc->pf_version = pf_vvi->minor;
out_alloc:
free(event.msg_buf, M_DEVBUF);
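The relaxed check above can be read as a single predicate: the VF proceeds unless the PF reports a strictly newer virtchnl version, and it remembers the PF's minor version for later capability negotiation. A standalone sketch with a hypothetical helper name:

    static bool
    pf_version_acceptable(u32 pf_major, u32 pf_minor)
    {
        if (pf_major > I40E_VIRTCHNL_VERSION_MAJOR)
            return (false);
        if (pf_major == I40E_VIRTCHNL_VERSION_MAJOR &&
            pf_minor > I40E_VIRTCHNL_VERSION_MINOR)
            return (false);
        return (true);
    }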
@@ -269,7 +272,18 @@ out:
int
ixlv_send_vf_config_msg(struct ixlv_sc *sc)
{
- return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ u32 caps;
+
+ caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ |
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
+ I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
+
+ if (sc->pf_version)
+ return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ (u8 *)&caps, sizeof(caps));
+ else
+ return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
NULL, 0);
}
@@ -362,7 +376,7 @@ ixlv_configure_queues(struct ixlv_sc *sc)
struct i40e_virtchnl_vsi_queue_config_info *vqci;
struct i40e_virtchnl_queue_pair_info *vqpi;
-
+
pairs = vsi->num_queues;
len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
(sizeof(struct i40e_virtchnl_queue_pair_info) * pairs);
@@ -770,11 +784,16 @@ void
ixlv_request_stats(struct ixlv_sc *sc)
{
struct i40e_virtchnl_queue_select vqs;
+ int error = 0;
vqs.vsi_id = sc->vsi_res->vsi_id;
/* Low priority, we don't need to error check */
- ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_STATS,
+ error = ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_STATS,
(u8 *)&vqs, sizeof(vqs));
+#ifdef IXL_DEBUG
+ if (error)
+ device_printf(sc->dev, "Error sending stats request to PF: %d\n", error);
+#endif
}
/*
@@ -783,14 +802,11 @@ ixlv_request_stats(struct ixlv_sc *sc)
void
ixlv_update_stats_counters(struct ixlv_sc *sc, struct i40e_eth_stats *es)
{
- struct ixl_vsi *vsi;
+ struct ixl_vsi *vsi = &sc->vsi;
uint64_t tx_discards;
- int i;
-
- vsi = &sc->vsi;
tx_discards = es->tx_discards;
- for (i = 0; i < sc->vsi.num_queues; i++)
+ for (int i = 0; i < vsi->num_queues; i++)
tx_discards += sc->vsi.queues[i].txr.br->br_drops;
/* Update ifnet stats */
@@ -811,7 +827,7 @@ ixlv_update_stats_counters(struct ixlv_sc *sc, struct i40e_eth_stats *es)
IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
IXL_SET_COLLISIONS(vsi, 0);
- sc->vsi.eth_stats = *es;
+ vsi->eth_stats = *es;
}
/*
@@ -840,9 +856,9 @@ ixlv_vc_completion(struct ixlv_sc *sc,
vpe->event_data.link_event.link_status,
vpe->event_data.link_event.link_speed);
#endif
- vsi->link_up =
+ sc->link_up =
vpe->event_data.link_event.link_status;
- vsi->link_speed =
+ sc->link_speed =
vpe->event_data.link_event.link_speed;
ixlv_update_link_status(sc);
break;
diff --git a/sys/dev/mlx5/device.h b/sys/dev/mlx5/device.h
index 3e90859..b9a8eec 100644
--- a/sys/dev/mlx5/device.h
+++ b/sys/dev/mlx5/device.h
@@ -1023,6 +1023,25 @@ enum {
MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_INSERT = 1 << 3
};
+enum {
+ MLX5_UC_ADDR_CHANGE = (1 << 0),
+ MLX5_MC_ADDR_CHANGE = (1 << 1),
+ MLX5_VLAN_CHANGE = (1 << 2),
+ MLX5_PROMISC_CHANGE = (1 << 3),
+ MLX5_MTU_CHANGE = (1 << 4),
+};
+
+enum mlx5_list_type {
+ MLX5_NIC_VPORT_LIST_TYPE_UC = 0x0,
+ MLX5_NIC_VPORT_LIST_TYPE_MC = 0x1,
+ MLX5_NIC_VPORT_LIST_TYPE_VLAN = 0x2,
+};
+
+enum {
+ MLX5_ESW_VPORT_ADMIN_STATE_DOWN = 0x0,
+ MLX5_ESW_VPORT_ADMIN_STATE_UP = 0x1,
+ MLX5_ESW_VPORT_ADMIN_STATE_AUTO = 0x2,
+};
/* MLX5 DEV CAPs */
/* TODO: EAT.ME */
@@ -1087,6 +1106,22 @@ enum mlx5_cap_type {
MLX5_GET(flow_table_eswitch_cap, \
mdev->hca_caps_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
+#define MLX5_CAP_ESW_FLOWTABLE_EGRESS_ACL(mdev, cap) \
+ MLX5_CAP_ESW_FLOWTABLE(mdev, \
+ flow_table_properties_esw_acl_egress.cap)
+
+#define MLX5_CAP_ESW_FLOWTABLE_EGRESS_ACL_MAX(mdev, cap) \
+ MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, \
+ flow_table_properties_esw_acl_egress.cap)
+
+#define MLX5_CAP_ESW_FLOWTABLE_INGRESS_ACL(mdev, cap) \
+ MLX5_CAP_ESW_FLOWTABLE(mdev, \
+ flow_table_properties_esw_acl_ingress.cap)
+
+#define MLX5_CAP_ESW_FLOWTABLE_INGRESS_ACL_MAX(mdev, cap) \
+ MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, \
+ flow_table_properties_esw_acl_ingress.cap)
+
#define MLX5_CAP_ESW(mdev, cap) \
MLX5_GET(e_switch_cap, \
mdev->hca_caps_cur[MLX5_CAP_ESWITCH], cap)
diff --git a/sys/dev/mlx5/driver.h b/sys/dev/mlx5/driver.h
index 6f4031e..83793d5 100644
--- a/sys/dev/mlx5/driver.h
+++ b/sys/dev/mlx5/driver.h
@@ -934,7 +934,7 @@ struct mlx5_profile {
};
-#define MLX5_EEPROM_MAX_BYTES 48
+#define MLX5_EEPROM_MAX_BYTES 32
#define MLX5_EEPROM_IDENTIFIER_BYTE_MASK 0x000000ff
#define MLX5_EEPROM_REVISION_ID_BYTE_MASK 0x0000ff00
#define MLX5_EEPROM_PAGE_3_VALID_BIT_MASK 0x00040000
diff --git a/sys/dev/mlx5/eswitch_vacl.h b/sys/dev/mlx5/eswitch_vacl.h
new file mode 100644
index 0000000..f55fdb8
--- /dev/null
+++ b/sys/dev/mlx5/eswitch_vacl.h
@@ -0,0 +1,46 @@
+/*-
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef MLX5_ESWITCH_VACL_TABLE_H
+#define MLX5_ESWITCH_VACL_TABLE_H
+
+#include <dev/mlx5/driver.h>
+
+void *mlx5_vacl_table_create(struct mlx5_core_dev *dev,
+ u16 vport, bool is_egress);
+void mlx5_vacl_table_cleanup(void *acl_t);
+int mlx5_vacl_table_add_vlan(void *acl_t, u16 vlan);
+void mlx5_vacl_table_del_vlan(void *acl_t, u16 vlan);
+int mlx5_vacl_table_enable_vlan_filter(void *acl_t);
+void mlx5_vacl_table_disable_vlan_filter(void *acl_t);
+int mlx5_vacl_table_drop_untagged(void *acl_t);
+int mlx5_vacl_table_allow_untagged(void *acl_t);
+int mlx5_vacl_table_drop_unknown_vlan(void *acl_t);
+int mlx5_vacl_table_allow_unknown_vlan(void *acl_t);
+int mlx5_vacl_table_set_spoofchk(void *acl_t, bool spoofchk, u8 *vport_mac);
+
+#endif /* MLX5_ESWITCH_VACL_TABLE_H */
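The header above is the full surface of the new VACL facility. A typical life cycle, sketched for an ingress table on vport 1 with error handling elided (values are illustrative):

    void *acl;

    acl = mlx5_vacl_table_create(dev, 1 /* vport */, false /* ingress */);
    if (acl != NULL) {
        mlx5_vacl_table_add_vlan(acl, 100);     /* allow VLAN 100 */
        mlx5_vacl_table_enable_vlan_filter(acl);
        mlx5_vacl_table_drop_untagged(acl);
        /* ... */
        mlx5_vacl_table_cleanup(acl);
    }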
diff --git a/sys/dev/mlx5/mlx5_core/mlx5_eq.c b/sys/dev/mlx5/mlx5_core/mlx5_eq.c
index 11b560a..0cbfc31 100644
--- a/sys/dev/mlx5/mlx5_core/mlx5_eq.c
+++ b/sys/dev/mlx5/mlx5_core/mlx5_eq.c
@@ -31,6 +31,15 @@
#include <dev/mlx5/mlx5_ifc.h>
#include "mlx5_core.h"
+#if (__FreeBSD_version >= 1100000)
+#include "opt_rss.h"
+#endif
+
+#ifdef RSS
+#include <net/rss_config.h>
+#include <netinet/in_rss.h>
+#endif
+
enum {
MLX5_EQE_SIZE = sizeof(struct mlx5_eqe),
MLX5_EQE_OWNER_INIT_VAL = 0x1,
@@ -55,7 +64,6 @@ enum {
(1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
(1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR) | \
(1ull << MLX5_EVENT_TYPE_PORT_CHANGE) | \
- (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE) | \
(1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR) | \
(1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE) | \
(1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))
@@ -389,6 +397,18 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
priv->irq_info[vecidx].name, eq);
if (err)
goto err_eq;
+#ifdef RSS
+ if (vecidx >= MLX5_EQ_VEC_COMP_BASE) {
+ u8 bucket = vecidx - MLX5_EQ_VEC_COMP_BASE;
+ err = bind_irq_to_cpu(priv->msix_arr[vecidx].vector,
+ rss_getcpu(bucket % rss_getnumbuckets()));
+ if (err)
+ goto err_irq;
+ }
+#else
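+ /*
+ * Dummy branch: keeps the err_irq label referenced, and the
+ * compiler quiet about an unused label, when RSS is compiled out.
+ */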
+ if (0)
+ goto err_irq;
+#endif
/* EQs are created in ARMED state
@@ -398,6 +418,8 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
kvfree(in);
return 0;
+err_irq:
+ free_irq(priv->msix_arr[vecidx].vector, eq);
err_eq:
mlx5_cmd_destroy_eq(dev, eq->eqn);
@@ -452,6 +474,10 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
async_event_mask |= (1ull <<
MLX5_EVENT_TYPE_CODING_PORT_MODULE_EVENT);
+ if (MLX5_CAP_GEN(dev, nic_vport_change_event))
+ async_event_mask |= (1ull <<
+ MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);
+
err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
"mlx5_cmd_eq", &dev->priv.uuari.uars[0]);
diff --git a/sys/dev/mlx5/mlx5_core/mlx5_eswitch_vacl.c b/sys/dev/mlx5/mlx5_core/mlx5_eswitch_vacl.c
new file mode 100644
index 0000000..95d90a3
--- /dev/null
+++ b/sys/dev/mlx5/mlx5_core/mlx5_eswitch_vacl.c
@@ -0,0 +1,804 @@
+/*-
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <linux/if_ether.h>
+#include <linux/etherdevice.h>
+#include <dev/mlx5/driver.h>
+#include <dev/mlx5/flow_table.h>
+#include <dev/mlx5/eswitch_vacl.h>
+#include "mlx5_core.h"
+
+enum {
+ MLX5_ACL_LOOPBACK_GROUP_IDX = 0,
+ MLX5_ACL_UNTAGGED_GROUP_IDX = 1,
+ MLX5_ACL_VLAN_GROUP_IDX = 2,
+ MLX5_ACL_UNKNOWN_VLAN_GROUP_IDX = 3,
+ MLX5_ACL_DEFAULT_GROUP_IDX = 4,
+ MLX5_ACL_GROUPS_NUM,
+};
+
+struct mlx_vacl_fr {
+ bool applied;
+ u32 fi;
+ u16 action;
+};
+
+struct mlx5_vacl_table {
+ struct mlx5_core_dev *dev;
+ u16 vport;
+ void *ft;
+ int max_ft_size;
+ int acl_type;
+
+ struct mlx_vacl_fr loopback_fr;
+ struct mlx_vacl_fr untagged_fr;
+ struct mlx_vacl_fr unknown_vlan_fr;
+ struct mlx_vacl_fr default_fr;
+
+ bool vlan_filter_enabled;
+ bool vlan_filter_applied;
+ unsigned long *vlan_allowed_bitmap;
+ u32 vlan_fi_table[4096];
+
+ bool spoofchk_enabled;
+ u8 smac[ETH_ALEN];
+};
+
+static int mlx5_vacl_table_allow_vlan(void *acl_t, u16 vlan)
+{
+ struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
+ u32 *flow_context = NULL;
+ void *in_match_criteria = NULL;
+ void *in_match_value = NULL;
+ u8 *smac;
+ int vlan_mc_enable = MLX5_MATCH_OUTER_HEADERS;
+ int err = 0;
+
+ if (!test_bit(vlan, acl_table->vlan_allowed_bitmap))
+ return -EINVAL;
+
+ flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context));
+ if (!flow_context) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ in_match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+ if (!in_match_criteria) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* Apply vlan rule */
+ MLX5_SET(flow_context, flow_context, action,
+ MLX5_FLOW_CONTEXT_ACTION_ALLOW);
+ in_match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
+ MLX5_SET(fte_match_param, in_match_value, outer_headers.vlan_tag, 1);
+ MLX5_SET(fte_match_param, in_match_value, outer_headers.first_vid,
+ vlan);
+ MLX5_SET(fte_match_param, in_match_criteria, outer_headers.vlan_tag, 1);
+ MLX5_SET(fte_match_param, in_match_criteria, outer_headers.first_vid,
+ 0xfff);
+ if (acl_table->spoofchk_enabled) {
+ smac = MLX5_ADDR_OF(fte_match_param,
+ in_match_value,
+ outer_headers.smac_47_16);
+ ether_addr_copy(smac, acl_table->smac);
+ smac = MLX5_ADDR_OF(fte_match_param,
+ in_match_criteria,
+ outer_headers.smac_47_16);
+ memset(smac, 0xff, ETH_ALEN);
+ }
+ err = mlx5_add_flow_table_entry(acl_table->ft, vlan_mc_enable,
+ in_match_criteria, flow_context,
+ &acl_table->vlan_fi_table[vlan]);
+out:
+ if (flow_context)
+ vfree(flow_context);
+ if (in_match_criteria)
+ vfree(in_match_criteria);
+ return err;
+}
+
+static int mlx5_vacl_table_apply_loopback_filter(void *acl_t, u16 new_action)
+{
+ struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
+ u8 loopback_mc_enable = MLX5_MATCH_MISC_PARAMETERS;
+ u32 *flow_context = NULL;
+ void *in_match_criteria = NULL;
+ void *in_match_value = NULL;
+ void *mv_misc = NULL;
+ void *mc_misc = NULL;
+ int err = 0;
+
+ flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context));
+ if (!flow_context) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ in_match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+ if (!in_match_criteria) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (acl_table->loopback_fr.applied)
+ mlx5_del_flow_table_entry(acl_table->ft,
+ acl_table->loopback_fr.fi);
+
+ /* Apply new loopback rule */
+ MLX5_SET(flow_context, flow_context, action, new_action);
+ in_match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
+ mv_misc = MLX5_ADDR_OF(fte_match_param, in_match_value,
+ misc_parameters);
+ mc_misc = MLX5_ADDR_OF(fte_match_param, in_match_criteria,
+ misc_parameters);
+ MLX5_SET(fte_match_set_misc, mv_misc, source_port, acl_table->vport);
+
+ MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
+
+ err = mlx5_add_flow_table_entry(acl_table->ft, loopback_mc_enable,
+ in_match_criteria, flow_context,
+ &acl_table->loopback_fr.fi);
+ if (err) {
+ acl_table->loopback_fr.applied = false;
+ } else {
+ acl_table->loopback_fr.applied = true;
+ acl_table->loopback_fr.action = new_action;
+ }
+
+out:
+ if (flow_context)
+ vfree(flow_context);
+ if (in_match_criteria)
+ vfree(in_match_criteria);
+ return err;
+}
+
+static int mlx5_vacl_table_apply_default(void *acl_t, u16 new_action)
+{
+ struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
+ u8 default_mc_enable = 0;
+ u32 *flow_context = NULL;
+ void *in_match_criteria = NULL;
+ int err = 0;
+
+ if (!acl_table->spoofchk_enabled)
+ return -EINVAL;
+
+ flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context));
+ if (!flow_context) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ in_match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+ if (!in_match_criteria) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (acl_table->default_fr.applied)
+ mlx5_del_flow_table_entry(acl_table->ft,
+ acl_table->default_fr.fi);
+
+ /* Apply new default rule */
+ MLX5_SET(flow_context, flow_context, action, new_action);
+ err = mlx5_add_flow_table_entry(acl_table->ft, default_mc_enable,
+ in_match_criteria, flow_context,
+ &acl_table->default_fr.fi);
+ if (err) {
+ acl_table->default_fr.applied = false;
+ } else {
+ acl_table->default_fr.applied = true;
+ acl_table->default_fr.action = new_action;
+ }
+
+out:
+ if (flow_context)
+ vfree(flow_context);
+ if (in_match_criteria)
+ vfree(in_match_criteria);
+ return err;
+}
+
+static int mlx5_vacl_table_apply_untagged(void *acl_t, u16 new_action)
+{
+ struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
+ u8 untagged_mc_enable = MLX5_MATCH_OUTER_HEADERS;
+ u8 *smac;
+ u32 *flow_context = NULL;
+ void *in_match_criteria = NULL;
+ void *in_match_value = NULL;
+ int err = 0;
+
+ flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context));
+ if (!flow_context) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ in_match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+ if (!in_match_criteria) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (acl_table->untagged_fr.applied)
+ mlx5_del_flow_table_entry(acl_table->ft,
+ acl_table->untagged_fr.fi);
+
+ /* Apply new untagged rule */
+ MLX5_SET(flow_context, flow_context, action, new_action);
+ in_match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
+ MLX5_SET(fte_match_param, in_match_value, outer_headers.vlan_tag, 0);
+ MLX5_SET(fte_match_param, in_match_criteria, outer_headers.vlan_tag, 1);
+ if (acl_table->spoofchk_enabled) {
+ smac = MLX5_ADDR_OF(fte_match_param,
+ in_match_value,
+ outer_headers.smac_47_16);
+ ether_addr_copy(smac, acl_table->smac);
+ smac = MLX5_ADDR_OF(fte_match_param,
+ in_match_criteria,
+ outer_headers.smac_47_16);
+ memset(smac, 0xff, ETH_ALEN);
+ }
+ err = mlx5_add_flow_table_entry(acl_table->ft, untagged_mc_enable,
+ in_match_criteria, flow_context,
+ &acl_table->untagged_fr.fi);
+ if (err) {
+ acl_table->untagged_fr.applied = false;
+ } else {
+ acl_table->untagged_fr.applied = true;
+ acl_table->untagged_fr.action = new_action;
+ }
+
+out:
+ if (flow_context)
+ vfree(flow_context);
+ if (in_match_criteria)
+ vfree(in_match_criteria);
+ return err;
+}
+
+static int mlx5_vacl_table_apply_unknown_vlan(void *acl_t, u16 new_action)
+{
+ struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
+ u8 default_mc_enable = (!acl_table->spoofchk_enabled) ? 0 :
+ MLX5_MATCH_OUTER_HEADERS;
+ u32 *flow_context = NULL;
+ void *in_match_criteria = NULL;
+ void *in_match_value = NULL;
+ u8 *smac;
+ int err = 0;
+
+ flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context));
+ if (!flow_context) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ in_match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+ if (!in_match_criteria) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (acl_table->unknown_vlan_fr.applied)
+ mlx5_del_flow_table_entry(acl_table->ft,
+ acl_table->unknown_vlan_fr.fi);
+
+ /* Apply new unknown vlan rule */
+ MLX5_SET(flow_context, flow_context, action, new_action);
+ if (acl_table->spoofchk_enabled) {
+ in_match_value = MLX5_ADDR_OF(flow_context, flow_context,
+ match_value);
+ smac = MLX5_ADDR_OF(fte_match_param,
+ in_match_value,
+ outer_headers.smac_47_16);
+ ether_addr_copy(smac, acl_table->smac);
+ smac = MLX5_ADDR_OF(fte_match_param,
+ in_match_criteria,
+ outer_headers.smac_47_16);
+ memset(smac, 0xff, ETH_ALEN);
+ }
+ err = mlx5_add_flow_table_entry(acl_table->ft, default_mc_enable,
+ in_match_criteria, flow_context,
+ &acl_table->unknown_vlan_fr.fi);
+ if (err) {
+ acl_table->unknown_vlan_fr.applied = false;
+ } else {
+ acl_table->unknown_vlan_fr.applied = true;
+ acl_table->unknown_vlan_fr.action = new_action;
+ }
+
+out:
+ if (flow_context)
+ vfree(flow_context);
+ if (in_match_criteria)
+ vfree(in_match_criteria);
+ return err;
+}
+
+static int mlx5_vacl_table_apply_vlan_filter(void *acl_t)
+{
+ struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
+ int index = 0;
+ int err_index = 0;
+ int err = 0;
+
+ if (acl_table->vlan_filter_applied)
+ return 0;
+
+ for (index = find_first_bit(acl_table->vlan_allowed_bitmap, 4096);
+ index < 4096;
+ index = find_next_bit(acl_table->vlan_allowed_bitmap,
+ 4096, ++index)) {
+ err = mlx5_vacl_table_allow_vlan(acl_t, index);
+ if (err)
+ goto err_disable_vlans;
+ }
+
+ acl_table->vlan_filter_applied = true;
+ return 0;
+
+err_disable_vlans:
+ for (err_index = find_first_bit(acl_table->vlan_allowed_bitmap, 4096);
+ err_index < index;
+ err_index = find_next_bit(acl_table->vlan_allowed_bitmap, 4096,
+ ++err_index)) {
+ mlx5_del_flow_table_entry(acl_table->ft,
+ acl_table->vlan_fi_table[err_index]);
+ }
+ return err;
+}
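Both the apply and disapply paths above walk the allowed-VLAN bitmap with the same idiom; in minimal standalone form (bitmap name hypothetical):

    /* Visit every set bit, i.e. every allowed VLAN id. */
    for (index = find_first_bit(bitmap, 4096);
         index < 4096;
         index = find_next_bit(bitmap, 4096, index + 1)) {
        /* with bits 10 and 4000 set, the body runs exactly twice */
    }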
+
+static void mlx5_vacl_table_disapply_vlan_filter(void *acl_t)
+{
+ struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
+ int index = 0;
+
+ if (!acl_table->vlan_filter_applied)
+ return;
+
+ for (index = find_first_bit(acl_table->vlan_allowed_bitmap, 4096);
+ index < 4096;
+ index = find_next_bit(acl_table->vlan_allowed_bitmap, 4096,
+ ++index)) {
+ mlx5_del_flow_table_entry(acl_table->ft,
+ acl_table->vlan_fi_table[index]);
+ }
+
+ acl_table->vlan_filter_applied = false;
+}
+
+static void mlx5_vacl_table_disapply_all_filters(void *acl_t)
+{
+ struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
+
+ if (acl_table->default_fr.applied) {
+ mlx5_del_flow_table_entry(acl_table->ft,
+ acl_table->default_fr.fi);
+ acl_table->default_fr.applied = false;
+ }
+ if (acl_table->unknown_vlan_fr.applied) {
+ mlx5_del_flow_table_entry(acl_table->ft,
+ acl_table->unknown_vlan_fr.fi);
+ acl_table->unknown_vlan_fr.applied = false;
+ }
+ if (acl_table->loopback_fr.applied) {
+ mlx5_del_flow_table_entry(acl_table->ft,
+ acl_table->loopback_fr.fi);
+ acl_table->loopback_fr.applied = false;
+ }
+ if (acl_table->untagged_fr.applied) {
+ mlx5_del_flow_table_entry(acl_table->ft,
+ acl_table->untagged_fr.fi);
+ acl_table->untagged_fr.applied = false;
+ }
+ if (acl_table->vlan_filter_applied) {
+ mlx5_vacl_table_disapply_vlan_filter(acl_t);
+ acl_table->vlan_filter_applied = false;
+ }
+}
+
+static int mlx5_vacl_table_apply_all_filters(void *acl_t)
+{
+ struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
+ int err = 0;
+
+ if (!acl_table->default_fr.applied && acl_table->spoofchk_enabled) {
+ err = mlx5_vacl_table_apply_default(acl_table,
+ acl_table->default_fr.action);
+ if (err)
+ goto err_disapply_all;
+ }
+
+ if (!acl_table->unknown_vlan_fr.applied) {
+ err = mlx5_vacl_table_apply_unknown_vlan(acl_table,
+ acl_table->unknown_vlan_fr.action);
+ if (err)
+ goto err_disapply_all;
+ }
+
+ if (!acl_table->loopback_fr.applied &&
+ acl_table->acl_type == MLX5_FLOW_TABLE_TYPE_EGRESS_ACL) {
+ err = mlx5_vacl_table_apply_loopback_filter(
+ acl_table,
+ acl_table->loopback_fr.action);
+ if (err)
+ goto err_disapply_all;
+ }
+
+ if (!acl_table->untagged_fr.applied) {
+ err = mlx5_vacl_table_apply_untagged(acl_table,
+ acl_table->untagged_fr.action);
+ if (err)
+ goto err_disapply_all;
+ }
+
+ if (!acl_table->vlan_filter_applied && acl_table->vlan_filter_enabled) {
+ err = mlx5_vacl_table_apply_vlan_filter(acl_t);
+ if (err)
+ goto err_disapply_all;
+ }
+
+ goto out;
+
+err_disapply_all:
+ mlx5_vacl_table_disapply_all_filters(acl_t);
+
+out:
+ return err;
+}
+
+static void mlx5_vacl_table_destroy_ft(void *acl_t)
+{
+ struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
+
+ mlx5_vacl_table_disapply_all_filters(acl_t);
+ if (acl_table->ft)
+ mlx5_destroy_flow_table(acl_table->ft);
+ acl_table->ft = NULL;
+}
+
+static int mlx5_vacl_table_create_ft(void *acl_t, bool spoofchk)
+{
+ struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
+ int log_acl_ft_size;
+ int err = 0;
+ int groups_num = MLX5_ACL_GROUPS_NUM - 1;
+ int shift_idx = MLX5_ACL_UNTAGGED_GROUP_IDX;
+ u8 *smac;
+ struct mlx5_flow_table_group *g;
+
+ if (acl_table->ft)
+ return -EINVAL;
+
+ g = kcalloc(MLX5_ACL_GROUPS_NUM, sizeof(*g), GFP_KERNEL);
+ if (!g) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ acl_table->spoofchk_enabled = spoofchk;
+
+ /*
+ * for vlan group
+ */
+ log_acl_ft_size = 4096;
+ /*
+ * for loopback filter rule
+ */
+ log_acl_ft_size += 1;
+ /*
+ * for untagged rule
+ */
+ log_acl_ft_size += 1;
+ /*
+ * for unknown vlan rule
+ */
+ log_acl_ft_size += 1;
+ /*
+ * for default rule
+ */
+ log_acl_ft_size += 1;
+
+ log_acl_ft_size = order_base_2(log_acl_ft_size);
+ log_acl_ft_size = min_t(int, log_acl_ft_size, acl_table->max_ft_size);
+
+ if (log_acl_ft_size < 2) {
+ err = -EINVAL;
+ goto out;
+ }
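For scale: the request above is 4096 VLAN slots plus four singleton rules, i.e. 4100 entries; order_base_2(4100) == 13, so the driver asks for a 2^13-entry table before clamping against the device's log_max_ft_size capability.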
+
+ if (acl_table->acl_type == MLX5_FLOW_TABLE_TYPE_EGRESS_ACL) {
+ /* Loopback filter group */
+ g[MLX5_ACL_LOOPBACK_GROUP_IDX].log_sz = 0;
+ g[MLX5_ACL_LOOPBACK_GROUP_IDX].match_criteria_enable =
+ MLX5_MATCH_MISC_PARAMETERS;
+ MLX5_SET_TO_ONES(fte_match_param,
+ g[MLX5_ACL_LOOPBACK_GROUP_IDX].match_criteria,
+ misc_parameters.source_port);
+ groups_num++;
+ shift_idx = MLX5_ACL_LOOPBACK_GROUP_IDX;
+ }
+ /* Untagged traffic group */
+ g[MLX5_ACL_UNTAGGED_GROUP_IDX - shift_idx].log_sz = 0;
+ g[MLX5_ACL_UNTAGGED_GROUP_IDX - shift_idx].match_criteria_enable =
+ MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET(fte_match_param,
+ g[MLX5_ACL_UNTAGGED_GROUP_IDX - shift_idx].match_criteria,
+ outer_headers.vlan_tag, 1);
+ if (spoofchk) {
+ smac = MLX5_ADDR_OF(fte_match_param,
+ g[MLX5_ACL_UNTAGGED_GROUP_IDX - shift_idx]
+ .match_criteria,
+ outer_headers.smac_47_16);
+ memset(smac, 0xff, ETH_ALEN);
+ }
+
+ /* Allowed vlans group */
+ g[MLX5_ACL_VLAN_GROUP_IDX - shift_idx].log_sz = log_acl_ft_size - 1;
+ g[MLX5_ACL_VLAN_GROUP_IDX - shift_idx].match_criteria_enable =
+ MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET(fte_match_param,
+ g[MLX5_ACL_VLAN_GROUP_IDX - shift_idx].match_criteria,
+ outer_headers.vlan_tag, 1);
+ MLX5_SET(fte_match_param,
+ g[MLX5_ACL_VLAN_GROUP_IDX - shift_idx].match_criteria,
+ outer_headers.first_vid, 0xfff);
+ if (spoofchk) {
+ smac = MLX5_ADDR_OF(fte_match_param,
+ g[MLX5_ACL_VLAN_GROUP_IDX - shift_idx]
+ .match_criteria,
+ outer_headers.smac_47_16);
+ memset(smac, 0xff, ETH_ALEN);
+ }
+
+ /* Unknown vlan traffic group */
+ g[MLX5_ACL_UNKNOWN_VLAN_GROUP_IDX - shift_idx].log_sz = 0;
+ g[MLX5_ACL_UNKNOWN_VLAN_GROUP_IDX - shift_idx].match_criteria_enable =
+ (spoofchk ? MLX5_MATCH_OUTER_HEADERS : 0);
+ if (spoofchk) {
+ smac = MLX5_ADDR_OF(
+ fte_match_param,
+ g[MLX5_ACL_UNKNOWN_VLAN_GROUP_IDX - shift_idx]
+ .match_criteria,
+ outer_headers.smac_47_16);
+ memset(smac, 0xff, ETH_ALEN);
+ }
+
+ /*
+ * Default group - for spoofchk only.
+ */
+ g[MLX5_ACL_DEFAULT_GROUP_IDX - shift_idx].log_sz = 0;
+ g[MLX5_ACL_DEFAULT_GROUP_IDX - shift_idx].match_criteria_enable = 0;
+
+ acl_table->ft = mlx5_create_flow_table(acl_table->dev,
+ 0,
+ acl_table->acl_type,
+ acl_table->vport,
+ groups_num,
+ g);
+ if (!acl_table->ft) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = mlx5_vacl_table_apply_all_filters(acl_t);
+ if (err)
+ goto err_destroy_ft;
+
+ goto out;
+
+err_destroy_ft:
+ mlx5_vacl_table_destroy_ft(acl_table);
+ acl_table->ft = NULL;
+
+out:
+ kfree(g);
+ return err;
+}
+
+void *mlx5_vacl_table_create(struct mlx5_core_dev *dev,
+ u16 vport, bool is_egress)
+{
+ struct mlx5_vacl_table *acl_table;
+ int err = 0;
+
+ if (is_egress && !MLX5_CAP_ESW_FLOWTABLE_EGRESS_ACL(dev, ft_support))
+ return NULL;
+
+ if (!is_egress && !MLX5_CAP_ESW_FLOWTABLE_INGRESS_ACL(dev, ft_support))
+ return NULL;
+
+ acl_table = kzalloc(sizeof(*acl_table), GFP_KERNEL);
+ if (!acl_table)
+ return NULL;
+
+ acl_table->acl_type = is_egress ? MLX5_FLOW_TABLE_TYPE_EGRESS_ACL :
+ MLX5_FLOW_TABLE_TYPE_INGRESS_ACL;
+ acl_table->max_ft_size = (is_egress ?
+ MLX5_CAP_ESW_FLOWTABLE_EGRESS_ACL(dev,
+ log_max_ft_size) :
+ MLX5_CAP_ESW_FLOWTABLE_INGRESS_ACL(dev,
+ log_max_ft_size));
+ acl_table->dev = dev;
+ acl_table->vport = vport;
+
+ /*
+ * default behavior : Allow and if spoofchk drop the default
+ */
+ acl_table->default_fr.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+ acl_table->loopback_fr.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+ acl_table->unknown_vlan_fr.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+ acl_table->untagged_fr.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+ err = mlx5_vacl_table_create_ft(acl_table, false);
+ if (err)
+ goto err_free_acl_table;
+
+ acl_table->vlan_allowed_bitmap = kcalloc(BITS_TO_LONGS(4096),
+ sizeof(uintptr_t),
+ GFP_KERNEL);
+ if (!acl_table->vlan_allowed_bitmap)
+ goto err_destroy_ft;
+
+ goto out;
+
+err_destroy_ft:
+ mlx5_vacl_table_destroy_ft(acl_table);
+ acl_table->ft = NULL;
+
+err_free_acl_table:
+ kfree(acl_table);
+ acl_table = NULL;
+
+out:
+ return (void *)acl_table;
+}
+EXPORT_SYMBOL(mlx5_vacl_table_create);
+
+void mlx5_vacl_table_cleanup(void *acl_t)
+{
+ struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
+
+ mlx5_vacl_table_destroy_ft(acl_t);
+ kfree(acl_table->vlan_allowed_bitmap);
+ kfree(acl_table);
+}
+EXPORT_SYMBOL(mlx5_vacl_table_cleanup);
+
+int mlx5_vacl_table_add_vlan(void *acl_t, u16 vlan)
+{
+ struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
+ int err = 0;
+
+ if (test_bit(vlan, acl_table->vlan_allowed_bitmap))
+ return 0;
+ __set_bit(vlan, acl_table->vlan_allowed_bitmap);
+ if (!acl_table->vlan_filter_applied)
+ return 0;
+
+ err = mlx5_vacl_table_allow_vlan(acl_t, vlan);
+ if (err)
+ goto err_clear_vbit;
+
+ goto out;
+
+err_clear_vbit:
+ __clear_bit(vlan, acl_table->vlan_allowed_bitmap);
+
+out:
+ return err;
+}
+EXPORT_SYMBOL(mlx5_vacl_table_add_vlan);
+
+void mlx5_vacl_table_del_vlan(void *acl_t, u16 vlan)
+{
+ struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
+
+ if (!test_bit(vlan, acl_table->vlan_allowed_bitmap))
+ return;
+
+ __clear_bit(vlan, acl_table->vlan_allowed_bitmap);
+
+ if (!acl_table->vlan_filter_applied)
+ return;
+
+ mlx5_del_flow_table_entry(acl_table->ft,
+ acl_table->vlan_fi_table[vlan]);
+}
+EXPORT_SYMBOL(mlx5_vacl_table_del_vlan);
+
+int mlx5_vacl_table_enable_vlan_filter(void *acl_t)
+{
+ struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
+
+ acl_table->vlan_filter_enabled = true;
+ return mlx5_vacl_table_apply_vlan_filter(acl_t);
+}
+EXPORT_SYMBOL(mlx5_vacl_table_enable_vlan_filter);
+
+void mlx5_vacl_table_disable_vlan_filter(void *acl_t)
+{
+ struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
+
+ acl_table->vlan_filter_enabled = false;
+ mlx5_vacl_table_disapply_vlan_filter(acl_t);
+}
+EXPORT_SYMBOL(mlx5_vacl_table_disable_vlan_filter);
+
+int mlx5_vacl_table_drop_untagged(void *acl_t)
+{
+ return mlx5_vacl_table_apply_untagged(acl_t,
+ MLX5_FLOW_CONTEXT_ACTION_DROP);
+}
+EXPORT_SYMBOL(mlx5_vacl_table_drop_untagged);
+
+int mlx5_vacl_table_allow_untagged(void *acl_t)
+{
+ return mlx5_vacl_table_apply_untagged(acl_t,
+ MLX5_FLOW_CONTEXT_ACTION_ALLOW);
+}
+EXPORT_SYMBOL(mlx5_vacl_table_allow_untagged);
+
+int mlx5_vacl_table_drop_unknown_vlan(void *acl_t)
+{
+ return mlx5_vacl_table_apply_unknown_vlan(acl_t,
+ MLX5_FLOW_CONTEXT_ACTION_DROP);
+}
+EXPORT_SYMBOL(mlx5_vacl_table_drop_unknown_vlan);
+
+int mlx5_vacl_table_allow_unknown_vlan(void *acl_t)
+{
+ return mlx5_vacl_table_apply_unknown_vlan(acl_t,
+ MLX5_FLOW_CONTEXT_ACTION_ALLOW);
+}
+EXPORT_SYMBOL(mlx5_vacl_table_allow_unknown_vlan);
+
+int mlx5_vacl_table_set_spoofchk(void *acl_t, bool spoofchk, u8 *vport_mac)
+{
+ struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
+ int err = 0;
+
+ if (spoofchk == acl_table->spoofchk_enabled) {
+ if (!spoofchk ||
+     !memcmp(acl_table->smac, vport_mac, ETH_ALEN))
+ return 0;
+ }
+
+ ether_addr_copy(acl_table->smac, vport_mac);
+ if (spoofchk != acl_table->spoofchk_enabled) {
+ mlx5_vacl_table_destroy_ft(acl_t);
+ err = mlx5_vacl_table_create_ft(acl_t, spoofchk);
+ } else {
+ mlx5_vacl_table_disapply_all_filters(acl_t);
+ err = mlx5_vacl_table_apply_all_filters(acl_t);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(mlx5_vacl_table_set_spoofchk);
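
Note that toggling spoofchk rebuilds the flow table (the SMAC match criteria are compiled into the groups), while a MAC change under an unchanged spoofchk setting only reapplies the filters. A hedged caller sketch, reusing mlx5_query_nic_vport_mac_address() as declared in vport.h (acl and mdev as in the earlier sketch):

    /* Sketch: enable MAC spoof checking on vport 1. */
    u8 mac[6];	/* ETH_ALEN */
    int err = mlx5_query_nic_vport_mac_address(mdev, 1, mac);
    if (err == 0)
    	err = mlx5_vacl_table_set_spoofchk(acl, true, mac);
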
+
diff --git a/sys/dev/mlx5/mlx5_core/mlx5_vport.c b/sys/dev/mlx5/mlx5_core/mlx5_vport.c
index 803f119..5c8626b 100644
--- a/sys/dev/mlx5/mlx5_core/mlx5_vport.c
+++ b/sys/dev/mlx5/mlx5_core/mlx5_vport.c
@@ -328,7 +328,8 @@ int mlx5_set_nic_vport_current_mac(struct mlx5_core_dev *mdev, int vport,
MLX5_SET(modify_nic_vport_context_in, in,
field_select.addresses_list, 1);
MLX5_SET(modify_nic_vport_context_in, in,
- nic_vport_context.allowed_list_type, 0);
+ nic_vport_context.allowed_list_type,
+ MLX5_NIC_VPORT_LIST_TYPE_UC);
MLX5_SET(modify_nic_vport_context_in, in,
nic_vport_context.allowed_list_size, 1);
@@ -345,6 +346,131 @@ int mlx5_set_nic_vport_current_mac(struct mlx5_core_dev *mdev, int vport,
return err;
}
EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_current_mac);
+
+int mlx5_set_nic_vport_vlan_list(struct mlx5_core_dev *dev, u32 vport,
+ u16 *vlan_list, int list_len)
+{
+ void *in, *ctx;
+ int i, err;
+ int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
+ + MLX5_ST_SZ_BYTES(vlan_layout) * (int)list_len;
+
+ int max_list_size = 1 << MLX5_CAP_GEN_MAX(dev, log_max_vlan_list);
+
+ if (list_len > max_list_size) {
+ mlx5_core_warn(dev, "Requested VLAN list size (%d) exceeds the maximum (%d)\n",
+ list_len, max_list_size);
+ return -ENOSPC;
+ }
+
+ in = mlx5_vzalloc(inlen);
+ if (!in) {
+ mlx5_core_warn(dev, "failed to allocate inbox\n");
+ return -ENOMEM;
+ }
+
+ MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
+ if (vport)
+ MLX5_SET(modify_nic_vport_context_in, in,
+ other_vport, 1);
+ MLX5_SET(modify_nic_vport_context_in, in,
+ field_select.addresses_list, 1);
+
+ ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);
+
+ MLX5_SET(nic_vport_context, ctx, allowed_list_type,
+ MLX5_NIC_VPORT_LIST_TYPE_VLAN);
+ MLX5_SET(nic_vport_context, ctx, allowed_list_size, list_len);
+
+ for (i = 0; i < list_len; i++) {
+ u8 *vlan_lout = MLX5_ADDR_OF(nic_vport_context, ctx,
+ current_uc_mac_address[i]);
+ MLX5_SET(vlan_layout, vlan_lout, vlan, vlan_list[i]);
+ }
+
+ err = mlx5_modify_nic_vport_context(dev, in, inlen);
+
+ kvfree(in);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_vlan_list);
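
Each call replaces the vport's whole allowed-VLAN list, so callers must pass the complete set every time; a minimal sketch (the list size is assumed to fit within log_max_vlan_list):

    /* Sketch: allow exactly VLANs 10 and 20 on vport 2. */
    u16 vlans[] = { 10, 20 };
    int err = mlx5_set_nic_vport_vlan_list(dev, 2, vlans,
        sizeof(vlans) / sizeof(vlans[0]));
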
+
+int mlx5_set_nic_vport_mc_list(struct mlx5_core_dev *mdev, int vport,
+ u64 *addr_list, size_t addr_list_len)
+{
+ void *in, *ctx;
+ int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
+ + MLX5_ST_SZ_BYTES(mac_address_layout) * (int)addr_list_len;
+ int err;
+ size_t i;
+ int max_list_sz = 1 << MLX5_CAP_GEN_MAX(mdev, log_max_current_mc_list);
+
+ if ((int)addr_list_len > max_list_sz) {
+ mlx5_core_warn(mdev, "Requested MC address list size (%d) exceeds the maximum (%d)\n",
+ (int)addr_list_len, max_list_sz);
+ return -ENOSPC;
+ }
+
+ in = mlx5_vzalloc(inlen);
+ if (!in) {
+ mlx5_core_warn(mdev, "failed to allocate inbox\n");
+ return -ENOMEM;
+ }
+
+ MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
+ if (vport)
+ MLX5_SET(modify_nic_vport_context_in, in,
+ other_vport, 1);
+ MLX5_SET(modify_nic_vport_context_in, in,
+ field_select.addresses_list, 1);
+
+ ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);
+
+ MLX5_SET(nic_vport_context, ctx, allowed_list_type,
+ MLX5_NIC_VPORT_LIST_TYPE_MC);
+ MLX5_SET(nic_vport_context, ctx, allowed_list_size, addr_list_len);
+
+ for (i = 0; i < addr_list_len; i++) {
+ u8 *mac_lout = (u8 *)MLX5_ADDR_OF(nic_vport_context, ctx,
+ current_uc_mac_address[i]);
+ u8 *mac_ptr = (u8 *)MLX5_ADDR_OF(mac_address_layout, mac_lout,
+ mac_addr_47_32);
+ ether_addr_copy(mac_ptr, (u8 *)&addr_list[i]);
+ }
+
+ err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+
+ kvfree(in);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_mc_list);
+
+int mlx5_set_nic_vport_promisc(struct mlx5_core_dev *mdev, int vport,
+ bool promisc_mc, bool promisc_uc,
+ bool promisc_all)
+{
+ u8 in[MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)];
+ u8 *ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
+ nic_vport_context);
+
+ memset(in, 0, MLX5_ST_SZ_BYTES(modify_nic_vport_context_in));
+
+ MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
+ if (vport)
+ MLX5_SET(modify_nic_vport_context_in, in,
+ other_vport, 1);
+ MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
+ if (promisc_mc)
+ MLX5_SET(nic_vport_context, ctx, promisc_mc, 1);
+ if (promisc_uc)
+ MLX5_SET(nic_vport_context, ctx, promisc_uc, 1);
+ if (promisc_all)
+ MLX5_SET(nic_vport_context, ctx, promisc_all, 1);
+
+ return mlx5_modify_nic_vport_context(mdev, in, sizeof(in));
+}
+EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_promisc);
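
The three promiscuous bits are independent; a sketch putting the local vport (0) into fully promiscuous mode:

    /* Sketch: enable unicast, multicast and all-frames promisc on vport 0. */
    int err = mlx5_set_nic_vport_promisc(mdev, 0,
        true /* promisc_mc */, true /* promisc_uc */, true /* promisc_all */);
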
int mlx5_set_nic_vport_permanent_mac(struct mlx5_core_dev *mdev, int vport,
u8 *addr)
{
diff --git a/sys/dev/mlx5/mlx5_en/en.h b/sys/dev/mlx5/mlx5_en/en.h
index 5339006..4203f5c 100644
--- a/sys/dev/mlx5/mlx5_en/en.h
+++ b/sys/dev/mlx5/mlx5_en/en.h
@@ -50,6 +50,15 @@
#include <net/ethernet.h>
#include <sys/buf_ring.h>
+#if (__FreeBSD_version >= 1100000)
+#include "opt_rss.h"
+#endif
+
+#ifdef RSS
+#include <net/rss_config.h>
+#include <netinet/in_rss.h>
+#endif
+
#include <machine/bus.h>
#ifdef HAVE_TURBO_LRO
@@ -356,6 +365,7 @@ struct mlx5e_params {
u8 default_vlan_prio;
u8 num_tc;
u8 rx_cq_moderation_mode;
+ u8 tx_cq_moderation_mode;
u16 rx_cq_moderation_usec;
u16 rx_cq_moderation_pkts;
u16 tx_cq_moderation_usec;
@@ -381,6 +391,7 @@ struct mlx5e_params {
m(+1, u64 rx_coalesce_mode, "rx_coalesce_mode", "0: EQE mode 1: CQE mode") \
m(+1, u64 tx_coalesce_usecs, "tx_coalesce_usecs", "Limit in usec for joining tx packets") \
m(+1, u64 tx_coalesce_pkts, "tx_coalesce_pkts", "Maximum number of tx packets to join") \
+ m(+1, u64 tx_coalesce_mode, "tx_coalesce_mode", "0: EQE mode 1: CQE mode") \
m(+1, u64 hw_lro, "hw_lro", "set to enable hw_lro")
#define MLX5E_PARAMS_NUM (0 MLX5E_PARAMS(MLX5E_STATS_COUNT))
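
MLX5E_PARAMS above is an X-macro: each m(increment, declaration, name, description) row is expanded once per use site to declare a field, build the sysctl name table, or count entries (as MLX5E_PARAMS_NUM does). A self-contained illustration of the pattern, using a hypothetical two-entry list rather than the driver's:

    #include <stdio.h>

    typedef unsigned long long u64;

    /* Hypothetical list in the same shape as MLX5E_PARAMS(m). */
    #define PARAMS(m) \
    	m(+1, u64 tx_pkts, "tx_pkts", "transmitted packets") \
    	m(+1, u64 rx_pkts, "rx_pkts", "received packets")

    #define STATS_FIELD(a, b, c, d)	b;	/* expansion 1: struct members */
    #define STATS_NAME(a, b, c, d)	c,	/* expansion 2: name table     */
    #define STATS_COUNT(a, b, c, d)	a	/* expansion 3: counting       */

    struct params_ethtool { PARAMS(STATS_FIELD) };
    static const char *param_names[] = { PARAMS(STATS_NAME) };
    enum { PARAMS_NUM = 0 PARAMS(STATS_COUNT) };	/* like MLX5E_PARAMS_NUM */

    int main(void)
    {
    	printf("%d params; first is \"%s\"\n", PARAMS_NUM, param_names[0]);
    	return (0);
    }
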
diff --git a/sys/dev/mlx5/mlx5_en/mlx5_en_ethtool.c b/sys/dev/mlx5/mlx5_en/mlx5_en_ethtool.c
index 6a7b911..eede82f 100644
--- a/sys/dev/mlx5/mlx5_en/mlx5_en_ethtool.c
+++ b/sys/dev/mlx5/mlx5_en/mlx5_en_ethtool.c
@@ -136,6 +136,11 @@ mlx5e_ethtool_handler(SYSCTL_HANDLER_ARGS)
priv->params_ethtool.rx_coalesce_mode = 1;
priv->params.rx_cq_moderation_mode = priv->params_ethtool.rx_coalesce_mode;
+ /* import TX mode */
+ if (priv->params_ethtool.tx_coalesce_mode != 0)
+ priv->params_ethtool.tx_coalesce_mode = 1;
+ priv->params.tx_cq_moderation_mode = priv->params_ethtool.tx_coalesce_mode;
+
/* import RX coal time */
if (priv->params_ethtool.rx_coalesce_usecs < 1)
priv->params_ethtool.rx_coalesce_usecs = 0;
@@ -460,6 +465,7 @@ mlx5e_create_ethtool(struct mlx5e_priv *priv)
priv->params_ethtool.rx_coalesce_mode = priv->params.rx_cq_moderation_mode;
priv->params_ethtool.rx_coalesce_usecs = priv->params.rx_cq_moderation_usec;
priv->params_ethtool.rx_coalesce_pkts = priv->params.rx_cq_moderation_pkts;
+ priv->params_ethtool.tx_coalesce_mode = priv->params.tx_cq_moderation_mode;
priv->params_ethtool.tx_coalesce_usecs = priv->params.tx_cq_moderation_usec;
priv->params_ethtool.tx_coalesce_pkts = priv->params.tx_cq_moderation_pkts;
priv->params_ethtool.hw_lro = priv->params.hw_lro_en;
diff --git a/sys/dev/mlx5/mlx5_en/mlx5_en_main.c b/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
index a4a1ec7..77aaf7b 100644
--- a/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
+++ b/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
@@ -931,6 +931,10 @@ mlx5e_create_sq(struct mlx5e_channel *c,
void *sqc = param->sqc;
void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
+#ifdef RSS
+ cpuset_t cpu_mask;
+ int cpu_id;
+#endif
int err;
/* Create DMA descriptor TAG */
@@ -991,9 +995,15 @@ mlx5e_create_sq(struct mlx5e_channel *c,
}
TASK_INIT(&sq->sq_task, 0, mlx5e_tx_que, sq);
- taskqueue_start_threads(&sq->sq_tq, 1, PI_NET, "%s tx sq",
- c->ifp->if_xname);
-
+#ifdef RSS
+ cpu_id = rss_getcpu(c->ix % rss_getnumbuckets());
+ CPU_SETOF(cpu_id, &cpu_mask);
+ taskqueue_start_threads_cpuset(&sq->sq_tq, 1, PI_NET, &cpu_mask,
+ "%s TX SQ%d.%d CPU%d", c->ifp->if_xname, c->ix, tc, cpu_id);
+#else
+ taskqueue_start_threads(&sq->sq_tq, 1, PI_NET,
+ "%s TX SQ%d.%d", c->ifp->if_xname, c->ix, tc);
+#endif
snprintf(buffer, sizeof(buffer), "txstat%dtc%d", c->ix, tc);
mlx5e_create_stats(&sq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
buffer, mlx5e_sq_stats_desc, MLX5E_SQ_STATS_NUM,
@@ -1324,13 +1334,25 @@ static int
mlx5e_open_tx_cqs(struct mlx5e_channel *c,
struct mlx5e_channel_param *cparam)
{
+ u8 tx_moderation_mode;
int err;
int tc;
+ switch (c->priv->params.tx_cq_moderation_mode) {
+ case 0:
+ tx_moderation_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+ break;
+ default:
+ if (MLX5_CAP_GEN(c->priv->mdev, cq_period_start_from_cqe))
+ tx_moderation_mode = MLX5_CQ_PERIOD_MODE_START_FROM_CQE;
+ else
+ tx_moderation_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+ break;
+ }
for (tc = 0; tc < c->num_tc; tc++) {
/* open completion queue */
err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq,
- &mlx5e_tx_cq_comp, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
+ &mlx5e_tx_cq_comp, tx_moderation_mode);
if (err)
goto err_close_tx_cqs;
}
@@ -1756,8 +1778,14 @@ mlx5e_open_rqt(struct mlx5e_priv *priv)
MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
for (i = 0; i < sz; i++) {
- int ix = i % priv->params.num_channels;
-
+ int ix;
+#ifdef RSS
+ ix = rss_get_indirection_to_bucket(i);
+#else
+ ix = i;
+#endif
+ /* ensure we don't overflow */
+ ix %= priv->params.num_channels;
MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix]->rq.rqn);
}
@@ -1822,6 +1850,8 @@ mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 * tirc, int tt)
MLX5_CAP_ETH(priv->mdev,
lro_timer_supported_periods[2]));
}
+
+ /* setup parameters for hashing TIR type, if any */
switch (tt) {
case MLX5E_TT_ANY:
MLX5_SET(tirc, tirc, disp_type,
@@ -1836,8 +1866,16 @@ mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 * tirc, int tt)
priv->rqtn);
MLX5_SET(tirc, tirc, rx_hash_fn,
MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ);
- MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
hkey = (__be32 *) MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
+#ifdef RSS
+ /*
+ * The FreeBSD RSS implementation does not currently
+ * support symmetric Toeplitz hashes:
+ */
+ MLX5_SET(tirc, tirc, rx_hash_symmetric, 0);
+ rss_getkey((uint8_t *)hkey);
+#else
+ MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
hkey[0] = cpu_to_be32(0xD181C62C);
hkey[1] = cpu_to_be32(0xF7F4DB5B);
hkey[2] = cpu_to_be32(0x1983A2FC);
@@ -1848,6 +1886,7 @@ mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 * tirc, int tt)
hkey[7] = cpu_to_be32(0x593D56D9);
hkey[8] = cpu_to_be32(0xF3253C06);
hkey[9] = cpu_to_be32(0x2ADC1FFC);
+#endif
break;
}
@@ -1857,6 +1896,12 @@ mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 * tirc, int tt)
MLX5_L3_PROT_TYPE_IPV4);
MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
MLX5_L4_PROT_TYPE_TCP);
+#ifdef RSS
+ if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV4)) {
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP);
+ } else
+#endif
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_ALL);
break;
@@ -1866,6 +1911,12 @@ mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 * tirc, int tt)
MLX5_L3_PROT_TYPE_IPV6);
MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
MLX5_L4_PROT_TYPE_TCP);
+#ifdef RSS
+ if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV6)) {
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP);
+ } else
+#endif
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_ALL);
break;
@@ -1875,6 +1926,12 @@ mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 * tirc, int tt)
MLX5_L3_PROT_TYPE_IPV4);
MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
MLX5_L4_PROT_TYPE_UDP);
+#ifdef RSS
+ if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV4)) {
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP);
+ } else
+#endif
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_ALL);
break;
@@ -1884,6 +1941,12 @@ mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 * tirc, int tt)
MLX5_L3_PROT_TYPE_IPV6);
MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
MLX5_L4_PROT_TYPE_UDP);
+#ifdef RSS
+ if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV6)) {
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP);
+ } else
+#endif
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_ALL);
break;
@@ -2005,32 +2068,15 @@ mlx5e_set_dev_port_mtu(struct ifnet *ifp, int sw_mtu)
struct mlx5e_priv *priv = ifp->if_softc;
struct mlx5_core_dev *mdev = priv->mdev;
int hw_mtu;
- int min_mtu;
int err;
- /*
- * Trying to set MTU to zero, in order
- * to find out the FW's minimal MTU
- */
- err = mlx5_set_port_mtu(mdev, 0);
- if (err)
- return (err);
- err = mlx5_query_port_oper_mtu(mdev, &min_mtu);
+ err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(sw_mtu));
if (err) {
- if_printf(ifp, "Query port minimal MTU failed\n");
+ if_printf(ifp, "%s: mlx5_set_port_mtu failed setting %d, err=%d\n",
+ __func__, sw_mtu, err);
return (err);
}
-
- if (sw_mtu < MLX5E_HW2SW_MTU(min_mtu)) {
- ifp->if_mtu = sw_mtu;
- return (0);
- }
-
- err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(sw_mtu));
- if (err)
- return (err);
-
err = mlx5_query_port_oper_mtu(mdev, &hw_mtu);
if (!err) {
ifp->if_mtu = MLX5E_HW2SW_MTU(hw_mtu);
@@ -2057,6 +2103,13 @@ mlx5e_open_locked(struct ifnet *ifp)
if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0)
return (0);
+#ifdef RSS
+ if (rss_getnumbuckets() > priv->params.num_channels) {
+ if_printf(ifp, "NOTE: There are more RSS buckets(%u) than "
+ "channels(%u) available\n", rss_getnumbuckets(),
+ priv->params.num_channels);
+ }
+#endif
err = mlx5e_open_tises(priv);
if (err) {
if_printf(ifp, "%s: mlx5e_open_tises failed, %d\n",
diff --git a/sys/dev/mlx5/mlx5_en/mlx5_en_rx.c b/sys/dev/mlx5/mlx5_en/mlx5_en_rx.c
index 3e827bd..87fc524 100644
--- a/sys/dev/mlx5/mlx5_en/mlx5_en_rx.c
+++ b/sys/dev/mlx5/mlx5_en/mlx5_en_rx.c
@@ -192,12 +192,43 @@ mlx5e_build_rx_mbuf(struct mlx5_cqe64 *cqe,
mb->m_pkthdr.len = mb->m_len = cqe_bcnt;
/* check if a Toeplitz hash was computed */
- if (cqe->rss_hash_type != 0)
+ if (cqe->rss_hash_type != 0) {
mb->m_pkthdr.flowid = be32_to_cpu(cqe->rss_hash_result);
- else
+#ifdef RSS
+ /* decode the RSS hash type */
+ switch (cqe->rss_hash_type &
+ (CQE_RSS_DST_HTYPE_L4 | CQE_RSS_DST_HTYPE_IP)) {
+ /* IPv4 */
+ case (CQE_RSS_DST_HTYPE_TCP | CQE_RSS_DST_HTYPE_IPV4):
+ M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_TCP_IPV4);
+ break;
+ case (CQE_RSS_DST_HTYPE_UDP | CQE_RSS_DST_HTYPE_IPV4):
+ M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_UDP_IPV4);
+ break;
+ case CQE_RSS_DST_HTYPE_IPV4:
+ M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_IPV4);
+ break;
+ /* IPv6 */
+ case (CQE_RSS_DST_HTYPE_TCP | CQE_RSS_DST_HTYPE_IPV6):
+ M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_TCP_IPV6);
+ break;
+ case (CQE_RSS_DST_HTYPE_UDP | CQE_RSS_DST_HTYPE_IPV6):
+ M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_UDP_IPV6);
+ break;
+ case CQE_RSS_DST_HTYPE_IPV6:
+ M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_IPV6);
+ break;
+ default: /* Other */
+ M_HASHTYPE_SET(mb, M_HASHTYPE_OPAQUE);
+ break;
+ }
+#else
+ M_HASHTYPE_SET(mb, M_HASHTYPE_OPAQUE);
+#endif
+ } else {
mb->m_pkthdr.flowid = rq->ix;
-
- M_HASHTYPE_SET(mb, M_HASHTYPE_OPAQUE);
+ M_HASHTYPE_SET(mb, M_HASHTYPE_OPAQUE);
+ }
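
Recording a specific M_HASHTYPE_* (instead of always M_HASHTYPE_OPAQUE) lets consumers trust the hash for their own bucket mapping; a hedged sketch of how such an mbuf is typically used, where nqueues and queue are assumed caller-side variables:

    /* Sketch: consuming the flowid/hashtype recorded above. */
    if (M_HASHTYPE_GET(mb) != M_HASHTYPE_NONE) {
    	uint32_t hash = mb->m_pkthdr.flowid;	/* 32-bit Toeplitz result */
    	queue = hash % nqueues;			/* nqueues: assumed count */
    }
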
mb->m_pkthdr.rcvif = ifp;
if (likely(ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) &&
diff --git a/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c b/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c
index 0256fc7..cb08727 100644
--- a/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c
+++ b/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c
@@ -85,7 +85,15 @@ mlx5e_select_queue(struct ifnet *ifp, struct mbuf *mb)
/* check if flowid is set */
if (M_HASHTYPE_GET(mb) != M_HASHTYPE_NONE) {
- ch = (mb->m_pkthdr.flowid % 128) % ch;
+#ifdef RSS
+ u32 temp;
+
+ if (rss_hash2bucket(mb->m_pkthdr.flowid,
+ M_HASHTYPE_GET(mb), &temp) == 0)
+ ch = temp % ch;
+ else
+#endif
+ ch = (mb->m_pkthdr.flowid % 128) % ch;
} else {
#if (__FreeBSD_version >= 1100000)
ch = m_ether_tcpip_hash(MBUF_HASHFLAG_L3 |
diff --git a/sys/dev/mlx5/vport.h b/sys/dev/mlx5/vport.h
index 7d79c59..c5948e7 100644
--- a/sys/dev/mlx5/vport.h
+++ b/sys/dev/mlx5/vport.h
@@ -42,6 +42,13 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
u32 vport, u8 *addr);
int mlx5_set_nic_vport_current_mac(struct mlx5_core_dev *mdev, int vport,
bool other_vport, u8 *addr);
+int mlx5_set_nic_vport_vlan_list(struct mlx5_core_dev *dev, u32 vport,
+ u16 *vlan_list, int list_len);
+int mlx5_set_nic_vport_mc_list(struct mlx5_core_dev *mdev, int vport,
+ u64 *addr_list, size_t addr_list_len);
+int mlx5_set_nic_vport_promisc(struct mlx5_core_dev *mdev, int vport,
+ bool promisc_mc, bool promisc_uc,
+ bool promisc_all);
int mlx5_set_nic_vport_permanent_mac(struct mlx5_core_dev *mdev, int vport,
u8 *addr);
int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev);
diff --git a/sys/dev/netmap/if_ixl_netmap.h b/sys/dev/netmap/if_ixl_netmap.h
new file mode 100644
index 0000000..d6aff1f
--- /dev/null
+++ b/sys/dev/netmap/if_ixl_netmap.h
@@ -0,0 +1,422 @@
+/*
+ * Copyright (C) 2015, Luigi Rizzo. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * $FreeBSD$
+ *
+ * netmap support for: ixl
+ *
+ * derived from ixgbe
+ * netmap support for a network driver.
+ * This file contains only static or inline functions used
+ * by a single driver. To avoid replicating code we simply #include
+ * it near the beginning of the standard driver.
+ * For ixl the file is imported in two places, hence the conditional at the
+ * beginning.
+ */
+
+#include <net/netmap.h>
+#include <sys/selinfo.h>
+
+/*
+ * Some drivers may need the following headers. Others
+ * already include them by default
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+ */
+#include <dev/netmap/netmap_kern.h>
+
+int ixl_netmap_txsync(struct netmap_kring *kring, int flags);
+int ixl_netmap_rxsync(struct netmap_kring *kring, int flags);
+
+extern int ixl_rx_miss, ixl_rx_miss_bufs, ixl_crcstrip;
+
+#ifdef NETMAP_IXL_MAIN
+/*
+ * device-specific sysctl variables:
+ *
+ * ixl_crcstrip: 0: keep CRC in rx frames (default), 1: strip it.
+ * During regular operation the CRC is stripped, but on some
+ * hardware the reception of frames whose length is not a
+ * multiple of 64 is slower, so using crcstrip=0 helps in benchmarks.
+ *
+ * ixl_rx_miss, ixl_rx_miss_bufs:
+ * count packets that might be missed due to lost interrupts.
+ */
+SYSCTL_DECL(_dev_netmap);
+int ixl_rx_miss, ixl_rx_miss_bufs, ixl_crcstrip;
+SYSCTL_INT(_dev_netmap, OID_AUTO, ixl_crcstrip,
+ CTLFLAG_RW, &ixl_crcstrip, 0, "strip CRC on rx frames");
+SYSCTL_INT(_dev_netmap, OID_AUTO, ixl_rx_miss,
+ CTLFLAG_RW, &ixl_rx_miss, 0, "potentially missed rx intr");
+SYSCTL_INT(_dev_netmap, OID_AUTO, ixl_rx_miss_bufs,
+ CTLFLAG_RW, &ixl_rx_miss_bufs, 0, "potentially missed rx intr bufs");
+
+
+/*
+ * Register/unregister. We are already under netmap lock.
+ * Only called on the first register or the last unregister.
+ */
+static int
+ixl_netmap_reg(struct netmap_adapter *na, int onoff)
+{
+ struct ifnet *ifp = na->ifp;
+ struct ixl_vsi *vsi = ifp->if_softc;
+ struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
+
+ IXL_PF_LOCK(pf);
+ ixl_disable_intr(vsi);
+
+ /* Tell the stack that the interface is no longer active */
+ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+
+ //set_crcstrip(&adapter->hw, onoff);
+ /* enable or disable flags and callbacks in na and ifp */
+ if (onoff) {
+ nm_set_native_flags(na);
+ } else {
+ nm_clear_native_flags(na);
+ }
+ ixl_init_locked(pf); /* also enables intr */
+ //set_crcstrip(&adapter->hw, onoff); // XXX why twice ?
+ IXL_PF_UNLOCK(pf);
+ return (ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1);
+}
+
+
+/*
+ * The attach routine, called near the end of ixl_attach(),
+ * fills the parameters for netmap_attach() and calls it.
+ * It cannot fail; in the worst case (such as no memory)
+ * netmap mode will be disabled and the driver will only
+ * operate in standard mode.
+ */
+static void
+ixl_netmap_attach(struct ixl_vsi *vsi)
+{
+ struct netmap_adapter na;
+
+ bzero(&na, sizeof(na));
+
+ na.ifp = vsi->ifp;
+ na.na_flags = NAF_BDG_MAYSLEEP;
+ // XXX check that queues is set.
+ printf("queues is %p\n", vsi->queues);
+ if (vsi->queues) {
+ na.num_tx_desc = vsi->queues[0].num_desc;
+ na.num_rx_desc = vsi->queues[0].num_desc;
+ }
+ na.nm_txsync = ixl_netmap_txsync;
+ na.nm_rxsync = ixl_netmap_rxsync;
+ na.nm_register = ixl_netmap_reg;
+ na.num_tx_rings = na.num_rx_rings = vsi->num_queues;
+ netmap_attach(&na);
+}
+
+
+#else /* !NETMAP_IXL_MAIN, code for ixl_txrx.c */
+
+/*
+ * Reconcile kernel and user view of the transmit ring.
+ *
+ * All information is in the kring.
+ * Userspace wants to send packets up to the one before kring->rhead,
+ * while the kernel knows kring->nr_hwcur is the first unsent packet.
+ *
+ * Here we push packets out (as many as possible), and possibly
+ * reclaim buffers from previously completed transmission.
+ *
+ * The caller (netmap) guarantees that there is only one instance
+ * running at any time. Any interference with other driver
+ * methods should be handled by the individual drivers.
+ */
+int
+ixl_netmap_txsync(struct netmap_kring *kring, int flags)
+{
+ struct netmap_adapter *na = kring->na;
+ struct ifnet *ifp = na->ifp;
+ struct netmap_ring *ring = kring->ring;
+ u_int nm_i; /* index into the netmap ring */
+ u_int nic_i; /* index into the NIC ring */
+ u_int n;
+ u_int const lim = kring->nkr_num_slots - 1;
+ u_int const head = kring->rhead;
+ /*
+ * interrupts on every tx packet are expensive so request
+ * them every half ring, or where NS_REPORT is set
+ */
+ u_int report_frequency = kring->nkr_num_slots >> 1;
+
+ /* device-specific */
+ struct ixl_vsi *vsi = ifp->if_softc;
+ struct ixl_queue *que = &vsi->queues[kring->ring_id];
+ struct tx_ring *txr = &que->txr;
+
+ bus_dmamap_sync(txr->dma.tag, txr->dma.map,
+ BUS_DMASYNC_POSTREAD);
+
+ /*
+ * First part: process new packets to send.
+ * nm_i is the current index in the netmap ring,
+ * nic_i is the corresponding index in the NIC ring.
+ *
+ * If we have packets to send (nm_i != head)
+ * iterate over the netmap ring, fetch length and update
+ * the corresponding slot in the NIC ring. Some drivers also
+ * need to update the buffer's physical address in the NIC slot
+ * even if NS_BUF_CHANGED is not set (PNMB computes the addresses).
+ *
+ * The netmap_reload_map() call is especially expensive,
+ * even when (as in this case) the tag is 0, so only do it
+ * when the buffer has actually changed.
+ *
+ * If possible do not set the report/intr bit on all slots,
+ * but only a few times per ring or when NS_REPORT is set.
+ *
+ * Finally, on 10G and faster drivers, it might be useful
+ * to prefetch the next slot and txr entry.
+ */
+
+ nm_i = kring->nr_hwcur;
+ if (nm_i != head) { /* we have new packets to send */
+ nic_i = netmap_idx_k2n(kring, nm_i);
+
+ __builtin_prefetch(&ring->slot[nm_i]);
+ __builtin_prefetch(&txr->buffers[nic_i]);
+
+ for (n = 0; nm_i != head; n++) {
+ struct netmap_slot *slot = &ring->slot[nm_i];
+ u_int len = slot->len;
+ uint64_t paddr;
+ void *addr = PNMB(na, slot, &paddr);
+
+ /* device-specific */
+ struct i40e_tx_desc *curr = &txr->base[nic_i];
+ struct ixl_tx_buf *txbuf = &txr->buffers[nic_i];
+ u64 flags = (slot->flags & NS_REPORT ||
+ nic_i == 0 || nic_i == report_frequency) ?
+ ((u64)I40E_TX_DESC_CMD_RS << I40E_TXD_QW1_CMD_SHIFT) : 0;
+
+ /* prefetch for next round */
+ __builtin_prefetch(&ring->slot[nm_i + 1]);
+ __builtin_prefetch(&txr->buffers[nic_i + 1]);
+
+ NM_CHECK_ADDR_LEN(na, addr, len);
+
+ if (slot->flags & NS_BUF_CHANGED) {
+ /* buffer has changed, reload map */
+ netmap_reload_map(na, txr->dma.tag, txbuf->map, addr);
+ }
+ slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
+
+ /* Fill the slot in the NIC ring. */
+ curr->buffer_addr = htole64(paddr);
+ curr->cmd_type_offset_bsz = htole64(
+ ((u64)len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
+ flags |
+ ((u64)I40E_TX_DESC_CMD_EOP << I40E_TXD_QW1_CMD_SHIFT)
+ ); // XXX more ?
+
+ /* make sure changes to the buffer are synced */
+ bus_dmamap_sync(txr->dma.tag, txbuf->map,
+ BUS_DMASYNC_PREWRITE);
+
+ nm_i = nm_next(nm_i, lim);
+ nic_i = nm_next(nic_i, lim);
+ }
+ kring->nr_hwcur = head;
+
+ /* synchronize the NIC ring */
+ bus_dmamap_sync(txr->dma.tag, txr->dma.map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+ /* (re)start the tx unit up to slot nic_i (excluded) */
+ wr32(vsi->hw, txr->tail, nic_i);
+ }
+
+ /*
+ * Second part: reclaim buffers for completed transmissions.
+ */
+ nic_i = LE32_TO_CPU(*(volatile __le32 *)&txr->base[que->num_desc]);
+ if (nic_i != txr->next_to_clean) {
+ /* some tx completed, increment avail */
+ txr->next_to_clean = nic_i;
+ kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim);
+ }
+
+ nm_txsync_finalize(kring);
+
+ return 0;
+}
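
Both sync routines advance indices with nm_next()/nm_prev(), which wrap at lim = nkr_num_slots - 1. A standalone sketch of the same ring arithmetic:

    #include <stdint.h>

    /* Sketch of netmap's ring-index helpers (cf. nm_next/nm_prev). */
    static inline uint32_t
    ring_next(uint32_t i, uint32_t lim)
    {
    	return (i == lim ? 0 : i + 1);
    }

    static inline uint32_t
    ring_prev(uint32_t i, uint32_t lim)
    {
    	return (i == 0 ? lim : i - 1);
    }
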
+
+
+/*
+ * Reconcile kernel and user view of the receive ring.
+ * Same as for the txsync, this routine must be efficient.
+ * The caller guarantees a single invocation, but races against
+ * the rest of the driver should be handled here.
+ *
+ * On call, kring->rhead is the first packet that userspace wants
+ * to keep, and kring->rcur is the wakeup point.
+ * The kernel has previously reported packets up to kring->rtail.
+ *
+ * If (flags & NAF_FORCE_READ) also check for incoming packets irrespective
+ * of whether or not we received an interrupt.
+ */
+int
+ixl_netmap_rxsync(struct netmap_kring *kring, int flags)
+{
+ struct netmap_adapter *na = kring->na;
+ struct ifnet *ifp = na->ifp;
+ struct netmap_ring *ring = kring->ring;
+ u_int nm_i; /* index into the netmap ring */
+ u_int nic_i; /* index into the NIC ring */
+ u_int n;
+ u_int const lim = kring->nkr_num_slots - 1;
+ u_int const head = nm_rxsync_prologue(kring);
+ int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
+
+ /* device-specific */
+ struct ixl_vsi *vsi = ifp->if_softc;
+ struct ixl_queue *que = &vsi->queues[kring->ring_id];
+ struct rx_ring *rxr = &que->rxr;
+
+ if (head > lim)
+ return netmap_ring_reinit(kring);
+
+ /* XXX check sync modes */
+ bus_dmamap_sync(rxr->dma.tag, rxr->dma.map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+ /*
+ * First part: import newly received packets.
+ *
+ * nm_i is the index of the next free slot in the netmap ring,
+ * nic_i is the index of the next received packet in the NIC ring,
+ * and they may differ if if_init() has been called while
+ * in netmap mode. For the receive ring we have
+ *
+ * nic_i = rxr->next_check;
+ * nm_i = kring->nr_hwtail (previous)
+ * and
+ * nm_i == (nic_i + kring->nkr_hwofs) % ring_size
+ *
+ * rxr->next_check is set to 0 on a ring reinit
+ */
+ if (netmap_no_pendintr || force_update) {
+ int crclen = ixl_crcstrip ? 0 : 4;
+ uint16_t slot_flags = kring->nkr_slot_flags;
+
+ nic_i = rxr->next_check; // or also k2n(kring->nr_hwtail)
+ nm_i = netmap_idx_n2k(kring, nic_i);
+
+ for (n = 0; ; n++) {
+ union i40e_32byte_rx_desc *curr = &rxr->base[nic_i];
+ uint64_t qword = le64toh(curr->wb.qword1.status_error_len);
+ uint32_t staterr = (qword & I40E_RXD_QW1_STATUS_MASK)
+ >> I40E_RXD_QW1_STATUS_SHIFT;
+
+ if ((staterr & (1<<I40E_RX_DESC_STATUS_DD_SHIFT)) == 0)
+ break;
+ ring->slot[nm_i].len = ((qword & I40E_RXD_QW1_LENGTH_PBUF_MASK)
+ >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - crclen;
+ ring->slot[nm_i].flags = slot_flags;
+ bus_dmamap_sync(rxr->ptag,
+ rxr->buffers[nic_i].pmap, BUS_DMASYNC_POSTREAD);
+ nm_i = nm_next(nm_i, lim);
+ nic_i = nm_next(nic_i, lim);
+ }
+ if (n) { /* update the state variables */
+ if (netmap_no_pendintr && !force_update) {
+ /* diagnostics */
+ ixl_rx_miss++;
+ ixl_rx_miss_bufs += n;
+ }
+ rxr->next_check = nic_i;
+ kring->nr_hwtail = nm_i;
+ }
+ kring->nr_kflags &= ~NKR_PENDINTR;
+ }
+
+ /*
+ * Second part: skip past packets that userspace has released
+ * (kring->nr_hwcur to head, excluded) and make the buffers
+ * available for reception.
+ * As usual nm_i is the index in the netmap ring,
+ * nic_i is the index in the NIC ring, and
+ * nm_i == (nic_i + kring->nkr_hwofs) % ring_size
+ */
+ nm_i = kring->nr_hwcur;
+ if (nm_i != head) {
+ nic_i = netmap_idx_k2n(kring, nm_i);
+ for (n = 0; nm_i != head; n++) {
+ struct netmap_slot *slot = &ring->slot[nm_i];
+ uint64_t paddr;
+ void *addr = PNMB(na, slot, &paddr);
+
+ union i40e_32byte_rx_desc *curr = &rxr->base[nic_i];
+ struct ixl_rx_buf *rxbuf = &rxr->buffers[nic_i];
+
+ if (addr == NETMAP_BUF_BASE(na)) /* bad buf */
+ goto ring_reset;
+
+ if (slot->flags & NS_BUF_CHANGED) {
+ /* buffer has changed, reload map */
+ netmap_reload_map(na, rxr->ptag, rxbuf->pmap, addr);
+ slot->flags &= ~NS_BUF_CHANGED;
+ }
+ curr->read.pkt_addr = htole64(paddr);
+ curr->read.hdr_addr = 0; // XXX needed
+ bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
+ BUS_DMASYNC_PREREAD);
+ nm_i = nm_next(nm_i, lim);
+ nic_i = nm_next(nic_i, lim);
+ }
+ kring->nr_hwcur = head;
+
+ bus_dmamap_sync(rxr->dma.tag, rxr->dma.map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ /*
+ * IMPORTANT: we must leave one free slot in the ring,
+ * so move nic_i back by one unit
+ */
+ nic_i = nm_prev(nic_i, lim);
+ wr32(vsi->hw, rxr->tail, nic_i);
+ }
+
+ /* tell userspace that there might be new packets */
+ nm_rxsync_finalize(kring);
+
+ return 0;
+
+ring_reset:
+ return netmap_ring_reinit(kring);
+}
+
+#endif /* !NETMAP_IXL_MAIN */
+
+/* end of file */
diff --git a/sys/dev/sfxge/sfxge.c b/sys/dev/sfxge/sfxge.c
index 46b138f..f78d175 100644
--- a/sys/dev/sfxge/sfxge.c
+++ b/sys/dev/sfxge/sfxge.c
@@ -69,10 +69,10 @@ __FBSDID("$FreeBSD$");
IFCAP_RXCSUM_IPV6 | IFCAP_TXCSUM_IPV6 | \
IFCAP_TSO4 | IFCAP_TSO6 | \
IFCAP_JUMBO_MTU | \
- IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE)
+ IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWSTATS)
#define SFXGE_CAP_ENABLE SFXGE_CAP
#define SFXGE_CAP_FIXED (IFCAP_VLAN_MTU | \
- IFCAP_JUMBO_MTU | IFCAP_LINKSTATE)
+ IFCAP_JUMBO_MTU | IFCAP_LINKSTATE | IFCAP_HWSTATS)
MALLOC_DEFINE(M_SFXGE, "sfxge", "Solarflare 10GigE driver");
@@ -94,6 +94,14 @@ SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_ring, CTLFLAG_RDTUN,
&sfxge_tx_ring_entries, 0,
"Maximum number of descriptors in a transmit ring");
+#define SFXGE_PARAM_STATS_UPDATE_PERIOD SFXGE_PARAM(stats_update_period)
+static int sfxge_stats_update_period = SFXGE_CALLOUT_TICKS;
+TUNABLE_INT(SFXGE_PARAM_STATS_UPDATE_PERIOD,
+ &sfxge_stats_update_period);
+SYSCTL_INT(_hw_sfxge, OID_AUTO, stats_update_period, CTLFLAG_RDTUN,
+ &sfxge_stats_update_period, 0,
+ "netstat interface statistics update period in ticks");
+
static void
sfxge_reset(void *arg, int npending);
@@ -506,10 +514,24 @@ sfxge_if_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
}
static void
+sfxge_tick(void *arg)
+{
+ struct sfxge_softc *sc = arg;
+
+ sfxge_port_update_stats(sc);
+ sfxge_tx_update_stats(sc);
+
+ callout_reset(&sc->tick_callout, sfxge_stats_update_period,
+ sfxge_tick, sc);
+}
+
+static void
sfxge_ifnet_fini(struct ifnet *ifp)
{
struct sfxge_softc *sc = ifp->if_softc;
+ callout_drain(&sc->tick_callout);
+
SFXGE_ADAPTER_LOCK(sc);
sfxge_stop(sc);
SFXGE_ADAPTER_UNLOCK(sc);
@@ -555,10 +577,15 @@ sfxge_ifnet_init(struct ifnet *ifp, struct sfxge_softc *sc)
ifp->if_transmit = sfxge_if_transmit;
ifp->if_qflush = sfxge_if_qflush;
+ callout_init(&sc->tick_callout, B_TRUE);
+
DBGPRINT(sc->dev, "ifmedia_init");
if ((rc = sfxge_port_ifmedia_init(sc)) != 0)
goto fail;
+ callout_reset(&sc->tick_callout, sfxge_stats_update_period,
+ sfxge_tick, sc);
+
return (0);
fail:
diff --git a/sys/dev/sfxge/sfxge.h b/sys/dev/sfxge/sfxge.h
index aa1830d..cfaf06d 100644
--- a/sys/dev/sfxge/sfxge.h
+++ b/sys/dev/sfxge/sfxge.h
@@ -121,6 +121,8 @@ enum sfxge_evq_state {
#define SFXGE_EV_BATCH 16384
+#define SFXGE_CALLOUT_TICKS 100
+
struct sfxge_evq {
/* Structure members below are sorted by usage order */
struct sfxge_softc *sc;
@@ -274,6 +276,8 @@ struct sfxge_softc {
size_t rx_buffer_align;
uma_zone_t rx_buffer_zone;
+ struct callout tick_callout;
+
unsigned int evq_max;
unsigned int evq_count;
unsigned int rxq_count;
@@ -349,6 +353,7 @@ extern void sfxge_mac_link_update(struct sfxge_softc *sc,
efx_link_mode_t mode);
extern int sfxge_mac_filter_set(struct sfxge_softc *sc);
extern int sfxge_port_ifmedia_init(struct sfxge_softc *sc);
+extern void sfxge_port_update_stats(struct sfxge_softc *sc);
#define SFXGE_MAX_MTU (9 * 1024)
diff --git a/sys/dev/sfxge/sfxge_port.c b/sys/dev/sfxge/sfxge_port.c
index 420aaad..277752a 100644
--- a/sys/dev/sfxge/sfxge_port.c
+++ b/sys/dev/sfxge/sfxge_port.c
@@ -87,6 +87,41 @@ out:
return (rc);
}
+void
+sfxge_port_update_stats(struct sfxge_softc *sc)
+{
+ struct ifnet *ifp;
+ uint64_t *mac_stats;
+
+ SFXGE_PORT_LOCK(&sc->port);
+
+ /* Ignore error and use old values */
+ (void)sfxge_mac_stat_update(sc);
+
+ ifp = sc->ifnet;
+ mac_stats = (uint64_t *)sc->port.mac_stats.decode_buf;
+
+ ifp->if_ipackets = mac_stats[EFX_MAC_RX_PKTS];
+ ifp->if_ierrors = mac_stats[EFX_MAC_RX_ERRORS];
+ ifp->if_opackets = mac_stats[EFX_MAC_TX_PKTS];
+ ifp->if_oerrors = mac_stats[EFX_MAC_TX_ERRORS];
+ ifp->if_collisions =
+ mac_stats[EFX_MAC_TX_SGL_COL_PKTS] +
+ mac_stats[EFX_MAC_TX_MULT_COL_PKTS] +
+ mac_stats[EFX_MAC_TX_EX_COL_PKTS] +
+ mac_stats[EFX_MAC_TX_LATE_COL_PKTS];
+ ifp->if_ibytes = mac_stats[EFX_MAC_RX_OCTETS];
+ ifp->if_obytes = mac_stats[EFX_MAC_TX_OCTETS];
+ /* if_imcasts is maintained in net/if_ethersubr.c */
+ ifp->if_omcasts =
+ mac_stats[EFX_MAC_TX_MULTICST_PKTS] +
+ mac_stats[EFX_MAC_TX_BRDCST_PKTS];
+ /* if_iqdrops is maintained in net/if_ethersubr.c */
+ /* if_noproto is maintained in net/if_ethersubr.c */
+
+ SFXGE_PORT_UNLOCK(&sc->port);
+}
+
static int
sfxge_mac_stat_handler(SYSCTL_HANDLER_ARGS)
{
diff --git a/sys/dev/sfxge/sfxge_tx.c b/sys/dev/sfxge/sfxge_tx.c
index 9579975..398272d 100644
--- a/sys/dev/sfxge/sfxge_tx.c
+++ b/sys/dev/sfxge/sfxge_tx.c
@@ -1683,6 +1683,35 @@ sfxge_tx_stat_init(struct sfxge_softc *sc)
}
}
+static uint64_t
+sfxge_tx_get_drops(struct sfxge_softc *sc)
+{
+ unsigned int index;
+ uint64_t drops = 0;
+ struct sfxge_txq *txq;
+
+ /* Sum across all TX queues */
+ for (index = 0; index < sc->txq_count; index++) {
+ txq = sc->txq[index];
+ /*
+ * In theory, txq->put_overflow and txq->netdown_drops
+ * should use atomic operations and the others should be
+ * read under the txq lock, but these are only statistics.
+ */
+ drops += txq->drops + txq->get_overflow +
+ txq->get_non_tcp_overflow +
+ txq->put_overflow + txq->netdown_drops +
+ txq->tso_pdrop_too_many + txq->tso_pdrop_no_rsrc;
+ }
+ return (drops);
+}
+
+void
+sfxge_tx_update_stats(struct sfxge_softc *sc)
+{
+ sc->ifnet->if_oerrors += sfxge_tx_get_drops(sc);
+}
+
void
sfxge_tx_fini(struct sfxge_softc *sc)
{
diff --git a/sys/dev/sfxge/sfxge_tx.h b/sys/dev/sfxge/sfxge_tx.h
index 67bedc2..9d411ea 100644
--- a/sys/dev/sfxge/sfxge_tx.h
+++ b/sys/dev/sfxge/sfxge_tx.h
@@ -220,6 +220,7 @@ struct sfxge_txq {
struct sfxge_evq;
+extern void sfxge_tx_update_stats(struct sfxge_softc *sc);
extern int sfxge_tx_init(struct sfxge_softc *sc);
extern void sfxge_tx_fini(struct sfxge_softc *sc);
diff --git a/sys/dev/sfxge/sfxge_version.h b/sys/dev/sfxge/sfxge_version.h
index f5f4882..9d93e79 100644
--- a/sys/dev/sfxge/sfxge_version.h
+++ b/sys/dev/sfxge/sfxge_version.h
@@ -36,6 +36,6 @@
#ifndef _SFXGE_VERSION_H
#define _SFXGE_VERSION_H
-#define SFXGE_VERSION_STRING "v4.5.2.1000"
+#define SFXGE_VERSION_STRING "v4.5.3.1002"
#endif /* _SFXGE_DRIVER_VERSION_H */
diff --git a/sys/dev/usb/net/if_smsc.c b/sys/dev/usb/net/if_smsc.c
index f009183..c8f2202 100644
--- a/sys/dev/usb/net/if_smsc.c
+++ b/sys/dev/usb/net/if_smsc.c
@@ -1686,7 +1686,7 @@ smsc_attach_post_sub(struct usb_ether *ue)
/* The chip supports TCP/UDP checksum offloading on TX and RX paths; however,
* currently only RX checksum is supported in the driver (see top of file).
*/
- ifp->if_capabilities |= IFCAP_RXCSUM;
+ ifp->if_capabilities |= IFCAP_RXCSUM | IFCAP_VLAN_MTU;
ifp->if_hwassist = 0;
/* TX checksumming is disabled (for now?)
diff --git a/sys/dev/usb/usbdevs b/sys/dev/usb/usbdevs
index bbe9aa1..83d41e6 100644
--- a/sys/dev/usb/usbdevs
+++ b/sys/dev/usb/usbdevs
@@ -525,7 +525,7 @@ vendor DMI 0x0c0b DMI
vendor CANYON 0x0c10 Canyon
vendor ICOM 0x0c26 Icom Inc.
vendor GNOTOMETRICS 0x0c33 GN Otometrics
-vendor CHICONY2 0x0c45 Chicony
+vendor CHICONY2 0x0c45 Chicony / Microdia / Sonix Technology Co., Ltd.
vendor REINERSCT 0x0c4b Reiner-SCT
vendor SEALEVEL 0x0c52 Sealevel System
vendor JETI 0x0c6c Jeti
@@ -687,6 +687,7 @@ vendor ASUS2 0x1761 ASUS
vendor SWEEX2 0x177f Sweex
vendor METAGEEK 0x1781 MetaGeek
vendor KAMSTRUP 0x17a8 Kamstrup A/S
+vendor DISPLAYLINK 0x17e9 DisplayLink
vendor LENOVO 0x17ef Lenovo
vendor WAVESENSE 0x17f4 WaveSense
vendor VAISALA 0x1843 Vaisala
@@ -775,6 +776,7 @@ vendor MOSCHIP 0x9710 MosChip Semiconductor
vendor NETGEAR4 0x9846 Netgear
vendor MARVELL 0x9e88 Marvell Technology Group Ltd.
vendor 3COM3 0xa727 3Com
+vendor CACE 0xcace CACE Technologies
vendor EVOLUTION 0xdeee Evolution Robotics products
vendor DATAAPEX 0xdaae DataApex
vendor HP2 0xf003 Hewlett Packard
@@ -862,6 +864,7 @@ product ACCTON RT3070_3 0xc522 RT3070
product ACCTON RT3070_5 0xd522 RT3070
product ACCTON RTL8192SU 0xc512 RTL8192SU
product ACCTON ZD1211B 0xe501 ZD1211B
+product ACCTON WN7512 0xf522 WN7512
/* Aceeca products */
product ACEECA MEZ1000 0x0001 MEZ1000 RDA
@@ -1226,6 +1229,11 @@ product ATHEROS2 AR5523_2 0x0003 AR5523
product ATHEROS2 AR5523_2_NF 0x0004 AR5523 (no firmware)
product ATHEROS2 AR5523_3 0x0005 AR5523
product ATHEROS2 AR5523_3_NF 0x0006 AR5523 (no firmware)
+product ATHEROS2 TG121N 0x1001 TG121N
+product ATHEROS2 WN821NV2 0x1002 WN821NV2
+product ATHEROS2 3CRUSBN275 0x1010 3CRUSBN275
+product ATHEROS2 WN612 0x1011 WN612
+product ATHEROS2 AR9170 0x9170 AR9170
/* Atmel Comp. products */
product ATMEL STK541 0x2109 Zigbee Controller
@@ -1241,6 +1249,9 @@ product AUTHENTEC AES1610 0x1600 AES1610 Fingerprint Sensor
/* Avision products */
product AVISION 1200U 0x0268 1200U scanner
+/* AVM products */
+product AVM FRITZWLAN 0x8401 FRITZ!WLAN N
+
/* Axesstel products */
product AXESSTEL DATAMODEM 0x1000 Data Modem
@@ -1343,6 +1354,9 @@ product BROTHER MFC8600_9650 0x0100 MFC8600/9650 multifunction device
product BTC BTC6100 0x5550 6100C Keyboard
product BTC BTC7932 0x6782 Keyboard with mouse port
+/* CACE Technologies products */
+product CACE AIRPCAPNX 0x0300 AirPcap NX
+
/* Canon, Inc. products */
product CANON N656U 0x2206 CanoScan N656U
product CANON N1220U 0x2207 CanoScan N1220U
@@ -1603,6 +1617,7 @@ product DLINK DUBE100C1 0x1a02 DUB-E100 rev C1
product DLINK DSB650TX4 0x200c 10/100 Ethernet
product DLINK DWL120E 0x3200 DWL-120 rev E
product DLINK DWA125D1 0x330f DWA-125 rev D1
+product DLINK DWA123D1 0x3310 DWA-123 rev D1
product DLINK DWL122 0x3700 DWL-122
product DLINK DWLG120 0x3701 DWL-G120
product DLINK DWL120F 0x3702 DWL-120 rev F
@@ -1642,8 +1657,10 @@ product DLINK DWA131B 0x330d DWA-131 rev B
product DLINK2 RTL8192SU_1 0x3300 RTL8192SU
product DLINK2 RTL8192SU_2 0x3302 RTL8192SU
product DLINK2 DWA131A1 0x3303 DWA-131 A1
+product DLINK2 DWA160A2 0x3a09 DWA-160 A2
product DLINK2 DWA120 0x3a0c DWA-120
product DLINK2 DWA120_NF 0x3a0d DWA-120 (no firmware)
+product DLINK2 DWA130D1 0x3a0f DWA-130 D1
product DLINK2 DWLG122C1 0x3c03 DWL-G122 c1
product DLINK2 WUA1340 0x3c04 WUA-1340
product DLINK2 DWA111 0x3c06 DWA-111
@@ -1654,12 +1671,35 @@ product DLINK2 RT3072_1 0x3c0b RT3072
product DLINK2 RT3070_1 0x3c0d RT3070
product DLINK2 RT3070_2 0x3c0e RT3070
product DLINK2 RT3070_3 0x3c0f RT3070
+product DLINK2 DWA160A1 0x3c10 DWA-160 A1
product DLINK2 RT2870_2 0x3c11 RT2870
product DLINK2 DWA130 0x3c13 DWA-130
product DLINK2 RT3070_4 0x3c15 RT3070
product DLINK2 RT3070_5 0x3c16 RT3070
product DLINK3 DWM652 0x3e04 DWM-652
+/* DisplayLink products */
+product DISPLAYLINK LCD4300U 0x01ba LCD-4300U
+product DISPLAYLINK LCD8000U 0x01bb LCD-8000U
+product DISPLAYLINK LD220 0x0100 Samsung LD220
+product DISPLAYLINK GUC2020 0x0059 IOGEAR DVI GUC2020
+product DISPLAYLINK VCUD60 0x0136 Rextron DVI
+product DISPLAYLINK CONV 0x0138 StarTech CONV-USB2DVI
+product DISPLAYLINK DLDVI 0x0141 DisplayLink DVI
+product DISPLAYLINK VGA10 0x015a CMP-USBVGA10
+product DISPLAYLINK WSDVI 0x0198 WS Tech DVI
+product DISPLAYLINK EC008 0x019b EasyCAP008 DVI
+product DISPLAYLINK HPDOCK 0x01d4 HP USB Docking
+product DISPLAYLINK NL571 0x01d7 HP USB DVI
+product DISPLAYLINK M01061 0x01e2 Lenovo DVI
+product DISPLAYLINK SWDVI 0x024c SUNWEIT DVI
+product DISPLAYLINK NBDOCK 0x0215 VideoHome NBdock1920
+product DISPLAYLINK LUM70 0x02a9 Lilliput UM-70
+product DISPLAYLINK UM7X0 0x401a nanovision MiMo
+product DISPLAYLINK LT1421 0x03e0 Lenovo ThinkVision LT1421
+product DISPLAYLINK POLARIS2 0x0117 Polaris2 USB dock
+product DISPLAYLINK PLUGABLE 0x0377 Plugable docking station
+
/* DMI products */
product DMI CFSM_RW 0xa109 CF/SM Reader/Writer
product DMI DISK 0x2bcf Generic Disk
@@ -1720,6 +1760,7 @@ product ELECOM MOUSE29UO 0x0002 mouse 29UO
product ELECOM LDUSBTX0 0x200c LD-USB/TX
product ELECOM LDUSBTX1 0x4002 LD-USB/TX
product ELECOM LDUSBLTX 0x4005 LD-USBL/TX
+product ELECOM WDC150SU2M 0x4008 WDC-150SU2M
product ELECOM LDUSBTX2 0x400b LD-USB/TX
product ELECOM LDUSB20 0x4010 LD-USB20
product ELECOM UCSGT 0x5003 UC-SGT
@@ -2448,6 +2489,7 @@ product IODATA USBETTXS 0x0913 USB ETTX
product IODATA USBWNB11A 0x0919 USB WN-B11
product IODATA USBWNB11 0x0922 USB Airport WN-B11
product IODATA ETGUS2 0x0930 ETG-US2
+product IODATA WNGDNUS2 0x093f WN-GDN/US2
product IODATA RT3072_1 0x0944 RT3072
product IODATA RT3072_2 0x0945 RT3072
product IODATA RT3072_3 0x0947 RT3072
@@ -2613,6 +2655,7 @@ product LARSENBRUSGAARD ALTITRACK 0x0001 FTDI compatible adapter
product LEADTEK 9531 0x2101 9531 GPS
/* Lenovo products */
+product LENOVO GIGALAN 0x304b USB 3.0 Ethernet
product LENOVO ETHERNET 0x7203 USB 2.0 Ethernet
/* Lexar products */
@@ -3030,6 +3073,11 @@ product MGE UPS2 0xffff MGE UPS SYSTEMS PROTECTIONCENTER 2
product MEI CASHFLOW_SC 0x1100 Cashflow-SC Cash Acceptor
product MEI S2000 0x1101 Series 2000 Combo Acceptor
+/* Microdia / Sonix Technology Co., Ltd. products */
+product CHICONY2 YUREX 0x1010 YUREX
+product CHICONY2 CAM_1 0x62c0 CAM_1
+product CHICONY2 TEMPER 0x7401 TEMPer sensor
+
/* Micro Star International products */
product MSI BT_DONGLE 0x1967 Bluetooth USB dongle
product MSI RT3070_1 0x3820 RT3070
@@ -3182,6 +3230,7 @@ product NATIONAL BEARPAW2400 0x1001 BearPaw 2400
/* NEC products */
product NEC HUB_0050 0x0050 USB 2.0 7-Port Hub
product NEC HUB_005A 0x005a USB 2.0 4-Port Hub
+product NEC WL300NUG 0x0249 WL300NU-G
product NEC HUB 0x55aa hub
product NEC HUB_B 0x55ab hub
@@ -3209,12 +3258,17 @@ product NETGEAR EA101X 0x1002 Ethernet
product NETGEAR FA101 0x1020 Ethernet 10/100, USB1.1
product NETGEAR FA120 0x1040 USB 2.0 Ethernet
product NETGEAR M4100 0x1100 M4100/M5300/M7100 series switch
-product NETGEAR WG111V2_2 0x4240 PrismGT USB 2.0 WLAN
+product NETGEAR WG111V1_2 0x4240 PrismGT USB 2.0 WLAN
product NETGEAR WG111V3 0x4260 WG111v3
product NETGEAR WG111U 0x4300 WG111U
product NETGEAR WG111U_NF 0x4301 WG111U (no firmware)
product NETGEAR WG111V2 0x6a00 WG111V2
+product NETGEAR WN111V2 0x9001 WN111V2
+product NETGEAR WNDA3100 0x9010 WNDA3100
+product NETGEAR WNDA4100 0x9012 WNDA4100
+product NETGEAR WNDA3200 0x9018 WNDA3200
product NETGEAR RTL8192CU 0x9021 RTL8192CU
+product NETGEAR WNA1000 0x9040 WNA1000
product NETGEAR WNA1000M 0x9041 WNA1000M
product NETGEAR2 MA101 0x4100 MA101
product NETGEAR2 MA101B 0x4102 MA101 Rev B
@@ -3478,6 +3532,7 @@ product PLANEX GW_US11H 0x14ea GW-US11H WLAN
product PLANEX2 RTL8188CUS 0x1201 RTL8188CUS
product PLANEX2 GW_US11S 0x3220 GW-US11S WLAN
product PLANEX2 GW_US54GXS 0x5303 GW-US54GXS WLAN
+product PLANEX2 GW_US300 0x5304 GW-US300
product PLANEX2 RTL8188CU_1 0xab2a RTL8188CU
product PLANEX2 RTL8188CU_2 0xed17 RTL8188CU
product PLANEX2 RTL8188CU_3 0x4902 RTL8188CU
@@ -3658,6 +3713,7 @@ product QUALCOMMINC E0076 0x0076 3G modem
product QUALCOMMINC E0078 0x0078 3G modem
product QUALCOMMINC E0082 0x0082 3G modem
product QUALCOMMINC E0086 0x0086 3G modem
+product QUALCOMMINC MF112 0x0103 3G modem
product QUALCOMMINC SURFSTICK 0x0117 1&1 Surf Stick
product QUALCOMMINC K3772_Z_INIT 0x1179 K3772-Z Initial
product QUALCOMMINC K3772_Z 0x1181 K3772-Z
@@ -4395,6 +4451,7 @@ product TREK THUMBDRIVE_8MB 0x9988 ThumbDrive_8MB
/* TRENDnet products */
product TRENDNET RTL8192CU 0x624d RTL8192CU
+product TRENDNET TEW646UBH 0x646b TEW-646UBH
product TRENDNET RTL8188CU 0x648b RTL8188CU
/* Tripp-Lite products */
@@ -4548,8 +4605,10 @@ product WINBOND UH104 0x5518 4-port USB Hub
product WINMAXGROUP FLASH64MC 0x6660 USB Flash Disk 64M-C
/* Wistron NeWeb products */
+product WISTRONNEWEB WNC0600 0x0326 WNC-0600USB
product WISTRONNEWEB UR045G 0x0427 PrismGT USB 2.0 WLAN
product WISTRONNEWEB UR055G 0x0711 UR055G
+product WISTRONNEWEB O8494 0x0804 ORiNOCO 802.11n
product WISTRONNEWEB AR5523_1 0x0826 AR5523
product WISTRONNEWEB AR5523_1_NF 0x0827 AR5523 (no firmware)
product WISTRONNEWEB AR5523_2 0x082a AR5523
@@ -4598,7 +4657,9 @@ product ZCOM AR5523_NF 0x0013 AR5523 driver (no firmware)
product ZCOM XM142 0x0015 XM-142
product ZCOM ZD1211B 0x001a ZD1211B
product ZCOM RT2870_1 0x0022 RT2870
+product ZCOM UB81 0x0023 UB81
product ZCOM RT2870_2 0x0025 RT2870
+product ZCOM UB82 0x0026 UB82
/* Zinwell products */
product ZINWELL RT2570 0x0260 RT2570
@@ -4617,6 +4678,7 @@ product ZORAN EX20DSC 0x4343 Digital Camera EX-20 DSC
/* Zydas Technology Corporation products */
product ZYDAS ZD1211 0x1211 ZD1211 WLAN abg
product ZYDAS ZD1211B 0x1215 ZD1211B
+product ZYDAS ZD1221 0x1221 ZD1221
/* ZyXEL Communication Co. products */
product ZYXEL OMNI56K 0x1500 Omni 56K Plus
@@ -4628,6 +4690,8 @@ product ZYXEL M202 0x340a M-202
product ZYXEL G220V2 0x340f G-220 v2
product ZYXEL G202 0x3410 G-202
product ZYXEL RT2870_1 0x3416 RT2870
+product ZYXEL NWD271N 0x3417 NWD-271N
+product ZYXEL NWD211AN 0x3418 NWD-211AN
product ZYXEL RT2870_2 0x341a RT2870
product ZYXEL RT3070 0x341e NWD2105
product ZYXEL RTL8192CU 0x341f RTL8192CU
diff --git a/sys/dev/usb/wlan/if_run.c b/sys/dev/usb/wlan/if_run.c
index 22cc180..eeb46d3 100644
--- a/sys/dev/usb/wlan/if_run.c
+++ b/sys/dev/usb/wlan/if_run.c
@@ -247,6 +247,7 @@ static const STRUCT_USB_HOST_ID run_devs[] = {
RUN_DEV(MSI, RT3070_9),
RUN_DEV(MSI, RT3070_10),
RUN_DEV(MSI, RT3070_11),
+ RUN_DEV(NETGEAR, WNDA4100),
RUN_DEV(OVISLINK, RT3072),
RUN_DEV(PARA, RT3070),
RUN_DEV(PEGATRON, RT2870),
diff --git a/sys/modules/ixl/Makefile b/sys/modules/ixl/Makefile
index 9b26105..71f4550 100755
--- a/sys/modules/ixl/Makefile
+++ b/sys/modules/ixl/Makefile
@@ -12,7 +12,7 @@ SRCS += if_ixl.c ixl_txrx.c i40e_osdep.c
# Shared source
SRCS += i40e_common.c i40e_nvm.c i40e_adminq.c i40e_lan_hmc.c i40e_hmc.c
-CFLAGS += -DSMP
+CFLAGS += -DSMP -DIXL_DEBUG_SYSCTL
# Add Flow Director support
# CFLAGS += -DIXL_FDIR
diff --git a/sys/modules/ixlv/Makefile b/sys/modules/ixlv/Makefile
index 1b2a4f8..127164c 100755
--- a/sys/modules/ixlv/Makefile
+++ b/sys/modules/ixlv/Makefile
@@ -10,7 +10,7 @@ SRCS += opt_inet.h opt_inet6.h
SRCS += if_ixlv.c ixlvc.c ixl_txrx.c i40e_osdep.c
# Shared source
-SRCS += i40e_common.c i40e_nvm.c i40e_adminq.c i40e_lan_hmc.c i40e_hmc.c
+SRCS += i40e_common.c i40e_nvm.c i40e_adminq.c
CFLAGS += -DSMP
diff --git a/sys/modules/mlx5/Makefile b/sys/modules/mlx5/Makefile
index 4c19c4c..a43dd24 100644
--- a/sys/modules/mlx5/Makefile
+++ b/sys/modules/mlx5/Makefile
@@ -7,6 +7,7 @@ mlx5_alloc.c \
mlx5_cmd.c \
mlx5_cq.c \
mlx5_eq.c \
+mlx5_eswitch_vacl.c \
mlx5_flow_table.c \
mlx5_fw.c \
mlx5_health.c \
diff --git a/sys/modules/mlxen/Makefile b/sys/modules/mlxen/Makefile
index 9c8c0b3..b75e0d0 100644
--- a/sys/modules/mlxen/Makefile
+++ b/sys/modules/mlxen/Makefile
@@ -26,4 +26,4 @@ opt_inet6.h:
.include <bsd.kmod.mk>
-CFLAGS+= -Wno-cast-qual -Wno-pointer-arith ${GCC_MS_EXTENSIONS}
+CFLAGS+= ${GCC_MS_EXTENSIONS}
diff --git a/sys/ofed/drivers/infiniband/hw/mthca/mthca_reset.c b/sys/ofed/drivers/infiniband/hw/mthca/mthca_reset.c
index ab059a6..f571bd6 100644
--- a/sys/ofed/drivers/infiniband/hw/mthca/mthca_reset.c
+++ b/sys/ofed/drivers/infiniband/hw/mthca/mthca_reset.c
@@ -43,9 +43,13 @@ int mthca_reset(struct mthca_dev *mdev)
int i;
int err = 0;
u32 *hca_header = NULL;
+#ifdef __linux__
u32 *bridge_header = NULL;
+#endif
struct pci_dev *bridge = NULL;
+#ifdef __linux__
int bridge_pcix_cap = 0;
+#endif
int hca_pcie_cap = 0;
int hca_pcix_cap = 0;
@@ -195,6 +199,7 @@ int mthca_reset(struct mthca_dev *mdev)
}
good:
+#ifdef __linux__
/* Now restore the PCI headers */
if (bridge) {
if (pci_write_config_dword(bridge, bridge_pcix_cap + 0x8,
@@ -235,6 +240,7 @@ good:
goto out;
}
}
+#endif
if (hca_pcix_cap) {
if (pci_write_config_dword(mdev->pdev, hca_pcix_cap,
@@ -289,8 +295,8 @@ out:
#ifdef __linux__
if (bridge)
pci_dev_put(bridge);
-#endif
kfree(bridge_header);
+#endif
kfree(hca_header);
return err;
diff --git a/sys/ofed/drivers/net/mlx4/en_ethtool.c b/sys/ofed/drivers/net/mlx4/en_ethtool.c
deleted file mode 100644
index 3ebeaf4..0000000
--- a/sys/ofed/drivers/net/mlx4/en_ethtool.c
+++ /dev/null
@@ -1,1616 +0,0 @@
-/*
- * Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/mlx4/driver.h>
-#include <linux/in.h>
-#include <net/ip.h>
-#include <linux/bitmap.h>
-
-#include "mlx4_en.h"
-#include "en_port.h"
-
-#define EN_ETHTOOL_QP_ATTACH (1ull << 63)
-
-union mlx4_ethtool_flow_union {
- struct ethtool_tcpip4_spec tcp_ip4_spec;
- struct ethtool_tcpip4_spec udp_ip4_spec;
- struct ethtool_tcpip4_spec sctp_ip4_spec;
- struct ethtool_ah_espip4_spec ah_ip4_spec;
- struct ethtool_ah_espip4_spec esp_ip4_spec;
- struct ethtool_usrip4_spec usr_ip4_spec;
- struct ethhdr ether_spec;
- __u8 hdata[52];
-};
-
-struct mlx4_ethtool_flow_ext {
- __u8 padding[2];
- unsigned char h_dest[ETH_ALEN];
- __be16 vlan_etype;
- __be16 vlan_tci;
- __be32 data[2];
-};
-
-struct mlx4_ethtool_rx_flow_spec {
- __u32 flow_type;
- union mlx4_ethtool_flow_union h_u;
- struct mlx4_ethtool_flow_ext h_ext;
- union mlx4_ethtool_flow_union m_u;
- struct mlx4_ethtool_flow_ext m_ext;
- __u64 ring_cookie;
- __u32 location;
-};
-
-struct mlx4_ethtool_rxnfc {
- __u32 cmd;
- __u32 flow_type;
- __u64 data;
- struct mlx4_ethtool_rx_flow_spec fs;
- __u32 rule_cnt;
- __u32 rule_locs[0];
-};
-
-#ifndef FLOW_MAC_EXT
-#define FLOW_MAC_EXT 0x40000000
-#endif
-
-static void
-mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
-{
- struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_dev *mdev = priv->mdev;
-
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")",
- sizeof(drvinfo->version));
- snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
- "%d.%d.%d",
- (u16) (mdev->dev->caps.fw_ver >> 32),
- (u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
- (u16) (mdev->dev->caps.fw_ver & 0xffff));
- strlcpy(drvinfo->bus_info, pci_name(mdev->dev->pdev),
- sizeof(drvinfo->bus_info));
- drvinfo->n_stats = 0;
- drvinfo->regdump_len = 0;
- drvinfo->eedump_len = 0;
-}
-
-static const char main_strings[][ETH_GSTRING_LEN] = {
- /* packet statistics */
- "rx_packets",
- "rx_bytes",
- "rx_multicast_packets",
- "rx_broadcast_packets",
- "rx_errors",
- "rx_dropped",
- "rx_length_errors",
- "rx_over_errors",
- "rx_crc_errors",
- "rx_jabbers",
- "rx_in_range_length_error",
- "rx_out_range_length_error",
- "rx_lt_64_bytes_packets",
- "rx_127_bytes_packets",
- "rx_255_bytes_packets",
- "rx_511_bytes_packets",
- "rx_1023_bytes_packets",
- "rx_1518_bytes_packets",
- "rx_1522_bytes_packets",
- "rx_1548_bytes_packets",
- "rx_gt_1548_bytes_packets",
- "tx_packets",
- "tx_bytes",
- "tx_multicast_packets",
- "tx_broadcast_packets",
- "tx_errors",
- "tx_dropped",
- "tx_lt_64_bytes_packets",
- "tx_127_bytes_packets",
- "tx_255_bytes_packets",
- "tx_511_bytes_packets",
- "tx_1023_bytes_packets",
- "tx_1518_bytes_packets",
- "tx_1522_bytes_packets",
- "tx_1548_bytes_packets",
- "tx_gt_1548_bytes_packets",
- "rx_prio_0_packets", "rx_prio_0_bytes",
- "rx_prio_1_packets", "rx_prio_1_bytes",
- "rx_prio_2_packets", "rx_prio_2_bytes",
- "rx_prio_3_packets", "rx_prio_3_bytes",
- "rx_prio_4_packets", "rx_prio_4_bytes",
- "rx_prio_5_packets", "rx_prio_5_bytes",
- "rx_prio_6_packets", "rx_prio_6_bytes",
- "rx_prio_7_packets", "rx_prio_7_bytes",
- "rx_novlan_packets", "rx_novlan_bytes",
- "tx_prio_0_packets", "tx_prio_0_bytes",
- "tx_prio_1_packets", "tx_prio_1_bytes",
- "tx_prio_2_packets", "tx_prio_2_bytes",
- "tx_prio_3_packets", "tx_prio_3_bytes",
- "tx_prio_4_packets", "tx_prio_4_bytes",
- "tx_prio_5_packets", "tx_prio_5_bytes",
- "tx_prio_6_packets", "tx_prio_6_bytes",
- "tx_prio_7_packets", "tx_prio_7_bytes",
- "tx_novlan_packets", "tx_novlan_bytes",
-
- /* flow control statistics */
- "rx_pause_prio_0", "rx_pause_duration_prio_0",
- "rx_pause_transition_prio_0", "tx_pause_prio_0",
- "tx_pause_duration_prio_0", "tx_pause_transition_prio_0",
- "rx_pause_prio_1", "rx_pause_duration_prio_1",
- "rx_pause_transition_prio_1", "tx_pause_prio_1",
- "tx_pause_duration_prio_1", "tx_pause_transition_prio_1",
- "rx_pause_prio_2", "rx_pause_duration_prio_2",
- "rx_pause_transition_prio_2", "tx_pause_prio_2",
- "tx_pause_duration_prio_2", "tx_pause_transition_prio_2",
- "rx_pause_prio_3", "rx_pause_duration_prio_3",
- "rx_pause_transition_prio_3", "tx_pause_prio_3",
- "tx_pause_duration_prio_3", "tx_pause_transition_prio_3",
- "rx_pause_prio_4", "rx_pause_duration_prio_4",
- "rx_pause_transition_prio_4", "tx_pause_prio_4",
- "tx_pause_duration_prio_4", "tx_pause_transition_prio_4",
- "rx_pause_prio_5", "rx_pause_duration_prio_5",
- "rx_pause_transition_prio_5", "tx_pause_prio_5",
- "tx_pause_duration_prio_5", "tx_pause_transition_prio_5",
- "rx_pause_prio_6", "rx_pause_duration_prio_6",
- "rx_pause_transition_prio_6", "tx_pause_prio_6",
- "tx_pause_duration_prio_6", "tx_pause_transition_prio_6",
- "rx_pause_prio_7", "rx_pause_duration_prio_7",
- "rx_pause_transition_prio_7", "tx_pause_prio_7",
- "tx_pause_duration_prio_7", "tx_pause_transition_prio_7",
-
- /* VF statistics */
- "rx_packets",
- "rx_bytes",
- "rx_multicast_packets",
- "rx_broadcast_packets",
- "rx_errors",
- "rx_dropped",
- "tx_packets",
- "tx_bytes",
- "tx_multicast_packets",
- "tx_broadcast_packets",
- "tx_errors",
-
- /* VPort statistics */
- "vport_rx_unicast_packets",
- "vport_rx_unicast_bytes",
- "vport_rx_multicast_packets",
- "vport_rx_multicast_bytes",
- "vport_rx_broadcast_packets",
- "vport_rx_broadcast_bytes",
- "vport_rx_dropped",
- "vport_rx_errors",
- "vport_tx_unicast_packets",
- "vport_tx_unicast_bytes",
- "vport_tx_multicast_packets",
- "vport_tx_multicast_bytes",
- "vport_tx_broadcast_packets",
- "vport_tx_broadcast_bytes",
- "vport_tx_errors",
-
- /* port statistics */
- "tx_tso_packets",
- "tx_queue_stopped", "tx_wake_queue", "tx_timeout", "rx_alloc_failed",
- "rx_csum_good", "rx_csum_none", "tx_chksum_offload",
-};
-
-static const char mlx4_en_test_names[][ETH_GSTRING_LEN]= {
- "Interrupt Test",
- "Link Test",
- "Speed Test",
- "Register Test",
- "Loopback Test",
-};
-
-static u32 mlx4_en_get_msglevel(struct net_device *dev)
-{
- return ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable;
-}
-
-static void mlx4_en_set_msglevel(struct net_device *dev, u32 val)
-{
- ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable = val;
-}
-
-static void mlx4_en_get_wol(struct net_device *netdev,
- struct ethtool_wolinfo *wol)
-{
- struct mlx4_en_priv *priv = netdev_priv(netdev);
- int err = 0;
- u64 config = 0;
- u64 mask;
-
- if ((priv->port < 1) || (priv->port > 2)) {
- en_err(priv, "Failed to get WoL information\n");
- return;
- }
-
- mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
- MLX4_DEV_CAP_FLAG_WOL_PORT2;
-
- if (!(priv->mdev->dev->caps.flags & mask)) {
- wol->supported = 0;
- wol->wolopts = 0;
- return;
- }
-
- err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
- if (err) {
- en_err(priv, "Failed to get WoL information\n");
- return;
- }
-
- if (config & MLX4_EN_WOL_MAGIC)
- wol->supported = WAKE_MAGIC;
- else
- wol->supported = 0;
-
- if (config & MLX4_EN_WOL_ENABLED)
- wol->wolopts = WAKE_MAGIC;
- else
- wol->wolopts = 0;
-}
-
-static int mlx4_en_set_wol(struct net_device *netdev,
- struct ethtool_wolinfo *wol)
-{
- struct mlx4_en_priv *priv = netdev_priv(netdev);
- u64 config = 0;
- int err = 0;
- u64 mask;
-
- if ((priv->port < 1) || (priv->port > 2))
- return -EOPNOTSUPP;
-
- mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
- MLX4_DEV_CAP_FLAG_WOL_PORT2;
-
- if (!(priv->mdev->dev->caps.flags & mask))
- return -EOPNOTSUPP;
-
- if (wol->supported & ~WAKE_MAGIC)
- return -EINVAL;
-
- err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
- if (err) {
- en_err(priv, "Failed to get WoL info, unable to modify\n");
- return err;
- }
-
- if (wol->wolopts & WAKE_MAGIC) {
- config |= MLX4_EN_WOL_DO_MODIFY | MLX4_EN_WOL_ENABLED |
- MLX4_EN_WOL_MAGIC;
- } else {
- config &= ~(MLX4_EN_WOL_ENABLED | MLX4_EN_WOL_MAGIC);
- config |= MLX4_EN_WOL_DO_MODIFY;
- }
-
- err = mlx4_wol_write(priv->mdev->dev, config, priv->port);
- if (err)
- en_err(priv, "Failed to set WoL information\n");
-
- return err;
-}
-
-struct bitmap_sim_iterator {
- bool advance_array;
- unsigned long *stats_bitmap;
- unsigned int count;
- unsigned int j;
-};
-
-static inline void bitmap_sim_iterator_init(struct bitmap_sim_iterator *h,
- unsigned long *stats_bitmap,
- int count)
-{
- h->j = 0;
- h->advance_array = !bitmap_empty(stats_bitmap, count);
- h->count = h->advance_array ? bitmap_weight(stats_bitmap, count)
- : count;
- h->stats_bitmap = stats_bitmap;
-}
-
-static inline int bitmap_sim_iterator_test(struct bitmap_sim_iterator *h)
-{
- return !h->advance_array ? 1 : test_bit(h->j, h->stats_bitmap);
-}
-
-static inline int bitmap_sim_iterator_inc(struct bitmap_sim_iterator *h)
-{
- return h->j++;
-}
-
-static inline unsigned int bitmap_sim_iterator_count(
- struct bitmap_sim_iterator *h)
-{
- return h->count;
-}
-
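The bitmap_sim_iterator above visits every statistic when the selection bitmap is empty, and only the set bits otherwise. A minimal userspace sketch of that selection rule, with a plain uint64_t standing in for the kernel bitmap helpers (all names hypothetical):

#include <stdint.h>
#include <stdio.h>

/* empty bitmap means "report everything", mirroring advance_array above */
static int sel_test(uint64_t bitmap, unsigned int bit)
{
	return (bitmap == 0 || ((bitmap >> bit) & 1));
}

int main(void)
{
	const char *names[] = { "rx_packets", "rx_bytes", "tx_packets" };
	uint64_t sel = 0x5;	/* bits 0 and 2 set */
	unsigned int i;

	for (i = 0; i < 3; i++)
		if (sel_test(sel, i))
			printf("%s\n", names[i]);	/* rx_packets, tx_packets */
	return (0);
}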
-int mlx4_en_get_sset_count(struct net_device *dev, int sset)
-{
- struct mlx4_en_priv *priv = netdev_priv(dev);
- struct bitmap_sim_iterator it;
-
- int num_of_stats = NUM_ALL_STATS -
- ((priv->mdev->dev->caps.flags2 &
- MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) ? 0 : NUM_FLOW_STATS);
-
- bitmap_sim_iterator_init(&it, priv->stats_bitmap, num_of_stats);
-
- switch (sset) {
- case ETH_SS_STATS:
- return bitmap_sim_iterator_count(&it) +
- (priv->tx_ring_num * 2) +
-#ifdef LL_EXTENDED_STATS
- (priv->rx_ring_num * 5);
-#else
- (priv->rx_ring_num * 2);
-#endif
- case ETH_SS_TEST:
- return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
- & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-void mlx4_en_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *stats, u64 *data)
-{
- struct mlx4_en_priv *priv = netdev_priv(dev);
- int index = 0;
- int i;
- struct bitmap_sim_iterator it;
-
- int num_of_stats = NUM_ALL_STATS -
- ((priv->mdev->dev->caps.flags2 &
- MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) ? 0 : NUM_FLOW_STATS);
-
- bitmap_sim_iterator_init(&it, priv->stats_bitmap, num_of_stats);
-
- if (!data || !priv->port_up)
- return;
-
- spin_lock_bh(&priv->stats_lock);
-
- for (i = 0; i < NUM_PKT_STATS; i++,
- bitmap_sim_iterator_inc(&it))
- if (bitmap_sim_iterator_test(&it))
- data[index++] =
- ((unsigned long *)&priv->pkstats)[i];
- for (i = 0; i < NUM_FLOW_STATS; i++,
- bitmap_sim_iterator_inc(&it))
- if (priv->mdev->dev->caps.flags2 &
- MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN)
- if (bitmap_sim_iterator_test(&it))
- data[index++] =
- ((u64 *)&priv->flowstats)[i];
- for (i = 0; i < NUM_VF_STATS; i++,
- bitmap_sim_iterator_inc(&it))
- if (bitmap_sim_iterator_test(&it))
- data[index++] =
- ((unsigned long *)&priv->vf_stats)[i];
- for (i = 0; i < NUM_VPORT_STATS; i++,
- bitmap_sim_iterator_inc(&it))
- if (bitmap_sim_iterator_test(&it))
- data[index++] =
- ((unsigned long *)&priv->vport_stats)[i];
- for (i = 0; i < NUM_PORT_STATS; i++,
- bitmap_sim_iterator_inc(&it))
- if (bitmap_sim_iterator_test(&it))
- data[index++] =
- ((unsigned long *)&priv->port_stats)[i];
-
- for (i = 0; i < priv->tx_ring_num; i++) {
- data[index++] = priv->tx_ring[i]->packets;
- data[index++] = priv->tx_ring[i]->bytes;
- }
- for (i = 0; i < priv->rx_ring_num; i++) {
- data[index++] = priv->rx_ring[i]->packets;
- data[index++] = priv->rx_ring[i]->bytes;
-#ifdef LL_EXTENDED_STATS
- data[index++] = priv->rx_ring[i]->yields;
- data[index++] = priv->rx_ring[i]->misses;
- data[index++] = priv->rx_ring[i]->cleaned;
-#endif
- }
- spin_unlock_bh(&priv->stats_lock);
-
-}
-
-void mlx4_en_restore_ethtool_stats(struct mlx4_en_priv *priv, u64 *data)
-{
- int index = 0;
- int i;
- struct bitmap_sim_iterator it;
-
- int num_of_stats = NUM_ALL_STATS -
- ((priv->mdev->dev->caps.flags2 &
- MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) ? 0 : NUM_FLOW_STATS);
-
- bitmap_sim_iterator_init(&it, priv->stats_bitmap, num_of_stats);
-
- if (!data || !priv->port_up)
- return;
-
- spin_lock_bh(&priv->stats_lock);
-
- for (i = 0; i < NUM_PKT_STATS; i++,
- bitmap_sim_iterator_inc(&it))
- if (bitmap_sim_iterator_test(&it))
- ((unsigned long *)&priv->pkstats)[i] =
- data[index++];
- for (i = 0; i < NUM_FLOW_STATS; i++,
- bitmap_sim_iterator_inc(&it))
- if (priv->mdev->dev->caps.flags2 &
- MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN)
- if (bitmap_sim_iterator_test(&it))
- ((u64 *)&priv->flowstats)[i] =
- data[index++];
- for (i = 0; i < NUM_VF_STATS; i++,
- bitmap_sim_iterator_inc(&it))
- if (bitmap_sim_iterator_test(&it))
- ((unsigned long *)&priv->vf_stats)[i] =
- data[index++];
- for (i = 0; i < NUM_VPORT_STATS; i++,
- bitmap_sim_iterator_inc(&it))
- if (bitmap_sim_iterator_test(&it))
- ((unsigned long *)&priv->vport_stats)[i] =
- data[index++];
- for (i = 0; i < NUM_PORT_STATS; i++,
- bitmap_sim_iterator_inc(&it))
- if (bitmap_sim_iterator_test(&it))
- ((unsigned long *)&priv->port_stats)[i] =
- data[index++];
-
- for (i = 0; i < priv->tx_ring_num; i++) {
- priv->tx_ring[i]->packets = data[index++];
- priv->tx_ring[i]->bytes = data[index++];
- }
- for (i = 0; i < priv->rx_ring_num; i++) {
- priv->rx_ring[i]->packets = data[index++];
- priv->rx_ring[i]->bytes = data[index++];
- }
- spin_unlock_bh(&priv->stats_lock);
-}
-
-static void mlx4_en_self_test(struct net_device *dev,
- struct ethtool_test *etest, u64 *buf)
-{
- mlx4_en_ex_selftest(dev, &etest->flags, buf);
-}
-
-static void mlx4_en_get_strings(struct net_device *dev,
- uint32_t stringset, uint8_t *data)
-{
- struct mlx4_en_priv *priv = netdev_priv(dev);
- int index = 0;
- int i, k;
- struct bitmap_sim_iterator it;
-
- int num_of_stats = NUM_ALL_STATS -
- ((priv->mdev->dev->caps.flags2 &
- MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) ? 0 : NUM_FLOW_STATS);
-
- bitmap_sim_iterator_init(&it, priv->stats_bitmap, num_of_stats);
-
- switch (stringset) {
- case ETH_SS_TEST:
- for (i = 0; i < MLX4_EN_NUM_SELF_TEST - 2; i++)
- strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
- if (priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK)
- for (; i < MLX4_EN_NUM_SELF_TEST; i++)
- strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
- break;
-
- case ETH_SS_STATS:
- /* Add main counters */
- for (i = 0; i < NUM_PKT_STATS; i++,
- bitmap_sim_iterator_inc(&it))
- if (bitmap_sim_iterator_test(&it))
- strcpy(data + (index++) * ETH_GSTRING_LEN,
- main_strings[i]);
-
- for (k = 0; k < NUM_FLOW_STATS; k++,
- bitmap_sim_iterator_inc(&it))
- if (priv->mdev->dev->caps.flags2 &
- MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN)
- if (bitmap_sim_iterator_test(&it))
- strcpy(data + (index++) *
- ETH_GSTRING_LEN,
- main_strings[i + k]);
-
- for (; (i + k) < num_of_stats; i++,
- bitmap_sim_iterator_inc(&it))
- if (bitmap_sim_iterator_test(&it))
- strcpy(data + (index++) * ETH_GSTRING_LEN,
- main_strings[i + k]);
-
- for (i = 0; i < priv->tx_ring_num; i++) {
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "tx%d_packets", i);
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "tx%d_bytes", i);
- }
- for (i = 0; i < priv->rx_ring_num; i++) {
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "rx%d_packets", i);
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "rx%d_bytes", i);
-#ifdef LL_EXTENDED_STATS
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "rx%d_napi_yield", i);
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "rx%d_misses", i);
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "rx%d_cleaned", i);
-#endif
- }
- break;
- }
-}
-
-static u32 mlx4_en_autoneg_get(struct net_device *dev)
-{
- struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_dev *mdev = priv->mdev;
- u32 autoneg = AUTONEG_DISABLE;
-
- if ((mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP) &&
- priv->port_state.autoneg) {
- autoneg = AUTONEG_ENABLE;
- }
-
- return autoneg;
-}
-
-static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct mlx4_en_priv *priv = netdev_priv(dev);
- int trans_type;
-
- /* SUPPORTED_1000baseT_Half isn't supported */
- cmd->supported = SUPPORTED_1000baseT_Full
- |SUPPORTED_10000baseT_Full;
-
- cmd->advertising = ADVERTISED_1000baseT_Full
- |ADVERTISED_10000baseT_Full;
-
- cmd->supported |= SUPPORTED_1000baseKX_Full
- |SUPPORTED_10000baseKX4_Full
- |SUPPORTED_10000baseKR_Full
- |SUPPORTED_10000baseR_FEC
- |SUPPORTED_40000baseKR4_Full
- |SUPPORTED_40000baseCR4_Full
- |SUPPORTED_40000baseSR4_Full
- |SUPPORTED_40000baseLR4_Full;
-
- /* ADVERTISED_1000baseT_Half isn't advertised */
- cmd->advertising |= ADVERTISED_1000baseKX_Full
- |ADVERTISED_10000baseKX4_Full
- |ADVERTISED_10000baseKR_Full
- |ADVERTISED_10000baseR_FEC
- |ADVERTISED_40000baseKR4_Full
- |ADVERTISED_40000baseCR4_Full
- |ADVERTISED_40000baseSR4_Full
- |ADVERTISED_40000baseLR4_Full;
-
- if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
- return -ENOMEM;
-
- cmd->autoneg = mlx4_en_autoneg_get(dev);
- if (cmd->autoneg == AUTONEG_ENABLE) {
- cmd->supported |= SUPPORTED_Autoneg;
- cmd->advertising |= ADVERTISED_Autoneg;
- }
-
- trans_type = priv->port_state.transciver;
- if (netif_carrier_ok(dev)) {
- ethtool_cmd_speed_set(cmd, priv->port_state.link_speed);
- cmd->duplex = DUPLEX_FULL;
- } else {
- ethtool_cmd_speed_set(cmd, -1);
- cmd->duplex = -1;
- }
-
- if (trans_type > 0 && trans_type <= 0xC) {
- cmd->port = PORT_FIBRE;
- cmd->transceiver = XCVR_EXTERNAL;
- cmd->supported |= SUPPORTED_FIBRE;
- cmd->advertising |= ADVERTISED_FIBRE;
- } else if (trans_type == 0x80 || trans_type == 0) {
- cmd->port = PORT_TP;
- cmd->transceiver = XCVR_INTERNAL;
- cmd->supported |= SUPPORTED_TP;
- cmd->advertising |= ADVERTISED_TP;
- } else {
- cmd->port = -1;
- cmd->transceiver = -1;
- }
- return 0;
-}
-
-static const char *mlx4_en_duplex_to_string(int duplex)
-{
- switch (duplex) {
- case DUPLEX_FULL:
- return "FULL";
- case DUPLEX_HALF:
- return "HALF";
- default:
- break;
- }
- return "UNKNOWN";
-}
-
-static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_port_state *port_state = &priv->port_state;
-
- if ((cmd->autoneg != port_state->autoneg) ||
- (ethtool_cmd_speed(cmd) != port_state->link_speed) ||
- (cmd->duplex != DUPLEX_FULL)) {
- en_info(priv, "Changing port state properties (auto-negotiation"
- " , speed/duplex) is not supported. Current:"
- " auto-negotiation=%d speed/duplex=%d/%s\n",
- port_state->autoneg, port_state->link_speed,
- mlx4_en_duplex_to_string(DUPLEX_FULL));
- return -EOPNOTSUPP;
- }
-
- /* User provided the same port state properties that are currently set.
- * Nothing to change
- */
- return 0;
-}
-
-static int mlx4_en_get_coalesce(struct net_device *dev,
- struct ethtool_coalesce *coal)
-{
- struct mlx4_en_priv *priv = netdev_priv(dev);
-
- coal->tx_coalesce_usecs = priv->tx_usecs;
- coal->tx_max_coalesced_frames = priv->tx_frames;
- coal->rx_coalesce_usecs = priv->rx_usecs;
- coal->rx_max_coalesced_frames = priv->rx_frames;
-
- coal->pkt_rate_low = priv->pkt_rate_low;
- coal->rx_coalesce_usecs_low = priv->rx_usecs_low;
- coal->pkt_rate_high = priv->pkt_rate_high;
- coal->rx_coalesce_usecs_high = priv->rx_usecs_high;
- coal->rate_sample_interval = priv->sample_interval;
- coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal;
- return 0;
-}
-
-static int mlx4_en_set_coalesce(struct net_device *dev,
- struct ethtool_coalesce *coal)
-{
- struct mlx4_en_priv *priv = netdev_priv(dev);
- int err, i;
-
- priv->rx_frames = (coal->rx_max_coalesced_frames ==
- MLX4_EN_AUTO_CONF) ?
- MLX4_EN_RX_COAL_TARGET /
- priv->dev->mtu + 1 :
- coal->rx_max_coalesced_frames;
- priv->rx_usecs = (coal->rx_coalesce_usecs ==
- MLX4_EN_AUTO_CONF) ?
- MLX4_EN_RX_COAL_TIME :
- coal->rx_coalesce_usecs;
-
- /* Setting TX coalescing parameters */
- if (coal->tx_coalesce_usecs != priv->tx_usecs ||
- coal->tx_max_coalesced_frames != priv->tx_frames) {
- priv->tx_usecs = coal->tx_coalesce_usecs;
- priv->tx_frames = coal->tx_max_coalesced_frames;
- if (priv->port_up) {
- for (i = 0; i < priv->tx_ring_num; i++) {
- priv->tx_cq[i]->moder_cnt = priv->tx_frames;
- priv->tx_cq[i]->moder_time = priv->tx_usecs;
- if (mlx4_en_set_cq_moder(priv, priv->tx_cq[i]))
- en_warn(priv, "Failed changing moderation for TX cq %d\n", i);
- }
- }
- }
-
- /* Set adaptive coalescing params */
- priv->pkt_rate_low = coal->pkt_rate_low;
- priv->rx_usecs_low = coal->rx_coalesce_usecs_low;
- priv->pkt_rate_high = coal->pkt_rate_high;
- priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
- priv->sample_interval = coal->rate_sample_interval;
- priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
- if (priv->adaptive_rx_coal)
- return 0;
-
- if (priv->port_up) {
- for (i = 0; i < priv->rx_ring_num; i++) {
- priv->rx_cq[i]->moder_cnt = priv->rx_frames;
- priv->rx_cq[i]->moder_time = priv->rx_usecs;
- priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
- err = mlx4_en_set_cq_moder(priv, priv->rx_cq[i]);
- if (err)
- return err;
- }
- }
-
- return 0;
-}
-
-static int mlx4_en_set_pauseparam(struct net_device *dev,
- struct ethtool_pauseparam *pause)
-{
- struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_dev *mdev = priv->mdev;
- int err;
-
- if (pause->autoneg)
- return -EOPNOTSUPP;
-
- priv->prof->tx_pause = pause->tx_pause != 0;
- priv->prof->rx_pause = pause->rx_pause != 0;
- err = mlx4_SET_PORT_general(mdev->dev, priv->port,
- priv->rx_skb_size + ETH_FCS_LEN,
- priv->prof->tx_pause,
- priv->prof->tx_ppp,
- priv->prof->rx_pause,
- priv->prof->rx_ppp);
- if (err)
- en_err(priv, "Failed setting pause params\n");
-
- return err;
-}
-
-static void mlx4_en_get_pauseparam(struct net_device *dev,
- struct ethtool_pauseparam *pause)
-{
- struct mlx4_en_priv *priv = netdev_priv(dev);
-
- pause->tx_pause = priv->prof->tx_pause;
- pause->rx_pause = priv->prof->rx_pause;
- pause->autoneg = mlx4_en_autoneg_get(dev);
-}
-
-/* rtnl lock must be taken before calling */
-int mlx4_en_pre_config(struct mlx4_en_priv *priv)
-{
-#ifdef CONFIG_RFS_ACCEL
- struct cpu_rmap *rmap;
-
- if (!priv->dev->rx_cpu_rmap)
- return 0;
-
- /* Disable RFS events
- * Must have all RFS jobs flushed before freeing resources
- */
- rmap = priv->dev->rx_cpu_rmap;
- priv->dev->rx_cpu_rmap = NULL;
-
- rtnl_unlock();
- free_irq_cpu_rmap(rmap);
- rtnl_lock();
-
- if (priv->dev->rx_cpu_rmap)
- return -EBUSY; /* another configuration completed while lock
- * was free
- */
-
- /* Make sure all currently running filter_work items are processed.
- * Other work will return immediately because of disable_rfs
- */
- flush_workqueue(priv->mdev->workqueue);
-
-#endif
-
- return 0;
-}
-
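The function above temporarily drops rtnl so that free_irq_cpu_rmap() can sleep, then rechecks rx_cpu_rmap in case another configuration raced in while the lock was free. A minimal sketch of that unlock/work/recheck pattern, assuming a pthread mutex stands in for rtnl (all names hypothetical):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;
static void *shared_map;	/* stands in for dev->rx_cpu_rmap */

/* called with cfg_lock held; returns with it held, like mlx4_en_pre_config */
static int teardown_map(void)
{
	void *map = shared_map;

	if (map == NULL)
		return (0);
	shared_map = NULL;		/* stop new users first */

	pthread_mutex_unlock(&cfg_lock);
	free(map);			/* blocking work without the lock */
	pthread_mutex_lock(&cfg_lock);

	/* another configuration may have completed while the lock was free */
	return (shared_map != NULL ? -1 : 0);
}

int main(void)
{
	shared_map = malloc(16);
	pthread_mutex_lock(&cfg_lock);
	(void)teardown_map();
	pthread_mutex_unlock(&cfg_lock);
	return (0);
}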
-static int mlx4_en_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *param)
-{
- struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_dev *mdev = priv->mdev;
- u32 rx_size, tx_size;
- int port_up = 0;
- int err = 0;
- int i, n_stats;
- u64 *data = NULL;
-
- if (!priv->port_up)
- return -ENOMEM;
-
- if (param->rx_jumbo_pending || param->rx_mini_pending)
- return -EINVAL;
-
- rx_size = roundup_pow_of_two(param->rx_pending);
- rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
- rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE);
- tx_size = roundup_pow_of_two(param->tx_pending);
- tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
- tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);
-
- if (rx_size == (priv->port_up ? priv->rx_ring[0]->actual_size :
- priv->rx_ring[0]->size) &&
- tx_size == priv->tx_ring[0]->size)
- return 0;
- err = mlx4_en_pre_config(priv);
- if (err)
- return err;
-
- mutex_lock(&mdev->state_lock);
- if (priv->port_up) {
- port_up = 1;
- mlx4_en_stop_port(dev);
- }
-
- /* Cache port statistics */
- n_stats = mlx4_en_get_sset_count(dev, ETH_SS_STATS);
- if (n_stats > 0) {
- data = kmalloc(n_stats * sizeof(u64), GFP_KERNEL);
- if (data)
- mlx4_en_get_ethtool_stats(dev, NULL, data);
- }
-
- mlx4_en_free_resources(priv);
-
- priv->prof->tx_ring_size = tx_size;
- priv->prof->rx_ring_size = rx_size;
-
- err = mlx4_en_alloc_resources(priv);
- if (err) {
- en_err(priv, "Failed reallocating port resources\n");
- goto out;
- }
-
- /* Restore port statistics */
- if (n_stats > 0 && data)
- mlx4_en_restore_ethtool_stats(priv, data);
-
- if (port_up) {
- err = mlx4_en_start_port(dev);
- if (err) {
- en_err(priv, "Failed starting port\n");
- goto out;
- }
-
- for (i = 0; i < priv->rx_ring_num; i++) {
- priv->rx_cq[i]->moder_cnt = priv->rx_frames;
- priv->rx_cq[i]->moder_time = priv->rx_usecs;
- priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
- err = mlx4_en_set_cq_moder(priv, priv->rx_cq[i]);
- if (err)
- goto out;
- }
- }
-
-out:
- kfree(data);
- mutex_unlock(&mdev->state_lock);
- return err;
-}
-
-static void mlx4_en_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *param)
-{
- struct mlx4_en_priv *priv = netdev_priv(dev);
-
- if (!priv->port_up)
- return;
-
- memset(param, 0, sizeof(*param));
- param->rx_max_pending = MLX4_EN_MAX_RX_SIZE;
- param->tx_max_pending = MLX4_EN_MAX_TX_SIZE;
- param->rx_pending = priv->port_up ?
- priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size;
- param->tx_pending = priv->tx_ring[0]->size;
-}
-
-static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
-{
- struct mlx4_en_priv *priv = netdev_priv(dev);
-
- return priv->rx_ring_num;
-}
-
-static int mlx4_en_get_rxfh_indir(struct net_device *dev, u32 *ring_index)
-{
- struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_rss_map *rss_map = &priv->rss_map;
- int rss_rings;
- size_t n = priv->rx_ring_num;
- int err = 0;
-
- rss_rings = priv->prof->rss_rings ?: priv->rx_ring_num;
- rss_rings = 1 << ilog2(rss_rings);
-
- while (n--) {
- ring_index[n] = rss_map->qps[n % rss_rings].qpn -
- rss_map->base_qpn;
- }
-
- return err;
-}
-
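The indirection table filled above is simply the ring index taken modulo the active RSS ring count. A standalone illustration with hypothetical sizes:

#include <stdio.h>

int main(void)
{
	unsigned int table[8];
	unsigned int n, rss_rings = 4;	/* must be a power of 2 */

	for (n = 0; n < 8; n++)
		table[n] = n % rss_rings;
	for (n = 0; n < 8; n++)
		printf("%u ", table[n]);	/* 0 1 2 3 0 1 2 3 */
	printf("\n");
	return (0);
}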
-static int mlx4_en_set_rxfh_indir(struct net_device *dev,
- const u32 *ring_index)
-{
- struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_dev *mdev = priv->mdev;
- int port_up = 0;
- int err = 0;
- int i;
- int rss_rings = 0;
-
- /* Calculate RSS table size and make sure flows are spread evenly
- * between rings
- */
- for (i = 0; i < priv->rx_ring_num; i++) {
- if (i > 0 && !ring_index[i] && !rss_rings)
- rss_rings = i;
-
- if (ring_index[i] != (i % (rss_rings ?: priv->rx_ring_num)))
- return -EINVAL;
- }
-
- if (!rss_rings)
- rss_rings = priv->rx_ring_num;
-
- /* RSS table size must be a power of 2 */
- if (!is_power_of_2(rss_rings))
- return -EINVAL;
-
- mutex_lock(&mdev->state_lock);
- if (priv->port_up) {
- port_up = 1;
- mlx4_en_stop_port(dev);
- }
-
- priv->prof->rss_rings = rss_rings;
-
- if (port_up) {
- err = mlx4_en_start_port(dev);
- if (err)
- en_err(priv, "Failed starting port\n");
- }
-
- mutex_unlock(&mdev->state_lock);
- return err;
-}
-
-#define all_zeros_or_all_ones(field) \
- ((field) == 0 || (field) == (__force typeof(field))-1)
-
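all_zeros_or_all_ones() accepts only masks that are entirely clear ("don't care") or entirely set ("match exactly"); anything partial is rejected by the validation below. A quick check of that behaviour, assuming a GCC/Clang-style typeof:

#include <assert.h>
#include <stdint.h>

#define all_zeros_or_all_ones(field) \
	((field) == 0 || (field) == (typeof(field))-1)

int main(void)
{
	uint16_t any = 0x0000, exact = 0xffff, partial = 0x0fff;

	assert(all_zeros_or_all_ones(any));
	assert(all_zeros_or_all_ones(exact));
	assert(!all_zeros_or_all_ones(partial));
	return (0);
}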
-static int mlx4_en_validate_flow(struct net_device *dev,
- struct mlx4_ethtool_rxnfc *cmd)
-{
- struct ethtool_usrip4_spec *l3_mask;
- struct ethtool_tcpip4_spec *l4_mask;
- struct ethhdr *eth_mask;
-
- if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
- return -EINVAL;
-
- if (cmd->fs.flow_type & FLOW_MAC_EXT) {
- /* dest mac mask must be ff:ff:ff:ff:ff:ff */
- if (!is_broadcast_ether_addr(cmd->fs.m_ext.h_dest))
- return -EINVAL;
- }
-
- switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
- case TCP_V4_FLOW:
- case UDP_V4_FLOW:
- if (cmd->fs.m_u.tcp_ip4_spec.tos)
- return -EINVAL;
- l4_mask = &cmd->fs.m_u.tcp_ip4_spec;
- /* don't allow mask which isn't all 0 or 1 */
- if (!all_zeros_or_all_ones(l4_mask->ip4src) ||
- !all_zeros_or_all_ones(l4_mask->ip4dst) ||
- !all_zeros_or_all_ones(l4_mask->psrc) ||
- !all_zeros_or_all_ones(l4_mask->pdst))
- return -EINVAL;
- break;
- case IP_USER_FLOW:
- l3_mask = &cmd->fs.m_u.usr_ip4_spec;
- if (l3_mask->l4_4_bytes || l3_mask->tos || l3_mask->proto ||
- cmd->fs.h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4 ||
- (!l3_mask->ip4src && !l3_mask->ip4dst) ||
- !all_zeros_or_all_ones(l3_mask->ip4src) ||
- !all_zeros_or_all_ones(l3_mask->ip4dst))
- return -EINVAL;
- break;
- case ETHER_FLOW:
- eth_mask = &cmd->fs.m_u.ether_spec;
- /* source mac mask must not be set */
- if (!is_zero_ether_addr(eth_mask->h_source))
- return -EINVAL;
-
- /* dest mac mask must be ff:ff:ff:ff:ff:ff */
- if (!is_broadcast_ether_addr(eth_mask->h_dest))
- return -EINVAL;
-
- if (!all_zeros_or_all_ones(eth_mask->h_proto))
- return -EINVAL;
- break;
- default:
- return -EINVAL;
- }
-
- if ((cmd->fs.flow_type & FLOW_EXT)) {
- if (cmd->fs.m_ext.vlan_etype ||
- !(cmd->fs.m_ext.vlan_tci == 0 ||
- cmd->fs.m_ext.vlan_tci == cpu_to_be16(0xfff)))
- return -EINVAL;
- if (cmd->fs.m_ext.vlan_tci) {
- if (be16_to_cpu(cmd->fs.h_ext.vlan_tci) <
- VLAN_MIN_VALUE ||
- be16_to_cpu(cmd->fs.h_ext.vlan_tci) >
- VLAN_MAX_VALUE)
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-static int mlx4_en_ethtool_add_mac_rule(struct mlx4_ethtool_rxnfc *cmd,
- struct list_head *rule_list_h,
- struct mlx4_spec_list *spec_l2,
- unsigned char *mac)
-{
- int err = 0;
- __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
-
- spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH;
- memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN);
- memcpy(spec_l2->eth.dst_mac, mac, ETH_ALEN);
-
- if ((cmd->fs.flow_type & FLOW_EXT) && cmd->fs.m_ext.vlan_tci) {
- spec_l2->eth.vlan_id = cmd->fs.h_ext.vlan_tci;
- spec_l2->eth.vlan_id_msk = cpu_to_be16(0xfff);
- }
-
- list_add_tail(&spec_l2->list, rule_list_h);
-
- return err;
-}
-
-static int mlx4_en_ethtool_add_mac_rule_by_ipv4(struct mlx4_en_priv *priv,
- struct mlx4_ethtool_rxnfc *cmd,
- struct list_head *rule_list_h,
- struct mlx4_spec_list *spec_l2,
- __be32 ipv4_dst)
-{
- unsigned char mac[ETH_ALEN];
-
- if (!ipv4_is_multicast(ipv4_dst)) {
- if (cmd->fs.flow_type & FLOW_MAC_EXT)
- memcpy(&mac, cmd->fs.h_ext.h_dest, ETH_ALEN);
- else
- memcpy(&mac, priv->dev->dev_addr, ETH_ALEN);
- } else {
- ip_eth_mc_map(ipv4_dst, mac);
- }
-
- return mlx4_en_ethtool_add_mac_rule(cmd, rule_list_h, spec_l2, &mac[0]);
-}
-
-static int add_ip_rule(struct mlx4_en_priv *priv,
- struct mlx4_ethtool_rxnfc *cmd,
- struct list_head *list_h)
-{
- struct mlx4_spec_list *spec_l2 = NULL;
- struct mlx4_spec_list *spec_l3 = NULL;
- struct ethtool_usrip4_spec *l3_mask = &cmd->fs.m_u.usr_ip4_spec;
-
- spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL);
- spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
- if (!spec_l2 || !spec_l3) {
- en_err(priv, "Fail to alloc ethtool rule.\n");
- kfree(spec_l2);
- kfree(spec_l3);
- return -ENOMEM;
- }
-
- mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h, spec_l2,
- cmd->fs.h_u.
- usr_ip4_spec.ip4dst);
- spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
- spec_l3->ipv4.src_ip = cmd->fs.h_u.usr_ip4_spec.ip4src;
- if (l3_mask->ip4src)
- spec_l3->ipv4.src_ip_msk = MLX4_BE_WORD_MASK;
- spec_l3->ipv4.dst_ip = cmd->fs.h_u.usr_ip4_spec.ip4dst;
- if (l3_mask->ip4dst)
- spec_l3->ipv4.dst_ip_msk = MLX4_BE_WORD_MASK;
- list_add_tail(&spec_l3->list, list_h);
-
- return 0;
-}
-
-static int add_tcp_udp_rule(struct mlx4_en_priv *priv,
- struct mlx4_ethtool_rxnfc *cmd,
- struct list_head *list_h, int proto)
-{
- struct mlx4_spec_list *spec_l2 = NULL;
- struct mlx4_spec_list *spec_l3 = NULL;
- struct mlx4_spec_list *spec_l4 = NULL;
- struct ethtool_tcpip4_spec *l4_mask = &cmd->fs.m_u.tcp_ip4_spec;
-
- spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
- spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL);
- spec_l4 = kzalloc(sizeof(*spec_l4), GFP_KERNEL);
- if (!spec_l2 || !spec_l3 || !spec_l4) {
- en_err(priv, "Fail to alloc ethtool rule.\n");
- kfree(spec_l2);
- kfree(spec_l3);
- kfree(spec_l4);
- return -ENOMEM;
- }
-
- spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
-
- if (proto == TCP_V4_FLOW) {
- mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h,
- spec_l2,
- cmd->fs.h_u.
- tcp_ip4_spec.ip4dst);
- spec_l4->id = MLX4_NET_TRANS_RULE_ID_TCP;
- spec_l3->ipv4.src_ip = cmd->fs.h_u.tcp_ip4_spec.ip4src;
- spec_l3->ipv4.dst_ip = cmd->fs.h_u.tcp_ip4_spec.ip4dst;
- spec_l4->tcp_udp.src_port = cmd->fs.h_u.tcp_ip4_spec.psrc;
- spec_l4->tcp_udp.dst_port = cmd->fs.h_u.tcp_ip4_spec.pdst;
- } else {
- mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h,
- spec_l2,
- cmd->fs.h_u.
- udp_ip4_spec.ip4dst);
- spec_l4->id = MLX4_NET_TRANS_RULE_ID_UDP;
- spec_l3->ipv4.src_ip = cmd->fs.h_u.udp_ip4_spec.ip4src;
- spec_l3->ipv4.dst_ip = cmd->fs.h_u.udp_ip4_spec.ip4dst;
- spec_l4->tcp_udp.src_port = cmd->fs.h_u.udp_ip4_spec.psrc;
- spec_l4->tcp_udp.dst_port = cmd->fs.h_u.udp_ip4_spec.pdst;
- }
-
- if (l4_mask->ip4src)
- spec_l3->ipv4.src_ip_msk = MLX4_BE_WORD_MASK;
- if (l4_mask->ip4dst)
- spec_l3->ipv4.dst_ip_msk = MLX4_BE_WORD_MASK;
-
- if (l4_mask->psrc)
- spec_l4->tcp_udp.src_port_msk = MLX4_BE_SHORT_MASK;
- if (l4_mask->pdst)
- spec_l4->tcp_udp.dst_port_msk = MLX4_BE_SHORT_MASK;
-
- list_add_tail(&spec_l3->list, list_h);
- list_add_tail(&spec_l4->list, list_h);
-
- return 0;
-}
-
-static int mlx4_en_ethtool_to_net_trans_rule(struct net_device *dev,
- struct mlx4_ethtool_rxnfc *cmd,
- struct list_head *rule_list_h)
-{
- int err;
- struct ethhdr *eth_spec;
- struct mlx4_spec_list *spec_l2;
- struct mlx4_en_priv *priv = netdev_priv(dev);
-
- err = mlx4_en_validate_flow(dev, cmd);
- if (err)
- return err;
-
- switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
- case ETHER_FLOW:
- spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
- if (!spec_l2)
- return -ENOMEM;
-
- eth_spec = &cmd->fs.h_u.ether_spec;
- mlx4_en_ethtool_add_mac_rule(cmd, rule_list_h, spec_l2, &eth_spec->h_dest[0]);
- spec_l2->eth.ether_type = eth_spec->h_proto;
- if (eth_spec->h_proto)
- spec_l2->eth.ether_type_enable = 1;
- break;
- case IP_USER_FLOW:
- err = add_ip_rule(priv, cmd, rule_list_h);
- break;
- case TCP_V4_FLOW:
- err = add_tcp_udp_rule(priv, cmd, rule_list_h, TCP_V4_FLOW);
- break;
- case UDP_V4_FLOW:
- err = add_tcp_udp_rule(priv, cmd, rule_list_h, UDP_V4_FLOW);
- break;
- }
-
- return err;
-}
-
-static int mlx4_en_flow_replace(struct net_device *dev,
- struct mlx4_ethtool_rxnfc *cmd)
-{
- int err;
- struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_dev *mdev = priv->mdev;
- struct ethtool_flow_id *loc_rule;
- struct mlx4_spec_list *spec, *tmp_spec;
- u32 qpn;
- u64 reg_id;
-
- struct mlx4_net_trans_rule rule = {
- .queue_mode = MLX4_NET_TRANS_Q_FIFO,
- .exclusive = 0,
- .allow_loopback = 1,
- .promisc_mode = MLX4_FS_REGULAR,
- };
-
- rule.port = priv->port;
- rule.priority = MLX4_DOMAIN_ETHTOOL | cmd->fs.location;
- INIT_LIST_HEAD(&rule.list);
-
- /* Allow direct QP attaches if the EN_ETHTOOL_QP_ATTACH flag is set */
- if (cmd->fs.ring_cookie == RX_CLS_FLOW_DISC)
- qpn = priv->drop_qp.qpn;
- else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) {
- qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1);
- } else {
- if (cmd->fs.ring_cookie >= priv->rx_ring_num) {
- en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist.\n",
- cmd->fs.ring_cookie);
- return -EINVAL;
- }
- qpn = priv->rss_map.qps[cmd->fs.ring_cookie].qpn;
- if (!qpn) {
- en_warn(priv, "rxnfc: RX ring (%llu) is inactive.\n",
- cmd->fs.ring_cookie);
- return -EINVAL;
- }
- }
- rule.qpn = qpn;
- err = mlx4_en_ethtool_to_net_trans_rule(dev, cmd, &rule.list);
- if (err)
- goto out_free_list;
-
- mutex_lock(&mdev->state_lock);
- loc_rule = &priv->ethtool_rules[cmd->fs.location];
- if (loc_rule->id) {
- err = mlx4_flow_detach(priv->mdev->dev, loc_rule->id);
- if (err) {
- en_err(priv, "Fail to detach network rule at location %d. registration id = %llx\n",
- cmd->fs.location, loc_rule->id);
- goto unlock;
- }
- loc_rule->id = 0;
- memset(&loc_rule->flow_spec, 0,
- sizeof(struct ethtool_rx_flow_spec));
- list_del(&loc_rule->list);
- }
- err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id);
- if (err) {
- en_err(priv, "Fail to attach network rule at location %d.\n",
- cmd->fs.location);
- goto unlock;
- }
- loc_rule->id = reg_id;
- memcpy(&loc_rule->flow_spec, &cmd->fs,
- sizeof(struct ethtool_rx_flow_spec));
- list_add_tail(&loc_rule->list, &priv->ethtool_list);
-
-unlock:
- mutex_unlock(&mdev->state_lock);
-out_free_list:
- list_for_each_entry_safe(spec, tmp_spec, &rule.list, list) {
- list_del(&spec->list);
- kfree(spec);
- }
- return err;
-}
-
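mlx4_en_flow_replace steers a matched flow to the drop QP, to a raw QP number (when bit 63 of the ring cookie is set), or to an RX ring index. The cookie decoding in isolation, with hypothetical values:

#include <stdint.h>
#include <stdio.h>

#define QP_ATTACH	(1ULL << 63)	/* mirrors EN_ETHTOOL_QP_ATTACH */

int main(void)
{
	uint64_t cookie = QP_ATTACH | 0x48;	/* attach directly to QP 0x48 */

	if (cookie & QP_ATTACH)
		printf("raw qpn 0x%llx\n",
		    (unsigned long long)(cookie & (QP_ATTACH - 1)));
	else
		printf("rx ring %llu\n", (unsigned long long)cookie);
	return (0);
}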
-static int mlx4_en_flow_detach(struct net_device *dev,
- struct mlx4_ethtool_rxnfc *cmd)
-{
- int err = 0;
- struct ethtool_flow_id *rule;
- struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_dev *mdev = priv->mdev;
-
- if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
- return -EINVAL;
-
- mutex_lock(&mdev->state_lock);
- rule = &priv->ethtool_rules[cmd->fs.location];
- if (!rule->id) {
- err = -ENOENT;
- goto out;
- }
-
- err = mlx4_flow_detach(priv->mdev->dev, rule->id);
- if (err) {
- en_err(priv, "Fail to detach network rule at location %d. registration id = 0x%llx\n",
- cmd->fs.location, rule->id);
- goto out;
- }
- rule->id = 0;
- memset(&rule->flow_spec, 0, sizeof(struct ethtool_rx_flow_spec));
-
- list_del(&rule->list);
-out:
- mutex_unlock(&mdev->state_lock);
- return err;
-
-}
-
-static int mlx4_en_get_flow(struct net_device *dev, struct mlx4_ethtool_rxnfc *cmd,
- int loc)
-{
- int err = 0;
- struct ethtool_flow_id *rule;
- struct mlx4_en_priv *priv = netdev_priv(dev);
-
- if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES)
- return -EINVAL;
-
- rule = &priv->ethtool_rules[loc];
- if (rule->id)
- memcpy(&cmd->fs, &rule->flow_spec,
- sizeof(struct ethtool_rx_flow_spec));
- else
- err = -ENOENT;
-
- return err;
-}
-
-static int mlx4_en_get_num_flows(struct mlx4_en_priv *priv)
-{
-
- int i, res = 0;
- for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
- if (priv->ethtool_rules[i].id)
- res++;
- }
- return res;
-
-}
-
-static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *c,
- u32 *rule_locs)
-{
- struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_dev *mdev = priv->mdev;
- int err = 0;
- int i = 0, priority = 0;
- struct mlx4_ethtool_rxnfc *cmd = (struct mlx4_ethtool_rxnfc *)c;
-
- if ((cmd->cmd == ETHTOOL_GRXCLSRLCNT ||
- cmd->cmd == ETHTOOL_GRXCLSRULE ||
- cmd->cmd == ETHTOOL_GRXCLSRLALL) &&
- (mdev->dev->caps.steering_mode !=
- MLX4_STEERING_MODE_DEVICE_MANAGED || !priv->port_up))
- return -EINVAL;
-
- switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = priv->rx_ring_num;
- break;
- case ETHTOOL_GRXCLSRLCNT:
- cmd->rule_cnt = mlx4_en_get_num_flows(priv);
- break;
- case ETHTOOL_GRXCLSRULE:
- err = mlx4_en_get_flow(dev, cmd, cmd->fs.location);
- break;
- case ETHTOOL_GRXCLSRLALL:
- while ((!err || err == -ENOENT) && priority < cmd->rule_cnt) {
- err = mlx4_en_get_flow(dev, cmd, i);
- if (!err)
- rule_locs[priority++] = i;
- i++;
- }
- err = 0;
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
-
- return err;
-}
-
-static int mlx4_en_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *c)
-{
- int err = 0;
- struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_dev *mdev = priv->mdev;
- struct mlx4_ethtool_rxnfc *cmd = (struct mlx4_ethtool_rxnfc *)c;
-
- if (mdev->dev->caps.steering_mode !=
- MLX4_STEERING_MODE_DEVICE_MANAGED || !priv->port_up)
- return -EINVAL;
-
- switch (cmd->cmd) {
- case ETHTOOL_SRXCLSRLINS:
- err = mlx4_en_flow_replace(dev, cmd);
- break;
- case ETHTOOL_SRXCLSRLDEL:
- err = mlx4_en_flow_detach(dev, cmd);
- break;
- default:
- en_warn(priv, "Unsupported ethtool command. (%d)\n", cmd->cmd);
- return -EINVAL;
- }
-
- return err;
-}
-
-static void mlx4_en_get_channels(struct net_device *dev,
- struct ethtool_channels *channel)
-{
- struct mlx4_en_priv *priv = netdev_priv(dev);
-
- memset(channel, 0, sizeof(*channel));
-
- channel->max_rx = MAX_RX_RINGS;
- channel->max_tx = MLX4_EN_MAX_TX_RING_P_UP;
-
- channel->rx_count = priv->rx_ring_num;
- channel->tx_count = priv->tx_ring_num / MLX4_EN_NUM_UP;
-}
-
-static int mlx4_en_set_channels(struct net_device *dev,
- struct ethtool_channels *channel)
-{
- struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_dev *mdev = priv->mdev;
- int port_up = 0;
- int i;
- int err = 0;
-
- if (channel->other_count || channel->combined_count ||
- channel->tx_count > MLX4_EN_MAX_TX_RING_P_UP ||
- channel->rx_count > MAX_RX_RINGS ||
- !channel->tx_count || !channel->rx_count)
- return -EINVAL;
-
- err = mlx4_en_pre_config(priv);
- if (err)
- return err;
-
- mutex_lock(&mdev->state_lock);
- if (priv->port_up) {
- port_up = 1;
- mlx4_en_stop_port(dev);
- }
-
- mlx4_en_free_resources(priv);
-
- priv->num_tx_rings_p_up = channel->tx_count;
- priv->tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP;
- priv->rx_ring_num = channel->rx_count;
-
- err = mlx4_en_alloc_resources(priv);
- if (err) {
- en_err(priv, "Failed reallocating port resources\n");
- goto out;
- }
-
- netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
- netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
-
- mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP);
-
- en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num);
- en_warn(priv, "Using %d RX rings\n", priv->rx_ring_num);
-
- if (port_up) {
- err = mlx4_en_start_port(dev);
- if (err)
- en_err(priv, "Failed starting port\n");
-
- for (i = 0; i < priv->rx_ring_num; i++) {
- priv->rx_cq[i]->moder_cnt = priv->rx_frames;
- priv->rx_cq[i]->moder_time = priv->rx_usecs;
- priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
- err = mlx4_en_set_cq_moder(priv, priv->rx_cq[i]);
- if (err)
- goto out;
- }
- }
-
-out:
- mutex_unlock(&mdev->state_lock);
- return err;
-}
-
-static int mlx4_en_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
-{
- struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_dev *mdev = priv->mdev;
- int ret;
-
- ret = ethtool_op_get_ts_info(dev, info);
- if (ret)
- return ret;
-
- if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
- info->so_timestamping |=
- SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
-
- info->tx_types =
- (1 << HWTSTAMP_TX_OFF) |
- (1 << HWTSTAMP_TX_ON);
-
- info->rx_filters =
- (1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_ALL);
- }
-
- return ret;
-}
-
-const struct ethtool_ops mlx4_en_ethtool_ops = {
- .get_drvinfo = mlx4_en_get_drvinfo,
- .get_settings = mlx4_en_get_settings,
- .set_settings = mlx4_en_set_settings,
- .get_link = ethtool_op_get_link,
- .get_strings = mlx4_en_get_strings,
- .get_sset_count = mlx4_en_get_sset_count,
- .get_ethtool_stats = mlx4_en_get_ethtool_stats,
- .self_test = mlx4_en_self_test,
- .get_wol = mlx4_en_get_wol,
- .set_wol = mlx4_en_set_wol,
- .get_msglevel = mlx4_en_get_msglevel,
- .set_msglevel = mlx4_en_set_msglevel,
- .get_coalesce = mlx4_en_get_coalesce,
- .set_coalesce = mlx4_en_set_coalesce,
- .get_pauseparam = mlx4_en_get_pauseparam,
- .set_pauseparam = mlx4_en_set_pauseparam,
- .get_ringparam = mlx4_en_get_ringparam,
- .set_ringparam = mlx4_en_set_ringparam,
- .get_rxnfc = mlx4_en_get_rxnfc,
- .set_rxnfc = mlx4_en_set_rxnfc,
- .get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size,
- .get_rxfh_indir = mlx4_en_get_rxfh_indir,
- .set_rxfh_indir = mlx4_en_set_rxfh_indir,
- .get_channels = mlx4_en_get_channels,
- .set_channels = mlx4_en_set_channels,
- .get_ts_info = mlx4_en_get_ts_info,
-};
-
-
-
-
-
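The const ethtool_ops table that closes the deleted file is plain function-pointer dispatch; the core of the pattern, reduced to a hypothetical two-entry ops structure:

#include <stdio.h>

struct demo_ops {
	unsigned int (*get_msglevel)(void *priv);
	void (*set_msglevel)(void *priv, unsigned int val);
};

static unsigned int demo_get(void *priv)
{
	return (*(unsigned int *)priv);
}

static void demo_set(void *priv, unsigned int val)
{
	*(unsigned int *)priv = val;
}

static const struct demo_ops ops = {
	.get_msglevel = demo_get,
	.set_msglevel = demo_set,
};

int main(void)
{
	unsigned int level = 0;

	ops.set_msglevel(&level, 7);
	printf("msglevel=%u\n", ops.get_msglevel(&level));
	return (0);
}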
diff --git a/sys/ofed/drivers/net/mlx4/en_main.c b/sys/ofed/drivers/net/mlx4/en_main.c
index 757afe0..09c1824 100644
--- a/sys/ofed/drivers/net/mlx4/en_main.c
+++ b/sys/ofed/drivers/net/mlx4/en_main.c
@@ -42,14 +42,7 @@
#include "mlx4_en.h"
-MODULE_AUTHOR("Liran Liss, Yevgeny Petrilin");
-MODULE_DESCRIPTION("Mellanox ConnectX HCA Ethernet driver");
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRV_VERSION " ("DRV_RELDATE")");
-
-static const char mlx4_en_version[] =
- DRV_NAME ": Mellanox ConnectX HCA Ethernet driver v"
- DRV_VERSION " (" DRV_RELDATE ")\n";
+/* Mellanox ConnectX HCA Ethernet driver */
#define MLX4_EN_PARM_INT(X, def_val, desc) \
static unsigned int X = def_val;\
@@ -174,8 +167,6 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
int i;
int err;
- printk_once(KERN_INFO "%s", mlx4_en_version);
-
mdev = kzalloc(sizeof *mdev, GFP_KERNEL);
if (!mdev) {
dev_err(&dev->pdev->dev, "Device struct alloc failed, "
diff --git a/sys/ofed/drivers/net/mlx4/en_netdev.c b/sys/ofed/drivers/net/mlx4/en_netdev.c
index d8b2f4f..a9d87bb 100644
--- a/sys/ofed/drivers/net/mlx4/en_netdev.c
+++ b/sys/ofed/drivers/net/mlx4/en_netdev.c
@@ -659,8 +659,10 @@ static void mlx4_en_cache_mclist(struct net_device *dev)
continue;
/* Make sure the list didn't grow. */
tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
- if (tmp == NULL)
+ if (tmp == NULL) {
+ en_err(priv, "Failed to allocate multicast list\n");
break;
+ }
memcpy(tmp->addr,
LLADDR((struct sockaddr_dl *)ifma->ifma_addr), ETH_ALEN);
list_add_tail(&tmp->list, &priv->mc_list);
@@ -971,12 +973,12 @@ static void mlx4_en_do_set_rx_mode(struct work_struct *work)
if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
if (priv->port_state.link_state) {
priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
- /* Important note: the following call for if_link_state_change
- * is needed for interface up scenario (start port, link state
- * change) */
/* update netif baudrate */
priv->dev->if_baudrate =
IF_Mbps(priv->port_state.link_speed);
+ /* Important note: the following call for if_link_state_change
+ * is needed for interface up scenario (start port, link state
+ * change) */
if_link_state_change(priv->dev, LINK_STATE_UP);
en_dbg(HW, priv, "Link Up\n");
}
@@ -1196,8 +1198,8 @@ static void mlx4_en_linkstate(struct work_struct *work)
/* update netif baudrate */
priv->dev->if_baudrate = 0;
- /* make sure the port is up before notifying the OS.
- * This is tricky since we get here on INIT_PORT and
+ /* make sure the port is up before notifying the OS.
+ * This is tricky since we get here on INIT_PORT and
* in such case we can't tell the OS the port is up.
* To solve this there is a call to if_link_state_change
* in set_rx_mode.
@@ -1246,7 +1248,6 @@ int mlx4_en_start_port(struct net_device *dev)
PAGE_SIZE);
priv->rx_alloc_order = get_order(priv->rx_alloc_size);
priv->rx_buf_size = roundup_pow_of_two(priv->rx_mb_size);
- priv->log_rx_info = ROUNDUP_LOG2(sizeof(struct mlx4_en_rx_buf));
en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_mb_size);
/* Configure rx cq's and rings */
@@ -1575,6 +1576,7 @@ static void mlx4_en_clear_stats(struct net_device *dev)
priv->tx_ring[i]->bytes = 0;
priv->tx_ring[i]->packets = 0;
priv->tx_ring[i]->tx_csum = 0;
+ priv->tx_ring[i]->oversized_packets = 0;
}
for (i = 0; i < priv->rx_ring_num; i++) {
priv->rx_ring[i]->bytes = 0;
@@ -1644,8 +1646,6 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
if (priv->sysctl)
sysctl_ctx_free(&priv->stat_ctx);
-
-
}
int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
@@ -1730,8 +1730,11 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
EVENTHANDLER_DEREGISTER(vlan_unconfig, priv->vlan_detach);
/* Unregister device - this will close the port if it was up */
- if (priv->registered)
+ if (priv->registered) {
+ mutex_lock(&mdev->state_lock);
ether_ifdetach(dev);
+ mutex_unlock(&mdev->state_lock);
+ }
if (priv->allocated)
mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
@@ -1809,13 +1812,6 @@ static int mlx4_en_calc_media(struct mlx4_en_priv *priv)
active = IFM_ETHER;
if (priv->last_link_state == MLX4_DEV_EVENT_PORT_DOWN)
return (active);
- /*
- * [ShaharK] mlx4_en_QUERY_PORT sleeps and cannot be called under a
- * non-sleepable lock.
- * I moved it to the periodic mlx4_en_do_get_stats.
- if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
- return (active);
- */
active |= IFM_FDX;
trans_type = priv->port_state.transciver;
/* XXX I don't know all of the transceiver values. */
@@ -1948,12 +1944,55 @@ static int mlx4_en_ioctl(struct ifnet *dev, u_long command, caddr_t data)
case SIOCSIFCAP:
mutex_lock(&mdev->state_lock);
mask = ifr->ifr_reqcap ^ dev->if_capenable;
- if (mask & IFCAP_HWCSUM)
- dev->if_capenable ^= IFCAP_HWCSUM;
- if (mask & IFCAP_TSO4)
+ if (mask & IFCAP_TXCSUM) {
+ dev->if_capenable ^= IFCAP_TXCSUM;
+ dev->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
+
+ if (IFCAP_TSO4 & dev->if_capenable &&
+ !(IFCAP_TXCSUM & dev->if_capenable)) {
+ dev->if_capenable &= ~IFCAP_TSO4;
+ dev->if_hwassist &= ~CSUM_IP_TSO;
+ if_printf(dev,
+ "tso4 disabled due to -txcsum.\n");
+ }
+ }
+ if (mask & IFCAP_TXCSUM_IPV6) {
+ dev->if_capenable ^= IFCAP_TXCSUM_IPV6;
+ dev->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
+
+ if (IFCAP_TSO6 & dev->if_capenable &&
+ !(IFCAP_TXCSUM_IPV6 & dev->if_capenable)) {
+ dev->if_capenable &= ~IFCAP_TSO6;
+ dev->if_hwassist &= ~CSUM_IP6_TSO;
+ if_printf(dev,
+ "tso6 disabled due to -txcsum6.\n");
+ }
+ }
+ if (mask & IFCAP_RXCSUM)
+ dev->if_capenable ^= IFCAP_RXCSUM;
+ if (mask & IFCAP_RXCSUM_IPV6)
+ dev->if_capenable ^= IFCAP_RXCSUM_IPV6;
+
+ if (mask & IFCAP_TSO4) {
+ if (!(IFCAP_TSO4 & dev->if_capenable) &&
+ !(IFCAP_TXCSUM & dev->if_capenable)) {
+ if_printf(dev, "enable txcsum first.\n");
+ error = EAGAIN;
+ goto out;
+ }
dev->if_capenable ^= IFCAP_TSO4;
- if (mask & IFCAP_TSO6)
+ dev->if_hwassist ^= CSUM_IP_TSO;
+ }
+ if (mask & IFCAP_TSO6) {
+ if (!(IFCAP_TSO6 & dev->if_capenable) &&
+ !(IFCAP_TXCSUM_IPV6 & dev->if_capenable)) {
+ if_printf(dev, "enable txcsum6 first.\n");
+ error = EAGAIN;
+ goto out;
+ }
dev->if_capenable ^= IFCAP_TSO6;
+ dev->if_hwassist ^= CSUM_IP6_TSO;
+ }
if (mask & IFCAP_LRO)
dev->if_capenable ^= IFCAP_LRO;
if (mask & IFCAP_VLAN_HWTAGGING)
@@ -1964,9 +2003,11 @@ static int mlx4_en_ioctl(struct ifnet *dev, u_long command, caddr_t data)
dev->if_capenable ^= IFCAP_WOL_MAGIC;
if (dev->if_drv_flags & IFF_DRV_RUNNING)
mlx4_en_start_port(dev);
+out:
mutex_unlock(&mdev->state_lock);
VLAN_CAPABILITIES(dev);
break;
+#if __FreeBSD_version >= 1100036
case SIOCGI2C: {
struct ifi2creq i2c;
@@ -1990,6 +2031,7 @@ static int mlx4_en_ioctl(struct ifnet *dev, u_long command, caddr_t data)
error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
break;
}
+#endif
default:
error = ether_ioctl(dev, command, data);
break;
@@ -2049,8 +2091,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->port = port;
priv->port_up = false;
priv->flags = prof->flags;
- priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
- MLX4_WQE_CTRL_SOLICITED);
priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
priv->tx_ring_num = prof->tx_ring_num;
@@ -2066,7 +2106,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
err = -ENOMEM;
goto out;
}
-
+
priv->rx_ring_num = prof->rx_ring_num;
priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
priv->mac_index = -1;
@@ -2089,7 +2129,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
INIT_HLIST_HEAD(&priv->mac_hash[i]);
-
/* Query for default mac and max mtu */
priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
priv->mac = mdev->dev->caps.def_mac[priv->port];
@@ -2105,8 +2144,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
goto out;
}
-
-
priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
DS_SIZE);
@@ -2128,7 +2165,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
/*
* Set driver features
*/
- dev->if_capabilities |= IFCAP_RXCSUM | IFCAP_TXCSUM;
+ dev->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6;
dev->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
dev->if_capabilities |= IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWFILTER;
dev->if_capabilities |= IFCAP_LINKSTATE | IFCAP_JUMBO_MTU;
@@ -2138,9 +2175,9 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6 | IFCAP_VLAN_HWTSO;
/* set TSO limits so that we don't have to drop TX packets */
- dev->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
- dev->if_hw_tsomaxsegcount = 16;
- dev->if_hw_tsomaxsegsize = 65536; /* XXX can do up to 4GByte */
+ dev->if_hw_tsomax = MLX4_EN_TX_MAX_PAYLOAD_SIZE - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) /* hdr */;
+ dev->if_hw_tsomaxsegcount = MLX4_EN_TX_MAX_MBUF_FRAGS - 1 /* hdr */;
+ dev->if_hw_tsomaxsegsize = MLX4_EN_TX_MAX_MBUF_SIZE;
dev->if_capenable = dev->if_capabilities;
@@ -2149,6 +2186,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->if_hwassist |= CSUM_TSO;
if (dev->if_capenable & IFCAP_TXCSUM)
dev->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP);
+ if (dev->if_capenable & IFCAP_TXCSUM_IPV6)
+ dev->if_hwassist |= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
/* Register for VLAN events */
@@ -2211,8 +2250,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
queue_delayed_work(mdev->workqueue, &priv->service_task, SERVICE_TASK_DELAY);
-
-
return 0;
out:
@@ -2294,6 +2331,162 @@ static int mlx4_en_set_tx_ring_size(SYSCTL_HANDLER_ARGS)
return (error);
}
+static int mlx4_en_get_module_info(struct net_device *dev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int ret;
+ u8 data[4];
+
+ /* Read first 2 bytes to get Module & REV ID */
+ ret = mlx4_get_module_info(mdev->dev, priv->port,
+ 0/*offset*/, 2/*size*/, data);
+
+ if (ret < 2) {
+ en_err(priv, "Failed to read eeprom module first two bytes, error: 0x%x\n", -ret);
+ return -EIO;
+ }
+
+ switch (data[0] /* identifier */) {
+ case MLX4_MODULE_ID_QSFP:
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ break;
+ case MLX4_MODULE_ID_QSFP_PLUS:
+ if (data[1] >= 0x3) { /* revision id */
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ }
+ break;
+ case MLX4_MODULE_ID_QSFP28:
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ break;
+ case MLX4_MODULE_ID_SFP:
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ break;
+ default:
+ en_err(priv, "mlx4_en_get_module_info : Not recognized cable type\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mlx4_en_get_module_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *ee,
+ u8 *data)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int offset = ee->offset;
+ int i = 0, ret;
+
+ if (ee->len == 0)
+ return -EINVAL;
+
+ memset(data, 0, ee->len);
+
+ while (i < ee->len) {
+ en_dbg(DRV, priv,
+ "mlx4_get_module_info i(%d) offset(%d) len(%d)\n",
+ i, offset, ee->len - i);
+
+ ret = mlx4_get_module_info(mdev->dev, priv->port,
+ offset, ee->len - i, data + i);
+
+ if (!ret) /* Done reading */
+ return 0;
+
+ if (ret < 0) {
+ en_err(priv,
+ "mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n",
+ i, offset, ee->len - i, ret);
+ return -1;
+ }
+
+ i += ret;
+ offset += ret;
+ }
+ return 0;
+}
+
+static void mlx4_en_print_eeprom(u8 *data, __u32 len)
+{
+ int i;
+ int j = 0;
+ int row = 0;
+ const int NUM_OF_BYTES = 16;
+
+ printf("\nOffset\t\tValues\n");
+ printf("------\t\t------\n");
+ while (row < len) {
+ printf("0x%04x\t\t", row);
+ for (i = 0; i < NUM_OF_BYTES && row < len; i++) {
+ printf("%02x ", data[j]);
+ row++;
+ j++;
+ }
+ printf("\n");
+ }
+}
+
+/* Read cable EEPROM module information by first inspecting the first
+ * two bytes to get the length and then read the rest of the information.
+ * The information is printed to dmesg. */
+static int mlx4_en_read_eeprom(SYSCTL_HANDLER_ARGS)
+{
+
+ u8* data;
+ int error;
+ int result = 0;
+ struct mlx4_en_priv *priv;
+ struct net_device *dev;
+ struct ethtool_modinfo modinfo;
+ struct ethtool_eeprom ee;
+
+ error = sysctl_handle_int(oidp, &result, 0, req);
+ if (error || !req->newptr)
+ return (error);
+
+ if (result == 1) {
+ priv = arg1;
+ dev = priv->dev;
+ data = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (data == NULL)
+ return (ENOMEM);
+
+ error = mlx4_en_get_module_info(dev, &modinfo);
+ if (error) {
+ en_err(priv,
+ "mlx4_en_get_module_info returned with error - FAILED (0x%x)\n",
+ -error);
+ goto out;
+ }
+
+ ee.len = modinfo.eeprom_len;
+ ee.offset = 0;
+
+ error = mlx4_en_get_module_eeprom(dev, &ee, data);
+ if (error) {
+ en_err(priv,
+ "mlx4_en_get_module_eeprom returned with error - FAILED (0x%x)\n",
+ -error);
+ /* Continue printing partial information in case of an error */
+ }
+
+ /* EEPROM information will be printed in dmesg */
+ mlx4_en_print_eeprom(data, ee.len);
+out:
+ kfree(data);
+ }
+ /* Return zero to prevent sysctl failure. */
+ return (0);
+}
+
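A standalone userspace analogue of the 16-bytes-per-row dump routine above, exercised against a hypothetical buffer whose length is not a multiple of 16:

#include <stdint.h>
#include <stdio.h>

static void hexdump(const uint8_t *data, size_t len)
{
	size_t row, i;

	printf("Offset\t\tValues\n");
	for (row = 0; row < len; row += 16) {
		printf("0x%04zx\t\t", row);
		for (i = row; i < row + 16 && i < len; i++)
			printf("%02x ", data[i]);
		printf("\n");
	}
}

int main(void)
{
	uint8_t buf[20];
	size_t i;

	for (i = 0; i < sizeof(buf); i++)
		buf[i] = (uint8_t)i;
	hexdump(buf, sizeof(buf));
	return (0);
}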
static int mlx4_en_set_tx_ppp(SYSCTL_HANDLER_ARGS)
{
struct mlx4_en_priv *priv;
@@ -2419,7 +2612,7 @@ static void mlx4_en_sysctl_conf(struct mlx4_en_priv *priv)
/* Add coalescer configuration. */
coal = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO,
"coalesce", CTLFLAG_RD, NULL, "Interrupt coalesce configuration");
- coal_list = SYSCTL_CHILDREN(node);
+ coal_list = SYSCTL_CHILDREN(coal);
SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "pkt_rate_low",
CTLFLAG_RW, &priv->pkt_rate_low, 0,
"Packets per-second for minimum delay");
@@ -2438,11 +2631,14 @@ static void mlx4_en_sysctl_conf(struct mlx4_en_priv *priv)
SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "adaptive_rx_coal",
CTLFLAG_RW, &priv->adaptive_rx_coal, 0,
"Enable adaptive rx coalescing");
+ /* EEPROM support */
+ SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "eeprom_info",
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
+ mlx4_en_read_eeprom, "I", "EEPROM information");
}
static void mlx4_en_sysctl_stat(struct mlx4_en_priv *priv)
{
- struct net_device *dev;
struct sysctl_ctx_list *ctx;
struct sysctl_oid *node;
struct sysctl_oid_list *node_list;
@@ -2453,8 +2649,6 @@ static void mlx4_en_sysctl_stat(struct mlx4_en_priv *priv)
char namebuf[128];
int i;
- dev = priv->dev;
-
ctx = &priv->stat_ctx;
sysctl_ctx_init(ctx);
node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(priv->sysctl), OID_AUTO,
@@ -2482,6 +2676,8 @@ static void mlx4_en_sysctl_stat(struct mlx4_en_priv *priv)
&priv->port_stats.wake_queue, "Queue resumed after full");
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_timeout", CTLFLAG_RD,
&priv->port_stats.tx_timeout, "Transmit timeouts");
+ SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_oversized_packets", CTLFLAG_RD,
+ &priv->port_stats.oversized_packets, "TX oversized packets, m_defrag failed");
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_alloc_failed", CTLFLAG_RD,
&priv->port_stats.rx_alloc_failed, "RX failed to allocate mbuf");
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_chksum_good", CTLFLAG_RD,
@@ -2565,7 +2761,7 @@ struct mlx4_en_pkt_stats {
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_packets", CTLFLAG_RD,
&priv->pkstats.tx_packets, "TX packets");
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_bytes", CTLFLAG_RD,
- &priv->pkstats.tx_packets, "TX Bytes");
+ &priv->pkstats.tx_bytes, "TX Bytes");
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_multicast_packets", CTLFLAG_RD,
&priv->pkstats.tx_multicast_packets, "TX Multicast Packets");
SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_broadcast_packets", CTLFLAG_RD,
@@ -2606,8 +2802,8 @@ struct mlx4_en_pkt_stats {
CTLFLAG_RD, &tx_ring->packets, "TX packets");
SYSCTL_ADD_ULONG(ctx, ring_list, OID_AUTO, "bytes",
CTLFLAG_RD, &tx_ring->bytes, "TX bytes");
-
}
+
for (i = 0; i < priv->rx_ring_num; i++) {
rx_ring = priv->rx_ring[i];
snprintf(namebuf, sizeof(namebuf), "rx_ring%d", i);
diff --git a/sys/ofed/drivers/net/mlx4/en_port.c b/sys/ofed/drivers/net/mlx4/en_port.c
index 645c9c4..6c71962 100644
--- a/sys/ofed/drivers/net/mlx4/en_port.c
+++ b/sys/ofed/drivers/net/mlx4/en_port.c
@@ -194,6 +194,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
priv->port_stats.tx_chksum_offload += priv->tx_ring[i]->tx_csum;
priv->port_stats.queue_stopped += priv->tx_ring[i]->queue_stopped;
priv->port_stats.wake_queue += priv->tx_ring[i]->wake_queue;
+ priv->port_stats.oversized_packets += priv->tx_ring[i]->oversized_packets;
}
/* RX Statistics */
priv->pkstats.rx_packets = be64_to_cpu(mlx4_en_stats->RTOT_prio_0) +
diff --git a/sys/ofed/drivers/net/mlx4/en_rx.c b/sys/ofed/drivers/net/mlx4/en_rx.c
index e011b87..b57bd26 100644
--- a/sys/ofed/drivers/net/mlx4/en_rx.c
+++ b/sys/ofed/drivers/net/mlx4/en_rx.c
@@ -49,106 +49,135 @@ static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring,
int index)
{
- struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
+ struct mlx4_en_rx_desc *rx_desc = (struct mlx4_en_rx_desc *)
+ (ring->buf + (ring->stride * index));
int possible_frags;
int i;
-
/* Set size and memtype fields */
- for (i = 0; i < priv->num_frags; i++) {
- rx_desc->data[i].byte_count =
- cpu_to_be32(priv->frag_info[i].frag_size);
- rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
- }
-
- /* If the number of used fragments does not fill up the ring stride,
- * * remaining (unused) fragments must be padded with null address/size
- * * and a special memory key */
+ rx_desc->data[0].byte_count = cpu_to_be32(priv->rx_mb_size);
+ rx_desc->data[0].lkey = cpu_to_be32(priv->mdev->mr.key);
+
+ /*
+ * If the number of used fragments does not fill up the ring
+ * stride, remaining (unused) fragments must be padded with
+ * null address/size and a special memory key:
+ */
possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
- for (i = priv->num_frags; i < possible_frags; i++) {
+ for (i = 1; i < possible_frags; i++) {
rx_desc->data[i].byte_count = 0;
rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
rx_desc->data[i].addr = 0;
}
-
}
-static int mlx4_en_alloc_buf(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_desc *rx_desc,
- struct mbuf **mb_list,
- int i)
+static int
+mlx4_en_alloc_buf(struct mlx4_en_rx_ring *ring,
+ __be64 *pdma, struct mlx4_en_rx_mbuf *mb_list)
{
- struct mlx4_en_dev *mdev = priv->mdev;
- struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
+ bus_dma_segment_t segs[1];
+ bus_dmamap_t map;
struct mbuf *mb;
- dma_addr_t dma;
+ int nsegs;
+ int err;
- if (i == 0)
- mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, frag_info->frag_size);
- else
- mb = m_getjcl(M_NOWAIT, MT_DATA, 0, frag_info->frag_size);
- if (mb == NULL) {
- priv->port_stats.rx_alloc_failed++;
- return -ENOMEM;
+ /* try to allocate a new spare mbuf */
+ if (unlikely(ring->spare.mbuf == NULL)) {
+ mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, ring->rx_mb_size);
+ if (unlikely(mb == NULL))
+ return (-ENOMEM);
+ /* setup correct length */
+ mb->m_len = ring->rx_mb_size;
+
+ /* load spare mbuf into BUSDMA */
+ err = -bus_dmamap_load_mbuf_sg(ring->dma_tag, ring->spare.dma_map,
+ mb, segs, &nsegs, BUS_DMA_NOWAIT);
+ if (unlikely(err != 0)) {
+ m_freem(mb);
+ return (err);
+ }
+
+ /* store spare info */
+ ring->spare.mbuf = mb;
+ ring->spare.paddr_be = cpu_to_be64(segs[0].ds_addr);
+
+ bus_dmamap_sync(ring->dma_tag, ring->spare.dma_map,
+ BUS_DMASYNC_PREREAD);
+ }
+
+ /* synchronize and unload the current mbuf, if any */
+ if (likely(mb_list->mbuf != NULL)) {
+ bus_dmamap_sync(ring->dma_tag, mb_list->dma_map,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(ring->dma_tag, mb_list->dma_map);
}
- dma = pci_map_single(mdev->pdev, mb->m_data, frag_info->frag_size,
- PCI_DMA_FROMDEVICE);
- rx_desc->data[i].addr = cpu_to_be64(dma);
- mb_list[i] = mb;
- return 0;
-}
+ mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, ring->rx_mb_size);
+ if (unlikely(mb == NULL))
+ goto use_spare;
-static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring *ring, int index)
-{
- struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
- struct mbuf **mb_list = ring->rx_info + (index << priv->log_rx_info);
- int i;
+ /* setup correct length */
+ mb->m_len = ring->rx_mb_size;
- for (i = 0; i < priv->num_frags; i++)
- if (mlx4_en_alloc_buf(priv, rx_desc, mb_list, i))
- goto err;
+ err = -bus_dmamap_load_mbuf_sg(ring->dma_tag, mb_list->dma_map,
+ mb, segs, &nsegs, BUS_DMA_NOWAIT);
+ if (unlikely(err != 0)) {
+ m_freem(mb);
+ goto use_spare;
+ }
+
+ *pdma = cpu_to_be64(segs[0].ds_addr);
+ mb_list->mbuf = mb;
+
+ bus_dmamap_sync(ring->dma_tag, mb_list->dma_map, BUS_DMASYNC_PREREAD);
+ return (0);
- return 0;
+use_spare:
+ /* swap DMA maps */
+ map = mb_list->dma_map;
+ mb_list->dma_map = ring->spare.dma_map;
+ ring->spare.dma_map = map;
-err:
- while (i--)
- m_free(mb_list[i]);
- return -ENOMEM;
+ /* swap MBUFs */
+ mb_list->mbuf = ring->spare.mbuf;
+ ring->spare.mbuf = NULL;
+
+ /* store physical address */
+ *pdma = ring->spare.paddr_be;
+ return (0);
}
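
The refill strategy above keeps one preloaded spare buffer per ring so that a descriptor handed back to the hardware is never left empty: the only hard failure is being unable to replenish the spare itself. A toy, self-contained model of the idiom (all names hypothetical; malloc stands in for m_getjcl() plus the busdma load):

#include <stdlib.h>

struct slot { void *buf; };

static int
refill(struct slot *slot, struct slot *spare, size_t size)
{
	void *fresh;

	/* make sure a spare exists first -- the only failure point */
	if (spare->buf == NULL && (spare->buf = malloc(size)) == NULL)
		return (-1);
	/* common case: hand the slot a fresh buffer */
	if ((fresh = malloc(size)) != NULL) {
		slot->buf = fresh;
		return (0);
	}
	/* fallback: consume the spare; a later call replenishes it */
	slot->buf = spare->buf;
	spare->buf = NULL;
	return (0);
}

In the driver the fallback additionally swaps the DMA maps, so the spare's preloaded mapping travels with the buffer.
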
-static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
+static void
+mlx4_en_free_buf(struct mlx4_en_rx_ring *ring, struct mlx4_en_rx_mbuf *mb_list)
{
- *ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
+ bus_dmamap_t map = mb_list->dma_map;
+ bus_dmamap_sync(ring->dma_tag, map, BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(ring->dma_tag, map);
+ m_freem(mb_list->mbuf);
+ mb_list->mbuf = NULL; /* safety clearing */
}
-static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring *ring,
- int index)
+static int
+mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring, int index)
{
- struct mlx4_en_frag_info *frag_info;
- struct mlx4_en_dev *mdev = priv->mdev;
- struct mbuf **mb_list;
- struct mlx4_en_rx_desc *rx_desc = ring->buf + (index << ring->log_stride);
- dma_addr_t dma;
- int nr;
-
- mb_list = ring->rx_info + (index << priv->log_rx_info);
- for (nr = 0; nr < priv->num_frags; nr++) {
- en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
- frag_info = &priv->frag_info[nr];
- dma = be64_to_cpu(rx_desc->data[nr].addr);
-
-#if BITS_PER_LONG == 64
- en_dbg(DRV, priv, "Unmaping buffer at dma:0x%lx\n", (u64) dma);
-#elif BITS_PER_LONG == 32
- en_dbg(DRV, priv, "Unmaping buffer at dma:0x%llx\n", (u64) dma);
-#endif
- pci_unmap_single(mdev->pdev, dma, frag_info->frag_size,
- PCI_DMA_FROMDEVICE);
- m_free(mb_list[nr]);
+ struct mlx4_en_rx_desc *rx_desc = (struct mlx4_en_rx_desc *)
+ (ring->buf + (index * ring->stride));
+ struct mlx4_en_rx_mbuf *mb_list = ring->mbuf + index;
+
+ mb_list->mbuf = NULL;
+
+ if (mlx4_en_alloc_buf(ring, &rx_desc->data[0].addr, mb_list)) {
+ priv->port_stats.rx_alloc_failed++;
+ return (-ENOMEM);
}
+ return (0);
+}
+
+static inline void
+mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
+{
+ *ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
}
static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
@@ -191,7 +220,8 @@ reduce_rings:
while (ring->actual_size > new_size) {
ring->actual_size--;
ring->prod--;
- mlx4_en_free_rx_desc(priv, ring, ring->actual_size);
+ mlx4_en_free_buf(ring,
+ ring->mbuf + ring->actual_size);
}
}
@@ -211,100 +241,106 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
while (ring->cons != ring->prod) {
index = ring->cons & ring->size_mask;
en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
- mlx4_en_free_rx_desc(priv, ring, index);
+ mlx4_en_free_buf(ring, ring->mbuf + index);
++ring->cons;
}
}
-#if MLX4_EN_MAX_RX_FRAGS == 3
-static int frag_sizes[] = {
- FRAG_SZ0,
- FRAG_SZ1,
- FRAG_SZ2,
-};
-#elif MLX4_EN_MAX_RX_FRAGS == 2
-static int frag_sizes[] = {
- FRAG_SZ0,
- FRAG_SZ1,
-};
-#else
-#error "Unknown MAX_RX_FRAGS"
-#endif
-
void mlx4_en_calc_rx_buf(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
int eff_mtu = dev->if_mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
- int buf_size = 0;
- int i, frag;
- for (i = 0, frag = 0; buf_size < eff_mtu; frag++, i++) {
- /*
- * Allocate small to large but only as much as is needed for
- * the tail.
- */
- while (i > 0 && eff_mtu - buf_size <= frag_sizes[i - 1])
- i--;
- priv->frag_info[frag].frag_size = frag_sizes[i];
- priv->frag_info[frag].frag_prefix_size = buf_size;
- buf_size += priv->frag_info[frag].frag_size;
- }
+ if (eff_mtu > MJUM16BYTES) {
+ en_err(priv, "MTU(%d) is too big\n", (int)dev->if_mtu);
+ eff_mtu = MJUM16BYTES;
+ } else if (eff_mtu > MJUM9BYTES) {
+ eff_mtu = MJUM16BYTES;
+ } else if (eff_mtu > MJUMPAGESIZE) {
+ eff_mtu = MJUM9BYTES;
+ } else if (eff_mtu > MCLBYTES) {
+ eff_mtu = MJUMPAGESIZE;
+ } else {
+ eff_mtu = MCLBYTES;
+ }
- priv->num_frags = frag;
priv->rx_mb_size = eff_mtu;
- priv->log_rx_info =
- ROUNDUP_LOG2(priv->num_frags * sizeof(struct mbuf *));
- en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
- "num_frags:%d):\n", eff_mtu, priv->num_frags);
- for (i = 0; i < priv->num_frags; i++) {
- en_dbg(DRV, priv, " frag:%d - size:%d prefix:%d\n", i,
- priv->frag_info[i].frag_size,
- priv->frag_info[i].frag_prefix_size);
- }
+ en_dbg(DRV, priv, "Effective RX MTU: %d bytes\n", eff_mtu);
}
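
The single-mbuf design replaces the old fragment scatter-list with one cluster large enough for the whole frame, rounded up to the next stock mbuf cluster size. A hypothetical helper mirroring the same rounding, with a couple of worked values (cluster constants as defined by the FreeBSD mbuf headers; MJUMPAGESIZE is 4096 on 4 KB pages):

/* hypothetical mirror of the bucketing above */
static int
rx_cluster_size(int eff_mtu)
{
	if (eff_mtu > MJUM9BYTES)
		return (MJUM16BYTES);
	if (eff_mtu > MJUMPAGESIZE)
		return (MJUM9BYTES);
	if (eff_mtu > MCLBYTES)
		return (MJUMPAGESIZE);
	return (MCLBYTES);
}
/*
 * if_mtu 1500 => eff_mtu 1522 => MCLBYTES   (2k cluster)
 * if_mtu 9000 => eff_mtu 9022 => MJUM9BYTES (9k cluster)
 */
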
-
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring **pring,
u32 size, int node)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_rx_ring *ring;
- int err = -ENOMEM;
+ int err;
int tmp;
+ uint32_t x;
ring = kzalloc(sizeof(struct mlx4_en_rx_ring), GFP_KERNEL);
if (!ring) {
en_err(priv, "Failed to allocate RX ring structure\n");
return -ENOMEM;
}
-
+
+ /* Create DMA descriptor TAG */
+ if ((err = -bus_dma_tag_create(
+ bus_get_dma_tag(mdev->pdev->dev.bsddev),
+ 1, /* any alignment */
+ 0, /* no boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ MJUM16BYTES, /* maxsize */
+ 1, /* nsegments */
+ MJUM16BYTES, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockfuncarg */
+ &ring->dma_tag))) {
+ en_err(priv, "Failed to create DMA tag\n");
+ goto err_ring;
+ }
+
ring->prod = 0;
ring->cons = 0;
ring->size = size;
ring->size_mask = size - 1;
- ring->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
- DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
+ ring->stride = roundup_pow_of_two(
+ sizeof(struct mlx4_en_rx_desc) + DS_SIZE);
ring->log_stride = ffs(ring->stride) - 1;
ring->buf_size = ring->size * ring->stride + TXBB_SIZE;
- tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
- sizeof(struct mbuf *));
+ tmp = size * sizeof(struct mlx4_en_rx_mbuf);
- ring->rx_info = kmalloc(tmp, GFP_KERNEL);
- if (!ring->rx_info) {
+ ring->mbuf = kzalloc(tmp, GFP_KERNEL);
+ if (ring->mbuf == NULL) {
err = -ENOMEM;
- goto err_ring;
+ goto err_dma_tag;
}
- en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
- ring->rx_info, tmp);
+ err = -bus_dmamap_create(ring->dma_tag, 0, &ring->spare.dma_map);
+ if (err != 0)
+ goto err_info;
+
+ for (x = 0; x != size; x++) {
+ err = -bus_dmamap_create(ring->dma_tag, 0,
+ &ring->mbuf[x].dma_map);
+ if (err != 0) {
+ while (x--)
+ bus_dmamap_destroy(ring->dma_tag,
+ ring->mbuf[x].dma_map);
+ goto err_info;
+ }
+ }
+ en_dbg(DRV, priv, "Allocated MBUF ring at addr:%p size:%d\n",
+ ring->mbuf, tmp);
err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
ring->buf_size, 2 * PAGE_SIZE);
if (err)
- goto err_info;
+ goto err_dma_map;
err = mlx4_en_map_buffer(&ring->wqres.buf);
if (err) {
@@ -317,23 +353,29 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
err_hwq:
mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
+err_dma_map:
+ for (x = 0; x != size; x++) {
+ bus_dmamap_destroy(ring->dma_tag,
+ ring->mbuf[x].dma_map);
+ }
+ bus_dmamap_destroy(ring->dma_tag, ring->spare.dma_map);
err_info:
- vfree(ring->rx_info);
+ vfree(ring->mbuf);
+err_dma_tag:
+ bus_dma_tag_destroy(ring->dma_tag);
err_ring:
kfree(ring);
-
- return err;
+ return (err);
}
-
int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
{
struct mlx4_en_rx_ring *ring;
int i;
int ring_ind;
int err;
- int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
- DS_SIZE * priv->num_frags);
+ int stride = roundup_pow_of_two(
+ sizeof(struct mlx4_en_rx_desc) + DS_SIZE);
for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
ring = priv->rx_ring[ring_ind];
@@ -409,10 +451,22 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_rx_ring *ring = *pring;
+ uint32_t x;
mlx4_en_unmap_buffer(&ring->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
- vfree(ring->rx_info);
+ for (x = 0; x != size; x++)
+ bus_dmamap_destroy(ring->dma_tag, ring->mbuf[x].dma_map);
+ /* free spare mbuf, if any */
+ if (ring->spare.mbuf != NULL) {
+ bus_dmamap_sync(ring->dma_tag, ring->spare.dma_map,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(ring->dma_tag, ring->spare.dma_map);
+ m_freem(ring->spare.mbuf);
+ }
+ bus_dmamap_destroy(ring->dma_tag, ring->spare.dma_map);
+ vfree(ring->mbuf);
+ bus_dma_tag_destroy(ring->dma_tag);
kfree(ring);
*pring = NULL;
#ifdef CONFIG_RFS_ACCEL
@@ -420,7 +474,6 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
#endif
}
-
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring)
{
@@ -469,69 +522,27 @@ static inline int invalid_cqe(struct mlx4_en_priv *priv,
return 0;
}
-
-/* Unmap a completed descriptor and free unused pages */
-static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_desc *rx_desc,
- struct mbuf **mb_list,
- int length)
+static struct mbuf *
+mlx4_en_rx_mb(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
+ struct mlx4_en_rx_desc *rx_desc, struct mlx4_en_rx_mbuf *mb_list,
+ int length)
{
- struct mlx4_en_dev *mdev = priv->mdev;
- struct mlx4_en_frag_info *frag_info;
- dma_addr_t dma;
struct mbuf *mb;
- int nr;
-
- mb = mb_list[0];
- mb->m_pkthdr.len = length;
- /* Collect used fragments while replacing them in the HW descirptors */
- for (nr = 0; nr < priv->num_frags; nr++) {
- frag_info = &priv->frag_info[nr];
- if (length <= frag_info->frag_prefix_size)
- break;
- if (nr)
- mb->m_next = mb_list[nr];
- mb = mb_list[nr];
- mb->m_len = frag_info->frag_size;
- dma = be64_to_cpu(rx_desc->data[nr].addr);
-
- /* Allocate a replacement page */
- if (mlx4_en_alloc_buf(priv, rx_desc, mb_list, nr))
- goto fail;
-
- /* Unmap buffer */
- pci_unmap_single(mdev->pdev, dma, frag_info->frag_size,
- PCI_DMA_FROMDEVICE);
- }
- /* Adjust size of last fragment to match actual length */
- mb->m_len = length - priv->frag_info[nr - 1].frag_prefix_size;
- mb->m_next = NULL;
- return 0;
-fail:
- /* Drop all accumulated fragments (which have already been replaced in
- * the descriptor) of this packet; remaining fragments are reused... */
- while (nr > 0) {
- nr--;
- m_free(mb_list[nr]);
- }
- return -ENOMEM;
-
-}
+ /* get mbuf */
+ mb = mb_list->mbuf;
-static struct mbuf *mlx4_en_rx_mb(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_desc *rx_desc,
- struct mbuf **mb_list,
- unsigned int length)
-{
- struct mbuf *mb;
+ /* collect used fragment while atomically replacing it */
+ if (mlx4_en_alloc_buf(ring, &rx_desc->data[0].addr, mb_list))
+ return (NULL);
- mb = mb_list[0];
- /* Move relevant fragments to mb */
- if (unlikely(mlx4_en_complete_rx_desc(priv, rx_desc, mb_list, length)))
- return NULL;
+ /* range check hardware computed value */
+ if (unlikely(length > mb->m_len))
+ length = mb->m_len;
- return mb;
+ /* update total packet length in packet header */
+ mb->m_len = mb->m_pkthdr.len = length;
+ return (mb);
}
/* For cpu arch with cache line of 64B the performance is better when cqe size==64B
@@ -545,7 +556,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_cqe *cqe;
struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
- struct mbuf **mb_list;
+ struct mlx4_en_rx_mbuf *mb_list;
struct mlx4_en_rx_desc *rx_desc;
struct mbuf *mb;
struct mlx4_cq *mcq = &cq->mcq;
@@ -573,8 +584,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* Process all completed CQEs */
while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
cons_index & size)) {
- mb_list = ring->rx_info + (index << priv->log_rx_info);
- rx_desc = ring->buf + (index << ring->log_stride);
+ mb_list = ring->mbuf + index;
+ rx_desc = (struct mlx4_en_rx_desc *)
+ (ring->buf + (index << ring->log_stride));
/*
* make sure we read the CQE after we read the ownership bit
@@ -589,8 +601,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
*/
length = be32_to_cpu(cqe->byte_cnt);
length -= ring->fcs_del;
- mb = mlx4_en_rx_mb(priv, rx_desc, mb_list, length);
- if (!mb) {
+
+ mb = mlx4_en_rx_mb(priv, ring, rx_desc, mb_list, length);
+ if (unlikely(!mb)) {
ring->errors++;
goto next;
}
@@ -611,7 +624,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
mb->m_pkthdr.ether_vtag = be16_to_cpu(cqe->sl_vid);
mb->m_flags |= M_VLANTAG;
}
- if (likely(dev->if_capabilities & IFCAP_RXCSUM) &&
+ if (likely(dev->if_capenable &
+ (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) &&
(cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
(cqe->checksum == cpu_to_be16(0xffff))) {
priv->port_stats.rx_chksum_good++;
@@ -692,6 +706,7 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
// Because there is no NAPI in freeBSD
done = mlx4_en_poll_rx_cq(cq, MLX4_EN_RX_BUDGET);
if (priv->port_up && (done == MLX4_EN_RX_BUDGET) ) {
+ cq->curr_poll_rx_cpu_id = curcpu;
taskqueue_enqueue(cq->tq, &cq->cq_task);
}
else {
@@ -702,8 +717,15 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
void mlx4_en_rx_que(void *context, int pending)
{
struct mlx4_en_cq *cq;
+ struct thread *td;
cq = context;
+ td = curthread;
+
+ thread_lock(td);
+ sched_bind(td, cq->curr_poll_rx_cpu_id);
+ thread_unlock(td);
+
while (mlx4_en_poll_rx_cq(cq, MLX4_EN_RX_BUDGET)
== MLX4_EN_RX_BUDGET);
mlx4_en_arm_cq(cq->dev->if_softc, cq);
@@ -841,8 +863,8 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
else
rss_rings = priv->prof->rss_rings;
- ptr = ((void *) &context) + offsetof(struct mlx4_qp_context, pri_path)
- + MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
+ ptr = ((u8 *)&context) + offsetof(struct mlx4_qp_context, pri_path) +
+ MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
rss_context = ptr;
rss_context->base_qpn = cpu_to_be32(ilog2(rss_rings) << 24 |
(rss_map->base_qpn));
diff --git a/sys/ofed/drivers/net/mlx4/en_selftest.c b/sys/ofed/drivers/net/mlx4/en_selftest.c
deleted file mode 100644
index fb13bd6..0000000
--- a/sys/ofed/drivers/net/mlx4/en_selftest.c
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/delay.h>
-#include <linux/mlx4/driver.h>
-
-#include "mlx4_en.h"
-
-
-static int mlx4_en_test_registers(struct mlx4_en_priv *priv)
-{
- return mlx4_cmd(priv->mdev->dev, 0, 0, 0, MLX4_CMD_HW_HEALTH_CHECK,
- MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
-}
-
-static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv)
-{
- struct sk_buff *skb;
- struct ethhdr *ethh;
- unsigned char *packet;
- unsigned int packet_size = MLX4_LOOPBACK_TEST_PAYLOAD;
- unsigned int i;
- int err;
-
-
- /* build the pkt before xmit */
- skb = netdev_alloc_skb(priv->dev, MLX4_LOOPBACK_TEST_PAYLOAD + ETH_HLEN + NET_IP_ALIGN);
- if (!skb) {
- en_err(priv, "-LOOPBACK_TEST_XMIT- failed to create skb for xmit\n");
- return -ENOMEM;
- }
- skb_reserve(skb, NET_IP_ALIGN);
-
- ethh = (struct ethhdr *)skb_put(skb, sizeof(struct ethhdr));
- packet = (unsigned char *)skb_put(skb, packet_size);
- memcpy(ethh->h_dest, priv->dev->dev_addr, ETH_ALEN);
- memset(ethh->h_source, 0, ETH_ALEN);
- ethh->h_proto = htons(ETH_P_ARP);
- skb_set_mac_header(skb, 0);
- for (i = 0; i < packet_size; ++i) /* fill our packet */
- packet[i] = (unsigned char)(i & 0xff);
-
- /* xmit the pkt */
- err = mlx4_en_xmit(skb, priv->dev);
- return err;
-}
-
-static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
-{
- u32 loopback_ok = 0;
- int i;
-
-
- priv->loopback_ok = 0;
- priv->validate_loopback = 1;
-
- mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
-
- /* xmit */
- if (mlx4_en_test_loopback_xmit(priv)) {
- en_err(priv, "Transmitting loopback packet failed\n");
- goto mlx4_en_test_loopback_exit;
- }
-
- /* polling for result */
- for (i = 0; i < MLX4_EN_LOOPBACK_RETRIES; ++i) {
- msleep(MLX4_EN_LOOPBACK_TIMEOUT);
- if (priv->loopback_ok) {
- loopback_ok = 1;
- break;
- }
- }
- if (!loopback_ok)
- en_err(priv, "Loopback packet didn't arrive\n");
-
-mlx4_en_test_loopback_exit:
-
- priv->validate_loopback = 0;
- mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
- return !loopback_ok;
-}
-
-
-static int mlx4_en_test_link(struct mlx4_en_priv *priv)
-{
- if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
- return -ENOMEM;
- if (priv->port_state.link_state == 1)
- return 0;
- else
- return 1;
-}
-
-static int mlx4_en_test_speed(struct mlx4_en_priv *priv)
-{
-
- if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
- return -ENOMEM;
-
- /* The device supports 1G, 10G and 40G speed */
- if (priv->port_state.link_speed != MLX4_EN_LINK_SPEED_1G &&
- priv->port_state.link_speed != MLX4_EN_LINK_SPEED_10G &&
- priv->port_state.link_speed != MLX4_EN_LINK_SPEED_40G)
- return priv->port_state.link_speed;
- return 0;
-}
-
-
-void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
-{
- struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_dev *mdev = priv->mdev;
- int i, carrier_ok;
-
- memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST);
-
- if (*flags & ETH_TEST_FL_OFFLINE) {
- /* disable the interface */
- carrier_ok = netif_carrier_ok(dev);
-
- netif_carrier_off(dev);
- /* Wait until all tx queues are empty.
- * there should not be any additional incoming traffic
- * since we turned the carrier off */
- msleep(200);
-
- if (priv->mdev->dev->caps.flags &
- MLX4_DEV_CAP_FLAG_UC_LOOPBACK) {
- buf[3] = mlx4_en_test_registers(priv);
- if (priv->port_up)
- buf[4] = mlx4_en_test_loopback(priv);
- }
-
- if (carrier_ok)
- netif_carrier_on(dev);
-
- }
- buf[0] = mlx4_test_interrupts(mdev->dev);
- buf[1] = mlx4_en_test_link(priv);
- buf[2] = mlx4_en_test_speed(priv);
-
- for (i = 0; i < MLX4_EN_NUM_SELF_TEST; i++) {
- if (buf[i])
- *flags |= ETH_TEST_FL_FAILED;
- }
-}
diff --git a/sys/ofed/drivers/net/mlx4/en_tx.c b/sys/ofed/drivers/net/mlx4/en_tx.c
index 115dd0b..c147dd3 100644
--- a/sys/ofed/drivers/net/mlx4/en_tx.c
+++ b/sys/ofed/drivers/net/mlx4/en_tx.c
@@ -68,6 +68,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_tx_ring *ring;
+ uint32_t x;
int tmp;
int err;
@@ -80,11 +81,26 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
}
}
+ /* Create DMA descriptor TAG */
+ if ((err = -bus_dma_tag_create(
+ bus_get_dma_tag(mdev->pdev->dev.bsddev),
+ 1, /* any alignment */
+ 0, /* no boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ MLX4_EN_TX_MAX_PAYLOAD_SIZE, /* maxsize */
+ MLX4_EN_TX_MAX_MBUF_FRAGS, /* nsegments */
+ MLX4_EN_TX_MAX_MBUF_SIZE, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockfuncarg */
+ &ring->dma_tag)))
+ goto done;
+
ring->size = size;
ring->size_mask = size - 1;
ring->stride = stride;
- ring->full_size = ring->size - HEADROOM - MAX_DESC_TXBBS;
- ring->inline_thold = min(inline_thold, MAX_INLINE);
+ ring->inline_thold = MAX(MIN_PKT_LEN, MIN(inline_thold, MAX_INLINE));
mtx_init(&ring->tx_lock.m, "mlx4 tx", NULL, MTX_DEF);
mtx_init(&ring->comp_lock.m, "mlx4 comp", NULL, MTX_DEF);
@@ -93,30 +109,36 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
M_WAITOK, &ring->tx_lock.m);
if (ring->br == NULL) {
en_err(priv, "Failed allocating tx_info ring\n");
- return -ENOMEM;
+ err = -ENOMEM;
+ goto err_free_dma_tag;
}
tmp = size * sizeof(struct mlx4_en_tx_info);
- ring->tx_info = vmalloc_node(tmp, node);
+ ring->tx_info = kzalloc_node(tmp, GFP_KERNEL, node);
if (!ring->tx_info) {
- ring->tx_info = vmalloc(tmp);
+ ring->tx_info = kzalloc(tmp, GFP_KERNEL);
if (!ring->tx_info) {
err = -ENOMEM;
goto err_ring;
}
}
- en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
- ring->tx_info, tmp);
-
- ring->bounce_buf = kmalloc_node(MAX_DESC_SIZE, GFP_KERNEL, node);
- if (!ring->bounce_buf) {
- ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
- if (!ring->bounce_buf) {
- err = -ENOMEM;
+ /* Create DMA descriptor MAPs */
+ for (x = 0; x != size; x++) {
+ err = -bus_dmamap_create(ring->dma_tag, 0,
+ &ring->tx_info[x].dma_map);
+ if (err != 0) {
+ while (x--) {
+ bus_dmamap_destroy(ring->dma_tag,
+ ring->tx_info[x].dma_map);
+ }
goto err_info;
}
}
+
+ en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
+ ring->tx_info, tmp);
+
ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);
/* Allocate HW buffers on provided NUMA node */
@@ -124,7 +146,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
2 * PAGE_SIZE);
if (err) {
en_err(priv, "Failed allocating hwq resources\n");
- goto err_bounce;
+ goto err_dma_map;
}
err = mlx4_en_map_buffer(&ring->wqres.buf);
@@ -174,12 +196,16 @@ err_map:
mlx4_en_unmap_buffer(&ring->wqres.buf);
err_hwq_res:
mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
-err_bounce:
- kfree(ring->bounce_buf);
+err_dma_map:
+ for (x = 0; x != size; x++)
+ bus_dmamap_destroy(ring->dma_tag, ring->tx_info[x].dma_map);
err_info:
vfree(ring->tx_info);
err_ring:
buf_ring_free(ring->br, M_DEVBUF);
+err_free_dma_tag:
+ bus_dma_tag_destroy(ring->dma_tag);
+done:
kfree(ring);
return err;
}
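
The reworked allocation path unwinds strictly in reverse order of acquisition, one goto label per resource; in the hunk above the same ladder runs from err_map down through err_dma_map, err_info, err_ring and err_free_dma_tag to done. A minimal self-contained sketch of the idiom (grab_*/release_* are hypothetical stand-ins):

static int  grab_a(void) { return (0); }	/* hypothetical stand-ins */
static int  grab_b(void) { return (0); }
static int  grab_c(void) { return (0); }
static void release_b(void) { }
static void release_a(void) { }

static int
setup(void)
{
	int err;

	if ((err = grab_a()) != 0)
		goto done;
	if ((err = grab_b()) != 0)
		goto err_a;
	if ((err = grab_c()) != 0)
		goto err_b;
	return (0);
err_b:
	release_b();
err_a:
	release_a();
done:
	return (err);
}
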
@@ -189,6 +215,7 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_tx_ring *ring = *pring;
+ uint32_t x;
en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
buf_ring_free(ring->br, M_DEVBUF);
@@ -199,10 +226,12 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1);
mlx4_en_unmap_buffer(&ring->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
- kfree(ring->bounce_buf);
+ for (x = 0; x != ring->size; x++)
+ bus_dmamap_destroy(ring->dma_tag, ring->tx_info[x].dma_map);
vfree(ring->tx_info);
mtx_destroy(&ring->tx_lock.m);
mtx_destroy(&ring->comp_lock.m);
+ bus_dma_tag_destroy(ring->dma_tag);
kfree(ring);
*pring = NULL;
}
@@ -220,7 +249,6 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
ring->last_nr_txbb = 1;
ring->poll_cnt = 0;
ring->blocked = 0;
- memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info));
memset(ring->buf, 0, ring->buf_size);
ring->qp_state = MLX4_QP_STATE_RST;
@@ -245,96 +273,63 @@ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
}
-static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
- struct mlx4_en_tx_ring *ring,
- int index, u8 owner)
+static volatile struct mlx4_wqe_data_seg *
+mlx4_en_store_inline_lso_data(volatile struct mlx4_wqe_data_seg *dseg,
+ struct mbuf *mb, int len, __be32 owner_bit)
{
- struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
- struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
- void *end = ring->buf + ring->buf_size;
- __be32 *ptr = (__be32 *)tx_desc;
- __be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));
- int i;
-
- /* Optimize the common case when there are no wraparounds */
- if (likely((void *)tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end))
- /* Stamp the freed descriptor */
- for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
- *ptr = stamp;
- ptr += STAMP_DWORDS;
- }
- else
- /* Stamp the freed descriptor */
- for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
- *ptr = stamp;
- ptr += STAMP_DWORDS;
- if ((void *)ptr >= end) {
- ptr = ring->buf;
- stamp ^= cpu_to_be32(0x80000000);
- }
- }
+ uint8_t *inl = __DEVOLATILE(uint8_t *, dseg);
+
+ /* copy data into place */
+ m_copydata(mb, 0, len, inl + 4);
+ dseg += DIV_ROUND_UP(4 + len, DS_SIZE_ALIGNMENT);
+ return (dseg);
}
-static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
- struct mlx4_en_tx_ring *ring,
- int index, u8 owner, u64 timestamp)
+static void
+mlx4_en_store_inline_lso_header(volatile struct mlx4_wqe_data_seg *dseg,
+ int len, __be32 owner_bit)
+{
+}
+
+static void
+mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring, u32 index, u8 owner)
{
- struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
- struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
- struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset;
- struct mbuf *mb = tx_info->mb;
- void *end = ring->buf + ring->buf_size;
- int frags = tx_info->nr_segs;;
- int i;
-
- /* Optimize the common case when there are no wraparounds */
- if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
- if (!tx_info->inl) {
- if (tx_info->linear) {
- dma_unmap_single(priv->ddev,
- (dma_addr_t) be64_to_cpu(data->addr),
- be32_to_cpu(data->byte_count),
- PCI_DMA_TODEVICE);
- ++data;
- }
+ struct mlx4_en_tx_desc *tx_desc = (struct mlx4_en_tx_desc *)
+ (ring->buf + (index * TXBB_SIZE));
+ volatile __be32 *ptr = (__be32 *)tx_desc;
+ const __be32 stamp = cpu_to_be32(STAMP_VAL |
+ ((u32)owner << STAMP_SHIFT));
+ u32 i;
+
+ /* Stamp the freed descriptor */
+ for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
+ *ptr = stamp;
+ ptr += STAMP_DWORDS;
+ }
+}
- for (i = 0; i < frags; i++) {
- pci_unmap_single(mdev->pdev,
- (dma_addr_t) be64_to_cpu(data[i].addr),
- data[i].byte_count, PCI_DMA_TODEVICE);
- }
- }
- } else {
- if (!tx_info->inl) {
- if ((void *) data >= end) {
- data = ring->buf + ((void *)data - end);
- }
+static u32
+mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring, u32 index)
+{
+ struct mlx4_en_tx_info *tx_info;
+ struct mbuf *mb;
- if (tx_info->linear) {
- dma_unmap_single(priv->ddev,
- (dma_addr_t) be64_to_cpu(data->addr),
- be32_to_cpu(data->byte_count),
- PCI_DMA_TODEVICE);
- ++data;
- }
+ tx_info = &ring->tx_info[index];
+ mb = tx_info->mb;
+
+ if (mb == NULL)
+ goto done;
+
+ bus_dmamap_sync(ring->dma_tag, tx_info->dma_map,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(ring->dma_tag, tx_info->dma_map);
- for (i = 0; i < frags; i++) {
- /* Check for wraparound before unmapping */
- if ((void *) data >= end)
- data = ring->buf;
- pci_unmap_single(mdev->pdev,
- (dma_addr_t) be64_to_cpu(data->addr),
- data->byte_count, PCI_DMA_TODEVICE);
- ++data;
- }
- }
- }
- /* Send a copy of the frame to the BPF listener */
- if (priv->dev && priv->dev->if_bpf)
- ETHER_BPF_MTAP(priv->dev, mb);
m_freem(mb);
- return tx_info->nr_txbb;
+done:
+ return (tx_info->nr_txbb);
}
int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
@@ -354,8 +349,7 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
while (ring->cons != ring->prod) {
ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
- ring->cons & ring->size_mask,
- !!(ring->cons & ring->size), 0);
+ ring->cons & ring->size_mask);
ring->cons += ring->last_nr_txbb;
cnt++;
}
@@ -366,6 +360,14 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
return cnt;
}
+static bool
+mlx4_en_tx_ring_is_full(struct mlx4_en_tx_ring *ring)
+{
+ int wqs;
+ wqs = ring->size - (ring->prod - ring->cons);
+ return (wqs < (HEADROOM + (2 * MLX4_EN_TX_WQE_MAX_WQEBBS)));
+}
+
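
mlx4_en_tx_ring_is_full() reports the ring full while fewer than HEADROOM + 2 * MLX4_EN_TX_WQE_MAX_WQEBBS work-queue basic blocks remain free, so one maximum-sized WQE plus its possible wraparound padding always still fits. A worked example with hypothetical values (the real constants live in mlx4_en.h):

/*
 * size = 1024, prod - cons = 1000 outstanding WQEBBs
 *   => wqs = 1024 - 1000 = 24 free
 * assuming HEADROOM = 33 and MLX4_EN_TX_WQE_MAX_WQEBBS = 16:
 *   threshold = 33 + 2 * 16 = 65, and 24 < 65 => ring reports full
 */
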
static int mlx4_en_process_tx_cq(struct net_device *dev,
struct mlx4_en_cq *cq)
{
@@ -381,12 +383,7 @@ static int mlx4_en_process_tx_cq(struct net_device *dev,
int size = cq->size;
u32 size_mask = ring->size_mask;
struct mlx4_cqe *buf = cq->buf;
- u32 packets = 0;
- u32 bytes = 0;
int factor = priv->cqe_factor;
- u64 timestamp = 0;
- int done = 0;
-
if (!priv->port_up)
return 0;
@@ -421,16 +418,12 @@ static int mlx4_en_process_tx_cq(struct net_device *dev,
ring_index = (ring_index + ring->last_nr_txbb) & size_mask;
/* free next descriptor */
ring->last_nr_txbb = mlx4_en_free_tx_desc(
- priv, ring, ring_index,
- !!((ring->cons + txbbs_skipped) &
- ring->size), timestamp);
+ priv, ring, ring_index);
mlx4_en_stamp_wqe(priv, ring, stamp_index,
!!((ring->cons + txbbs_stamp) &
ring->size));
stamp_index = ring_index;
txbbs_stamp = txbbs_skipped;
- packets++;
- bytes += ring->tx_info[ring_index].nr_bytes;
} while (ring_index != new_index);
++cons_index;
@@ -449,15 +442,14 @@ static int mlx4_en_process_tx_cq(struct net_device *dev,
ring->cons += txbbs_skipped;
/* Wakeup Tx queue if it was stopped and ring is not full */
- if (unlikely(ring->blocked) &&
- (ring->prod - ring->cons) <= ring->full_size) {
+ if (unlikely(ring->blocked) && !mlx4_en_tx_ring_is_full(ring)) {
ring->blocked = 0;
if (atomic_fetchadd_int(&priv->blocked, -1) == 1)
atomic_clear_int(&dev->if_drv_flags ,IFF_DRV_OACTIVE);
ring->wake_queue++;
priv->port_stats.wake_queue++;
}
- return done;
+ return (0);
}
void mlx4_en_tx_irq(struct mlx4_cq *mcq)
@@ -498,34 +490,6 @@ void mlx4_en_poll_tx_cq(unsigned long data)
spin_unlock(&ring->comp_lock);
}
-static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
- struct mlx4_en_tx_ring *ring,
- u32 index,
- unsigned int desc_size)
-{
- u32 copy = (ring->size - index) * TXBB_SIZE;
- int i;
-
- for (i = desc_size - copy - 4; i >= 0; i -= 4) {
- if ((i & (TXBB_SIZE - 1)) == 0)
- wmb();
-
- *((u32 *) (ring->buf + i)) =
- *((u32 *) (ring->bounce_buf + copy + i));
- }
-
- for (i = copy - 4; i >= 4 ; i -= 4) {
- if ((i & (TXBB_SIZE - 1)) == 0)
- wmb();
-
- *((u32 *) (ring->buf + index * TXBB_SIZE + i)) =
- *((u32 *) (ring->bounce_buf + i));
- }
-
- /* Return real descriptor location */
- return ring->buf + index * TXBB_SIZE;
-}
-
static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
{
struct mlx4_en_cq *cq = priv->tx_cq[tx_ind];
@@ -544,30 +508,22 @@ static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
}
}
-static int is_inline(struct mbuf *mb, int thold)
+static u16
+mlx4_en_get_inline_hdr_size(struct mlx4_en_tx_ring *ring, struct mbuf *mb)
{
- if (thold && mb->m_pkthdr.len <= thold &&
- (mb->m_pkthdr.csum_flags & CSUM_TSO) == 0)
- return 1;
+ u16 retval;
- return 0;
-}
-
-static int inline_size(struct mbuf *mb)
-{
- int len;
+ /* only copy from first fragment, if possible */
+ retval = MIN(ring->inline_thold, mb->m_len);
- len = mb->m_pkthdr.len;
- if (len + CTRL_SIZE + sizeof(struct mlx4_wqe_inline_seg)
- <= MLX4_INLINE_ALIGN)
- return ALIGN(len + CTRL_SIZE +
- sizeof(struct mlx4_wqe_inline_seg), 16);
- else
- return ALIGN(len + CTRL_SIZE + 2 *
- sizeof(struct mlx4_wqe_inline_seg), 16);
+ /* check for too little data */
+ if (unlikely(retval < MIN_PKT_LEN))
+ retval = MIN(ring->inline_thold, mb->m_pkthdr.len);
+ return (retval);
}
-static int get_head_size(struct mbuf *mb)
+static int
+mlx4_en_get_header_size(struct mbuf *mb)
{
struct ether_vlan_header *eh;
struct tcphdr *th;
@@ -620,83 +576,48 @@ static int get_head_size(struct mbuf *mb)
return (eth_hdr_len);
}
-static int get_real_size(struct mbuf *mb, struct net_device *dev, int *p_n_segs,
- int *lso_header_size, int inl)
+static volatile struct mlx4_wqe_data_seg *
+mlx4_en_store_inline_data(volatile struct mlx4_wqe_data_seg *dseg,
+ struct mbuf *mb, int len, __be32 owner_bit)
{
- struct mbuf *m;
- int nr_segs = 0;
-
- for (m = mb; m != NULL; m = m->m_next)
- if (m->m_len)
- nr_segs++;
-
- if (mb->m_pkthdr.csum_flags & CSUM_TSO) {
- *lso_header_size = get_head_size(mb);
- if (*lso_header_size) {
- if (mb->m_len == *lso_header_size)
- nr_segs--;
- *p_n_segs = nr_segs;
- return CTRL_SIZE + nr_segs * DS_SIZE +
- ALIGN(*lso_header_size + 4, DS_SIZE);
- }
- } else
- *lso_header_size = 0;
- *p_n_segs = nr_segs;
- if (inl)
- return inline_size(mb);
- return (CTRL_SIZE + nr_segs * DS_SIZE);
-}
-
-static struct mbuf *mb_copy(struct mbuf *mb, int *offp, char *data, int len)
-{
- int bytes;
- int off;
-
- off = *offp;
- while (len) {
- bytes = min(mb->m_len - off, len);
- if (bytes)
- memcpy(data, mb->m_data + off, bytes);
- len -= bytes;
- data += bytes;
- off += bytes;
- if (off == mb->m_len) {
- off = 0;
- mb = mb->m_next;
- }
- }
- *offp = off;
- return (mb);
+ uint8_t *inl = __DEVOLATILE(uint8_t *, dseg);
+ const int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - 4;
+
+ if (unlikely(len < MIN_PKT_LEN)) {
+ m_copydata(mb, 0, len, inl + 4);
+ memset(inl + 4 + len, 0, MIN_PKT_LEN - len);
+ dseg += DIV_ROUND_UP(4 + MIN_PKT_LEN, DS_SIZE_ALIGNMENT);
+ } else if (len <= spc) {
+ m_copydata(mb, 0, len, inl + 4);
+ dseg += DIV_ROUND_UP(4 + len, DS_SIZE_ALIGNMENT);
+ } else {
+ m_copydata(mb, 0, spc, inl + 4);
+ m_copydata(mb, spc, len - spc, inl + 8 + spc);
+ dseg += DIV_ROUND_UP(8 + len, DS_SIZE_ALIGNMENT);
+ }
+ return (dseg);
}
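
mlx4_en_store_inline_data() splits an inline payload around the two 4-byte byte_count words: up to spc bytes follow the first count, and the remainder follows a second count at inl + 4 + spc. The matching counts are written later by mlx4_en_store_inline_header(), second half first with a wmb() in between, so the segment only becomes valid once the first count lands. A worked example, assuming the usual MLX4_INLINE_ALIGN = 64, CTRL_SIZE = 16 and DS_SIZE_ALIGNMENT = 16:

/*
 * spc = 64 - 16 - 4 = 44 bytes
 * len = 100 > spc:
 *   bytes  0..43  copied to inl + 4        (after the 1st count word)
 *   bytes 44..99  copied to inl + 8 + 44   (after the 2nd count word)
 *   dseg advances by DIV_ROUND_UP(8 + 100, 16) = 7 units
 */
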
-static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct mbuf *mb,
- int real_size, u16 *vlan_tag, int tx_ind)
+static void
+mlx4_en_store_inline_header(volatile struct mlx4_wqe_data_seg *dseg,
+ int len, __be32 owner_bit)
{
- struct mlx4_wqe_inline_seg *inl = &tx_desc->inl;
- int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl;
- int len;
- int off;
-
- off = 0;
- len = mb->m_pkthdr.len;
- if (len <= spc) {
- inl->byte_count = cpu_to_be32(1 << 31 |
- (max_t(typeof(len), len, MIN_PKT_LEN)));
- mb_copy(mb, &off, (void *)(inl + 1), len);
- if (len < MIN_PKT_LEN)
- memset(((void *)(inl + 1)) + len, 0,
- MIN_PKT_LEN - len);
+ uint8_t *inl = __DEVOLATILE(uint8_t *, dseg);
+ const int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - 4;
+
+ if (unlikely(len < MIN_PKT_LEN)) {
+ *(volatile uint32_t *)inl =
+ SET_BYTE_COUNT((1 << 31) | MIN_PKT_LEN);
+ } else if (len <= spc) {
+ *(volatile uint32_t *)inl =
+ SET_BYTE_COUNT((1 << 31) | len);
} else {
- inl->byte_count = cpu_to_be32(1 << 31 | spc);
- mb = mb_copy(mb, &off, (void *)(inl + 1), spc);
- inl = (void *) (inl + 1) + spc;
- mb_copy(mb, &off, (void *)(inl + 1), len - spc);
+ *(volatile uint32_t *)(inl + 4 + spc) =
+ SET_BYTE_COUNT((1 << 31) | (len - spc));
wmb();
- inl->byte_count = cpu_to_be32(1 << 31 | (len - spc));
+ *(volatile uint32_t *)inl =
+ SET_BYTE_COUNT((1 << 31) | spc);
}
- tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag);
- tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!(*vlan_tag);
- tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
}
static unsigned long hashrandom;
@@ -720,18 +641,14 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct mbuf *mb)
up = (vlan_tag >> 13) % MLX4_EN_NUM_UP;
}
#endif
- /* check if flowid is set */
- if (M_HASHTYPE_GET(mb) != M_HASHTYPE_NONE)
- queue_index = mb->m_pkthdr.flowid;
- else
- queue_index = mlx4_en_hashmbuf(MLX4_F_HASHL3 | MLX4_F_HASHL4, mb, hashrandom);
+ queue_index = mlx4_en_hashmbuf(MLX4_F_HASHL3 | MLX4_F_HASHL4, mb, hashrandom);
return ((queue_index % rings_p_up) + (up * rings_p_up));
}
-static void mlx4_bf_copy(void __iomem *dst, unsigned long *src, unsigned bytecnt)
+static void mlx4_bf_copy(void __iomem *dst, volatile unsigned long *src, unsigned bytecnt)
{
- __iowrite64_copy(dst, src, bytecnt / 8);
+ __iowrite64_copy(dst, __DEVOLATILE(void *, src), bytecnt / 8);
}
static u64 mlx4_en_mac_to_u64(u8 *addr)
@@ -746,168 +663,263 @@ static u64 mlx4_en_mac_to_u64(u8 *addr)
return mac;
}
-static int mlx4_en_xmit(struct net_device *dev, int tx_ind, struct mbuf **mbp)
+static int mlx4_en_xmit(struct mlx4_en_priv *priv, int tx_ind, struct mbuf **mbp)
{
- struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_dev *mdev = priv->mdev;
- struct mlx4_en_tx_ring *ring;
- struct mlx4_en_cq *cq;
- struct mlx4_en_tx_desc *tx_desc;
- struct mlx4_wqe_data_seg *data;
+ enum {
+ DS_FACT = TXBB_SIZE / DS_SIZE_ALIGNMENT,
+ CTRL_FLAGS = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
+ MLX4_WQE_CTRL_SOLICITED),
+ };
+ bus_dma_segment_t segs[MLX4_EN_TX_MAX_MBUF_FRAGS];
+ volatile struct mlx4_wqe_data_seg *dseg;
+ volatile struct mlx4_wqe_data_seg *dseg_inline;
+ volatile struct mlx4_en_tx_desc *tx_desc;
+ struct mlx4_en_tx_ring *ring = priv->tx_ring[tx_ind];
+ struct ifnet *ifp = priv->dev;
struct mlx4_en_tx_info *tx_info;
+ struct mbuf *mb = *mbp;
struct mbuf *m;
- int nr_txbb;
+ __be32 owner_bit;
int nr_segs;
- int desc_size;
- int real_size;
- dma_addr_t dma;
- u32 index, bf_index, ring_size;
- __be32 op_own;
- u16 vlan_tag = 0;
- int i;
- int lso_header_size;
- bool bounce = false;
- bool inl = false;
- struct mbuf *mb;
- mb = *mbp;
- int defrag = 1;
-
- if (!priv->port_up)
- goto tx_drop;
-
- ring = priv->tx_ring[tx_ind];
- ring_size = ring->size;
- inl = is_inline(mb, ring->inline_thold);
-
-retry:
- real_size = get_real_size(mb, dev, &nr_segs, &lso_header_size, inl);
- if (unlikely(!real_size))
- goto tx_drop;
+ int pad;
+ int err;
+ u32 bf_size;
+ u32 bf_prod;
+ u32 opcode;
+ u16 index;
+ u16 ds_cnt;
+ u16 ihs;
- /* Align descriptor to TXBB size */
- desc_size = ALIGN(real_size, TXBB_SIZE);
- nr_txbb = desc_size / TXBB_SIZE;
- if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
- if (defrag) {
- mb = m_defrag(*mbp, M_NOWAIT);
- if (mb == NULL) {
- mb = *mbp;
- goto tx_drop;
- }
- *mbp = mb;
- defrag = 0;
- goto retry;
- }
- en_warn(priv, "Oversized header or SG list\n");
+ if (unlikely(!priv->port_up)) {
+ err = EINVAL;
goto tx_drop;
}
- /* Obtain VLAN information if present */
- if (mb->m_flags & M_VLANTAG) {
- vlan_tag = mb->m_pkthdr.ether_vtag;
- }
-
- /* Check available TXBBs and 2K spare for prefetch
- * Even if netif_tx_stop_queue() will be called
- * driver will send current packet to ensure
- * that at least one completion will be issued after
- * stopping the queue
- */
- if (unlikely((int)(ring->prod - ring->cons) > ring->full_size)) {
- /* every full Tx ring stops queue */
- if (ring->blocked == 0)
- atomic_add_int(&priv->blocked, 1);
- /* Set HW-queue-is-full flag */
- atomic_set_int(&dev->if_drv_flags, IFF_DRV_OACTIVE);
+ /* check if TX ring is full */
+ if (unlikely(mlx4_en_tx_ring_is_full(ring))) {
+ /* every full native Tx ring stops queue */
+ if (ring->blocked == 0)
+ atomic_add_int(&priv->blocked, 1);
+ /* Set HW-queue-is-full flag */
+ atomic_set_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
ring->blocked = 1;
priv->port_stats.queue_stopped++;
ring->queue_stopped++;
/* Use interrupts to find out when queue opened */
- cq = priv->tx_cq[tx_ind];
- mlx4_en_arm_cq(priv, cq);
- return EBUSY;
+ mlx4_en_arm_cq(priv, priv->tx_cq[tx_ind]);
+ return (ENOBUFS);
}
+ /* sanity check we are not wrapping around */
+ KASSERT(((~ring->prod) & ring->size_mask) >=
+ (MLX4_EN_TX_WQE_MAX_WQEBBS - 1), ("Wrapping around TX ring"));
+
/* Track current inflight packets for performance analysis */
AVG_PERF_COUNTER(priv->pstats.inflight_avg,
(u32) (ring->prod - ring->cons - 1));
- /* Packet is good - grab an index and transmit it */
+ /* Track current mbuf packet header length */
+ AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, mb->m_pkthdr.len);
+
+ /* Grab an index and try to transmit packet */
+ owner_bit = (ring->prod & ring->size) ?
+ cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0;
index = ring->prod & ring->size_mask;
- bf_index = ring->prod;
+ tx_desc = (volatile struct mlx4_en_tx_desc *)
+ (ring->buf + index * TXBB_SIZE);
+ tx_info = &ring->tx_info[index];
+ dseg = &tx_desc->data;
- /* See if we have enough space for whole descriptor TXBB for setting
- * SW ownership on next descriptor; if not, use a bounce buffer. */
- if (likely(index + nr_txbb <= ring_size))
- tx_desc = ring->buf + index * TXBB_SIZE;
- else {
- tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;
- bounce = true;
+ /* send a copy of the frame to the BPF listener, if any */
+ if (ifp != NULL && ifp->if_bpf != NULL)
+ ETHER_BPF_MTAP(ifp, mb);
+
+ /* get default flags */
+ tx_desc->ctrl.srcrb_flags = CTRL_FLAGS;
+
+ if (mb->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO))
+ tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM);
+
+ if (mb->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP |
+ CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO))
+ tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_TCP_UDP_CSUM);
+
+ /* do statistics */
+ if (likely(tx_desc->ctrl.srcrb_flags != CTRL_FLAGS)) {
+ priv->port_stats.tx_chksum_offload++;
+ ring->tx_csum++;
}
- /* Save mb in tx_info ring */
- tx_info = &ring->tx_info[index];
- tx_info->mb = mb;
- tx_info->nr_txbb = nr_txbb;
- tx_info->nr_segs = nr_segs;
-
- if (lso_header_size) {
- memcpy(tx_desc->lso.header, mb->m_data, lso_header_size);
- data = ((void *)&tx_desc->lso + ALIGN(lso_header_size + 4,
- DS_SIZE));
- /* lso header is part of m_data.
- * need to omit when mapping DMA */
- mb->m_data += lso_header_size;
- mb->m_len -= lso_header_size;
+ /* check for VLAN tag */
+ if (mb->m_flags & M_VLANTAG) {
+ tx_desc->ctrl.vlan_tag = cpu_to_be16(mb->m_pkthdr.ether_vtag);
+ tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN;
+ } else {
+ tx_desc->ctrl.vlan_tag = 0;
+ tx_desc->ctrl.ins_vlan = 0;
}
- else
- data = &tx_desc->data;
- /* valid only for none inline segments */
- tx_info->data_offset = (void *)data - (void *)tx_desc;
+ /* clear immediate field */
+ tx_desc->ctrl.imm = 0;
- if (inl) {
- tx_info->inl = 1;
+ /* Handle LSO (TSO) packets */
+ if (mb->m_pkthdr.csum_flags & CSUM_TSO) {
+ u32 payload_len;
+ u32 mss = mb->m_pkthdr.tso_segsz;
+ u32 num_pkts;
+
+ opcode = cpu_to_be32(MLX4_OPCODE_LSO | MLX4_WQE_CTRL_RR) |
+ owner_bit;
+ ihs = mlx4_en_get_header_size(mb);
+ if (unlikely(ihs > MAX_INLINE)) {
+ ring->oversized_packets++;
+ err = EINVAL;
+ goto tx_drop;
+ }
+ tx_desc->lso.mss_hdr_size = cpu_to_be32((mss << 16) | ihs);
+ payload_len = mb->m_pkthdr.len - ihs;
+ if (unlikely(payload_len == 0))
+ num_pkts = 1;
+ else
+ num_pkts = DIV_ROUND_UP(payload_len, mss);
+ ring->bytes += payload_len + (num_pkts * ihs);
+ ring->packets += num_pkts;
+ priv->port_stats.tso_packets++;
+ /* store pointer to inline header */
+ dseg_inline = dseg;
+ /* copy data inline */
+ dseg = mlx4_en_store_inline_lso_data(dseg,
+ mb, ihs, owner_bit);
} else {
- for (i = 0, m = mb; i < nr_segs; i++, m = m->m_next) {
- if (m->m_len == 0) {
- i--;
- continue;
- }
- dma = pci_map_single(mdev->dev->pdev, m->m_data,
- m->m_len, PCI_DMA_TODEVICE);
- data->addr = cpu_to_be64(dma);
- data->lkey = cpu_to_be32(mdev->mr.key);
- wmb();
- data->byte_count = cpu_to_be32(m->m_len);
- data++;
- }
- if (lso_header_size) {
- mb->m_data -= lso_header_size;
- mb->m_len += lso_header_size;
- }
- tx_info->inl = 0;
+ opcode = cpu_to_be32(MLX4_OPCODE_SEND) |
+ owner_bit;
+ ihs = mlx4_en_get_inline_hdr_size(ring, mb);
+ ring->bytes += max_t(unsigned int,
+ mb->m_pkthdr.len, ETHER_MIN_LEN - ETHER_CRC_LEN);
+ ring->packets++;
+ /* store pointer to inline header */
+ dseg_inline = dseg;
+ /* copy data inline */
+ dseg = mlx4_en_store_inline_data(dseg,
+ mb, ihs, owner_bit);
+ }
+ m_adj(mb, ihs);
+
+ /* trim off empty mbufs */
+ while (mb->m_len == 0) {
+ mb = m_free(mb);
+ /* check if all data has been inlined */
+ if (mb == NULL) {
+ nr_segs = 0;
+ goto skip_dma;
+ }
}
+ err = bus_dmamap_load_mbuf_sg(ring->dma_tag, tx_info->dma_map,
+ mb, segs, &nr_segs, BUS_DMA_NOWAIT);
+ if (unlikely(err == EFBIG)) {
+ /* Too many mbuf fragments */
+ m = m_defrag(mb, M_NOWAIT);
+ if (m == NULL) {
+ ring->oversized_packets++;
+ goto tx_drop;
+ }
+ mb = m;
+ /* Try again */
+ err = bus_dmamap_load_mbuf_sg(ring->dma_tag, tx_info->dma_map,
+ mb, segs, &nr_segs, BUS_DMA_NOWAIT);
+ }
+ /* catch errors */
+ if (unlikely(err != 0)) {
+ ring->oversized_packets++;
+ goto tx_drop;
+ }
+ /* make sure all mbuf data is written to RAM */
+ bus_dmamap_sync(ring->dma_tag, tx_info->dma_map,
+ BUS_DMASYNC_PREWRITE);
- /* Prepare ctrl segement apart opcode+ownership, which depends on
- * whether LSO is used */
- tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
- tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
- !!vlan_tag;
- tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
- tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
- if (mb->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO |
- CSUM_TCP | CSUM_UDP | CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) {
- if (mb->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO))
- tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM);
- if (mb->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP |
- CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO))
- tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_TCP_UDP_CSUM);
- priv->port_stats.tx_chksum_offload++;
- ring->tx_csum++;
- }
+skip_dma:
+ /* compute number of DS needed */
+ ds_cnt = (dseg - ((volatile struct mlx4_wqe_data_seg *)tx_desc)) + nr_segs;
+
+ /*
+ * Check if the next request can wrap around and fill the end
+ * of the current request with zero immediate data:
+ */
+ pad = DIV_ROUND_UP(ds_cnt, DS_FACT);
+ pad = (~(ring->prod + pad)) & ring->size_mask;
+
+ if (unlikely(pad < (MLX4_EN_TX_WQE_MAX_WQEBBS - 1))) {
+ /*
+ * Compute the least number of DS blocks we need to
+ * pad in order to achieve a TX ring wraparound:
+ */
+ pad = (DS_FACT * (pad + 1));
+ } else {
+ /*
+ * The hardware will automatically jump to the next
+ * TXBB. No need for padding.
+ */
+ pad = 0;
+ }
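
This padding guarantees a WQE never straddles the end of the ring buffer: if the space left before the wrap cannot hold another maximum-sized WQE, the current request is extended with zero-length immediate data segments up to the boundary. A worked example with hypothetical numbers:

/*
 * size = 256 TXBBs, DS_FACT = 4; the next WQE would start at
 * index 253 => pad = (~253) & 255 = 2 (= 3 TXBBs left, minus 1).
 * Assuming MLX4_EN_TX_WQE_MAX_WQEBBS = 16: 2 < 15, so
 * pad = 4 * (2 + 1) = 12 zero DS blocks = 3 TXBBs, and the
 * request is extended to end exactly on the ring boundary.
 */
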
+
+ /* compute total number of DS blocks */
+ ds_cnt += pad;
+ /*
+ * When modifying this code, please ensure that the following
+ * computation is always less than or equal to 0x3F:
+ *
+ * ((MLX4_EN_TX_WQE_MAX_WQEBBS - 1) * DS_FACT) +
+ * (MLX4_EN_TX_WQE_MAX_WQEBBS * DS_FACT)
+ *
+ * Else the "ds_cnt" variable can become too big.
+ */
+ tx_desc->ctrl.fence_size = (ds_cnt & 0x3f);
+
+ /* store pointer to mbuf */
+ tx_info->mb = mb;
+ tx_info->nr_txbb = DIV_ROUND_UP(ds_cnt, DS_FACT);
+ bf_size = ds_cnt * DS_SIZE_ALIGNMENT;
+ bf_prod = ring->prod;
+
+ /* compute end of "dseg" array */
+ dseg += nr_segs + pad;
+
+ /* pad using zero immediate dseg */
+ while (pad--) {
+ dseg--;
+ dseg->addr = 0;
+ dseg->lkey = 0;
+ wmb();
+ dseg->byte_count = SET_BYTE_COUNT((1 << 31)|0);
+ }
+
+ /* fill segment list */
+ while (nr_segs--) {
+ if (unlikely(segs[nr_segs].ds_len == 0)) {
+ dseg--;
+ dseg->addr = 0;
+ dseg->lkey = 0;
+ wmb();
+ dseg->byte_count = SET_BYTE_COUNT((1 << 31)|0);
+ } else {
+ dseg--;
+ dseg->addr = cpu_to_be64((uint64_t)segs[nr_segs].ds_addr);
+ dseg->lkey = cpu_to_be32(priv->mdev->mr.key);
+ wmb();
+ dseg->byte_count = SET_BYTE_COUNT((uint32_t)segs[nr_segs].ds_len);
+ }
+ }
+
+ wmb();
+
+ /* write owner bits in reverse order */
+ if ((opcode & cpu_to_be32(0x1F)) == cpu_to_be32(MLX4_OPCODE_LSO))
+ mlx4_en_store_inline_lso_header(dseg_inline, ihs, owner_bit);
+ else
+ mlx4_en_store_inline_header(dseg_inline, ihs, owner_bit);
if (unlikely(priv->validate_loopback)) {
/* Copy dst mac address to wqe */
@@ -925,77 +937,46 @@ retry:
}
}
- /* Handle LSO (TSO) packets */
- if (lso_header_size) {
- int segsz;
- /* Mark opcode as LSO */
- op_own = cpu_to_be32(MLX4_OPCODE_LSO | (1 << 6)) |
- ((ring->prod & ring_size) ?
- cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
-
- /* Fill in the LSO prefix */
- tx_desc->lso.mss_hdr_size = cpu_to_be32(
- mb->m_pkthdr.tso_segsz << 16 | lso_header_size);
-
- priv->port_stats.tso_packets++;
- segsz = mb->m_pkthdr.tso_segsz;
- i = ((mb->m_pkthdr.len - lso_header_size + segsz - 1) / segsz);
- tx_info->nr_bytes= mb->m_pkthdr.len + (i - 1) * lso_header_size;
- ring->packets += i;
- } else {
- /* Normal (Non LSO) packet */
- op_own = cpu_to_be32(MLX4_OPCODE_SEND) |
- ((ring->prod & ring_size) ?
- cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
- tx_info->nr_bytes = max(mb->m_pkthdr.len,
- (unsigned int)ETHER_MIN_LEN - ETHER_CRC_LEN);
- ring->packets++;
-
- }
- ring->bytes += tx_info->nr_bytes;
- AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, mb->m_pkthdr.len);
+ /* update producer counter */
+ ring->prod += tx_info->nr_txbb;
- if (tx_info->inl) {
- build_inline_wqe(tx_desc, mb, real_size, &vlan_tag, tx_ind);
- tx_info->inl = 1;
- }
+ if (ring->bf_enabled && bf_size <= MAX_BF &&
+ (tx_desc->ctrl.ins_vlan != MLX4_WQE_CTRL_INS_VLAN)) {
- ring->prod += nr_txbb;
+ /* store doorbell number */
+ *(volatile __be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn);
+ /* or in producer number for this WQE */
+ opcode |= cpu_to_be32((bf_prod & 0xffff) << 8);
- /* If we used a bounce buffer then copy descriptor back into place */
- if (unlikely(bounce))
- tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);
- if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) {
- *(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn);
- op_own |= htonl((bf_index & 0xffff) << 8);
- /* Ensure new descirptor hits memory
- * before setting ownership of this descriptor to HW */
+ /*
+ * Ensure the new descriptor hits memory before
+ * setting ownership of this descriptor to HW:
+ */
wmb();
- tx_desc->ctrl.owner_opcode = op_own;
-
+ tx_desc->ctrl.owner_opcode = opcode;
wmb();
-
- mlx4_bf_copy(ring->bf.reg + ring->bf.offset, (unsigned long *) &tx_desc->ctrl,
- desc_size);
-
+ mlx4_bf_copy(((u8 *)ring->bf.reg) + ring->bf.offset,
+ (volatile unsigned long *) &tx_desc->ctrl, bf_size);
wmb();
-
ring->bf.offset ^= ring->bf.buf_size;
} else {
- /* Ensure new descirptor hits memory
- * before setting ownership of this descriptor to HW */
+ /*
+ * Ensure the new descriptor hits memory before
+ * setting ownership of this descriptor to HW:
+ */
wmb();
- tx_desc->ctrl.owner_opcode = op_own;
+ tx_desc->ctrl.owner_opcode = opcode;
wmb();
- writel(cpu_to_be32(ring->doorbell_qpn), ring->bf.uar->map + MLX4_SEND_DOORBELL);
+ writel(cpu_to_be32(ring->doorbell_qpn),
+ ((u8 *)ring->bf.uar->map) + MLX4_SEND_DOORBELL);
}
- return 0;
+ return (0);
tx_drop:
*mbp = NULL;
m_freem(mb);
- return EINVAL;
+ return (err);
}
static int
@@ -1015,13 +996,16 @@ mlx4_en_transmit_locked(struct ifnet *dev, int tx_ind, struct mbuf *m)
}
enqueued = 0;
- if (m != NULL) {
- if ((err = drbr_enqueue(dev, ring->br, m)) != 0)
- return (err);
- }
+ if (m != NULL)
+ /*
+ * If we can't insert the mbuf into the drbr, try to transmit anyway.
+ * Keep the error so it can be returned after the transmit attempt.
+ */
+ err = drbr_enqueue(dev, ring->br, m);
+
/* Process the queue */
while ((next = drbr_peek(dev, ring->br)) != NULL) {
- if ((err = mlx4_en_xmit(dev, tx_ind, &next)) != 0) {
+ if (mlx4_en_xmit(priv, tx_ind, &next) != 0) {
if (next == NULL) {
drbr_advance(dev, ring->br);
} else {
@@ -1072,10 +1056,14 @@ mlx4_en_transmit(struct ifnet *dev, struct mbuf *m)
int i, err = 0;
/* Compute which queue to use */
- i = mlx4_en_select_queue(dev, m);
+ if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
+ i = m->m_pkthdr.flowid % priv->tx_ring_num;
+ } else {
+ i = mlx4_en_select_queue(dev, m);
+ }
ring = priv->tx_ring[i];
-
if (spin_trylock(&ring->tx_lock)) {
err = mlx4_en_transmit_locked(dev, i, m);
spin_unlock(&ring->tx_lock);
diff --git a/sys/ofed/drivers/net/mlx4/main.c b/sys/ofed/drivers/net/mlx4/main.c
index ddcd7d1..217220a 100644
--- a/sys/ofed/drivers/net/mlx4/main.c
+++ b/sys/ofed/drivers/net/mlx4/main.c
@@ -33,7 +33,7 @@
* SOFTWARE.
*/
-#include <linux/kmod.h>
+#include <linux/kmod.h>
/*
* kmod.h must be included before module.h since it includes (indirectly) sys/module.h
* To use the FBSD macro sys/module.h should define MODULE_VERSION before linux/module does.
@@ -57,9 +57,7 @@
#include "icm.h"
#include "mlx4_stats.h"
-MODULE_AUTHOR("Roland Dreier");
-MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
-MODULE_LICENSE("Dual BSD/GPL");
+/* Mellanox ConnectX HCA low-level driver */
struct workqueue_struct *mlx4_wq;
@@ -177,7 +175,7 @@ MODULE_PARM_DESC(enable_64b_cqe_eqe,
#define PF_CONTEXT_BEHAVIOUR_MASK MLX4_FUNC_CAP_64B_EQE_CQE
static char mlx4_version[] __devinitdata =
- DRV_NAME ": Mellanox ConnectX core driver v"
+ DRV_NAME ": Mellanox ConnectX VPI driver v"
DRV_VERSION " (" DRV_RELDATE ")\n";
static int log_num_mac = 7;
@@ -608,7 +606,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
if (dev_cap->min_page_sz > PAGE_SIZE) {
mlx4_err(dev, "HCA minimum page size of %d bigger than "
"kernel PAGE_SIZE of %d, aborting.\n",
- dev_cap->min_page_sz, PAGE_SIZE);
+ dev_cap->min_page_sz, (int)PAGE_SIZE);
return -ENODEV;
}
if (dev_cap->num_ports > MLX4_MAX_PORTS) {
@@ -979,7 +977,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
if (page_size > PAGE_SIZE) {
mlx4_err(dev, "HCA minimum page size of %d bigger than "
"kernel PAGE_SIZE of %d, aborting.\n",
- page_size, PAGE_SIZE);
+ page_size, (int)PAGE_SIZE);
return -ENODEV;
}
@@ -989,7 +987,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
/* TODO: relax this assumption */
if (dev->caps.uar_page_size != PAGE_SIZE) {
mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %d\n",
- dev->caps.uar_page_size, PAGE_SIZE);
+ dev->caps.uar_page_size, (int)PAGE_SIZE);
return -ENODEV;
}
@@ -1299,6 +1297,43 @@ static inline int ibta_mtu_to_int(enum ibta_mtu mtu)
}
}
+static ssize_t
+show_board(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct mlx4_hca_info *info = container_of(attr, struct mlx4_hca_info,
+ board_attr);
+ struct mlx4_dev *mdev = info->dev;
+
+ return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
+ mdev->board_id);
+}
+
+static ssize_t
+show_hca(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct mlx4_hca_info *info = container_of(attr, struct mlx4_hca_info,
+ hca_attr);
+ struct mlx4_dev *mdev = info->dev;
+
+ return sprintf(buf, "MT%d\n", mdev->pdev->device);
+}
+
+static ssize_t
+show_firmware_version(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mlx4_hca_info *info = container_of(attr, struct mlx4_hca_info,
+ firmware_attr);
+ struct mlx4_dev *mdev = info->dev;
+
+ return sprintf(buf, "%d.%d.%d\n", (int)(mdev->caps.fw_ver >> 32),
+ (int)(mdev->caps.fw_ver >> 16) & 0xffff,
+ (int)mdev->caps.fw_ver & 0xffff);
+}
+
static ssize_t show_port_ib_mtu(struct device *dev,
struct device_attribute *attr,
char *buf)
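All three show_* handlers rely on one pattern: the device_attribute is embedded in mlx4_hca_info, so container_of() recovers the owning structure without a private-data pointer, and show_firmware_version() simply inverts the major<<32 | minor<<16 | subminor packing that mlx4_fw_ver() defines in device.h. A compressed sketch that folds two of the handlers into one (hypothetical, for illustration only):

static ssize_t
ex_show_hca(struct device *d, struct device_attribute *attr, char *buf)
{
	struct mlx4_hca_info *info =
	    container_of(attr, struct mlx4_hca_info, hca_attr);
	u64 fw = info->dev->caps.fw_ver;

	/* Field widths match mlx4_fw_ver(): 32/16/16 bits. */
	return sprintf(buf, "MT%d fw %d.%d.%d\n", info->dev->pdev->device,
	    (int)(fw >> 32), (int)(fw >> 16) & 0xffff, (int)fw & 0xffff);
}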
@@ -2941,6 +2976,30 @@ no_msi:
no_irq:
dev->caps.num_comp_vectors = 0;
dev->caps.comp_pool = 0;
+ return;
+}
+
+static void
+mlx4_init_hca_info(struct mlx4_dev *dev)
+{
+ struct mlx4_hca_info *info = &mlx4_priv(dev)->hca_info;
+
+ info->dev = dev;
+
+ info->firmware_attr = (struct device_attribute)__ATTR(fw_ver, S_IRUGO,
+ show_firmware_version, NULL);
+ if (device_create_file(&dev->pdev->dev, &info->firmware_attr))
+ mlx4_err(dev, "Failed to add file firmware version");
+
+ info->hca_attr = (struct device_attribute)__ATTR(hca, S_IRUGO, show_hca,
+ NULL);
+ if (device_create_file(&dev->pdev->dev, &info->hca_attr))
+ mlx4_err(dev, "Failed to add file hca type");
+
+ info->board_attr = (struct device_attribute)__ATTR(board_id, S_IRUGO,
+ show_board, NULL);
+ if (device_create_file(&dev->pdev->dev, &info->board_attr))
+ mlx4_err(dev, "Failed to add file board id type");
}
static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
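Registration and teardown of these files are symmetric: each __ATTR() initializer is handed to device_create_file() here, and mlx4_cleanup_hca_info() below must undo each with device_remove_file(). The pairing in miniature, with error handling reduced to the same log line used above:

info->board_attr = (struct device_attribute)__ATTR(board_id, S_IRUGO,
    show_board, NULL);
if (device_create_file(&dev->pdev->dev, &info->board_attr))
	mlx4_err(dev, "Failed to add file board id");
/* ... and on detach: */
device_remove_file(&info->dev->pdev->dev, &info->board_attr);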
@@ -2994,6 +3053,14 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
return err;
}
+static void
+mlx4_cleanup_hca_info(struct mlx4_hca_info *info)
+{
+ device_remove_file(&info->dev->pdev->dev, &info->firmware_attr);
+ device_remove_file(&info->dev->pdev->dev, &info->board_attr);
+ device_remove_file(&info->dev->pdev->dev, &info->hca_attr);
+}
+
static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
{
if (info->port < 0)
@@ -3351,6 +3418,7 @@ slave_start:
goto err_steer;
mlx4_init_quotas(dev);
+ mlx4_init_hca_info(dev);
for (port = 1; port <= dev->caps.num_ports; port++) {
err = mlx4_init_port_info(dev, port);
@@ -3443,8 +3511,7 @@ err_disable_pdev:
static int __devinit mlx4_init_one(struct pci_dev *pdev,
const struct pci_device_id *id)
{
- printk_once(KERN_INFO "%s", mlx4_version);
-
+ device_set_desc(pdev->dev.bsddev, mlx4_version);
return __mlx4_init_one(pdev, id->driver_data);
}
@@ -3464,6 +3531,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
mlx4_stop_sense(dev);
mlx4_unregister_device(dev);
+ mlx4_cleanup_hca_info(&priv->hca_info);
for (p = 1; p <= dev->caps.num_ports; p++) {
mlx4_cleanup_port_info(&priv->port[p]);
mlx4_CLOSE_PORT(dev, p);
diff --git a/sys/ofed/drivers/net/mlx4/mlx4.h b/sys/ofed/drivers/net/mlx4/mlx4.h
index 47fde25..bef2e00 100644
--- a/sys/ofed/drivers/net/mlx4/mlx4.h
+++ b/sys/ofed/drivers/net/mlx4/mlx4.h
@@ -51,7 +51,7 @@
#define DRV_NAME "mlx4_core"
#define PFX DRV_NAME ": "
-#define DRV_VERSION "2.1"
+#define DRV_VERSION "2.1.6"
#define DRV_RELDATE __DATE__
#define DRV_STACK_NAME "Linux-MLNX_OFED"
@@ -755,6 +755,13 @@ struct mlx4_set_port_rqp_calc_context {
__be32 mcast;
};
+struct mlx4_hca_info {
+ struct mlx4_dev *dev;
+ struct device_attribute firmware_attr;
+ struct device_attribute hca_attr;
+ struct device_attribute board_attr;
+};
+
struct mlx4_port_info {
struct mlx4_dev *dev;
int port;
@@ -845,6 +852,7 @@ struct mlx4_priv {
struct mlx4_uar driver_uar;
void __iomem *kar;
struct mlx4_port_info port[MLX4_MAX_PORTS + 1];
+ struct mlx4_hca_info hca_info;
struct mlx4_sense sense;
struct mutex port_mutex;
struct mlx4_msix_ctl msix_ctl;
diff --git a/sys/ofed/drivers/net/mlx4/mlx4_en.h b/sys/ofed/drivers/net/mlx4/mlx4_en.h
index b96563d..c776b0e 100644
--- a/sys/ofed/drivers/net/mlx4/mlx4_en.h
+++ b/sys/ofed/drivers/net/mlx4/mlx4_en.h
@@ -59,8 +59,6 @@
#include "mlx4_stats.h"
#define DRV_NAME "mlx4_en"
-#define DRV_VERSION "2.1"
-#define DRV_RELDATE __DATE__
#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
@@ -95,10 +93,6 @@
#define VLAN_MIN_VALUE 1
#define VLAN_MAX_VALUE 4094
-/* Typical TSO descriptor with 16 gather entries is 352 bytes... */
-#define MAX_DESC_SIZE 512
-#define MAX_DESC_TXBBS (MAX_DESC_SIZE / TXBB_SIZE)
-
/*
* OS related constants and tunables
*/
@@ -113,26 +107,6 @@ enum mlx4_en_alloc_type {
MLX4_EN_ALLOC_REPLACEMENT = 1,
};
-/* Receive fragment sizes; we use at most 3 fragments (for 9600 byte MTU
- * and 4K allocations) */
-#if MJUMPAGESIZE == 4096
-enum {
- FRAG_SZ0 = MCLBYTES,
- FRAG_SZ1 = MJUMPAGESIZE,
- FRAG_SZ2 = MJUMPAGESIZE,
-};
-#define MLX4_EN_MAX_RX_FRAGS 3
-#elif MJUMPAGESIZE == 8192
-enum {
- FRAG_SZ0 = MCLBYTES,
- FRAG_SZ1 = MJUMPAGESIZE,
-};
-#define MLX4_EN_MAX_RX_FRAGS 2
-#elif MJUMPAGESIZE == 8192
-#else
-#error "Unknown PAGE_SIZE"
-#endif
-
/* Maximum ring sizes */
#define MLX4_EN_DEF_TX_QUEUE_SIZE 4096
@@ -154,7 +128,7 @@ enum {
#define MLX4_EN_NUM_UP 1
#define MAX_TX_RINGS (MLX4_EN_MAX_TX_RING_P_UP * \
- MLX4_EN_NUM_UP)
+ MLX4_EN_NUM_UP)
#define MLX4_EN_DEF_TX_RING_SIZE 1024
#define MLX4_EN_DEF_RX_RING_SIZE 1024
@@ -235,16 +209,10 @@ enum cq_type {
#define ILLEGAL_MAC(addr) (addr == 0xffffffffffffULL || addr == 0x0)
struct mlx4_en_tx_info {
+ bus_dmamap_t dma_map;
struct mbuf *mb;
u32 nr_txbb;
u32 nr_bytes;
- u8 linear;
- u8 nr_segs;
- u8 data_offset;
- u8 inl;
-#if 0
- u8 ts_requested;
-#endif
};
@@ -265,14 +233,22 @@ struct mlx4_en_tx_desc {
#define MLX4_EN_USE_SRQ 0x01000000
-#define MLX4_EN_TX_BUDGET 64*4 //Compensate for no NAPI in freeBSD - might need some fine tunning in the future.
#define MLX4_EN_RX_BUDGET 64
+#define MLX4_EN_TX_MAX_DESC_SIZE 512 /* bytes */
+#define MLX4_EN_TX_MAX_MBUF_SIZE 65536 /* bytes */
+#define MLX4_EN_TX_MAX_PAYLOAD_SIZE 65536 /* bytes */
+#define MLX4_EN_TX_MAX_MBUF_FRAGS \
+ ((MLX4_EN_TX_MAX_DESC_SIZE - 128) / DS_SIZE_ALIGNMENT) /* units */
+#define MLX4_EN_TX_WQE_MAX_WQEBBS \
+ (MLX4_EN_TX_MAX_DESC_SIZE / TXBB_SIZE) /* units */
+
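Worked numbers for the limits just added, assuming DS_SIZE_ALIGNMENT == 16 (defined in the qp.h hunk later in this diff) and the driver's usual 64-byte TXBB; reading the 128 reserved bytes as head room for the control and LSO segments is an inference, not something the diff states:

	MLX4_EN_TX_MAX_MBUF_FRAGS = (512 - 128) / 16 = 24 gather entries
	MLX4_EN_TX_WQE_MAX_WQEBBS =  512 / 64        =  8 TX basic blocks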
#define MLX4_EN_CX3_LOW_ID 0x1000
#define MLX4_EN_CX3_HIGH_ID 0x1005
struct mlx4_en_tx_ring {
spinlock_t tx_lock;
+ bus_dma_tag_t dma_tag;
struct mlx4_hwq_resources wqres;
u32 size ; /* number of TXBBs */
u32 size_mask;
@@ -282,11 +258,10 @@ struct mlx4_en_tx_ring {
u32 cons;
u32 buf_size;
u32 doorbell_qpn;
- void *buf;
+ u8 *buf;
u16 poll_cnt;
int blocked;
struct mlx4_en_tx_info *tx_info;
- u8 *bounce_buf;
u8 queue_index;
cpuset_t affinity_mask;
struct buf_ring *br;
@@ -300,13 +275,12 @@ struct mlx4_en_tx_ring {
unsigned long packets;
unsigned long tx_csum;
unsigned long queue_stopped;
+ unsigned long oversized_packets;
unsigned long wake_queue;
struct mlx4_bf bf;
bool bf_enabled;
- struct netdev_queue *tx_queue;
int hwtstamp_tx_type;
spinlock_t comp_lock;
- int full_size;
int inline_thold;
u64 watchdog_time;
};
@@ -316,14 +290,21 @@ struct mlx4_en_rx_desc {
struct mlx4_wqe_data_seg data[0];
};
-struct mlx4_en_rx_buf {
- dma_addr_t dma;
- struct page *page;
- unsigned int page_offset;
+struct mlx4_en_rx_mbuf {
+ bus_dmamap_t dma_map;
+ struct mbuf *mbuf;
+};
+
+struct mlx4_en_rx_spare {
+ bus_dmamap_t dma_map;
+ struct mbuf *mbuf;
+ u64 paddr_be;
};
struct mlx4_en_rx_ring {
struct mlx4_hwq_resources wqres;
+ bus_dma_tag_t dma_tag;
+ struct mlx4_en_rx_spare spare;
u32 size ; /* number of Rx descs*/
u32 actual_size;
u32 size_mask;
@@ -339,8 +320,8 @@ struct mlx4_en_rx_ring {
u32 rx_buf_size;
u32 rx_mb_size;
int qpn;
- void *buf;
- void *rx_info;
+ u8 *buf;
+ struct mlx4_en_rx_mbuf *mbuf;
unsigned long errors;
unsigned long bytes;
unsigned long packets;
@@ -400,6 +381,7 @@ struct mlx4_en_cq {
#define MLX4_EN_OPCODE_ERROR 0x1e
u32 tot_rx;
u32 tot_tx;
+ u32 curr_poll_rx_cpu_id;
#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int state;
@@ -524,12 +506,6 @@ struct en_port {
u8 vport_num;
};
-struct mlx4_en_frag_info {
- u16 frag_size;
- u16 frag_prefix_size;
-};
-
-
struct mlx4_en_priv {
struct mlx4_en_dev *mdev;
struct mlx4_en_port_profile *prof;
@@ -575,18 +551,14 @@ struct mlx4_en_priv {
int cqe_factor;
struct mlx4_en_rss_map rss_map;
- __be32 ctrl_flags;
u32 flags;
u8 num_tx_rings_p_up;
u32 tx_ring_num;
u32 rx_ring_num;
u32 rx_mb_size;
- struct mlx4_en_frag_info frag_info[MLX4_EN_MAX_RX_FRAGS];
u16 rx_alloc_order;
u32 rx_alloc_size;
u32 rx_buf_size;
- u16 num_frags;
- u16 log_rx_info;
struct mlx4_en_tx_ring **tx_ring;
struct mlx4_en_rx_ring *rx_ring[MAX_RX_RINGS];
@@ -640,7 +612,6 @@ struct mlx4_en_priv {
unsigned long last_ifq_jiffies;
u64 if_counters_rx_errors;
u64 if_counters_rx_no_buffer;
-
};
enum mlx4_en_wol {
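Taken together, the new fields (a dma_tag per ring, a dma_map per TX slot and per RX mbuf, the spare RX mbuf) describe a busdma-based mapping scheme replacing the old page/fragment bookkeeping. A hedged sketch of the tag a TX ring might create, using the new limits as segment constraints; every parameter choice here is illustrative rather than taken from the driver:

err = bus_dma_tag_create(
    bus_get_dma_tag(dev),            /* parent tag */
    1, 0,                            /* alignment, boundary */
    BUS_SPACE_MAXADDR,               /* lowaddr */
    BUS_SPACE_MAXADDR,               /* highaddr */
    NULL, NULL,                      /* filter, filterarg */
    MLX4_EN_TX_MAX_PAYLOAD_SIZE,     /* maxsize */
    MLX4_EN_TX_MAX_MBUF_FRAGS,       /* nsegments */
    MLX4_EN_TX_MAX_MBUF_SIZE,        /* maxsegsize */
    0, NULL, NULL,                   /* flags, lockfunc, lockarg */
    &ring->dma_tag);
if (err == 0)
	err = bus_dmamap_create(ring->dma_tag, 0,
	    &ring->tx_info[i].dma_map);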
diff --git a/sys/ofed/drivers/net/mlx4/mlx4_stats.h b/sys/ofed/drivers/net/mlx4/mlx4_stats.h
index 0270cef..ef0b1c0 100644
--- a/sys/ofed/drivers/net/mlx4/mlx4_stats.h
+++ b/sys/ofed/drivers/net/mlx4/mlx4_stats.h
@@ -124,6 +124,7 @@ struct mlx4_en_port_stats {
unsigned long queue_stopped;
unsigned long wake_queue;
unsigned long tx_timeout;
+ unsigned long oversized_packets;
unsigned long rx_alloc_failed;
unsigned long rx_chksum_good;
unsigned long rx_chksum_none;
diff --git a/sys/ofed/drivers/net/mlx4/port.c b/sys/ofed/drivers/net/mlx4/port.c
index 4d7aa0b..9ef63f7 100644
--- a/sys/ofed/drivers/net/mlx4/port.c
+++ b/sys/ofed/drivers/net/mlx4/port.c
@@ -1145,12 +1145,17 @@ int mlx4_get_module_info(struct mlx4_dev *dev, u8 port, u16 offset,
size = MODULE_INFO_MAX_READ;
inbox = mlx4_alloc_cmd_mailbox(dev);
- if (IS_ERR(inbox))
+ if (IS_ERR(inbox)) {
+ mlx4_err(dev,
+ "mlx4_alloc_cmd_mailbox returned with error(%lx)", PTR_ERR(inbox));
return PTR_ERR(inbox);
+ }
outbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(outbox)) {
mlx4_free_cmd_mailbox(dev, inbox);
+ mlx4_err(dev,
+ "mlx4_alloc_cmd_mailbox returned with error(%lx)", PTR_ERR(outbox));
return PTR_ERR(outbox);
}
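The added logging follows the encoded-error-pointer convention: mlx4_alloc_cmd_mailbox() returns either a valid mailbox or a negative errno folded into the pointer, which IS_ERR() detects and PTR_ERR() extracts. In miniature:

mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
	return PTR_ERR(mailbox);	/* negative errno, e.g. -ENOMEM */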
diff --git a/sys/ofed/include/linux/compat.h b/sys/ofed/include/linux/compat.h
index a8929f3..6d10067 100644
--- a/sys/ofed/include/linux/compat.h
+++ b/sys/ofed/include/linux/compat.h
@@ -30,8 +30,4 @@
#ifndef _LINUX_COMPAT_H_
#define _LINUX_COMPAT_H_
-#define is_multicast_ether_addr(x) 0
-#define is_broadcast_ether_addr(x) 0
-
-
#endif /* _LINUX_COMPAT_H_ */
diff --git a/sys/ofed/include/linux/etherdevice.h b/sys/ofed/include/linux/etherdevice.h
index d863651..c50dc5d 100644
--- a/sys/ofed/include/linux/etherdevice.h
+++ b/sys/ofed/include/linux/etherdevice.h
@@ -37,6 +37,25 @@
#include <linux/types.h>
+#define ETH_MODULE_SFF_8079 1
+#define ETH_MODULE_SFF_8079_LEN 256
+#define ETH_MODULE_SFF_8472 2
+#define ETH_MODULE_SFF_8472_LEN 512
+#define ETH_MODULE_SFF_8636 3
+#define ETH_MODULE_SFF_8636_LEN 256
+#define ETH_MODULE_SFF_8436 4
+#define ETH_MODULE_SFF_8436_LEN 256
+
+struct ethtool_eeprom {
+ u32 offset;
+ u32 len;
+};
+
+struct ethtool_modinfo {
+ u32 type;
+ u32 eeprom_len;
+};
+
/**
* is_zero_ether_addr - Determine if the given Ethernet address is all zeros.
* @addr: Pointer to a six-byte array containing the Ethernet address
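A small helper showing how the SFF constants pair a module type with its EEPROM length; ex_module_eeprom_len() is hypothetical, written only to illustrate the intended use of the values above:

static u32
ex_module_eeprom_len(u32 type)
{
	switch (type) {
	case ETH_MODULE_SFF_8079: return (ETH_MODULE_SFF_8079_LEN);
	case ETH_MODULE_SFF_8472: return (ETH_MODULE_SFF_8472_LEN);
	case ETH_MODULE_SFF_8636: return (ETH_MODULE_SFF_8636_LEN);
	case ETH_MODULE_SFF_8436: return (ETH_MODULE_SFF_8436_LEN);
	default:                  return (0);
	}
}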
diff --git a/sys/ofed/include/linux/interrupt.h b/sys/ofed/include/linux/interrupt.h
index d97d6a9..4c4b008 100644
--- a/sys/ofed/include/linux/interrupt.h
+++ b/sys/ofed/include/linux/interrupt.h
@@ -2,7 +2,7 @@
* Copyright (c) 2010 Isilon Systems, Inc.
* Copyright (c) 2010 iX Systems, Inc.
* Copyright (c) 2010 Panasas, Inc.
- * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * Copyright (c) 2013-2015 Mellanox Technologies, Ltd.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -117,6 +117,23 @@ request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
return 0;
}
+static inline int
+bind_irq_to_cpu(unsigned int irq, int cpu_id)
+{
+ struct irq_ent *irqe;
+ struct device *dev;
+
+ dev = _pci_find_irq_dev(irq);
+ if (dev == NULL)
+ return (-ENOENT);
+
+ irqe = _irq_ent(dev, irq);
+ if (irqe == NULL)
+ return (-ENOENT);
+
+ return (-bus_bind_intr(dev->bsddev, irqe->res, cpu_id));
+}
+
static inline void
free_irq(unsigned int irq, void *device)
{
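bind_irq_to_cpu() resolves the Linux-style IRQ number back to the owning FreeBSD device and interrupt resource, then delegates to bus_bind_intr(); the negation keeps the Linux convention of negative-errno returns. A usage sketch (the vector and CPU choice are illustrative):

int error;

/* Pin each ring's interrupt near the CPU that services the ring. */
error = bind_irq_to_cpu(irq, ring_index % mp_ncpus);
if (error != 0)
	printf("could not bind irq %u: %d\n", irq, -error);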
diff --git a/sys/ofed/include/linux/mlx4/cq.h b/sys/ofed/include/linux/mlx4/cq.h
index ed6bec1..b39910a 100644
--- a/sys/ofed/include/linux/mlx4/cq.h
+++ b/sys/ofed/include/linux/mlx4/cq.h
@@ -139,7 +139,7 @@ enum {
};
static inline void mlx4_cq_arm(struct mlx4_cq *cq, u32 cmd,
- void __iomem *uar_page,
+ u8 __iomem *uar_page,
spinlock_t *doorbell_lock)
{
__be32 doorbell[2];
diff --git a/sys/ofed/include/linux/mlx4/device.h b/sys/ofed/include/linux/mlx4/device.h
index 536c421..aca4aef 100644
--- a/sys/ofed/include/linux/mlx4/device.h
+++ b/sys/ofed/include/linux/mlx4/device.h
@@ -413,6 +413,13 @@ enum {
#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK)
+enum mlx4_module_id {
+ MLX4_MODULE_ID_SFP = 0x3,
+ MLX4_MODULE_ID_QSFP = 0xC,
+ MLX4_MODULE_ID_QSFP_PLUS = 0xD,
+ MLX4_MODULE_ID_QSFP28 = 0x11,
+};
+
static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
{
return (major << 32) | (minor << 16) | subminor;
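These module IDs are the identifier byte read from the cable's EEPROM; a consumer would typically translate them into one of the SFF layouts declared in the etherdevice.h hunk earlier in this diff. The mapping below is an assumption about that pairing, not code from this tree:

switch (module_id) {
case MLX4_MODULE_ID_SFP:
	modinfo->type = ETH_MODULE_SFF_8472;
	modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
	break;
case MLX4_MODULE_ID_QSFP:
case MLX4_MODULE_ID_QSFP_PLUS:
	modinfo->type = ETH_MODULE_SFF_8436;
	modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
	break;
case MLX4_MODULE_ID_QSFP28:
	modinfo->type = ETH_MODULE_SFF_8636;
	modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
	break;
}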
diff --git a/sys/ofed/include/linux/mlx4/doorbell.h b/sys/ofed/include/linux/mlx4/doorbell.h
index f31bba2..6724e5e 100644
--- a/sys/ofed/include/linux/mlx4/doorbell.h
+++ b/sys/ofed/include/linux/mlx4/doorbell.h
@@ -77,7 +77,7 @@ static inline void mlx4_write64(__be32 val[2], void __iomem *dest,
spin_lock_irqsave(doorbell_lock, flags);
__raw_writel((__force u32) val[0], dest);
- __raw_writel((__force u32) val[1], dest + 4);
+ __raw_writel((__force u32) val[1], (u8 *)dest + 4);
spin_unlock_irqrestore(doorbell_lock, flags);
}
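Two details in mlx4_write64() are worth noting: the spinlock makes the pair of 32-bit stores appear atomic to the device on platforms without a native 64-bit MMIO write, and the new (u8 *) cast makes the +4 byte offset well-defined C, since arithmetic on void * is a GCC extension. Reduced:

spin_lock_irqsave(doorbell_lock, flags);
__raw_writel((__force u32) val[0], dest);              /* low word  */
__raw_writel((__force u32) val[1], (u8 *)dest + 4);    /* high word */
spin_unlock_irqrestore(doorbell_lock, flags);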
diff --git a/sys/ofed/include/linux/mlx4/qp.h b/sys/ofed/include/linux/mlx4/qp.h
index 9b78167..c8b6cb1 100644
--- a/sys/ofed/include/linux/mlx4/qp.h
+++ b/sys/ofed/include/linux/mlx4/qp.h
@@ -39,6 +39,12 @@
#define MLX4_INVALID_LKEY 0x100
+#define DS_SIZE_ALIGNMENT 16
+
+#define SET_BYTE_COUNT(byte_count) cpu_to_be32(byte_count)
+#define SET_LSO_MSS(mss_hdr_size) cpu_to_be32(mss_hdr_size)
+#define DS_BYTE_COUNT_MASK cpu_to_be32(0x7fffffff)
+
enum ib_m_qp_attr_mask {
IB_M_EXT_CLASS_1 = 1 << 28,
IB_M_EXT_CLASS_2 = 1 << 29,
@@ -266,7 +272,9 @@ enum { /* param3 */
#define MLX4_FW_VER_WQE_CTRL_NEC mlx4_fw_ver(2, 2, 232)
enum {
+ MLX4_WQE_CTRL_OWN = 1 << 31,
MLX4_WQE_CTRL_NEC = 1 << 29,
+ MLX4_WQE_CTRL_RR = 1 << 6,
MLX4_WQE_CTRL_FENCE = 1 << 6,
MLX4_WQE_CTRL_CQ_UPDATE = 3 << 2,
MLX4_WQE_CTRL_SOLICITED = 1 << 1,
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 156d11a..4a5d2c7 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -728,32 +728,33 @@ vm_pageout_grow_cache(int tries, vm_paddr_t low, vm_paddr_t high)
* the specified address range, as indicated by segments
* constituting the domain.
*/
-again:
+again_inact:
if (inactl < inactmax) {
if (vm_phys_domain_intersects(vm_dom[dom].vmd_segs,
low, high) &&
vm_pageout_launder(&vm_dom[dom].vmd_pagequeues[PQ_INACTIVE],
tries, low, high)) {
inactl++;
- goto again;
+ goto again_inact;
}
if (++dom == vm_ndomains)
dom = 0;
if (dom != initial_dom)
- goto again;
+ goto again_inact;
}
+again_act:
if (actl < actmax) {
if (vm_phys_domain_intersects(vm_dom[dom].vmd_segs,
low, high) &&
vm_pageout_launder(&vm_dom[dom].vmd_pagequeues[PQ_ACTIVE],
tries, low, high)) {
actl++;
- goto again;
+ goto again_act;
}
if (++dom == vm_ndomains)
dom = 0;
if (dom != initial_dom)
- goto again;
+ goto again_act;
}
}
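The rename from a single shared again label to again_inact/again_act fixes a retry bug: a successful launder of the active queue previously jumped back to the inactive-queue test and could re-run that entire phase. A toy reduction of the corrected control flow, with the laundering calls abstracted into hypothetical predicates:

static int ex_launder_inactive(void);	/* hypothetical */
static int ex_launder_active(void);	/* hypothetical */

static void
ex_two_phase(int *inactl, int inactmax, int *actl, int actmax)
{
again_inact:
	if (*inactl < inactmax && ex_launder_inactive()) {
		(*inactl)++;
		goto again_inact;	/* retries this phase only */
	}
again_act:
	if (*actl < actmax && ex_launder_active()) {
		(*actl)++;
		goto again_act;		/* never re-enters the first phase */
	}
}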