path: root/sys/dev/mlx5
author      hselasky <hselasky@FreeBSD.org>  2016-09-23 08:17:51 +0000
committer   hselasky <hselasky@FreeBSD.org>  2016-09-23 08:17:51 +0000
commit      c09479e02ec36fee071c973614c3aff982640196 (patch)
tree        20197bbaf460d7bed237ae57b4adac1a04996cac /sys/dev/mlx5
parent      2db9387ac81b5c39996bd69f4d8e7d8d8ffcc4f5 (diff)
download    FreeBSD-src-c09479e02ec36fee071c973614c3aff982640196.zip
            FreeBSD-src-c09479e02ec36fee071c973614c3aff982640196.tar.gz
MFC r305867:
Update the MLX5 core module:
- Add new firmware commands and update existing ones.
- Add more firmware related structures and update existing ones.
- Some minor fixes, like adding missing \n to some prints.

Sponsored by:   Mellanox Technologies
Diffstat (limited to 'sys/dev/mlx5')
-rw-r--r--  sys/dev/mlx5/cq.h                          |   2
-rw-r--r--  sys/dev/mlx5/device.h                      | 160
-rw-r--r--  sys/dev/mlx5/driver.h                      | 130
-rw-r--r--  sys/dev/mlx5/flow_table.h                  |   8
-rw-r--r--  sys/dev/mlx5/mlx5_core/mlx5_cmd.c          |  11
-rw-r--r--  sys/dev/mlx5/mlx5_core/mlx5_core.h         |   2
-rw-r--r--  sys/dev/mlx5/mlx5_core/mlx5_cq.c           |  12
-rw-r--r--  sys/dev/mlx5/mlx5_core/mlx5_eq.c           |  51
-rw-r--r--  sys/dev/mlx5/mlx5_core/mlx5_eswitch_vacl.c |  20
-rw-r--r--  sys/dev/mlx5/mlx5_core/mlx5_flow_table.c   |  57
-rw-r--r--  sys/dev/mlx5/mlx5_core/mlx5_fw.c           |  72
-rw-r--r--  sys/dev/mlx5/mlx5_core/mlx5_main.c         |  35
-rw-r--r--  sys/dev/mlx5/mlx5_core/mlx5_mr.c           |  21
-rw-r--r--  sys/dev/mlx5/mlx5_core/mlx5_port.c         | 140
-rw-r--r--  sys/dev/mlx5/mlx5_core/mlx5_qp.c           | 319
-rw-r--r--  sys/dev/mlx5/mlx5_core/mlx5_srq.c          |   2
-rw-r--r--  sys/dev/mlx5/mlx5_core/mlx5_transobj.c     |  24
-rw-r--r--  sys/dev/mlx5/mlx5_core/mlx5_uar.c          |   5
-rw-r--r--  sys/dev/mlx5/mlx5_core/mlx5_vport.c        | 556
-rw-r--r--  sys/dev/mlx5/mlx5_core/transobj.h          |   2
-rw-r--r--  sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c  |  10
-rw-r--r--  sys/dev/mlx5/mlx5_en/mlx5_en_main.c        |  15
-rw-r--r--  sys/dev/mlx5/mlx5_ifc.h                    | 741
-rw-r--r--  sys/dev/mlx5/qp.h                          | 131
-rw-r--r--  sys/dev/mlx5/vport.h                       |  74
25 files changed, 2198 insertions, 402 deletions
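
The new commands added by this patch (QUERY_RQ/QUERY_SQ, SET_DIAGNOSTICS/
QUERY_DIAGNOSTICS, SET_FLOW_TABLE_ROOT and the DCT family) all follow the
same mailbox pattern. A minimal sketch of that pattern, where the
"example_cmd_in"/"example_cmd_out" layouts, "some_field" and
MLX5_CMD_OP_EXAMPLE are placeholders rather than real mlx5_ifc.h names:

    /* Illustrative only: the layout names and opcode are hypothetical. */
    static int mlx5_core_example_cmd(struct mlx5_core_dev *dev, u32 arg)
    {
            u32 in[MLX5_ST_SZ_DW(example_cmd_in)];  /* sized in dwords */
            u32 out[MLX5_ST_SZ_DW(example_cmd_out)];

            memset(in, 0, sizeof(in));
            memset(out, 0, sizeof(out));

            /* MLX5_SET() stores a field into the big-endian mailbox. */
            MLX5_SET(example_cmd_in, in, opcode, MLX5_CMD_OP_EXAMPLE);
            MLX5_SET(example_cmd_in, in, some_field, arg);

            /* Executes the command and turns a bad status into an errno. */
            return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
                out, sizeof(out));
    }
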
diff --git a/sys/dev/mlx5/cq.h b/sys/dev/mlx5/cq.h
index 92fd8ba..85e4846 100644
--- a/sys/dev/mlx5/cq.h
+++ b/sys/dev/mlx5/cq.h
@@ -49,6 +49,8 @@ struct mlx5_core_cq {
unsigned arm_sn;
struct mlx5_rsc_debug *dbg;
int pid;
+ int reset_notify_added;
+ struct list_head reset_notify;
};
diff --git a/sys/dev/mlx5/device.h b/sys/dev/mlx5/device.h
index 88ca731..60c8e50 100644
--- a/sys/dev/mlx5/device.h
+++ b/sys/dev/mlx5/device.h
@@ -57,6 +57,7 @@
#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
+#define MLX5_ST_SZ_QW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 64)
#define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
#define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
@@ -112,6 +113,10 @@ enum {
};
enum {
+ MLX5_CQ_FLAGS_OI = 2,
+};
+
+enum {
MLX5_STAT_RATE_OFFSET = 5,
};
@@ -129,6 +134,10 @@ enum {
};
enum {
+ MLX5_MKEY_INBOX_PG_ACCESS = 1 << 31
+};
+
+enum {
MLX5_PERM_LOCAL_READ = 1 << 2,
MLX5_PERM_LOCAL_WRITE = 1 << 3,
MLX5_PERM_REMOTE_READ = 1 << 4,
@@ -184,6 +193,25 @@ enum {
};
enum {
+ MLX5_UMR_TRANSLATION_OFFSET_EN = (1 << 4),
+
+ MLX5_UMR_CHECK_NOT_FREE = (1 << 5),
+ MLX5_UMR_CHECK_FREE = (2 << 5),
+
+ MLX5_UMR_INLINE = (1 << 7),
+};
+
+#define MLX5_UMR_MTT_ALIGNMENT 0x40
+#define MLX5_UMR_MTT_MASK (MLX5_UMR_MTT_ALIGNMENT - 1)
+#define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT
+
+enum {
+ MLX5_EVENT_QUEUE_TYPE_QP = 0,
+ MLX5_EVENT_QUEUE_TYPE_RQ = 1,
+ MLX5_EVENT_QUEUE_TYPE_SQ = 2,
+};
+
+enum {
MLX5_PORT_CHANGE_SUBTYPE_DOWN = 1,
MLX5_PORT_CHANGE_SUBTYPE_ACTIVE = 4,
MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED = 5,
@@ -194,19 +222,28 @@ enum {
};
enum {
+ MLX5_DCBX_EVENT_SUBTYPE_ERROR_STATE_DCBX = 1,
+ MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_CHANGE,
+ MLX5_DCBX_EVENT_SUBTYPE_LOCAL_OPER_CHANGE,
+ MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_APP_PRIORITY_CHANGE,
+ MLX5_MAX_INLINE_RECEIVE_SIZE = 64
+};
+
+enum {
MLX5_DEV_CAP_FLAG_XRC = 1LL << 3,
MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8,
MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9,
MLX5_DEV_CAP_FLAG_APM = 1LL << 17,
- MLX5_DEV_CAP_FLAG_ATOMIC = 1LL << 18,
MLX5_DEV_CAP_FLAG_SCQE_BRK_MOD = 1LL << 21,
MLX5_DEV_CAP_FLAG_BLOCK_MCAST = 1LL << 23,
MLX5_DEV_CAP_FLAG_CQ_MODER = 1LL << 29,
MLX5_DEV_CAP_FLAG_RESIZE_CQ = 1LL << 30,
+ MLX5_DEV_CAP_FLAG_ATOMIC = 1LL << 33,
MLX5_DEV_CAP_FLAG_ROCE = 1LL << 34,
MLX5_DEV_CAP_FLAG_DCT = 1LL << 37,
MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40,
MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 3LL << 46,
+ MLX5_DEV_CAP_FLAG_DRAIN_SIGERR = 1LL << 48,
};
enum {
@@ -263,6 +300,7 @@ enum {
MLX5_OPCODE_UMR = 0x25,
+ MLX5_OPCODE_SIGNATURE_CANCELED = (1 << 15),
};
enum {
@@ -299,6 +337,18 @@ struct mlx5_outbox_hdr {
__be32 syndrome;
};
+struct mlx5_cmd_set_dc_cnak_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ u8 enable;
+ u8 reserved[47];
+ __be64 pa;
+};
+
+struct mlx5_cmd_set_dc_cnak_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd[8];
+};
+
struct mlx5_cmd_layout {
u8 type;
u8 rsvd0[3];
@@ -339,9 +389,12 @@ struct mlx5_init_seg {
__be32 rsvd1[120];
__be32 initializing;
struct mlx5_health_buffer health;
- __be32 rsvd2[884];
+ __be32 rsvd2[880];
+ __be32 internal_timer_h;
+ __be32 internal_timer_l;
+ __be32 rsvd3[2];
__be32 health_counter;
- __be32 rsvd3[1019];
+ __be32 rsvd4[1019];
__be64 ieee1588_clk;
__be32 ieee1588_clk_type;
__be32 clr_intx;
@@ -420,6 +473,7 @@ enum {
MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST = 0x4,
MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER = 0x5,
MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE = 0x6,
+ MLX5_MODULE_EVENT_ERROR_CABLE_IS_SHORTED = 0x7,
};
struct mlx5_eqe_port_module_event {
@@ -832,6 +886,10 @@ struct mlx5_query_eq_mbox_out {
struct mlx5_eq_context ctx;
};
+enum {
+ MLX5_MKEY_STATUS_FREE = 1 << 6,
+};
+
struct mlx5_mkey_seg {
/* This is a two bit field occupying bits 31-30.
* bit 31 is always 0,
@@ -868,7 +926,7 @@ struct mlx5_query_special_ctxs_mbox_out {
struct mlx5_create_mkey_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 input_mkey_index;
- u8 rsvd0[4];
+ __be32 flags;
struct mlx5_mkey_seg seg;
u8 rsvd1[16];
__be32 xlat_oct_act_size;
@@ -971,6 +1029,17 @@ struct mlx5_destroy_psv_out {
u8 rsvd[8];
};
+static inline int mlx5_host_is_le(void)
+{
+#if defined(__LITTLE_ENDIAN)
+ return 1;
+#elif defined(__BIG_ENDIAN)
+ return 0;
+#else
+#error Host endianness not defined
+#endif
+}
+
#define MLX5_CMD_OP_MAX 0x939
enum {
@@ -1008,6 +1077,8 @@ enum {
MLX5_FLOW_TABLE_TYPE_EGRESS_ACL = 2,
MLX5_FLOW_TABLE_TYPE_INGRESS_ACL = 3,
MLX5_FLOW_TABLE_TYPE_ESWITCH = 4,
+ MLX5_FLOW_TABLE_TYPE_SNIFFER_RX = 5,
+ MLX5_FLOW_TABLE_TYPE_SNIFFER_TX = 6,
};
enum {
@@ -1062,6 +1133,10 @@ enum mlx5_cap_type {
MLX5_CAP_FLOW_TABLE,
MLX5_CAP_ESWITCH_FLOW_TABLE,
MLX5_CAP_ESWITCH,
+ MLX5_CAP_SNAPSHOT,
+ MLX5_CAP_VECTOR_CALC,
+ MLX5_CAP_QOS,
+ MLX5_CAP_DEBUG,
/* NUM OF CAP Types */
MLX5_CAP_NUM
};
@@ -1107,21 +1182,23 @@ enum mlx5_cap_type {
MLX5_GET(flow_table_eswitch_cap, \
mdev->hca_caps_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
-#define MLX5_CAP_ESW_FLOWTABLE_EGRESS_ACL(mdev, cap) \
- MLX5_CAP_ESW_FLOWTABLE(dev, \
- flow_table_properties_esw_acl_egress.cap)
+#define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \
+ MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap)
+
+#define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \
+ MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap)
-#define MLX5_CAP_ESW_FLOWTABLE_EGRESS_ACL_MAX(mdev, cap) \
- MLX5_CAP_ESW_FLOWTABLE_MAX(dev, \
- flow_table_properties_esw_acl_egress.cap)
+#define MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) \
+ MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_egress.cap)
-#define MLX5_CAP_ESW_FLOWTABLE_INGRESS_ACL(mdev, cap) \
- MLX5_CAP_ESW_FLOWTABLE(dev, \
- flow_table_properties_esw_acl_ingress.cap)
+#define MLX5_CAP_ESW_EGRESS_ACL_MAX(mdev, cap) \
+ MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_egress.cap)
-#define MLX5_CAP_ESW_FLOWTABLE_INGRESS_ACL_MAX(mdev, cap) \
- MLX5_CAP_ESW_FLOWTABLE_MAX(dev, \
- flow_table_properties_esw_acl_ingress.cap)
+#define MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) \
+ MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_ingress.cap)
+
+#define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \
+ MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap)
#define MLX5_CAP_ESW(mdev, cap) \
MLX5_GET(e_switch_cap, \
@@ -1137,6 +1214,38 @@ enum mlx5_cap_type {
#define MLX5_CAP_ODP_MAX(mdev, cap)\
MLX5_GET(odp_cap, mdev->hca_caps_max[MLX5_CAP_ODP], cap)
+#define MLX5_CAP_SNAPSHOT(mdev, cap) \
+ MLX5_GET(snapshot_cap, \
+ mdev->hca_caps_cur[MLX5_CAP_SNAPSHOT], cap)
+
+#define MLX5_CAP_SNAPSHOT_MAX(mdev, cap) \
+ MLX5_GET(snapshot_cap, \
+ mdev->hca_caps_max[MLX5_CAP_SNAPSHOT], cap)
+
+#define MLX5_CAP_EOIB_OFFLOADS(mdev, cap) \
+ MLX5_GET(per_protocol_networking_offload_caps,\
+ mdev->hca_caps_cur[MLX5_CAP_EOIB_OFFLOADS], cap)
+
+#define MLX5_CAP_EOIB_OFFLOADS_MAX(mdev, cap) \
+ MLX5_GET(per_protocol_networking_offload_caps,\
+ mdev->hca_caps_max[MLX5_CAP_EOIB_OFFLOADS], cap)
+
+#define MLX5_CAP_DEBUG(mdev, cap) \
+ MLX5_GET(debug_cap, \
+ mdev->hca_caps_cur[MLX5_CAP_DEBUG], cap)
+
+#define MLX5_CAP_DEBUG_MAX(mdev, cap) \
+ MLX5_GET(debug_cap, \
+ mdev->hca_caps_max[MLX5_CAP_DEBUG], cap)
+
+#define MLX5_CAP_QOS(mdev, cap) \
+ MLX5_GET(qos_cap,\
+ mdev->hca_caps_cur[MLX5_CAP_QOS], cap)
+
+#define MLX5_CAP_QOS_MAX(mdev, cap) \
+ MLX5_GET(qos_cap,\
+ mdev->hca_caps_max[MLX5_CAP_QOS], cap)
+
enum {
MLX5_CMD_STAT_OK = 0x0,
MLX5_CMD_STAT_INT_ERR = 0x1,
@@ -1168,6 +1277,22 @@ enum {
};
enum {
+ MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP = 0x0,
+ MLX5_PCIE_LANE_COUNTERS_GROUP = 0x1,
+ MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP = 0x2,
+};
+
+enum {
+ MLX5_NUM_UUARS_PER_PAGE = MLX5_NON_FP_BF_REGS_PER_PAGE,
+ MLX5_DEF_TOT_UUARS = 8 * MLX5_NUM_UUARS_PER_PAGE,
+};
+
+enum {
+ NUM_DRIVER_UARS = 4,
+ NUM_LOW_LAT_UUARS = 4,
+};
+
+enum {
MLX5_CAP_PORT_TYPE_IB = 0x0,
MLX5_CAP_PORT_TYPE_ETH = 0x1,
};
@@ -1252,4 +1377,7 @@ static inline int mlx5_get_cqe_format(const struct mlx5_cqe64 *cqe)
return (cqe->op_own & MLX5E_CQE_FORMAT_MASK) >> 2;
}
+/* 8 regular priorities + 1 for multicast */
+#define MLX5_NUM_BYPASS_FTS 9
+
#endif /* MLX5_DEVICE_H */
diff --git a/sys/dev/mlx5/driver.h b/sys/dev/mlx5/driver.h
index 7f64850..f421636 100644
--- a/sys/dev/mlx5/driver.h
+++ b/sys/dev/mlx5/driver.h
@@ -42,16 +42,15 @@
#include <dev/mlx5/device.h>
#include <dev/mlx5/doorbell.h>
+#define MLX5_QCOUNTER_SETS_NETDEV 64
+
enum {
MLX5_BOARD_ID_LEN = 64,
MLX5_MAX_NAME_LEN = 16,
};
enum {
- /* one minute for the sake of bringup. Generally, commands must always
- * complete and we may need to increase this timeout value
- */
- MLX5_CMD_TIMEOUT_MSEC = 7200 * 1000,
+ MLX5_CMD_TIMEOUT_MSEC = 8 * 60 * 1000,
MLX5_CMD_WQ_MAX_NAME = 32,
};
@@ -85,20 +84,44 @@ enum {
};
enum {
- MLX5_ATOMIC_MODE_IB_COMP = 1 << 16,
- MLX5_ATOMIC_MODE_CX = 2 << 16,
- MLX5_ATOMIC_MODE_8B = 3 << 16,
- MLX5_ATOMIC_MODE_16B = 4 << 16,
- MLX5_ATOMIC_MODE_32B = 5 << 16,
- MLX5_ATOMIC_MODE_64B = 6 << 16,
- MLX5_ATOMIC_MODE_128B = 7 << 16,
- MLX5_ATOMIC_MODE_256B = 8 << 16,
+ MLX5_ATOMIC_MODE_OFF = 16,
+ MLX5_ATOMIC_MODE_NONE = 0 << MLX5_ATOMIC_MODE_OFF,
+ MLX5_ATOMIC_MODE_IB_COMP = 1 << MLX5_ATOMIC_MODE_OFF,
+ MLX5_ATOMIC_MODE_CX = 2 << MLX5_ATOMIC_MODE_OFF,
+ MLX5_ATOMIC_MODE_8B = 3 << MLX5_ATOMIC_MODE_OFF,
+ MLX5_ATOMIC_MODE_16B = 4 << MLX5_ATOMIC_MODE_OFF,
+ MLX5_ATOMIC_MODE_32B = 5 << MLX5_ATOMIC_MODE_OFF,
+ MLX5_ATOMIC_MODE_64B = 6 << MLX5_ATOMIC_MODE_OFF,
+ MLX5_ATOMIC_MODE_128B = 7 << MLX5_ATOMIC_MODE_OFF,
+ MLX5_ATOMIC_MODE_256B = 8 << MLX5_ATOMIC_MODE_OFF,
+};
+
+enum {
+ MLX5_ATOMIC_MODE_DCT_OFF = 20,
+ MLX5_ATOMIC_MODE_DCT_NONE = 0 << MLX5_ATOMIC_MODE_DCT_OFF,
+ MLX5_ATOMIC_MODE_DCT_IB_COMP = 1 << MLX5_ATOMIC_MODE_DCT_OFF,
+ MLX5_ATOMIC_MODE_DCT_CX = 2 << MLX5_ATOMIC_MODE_DCT_OFF,
+ MLX5_ATOMIC_MODE_DCT_8B = 3 << MLX5_ATOMIC_MODE_DCT_OFF,
+ MLX5_ATOMIC_MODE_DCT_16B = 4 << MLX5_ATOMIC_MODE_DCT_OFF,
+ MLX5_ATOMIC_MODE_DCT_32B = 5 << MLX5_ATOMIC_MODE_DCT_OFF,
+ MLX5_ATOMIC_MODE_DCT_64B = 6 << MLX5_ATOMIC_MODE_DCT_OFF,
+ MLX5_ATOMIC_MODE_DCT_128B = 7 << MLX5_ATOMIC_MODE_DCT_OFF,
+ MLX5_ATOMIC_MODE_DCT_256B = 8 << MLX5_ATOMIC_MODE_DCT_OFF,
+};
+
+enum {
+ MLX5_ATOMIC_OPS_CMP_SWAP = 1 << 0,
+ MLX5_ATOMIC_OPS_FETCH_ADD = 1 << 1,
+ MLX5_ATOMIC_OPS_MASKED_CMP_SWAP = 1 << 2,
+ MLX5_ATOMIC_OPS_MASKED_FETCH_ADD = 1 << 3,
};
enum {
MLX5_REG_QETCR = 0x4005,
MLX5_REG_QPDP = 0x4007,
MLX5_REG_QTCT = 0x400A,
+ MLX5_REG_DCBX_PARAM = 0x4020,
+ MLX5_REG_DCBX_APP = 0x4021,
MLX5_REG_PCAP = 0x5001,
MLX5_REG_PMTU = 0x5003,
MLX5_REG_PTYS = 0x5004,
@@ -116,6 +139,7 @@ enum {
MLX5_REG_NODE_DESC = 0x6001,
MLX5_REG_HOST_ENDIANNESS = 0x7004,
MLX5_REG_MCIA = 0x9014,
+ MLX5_REG_MPCNT = 0x9051,
};
enum dbg_rsc_type {
@@ -124,6 +148,12 @@ enum dbg_rsc_type {
MLX5_DBG_RSC_CQ,
};
+enum {
+ MLX5_INTERFACE_PROTOCOL_IB = 0,
+ MLX5_INTERFACE_PROTOCOL_ETH = 1,
+ MLX5_INTERFACE_NUMBER = 2,
+};
+
struct mlx5_field_desc {
struct dentry *dent;
int i;
@@ -147,6 +177,10 @@ enum mlx5_dev_event {
MLX5_DEV_EVENT_GUID_CHANGE,
MLX5_DEV_EVENT_CLIENT_REREG,
MLX5_DEV_EVENT_VPORT_CHANGE,
+ MLX5_DEV_EVENT_ERROR_STATE_DCBX,
+ MLX5_DEV_EVENT_REMOTE_CONFIG_CHANGE,
+ MLX5_DEV_EVENT_LOCAL_OPER_CHANGE,
+ MLX5_DEV_EVENT_REMOTE_CONFIG_APPLICATION_PRIORITY_CHANGE,
};
enum mlx5_port_status {
@@ -375,9 +409,12 @@ struct mlx5_core_mr {
};
enum mlx5_res_type {
- MLX5_RES_QP,
- MLX5_RES_SRQ,
- MLX5_RES_XSRQ,
+ MLX5_RES_QP = MLX5_EVENT_QUEUE_TYPE_QP,
+ MLX5_RES_RQ = MLX5_EVENT_QUEUE_TYPE_RQ,
+ MLX5_RES_SQ = MLX5_EVENT_QUEUE_TYPE_SQ,
+ MLX5_RES_SRQ = 3,
+ MLX5_RES_XSRQ = 4,
+ MLX5_RES_DCT = 5,
};
struct mlx5_core_rsc_common {
@@ -413,8 +450,6 @@ struct mlx5_eq_table {
struct mlx5_uar {
u32 index;
- struct list_head bf_list;
- unsigned free_bf_bmap;
void __iomem *bf_map;
void __iomem *map;
};
@@ -461,7 +496,7 @@ struct mlx5_srq_table {
struct mlx5_mr_table {
/* protect radix tree
*/
- rwlock_t lock;
+ spinlock_t lock;
struct radix_tree_root tree;
};
@@ -483,7 +518,7 @@ struct mlx5_priv {
struct workqueue_struct *pg_wq;
struct rb_root page_root;
int fw_pages;
- int reg_pages;
+ atomic_t reg_pages;
struct list_head free_list;
struct mlx5_core_health health;
@@ -521,6 +556,12 @@ struct mlx5_priv {
struct list_head dev_list;
struct list_head ctx_list;
spinlock_t ctx_lock;
+ unsigned long pci_dev_data;
+};
+
+enum mlx5_device_state {
+ MLX5_DEVICE_STATE_UP,
+ MLX5_DEVICE_STATE_INTERNAL_ERROR,
};
struct mlx5_special_contexts {
@@ -535,6 +576,7 @@ struct mlx5_core_dev {
u32 hca_caps_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
struct mlx5_init_seg __iomem *iseg;
+ enum mlx5_device_state state;
void (*event) (struct mlx5_core_dev *dev,
enum mlx5_dev_event event,
unsigned long param);
@@ -544,6 +586,7 @@ struct mlx5_core_dev {
u32 issi;
struct mlx5_special_contexts special_contexts;
unsigned int module_status[MLX5_MAX_PORTS];
+ u32 num_q_counter_allocated[MLX5_INTERFACE_NUMBER];
};
enum {
@@ -573,6 +616,8 @@ struct mlx5_net_counters {
};
struct mlx5_ptys_reg {
+ u8 an_dis_admin;
+ u8 an_dis_ap;
u8 local_port;
u8 proto_mask;
u32 eth_proto_cap;
@@ -620,6 +665,15 @@ enum {
MLX5_DB_PER_PAGE = PAGE_SIZE / L1_CACHE_BYTES,
};
+struct mlx5_core_dct {
+ struct mlx5_core_rsc_common common; /* must be first */
+ void (*event)(struct mlx5_core_dct *, int);
+ int dctn;
+ struct completion drained;
+ struct mlx5_rsc_debug *dbg;
+ int pid;
+};
+
enum {
MLX5_COMP_EQ_SIZE = 1024,
};
@@ -725,9 +779,14 @@ static inline void *mlx5_vzalloc(unsigned long size)
return rtn;
}
-static inline u32 mlx5_base_mkey(const u32 key)
+static inline void *mlx5_vmalloc(unsigned long size)
{
- return key & 0xffffff00u;
+ void *rtn;
+
+ rtn = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
+ if (!rtn)
+ rtn = vmalloc(size);
+ return rtn;
}
int mlx5_cmd_init(struct mlx5_core_dev *dev);
@@ -809,6 +868,8 @@ int mlx5_stop_eqs(struct mlx5_core_dev *dev);
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
+int mlx5_core_set_dc_cnak_trace(struct mlx5_core_dev *dev, int enable,
+ u64 addr);
int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
@@ -816,11 +877,16 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
int size_in, void *data_out, int size_out,
u16 reg_num, int arg, int write);
+void mlx5_toggle_port_link(struct mlx5_core_dev *dev);
int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
int ptys_size, int proto_mask);
int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
u32 *proto_cap, int proto_mask);
+int mlx5_query_port_autoneg(struct mlx5_core_dev *dev, int proto_mask,
+ u8 *an_disable_cap, u8 *an_disable_status);
+int mlx5_set_port_autoneg(struct mlx5_core_dev *dev, bool disable,
+ u32 eth_proto_admin, int proto_mask);
int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
u32 *proto_admin, int proto_mask);
int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
@@ -828,10 +894,14 @@ int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
int mlx5_set_port_status(struct mlx5_core_dev *dev,
enum mlx5_port_status status);
int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status);
+int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status *status);
int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 port,
u32 rx_pause, u32 tx_pause);
int mlx5_query_port_pause(struct mlx5_core_dev *dev, u32 port,
u32 *rx_pause, u32 *tx_pause);
+int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx);
+int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx);
int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu);
int mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu);
@@ -884,6 +954,9 @@ int mlx5_modify_port_cong_params(struct mlx5_core_dev *mdev,
void *in, int in_size);
int mlx5_query_port_cong_statistics(struct mlx5_core_dev *mdev, int clear,
void *out, int out_size);
+int mlx5_set_diagnostics(struct mlx5_core_dev *mdev, void *in, int in_size);
+int mlx5_query_diagnostics(struct mlx5_core_dev *mdev, u8 num_of_samples,
+ u16 sample_index, void *out, int out_size);
static inline u32 mlx5_mkey_to_idx(u32 mkey)
{
return mkey >> 8;
@@ -905,12 +978,7 @@ enum {
};
enum {
- MAX_MR_CACHE_ENTRIES = 16,
-};
-
-enum {
- MLX5_INTERFACE_PROTOCOL_IB = 0,
- MLX5_INTERFACE_PROTOCOL_ETH = 1,
+ MAX_MR_CACHE_ENTRIES = 15,
};
struct mlx5_interface {
@@ -936,6 +1004,14 @@ struct mlx5_profile {
} mr_cache[MAX_MR_CACHE_ENTRIES];
};
+enum {
+ MLX5_PCI_DEV_IS_VF = 1 << 0,
+};
+
+static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev)
+{
+ return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF);
+}
#define MLX5_EEPROM_MAX_BYTES 32
#define MLX5_EEPROM_IDENTIFIER_BYTE_MASK 0x000000ff
diff --git a/sys/dev/mlx5/flow_table.h b/sys/dev/mlx5/flow_table.h
index 61ce2baf..802f012 100644
--- a/sys/dev/mlx5/flow_table.h
+++ b/sys/dev/mlx5/flow_table.h
@@ -30,6 +30,9 @@
#include <dev/mlx5/driver.h>
+#define MLX5_SET_FLOW_TABLE_ROOT_OPMOD_SET 0x0
+#define MLX5_SET_FLOW_TABLE_ROOT_OPMOD_RESET 0x1
+
struct mlx5_flow_table_group {
u8 log_sz;
u8 match_criteria_enable;
@@ -44,7 +47,10 @@ void mlx5_destroy_flow_table(void *flow_table);
int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable,
void *match_criteria, void *flow_context,
u32 *flow_index);
-void mlx5_del_flow_table_entry(void *flow_table, u32 flow_index);
+int mlx5_del_flow_table_entry(void *flow_table, u32 flow_index);
u32 mlx5_get_flow_table_id(void *flow_table);
+int mlx5_set_flow_table_root(struct mlx5_core_dev *mdev, u16 op_mod,
+ u8 vport_num, u8 table_type, u32 table_id,
+ u32 underlay_qpn);
#endif /* MLX5_FLOW_TABLE_H */
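
A hedged caller sketch for the new mlx5_set_flow_table_root() API declared
above; the "example_*" helper names are hypothetical. Per the implementation
below, table_id is only honored for the SET opmod, and addressing another
vport requires the vport_group_manager capability:

    /* Install table_id as the e-switch root table on our own vport. */
    static int example_set_esw_root(struct mlx5_core_dev *mdev, u32 table_id)
    {
            return mlx5_set_flow_table_root(mdev,
                MLX5_SET_FLOW_TABLE_ROOT_OPMOD_SET, 0 /* own vport */,
                MLX5_FLOW_TABLE_TYPE_ESWITCH, table_id,
                0 /* no underlay QPN */);
    }

    /* On teardown, RESET ignores table_id. */
    static int example_reset_esw_root(struct mlx5_core_dev *mdev)
    {
            return mlx5_set_flow_table_root(mdev,
                MLX5_SET_FLOW_TABLE_ROOT_OPMOD_RESET, 0,
                MLX5_FLOW_TABLE_TYPE_ESWITCH, 0, 0);
    }
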
diff --git a/sys/dev/mlx5/mlx5_core/mlx5_cmd.c b/sys/dev/mlx5/mlx5_core/mlx5_cmd.c
index 42087da..2551d3e 100644
--- a/sys/dev/mlx5/mlx5_core/mlx5_cmd.c
+++ b/sys/dev/mlx5/mlx5_core/mlx5_cmd.c
@@ -121,7 +121,7 @@ static int alloc_ent(struct mlx5_cmd *cmd)
clear_bit(ret, &cmd->bitmask);
spin_unlock_irqrestore(&cmd->alloc_lock, flags);
- return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
+ return ret < cmd->max_reg_cmds ? ret : -1;
}
static void free_ent(struct mlx5_cmd *cmd, int idx)
@@ -396,6 +396,9 @@ const char *mlx5_command_str(int command)
case MLX5_CMD_OP_CREATE_DCT:
return "CREATE_DCT";
+ case MLX5_CMD_OP_SET_DC_CNAK_TRACE:
+ return "SET_DC_CNAK_TRACE";
+
case MLX5_CMD_OP_DESTROY_DCT:
return "DESTROY_DCT";
@@ -639,6 +642,12 @@ const char *mlx5_command_str(int command)
case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
return "DELETE_FLOW_TABLE_ENTRY";
+ case MLX5_CMD_OP_SET_DIAGNOSTICS:
+ return "MLX5_CMD_OP_SET_DIAGNOSTICS";
+
+ case MLX5_CMD_OP_QUERY_DIAGNOSTICS:
+ return "MLX5_CMD_OP_QUERY_DIAGNOSTICS";
+
default: return "unknown command opcode";
}
}
diff --git a/sys/dev/mlx5/mlx5_core/mlx5_core.h b/sys/dev/mlx5/mlx5_core/mlx5_core.h
index bac7fc0..b453634 100644
--- a/sys/dev/mlx5/mlx5_core/mlx5_core.h
+++ b/sys/dev/mlx5/mlx5_core/mlx5_core.h
@@ -64,6 +64,8 @@ enum {
MLX5_CMD_TIME, /* print command execution time */
};
+struct mlx5_core_dev;
+
int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
int mlx5_query_board_id(struct mlx5_core_dev *dev);
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);
diff --git a/sys/dev/mlx5/mlx5_core/mlx5_cq.c b/sys/dev/mlx5/mlx5_core/mlx5_cq.c
index 47d3780..bab3f29 100644
--- a/sys/dev/mlx5/mlx5_core/mlx5_cq.c
+++ b/sys/dev/mlx5/mlx5_core/mlx5_cq.c
@@ -187,10 +187,12 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
in.cqn = cpu_to_be32(cq->cqn);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err)
- return err;
+ goto out;
- if (out.hdr.status)
- return mlx5_cmd_status_to_err(&out.hdr);
+ if (out.hdr.status) {
+ err = mlx5_cmd_status_to_err(&out.hdr);
+ goto out;
+ }
synchronize_irq(cq->irqn);
@@ -198,7 +200,9 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
complete(&cq->free);
wait_for_completion(&cq->free);
- return 0;
+out:
+
+ return err;
}
EXPORT_SYMBOL(mlx5_core_destroy_cq);
diff --git a/sys/dev/mlx5/mlx5_core/mlx5_eq.c b/sys/dev/mlx5/mlx5_core/mlx5_eq.c
index 8321342..8089836 100644
--- a/sys/dev/mlx5/mlx5_core/mlx5_eq.c
+++ b/sys/dev/mlx5/mlx5_core/mlx5_eq.c
@@ -155,6 +155,8 @@ static const char *eqe_type_str(u8 type)
return "MLX5_EVENT_TYPE_PAGE_REQUEST";
case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
return "MLX5_EVENT_TYPE_NIC_VPORT_CHANGE";
+ case MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT:
+ return "MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT";
default:
return "Unrecognized event";
}
@@ -181,6 +183,21 @@ static enum mlx5_dev_event port_subtype_event(u8 subtype)
return -1;
}
+static enum mlx5_dev_event dcbx_subevent(u8 subtype)
+{
+ switch (subtype) {
+ case MLX5_DCBX_EVENT_SUBTYPE_ERROR_STATE_DCBX:
+ return MLX5_DEV_EVENT_ERROR_STATE_DCBX;
+ case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_CHANGE:
+ return MLX5_DEV_EVENT_REMOTE_CONFIG_CHANGE;
+ case MLX5_DCBX_EVENT_SUBTYPE_LOCAL_OPER_CHANGE:
+ return MLX5_DEV_EVENT_LOCAL_OPER_CHANGE;
+ case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_APP_PRIORITY_CHANGE:
+ return MLX5_DEV_EVENT_REMOTE_CONFIG_APPLICATION_PRIORITY_CHANGE;
+ }
+ return -1;
+}
+
static void eq_update_ci(struct mlx5_eq *eq, int arm)
{
__be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
@@ -259,6 +276,26 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
port, eqe->sub_type);
}
break;
+
+ case MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT:
+ port = (eqe->data.port.port >> 4) & 0xf;
+ switch (eqe->sub_type) {
+ case MLX5_DCBX_EVENT_SUBTYPE_ERROR_STATE_DCBX:
+ case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_CHANGE:
+ case MLX5_DCBX_EVENT_SUBTYPE_LOCAL_OPER_CHANGE:
+ case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_APP_PRIORITY_CHANGE:
+ if (dev->event)
+ dev->event(dev,
+ dcbx_subevent(eqe->sub_type),
+ 0);
+ break;
+ default:
+ mlx5_core_warn(dev,
+ "dcbx event with unrecognized subtype: port %d, sub_type %d\n",
+ port, eqe->sub_type);
+ }
+ break;
+
case MLX5_EVENT_TYPE_CQ_ERROR:
cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrom 0x%x\n",
@@ -476,6 +513,10 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
async_event_mask |= (1ull <<
MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);
+ if (MLX5_CAP_GEN(dev, dcbx))
+ async_event_mask |= (1ull <<
+ MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT);
+
err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
"mlx5_cmd_eq", &dev->priv.uuari.uars[0]);
@@ -573,6 +614,8 @@ static const char *mlx5_port_module_event_error_type_to_string(u8 error_type)
return "Unknown identifier";
case MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE:
return "High Temperature";
+ case MLX5_MODULE_EVENT_ERROR_CABLE_IS_SHORTED:
+ return "Cable is shorted";
default:
return "Unknown error type";
@@ -605,19 +648,19 @@ static void mlx5_port_module_event(struct mlx5_core_dev *dev,
switch (module_status) {
case MLX5_MODULE_STATUS_PLUGGED:
- device_printf((&pdev->dev)->bsddev, "INFO: ""Module %u, status: plugged", module_num);
+ device_printf((&pdev->dev)->bsddev, "INFO: ""Module %u, status: plugged\n", module_num);
break;
case MLX5_MODULE_STATUS_UNPLUGGED:
- device_printf((&pdev->dev)->bsddev, "INFO: ""Module %u, status: unplugged", module_num);
+ device_printf((&pdev->dev)->bsddev, "INFO: ""Module %u, status: unplugged\n", module_num);
break;
case MLX5_MODULE_STATUS_ERROR:
- device_printf((&pdev->dev)->bsddev, "INFO: ""Module %u, status: error, %s", module_num, mlx5_port_module_event_error_type_to_string(error_type));
+ device_printf((&pdev->dev)->bsddev, "INFO: ""Module %u, status: error, %s\n", module_num, mlx5_port_module_event_error_type_to_string(error_type));
break;
default:
- device_printf((&pdev->dev)->bsddev, "INFO: ""Module %u, unknown status", module_num);
+ device_printf((&pdev->dev)->bsddev, "INFO: ""Module %u, unknown status\n", module_num);
}
/* store module status */
if (module_num < MLX5_MAX_PORTS)
diff --git a/sys/dev/mlx5/mlx5_core/mlx5_eswitch_vacl.c b/sys/dev/mlx5/mlx5_core/mlx5_eswitch_vacl.c
index cb149b9..ea5c596 100644
--- a/sys/dev/mlx5/mlx5_core/mlx5_eswitch_vacl.c
+++ b/sys/dev/mlx5/mlx5_core/mlx5_eswitch_vacl.c
@@ -96,10 +96,10 @@ static int mlx5_vacl_table_allow_vlan(void *acl_t, u16 vlan)
MLX5_SET(flow_context, flow_context, action,
MLX5_FLOW_CONTEXT_ACTION_ALLOW);
in_match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
- MLX5_SET(fte_match_param, in_match_value, outer_headers.vlan_tag, 1);
+ MLX5_SET(fte_match_param, in_match_value, outer_headers.cvlan_tag, 1);
MLX5_SET(fte_match_param, in_match_value, outer_headers.first_vid,
vlan);
- MLX5_SET(fte_match_param, in_match_criteria, outer_headers.vlan_tag, 1);
+ MLX5_SET(fte_match_param, in_match_criteria, outer_headers.cvlan_tag, 1);
MLX5_SET(fte_match_param, in_match_criteria, outer_headers.first_vid,
0xfff);
if (acl_table->spoofchk_enabled) {
@@ -255,8 +255,8 @@ static int mlx5_vacl_table_apply_untagged(void *acl_t, u16 new_action)
/* Apply new untagged rule */
MLX5_SET(flow_context, flow_context, action, new_action);
in_match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
- MLX5_SET(fte_match_param, in_match_value, outer_headers.vlan_tag, 0);
- MLX5_SET(fte_match_param, in_match_criteria, outer_headers.vlan_tag, 1);
+ MLX5_SET(fte_match_param, in_match_value, outer_headers.cvlan_tag, 0);
+ MLX5_SET(fte_match_param, in_match_criteria, outer_headers.cvlan_tag, 1);
if (acl_table->spoofchk_enabled) {
smac = MLX5_ADDR_OF(fte_match_param,
in_match_value,
@@ -549,7 +549,7 @@ static int mlx5_vacl_table_create_ft(void *acl_t, bool spoofchk)
MLX5_MATCH_OUTER_HEADERS;
MLX5_SET(fte_match_param,
g[MLX5_ACL_UNTAGGED_GROUP_IDX - shift_idx].match_criteria,
- outer_headers.vlan_tag, 1);
+ outer_headers.cvlan_tag, 1);
if (spoofchk) {
smac = MLX5_ADDR_OF(fte_match_param,
g[MLX5_ACL_UNTAGGED_GROUP_IDX - shift_idx]
@@ -564,7 +564,7 @@ static int mlx5_vacl_table_create_ft(void *acl_t, bool spoofchk)
MLX5_MATCH_OUTER_HEADERS;
MLX5_SET(fte_match_param,
g[MLX5_ACL_VLAN_GROUP_IDX - shift_idx].match_criteria,
- outer_headers.vlan_tag, 1);
+ outer_headers.cvlan_tag, 1);
MLX5_SET(fte_match_param,
g[MLX5_ACL_VLAN_GROUP_IDX - shift_idx].match_criteria,
outer_headers.first_vid, 0xfff);
@@ -627,10 +627,10 @@ void *mlx5_vacl_table_create(struct mlx5_core_dev *dev,
struct mlx5_vacl_table *acl_table;
int err = 0;
- if (is_egress && !MLX5_CAP_ESW_FLOWTABLE_EGRESS_ACL(dev, ft_support))
+ if (is_egress && !MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
return NULL;
- if (!is_egress && !MLX5_CAP_ESW_FLOWTABLE_INGRESS_ACL(dev, ft_support))
+ if (!is_egress && !MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
return NULL;
acl_table = kzalloc(sizeof(*acl_table), GFP_KERNEL);
@@ -640,9 +640,9 @@ void *mlx5_vacl_table_create(struct mlx5_core_dev *dev,
acl_table->acl_type = is_egress ? MLX5_FLOW_TABLE_TYPE_EGRESS_ACL :
MLX5_FLOW_TABLE_TYPE_INGRESS_ACL;
acl_table->max_ft_size = (is_egress ?
- MLX5_CAP_ESW_FLOWTABLE_EGRESS_ACL(dev,
+ MLX5_CAP_ESW_EGRESS_ACL(dev,
log_max_ft_size) :
- MLX5_CAP_ESW_FLOWTABLE_INGRESS_ACL(dev,
+ MLX5_CAP_ESW_INGRESS_ACL(dev,
log_max_ft_size));
acl_table->dev = dev;
acl_table->vport = vport;
diff --git a/sys/dev/mlx5/mlx5_core/mlx5_flow_table.c b/sys/dev/mlx5/mlx5_core/mlx5_flow_table.c
index 4eac6f3..24490d2 100644
--- a/sys/dev/mlx5/mlx5_core/mlx5_flow_table.c
+++ b/sys/dev/mlx5/mlx5_core/mlx5_flow_table.c
@@ -87,7 +87,7 @@ static int mlx5_set_flow_entry_cmd(struct mlx5_flow_table *ft, u32 group_ix,
return err;
}
-static void mlx5_del_flow_entry_cmd(struct mlx5_flow_table *ft, u32 flow_index)
+static int mlx5_del_flow_entry_cmd(struct mlx5_flow_table *ft, u32 flow_index)
{
u32 in[MLX5_ST_SZ_DW(delete_fte_in)];
u32 out[MLX5_ST_SZ_DW(delete_fte_out)];
@@ -103,7 +103,8 @@ static void mlx5_del_flow_entry_cmd(struct mlx5_flow_table *ft, u32 flow_index)
MLX5_SET_DFTEI(in, flow_index, flow_index);
MLX5_SET_DFTEI(in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
- mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out,
+ sizeof(out));
}
static void mlx5_destroy_flow_group_cmd(struct mlx5_flow_table *ft, int i)
@@ -343,12 +344,15 @@ int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable,
}
EXPORT_SYMBOL(mlx5_add_flow_table_entry);
-void mlx5_del_flow_table_entry(void *flow_table, u32 flow_index)
+int mlx5_del_flow_table_entry(void *flow_table, u32 flow_index)
{
struct mlx5_flow_table *ft = flow_table;
+ int ret;
- mlx5_del_flow_entry_cmd(ft, flow_index);
- mlx5_free_flow_index(ft, flow_index);
+ ret = mlx5_del_flow_entry_cmd(ft, flow_index);
+ if (!ret)
+ mlx5_free_flow_index(ft, flow_index);
+ return ret;
}
EXPORT_SYMBOL(mlx5_del_flow_table_entry);
@@ -430,3 +434,46 @@ u32 mlx5_get_flow_table_id(void *flow_table)
return ft->id;
}
EXPORT_SYMBOL(mlx5_get_flow_table_id);
+
+int mlx5_set_flow_table_root(struct mlx5_core_dev *mdev, u16 op_mod,
+ u8 vport_num, u8 table_type, u32 table_id,
+ u32 underlay_qpn)
+{
+ u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)];
+ u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)];
+ int err;
+ int is_group_manager;
+
+ is_group_manager = MLX5_CAP_GEN(mdev, vport_group_manager);
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(set_flow_table_root_in, in, op_mod, op_mod);
+ MLX5_SET(set_flow_table_root_in, in, table_type, table_type);
+ MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn);
+ if (op_mod == MLX5_SET_FLOW_TABLE_ROOT_OPMOD_SET)
+ MLX5_SET(set_flow_table_root_in, in, table_id, table_id);
+
+ MLX5_SET(set_flow_table_root_in, in, opcode,
+ MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
+
+ if (vport_num) {
+ if (is_group_manager) {
+ MLX5_SET(set_flow_table_root_in, in, other_vport,
+ 1);
+ MLX5_SET(set_flow_table_root_in, in, vport_number,
+ vport_num);
+ } else {
+ return -EPERM;
+ }
+ }
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
+ sizeof(out));
+ if (err)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL(mlx5_set_flow_table_root);
diff --git a/sys/dev/mlx5/mlx5_core/mlx5_fw.c b/sys/dev/mlx5/mlx5_core/mlx5_fw.c
index 06fafdc..c9060c8 100644
--- a/sys/dev/mlx5/mlx5_core/mlx5_fw.c
+++ b/sys/dev/mlx5/mlx5_core/mlx5_fw.c
@@ -201,6 +201,50 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
+ if (MLX5_CAP_GEN(dev, snapshot)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_SNAPSHOT,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_SNAPSHOT,
+ HCA_CAP_OPMOD_GET_MAX);
+ if (err)
+ return err;
+ }
+
+ if (MLX5_CAP_GEN(dev, ipoib_enhanced_offloads)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_EOIB_OFFLOADS,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_EOIB_OFFLOADS,
+ HCA_CAP_OPMOD_GET_MAX);
+ if (err)
+ return err;
+ }
+
+ if (MLX5_CAP_GEN(dev, debug)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_DEBUG,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_DEBUG,
+ HCA_CAP_OPMOD_GET_MAX);
+ if (err)
+ return err;
+ }
+
+ if (MLX5_CAP_GEN(dev, qos)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_QOS,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_QOS,
+ HCA_CAP_OPMOD_GET_MAX);
+ if (err)
+ return err;
+ }
+
err = mlx5_core_query_special_contexts(dev);
if (err)
return err;
@@ -235,3 +279,31 @@ int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev)
return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
out, sizeof(out));
}
+
+int mlx5_core_set_dc_cnak_trace(struct mlx5_core_dev *dev, int enable,
+ u64 addr)
+{
+ struct mlx5_cmd_set_dc_cnak_mbox_in *in;
+ struct mlx5_cmd_set_dc_cnak_mbox_out out;
+ int err;
+
+ in = kzalloc(sizeof(*in), GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ memset(&out, 0, sizeof(out));
+ in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_SET_DC_CNAK_TRACE);
+ in->enable = !!enable << 7;
+ in->pa = cpu_to_be64(addr);
+ err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
+ if (err)
+ goto out;
+
+ if (out.hdr.status)
+ err = mlx5_cmd_status_to_err(&out.hdr);
+
+out:
+ kfree(in);
+
+ return err;
+}
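
A minimal, hypothetical caller of the new DC CNAK trace helper; allocating
and DMA-mapping the trace buffer is left to the consumer, so trace_dma_addr
below is an assumed, already-mapped bus address:

    static int example_enable_dc_cnak_trace(struct mlx5_core_dev *dev,
        u64 trace_dma_addr)
    {
            /* The helper encodes enable as bit 7 of the mailbox byte. */
            return mlx5_core_set_dc_cnak_trace(dev, 1, trace_dma_addr);
    }
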
diff --git a/sys/dev/mlx5/mlx5_core/mlx5_main.c b/sys/dev/mlx5/mlx5_core/mlx5_main.c
index 2bf3cc0..776e2a6 100644
--- a/sys/dev/mlx5/mlx5_core/mlx5_main.c
+++ b/sys/dev/mlx5/mlx5_core/mlx5_main.c
@@ -145,10 +145,6 @@ static struct mlx5_profile profiles[] = {
.size = 16,
.limit = 8
},
- .mr_cache[15] = {
- .size = 8,
- .limit = 4
- },
},
[3] = {
.mask = MLX5_PROF_MASK_QP_SIZE,
@@ -256,7 +252,8 @@ struct mlx5_reg_host_endianess {
enum {
MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) |
- MLX5_DEV_CAP_FLAG_DCT,
+ MLX5_DEV_CAP_FLAG_DCT |
+ MLX5_DEV_CAP_FLAG_DRAIN_SIGERR,
};
static u16 to_fw_pkey_sz(u32 size)
@@ -383,6 +380,9 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
/* disable cmdif checksum */
MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);
+ /* enable drain sigerr */
+ MLX5_SET(cmd_hca_cap, set_hca_cap, drain_sigerr, 1);
+
MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);
err = set_caps(dev, set_ctx, set_sz);
@@ -398,6 +398,10 @@ static int set_hca_ctrl(struct mlx5_core_dev *dev)
struct mlx5_reg_host_endianess he_out;
int err;
+ if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH &&
+ !MLX5_CAP_GEN(dev, roce))
+ return 0;
+
memset(&he_in, 0, sizeof(he_in));
he_in.he = MLX5_SET_HOST_ENDIANNESS;
err = mlx5_core_access_reg(dev, &he_in, sizeof(he_in),
@@ -668,6 +672,12 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
}
device_printf((&pdev->dev)->bsddev, "INFO: ""firmware version: %d.%d.%d\n", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
+ /*
+ * On load removing any previous indication of internal error,
+ * device is up
+ */
+ dev->state = MLX5_DEVICE_STATE_UP;
+
err = mlx5_cmd_init(dev);
if (err) {
device_printf((&pdev->dev)->bsddev, "ERR: ""Failed initializing command interface, aborting\n");
@@ -706,15 +716,15 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
goto err_pagealloc_stop;
}
- err = set_hca_ctrl(dev);
+ err = handle_hca_cap(dev);
if (err) {
- device_printf((&pdev->dev)->bsddev, "ERR: ""set_hca_ctrl failed\n");
+ device_printf((&pdev->dev)->bsddev, "ERR: ""handle_hca_cap failed\n");
goto reclaim_boot_pages;
}
- err = handle_hca_cap(dev);
+ err = set_hca_ctrl(dev);
if (err) {
- device_printf((&pdev->dev)->bsddev, "ERR: ""handle_hca_cap failed\n");
+ device_printf((&pdev->dev)->bsddev, "ERR: ""set_hca_ctrl failed\n");
goto reclaim_boot_pages;
}
@@ -830,6 +840,7 @@ err_disable:
pci_disable_device(dev->pdev);
err_dbg:
+ dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
return err;
}
@@ -1006,6 +1017,8 @@ static int init_one(struct pci_dev *pdev,
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
priv = &dev->priv;
+ if (id)
+ priv->pci_dev_data = id->driver_data;
if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profiles)) {
printf("mlx5_core: WARN: ""selected profile out of range, selecting default (%d)\n", MLX5_DEFAULT_PROF);
@@ -1054,8 +1067,8 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 4116) }, /* ConnectX-4 VF */
{ PCI_VDEVICE(MELLANOX, 4117) }, /* ConnectX-4LX */
{ PCI_VDEVICE(MELLANOX, 4118) }, /* ConnectX-4LX VF */
- { PCI_VDEVICE(MELLANOX, 4119) },
- { PCI_VDEVICE(MELLANOX, 4120) },
+ { PCI_VDEVICE(MELLANOX, 4119) }, /* ConnectX-5 */
+ { PCI_VDEVICE(MELLANOX, 4120) }, /* ConnectX-5 VF */
{ PCI_VDEVICE(MELLANOX, 4121) },
{ PCI_VDEVICE(MELLANOX, 4122) },
{ PCI_VDEVICE(MELLANOX, 4123) },
diff --git a/sys/dev/mlx5/mlx5_core/mlx5_mr.c b/sys/dev/mlx5/mlx5_core/mlx5_mr.c
index 0c7fa09..012f5d7 100644
--- a/sys/dev/mlx5/mlx5_core/mlx5_mr.c
+++ b/sys/dev/mlx5/mlx5_core/mlx5_mr.c
@@ -34,7 +34,7 @@ void mlx5_init_mr_table(struct mlx5_core_dev *dev)
{
struct mlx5_mr_table *table = &dev->priv.mr_table;
- rwlock_init(&table->lock);
+ spin_lock_init(&table->lock);
INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
}
@@ -49,9 +49,9 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
{
struct mlx5_mr_table *table = &dev->priv.mr_table;
struct mlx5_create_mkey_mbox_out lout;
+ unsigned long flags;
int err;
u8 key;
- unsigned long irql;
memset(&lout, 0, sizeof(lout));
spin_lock_irq(&dev->priv.mkey_lock);
@@ -86,12 +86,12 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
be32_to_cpu(lout.mkey), key, mr->key);
/* connect to MR tree */
- write_lock_irqsave(&table->lock, irql);
- err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->key), mr);
- write_unlock_irqrestore(&table->lock, irql);
+ spin_lock_irqsave(&table->lock, flags);
+ err = radix_tree_insert(&table->tree, mlx5_mkey_to_idx(mr->key), mr);
+ spin_unlock_irqrestore(&table->lock, flags);
if (err) {
mlx5_core_warn(dev, "failed radix tree insert of mr 0x%x, %d\n",
- mlx5_base_mkey(mr->key), err);
+ mr->key, err);
mlx5_core_destroy_mkey(dev, mr);
}
@@ -109,12 +109,11 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr)
memset(in, 0, sizeof(in));
- write_lock_irqsave(&table->lock, flags);
- deleted_mr = radix_tree_delete(&table->tree, mlx5_base_mkey(mr->key));
- write_unlock_irqrestore(&table->lock, flags);
+ spin_lock_irqsave(&table->lock, flags);
+ deleted_mr = radix_tree_delete(&table->tree, mlx5_mkey_to_idx(mr->key));
+ spin_unlock_irqrestore(&table->lock, flags);
if (!deleted_mr) {
- mlx5_core_warn(dev, "failed radix tree delete of mr 0x%x\n",
- mlx5_base_mkey(mr->key));
+ mlx5_core_warn(dev, "failed radix tree delete of mr 0x%x\n", mr->key);
return -ENOENT;
}
diff --git a/sys/dev/mlx5/mlx5_core/mlx5_port.c b/sys/dev/mlx5/mlx5_core/mlx5_port.c
index 1e938a6..6ea910d 100644
--- a/sys/dev/mlx5/mlx5_core/mlx5_port.c
+++ b/sys/dev/mlx5/mlx5_core/mlx5_port.c
@@ -80,6 +80,18 @@ struct mlx5_reg_pcap {
__be32 caps_31_0;
};
+/* This function should be used after setting a port register only */
+void mlx5_toggle_port_link(struct mlx5_core_dev *dev)
+{
+ enum mlx5_port_status ps;
+
+ mlx5_query_port_admin_status(dev, &ps);
+ mlx5_set_port_status(dev, MLX5_PORT_DOWN);
+ if (ps == MLX5_PORT_UP)
+ mlx5_set_port_status(dev, MLX5_PORT_UP);
+}
+EXPORT_SYMBOL_GPL(mlx5_toggle_port_link);
+
int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps)
{
struct mlx5_reg_pcap in;
@@ -133,6 +145,53 @@ int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL_GPL(mlx5_query_port_proto_cap);
+int mlx5_query_port_autoneg(struct mlx5_core_dev *dev, int proto_mask,
+ u8 *an_disable_cap, u8 *an_disable_status)
+{
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ int err;
+
+ err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask);
+ if (err)
+ return err;
+
+ *an_disable_status = MLX5_GET(ptys_reg, out, an_disable_admin);
+ *an_disable_cap = MLX5_GET(ptys_reg, out, an_disable_cap);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_autoneg);
+
+int mlx5_set_port_autoneg(struct mlx5_core_dev *dev, bool disable,
+ u32 eth_proto_admin, int proto_mask)
+{
+ u32 in[MLX5_ST_SZ_DW(ptys_reg)];
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ u8 an_disable_cap;
+ u8 an_disable_status;
+ int err;
+
+ err = mlx5_query_port_autoneg(dev, proto_mask, &an_disable_cap,
+ &an_disable_status);
+ if (err)
+ return err;
+ if (!an_disable_cap)
+ return -EPERM;
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(ptys_reg, in, local_port, 1);
+ MLX5_SET(ptys_reg, in, an_disable_admin, disable);
+ MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
+ if (proto_mask == MLX5_PTYS_EN)
+ MLX5_SET(ptys_reg, in, eth_proto_admin, eth_proto_admin);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PTYS, 0, 1);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_autoneg);
+
int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
u32 *proto_admin, int proto_mask)
{
@@ -212,6 +271,23 @@ int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status)
return err;
}
+int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status *status)
+{
+ u32 in[MLX5_ST_SZ_DW(paos_reg)] = {0};
+ u32 out[MLX5_ST_SZ_DW(paos_reg)];
+ int err;
+
+ MLX5_SET(paos_reg, in, local_port, 1);
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PAOS, 0, 0);
+ if (err)
+ return err;
+ *status = MLX5_GET(paos_reg, out, admin_status);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
+
static int mlx5_query_port_mtu(struct mlx5_core_dev *dev,
int *admin_mtu, int *max_mtu, int *oper_mtu)
{
@@ -299,6 +375,44 @@ int mlx5_query_port_pause(struct mlx5_core_dev *dev, u32 port,
return 0;
}
+int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx)
+{
+ u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
+ u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
+
+ MLX5_SET(pfcc_reg, in, local_port, 1);
+ MLX5_SET(pfcc_reg, in, pfctx, pfc_en_tx);
+ MLX5_SET(pfcc_reg, in, pfcrx, pfc_en_rx);
+ MLX5_SET_TO_ONES(pfcc_reg, in, prio_mask_tx);
+ MLX5_SET_TO_ONES(pfcc_reg, in, prio_mask_rx);
+
+ return mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PFCC, 0, 1);
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_pfc);
+
+int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx)
+{
+ u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
+ u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
+ int err;
+
+ MLX5_SET(pfcc_reg, in, local_port, 1);
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PFCC, 0, 0);
+ if (err)
+ return err;
+
+ if (pfc_en_tx)
+ *pfc_en_tx = MLX5_GET(pfcc_reg, out, pfctx);
+
+ if (pfc_en_rx)
+ *pfc_en_rx = MLX5_GET(pfcc_reg, out, pfcrx);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_pfc);
+
int mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu)
{
return mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu);
@@ -716,3 +830,29 @@ int mlx5_query_port_cong_statistics(struct mlx5_core_dev *mdev, int clear,
return mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
out, out_size);
}
+
+int mlx5_set_diagnostics(struct mlx5_core_dev *mdev, void *in, int in_size)
+{
+ u32 out[MLX5_ST_SZ_DW(set_diagnostics_out)];
+
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(set_diagnostics_in, in, opcode, MLX5_CMD_OP_SET_DIAGNOSTICS);
+
+ return mlx5_cmd_exec_check_status(mdev, in, in_size, out, sizeof(out));
+}
+
+int mlx5_query_diagnostics(struct mlx5_core_dev *mdev, u8 num_of_samples,
+ u16 sample_index, void *out, int out_size)
+{
+ u32 in[MLX5_ST_SZ_DW(query_diagnostics_in)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(query_diagnostics_in, in, opcode,
+ MLX5_CMD_OP_QUERY_DIAGNOSTICS);
+ MLX5_SET(query_diagnostics_in, in, num_of_samples, num_of_samples);
+ MLX5_SET(query_diagnostics_in, in, sample_index, sample_index);
+
+ return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, out_size);
+}
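
The comment on mlx5_toggle_port_link() notes it should only be used after
setting a port register, so a typical pairing with the new autoneg setter
looks roughly like this (helper name and eth_proto value are illustrative;
mlx5_set_port_autoneg() returns -EPERM when the device cannot disable
autonegotiation):

    /* Disable autonegotiation, force eth_proto, then bounce the link so
     * the new PTYS setting takes effect. */
    static int example_force_eth_proto(struct mlx5_core_dev *dev,
        u32 eth_proto)
    {
            int err;

            err = mlx5_set_port_autoneg(dev, true, eth_proto, MLX5_PTYS_EN);
            if (err)
                    return err;
            mlx5_toggle_port_link(dev);
            return 0;
    }
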
diff --git a/sys/dev/mlx5/mlx5_core/mlx5_qp.c b/sys/dev/mlx5/mlx5_core/mlx5_qp.c
index c106abe..169dbea 100644
--- a/sys/dev/mlx5/mlx5_core/mlx5_qp.c
+++ b/sys/dev/mlx5/mlx5_core/mlx5_qp.c
@@ -32,6 +32,8 @@
#include "mlx5_core.h"
+#include "transobj.h"
+
static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev,
u32 rsn)
{
@@ -81,25 +83,53 @@ void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
mlx5_core_put_rsc(common);
}
+static int create_qprqsq_common(struct mlx5_core_dev *dev,
+ struct mlx5_core_qp *qp, int rsc_type)
+{
+ struct mlx5_qp_table *table = &dev->priv.qp_table;
+ int err;
+
+ qp->common.res = rsc_type;
+
+ spin_lock_irq(&table->lock);
+ err = radix_tree_insert(&table->tree, qp->qpn | (rsc_type << 24), qp);
+ spin_unlock_irq(&table->lock);
+ if (err)
+ return err;
+
+ atomic_set(&qp->common.refcount, 1);
+ init_completion(&qp->common.free);
+ qp->pid = curthread->td_proc->p_pid;
+
+ return 0;
+}
+
+static void destroy_qprqsq_common(struct mlx5_core_dev *dev,
+ struct mlx5_core_qp *qp, int rsc_type)
+{
+ struct mlx5_qp_table *table = &dev->priv.qp_table;
+ unsigned long flags;
+
+ spin_lock_irqsave(&table->lock, flags);
+ radix_tree_delete(&table->tree, qp->qpn | (rsc_type << 24));
+ spin_unlock_irqrestore(&table->lock, flags);
+
+ mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
+ wait_for_completion(&qp->common.free);
+}
+
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp,
struct mlx5_create_qp_mbox_in *in,
int inlen)
{
- struct mlx5_qp_table *table = &dev->priv.qp_table;
struct mlx5_create_qp_mbox_out out;
struct mlx5_destroy_qp_mbox_in din;
struct mlx5_destroy_qp_mbox_out dout;
int err;
- void *qpc;
memset(&out, 0, sizeof(out));
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP);
- if (dev->issi) {
- qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
- /* 0xffffff means we ask to work with cqe version 0 */
- MLX5_SET(qpc, qpc, user_index, 0xffffff);
- }
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
if (err) {
@@ -116,19 +146,11 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
qp->qpn = be32_to_cpu(out.qpn) & 0xffffff;
mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);
- qp->common.res = MLX5_RES_QP;
- spin_lock_irq(&table->lock);
- err = radix_tree_insert(&table->tree, qp->qpn, qp);
- spin_unlock_irq(&table->lock);
- if (err) {
- mlx5_core_warn(dev, "err %d\n", err);
+ err = create_qprqsq_common(dev, qp, MLX5_RES_QP);
+ if (err)
goto err_cmd;
- }
- qp->pid = curthread->td_proc->p_pid;
- atomic_set(&qp->common.refcount, 1);
atomic_inc(&dev->num_qps);
- init_completion(&qp->common.free);
return 0;
@@ -148,17 +170,10 @@ int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
{
struct mlx5_destroy_qp_mbox_in in;
struct mlx5_destroy_qp_mbox_out out;
- struct mlx5_qp_table *table = &dev->priv.qp_table;
- unsigned long flags;
int err;
- spin_lock_irqsave(&table->lock, flags);
- radix_tree_delete(&table->tree, qp->qpn);
- spin_unlock_irqrestore(&table->lock, flags);
-
- mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
- wait_for_completion(&qp->common.free);
+ destroy_qprqsq_common(dev, qp, MLX5_RES_QP);
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
@@ -176,59 +191,15 @@ int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);
-int mlx5_core_qp_modify(struct mlx5_core_dev *dev, enum mlx5_qp_state cur_state,
- enum mlx5_qp_state new_state,
+int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 operation,
struct mlx5_modify_qp_mbox_in *in, int sqd_event,
struct mlx5_core_qp *qp)
{
- static const u16 optab[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE] = {
- [MLX5_QP_STATE_RST] = {
- [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
- [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
- [MLX5_QP_STATE_INIT] = MLX5_CMD_OP_RST2INIT_QP,
- },
- [MLX5_QP_STATE_INIT] = {
- [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
- [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
- [MLX5_QP_STATE_INIT] = MLX5_CMD_OP_INIT2INIT_QP,
- [MLX5_QP_STATE_RTR] = MLX5_CMD_OP_INIT2RTR_QP,
- },
- [MLX5_QP_STATE_RTR] = {
- [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
- [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
- [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTR2RTS_QP,
- },
- [MLX5_QP_STATE_RTS] = {
- [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
- [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
- [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTS2RTS_QP,
- },
- [MLX5_QP_STATE_SQD] = {
- [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
- [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
- },
- [MLX5_QP_STATE_SQER] = {
- [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
- [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
- [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_SQERR2RTS_QP,
- },
- [MLX5_QP_STATE_ERR] = {
- [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
- [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
- }
- };
-
struct mlx5_modify_qp_mbox_out out;
int err = 0;
- u16 op;
-
- if (cur_state >= MLX5_QP_NUM_STATE || new_state >= MLX5_QP_NUM_STATE ||
- !optab[cur_state][new_state])
- return -EINVAL;
memset(&out, 0, sizeof(out));
- op = optab[cur_state][new_state];
- in->hdr.opcode = cpu_to_be16(op);
+ in->hdr.opcode = cpu_to_be16(operation);
in->qpn = cpu_to_be32(qp->qpn);
err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
if (err)
@@ -306,3 +277,209 @@ int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);
+
+int mlx5_core_create_dct(struct mlx5_core_dev *dev,
+ struct mlx5_core_dct *dct,
+ struct mlx5_create_dct_mbox_in *in)
+{
+ struct mlx5_qp_table *table = &dev->priv.qp_table;
+ struct mlx5_create_dct_mbox_out out;
+ struct mlx5_destroy_dct_mbox_in din;
+ struct mlx5_destroy_dct_mbox_out dout;
+ int err;
+
+ init_completion(&dct->drained);
+ memset(&out, 0, sizeof(out));
+ in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_DCT);
+
+ err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
+ if (err) {
+ mlx5_core_warn(dev, "create DCT failed, ret %d", err);
+ return err;
+ }
+
+ if (out.hdr.status)
+ return mlx5_cmd_status_to_err(&out.hdr);
+
+ dct->dctn = be32_to_cpu(out.dctn) & 0xffffff;
+
+ dct->common.res = MLX5_RES_DCT;
+ spin_lock_irq(&table->lock);
+ err = radix_tree_insert(&table->tree, dct->dctn, dct);
+ spin_unlock_irq(&table->lock);
+ if (err) {
+ mlx5_core_warn(dev, "err %d", err);
+ goto err_cmd;
+ }
+
+ dct->pid = curthread->td_proc->p_pid;
+ atomic_set(&dct->common.refcount, 1);
+ init_completion(&dct->common.free);
+
+ return 0;
+
+err_cmd:
+ memset(&din, 0, sizeof(din));
+ memset(&dout, 0, sizeof(dout));
+ din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_DCT);
+ din.dctn = cpu_to_be32(dct->dctn);
+ mlx5_cmd_exec(dev, &din, sizeof(din), &out, sizeof(dout));
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_create_dct);
+
+static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
+ struct mlx5_core_dct *dct)
+{
+ struct mlx5_drain_dct_mbox_out out;
+ struct mlx5_drain_dct_mbox_in in;
+ int err;
+
+ memset(&in, 0, sizeof(in));
+ memset(&out, 0, sizeof(out));
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DRAIN_DCT);
+ in.dctn = cpu_to_be32(dct->dctn);
+ err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+ if (err)
+ return err;
+
+ if (out.hdr.status)
+ return mlx5_cmd_status_to_err(&out.hdr);
+
+ return 0;
+}
+
+int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
+ struct mlx5_core_dct *dct)
+{
+ struct mlx5_qp_table *table = &dev->priv.qp_table;
+ struct mlx5_destroy_dct_mbox_out out;
+ struct mlx5_destroy_dct_mbox_in in;
+ unsigned long flags;
+ int err;
+
+ err = mlx5_core_drain_dct(dev, dct);
+ if (err) {
+ mlx5_core_warn(dev, "failed drain DCT 0x%x\n", dct->dctn);
+ return err;
+ }
+
+ wait_for_completion(&dct->drained);
+
+ spin_lock_irqsave(&table->lock, flags);
+ if (radix_tree_delete(&table->tree, dct->dctn) != dct)
+ mlx5_core_warn(dev, "dct delete differs\n");
+ spin_unlock_irqrestore(&table->lock, flags);
+
+ if (atomic_dec_and_test(&dct->common.refcount))
+ complete(&dct->common.free);
+ wait_for_completion(&dct->common.free);
+
+ memset(&in, 0, sizeof(in));
+ memset(&out, 0, sizeof(out));
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_DCT);
+ in.dctn = cpu_to_be32(dct->dctn);
+ err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+ if (err)
+ return err;
+
+ if (out.hdr.status)
+ return mlx5_cmd_status_to_err(&out.hdr);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_destroy_dct);
+
+int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,
+ struct mlx5_query_dct_mbox_out *out)
+{
+ struct mlx5_query_dct_mbox_in in;
+ int err;
+
+ memset(&in, 0, sizeof(in));
+ memset(out, 0, sizeof(*out));
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_DCT);
+ in.dctn = cpu_to_be32(dct->dctn);
+ err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
+ if (err)
+ return err;
+
+ if (out->hdr.status)
+ return mlx5_cmd_status_to_err(&out->hdr);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_dct_query);
+
+int mlx5_core_arm_dct(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct)
+{
+ struct mlx5_arm_dct_mbox_out out;
+ struct mlx5_arm_dct_mbox_in in;
+ int err;
+
+ memset(&in, 0, sizeof(in));
+ memset(&out, 0, sizeof(out));
+
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION);
+ in.dctn = cpu_to_be32(dct->dctn);
+ err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+ if (err)
+ return err;
+
+ if (out.hdr.status)
+ return mlx5_cmd_status_to_err(&out.hdr);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_arm_dct);
+
+int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ struct mlx5_core_qp *rq)
+{
+ int err;
+
+ err = mlx5_core_create_rq(dev, in, inlen, &rq->qpn);
+ if (err)
+ return err;
+
+ err = create_qprqsq_common(dev, rq, MLX5_RES_RQ);
+ if (err)
+ mlx5_core_destroy_rq(dev, rq->qpn);
+
+ return err;
+}
+EXPORT_SYMBOL(mlx5_core_create_rq_tracked);
+
+void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
+ struct mlx5_core_qp *rq)
+{
+ destroy_qprqsq_common(dev, rq, MLX5_RES_RQ);
+ mlx5_core_destroy_rq(dev, rq->qpn);
+}
+EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked);
+
+int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ struct mlx5_core_qp *sq)
+{
+ int err;
+
+ err = mlx5_core_create_sq(dev, in, inlen, &sq->qpn);
+ if (err)
+ return err;
+
+ err = create_qprqsq_common(dev, sq, MLX5_RES_SQ);
+ if (err)
+ mlx5_core_destroy_sq(dev, sq->qpn);
+
+ return err;
+}
+EXPORT_SYMBOL(mlx5_core_create_sq_tracked);
+
+void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
+ struct mlx5_core_qp *sq)
+{
+ destroy_qprqsq_common(dev, sq, MLX5_RES_SQ);
+ mlx5_core_destroy_sq(dev, sq->qpn);
+}
+EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);
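
A sketch of how a consumer might drive the new tracked-RQ helpers; rq_in
and inlen stand for a caller-built CREATE_RQ mailbox, and the helper name
is hypothetical:

    static int example_tracked_rq(struct mlx5_core_dev *dev, u32 *rq_in,
        int inlen)
    {
            struct mlx5_core_qp rq = {};
            int err;

            /* Creates the RQ and inserts it into the QP radix tree so
             * affiliated RQ events reach mlx5_rsc_event(). */
            err = mlx5_core_create_rq_tracked(dev, rq_in, inlen, &rq);
            if (err)
                    return err;

            /* ... use rq.qpn (attach to a TIR, post WQEs, ...) ... */

            /* Removes it from the tree, waits for the refcount to drain,
             * then destroys the firmware object. */
            mlx5_core_destroy_rq_tracked(dev, &rq);
            return 0;
    }
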
diff --git a/sys/dev/mlx5/mlx5_core/mlx5_srq.c b/sys/dev/mlx5/mlx5_core/mlx5_srq.c
index 3146111..b8c2e48 100644
--- a/sys/dev/mlx5/mlx5_core/mlx5_srq.c
+++ b/sys/dev/mlx5/mlx5_core/mlx5_srq.c
@@ -229,8 +229,6 @@ static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
memcpy(xrc_srqc, srqc, MLX5_ST_SZ_BYTES(srqc));
memcpy(pas, in->pas, pas_size);
- /* 0xffffff means we ask to work with cqe version 0 */
- MLX5_SET(xrc_srqc, xrc_srqc, user_index, 0xffffff);
err = mlx5_core_create_xsrq(dev, create_in, inlen, &srq->srqn);
if (err)
diff --git a/sys/dev/mlx5/mlx5_core/mlx5_transobj.c b/sys/dev/mlx5/mlx5_core/mlx5_transobj.c
index 0039b89..139bd30 100644
--- a/sys/dev/mlx5/mlx5_core/mlx5_transobj.c
+++ b/sys/dev/mlx5/mlx5_core/mlx5_transobj.c
@@ -103,6 +103,18 @@ void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn)
mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
}
+int mlx5_core_query_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *out)
+{
+ u32 in[MLX5_ST_SZ_DW(query_rq_in)];
+ int outlen = MLX5_ST_SZ_BYTES(query_rq_out);
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(query_rq_in, in, opcode, MLX5_CMD_OP_QUERY_RQ);
+ MLX5_SET(query_rq_in, in, rqn, rqn);
+
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
+}
+
int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn)
{
u32 out[MLX5_ST_SZ_DW(create_sq_out)];
@@ -141,6 +153,18 @@ void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn)
mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
}
+int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out)
+{
+ u32 in[MLX5_ST_SZ_DW(query_sq_in)];
+ int outlen = MLX5_ST_SZ_BYTES(query_sq_out);
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(query_sq_in, in, opcode, MLX5_CMD_OP_QUERY_SQ);
+ MLX5_SET(query_sq_in, in, sqn, sqn);
+
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
+}
+
int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *tirn)
{
diff --git a/sys/dev/mlx5/mlx5_core/mlx5_uar.c b/sys/dev/mlx5/mlx5_core/mlx5_uar.c
index 30725e7..7188f71 100644
--- a/sys/dev/mlx5/mlx5_core/mlx5_uar.c
+++ b/sys/dev/mlx5/mlx5_core/mlx5_uar.c
@@ -31,11 +31,6 @@
#include <dev/mlx5/driver.h>
#include "mlx5_core.h"
-enum {
- NUM_DRIVER_UARS = 4,
- NUM_LOW_LAT_UUARS = 4,
-};
-
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn)
{
u32 in[MLX5_ST_SZ_DW(alloc_uar_in)];
diff --git a/sys/dev/mlx5/mlx5_core/mlx5_vport.c b/sys/dev/mlx5/mlx5_core/mlx5_vport.c
index a3e1751..0e83332 100644
--- a/sys/dev/mlx5/mlx5_core/mlx5_vport.c
+++ b/sys/dev/mlx5/mlx5_core/mlx5_vport.c
@@ -30,28 +30,80 @@
#include <dev/mlx5/vport.h>
#include "mlx5_core.h"
-u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod)
+static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
+ int inlen);
+
+static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
+ u16 vport, u32 *out, int outlen)
{
- u32 in[MLX5_ST_SZ_DW(query_vport_state_in)];
- u32 out[MLX5_ST_SZ_DW(query_vport_state_out)];
int err;
+ u32 in[MLX5_ST_SZ_DW(query_vport_state_in)];
memset(in, 0, sizeof(in));
MLX5_SET(query_vport_state_in, in, opcode,
MLX5_CMD_OP_QUERY_VPORT_STATE);
MLX5_SET(query_vport_state_in, in, op_mod, opmod);
+ MLX5_SET(query_vport_state_in, in, vport_number, vport);
+ if (vport)
+ MLX5_SET(query_vport_state_in, in, other_vport, 1);
- err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
- sizeof(out));
+ err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
if (err)
mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n");
+ return err;
+}
+
+u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
+{
+ u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};
+
+ _mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));
+
return MLX5_GET(query_vport_state_out, out, state);
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_state);
-static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u32 vport,
+u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
+{
+ u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};
+
+ _mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));
+
+ return MLX5_GET(query_vport_state_out, out, admin_state);
+}
+EXPORT_SYMBOL(mlx5_query_vport_admin_state);
+
+int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
+ u16 vport, u8 state)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)];
+ u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(modify_vport_state_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_VPORT_STATE);
+ MLX5_SET(modify_vport_state_in, in, op_mod, opmod);
+ MLX5_SET(modify_vport_state_in, in, vport_number, vport);
+
+ if (vport)
+ MLX5_SET(modify_vport_state_in, in, other_vport, 1);
+
+ MLX5_SET(modify_vport_state_in, in, admin_state, state);
+
+ err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
+ sizeof(out));
+ if (err)
+ mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_VPORT_STATE failed\n");
+
+ return err;
+}
+EXPORT_SYMBOL(mlx5_modify_vport_admin_state);
+
+static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
u32 *out, int outlen)
{
u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
@@ -68,12 +120,32 @@ static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u32 vport,
return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
}
-int mlx5_vport_alloc_q_counter(struct mlx5_core_dev *mdev, int *counter_set_id)
+static u32 mlx5_vport_max_q_counter_allocator(struct mlx5_core_dev *mdev,
+ int client_id)
+{
+ switch (client_id) {
+ case MLX5_INTERFACE_PROTOCOL_IB:
+ return (MLX5_CAP_GEN(mdev, max_qp_cnt) -
+ MLX5_QCOUNTER_SETS_NETDEV);
+ case MLX5_INTERFACE_PROTOCOL_ETH:
+ return MLX5_QCOUNTER_SETS_NETDEV;
+ default:
+ mlx5_core_warn(mdev, "Unknown Client: %d\n", client_id);
+ return 0;
+ }
+}
+
+int mlx5_vport_alloc_q_counter(struct mlx5_core_dev *mdev,
+ int client_id, u16 *counter_set_id)
{
u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)];
u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)];
int err;
+	if (mdev->num_q_counter_allocated[client_id] >=
+	    mlx5_vport_max_q_counter_allocator(mdev, client_id))
+		return -EINVAL;
+
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
@@ -83,19 +155,24 @@ int mlx5_vport_alloc_q_counter(struct mlx5_core_dev *mdev, int *counter_set_id)
err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
out, sizeof(out));
- if (err)
- return err;
+	if (!err) {
+		*counter_set_id = MLX5_GET(alloc_q_counter_out, out,
+					   counter_set_id);
+		mdev->num_q_counter_allocated[client_id]++;
+	}
- *counter_set_id = MLX5_GET(alloc_q_counter_out, out,
- counter_set_id);
return err;
}
int mlx5_vport_dealloc_q_counter(struct mlx5_core_dev *mdev,
- int counter_set_id)
+ int client_id, u16 counter_set_id)
{
u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)];
u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)];
+ int err;
+
+ if (mdev->num_q_counter_allocated[client_id] <= 0)
+ return -EINVAL;
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
@@ -105,12 +182,16 @@ int mlx5_vport_dealloc_q_counter(struct mlx5_core_dev *mdev,
MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
counter_set_id);
- return mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
- out, sizeof(out));
+ err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
+ out, sizeof(out));
+
+	if (!err)
+		mdev->num_q_counter_allocated[client_id]--;
+
+ return err;
}
-static int mlx5_vport_query_q_counter(struct mlx5_core_dev *mdev,
- int counter_set_id,
+int mlx5_vport_query_q_counter(struct mlx5_core_dev *mdev,
+ u16 counter_set_id,
int reset,
void *out,
int out_size)
@@ -128,7 +209,7 @@ static int mlx5_vport_query_q_counter(struct mlx5_core_dev *mdev,
}
int mlx5_vport_query_out_of_rx_buffer(struct mlx5_core_dev *mdev,
- int counter_set_id,
+ u16 counter_set_id,
u32 *out_of_rx_buffer)
{
u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
@@ -148,7 +229,7 @@ int mlx5_vport_query_out_of_rx_buffer(struct mlx5_core_dev *mdev,
}
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
- u32 vport, u8 *addr)
+ u16 vport, u8 *addr)
{
u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
@@ -174,6 +255,43 @@ out:
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);
+int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
+ u16 vport, u8 *addr)
+{
+ void *in;
+ int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+ int err;
+ void *nic_vport_ctx;
+ u8 *perm_mac;
+
+ in = mlx5_vzalloc(inlen);
+ if (!in) {
+ mlx5_core_warn(mdev, "failed to allocate inbox\n");
+ return -ENOMEM;
+ }
+
+ MLX5_SET(modify_nic_vport_context_in, in,
+ field_select.permanent_address, 1);
+ MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
+
+ if (vport)
+ MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
+
+ nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
+ in, nic_vport_context);
+ perm_mac = MLX5_ADDR_OF(nic_vport_context, nic_vport_ctx,
+ permanent_address);
+
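+	/* the MAC occupies the low six bytes of the 8-byte field */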
+ ether_addr_copy(&perm_mac[2], addr);
+
+ err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+
+ kvfree(in);
+
+ return err;
+}
+EXPORT_SYMBOL(mlx5_modify_nic_vport_mac_address);
+
int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
u64 *system_image_guid)
{
@@ -220,7 +338,8 @@ out:
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
-int mlx5_query_nic_vport_port_guid(struct mlx5_core_dev *mdev, u64 *port_guid)
+static int mlx5_query_nic_vport_port_guid(struct mlx5_core_dev *mdev,
+ u64 *port_guid)
{
u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
@@ -241,7 +360,6 @@ out:
kvfree(out);
return err;
}
-EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_port_guid);
int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
u16 *qkey_viol_cntr)
@@ -347,7 +465,85 @@ int mlx5_set_nic_vport_current_mac(struct mlx5_core_dev *mdev, int vport,
}
EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_current_mac);
-int mlx5_set_nic_vport_vlan_list(struct mlx5_core_dev *dev, u32 vport,
+int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
+ u32 vport, u64 node_guid)
+{
+ void *in;
+ int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+ int err;
+ void *nic_vport_context;
+
+ if (!vport)
+ return -EINVAL;
+ if (!MLX5_CAP_GEN(mdev, vport_group_manager))
+ return -EPERM;
+ if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
+ return -ENOTSUPP;
+
+ in = mlx5_vzalloc(inlen);
+ if (!in) {
+ mlx5_core_warn(mdev, "failed to allocate inbox\n");
+ return -ENOMEM;
+ }
+
+ MLX5_SET(modify_nic_vport_context_in, in,
+ field_select.node_guid, 1);
+ MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
+
+ MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
+
+ nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
+ in, nic_vport_context);
+ MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);
+
+ err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+
+ kvfree(in);
+
+ return err;
+}
+EXPORT_SYMBOL(mlx5_modify_nic_vport_node_guid);
+
+int mlx5_modify_nic_vport_port_guid(struct mlx5_core_dev *mdev,
+ u32 vport, u64 port_guid)
+{
+ void *in;
+ int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+ int err;
+ void *nic_vport_context;
+
+ if (!vport)
+ return -EINVAL;
+ if (!MLX5_CAP_GEN(mdev, vport_group_manager))
+ return -EPERM;
+ if (!MLX5_CAP_ESW(mdev, nic_vport_port_guid_modify))
+ return -ENOTSUPP;
+
+ in = mlx5_vzalloc(inlen);
+ if (!in) {
+ mlx5_core_warn(mdev, "failed to allocate inbox\n");
+ return -ENOMEM;
+ }
+
+ MLX5_SET(modify_nic_vport_context_in, in,
+ field_select.port_guid, 1);
+ MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
+
+ MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
+
+ nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
+ in, nic_vport_context);
+ MLX5_SET64(nic_vport_context, nic_vport_context, port_guid, port_guid);
+
+ err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+
+ kvfree(in);
+
+ return err;
+}
+EXPORT_SYMBOL(mlx5_modify_nic_vport_port_guid);
+
+int mlx5_set_nic_vport_vlan_list(struct mlx5_core_dev *dev, u16 vport,
u16 *vlan_list, int list_len)
{
void *in, *ctx;
@@ -473,7 +669,7 @@ int mlx5_set_nic_vport_promisc(struct mlx5_core_dev *mdev, int vport,
EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_promisc);
int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
- u32 vport,
+ u16 vport,
enum mlx5_list_type list_type,
u8 addr_list[][ETH_ALEN],
int *list_size)
@@ -482,7 +678,6 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
void *nic_vport_ctx;
int max_list_size;
int req_list_size;
- u8 *mac_addr;
int out_sz;
void *out;
int err;
@@ -527,7 +722,7 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
*list_size = req_list_size;
for (i = 0; i < req_list_size; i++) {
- mac_addr = MLX5_ADDR_OF(nic_vport_context,
+ u8 *mac_addr = MLX5_ADDR_OF(nic_vport_context,
nic_vport_ctx,
current_uc_mac_address[i]) + 2;
ether_addr_copy(addr_list[i], mac_addr);
@@ -592,27 +787,24 @@ int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);
-int mlx5_query_nic_vport_vlan_list(struct mlx5_core_dev *dev,
- u32 vport,
- u16 *vlan_list,
- int *list_size)
+int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
+ u16 vport,
+ u16 vlans[],
+ int *size)
{
u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
void *nic_vport_ctx;
- int max_list_size;
int req_list_size;
+ int max_list_size;
int out_sz;
void *out;
- void *vlan_addr;
int err;
int i;
- req_list_size = *list_size;
-
- max_list_size = 1 << MLX5_CAP_GEN_MAX(dev, log_max_vlan_list);
-
+ req_list_size = *size;
+ max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
if (req_list_size > max_list_size) {
- mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
+ mlx5_core_warn(dev, "Requested list size (%d) > (%d) max list size\n",
req_list_size, max_list_size);
req_list_size = max_list_size;
}
@@ -643,17 +835,18 @@ int mlx5_query_nic_vport_vlan_list(struct mlx5_core_dev *dev,
req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
allowed_list_size);
- *list_size = req_list_size;
+ *size = req_list_size;
for (i = 0; i < req_list_size; i++) {
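+		/* allowed-list VLAN entries reuse the
+		 * current_uc_mac_address slots of the layout */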
- vlan_addr = MLX5_ADDR_OF(nic_vport_context, nic_vport_ctx,
+ void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
+ nic_vport_ctx,
current_uc_mac_address[i]);
- vlan_list[i] = MLX5_GET(vlan_layout, vlan_addr, vlan);
+ vlans[i] = MLX5_GET(vlan_layout, vlan_addr, vlan);
}
out:
kfree(out);
return err;
}
-EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_vlan_list);
+EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_vlans);
int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
u16 vlans[],
@@ -706,6 +899,29 @@ int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans);
+int mlx5_query_nic_vport_roce_en(struct mlx5_core_dev *mdev, u8 *enable)
+{
+ u32 *out;
+ int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+ int err;
+
+ out = kzalloc(outlen, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
+ if (err)
+ goto out;
+
+ *enable = MLX5_GET(query_nic_vport_context_out, out,
+ nic_vport_context.roce_en);
+
+out:
+ kfree(out);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_roce_en);
+
int mlx5_set_nic_vport_permanent_mac(struct mlx5_core_dev *mdev, int vport,
u8 *addr)
{
@@ -828,6 +1044,29 @@ out:
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);
+static int mlx5_query_hca_vport_port_guid(struct mlx5_core_dev *mdev,
+ u64 *port_guid)
+{
+ u32 *out;
+ int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
+ int err;
+
+ out = mlx5_vzalloc(outlen);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
+ if (err)
+ goto out;
+
+ *port_guid = MLX5_GET64(query_hca_vport_context_out, out,
+ hca_vport_context.port_guid);
+
+out:
+ kvfree(out);
+ return err;
+}
+
int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 port_num,
u16 vport_num, u16 gid_index, union ib_gid *gid)
{
@@ -966,6 +1205,29 @@ out:
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);
+static int mlx5_query_hca_min_wqe_header(struct mlx5_core_dev *mdev,
+ int *min_header)
+{
+ u32 *out;
+ u32 outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
+ int err;
+
+ out = mlx5_vzalloc(outlen);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
+ if (err)
+ goto out;
+
+ *min_header = MLX5_GET(query_hca_vport_context_out, out,
+ hca_vport_context.min_wqe_inline_mode);
+
+out:
+ kvfree(out);
+ return err;
+}
+
static int mlx5_modify_eswitch_vport_context(struct mlx5_core_dev *mdev,
u16 vport, void *in, int inlen)
{
@@ -1020,74 +1282,117 @@ int mlx5_set_eswitch_cvlan_info(struct mlx5_core_dev *mdev, u8 vport,
}
EXPORT_SYMBOL_GPL(mlx5_set_eswitch_cvlan_info);
-int mlx5_arm_vport_context_events(struct mlx5_core_dev *mdev,
- u8 vport,
- u32 events_mask)
+int mlx5_query_vport_mtu(struct mlx5_core_dev *mdev, int *mtu)
+{
+ u32 *out;
+ u32 outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+ int err;
+
+ out = mlx5_vzalloc(outlen);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
+ if (err)
+ goto out;
+
+ *mtu = MLX5_GET(query_nic_vport_context_out, out,
+ nic_vport_context.mtu);
+
+out:
+ kvfree(out);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_vport_mtu);
+
+int mlx5_set_vport_mtu(struct mlx5_core_dev *mdev, int mtu)
{
u32 *in;
u32 inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
- void *nic_vport_ctx;
int err;
in = mlx5_vzalloc(inlen);
if (!in)
return -ENOMEM;
- MLX5_SET(modify_nic_vport_context_in,
- in,
- opcode,
- MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
- MLX5_SET(modify_nic_vport_context_in,
- in,
- field_select.change_event,
- 1);
- MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
- if (vport)
- MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
- nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
- in,
- nic_vport_context);
+ MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
+ MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);
- MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);
-
- if (events_mask & MLX5_UC_ADDR_CHANGE)
- MLX5_SET(nic_vport_context,
- nic_vport_ctx,
- event_on_uc_address_change,
- 1);
- if (events_mask & MLX5_MC_ADDR_CHANGE)
- MLX5_SET(nic_vport_context,
- nic_vport_ctx,
- event_on_mc_address_change,
- 1);
- if (events_mask & MLX5_VLAN_CHANGE)
- MLX5_SET(nic_vport_context,
- nic_vport_ctx,
- event_on_vlan_change,
- 1);
- if (events_mask & MLX5_PROMISC_CHANGE)
- MLX5_SET(nic_vport_context,
- nic_vport_ctx,
- event_on_promisc_change,
- 1);
- if (events_mask & MLX5_MTU_CHANGE)
- MLX5_SET(nic_vport_context,
- nic_vport_ctx,
- event_on_mtu,
- 1);
+ err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+
+ kvfree(in);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_set_vport_mtu);
+
+static int mlx5_query_vport_min_wqe_header(struct mlx5_core_dev *mdev,
+ int *min_header)
+{
+ u32 *out;
+ u32 outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+ int err;
+
+ out = mlx5_vzalloc(outlen);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
+ if (err)
+ goto out;
+
+ *min_header = MLX5_GET(query_nic_vport_context_out, out,
+ nic_vport_context.min_wqe_inline_mode);
+
+out:
+ kvfree(out);
+ return err;
+}
+
+int mlx5_set_vport_min_wqe_header(struct mlx5_core_dev *mdev,
+ u8 vport, int min_header)
+{
+ u32 *in;
+ u32 inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+ int err;
+
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_nic_vport_context_in, in,
+ field_select.min_wqe_inline_mode, 1);
+ MLX5_SET(modify_nic_vport_context_in, in,
+ nic_vport_context.min_wqe_inline_mode, min_header);
+ MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
+ MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
err = mlx5_modify_nic_vport_context(mdev, in, inlen);
kvfree(in);
return err;
}
-EXPORT_SYMBOL_GPL(mlx5_arm_vport_context_events);
+EXPORT_SYMBOL_GPL(mlx5_set_vport_min_wqe_header);
-int mlx5_query_vport_promisc(struct mlx5_core_dev *mdev,
- u32 vport,
- u8 *promisc_uc,
- u8 *promisc_mc,
- u8 *promisc_all)
+int mlx5_query_min_wqe_header(struct mlx5_core_dev *dev, int *min_header)
+{
+ switch (MLX5_CAP_GEN(dev, port_type)) {
+ case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
+ return mlx5_query_hca_min_wqe_header(dev, min_header);
+
+ case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
+ return mlx5_query_vport_min_wqe_header(dev, min_header);
+
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_GPL(mlx5_query_min_wqe_header);
+
+int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
+ u16 vport,
+ int *promisc_uc,
+ int *promisc_mc,
+ int *promisc_all)
{
u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
@@ -1278,3 +1583,74 @@ ex:
kvfree(out);
return err;
}
+
+int mlx5_query_vport_system_image_guid(struct mlx5_core_dev *dev,
+ u64 *sys_image_guid)
+{
+ switch (MLX5_CAP_GEN(dev, port_type)) {
+ case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
+ return mlx5_query_hca_vport_system_image_guid(dev,
+ sys_image_guid);
+
+ case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
+ return mlx5_query_nic_vport_system_image_guid(dev,
+ sys_image_guid);
+
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_GPL(mlx5_query_vport_system_image_guid);
+
+int mlx5_query_vport_node_guid(struct mlx5_core_dev *dev, u64 *node_guid)
+{
+ switch (MLX5_CAP_GEN(dev, port_type)) {
+ case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
+ return mlx5_query_hca_vport_node_guid(dev, node_guid);
+
+ case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
+ return mlx5_query_nic_vport_node_guid(dev, node_guid);
+
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_GPL(mlx5_query_vport_node_guid);
+
+int mlx5_query_vport_port_guid(struct mlx5_core_dev *dev, u64 *port_guid)
+{
+ switch (MLX5_CAP_GEN(dev, port_type)) {
+ case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
+ return mlx5_query_hca_vport_port_guid(dev, port_guid);
+
+ case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
+ return mlx5_query_nic_vport_port_guid(dev, port_guid);
+
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_GPL(mlx5_query_vport_port_guid);
+
+int mlx5_query_hca_vport_state(struct mlx5_core_dev *dev, u8 *vport_state)
+{
+ u32 *out;
+ int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
+ int err;
+
+ out = mlx5_vzalloc(outlen);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_query_hca_vport_context(dev, 1, 0, out, outlen);
+ if (err)
+ goto out;
+
+ *vport_state = MLX5_GET(query_hca_vport_context_out, out,
+ hca_vport_context.vport_state);
+
+out:
+ kvfree(out);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_state);
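
A short sketch of the new admin-state pair in use, not part of the
patch; the admin-state encoding (0 = down, 1 = up) is assumed from the
PRM rather than taken from this change:

	#include <dev/mlx5/vport.h>

	/* Sketch only: force the local NIC vport administratively up. */
	static int
	example_force_admin_up(struct mlx5_core_dev *mdev)
	{
		u8 admin;

		admin = mlx5_query_vport_admin_state(mdev,
		    MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);
		if (admin == 1)		/* assumed: 1 == up */
			return 0;

		return mlx5_modify_vport_admin_state(mdev,
		    MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0, 1);
	}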
diff --git a/sys/dev/mlx5/mlx5_core/transobj.h b/sys/dev/mlx5/mlx5_core/transobj.h
index 655e625..4519b0f 100644
--- a/sys/dev/mlx5/mlx5_core/transobj.h
+++ b/sys/dev/mlx5/mlx5_core/transobj.h
@@ -34,10 +34,12 @@ int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *rqn);
int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 *in, int inlen);
void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn);
+int mlx5_core_query_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *out);
int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *sqn);
int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 *in, int inlen);
void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn);
+int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out);
int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *tirn);
void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn);
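
A sketch of the new query entry points in use, not part of the patch;
the query_rq_out field path follows the usual mlx5_ifc naming and is
assumed here:

	#include <dev/mlx5/driver.h>
	#include "transobj.h"

	/* Sketch only: read back the state field of an RQ. */
	static int
	example_rq_state(struct mlx5_core_dev *dev, u32 rqn, u8 *state)
	{
		u32 out[MLX5_ST_SZ_DW(query_rq_out)] = {0};
		int err;

		err = mlx5_core_query_rq(dev, rqn, out);
		if (err)
			return err;

		*state = MLX5_GET(query_rq_out, out, rq_context.state);
		return 0;
	}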
diff --git a/sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c b/sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c
index a0dcd2d..88bd3fd 100644
--- a/sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c
+++ b/sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c
@@ -472,7 +472,7 @@ mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
MLX5_SET_TO_ONES(fte_match_param, match_criteria,
- outer_headers.vlan_tag);
+ outer_headers.cvlan_tag);
switch (rule_type) {
case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
@@ -480,12 +480,12 @@ mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
break;
case MLX5E_VLAN_RULE_TYPE_ANY_VID:
ft_ix = &priv->vlan.any_vlan_rule_ft_ix;
- MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag,
+ MLX5_SET(fte_match_param, match_value, outer_headers.cvlan_tag,
1);
break;
default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
ft_ix = &priv->vlan.active_vlans_ft_ix[vid];
- MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag,
+ MLX5_SET(fte_match_param, match_value, outer_headers.cvlan_tag,
1);
MLX5_SET_TO_ONES(fte_match_param, match_criteria,
outer_headers.first_vid);
@@ -945,7 +945,7 @@ mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
g[0].log_sz = 12;
g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
- outer_headers.vlan_tag);
+ outer_headers.cvlan_tag);
MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
outer_headers.first_vid);
@@ -953,7 +953,7 @@ mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
g[1].log_sz = 1;
g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
- outer_headers.vlan_tag);
+ outer_headers.cvlan_tag);
priv->ft.vlan = mlx5_create_flow_table(priv->mdev, 0,
MLX5_FLOW_TABLE_TYPE_NIC_RCV,
diff --git a/sys/dev/mlx5/mlx5_en/mlx5_en_main.c b/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
index 2136567..0a73b95 100644
--- a/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
+++ b/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
@@ -181,7 +181,7 @@ mlx5e_update_carrier(struct mlx5e_priv *priv)
u8 i;
port_state = mlx5_query_vport_state(mdev,
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT);
+ MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);
if (port_state == VPORT_STATE_UP) {
priv->media_status_last |= IFM_ACTIVE;
@@ -2235,6 +2235,7 @@ mlx5e_open_locked(struct ifnet *ifp)
{
struct mlx5e_priv *priv = ifp->if_softc;
int err;
+ u16 set_id;
/* check if already opened */
if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0)
@@ -2253,13 +2254,17 @@ mlx5e_open_locked(struct ifnet *ifp)
__func__, err);
return (err);
}
- err = mlx5_vport_alloc_q_counter(priv->mdev, &priv->counter_set_id);
+ err = mlx5_vport_alloc_q_counter(priv->mdev,
+ MLX5_INTERFACE_PROTOCOL_ETH, &set_id);
if (err) {
if_printf(priv->ifp,
"%s: mlx5_vport_alloc_q_counter failed: %d\n",
__func__, err);
goto err_close_tises;
}
+ /* store counter set ID */
+ priv->counter_set_id = set_id;
+
err = mlx5e_open_channels(priv);
if (err) {
if_printf(ifp, "%s: mlx5e_open_channels failed, %d\n",
@@ -2310,7 +2315,8 @@ err_close_channels:
mlx5e_close_channels(priv);
err_dalloc_q_counter:
- mlx5_vport_dealloc_q_counter(priv->mdev, priv->counter_set_id);
+ mlx5_vport_dealloc_q_counter(priv->mdev,
+ MLX5_INTERFACE_PROTOCOL_ETH, priv->counter_set_id);
err_close_tises:
mlx5e_close_tises(priv);
@@ -2352,7 +2358,8 @@ mlx5e_close_locked(struct ifnet *ifp)
mlx5e_close_tirs(priv);
mlx5e_close_rqt(priv);
mlx5e_close_channels(priv);
- mlx5_vport_dealloc_q_counter(priv->mdev, priv->counter_set_id);
+ mlx5_vport_dealloc_q_counter(priv->mdev,
+ MLX5_INTERFACE_PROTOCOL_ETH, priv->counter_set_id);
mlx5e_close_tises(priv);
return (0);
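
For reference, the whole counter-set lifecycle from a client's point of
view, mirroring the open/close paths above; a sketch, not part of the
patch:

	#include <dev/mlx5/vport.h>

	/* Sketch only: allocate an ETH queue counter set, read the
	 * out-of-buffer drop count once, and release the set. */
	static int
	example_read_oob(struct mlx5_core_dev *mdev, u32 *drops)
	{
		u16 set_id;
		int err;

		err = mlx5_vport_alloc_q_counter(mdev,
		    MLX5_INTERFACE_PROTOCOL_ETH, &set_id);
		if (err)
			return err;

		err = mlx5_vport_query_out_of_rx_buffer(mdev, set_id, drops);

		mlx5_vport_dealloc_q_counter(mdev,
		    MLX5_INTERFACE_PROTOCOL_ETH, set_id);
		return err;
	}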
diff --git a/sys/dev/mlx5/mlx5_ifc.h b/sys/dev/mlx5/mlx5_ifc.h
index 461e5b0..e3e6c86 100644
--- a/sys/dev/mlx5/mlx5_ifc.h
+++ b/sys/dev/mlx5/mlx5_ifc.h
@@ -53,7 +53,9 @@ enum {
MLX5_EVENT_TYPE_PORT_CHANGE = 0x9,
MLX5_EVENT_TYPE_GPIO_EVENT = 0x15,
MLX5_EVENT_TYPE_CODING_PORT_MODULE_EVENT = 0x16,
+ MLX5_EVENT_TYPE_CODING_TEMP_WARNING_EVENT = 0x17,
MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19,
+ MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT = 0x1e,
MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a,
MLX5_EVENT_TYPE_STALL_EVENT = 0x1b,
MLX5_EVENT_TYPE_DROPPED_PACKET_LOGGED_EVENT = 0x1f,
@@ -63,10 +65,11 @@ enum {
};
enum {
- MLX5_MODIFY_TIR_BITMASK_LRO = 0x0,
- MLX5_MODIFY_TIR_BITMASK_INDIRECT_TABLE = 0x1,
- MLX5_MODIFY_TIR_BITMASK_HASH = 0x2,
- MLX5_MODIFY_TIR_BITMASK_TUNNELED_OFFLOAD_EN = 0x3
+ MLX5_MODIFY_TIR_BITMASK_LRO = 0x0,
+ MLX5_MODIFY_TIR_BITMASK_INDIRECT_TABLE = 0x1,
+ MLX5_MODIFY_TIR_BITMASK_HASH = 0x2,
+ MLX5_MODIFY_TIR_BITMASK_TUNNELED_OFFLOAD_EN = 0x3,
+ MLX5_MODIFY_TIR_BITMASK_SELF_LB_EN = 0x4
};
enum {
@@ -144,6 +147,8 @@ enum {
MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771,
MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772,
MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773,
+ MLX5_CMD_OP_SET_RATE_LIMIT = 0x780,
+ MLX5_CMD_OP_QUERY_RATE_LIMIT = 0x781,
MLX5_CMD_OP_ALLOC_PD = 0x800,
MLX5_CMD_OP_DEALLOC_PD = 0x801,
MLX5_CMD_OP_ALLOC_UAR = 0x802,
@@ -165,6 +170,8 @@ enum {
MLX5_CMD_OP_DEACTIVATE_TRACER = 0x815,
MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN = 0x816,
MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN = 0x817,
+ MLX5_CMD_OP_SET_DIAGNOSTICS = 0x820,
+ MLX5_CMD_OP_QUERY_DIAGNOSTICS = 0x821,
MLX5_CMD_OP_QUERY_CONG_STATUS = 0x822,
MLX5_CMD_OP_MODIFY_CONG_STATUS = 0x823,
MLX5_CMD_OP_QUERY_CONG_PARAMS = 0x824,
@@ -370,8 +377,8 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
u8 ip_protocol[0x8];
u8 ip_dscp[0x6];
u8 ip_ecn[0x2];
- u8 vlan_tag[0x1];
- u8 reserved_0[0x1];
+ u8 cvlan_tag[0x1];
+ u8 svlan_tag[0x1];
u8 frag[0x1];
u8 reserved_1[0x4];
u8 tcp_flags[0x9];
@@ -438,6 +445,14 @@ struct mlx5_ifc_uint64_bits {
u8 lo[0x20];
};
+struct mlx5_ifc_application_prio_entry_bits {
+ u8 reserved_0[0x8];
+ u8 priority[0x3];
+ u8 reserved_1[0x2];
+ u8 sel[0x3];
+ u8 protocol_id[0x10];
+};
+
struct mlx5_ifc_nodnic_ring_doorbell_bits {
u8 reserved_0[0x8];
u8 ring_pi[0x10];
@@ -499,13 +514,57 @@ struct mlx5_ifc_ads_bits {
u8 rmac_31_0[0x20];
};
+struct mlx5_ifc_diagnostic_counter_cap_bits {
+ u8 sync[0x1];
+ u8 reserved_0[0xf];
+ u8 counter_id[0x10];
+};
+
+struct mlx5_ifc_debug_cap_bits {
+ u8 reserved_0[0x18];
+ u8 log_max_samples[0x8];
+
+ u8 single[0x1];
+ u8 repetitive[0x1];
+ u8 health_mon_rx_activity[0x1];
+ u8 reserved_1[0x15];
+ u8 log_min_sample_period[0x8];
+
+ u8 reserved_2[0x1c0];
+
+ struct mlx5_ifc_diagnostic_counter_cap_bits diagnostic_counter[0x1f0];
+};
+
+struct mlx5_ifc_snapshot_cap_bits {
+ u8 reserved_0[0x1d];
+ u8 suspend_qp_uc[0x1];
+ u8 suspend_qp_ud[0x1];
+ u8 suspend_qp_rc[0x1];
+
+ u8 reserved_1[0x1c];
+ u8 restore_pd[0x1];
+ u8 restore_uar[0x1];
+ u8 restore_mkey[0x1];
+ u8 restore_qp[0x1];
+
+ u8 reserved_2[0x1e];
+ u8 named_mkey[0x1];
+ u8 named_qp[0x1];
+
+ u8 reserved_3[0x7a0];
+};
+
struct mlx5_ifc_e_switch_cap_bits {
u8 vport_svlan_strip[0x1];
u8 vport_cvlan_strip[0x1];
u8 vport_svlan_insert[0x1];
u8 vport_cvlan_insert_if_not_exist[0x1];
u8 vport_cvlan_insert_overwrite[0x1];
- u8 reserved_0[0x1b];
+
+ u8 reserved_0[0x19];
+
+ u8 nic_vport_node_guid_modify[0x1];
+ u8 nic_vport_port_guid_modify[0x1];
u8 reserved_1[0x7e0];
};
@@ -540,6 +599,17 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
u8 reserved_1[0x7200];
};
+struct mlx5_ifc_qos_cap_bits {
+ u8 packet_pacing[0x1];
+ u8 reserved_0[0x1f];
+ u8 reserved_1[0x20];
+ u8 packet_pacing_max_rate[0x20];
+ u8 packet_pacing_min_rate[0x20];
+ u8 reserved_2[0x10];
+ u8 packet_pacing_rate_table_size[0x10];
+ u8 reserved_3[0x760];
+};
+
struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 csum_cap[0x1];
u8 vlan_cap[0x1];
@@ -581,8 +651,11 @@ enum {
struct mlx5_ifc_roce_cap_bits {
u8 roce_apm[0x1];
- u8 eth_prio_primary_in_rts2rts[0x1];
- u8 reserved_0[0x1e];
+ u8 rts2rts_primary_eth_prio[0x1];
+ u8 roce_rx_allow_untagged[0x1];
+ u8 rts2rts_src_addr_index_for_vlan_valid_vlan_id[0x1];
+
+ u8 reserved_0[0x1c];
u8 reserved_1[0x60];
@@ -630,21 +703,24 @@ enum {
struct mlx5_ifc_atomic_caps_bits {
u8 reserved_0[0x40];
- u8 atomic_req_endianess[0x1];
- u8 reserved_1[0x1f];
+ u8 atomic_req_8B_endianess_mode[0x2];
+ u8 reserved_1[0x4];
+ u8 supported_atomic_req_8B_endianess_mode_1[0x1];
- u8 reserved_2[0x20];
+ u8 reserved_2[0x19];
- u8 reserved_3[0x10];
- u8 atomic_operations[0x10];
+ u8 reserved_3[0x20];
u8 reserved_4[0x10];
- u8 atomic_size_qp[0x10];
+ u8 atomic_operations[0x10];
u8 reserved_5[0x10];
+ u8 atomic_size_qp[0x10];
+
+ u8 reserved_6[0x10];
u8 atomic_size_dc[0x10];
- u8 reserved_6[0x720];
+ u8 reserved_7[0x720];
};
struct mlx5_ifc_odp_cap_bits {
@@ -740,12 +816,16 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 pad_cap[0x1];
u8 cc_query_allowed[0x1];
u8 cc_modify_allowed[0x1];
- u8 reserved_15[0xd];
+ u8 start_pad[0x1];
+ u8 cache_line_128byte[0x1];
+ u8 reserved_15[0xb];
u8 gid_table_size[0x10];
u8 out_of_seq_cnt[0x1];
u8 vport_counters[0x1];
- u8 reserved_16[0x4];
+ u8 retransmission_q_counters[0x1];
+ u8 debug[0x1];
+ u8 reserved_16[0x2];
u8 max_qp_cnt[0xa];
u8 pkey_table_size[0x10];
@@ -769,7 +849,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_max_msg[0x5];
u8 reserved_21[0x4];
u8 max_tc[0x4];
- u8 reserved_22[0x6];
+ u8 temp_warn_event[0x1];
+ u8 dcbx[0x1];
+ u8 reserved_22[0x4];
u8 rol_s[0x1];
u8 rol_g[0x1];
u8 reserved_23[0x1];
@@ -787,29 +869,33 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 compact_address_vector[0x1];
u8 striding_rq[0x1];
- u8 reserved_25[0xc];
+ u8 reserved_25[0x1];
+ u8 ipoib_enhanced_offloads[0x1];
+ u8 ipoib_ipoib_offloads[0x1];
+ u8 reserved_26[0x8];
+ u8 dc_connect_qp[0x1];
u8 dc_cnak_trace[0x1];
u8 drain_sigerr[0x1];
u8 cmdif_checksum[0x2];
u8 sigerr_cqe[0x1];
- u8 reserved_26[0x1];
+ u8 reserved_27[0x1];
u8 wq_signature[0x1];
u8 sctr_data_cqe[0x1];
- u8 reserved_27[0x1];
+ u8 reserved_28[0x1];
u8 sho[0x1];
u8 tph[0x1];
u8 rf[0x1];
u8 dct[0x1];
- u8 reserved_28[0x1];
+ u8 qos[0x1];
u8 eth_net_offloads[0x1];
u8 roce[0x1];
u8 atomic[0x1];
- u8 reserved_29[0x1];
+ u8 reserved_30[0x1];
u8 cq_oi[0x1];
u8 cq_resize[0x1];
u8 cq_moderation[0x1];
- u8 reserved_30[0x3];
+ u8 reserved_31[0x3];
u8 cq_eq_remap[0x1];
u8 pg[0x1];
u8 block_lb_mc[0x1];
@@ -819,28 +905,28 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 cd[0x1];
u8 atm[0x1];
u8 apm[0x1];
- u8 reserved_31[0x7];
+ u8 reserved_32[0x7];
u8 qkv[0x1];
u8 pkv[0x1];
- u8 reserved_32[0x4];
+ u8 reserved_33[0x4];
u8 xrc[0x1];
u8 ud[0x1];
u8 uc[0x1];
u8 rc[0x1];
- u8 reserved_33[0xa];
+ u8 reserved_34[0xa];
u8 uar_sz[0x6];
- u8 reserved_34[0x8];
+ u8 reserved_35[0x8];
u8 log_pg_sz[0x8];
u8 bf[0x1];
u8 driver_version[0x1];
u8 pad_tx_eth_packet[0x1];
- u8 reserved_35[0x8];
+ u8 reserved_36[0x8];
u8 log_bf_reg_size[0x5];
- u8 reserved_36[0x10];
-
u8 reserved_37[0x10];
+
+ u8 num_of_diagnostic_counters[0x10];
u8 max_wqe_sz_sq[0x10];
u8 reserved_38[0x10];
@@ -913,9 +999,11 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_65[0x20];
- u8 device_frequency[0x20];
+ u8 device_frequency_mhz[0x20];
- u8 reserved_66[0xa0];
+ u8 device_frequency_khz[0x20];
+
+ u8 reserved_66[0x80];
u8 log_max_atomic_size_qp[0x8];
u8 reserved_67[0x10];
@@ -930,6 +1018,12 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_69[0x220];
};
+enum mlx5_flow_destination_type {
+ MLX5_FLOW_DESTINATION_TYPE_VPORT = 0x0,
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1,
+ MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2,
+};
+
union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits {
struct mlx5_ifc_dest_format_struct_bits dest_format_struct;
struct mlx5_ifc_flow_counter_list_bits flow_counter_list;
@@ -967,6 +1061,11 @@ enum {
MLX5_WQ_TYPE_STRQ_CYCLIC = 0x3,
};
+enum rq_type {
+ RQ_TYPE_NONE,
+ RQ_TYPE_STRIDE,
+};
+
enum {
MLX5_WQ_END_PAD_MODE_NONE = 0x0,
MLX5_WQ_END_PAD_MODE_ALIGN = 0x1,
@@ -1177,6 +1276,95 @@ struct mlx5_ifc_field_select_802_1qau_rp_bits {
u8 field_select_8021qaurp[0x20];
};
+struct mlx5_ifc_pptb_reg_bits {
+ u8 reserved_0[0x2];
+ u8 mm[0x2];
+ u8 reserved_1[0x4];
+ u8 local_port[0x8];
+ u8 reserved_2[0x6];
+ u8 cm[0x1];
+ u8 um[0x1];
+ u8 pm[0x8];
+
+ u8 prio7buff[0x4];
+ u8 prio6buff[0x4];
+ u8 prio5buff[0x4];
+ u8 prio4buff[0x4];
+ u8 prio3buff[0x4];
+ u8 prio2buff[0x4];
+ u8 prio1buff[0x4];
+ u8 prio0buff[0x4];
+
+ u8 pm_msb[0x8];
+ u8 reserved_3[0x10];
+ u8 ctrl_buff[0x4];
+ u8 untagged_buff[0x4];
+};
+
+struct mlx5_ifc_dcbx_app_reg_bits {
+ u8 reserved_0[0x8];
+ u8 port_number[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0x1a];
+ u8 num_app_prio[0x6];
+
+ u8 reserved_3[0x40];
+
+ struct mlx5_ifc_application_prio_entry_bits app_prio[0];
+};
+
+struct mlx5_ifc_dcbx_param_reg_bits {
+ u8 dcbx_cee_cap[0x1];
+ u8 dcbx_ieee_cap[0x1];
+ u8 dcbx_standby_cap[0x1];
+ u8 reserved_0[0x5];
+ u8 port_number[0x8];
+ u8 reserved_1[0xa];
+ u8 max_application_table_size[0x6];
+
+ u8 reserved_2[0x15];
+ u8 version_oper[0x3];
+ u8 reserved_3[0x5];
+ u8 version_admin[0x3];
+
+ u8 willing_admin[0x1];
+ u8 reserved_4[0x3];
+ u8 pfc_cap_oper[0x4];
+ u8 reserved_5[0x4];
+ u8 pfc_cap_admin[0x4];
+ u8 reserved_6[0x4];
+ u8 num_of_tc_oper[0x4];
+ u8 reserved_7[0x4];
+ u8 num_of_tc_admin[0x4];
+
+ u8 remote_willing[0x1];
+ u8 reserved_8[0x3];
+ u8 remote_pfc_cap[0x4];
+ u8 reserved_9[0x14];
+ u8 remote_num_of_tc[0x4];
+
+ u8 reserved_10[0x18];
+ u8 error[0x8];
+
+ u8 reserved_11[0x160];
+};
+
+struct mlx5_ifc_qetcr_reg_bits {
+ u8 operation_type[0x2];
+ u8 cap_local_admin[0x1];
+ u8 cap_remote_admin[0x1];
+ u8 reserved_0[0x4];
+ u8 port_number[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0x20];
+
+ u8 tc[8][0x40];
+
+ u8 global_configuration[0x40];
+};
+
struct mlx5_ifc_nodnic_ring_config_reg_bits {
u8 queue_address_63_32[0x20];
@@ -1482,15 +1670,15 @@ struct mlx5_ifc_qpc_bits {
u8 log_sq_size[0x4];
u8 reserved_7[0x6];
u8 rlky[0x1];
- u8 reserved_8[0x4];
+ u8 ulp_stateless_offload_mode[0x4];
u8 counter_set_id[0x8];
u8 uar_page[0x18];
- u8 reserved_9[0x8];
+ u8 reserved_8[0x8];
u8 user_index[0x18];
- u8 reserved_10[0x3];
+ u8 reserved_9[0x3];
u8 log_page_size[0x5];
u8 remote_qpn[0x18];
@@ -1499,66 +1687,66 @@ struct mlx5_ifc_qpc_bits {
struct mlx5_ifc_ads_bits secondary_address_path;
u8 log_ack_req_freq[0x4];
- u8 reserved_11[0x4];
+ u8 reserved_10[0x4];
u8 log_sra_max[0x3];
- u8 reserved_12[0x2];
+ u8 reserved_11[0x2];
u8 retry_count[0x3];
u8 rnr_retry[0x3];
- u8 reserved_13[0x1];
+ u8 reserved_12[0x1];
u8 fre[0x1];
u8 cur_rnr_retry[0x3];
u8 cur_retry_count[0x3];
- u8 reserved_14[0x5];
+ u8 reserved_13[0x5];
- u8 reserved_15[0x20];
+ u8 reserved_14[0x20];
- u8 reserved_16[0x8];
+ u8 reserved_15[0x8];
u8 next_send_psn[0x18];
- u8 reserved_17[0x8];
+ u8 reserved_16[0x8];
u8 cqn_snd[0x18];
- u8 reserved_18[0x40];
+ u8 reserved_17[0x40];
- u8 reserved_19[0x8];
+ u8 reserved_18[0x8];
u8 last_acked_psn[0x18];
- u8 reserved_20[0x8];
+ u8 reserved_19[0x8];
u8 ssn[0x18];
- u8 reserved_21[0x8];
+ u8 reserved_20[0x8];
u8 log_rra_max[0x3];
- u8 reserved_22[0x1];
+ u8 reserved_21[0x1];
u8 atomic_mode[0x4];
u8 rre[0x1];
u8 rwe[0x1];
u8 rae[0x1];
- u8 reserved_23[0x1];
+ u8 reserved_22[0x1];
u8 page_offset[0x6];
- u8 reserved_24[0x3];
+ u8 reserved_23[0x3];
u8 cd_slave_receive[0x1];
u8 cd_slave_send[0x1];
u8 cd_master[0x1];
- u8 reserved_25[0x3];
+ u8 reserved_24[0x3];
u8 min_rnr_nak[0x5];
u8 next_rcv_psn[0x18];
- u8 reserved_26[0x8];
+ u8 reserved_25[0x8];
u8 xrcd[0x18];
- u8 reserved_27[0x8];
+ u8 reserved_26[0x8];
u8 cqn_rcv[0x18];
u8 dbr_addr[0x40];
u8 q_key[0x20];
- u8 reserved_28[0x5];
+ u8 reserved_27[0x5];
u8 rq_type[0x3];
u8 srqn_rmpn[0x18];
- u8 reserved_29[0x8];
+ u8 reserved_28[0x8];
u8 rmsn[0x18];
u8 hw_sq_wqebb_counter[0x10];
@@ -1568,9 +1756,9 @@ struct mlx5_ifc_qpc_bits {
u8 sw_rq_counter[0x20];
- u8 reserved_30[0x20];
+ u8 reserved_29[0x20];
- u8 reserved_31[0xf];
+ u8 reserved_30[0xf];
u8 cgs[0x1];
u8 cs_req[0x8];
u8 cs_res[0x8];
@@ -1580,17 +1768,17 @@ struct mlx5_ifc_qpc_bits {
u8 rdma_active[0x1];
u8 comm_est[0x1];
u8 suspended[0x1];
- u8 reserved_32[0x5];
+ u8 reserved_31[0x5];
u8 send_msg_psn[0x18];
- u8 reserved_33[0x8];
+ u8 reserved_32[0x8];
u8 rcv_msg_psn[0x18];
u8 rdma_va[0x40];
u8 rdma_key[0x20];
- u8 reserved_34[0x20];
+ u8 reserved_33[0x20];
};
struct mlx5_ifc_roce_addr_layout_bits {
@@ -1725,7 +1913,10 @@ struct mlx5_ifc_tisc_bits {
u8 reserved_3[0x8];
u8 transport_domain[0x18];
- u8 reserved_4[0x3c0];
+ u8 reserved_4[0x8];
+ u8 underlay_qpn[0x18];
+
+ u8 reserved_5[0x3a0];
};
enum {
@@ -1855,7 +2046,8 @@ struct mlx5_ifc_sqc_bits {
u8 reserved_2[0x8];
u8 cqn[0x18];
- u8 reserved_3[0xa0];
+ u8 reserved_3[0x90];
+ u8 packet_pacing_rate_limit_index[0x10];
u8 tis_lst_sz[0x10];
u8 reserved_4[0x10];
@@ -2095,13 +2287,15 @@ struct mlx5_ifc_hca_vport_context_bits {
u8 has_smi[0x1];
u8 has_raw[0x1];
u8 grh_required[0x1];
- u8 reserved_1[0xc];
+ u8 reserved_1[0x1];
+ u8 min_wqe_inline_mode[0x3];
+ u8 reserved_2[0x8];
u8 port_physical_state[0x4];
u8 vport_state_policy[0x4];
u8 port_state[0x4];
u8 vport_state[0x4];
- u8 reserved_2[0x20];
+ u8 reserved_3[0x20];
u8 system_image_guid[0x40];
@@ -2117,22 +2311,22 @@ struct mlx5_ifc_hca_vport_context_bits {
u8 cap_mask2_field_select[0x20];
- u8 reserved_3[0x80];
+ u8 reserved_4[0x80];
u8 lid[0x10];
- u8 reserved_4[0x4];
+ u8 reserved_5[0x4];
u8 init_type_reply[0x4];
u8 lmc[0x3];
u8 subnet_timeout[0x5];
u8 sm_lid[0x10];
u8 sm_sl[0x4];
- u8 reserved_5[0xc];
+ u8 reserved_6[0xc];
u8 qkey_violation_counter[0x10];
u8 pkey_violation_counter[0x10];
- u8 reserved_6[0xca0];
+ u8 reserved_7[0xca0];
};
union mlx5_ifc_hca_cap_union_bits {
@@ -2144,6 +2338,9 @@ union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap;
struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap;
struct mlx5_ifc_e_switch_cap_bits e_switch_cap;
+ struct mlx5_ifc_snapshot_cap_bits snapshot_cap;
+ struct mlx5_ifc_debug_cap_bits diagnostic_counters_cap;
+ struct mlx5_ifc_qos_cap_bits qos_cap;
u8 reserved_0[0x8000];
};
@@ -2534,6 +2731,18 @@ struct mlx5_ifc_register_loopback_control_bits {
u8 reserved_2[0x60];
};
+struct mlx5_ifc_lrh_bits {
+ u8 vl[4];
+ u8 lver[4];
+ u8 sl[4];
+ u8 reserved2[2];
+ u8 lnh[2];
+ u8 dlid[16];
+ u8 reserved5[5];
+ u8 pkt_len[11];
+ u8 slid[16];
+};
+
struct mlx5_ifc_icmd_set_wol_rol_out_bits {
u8 reserved_0[0x40];
@@ -2794,25 +3003,6 @@ struct mlx5_ifc_sqd2rts_qp_in_bits {
u8 reserved_5[0x80];
};
-struct mlx5_ifc_snapshot_cap_bits {
- u8 reserved_0[0x1d];
- u8 suspend_qp_uc[0x1];
- u8 suspend_qp_ud[0x1];
- u8 suspend_qp_rc[0x1];
-
- u8 reserved_1[0x1c];
- u8 restore_pd[0x1];
- u8 restore_uar[0x1];
- u8 restore_mkey[0x1];
- u8 restore_qp[0x1];
-
- u8 reserved_2[0x1e];
- u8 named_mkey[0x1];
- u8 named_qp[0x1];
-
- u8 reserved_3[0x7a0];
-};
-
struct mlx5_ifc_set_wol_rol_out_bits {
u8 status[0x8];
u8 reserved_0[0x18];
@@ -2990,6 +3180,13 @@ struct mlx5_ifc_set_hca_cap_in_bits {
union mlx5_ifc_hca_cap_union_bits capability;
};
+enum {
+ MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION = 0x0,
+ MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_TAG = 0x1,
+ MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST = 0x2,
+ MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS = 0x3
+};
+
struct mlx5_ifc_set_flow_table_root_out_bits {
u8 status[0x8];
u8 reserved_0[0x18];
@@ -3018,7 +3215,10 @@ struct mlx5_ifc_set_flow_table_root_in_bits {
u8 reserved_5[0x8];
u8 table_id[0x18];
- u8 reserved_6[0x140];
+ u8 reserved_6[0x8];
+ u8 underlay_qpn[0x18];
+
+ u8 reserved_7[0x120];
};
struct mlx5_ifc_set_fte_out_bits {
@@ -3690,11 +3890,31 @@ struct mlx5_ifc_query_q_counter_out_bits {
u8 out_of_buffer[0x20];
- u8 reserved_6[0x20];
+ u8 reserved_7[0x20];
u8 out_of_sequence[0x20];
- u8 reserved_7[0x620];
+ u8 reserved_8[0x20];
+
+ u8 duplicate_request[0x20];
+
+ u8 reserved_9[0x20];
+
+ u8 rnr_nak_retry_err[0x20];
+
+ u8 reserved_10[0x20];
+
+ u8 packet_seq_err[0x20];
+
+ u8 reserved_11[0x20];
+
+ u8 implied_nak_seq_err[0x20];
+
+ u8 reserved_12[0x20];
+
+ u8 local_ack_timeout_err[0x20];
+
+ u8 reserved_13[0x4e0];
};
struct mlx5_ifc_query_q_counter_in_bits {
@@ -4671,6 +4891,14 @@ struct mlx5_ifc_modify_rq_out_bits {
u8 reserved_1[0x40];
};
+struct mlx5_ifc_rq_bitmask_bits {
+ u8 reserved[0x20];
+
+ u8 reserved1[0x1e];
+ u8 vlan_strip_disable[0x1];
+ u8 reserved2[0x1];
+};
+
struct mlx5_ifc_modify_rq_in_bits {
u8 opcode[0x10];
u8 reserved_0[0x10];
@@ -4684,7 +4912,7 @@ struct mlx5_ifc_modify_rq_in_bits {
u8 reserved_3[0x20];
- u8 modify_bitmask[0x40];
+ struct mlx5_ifc_rq_bitmask_bits bitmask;
u8 reserved_4[0x40];
@@ -4737,7 +4965,9 @@ struct mlx5_ifc_modify_nic_vport_context_out_bits {
};
struct mlx5_ifc_modify_nic_vport_field_select_bits {
- u8 reserved_0[0x18];
+ u8 reserved_0[0x16];
+ u8 node_guid[0x1];
+ u8 port_guid[0x1];
u8 min_wqe_inline_mode[0x1];
u8 mtu[0x1];
u8 change_event[0x1];
@@ -4775,6 +5005,43 @@ struct mlx5_ifc_modify_hca_vport_context_out_bits {
u8 reserved_1[0x40];
};
+struct mlx5_ifc_grh_bits {
+ u8 ip_version[4];
+ u8 traffic_class[8];
+ u8 flow_label[20];
+ u8 payload_length[16];
+ u8 next_header[8];
+ u8 hop_limit[8];
+ u8 sgid[128];
+ u8 dgid[128];
+};
+
+struct mlx5_ifc_bth_bits {
+ u8 opcode[8];
+ u8 se[1];
+ u8 migreq[1];
+ u8 pad_count[2];
+ u8 tver[4];
+ u8 p_key[16];
+ u8 reserved8[8];
+ u8 dest_qp[24];
+ u8 ack_req[1];
+ u8 reserved7[7];
+ u8 psn[24];
+};
+
+struct mlx5_ifc_aeth_bits {
+ u8 syndrome[8];
+ u8 msn[24];
+};
+
+struct mlx5_ifc_dceth_bits {
+ u8 reserved0[8];
+ u8 session_id[24];
+ u8 reserved1[8];
+ u8 dci_dct[24];
+};
+
struct mlx5_ifc_modify_hca_vport_context_in_bits {
u8 opcode[0x10];
u8 reserved_0[0x10];
@@ -4801,6 +5068,14 @@ struct mlx5_ifc_modify_esw_vport_context_out_bits {
u8 reserved_1[0x40];
};
+struct mlx5_ifc_esw_vport_context_fields_select_bits {
+ u8 reserved[0x1c];
+ u8 vport_cvlan_insert[0x1];
+ u8 vport_svlan_insert[0x1];
+ u8 vport_cvlan_strip[0x1];
+ u8 vport_svlan_strip[0x1];
+};
+
struct mlx5_ifc_modify_esw_vport_context_in_bits {
u8 opcode[0x10];
u8 reserved_0[0x10];
@@ -4812,7 +5087,7 @@ struct mlx5_ifc_modify_esw_vport_context_in_bits {
u8 reserved_2[0xf];
u8 vport_number[0x10];
- u8 field_select[0x20];
+ struct mlx5_ifc_esw_vport_context_fields_select_bits field_select;
struct mlx5_ifc_esw_vport_context_bits esw_vport_context;
};
@@ -5711,6 +5986,80 @@ struct mlx5_ifc_dealloc_q_counter_out_bits {
u8 reserved_1[0x40];
};
+struct mlx5_ifc_counter_id_bits {
+ u8 reserved[0x10];
+ u8 counter_id[0x10];
+};
+
+struct mlx5_ifc_set_diagnostics_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 num_of_counters[0x10];
+ u8 reserved_2[0x8];
+ u8 log_num_of_samples[0x8];
+
+ u8 single[0x1];
+ u8 repetitive[0x1];
+ u8 sync[0x1];
+ u8 clear[0x1];
+ u8 on_demand[0x1];
+ u8 enable[0x1];
+ u8 reserved_3[0x12];
+ u8 log_sample_period[0x8];
+
+ u8 reserved_4[0x80];
+
+ struct mlx5_ifc_counter_id_bits counter_id[0];
+};
+
+struct mlx5_ifc_set_diagnostics_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_query_diagnostics_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 num_of_samples[0x10];
+ u8 sample_index[0x10];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_diagnostic_counter_bits {
+ u8 counter_id[0x10];
+ u8 sample_id[0x10];
+
+ u8 time_stamp_31_0[0x20];
+
+ u8 counter_value_h[0x20];
+
+ u8 counter_value_l[0x20];
+};
+
+struct mlx5_ifc_query_diagnostics_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_diagnostic_counter_bits diag_counter[0];
+};
+
struct mlx5_ifc_dealloc_q_counter_in_bits {
u8 opcode[0x10];
u8 reserved_0[0x10];
@@ -6008,15 +6357,18 @@ struct mlx5_ifc_create_qp_in_bits {
u8 reserved_1[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x40];
+ u8 reserved_2[0x8];
+ u8 input_qpn[0x18];
+
+ u8 reserved_3[0x20];
u8 opt_param_mask[0x20];
- u8 reserved_3[0x20];
+ u8 reserved_4[0x20];
struct mlx5_ifc_qpc_bits qpc;
- u8 reserved_4[0x80];
+ u8 reserved_5[0x80];
u8 pas[0][0x40];
};
@@ -6577,6 +6929,30 @@ struct mlx5_ifc_activate_tracer_in_bits {
u8 reserved_2[0x20];
};
+struct mlx5_ifc_set_rate_limit_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_set_rate_limit_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 rate_limit_index[0x10];
+
+ u8 reserved_at_60[0x20];
+
+ u8 rate_limit[0x20];
+};
+
struct mlx5_ifc_access_register_out_bits {
u8 status[0x8];
u8 reserved_0[0x18];
@@ -6763,37 +7139,45 @@ enum {
};
struct mlx5_ifc_ptys_reg_bits {
- u8 reserved_0[0x8];
+ u8 reserved_0[0x1];
+ u8 an_disable_admin[0x1];
+ u8 an_disable_cap[0x1];
+ u8 reserved_1[0x4];
+ u8 force_tx_aba_param[0x1];
u8 local_port[0x8];
- u8 reserved_1[0xd];
+ u8 reserved_2[0xd];
u8 proto_mask[0x3];
- u8 reserved_2[0x40];
+ u8 an_status[0x4];
+ u8 reserved_3[0xc];
+ u8 data_rate_oper[0x10];
+
+ u8 fc_proto_capability[0x20];
u8 eth_proto_capability[0x20];
u8 ib_link_width_capability[0x10];
u8 ib_proto_capability[0x10];
- u8 reserved_3[0x20];
+ u8 fc_proto_admin[0x20];
u8 eth_proto_admin[0x20];
u8 ib_link_width_admin[0x10];
u8 ib_proto_admin[0x10];
- u8 reserved_4[0x20];
+ u8 fc_proto_oper[0x20];
u8 eth_proto_oper[0x20];
u8 ib_link_width_oper[0x10];
u8 ib_proto_oper[0x10];
- u8 reserved_5[0x20];
+ u8 reserved_4[0x20];
u8 eth_proto_lp_advertise[0x20];
- u8 reserved_6[0x60];
+ u8 reserved_5[0x60];
};
struct mlx5_ifc_ptas_reg_bits {
@@ -7278,7 +7662,10 @@ enum {
};
struct mlx5_ifc_pfcc_reg_bits {
- u8 reserved_0[0x8];
+ u8 dcbx_operation_type[0x2];
+ u8 cap_local_admin[0x1];
+ u8 cap_remote_admin[0x1];
+ u8 reserved_0[0x4];
u8 local_port[0x8];
u8 pnat[0x2];
u8 reserved_1[0xc];
@@ -7295,13 +7682,15 @@ struct mlx5_ifc_pfcc_reg_bits {
u8 aptx[0x1];
u8 reserved_4[0x6];
u8 pfctx[0x8];
- u8 reserved_5[0x10];
+ u8 reserved_5[0x8];
+ u8 cbftx[0x8];
u8 pprx[0x1];
u8 aprx[0x1];
u8 reserved_6[0x6];
u8 pfcrx[0x8];
- u8 reserved_7[0x10];
+ u8 reserved_7[0x8];
+ u8 cbfrx[0x8];
u8 reserved_8[0x80];
};
@@ -7952,7 +8341,10 @@ struct mlx5_ifc_register_diag_buffer_ctrl_bits {
};
struct mlx5_ifc_qtct_reg_bits {
- u8 reserved_0[0x8];
+ u8 operation_type[0x2];
+ u8 cap_local_admin[0x1];
+ u8 cap_remote_admin[0x1];
+ u8 reserved_0[0x4];
u8 port_number[0x8];
u8 reserved_1[0xd];
u8 prio[0x3];
@@ -8409,6 +8801,139 @@ struct mlx5_ifc_ppcnt_reg_bits {
union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set;
};
+struct mlx5_ifc_pcie_performance_counters_data_layout_bits {
+ u8 life_time_counter_high[0x20];
+
+ u8 life_time_counter_low[0x20];
+
+ u8 rx_errors[0x20];
+
+ u8 tx_errors[0x20];
+
+ u8 l0_to_recovery_eieos[0x20];
+
+ u8 l0_to_recovery_ts[0x20];
+
+ u8 l0_to_recovery_framing[0x20];
+
+ u8 l0_to_recovery_retrain[0x20];
+
+ u8 crc_error_dllp[0x20];
+
+ u8 crc_error_tlp[0x20];
+
+ u8 reserved_0[0x680];
+};
+
+struct mlx5_ifc_pcie_timers_and_states_data_layout_bits {
+ u8 life_time_counter_high[0x20];
+
+ u8 life_time_counter_low[0x20];
+
+ u8 time_to_boot_image_start[0x20];
+
+ u8 time_to_link_image[0x20];
+
+ u8 calibration_time[0x20];
+
+ u8 time_to_first_perst[0x20];
+
+ u8 time_to_detect_state[0x20];
+
+ u8 time_to_l0[0x20];
+
+ u8 time_to_crs_en[0x20];
+
+ u8 time_to_plastic_image_start[0x20];
+
+ u8 time_to_iron_image_start[0x20];
+
+ u8 perst_handler[0x20];
+
+ u8 times_in_l1[0x20];
+
+ u8 times_in_l23[0x20];
+
+ u8 dl_down[0x20];
+
+ u8 config_cycle1usec[0x20];
+
+ u8 config_cycle2to7usec[0x20];
+
+ u8 config_cycle8to15usec[0x20];
+
+ u8 config_cycle16to63usec[0x20];
+
+ u8 config_cycle64usec[0x20];
+
+ u8 correctable_err_msg_sent[0x20];
+
+ u8 non_fatal_err_msg_sent[0x20];
+
+ u8 fatal_err_msg_sent[0x20];
+
+ u8 reserved_0[0x4e0];
+};
+
+struct mlx5_ifc_pcie_lanes_counters_data_layout_bits {
+ u8 life_time_counter_high[0x20];
+
+ u8 life_time_counter_low[0x20];
+
+ u8 error_counter_lane0[0x20];
+
+ u8 error_counter_lane1[0x20];
+
+ u8 error_counter_lane2[0x20];
+
+ u8 error_counter_lane3[0x20];
+
+ u8 error_counter_lane4[0x20];
+
+ u8 error_counter_lane5[0x20];
+
+ u8 error_counter_lane6[0x20];
+
+ u8 error_counter_lane7[0x20];
+
+ u8 error_counter_lane8[0x20];
+
+ u8 error_counter_lane9[0x20];
+
+ u8 error_counter_lane10[0x20];
+
+ u8 error_counter_lane11[0x20];
+
+ u8 error_counter_lane12[0x20];
+
+ u8 error_counter_lane13[0x20];
+
+ u8 error_counter_lane14[0x20];
+
+ u8 error_counter_lane15[0x20];
+
+ u8 reserved_0[0x580];
+};
+
+union mlx5_ifc_mpcnt_cntrs_grp_data_layout_bits {
+ struct mlx5_ifc_pcie_performance_counters_data_layout_bits pcie_performance_counters_data_layout;
+ struct mlx5_ifc_pcie_timers_and_states_data_layout_bits pcie_timers_and_states_data_layout;
+ struct mlx5_ifc_pcie_lanes_counters_data_layout_bits pcie_lanes_counters_data_layout;
+ u8 reserved_0[0xf8];
+};
+
+struct mlx5_ifc_mpcnt_reg_bits {
+ u8 reserved_0[0x8];
+ u8 pcie_index[0x8];
+ u8 reserved_1[0xa];
+ u8 grp[0x6];
+
+ u8 clr[0x1];
+ u8 reserved_2[0x1f];
+
+ union mlx5_ifc_mpcnt_cntrs_grp_data_layout_bits counter_set;
+};
+
union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_ib_portcntrs_attribute_grp_data_bits ib_portcntrs_attribute_grp_data;
struct mlx5_ifc_bufferx_reg_bits bufferx_reg;
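
The new SET_RATE_LIMIT layout is driven through the same
MLX5_ST_SZ_DW()/MLX5_SET() macros as the rest of this header; a sketch,
not part of the patch, assuming the rate unit is Kbps per the PRM:

	/* Sketch only: program one entry of the packet-pacing rate table. */
	static int
	example_set_rate_limit(struct mlx5_core_dev *dev, u16 index, u32 rate)
	{
		u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)] = {0};
		u32 out[MLX5_ST_SZ_DW(set_rate_limit_out)] = {0};

		MLX5_SET(set_rate_limit_in, in, opcode,
		    MLX5_CMD_OP_SET_RATE_LIMIT);
		MLX5_SET(set_rate_limit_in, in, rate_limit_index, index);
		MLX5_SET(set_rate_limit_in, in, rate_limit, rate);

		return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
		    out, sizeof(out));
	}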
diff --git a/sys/dev/mlx5/qp.h b/sys/dev/mlx5/qp.h
index f8a7575..c32a643 100644
--- a/sys/dev/mlx5/qp.h
+++ b/sys/dev/mlx5/qp.h
@@ -97,6 +97,7 @@ enum {
MLX5_QP_ST_SYNC_UMR = 0xe,
MLX5_QP_ST_PTP_1588 = 0xd,
MLX5_QP_ST_REG_UMR = 0xc,
+ MLX5_QP_ST_SW_CNAK = 0x10,
MLX5_QP_ST_MAX
};
@@ -117,6 +118,15 @@ enum {
MLX5_QP_BIT_RWE = 1 << 14,
MLX5_QP_BIT_RAE = 1 << 13,
MLX5_QP_BIT_RIC = 1 << 4,
+ MLX5_QP_BIT_COLL_SYNC_RQ = 1 << 2,
+ MLX5_QP_BIT_COLL_SYNC_SQ = 1 << 1,
+ MLX5_QP_BIT_COLL_MASTER = 1 << 0
+};
+
+enum {
+ MLX5_DCT_BIT_RRE = 1 << 19,
+ MLX5_DCT_BIT_RWE = 1 << 18,
+ MLX5_DCT_BIT_RAE = 1 << 17,
};
enum {
@@ -152,6 +162,7 @@ enum {
};
enum {
+ MLX5_QP_DRAIN_SIGERR = 1 << 26,
MLX5_QP_LAT_SENSITIVE = 1 << 28,
MLX5_QP_BLOCK_MCAST = 1 << 30,
MLX5_QP_ENABLE_SIG = 1 << 31,
@@ -188,6 +199,21 @@ struct mlx5_wqe_ctrl_seg {
};
enum {
+ MLX5_MLX_FLAG_MASK_VL15 = 0x40,
+ MLX5_MLX_FLAG_MASK_SLR = 0x20,
+ MLX5_MLX_FLAG_MASK_ICRC = 0x8,
+ MLX5_MLX_FLAG_MASK_FL = 4
+};
+
+struct mlx5_mlx_seg {
+ __be32 rsvd0;
+ u8 flags;
+ u8 stat_rate_sl;
+ u8 rsvd1[8];
+ __be16 dlid;
+};
+
+enum {
MLX5_ETH_WQE_L3_INNER_CSUM = 1 << 4,
MLX5_ETH_WQE_L4_INNER_CSUM = 1 << 5,
MLX5_ETH_WQE_L3_CSUM = 1 << 6,
@@ -462,6 +488,65 @@ struct mlx5_create_qp_mbox_in {
__be64 pas[0];
};
+struct mlx5_dct_context {
+ u8 state;
+ u8 rsvd0[7];
+ __be32 cqn;
+ __be32 flags;
+ u8 rsvd1;
+ u8 cs_res;
+ u8 min_rnr;
+ u8 rsvd2;
+ __be32 srqn;
+ __be32 pdn;
+ __be32 tclass_flow_label;
+ __be64 access_key;
+ u8 mtu;
+ u8 port;
+ __be16 pkey_index;
+ u8 rsvd4;
+ u8 mgid_index;
+ u8 rsvd5;
+ u8 hop_limit;
+ __be32 access_violations;
+ u8 rsvd[12];
+};
+
+struct mlx5_create_dct_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ u8 rsvd0[8];
+ struct mlx5_dct_context context;
+ u8 rsvd[48];
+};
+
+struct mlx5_create_dct_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ __be32 dctn;
+ u8 rsvd0[4];
+};
+
+struct mlx5_destroy_dct_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 dctn;
+ u8 rsvd0[4];
+};
+
+struct mlx5_destroy_dct_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd0[8];
+};
+
+struct mlx5_drain_dct_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 dctn;
+ u8 rsvd0[4];
+};
+
+struct mlx5_drain_dct_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd0[8];
+};
+
struct mlx5_create_qp_mbox_out {
struct mlx5_outbox_hdr hdr;
__be32 qpn;
@@ -486,6 +571,7 @@ struct mlx5_modify_qp_mbox_in {
__be32 optparam;
u8 rsvd0[4];
struct mlx5_qp_context ctx;
+ u8 rsvd2[16];
};
struct mlx5_modify_qp_mbox_out {
@@ -509,6 +595,30 @@ struct mlx5_query_qp_mbox_out {
__be64 pas[0];
};
+struct mlx5_query_dct_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 dctn;
+ u8 rsvd[4];
+};
+
+struct mlx5_query_dct_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd0[8];
+ struct mlx5_dct_context ctx;
+ u8 rsvd1[48];
+};
+
+struct mlx5_arm_dct_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 dctn;
+ u8 rsvd[4];
+};
+
+struct mlx5_arm_dct_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd0[8];
+};
+
struct mlx5_conf_sqp_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 qpn;
@@ -535,17 +645,32 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp,
struct mlx5_create_qp_mbox_in *in,
int inlen);
-int mlx5_core_qp_modify(struct mlx5_core_dev *dev, enum mlx5_qp_state cur_state,
- enum mlx5_qp_state new_state,
+int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 operation,
struct mlx5_modify_qp_mbox_in *in, int sqd_event,
struct mlx5_core_qp *qp);
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp);
int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
struct mlx5_query_qp_mbox_out *out, int outlen);
+int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,
+ struct mlx5_query_dct_mbox_out *out);
+int mlx5_core_arm_dct(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct);
int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn);
int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn);
+int mlx5_core_create_dct(struct mlx5_core_dev *dev,
+ struct mlx5_core_dct *dct,
+ struct mlx5_create_dct_mbox_in *in);
+int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
+ struct mlx5_core_dct *dct);
+int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ struct mlx5_core_qp *rq);
+void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
+ struct mlx5_core_qp *rq);
+int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ struct mlx5_core_qp *sq);
+void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
+ struct mlx5_core_qp *sq);
void mlx5_init_qp_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev);
int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
@@ -559,6 +684,7 @@ static inline const char *mlx5_qp_type_str(int type)
case MLX5_QP_ST_UD: return "UD";
case MLX5_QP_ST_XRC: return "XRC";
case MLX5_QP_ST_MLX: return "MLX";
+ case MLX5_QP_ST_DCI: return "DCI";
case MLX5_QP_ST_QP0: return "QP0";
case MLX5_QP_ST_QP1: return "QP1";
case MLX5_QP_ST_RAW_ETHERTYPE: return "RAW_ETHERTYPE";
@@ -567,6 +693,7 @@ static inline const char *mlx5_qp_type_str(int type)
case MLX5_QP_ST_SYNC_UMR: return "SYNC_UMR";
case MLX5_QP_ST_PTP_1588: return "PTP_1588";
case MLX5_QP_ST_REG_UMR: return "REG_UMR";
+ case MLX5_QP_ST_SW_CNAK: return "DC_CNAK";
default: return "Invalid transport type";
}
}
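
The DCT query and arm entry points are meant to be used together from
the key-violation event path; a sketch, not part of the patch:

	#include <dev/mlx5/qp.h>

	/* Sketch only: on a DCT key-violation event, read the violation
	 * counter from the DCT context and re-arm for the next event. */
	static int
	example_dct_key_violation(struct mlx5_core_dev *dev,
	    struct mlx5_core_dct *dct, u32 *violations)
	{
		struct mlx5_query_dct_mbox_out out;
		int err;

		err = mlx5_core_dct_query(dev, dct, &out);
		if (err)
			return err;

		*violations = be32_to_cpu(out.ctx.access_violations);

		return mlx5_core_arm_dct(dev, dct);
	}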
diff --git a/sys/dev/mlx5/vport.h b/sys/dev/mlx5/vport.h
index cf52785..edb8635 100644
--- a/sys/dev/mlx5/vport.h
+++ b/sys/dev/mlx5/vport.h
@@ -29,32 +29,51 @@
#define __MLX5_VPORT_H__

#include <dev/mlx5/driver.h>

-int mlx5_vport_alloc_q_counter(struct mlx5_core_dev *mdev,
- int *counter_set_id);
-int mlx5_vport_dealloc_q_counter(struct mlx5_core_dev *mdev,
- int counter_set_id);
+int mlx5_vport_alloc_q_counter(struct mlx5_core_dev *mdev, int client_id,
+ u16 *counter_set_id);
+int mlx5_vport_dealloc_q_counter(struct mlx5_core_dev *mdev, int client_id,
+ u16 counter_set_id);
+int mlx5_vport_query_q_counter(struct mlx5_core_dev *mdev,
+ u16 counter_set_id,
+ int reset,
+ void *out,
+ int out_size);
int mlx5_vport_query_out_of_rx_buffer(struct mlx5_core_dev *mdev,
- int counter_set_id,
+ u16 counter_set_id,
u32 *out_of_rx_buffer);
-u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod);
-int mlx5_arm_vport_context_events(struct mlx5_core_dev *mdev,
- u8 vport,
- u32 events_mask);
-int mlx5_query_vport_promisc(struct mlx5_core_dev *mdev,
- u32 vport,
- u8 *promisc_uc,
- u8 *promisc_mc,
- u8 *promisc_all);
+u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport);
+u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
+ u16 vport);
+int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
+ u16 vport, u8 state);
+
+int mlx5_query_vport_mtu(struct mlx5_core_dev *mdev, int *mtu);
+int mlx5_set_vport_mtu(struct mlx5_core_dev *mdev, int mtu);
+int mlx5_query_min_wqe_header(struct mlx5_core_dev *dev, int *min_header);
+int mlx5_set_vport_min_wqe_header(struct mlx5_core_dev *mdev, u8 vport,
+ int min_header);
+int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
+ u16 vport,
+ int *promisc_uc,
+ int *promisc_mc,
+ int *promisc_all);
+
int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
int promisc_uc,
int promisc_mc,
int promisc_all);
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
- u32 vport, u8 *addr);
+ u16 vport, u8 *addr);
+int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
+ u16 vport, u8 mac[ETH_ALEN]);
int mlx5_set_nic_vport_current_mac(struct mlx5_core_dev *mdev, int vport,
bool other_vport, u8 *addr);
-int mlx5_set_nic_vport_vlan_list(struct mlx5_core_dev *dev, u32 vport,
+int mlx5_modify_nic_vport_port_guid(struct mlx5_core_dev *mdev,
+ u32 vport, u64 port_guid);
+int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
+ u32 vport, u64 node_guid);
+int mlx5_set_nic_vport_vlan_list(struct mlx5_core_dev *dev, u16 vport,
u16 *vlan_list, int list_len);
int mlx5_set_nic_vport_mc_list(struct mlx5_core_dev *mdev, int vport,
u64 *addr_list, size_t addr_list_len);
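The queue-counter helpers above now identify the owner by a client_id and return the counter set id as a u16, and the new mlx5_vport_query_q_counter() takes a reset flag that clears the counters after reading. A sketch of a full alloc/query/dealloc round trip; the query_q_counter_out layout and its out_of_buffer field are assumed from mlx5_ifc.h:

#include <dev/mlx5/driver.h>
#include <dev/mlx5/vport.h>

static int
example_read_q_counter(struct mlx5_core_dev *mdev, int client_id)
{
	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {0};
	u16 set_id;
	int err;

	err = mlx5_vport_alloc_q_counter(mdev, client_id, &set_id);
	if (err)
		return (err);

	/* reset != 0 asks the firmware to clear the counters after reading. */
	err = mlx5_vport_query_q_counter(mdev, set_id, 1, out, sizeof(out));
	if (err == 0)
		printf("out_of_buffer: %u\n",
		    MLX5_GET(query_q_counter_out, out, out_of_buffer));

	(void)mlx5_vport_dealloc_q_counter(mdev, client_id, set_id);
	return (err);
}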
@@ -62,29 +81,34 @@ int mlx5_set_nic_vport_promisc(struct mlx5_core_dev *mdev, int vport,
bool promisc_mc, bool promisc_uc,
bool promisc_all);
int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
- u32 vport,
+ u16 vport,
enum mlx5_list_type list_type,
u8 addr_list[][ETH_ALEN],
int *list_size);
+int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
+ u16 vport,
+ u16 vlans[],
+ int *size);
+int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
+ u16 vlans[],
+ int list_size);
+int mlx5_query_nic_vport_roce_en(struct mlx5_core_dev *mdev, u8 *enable);
int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
enum mlx5_list_type list_type,
u8 addr_list[][ETH_ALEN],
int list_size);
-int mlx5_query_nic_vport_vlan_list(struct mlx5_core_dev *dev,
- u32 vport,
- u16 *vlan_list,
- int *list_size);
-int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
- u16 vlans[],
- int list_size);
int mlx5_set_nic_vport_permanent_mac(struct mlx5_core_dev *mdev, int vport,
u8 *addr);
int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev);
int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev);
int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
u64 *system_image_guid);
+int mlx5_query_vport_system_image_guid(struct mlx5_core_dev *dev,
+ u64 *sys_image_guid);
+int mlx5_query_vport_node_guid(struct mlx5_core_dev *dev, u64 *node_guid);
+int mlx5_query_vport_port_guid(struct mlx5_core_dev *dev, u64 *port_guid);
+int mlx5_query_hca_vport_state(struct mlx5_core_dev *dev, u8 *vport_state);
int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
-int mlx5_query_nic_vport_port_guid(struct mlx5_core_dev *mdev, u64 *port_guid);
int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
u16 *qkey_viol_cntr);
int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
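Several of the NIC-vport helpers above come in enable/disable or query pairs; a small sketch that brings up RoCE and reads the node GUID, rolling back the enable on failure (illustrative only):

#include <dev/mlx5/driver.h>
#include <dev/mlx5/vport.h>

static int
example_enable_roce(struct mlx5_core_dev *mdev, u64 *node_guid)
{
	int err;

	err = mlx5_nic_vport_enable_roce(mdev);
	if (err)
		return (err);

	err = mlx5_query_nic_vport_node_guid(mdev, node_guid);
	if (err)
		(void)mlx5_nic_vport_disable_roce(mdev);
	return (err);
}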