From 51ff17251c9c2c2e71974149d22bc73ea09c27cc Mon Sep 17 00:00:00 2001 From: Ram Amrani Date: Sat, 1 Oct 2016 21:59:57 +0300 Subject: qed: Add support for RoCE hw init This adds the backbone required for the various HW initializations which are necessary for the qedr driver - FW notification, resource initializations, etc. Signed-off-by: Ram Amrani Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/Makefile | 1 + drivers/net/ethernet/qlogic/qed/qed.h | 33 +- drivers/net/ethernet/qlogic/qed/qed_cxt.c | 6 + drivers/net/ethernet/qlogic/qed/qed_cxt.h | 6 + drivers/net/ethernet/qlogic/qed/qed_dev.c | 154 +++++ drivers/net/ethernet/qlogic/qed/qed_main.c | 44 +- drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 8 +- drivers/net/ethernet/qlogic/qed/qed_roce.c | 886 +++++++++++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_roce.h | 123 ++++ drivers/net/ethernet/qlogic/qed/qed_sp.h | 1 + drivers/net/ethernet/qlogic/qed/qed_spq.c | 8 + drivers/net/ethernet/qlogic/qed/qed_sriov.c | 4 +- drivers/net/ethernet/qlogic/qed/qed_vf.c | 2 +- 13 files changed, 1269 insertions(+), 7 deletions(-) create mode 100644 drivers/net/ethernet/qlogic/qed/qed_roce.c create mode 100644 drivers/net/ethernet/qlogic/qed/qed_roce.h diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile index e067098..cda0af7 100644 --- a/drivers/net/ethernet/qlogic/qed/Makefile +++ b/drivers/net/ethernet/qlogic/qed/Makefile @@ -5,3 +5,4 @@ qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \ qed_selftest.o qed_dcbx.o qed_debug.o qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o qed-$(CONFIG_QED_LL2) += qed_ll2.o +qed-$(CONFIG_INFINIBAND_QEDR) += qed_roce.o diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 91b571a..c5a098a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -35,6 +35,9 @@ extern const struct qed_common_ops qed_common_ops_pass; #define QED_WFQ_UNIT 100 +#define QED_WID_SIZE (1024) +#define QED_PF_DEMS_SIZE (4) + /* cau states */ enum qed_coalescing_mode { QED_COAL_MODE_DISABLE, @@ -50,6 +53,14 @@ enum qed_mcp_protocol_type; static inline u32 qed_db_addr(u32 cid, u32 DEMS) { u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) | + (cid * QED_PF_DEMS_SIZE); + + return db_addr; +} + +static inline u32 qed_db_addr_vf(u32 cid, u32 DEMS) +{ + u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) | FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid); return db_addr; @@ -152,14 +163,17 @@ enum QED_RESOURCES { QED_RL, QED_MAC, QED_VLAN, + QED_RDMA_CNQ_RAM, QED_ILT, QED_LL2_QUEUE, + QED_RDMA_STATS_QUEUE, QED_MAX_RESC, }; enum QED_FEATURE { QED_PF_L2_QUE, QED_VF, + QED_RDMA_CNQ, QED_MAX_FEATURES, }; @@ -364,6 +378,7 @@ struct qed_hwfn { /* Protocol related */ bool using_ll2; struct qed_ll2_info *p_ll2_info; + struct qed_rdma_info *p_rdma_info; struct qed_pf_params pf_params; bool b_rdma_enabled_in_prs; @@ -402,6 +417,17 @@ struct qed_hwfn { struct dbg_tools_data dbg_info; + /* PWM region specific data */ + u32 dpi_size; + u32 dpi_count; + + /* This is used to calculate the doorbell address */ + u32 dpi_start_offset; + + /* If one of the following is set then EDPM shouldn't be used */ + u8 dcbx_no_edpm; + u8 db_bar_no_edpm; + struct qed_simd_fp_handler simd_proto_handler[64]; #ifdef CONFIG_QED_SRIOV @@ -435,6 +461,8 @@ struct qed_int_params { bool fp_initialized; u8 fp_msix_base; u8 fp_msix_cnt; + u8 rdma_msix_base; + u8 
rdma_msix_cnt; }; struct qed_dbg_feature { @@ -541,7 +569,6 @@ struct qed_dev { bool b_is_vf; u32 drv_type; - struct qed_eth_stats *reset_stats; struct qed_fw_data *fw_data; @@ -574,6 +601,10 @@ struct qed_dev { #endif const struct firmware *firmware; + + u32 rdma_max_sge; + u32 rdma_max_inline; + u32 rdma_max_srq_sge; }; #define NUM_OF_VFS(dev) MAX_NUM_VFS_BB diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index d9bea2a..82370a1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -48,7 +48,13 @@ #define TM_ELEM_SIZE 4 /* ILT constants */ +#if IS_ENABLED(CONFIG_INFINIBAND_QEDR) +/* For RoCE we configure to 64K to cover for RoCE max tasks 256K purpose. */ +#define ILT_DEFAULT_HW_P_SIZE 4 +#else #define ILT_DEFAULT_HW_P_SIZE 3 +#endif + #define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12)) #define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h index c6f6f2e..d00ad05 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h @@ -170,6 +170,12 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); */ void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid); +int qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn, + enum qed_cxt_elem_type elem_type, u32 iid); +u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn, + enum protocol_type type); +u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn, + enum protocol_type type); #define QED_CTX_WORKING_MEM 0 #define QED_CTX_FL_MEM 1 diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 9a8e153..754f6a9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -35,9 +35,13 @@ #include "qed_sp.h" #include "qed_sriov.h" #include "qed_vf.h" +#include "qed_roce.h" static DEFINE_SPINLOCK(qm_lock); +#define QED_MIN_DPIS (4) +#define QED_MIN_PWM_REGION (QED_WID_SIZE * QED_MIN_DPIS) + /* API common to all protocols */ enum BAR_ID { BAR_ID_0, /* used for GRC */ @@ -787,6 +791,136 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, return rc; } +static int +qed_hw_init_dpi_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus) +{ + u32 dpi_page_size_1, dpi_page_size_2, dpi_page_size; + u32 dpi_bit_shift, dpi_count; + u32 min_dpis; + + /* Calculate DPI size */ + dpi_page_size_1 = QED_WID_SIZE * n_cpus; + dpi_page_size_2 = max_t(u32, QED_WID_SIZE, PAGE_SIZE); + dpi_page_size = max_t(u32, dpi_page_size_1, dpi_page_size_2); + dpi_page_size = roundup_pow_of_two(dpi_page_size); + dpi_bit_shift = ilog2(dpi_page_size / 4096); + + dpi_count = pwm_region_size / dpi_page_size; + + min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis; + min_dpis = max_t(u32, QED_MIN_DPIS, min_dpis); + + p_hwfn->dpi_size = dpi_page_size; + p_hwfn->dpi_count = dpi_count; + + qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPI_BIT_SHIFT, dpi_bit_shift); + + if (dpi_count < min_dpis) + return -EINVAL; + + return 0; +} + +enum QED_ROCE_EDPM_MODE { + QED_ROCE_EDPM_MODE_ENABLE = 0, + QED_ROCE_EDPM_MODE_FORCE_ON = 1, + QED_ROCE_EDPM_MODE_DISABLE = 2, +}; + +static int +qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 pwm_regsize, norm_regsize; + u32 non_pwm_conn, min_addr_reg1; + u32 db_bar_size, n_cpus; + u32 roce_edpm_mode; + u32 pf_dems_shift; 
+ int rc = 0; + u8 cond; + + db_bar_size = qed_hw_bar_size(p_hwfn, BAR_ID_1); + if (p_hwfn->cdev->num_hwfns > 1) + db_bar_size /= 2; + + /* Calculate doorbell regions */ + non_pwm_conn = qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) + + qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE, + NULL) + + qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, + NULL); + norm_regsize = roundup(QED_PF_DEMS_SIZE * non_pwm_conn, 4096); + min_addr_reg1 = norm_regsize / 4096; + pwm_regsize = db_bar_size - norm_regsize; + + /* Check that the normal and PWM sizes are valid */ + if (db_bar_size < norm_regsize) { + DP_ERR(p_hwfn->cdev, + "Doorbell BAR size 0x%x is too small (normal region is 0x%0x )\n", + db_bar_size, norm_regsize); + return -EINVAL; + } + + if (pwm_regsize < QED_MIN_PWM_REGION) { + DP_ERR(p_hwfn->cdev, + "PWM region size 0x%0x is too small. Should be at least 0x%0x (Doorbell BAR size is 0x%x and normal region size is 0x%0x)\n", + pwm_regsize, + QED_MIN_PWM_REGION, db_bar_size, norm_regsize); + return -EINVAL; + } + + /* Calculate number of DPIs */ + roce_edpm_mode = p_hwfn->pf_params.rdma_pf_params.roce_edpm_mode; + if ((roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE) || + ((roce_edpm_mode == QED_ROCE_EDPM_MODE_FORCE_ON))) { + /* Either EDPM is mandatory, or we are attempting to allocate a + * WID per CPU. + */ + n_cpus = num_active_cpus(); + rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus); + } + + cond = (rc && (roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE)) || + (roce_edpm_mode == QED_ROCE_EDPM_MODE_DISABLE); + if (cond || p_hwfn->dcbx_no_edpm) { + /* Either EDPM is disabled from user configuration, or it is + * disabled via DCBx, or it is not mandatory and we failed to + * allocated a WID per CPU. + */ + n_cpus = 1; + rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus); + + if (cond) + qed_rdma_dpm_bar(p_hwfn, p_ptt); + } + + DP_INFO(p_hwfn, + "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s\n", + norm_regsize, + pwm_regsize, + p_hwfn->dpi_size, + p_hwfn->dpi_count, + ((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ? + "disabled" : "enabled"); + + if (rc) { + DP_ERR(p_hwfn, + "Failed to allocate enough DPIs. Allocated %d but the current minimum is %d.\n", + p_hwfn->dpi_count, + p_hwfn->pf_params.rdma_pf_params.min_dpis); + return -EINVAL; + } + + p_hwfn->dpi_start_offset = norm_regsize; + + /* DEMS size is configured log2 of DWORDs, hence the division by 4 */ + pf_dems_shift = ilog2(QED_PF_DEMS_SIZE / 4); + qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift); + qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1); + + return 0; +} + static int qed_hw_init_port(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, int hw_mode) { @@ -860,6 +994,10 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, /* Pure runtime initializations - directly to the HW */ qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true); + rc = qed_hw_init_pf_doorbell_bar(p_hwfn, p_ptt); + if (rc) + return rc; + if (b_hw_start) { /* enable interrupts */ qed_int_igu_enable(p_hwfn, p_ptt, int_mode); @@ -1284,6 +1422,19 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) u32 *feat_num = p_hwfn->hw_info.feat_num; int num_features = 1; +#if IS_ENABLED(CONFIG_INFINIBAND_QEDR) + /* Roce CNQ each requires: 1 status block + 1 CNQ. 
We divide the + * status blocks equally between L2 / RoCE but with consideration as + * to how many l2 queues / cnqs we have + */ + if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) { + num_features++; + + feat_num[QED_RDMA_CNQ] = + min_t(u32, RESC_NUM(p_hwfn, QED_SB) / num_features, + RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM)); + } +#endif feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) / num_features, RESC_NUM(p_hwfn, QED_L2_QUEUE)); @@ -1325,6 +1476,9 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn) num_funcs; resc_num[QED_ILT] = PXP_NUM_ILT_RECORDS_BB / num_funcs; resc_num[QED_LL2_QUEUE] = MAX_NUM_LL2_RX_QUEUES / num_funcs; + resc_num[QED_RDMA_CNQ_RAM] = NUM_OF_CMDQS_CQS / num_funcs; + resc_num[QED_RDMA_STATS_QUEUE] = RDMA_NUM_STATISTIC_COUNTERS_BB / + num_funcs; for (i = 0; i < QED_MAX_RESC; i++) resc_start[i] = resc_num[i] * enabled_func_idx; diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 48cdf62..4ee3151 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -33,6 +33,11 @@ #include "qed_hw.h" #include "qed_selftest.h" +#if IS_ENABLED(CONFIG_INFINIBAND_QEDR) +#define QED_ROCE_QPS (8192) +#define QED_ROCE_DPIS (8) +#endif + static char version[] = "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n"; @@ -206,8 +211,8 @@ int qed_fill_dev_info(struct qed_dev *cdev, dev_info->pci_mem_start = cdev->pci_params.mem_start; dev_info->pci_mem_end = cdev->pci_params.mem_end; dev_info->pci_irq = cdev->pci_params.irq; - dev_info->rdma_supported = - (cdev->hwfns[0].hw_info.personality == QED_PCI_ETH_ROCE); + dev_info->rdma_supported = (cdev->hwfns[0].hw_info.personality == + QED_PCI_ETH_ROCE); dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]); ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr); @@ -677,6 +682,9 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev, enum qed_int_mode int_mode) { struct qed_sb_cnt_info sb_cnt_info; +#if IS_ENABLED(CONFIG_INFINIBAND_QEDR) + int num_l2_queues; +#endif int rc; int i; @@ -707,6 +715,31 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev, cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors - cdev->num_hwfns; +#if IS_ENABLED(CONFIG_INFINIBAND_QEDR) + num_l2_queues = 0; + for_each_hwfn(cdev, i) + num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE); + + DP_VERBOSE(cdev, QED_MSG_RDMA, + "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n", + cdev->int_params.fp_msix_cnt, num_l2_queues); + + if (cdev->int_params.fp_msix_cnt > num_l2_queues) { + cdev->int_params.rdma_msix_cnt = + (cdev->int_params.fp_msix_cnt - num_l2_queues) + / cdev->num_hwfns; + cdev->int_params.rdma_msix_base = + cdev->int_params.fp_msix_base + num_l2_queues; + cdev->int_params.fp_msix_cnt = num_l2_queues; + } else { + cdev->int_params.rdma_msix_cnt = 0; + } + + DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n", + cdev->int_params.rdma_msix_cnt, + cdev->int_params.rdma_msix_base); +#endif + return 0; } @@ -810,6 +843,13 @@ static void qed_update_pf_params(struct qed_dev *cdev, { int i; +#if IS_ENABLED(CONFIG_INFINIBAND_QEDR) + params->rdma_pf_params.num_qps = QED_ROCE_QPS; + params->rdma_pf_params.min_dpis = QED_ROCE_DPIS; + /* divide by 3 the MRs to avoid MF ILT overflow */ + params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS; + params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX; +#endif for (i = 0; i < cdev->num_hwfns; i++) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; diff 
--git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index e75738d..b414a05 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -1448,5 +1448,11 @@ 0x620000UL #define PHY_PCIE_REG_PHY1 \ 0x624000UL - +#define NIG_REG_ROCE_DUPLICATE_TO_HOST 0x5088f0UL +#define PRS_REG_LIGHT_L2_ETHERTYPE_EN 0x1f0968UL +#define NIG_REG_LLH_ENG_CLS_ENG_ID_TBL 0x501b90UL +#define DORQ_REG_PF_DPM_ENABLE 0x100510UL +#define DORQ_REG_PF_ICID_BIT_SHIFT_NORM 0x100448UL +#define DORQ_REG_PF_MIN_ADDR_REG1 0x100400UL +#define DORQ_REG_PF_DPI_BIT_SHIFT 0x100450UL #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c new file mode 100644 index 0000000..4c53b85 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -0,0 +1,886 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2016 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "qed.h" +#include "qed_cxt.h" +#include "qed_hsi.h" +#include "qed_hw.h" +#include "qed_init_ops.h" +#include "qed_int.h" +#include "qed_ll2.h" +#include "qed_mcp.h" +#include "qed_reg_addr.h" +#include "qed_sp.h" +#include "qed_roce.h" + +void qed_async_roce_event(struct qed_hwfn *p_hwfn, + struct event_ring_entry *p_eqe) +{ + struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; + + p_rdma_info->events.affiliated_event(p_rdma_info->events.context, + p_eqe->opcode, &p_eqe->data); +} + +static int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn, + struct qed_bmap *bmap, u32 max_count) +{ + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count); + + bmap->max_count = max_count; + + bmap->bitmap = kzalloc(BITS_TO_LONGS(max_count) * sizeof(long), + GFP_KERNEL); + if (!bmap->bitmap) { + DP_NOTICE(p_hwfn, + "qed bmap alloc failed: cannot allocate memory (bitmap)\n"); + return -ENOMEM; + } + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocated bitmap %p\n", + bmap->bitmap); + return 0; +} + +static int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn, + struct qed_bmap *bmap, u32 *id_num) +{ + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "bmap = %p\n", bmap); + + *id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count); + + if (*id_num >= bmap->max_count) { + DP_NOTICE(p_hwfn, "no id available max_count=%d\n", + bmap->max_count); + return -EINVAL; + } + + __set_bit(*id_num, bmap->bitmap); + + return 0; +} + +static void qed_bmap_release_id(struct qed_hwfn *p_hwfn, + struct qed_bmap *bmap, u32 id_num) +{ + bool b_acquired; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "id_num = %08x", id_num); + if (id_num >= bmap->max_count) + return; + + b_acquired = test_and_clear_bit(id_num, bmap->bitmap); + if (!b_acquired) { + DP_NOTICE(p_hwfn, "ID %d already released\n", id_num); + return; + } +} + +u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id) +{ + /* First sb id for RoCE is after all the l2 sb */ + return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id; +} + +u32 qed_rdma_query_cau_timer_res(void *rdma_cxt) +{ + return QED_CAU_DEF_RX_TIMER_RES; +} + +static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_rdma_start_in_params *params) +{ + struct qed_rdma_info *p_rdma_info; + u32 num_cons, num_tasks; + int rc = -ENOMEM; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n"); + + /* Allocate a struct with current pf rdma info */ + p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL); + if (!p_rdma_info) { + DP_NOTICE(p_hwfn, + "qed rdma alloc failed: cannot allocate memory (rdma info). rc = %d\n", + rc); + return rc; + } + + p_hwfn->p_rdma_info = p_rdma_info; + p_rdma_info->proto = PROTOCOLID_ROCE; + + num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto, 0); + + p_rdma_info->num_qps = num_cons / 2; + + num_tasks = qed_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE); + + /* Each MR uses a single task */ + p_rdma_info->num_mrs = num_tasks; + + /* Queue zone lines are shared between RoCE and L2 in such a way that + * they can be used by each without obstructing the other. 
+ */ + p_rdma_info->queue_zone_base = (u16)FEAT_NUM(p_hwfn, QED_L2_QUEUE); + + /* Allocate a struct with device params and fill it */ + p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL); + if (!p_rdma_info->dev) { + DP_NOTICE(p_hwfn, + "qed rdma alloc failed: cannot allocate memory (rdma info dev). rc = %d\n", + rc); + goto free_rdma_info; + } + + /* Allocate a struct with port params and fill it */ + p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL); + if (!p_rdma_info->port) { + DP_NOTICE(p_hwfn, + "qed rdma alloc failed: cannot allocate memory (rdma info port). rc = %d\n", + rc); + goto free_rdma_dev; + } + + /* Allocate bit map for pd's */ + rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Failed to allocate pd_map, rc = %d\n", + rc); + goto free_rdma_port; + } + + /* Allocate DPI bitmap */ + rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map, + p_hwfn->dpi_count); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Failed to allocate DPI bitmap, rc = %d\n", rc); + goto free_pd_map; + } + + /* Allocate bitmap for cq's. The maximum number of CQs is bounded to + * twice the number of QPs. + */ + rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map, + p_rdma_info->num_qps * 2); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Failed to allocate cq bitmap, rc = %d\n", rc); + goto free_dpi_map; + } + + /* Allocate bitmap for toggle bit for cq icids + * We toggle the bit every time we create or resize cq for a given icid. + * The maximum number of CQs is bounded to twice the number of QPs. + */ + rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits, + p_rdma_info->num_qps * 2); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Failed to allocate toogle bits, rc = %d\n", rc); + goto free_cq_map; + } + + /* Allocate bitmap for itids */ + rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map, + p_rdma_info->num_mrs); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Failed to allocate itids bitmaps, rc = %d\n", rc); + goto free_toggle_map; + } + + /* Allocate bitmap for cids used for qps. 
*/ + rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Failed to allocate cid bitmap, rc = %d\n", rc); + goto free_tid_map; + } + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n"); + return 0; + +free_tid_map: + kfree(p_rdma_info->tid_map.bitmap); +free_toggle_map: + kfree(p_rdma_info->toggle_bits.bitmap); +free_cq_map: + kfree(p_rdma_info->cq_map.bitmap); +free_dpi_map: + kfree(p_rdma_info->dpi_map.bitmap); +free_pd_map: + kfree(p_rdma_info->pd_map.bitmap); +free_rdma_port: + kfree(p_rdma_info->port); +free_rdma_dev: + kfree(p_rdma_info->dev); +free_rdma_info: + kfree(p_rdma_info); + + return rc; +} + +void qed_rdma_resc_free(struct qed_hwfn *p_hwfn) +{ + struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; + + kfree(p_rdma_info->cid_map.bitmap); + kfree(p_rdma_info->tid_map.bitmap); + kfree(p_rdma_info->toggle_bits.bitmap); + kfree(p_rdma_info->cq_map.bitmap); + kfree(p_rdma_info->dpi_map.bitmap); + kfree(p_rdma_info->pd_map.bitmap); + + kfree(p_rdma_info->port); + kfree(p_rdma_info->dev); + + kfree(p_rdma_info); +} + +static void qed_rdma_free(struct qed_hwfn *p_hwfn) +{ + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n"); + + qed_rdma_resc_free(p_hwfn); +} + +static void qed_rdma_get_guid(struct qed_hwfn *p_hwfn, u8 *guid) +{ + guid[0] = p_hwfn->hw_info.hw_mac_addr[0] ^ 2; + guid[1] = p_hwfn->hw_info.hw_mac_addr[1]; + guid[2] = p_hwfn->hw_info.hw_mac_addr[2]; + guid[3] = 0xff; + guid[4] = 0xfe; + guid[5] = p_hwfn->hw_info.hw_mac_addr[3]; + guid[6] = p_hwfn->hw_info.hw_mac_addr[4]; + guid[7] = p_hwfn->hw_info.hw_mac_addr[5]; +} + +static void qed_rdma_init_events(struct qed_hwfn *p_hwfn, + struct qed_rdma_start_in_params *params) +{ + struct qed_rdma_events *events; + + events = &p_hwfn->p_rdma_info->events; + + events->unaffiliated_event = params->events->unaffiliated_event; + events->affiliated_event = params->events->affiliated_event; + events->context = params->events->context; +} + +static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn, + struct qed_rdma_start_in_params *params) +{ + struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev; + struct qed_dev *cdev = p_hwfn->cdev; + u32 pci_status_control; + u32 num_qps; + + /* Vendor specific information */ + dev->vendor_id = cdev->vendor_id; + dev->vendor_part_id = cdev->device_id; + dev->hw_ver = 0; + dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | + (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION); + + qed_rdma_get_guid(p_hwfn, (u8 *)&dev->sys_image_guid); + dev->node_guid = dev->sys_image_guid; + + dev->max_sge = min_t(u32, RDMA_MAX_SGE_PER_SQ_WQE, + RDMA_MAX_SGE_PER_RQ_WQE); + + if (cdev->rdma_max_sge) + dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge); + + dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE; + + dev->max_inline = (cdev->rdma_max_inline) ? + min_t(u32, cdev->rdma_max_inline, dev->max_inline) : + dev->max_inline; + + dev->max_wqe = QED_RDMA_MAX_WQE; + dev->max_cnq = (u8)FEAT_NUM(p_hwfn, QED_RDMA_CNQ); + + /* The number of QPs may be higher than QED_ROCE_MAX_QPS, because + * it is up-aligned to 16 and then to ILT page size within qed cxt. + * This is OK in terms of ILT but we don't want to configure the FW + * above its abilities + */ + num_qps = ROCE_MAX_QPS; + num_qps = min_t(u64, num_qps, p_hwfn->p_rdma_info->num_qps); + dev->max_qp = num_qps; + + /* CQs uses the same icids that QPs use hence they are limited by the + * number of icids. There are two icids per QP. 
+ */ + dev->max_cq = num_qps * 2; + + /* The number of mrs is smaller by 1 since the first is reserved */ + dev->max_mr = p_hwfn->p_rdma_info->num_mrs - 1; + dev->max_mr_size = QED_RDMA_MAX_MR_SIZE; + + /* The maximum CQE capacity per CQ supported. + * max number of cqes will be in two layer pbl, + * 8 is the pointer size in bytes + * 32 is the size of cq element in bytes + */ + if (params->cq_mode == QED_RDMA_CQ_MODE_32_BITS) + dev->max_cqe = QED_RDMA_MAX_CQE_32_BIT; + else + dev->max_cqe = QED_RDMA_MAX_CQE_16_BIT; + + dev->max_mw = 0; + dev->max_fmr = QED_RDMA_MAX_FMR; + dev->max_mr_mw_fmr_pbl = (PAGE_SIZE / 8) * (PAGE_SIZE / 8); + dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE; + dev->max_pkey = QED_RDMA_MAX_P_KEY; + + dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE / + (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2); + dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE / + RDMA_REQ_RD_ATOMIC_ELM_SIZE; + dev->max_dev_resp_rd_atomic_resc = dev->max_qp_resp_rd_atomic_resc * + p_hwfn->p_rdma_info->num_qps; + dev->page_size_caps = QED_RDMA_PAGE_SIZE_CAPS; + dev->dev_ack_delay = QED_RDMA_ACK_DELAY; + dev->max_pd = RDMA_MAX_PDS; + dev->max_ah = p_hwfn->p_rdma_info->num_qps; + dev->max_stats_queues = (u8)RESC_NUM(p_hwfn, QED_RDMA_STATS_QUEUE); + + /* Set capablities */ + dev->dev_caps = 0; + SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RNR_NAK, 1); + SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT, 1); + SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT, 1); + SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RESIZE_CQ, 1); + SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_MEMORY_EXT, 1); + SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_QUEUE_EXT, 1); + SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ZBVA, 1); + SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1); + + /* Check atomic operations support in PCI configuration space. */ + pci_read_config_dword(cdev->pdev, + cdev->pdev->pcie_cap + PCI_EXP_DEVCTL2, + &pci_status_control); + + if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN) + SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1); +} + +static void qed_rdma_init_port(struct qed_hwfn *p_hwfn) +{ + struct qed_rdma_port *port = p_hwfn->p_rdma_info->port; + struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev; + + port->port_state = p_hwfn->mcp_info->link_output.link_up ? + QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN; + + port->max_msg_size = min_t(u64, + (dev->max_mr_mw_fmr_size * + p_hwfn->cdev->rdma_max_sge), + BIT(31)); + + port->pkey_bad_counter = 0; +} + +static int qed_rdma_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 ll2_ethertype_en; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW\n"); + p_hwfn->b_rdma_enabled_in_prs = false; + + qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0); + + p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE; + + /* We delay writing to this reg until first cid is allocated. 
See + * qed_cxt_dynamic_ilt_alloc function for more details + */ + ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN); + qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN, + (ll2_ethertype_en | 0x01)); + + if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) { + DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n"); + return -EINVAL; + } + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n"); + return 0; +} + +static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn, + struct qed_rdma_start_in_params *params, + struct qed_ptt *p_ptt) +{ + struct rdma_init_func_ramrod_data *p_ramrod; + struct qed_rdma_cnq_params *p_cnq_pbl_list; + struct rdma_init_func_hdr *p_params_header; + struct rdma_cnq_params *p_cnq_params; + struct qed_sp_init_data init_data; + struct qed_spq_entry *p_ent; + u32 cnq_id, sb_id; + int rc; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n"); + + /* Save the number of cnqs for the function close ramrod */ + p_hwfn->p_rdma_info->num_cnqs = params->desired_cnq; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_INIT, + p_hwfn->p_rdma_info->proto, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.roce_init_func.rdma; + + p_params_header = &p_ramrod->params_header; + p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn, + QED_RDMA_CNQ_RAM); + p_params_header->num_cnqs = params->desired_cnq; + + if (params->cq_mode == QED_RDMA_CQ_MODE_16_BITS) + p_params_header->cq_ring_mode = 1; + else + p_params_header->cq_ring_mode = 0; + + for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) { + sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id); + p_cnq_params = &p_ramrod->cnq_params[cnq_id]; + p_cnq_pbl_list = ¶ms->cnq_pbl_list[cnq_id]; + p_cnq_params->sb_num = + cpu_to_le16(p_hwfn->sbs_info[sb_id]->igu_sb_id); + + p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi; + p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages; + + DMA_REGPAIR_LE(p_cnq_params->pbl_base_addr, + p_cnq_pbl_list->pbl_ptr); + + /* we assume here that cnq_id and qz_offset are the same */ + p_cnq_params->queue_zone_num = + cpu_to_le16(p_hwfn->p_rdma_info->queue_zone_base + + cnq_id); + } + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn) +{ + struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev; + + /* The first DPI is reserved for the Kernel */ + __set_bit(0, p_hwfn->p_rdma_info->dpi_map.bitmap); + + /* Tid 0 will be used as the key for "reserved MR". + * The driver should allocate memory for it so it can be loaded but no + * ramrod should be passed on it. 
+ */ + qed_rdma_alloc_tid(p_hwfn, &dev->reserved_lkey); + if (dev->reserved_lkey != RDMA_RESERVED_LKEY) { + DP_NOTICE(p_hwfn, + "Reserved lkey should be equal to RDMA_RESERVED_LKEY\n"); + return -EINVAL; + } + + return 0; +} + +static int qed_rdma_setup(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_rdma_start_in_params *params) +{ + int rc; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n"); + + spin_lock_init(&p_hwfn->p_rdma_info->lock); + + qed_rdma_init_devinfo(p_hwfn, params); + qed_rdma_init_port(p_hwfn); + qed_rdma_init_events(p_hwfn, params); + + rc = qed_rdma_reserve_lkey(p_hwfn); + if (rc) + return rc; + + rc = qed_rdma_init_hw(p_hwfn, p_ptt); + if (rc) + return rc; + + return qed_rdma_start_fw(p_hwfn, params, p_ptt); +} + +int qed_rdma_stop(void *rdma_cxt) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + struct rdma_close_func_ramrod_data *p_ramrod; + struct qed_sp_init_data init_data; + struct qed_spq_entry *p_ent; + struct qed_ptt *p_ptt; + u32 ll2_ethertype_en; + int rc = -EBUSY; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop\n"); + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to acquire PTT\n"); + return rc; + } + + /* Disable RoCE search */ + qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0); + p_hwfn->b_rdma_enabled_in_prs = false; + + qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0); + + ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN); + + qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN, + (ll2_ethertype_en & 0xFFFE)); + + qed_ptt_release(p_hwfn, p_ptt); + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + /* Stop RoCE */ + rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_CLOSE, + p_hwfn->p_rdma_info->proto, &init_data); + if (rc) + goto out; + + p_ramrod = &p_ent->ramrod.rdma_close_func; + + p_ramrod->num_cnqs = p_hwfn->p_rdma_info->num_cnqs; + p_ramrod->cnq_start_offset = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM); + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + +out: + qed_rdma_free(p_hwfn); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop done, rc = %d\n", rc); + return rc; +} + +int qed_rdma_add_user(void *rdma_cxt, + struct qed_rdma_add_user_out_params *out_params) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + u32 dpi_start_offset; + u32 returned_id = 0; + int rc; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding User\n"); + + /* Allocate DPI */ + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, + &returned_id); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); + + out_params->dpi = (u16)returned_id; + + /* Calculate the corresponding DPI address */ + dpi_start_offset = p_hwfn->dpi_start_offset; + + out_params->dpi_addr = (u64)((u8 __iomem *)p_hwfn->doorbells + + dpi_start_offset + + ((out_params->dpi) * p_hwfn->dpi_size)); + + out_params->dpi_phys_addr = p_hwfn->cdev->db_phys_addr + + dpi_start_offset + + ((out_params->dpi) * p_hwfn->dpi_size); + + out_params->dpi_size = p_hwfn->dpi_size; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc); + return rc; +} + +struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query device\n"); + + /* Return struct with device parameters */ + return p_hwfn->p_rdma_info->dev; +} + +int 
qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + int rc; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n"); + + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + rc = qed_rdma_bmap_alloc_id(p_hwfn, + &p_hwfn->p_rdma_info->tid_map, itid); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); + if (rc) + goto out; + + rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid); +out: + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc); + return rc; +} + +void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod) +{ + struct qed_hwfn *p_hwfn; + u16 qz_num; + u32 addr; + + p_hwfn = (struct qed_hwfn *)rdma_cxt; + qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset; + addr = GTT_BAR0_MAP_REG_USDM_RAM + + USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num); + + REG_WR16(p_hwfn, addr, prod); + + /* keep prod updates ordered */ + wmb(); +} + +static int qed_fill_rdma_dev_info(struct qed_dev *cdev, + struct qed_dev_rdma_info *info) +{ + memset(info, 0, sizeof(*info)); + + info->rdma_type = QED_RDMA_TYPE_ROCE; + + qed_fill_dev_info(cdev, &info->common); + + return 0; +} + +static int qed_rdma_get_sb_start(struct qed_dev *cdev) +{ + int feat_num; + + if (cdev->num_hwfns > 1) + feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE); + else + feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE) * + cdev->num_hwfns; + + return feat_num; +} + +static int qed_rdma_get_min_cnq_msix(struct qed_dev *cdev) +{ + int n_cnq = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_RDMA_CNQ); + int n_msix = cdev->int_params.rdma_msix_cnt; + + return min_t(int, n_cnq, n_msix); +} + +static int qed_rdma_set_int(struct qed_dev *cdev, u16 cnt) +{ + int limit = 0; + + /* Mark the fastpath as free/used */ + cdev->int_params.fp_initialized = cnt ? true : false; + + if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) { + DP_ERR(cdev, + "qed roce supports only MSI-X interrupts (detected %d).\n", + cdev->int_params.out.int_mode); + return -EINVAL; + } else if (cdev->int_params.fp_msix_cnt) { + limit = cdev->int_params.rdma_msix_cnt; + } + + if (!limit) + return -ENOMEM; + + return min_t(int, cnt, limit); +} + +static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info) +{ + memset(info, 0, sizeof(*info)); + + if (!cdev->int_params.fp_initialized) { + DP_INFO(cdev, + "Protocol driver requested interrupt information, but its support is not yet configured\n"); + return -EINVAL; + } + + if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { + int msix_base = cdev->int_params.rdma_msix_base; + + info->msix_cnt = cdev->int_params.rdma_msix_cnt; + info->msix = &cdev->int_params.msix_table[msix_base]; + + DP_VERBOSE(cdev, QED_MSG_RDMA, "msix_cnt = %d msix_base=%d\n", + info->msix_cnt, msix_base); + } + + return 0; +} + +static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev) +{ + return QED_LEADING_HWFN(cdev); +} + +static void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 val; + + val = (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) ? 
0 : 1; + + qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPM_ENABLE, val); + DP_VERBOSE(p_hwfn, (QED_MSG_DCB | QED_MSG_RDMA), + "Changing DPM_EN state to %d (DCBX=%d, DB_BAR=%d)\n", + val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm); +} + +void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + p_hwfn->db_bar_no_edpm = true; + + qed_rdma_dpm_conf(p_hwfn, p_ptt); +} + +int qed_rdma_start(void *rdma_cxt, struct qed_rdma_start_in_params *params) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + struct qed_ptt *p_ptt; + int rc = -EBUSY; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "desired_cnq = %08x\n", params->desired_cnq); + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + goto err; + + rc = qed_rdma_alloc(p_hwfn, p_ptt, params); + if (rc) + goto err1; + + rc = qed_rdma_setup(p_hwfn, p_ptt, params); + if (rc) + goto err2; + + qed_ptt_release(p_hwfn, p_ptt); + + return rc; + +err2: + qed_rdma_free(p_hwfn); +err1: + qed_ptt_release(p_hwfn, p_ptt); +err: + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA start - error, rc = %d\n", rc); + return rc; +} + +static int qed_rdma_init(struct qed_dev *cdev, + struct qed_rdma_start_in_params *params) +{ + return qed_rdma_start(QED_LEADING_HWFN(cdev), params); +} + +void qed_rdma_remove_user(void *rdma_cxt, u16 dpi) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "dpi = %08x\n", dpi); + + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, dpi); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); +} + +static const struct qed_rdma_ops qed_rdma_ops_pass = { + .common = &qed_common_ops_pass, + .fill_dev_info = &qed_fill_rdma_dev_info, + .rdma_get_rdma_ctx = &qed_rdma_get_rdma_ctx, + .rdma_init = &qed_rdma_init, + .rdma_add_user = &qed_rdma_add_user, + .rdma_remove_user = &qed_rdma_remove_user, + .rdma_stop = &qed_rdma_stop, + .rdma_query_device = &qed_rdma_query_device, + .rdma_get_start_sb = &qed_rdma_get_sb_start, + .rdma_get_rdma_int = &qed_rdma_get_int, + .rdma_set_rdma_int = &qed_rdma_set_int, + .rdma_get_min_cnq_msix = &qed_rdma_get_min_cnq_msix, + .rdma_cnq_prod_update = &qed_rdma_cnq_prod_update, +}; + +const struct qed_rdma_ops *qed_get_rdma_ops() +{ + return &qed_rdma_ops_pass; +} +EXPORT_SYMBOL(qed_get_rdma_ops); diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h b/drivers/net/ethernet/qlogic/qed/qed_roce.h new file mode 100644 index 0000000..e550481 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.h @@ -0,0 +1,123 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2016 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef _QED_ROCE_H +#define _QED_ROCE_H +#include +#include +#include +#include +#include +#include +#include +#include +#include "qed.h" +#include "qed_dev_api.h" +#include "qed_hsi.h" + +#define QED_RDMA_MAX_FMR (RDMA_MAX_TIDS) +#define QED_RDMA_MAX_P_KEY (1) +#define QED_RDMA_MAX_WQE (0x7FFF) +#define QED_RDMA_MAX_SRQ_WQE_ELEM (0x7FFF) +#define QED_RDMA_PAGE_SIZE_CAPS (0xFFFFF000) +#define QED_RDMA_ACK_DELAY (15) +#define QED_RDMA_MAX_MR_SIZE (0x10000000000ULL) +#define QED_RDMA_MAX_CQS (RDMA_MAX_CQS) +#define QED_RDMA_MAX_MRS (RDMA_MAX_TIDS) +/* Add 1 for header element */ +#define QED_RDMA_MAX_SRQ_ELEM_PER_WQE (RDMA_MAX_SGE_PER_RQ_WQE + 1) +#define QED_RDMA_MAX_SGE_PER_SRQ_WQE (RDMA_MAX_SGE_PER_RQ_WQE) +#define QED_RDMA_SRQ_WQE_ELEM_SIZE (16) +#define QED_RDMA_MAX_SRQS (32 * 1024) + +#define QED_RDMA_MAX_CQE_32_BIT (0x7FFFFFFF - 1) +#define QED_RDMA_MAX_CQE_16_BIT (0x7FFF - 1) + +enum qed_rdma_toggle_bit { + QED_RDMA_TOGGLE_BIT_CLEAR = 0, + QED_RDMA_TOGGLE_BIT_SET = 1 +}; + +struct qed_bmap { + unsigned long *bitmap; + u32 max_count; +}; + +struct qed_rdma_info { + /* spin lock to protect bitmaps */ + spinlock_t lock; + + struct qed_bmap cq_map; + struct qed_bmap pd_map; + struct qed_bmap tid_map; + struct qed_bmap qp_map; + struct qed_bmap srq_map; + struct qed_bmap cid_map; + struct qed_bmap dpi_map; + struct qed_bmap toggle_bits; + struct qed_rdma_events events; + struct qed_rdma_device *dev; + struct qed_rdma_port *port; + u32 last_tid; + u8 num_cnqs; + u32 num_qps; + u32 num_mrs; + u16 queue_zone_base; + enum protocol_type proto; +}; + +int +qed_rdma_add_user(void *rdma_cxt, + struct qed_rdma_add_user_out_params *out_params); +int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd); +int qed_rdma_alloc_tid(void *rdma_cxt, u32 *tid); +int qed_rdma_deregister_tid(void *rdma_cxt, u32 tid); +void qed_rdma_free_tid(void *rdma_cxt, u32 tid); +struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt); +int +qed_rdma_register_tid(void *rdma_cxt, + struct qed_rdma_register_tid_in_params *params); +void qed_rdma_remove_user(void *rdma_cxt, u16 dpi); +int qed_rdma_start(void *p_hwfn, struct qed_rdma_start_in_params *params); +int qed_rdma_stop(void *rdma_cxt); +u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id); +u32 qed_rdma_query_cau_timer_res(void *p_hwfn); +void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 cnq_index, u16 prod); +void qed_rdma_resc_free(struct qed_hwfn *p_hwfn); +void qed_async_roce_event(struct qed_hwfn *p_hwfn, + struct event_ring_entry *p_eqe); + +#if IS_ENABLED(CONFIG_INFINIBAND_QEDR) +void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); +#else +void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {} +#endif +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index a3c539f..652c908 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -85,6 +85,7 @@ union ramrod_data { struct rdma_srq_create_ramrod_data rdma_create_srq; struct 
rdma_srq_destroy_ramrod_data rdma_destroy_srq; struct rdma_srq_modify_ramrod_data rdma_modify_srq; + struct roce_init_func_ramrod_data roce_init_func; struct iscsi_slow_path_hdr iscsi_empty; struct iscsi_init_ramrod_params iscsi_init; diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index 349af18..caff415 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -28,6 +28,9 @@ #include "qed_reg_addr.h" #include "qed_sp.h" #include "qed_sriov.h" +#if IS_ENABLED(CONFIG_INFINIBAND_QEDR) +#include "qed_roce.h" +#endif /*************************************************************************** * Structures & Definitions @@ -237,6 +240,11 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn, struct event_ring_entry *p_eqe) { switch (p_eqe->protocol_id) { +#if IS_ENABLED(CONFIG_INFINIBAND_QEDR) + case PROTOCOLID_ROCE: + qed_async_roce_event(p_hwfn, p_eqe); + return 0; +#endif case PROTOCOLID_COMMON: return qed_sriov_eqe_event(p_hwfn, p_eqe->opcode, diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index a4a3cea..d2d6621 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -1851,8 +1851,8 @@ static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn, if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) { u16 qid = mbx->req_virt->start_txq.tx_qid; - p_tlv->offset = qed_db_addr(p_vf->vf_queues[qid].fw_cid, - DQ_DEMS_LEGACY); + p_tlv->offset = qed_db_addr_vf(p_vf->vf_queues[qid].fw_cid, + DQ_DEMS_LEGACY); } qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status); diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 85334ce..abf5bf1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -544,7 +544,7 @@ int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id]; u32 db_addr; - db_addr = qed_db_addr(cid, DQ_DEMS_LEGACY); + db_addr = qed_db_addr_vf(cid, DQ_DEMS_LEGACY); *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + db_addr; } -- cgit v1.1
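For reference, the doorbell-BAR arithmetic this patch introduces can be checked in isolation. The sketch below mirrors the DPI sizing done in qed_hw_init_dpi_size() (one WID of 1 KB per active CPU, never less than a CPU page, rounded up to a power of two) and the per-DPI offset handed out by qed_rdma_add_user(). It is a standalone user-space program; the helper names and all input values (CPU count, PWM region size, normal-region size, DPI index) are invented examples, not values read from hardware or from the driver.

/*
 * Standalone sketch of the DPI sizing used by qed_hw_init_dpi_size()
 * and the per-DPI doorbell offset used by qed_rdma_add_user().
 * Constants mirror the patch (QED_WID_SIZE = 1024, QED_MIN_DPIS = 4);
 * everything else below is an illustrative example.
 */
#include <stdio.h>

#define QED_WID_SIZE   1024u
#define QED_MIN_DPIS   4u
#define CPU_PAGE_SIZE  4096u	/* stand-in for the kernel's PAGE_SIZE */

static unsigned int roundup_pow_of_two_u32(unsigned int v)
{
	unsigned int r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

static unsigned int ilog2_u32(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	/* Example inputs: 8 active CPUs and a 448 KB PWM region. */
	unsigned int n_cpus = 8;
	unsigned int pwm_region_size = 448 * 1024;

	/* One WID (1 KB) per CPU, but never below one CPU page. */
	unsigned int dpi_page_size_1 = QED_WID_SIZE * n_cpus;
	unsigned int dpi_page_size_2 = CPU_PAGE_SIZE > QED_WID_SIZE ?
				       CPU_PAGE_SIZE : QED_WID_SIZE;
	unsigned int dpi_page_size = dpi_page_size_1 > dpi_page_size_2 ?
				     dpi_page_size_1 : dpi_page_size_2;

	dpi_page_size = roundup_pow_of_two_u32(dpi_page_size);

	/* DORQ_REG_PF_DPI_BIT_SHIFT is programmed in units of 4 KB. */
	unsigned int dpi_bit_shift = ilog2_u32(dpi_page_size / 4096);
	unsigned int dpi_count = pwm_region_size / dpi_page_size;

	printf("dpi_size=%u dpi_bit_shift=%u dpi_count=%u (min %u)\n",
	       dpi_page_size, dpi_bit_shift, dpi_count, QED_MIN_DPIS);

	/* A user's doorbell window, as in qed_rdma_add_user():
	 * doorbell BAR base + normal-region size + dpi index * dpi_size.
	 */
	unsigned int dpi_start_offset = 64 * 1024;	/* example norm_regsize */
	unsigned int dpi = 3;				/* example allocated DPI */

	printf("dpi %u lives at BAR offset 0x%x, %u bytes wide\n",
	       dpi, dpi_start_offset + dpi * dpi_page_size, dpi_page_size);

	return 0;
}

With these example inputs the program reports an 8 KB DPI (bit shift 1) and 56 DPIs, comfortably above the minimum of 4.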
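The MSI-X bookkeeping added to qed_slowpath_setup_int() follows a similar split: whatever fastpath vectors remain after the L2 queues have claimed theirs are handed to the RoCE CNQs. A minimal sketch of that division, using invented vector and queue counts rather than anything probed from a device:

/* Sketch of the L2/RDMA MSI-X split in qed_slowpath_setup_int(). */
#include <stdio.h>

int main(void)
{
	int fp_msix_base = 2;	/* example: vectors 0..1 kept for slowpath */
	int fp_msix_cnt = 48;	/* example: fastpath vectors available */
	int num_l2_queues = 32;	/* example: sum of QED_PF_L2_QUE over hwfns */
	int num_hwfns = 1;

	int rdma_msix_base = 0, rdma_msix_cnt = 0;

	if (fp_msix_cnt > num_l2_queues) {
		/* Everything past the L2 queues goes to the RoCE CNQs. */
		rdma_msix_cnt = (fp_msix_cnt - num_l2_queues) / num_hwfns;
		rdma_msix_base = fp_msix_base + num_l2_queues;
		fp_msix_cnt = num_l2_queues;
	}

	printf("l2: base=%d cnt=%d  rdma: base=%d cnt=%d\n",
	       fp_msix_base, fp_msix_cnt, rdma_msix_base, rdma_msix_cnt);
	return 0;
}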
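Finally, qed_rdma_get_guid() derives the node GUID from the port MAC address in the usual EUI-64 fashion: flip the universal/local bit of the first octet and insert an ff:fe filler between the two MAC halves. A standalone sketch of that mapping, using a made-up MAC address:

/* Sketch of the MAC-to-GUID (EUI-64) mapping in qed_rdma_get_guid(). */
#include <stdint.h>
#include <stdio.h>

static void mac_to_guid(const uint8_t mac[6], uint8_t guid[8])
{
	guid[0] = mac[0] ^ 2;	/* flip the universal/local bit */
	guid[1] = mac[1];
	guid[2] = mac[2];
	guid[3] = 0xff;		/* EUI-64 filler */
	guid[4] = 0xfe;
	guid[5] = mac[3];
	guid[6] = mac[4];
	guid[7] = mac[5];
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x0e, 0x1e, 0x12, 0x34, 0x56 };
	uint8_t guid[8];
	int i;

	mac_to_guid(mac, guid);

	for (i = 0; i < 8; i++)
		printf("%02x%s", guid[i], i < 7 ? ":" : "\n");
	return 0;
}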